author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit    ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree      b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/soc/fsl
parent    Initial commit. (diff)
Adding upstream version 6.6.15.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/soc/fsl')
-rw-r--r--  drivers/soc/fsl/Kconfig                    |   55
-rw-r--r--  drivers/soc/fsl/Makefile                   |   12
-rw-r--r--  drivers/soc/fsl/dpaa2-console.c            |  331
-rw-r--r--  drivers/soc/fsl/dpio/Makefile              |    8
-rw-r--r--  drivers/soc/fsl/dpio/dpio-cmd.h            |   58
-rw-r--r--  drivers/soc/fsl/dpio/dpio-driver.c         |  337
-rw-r--r--  drivers/soc/fsl/dpio/dpio-service.c        |  898
-rw-r--r--  drivers/soc/fsl/dpio/dpio.c                |  238
-rw-r--r--  drivers/soc/fsl/dpio/dpio.h                |   94
-rw-r--r--  drivers/soc/fsl/dpio/qbman-portal.c        | 1853
-rw-r--r--  drivers/soc/fsl/dpio/qbman-portal.h        |  664
-rw-r--r--  drivers/soc/fsl/guts.c                     |  279
-rw-r--r--  drivers/soc/fsl/qbman/Kconfig              |   68
-rw-r--r--  drivers/soc/fsl/qbman/Makefile             |   13
-rw-r--r--  drivers/soc/fsl/qbman/bman.c               |  819
-rw-r--r--  drivers/soc/fsl/qbman/bman_ccsr.c          |  320
-rw-r--r--  drivers/soc/fsl/qbman/bman_portal.c        |  244
-rw-r--r--  drivers/soc/fsl/qbman/bman_priv.h          |   83
-rw-r--r--  drivers/soc/fsl/qbman/bman_test.c          |   53
-rw-r--r--  drivers/soc/fsl/qbman/bman_test.h          |   35
-rw-r--r--  drivers/soc/fsl/qbman/bman_test_api.c      |  151
-rw-r--r--  drivers/soc/fsl/qbman/dpaa_sys.c           |   89
-rw-r--r--  drivers/soc/fsl/qbman/dpaa_sys.h           |  134
-rw-r--r--  drivers/soc/fsl/qbman/qman.c               | 3053
-rw-r--r--  drivers/soc/fsl/qbman/qman_ccsr.c          |  917
-rw-r--r--  drivers/soc/fsl/qbman/qman_portal.c        |  342
-rw-r--r--  drivers/soc/fsl/qbman/qman_priv.h          |  282
-rw-r--r--  drivers/soc/fsl/qbman/qman_test.c          |   62
-rw-r--r--  drivers/soc/fsl/qbman/qman_test.h          |   34
-rw-r--r--  drivers/soc/fsl/qbman/qman_test_api.c      |  247
-rw-r--r--  drivers/soc/fsl/qbman/qman_test_stash.c    |  629
-rw-r--r--  drivers/soc/fsl/qe/Kconfig                 |   68
-rw-r--r--  drivers/soc/fsl/qe/Makefile                |   14
-rw-r--r--  drivers/soc/fsl/qe/gpio.c                  |  335
-rw-r--r--  drivers/soc/fsl/qe/qe.c                    |  682
-rw-r--r--  drivers/soc/fsl/qe/qe_common.c             |  250
-rw-r--r--  drivers/soc/fsl/qe/qe_ic.c                 |  487
-rw-r--r--  drivers/soc/fsl/qe/qe_io.c                 |  186
-rw-r--r--  drivers/soc/fsl/qe/qe_tdm.c                |  217
-rw-r--r--  drivers/soc/fsl/qe/qmc.c                   | 1536
-rw-r--r--  drivers/soc/fsl/qe/tsa.c                   |  846
-rw-r--r--  drivers/soc/fsl/qe/tsa.h                   |   42
-rw-r--r--  drivers/soc/fsl/qe/ucc.c                   |  657
-rw-r--r--  drivers/soc/fsl/qe/ucc_fast.c              |  395
-rw-r--r--  drivers/soc/fsl/qe/ucc_slow.c              |  359
-rw-r--r--  drivers/soc/fsl/qe/usb.c                   |   52
-rw-r--r--  drivers/soc/fsl/rcpm.c                     |  199
47 files changed, 18727 insertions, 0 deletions
diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig
new file mode 100644
index 0000000000..fcec6ed83d
--- /dev/null
+++ b/drivers/soc/fsl/Kconfig
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# NXP/Freescale QorIQ series SOC drivers
+#
+
+menu "NXP/Freescale QorIQ SoC drivers"
+
+source "drivers/soc/fsl/qbman/Kconfig"
+source "drivers/soc/fsl/qe/Kconfig"
+
+config FSL_GUTS
+ bool
+ select SOC_BUS
+ help
+ The global utilities block controls power management, I/O device
+ enabling, power-on reset (POR) configuration monitoring, alternate
+ function selection for multiplexed signals, and clock control.
+ This driver manages and provides access to the global utilities block.
+ Initially only reading the SVR and registering the SoC device are
+ supported. Other guts accesses, such as reading RCW, should eventually
+ be moved into this driver as well.
+
+config FSL_MC_DPIO
+ tristate "QorIQ DPAA2 DPIO driver"
+ depends on FSL_MC_BUS
+ select SOC_BUS
+ select FSL_GUTS
+ select DIMLIB
+ help
+ Driver for the DPAA2 DPIO object. A DPIO provides queue and
+ buffer management facilities for software to interact with
+ other DPAA2 objects. This driver does not expose the DPIO
+ objects individually, but groups them under a service layer
+ API.
+
+config DPAA2_CONSOLE
+ tristate "QorIQ DPAA2 console driver"
+ depends on OF && (ARCH_LAYERSCAPE || COMPILE_TEST)
+ default y
+ help
+ Console driver for DPAA2 platforms. Exports 2 char devices,
+ /dev/dpaa2_mc_console and /dev/dpaa2_aiop_console,
+ which can be used to dump the Management Complex and AIOP
+ firmware logs.
+
+config FSL_RCPM
+ bool "Freescale RCPM support"
+ depends on PM_SLEEP && (ARM || ARM64)
+ help
+ The ARM-based NXP QorIQ processors have an RCPM module (Run Control
+ and Power Management) that performs all device-level tasks associated
+ with power management, such as wakeup source control.
+ Note that this driver does not currently support PowerPC-based
+ QorIQ processors.
+endmenu
diff --git a/drivers/soc/fsl/Makefile b/drivers/soc/fsl/Makefile
new file mode 100644
index 0000000000..906f1cd8af
--- /dev/null
+++ b/drivers/soc/fsl/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Linux Kernel SOC fsl specific device drivers
+#
+
+obj-$(CONFIG_FSL_DPAA) += qbman/
+obj-$(CONFIG_QUICC_ENGINE) += qe/
+obj-$(CONFIG_CPM) += qe/
+obj-$(CONFIG_FSL_RCPM) += rcpm.o
+obj-$(CONFIG_FSL_GUTS) += guts.o
+obj-$(CONFIG_FSL_MC_DPIO) += dpio/
+obj-$(CONFIG_DPAA2_CONSOLE) += dpaa2-console.o
diff --git a/drivers/soc/fsl/dpaa2-console.c b/drivers/soc/fsl/dpaa2-console.c
new file mode 100644
index 0000000000..1dca693b6b
--- /dev/null
+++ b/drivers/soc/fsl/dpaa2-console.c
@@ -0,0 +1,331 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Freescale DPAA2 Platforms Console Driver
+ *
+ * Copyright 2015-2016 Freescale Semiconductor Inc.
+ * Copyright 2018 NXP
+ */
+
+#define pr_fmt(fmt) "dpaa2-console: " fmt
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+
+/* MC firmware base low/high registers indexes */
+#define MCFBALR_OFFSET 0
+#define MCFBAHR_OFFSET 1
+
+/* Bit masks used to get the most/least significant part of the MC base addr */
+#define MC_FW_ADDR_MASK_HIGH 0x1FFFF
+#define MC_FW_ADDR_MASK_LOW 0xE0000000
+
+#define MC_BUFFER_OFFSET 0x01000000
+#define MC_BUFFER_SIZE (1024 * 1024 * 16)
+#define MC_OFFSET_DELTA MC_BUFFER_OFFSET
+
+#define AIOP_BUFFER_OFFSET 0x06000000
+#define AIOP_BUFFER_SIZE (1024 * 1024 * 16)
+#define AIOP_OFFSET_DELTA 0
+
+#define LOG_HEADER_FLAG_BUFFER_WRAPAROUND 0x80000000
+#define LAST_BYTE(a) ((a) & ~(LOG_HEADER_FLAG_BUFFER_WRAPAROUND))
+
+/* MC and AIOP Magic words */
+#define MAGIC_MC 0x4d430100
+#define MAGIC_AIOP 0x41494F50
+
+struct log_header {
+ __le32 magic_word;
+ char reserved[4];
+ __le32 buf_start;
+ __le32 buf_length;
+ __le32 last_byte;
+};
+
+struct console_data {
+ void __iomem *map_addr;
+ struct log_header __iomem *hdr;
+ void __iomem *start_addr;
+ void __iomem *end_addr;
+ void __iomem *end_of_data;
+ void __iomem *cur_ptr;
+};
+
+static struct resource mc_base_addr;
+
+static inline void adjust_end(struct console_data *cd)
+{
+ u32 last_byte = readl(&cd->hdr->last_byte);
+
+ cd->end_of_data = cd->start_addr + LAST_BYTE(last_byte);
+}
+
+static u64 get_mc_fw_base_address(void)
+{
+ u64 mcfwbase = 0ULL;
+ u32 __iomem *mcfbaregs;
+
+ mcfbaregs = ioremap(mc_base_addr.start, resource_size(&mc_base_addr));
+ if (!mcfbaregs) {
+ pr_err("could not map MC Firmware Base registers\n");
+ return 0;
+ }
+
+ mcfwbase = readl(mcfbaregs + MCFBAHR_OFFSET) &
+ MC_FW_ADDR_MASK_HIGH;
+ mcfwbase <<= 32;
+ mcfwbase |= readl(mcfbaregs + MCFBALR_OFFSET) & MC_FW_ADDR_MASK_LOW;
+ iounmap(mcfbaregs);
+
+ pr_debug("MC base address at 0x%016llx\n", mcfwbase);
+ return mcfwbase;
+}
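For illustration, a minimal standalone sketch of the base-address assembly performed above; the register values are hypothetical, not read from real hardware:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mcfbahr = 0x00000001;	/* hypothetical MCFBAHR value */
	uint32_t mcfbalr = 0x80000000;	/* hypothetical MCFBALR value */
	uint64_t base;

	base = mcfbahr & 0x1FFFF;	/* MC_FW_ADDR_MASK_HIGH */
	base <<= 32;
	base |= mcfbalr & 0xE0000000;	/* MC_FW_ADDR_MASK_LOW */

	/* prints: MC base = 0x0000000180000000 */
	printf("MC base = 0x%016llx\n", (unsigned long long)base);
	return 0;
}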
+
+static ssize_t dpaa2_console_size(struct console_data *cd)
+{
+ ssize_t size;
+
+ if (cd->cur_ptr <= cd->end_of_data)
+ size = cd->end_of_data - cd->cur_ptr;
+ else
+ size = (cd->end_addr - cd->cur_ptr) +
+ (cd->end_of_data - cd->start_addr);
+
+ return size;
+}
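The two branches above are the usual ring-buffer occupancy computation. A hedged sketch with plain integers (the offsets are made up for illustration):

/* Bytes readable in a ring [start, end) given read cursor cur and
 * write cursor end_of_data; mirrors dpaa2_console_size() above.
 */
static long ring_avail(long start, long end, long cur, long end_of_data)
{
	if (cur <= end_of_data)
		return end_of_data - cur;		/* not wrapped */
	return (end - cur) + (end_of_data - start);	/* wrapped */
}
/* e.g. ring_avail(0, 100, 90, 10) == 10 + 10 == 20 */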
+
+static int dpaa2_generic_console_open(struct inode *node, struct file *fp,
+ u64 offset, u64 size,
+ u32 expected_magic,
+ u32 offset_delta)
+{
+ u32 read_magic, wrapped, last_byte, buf_start, buf_length;
+ struct console_data *cd;
+ u64 base_addr;
+ int err;
+
+ cd = kmalloc(sizeof(*cd), GFP_KERNEL);
+ if (!cd)
+ return -ENOMEM;
+
+ base_addr = get_mc_fw_base_address();
+ if (!base_addr) {
+ err = -EIO;
+ goto err_fwba;
+ }
+
+ cd->map_addr = ioremap(base_addr + offset, size);
+ if (!cd->map_addr) {
+ pr_err("cannot map console log memory\n");
+ err = -EIO;
+ goto err_ioremap;
+ }
+
+ cd->hdr = (struct log_header __iomem *)cd->map_addr;
+ read_magic = readl(&cd->hdr->magic_word);
+ last_byte = readl(&cd->hdr->last_byte);
+ buf_start = readl(&cd->hdr->buf_start);
+ buf_length = readl(&cd->hdr->buf_length);
+
+ if (read_magic != expected_magic) {
+ pr_warn("expected = %08x, read = %08x\n",
+ expected_magic, read_magic);
+ err = -EIO;
+ goto err_magic;
+ }
+
+ cd->start_addr = cd->map_addr + buf_start - offset_delta;
+ cd->end_addr = cd->start_addr + buf_length;
+
+ wrapped = last_byte & LOG_HEADER_FLAG_BUFFER_WRAPAROUND;
+
+ adjust_end(cd);
+ if (wrapped && cd->end_of_data != cd->end_addr)
+ cd->cur_ptr = cd->end_of_data + 1;
+ else
+ cd->cur_ptr = cd->start_addr;
+
+ fp->private_data = cd;
+
+ return 0;
+
+err_magic:
+ iounmap(cd->map_addr);
+
+err_ioremap:
+err_fwba:
+ kfree(cd);
+
+ return err;
+}
+
+static int dpaa2_mc_console_open(struct inode *node, struct file *fp)
+{
+ return dpaa2_generic_console_open(node, fp,
+ MC_BUFFER_OFFSET, MC_BUFFER_SIZE,
+ MAGIC_MC, MC_OFFSET_DELTA);
+}
+
+static int dpaa2_aiop_console_open(struct inode *node, struct file *fp)
+{
+ return dpaa2_generic_console_open(node, fp,
+ AIOP_BUFFER_OFFSET, AIOP_BUFFER_SIZE,
+ MAGIC_AIOP, AIOP_OFFSET_DELTA);
+}
+
+static int dpaa2_console_close(struct inode *node, struct file *fp)
+{
+ struct console_data *cd = fp->private_data;
+
+ iounmap(cd->map_addr);
+ kfree(cd);
+ return 0;
+}
+
+static ssize_t dpaa2_console_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct console_data *cd = fp->private_data;
+ size_t bytes = dpaa2_console_size(cd);
+ size_t bytes_end = cd->end_addr - cd->cur_ptr;
+ size_t written = 0;
+ void *kbuf;
+ int err;
+
+ /* Check if we need to adjust the end of data addr */
+ adjust_end(cd);
+
+ if (cd->end_of_data == cd->cur_ptr)
+ return 0;
+
+ if (count < bytes)
+ bytes = count;
+
+ kbuf = kmalloc(bytes, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ if (bytes > bytes_end) {
+ memcpy_fromio(kbuf, cd->cur_ptr, bytes_end);
+ if (copy_to_user(buf, kbuf, bytes_end)) {
+ err = -EFAULT;
+ goto err_free_buf;
+ }
+ buf += bytes_end;
+ cd->cur_ptr = cd->start_addr;
+ bytes -= bytes_end;
+ written += bytes_end;
+ }
+
+ memcpy_fromio(kbuf, cd->cur_ptr, bytes);
+ if (copy_to_user(buf, kbuf, bytes)) {
+ err = -EFAULT;
+ goto err_free_buf;
+ }
+ cd->cur_ptr += bytes;
+ written += bytes;
+
+ kfree(kbuf);
+ return written;
+
+err_free_buf:
+ kfree(kbuf);
+
+ return err;
+}
+
+static const struct file_operations dpaa2_mc_console_fops = {
+ .owner = THIS_MODULE,
+ .open = dpaa2_mc_console_open,
+ .release = dpaa2_console_close,
+ .read = dpaa2_console_read,
+};
+
+static struct miscdevice dpaa2_mc_console_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "dpaa2_mc_console",
+ .fops = &dpaa2_mc_console_fops
+};
+
+static const struct file_operations dpaa2_aiop_console_fops = {
+ .owner = THIS_MODULE,
+ .open = dpaa2_aiop_console_open,
+ .release = dpaa2_console_close,
+ .read = dpaa2_console_read,
+};
+
+static struct miscdevice dpaa2_aiop_console_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "dpaa2_aiop_console",
+ .fops = &dpaa2_aiop_console_fops
+};
+
+static int dpaa2_console_probe(struct platform_device *pdev)
+{
+ int error;
+
+ error = of_address_to_resource(pdev->dev.of_node, 0, &mc_base_addr);
+ if (error < 0) {
+ pr_err("of_address_to_resource() failed for %pOF with %d\n",
+ pdev->dev.of_node, error);
+ return error;
+ }
+
+ error = misc_register(&dpaa2_mc_console_dev);
+ if (error) {
+ pr_err("cannot register device %s\n",
+ dpaa2_mc_console_dev.name);
+ goto err_register_mc;
+ }
+
+ error = misc_register(&dpaa2_aiop_console_dev);
+ if (error) {
+ pr_err("cannot register device %s\n",
+ dpaa2_aiop_console_dev.name);
+ goto err_register_aiop;
+ }
+
+ return 0;
+
+err_register_aiop:
+ misc_deregister(&dpaa2_mc_console_dev);
+err_register_mc:
+ return error;
+}
+
+static int dpaa2_console_remove(struct platform_device *pdev)
+{
+ misc_deregister(&dpaa2_mc_console_dev);
+ misc_deregister(&dpaa2_aiop_console_dev);
+
+ return 0;
+}
+
+static const struct of_device_id dpaa2_console_match_table[] = {
+ { .compatible = "fsl,dpaa2-console",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, dpaa2_console_match_table);
+
+static struct platform_driver dpaa2_console_driver = {
+ .driver = {
+ .name = "dpaa2-console",
+ .pm = NULL,
+ .of_match_table = dpaa2_console_match_table,
+ },
+ .probe = dpaa2_console_probe,
+ .remove = dpaa2_console_remove,
+};
+module_platform_driver(dpaa2_console_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Roy Pledge <roy.pledge@nxp.com>");
+MODULE_DESCRIPTION("DPAA2 console driver");
diff --git a/drivers/soc/fsl/dpio/Makefile b/drivers/soc/fsl/dpio/Makefile
new file mode 100644
index 0000000000..b9ff24c765
--- /dev/null
+++ b/drivers/soc/fsl/dpio/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# QorIQ DPAA2 DPIO driver
+#
+
+obj-$(CONFIG_FSL_MC_DPIO) += fsl-mc-dpio.o
+
+fsl-mc-dpio-objs := dpio.o qbman-portal.o dpio-service.o dpio-driver.o
diff --git a/drivers/soc/fsl/dpio/dpio-cmd.h b/drivers/soc/fsl/dpio/dpio-cmd.h
new file mode 100644
index 0000000000..2fbcb78cda
--- /dev/null
+++ b/drivers/soc/fsl/dpio/dpio-cmd.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+#ifndef _FSL_DPIO_CMD_H
+#define _FSL_DPIO_CMD_H
+
+/* DPIO Version */
+#define DPIO_VER_MAJOR 4
+#define DPIO_VER_MINOR 2
+
+/* Command Versioning */
+
+#define DPIO_CMD_ID_OFFSET 4
+#define DPIO_CMD_BASE_VERSION 1
+
+#define DPIO_CMD(id) (((id) << DPIO_CMD_ID_OFFSET) | DPIO_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPIO_CMDID_CLOSE DPIO_CMD(0x800)
+#define DPIO_CMDID_OPEN DPIO_CMD(0x803)
+#define DPIO_CMDID_GET_API_VERSION DPIO_CMD(0xa03)
+#define DPIO_CMDID_ENABLE DPIO_CMD(0x002)
+#define DPIO_CMDID_DISABLE DPIO_CMD(0x003)
+#define DPIO_CMDID_GET_ATTR DPIO_CMD(0x004)
+#define DPIO_CMDID_RESET DPIO_CMD(0x005)
+#define DPIO_CMDID_SET_STASHING_DEST DPIO_CMD(0x120)
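DPIO_CMD() packs the command ID into the upper bits and the command version into the low nibble; for example, DPIO_CMDID_OPEN encodes to 0x8031. A compile-time illustration, not part of the driver:

#include <assert.h>

/* (0x803 << 4) | 1 == 0x8031 */
static_assert(((0x803 << 4) | 1) == 0x8031, "DPIO_CMDID_OPEN encoding");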
+
+struct dpio_cmd_open {
+ __le32 dpio_id;
+};
+
+#define DPIO_CHANNEL_MODE_MASK 0x3
+
+struct dpio_rsp_get_attr {
+ /* cmd word 0 */
+ __le32 id;
+ __le16 qbman_portal_id;
+ u8 num_priorities;
+ u8 channel_mode;
+ /* cmd word 1 */
+ __le64 qbman_portal_ce_addr;
+ /* cmd word 2 */
+ __le64 qbman_portal_ci_addr;
+ /* cmd word 3 */
+ __le32 qbman_version;
+ __le32 pad1;
+ /* cmd word 4 */
+ __le32 clk;
+};
+
+struct dpio_stashing_dest {
+ u8 sdest;
+};
+
+#endif /* _FSL_DPIO_CMD_H */
diff --git a/drivers/soc/fsl/dpio/dpio-driver.c b/drivers/soc/fsl/dpio/dpio-driver.c
new file mode 100644
index 0000000000..9e3fddd8f5
--- /dev/null
+++ b/drivers/soc/fsl/dpio/dpio-driver.c
@@ -0,0 +1,337 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright NXP 2016
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/sys_soc.h>
+
+#include <linux/fsl/mc.h>
+#include <soc/fsl/dpaa2-io.h>
+
+#include "qbman-portal.h"
+#include "dpio.h"
+#include "dpio-cmd.h"
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Freescale Semiconductor, Inc");
+MODULE_DESCRIPTION("DPIO Driver");
+
+struct dpio_priv {
+ struct dpaa2_io *io;
+};
+
+static cpumask_var_t cpus_unused_mask;
+
+static const struct soc_device_attribute ls1088a_soc[] = {
+ {.family = "QorIQ LS1088A"},
+ { /* sentinel */ }
+};
+
+static const struct soc_device_attribute ls2080a_soc[] = {
+ {.family = "QorIQ LS2080A"},
+ { /* sentinel */ }
+};
+
+static const struct soc_device_attribute ls2088a_soc[] = {
+ {.family = "QorIQ LS2088A"},
+ { /* sentinel */ }
+};
+
+static const struct soc_device_attribute lx2160a_soc[] = {
+ {.family = "QorIQ LX2160A"},
+ { /* sentinel */ }
+};
+
+static int dpaa2_dpio_get_cluster_sdest(struct fsl_mc_device *dpio_dev, int cpu)
+{
+ int cluster_base, cluster_size;
+
+ if (soc_device_match(ls1088a_soc)) {
+ cluster_base = 2;
+ cluster_size = 4;
+ } else if (soc_device_match(ls2080a_soc) ||
+ soc_device_match(ls2088a_soc) ||
+ soc_device_match(lx2160a_soc)) {
+ cluster_base = 0;
+ cluster_size = 2;
+ } else {
+ dev_err(&dpio_dev->dev, "unknown SoC version\n");
+ return -1;
+ }
+
+ return cluster_base + cpu / cluster_size;
+}
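The stashing destination is simply the CPU's cluster index plus a per-SoC base; on LS1088A (base 2, four CPUs per cluster), CPU 5 maps to 2 + 5/4 = 3. A small illustrative check using the LS1088A parameters above:

/* illustration only: LS1088A parameters from the function above */
static int sdest_ls1088a(int cpu)
{
	return 2 + cpu / 4;	/* cluster_base + cpu / cluster_size */
}
/* sdest_ls1088a(5) == 3, sdest_ls1088a(0) == 2 */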
+
+static irqreturn_t dpio_irq_handler(int irq_num, void *arg)
+{
+ struct device *dev = (struct device *)arg;
+ struct dpio_priv *priv = dev_get_drvdata(dev);
+
+ return dpaa2_io_irq(priv->io);
+}
+
+static void unregister_dpio_irq_handlers(struct fsl_mc_device *dpio_dev)
+{
+ struct fsl_mc_device_irq *irq;
+
+ irq = dpio_dev->irqs[0];
+
+ /* clear the affinity hint */
+ irq_set_affinity_hint(irq->virq, NULL);
+}
+
+static int register_dpio_irq_handlers(struct fsl_mc_device *dpio_dev, int cpu)
+{
+ int error;
+ struct fsl_mc_device_irq *irq;
+
+ irq = dpio_dev->irqs[0];
+ error = devm_request_irq(&dpio_dev->dev,
+ irq->virq,
+ dpio_irq_handler,
+ 0,
+ dev_name(&dpio_dev->dev),
+ &dpio_dev->dev);
+ if (error < 0) {
+ dev_err(&dpio_dev->dev,
+ "devm_request_irq() failed: %d\n",
+ error);
+ return error;
+ }
+
+ /* set the affinity hint */
+ if (irq_set_affinity_hint(irq->virq, cpumask_of(cpu)))
+ dev_err(&dpio_dev->dev,
+ "irq_set_affinity failed irq %d cpu %d\n",
+ irq->virq, cpu);
+
+ return 0;
+}
+
+static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
+{
+ struct dpio_attr dpio_attrs;
+ struct dpaa2_io_desc desc;
+ struct dpio_priv *priv;
+ int err = -ENOMEM;
+ struct device *dev = &dpio_dev->dev;
+ int possible_next_cpu;
+ int sdest;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ goto err_priv_alloc;
+
+ dev_set_drvdata(dev, priv);
+
+ err = fsl_mc_portal_allocate(dpio_dev, 0, &dpio_dev->mc_io);
+ if (err) {
+ dev_dbg(dev, "MC portal allocation failed\n");
+ err = -EPROBE_DEFER;
+ goto err_priv_alloc;
+ }
+
+ err = dpio_open(dpio_dev->mc_io, 0, dpio_dev->obj_desc.id,
+ &dpio_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpio_open() failed\n");
+ goto err_open;
+ }
+
+ err = dpio_reset(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpio_reset() failed\n");
+ goto err_reset;
+ }
+
+ err = dpio_get_attributes(dpio_dev->mc_io, 0, dpio_dev->mc_handle,
+ &dpio_attrs);
+ if (err) {
+ dev_err(dev, "dpio_get_attributes() failed %d\n", err);
+ goto err_get_attr;
+ }
+ desc.qman_version = dpio_attrs.qbman_version;
+ desc.qman_clk = dpio_attrs.clk;
+
+ err = dpio_enable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpio_enable() failed %d\n", err);
+ goto err_get_attr;
+ }
+
+ /* initialize DPIO descriptor */
+ desc.receives_notifications = dpio_attrs.num_priorities ? 1 : 0;
+ desc.has_8prio = dpio_attrs.num_priorities == 8 ? 1 : 0;
+ desc.dpio_id = dpio_dev->obj_desc.id;
+
+ /* get the cpu to use for the affinity hint */
+ possible_next_cpu = cpumask_first(cpus_unused_mask);
+ if (possible_next_cpu >= nr_cpu_ids) {
+ dev_err(dev, "probe failed. Number of DPIOs exceeds NR_CPUS.\n");
+ err = -ERANGE;
+ goto err_allocate_irqs;
+ }
+ desc.cpu = possible_next_cpu;
+ cpumask_clear_cpu(possible_next_cpu, cpus_unused_mask);
+
+ sdest = dpaa2_dpio_get_cluster_sdest(dpio_dev, desc.cpu);
+ if (sdest >= 0) {
+ err = dpio_set_stashing_destination(dpio_dev->mc_io, 0,
+ dpio_dev->mc_handle,
+ sdest);
+ if (err)
+ dev_err(dev, "dpio_set_stashing_destination failed for cpu%d\n",
+ desc.cpu);
+ }
+
+ if (dpio_dev->obj_desc.region_count < 3) {
+ /* No support for DDR backed portals, use classic mapping */
+ /*
+ * Set the CENA regs to be the cache inhibited area of the
+ * portal to avoid coherency issues if a user migrates to
+ * another core.
+ */
+ desc.regs_cena = devm_memremap(dev, dpio_dev->regions[1].start,
+ resource_size(&dpio_dev->regions[1]),
+ MEMREMAP_WC);
+ } else {
+ desc.regs_cena = devm_memremap(dev, dpio_dev->regions[2].start,
+ resource_size(&dpio_dev->regions[2]),
+ MEMREMAP_WB);
+ }
+
+ if (IS_ERR(desc.regs_cena)) {
+ dev_err(dev, "devm_memremap failed\n");
+ err = PTR_ERR(desc.regs_cena);
+ goto err_allocate_irqs;
+ }
+
+ desc.regs_cinh = devm_ioremap(dev, dpio_dev->regions[1].start,
+ resource_size(&dpio_dev->regions[1]));
+ if (!desc.regs_cinh) {
+ err = -ENOMEM;
+ dev_err(dev, "devm_ioremap failed\n");
+ goto err_allocate_irqs;
+ }
+
+ err = fsl_mc_allocate_irqs(dpio_dev);
+ if (err) {
+ dev_err(dev, "fsl_mc_allocate_irqs failed. err=%d\n", err);
+ goto err_allocate_irqs;
+ }
+
+ priv->io = dpaa2_io_create(&desc, dev);
+ if (!priv->io) {
+ dev_err(dev, "dpaa2_io_create failed\n");
+ err = -ENOMEM;
+ goto err_dpaa2_io_create;
+ }
+
+ err = register_dpio_irq_handlers(dpio_dev, desc.cpu);
+ if (err)
+ goto err_register_dpio_irq;
+
+ dev_info(dev, "probed\n");
+ dev_dbg(dev, " receives_notifications = %d\n",
+ desc.receives_notifications);
+ dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
+
+ return 0;
+
+err_dpaa2_io_create:
+ unregister_dpio_irq_handlers(dpio_dev);
+err_register_dpio_irq:
+ fsl_mc_free_irqs(dpio_dev);
+err_allocate_irqs:
+ dpio_disable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
+err_get_attr:
+err_reset:
+ dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
+err_open:
+ fsl_mc_portal_free(dpio_dev->mc_io);
+err_priv_alloc:
+ return err;
+}
+
+/* Tear down interrupts for a given DPIO object */
+static void dpio_teardown_irqs(struct fsl_mc_device *dpio_dev)
+{
+ unregister_dpio_irq_handlers(dpio_dev);
+ fsl_mc_free_irqs(dpio_dev);
+}
+
+static void dpaa2_dpio_remove(struct fsl_mc_device *dpio_dev)
+{
+ struct device *dev;
+ struct dpio_priv *priv;
+ int err = 0, cpu;
+
+ dev = &dpio_dev->dev;
+ priv = dev_get_drvdata(dev);
+ cpu = dpaa2_io_get_cpu(priv->io);
+
+ dpaa2_io_down(priv->io);
+
+ dpio_teardown_irqs(dpio_dev);
+
+ cpumask_set_cpu(cpu, cpus_unused_mask);
+
+ err = dpio_open(dpio_dev->mc_io, 0, dpio_dev->obj_desc.id,
+ &dpio_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpio_open() failed\n");
+ goto err_open;
+ }
+
+ dpio_disable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
+
+ dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
+
+err_open:
+ fsl_mc_portal_free(dpio_dev->mc_io);
+}
+
+static const struct fsl_mc_device_id dpaa2_dpio_match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpio",
+ },
+ { .vendor = 0x0 }
+};
+
+static struct fsl_mc_driver dpaa2_dpio_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = dpaa2_dpio_probe,
+ .remove = dpaa2_dpio_remove,
+ .match_id_table = dpaa2_dpio_match_id_table
+};
+
+static int dpio_driver_init(void)
+{
+ if (!zalloc_cpumask_var(&cpus_unused_mask, GFP_KERNEL))
+ return -ENOMEM;
+ cpumask_copy(cpus_unused_mask, cpu_online_mask);
+
+ return fsl_mc_driver_register(&dpaa2_dpio_driver);
+}
+
+static void dpio_driver_exit(void)
+{
+ free_cpumask_var(cpus_unused_mask);
+ fsl_mc_driver_unregister(&dpaa2_dpio_driver);
+}
+module_init(dpio_driver_init);
+module_exit(dpio_driver_exit);
diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c
new file mode 100644
index 0000000000..1d2b27e3ea
--- /dev/null
+++ b/drivers/soc/fsl/dpio/dpio-service.c
@@ -0,0 +1,898 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2019 NXP
+ *
+ */
+#include <linux/types.h>
+#include <linux/fsl/mc.h>
+#include <soc/fsl/dpaa2-io.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/dim.h>
+#include <linux/slab.h>
+
+#include "dpio.h"
+#include "qbman-portal.h"
+
+struct dpaa2_io {
+ struct dpaa2_io_desc dpio_desc;
+ struct qbman_swp_desc swp_desc;
+ struct qbman_swp *swp;
+ struct list_head node;
+ /* protect against multiple management commands */
+ spinlock_t lock_mgmt_cmd;
+ /* protect notifications list */
+ spinlock_t lock_notifications;
+ struct list_head notifications;
+ struct device *dev;
+
+ /* Net DIM */
+ struct dim rx_dim;
+ /* protect against concurrent Net DIM updates */
+ spinlock_t dim_lock;
+ u16 event_ctr;
+ u64 bytes;
+ u64 frames;
+};
+
+struct dpaa2_io_store {
+ unsigned int max;
+ dma_addr_t paddr;
+ struct dpaa2_dq *vaddr;
+ void *alloced_addr; /* unaligned value from kmalloc() */
+ unsigned int idx; /* position of the next-to-be-returned entry */
+ struct qbman_swp *swp; /* portal used to issue VDQCR */
+ struct device *dev; /* device used for DMA mapping */
+};
+
+/* keep a per cpu array of DPIOs for fast access */
+static struct dpaa2_io *dpio_by_cpu[NR_CPUS];
+static struct list_head dpio_list = LIST_HEAD_INIT(dpio_list);
+static DEFINE_SPINLOCK(dpio_list_lock);
+
+static inline struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d,
+ int cpu)
+{
+ if (d)
+ return d;
+
+ if (cpu != DPAA2_IO_ANY_CPU && cpu >= num_possible_cpus())
+ return NULL;
+
+ /*
+ * If cpu == -1, choose the current cpu, with no guarantee that the
+ * caller will not be migrated away afterwards.
+ */
+ if (cpu < 0)
+ cpu = raw_smp_processor_id();
+
+ /* If a specific cpu was requested, pick it up immediately */
+ return dpio_by_cpu[cpu];
+}
+
+static inline struct dpaa2_io *service_select(struct dpaa2_io *d)
+{
+ if (d)
+ return d;
+
+ d = service_select_by_cpu(d, -1);
+ if (d)
+ return d;
+
+ spin_lock(&dpio_list_lock);
+ d = list_entry(dpio_list.next, struct dpaa2_io, node);
+ list_del(&d->node);
+ list_add_tail(&d->node, &dpio_list);
+ spin_unlock(&dpio_list_lock);
+
+ return d;
+}
+
+/**
+ * dpaa2_io_service_select() - return a dpaa2_io service affined to this cpu
+ * @cpu: the cpu id
+ *
+ * Return the affine dpaa2_io service, or NULL if there is no service affined
+ * to the specified cpu. If DPAA2_IO_ANY_CPU is used, return the next available
+ * service.
+ */
+struct dpaa2_io *dpaa2_io_service_select(int cpu)
+{
+ if (cpu == DPAA2_IO_ANY_CPU)
+ return service_select(NULL);
+
+ return service_select_by_cpu(NULL, cpu);
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_service_select);
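A typical consumer picks the service affine to the current CPU (or any CPU) and enqueues through it. A hedged sketch using the APIs in this file, assuming a kernel context with <soc/fsl/dpaa2-io.h> included; the fqid and frame descriptor are assumed to come from the caller's own object setup:

/* sketch: enqueue one frame via an available DPIO service */
static int example_enqueue(u32 fqid, const struct dpaa2_fd *fd)
{
	struct dpaa2_io *io = dpaa2_io_service_select(DPAA2_IO_ANY_CPU);

	if (!io)
		return -ENODEV;
	/* -EBUSY means the enqueue ring was full; callers usually retry */
	return dpaa2_io_service_enqueue_fq(io, fqid, fd);
}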
+
+static void dpaa2_io_dim_work(struct work_struct *w)
+{
+ struct dim *dim = container_of(w, struct dim, work);
+ struct dim_cq_moder moder =
+ net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+ struct dpaa2_io *d = container_of(dim, struct dpaa2_io, rx_dim);
+
+ dpaa2_io_set_irq_coalescing(d, moder.usec);
+ dim->state = DIM_START_MEASURE;
+}
+
+/**
+ * dpaa2_io_create() - create a dpaa2_io object.
+ * @desc: the dpaa2_io descriptor
+ * @dev: the actual DPIO device
+ *
+ * Activates a "struct dpaa2_io" corresponding to the given config of an actual
+ * DPIO object.
+ *
+ * Return a valid dpaa2_io object for success, or NULL for failure.
+ */
+struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
+ struct device *dev)
+{
+ struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
+ u32 qman_256_cycles_per_ns;
+
+ if (!obj)
+ return NULL;
+
+ /* check if CPU is out of range (-1 means any cpu) */
+ if (desc->cpu != DPAA2_IO_ANY_CPU && desc->cpu >= num_possible_cpus()) {
+ kfree(obj);
+ return NULL;
+ }
+
+ obj->dpio_desc = *desc;
+ obj->swp_desc.cena_bar = obj->dpio_desc.regs_cena;
+ obj->swp_desc.cinh_bar = obj->dpio_desc.regs_cinh;
+ obj->swp_desc.qman_clk = obj->dpio_desc.qman_clk;
+ obj->swp_desc.qman_version = obj->dpio_desc.qman_version;
+
+ /* Compute how many nanoseconds one 256-cycle increment of the QBMAN
+ * clock represents. This is needed because the interrupt timeout
+ * period register is specified in QBMAN clock cycles, in increments
+ * of 256.
+ */
+ qman_256_cycles_per_ns = 256000 / (obj->swp_desc.qman_clk / 1000000);
+ obj->swp_desc.qman_256_cycles_per_ns = qman_256_cycles_per_ns;
+ obj->swp = qbman_swp_init(&obj->swp_desc);
+
+ if (!obj->swp) {
+ kfree(obj);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&obj->node);
+ spin_lock_init(&obj->lock_mgmt_cmd);
+ spin_lock_init(&obj->lock_notifications);
+ spin_lock_init(&obj->dim_lock);
+ INIT_LIST_HEAD(&obj->notifications);
+
+ /* For now only enable DQRR interrupts */
+ qbman_swp_interrupt_set_trigger(obj->swp,
+ QBMAN_SWP_INTERRUPT_DQRI);
+ qbman_swp_interrupt_clear_status(obj->swp, 0xffffffff);
+ if (obj->dpio_desc.receives_notifications)
+ qbman_swp_push_set(obj->swp, 0, 1);
+
+ spin_lock(&dpio_list_lock);
+ list_add_tail(&obj->node, &dpio_list);
+ if (desc->cpu >= 0 && !dpio_by_cpu[desc->cpu])
+ dpio_by_cpu[desc->cpu] = obj;
+ spin_unlock(&dpio_list_lock);
+
+ obj->dev = dev;
+
+ memset(&obj->rx_dim, 0, sizeof(obj->rx_dim));
+ INIT_WORK(&obj->rx_dim.work, dpaa2_io_dim_work);
+ obj->event_ctr = 0;
+ obj->bytes = 0;
+ obj->frames = 0;
+
+ return obj;
+}
+
+/**
+ * dpaa2_io_down() - release the dpaa2_io object.
+ * @d: the dpaa2_io object to be released.
+ *
+ * The "struct dpaa2_io" type can represent an individual DPIO object (as
+ * described by "struct dpaa2_io_desc") or an instance of a "DPIO service",
+ * which can be used to group/encapsulate multiple DPIO objects. In all cases,
+ * each handle obtained should be released using this function.
+ */
+void dpaa2_io_down(struct dpaa2_io *d)
+{
+ spin_lock(&dpio_list_lock);
+ dpio_by_cpu[d->dpio_desc.cpu] = NULL;
+ list_del(&d->node);
+ spin_unlock(&dpio_list_lock);
+
+ kfree(d);
+}
+
+#define DPAA_POLL_MAX 32
+
+/**
+ * dpaa2_io_irq() - ISR for DPIO interrupts
+ *
+ * @obj: the given DPIO object.
+ *
+ * Return IRQ_HANDLED for success or IRQ_NONE if there
+ * were no pending interrupts.
+ */
+irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj)
+{
+ const struct dpaa2_dq *dq;
+ int max = 0;
+ struct qbman_swp *swp;
+ u32 status;
+
+ obj->event_ctr++;
+
+ swp = obj->swp;
+ status = qbman_swp_interrupt_read_status(swp);
+ if (!status)
+ return IRQ_NONE;
+
+ dq = qbman_swp_dqrr_next(swp);
+ while (dq) {
+ if (qbman_result_is_SCN(dq)) {
+ struct dpaa2_io_notification_ctx *ctx;
+ u64 q64;
+
+ q64 = qbman_result_SCN_ctx(dq);
+ ctx = (void *)(uintptr_t)q64;
+ ctx->cb(ctx);
+ } else {
+ pr_crit("fsl-mc-dpio: Unrecognised/ignored DQRR entry\n");
+ }
+ qbman_swp_dqrr_consume(swp, dq);
+ ++max;
+ if (max > DPAA_POLL_MAX)
+ goto done;
+ dq = qbman_swp_dqrr_next(swp);
+ }
+done:
+ qbman_swp_interrupt_clear_status(swp, status);
+ qbman_swp_interrupt_set_inhibit(swp, 0);
+ return IRQ_HANDLED;
+}
+
+/**
+ * dpaa2_io_get_cpu() - get the cpu associated with a given DPIO object
+ *
+ * @d: the given DPIO object.
+ *
+ * Return the cpu associated with the DPIO object
+ */
+int dpaa2_io_get_cpu(struct dpaa2_io *d)
+{
+ return d->dpio_desc.cpu;
+}
+EXPORT_SYMBOL(dpaa2_io_get_cpu);
+
+/**
+ * dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN
+ * notifications on the given DPIO service.
+ * @d: the given DPIO service.
+ * @ctx: the notification context.
+ * @dev: the device that requests the register
+ *
+ * The caller should make the MC command to attach a DPAA2 object to
+ * a DPIO after this function completes successfully. In that way:
+ * (a) The DPIO service is "ready" to handle a notification arrival
+ * (which might happen before the "attach" command to MC has
+ * returned control of execution back to the caller)
+ * (b) The DPIO service can provide back to the caller the 'dpio_id' and
+ * 'qman64' parameters that it should pass along in the MC command
+ * in order for the object to be configured to produce the right
+ * notification fields to the DPIO service.
+ *
+ * Return 0 for success, or -ENODEV for failure.
+ */
+int dpaa2_io_service_register(struct dpaa2_io *d,
+ struct dpaa2_io_notification_ctx *ctx,
+ struct device *dev)
+{
+ struct device_link *link;
+ unsigned long irqflags;
+
+ d = service_select_by_cpu(d, ctx->desired_cpu);
+ if (!d)
+ return -ENODEV;
+
+ link = device_link_add(dev, d->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
+ if (!link)
+ return -EINVAL;
+
+ ctx->dpio_id = d->dpio_desc.dpio_id;
+ ctx->qman64 = (u64)(uintptr_t)ctx;
+ ctx->dpio_private = d;
+ spin_lock_irqsave(&d->lock_notifications, irqflags);
+ list_add(&ctx->node, &d->notifications);
+ spin_unlock_irqrestore(&d->lock_notifications, irqflags);
+
+ /* Enable the generation of CDAN notifications */
+ if (ctx->is_cdan)
+ return qbman_swp_CDAN_set_context_enable(d->swp,
+ (u16)ctx->id,
+ ctx->qman64);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_service_register);
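A hedged sketch of the flow described in the comment above: fill in a notification context, register it, then use ctx->dpio_id and ctx->qman64 in the subsequent MC "attach" command. The callback and channel id here are assumptions of the example:

static void example_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
{
	/* typically: schedule NAPI or a pull dequeue of the channel */
}

static int example_register(struct device *dev, int channel_id)
{
	/* ctx is put on the service's list, so it must outlive this
	 * call; static storage keeps the sketch self-contained
	 */
	static struct dpaa2_io_notification_ctx ctx;

	ctx.is_cdan = 1;
	ctx.id = channel_id;
	ctx.desired_cpu = DPAA2_IO_ANY_CPU;
	ctx.cb = example_cdan_cb;

	/* on success, pass ctx.dpio_id/ctx.qman64 in the MC command that
	 * attaches the notification-producing object to the DPIO
	 */
	return dpaa2_io_service_register(NULL, &ctx, dev);
}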
+
+/**
+ * dpaa2_io_service_deregister - The opposite of 'register'.
+ * @service: the given DPIO service.
+ * @ctx: the notification context.
+ * @dev: the device that requests to be deregistered
+ *
+ * This function should be called only after sending the MC command to
+ * detach the notification-producing device from the DPIO.
+ */
+void dpaa2_io_service_deregister(struct dpaa2_io *service,
+ struct dpaa2_io_notification_ctx *ctx,
+ struct device *dev)
+{
+ struct dpaa2_io *d = ctx->dpio_private;
+ unsigned long irqflags;
+
+ if (ctx->is_cdan)
+ qbman_swp_CDAN_disable(d->swp, (u16)ctx->id);
+
+ spin_lock_irqsave(&d->lock_notifications, irqflags);
+ list_del(&ctx->node);
+ spin_unlock_irqrestore(&d->lock_notifications, irqflags);
+
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_service_deregister);
+
+/**
+ * dpaa2_io_service_rearm() - Rearm the notification for the given DPIO service.
+ * @d: the given DPIO service.
+ * @ctx: the notification context.
+ *
+ * Once a FQDAN/CDAN has been produced, the corresponding FQ/channel is
+ * considered "disarmed", i.e. the user can issue pull dequeue operations on that
+ * traffic source for as long as it likes. Eventually it may wish to "rearm"
+ * that source to allow it to produce another FQDAN/CDAN, that's what this
+ * function achieves.
+ *
+ * Return 0 for success.
+ */
+int dpaa2_io_service_rearm(struct dpaa2_io *d,
+ struct dpaa2_io_notification_ctx *ctx)
+{
+ unsigned long irqflags;
+ int err;
+
+ d = service_select_by_cpu(d, ctx->desired_cpu);
+ if (unlikely(!d))
+ return -ENODEV;
+
+ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
+ if (ctx->is_cdan)
+ err = qbman_swp_CDAN_enable(d->swp, (u16)ctx->id);
+ else
+ err = qbman_swp_fq_schedule(d->swp, ctx->id);
+ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_service_rearm);
+
+/**
+ * dpaa2_io_service_pull_fq() - pull dequeue functions from a fq.
+ * @d: the given DPIO service.
+ * @fqid: the given frame queue id.
+ * @s: the dpaa2_io_store object for the result.
+ *
+ * Return 0 for success, or error code for failure.
+ */
+int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid,
+ struct dpaa2_io_store *s)
+{
+ struct qbman_pull_desc pd;
+ int err;
+
+ qbman_pull_desc_clear(&pd);
+ qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
+ qbman_pull_desc_set_numframes(&pd, (u8)s->max);
+ qbman_pull_desc_set_fq(&pd, fqid);
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+ s->swp = d->swp;
+ err = qbman_swp_pull(d->swp, &pd);
+ if (err)
+ s->swp = NULL;
+
+ return err;
+}
+EXPORT_SYMBOL(dpaa2_io_service_pull_fq);
+
+/**
+ * dpaa2_io_service_pull_channel() - pull dequeue functions from a channel.
+ * @d: the given DPIO service.
+ * @channelid: the given channel id.
+ * @s: the dpaa2_io_store object for the result.
+ *
+ * Return 0 for success, or error code for failure.
+ */
+int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
+ struct dpaa2_io_store *s)
+{
+ struct qbman_pull_desc pd;
+ int err;
+
+ qbman_pull_desc_clear(&pd);
+ qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
+ qbman_pull_desc_set_numframes(&pd, (u8)s->max);
+ qbman_pull_desc_set_channel(&pd, channelid, qbman_pull_type_prio);
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+
+ s->swp = d->swp;
+ err = qbman_swp_pull(d->swp, &pd);
+ if (err)
+ s->swp = NULL;
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_service_pull_channel);
+
+/**
+ * dpaa2_io_service_enqueue_fq() - Enqueue a frame to a frame queue.
+ * @d: the given DPIO service.
+ * @fqid: the given frame queue id.
+ * @fd: the frame descriptor which is enqueued.
+ *
+ * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
+ * or -ENODEV if there is no dpio service.
+ */
+int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d,
+ u32 fqid,
+ const struct dpaa2_fd *fd)
+{
+ struct qbman_eq_desc ed;
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+
+ qbman_eq_desc_clear(&ed);
+ qbman_eq_desc_set_no_orp(&ed, 0);
+ qbman_eq_desc_set_fq(&ed, fqid);
+
+ return qbman_swp_enqueue(d->swp, &ed, fd);
+}
+EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq);
+
+/**
+ * dpaa2_io_service_enqueue_multiple_fq() - Enqueue multiple frames
+ * to a frame queue using one fqid.
+ * @d: the given DPIO service.
+ * @fqid: the given frame queue id.
+ * @fd: the frame descriptor which is enqueued.
+ * @nb: number of frames to be enqueued
+ *
+ * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
+ * or -ENODEV if there is no dpio service.
+ */
+int dpaa2_io_service_enqueue_multiple_fq(struct dpaa2_io *d,
+ u32 fqid,
+ const struct dpaa2_fd *fd,
+ int nb)
+{
+ struct qbman_eq_desc ed;
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+
+ qbman_eq_desc_clear(&ed);
+ qbman_eq_desc_set_no_orp(&ed, 0);
+ qbman_eq_desc_set_fq(&ed, fqid);
+
+ return qbman_swp_enqueue_multiple(d->swp, &ed, fd, NULL, nb);
+}
+EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_fq);
+
+/**
+ * dpaa2_io_service_enqueue_multiple_desc_fq() - Enqueue multiple frames
+ * to different frame queues using a list of fqids.
+ * @d: the given DPIO service.
+ * @fqid: the given list of frame queue ids.
+ * @fd: the frame descriptor which is enqueued.
+ * @nb: number of frames to be enqueued
+ *
+ * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
+ * or -ENODEV if there is no dpio service.
+ */
+int dpaa2_io_service_enqueue_multiple_desc_fq(struct dpaa2_io *d,
+ u32 *fqid,
+ const struct dpaa2_fd *fd,
+ int nb)
+{
+ struct qbman_eq_desc *ed;
+ int i, ret;
+
+ ed = kcalloc(32, sizeof(struct qbman_eq_desc), GFP_KERNEL);
+ if (!ed)
+ return -ENOMEM;
+
+ d = service_select(d);
+ if (!d) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ for (i = 0; i < nb; i++) {
+ qbman_eq_desc_clear(&ed[i]);
+ qbman_eq_desc_set_no_orp(&ed[i], 0);
+ qbman_eq_desc_set_fq(&ed[i], fqid[i]);
+ }
+
+ ret = qbman_swp_enqueue_multiple_desc(d->swp, &ed[0], fd, nb);
+out:
+ kfree(ed);
+ return ret;
+}
+EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_desc_fq);
+
+/**
+ * dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD.
+ * @d: the given DPIO service.
+ * @qdid: the given queuing destination id.
+ * @prio: the given queuing priority.
+ * @qdbin: the given queuing destination bin.
+ * @fd: the frame descriptor which is enqueued.
+ *
+ * Return 0 for successful enqueue, or -EBUSY if the enqueue ring is not ready,
+ * or -ENODEV if there is no dpio service.
+ */
+int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d,
+ u32 qdid, u8 prio, u16 qdbin,
+ const struct dpaa2_fd *fd)
+{
+ struct qbman_eq_desc ed;
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+
+ qbman_eq_desc_clear(&ed);
+ qbman_eq_desc_set_no_orp(&ed, 0);
+ qbman_eq_desc_set_qd(&ed, qdid, qdbin, prio);
+
+ return qbman_swp_enqueue(d->swp, &ed, fd);
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_service_enqueue_qd);
+
+/**
+ * dpaa2_io_service_release() - Release buffers to a buffer pool.
+ * @d: the given DPIO object.
+ * @bpid: the buffer pool id.
+ * @buffers: the buffers to be released.
+ * @num_buffers: the number of the buffers to be released.
+ *
+ * Return 0 for success, and negative error code for failure.
+ */
+int dpaa2_io_service_release(struct dpaa2_io *d,
+ u16 bpid,
+ const u64 *buffers,
+ unsigned int num_buffers)
+{
+ struct qbman_release_desc rd;
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+
+ qbman_release_desc_clear(&rd);
+ qbman_release_desc_set_bpid(&rd, bpid);
+
+ return qbman_swp_release(d->swp, &rd, buffers, num_buffers);
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_service_release);
+
+/**
+ * dpaa2_io_service_acquire() - Acquire buffers from a buffer pool.
+ * @d: the given DPIO object.
+ * @bpid: the buffer pool id.
+ * @buffers: the buffer addresses for acquired buffers.
+ * @num_buffers: the expected number of the buffers to acquire.
+ *
+ * Return a negative error code if the command failed, otherwise it returns
+ * the number of buffers acquired, which may be less than the number requested.
+ * E.g. if the buffer pool is empty, this will return zero.
+ */
+int dpaa2_io_service_acquire(struct dpaa2_io *d,
+ u16 bpid,
+ u64 *buffers,
+ unsigned int num_buffers)
+{
+ unsigned long irqflags;
+ int err;
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+
+ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
+ err = qbman_swp_acquire(d->swp, bpid, buffers, num_buffers);
+ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_service_acquire);
+
+/*
+ * 'Stores' are reusable memory blocks for holding dequeue results, and to
+ * assist with parsing those results.
+ */
+
+/**
+ * dpaa2_io_store_create() - Create the dma memory storage for dequeue result.
+ * @max_frames: the maximum number of dequeue results, must be <= 32.
+ * @dev: the device to allow mapping/unmapping the DMAable region.
+ *
+ * The size of the storage is "max_frames*sizeof(struct dpaa2_dq)".
+ * The 'dpaa2_io_store' returned is a DPIO service managed object.
+ *
+ * Return pointer to dpaa2_io_store struct for successfully created storage
+ * memory, or NULL on error.
+ */
+struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
+ struct device *dev)
+{
+ struct dpaa2_io_store *ret;
+ size_t size;
+
+ if (!max_frames || (max_frames > 32))
+ return NULL;
+
+ ret = kmalloc(sizeof(*ret), GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->max = max_frames;
+ size = max_frames * sizeof(struct dpaa2_dq) + 64;
+ ret->alloced_addr = kzalloc(size, GFP_KERNEL);
+ if (!ret->alloced_addr) {
+ kfree(ret);
+ return NULL;
+ }
+
+ ret->vaddr = PTR_ALIGN(ret->alloced_addr, 64);
+ ret->paddr = dma_map_single(dev, ret->vaddr,
+ sizeof(struct dpaa2_dq) * max_frames,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, ret->paddr)) {
+ kfree(ret->alloced_addr);
+ kfree(ret);
+ return NULL;
+ }
+
+ ret->idx = 0;
+ ret->dev = dev;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_store_create);
+
+/**
+ * dpaa2_io_store_destroy() - Frees the dma memory storage for dequeue
+ * result.
+ * @s: the storage memory to be destroyed.
+ */
+void dpaa2_io_store_destroy(struct dpaa2_io_store *s)
+{
+ dma_unmap_single(s->dev, s->paddr, sizeof(struct dpaa2_dq) * s->max,
+ DMA_FROM_DEVICE);
+ kfree(s->alloced_addr);
+ kfree(s);
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_store_destroy);
+
+/**
+ * dpaa2_io_store_next() - Determine when the next dequeue result is available.
+ * @s: the dpaa2_io_store object.
+ * @is_last: indicate whether this is the last frame in the pull command.
+ *
+ * When an object driver performs dequeues to a dpaa2_io_store, this function
+ * can be used to determine when the next frame result is available. Once
+ * this function returns non-NULL, a subsequent call to it will try to find
+ * the next dequeue result.
+ *
+ * Note that if a pull-dequeue has a NULL result because the target FQ/channel
+ * was empty, then this function will also return NULL (rather than expecting
+ * the caller to always check for this. As such, "is_last" can be used to
+ * differentiate between "end-of-empty-dequeue" and "still-waiting".
+ *
+ * Return dequeue result for a valid dequeue result, or NULL for empty dequeue.
+ */
+struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last)
+{
+ int match;
+ struct dpaa2_dq *ret = &s->vaddr[s->idx];
+
+ match = qbman_result_has_new_result(s->swp, ret);
+ if (!match) {
+ *is_last = 0;
+ return NULL;
+ }
+
+ s->idx++;
+
+ if (dpaa2_dq_is_pull_complete(ret)) {
+ *is_last = 1;
+ s->idx = 0;
+ /*
+ * If we get an empty dequeue result to terminate a zero-results
+ * vdqcr, return NULL to the caller rather than expecting the caller
+ * to check for non-NULL results every time.
+ */
+ if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME))
+ ret = NULL;
+ } else {
+ prefetch(&s->vaddr[s->idx]);
+ *is_last = 0;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_store_next);
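Putting the store APIs together, a hedged sketch of a pull-dequeue loop (busy-waiting for brevity; real drivers typically drive this from NAPI):

/* sketch: pull up to 16 frames from fqid and walk the results */
static int example_pull(struct device *dev, u32 fqid)
{
	struct dpaa2_io_store *s = dpaa2_io_store_create(16, dev);
	struct dpaa2_dq *dq;
	int is_last = 0, err;

	if (!s)
		return -ENOMEM;

	err = dpaa2_io_service_pull_fq(NULL, fqid, s);
	if (err)
		goto out;

	/* spins until the pull command completes; fine for a sketch */
	do {
		dq = dpaa2_io_store_next(s, &is_last);
		if (dq) {
			/* process dpaa2_dq_fd(dq) here */
		}
	} while (!is_last);
out:
	dpaa2_io_store_destroy(s);
	return err;
}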
+
+/**
+ * dpaa2_io_query_fq_count() - Get the frame and byte count for a given fq.
+ * @d: the given DPIO object.
+ * @fqid: the id of frame queue to be queried.
+ * @fcnt: the queried frame count.
+ * @bcnt: the queried byte count.
+ *
+ * Knowing the FQ count at run-time can be useful in debugging situations.
+ * The instantaneous frame- and byte-count are hereby returned.
+ *
+ * Return 0 for a successful query, and negative error code if query fails.
+ */
+int dpaa2_io_query_fq_count(struct dpaa2_io *d, u32 fqid,
+ u32 *fcnt, u32 *bcnt)
+{
+ struct qbman_fq_query_np_rslt state;
+ struct qbman_swp *swp;
+ unsigned long irqflags;
+ int ret;
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+
+ swp = d->swp;
+ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
+ ret = qbman_fq_query_state(swp, fqid, &state);
+ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
+ if (ret)
+ return ret;
+ *fcnt = qbman_fq_state_frame_count(&state);
+ *bcnt = qbman_fq_state_byte_count(&state);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_query_fq_count);
+
+/**
+ * dpaa2_io_query_bp_count() - Query the number of buffers currently in a
+ * buffer pool.
+ * @d: the given DPIO object.
+ * @bpid: the index of buffer pool to be queried.
+ * @num: the queried number of buffers in the buffer pool.
+ *
+ * Return 0 for a successful query, and negative error code if query fails.
+ */
+int dpaa2_io_query_bp_count(struct dpaa2_io *d, u16 bpid, u32 *num)
+{
+ struct qbman_bp_query_rslt state;
+ struct qbman_swp *swp;
+ unsigned long irqflags;
+ int ret;
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+
+ swp = d->swp;
+ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
+ ret = qbman_bp_query(swp, bpid, &state);
+ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
+ if (ret)
+ return ret;
+ *num = qbman_bp_info_num_free_bufs(&state);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_query_bp_count);
+
+/**
+ * dpaa2_io_set_irq_coalescing() - Set new IRQ coalescing values
+ * @d: the given DPIO object
+ * @irq_holdoff: interrupt holdoff (timeout) period in us
+ *
+ * Return 0 for success, or negative error code on error.
+ */
+int dpaa2_io_set_irq_coalescing(struct dpaa2_io *d, u32 irq_holdoff)
+{
+ struct qbman_swp *swp = d->swp;
+
+ return qbman_swp_set_irq_coalescing(swp, swp->dqrr.dqrr_size - 1,
+ irq_holdoff);
+}
+EXPORT_SYMBOL(dpaa2_io_set_irq_coalescing);
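The holdoff is programmed in 256-cycle increments of the QBMAN clock, using the qman_256_cycles_per_ns factor computed in dpaa2_io_create(). A worked example, assuming a 700 MHz QBMAN clock:

/*
 * Illustration only, assuming qman_clk = 700 MHz:
 *   qman_256_cycles_per_ns = 256000 / (700000000 / 1000000) = 365
 * i.e. one 256-cycle increment is ~365 ns, so a 10 us holdoff programs
 * roughly (10 * 1000) / 365 ~= 27 increments into the timeout register.
 */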
+
+/**
+ * dpaa2_io_get_irq_coalescing() - Get the current IRQ coalescing parameters
+ * @d: the given DPIO object
+ * @irq_holdoff: interrupt holdoff (timeout) period in us
+ */
+void dpaa2_io_get_irq_coalescing(struct dpaa2_io *d, u32 *irq_holdoff)
+{
+ struct qbman_swp *swp = d->swp;
+
+ qbman_swp_get_irq_coalescing(swp, NULL, irq_holdoff);
+}
+EXPORT_SYMBOL(dpaa2_io_get_irq_coalescing);
+
+/**
+ * dpaa2_io_set_adaptive_coalescing() - Enable/disable adaptive coalescing
+ * @d: the given DPIO object
+ * @use_adaptive_rx_coalesce: adaptive coalescing state
+ */
+void dpaa2_io_set_adaptive_coalescing(struct dpaa2_io *d,
+ int use_adaptive_rx_coalesce)
+{
+ d->swp->use_adaptive_rx_coalesce = use_adaptive_rx_coalesce;
+}
+EXPORT_SYMBOL(dpaa2_io_set_adaptive_coalescing);
+
+/**
+ * dpaa2_io_get_adaptive_coalescing() - Query adaptive coalescing state
+ * @d: the given DPIO object
+ *
+ * Return 1 when adaptive coalescing is enabled on the DPIO object and 0
+ * otherwise.
+ */
+int dpaa2_io_get_adaptive_coalescing(struct dpaa2_io *d)
+{
+ return d->swp->use_adaptive_rx_coalesce;
+}
+EXPORT_SYMBOL(dpaa2_io_get_adaptive_coalescing);
+
+/**
+ * dpaa2_io_update_net_dim() - Update Net DIM
+ * @d: the given DPIO object
+ * @frames: how many frames have been dequeued by the user since the last call
+ * @bytes: how many bytes have been dequeued by the user since the last call
+ */
+void dpaa2_io_update_net_dim(struct dpaa2_io *d, __u64 frames, __u64 bytes)
+{
+ struct dim_sample dim_sample = {};
+
+ if (!d->swp->use_adaptive_rx_coalesce)
+ return;
+
+ spin_lock(&d->dim_lock);
+
+ d->bytes += bytes;
+ d->frames += frames;
+
+ dim_update_sample(d->event_ctr, d->frames, d->bytes, &dim_sample);
+ net_dim(&d->rx_dim, dim_sample);
+
+ spin_unlock(&d->dim_lock);
+}
+EXPORT_SYMBOL(dpaa2_io_update_net_dim);
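A hedged sketch of how a network driver feeds this: enable adaptive coalescing once, then report the totals dequeued after each polling cycle so Net DIM can retune the holdoff via dpaa2_io_dim_work() above:

/* sketch: per-poll accounting feeding adaptive IRQ coalescing */
static void example_enable_dim(struct dpaa2_io *io)
{
	dpaa2_io_set_adaptive_coalescing(io, 1);
}

static void example_poll_done(struct dpaa2_io *io, u64 frames, u64 bytes)
{
	/* a no-op unless adaptive coalescing was enabled on this DPIO */
	dpaa2_io_update_net_dim(io, frames, bytes);
}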
diff --git a/drivers/soc/fsl/dpio/dpio.c b/drivers/soc/fsl/dpio/dpio.c
new file mode 100644
index 0000000000..8ed606ffaa
--- /dev/null
+++ b/drivers/soc/fsl/dpio/dpio.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/fsl/mc.h>
+
+#include "dpio.h"
+#include "dpio-cmd.h"
+
+/*
+ * Data Path I/O Portal API
+ * Contains initialization APIs and runtime control APIs for DPIO
+ */
+
+/**
+ * dpio_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpio_id: DPIO unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpio_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpio_id,
+ u16 *token)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpio_cmd_open *dpio_cmd;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ dpio_cmd = (struct dpio_cmd_open *)cmd.params;
+ dpio_cmd->dpio_id = cpu_to_le32(dpio_id);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpio_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CLOSE,
+ cmd_flags,
+ token);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpio_enable() - Enable the DPIO, allow I/O portal operations.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpio_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_ENABLE,
+ cmd_flags,
+ token);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpio_disable() - Disable the DPIO, stop any I/O portal operation.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpio_disable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpio_get_attributes() - Retrieve DPIO attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpio_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpio_attr *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpio_rsp_get_attr *dpio_rsp;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ dpio_rsp = (struct dpio_rsp_get_attr *)cmd.params;
+ attr->id = le32_to_cpu(dpio_rsp->id);
+ attr->qbman_portal_id = le16_to_cpu(dpio_rsp->qbman_portal_id);
+ attr->num_priorities = dpio_rsp->num_priorities;
+ attr->channel_mode = dpio_rsp->channel_mode & DPIO_CHANNEL_MODE_MASK;
+ attr->qbman_portal_ce_offset =
+ le64_to_cpu(dpio_rsp->qbman_portal_ce_addr);
+ attr->qbman_portal_ci_offset =
+ le64_to_cpu(dpio_rsp->qbman_portal_ci_addr);
+ attr->qbman_version = le32_to_cpu(dpio_rsp->qbman_version);
+ attr->clk = le32_to_cpu(dpio_rsp->clk);
+
+ return 0;
+}
+
+int dpio_set_stashing_destination(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 sdest)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpio_stashing_dest *dpio_cmd;
+
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_STASHING_DEST,
+ cmd_flags, token);
+ dpio_cmd = (struct dpio_stashing_dest *)cmd.params;
+ dpio_cmd->sdest = sdest;
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpio_get_api_version() - Get Data Path I/O API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of DPIO API
+ * @minor_ver: Minor version of DPIO API
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpio_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver)
+{
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_API_VERSION,
+ cmd_flags, 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
+
+ return 0;
+}
+
+/**
+ * dpio_reset() - Reset the DPIO, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_RESET,
+ cmd_flags,
+ token);
+
+ /* send command to MC */
+ return mc_send_command(mc_io, &cmd);
+}
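The probe path in dpio-driver.c above strings these commands together; a hedged sketch of the minimal open/reset/get-attributes/enable sequence (error unwinding abbreviated):

static int example_bring_up(struct fsl_mc_io *mc_io, int dpio_id,
			    struct dpio_attr *attr)
{
	u16 token;
	int err;

	err = dpio_open(mc_io, 0, dpio_id, &token);
	if (err)
		return err;
	err = dpio_reset(mc_io, 0, token);
	if (!err)
		err = dpio_get_attributes(mc_io, 0, token, attr);
	if (!err)
		err = dpio_enable(mc_io, 0, token);
	/* the control session can be closed once setup is done */
	dpio_close(mc_io, 0, token);
	return err;
}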
diff --git a/drivers/soc/fsl/dpio/dpio.h b/drivers/soc/fsl/dpio/dpio.h
new file mode 100644
index 0000000000..7fda44f0d7
--- /dev/null
+++ b/drivers/soc/fsl/dpio/dpio.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+#ifndef __FSL_DPIO_H
+#define __FSL_DPIO_H
+
+struct fsl_mc_io;
+
+int dpio_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpio_id,
+ u16 *token);
+
+int dpio_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/**
+ * enum dpio_channel_mode - DPIO notification channel mode
+ * @DPIO_NO_CHANNEL: No support for notification channel
+ * @DPIO_LOCAL_CHANNEL: Notifications on data availability can be received by a
+ * dedicated channel in the DPIO; user should point the queue's
+ * destination in the relevant interface to this DPIO
+ */
+enum dpio_channel_mode {
+ DPIO_NO_CHANNEL = 0,
+ DPIO_LOCAL_CHANNEL = 1,
+};
+
+/**
+ * struct dpio_cfg - Structure representing DPIO configuration
+ * @channel_mode: Notification channel mode
+ * @num_priorities: Number of priorities for the notification channel (1-8);
+ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL'
+ */
+struct dpio_cfg {
+ enum dpio_channel_mode channel_mode;
+ u8 num_priorities;
+};
+
+int dpio_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpio_disable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/**
+ * struct dpio_attr - Structure representing DPIO attributes
+ * @id: DPIO object ID
+ * @qbman_portal_ce_offset: offset of the software portal cache-enabled area
+ * @qbman_portal_ci_offset: offset of the software portal cache-inhibited area
+ * @qbman_portal_id: Software portal ID
+ * @channel_mode: Notification channel mode
+ * @num_priorities: Number of priorities for the notification channel (1-8);
+ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL'
+ * @qbman_version: QBMAN version
+ * @clk: QBMAN clock frequency value in Hz
+ */
+struct dpio_attr {
+ int id;
+ u64 qbman_portal_ce_offset;
+ u64 qbman_portal_ci_offset;
+ u16 qbman_portal_id;
+ enum dpio_channel_mode channel_mode;
+ u8 num_priorities;
+ u32 qbman_version;
+ u32 clk;
+};
+
+int dpio_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpio_attr *attr);
+
+int dpio_set_stashing_destination(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 sdest);
+
+int dpio_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver);
+
+int dpio_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+#endif /* __FSL_DPIO_H */
diff --git a/drivers/soc/fsl/dpio/qbman-portal.c b/drivers/soc/fsl/dpio/qbman-portal.c
new file mode 100644
index 0000000000..0a3fb6c115
--- /dev/null
+++ b/drivers/soc/fsl/dpio/qbman-portal.c
@@ -0,0 +1,1853 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Copyright 2016-2019 NXP
+ *
+ */
+
+#include <asm/cacheflush.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <soc/fsl/dpaa2-global.h>
+
+#include "qbman-portal.h"
+
+/* All QBMan command and result structures use this "valid bit" encoding */
+#define QB_VALID_BIT ((u32)0x80)
+
+/* QBMan portal management command codes */
+#define QBMAN_MC_ACQUIRE 0x30
+#define QBMAN_WQCHAN_CONFIGURE 0x46
+
+/* CINH register offsets */
+#define QBMAN_CINH_SWP_EQCR_PI 0x800
+#define QBMAN_CINH_SWP_EQCR_CI 0x840
+#define QBMAN_CINH_SWP_EQAR 0x8c0
+#define QBMAN_CINH_SWP_CR_RT 0x900
+#define QBMAN_CINH_SWP_VDQCR_RT 0x940
+#define QBMAN_CINH_SWP_EQCR_AM_RT 0x980
+#define QBMAN_CINH_SWP_RCR_AM_RT 0x9c0
+#define QBMAN_CINH_SWP_DQPI 0xa00
+#define QBMAN_CINH_SWP_DQRR_ITR 0xa80
+#define QBMAN_CINH_SWP_DCAP 0xac0
+#define QBMAN_CINH_SWP_SDQCR 0xb00
+#define QBMAN_CINH_SWP_EQCR_AM_RT2 0xb40
+#define QBMAN_CINH_SWP_RCR_PI 0xc00
+#define QBMAN_CINH_SWP_RAR 0xcc0
+#define QBMAN_CINH_SWP_ISR 0xe00
+#define QBMAN_CINH_SWP_IER 0xe40
+#define QBMAN_CINH_SWP_ISDR 0xe80
+#define QBMAN_CINH_SWP_IIR 0xec0
+#define QBMAN_CINH_SWP_ITPR 0xf40
+
+/* CENA register offsets */
+#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6))
+#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((u32)(n) << 6))
+#define QBMAN_CENA_SWP_RCR(n) (0x400 + ((u32)(n) << 6))
+#define QBMAN_CENA_SWP_CR 0x600
+#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((u32)(vb) >> 1))
+#define QBMAN_CENA_SWP_VDQCR 0x780
+#define QBMAN_CENA_SWP_EQCR_CI 0x840
+#define QBMAN_CENA_SWP_EQCR_CI_MEMBACK 0x1840
+
+/* CENA register offsets in memory-backed mode */
+#define QBMAN_CENA_SWP_DQRR_MEM(n) (0x800 + ((u32)(n) << 6))
+#define QBMAN_CENA_SWP_RCR_MEM(n) (0x1400 + ((u32)(n) << 6))
+#define QBMAN_CENA_SWP_CR_MEM 0x1600
+#define QBMAN_CENA_SWP_RR_MEM 0x1680
+#define QBMAN_CENA_SWP_VDQCR_MEM 0x1780
+
+/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
+#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)
+
+/* Define token used to determine if response written to memory is valid */
+#define QMAN_DQ_TOKEN_VALID 1
+
+/* SDQCR attribute codes */
+#define QB_SDQCR_FC_SHIFT 29
+#define QB_SDQCR_FC_MASK 0x1
+#define QB_SDQCR_DCT_SHIFT 24
+#define QB_SDQCR_DCT_MASK 0x3
+#define QB_SDQCR_TOK_SHIFT 16
+#define QB_SDQCR_TOK_MASK 0xff
+#define QB_SDQCR_SRC_SHIFT 0
+#define QB_SDQCR_SRC_MASK 0xffff
+
+/* opaque token for static dequeues */
+#define QMAN_SDQCR_TOKEN 0xbb
+
+#define QBMAN_EQCR_DCA_IDXMASK 0x0f
+#define QBMAN_ENQUEUE_FLAG_DCA (1ULL << 31)
+
+#define EQ_DESC_SIZE_WITHOUT_FD 29
+#define EQ_DESC_SIZE_FD_START 32
+
+enum qbman_sdqcr_dct {
+ qbman_sdqcr_dct_null = 0,
+ qbman_sdqcr_dct_prio_ics,
+ qbman_sdqcr_dct_active_ics,
+ qbman_sdqcr_dct_active
+};
+
+enum qbman_sdqcr_fc {
+ qbman_sdqcr_fc_one = 0,
+ qbman_sdqcr_fc_up_to_3 = 1
+};
+
+/* Internal Function declaration */
+static int qbman_swp_enqueue_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd);
+static int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd);
+static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ uint32_t *flags,
+ int num_frames);
+static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ uint32_t *flags,
+ int num_frames);
+static int
+qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ int num_frames);
+static
+int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ int num_frames);
+static int qbman_swp_pull_direct(struct qbman_swp *s,
+ struct qbman_pull_desc *d);
+static int qbman_swp_pull_mem_back(struct qbman_swp *s,
+ struct qbman_pull_desc *d);
+
+const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
+const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);
+
+static int qbman_swp_release_direct(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const u64 *buffers,
+ unsigned int num_buffers);
+static int qbman_swp_release_mem_back(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const u64 *buffers,
+ unsigned int num_buffers);
+
+/* Function pointers */
+int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd)
+ = qbman_swp_enqueue_direct;
+
+int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ uint32_t *flags,
+ int num_frames)
+ = qbman_swp_enqueue_multiple_direct;
+
+int
+(*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ int num_frames)
+ = qbman_swp_enqueue_multiple_desc_direct;
+
+int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d)
+ = qbman_swp_pull_direct;
+
+const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
+ = qbman_swp_dqrr_next_direct;
+
+int (*qbman_swp_release_ptr)(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const u64 *buffers,
+ unsigned int num_buffers)
+ = qbman_swp_release_direct;
+
+/* Portal Access */
+
+static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset)
+{
+ return readl_relaxed(p->addr_cinh + offset);
+}
+
+static inline void qbman_write_register(struct qbman_swp *p, u32 offset,
+ u32 value)
+{
+ writel_relaxed(value, p->addr_cinh + offset);
+}
+
+static inline void *qbman_get_cmd(struct qbman_swp *p, u32 offset)
+{
+ return p->addr_cena + offset;
+}
+
+#define QBMAN_CINH_SWP_CFG 0xd00
+
+#define SWP_CFG_DQRR_MF_SHIFT 20
+#define SWP_CFG_EST_SHIFT 16
+#define SWP_CFG_CPBS_SHIFT 15
+#define SWP_CFG_WN_SHIFT 14
+#define SWP_CFG_RPM_SHIFT 12
+#define SWP_CFG_DCM_SHIFT 10
+#define SWP_CFG_EPM_SHIFT 8
+#define SWP_CFG_VPM_SHIFT 7
+#define SWP_CFG_CPM_SHIFT 6
+#define SWP_CFG_SD_SHIFT 5
+#define SWP_CFG_SP_SHIFT 4
+#define SWP_CFG_SE_SHIFT 3
+#define SWP_CFG_DP_SHIFT 2
+#define SWP_CFG_DE_SHIFT 1
+#define SWP_CFG_EP_SHIFT 0
+
+static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,
+ u8 epm, int sd, int sp, int se,
+ int dp, int de, int ep)
+{
+ return (max_fill << SWP_CFG_DQRR_MF_SHIFT |
+ est << SWP_CFG_EST_SHIFT |
+ wn << SWP_CFG_WN_SHIFT |
+ rpm << SWP_CFG_RPM_SHIFT |
+ dcm << SWP_CFG_DCM_SHIFT |
+ epm << SWP_CFG_EPM_SHIFT |
+ sd << SWP_CFG_SD_SHIFT |
+ sp << SWP_CFG_SP_SHIFT |
+ se << SWP_CFG_SE_SHIFT |
+ dp << SWP_CFG_DP_SHIFT |
+ de << SWP_CFG_DE_SHIFT |
+ ep << SWP_CFG_EP_SHIFT);
+}
+
+#define QMAN_RT_MODE 0x00000100
+
+static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
+{
+ /* 'first' is included, 'last' is excluded */
+ if (first <= last)
+ return last - first;
+ else
+ return (2 * ringsize) - (first - last);
+}
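+
+/*
+ * Worked example (illustrative): with ringsize = 8 the cyclic counters wrap
+ * at 2 * 8 = 16, so first = 14, last = 2 gives (2 * 8) - (14 - 2) = 4
+ * outstanding entries, while first = 2, last = 6 is simply 6 - 2 = 4.
+ */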
+
+/**
+ * qbman_swp_init() - Create a functional object representing the given
+ * QBMan portal descriptor.
+ * @d: the given qbman swp descriptor
+ *
+ * Return qbman_swp portal for success, NULL if the object cannot
+ * be created.
+ */
+struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
+{
+ struct qbman_swp *p = kzalloc(sizeof(*p), GFP_KERNEL);
+ u32 reg;
+ u32 mask_size;
+ u32 eqcr_pi;
+
+ if (!p)
+ return NULL;
+
+ spin_lock_init(&p->access_spinlock);
+
+ p->desc = d;
+ p->mc.valid_bit = QB_VALID_BIT;
+ p->sdq = 0;
+ p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
+ p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
+ p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
+ if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
+ p->mr.valid_bit = QB_VALID_BIT;
+
+ atomic_set(&p->vdq.available, 1);
+ p->vdq.valid_bit = QB_VALID_BIT;
+ p->dqrr.next_idx = 0;
+ p->dqrr.valid_bit = QB_VALID_BIT;
+
+ if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
+ p->dqrr.dqrr_size = 4;
+ p->dqrr.reset_bug = 1;
+ } else {
+ p->dqrr.dqrr_size = 8;
+ p->dqrr.reset_bug = 0;
+ }
+
+ p->addr_cena = d->cena_bar;
+ p->addr_cinh = d->cinh_bar;
+
+ if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
+
+ reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
+ 1, /* Writes Non-cacheable */
+ 0, /* EQCR_CI stashing threshold */
+ 3, /* RPM: RCR in array mode */
+ 2, /* DCM: Discrete consumption ack */
+ 2, /* EPM: EQCR in ring mode */
+ 1, /* mem stashing drop enable */
+ 1, /* mem stashing priority enable */
+ 1, /* mem stashing enable */
+ 1, /* dequeue stashing priority enable */
+ 0, /* dequeue stashing enable */
+ 0); /* EQCR_CI stashing priority enable */
+ } else {
+ memset(p->addr_cena, 0, 64 * 1024);
+ reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
+ 1, /* Writes Non-cacheable */
+ 1, /* EQCR_CI stashing threshold */
+ 3, /* RPM: RCR in array mode */
+ 2, /* DCM: Discrete consumption ack */
+ 0, /* EPM: EQCR in ring mode */
+ 1, /* mem stashing drop enable */
+ 1, /* mem stashing priority enable */
+ 1, /* mem stashing enable */
+ 1, /* dequeue stashing priority enable */
+ 0, /* dequeue stashing enable */
+ 0); /* EQCR_CI stashing priority enable */
+ reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */
+ 1 << SWP_CFG_VPM_SHIFT | /* VDQCR read triggered mode */
+ 1 << SWP_CFG_CPM_SHIFT; /* CR read triggered mode */
+ }
+
+ qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
+ reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
+ if (!reg) {
+ pr_err("qbman: the portal is not enabled!\n");
+ kfree(p);
+ return NULL;
+ }
+
+ if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
+ qbman_write_register(p, QBMAN_CINH_SWP_EQCR_PI, QMAN_RT_MODE);
+ qbman_write_register(p, QBMAN_CINH_SWP_RCR_PI, QMAN_RT_MODE);
+ }
+ /*
+ * SDQCR needs to be initialized to 0 when no channels are
+ * being dequeued from or else the QMan HW will indicate an
+ * error. The values that were calculated above will be
+ * applied when dequeues from a specific channel are enabled.
+ */
+ qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0);
+
+ p->eqcr.pi_ring_size = 8;
+ if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
+ p->eqcr.pi_ring_size = 32;
+ qbman_swp_enqueue_ptr =
+ qbman_swp_enqueue_mem_back;
+ qbman_swp_enqueue_multiple_ptr =
+ qbman_swp_enqueue_multiple_mem_back;
+ qbman_swp_enqueue_multiple_desc_ptr =
+ qbman_swp_enqueue_multiple_desc_mem_back;
+ qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
+ qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
+ qbman_swp_release_ptr = qbman_swp_release_mem_back;
+ }
+
+ for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
+ p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask << 1) + 1;
+ eqcr_pi = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_PI);
+ p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
+ p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
+ p->eqcr.ci = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_CI)
+ & p->eqcr.pi_ci_mask;
+ p->eqcr.available = p->eqcr.pi_ring_size;
+
+ /* Initialize the software portal with an IRQ timeout period of 0 us */
+ qbman_swp_set_irq_coalescing(p, p->dqrr.dqrr_size - 1, 0);
+
+ return p;
+}
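+
+/*
+ * Usage sketch (illustrative, with hypothetical mapping variables): the DPIO
+ * driver fills in a descriptor from the portal resources it probed and then
+ * creates the software portal object:
+ *
+ *	struct qbman_swp_desc desc = {
+ *		.cena_bar = cena_va,		// cache-enabled mapping
+ *		.cinh_bar = cinh_va,		// cache-inhibited mapping
+ *		.qman_version = qman_version,
+ *	};
+ *	struct qbman_swp *swp = qbman_swp_init(&desc);
+ *
+ * A NULL return means the portal could not be enabled; otherwise the object
+ * is used until qbman_swp_finish() releases it.
+ */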
+
+/**
+ * qbman_swp_finish() - Destroy the functional object representing the given
+ * QBMan portal descriptor.
+ * @p: the qbman_swp object to be destroyed
+ */
+void qbman_swp_finish(struct qbman_swp *p)
+{
+ kfree(p);
+}
+
+/**
+ * qbman_swp_interrupt_read_status()
+ * @p: the given software portal
+ *
+ * Return the value in the SWP_ISR register.
+ */
+u32 qbman_swp_interrupt_read_status(struct qbman_swp *p)
+{
+ return qbman_read_register(p, QBMAN_CINH_SWP_ISR);
+}
+
+/**
+ * qbman_swp_interrupt_clear_status()
+ * @p: the given software portal
+ * @mask: The mask to clear in SWP_ISR register
+ */
+void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask)
+{
+ qbman_write_register(p, QBMAN_CINH_SWP_ISR, mask);
+}
+
+/**
+ * qbman_swp_interrupt_get_trigger() - read interrupt enable register
+ * @p: the given software portal
+ *
+ * Return the value in the SWP_IER register.
+ */
+u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
+{
+ return qbman_read_register(p, QBMAN_CINH_SWP_IER);
+}
+
+/**
+ * qbman_swp_interrupt_set_trigger() - enable interrupts for a swp
+ * @p: the given software portal
+ * @mask: The mask of bits to enable in SWP_IER
+ */
+void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask)
+{
+ qbman_write_register(p, QBMAN_CINH_SWP_IER, mask);
+}
+
+/**
+ * qbman_swp_interrupt_get_inhibit() - read interrupt mask register
+ * @p: the given software portal object
+ *
+ * Return the value in the SWP_IIR register.
+ */
+int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
+{
+ return qbman_read_register(p, QBMAN_CINH_SWP_IIR);
+}
+
+/**
+ * qbman_swp_interrupt_set_inhibit() - write interrupt mask register
+ * @p: the given software portal object
+ * @inhibit: whether to inhibit the IRQs
+ */
+void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
+{
+ qbman_write_register(p, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
+}
+
+/*
+ * Different management commands all use this common base layer of code to issue
+ * commands and poll for results.
+ */
+
+/*
+ * Returns a pointer to where the caller should fill in their management command
+ * (caller should ignore the verb byte)
+ */
+void *qbman_swp_mc_start(struct qbman_swp *p)
+{
+ if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
+ return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
+ else
+ return qbman_get_cmd(p, QBMAN_CENA_SWP_CR_MEM);
+}
+
+/*
+ * Commits the command: merges in the caller-supplied command verb (which
+ * should not include the valid-bit) and submits the command to hardware.
+ */
+void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb)
+{
+ u8 *v = cmd;
+
+ if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
+ dma_wmb();
+ *v = cmd_verb | p->mc.valid_bit;
+ } else {
+ *v = cmd_verb | p->mc.valid_bit;
+ dma_wmb();
+ qbman_write_register(p, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
+ }
+}
+
+/*
+ * Checks for a completed response (returns non-NULL only if the response
+ * is complete).
+ */
+void *qbman_swp_mc_result(struct qbman_swp *p)
+{
+ u32 *ret, verb;
+
+ if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
+ ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
+ /* Remove the valid-bit - command completed if the rest
+ * is non-zero.
+ */
+ verb = ret[0] & ~QB_VALID_BIT;
+ if (!verb)
+ return NULL;
+ p->mc.valid_bit ^= QB_VALID_BIT;
+ } else {
+ ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR_MEM);
+ /* Command completed if the valid bit is toggled */
+ if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
+ return NULL;
+ /* Command completed if the rest is non-zero */
+ verb = ret[0] & ~QB_VALID_BIT;
+ if (!verb)
+ return NULL;
+ p->mr.valid_bit ^= QB_VALID_BIT;
+ }
+
+ return ret;
+}
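+
+/*
+ * Illustrative sketch of the management command pattern used throughout this
+ * file (the qbman_swp_mc_complete() helper wraps the same steps): start,
+ * fill, submit, then poll for the result.
+ *
+ *	void *cmd = qbman_swp_mc_start(s);
+ *	void *rslt;
+ *
+ *	if (!cmd)
+ *		return -EBUSY;
+ *	// ... fill in command fields, leaving the verb byte alone ...
+ *	qbman_swp_mc_submit(s, cmd, verb);
+ *	do {
+ *		rslt = qbman_swp_mc_result(s);
+ *	} while (!rslt);	// real callers bound this loop
+ */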
+
+#define QB_ENQUEUE_CMD_OPTIONS_SHIFT 0
+enum qb_enqueue_commands {
+ enqueue_empty = 0,
+ enqueue_response_always = 1,
+ enqueue_rejects_to_fq = 2
+};
+
+#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT 2
+#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
+#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT 4
+#define QB_ENQUEUE_CMD_DCA_EN_SHIFT 7
+
+/**
+ * qbman_eq_desc_clear() - Clear the contents of a descriptor to
+ * default/starting state.
+ * @d: the enqueue descriptor to be cleared
+ */
+void qbman_eq_desc_clear(struct qbman_eq_desc *d)
+{
+ memset(d, 0, sizeof(*d));
+}
+
+/**
+ * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
+ * @d: the enqueue descriptor.
+ * @respond_success: 1 = enqueue with response always; 0 = enqueue with
+ * rejections returned on a FQ.
+ */
+void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
+{
+ d->verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
+ if (respond_success)
+ d->verb |= enqueue_response_always;
+ else
+ d->verb |= enqueue_rejects_to_fq;
+}
+
+/*
+ * Exactly one of the following descriptor "targets" should be set. (Calling any
+ * one of these will replace the effect of any prior call to one of these.)
+ * - enqueue to a frame queue
+ * - enqueue to a queuing destination
+ */
+
+/**
+ * qbman_eq_desc_set_fq() - set the FQ for the enqueue command
+ * @d: the enqueue descriptor
+ * @fqid: the id of the frame queue to be enqueued
+ */
+void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid)
+{
+ d->verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
+ d->tgtid = cpu_to_le32(fqid);
+}
+
+/**
+ * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command
+ * @d: the enqueue descriptor
+ * @qdid: the id of the queuing destination to be enqueued
+ * @qd_bin: the queuing destination bin
+ * @qd_prio: the queuing destination priority
+ */
+void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
+ u32 qd_bin, u32 qd_prio)
+{
+ d->verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
+ d->tgtid = cpu_to_le32(qdid);
+ d->qdbin = cpu_to_le16(qd_bin);
+ d->qpri = qd_prio;
+}
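+
+/*
+ * Illustrative enqueue sketch: a hypothetical caller builds a descriptor and
+ * enqueues one frame to frame queue 'fqid' through the qbman_swp_enqueue()
+ * wrapper (see qbman-portal.h):
+ *
+ *	struct qbman_eq_desc ed;
+ *
+ *	qbman_eq_desc_clear(&ed);
+ *	qbman_eq_desc_set_no_orp(&ed, 0);	// rejections go back to the FQ
+ *	qbman_eq_desc_set_fq(&ed, fqid);
+ *	err = qbman_swp_enqueue(swp, &ed, fd);	// -EBUSY if the EQCR is full
+ */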
+
+#define EQAR_IDX(eqar) ((eqar) & 0x7)
+#define EQAR_VB(eqar) ((eqar) & 0x80)
+#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
+
+#define QB_RT_BIT ((u32)0x100)
+/**
+ * qbman_swp_enqueue_direct() - Issue an enqueue command
+ * @s: the software portal used for enqueue
+ * @d: the enqueue descriptor
+ * @fd: the frame descriptor to be enqueued
+ *
+ * Please note that 'fd' should only be NULL if the "action" of the
+ * descriptor is "orp_hole" or "orp_nesn".
+ *
+ * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
+ */
+static
+int qbman_swp_enqueue_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd)
+{
+ int flags = 0;
+ int ret = qbman_swp_enqueue_multiple_direct(s, d, fd, &flags, 1);
+
+ if (ret >= 0)
+ ret = 0;
+ else
+ ret = -EBUSY;
+ return ret;
+}
+
+/**
+ * qbman_swp_enqueue_mem_back() - Issue an enqueue command
+ * @s: the software portal used for enqueue
+ * @d: the enqueue descriptor
+ * @fd: the frame descriptor to be enqueued
+ *
+ * Please note that 'fd' should only be NULL if the "action" of the
+ * descriptor is "orp_hole" or "orp_nesn".
+ *
+ * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
+ */
+static
+int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd)
+{
+ int flags = 0;
+ int ret = qbman_swp_enqueue_multiple_mem_back(s, d, fd, &flags, 1);
+
+ if (ret >= 0)
+ ret = 0;
+ else
+ ret = -EBUSY;
+ return ret;
+}
+
+/**
+ * qbman_swp_enqueue_multiple_direct() - Issue a multi enqueue command
+ * using one enqueue descriptor
+ * @s: the software portal used for enqueue
+ * @d: the enqueue descriptor
+ * @fd: pointer to the table of frame descriptors to be enqueued
+ * @flags: table of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
+ * @num_frames: number of frame descriptors to be enqueued
+ *
+ * Return the number of frame descriptors enqueued, or a negative error number.
+ */
+static
+int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ uint32_t *flags,
+ int num_frames)
+{
+ uint32_t *p = NULL;
+ const uint32_t *cl = (uint32_t *)d;
+ uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+ int i, num_enqueued = 0;
+
+ spin_lock(&s->access_spinlock);
+ half_mask = (s->eqcr.pi_ci_mask>>1);
+ full_mask = s->eqcr.pi_ci_mask;
+
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
+ s->eqcr.ci &= full_mask;
+
+ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+ eqcr_ci, s->eqcr.ci);
+ if (!s->eqcr.available) {
+ spin_unlock(&s->access_spinlock);
+ return 0;
+ }
+ }
+
+ eqcr_pi = s->eqcr.pi;
+ num_enqueued = (s->eqcr.available < num_frames) ?
+ s->eqcr.available : num_frames;
+ s->eqcr.available -= num_enqueued;
+ /* Fill in the EQCR ring */
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ /* Skip copying the verb */
+ memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
+ memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
+ &fd[i], sizeof(*fd));
+ eqcr_pi++;
+ }
+
+ dma_wmb();
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
+ struct qbman_eq_desc *eq_desc = (struct qbman_eq_desc *)p;
+
+ eq_desc->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
+ ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
+ }
+ eqcr_pi++;
+ if (!(eqcr_pi & half_mask))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+ }
+
+ /* Advance the producer index over the entries just written */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++)
+ eqcr_pi++;
+ s->eqcr.pi = eqcr_pi & full_mask;
+ spin_unlock(&s->access_spinlock);
+
+ return num_enqueued;
+}
+
+/**
+ * qbman_swp_enqueue_multiple_mem_back() - Issue a multi enqueue command
+ * using one enqueue descriptor
+ * @s: the software portal used for enqueue
+ * @d: the enqueue descriptor
+ * @fd: pointer to the table of frame descriptors to be enqueued
+ * @flags: table of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
+ * @num_frames: number of frame descriptors to be enqueued
+ *
+ * Return the number of frame descriptors enqueued, or a negative error number.
+ */
+static
+int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ uint32_t *flags,
+ int num_frames)
+{
+ uint32_t *p = NULL;
+ const uint32_t *cl = (uint32_t *)(d);
+ uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+ int i, num_enqueued = 0;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&s->access_spinlock, irq_flags);
+
+ half_mask = (s->eqcr.pi_ci_mask>>1);
+ full_mask = s->eqcr.pi_ci_mask;
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
+ s->eqcr.ci &= full_mask;
+ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+ eqcr_ci, s->eqcr.ci);
+ if (!s->eqcr.available) {
+ spin_unlock_irqrestore(&s->access_spinlock, irq_flags);
+ return 0;
+ }
+ }
+
+ eqcr_pi = s->eqcr.pi;
+ num_enqueued = (s->eqcr.available < num_frames) ?
+ s->eqcr.available : num_frames;
+ s->eqcr.available -= num_enqueued;
+ /* Fill in the EQCR ring */
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ /* Skip copying the verb */
+ memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
+ memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
+ &fd[i], sizeof(*fd));
+ eqcr_pi++;
+ }
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
+ struct qbman_eq_desc *eq_desc = (struct qbman_eq_desc *)p;
+
+ eq_desc->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
+ ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
+ }
+ eqcr_pi++;
+ if (!(eqcr_pi & half_mask))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+ }
+ s->eqcr.pi = eqcr_pi & full_mask;
+
+ dma_wmb();
+ qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
+ (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
+ spin_unlock_irqrestore(&s->access_spinlock, irq_flags);
+
+ return num_enqueued;
+}
+
+/**
+ * qbman_swp_enqueue_multiple_desc_direct() - Issue a multi enqueue command
+ * using multiple enqueue descriptors
+ * @s: the software portal used for enqueue
+ * @d: table of minimal enqueue descriptors
+ * @fd: pointer to the table of frame descriptors to be enqueued
+ * @num_frames: number of frame descriptors to be enqueued
+ *
+ * Return the number of frame descriptors enqueued, or a negative error number.
+ */
+static
+int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ int num_frames)
+{
+ uint32_t *p;
+ const uint32_t *cl;
+ uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+ int i, num_enqueued = 0;
+
+ half_mask = (s->eqcr.pi_ci_mask>>1);
+ full_mask = s->eqcr.pi_ci_mask;
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
+ s->eqcr.ci &= full_mask;
+ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+ eqcr_ci, s->eqcr.ci);
+ if (!s->eqcr.available)
+ return 0;
+ }
+
+ eqcr_pi = s->eqcr.pi;
+ num_enqueued = (s->eqcr.available < num_frames) ?
+ s->eqcr.available : num_frames;
+ s->eqcr.available -= num_enqueued;
+ /* Fill in the EQCR ring */
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ cl = (uint32_t *)(&d[i]);
+ /* Skip copying the verb */
+ memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
+ memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
+ &fd[i], sizeof(*fd));
+ eqcr_pi++;
+ }
+
+ dma_wmb();
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ cl = (uint32_t *)(&d[i]);
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ eqcr_pi++;
+ if (!(eqcr_pi & half_mask))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+ }
+
+ /* Advance the producer index over the entries just written */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++)
+ eqcr_pi++;
+ s->eqcr.pi = eqcr_pi & full_mask;
+
+ return num_enqueued;
+}
+
+/**
+ * qbman_swp_enqueue_multiple_desc_mem_back() - Issue a multi enqueue command
+ * using multiple enqueue descriptors
+ * @s: the software portal used for enqueue
+ * @d: table of minimal enqueue descriptors
+ * @fd: pointer to the table of frame descriptors to be enqueued
+ * @num_frames: number of frame descriptors to be enqueued
+ *
+ * Return the number of frame descriptors enqueued, or a negative error number.
+ */
+static
+int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ int num_frames)
+{
+ uint32_t *p;
+ const uint32_t *cl;
+ uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+ int i, num_enqueued = 0;
+
+ half_mask = (s->eqcr.pi_ci_mask>>1);
+ full_mask = s->eqcr.pi_ci_mask;
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
+ s->eqcr.ci &= full_mask;
+ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+ eqcr_ci, s->eqcr.ci);
+ if (!s->eqcr.available)
+ return 0;
+ }
+
+ eqcr_pi = s->eqcr.pi;
+ num_enqueued = (s->eqcr.available < num_frames) ?
+ s->eqcr.available : num_frames;
+ s->eqcr.available -= num_enqueued;
+ /* Fill in the EQCR ring */
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ cl = (uint32_t *)(&d[i]);
+ /* Skip copying the verb */
+ memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
+ memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
+ &fd[i], sizeof(*fd));
+ eqcr_pi++;
+ }
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ cl = (uint32_t *)(&d[i]);
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ eqcr_pi++;
+ if (!(eqcr_pi & half_mask))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+ }
+
+ s->eqcr.pi = eqcr_pi & full_mask;
+
+ dma_wmb();
+ qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
+ (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
+
+ return num_enqueued;
+}
+
+/* Static (push) dequeue */
+
+/**
+ * qbman_swp_push_get() - Get the push dequeue setup
+ * @s: the software portal object
+ * @channel_idx: the channel index to query
+ * @enabled: returned boolean to show whether the push dequeue is enabled
+ * for the given channel
+ */
+void qbman_swp_push_get(struct qbman_swp *s, u8 channel_idx, int *enabled)
+{
+ u16 src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
+
+ WARN_ON(channel_idx > 15);
+ *enabled = !!(src & (1 << channel_idx));
+}
+
+/**
+ * qbman_swp_push_set() - Enable or disable push dequeue
+ * @s: the software portal object
+ * @channel_idx: the channel index (0 to 15)
+ * @enable: enable or disable push dequeue
+ */
+void qbman_swp_push_set(struct qbman_swp *s, u8 channel_idx, int enable)
+{
+ u16 dqsrc;
+
+ WARN_ON(channel_idx > 15);
+ if (enable)
+ s->sdq |= 1 << channel_idx;
+ else
+ s->sdq &= ~(1 << channel_idx);
+
+ /* Rebuild the complete src map. If no channels are enabled,
+ * the SDQCR must be 0 or else QMan will assert errors
+ */
+ dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
+ if (dqsrc != 0)
+ qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, s->sdq);
+ else
+ qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, 0);
+}
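+
+/*
+ * Illustrative sketch: enabling push dequeue on channel index 0 makes QMan
+ * deliver dequeue responses to this portal's DQRR, where they are picked up
+ * with qbman_swp_dqrr_next() and acknowledged with qbman_swp_dqrr_consume():
+ *
+ *	qbman_swp_push_set(swp, 0, 1);	// start push dequeues
+ *	...
+ *	qbman_swp_push_set(swp, 0, 0);	// stop them again
+ */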
+
+#define QB_VDQCR_VERB_DCT_SHIFT 0
+#define QB_VDQCR_VERB_DT_SHIFT 2
+#define QB_VDQCR_VERB_RLS_SHIFT 4
+#define QB_VDQCR_VERB_WAE_SHIFT 5
+
+enum qb_pull_dt_e {
+ qb_pull_dt_channel,
+ qb_pull_dt_workqueue,
+ qb_pull_dt_framequeue
+};
+
+/**
+ * qbman_pull_desc_clear() - Clear the contents of a descriptor to
+ * default/starting state
+ * @d: the pull dequeue descriptor to be cleared
+ */
+void qbman_pull_desc_clear(struct qbman_pull_desc *d)
+{
+ memset(d, 0, sizeof(*d));
+}
+
+/**
+ * qbman_pull_desc_set_storage()- Set the pull dequeue storage
+ * @d: the pull dequeue descriptor to be set
+ * @storage: the pointer of the memory to store the dequeue result
+ * @storage_phys: the physical address of the storage memory
+ * @stash: to indicate whether write allocate is enabled
+ *
+ * If not called, or if called with 'storage' as NULL, the resulting pull
+ * dequeues
+ * will produce results to DQRR. If 'storage' is non-NULL, then results are
+ * produced to the given memory location (using the DMA address which
+ * the caller provides in 'storage_phys'), and 'stash' controls whether or not
+ * those writes to main-memory express a cache-warming attribute.
+ */
+void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
+ struct dpaa2_dq *storage,
+ dma_addr_t storage_phys,
+ int stash)
+{
+ /* save the virtual address */
+ d->rsp_addr_virt = (u64)(uintptr_t)storage;
+
+ if (!storage) {
+ d->verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
+ return;
+ }
+ d->verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
+ if (stash)
+ d->verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
+ else
+ d->verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);
+
+ d->rsp_addr = cpu_to_le64(storage_phys);
+}
+
+/**
+ * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued
+ * @d: the pull dequeue descriptor to be set
+ * @numframes: number of frames to be set, must be between 1 and 16, inclusive
+ */
+void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes)
+{
+ d->numf = numframes - 1;
+}
+
+/*
+ * Exactly one of the following descriptor "actions" should be set. (Calling any
+ * one of these will replace the effect of any prior call to one of these.)
+ * - pull dequeue from the given frame queue (FQ)
+ * - pull dequeue from any FQ in the given work queue (WQ)
+ * - pull dequeue from any FQ in any WQ in the given channel
+ */
+
+/**
+ * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues
+ * @d: the pull dequeue descriptor to be set
+ * @fqid: the frame queue index of the given FQ
+ */
+void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid)
+{
+ d->verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
+ d->verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
+ d->dq_src = cpu_to_le32(fqid);
+}
+
+/**
+ * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues
+ * @d: the pull dequeue descriptor to be set
+ * @wqid: composed of channel id and wqid within the channel
+ * @dct: the dequeue command type
+ */
+void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
+ enum qbman_pull_type_e dct)
+{
+ d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
+ d->verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
+ d->dq_src = cpu_to_le32(wqid);
+}
+
+/**
+ * qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
+ * dequeues
+ * @d: the pull dequeue descriptor to be set
+ * @chid: the channel id to be dequeued
+ * @dct: the dequeue command type
+ */
+void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
+ enum qbman_pull_type_e dct)
+{
+ d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
+ d->verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
+ d->dq_src = cpu_to_le32(chid);
+}
+
+/**
+ * qbman_swp_pull_direct() - Issue the pull dequeue command
+ * @s: the software portal object
+ * @d: the software portal descriptor which has been configured with
+ * the set of qbman_pull_desc_set_*() calls
+ *
+ * Return 0 for success, and -EBUSY if the software portal is not ready
+ * to do pull dequeue.
+ */
+static
+int qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d)
+{
+ struct qbman_pull_desc *p;
+
+ if (!atomic_dec_and_test(&s->vdq.available)) {
+ atomic_inc(&s->vdq.available);
+ return -EBUSY;
+ }
+ s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
+ if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
+ else
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
+ p->numf = d->numf;
+ p->tok = QMAN_DQ_TOKEN_VALID;
+ p->dq_src = d->dq_src;
+ p->rsp_addr = d->rsp_addr;
+ p->rsp_addr_virt = d->rsp_addr_virt;
+ dma_wmb();
+ /* Set the verb byte, have to substitute in the valid-bit */
+ p->verb = d->verb | s->vdq.valid_bit;
+ s->vdq.valid_bit ^= QB_VALID_BIT;
+
+ return 0;
+}
+
+/**
+ * qbman_swp_pull_mem_back() - Issue the pull dequeue command
+ * @s: the software portal object
+ * @d: the software portal descriptor which has been configured with
+ * the set of qbman_pull_desc_set_*() calls
+ *
+ * Return 0 for success, and -EBUSY if the software portal is not ready
+ * to do pull dequeue.
+ */
+static
+int qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d)
+{
+ struct qbman_pull_desc *p;
+
+ if (!atomic_dec_and_test(&s->vdq.available)) {
+ atomic_inc(&s->vdq.available);
+ return -EBUSY;
+ }
+ s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
+ if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
+ else
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
+ p->numf = d->numf;
+ p->tok = QMAN_DQ_TOKEN_VALID;
+ p->dq_src = d->dq_src;
+ p->rsp_addr = d->rsp_addr;
+ p->rsp_addr_virt = d->rsp_addr_virt;
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ p->verb = d->verb | s->vdq.valid_bit;
+ s->vdq.valid_bit ^= QB_VALID_BIT;
+ dma_wmb();
+ qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
+
+ return 0;
+}
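+
+/*
+ * Illustrative volatile-dequeue sketch: a hypothetical caller with DMA-mapped
+ * result storage (storage_va/storage_dma) issues the pull through the
+ * qbman_swp_pull() wrapper and then polls the storage for completion:
+ *
+ *	struct qbman_pull_desc pd;
+ *
+ *	qbman_pull_desc_clear(&pd);
+ *	qbman_pull_desc_set_numframes(&pd, 16);
+ *	qbman_pull_desc_set_fq(&pd, fqid);
+ *	qbman_pull_desc_set_storage(&pd, storage_va, storage_dma, 1);
+ *	if (!qbman_swp_pull(swp, &pd))
+ *		while (!qbman_result_has_new_result(swp, storage_va))
+ *			cpu_relax();
+ */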
+
+#define QMAN_DQRR_PI_MASK 0xf
+
+/**
+ * qbman_swp_dqrr_next_direct() - Get a valid DQRR entry
+ * @s: the software portal object
+ *
+ * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
+ * only once, so repeated calls can return a sequence of DQRR entries, without
+ * requiring they be consumed immediately or in any particular order.
+ */
+const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
+{
+ u32 verb;
+ u32 response_verb;
+ u32 flags;
+ struct dpaa2_dq *p;
+
+ /* Before using valid-bit to detect if something is there, we have to
+ * handle the case of the DQRR reset bug...
+ */
+ if (unlikely(s->dqrr.reset_bug)) {
+ /*
+ * We pick up new entries by cache-inhibited producer index,
+ * which means that a non-coherent mapping would require us to
+ * invalidate and read *only* once that PI has indicated that
+ * there's an entry here. The first trip around the DQRR ring
+ * will be much less efficient than all subsequent trips around
+ * it...
+ */
+ u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
+ QMAN_DQRR_PI_MASK;
+
+ /* there are new entries if pi != next_idx */
+ if (pi == s->dqrr.next_idx)
+ return NULL;
+
+ /*
+ * if next_idx is/was the last ring index, and 'pi' is
+ * different, we can disable the workaround as all the ring
+ * entries have now been DMA'd to so valid-bit checking is
+ * repaired. Note: this logic needs to be based on next_idx
+ * (which increments one at a time), rather than on pi (which
+ * can burst and wrap-around between our snapshots of it).
+ */
+ if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
+ pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
+ s->dqrr.next_idx, pi);
+ s->dqrr.reset_bug = 0;
+ }
+ prefetch(qbman_get_cmd(s,
+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+ }
+
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
+ verb = p->dq.verb;
+
+ /*
+ * If the valid-bit isn't of the expected polarity, nothing there. Note,
+ * in the DQRR reset bug workaround, we shouldn't need to skip this
+ * check, because we've already determined that a new entry is available
+ * and we've invalidated the cacheline before reading it, so the
+ * valid-bit behaviour is repaired and should tell us what we already
+ * knew from reading PI.
+ */
+ if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
+ prefetch(qbman_get_cmd(s,
+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+ return NULL;
+ }
+ /*
+ * There's something there. Move "next_idx" attention to the next ring
+ * entry (and prefetch it) before returning what we found.
+ */
+ s->dqrr.next_idx++;
+ s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
+ if (!s->dqrr.next_idx)
+ s->dqrr.valid_bit ^= QB_VALID_BIT;
+
+ /*
+ * If this is the final response to a volatile dequeue command
+ * indicate that the vdq is available
+ */
+ flags = p->dq.stat;
+ response_verb = verb & QBMAN_RESULT_MASK;
+ if ((response_verb == QBMAN_RESULT_DQ) &&
+ (flags & DPAA2_DQ_STAT_VOLATILE) &&
+ (flags & DPAA2_DQ_STAT_EXPIRED))
+ atomic_inc(&s->vdq.available);
+
+ prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+
+ return p;
+}
+
+/**
+ * qbman_swp_dqrr_next_mem_back() - Get a valid DQRR entry
+ * @s: the software portal object
+ *
+ * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
+ * only once, so repeated calls can return a sequence of DQRR entries, without
+ * requiring they be consumed immediately or in any particular order.
+ */
+const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
+{
+ u32 verb;
+ u32 response_verb;
+ u32 flags;
+ struct dpaa2_dq *p;
+
+ /* Before using valid-bit to detect if something is there, we have to
+ * handle the case of the DQRR reset bug...
+ */
+ if (unlikely(s->dqrr.reset_bug)) {
+ /*
+ * We pick up new entries by cache-inhibited producer index,
+ * which means that a non-coherent mapping would require us to
+ * invalidate and read *only* once that PI has indicated that
+ * there's an entry here. The first trip around the DQRR ring
+ * will be much less efficient than all subsequent trips around
+ * it...
+ */
+ u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
+ QMAN_DQRR_PI_MASK;
+
+ /* there are new entries if pi != next_idx */
+ if (pi == s->dqrr.next_idx)
+ return NULL;
+
+ /*
+ * if next_idx is/was the last ring index, and 'pi' is
+ * different, we can disable the workaround as all the ring
+ * entries have now been DMA'd to so valid-bit checking is
+ * repaired. Note: this logic needs to be based on next_idx
+ * (which increments one at a time), rather than on pi (which
+ * can burst and wrap-around between our snapshots of it).
+ */
+ if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
+ pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
+ s->dqrr.next_idx, pi);
+ s->dqrr.reset_bug = 0;
+ }
+ prefetch(qbman_get_cmd(s,
+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+ }
+
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
+ verb = p->dq.verb;
+
+ /*
+ * If the valid-bit isn't of the expected polarity, nothing there. Note,
+ * in the DQRR reset bug workaround, we shouldn't need to skip this
+ * check, because we've already determined that a new entry is available
+ * and we've invalidated the cacheline before reading it, so the
+ * valid-bit behaviour is repaired and should tell us what we already
+ * knew from reading PI.
+ */
+ if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
+ prefetch(qbman_get_cmd(s,
+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+ return NULL;
+ }
+ /*
+ * There's something there. Move "next_idx" attention to the next ring
+ * entry (and prefetch it) before returning what we found.
+ */
+ s->dqrr.next_idx++;
+ s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
+ if (!s->dqrr.next_idx)
+ s->dqrr.valid_bit ^= QB_VALID_BIT;
+
+ /*
+ * If this is the final response to a volatile dequeue command
+ * indicate that the vdq is available
+ */
+ flags = p->dq.stat;
+ response_verb = verb & QBMAN_RESULT_MASK;
+ if ((response_verb == QBMAN_RESULT_DQ) &&
+ (flags & DPAA2_DQ_STAT_VOLATILE) &&
+ (flags & DPAA2_DQ_STAT_EXPIRED))
+ atomic_inc(&s->vdq.available);
+
+ prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+
+ return p;
+}
+
+/**
+ * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from
+ * qbman_swp_dqrr_next().
+ * @s: the software portal object
+ * @dq: the DQRR entry to be consumed
+ */
+void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq)
+{
+ qbman_write_register(s, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
+}
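+
+/*
+ * Illustrative DQRR polling loop (e.g. from a hypothetical NAPI handler):
+ * every entry returned by qbman_swp_dqrr_next() must eventually be handed
+ * back with qbman_swp_dqrr_consume():
+ *
+ *	const struct dpaa2_dq *dq;
+ *
+ *	while ((dq = qbman_swp_dqrr_next(swp)) != NULL) {
+ *		// ... process dpaa2_dq_fd(dq) ...
+ *		qbman_swp_dqrr_consume(swp, dq);
+ *	}
+ */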
+
+/**
+ * qbman_result_has_new_result() - Check and get the dequeue response from the
+ * dq storage memory set in pull dequeue command
+ * @s: the software portal object
+ * @dq: the dequeue result read from the memory
+ *
+ * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
+ * dequeue result.
+ *
+ * Only used for user-provided storage of dequeue results, not DQRR. For
+ * efficiency purposes, the driver will perform any required endianness
+ * conversion to ensure that the user's dequeue result storage is in host-endian
+ * format. As such, once the user has called qbman_result_has_new_result() and
+ * been returned a valid dequeue result, they should not call it again on
+ * the same memory location (except of course if another dequeue command has
+ * been executed to produce a new result to that location).
+ */
+int qbman_result_has_new_result(struct qbman_swp *s, const struct dpaa2_dq *dq)
+{
+ if (dq->dq.tok != QMAN_DQ_TOKEN_VALID)
+ return 0;
+
+ /*
+ * Set token to be 0 so we will detect change back to 1
+ * next time the looping is traversed. Const is cast away here
+ * as we want users to treat the dequeue responses as read only.
+ */
+ ((struct dpaa2_dq *)dq)->dq.tok = 0;
+
+ /*
+ * Determine whether VDQCR is available based on whether the
+ * current result is sitting in the first storage location of
+ * the busy command.
+ */
+ if (s->vdq.storage == dq) {
+ s->vdq.storage = NULL;
+ atomic_inc(&s->vdq.available);
+ }
+
+ return 1;
+}
+
+/**
+ * qbman_release_desc_clear() - Clear the contents of a descriptor to
+ * default/starting state.
+ * @d: the release descriptor to be cleared
+ */
+void qbman_release_desc_clear(struct qbman_release_desc *d)
+{
+ memset(d, 0, sizeof(*d));
+ d->verb = 1 << 5; /* Release Command Valid */
+}
+
+/**
+ * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
+ * @d: the release descriptor to be set
+ * @bpid: the bpid value to be set
+ */
+void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid)
+{
+ d->bpid = cpu_to_le16(bpid);
+}
+
+/**
+ * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
+ * interrupt source should be asserted after the release command is completed.
+ * @d: the release descriptor to be set
+ * @enable: enable (1) or disable (0) value
+ */
+void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
+{
+ if (enable)
+ d->verb |= 1 << 6;
+ else
+ d->verb &= ~(1 << 6);
+}
+
+#define RAR_IDX(rar) ((rar) & 0x7)
+#define RAR_VB(rar) ((rar) & 0x80)
+#define RAR_SUCCESS(rar) ((rar) & 0x100)
+
+/**
+ * qbman_swp_release_direct() - Issue a buffer release command
+ * @s: the software portal object
+ * @d: the release descriptor
+ * @buffers: pointer to the array of buffer addresses to be released
+ * @num_buffers: number of buffers to be released, must be between 1 and 7,
+ * inclusive
+ *
+ * Return 0 for success, -EBUSY if the release command ring is not ready.
+ */
+int qbman_swp_release_direct(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const u64 *buffers, unsigned int num_buffers)
+{
+ int i;
+ struct qbman_release_desc *p;
+ u32 rar;
+
+ if (!num_buffers || (num_buffers > 7))
+ return -EINVAL;
+
+ rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
+ if (!RAR_SUCCESS(rar))
+ return -EBUSY;
+
+ /* Start the release command */
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
+
+ /* Copy the caller's buffer pointers to the command */
+ for (i = 0; i < num_buffers; i++)
+ p->buf[i] = cpu_to_le64(buffers[i]);
+ p->bpid = d->bpid;
+
+ /*
+ * Set the verb byte, have to substitute in the valid-bit
+ * and the number of buffers.
+ */
+ dma_wmb();
+ p->verb = d->verb | RAR_VB(rar) | num_buffers;
+
+ return 0;
+}
+
+/**
+ * qbman_swp_release_mem_back() - Issue a buffer release command
+ * @s: the software portal object
+ * @d: the release descriptor
+ * @buffers: pointer to the array of buffer addresses to be released
+ * @num_buffers: number of buffers to be released, must be between 1 and 7,
+ * inclusive
+ *
+ * Return 0 for success, -EBUSY if the release command ring is not ready.
+ */
+int qbman_swp_release_mem_back(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const u64 *buffers, unsigned int num_buffers)
+{
+ int i;
+ struct qbman_release_desc *p;
+ u32 rar;
+
+ if (!num_buffers || (num_buffers > 7))
+ return -EINVAL;
+
+ rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
+ if (!RAR_SUCCESS(rar))
+ return -EBUSY;
+
+ /* Start the release command */
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
+
+ /* Copy the caller's buffer pointers to the command */
+ for (i = 0; i < num_buffers; i++)
+ p->buf[i] = cpu_to_le64(buffers[i]);
+ p->bpid = d->bpid;
+
+ p->verb = d->verb | RAR_VB(rar) | num_buffers;
+ dma_wmb();
+ qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT +
+ RAR_IDX(rar) * 4, QMAN_RT_MODE);
+
+ return 0;
+}
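+
+/*
+ * Illustrative release sketch: returning two buffers to buffer pool 'bpid'
+ * through the qbman_swp_release() wrapper, with hypothetical DMA addresses:
+ *
+ *	struct qbman_release_desc rd;
+ *	u64 bufs[2] = { addr0, addr1 };
+ *
+ *	qbman_release_desc_clear(&rd);
+ *	qbman_release_desc_set_bpid(&rd, bpid);
+ *	err = qbman_swp_release(swp, &rd, bufs, 2);	// -EBUSY if RCR is busy
+ */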
+
+struct qbman_acquire_desc {
+ u8 verb;
+ u8 reserved;
+ __le16 bpid;
+ u8 num;
+ u8 reserved2[59];
+};
+
+struct qbman_acquire_rslt {
+ u8 verb;
+ u8 rslt;
+ __le16 reserved;
+ u8 num;
+ u8 reserved2[3];
+ __le64 buf[7];
+};
+
+/**
+ * qbman_swp_acquire() - Issue a buffer acquire command
+ * @s: the software portal object
+ * @bpid: the buffer pool index
+ * @buffers: the array to which the acquired buffer addresses are written
+ * @num_buffers: number of buffers to be acquired, must be between 1 and 7,
+ * inclusive
+ *
+ * Return the number of buffers acquired, or a negative error code if the
+ * acquire command fails.
+ */
+int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
+ unsigned int num_buffers)
+{
+ struct qbman_acquire_desc *p;
+ struct qbman_acquire_rslt *r;
+ int i;
+
+ if (!num_buffers || (num_buffers > 7))
+ return -EINVAL;
+
+ /* Start the management command */
+ p = qbman_swp_mc_start(s);
+
+ if (!p)
+ return -EBUSY;
+
+ /* Encode the caller-provided attributes */
+ p->bpid = cpu_to_le16(bpid);
+ p->num = num_buffers;
+
+ /* Complete the management command */
+ r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
+ if (unlikely(!r)) {
+ pr_err("qbman: acquire from BPID %d failed, no response\n",
+ bpid);
+ return -EIO;
+ }
+
+ /* Decode the outcome */
+ WARN_ON((r->verb & 0x7f) != QBMAN_MC_ACQUIRE);
+
+ /* Determine success or failure */
+ if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
+ pr_err("qbman: acquire from BPID 0x%x failed, code=0x%02x\n",
+ bpid, r->rslt);
+ return -EIO;
+ }
+
+ WARN_ON(r->num > num_buffers);
+
+ /* Copy the acquired buffers to the caller's array */
+ for (i = 0; i < r->num; i++)
+ buffers[i] = le64_to_cpu(r->buf[i]);
+
+ return (int)r->num;
+}
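+
+/*
+ * Illustrative acquire sketch: fewer buffers than requested may be returned,
+ * so a caller must trust the return value rather than the request size:
+ *
+ *	u64 bufs[7];
+ *	int i, n = qbman_swp_acquire(swp, bpid, bufs, 7);
+ *
+ *	for (i = 0; i < n; i++)
+ *		; // bufs[i] holds an acquired buffer address
+ */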
+
+struct qbman_alt_fq_state_desc {
+ u8 verb;
+ u8 reserved[3];
+ __le32 fqid;
+ u8 reserved2[56];
+};
+
+struct qbman_alt_fq_state_rslt {
+ u8 verb;
+ u8 rslt;
+ u8 reserved[62];
+};
+
+#define ALT_FQ_FQID_MASK 0x00FFFFFF
+
+int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
+ u8 alt_fq_verb)
+{
+ struct qbman_alt_fq_state_desc *p;
+ struct qbman_alt_fq_state_rslt *r;
+
+ /* Start the management command */
+ p = qbman_swp_mc_start(s);
+ if (!p)
+ return -EBUSY;
+
+ p->fqid = cpu_to_le32(fqid & ALT_FQ_FQID_MASK);
+
+ /* Complete the management command */
+ r = qbman_swp_mc_complete(s, p, alt_fq_verb);
+ if (unlikely(!r)) {
+ pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
+ alt_fq_verb);
+ return -EIO;
+ }
+
+ /* Decode the outcome */
+ WARN_ON((r->verb & QBMAN_RESULT_MASK) != alt_fq_verb);
+
+ /* Determine success or failure */
+ if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
+ pr_err("qbman: ALT FQID %d failed: verb = 0x%08x code = 0x%02x\n",
+ fqid, r->verb, r->rslt);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+struct qbman_cdan_ctrl_desc {
+ u8 verb;
+ u8 reserved;
+ __le16 ch;
+ u8 we;
+ u8 ctrl;
+ __le16 reserved2;
+ __le64 cdan_ctx;
+ u8 reserved3[48];
+
+};
+
+struct qbman_cdan_ctrl_rslt {
+ u8 verb;
+ u8 rslt;
+ __le16 ch;
+ u8 reserved[60];
+};
+
+int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
+ u8 we_mask, u8 cdan_en,
+ u64 ctx)
+{
+ struct qbman_cdan_ctrl_desc *p = NULL;
+ struct qbman_cdan_ctrl_rslt *r = NULL;
+
+ /* Start the management command */
+ p = qbman_swp_mc_start(s);
+ if (!p)
+ return -EBUSY;
+
+ /* Encode the caller-provided attributes */
+ p->ch = cpu_to_le16(channelid);
+ p->we = we_mask;
+ if (cdan_en)
+ p->ctrl = 1;
+ else
+ p->ctrl = 0;
+ p->cdan_ctx = cpu_to_le64(ctx);
+
+ /* Complete the management command */
+ r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
+ if (unlikely(!r)) {
+ pr_err("qbman: wqchan config failed, no response\n");
+ return -EIO;
+ }
+
+ WARN_ON((r->verb & 0x7f) != QBMAN_WQCHAN_CONFIGURE);
+
+ /* Determine success or failure */
+ if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
+ pr_err("qbman: CDAN cQID %d failed: code = 0x%02x\n",
+ channelid, r->rslt);
+ return -EIO;
+ }
+
+ return 0;
+}
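+
+/*
+ * Illustrative sketch: a consumer enables channel data availability
+ * notifications roughly like this, using the write-enable bits defined in
+ * qbman-portal.h:
+ *
+ *	err = qbman_swp_CDAN_set(swp, channel_id,
+ *				 CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
+ *				 1, ctx);	// enable CDAN, set 64-bit context
+ */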
+
+#define QBMAN_RESPONSE_VERB_MASK 0x7f
+#define QBMAN_FQ_QUERY_NP 0x45
+#define QBMAN_BP_QUERY 0x32
+
+struct qbman_fq_query_desc {
+ u8 verb;
+ u8 reserved[3];
+ __le32 fqid;
+ u8 reserved2[56];
+};
+
+int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
+ struct qbman_fq_query_np_rslt *r)
+{
+ struct qbman_fq_query_desc *p;
+ void *resp;
+
+ p = (struct qbman_fq_query_desc *)qbman_swp_mc_start(s);
+ if (!p)
+ return -EBUSY;
+
+ /* FQID is a 24 bit value */
+ p->fqid = cpu_to_le32(fqid & 0x00FFFFFF);
+ resp = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY_NP);
+ if (!resp) {
+ pr_err("qbman: Query FQID %d NP fields failed, no response\n",
+ fqid);
+ return -EIO;
+ }
+ *r = *(struct qbman_fq_query_np_rslt *)resp;
+ /* Decode the outcome */
+ WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_FQ_QUERY_NP);
+
+ /* Determine success or failure */
+ if (r->rslt != QBMAN_MC_RSLT_OK) {
+ pr_err("Query NP fields of FQID 0x%x failed, code=0x%02x\n",
+ fqid, r->rslt);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r)
+{
+ return (le32_to_cpu(r->frm_cnt) & 0x00FFFFFF);
+}
+
+u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r)
+{
+ return le32_to_cpu(r->byte_cnt);
+}
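+
+/*
+ * Illustrative query sketch: reading the backlog of a frame queue:
+ *
+ *	struct qbman_fq_query_np_rslt state;
+ *
+ *	if (!qbman_fq_query_state(swp, fqid, &state)) {
+ *		u32 frames = qbman_fq_state_frame_count(&state);
+ *		u32 bytes = qbman_fq_state_byte_count(&state);
+ *		// ... report or act on the backlog ...
+ *	}
+ */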
+
+struct qbman_bp_query_desc {
+ u8 verb;
+ u8 reserved;
+ __le16 bpid;
+ u8 reserved2[60];
+};
+
+int qbman_bp_query(struct qbman_swp *s, u16 bpid,
+ struct qbman_bp_query_rslt *r)
+{
+ struct qbman_bp_query_desc *p;
+ void *resp;
+
+ p = (struct qbman_bp_query_desc *)qbman_swp_mc_start(s);
+ if (!p)
+ return -EBUSY;
+
+ p->bpid = cpu_to_le16(bpid);
+ resp = qbman_swp_mc_complete(s, p, QBMAN_BP_QUERY);
+ if (!resp) {
+ pr_err("qbman: Query BPID %d fields failed, no response\n",
+ bpid);
+ return -EIO;
+ }
+ *r = *(struct qbman_bp_query_rslt *)resp;
+ /* Decode the outcome */
+ WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_BP_QUERY);
+
+ /* Determine success or failure */
+ if (r->rslt != QBMAN_MC_RSLT_OK) {
+ pr_err("Query fields of BPID 0x%x failed, code=0x%02x\n",
+ bpid, r->rslt);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a)
+{
+ return le32_to_cpu(a->fill);
+}
+
+/**
+ * qbman_swp_set_irq_coalescing() - Set new IRQ coalescing values
+ * @p: the software portal object
+ * @irq_threshold: interrupt threshold
+ * @irq_holdoff: interrupt holdoff (timeout) period in us
+ *
+ * Return 0 for success, or negative error code on error.
+ */
+int qbman_swp_set_irq_coalescing(struct qbman_swp *p, u32 irq_threshold,
+ u32 irq_holdoff)
+{
+ u32 itp, max_holdoff;
+
+ /* Convert the irq_holdoff value from microseconds to increments of
+ * 256 QBMAN clock cycles. This depends on the QBMAN internal frequency.
+ */
+ itp = (irq_holdoff * 1000) / p->desc->qman_256_cycles_per_ns;
+ if (itp > 4096) {
+ max_holdoff = (p->desc->qman_256_cycles_per_ns * 4096) / 1000;
+ pr_err("irq_holdoff must be <= %uus\n", max_holdoff);
+ return -EINVAL;
+ }
+
+ if (irq_threshold >= p->dqrr.dqrr_size) {
+ pr_err("irq_threshold must be < %u\n", p->dqrr.dqrr_size - 1);
+ return -EINVAL;
+ }
+
+ p->irq_threshold = irq_threshold;
+ p->irq_holdoff = irq_holdoff;
+
+ qbman_write_register(p, QBMAN_CINH_SWP_DQRR_ITR, irq_threshold);
+ qbman_write_register(p, QBMAN_CINH_SWP_ITPR, itp);
+
+ return 0;
+}
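+
+/*
+ * Illustrative sketch: a hypothetical caller batching interrupts to at most
+ * one per 3 outstanding DQRR entries or 50 us, whichever comes first (the
+ * threshold must stay below the DQRR ring size):
+ *
+ *	err = qbman_swp_set_irq_coalescing(swp, 3, 50);
+ */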
+
+/**
+ * qbman_swp_get_irq_coalescing() - Get the current IRQ coalescing parameters
+ * @p: the software portal object
+ * @irq_threshold: interrupt threshold (an IRQ is generated when there are more
+ * DQRR entries in the portal than the threshold)
+ * @irq_holdoff: interrupt holdoff (timeout) period in us
+ */
+void qbman_swp_get_irq_coalescing(struct qbman_swp *p, u32 *irq_threshold,
+ u32 *irq_holdoff)
+{
+ if (irq_threshold)
+ *irq_threshold = p->irq_threshold;
+ if (irq_holdoff)
+ *irq_holdoff = p->irq_holdoff;
+}
diff --git a/drivers/soc/fsl/dpio/qbman-portal.h b/drivers/soc/fsl/dpio/qbman-portal.h
new file mode 100644
index 0000000000..b23883dd27
--- /dev/null
+++ b/drivers/soc/fsl/dpio/qbman-portal.h
@@ -0,0 +1,664 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Copyright 2016-2019 NXP
+ *
+ */
+#ifndef __FSL_QBMAN_PORTAL_H
+#define __FSL_QBMAN_PORTAL_H
+
+#include <soc/fsl/dpaa2-fd.h>
+
+#define QMAN_REV_4000 0x04000000
+#define QMAN_REV_4100 0x04010000
+#define QMAN_REV_4101 0x04010001
+#define QMAN_REV_5000 0x05000000
+
+#define QMAN_REV_MASK 0xffff0000
+
+struct dpaa2_dq;
+struct qbman_swp;
+
+/* qbman software portal descriptor structure */
+struct qbman_swp_desc {
+ void *cena_bar; /* Cache-enabled portal base address */
+ void __iomem *cinh_bar; /* Cache-inhibited portal base address */
+ u32 qman_version;
+ u32 qman_clk;
+ u32 qman_256_cycles_per_ns;
+};
+
+#define QBMAN_SWP_INTERRUPT_EQRI 0x01
+#define QBMAN_SWP_INTERRUPT_EQDI 0x02
+#define QBMAN_SWP_INTERRUPT_DQRI 0x04
+#define QBMAN_SWP_INTERRUPT_RCRI 0x08
+#define QBMAN_SWP_INTERRUPT_RCDI 0x10
+#define QBMAN_SWP_INTERRUPT_VDCI 0x20
+
+/* the structure for pull dequeue descriptor */
+struct qbman_pull_desc {
+ u8 verb;
+ u8 numf;
+ u8 tok;
+ u8 reserved;
+ __le32 dq_src;
+ __le64 rsp_addr;
+ u64 rsp_addr_virt;
+ u8 padding[40];
+};
+
+enum qbman_pull_type_e {
+ /* dequeue with priority precedence, respect intra-class scheduling */
+ qbman_pull_type_prio = 1,
+ /* dequeue with active FQ precedence, respect ICS */
+ qbman_pull_type_active,
+ /* dequeue with active FQ precedence, no ICS */
+ qbman_pull_type_active_noics
+};
+
+/* Definitions for parsing dequeue entries */
+#define QBMAN_RESULT_MASK 0x7f
+#define QBMAN_RESULT_DQ 0x60
+#define QBMAN_RESULT_FQRN 0x21
+#define QBMAN_RESULT_FQRNI 0x22
+#define QBMAN_RESULT_FQPN 0x24
+#define QBMAN_RESULT_FQDAN 0x25
+#define QBMAN_RESULT_CDAN 0x26
+#define QBMAN_RESULT_CSCN_MEM 0x27
+#define QBMAN_RESULT_CGCU 0x28
+#define QBMAN_RESULT_BPSCN 0x29
+#define QBMAN_RESULT_CSCN_WQ 0x2a
+
+/* QBMan FQ management command codes */
+#define QBMAN_FQ_SCHEDULE 0x48
+#define QBMAN_FQ_FORCE 0x49
+#define QBMAN_FQ_XON 0x4d
+#define QBMAN_FQ_XOFF 0x4e
+
+/* structure of enqueue descriptor */
+struct qbman_eq_desc {
+ u8 verb;
+ u8 dca;
+ __le16 seqnum;
+ __le16 orpid;
+ __le16 reserved1;
+ __le32 tgtid;
+ __le32 tag;
+ __le16 qdbin;
+ u8 qpri;
+ u8 reserved[3];
+ u8 wae;
+ u8 rspid;
+ __le64 rsp_addr;
+};
+
+struct qbman_eq_desc_with_fd {
+ struct qbman_eq_desc desc;
+ u8 fd[32];
+};
+
+/* buffer release descriptor */
+struct qbman_release_desc {
+ u8 verb;
+ u8 reserved;
+ __le16 bpid;
+ __le32 reserved2;
+ __le64 buf[7];
+};
+
+/* Management command result codes */
+#define QBMAN_MC_RSLT_OK 0xf0
+
+#define CODE_CDAN_WE_EN 0x1
+#define CODE_CDAN_WE_CTX 0x4
+
+/* portal data structure */
+struct qbman_swp {
+ const struct qbman_swp_desc *desc;
+ void *addr_cena;
+ void __iomem *addr_cinh;
+
+ /* Management commands */
+ struct {
+ u32 valid_bit; /* 0x00 or 0x80 */
+ } mc;
+
+ /* Management response */
+ struct {
+ u32 valid_bit; /* 0x00 or 0x80 */
+ } mr;
+
+ /* Push dequeues */
+ u32 sdq;
+
+ /* Volatile dequeues */
+ struct {
+ atomic_t available; /* indicates if a command can be sent */
+ u32 valid_bit; /* 0x00 or 0x80 */
+ struct dpaa2_dq *storage; /* NULL if DQRR */
+ } vdq;
+
+ /* DQRR */
+ struct {
+ u32 next_idx;
+ u32 valid_bit;
+ u8 dqrr_size;
+ int reset_bug; /* indicates dqrr reset workaround is needed */
+ } dqrr;
+
+ struct {
+ u32 pi;
+ u32 pi_vb;
+ u32 pi_ring_size;
+ u32 pi_ci_mask;
+ u32 ci;
+ int available;
+ u32 pend;
+ u32 no_pfdr;
+ } eqcr;
+
+ spinlock_t access_spinlock;
+
+ /* Interrupt coalescing */
+ u32 irq_threshold;
+ u32 irq_holdoff;
+ int use_adaptive_rx_coalesce;
+};
+
+/* Function pointers */
+extern
+int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd);
+extern
+int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ uint32_t *flags,
+ int num_frames);
+extern
+int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ int num_frames);
+extern
+int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d);
+extern
+const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s);
+extern
+int (*qbman_swp_release_ptr)(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const u64 *buffers,
+ unsigned int num_buffers);
+
+/* Functions */
+struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
+void qbman_swp_finish(struct qbman_swp *p);
+u32 qbman_swp_interrupt_read_status(struct qbman_swp *p);
+void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask);
+u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p);
+void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask);
+int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);
+void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);
+
+void qbman_swp_push_get(struct qbman_swp *p, u8 channel_idx, int *enabled);
+void qbman_swp_push_set(struct qbman_swp *p, u8 channel_idx, int enable);
+
+void qbman_pull_desc_clear(struct qbman_pull_desc *d);
+void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
+ struct dpaa2_dq *storage,
+ dma_addr_t storage_phys,
+ int stash);
+void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes);
+void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid);
+void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
+ enum qbman_pull_type_e dct);
+void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
+ enum qbman_pull_type_e dct);
+
+void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq);
+
+int qbman_result_has_new_result(struct qbman_swp *p, const struct dpaa2_dq *dq);
+
+void qbman_eq_desc_clear(struct qbman_eq_desc *d);
+void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
+void qbman_eq_desc_set_token(struct qbman_eq_desc *d, u8 token);
+void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid);
+void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
+ u32 qd_bin, u32 qd_prio);
+
+
+void qbman_release_desc_clear(struct qbman_release_desc *d);
+void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid);
+void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);
+
+int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
+ unsigned int num_buffers);
+int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
+ u8 alt_fq_verb);
+int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
+ u8 we_mask, u8 cdan_en,
+ u64 ctx);
+
+void *qbman_swp_mc_start(struct qbman_swp *p);
+void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb);
+void *qbman_swp_mc_result(struct qbman_swp *p);
+
+/**
+ * qbman_swp_enqueue() - Issue an enqueue command
+ * @s: the software portal used for enqueue
+ * @d: the enqueue descriptor
+ * @fd: the frame descriptor to be enqueued
+ *
+ * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
+ */
+static inline int
+qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd)
+{
+ return qbman_swp_enqueue_ptr(s, d, fd);
+}
+
+/**
+ * qbman_swp_enqueue_multiple() - Issue a multi enqueue command
+ * using one enqueue descriptor
+ * @s: the software portal used for enqueue
+ * @d: the enqueue descriptor
+ * @fd: pointer to the table of frame descriptors to be enqueued
+ * @flags: pointer to a table of QBMAN_ENQUEUE_FLAG_DCA flags; ignored if NULL
+ * @num_frames: number of frames to be enqueued
+ *
+ * Return the number of frames enqueued, or a negative error number.
+ */
+static inline int
+qbman_swp_enqueue_multiple(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ uint32_t *flags,
+ int num_frames)
+{
+ return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags, num_frames);
+}
+
+/**
+ * qbman_swp_enqueue_multiple_desc() - Issue a multi enqueue command
+ * using multiple enqueue descriptor
+ * @s: the software portal used for enqueue
+ * @d: table of minimal enqueue descriptors, one per frame
+ * @fd: pointer to the table of frame descriptors to be enqueued
+ * @num_frames: number of frames to be enqueued
+ *
+ * Return the number of frames enqueued, or a negative error number.
+ */
+static inline int
+qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ int num_frames)
+{
+ return qbman_swp_enqueue_multiple_desc_ptr(s, d, fd, num_frames);
+}
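+
+/*
+ * Illustrative sketch (annotation, not part of this header): a minimal
+ * single-frame enqueue, assuming "s" is an initialised portal, "fqid" a
+ * valid frame queue id and "fd" a prepared frame descriptor:
+ *
+ *	struct qbman_eq_desc ed;
+ *	int ret;
+ *
+ *	qbman_eq_desc_clear(&ed);
+ *	qbman_eq_desc_set_no_orp(&ed, 0);	// no order restoration
+ *	qbman_eq_desc_set_fq(&ed, fqid);
+ *	ret = qbman_swp_enqueue(s, &ed, fd);	// -EBUSY if the EQCR is full
+ */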
+
+/**
+ * qbman_result_is_DQ() - check if the dequeue result is a dequeue response
+ * @dq: the dequeue result to be checked
+ *
+ * DQRR entries may contain non-dequeue results, i.e. notifications
+ */
+static inline int qbman_result_is_DQ(const struct dpaa2_dq *dq)
+{
+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_DQ);
+}
+
+/**
+ * qbman_result_is_SCN() - Check whether the dequeue result is a notification
+ * @dq: the dequeue result to be checked
+ *
+ */
+static inline int qbman_result_is_SCN(const struct dpaa2_dq *dq)
+{
+ return !qbman_result_is_DQ(dq);
+}
+
+/* FQ Data Availability */
+static inline int qbman_result_is_FQDAN(const struct dpaa2_dq *dq)
+{
+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQDAN);
+}
+
+/* Channel Data Availability */
+static inline int qbman_result_is_CDAN(const struct dpaa2_dq *dq)
+{
+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CDAN);
+}
+
+/* Congestion State Change */
+static inline int qbman_result_is_CSCN(const struct dpaa2_dq *dq)
+{
+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CSCN_WQ);
+}
+
+/* Buffer Pool State Change */
+static inline int qbman_result_is_BPSCN(const struct dpaa2_dq *dq)
+{
+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_BPSCN);
+}
+
+/* Congestion Group Count Update */
+static inline int qbman_result_is_CGCU(const struct dpaa2_dq *dq)
+{
+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CGCU);
+}
+
+/* Retirement */
+static inline int qbman_result_is_FQRN(const struct dpaa2_dq *dq)
+{
+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRN);
+}
+
+/* Retirement Immediate */
+static inline int qbman_result_is_FQRNI(const struct dpaa2_dq *dq)
+{
+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRNI);
+}
+
+ /* Park */
+static inline int qbman_result_is_FQPN(const struct dpaa2_dq *dq)
+{
+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQPN);
+}
+
+/**
+ * qbman_result_SCN_state() - Get the state field in State-change notification
+ */
+static inline u8 qbman_result_SCN_state(const struct dpaa2_dq *scn)
+{
+ return scn->scn.state;
+}
+
+#define SCN_RID_MASK 0x00FFFFFF
+
+/**
+ * qbman_result_SCN_rid() - Get the resource id in State-change notification
+ */
+static inline u32 qbman_result_SCN_rid(const struct dpaa2_dq *scn)
+{
+ return le32_to_cpu(scn->scn.rid_tok) & SCN_RID_MASK;
+}
+
+/**
+ * qbman_result_SCN_ctx() - Get the context data in State-change notification
+ */
+static inline u64 qbman_result_SCN_ctx(const struct dpaa2_dq *scn)
+{
+ return le64_to_cpu(scn->scn.ctx);
+}
+
+/**
+ * qbman_swp_fq_schedule() - Move the fq to the scheduled state
+ * @s: the software portal object
+ * @fqid: the index of frame queue to be scheduled
+ *
+ * There are a couple of different ways that a FQ can end up in the parked
+ * state; this schedules it.
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_fq_schedule(struct qbman_swp *s, u32 fqid)
+{
+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
+}
+
+/**
+ * qbman_swp_fq_force() - Force the FQ to fully scheduled state
+ * @s: the software portal object
+ * @fqid: the index of frame queue to be forced
+ *
+ * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
+ * and thus be available for selection by any channel-dequeuing behaviour (push
+ * or pull). If the FQ is subsequently "dequeued" from the channel and is still
+ * empty at the time this happens, the resulting dq_entry will have no FD.
+ * (qbman_result_DQ_fd() will return NULL.)
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_fq_force(struct qbman_swp *s, u32 fqid)
+{
+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
+}
+
+/**
+ * qbman_swp_fq_xon() - sets FQ flow-control to XON
+ * @s: the software portal object
+ * @fqid: the index of frame queue
+ *
+ * This setting doesn't affect enqueues to the FQ, just dequeues.
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_fq_xon(struct qbman_swp *s, u32 fqid)
+{
+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
+}
+
+/**
+ * qbman_swp_fq_xoff() - sets FQ flow-control to XOFF
+ * @s: the software portal object
+ * @fqid: the index of frame queue
+ *
+ * This setting doesn't affect enqueues to the FQ, just dequeues.
+ * XOFF FQs will remain in the tentatively-scheduled state, even when
+ * non-empty, meaning they won't be selected for scheduled dequeuing.
+ * If a FQ is changed to XOFF after it had already become truly-scheduled
+ * to a channel, and a pull dequeue of that channel occurs that selects
+ * that FQ for dequeuing, then the resulting dq_entry will have no FD.
+ * (qbman_result_DQ_fd() will return NULL.)
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_fq_xoff(struct qbman_swp *s, u32 fqid)
+{
+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
+}
+
+/* If the user has been allocated a channel object that is going to generate
+ * CDANs to another channel, then the qbman_swp_CDAN* functions will be
+ * necessary.
+ *
+ * CDAN-enabled channels only generate a single CDAN notification, after which
+ * they need to be reenabled before they'll generate another. The idea is
+ * that pull dequeuing will occur in reaction to the CDAN, followed by a
+ * reenable step. Each function generates a distinct command to hardware, so a
+ * combination function is provided if the user wishes to modify the "context"
+ * (which shows up in each CDAN message) each time they reenable, as a single
+ * command to hardware.
+ */
+
+/**
+ * qbman_swp_CDAN_set_context() - Set CDAN context
+ * @s: the software portal object
+ * @channelid: the channel index
+ * @ctx: the context to be set in CDAN
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_CDAN_set_context(struct qbman_swp *s, u16 channelid,
+ u64 ctx)
+{
+ return qbman_swp_CDAN_set(s, channelid,
+ CODE_CDAN_WE_CTX,
+ 0, ctx);
+}
+
+/**
+ * qbman_swp_CDAN_enable() - Enable CDAN for the channel
+ * @s: the software portal object
+ * @channelid: the index of the channel to generate CDAN
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_CDAN_enable(struct qbman_swp *s, u16 channelid)
+{
+ return qbman_swp_CDAN_set(s, channelid,
+ CODE_CDAN_WE_EN,
+ 1, 0);
+}
+
+/**
+ * qbman_swp_CDAN_disable() - disable CDAN for the channel
+ * @s: the software portal object
+ * @channelid: the index of the channel to generate CDAN
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_CDAN_disable(struct qbman_swp *s, u16 channelid)
+{
+ return qbman_swp_CDAN_set(s, channelid,
+ CODE_CDAN_WE_EN,
+ 0, 0);
+}
+
+/**
+ * qbman_swp_CDAN_set_context_enable() - Set the CDAN context and enable CDAN
+ * @s: the software portal object
+ * @channelid: the index of the channel to generate CDAN
+ * @ctx: the context to be set in CDAN
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s,
+ u16 channelid,
+ u64 ctx)
+{
+ return qbman_swp_CDAN_set(s, channelid,
+ CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
+ 1, ctx);
+}
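+
+/*
+ * Illustrative sketch (annotation, not part of this header) of the CDAN
+ * reenable pattern described above, assuming "s" is an initialised portal
+ * and "chid" a channel id; "ctx" is a hypothetical per-channel cookie:
+ *
+ *	// on receiving a CDAN for chid: pull-dequeue and process, then...
+ *	if (qbman_swp_CDAN_set_context_enable(s, chid, ctx))
+ *		pr_err("CDAN reenable failed\n");
+ *	// ...the channel may now generate its next CDAN
+ */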
+
+/* Wraps up submit + poll-for-result */
+static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
+ u8 cmd_verb)
+{
+ int loopvar = 2000;
+
+ qbman_swp_mc_submit(swp, cmd, cmd_verb);
+
+ do {
+ cmd = qbman_swp_mc_result(swp);
+ } while (!cmd && loopvar--);
+
+ WARN_ON(!loopvar);
+
+ return cmd;
+}
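+
+/*
+ * Illustrative sketch (annotation, not part of this header): the generic
+ * management-command pattern wrapped by qbman_swp_mc_complete(). "verb" is
+ * a hypothetical command code; the response layout is command-specific,
+ * but (as in the query result structs below) byte 1 carries the result:
+ *
+ *	u8 *cmd, *rsp;
+ *
+ *	cmd = qbman_swp_mc_start(p);
+ *	if (!cmd)
+ *		return -EBUSY;
+ *	// ...fill in the command body here...
+ *	rsp = qbman_swp_mc_complete(p, cmd, verb);
+ *	if (!rsp || rsp[1] != QBMAN_MC_RSLT_OK)
+ *		return -EIO;
+ */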
+
+/* Query APIs */
+struct qbman_fq_query_np_rslt {
+ u8 verb;
+ u8 rslt;
+ u8 st1;
+ u8 st2;
+ u8 reserved[2];
+ __le16 od1_sfdr;
+ __le16 od2_sfdr;
+ __le16 od3_sfdr;
+ __le16 ra1_sfdr;
+ __le16 ra2_sfdr;
+ __le32 pfdr_hptr;
+ __le32 pfdr_tptr;
+ __le32 frm_cnt;
+ __le32 byte_cnt;
+ __le16 ics_surp;
+ u8 is;
+ u8 reserved2[29];
+};
+
+int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
+ struct qbman_fq_query_np_rslt *r);
+u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r);
+u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r);
+
+struct qbman_bp_query_rslt {
+ u8 verb;
+ u8 rslt;
+ u8 reserved[4];
+ u8 bdi;
+ u8 state;
+ __le32 fill;
+ __le32 hdotr;
+ __le16 swdet;
+ __le16 swdxt;
+ __le16 hwdet;
+ __le16 hwdxt;
+ __le16 swset;
+ __le16 swsxt;
+ __le16 vbpid;
+ __le16 icid;
+ __le64 bpscn_addr;
+ __le64 bpscn_ctx;
+ __le16 hw_targ;
+ u8 dbe;
+ u8 reserved2;
+ u8 sdcnt;
+ u8 hdcnt;
+ u8 sscnt;
+ u8 reserved3[9];
+};
+
+int qbman_bp_query(struct qbman_swp *s, u16 bpid,
+ struct qbman_bp_query_rslt *r);
+
+u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a);
+
+/**
+ * qbman_swp_release() - Issue a buffer release command
+ * @s: the software portal object
+ * @d: the release descriptor
+ * @buffers: pointer to the array of buffer addresses to be released
+ * @num_buffers: number of buffers to be released, must be less than 8
+ *
+ * Return 0 for success, -EBUSY if the release command ring is not ready.
+ */
+static inline int qbman_swp_release(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const u64 *buffers,
+ unsigned int num_buffers)
+{
+ return qbman_swp_release_ptr(s, d, buffers, num_buffers);
+}
+
+/**
+ * qbman_swp_pull() - Issue the pull dequeue command
+ * @s: the software portal object
+ * @d: the pull dequeue descriptor, configured with the set of
+ * qbman_pull_desc_set_*() calls
+ *
+ * Return 0 for success, and -EBUSY if the software portal is not ready
+ * to do pull dequeue.
+ */
+static inline int qbman_swp_pull(struct qbman_swp *s,
+ struct qbman_pull_desc *d)
+{
+ return qbman_swp_pull_ptr(s, d);
+}
+
+/**
+ * qbman_swp_dqrr_next() - Get a valid DQRR entry
+ * @s: the software portal object
+ *
+ * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
+ * only once, so repeated calls can return a sequence of DQRR entries, without
+ * requiring that they be consumed immediately or in any particular order.
+ */
+static inline const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
+{
+ return qbman_swp_dqrr_next_ptr(s);
+}
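+
+/*
+ * Illustrative sketch (annotation, not part of this header): a pull
+ * dequeue followed by a DQRR drain loop, assuming "s" is an initialised
+ * portal, "fqid" a valid frame queue id, and responses are delivered to
+ * the DQRR (no dequeue storage configured):
+ *
+ *	struct qbman_pull_desc pd;
+ *	const struct dpaa2_dq *dq;
+ *
+ *	qbman_pull_desc_clear(&pd);
+ *	qbman_pull_desc_set_numframes(&pd, 4);
+ *	qbman_pull_desc_set_fq(&pd, fqid);
+ *	if (qbman_swp_pull(s, &pd))
+ *		return -EBUSY;
+ *	while ((dq = qbman_swp_dqrr_next(s))) {
+ *		if (qbman_result_is_DQ(dq))
+ *			;	// process the frame descriptor here
+ *		qbman_swp_dqrr_consume(s, dq);
+ *	}
+ */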
+
+int qbman_swp_set_irq_coalescing(struct qbman_swp *p, u32 irq_threshold,
+ u32 irq_holdoff);
+
+void qbman_swp_get_irq_coalescing(struct qbman_swp *p, u32 *irq_threshold,
+ u32 *irq_holdoff);
+
+#endif /* __FSL_QBMAN_PORTAL_H */
diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c
new file mode 100644
index 0000000000..6bf3e6a980
--- /dev/null
+++ b/drivers/soc/fsl/guts.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Freescale QorIQ Platforms GUTS Driver
+ *
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ */
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/of_fdt.h>
+#include <linux/sys_soc.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/fsl/guts.h>
+
+struct fsl_soc_die_attr {
+ char *die;
+ u32 svr;
+ u32 mask;
+};
+
+struct fsl_soc_data {
+ const char *sfp_compat;
+ u32 uid_offset;
+};
+
+/* SoC die attribute definition for QorIQ platform */
+static const struct fsl_soc_die_attr fsl_soc_die[] = {
+ /*
+ * Power Architecture-based SoCs T Series
+ */
+
+ /* Die: T4240, SoC: T4240/T4160/T4080 */
+ { .die = "T4240",
+ .svr = 0x82400000,
+ .mask = 0xfff00000,
+ },
+ /* Die: T1040, SoC: T1040/T1020/T1042/T1022 */
+ { .die = "T1040",
+ .svr = 0x85200000,
+ .mask = 0xfff00000,
+ },
+ /* Die: T2080, SoC: T2080/T2081 */
+ { .die = "T2080",
+ .svr = 0x85300000,
+ .mask = 0xfff00000,
+ },
+ /* Die: T1024, SoC: T1024/T1014/T1023/T1013 */
+ { .die = "T1024",
+ .svr = 0x85400000,
+ .mask = 0xfff00000,
+ },
+
+ /*
+ * ARM-based SoCs LS Series
+ */
+
+ /* Die: LS1043A, SoC: LS1043A/LS1023A */
+ { .die = "LS1043A",
+ .svr = 0x87920000,
+ .mask = 0xffff0000,
+ },
+ /* Die: LS2080A, SoC: LS2080A/LS2040A/LS2085A */
+ { .die = "LS2080A",
+ .svr = 0x87010000,
+ .mask = 0xff3f0000,
+ },
+ /* Die: LS1088A, SoC: LS1088A/LS1048A/LS1084A/LS1044A */
+ { .die = "LS1088A",
+ .svr = 0x87030000,
+ .mask = 0xff3f0000,
+ },
+ /* Die: LS1012A, SoC: LS1012A */
+ { .die = "LS1012A",
+ .svr = 0x87040000,
+ .mask = 0xffff0000,
+ },
+ /* Die: LS1046A, SoC: LS1046A/LS1026A */
+ { .die = "LS1046A",
+ .svr = 0x87070000,
+ .mask = 0xffff0000,
+ },
+ /* Die: LS2088A, SoC: LS2088A/LS2048A/LS2084A/LS2044A */
+ { .die = "LS2088A",
+ .svr = 0x87090000,
+ .mask = 0xff3f0000,
+ },
+ /* Die: LS1021A, SoC: LS1021A/LS1020A/LS1022A */
+ { .die = "LS1021A",
+ .svr = 0x87000000,
+ .mask = 0xfff70000,
+ },
+ /* Die: LX2160A, SoC: LX2160A/LX2120A/LX2080A */
+ { .die = "LX2160A",
+ .svr = 0x87360000,
+ .mask = 0xff3f0000,
+ },
+ /* Die: LS1028A, SoC: LS1028A */
+ { .die = "LS1028A",
+ .svr = 0x870b0000,
+ .mask = 0xff3f0000,
+ },
+ { },
+};
+
+static const struct fsl_soc_die_attr *fsl_soc_die_match(
+ u32 svr, const struct fsl_soc_die_attr *matches)
+{
+ while (matches->svr) {
+ if (matches->svr == (svr & matches->mask))
+ return matches;
+ matches++;
+ }
+ return NULL;
+}
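+
+/*
+ * Worked example: an LS1046A reports an SVR of the form 0x8707xxxx;
+ * 0x87070010 & 0xffff0000 == 0x87070000, which matches the "LS1046A"
+ * entry in the table above. (The low SVR bits here are illustrative.)
+ */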
+
+static u64 fsl_guts_get_soc_uid(const char *compat, unsigned int offset)
+{
+ struct device_node *np;
+ void __iomem *sfp_base;
+ u64 uid;
+
+ np = of_find_compatible_node(NULL, NULL, compat);
+ if (!np)
+ return 0;
+
+ sfp_base = of_iomap(np, 0);
+ if (!sfp_base) {
+ of_node_put(np);
+ return 0;
+ }
+
+ uid = ioread32(sfp_base + offset);
+ uid <<= 32;
+ uid |= ioread32(sfp_base + offset + 4);
+
+ iounmap(sfp_base);
+ of_node_put(np);
+
+ return uid;
+}
+
+static const struct fsl_soc_data ls1028a_data = {
+ .sfp_compat = "fsl,ls1028a-sfp",
+ .uid_offset = 0x21c,
+};
+
+/*
+ * Table of compatible strings used to match the device tree
+ * guts node on Freescale QorIQ SoCs.
+ */
+static const struct of_device_id fsl_guts_of_match[] = {
+ { .compatible = "fsl,qoriq-device-config-1.0", },
+ { .compatible = "fsl,qoriq-device-config-2.0", },
+ { .compatible = "fsl,p1010-guts", },
+ { .compatible = "fsl,p1020-guts", },
+ { .compatible = "fsl,p1021-guts", },
+ { .compatible = "fsl,p1022-guts", },
+ { .compatible = "fsl,p1023-guts", },
+ { .compatible = "fsl,p2020-guts", },
+ { .compatible = "fsl,bsc9131-guts", },
+ { .compatible = "fsl,bsc9132-guts", },
+ { .compatible = "fsl,mpc8536-guts", },
+ { .compatible = "fsl,mpc8544-guts", },
+ { .compatible = "fsl,mpc8548-guts", },
+ { .compatible = "fsl,mpc8568-guts", },
+ { .compatible = "fsl,mpc8569-guts", },
+ { .compatible = "fsl,mpc8572-guts", },
+ { .compatible = "fsl,ls1021a-dcfg", },
+ { .compatible = "fsl,ls1043a-dcfg", },
+ { .compatible = "fsl,ls2080a-dcfg", },
+ { .compatible = "fsl,ls1088a-dcfg", },
+ { .compatible = "fsl,ls1012a-dcfg", },
+ { .compatible = "fsl,ls1046a-dcfg", },
+ { .compatible = "fsl,lx2160a-dcfg", },
+ { .compatible = "fsl,ls1028a-dcfg", .data = &ls1028a_data},
+ {}
+};
+
+static int __init fsl_guts_init(void)
+{
+ struct soc_device_attribute *soc_dev_attr;
+ static struct soc_device *soc_dev;
+ const struct fsl_soc_die_attr *soc_die;
+ const struct fsl_soc_data *soc_data;
+ const struct of_device_id *match;
+ struct ccsr_guts __iomem *regs;
+ const char *machine = NULL;
+ struct device_node *np;
+ bool little_endian;
+ u64 soc_uid = 0;
+ u32 svr;
+ int ret;
+
+ np = of_find_matching_node_and_match(NULL, fsl_guts_of_match, &match);
+ if (!np)
+ return 0;
+ soc_data = match->data;
+
+ regs = of_iomap(np, 0);
+ if (!regs) {
+ of_node_put(np);
+ return -ENOMEM;
+ }
+
+ little_endian = of_property_read_bool(np, "little-endian");
+ if (little_endian)
+ svr = ioread32(&regs->svr);
+ else
+ svr = ioread32be(&regs->svr);
+ iounmap(regs);
+ of_node_put(np);
+
+ /* Register soc device */
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ if (of_property_read_string(of_root, "model", &machine))
+ of_property_read_string_index(of_root, "compatible", 0, &machine);
+ if (machine) {
+ soc_dev_attr->machine = kstrdup(machine, GFP_KERNEL);
+ if (!soc_dev_attr->machine)
+ goto err_nomem;
+ }
+
+ soc_die = fsl_soc_die_match(svr, fsl_soc_die);
+ if (soc_die) {
+ soc_dev_attr->family = kasprintf(GFP_KERNEL, "QorIQ %s",
+ soc_die->die);
+ } else {
+ soc_dev_attr->family = kasprintf(GFP_KERNEL, "QorIQ");
+ }
+ if (!soc_dev_attr->family)
+ goto err_nomem;
+
+ soc_dev_attr->soc_id = kasprintf(GFP_KERNEL, "svr:0x%08x", svr);
+ if (!soc_dev_attr->soc_id)
+ goto err_nomem;
+
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%d.%d",
+ (svr >> 4) & 0xf, svr & 0xf);
+ if (!soc_dev_attr->revision)
+ goto err_nomem;
+
+ if (soc_data)
+ soc_uid = fsl_guts_get_soc_uid(soc_data->sfp_compat,
+ soc_data->uid_offset);
+ if (soc_uid)
+ soc_dev_attr->serial_number = kasprintf(GFP_KERNEL, "%016llX",
+ soc_uid);
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ ret = PTR_ERR(soc_dev);
+ goto err;
+ }
+
+ pr_info("Machine: %s\n", soc_dev_attr->machine);
+ pr_info("SoC family: %s\n", soc_dev_attr->family);
+ pr_info("SoC ID: %s, Revision: %s\n",
+ soc_dev_attr->soc_id, soc_dev_attr->revision);
+
+ return 0;
+
+err_nomem:
+ ret = -ENOMEM;
+err:
+ kfree(soc_dev_attr->machine);
+ kfree(soc_dev_attr->family);
+ kfree(soc_dev_attr->soc_id);
+ kfree(soc_dev_attr->revision);
+ kfree(soc_dev_attr->serial_number);
+ kfree(soc_dev_attr);
+
+ return ret;
+}
+core_initcall(fsl_guts_init);
diff --git a/drivers/soc/fsl/qbman/Kconfig b/drivers/soc/fsl/qbman/Kconfig
new file mode 100644
index 0000000000..bdecb86bb6
--- /dev/null
+++ b/drivers/soc/fsl/qbman/Kconfig
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig FSL_DPAA
+ bool "QorIQ DPAA1 framework support"
+ depends on ((FSL_SOC_BOOKE || ARCH_LAYERSCAPE) && ARCH_DMA_ADDR_T_64BIT)
+ select GENERIC_ALLOCATOR
+ help
+ The Freescale Data Path Acceleration Architecture (DPAA) is a set of
+ hardware components on specific QorIQ multicore processors.
+ This architecture provides the infrastructure to support simplified
+ sharing of networking interfaces and accelerators by multiple CPUs.
+ The major h/w blocks composing DPAA are BMan and QMan.
+
+ The Buffer Manager (BMan) is a hardware buffer pool management block
+ that allows software and accelerators on the datapath to acquire and
+ release buffers in order to build frames.
+
+ The Queue Manager (QMan) is a hardware queue management block
+ that allows software and accelerators on the datapath to enqueue and
+ dequeue frames in order to communicate.
+
+if FSL_DPAA
+
+config FSL_DPAA_CHECKING
+ bool "Additional driver checking"
+ help
+ Compiles in additional checks, to sanity-check the drivers and
+ any use of the exported API. Not recommended for performance.
+
+config FSL_BMAN_TEST
+ tristate "BMan self-tests"
+ help
+ Compile the BMan self-test code. These tests will
+ exercise the BMan APIs to confirm functionality
+ of both the software drivers and hardware device.
+
+config FSL_BMAN_TEST_API
+ bool "High-level API self-test"
+ depends on FSL_BMAN_TEST
+ default y
+ help
+ This requires the presence of cpu-affine portals, and performs
+ high-level API testing with them (whichever portal(s) are affine
+ to the cpu(s) the test executes on).
+
+config FSL_QMAN_TEST
+ tristate "QMan self-tests"
+ help
+ Compile self-test code for QMan.
+
+config FSL_QMAN_TEST_API
+ bool "QMan high-level self-test"
+ depends on FSL_QMAN_TEST
+ default y
+ help
+ This requires the presence of cpu-affine portals, and performs
+ high-level API testing with them (whichever portal(s) are affine to
+ the cpu(s) the test executes on).
+
+config FSL_QMAN_TEST_STASH
+ bool "QMan 'hot potato' data-stashing self-test"
+ depends on FSL_QMAN_TEST
+ default y
+ help
+ This performs a "hot potato" style test enqueuing/dequeuing a frame
+ across a series of FQs scheduled to different portals (and cpus), with
+ DQRR, data and context stashing always on.
+
+endif # FSL_DPAA
diff --git a/drivers/soc/fsl/qbman/Makefile b/drivers/soc/fsl/qbman/Makefile
new file mode 100644
index 0000000000..811312ad52
--- /dev/null
+++ b/drivers/soc/fsl/qbman/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_FSL_DPAA) += bman_ccsr.o qman_ccsr.o \
+ bman_portal.o qman_portal.o \
+ bman.o qman.o dpaa_sys.o
+
+obj-$(CONFIG_FSL_BMAN_TEST) += bman-test.o
+bman-test-y = bman_test.o
+bman-test-$(CONFIG_FSL_BMAN_TEST_API) += bman_test_api.o
+
+obj-$(CONFIG_FSL_QMAN_TEST) += qman-test.o
+qman-test-y = qman_test.o
+qman-test-$(CONFIG_FSL_QMAN_TEST_API) += qman_test_api.o
+qman-test-$(CONFIG_FSL_QMAN_TEST_STASH) += qman_test_stash.o
diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c
new file mode 100644
index 0000000000..6cc1847e53
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman.c
@@ -0,0 +1,819 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+#define IRQNAME "BMan portal %d"
+#define MAX_IRQNAME 16 /* big enough for "BMan portal %d" */
+
+/* Portal register assists */
+
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+/* Cache-inhibited register offsets */
+#define BM_REG_RCR_PI_CINH 0x3000
+#define BM_REG_RCR_CI_CINH 0x3100
+#define BM_REG_RCR_ITR 0x3200
+#define BM_REG_CFG 0x3300
+#define BM_REG_SCN(n) (0x3400 + ((n) << 6))
+#define BM_REG_ISR 0x3e00
+#define BM_REG_IER 0x3e40
+#define BM_REG_ISDR 0x3e80
+#define BM_REG_IIR 0x3ec0
+
+/* Cache-enabled register offsets */
+#define BM_CL_CR 0x0000
+#define BM_CL_RR0 0x0100
+#define BM_CL_RR1 0x0140
+#define BM_CL_RCR 0x1000
+#define BM_CL_RCR_PI_CENA 0x3000
+#define BM_CL_RCR_CI_CENA 0x3100
+
+#else
+/* Cache-inhibited register offsets */
+#define BM_REG_RCR_PI_CINH 0x0000
+#define BM_REG_RCR_CI_CINH 0x0004
+#define BM_REG_RCR_ITR 0x0008
+#define BM_REG_CFG 0x0100
+#define BM_REG_SCN(n) (0x0200 + ((n) << 2))
+#define BM_REG_ISR 0x0e00
+#define BM_REG_IER 0x0e04
+#define BM_REG_ISDR 0x0e08
+#define BM_REG_IIR 0x0e0c
+
+/* Cache-enabled register offsets */
+#define BM_CL_CR 0x0000
+#define BM_CL_RR0 0x0100
+#define BM_CL_RR1 0x0140
+#define BM_CL_RCR 0x1000
+#define BM_CL_RCR_PI_CENA 0x3000
+#define BM_CL_RCR_CI_CENA 0x3100
+#endif
+
+/*
+ * Portal modes.
+ * Enum types:
+ * pmode == production mode
+ * cmode == consumption mode
+ * Enum values use 3-letter codes. The first letter matches the portal mode;
+ * the remaining two letters indicate:
+ * ci == cache-inhibited portal register
+ * ce == cache-enabled portal register
+ * vb == in-band valid-bit (cache-enabled)
+ */
+enum bm_rcr_pmode { /* matches BCSP_CFG::RPM */
+ bm_rcr_pci = 0, /* PI index, cache-inhibited */
+ bm_rcr_pce = 1, /* PI index, cache-enabled */
+ bm_rcr_pvb = 2 /* valid-bit */
+};
+enum bm_rcr_cmode { /* s/w-only */
+ bm_rcr_cci, /* CI index, cache-inhibited */
+ bm_rcr_cce /* CI index, cache-enabled */
+};
+
+
+/* --- Portal structures --- */
+
+#define BM_RCR_SIZE 8
+
+/* Release Command */
+struct bm_rcr_entry {
+ union {
+ struct {
+ u8 _ncw_verb; /* writes to this are non-coherent */
+ u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
+ u8 __reserved1[62];
+ };
+ struct bm_buffer bufs[8];
+ };
+};
+#define BM_RCR_VERB_VBIT 0x80
+#define BM_RCR_VERB_CMD_MASK 0x70 /* one of two values; */
+#define BM_RCR_VERB_CMD_BPID_SINGLE 0x20
+#define BM_RCR_VERB_CMD_BPID_MULTI 0x30
+#define BM_RCR_VERB_BUFCOUNT_MASK 0x0f /* values 1..8 */
+
+struct bm_rcr {
+ struct bm_rcr_entry *ring, *cursor;
+ u8 ci, available, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ u32 busy;
+ enum bm_rcr_pmode pmode;
+ enum bm_rcr_cmode cmode;
+#endif
+};
+
+/* MC (Management Command) command */
+struct bm_mc_command {
+ u8 _ncw_verb; /* writes to this are non-coherent */
+ u8 bpid; /* used by acquire command */
+ u8 __reserved[62];
+};
+#define BM_MCC_VERB_VBIT 0x80
+#define BM_MCC_VERB_CMD_MASK 0x70 /* where the verb contains; */
+#define BM_MCC_VERB_CMD_ACQUIRE 0x10
+#define BM_MCC_VERB_CMD_QUERY 0x40
+#define BM_MCC_VERB_ACQUIRE_BUFCOUNT 0x0f /* values 1..8 go here */
+
+/* MC result, Acquire and Query Response */
+union bm_mc_result {
+ struct {
+ u8 verb;
+ u8 bpid;
+ u8 __reserved[62];
+ };
+ struct bm_buffer bufs[8];
+};
+#define BM_MCR_VERB_VBIT 0x80
+#define BM_MCR_VERB_CMD_MASK BM_MCC_VERB_CMD_MASK
+#define BM_MCR_VERB_CMD_ACQUIRE BM_MCC_VERB_CMD_ACQUIRE
+#define BM_MCR_VERB_CMD_QUERY BM_MCC_VERB_CMD_QUERY
+#define BM_MCR_VERB_CMD_ERR_INVALID 0x60
+#define BM_MCR_VERB_CMD_ERR_ECC 0x70
+#define BM_MCR_VERB_ACQUIRE_BUFCOUNT BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
+#define BM_MCR_TIMEOUT 10000 /* us */
+
+struct bm_mc {
+ struct bm_mc_command *cr;
+ union bm_mc_result *rr;
+ u8 rridx, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ enum {
+ /* Can only be _mc_start()ed */
+ mc_idle,
+ /* Can only be _mc_commit()ed or _mc_abort()ed */
+ mc_user,
+ /* Can only be _mc_retry()ed */
+ mc_hw
+ } state;
+#endif
+};
+
+struct bm_addr {
+ void *ce; /* cache-enabled */
+ __be32 *ce_be; /* Same as above but for direct access */
+ void __iomem *ci; /* cache-inhibited */
+};
+
+struct bm_portal {
+ struct bm_addr addr;
+ struct bm_rcr rcr;
+ struct bm_mc mc;
+} ____cacheline_aligned;
+
+/* Cache-inhibited register access. */
+static inline u32 bm_in(struct bm_portal *p, u32 offset)
+{
+ return ioread32be(p->addr.ci + offset);
+}
+
+static inline void bm_out(struct bm_portal *p, u32 offset, u32 val)
+{
+ iowrite32be(val, p->addr.ci + offset);
+}
+
+/* Cache Enabled Portal Access */
+static inline void bm_cl_invalidate(struct bm_portal *p, u32 offset)
+{
+ dpaa_invalidate(p->addr.ce + offset);
+}
+
+static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset)
+{
+ dpaa_touch_ro(p->addr.ce + offset);
+}
+
+static inline u32 bm_ce_in(struct bm_portal *p, u32 offset)
+{
+ return be32_to_cpu(*(p->addr.ce_be + (offset/4)));
+}
+
+struct bman_portal {
+ struct bm_portal p;
+ /* interrupt sources processed by portal_isr(), configurable */
+ unsigned long irq_sources;
+ /* probing time config params for cpu-affine portals */
+ const struct bm_portal_config *config;
+ char irqname[MAX_IRQNAME];
+};
+
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal);
+
+static inline struct bman_portal *get_affine_portal(void)
+{
+ return &get_cpu_var(bman_affine_portal);
+}
+
+static inline void put_affine_portal(void)
+{
+ put_cpu_var(bman_affine_portal);
+}
+
+/*
+ * This object type refers to a pool; it isn't *the* pool. There may be
+ * more than one such object per BMan buffer pool, eg. if different users of the
+ * pool are operating via different portals.
+ */
+struct bman_pool {
+ /* index of the buffer pool to encapsulate (0-63) */
+ u32 bpid;
+ /* Used for hash-table admin when using depletion notifications. */
+ struct bman_portal *portal;
+ struct bman_pool *next;
+};
+
+static u32 poll_portal_slow(struct bman_portal *p, u32 is);
+
+static irqreturn_t portal_isr(int irq, void *ptr)
+{
+ struct bman_portal *p = ptr;
+ struct bm_portal *portal = &p->p;
+ u32 clear = p->irq_sources;
+ u32 is = bm_in(portal, BM_REG_ISR) & p->irq_sources;
+
+ if (unlikely(!is))
+ return IRQ_NONE;
+
+ clear |= poll_portal_slow(p, is);
+ bm_out(portal, BM_REG_ISR, clear);
+ return IRQ_HANDLED;
+}
+
+/* --- RCR API --- */
+
+#define RCR_SHIFT ilog2(sizeof(struct bm_rcr_entry))
+#define RCR_CARRY (uintptr_t)(BM_RCR_SIZE << RCR_SHIFT)
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+static struct bm_rcr_entry *rcr_carryclear(struct bm_rcr_entry *p)
+{
+ uintptr_t addr = (uintptr_t)p;
+
+ addr &= ~RCR_CARRY;
+
+ return (struct bm_rcr_entry *)addr;
+}
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static int rcr_ptr2idx(struct bm_rcr_entry *e)
+{
+ return ((uintptr_t)e >> RCR_SHIFT) & (BM_RCR_SIZE - 1);
+}
+#endif
+
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void rcr_inc(struct bm_rcr *rcr)
+{
+ /* increment to the next RCR pointer and handle overflow and 'vbit' */
+ struct bm_rcr_entry *partial = rcr->cursor + 1;
+
+ rcr->cursor = rcr_carryclear(partial);
+ if (partial != rcr->cursor)
+ rcr->vbit ^= BM_RCR_VERB_VBIT;
+}
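+
+/*
+ * Worked example: with BM_RCR_SIZE = 8 and 64-byte bm_rcr_entry structs,
+ * RCR_SHIFT = 6 and RCR_CARRY = 8 << 6 = 0x200. Stepping the cursor past
+ * entry 7 sets the carry bit, rcr_carryclear() wraps it back to entry 0,
+ * and the "partial != cursor" test above toggles the valid-bit.
+ */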
+
+static int bm_rcr_get_avail(struct bm_portal *portal)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+
+ return rcr->available;
+}
+
+static int bm_rcr_get_fill(struct bm_portal *portal)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+
+ return BM_RCR_SIZE - 1 - rcr->available;
+}
+
+static void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+
+ rcr->ithresh = ithresh;
+ bm_out(portal, BM_REG_RCR_ITR, ithresh);
+}
+
+static void bm_rcr_cce_prefetch(struct bm_portal *portal)
+{
+ __maybe_unused struct bm_rcr *rcr = &portal->rcr;
+
+ DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
+ bm_cl_touch_ro(portal, BM_CL_RCR_CI_CENA);
+}
+
+static u8 bm_rcr_cce_update(struct bm_portal *portal)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+ u8 diff, old_ci = rcr->ci;
+
+ DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
+ rcr->ci = bm_ce_in(portal, BM_CL_RCR_CI_CENA) & (BM_RCR_SIZE - 1);
+ bm_cl_invalidate(portal, BM_CL_RCR_CI_CENA);
+ diff = dpaa_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
+ rcr->available += diff;
+ return diff;
+}
+
+static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+
+ DPAA_ASSERT(!rcr->busy);
+ if (!rcr->available)
+ return NULL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ rcr->busy = 1;
+#endif
+ dpaa_zero(rcr->cursor);
+ return rcr->cursor;
+}
+
+static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+ struct bm_rcr_entry *rcursor;
+
+ DPAA_ASSERT(rcr->busy);
+ DPAA_ASSERT(rcr->pmode == bm_rcr_pvb);
+ DPAA_ASSERT(rcr->available >= 1);
+ dma_wmb();
+ rcursor = rcr->cursor;
+ rcursor->_ncw_verb = myverb | rcr->vbit;
+ dpaa_flush(rcursor);
+ rcr_inc(rcr);
+ rcr->available--;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ rcr->busy = 0;
+#endif
+}
+
+static int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
+ enum bm_rcr_cmode cmode)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+ u32 cfg;
+ u8 pi;
+
+ rcr->ring = portal->addr.ce + BM_CL_RCR;
+ rcr->ci = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+ pi = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+ rcr->cursor = rcr->ring + pi;
+ rcr->vbit = (bm_in(portal, BM_REG_RCR_PI_CINH) & BM_RCR_SIZE) ?
+ BM_RCR_VERB_VBIT : 0;
+ rcr->available = BM_RCR_SIZE - 1
+ - dpaa_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
+ rcr->ithresh = bm_in(portal, BM_REG_RCR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ rcr->busy = 0;
+ rcr->pmode = pmode;
+ rcr->cmode = cmode;
+#endif
+ cfg = (bm_in(portal, BM_REG_CFG) & 0xffffffe0)
+ | (pmode & 0x3); /* BCSP_CFG::RPM */
+ bm_out(portal, BM_REG_CFG, cfg);
+ return 0;
+}
+
+static void bm_rcr_finish(struct bm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ struct bm_rcr *rcr = &portal->rcr;
+ int i;
+
+ DPAA_ASSERT(!rcr->busy);
+
+ i = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+ if (i != rcr_ptr2idx(rcr->cursor))
+ pr_crit("losing uncommitted RCR entries\n");
+
+ i = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+ if (i != rcr->ci)
+ pr_crit("missing existing RCR completions\n");
+ if (rcr->ci != rcr_ptr2idx(rcr->cursor))
+ pr_crit("RCR destroyed unquiesced\n");
+#endif
+}
+
+/* --- Management command API --- */
+static int bm_mc_init(struct bm_portal *portal)
+{
+ struct bm_mc *mc = &portal->mc;
+
+ mc->cr = portal->addr.ce + BM_CL_CR;
+ mc->rr = portal->addr.ce + BM_CL_RR0;
+ mc->rridx = (mc->cr->_ncw_verb & BM_MCC_VERB_VBIT) ?
+ 0 : 1;
+ mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = mc_idle;
+#endif
+ return 0;
+}
+
+static void bm_mc_finish(struct bm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ struct bm_mc *mc = &portal->mc;
+
+ DPAA_ASSERT(mc->state == mc_idle);
+ if (mc->state != mc_idle)
+ pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
+{
+ struct bm_mc *mc = &portal->mc;
+
+ DPAA_ASSERT(mc->state == mc_idle);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = mc_user;
+#endif
+ dpaa_zero(mc->cr);
+ return mc->cr;
+}
+
+static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
+{
+ struct bm_mc *mc = &portal->mc;
+ union bm_mc_result *rr = mc->rr + mc->rridx;
+
+ DPAA_ASSERT(mc->state == mc_user);
+ dma_wmb();
+ mc->cr->_ncw_verb = myverb | mc->vbit;
+ dpaa_flush(mc->cr);
+ dpaa_invalidate_touch_ro(rr);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = mc_hw;
+#endif
+}
+
+static inline union bm_mc_result *bm_mc_result(struct bm_portal *portal)
+{
+ struct bm_mc *mc = &portal->mc;
+ union bm_mc_result *rr = mc->rr + mc->rridx;
+
+ DPAA_ASSERT(mc->state == mc_hw);
+ /*
+ * The inactive response register's verb byte always returns zero until
+ * its command is submitted and completed. This includes the valid-bit,
+ * in case you were wondering...
+ */
+ if (!rr->verb) {
+ dpaa_invalidate_touch_ro(rr);
+ return NULL;
+ }
+ mc->rridx ^= 1;
+ mc->vbit ^= BM_MCC_VERB_VBIT;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = mc_idle;
+#endif
+ return rr;
+}
+
+static inline int bm_mc_result_timeout(struct bm_portal *portal,
+ union bm_mc_result **mcr)
+{
+ int timeout = BM_MCR_TIMEOUT;
+
+ do {
+ *mcr = bm_mc_result(portal);
+ if (*mcr)
+ break;
+ udelay(1);
+ } while (--timeout);
+
+ return timeout;
+}
+
+/* Disable all BSCN interrupts for the portal */
+static void bm_isr_bscn_disable(struct bm_portal *portal)
+{
+ bm_out(portal, BM_REG_SCN(0), 0);
+ bm_out(portal, BM_REG_SCN(1), 0);
+}
+
+static int bman_create_portal(struct bman_portal *portal,
+ const struct bm_portal_config *c)
+{
+ struct bm_portal *p;
+ int ret;
+
+ p = &portal->p;
+ /*
+ * Prep the low-level portal struct with the mapped addresses from the
+ * config; everything that follows depends on it, and "config" is kept
+ * mostly for (de)reference.
+ */
+ p->addr.ce = c->addr_virt_ce;
+ p->addr.ce_be = c->addr_virt_ce;
+ p->addr.ci = c->addr_virt_ci;
+ if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) {
+ dev_err(c->dev, "RCR initialisation failed\n");
+ goto fail_rcr;
+ }
+ if (bm_mc_init(p)) {
+ dev_err(c->dev, "MC initialisation failed\n");
+ goto fail_mc;
+ }
+ /*
+ * Default to all BPIDs disabled; we enable as required at
+ * run-time.
+ */
+ bm_isr_bscn_disable(p);
+
+ /* Write-to-clear any stale interrupt status bits */
+ bm_out(p, BM_REG_ISDR, 0xffffffff);
+ portal->irq_sources = 0;
+ bm_out(p, BM_REG_IER, 0);
+ bm_out(p, BM_REG_ISR, 0xffffffff);
+ snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
+ if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
+ dev_err(c->dev, "request_irq() failed\n");
+ goto fail_irq;
+ }
+
+ if (dpaa_set_portal_irq_affinity(c->dev, c->irq, c->cpu))
+ goto fail_affinity;
+
+ /* Need RCR to be empty before continuing */
+ ret = bm_rcr_get_fill(p);
+ if (ret) {
+ dev_err(c->dev, "RCR unclean\n");
+ goto fail_rcr_empty;
+ }
+ /* Success */
+ portal->config = c;
+
+ bm_out(p, BM_REG_ISDR, 0);
+ bm_out(p, BM_REG_IIR, 0);
+
+ return 0;
+
+fail_rcr_empty:
+fail_affinity:
+ free_irq(c->irq, portal);
+fail_irq:
+ bm_mc_finish(p);
+fail_mc:
+ bm_rcr_finish(p);
+fail_rcr:
+ return -EIO;
+}
+
+struct bman_portal *bman_create_affine_portal(const struct bm_portal_config *c)
+{
+ struct bman_portal *portal;
+ int err;
+
+ portal = &per_cpu(bman_affine_portal, c->cpu);
+ err = bman_create_portal(portal, c);
+ if (err)
+ return NULL;
+
+ spin_lock(&affine_mask_lock);
+ cpumask_set_cpu(c->cpu, &affine_mask);
+ spin_unlock(&affine_mask_lock);
+
+ return portal;
+}
+
+static u32 poll_portal_slow(struct bman_portal *p, u32 is)
+{
+ u32 ret = is;
+
+ if (is & BM_PIRQ_RCRI) {
+ bm_rcr_cce_update(&p->p);
+ bm_rcr_set_ithresh(&p->p, 0);
+ bm_out(&p->p, BM_REG_ISR, BM_PIRQ_RCRI);
+ is &= ~BM_PIRQ_RCRI;
+ }
+
+ /* There should be no status register bits left undefined */
+ DPAA_ASSERT(!is);
+ return ret;
+}
+
+int bman_p_irqsource_add(struct bman_portal *p, u32 bits)
+{
+ unsigned long irqflags;
+
+ local_irq_save(irqflags);
+ p->irq_sources |= bits & BM_PIRQ_VISIBLE;
+ bm_out(&p->p, BM_REG_IER, p->irq_sources);
+ local_irq_restore(irqflags);
+ return 0;
+}
+
+int bm_shutdown_pool(u32 bpid)
+{
+ int err = 0;
+ struct bm_mc_command *bm_cmd;
+ union bm_mc_result *bm_res;
+
+ struct bman_portal *p = get_affine_portal();
+
+ /* Acquire buffers until empty */
+ bm_cmd = bm_mc_start(&p->p);
+ bm_cmd->bpid = bpid;
+ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | 1);
+ if (!bm_mc_result_timeout(&p->p, &bm_res)) {
+ pr_crit("BMan Acquire Command timedout\n");
+ err = -ETIMEDOUT;
+ goto done;
+ }
+ if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
+ /* Pool is empty */
+ goto done;
+ }
+ }
+done:
+ put_affine_portal();
+ return err;
+}
+
+struct gen_pool *bm_bpalloc;
+
+static int bm_alloc_bpid_range(u32 *result, u32 count)
+{
+ unsigned long addr;
+
+ addr = gen_pool_alloc(bm_bpalloc, count);
+ if (!addr)
+ return -ENOMEM;
+
+ *result = addr & ~DPAA_GENALLOC_OFF;
+
+ return 0;
+}
+
+static int bm_release_bpid(u32 bpid)
+{
+ int ret;
+
+ ret = bm_shutdown_pool(bpid);
+ if (ret) {
+ pr_debug("BPID %d leaked\n", bpid);
+ return ret;
+ }
+
+ gen_pool_free(bm_bpalloc, bpid | DPAA_GENALLOC_OFF, 1);
+ return 0;
+}
+
+struct bman_pool *bman_new_pool(void)
+{
+ struct bman_pool *pool = NULL;
+ u32 bpid;
+
+ if (bm_alloc_bpid_range(&bpid, 1))
+ return NULL;
+
+ pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ goto err;
+
+ pool->bpid = bpid;
+
+ return pool;
+err:
+ bm_release_bpid(bpid);
+ return NULL;
+}
+EXPORT_SYMBOL(bman_new_pool);
+
+void bman_free_pool(struct bman_pool *pool)
+{
+ bm_release_bpid(pool->bpid);
+
+ kfree(pool);
+}
+EXPORT_SYMBOL(bman_free_pool);
+
+int bman_get_bpid(const struct bman_pool *pool)
+{
+ return pool->bpid;
+}
+EXPORT_SYMBOL(bman_get_bpid);
+
+static void update_rcr_ci(struct bman_portal *p, int avail)
+{
+ if (avail)
+ bm_rcr_cce_prefetch(&p->p);
+ else
+ bm_rcr_cce_update(&p->p);
+}
+
+int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num)
+{
+ struct bman_portal *p;
+ struct bm_rcr_entry *r;
+ unsigned long irqflags;
+ int avail, timeout = 1000; /* 1ms */
+ int i = num - 1;
+
+ DPAA_ASSERT(num > 0 && num <= 8);
+
+ do {
+ p = get_affine_portal();
+ local_irq_save(irqflags);
+ avail = bm_rcr_get_avail(&p->p);
+ if (avail < 2)
+ update_rcr_ci(p, avail);
+ r = bm_rcr_start(&p->p);
+ local_irq_restore(irqflags);
+ put_affine_portal();
+ if (likely(r))
+ break;
+
+ udelay(1);
+ } while (--timeout);
+
+ if (unlikely(!timeout))
+ return -ETIMEDOUT;
+
+ p = get_affine_portal();
+ local_irq_save(irqflags);
+ /*
+ * We can copy all but the first entry, as copying the first one wholesale
+ * could trigger badness with the valid-bit
+ */
+ bm_buffer_set64(r->bufs, bm_buffer_get64(bufs));
+ bm_buffer_set_bpid(r->bufs, pool->bpid);
+ if (i)
+ memcpy(&r->bufs[1], &bufs[1], i * sizeof(bufs[0]));
+
+ bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
+ (num & BM_RCR_VERB_BUFCOUNT_MASK));
+
+ local_irq_restore(irqflags);
+ put_affine_portal();
+ return 0;
+}
+EXPORT_SYMBOL(bman_release);
+
+int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num)
+{
+ struct bman_portal *p = get_affine_portal();
+ struct bm_mc_command *mcc;
+ union bm_mc_result *mcr;
+ int ret;
+
+ DPAA_ASSERT(num > 0 && num <= 8);
+
+ mcc = bm_mc_start(&p->p);
+ mcc->bpid = pool->bpid;
+ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
+ (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
+ if (!bm_mc_result_timeout(&p->p, &mcr)) {
+ put_affine_portal();
+ pr_crit("BMan Acquire Timeout\n");
+ return -ETIMEDOUT;
+ }
+ ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
+ if (bufs)
+ memcpy(&bufs[0], &mcr->bufs[0], num * sizeof(bufs[0]));
+
+ put_affine_portal();
+ if (ret != num)
+ ret = -ENOMEM;
+ return ret;
+}
+EXPORT_SYMBOL(bman_acquire);
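+
+/*
+ * Illustrative sketch (annotation, not part of the driver): a minimal
+ * release/acquire round-trip, assuming "addr" is the DMA address of a
+ * buffer the caller owns:
+ *
+ *	struct bman_pool *pool = bman_new_pool();
+ *	struct bm_buffer buf;
+ *
+ *	if (!pool)
+ *		return -ENOMEM;
+ *	bm_buffer_set64(&buf, addr);
+ *	if (!bman_release(pool, &buf, 1))
+ *		bman_acquire(pool, &buf, 1);	// hand the buffer back
+ *	bman_free_pool(pool);
+ */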
+
+const struct bm_portal_config *
+bman_get_bm_portal_config(const struct bman_portal *portal)
+{
+ return portal->config;
+}
diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c
new file mode 100644
index 0000000000..cb24a08be0
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_ccsr.c
@@ -0,0 +1,320 @@
+/* Copyright (c) 2009 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+u16 bman_ip_rev;
+EXPORT_SYMBOL(bman_ip_rev);
+
+/* Register offsets */
+#define REG_FBPR_FPC 0x0800
+#define REG_ECSR 0x0a00
+#define REG_ECIR 0x0a04
+#define REG_EADR 0x0a08
+#define REG_EDATA(n) (0x0a10 + ((n) * 0x04))
+#define REG_SBEC(n) (0x0a80 + ((n) * 0x04))
+#define REG_IP_REV_1 0x0bf8
+#define REG_IP_REV_2 0x0bfc
+#define REG_FBPR_BARE 0x0c00
+#define REG_FBPR_BAR 0x0c04
+#define REG_FBPR_AR 0x0c10
+#define REG_SRCIDR 0x0d04
+#define REG_LIODNR 0x0d08
+#define REG_ERR_ISR 0x0e00
+#define REG_ERR_IER 0x0e04
+#define REG_ERR_ISDR 0x0e08
+
+/* Used by all error interrupt registers except 'inhibit' */
+#define BM_EIRQ_IVCI 0x00000010 /* Invalid Command Verb */
+#define BM_EIRQ_FLWI 0x00000008 /* FBPR Low Watermark */
+#define BM_EIRQ_MBEI 0x00000004 /* Multi-bit ECC Error */
+#define BM_EIRQ_SBEI 0x00000002 /* Single-bit ECC Error */
+#define BM_EIRQ_BSCN 0x00000001 /* pool State Change Notification */
+
+struct bman_hwerr_txt {
+ u32 mask;
+ const char *txt;
+};
+
+static const struct bman_hwerr_txt bman_hwerr_txts[] = {
+ { BM_EIRQ_IVCI, "Invalid Command Verb" },
+ { BM_EIRQ_FLWI, "FBPR Low Watermark" },
+ { BM_EIRQ_MBEI, "Multi-bit ECC Error" },
+ { BM_EIRQ_SBEI, "Single-bit ECC Error" },
+ { BM_EIRQ_BSCN, "Pool State Change Notification" },
+};
+
+/* Trigger the low watermark interrupt only once */
+#define BMAN_ERRS_TO_DISABLE BM_EIRQ_FLWI
+
+/* Pointer to the start of the BMan's CCSR space */
+static u32 __iomem *bm_ccsr_start;
+
+static inline u32 bm_ccsr_in(u32 offset)
+{
+ return ioread32be(bm_ccsr_start + offset/4);
+}
+static inline void bm_ccsr_out(u32 offset, u32 val)
+{
+ iowrite32be(val, bm_ccsr_start + offset/4);
+}
+
+static void bm_get_version(u16 *id, u8 *major, u8 *minor)
+{
+ u32 v = bm_ccsr_in(REG_IP_REV_1);
+ *id = (v >> 16);
+ *major = (v >> 8) & 0xff;
+ *minor = v & 0xff;
+}
+
+/* signal transactions for FBPRs with higher priority */
+#define FBPR_AR_RPRIO_HI BIT(30)
+
+/* Track if probe has occurred and if cleanup is required */
+static int __bman_probed;
+static int __bman_requires_cleanup;
+
+
+static int bm_set_memory(u64 ba, u32 size)
+{
+ u32 bar, bare;
+ u32 exp = ilog2(size);
+ /* choke if size isn't within range */
+ DPAA_ASSERT(size >= 4096 && size <= 1024*1024*1024 &&
+ is_power_of_2(size));
+ /* choke if '[e]ba' has lower-alignment than 'size' */
+ DPAA_ASSERT(!(ba & (size - 1)));
+
+ /* Check to see if BMan has already been initialized */
+ bar = bm_ccsr_in(REG_FBPR_BAR);
+ if (bar) {
+ /* Make sure ba == what was programmed */
+ bare = bm_ccsr_in(REG_FBPR_BARE);
+ if (bare != upper_32_bits(ba) || bar != lower_32_bits(ba)) {
+ pr_err("Attempted to reinitialize BMan with different BAR, got 0x%llx read BARE=0x%x BAR=0x%x\n",
+ ba, bare, bar);
+ return -ENOMEM;
+ }
+ pr_info("BMan BAR already configured\n");
+ __bman_requires_cleanup = 1;
+ return 1;
+ }
+
+ bm_ccsr_out(REG_FBPR_BARE, upper_32_bits(ba));
+ bm_ccsr_out(REG_FBPR_BAR, lower_32_bits(ba));
+ bm_ccsr_out(REG_FBPR_AR, exp - 1);
+ return 0;
+}
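+
+/*
+ * Worked example: for a 16 MiB FBPR area, exp = ilog2(16 << 20) = 24, so
+ * REG_FBPR_AR is written with 23; the assertions above also require the
+ * base address to be aligned to the 16 MiB size.
+ */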
+
+/*
+ * Location and size of BMan private memory
+ *
+ * Ideally we would use the DMA API to turn rmem->base into a DMA address
+ * (especially if iommu translations ever get involved). Unfortunately, the
+ * DMA API currently does not allow mapping anything that is not backed with
+ * a struct page.
+ */
+static dma_addr_t fbpr_a;
+static size_t fbpr_sz;
+
+static int bman_fbpr(struct reserved_mem *rmem)
+{
+ fbpr_a = rmem->base;
+ fbpr_sz = rmem->size;
+
+ WARN_ON(!(fbpr_a && fbpr_sz));
+
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", bman_fbpr);
+
+static irqreturn_t bman_isr(int irq, void *ptr)
+{
+ u32 isr_val, ier_val, ecsr_val, isr_mask, i;
+ struct device *dev = ptr;
+
+ ier_val = bm_ccsr_in(REG_ERR_IER);
+ isr_val = bm_ccsr_in(REG_ERR_ISR);
+ ecsr_val = bm_ccsr_in(REG_ECSR);
+ isr_mask = isr_val & ier_val;
+
+ if (!isr_mask)
+ return IRQ_NONE;
+
+ for (i = 0; i < ARRAY_SIZE(bman_hwerr_txts); i++) {
+ if (bman_hwerr_txts[i].mask & isr_mask) {
+ dev_err_ratelimited(dev, "ErrInt: %s\n",
+ bman_hwerr_txts[i].txt);
+ if (bman_hwerr_txts[i].mask & ecsr_val) {
+ /* Re-arm error capture registers */
+ bm_ccsr_out(REG_ECSR, ecsr_val);
+ }
+ if (bman_hwerr_txts[i].mask & BMAN_ERRS_TO_DISABLE) {
+ dev_dbg(dev, "Disabling error 0x%x\n",
+ bman_hwerr_txts[i].mask);
+ ier_val &= ~bman_hwerr_txts[i].mask;
+ bm_ccsr_out(REG_ERR_IER, ier_val);
+ }
+ }
+ }
+ bm_ccsr_out(REG_ERR_ISR, isr_val);
+
+ return IRQ_HANDLED;
+}
+
+int bman_is_probed(void)
+{
+ return __bman_probed;
+}
+EXPORT_SYMBOL_GPL(bman_is_probed);
+
+int bman_requires_cleanup(void)
+{
+ return __bman_requires_cleanup;
+}
+
+void bman_done_cleanup(void)
+{
+ __bman_requires_cleanup = 0;
+}
+
+static int fsl_bman_probe(struct platform_device *pdev)
+{
+ int ret, err_irq;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct resource *res;
+ u16 id, bm_pool_cnt;
+ u8 major, minor;
+
+ __bman_probed = -1;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "Can't get %pOF property 'IORESOURCE_MEM'\n",
+ node);
+ return -ENXIO;
+ }
+ bm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res));
+ if (!bm_ccsr_start)
+ return -ENXIO;
+
+ bm_get_version(&id, &major, &minor);
+ if (major == 1 && minor == 0) {
+ bman_ip_rev = BMAN_REV10;
+ bm_pool_cnt = BM_POOL_MAX;
+ } else if (major == 2 && minor == 0) {
+ bman_ip_rev = BMAN_REV20;
+ bm_pool_cnt = 8;
+ } else if (major == 2 && minor == 1) {
+ bman_ip_rev = BMAN_REV21;
+ bm_pool_cnt = BM_POOL_MAX;
+ } else {
+ dev_err(dev, "Unknown Bman version:%04x,%02x,%02x\n",
+ id, major, minor);
+ return -ENODEV;
+ }
+
+ /*
+ * If FBPR memory wasn't defined using the qbman compatible string,
+ * try using the of_reserved_mem_device method.
+ */
+ if (!fbpr_a) {
+ ret = qbman_init_private_mem(dev, 0, &fbpr_a, &fbpr_sz);
+ if (ret) {
+ dev_err(dev, "qbman_init_private_mem() failed 0x%x\n",
+ ret);
+ return -ENODEV;
+ }
+ }
+
+ dev_dbg(dev, "Allocated FBPR 0x%llx 0x%zx\n", fbpr_a, fbpr_sz);
+
+ bm_set_memory(fbpr_a, fbpr_sz);
+
+ err_irq = platform_get_irq(pdev, 0);
+ if (err_irq <= 0) {
+ dev_info(dev, "Can't get %pOF IRQ\n", node);
+ return -ENODEV;
+ }
+ ret = devm_request_irq(dev, err_irq, bman_isr, IRQF_SHARED, "bman-err",
+ dev);
+ if (ret) {
+ dev_err(dev, "devm_request_irq() failed %d for '%pOF'\n",
+ ret, node);
+ return ret;
+ }
+ /* Disable Buffer Pool State Change */
+ bm_ccsr_out(REG_ERR_ISDR, BM_EIRQ_BSCN);
+	/*
+	 * Write-to-clear any stale bits (e.g. starvation being asserted
+	 * prior to resource allocation during driver init).
+	 */
+ bm_ccsr_out(REG_ERR_ISR, 0xffffffff);
+ /* Enable Error Interrupts */
+ bm_ccsr_out(REG_ERR_IER, 0xffffffff);
+
+ bm_bpalloc = devm_gen_pool_create(dev, 0, -1, "bman-bpalloc");
+ if (IS_ERR(bm_bpalloc)) {
+ ret = PTR_ERR(bm_bpalloc);
+ dev_err(dev, "bman-bpalloc pool init failed (%d)\n", ret);
+ return ret;
+ }
+
+ /* seed BMan resource pool */
+ ret = gen_pool_add(bm_bpalloc, DPAA_GENALLOC_OFF, bm_pool_cnt, -1);
+ if (ret) {
+ dev_err(dev, "Failed to seed BPID range [%d..%d] (%d)\n",
+ 0, bm_pool_cnt - 1, ret);
+ return ret;
+ }
+
+ __bman_probed = 1;
+
+ return 0;
+}
+
+static const struct of_device_id fsl_bman_ids[] = {
+ {
+ .compatible = "fsl,bman",
+ },
+ {}
+};
+
+static struct platform_driver fsl_bman_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = fsl_bman_ids,
+ .suppress_bind_attrs = true,
+ },
+ .probe = fsl_bman_probe,
+};
+
+builtin_platform_driver(fsl_bman_driver);
diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c
new file mode 100644
index 0000000000..4d7b9caee1
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_portal.c
@@ -0,0 +1,244 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+static struct bman_portal *affine_bportals[NR_CPUS];
+static struct cpumask portal_cpus;
+static int __bman_portals_probed;
+/* protect bman global registers and global data shared among portals */
+static DEFINE_SPINLOCK(bman_lock);
+
+static struct bman_portal *init_pcfg(struct bm_portal_config *pcfg)
+{
+ struct bman_portal *p = bman_create_affine_portal(pcfg);
+
+ if (!p) {
+ dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n",
+ __func__, pcfg->cpu);
+ return NULL;
+ }
+
+ bman_p_irqsource_add(p, BM_PIRQ_RCRI);
+ affine_bportals[pcfg->cpu] = p;
+
+ dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
+
+ return p;
+}
+
+static int bman_offline_cpu(unsigned int cpu)
+{
+ struct bman_portal *p = affine_bportals[cpu];
+ const struct bm_portal_config *pcfg;
+
+ if (!p)
+ return 0;
+
+ pcfg = bman_get_bm_portal_config(p);
+ if (!pcfg)
+ return 0;
+
+ /* use any other online CPU */
+ cpu = cpumask_any_but(cpu_online_mask, cpu);
+ irq_set_affinity(pcfg->irq, cpumask_of(cpu));
+ return 0;
+}
+
+static int bman_online_cpu(unsigned int cpu)
+{
+ struct bman_portal *p = affine_bportals[cpu];
+ const struct bm_portal_config *pcfg;
+
+ if (!p)
+ return 0;
+
+ pcfg = bman_get_bm_portal_config(p);
+ if (!pcfg)
+ return 0;
+
+ irq_set_affinity(pcfg->irq, cpumask_of(cpu));
+ return 0;
+}
+
+int bman_portals_probed(void)
+{
+ return __bman_portals_probed;
+}
+EXPORT_SYMBOL_GPL(bman_portals_probed);
+
+static int bman_portal_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct bm_portal_config *pcfg;
+ struct resource *addr_phys[2];
+ int irq, cpu, err, i;
+
+ err = bman_is_probed();
+ if (!err)
+ return -EPROBE_DEFER;
+ if (err < 0) {
+ dev_err(&pdev->dev, "failing probe due to bman probe error\n");
+ return -ENODEV;
+ }
+
+ pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
+ if (!pcfg) {
+ __bman_portals_probed = -1;
+ return -ENOMEM;
+ }
+
+ pcfg->dev = dev;
+
+ addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM,
+ DPAA_PORTAL_CE);
+ if (!addr_phys[0]) {
+ dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node);
+ goto err_ioremap1;
+ }
+
+ addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
+ DPAA_PORTAL_CI);
+ if (!addr_phys[1]) {
+ dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node);
+ goto err_ioremap1;
+ }
+
+ pcfg->cpu = -1;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ goto err_ioremap1;
+ pcfg->irq = irq;
+
+ pcfg->addr_virt_ce = memremap(addr_phys[0]->start,
+ resource_size(addr_phys[0]),
+ QBMAN_MEMREMAP_ATTR);
+ if (!pcfg->addr_virt_ce) {
+ dev_err(dev, "memremap::CE failed\n");
+ goto err_ioremap1;
+ }
+
+ pcfg->addr_virt_ci = ioremap(addr_phys[1]->start,
+ resource_size(addr_phys[1]));
+ if (!pcfg->addr_virt_ci) {
+ dev_err(dev, "ioremap::CI failed\n");
+ goto err_ioremap2;
+ }
+
+ spin_lock(&bman_lock);
+ cpu = cpumask_first_zero(&portal_cpus);
+ if (cpu >= nr_cpu_ids) {
+ __bman_portals_probed = 1;
+ /* unassigned portal, skip init */
+ spin_unlock(&bman_lock);
+ goto check_cleanup;
+ }
+
+ cpumask_set_cpu(cpu, &portal_cpus);
+ spin_unlock(&bman_lock);
+ pcfg->cpu = cpu;
+
+ if (!init_pcfg(pcfg)) {
+ dev_err(dev, "portal init failed\n");
+ goto err_portal_init;
+ }
+
+ /* clear irq affinity if assigned cpu is offline */
+ if (!cpu_online(cpu))
+ bman_offline_cpu(cpu);
+
+check_cleanup:
+ if (__bman_portals_probed == 1 && bman_requires_cleanup()) {
+		/*
+		 * BMan wasn't reset prior to boot (kexec, for example).
+		 * Empty all the buffer pools so they are in the reset state.
+		 */
+ for (i = 0; i < BM_POOL_MAX; i++) {
+ err = bm_shutdown_pool(i);
+ if (err) {
+ dev_err(dev, "Failed to shutdown bpool %d\n",
+ i);
+ goto err_portal_init;
+ }
+ }
+ bman_done_cleanup();
+ }
+
+ return 0;
+
+err_portal_init:
+ iounmap(pcfg->addr_virt_ci);
+err_ioremap2:
+ memunmap(pcfg->addr_virt_ce);
+err_ioremap1:
+ __bman_portals_probed = -1;
+
+ return -ENXIO;
+}
+
+static const struct of_device_id bman_portal_ids[] = {
+ {
+ .compatible = "fsl,bman-portal",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, bman_portal_ids);
+
+static struct platform_driver bman_portal_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = bman_portal_ids,
+ },
+ .probe = bman_portal_probe,
+};
+
+static int __init bman_portal_driver_register(struct platform_driver *drv)
+{
+ int ret;
+
+ ret = platform_driver_register(drv);
+ if (ret < 0)
+ return ret;
+
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "soc/qbman_portal:online",
+ bman_online_cpu, bman_offline_cpu);
+ if (ret < 0) {
+ pr_err("bman: failed to register hotplug callbacks.\n");
+ platform_driver_unregister(drv);
+ return ret;
+ }
+ return 0;
+}
+
+module_driver(bman_portal_driver,
+ bman_portal_driver_register, platform_driver_unregister);
diff --git a/drivers/soc/fsl/qbman/bman_priv.h b/drivers/soc/fsl/qbman/bman_priv.h
new file mode 100644
index 0000000000..aa3981e049
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_priv.h
@@ -0,0 +1,83 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "dpaa_sys.h"
+
+#include <soc/fsl/bman.h>
+
+/* Portal processing (interrupt) sources */
+#define BM_PIRQ_RCRI 0x00000002 /* RCR Ring (below threshold) */
+
+/* Revision info (for errata and feature handling) */
+#define BMAN_REV10 0x0100
+#define BMAN_REV20 0x0200
+#define BMAN_REV21 0x0201
+extern u16 bman_ip_rev; /* 0 if uninitialised, otherwise BMAN_REVx */
+
+extern struct gen_pool *bm_bpalloc;
+
+struct bm_portal_config {
+ /* Portal addresses */
+ void *addr_virt_ce;
+ void __iomem *addr_virt_ci;
+ /* Allow these to be joined in lists */
+ struct list_head list;
+ struct device *dev;
+ /* User-visible portal configuration settings */
+ /* portal is affined to this cpu */
+ int cpu;
+ /* portal interrupt line */
+ int irq;
+};
+
+struct bman_portal *bman_create_affine_portal(
+ const struct bm_portal_config *config);
+/*
+ * The bman_p_***() variant below may be called while the CPU to which the
+ * portal is affine is not yet online.
+ * @bman_portal specifies which portal the API will use.
+ */
+int bman_p_irqsource_add(struct bman_portal *p, u32 bits);
+
+/*
+ * Used by all portal interrupt registers except 'inhibit'.
+ * This mask contains all the "irqsource" bits visible to API users.
+ */
+#define BM_PIRQ_VISIBLE BM_PIRQ_RCRI
+
+const struct bm_portal_config *
+bman_get_bm_portal_config(const struct bman_portal *portal);
+
+int bman_requires_cleanup(void);
+void bman_done_cleanup(void);
+
+int bm_shutdown_pool(u32 bpid);
diff --git a/drivers/soc/fsl/qbman/bman_test.c b/drivers/soc/fsl/qbman/bman_test.c
new file mode 100644
index 0000000000..09b1c960b2
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_test.c
@@ -0,0 +1,53 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_test.h"
+
+MODULE_AUTHOR("Geoff Thorpe");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("BMan testing");
+
+static int test_init(void)
+{
+#ifdef CONFIG_FSL_BMAN_TEST_API
+ int loop = 1;
+
+ while (loop--)
+ bman_test_api();
+#endif
+ return 0;
+}
+
+static void test_exit(void)
+{
+}
+
+module_init(test_init);
+module_exit(test_exit);
diff --git a/drivers/soc/fsl/qbman/bman_test.h b/drivers/soc/fsl/qbman/bman_test.h
new file mode 100644
index 0000000000..037ed342ad
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_test.h
@@ -0,0 +1,35 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+void bman_test_api(void);
diff --git a/drivers/soc/fsl/qbman/bman_test_api.c b/drivers/soc/fsl/qbman/bman_test_api.c
new file mode 100644
index 0000000000..6f6bdd154f
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_test_api.c
@@ -0,0 +1,151 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_test.h"
+
+#define NUM_BUFS 93
+#define LOOPS 3
+#define BMAN_TOKEN_MASK 0x00FFFFFFFFFFLLU
+
+static struct bman_pool *pool;
+static struct bm_buffer bufs_in[NUM_BUFS] ____cacheline_aligned;
+static struct bm_buffer bufs_out[NUM_BUFS] ____cacheline_aligned;
+static int bufs_received;
+
+static void bufs_init(void)
+{
+ int i;
+
+ for (i = 0; i < NUM_BUFS; i++)
+ bm_buffer_set64(&bufs_in[i], 0xfedc01234567LLU * i);
+ bufs_received = 0;
+}
+
+static inline int bufs_cmp(const struct bm_buffer *a, const struct bm_buffer *b)
+{
+ if (bman_ip_rev == BMAN_REV20 || bman_ip_rev == BMAN_REV21) {
+		/*
+		 * On SoCs with BMan revision 2.0, BMan only respects the 40
+		 * LS-bits of buffer addresses, masking off the upper 8 bits on
+		 * release commands. The API provides for 48-bit addresses
+		 * because some SoCs support all 48 bits. When generating
+		 * garbage addresses for testing, we either need to zero the
+		 * upper 8 bits when releasing to BMan (otherwise we'll be
+		 * disappointed when the buffers we acquire back from BMan
+		 * don't match), or we need to mask the upper 8 bits off when
+		 * comparing. We do the latter.
+		 */
+ if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) <
+ (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
+ return -1;
+ if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) >
+ (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
+ return 1;
+ } else {
+ if (bm_buffer_get64(a) < bm_buffer_get64(b))
+ return -1;
+ if (bm_buffer_get64(a) > bm_buffer_get64(b))
+ return 1;
+ }
+
+ return 0;
+}
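+
+/*
+ * Worked example of the masking above: if a test buffer is released with
+ * the 48-bit pattern 0xfedc01234567, a rev 2.0 BMan hands back only the
+ * 40 LS-bits, i.e. 0xdc01234567. Comparing both sides through
+ * BMAN_TOKEN_MASK makes released and acquired values match again:
+ *
+ *	0xfedc01234567 & 0x00FFFFFFFFFF == 0x00dc01234567
+ *	0x00dc01234567 & 0x00FFFFFFFFFF == 0x00dc01234567
+ */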
+
+static void bufs_confirm(void)
+{
+ int i, j;
+
+ for (i = 0; i < NUM_BUFS; i++) {
+ int matches = 0;
+
+ for (j = 0; j < NUM_BUFS; j++)
+ if (!bufs_cmp(&bufs_in[i], &bufs_out[j]))
+ matches++;
+ WARN_ON(matches != 1);
+ }
+}
+
+/* test */
+void bman_test_api(void)
+{
+ int i, loops = LOOPS;
+
+ bufs_init();
+
+ pr_info("%s(): Starting\n", __func__);
+
+ pool = bman_new_pool();
+ if (!pool) {
+ pr_crit("bman_new_pool() failed\n");
+ goto failed;
+ }
+
+ /* Release buffers */
+do_loop:
+ i = 0;
+ while (i < NUM_BUFS) {
+ int num = 8;
+
+ if (i + num > NUM_BUFS)
+ num = NUM_BUFS - i;
+ if (bman_release(pool, bufs_in + i, num)) {
+ pr_crit("bman_release() failed\n");
+ goto failed;
+ }
+ i += num;
+ }
+
+ /* Acquire buffers */
+ while (i > 0) {
+ int tmp, num = 8;
+
+ if (num > i)
+ num = i;
+ tmp = bman_acquire(pool, bufs_out + i - num, num);
+ WARN_ON(tmp != num);
+ i -= num;
+ }
+ i = bman_acquire(pool, NULL, 1);
+ WARN_ON(i > 0);
+
+ bufs_confirm();
+
+ if (--loops)
+ goto do_loop;
+
+ /* Clean up */
+ bman_free_pool(pool);
+ pr_info("%s(): Finished\n", __func__);
+ return;
+
+failed:
+ WARN_ON(1);
+}
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.c b/drivers/soc/fsl/qbman/dpaa_sys.c
new file mode 100644
index 0000000000..3375145004
--- /dev/null
+++ b/drivers/soc/fsl/qbman/dpaa_sys.c
@@ -0,0 +1,89 @@
+/* Copyright 2017 NXP Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of NXP Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NXP Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL NXP Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/dma-mapping.h>
+#include "dpaa_sys.h"
+
+/*
+ * Initialize a device's private memory region
+ */
+int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
+ size_t *size)
+{
+ struct device_node *mem_node;
+ struct reserved_mem *rmem;
+ int err;
+ __be32 *res_array;
+
+ mem_node = of_parse_phandle(dev->of_node, "memory-region", idx);
+ if (!mem_node) {
+ dev_err(dev, "No memory-region found for index %d\n", idx);
+ return -ENODEV;
+ }
+
+ rmem = of_reserved_mem_lookup(mem_node);
+ if (!rmem) {
+ dev_err(dev, "of_reserved_mem_lookup() returned NULL\n");
+ return -ENODEV;
+ }
+ *addr = rmem->base;
+ *size = rmem->size;
+
+	/*
+	 * Check if the 'reg' property exists - if not, insert it so that
+	 * upon kexec() the same memory region address will be preserved.
+	 * This is needed because QBMan HW does not allow the base address/
+	 * size to be modified once set.
+	 */
+ if (!of_property_present(mem_node, "reg")) {
+ struct property *prop;
+
+ prop = devm_kzalloc(dev, sizeof(*prop), GFP_KERNEL);
+ if (!prop)
+ return -ENOMEM;
+ prop->value = res_array = devm_kzalloc(dev, sizeof(__be32) * 4,
+ GFP_KERNEL);
+ if (!prop->value)
+ return -ENOMEM;
+ res_array[0] = cpu_to_be32(upper_32_bits(*addr));
+ res_array[1] = cpu_to_be32(lower_32_bits(*addr));
+ res_array[2] = cpu_to_be32(upper_32_bits(*size));
+ res_array[3] = cpu_to_be32(lower_32_bits(*size));
+ prop->length = sizeof(__be32) * 4;
+ prop->name = devm_kstrdup(dev, "reg", GFP_KERNEL);
+ if (!prop->name)
+ return -ENOMEM;
+ err = of_add_property(mem_node, prop);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
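+
+/*
+ * Illustrative sketch (hypothetical values): for a region at base
+ * 0x1f0000000 with size 0x1000000, the code above synthesizes a four-cell
+ * 'reg' property equivalent to the device-tree entry
+ *
+ *	reg = <0x00000001 0xf0000000 0x00000000 0x01000000>;
+ *
+ * i.e. { upper_32_bits(addr), lower_32_bits(addr),
+ *	  upper_32_bits(size), lower_32_bits(size) }, each cell big-endian.
+ */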
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.h b/drivers/soc/fsl/qbman/dpaa_sys.h
new file mode 100644
index 0000000000..ae8afa552b
--- /dev/null
+++ b/drivers/soc/fsl/qbman/dpaa_sys.h
@@ -0,0 +1,134 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA_SYS_H
+#define __DPAA_SYS_H
+
+#include <linux/cpu.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/sched/signal.h>
+#include <linux/vmalloc.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/prefetch.h>
+#include <linux/genalloc.h>
+#include <asm/cacheflush.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+
+/* For 2-element tables related to cache-inhibited and cache-enabled mappings */
+#define DPAA_PORTAL_CE 0
+#define DPAA_PORTAL_CI 1
+
+static inline void dpaa_flush(void *p)
+{
+	/*
+	 * Only PPC currently needs to flush the cache - on ARM the mapping
+	 * is non-cacheable.
+	 */
+#ifdef CONFIG_PPC
+ flush_dcache_range((unsigned long)p, (unsigned long)p+64);
+#endif
+}
+
+#define dpaa_invalidate(p) dpaa_flush(p)
+
+#define dpaa_zero(p) memset(p, 0, 64)
+
+static inline void dpaa_touch_ro(void *p)
+{
+#if (L1_CACHE_BYTES == 32)
+ prefetch(p+32);
+#endif
+ prefetch(p);
+}
+
+/* Commonly used combo */
+static inline void dpaa_invalidate_touch_ro(void *p)
+{
+ dpaa_invalidate(p);
+ dpaa_touch_ro(p);
+}
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+#define DPAA_ASSERT(x) WARN_ON(!(x))
+#else
+#define DPAA_ASSERT(x)
+#endif
+
+/* cyclic helper for rings */
+static inline u8 dpaa_cyc_diff(u8 ringsize, u8 first, u8 last)
+{
+ /* 'first' is included, 'last' is excluded */
+ if (first <= last)
+ return last - first;
+ return ringsize + last - first;
+}
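+
+/*
+ * Worked example: on an 8-entry ring with first == 6 and last == 2 the
+ * interval wraps, so dpaa_cyc_diff(8, 6, 2) == 8 + 2 - 6 == 4, covering
+ * entries 6, 7, 0 and 1 ('first' included, 'last' excluded).
+ */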
+
+/* Offset applied to genalloc pools due to zero being an error return */
+#define DPAA_GENALLOC_OFF 0x80000000
+
+/* Initialize the device's private memory region */
+int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
+ size_t *size);
+
+/* memremap() attributes for different platforms */
+#ifdef CONFIG_PPC
+#define QBMAN_MEMREMAP_ATTR MEMREMAP_WB
+#else
+#define QBMAN_MEMREMAP_ATTR MEMREMAP_WC
+#endif
+
+static inline int dpaa_set_portal_irq_affinity(struct device *dev,
+ int irq, int cpu)
+{
+ int ret = 0;
+
+ if (!irq_can_set_affinity(irq)) {
+ dev_err(dev, "unable to set IRQ affinity\n");
+ return -EINVAL;
+ }
+
+ if (cpu == -1 || !cpu_online(cpu))
+ cpu = cpumask_any(cpu_online_mask);
+
+ ret = irq_set_affinity(irq, cpumask_of(cpu));
+ if (ret)
+ dev_err(dev, "irq_set_affinity() on CPU %d failed\n", cpu);
+
+ return ret;
+}
+
+#endif /* __DPAA_SYS_H */
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
new file mode 100644
index 0000000000..739e4eee6b
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -0,0 +1,3053 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+#define DQRR_MAXFILL 15
+#define EQCR_ITHRESH 4 /* if EQCR congests, interrupt threshold */
+#define IRQNAME "QMan portal %d"
+#define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */
+#define QMAN_POLL_LIMIT 32
+#define QMAN_PIRQ_DQRR_ITHRESH 12
+#define QMAN_DQRR_IT_MAX 15
+#define QMAN_ITP_MAX 0xFFF
+#define QMAN_PIRQ_MR_ITHRESH 4
+#define QMAN_PIRQ_IPERIOD 100
+
+/* Portal register assists */
+
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+/* Cache-inhibited register offsets */
+#define QM_REG_EQCR_PI_CINH 0x3000
+#define QM_REG_EQCR_CI_CINH 0x3040
+#define QM_REG_EQCR_ITR 0x3080
+#define QM_REG_DQRR_PI_CINH 0x3100
+#define QM_REG_DQRR_CI_CINH 0x3140
+#define QM_REG_DQRR_ITR 0x3180
+#define QM_REG_DQRR_DCAP 0x31C0
+#define QM_REG_DQRR_SDQCR 0x3200
+#define QM_REG_DQRR_VDQCR 0x3240
+#define QM_REG_DQRR_PDQCR 0x3280
+#define QM_REG_MR_PI_CINH 0x3300
+#define QM_REG_MR_CI_CINH 0x3340
+#define QM_REG_MR_ITR 0x3380
+#define QM_REG_CFG 0x3500
+#define QM_REG_ISR 0x3600
+#define QM_REG_IER 0x3640
+#define QM_REG_ISDR 0x3680
+#define QM_REG_IIR 0x36C0
+#define QM_REG_ITPR 0x3740
+
+/* Cache-enabled register offsets */
+#define QM_CL_EQCR 0x0000
+#define QM_CL_DQRR 0x1000
+#define QM_CL_MR 0x2000
+#define QM_CL_EQCR_PI_CENA 0x3000
+#define QM_CL_EQCR_CI_CENA 0x3040
+#define QM_CL_DQRR_PI_CENA 0x3100
+#define QM_CL_DQRR_CI_CENA 0x3140
+#define QM_CL_MR_PI_CENA 0x3300
+#define QM_CL_MR_CI_CENA 0x3340
+#define QM_CL_CR 0x3800
+#define QM_CL_RR0 0x3900
+#define QM_CL_RR1 0x3940
+
+#else
+/* Cache-inhibited register offsets */
+#define QM_REG_EQCR_PI_CINH 0x0000
+#define QM_REG_EQCR_CI_CINH 0x0004
+#define QM_REG_EQCR_ITR 0x0008
+#define QM_REG_DQRR_PI_CINH 0x0040
+#define QM_REG_DQRR_CI_CINH 0x0044
+#define QM_REG_DQRR_ITR 0x0048
+#define QM_REG_DQRR_DCAP 0x0050
+#define QM_REG_DQRR_SDQCR 0x0054
+#define QM_REG_DQRR_VDQCR 0x0058
+#define QM_REG_DQRR_PDQCR 0x005c
+#define QM_REG_MR_PI_CINH 0x0080
+#define QM_REG_MR_CI_CINH 0x0084
+#define QM_REG_MR_ITR 0x0088
+#define QM_REG_CFG 0x0100
+#define QM_REG_ISR 0x0e00
+#define QM_REG_IER 0x0e04
+#define QM_REG_ISDR 0x0e08
+#define QM_REG_IIR 0x0e0c
+#define QM_REG_ITPR 0x0e14
+
+/* Cache-enabled register offsets */
+#define QM_CL_EQCR 0x0000
+#define QM_CL_DQRR 0x1000
+#define QM_CL_MR 0x2000
+#define QM_CL_EQCR_PI_CENA 0x3000
+#define QM_CL_EQCR_CI_CENA 0x3100
+#define QM_CL_DQRR_PI_CENA 0x3200
+#define QM_CL_DQRR_CI_CENA 0x3300
+#define QM_CL_MR_PI_CENA 0x3400
+#define QM_CL_MR_CI_CENA 0x3500
+#define QM_CL_CR 0x3800
+#define QM_CL_RR0 0x3900
+#define QM_CL_RR1 0x3940
+#endif
+
+/*
+ * BTW, the drivers (and h/w programming model) already obtain the required
+ * synchronisation for portal accesses and data-dependencies. Use of
+ * barrier()s or other order-preserving primitives simply degrades
+ * performance. Hence the use of the __raw_*() interfaces, which simply
+ * ensure that the compiler treats the portal registers as volatile.
+ */
+
+/* Cache-enabled ring access */
+#define qm_cl(base, idx) ((void *)base + ((idx) << 6))
+
+/*
+ * Portal modes.
+ * Enum types:
+ *   pmode == production mode
+ *   cmode == consumption mode
+ *   dmode == h/w dequeue mode
+ * Enum values use 3-letter codes. The first letter matches the portal mode;
+ * the remaining two letters indicate:
+ *   ci == cache-inhibited portal register
+ *   ce == cache-enabled portal register
+ *   vb == in-band valid-bit (cache-enabled)
+ *   dc == DCA (Discrete Consumption Acknowledgment), DQRR-only
+ * As for "enum qm_dqrr_dmode", it should be self-explanatory.
+ */
+enum qm_eqcr_pmode { /* matches QCSP_CFG::EPM */
+ qm_eqcr_pci = 0, /* PI index, cache-inhibited */
+ qm_eqcr_pce = 1, /* PI index, cache-enabled */
+ qm_eqcr_pvb = 2 /* valid-bit */
+};
+enum qm_dqrr_dmode { /* matches QCSP_CFG::DP */
+ qm_dqrr_dpush = 0, /* SDQCR + VDQCR */
+ qm_dqrr_dpull = 1 /* PDQCR */
+};
+enum qm_dqrr_pmode { /* s/w-only */
+ qm_dqrr_pci, /* reads DQRR_PI_CINH */
+ qm_dqrr_pce, /* reads DQRR_PI_CENA */
+ qm_dqrr_pvb /* reads valid-bit */
+};
+enum qm_dqrr_cmode { /* matches QCSP_CFG::DCM */
+ qm_dqrr_cci = 0, /* CI index, cache-inhibited */
+ qm_dqrr_cce = 1, /* CI index, cache-enabled */
+ qm_dqrr_cdc = 2 /* Discrete Consumption Acknowledgment */
+};
+enum qm_mr_pmode { /* s/w-only */
+ qm_mr_pci, /* reads MR_PI_CINH */
+ qm_mr_pce, /* reads MR_PI_CENA */
+ qm_mr_pvb /* reads valid-bit */
+};
+enum qm_mr_cmode { /* matches QCSP_CFG::MM */
+ qm_mr_cci = 0, /* CI index, cache-inhibited */
+ qm_mr_cce = 1 /* CI index, cache-enabled */
+};
+
+/* --- Portal structures --- */
+
+#define QM_EQCR_SIZE 8
+#define QM_DQRR_SIZE 16
+#define QM_MR_SIZE 8
+
+/* "Enqueue Command" */
+struct qm_eqcr_entry {
+ u8 _ncw_verb; /* writes to this are non-coherent */
+ u8 dca;
+ __be16 seqnum;
+ u8 __reserved[4];
+ __be32 fqid; /* 24-bit */
+ __be32 tag;
+ struct qm_fd fd;
+ u8 __reserved3[32];
+} __packed __aligned(8);
+#define QM_EQCR_VERB_VBIT 0x80
+#define QM_EQCR_VERB_CMD_MASK 0x61 /* but only one value; */
+#define QM_EQCR_VERB_CMD_ENQUEUE 0x01
+#define QM_EQCR_SEQNUM_NESN 0x8000 /* Advance NESN */
+#define QM_EQCR_SEQNUM_NLIS 0x4000 /* More fragments to come */
+#define QM_EQCR_SEQNUM_SEQMASK 0x3fff /* sequence number goes here */
+
+struct qm_eqcr {
+ struct qm_eqcr_entry *ring, *cursor;
+ u8 ci, available, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ u32 busy;
+ enum qm_eqcr_pmode pmode;
+#endif
+};
+
+struct qm_dqrr {
+ const struct qm_dqrr_entry *ring, *cursor;
+ u8 pi, ci, fill, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ enum qm_dqrr_dmode dmode;
+ enum qm_dqrr_pmode pmode;
+ enum qm_dqrr_cmode cmode;
+#endif
+};
+
+struct qm_mr {
+ union qm_mr_entry *ring, *cursor;
+ u8 pi, ci, fill, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ enum qm_mr_pmode pmode;
+ enum qm_mr_cmode cmode;
+#endif
+};
+
+/* MC (Management Command) command */
+/* "FQ" command layout */
+struct qm_mcc_fq {
+ u8 _ncw_verb;
+ u8 __reserved1[3];
+ __be32 fqid; /* 24-bit */
+ u8 __reserved2[56];
+} __packed;
+
+/* "CGR" command layout */
+struct qm_mcc_cgr {
+ u8 _ncw_verb;
+ u8 __reserved1[30];
+ u8 cgid;
+ u8 __reserved2[32];
+};
+
+#define QM_MCC_VERB_VBIT 0x80
+#define QM_MCC_VERB_MASK 0x7f /* where the verb contains; */
+#define QM_MCC_VERB_INITFQ_PARKED 0x40
+#define QM_MCC_VERB_INITFQ_SCHED 0x41
+#define QM_MCC_VERB_QUERYFQ 0x44
+#define QM_MCC_VERB_QUERYFQ_NP 0x45 /* "non-programmable" fields */
+#define QM_MCC_VERB_QUERYWQ 0x46
+#define QM_MCC_VERB_QUERYWQ_DEDICATED 0x47
+#define QM_MCC_VERB_ALTER_SCHED 0x48 /* Schedule FQ */
+#define QM_MCC_VERB_ALTER_FE 0x49 /* Force Eligible FQ */
+#define QM_MCC_VERB_ALTER_RETIRE 0x4a /* Retire FQ */
+#define QM_MCC_VERB_ALTER_OOS 0x4b /* Take FQ out of service */
+#define QM_MCC_VERB_ALTER_FQXON 0x4d /* FQ XON */
+#define QM_MCC_VERB_ALTER_FQXOFF 0x4e /* FQ XOFF */
+#define QM_MCC_VERB_INITCGR 0x50
+#define QM_MCC_VERB_MODIFYCGR 0x51
+#define QM_MCC_VERB_CGRTESTWRITE 0x52
+#define QM_MCC_VERB_QUERYCGR 0x58
+#define QM_MCC_VERB_QUERYCONGESTION 0x59
+union qm_mc_command {
+ struct {
+ u8 _ncw_verb; /* writes to this are non-coherent */
+ u8 __reserved[63];
+ };
+ struct qm_mcc_initfq initfq;
+ struct qm_mcc_initcgr initcgr;
+ struct qm_mcc_fq fq;
+ struct qm_mcc_cgr cgr;
+};
+
+/* MC (Management Command) result */
+/* "Query FQ" */
+struct qm_mcr_queryfq {
+ u8 verb;
+ u8 result;
+ u8 __reserved1[8];
+ struct qm_fqd fqd; /* the FQD fields are here */
+ u8 __reserved2[30];
+} __packed;
+
+/* "Alter FQ State Commands" */
+struct qm_mcr_alterfq {
+ u8 verb;
+ u8 result;
+ u8 fqs; /* Frame Queue Status */
+ u8 __reserved1[61];
+};
+#define QM_MCR_VERB_RRID 0x80
+#define QM_MCR_VERB_MASK QM_MCC_VERB_MASK
+#define QM_MCR_VERB_INITFQ_PARKED QM_MCC_VERB_INITFQ_PARKED
+#define QM_MCR_VERB_INITFQ_SCHED QM_MCC_VERB_INITFQ_SCHED
+#define QM_MCR_VERB_QUERYFQ QM_MCC_VERB_QUERYFQ
+#define QM_MCR_VERB_QUERYFQ_NP QM_MCC_VERB_QUERYFQ_NP
+#define QM_MCR_VERB_QUERYWQ QM_MCC_VERB_QUERYWQ
+#define QM_MCR_VERB_QUERYWQ_DEDICATED QM_MCC_VERB_QUERYWQ_DEDICATED
+#define QM_MCR_VERB_ALTER_SCHED QM_MCC_VERB_ALTER_SCHED
+#define QM_MCR_VERB_ALTER_FE QM_MCC_VERB_ALTER_FE
+#define QM_MCR_VERB_ALTER_RETIRE QM_MCC_VERB_ALTER_RETIRE
+#define QM_MCR_VERB_ALTER_OOS QM_MCC_VERB_ALTER_OOS
+#define QM_MCR_RESULT_NULL 0x00
+#define QM_MCR_RESULT_OK 0xf0
+#define QM_MCR_RESULT_ERR_FQID 0xf1
+#define QM_MCR_RESULT_ERR_FQSTATE 0xf2
+#define QM_MCR_RESULT_ERR_NOTEMPTY 0xf3 /* OOS fails if FQ is !empty */
+#define QM_MCR_RESULT_ERR_BADCHANNEL 0xf4
+#define QM_MCR_RESULT_PENDING 0xf8
+#define QM_MCR_RESULT_ERR_BADCOMMAND 0xff
+#define QM_MCR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
+#define QM_MCR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
+#define QM_MCR_TIMEOUT 10000 /* us */
+union qm_mc_result {
+ struct {
+ u8 verb;
+ u8 result;
+ u8 __reserved1[62];
+ };
+ struct qm_mcr_queryfq queryfq;
+ struct qm_mcr_alterfq alterfq;
+ struct qm_mcr_querycgr querycgr;
+ struct qm_mcr_querycongestion querycongestion;
+ struct qm_mcr_querywq querywq;
+ struct qm_mcr_queryfq_np queryfq_np;
+};
+
+struct qm_mc {
+ union qm_mc_command *cr;
+ union qm_mc_result *rr;
+ u8 rridx, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ enum {
+ /* Can be _mc_start()ed */
+ qman_mc_idle,
+ /* Can be _mc_commit()ed or _mc_abort()ed */
+ qman_mc_user,
+ /* Can only be _mc_retry()ed */
+ qman_mc_hw
+ } state;
+#endif
+};
+
+struct qm_addr {
+ void *ce; /* cache-enabled */
+ __be32 *ce_be; /* same value as above but for direct access */
+ void __iomem *ci; /* cache-inhibited */
+};
+
+struct qm_portal {
+	/*
+	 * In the non-CONFIG_FSL_DPAA_CHECKING case, the following stuff up to
+	 * and including 'mc' fits within a cacheline (yay!). The 'config' part
+	 * is setup-only, so isn't a cause for concern. In other words, don't
+	 * rearrange this structure on a whim, there be dragons ...
+	 */
+ struct qm_addr addr;
+ struct qm_eqcr eqcr;
+ struct qm_dqrr dqrr;
+ struct qm_mr mr;
+ struct qm_mc mc;
+} ____cacheline_aligned;
+
+/* Cache-inhibited register access. */
+static inline u32 qm_in(struct qm_portal *p, u32 offset)
+{
+ return ioread32be(p->addr.ci + offset);
+}
+
+static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
+{
+ iowrite32be(val, p->addr.ci + offset);
+}
+
+/* Cache Enabled Portal Access */
+static inline void qm_cl_invalidate(struct qm_portal *p, u32 offset)
+{
+ dpaa_invalidate(p->addr.ce + offset);
+}
+
+static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
+{
+ dpaa_touch_ro(p->addr.ce + offset);
+}
+
+static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
+{
+ return be32_to_cpu(*(p->addr.ce_be + (offset/4)));
+}
+
+/* --- EQCR API --- */
+
+#define EQCR_SHIFT ilog2(sizeof(struct qm_eqcr_entry))
+#define EQCR_CARRY (uintptr_t)(QM_EQCR_SIZE << EQCR_SHIFT)
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+static struct qm_eqcr_entry *eqcr_carryclear(struct qm_eqcr_entry *p)
+{
+ uintptr_t addr = (uintptr_t)p;
+
+ addr &= ~EQCR_CARRY;
+
+ return (struct qm_eqcr_entry *)addr;
+}
+
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static int eqcr_ptr2idx(struct qm_eqcr_entry *e)
+{
+ return ((uintptr_t)e >> EQCR_SHIFT) & (QM_EQCR_SIZE - 1);
+}
+
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void eqcr_inc(struct qm_eqcr *eqcr)
+{
+ /* increment to the next EQCR pointer and handle overflow and 'vbit' */
+ struct qm_eqcr_entry *partial = eqcr->cursor + 1;
+
+ eqcr->cursor = eqcr_carryclear(partial);
+ if (partial != eqcr->cursor)
+ eqcr->vbit ^= QM_EQCR_VERB_VBIT;
+}
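+
+/*
+ * Worked example of the wrap logic above: EQCR entries are 64 bytes
+ * (EQCR_SHIFT == 6) and the 8-entry ring is naturally aligned, so it
+ * occupies a 512-byte window and EQCR_CARRY == 0x200. Incrementing the
+ * cursor past entry 7 carries into bit 9; eqcr_carryclear() masks that
+ * bit off, wrapping the pointer back to entry 0, and the
+ * partial != eqcr->cursor test in eqcr_inc() detects the wrap and
+ * toggles the valid bit used to mark new entries.
+ */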
+
+static inline int qm_eqcr_init(struct qm_portal *portal,
+ enum qm_eqcr_pmode pmode,
+ unsigned int eq_stash_thresh,
+ int eq_stash_prio)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+ u32 cfg;
+ u8 pi;
+
+ eqcr->ring = portal->addr.ce + QM_CL_EQCR;
+ eqcr->ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+ qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
+ pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+ eqcr->cursor = eqcr->ring + pi;
+ eqcr->vbit = (qm_in(portal, QM_REG_EQCR_PI_CINH) & QM_EQCR_SIZE) ?
+ QM_EQCR_VERB_VBIT : 0;
+ eqcr->available = QM_EQCR_SIZE - 1 -
+ dpaa_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
+ eqcr->ithresh = qm_in(portal, QM_REG_EQCR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ eqcr->busy = 0;
+ eqcr->pmode = pmode;
+#endif
+ cfg = (qm_in(portal, QM_REG_CFG) & 0x00ffffff) |
+ (eq_stash_thresh << 28) | /* QCSP_CFG: EST */
+ (eq_stash_prio << 26) | /* QCSP_CFG: EP */
+ ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */
+ qm_out(portal, QM_REG_CFG, cfg);
+ return 0;
+}
+
+static inline void qm_eqcr_finish(struct qm_portal *portal)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+ u8 ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+
+ DPAA_ASSERT(!eqcr->busy);
+ if (pi != eqcr_ptr2idx(eqcr->cursor))
+ pr_crit("losing uncommitted EQCR entries\n");
+ if (ci != eqcr->ci)
+ pr_crit("missing existing EQCR completions\n");
+ if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor))
+ pr_crit("EQCR destroyed unquiesced\n");
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
+ *portal)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+
+ DPAA_ASSERT(!eqcr->busy);
+ if (!eqcr->available)
+ return NULL;
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ eqcr->busy = 1;
+#endif
+ dpaa_zero(eqcr->cursor);
+ return eqcr->cursor;
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
+ *portal)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 diff, old_ci;
+
+ DPAA_ASSERT(!eqcr->busy);
+ if (!eqcr->available) {
+ old_ci = eqcr->ci;
+ eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) &
+ (QM_EQCR_SIZE - 1);
+ diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ if (!diff)
+ return NULL;
+ }
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ eqcr->busy = 1;
+#endif
+ dpaa_zero(eqcr->cursor);
+ return eqcr->cursor;
+}
+
+static inline void eqcr_commit_checks(struct qm_eqcr *eqcr)
+{
+ DPAA_ASSERT(eqcr->busy);
+ DPAA_ASSERT(!(be32_to_cpu(eqcr->cursor->fqid) & ~QM_FQID_MASK));
+ DPAA_ASSERT(eqcr->available >= 1);
+}
+
+static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+ struct qm_eqcr_entry *eqcursor;
+
+ eqcr_commit_checks(eqcr);
+ DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
+ dma_wmb();
+ eqcursor = eqcr->cursor;
+ eqcursor->_ncw_verb = myverb | eqcr->vbit;
+ dpaa_flush(eqcursor);
+ eqcr_inc(eqcr);
+ eqcr->available--;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ eqcr->busy = 0;
+#endif
+}
+
+static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
+{
+ qm_cl_touch_ro(portal, QM_CL_EQCR_CI_CENA);
+}
+
+static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 diff, old_ci = eqcr->ci;
+
+ eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) & (QM_EQCR_SIZE - 1);
+ qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
+ diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ return diff;
+}
+
+static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+
+ eqcr->ithresh = ithresh;
+ qm_out(portal, QM_REG_EQCR_ITR, ithresh);
+}
+
+static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+
+ return eqcr->available;
+}
+
+static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+
+ return QM_EQCR_SIZE - 1 - eqcr->available;
+}
+
+/* --- DQRR API --- */
+
+#define DQRR_SHIFT ilog2(sizeof(struct qm_dqrr_entry))
+#define DQRR_CARRY (uintptr_t)(QM_DQRR_SIZE << DQRR_SHIFT)
+
+static const struct qm_dqrr_entry *dqrr_carryclear(
+ const struct qm_dqrr_entry *p)
+{
+ uintptr_t addr = (uintptr_t)p;
+
+ addr &= ~DQRR_CARRY;
+
+ return (const struct qm_dqrr_entry *)addr;
+}
+
+static inline int dqrr_ptr2idx(const struct qm_dqrr_entry *e)
+{
+ return ((uintptr_t)e >> DQRR_SHIFT) & (QM_DQRR_SIZE - 1);
+}
+
+static const struct qm_dqrr_entry *dqrr_inc(const struct qm_dqrr_entry *e)
+{
+ return dqrr_carryclear(e + 1);
+}
+
+static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
+{
+ qm_out(portal, QM_REG_CFG, (qm_in(portal, QM_REG_CFG) & 0xff0fffff) |
+ ((mf & (QM_DQRR_SIZE - 1)) << 20));
+}
+
+static inline int qm_dqrr_init(struct qm_portal *portal,
+ const struct qm_portal_config *config,
+ enum qm_dqrr_dmode dmode,
+ enum qm_dqrr_pmode pmode,
+ enum qm_dqrr_cmode cmode, u8 max_fill)
+{
+ struct qm_dqrr *dqrr = &portal->dqrr;
+ u32 cfg;
+
+ /* Make sure the DQRR will be idle when we enable */
+ qm_out(portal, QM_REG_DQRR_SDQCR, 0);
+ qm_out(portal, QM_REG_DQRR_VDQCR, 0);
+ qm_out(portal, QM_REG_DQRR_PDQCR, 0);
+ dqrr->ring = portal->addr.ce + QM_CL_DQRR;
+ dqrr->pi = qm_in(portal, QM_REG_DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
+ dqrr->ci = qm_in(portal, QM_REG_DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
+ dqrr->cursor = dqrr->ring + dqrr->ci;
+ dqrr->fill = dpaa_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
+ dqrr->vbit = (qm_in(portal, QM_REG_DQRR_PI_CINH) & QM_DQRR_SIZE) ?
+ QM_DQRR_VERB_VBIT : 0;
+ dqrr->ithresh = qm_in(portal, QM_REG_DQRR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ dqrr->dmode = dmode;
+ dqrr->pmode = pmode;
+ dqrr->cmode = cmode;
+#endif
+ /* Invalidate every ring entry before beginning */
+ for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
+ dpaa_invalidate(qm_cl(dqrr->ring, cfg));
+ cfg = (qm_in(portal, QM_REG_CFG) & 0xff000f00) |
+ ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
+ ((dmode & 1) << 18) | /* DP */
+ ((cmode & 3) << 16) | /* DCM */
+ 0xa0 | /* RE+SE */
+ (0 ? 0x40 : 0) | /* Ignore RP */
+ (0 ? 0x10 : 0); /* Ignore SP */
+ qm_out(portal, QM_REG_CFG, cfg);
+ qm_dqrr_set_maxfill(portal, max_fill);
+ return 0;
+}
+
+static inline void qm_dqrr_finish(struct qm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ struct qm_dqrr *dqrr = &portal->dqrr;
+
+ if (dqrr->cmode != qm_dqrr_cdc &&
+ dqrr->ci != dqrr_ptr2idx(dqrr->cursor))
+ pr_crit("Ignoring completed DQRR entries\n");
+#endif
+}
+
+static inline const struct qm_dqrr_entry *qm_dqrr_current(
+ struct qm_portal *portal)
+{
+ struct qm_dqrr *dqrr = &portal->dqrr;
+
+ if (!dqrr->fill)
+ return NULL;
+ return dqrr->cursor;
+}
+
+static inline u8 qm_dqrr_next(struct qm_portal *portal)
+{
+ struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPAA_ASSERT(dqrr->fill);
+ dqrr->cursor = dqrr_inc(dqrr->cursor);
+ return --dqrr->fill;
+}
+
+static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
+{
+ struct qm_dqrr *dqrr = &portal->dqrr;
+ struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);
+
+ DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
+#ifndef CONFIG_FSL_PAMU
+ /*
+ * If PAMU is not available we need to invalidate the cache.
+ * When PAMU is available the cache is updated by stash
+ */
+ dpaa_invalidate_touch_ro(res);
+#endif
+ if ((res->verb & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
+ dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
+ if (!dqrr->pi)
+ dqrr->vbit ^= QM_DQRR_VERB_VBIT;
+ dqrr->fill++;
+ }
+}
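+
+/*
+ * A note on the valid-bit ("pvb") production mode implemented above: QMan
+ * writes each new DQRR entry with a verb whose QM_DQRR_VERB_VBIT alternates
+ * on every lap of the ring, and software tracks the expected polarity in
+ * dqrr->vbit. An entry at the software producer index is therefore new
+ * exactly when the polarities match; on wrapping back to index 0, software
+ * flips dqrr->vbit for the next lap. This lets new entries be detected from
+ * the (cache-enabled) ring itself rather than by polling the cache-inhibited
+ * producer-index register.
+ */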
+
+static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
+ const struct qm_dqrr_entry *dq,
+ int park)
+{
+ __maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
+ int idx = dqrr_ptr2idx(dq);
+
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+ DPAA_ASSERT((dqrr->ring + idx) == dq);
+ DPAA_ASSERT(idx < QM_DQRR_SIZE);
+ qm_out(portal, QM_REG_DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */
+ ((park ? 1 : 0) << 6) | /* DQRR_DCAP::PK */
+ idx); /* DQRR_DCAP::DCAP_CI */
+}
+
+static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u32 bitmask)
+{
+ __maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+ qm_out(portal, QM_REG_DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */
+ (bitmask << 16)); /* DQRR_DCAP::DCAP_CI */
+}
+
+static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
+{
+ qm_out(portal, QM_REG_DQRR_SDQCR, sdqcr);
+}
+
+static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
+{
+ qm_out(portal, QM_REG_DQRR_VDQCR, vdqcr);
+}
+
+static inline int qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+ if (ithresh > QMAN_DQRR_IT_MAX)
+ return -EINVAL;
+
+ qm_out(portal, QM_REG_DQRR_ITR, ithresh);
+
+ return 0;
+}
+
+/* --- MR API --- */
+
+#define MR_SHIFT ilog2(sizeof(union qm_mr_entry))
+#define MR_CARRY (uintptr_t)(QM_MR_SIZE << MR_SHIFT)
+
+static union qm_mr_entry *mr_carryclear(union qm_mr_entry *p)
+{
+ uintptr_t addr = (uintptr_t)p;
+
+ addr &= ~MR_CARRY;
+
+ return (union qm_mr_entry *)addr;
+}
+
+static inline int mr_ptr2idx(const union qm_mr_entry *e)
+{
+ return ((uintptr_t)e >> MR_SHIFT) & (QM_MR_SIZE - 1);
+}
+
+static inline union qm_mr_entry *mr_inc(union qm_mr_entry *e)
+{
+ return mr_carryclear(e + 1);
+}
+
+static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode,
+ enum qm_mr_cmode cmode)
+{
+ struct qm_mr *mr = &portal->mr;
+ u32 cfg;
+
+ mr->ring = portal->addr.ce + QM_CL_MR;
+ mr->pi = qm_in(portal, QM_REG_MR_PI_CINH) & (QM_MR_SIZE - 1);
+ mr->ci = qm_in(portal, QM_REG_MR_CI_CINH) & (QM_MR_SIZE - 1);
+ mr->cursor = mr->ring + mr->ci;
+ mr->fill = dpaa_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
+ mr->vbit = (qm_in(portal, QM_REG_MR_PI_CINH) & QM_MR_SIZE)
+ ? QM_MR_VERB_VBIT : 0;
+ mr->ithresh = qm_in(portal, QM_REG_MR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mr->pmode = pmode;
+ mr->cmode = cmode;
+#endif
+ cfg = (qm_in(portal, QM_REG_CFG) & 0xfffff0ff) |
+ ((cmode & 1) << 8); /* QCSP_CFG:MM */
+ qm_out(portal, QM_REG_CFG, cfg);
+ return 0;
+}
+
+static inline void qm_mr_finish(struct qm_portal *portal)
+{
+ struct qm_mr *mr = &portal->mr;
+
+ if (mr->ci != mr_ptr2idx(mr->cursor))
+ pr_crit("Ignoring completed MR entries\n");
+}
+
+static inline const union qm_mr_entry *qm_mr_current(struct qm_portal *portal)
+{
+ struct qm_mr *mr = &portal->mr;
+
+ if (!mr->fill)
+ return NULL;
+ return mr->cursor;
+}
+
+static inline int qm_mr_next(struct qm_portal *portal)
+{
+ struct qm_mr *mr = &portal->mr;
+
+ DPAA_ASSERT(mr->fill);
+ mr->cursor = mr_inc(mr->cursor);
+ return --mr->fill;
+}
+
+static inline void qm_mr_pvb_update(struct qm_portal *portal)
+{
+ struct qm_mr *mr = &portal->mr;
+ union qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
+
+ DPAA_ASSERT(mr->pmode == qm_mr_pvb);
+
+ if ((res->verb & QM_MR_VERB_VBIT) == mr->vbit) {
+ mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
+ if (!mr->pi)
+ mr->vbit ^= QM_MR_VERB_VBIT;
+ mr->fill++;
+ res = mr_inc(res);
+ }
+ dpaa_invalidate_touch_ro(res);
+}
+
+static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
+{
+ struct qm_mr *mr = &portal->mr;
+
+ DPAA_ASSERT(mr->cmode == qm_mr_cci);
+ mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
+ qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
+}
+
+static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
+{
+ struct qm_mr *mr = &portal->mr;
+
+ DPAA_ASSERT(mr->cmode == qm_mr_cci);
+ mr->ci = mr_ptr2idx(mr->cursor);
+ qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
+}
+
+static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+ qm_out(portal, QM_REG_MR_ITR, ithresh);
+}
+
+/* --- Management command API --- */
+
+static inline int qm_mc_init(struct qm_portal *portal)
+{
+ u8 rr0, rr1;
+ struct qm_mc *mc = &portal->mc;
+
+ mc->cr = portal->addr.ce + QM_CL_CR;
+ mc->rr = portal->addr.ce + QM_CL_RR0;
+	/*
+	 * The expected valid bit polarity for the next CR command is 0
+	 * if RR1 contains a valid response, and is 1 if RR0 contains a
+	 * valid response. If both RRs contain all 0, no command has been
+	 * executed since reset, in which case the expected valid bit
+	 * polarity is 1.
+	 */
+ rr0 = mc->rr->verb;
+ rr1 = (mc->rr+1)->verb;
+ if ((rr0 == 0 && rr1 == 0) || rr0 != 0)
+ mc->rridx = 1;
+ else
+ mc->rridx = 0;
+ mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = qman_mc_idle;
+#endif
+ return 0;
+}
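+
+/*
+ * The rridx selection above, tabulated (derived directly from the code):
+ *
+ *	rr0 == 0 && rr1 == 0  ->  rridx = 1  (no command since reset)
+ *	rr0 != 0              ->  rridx = 1  (RR0 holds the latest response)
+ *	rr0 == 0 && rr1 != 0  ->  rridx = 0  (RR1 holds the latest response)
+ *
+ * i.e. rridx names the register the driver will watch for the next
+ * response, and mc->vbit follows it to give the matching valid-bit
+ * polarity.
+ */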
+
+static inline void qm_mc_finish(struct qm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ struct qm_mc *mc = &portal->mc;
+
+ DPAA_ASSERT(mc->state == qman_mc_idle);
+ if (mc->state != qman_mc_idle)
+ pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline union qm_mc_command *qm_mc_start(struct qm_portal *portal)
+{
+ struct qm_mc *mc = &portal->mc;
+
+ DPAA_ASSERT(mc->state == qman_mc_idle);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = qman_mc_user;
+#endif
+ dpaa_zero(mc->cr);
+ return mc->cr;
+}
+
+static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
+{
+ struct qm_mc *mc = &portal->mc;
+ union qm_mc_result *rr = mc->rr + mc->rridx;
+
+ DPAA_ASSERT(mc->state == qman_mc_user);
+ dma_wmb();
+ mc->cr->_ncw_verb = myverb | mc->vbit;
+ dpaa_flush(mc->cr);
+ dpaa_invalidate_touch_ro(rr);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = qman_mc_hw;
+#endif
+}
+
+static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal)
+{
+ struct qm_mc *mc = &portal->mc;
+ union qm_mc_result *rr = mc->rr + mc->rridx;
+
+ DPAA_ASSERT(mc->state == qman_mc_hw);
+ /*
+ * The inactive response register's verb byte always returns zero until
+ * its command is submitted and completed. This includes the valid-bit,
+ * in case you were wondering...
+ */
+ if (!rr->verb) {
+ dpaa_invalidate_touch_ro(rr);
+ return NULL;
+ }
+ mc->rridx ^= 1;
+ mc->vbit ^= QM_MCC_VERB_VBIT;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = qman_mc_idle;
+#endif
+ return rr;
+}
+
+static inline int qm_mc_result_timeout(struct qm_portal *portal,
+ union qm_mc_result **mcr)
+{
+ int timeout = QM_MCR_TIMEOUT;
+
+ do {
+ *mcr = qm_mc_result(portal);
+ if (*mcr)
+ break;
+ udelay(1);
+ } while (--timeout);
+
+ return timeout;
+}
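+
+/*
+ * Minimal usage sketch for the management-command helpers above, mirroring
+ * the pattern used throughout the rest of this file (error handling and
+ * locking trimmed):
+ *
+ *	union qm_mc_command *mcc = qm_mc_start(&p->p);
+ *	union qm_mc_result *mcr;
+ *
+ *	mcc->fq.fqid = cpu_to_be32(fqid);
+ *	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+ *	if (!qm_mc_result_timeout(&p->p, &mcr))
+ *		return -ETIMEDOUT;
+ *	if ((mcr->verb & QM_MCR_VERB_MASK) != QM_MCR_VERB_QUERYFQ_NP)
+ *		return -EIO;
+ *
+ * qm_mc_result_timeout() returns the remaining poll budget, so 0 means the
+ * hardware never produced a response within QM_MCR_TIMEOUT microseconds.
+ */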
+
+static inline void fq_set(struct qman_fq *fq, u32 mask)
+{
+ fq->flags |= mask;
+}
+
+static inline void fq_clear(struct qman_fq *fq, u32 mask)
+{
+ fq->flags &= ~mask;
+}
+
+static inline int fq_isset(struct qman_fq *fq, u32 mask)
+{
+ return fq->flags & mask;
+}
+
+static inline int fq_isclear(struct qman_fq *fq, u32 mask)
+{
+ return !(fq->flags & mask);
+}
+
+struct qman_portal {
+ struct qm_portal p;
+ /* PORTAL_BITS_*** - dynamic, strictly internal */
+ unsigned long bits;
+ /* interrupt sources processed by portal_isr(), configurable */
+ unsigned long irq_sources;
+ u32 use_eqcr_ci_stashing;
+ /* only 1 volatile dequeue at a time */
+ struct qman_fq *vdqcr_owned;
+ u32 sdqcr;
+ /* probing time config params for cpu-affine portals */
+ const struct qm_portal_config *config;
+ /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
+ struct qman_cgrs *cgrs;
+ /* linked-list of CSCN handlers. */
+ struct list_head cgr_cbs;
+ /* list lock */
+ spinlock_t cgr_lock;
+ struct work_struct congestion_work;
+ struct work_struct mr_work;
+ char irqname[MAX_IRQNAME];
+};
+
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+static u16 affine_channels[NR_CPUS];
+static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
+struct qman_portal *affine_portals[NR_CPUS];
+
+static inline struct qman_portal *get_affine_portal(void)
+{
+ return &get_cpu_var(qman_affine_portal);
+}
+
+static inline void put_affine_portal(void)
+{
+ put_cpu_var(qman_affine_portal);
+}
+
+static inline struct qman_portal *get_portal_for_channel(u16 channel)
+{
+ int i;
+
+ for (i = 0; i < num_possible_cpus(); i++) {
+ if (affine_portals[i] &&
+ affine_portals[i]->config->channel == channel)
+ return affine_portals[i];
+ }
+
+ return NULL;
+}
+
+static struct workqueue_struct *qm_portal_wq;
+
+int qman_dqrr_set_ithresh(struct qman_portal *portal, u8 ithresh)
+{
+ int res;
+
+ if (!portal)
+ return -EINVAL;
+
+ res = qm_dqrr_set_ithresh(&portal->p, ithresh);
+ if (res)
+ return res;
+
+ portal->p.dqrr.ithresh = ithresh;
+
+ return 0;
+}
+EXPORT_SYMBOL(qman_dqrr_set_ithresh);
+
+void qman_dqrr_get_ithresh(struct qman_portal *portal, u8 *ithresh)
+{
+ if (portal && ithresh)
+ *ithresh = qm_in(&portal->p, QM_REG_DQRR_ITR);
+}
+EXPORT_SYMBOL(qman_dqrr_get_ithresh);
+
+void qman_portal_get_iperiod(struct qman_portal *portal, u32 *iperiod)
+{
+ if (portal && iperiod)
+ *iperiod = qm_in(&portal->p, QM_REG_ITPR);
+}
+EXPORT_SYMBOL(qman_portal_get_iperiod);
+
+int qman_portal_set_iperiod(struct qman_portal *portal, u32 iperiod)
+{
+ if (!portal || iperiod > QMAN_ITP_MAX)
+ return -EINVAL;
+
+ qm_out(&portal->p, QM_REG_ITPR, iperiod);
+
+ return 0;
+}
+EXPORT_SYMBOL(qman_portal_set_iperiod);
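+
+/*
+ * Editor's illustrative sketch, not driver code: using the two knobs above
+ * to tune DQRR interrupt coalescing on a CPU's affine portal; the threshold
+ * and period values are arbitrary examples.
+ *
+ *	struct qman_portal *p = qman_get_affine_portal(smp_processor_id());
+ *
+ *	if (!qman_dqrr_set_ithresh(p, 4))
+ *		qman_portal_set_iperiod(p, 2048);
+ */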
+
+int qman_wq_alloc(void)
+{
+ qm_portal_wq = alloc_workqueue("qman_portal_wq", 0, 1);
+ if (!qm_portal_wq)
+ return -ENOMEM;
+ return 0;
+}
+
+void qman_enable_irqs(void)
+{
+ int i;
+
+ for (i = 0; i < num_possible_cpus(); i++) {
+ if (affine_portals[i]) {
+ qm_out(&affine_portals[i]->p, QM_REG_ISR, 0xffffffff);
+ qm_out(&affine_portals[i]->p, QM_REG_IIR, 0);
+		}
+	}
+}
+
+/*
+ * This is what everything can wait on, even if it migrates to a different cpu
+ * to the one whose affine portal it is waiting on.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
+
+static struct qman_fq **fq_table;
+static u32 num_fqids;
+
+int qman_alloc_fq_table(u32 _num_fqids)
+{
+ num_fqids = _num_fqids;
+
+ fq_table = vzalloc(array3_size(sizeof(struct qman_fq *),
+ num_fqids, 2));
+ if (!fq_table)
+ return -ENOMEM;
+
+ pr_debug("Allocated fq lookup table at %p, entry count %u\n",
+ fq_table, num_fqids * 2);
+ return 0;
+}
+
+static struct qman_fq *idx_to_fq(u32 idx)
+{
+ struct qman_fq *fq;
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ if (WARN_ON(idx >= num_fqids * 2))
+ return NULL;
+#endif
+ fq = fq_table[idx];
+ DPAA_ASSERT(!fq || idx == fq->idx);
+
+ return fq;
+}
+
+/*
+ * Only returns full-service fq objects, not enqueue-only
+ * references (QMAN_FQ_FLAG_NO_MODIFY).
+ */
+static struct qman_fq *fqid_to_fq(u32 fqid)
+{
+ return idx_to_fq(fqid * 2);
+}
+
+static struct qman_fq *tag_to_fq(u32 tag)
+{
+#if BITS_PER_LONG == 64
+ return idx_to_fq(tag);
+#else
+ return (struct qman_fq *)tag;
+#endif
+}
+
+static u32 fq_to_tag(struct qman_fq *fq)
+{
+#if BITS_PER_LONG == 64
+ return fq->idx;
+#else
+ return (u32)fq;
+#endif
+}
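+
+/*
+ * Editor's note, illustrating the encoding above: on 64-bit builds the tag
+ * is the FQ's lookup-table index, so tag_to_fq(fq_to_tag(fq)) == fq via a
+ * table lookup; on 32-bit builds the pointer itself fits in the 32-bit tag
+ * and the casts round-trip directly.
+ */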
+
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+ unsigned int poll_limit, bool sched_napi);
+static void qm_congestion_task(struct work_struct *work);
+static void qm_mr_process_task(struct work_struct *work);
+
+static irqreturn_t portal_isr(int irq, void *ptr)
+{
+ struct qman_portal *p = ptr;
+ u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;
+ u32 clear = 0;
+
+ if (unlikely(!is))
+ return IRQ_NONE;
+
+ /* DQRR-handling if it's interrupt-driven */
+ if (is & QM_PIRQ_DQRI) {
+ __poll_portal_fast(p, QMAN_POLL_LIMIT, true);
+ clear = QM_DQAVAIL_MASK | QM_PIRQ_DQRI;
+ }
+ /* Handling of anything else that's interrupt-driven */
+ clear |= __poll_portal_slow(p, is) & QM_PIRQ_SLOW;
+ qm_out(&p->p, QM_REG_ISR, clear);
+ return IRQ_HANDLED;
+}
+
+static int drain_mr_fqrni(struct qm_portal *p)
+{
+ const union qm_mr_entry *msg;
+loop:
+ qm_mr_pvb_update(p);
+ msg = qm_mr_current(p);
+ if (!msg) {
+ /*
+ * if MR was full and h/w had other FQRNI entries to produce, we
+ * need to allow it time to produce those entries once the
+ * existing entries are consumed. A worst-case situation
+ * (fully-loaded system) means h/w sequencers may have to do 3-4
+ * other things before servicing the portal's MR pump, each of
+ * which (if slow) may take ~50 qman cycles (which is ~200
+ * processor cycles). So rounding up and then multiplying this
+ * worst-case estimate by a factor of 10, just to be
+ * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
+ * one entry at a time, so h/w has an opportunity to produce new
+ * entries well before the ring has been fully consumed, so
+ * we're being *really* paranoid here.
+ */
+ mdelay(1);
+ qm_mr_pvb_update(p);
+ msg = qm_mr_current(p);
+ if (!msg)
+ return 0;
+ }
+ if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
+ /* We aren't draining anything but FQRNIs */
+ pr_err("Found verb 0x%x in MR\n", msg->verb);
+ return -1;
+ }
+ qm_mr_next(p);
+ qm_mr_cci_consume(p, 1);
+ goto loop;
+}
+
+static int qman_create_portal(struct qman_portal *portal,
+ const struct qm_portal_config *c,
+ const struct qman_cgrs *cgrs)
+{
+ struct qm_portal *p;
+ int ret;
+ u32 isdr;
+
+ p = &portal->p;
+
+#ifdef CONFIG_FSL_PAMU
+ /* PAMU is required for stashing */
+ portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
+#else
+ portal->use_eqcr_ci_stashing = 0;
+#endif
+ /*
+ * prep the low-level portal struct with the mapped addresses from the
+ * config, everything that follows depends on it and "config" is more
+ * for (de)reference
+ */
+ p->addr.ce = c->addr_virt_ce;
+ p->addr.ce_be = c->addr_virt_ce;
+ p->addr.ci = c->addr_virt_ci;
+	/*
+	 * If CI-stashing is used, the current defaults use a threshold of 3,
+	 * and stash with higher-than-DQRR priority.
+	 */
+ if (qm_eqcr_init(p, qm_eqcr_pvb,
+ portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
+ dev_err(c->dev, "EQCR initialisation failed\n");
+ goto fail_eqcr;
+ }
+ if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
+ qm_dqrr_cdc, DQRR_MAXFILL)) {
+ dev_err(c->dev, "DQRR initialisation failed\n");
+ goto fail_dqrr;
+ }
+ if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
+ dev_err(c->dev, "MR initialisation failed\n");
+ goto fail_mr;
+ }
+ if (qm_mc_init(p)) {
+ dev_err(c->dev, "MC initialisation failed\n");
+ goto fail_mc;
+ }
+ /* static interrupt-gating controls */
+ qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH);
+ qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH);
+ qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD);
+ portal->cgrs = kmalloc_array(2, sizeof(*cgrs), GFP_KERNEL);
+ if (!portal->cgrs)
+ goto fail_cgrs;
+ /* initial snapshot is no-depletion */
+ qman_cgrs_init(&portal->cgrs[1]);
+ if (cgrs)
+ portal->cgrs[0] = *cgrs;
+ else
+ /* if the given mask is NULL, assume all CGRs can be seen */
+ qman_cgrs_fill(&portal->cgrs[0]);
+ INIT_LIST_HEAD(&portal->cgr_cbs);
+ spin_lock_init(&portal->cgr_lock);
+ INIT_WORK(&portal->congestion_work, qm_congestion_task);
+ INIT_WORK(&portal->mr_work, qm_mr_process_task);
+ portal->bits = 0;
+ portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
+ QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
+ QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
+ isdr = 0xffffffff;
+ qm_out(p, QM_REG_ISDR, isdr);
+ portal->irq_sources = 0;
+ qm_out(p, QM_REG_IER, 0);
+ snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
+ qm_out(p, QM_REG_IIR, 1);
+ if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
+ dev_err(c->dev, "request_irq() failed\n");
+ goto fail_irq;
+ }
+
+ if (dpaa_set_portal_irq_affinity(c->dev, c->irq, c->cpu))
+ goto fail_affinity;
+
+ /* Need EQCR to be empty before continuing */
+ isdr &= ~QM_PIRQ_EQCI;
+ qm_out(p, QM_REG_ISDR, isdr);
+ ret = qm_eqcr_get_fill(p);
+ if (ret) {
+ dev_err(c->dev, "EQCR unclean\n");
+ goto fail_eqcr_empty;
+ }
+ isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
+ qm_out(p, QM_REG_ISDR, isdr);
+ if (qm_dqrr_current(p)) {
+ dev_dbg(c->dev, "DQRR unclean\n");
+ qm_dqrr_cdc_consume_n(p, 0xffff);
+ }
+ if (qm_mr_current(p) && drain_mr_fqrni(p)) {
+ /* special handling, drain just in case it's a few FQRNIs */
+ const union qm_mr_entry *e = qm_mr_current(p);
+
+ dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%llx\n",
+ e->verb, e->ern.rc, qm_fd_addr_get64(&e->ern.fd));
+ goto fail_dqrr_mr_empty;
+ }
+ /* Success */
+ portal->config = c;
+ qm_out(p, QM_REG_ISR, 0xffffffff);
+ qm_out(p, QM_REG_ISDR, 0);
+ if (!qman_requires_cleanup())
+ qm_out(p, QM_REG_IIR, 0);
+ /* Write a sane SDQCR */
+ qm_dqrr_sdqcr_set(p, portal->sdqcr);
+ return 0;
+
+fail_dqrr_mr_empty:
+fail_eqcr_empty:
+fail_affinity:
+ free_irq(c->irq, portal);
+fail_irq:
+ kfree(portal->cgrs);
+fail_cgrs:
+ qm_mc_finish(p);
+fail_mc:
+ qm_mr_finish(p);
+fail_mr:
+ qm_dqrr_finish(p);
+fail_dqrr:
+ qm_eqcr_finish(p);
+fail_eqcr:
+ return -EIO;
+}
+
+struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
+ const struct qman_cgrs *cgrs)
+{
+ struct qman_portal *portal;
+ int err;
+
+ portal = &per_cpu(qman_affine_portal, c->cpu);
+ err = qman_create_portal(portal, c, cgrs);
+ if (err)
+ return NULL;
+
+ spin_lock(&affine_mask_lock);
+ cpumask_set_cpu(c->cpu, &affine_mask);
+ affine_channels[c->cpu] = c->channel;
+ affine_portals[c->cpu] = portal;
+ spin_unlock(&affine_mask_lock);
+
+ return portal;
+}
+
+static void qman_destroy_portal(struct qman_portal *qm)
+{
+ const struct qm_portal_config *pcfg;
+
+ /* Stop dequeues on the portal */
+ qm_dqrr_sdqcr_set(&qm->p, 0);
+
+ /*
+ * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
+ * something related to QM_PIRQ_EQCI, this may need fixing.
+ * Also, due to the prefetching model used for CI updates in the enqueue
+ * path, this update will only invalidate the CI cacheline *after*
+ * working on it, so we need to call this twice to ensure a full update
+ * irrespective of where the enqueue processing was at when the teardown
+ * began.
+ */
+ qm_eqcr_cce_update(&qm->p);
+ qm_eqcr_cce_update(&qm->p);
+ pcfg = qm->config;
+
+ free_irq(pcfg->irq, qm);
+
+ kfree(qm->cgrs);
+ qm_mc_finish(&qm->p);
+ qm_mr_finish(&qm->p);
+ qm_dqrr_finish(&qm->p);
+ qm_eqcr_finish(&qm->p);
+
+ qm->config = NULL;
+}
+
+const struct qm_portal_config *qman_destroy_affine_portal(void)
+{
+ struct qman_portal *qm = get_affine_portal();
+ const struct qm_portal_config *pcfg;
+ int cpu;
+
+ pcfg = qm->config;
+ cpu = pcfg->cpu;
+
+ qman_destroy_portal(qm);
+
+ spin_lock(&affine_mask_lock);
+ cpumask_clear_cpu(cpu, &affine_mask);
+ spin_unlock(&affine_mask_lock);
+ put_affine_portal();
+ return pcfg;
+}
+
+/* Inline helper to reduce nesting in __poll_portal_slow() */
+static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
+ const union qm_mr_entry *msg, u8 verb)
+{
+ switch (verb) {
+ case QM_MR_VERB_FQRL:
+ DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
+ fq_clear(fq, QMAN_FQ_STATE_ORL);
+ break;
+ case QM_MR_VERB_FQRN:
+ DPAA_ASSERT(fq->state == qman_fq_state_parked ||
+ fq->state == qman_fq_state_sched);
+ DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
+ fq_clear(fq, QMAN_FQ_STATE_CHANGING);
+ if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
+ fq_set(fq, QMAN_FQ_STATE_NE);
+ if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
+ fq_set(fq, QMAN_FQ_STATE_ORL);
+ fq->state = qman_fq_state_retired;
+ break;
+ case QM_MR_VERB_FQPN:
+ DPAA_ASSERT(fq->state == qman_fq_state_sched);
+ DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
+ fq->state = qman_fq_state_parked;
+ }
+}
+
+static void qm_congestion_task(struct work_struct *work)
+{
+ struct qman_portal *p = container_of(work, struct qman_portal,
+ congestion_work);
+ struct qman_cgrs rr, c;
+ union qm_mc_result *mcr;
+ struct qman_cgr *cgr;
+
+ spin_lock(&p->cgr_lock);
+ qm_mc_start(&p->p);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ spin_unlock(&p->cgr_lock);
+ dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
+ qman_p_irqsource_add(p, QM_PIRQ_CSCI);
+ return;
+ }
+ /* mask out the ones I'm not interested in */
+ qman_cgrs_and(&rr, (struct qman_cgrs *)&mcr->querycongestion.state,
+ &p->cgrs[0]);
+ /* check previous snapshot for delta, enter/exit congestion */
+ qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
+ /* update snapshot */
+ qman_cgrs_cp(&p->cgrs[1], &rr);
+ /* Invoke callback */
+ list_for_each_entry(cgr, &p->cgr_cbs, node)
+ if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
+ cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
+ spin_unlock(&p->cgr_lock);
+ qman_p_irqsource_add(p, QM_PIRQ_CSCI);
+}
+
+static void qm_mr_process_task(struct work_struct *work)
+{
+ struct qman_portal *p = container_of(work, struct qman_portal,
+ mr_work);
+ const union qm_mr_entry *msg;
+ struct qman_fq *fq;
+ u8 verb, num = 0;
+
+ preempt_disable();
+
+ while (1) {
+ qm_mr_pvb_update(&p->p);
+ msg = qm_mr_current(&p->p);
+ if (!msg)
+ break;
+
+ verb = msg->verb & QM_MR_VERB_TYPE_MASK;
+ /* The message is a software ERN iff the 0x20 bit is clear */
+ if (verb & 0x20) {
+ switch (verb) {
+ case QM_MR_VERB_FQRNI:
+ /* nada, we drop FQRNIs on the floor */
+ break;
+ case QM_MR_VERB_FQRN:
+ case QM_MR_VERB_FQRL:
+ /* Lookup in the retirement table */
+ fq = fqid_to_fq(qm_fqid_get(&msg->fq));
+ if (WARN_ON(!fq))
+ break;
+ fq_state_change(p, fq, msg, verb);
+ if (fq->cb.fqs)
+ fq->cb.fqs(p, fq, msg);
+ break;
+ case QM_MR_VERB_FQPN:
+ /* Parked */
+ fq = tag_to_fq(be32_to_cpu(msg->fq.context_b));
+ fq_state_change(p, fq, msg, verb);
+ if (fq->cb.fqs)
+ fq->cb.fqs(p, fq, msg);
+ break;
+ case QM_MR_VERB_DC_ERN:
+ /* DCP ERN */
+ pr_crit_once("Leaking DCP ERNs!\n");
+ break;
+ default:
+ pr_crit("Invalid MR verb 0x%02x\n", verb);
+ }
+ } else {
+			/* It's a software ERN */
+ fq = tag_to_fq(be32_to_cpu(msg->ern.tag));
+ fq->cb.ern(p, fq, msg);
+ }
+ num++;
+ qm_mr_next(&p->p);
+ }
+
+ qm_mr_cci_consume(&p->p, num);
+ qman_p_irqsource_add(p, QM_PIRQ_MRI);
+ preempt_enable();
+}
+
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
+{
+ if (is & QM_PIRQ_CSCI) {
+ qman_p_irqsource_remove(p, QM_PIRQ_CSCI);
+ queue_work_on(smp_processor_id(), qm_portal_wq,
+ &p->congestion_work);
+ }
+
+ if (is & QM_PIRQ_EQRI) {
+ qm_eqcr_cce_update(&p->p);
+ qm_eqcr_set_ithresh(&p->p, 0);
+ wake_up(&affine_queue);
+ }
+
+ if (is & QM_PIRQ_MRI) {
+ qman_p_irqsource_remove(p, QM_PIRQ_MRI);
+ queue_work_on(smp_processor_id(), qm_portal_wq,
+ &p->mr_work);
+ }
+
+ return is;
+}
+
+/*
+ * remove some slowish-path stuff from the "fast path" and make sure it isn't
+ * inlined.
+ */
+static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
+{
+ p->vdqcr_owned = NULL;
+ fq_clear(fq, QMAN_FQ_STATE_VDQCR);
+ wake_up(&affine_queue);
+}
+
+/*
+ * The only states that would conflict with other things if they ran at the
+ * same time on the same cpu are:
+ *
+ * (i) setting/clearing vdqcr_owned, and
+ * (ii) clearing the NE (Not Empty) flag.
+ *
+ * Both are safe, because:
+ *
+ * (i) this clearing can only occur after qman_volatile_dequeue() has set the
+ * vdqcr_owned field (which it does before setting VDQCR), and
+ * qman_volatile_dequeue() blocks interrupts and preemption while this is
+ * done so that we can't interfere.
+ * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
+ * with (i) that API prevents us from interfering until it's safe.
+ *
+ * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far
+ * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the net
+ * advantage comes from this function not having to "lock" anything at all.
+ *
+ * Note also that the callbacks are invoked at points which are safe against the
+ * above potential conflicts, but that this function itself is not re-entrant
+ * (this is because the function tracks one end of each FIFO in the portal and
+ * we do *not* want to lock that). So the consequence is that it is safe for
+ * user callbacks to call into any QMan API.
+ */
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+ unsigned int poll_limit, bool sched_napi)
+{
+ const struct qm_dqrr_entry *dq;
+ struct qman_fq *fq;
+ enum qman_cb_dqrr_result res;
+ unsigned int limit = 0;
+
+ do {
+ qm_dqrr_pvb_update(&p->p);
+ dq = qm_dqrr_current(&p->p);
+ if (!dq)
+ break;
+
+ if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
+ /*
+ * VDQCR: don't trust context_b as the FQ may have
+ * been configured for h/w consumption and we're
+ * draining it post-retirement.
+ */
+ fq = p->vdqcr_owned;
+ /*
+ * We only set QMAN_FQ_STATE_NE when retiring, so we
+ * only need to check for clearing it when doing
+ * volatile dequeues. It's one less thing to check
+ * in the critical path (SDQCR).
+ */
+ if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
+ fq_clear(fq, QMAN_FQ_STATE_NE);
+ /*
+ * This is duplicated from the SDQCR code, but we
+ * have stuff to do before *and* after this callback,
+ * and we don't want multiple if()s in the critical
+ * path (SDQCR).
+ */
+ res = fq->cb.dqrr(p, fq, dq, sched_napi);
+ if (res == qman_cb_dqrr_stop)
+ break;
+ /* Check for VDQCR completion */
+ if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
+ clear_vdqcr(p, fq);
+ } else {
+ /* SDQCR: context_b points to the FQ */
+ fq = tag_to_fq(be32_to_cpu(dq->context_b));
+ /* Now let the callback do its stuff */
+ res = fq->cb.dqrr(p, fq, dq, sched_napi);
+ /*
+ * The callback can request that we exit without
+			 * consuming this entry or advancing.
+ */
+ if (res == qman_cb_dqrr_stop)
+ break;
+ }
+ /* Interpret 'dq' from a driver perspective. */
+ /*
+ * Parking isn't possible unless HELDACTIVE was set. NB,
+ * FORCEELIGIBLE implies HELDACTIVE, so we only need to
+ * check for HELDACTIVE to cover both.
+ */
+ DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
+ (res != qman_cb_dqrr_park));
+ /* just means "skip it, I'll consume it myself later on" */
+ if (res != qman_cb_dqrr_defer)
+ qm_dqrr_cdc_consume_1ptr(&p->p, dq,
+ res == qman_cb_dqrr_park);
+ /* Move forward */
+ qm_dqrr_next(&p->p);
+ /*
+ * Entry processed and consumed, increment our counter. The
+ * callback can request that we exit after consuming the
+ * entry, and we also exit if we reach our processing limit,
+ * so loop back only if neither of these conditions is met.
+ */
+ } while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);
+
+ return limit;
+}
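+
+/*
+ * Editor's illustrative sketch, not driver code: a minimal DQRR callback of
+ * the shape __poll_portal_fast() invokes; my_rx() and process_fd() are
+ * hypothetical names, and a real consumer would also honour sched_napi.
+ *
+ *	static enum qman_cb_dqrr_result
+ *	my_rx(struct qman_portal *p, struct qman_fq *fq,
+ *	      const struct qm_dqrr_entry *dq, bool sched_napi)
+ *	{
+ *		process_fd(&dq->fd);
+ *		return qman_cb_dqrr_consume;
+ *	}
+ */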
+
+void qman_p_irqsource_add(struct qman_portal *p, u32 bits)
+{
+ unsigned long irqflags;
+
+ local_irq_save(irqflags);
+ p->irq_sources |= bits & QM_PIRQ_VISIBLE;
+ qm_out(&p->p, QM_REG_IER, p->irq_sources);
+ local_irq_restore(irqflags);
+}
+EXPORT_SYMBOL(qman_p_irqsource_add);
+
+void qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
+{
+ unsigned long irqflags;
+ u32 ier;
+
+ /*
+ * Our interrupt handler only processes+clears status register bits that
+ * are in p->irq_sources. As we're trimming that mask, if one of them
+ * were to assert in the status register just before we remove it from
+ * the enable register, there would be an interrupt-storm when we
+ * release the IRQ lock. So we wait for the enable register update to
+ * take effect in h/w (by reading it back) and then clear all other bits
+ * in the status register. Ie. we clear them from ISR once it's certain
+ * IER won't allow them to reassert.
+ */
+ local_irq_save(irqflags);
+ bits &= QM_PIRQ_VISIBLE;
+ p->irq_sources &= ~bits;
+ qm_out(&p->p, QM_REG_IER, p->irq_sources);
+ ier = qm_in(&p->p, QM_REG_IER);
+ /*
+ * Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
+ * data-dependency, ie. to protect against re-ordering.
+ */
+ qm_out(&p->p, QM_REG_ISR, ~ier);
+ local_irq_restore(irqflags);
+}
+EXPORT_SYMBOL(qman_p_irqsource_remove);
+
+const cpumask_t *qman_affine_cpus(void)
+{
+ return &affine_mask;
+}
+EXPORT_SYMBOL(qman_affine_cpus);
+
+u16 qman_affine_channel(int cpu)
+{
+ if (cpu < 0) {
+ struct qman_portal *portal = get_affine_portal();
+
+ cpu = portal->config->cpu;
+ put_affine_portal();
+ }
+ WARN_ON(!cpumask_test_cpu(cpu, &affine_mask));
+ return affine_channels[cpu];
+}
+EXPORT_SYMBOL(qman_affine_channel);
+
+struct qman_portal *qman_get_affine_portal(int cpu)
+{
+ return affine_portals[cpu];
+}
+EXPORT_SYMBOL(qman_get_affine_portal);
+
+int qman_start_using_portal(struct qman_portal *p, struct device *dev)
+{
+ return (!device_link_add(dev, p->config->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER)) ? -EINVAL : 0;
+}
+EXPORT_SYMBOL(qman_start_using_portal);
+
+int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
+{
+ return __poll_portal_fast(p, limit, false);
+}
+EXPORT_SYMBOL(qman_p_poll_dqrr);
+
+void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
+{
+ unsigned long irqflags;
+
+ local_irq_save(irqflags);
+ pools &= p->config->pools;
+ p->sdqcr |= pools;
+ qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
+ local_irq_restore(irqflags);
+}
+EXPORT_SYMBOL(qman_p_static_dequeue_add);
+
+/* Frame queue API */
+
+static const char *mcr_result_str(u8 result)
+{
+ switch (result) {
+ case QM_MCR_RESULT_NULL:
+ return "QM_MCR_RESULT_NULL";
+ case QM_MCR_RESULT_OK:
+ return "QM_MCR_RESULT_OK";
+ case QM_MCR_RESULT_ERR_FQID:
+ return "QM_MCR_RESULT_ERR_FQID";
+ case QM_MCR_RESULT_ERR_FQSTATE:
+ return "QM_MCR_RESULT_ERR_FQSTATE";
+ case QM_MCR_RESULT_ERR_NOTEMPTY:
+ return "QM_MCR_RESULT_ERR_NOTEMPTY";
+ case QM_MCR_RESULT_PENDING:
+ return "QM_MCR_RESULT_PENDING";
+ case QM_MCR_RESULT_ERR_BADCOMMAND:
+ return "QM_MCR_RESULT_ERR_BADCOMMAND";
+ }
+ return "<unknown MCR result>";
+}
+
+int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
+{
+ if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
+ int ret = qman_alloc_fqid(&fqid);
+
+ if (ret)
+ return ret;
+ }
+ fq->fqid = fqid;
+ fq->flags = flags;
+ fq->state = qman_fq_state_oos;
+ fq->cgr_groupid = 0;
+
+ /* A context_b of 0 is allegedly special, so don't use that fqid */
+ if (fqid == 0 || fqid >= num_fqids) {
+ WARN(1, "bad fqid %d\n", fqid);
+ return -EINVAL;
+ }
+
+ fq->idx = fqid * 2;
+ if (flags & QMAN_FQ_FLAG_NO_MODIFY)
+ fq->idx++;
+
+ WARN_ON(fq_table[fq->idx]);
+ fq_table[fq->idx] = fq;
+
+ return 0;
+}
+EXPORT_SYMBOL(qman_create_fq);
+
+void qman_destroy_fq(struct qman_fq *fq)
+{
+ /*
+ * We don't need to lock the FQ as it is a pre-condition that the FQ be
+ * quiesced. Instead, run some checks.
+ */
+ switch (fq->state) {
+ case qman_fq_state_parked:
+ case qman_fq_state_oos:
+ if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
+ qman_release_fqid(fq->fqid);
+
+ DPAA_ASSERT(fq_table[fq->idx]);
+ fq_table[fq->idx] = NULL;
+ return;
+ default:
+ break;
+ }
+ DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
+}
+EXPORT_SYMBOL(qman_destroy_fq);
+
+u32 qman_fq_fqid(struct qman_fq *fq)
+{
+ return fq->fqid;
+}
+EXPORT_SYMBOL(qman_fq_fqid);
+
+int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p;
+ u8 res, myverb;
+ int ret = 0;
+
+ myverb = (flags & QMAN_INITFQ_FLAG_SCHED)
+ ? QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;
+
+ if (fq->state != qman_fq_state_oos &&
+ fq->state != qman_fq_state_parked)
+ return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+ return -EINVAL;
+#endif
+ if (opts && (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_OAC)) {
+		/* OAC and TDTHRESH can't be set at the same time */
+ if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_TDTHRESH)
+ return -EINVAL;
+ }
+ /* Issue an INITFQ_[PARKED|SCHED] management command */
+ p = get_affine_portal();
+ if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
+ (fq->state != qman_fq_state_oos &&
+ fq->state != qman_fq_state_parked)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ if (opts)
+ mcc->initfq = *opts;
+ qm_fqid_set(&mcc->fq, fq->fqid);
+ mcc->initfq.count = 0;
+ /*
+ * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a
+ * demux pointer. Otherwise, the caller-provided value is allowed to
+ * stand, don't overwrite it.
+ */
+ if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
+ dma_addr_t phys_fq;
+
+ mcc->initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTB);
+ mcc->initfq.fqd.context_b = cpu_to_be32(fq_to_tag(fq));
+ /*
+ * and the physical address - NB, if the user wasn't trying to
+ * set CONTEXTA, clear the stashing settings.
+ */
+ if (!(be16_to_cpu(mcc->initfq.we_mask) &
+ QM_INITFQ_WE_CONTEXTA)) {
+ mcc->initfq.we_mask |=
+ cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
+ memset(&mcc->initfq.fqd.context_a, 0,
+ sizeof(mcc->initfq.fqd.context_a));
+ } else {
+ struct qman_portal *p = qman_dma_portal;
+
+ phys_fq = dma_map_single(p->config->dev, fq,
+ sizeof(*fq), DMA_TO_DEVICE);
+ if (dma_mapping_error(p->config->dev, phys_fq)) {
+ dev_err(p->config->dev, "dma_mapping failed\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
+ }
+ }
+ if (flags & QMAN_INITFQ_FLAG_LOCAL) {
+ int wq = 0;
+
+ if (!(be16_to_cpu(mcc->initfq.we_mask) &
+ QM_INITFQ_WE_DESTWQ)) {
+ mcc->initfq.we_mask |=
+ cpu_to_be16(QM_INITFQ_WE_DESTWQ);
+ wq = 4;
+ }
+ qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq);
+ }
+ qm_mc_commit(&p->p, myverb);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_err(p->config->dev, "MCR timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
+ res = mcr->result;
+ if (res != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+ if (opts) {
+ if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_FQCTRL) {
+ if (be16_to_cpu(opts->fqd.fq_ctrl) & QM_FQCTRL_CGE)
+ fq_set(fq, QMAN_FQ_STATE_CGR_EN);
+ else
+ fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
+ }
+ if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_CGID)
+ fq->cgr_groupid = opts->fqd.cgid;
+ }
+ fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
+ qman_fq_state_sched : qman_fq_state_parked;
+
+out:
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_init_fq);
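+
+/*
+ * Editor's illustrative sketch, not driver code: typical FQ bring-up with
+ * the two calls above, allocating a dynamic FQID and scheduling the queue
+ * immediately; my_fq and my_rx are hypothetical.
+ *
+ *	static struct qman_fq my_fq = {
+ *		.cb.dqrr = my_rx,
+ *	};
+ *
+ *	ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &my_fq);
+ *	if (!ret)
+ *		ret = qman_init_fq(&my_fq, QMAN_INITFQ_FLAG_SCHED, NULL);
+ */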
+
+int qman_schedule_fq(struct qman_fq *fq)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p;
+ int ret = 0;
+
+ if (fq->state != qman_fq_state_parked)
+ return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+ return -EINVAL;
+#endif
+ /* Issue a ALTERFQ_SCHED management command */
+ p = get_affine_portal();
+ if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
+ fq->state != qman_fq_state_parked) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ qm_fqid_set(&mcc->fq, fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_err(p->config->dev, "ALTER_SCHED timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+ fq->state = qman_fq_state_sched;
+out:
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_schedule_fq);
+
+int qman_retire_fq(struct qman_fq *fq, u32 *flags)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p;
+ int ret;
+ u8 res;
+
+ if (fq->state != qman_fq_state_parked &&
+ fq->state != qman_fq_state_sched)
+ return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+ return -EINVAL;
+#endif
+ p = get_affine_portal();
+ if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
+ fq->state == qman_fq_state_retired ||
+ fq->state == qman_fq_state_oos) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ qm_fqid_set(&mcc->fq, fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_crit(p->config->dev, "ALTER_RETIRE timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
+ res = mcr->result;
+ /*
+ * "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
+ * and defer the flags until FQRNI or FQRN (respectively) show up. But
+ * "Friendly" is to process OK immediately, and not set CHANGING. We do
+ * friendly, otherwise the caller doesn't necessarily have a fully
+ * "retired" FQ on return even if the retirement was immediate. However
+ * this does mean some code duplication between here and
+ * fq_state_change().
+ */
+ if (res == QM_MCR_RESULT_OK) {
+ ret = 0;
+ /* Process 'fq' right away, we'll ignore FQRNI */
+ if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
+ fq_set(fq, QMAN_FQ_STATE_NE);
+ if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
+ fq_set(fq, QMAN_FQ_STATE_ORL);
+ if (flags)
+ *flags = fq->flags;
+ fq->state = qman_fq_state_retired;
+ if (fq->cb.fqs) {
+ /*
+ * Another issue with supporting "immediate" retirement
+ * is that we're forced to drop FQRNIs, because by the
+ * time they're seen it may already be "too late" (the
+ * fq may have been OOS'd and free()'d already). But if
+ * the upper layer wants a callback whether it's
+ * immediate or not, we have to fake a "MR" entry to
+ * look like an FQRNI...
+ */
+ union qm_mr_entry msg;
+
+ msg.verb = QM_MR_VERB_FQRNI;
+ msg.fq.fqs = mcr->alterfq.fqs;
+ qm_fqid_set(&msg.fq, fq->fqid);
+ msg.fq.context_b = cpu_to_be32(fq_to_tag(fq));
+ fq->cb.fqs(p, fq, &msg);
+ }
+ } else if (res == QM_MCR_RESULT_PENDING) {
+ ret = 1;
+ fq_set(fq, QMAN_FQ_STATE_CHANGING);
+ } else {
+ ret = -EIO;
+ }
+out:
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_retire_fq);
+
+int qman_oos_fq(struct qman_fq *fq)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p;
+ int ret = 0;
+
+ if (fq->state != qman_fq_state_retired)
+ return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+ return -EINVAL;
+#endif
+ p = get_affine_portal();
+ if (fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS) ||
+ fq->state != qman_fq_state_retired) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ qm_fqid_set(&mcc->fq, fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+ fq->state = qman_fq_state_oos;
+out:
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_oos_fq);
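+
+/*
+ * Editor's illustrative sketch, not driver code: orderly teardown using the
+ * calls above. A return of 1 from qman_retire_fq() means retirement is
+ * pending and completes via the FQRN message; this sketch only handles the
+ * immediate case.
+ *
+ *	u32 flags;
+ *
+ *	ret = qman_retire_fq(&my_fq, &flags);
+ *	if (ret == 0)
+ *		ret = qman_oos_fq(&my_fq);
+ *	if (ret == 0)
+ *		qman_destroy_fq(&my_fq);
+ */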
+
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ int ret = 0;
+
+ mcc = qm_mc_start(&p->p);
+ qm_fqid_set(&mcc->fq, fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+ if (mcr->result == QM_MCR_RESULT_OK)
+ *fqd = mcr->queryfq.fqd;
+ else
+ ret = -EIO;
+out:
+ put_affine_portal();
+ return ret;
+}
+
+int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ int ret = 0;
+
+ mcc = qm_mc_start(&p->p);
+ qm_fqid_set(&mcc->fq, fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+ if (mcr->result == QM_MCR_RESULT_OK)
+ *np = mcr->queryfq_np;
+ else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
+ ret = -ERANGE;
+ else
+ ret = -EIO;
+out:
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_query_fq_np);
+
+static int qman_query_cgr(struct qman_cgr *cgr,
+ struct qm_mcr_querycgr *cgrd)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ int ret = 0;
+
+ mcc = qm_mc_start(&p->p);
+ mcc->cgr.cgid = cgr->cgrid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
+ if (mcr->result == QM_MCR_RESULT_OK)
+ *cgrd = mcr->querycgr;
+ else {
+ dev_err(p->config->dev, "QUERY_CGR failed: %s\n",
+ mcr_result_str(mcr->result));
+ ret = -EIO;
+ }
+out:
+ put_affine_portal();
+ return ret;
+}
+
+int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result)
+{
+ struct qm_mcr_querycgr query_cgr;
+ int err;
+
+ err = qman_query_cgr(cgr, &query_cgr);
+ if (err)
+ return err;
+
+ *result = !!query_cgr.cgr.cs;
+ return 0;
+}
+EXPORT_SYMBOL(qman_query_cgr_congested);
+
+/* internal function used as a wait_event() expression */
+static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
+{
+ unsigned long irqflags;
+ int ret = -EBUSY;
+
+ local_irq_save(irqflags);
+ if (p->vdqcr_owned)
+ goto out;
+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+ goto out;
+
+ fq_set(fq, QMAN_FQ_STATE_VDQCR);
+ p->vdqcr_owned = fq;
+ qm_dqrr_vdqcr_set(&p->p, vdqcr);
+ ret = 0;
+out:
+ local_irq_restore(irqflags);
+ return ret;
+}
+
+static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
+{
+ int ret;
+
+ *p = get_affine_portal();
+ ret = set_p_vdqcr(*p, fq, vdqcr);
+ put_affine_portal();
+ return ret;
+}
+
+static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
+ u32 vdqcr, u32 flags)
+{
+ int ret = 0;
+
+ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+ ret = wait_event_interruptible(affine_queue,
+ !set_vdqcr(p, fq, vdqcr));
+ else
+ wait_event(affine_queue, !set_vdqcr(p, fq, vdqcr));
+ return ret;
+}
+
+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr)
+{
+ struct qman_portal *p;
+ int ret;
+
+ if (fq->state != qman_fq_state_parked &&
+ fq->state != qman_fq_state_retired)
+ return -EINVAL;
+ if (vdqcr & QM_VDQCR_FQID_MASK)
+ return -EINVAL;
+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+ return -EBUSY;
+ vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
+ if (flags & QMAN_VOLATILE_FLAG_WAIT)
+ ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
+ else
+ ret = set_vdqcr(&p, fq, vdqcr);
+ if (ret)
+ return ret;
+ /* VDQCR is set */
+ if (flags & QMAN_VOLATILE_FLAG_FINISH) {
+ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+ /*
+ * NB: don't propagate any error - the caller wouldn't
+ * know whether the VDQCR was issued or not. A signal
+ * could arrive after returning anyway, so the caller
+ * can check signal_pending() if that's an issue.
+ */
+ wait_event_interruptible(affine_queue,
+ !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+ else
+ wait_event(affine_queue,
+ !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+ }
+ return 0;
+}
+EXPORT_SYMBOL(qman_volatile_dequeue);
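+
+/*
+ * Editor's illustrative sketch, not driver code: draining up to three frames
+ * from a retired FQ, blocking until the command is both issued and finished;
+ * the frames are delivered through fq->cb.dqrr as usual.
+ *
+ *	ret = qman_volatile_dequeue(&my_fq,
+ *				    QMAN_VOLATILE_FLAG_WAIT |
+ *				    QMAN_VOLATILE_FLAG_FINISH,
+ *				    QM_VDQCR_NUMFRAMES_SET(3));
+ */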
+
+static void update_eqcr_ci(struct qman_portal *p, u8 avail)
+{
+ if (avail)
+ qm_eqcr_cce_prefetch(&p->p);
+ else
+ qm_eqcr_cce_update(&p->p);
+}
+
+int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd)
+{
+ struct qman_portal *p;
+ struct qm_eqcr_entry *eq;
+ unsigned long irqflags;
+ u8 avail;
+
+ p = get_affine_portal();
+ local_irq_save(irqflags);
+
+ if (p->use_eqcr_ci_stashing) {
+ /*
+ * The stashing case is easy, only update if we need to in
+ * order to try and liberate ring entries.
+ */
+ eq = qm_eqcr_start_stash(&p->p);
+ } else {
+ /*
+ * The non-stashing case is harder, need to prefetch ahead of
+ * time.
+ */
+ avail = qm_eqcr_get_avail(&p->p);
+ if (avail < 2)
+ update_eqcr_ci(p, avail);
+ eq = qm_eqcr_start_no_stash(&p->p);
+ }
+
+ if (unlikely(!eq))
+ goto out;
+
+ qm_fqid_set(eq, fq->fqid);
+ eq->tag = cpu_to_be32(fq_to_tag(fq));
+ eq->fd = *fd;
+
+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE);
+out:
+ local_irq_restore(irqflags);
+ put_affine_portal();
+ return 0;
+}
+EXPORT_SYMBOL(qman_enqueue);
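+
+/*
+ * Editor's illustrative sketch, not driver code: enqueuing one contiguous
+ * frame. dma_addr and len stand for a buffer the caller has already
+ * DMA-mapped; qm_fd_addr_set64() and qm_fd_set_contig() are qman.h
+ * accessors (the matching qm_fd_addr_get64() is used earlier in this file).
+ *
+ *	struct qm_fd fd;
+ *
+ *	memset(&fd, 0, sizeof(fd));
+ *	qm_fd_addr_set64(&fd, dma_addr);
+ *	qm_fd_set_contig(&fd, 0, len);
+ *	ret = qman_enqueue(&my_fq, &fd);
+ */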
+
+static int qm_modify_cgr(struct qman_cgr *cgr, u32 flags,
+ struct qm_mcc_initcgr *opts)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ u8 verb = QM_MCC_VERB_MODIFYCGR;
+ int ret = 0;
+
+ mcc = qm_mc_start(&p->p);
+ if (opts)
+ mcc->initcgr = *opts;
+ mcc->initcgr.cgid = cgr->cgrid;
+ if (flags & QMAN_CGR_FLAG_USE_INIT)
+ verb = QM_MCC_VERB_INITCGR;
+ qm_mc_commit(&p->p, verb);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
+ if (mcr->result != QM_MCR_RESULT_OK)
+ ret = -EIO;
+
+out:
+ put_affine_portal();
+ return ret;
+}
+
+#define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0)
+
+/* congestion state change notification target update control */
+static void qm_cgr_cscn_targ_set(struct __qm_mc_cgr *cgr, int pi, u32 val)
+{
+ if (qman_ip_rev >= QMAN_REV30)
+ cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi |
+ QM_CGR_TARG_UDP_CTRL_WRITE_BIT);
+ else
+ cgr->cscn_targ = cpu_to_be32(val | QM_CGR_TARG_PORTAL(pi));
+}
+
+static void qm_cgr_cscn_targ_clear(struct __qm_mc_cgr *cgr, int pi, u32 val)
+{
+ if (qman_ip_rev >= QMAN_REV30)
+ cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi);
+ else
+ cgr->cscn_targ = cpu_to_be32(val & ~QM_CGR_TARG_PORTAL(pi));
+}
+
+static u8 qman_cgr_cpus[CGR_NUM];
+
+void qman_init_cgr_all(void)
+{
+ struct qman_cgr cgr;
+ int err_cnt = 0;
+
+ for (cgr.cgrid = 0; cgr.cgrid < CGR_NUM; cgr.cgrid++) {
+ if (qm_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL))
+ err_cnt++;
+ }
+
+ if (err_cnt)
+ pr_err("Warning: %d error%s while initialising CGR h/w\n",
+ err_cnt, (err_cnt > 1) ? "s" : "");
+}
+
+int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+ struct qm_mcc_initcgr *opts)
+{
+ struct qm_mcr_querycgr cgr_state;
+ int ret;
+ struct qman_portal *p;
+
+ /*
+ * We have to check that the provided CGRID is within the limits of the
+ * data-structures, for obvious reasons. However we'll let h/w take
+ * care of determining whether it's within the limits of what exists on
+ * the SoC.
+ */
+ if (cgr->cgrid >= CGR_NUM)
+ return -EINVAL;
+
+ preempt_disable();
+ p = get_affine_portal();
+ qman_cgr_cpus[cgr->cgrid] = smp_processor_id();
+ preempt_enable();
+
+ cgr->chan = p->config->channel;
+ spin_lock(&p->cgr_lock);
+
+ if (opts) {
+ struct qm_mcc_initcgr local_opts = *opts;
+
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret)
+ goto out;
+
+ qm_cgr_cscn_targ_set(&local_opts.cgr, PORTAL_IDX(p),
+ be32_to_cpu(cgr_state.cgr.cscn_targ));
+ local_opts.we_mask |= cpu_to_be16(QM_CGR_WE_CSCN_TARG);
+
+ /* send init if flags indicate so */
+ if (flags & QMAN_CGR_FLAG_USE_INIT)
+ ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
+ &local_opts);
+ else
+ ret = qm_modify_cgr(cgr, 0, &local_opts);
+ if (ret)
+ goto out;
+ }
+
+ list_add(&cgr->node, &p->cgr_cbs);
+
+ /* Determine if newly added object requires its callback to be called */
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret) {
+ /* we can't go back, so proceed and return success */
+ dev_err(p->config->dev, "CGR HW state partially modified\n");
+ ret = 0;
+ goto out;
+ }
+ if (cgr->cb && cgr_state.cgr.cscn_en &&
+ qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
+ cgr->cb(p, cgr, 1);
+out:
+ spin_unlock(&p->cgr_lock);
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_create_cgr);
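+
+/*
+ * Editor's illustrative sketch, not driver code: registering for congestion
+ * state change notifications; my_cgr and my_cscn() are hypothetical, and the
+ * callback's last argument is non-zero on congestion entry, zero on exit.
+ *
+ *	static void my_cscn(struct qman_portal *p, struct qman_cgr *cgr,
+ *			    int congested)
+ *	{
+ *		pr_info("CGR %u %s congestion\n", cgr->cgrid,
+ *			congested ? "entered" : "exited");
+ *	}
+ *
+ *	static struct qman_cgr my_cgr = {
+ *		.cgrid = 0,
+ *		.cb = my_cscn,
+ *	};
+ *
+ *	ret = qman_create_cgr(&my_cgr, QMAN_CGR_FLAG_USE_INIT, NULL);
+ */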
+
+static struct qman_portal *qman_cgr_get_affine_portal(struct qman_cgr *cgr)
+{
+ struct qman_portal *p = get_affine_portal();
+
+ if (cgr->chan != p->config->channel) {
+ /* attempt to delete from other portal than creator */
+		dev_err(p->config->dev, "CGR not owned by current portal\n");
+ dev_dbg(p->config->dev, " create 0x%x, delete 0x%x\n",
+ cgr->chan, p->config->channel);
+ put_affine_portal();
+ return NULL;
+ }
+
+ return p;
+}
+
+int qman_delete_cgr(struct qman_cgr *cgr)
+{
+ unsigned long irqflags;
+ struct qm_mcr_querycgr cgr_state;
+ struct qm_mcc_initcgr local_opts;
+ int ret = 0;
+ struct qman_cgr *i;
+ struct qman_portal *p = qman_cgr_get_affine_portal(cgr);
+
+ if (!p)
+ return -EINVAL;
+
+ memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+ spin_lock_irqsave(&p->cgr_lock, irqflags);
+ list_del(&cgr->node);
+ /*
+ * If there are no other CGR objects for this CGRID in the list,
+ * update CSCN_TARG accordingly
+ */
+ list_for_each_entry(i, &p->cgr_cbs, node)
+ if (i->cgrid == cgr->cgrid && i->cb)
+ goto release_lock;
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret) {
+ /* add back to the list */
+ list_add(&cgr->node, &p->cgr_cbs);
+ goto release_lock;
+ }
+
+ local_opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_TARG);
+ qm_cgr_cscn_targ_clear(&local_opts.cgr, PORTAL_IDX(p),
+ be32_to_cpu(cgr_state.cgr.cscn_targ));
+
+ ret = qm_modify_cgr(cgr, 0, &local_opts);
+ if (ret)
+ /* add back to the list */
+ list_add(&cgr->node, &p->cgr_cbs);
+release_lock:
+ spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_delete_cgr);
+
+struct cgr_comp {
+ struct qman_cgr *cgr;
+ struct completion completion;
+};
+
+static void qman_delete_cgr_smp_call(void *p)
+{
+ qman_delete_cgr((struct qman_cgr *)p);
+}
+
+void qman_delete_cgr_safe(struct qman_cgr *cgr)
+{
+ preempt_disable();
+ if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
+ smp_call_function_single(qman_cgr_cpus[cgr->cgrid],
+ qman_delete_cgr_smp_call, cgr, true);
+ preempt_enable();
+ return;
+ }
+
+ qman_delete_cgr(cgr);
+ preempt_enable();
+}
+EXPORT_SYMBOL(qman_delete_cgr_safe);
+
+static int qman_update_cgr(struct qman_cgr *cgr, struct qm_mcc_initcgr *opts)
+{
+ int ret;
+ unsigned long irqflags;
+ struct qman_portal *p = qman_cgr_get_affine_portal(cgr);
+
+ if (!p)
+ return -EINVAL;
+
+ spin_lock_irqsave(&p->cgr_lock, irqflags);
+ ret = qm_modify_cgr(cgr, 0, opts);
+ spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+ put_affine_portal();
+ return ret;
+}
+
+struct update_cgr_params {
+ struct qman_cgr *cgr;
+ struct qm_mcc_initcgr *opts;
+ int ret;
+};
+
+static void qman_update_cgr_smp_call(void *p)
+{
+ struct update_cgr_params *params = p;
+
+ params->ret = qman_update_cgr(params->cgr, params->opts);
+}
+
+int qman_update_cgr_safe(struct qman_cgr *cgr, struct qm_mcc_initcgr *opts)
+{
+ struct update_cgr_params params = {
+ .cgr = cgr,
+ .opts = opts,
+ };
+
+ preempt_disable();
+ if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id())
+ smp_call_function_single(qman_cgr_cpus[cgr->cgrid],
+ qman_update_cgr_smp_call, &params,
+ true);
+ else
+ params.ret = qman_update_cgr(cgr, opts);
+ preempt_enable();
+ return params.ret;
+}
+EXPORT_SYMBOL(qman_update_cgr_safe);
+
+/* Cleanup FQs */
+
+static int _qm_mr_consume_and_match_verb(struct qm_portal *p, int v)
+{
+ const union qm_mr_entry *msg;
+ int found = 0;
+
+ qm_mr_pvb_update(p);
+ msg = qm_mr_current(p);
+ while (msg) {
+ if ((msg->verb & QM_MR_VERB_TYPE_MASK) == v)
+ found = 1;
+ qm_mr_next(p);
+ qm_mr_cci_consume_to_current(p);
+ qm_mr_pvb_update(p);
+ msg = qm_mr_current(p);
+ }
+ return found;
+}
+
+static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
+ bool wait)
+{
+ const struct qm_dqrr_entry *dqrr;
+ int found = 0;
+
+ do {
+ qm_dqrr_pvb_update(p);
+ dqrr = qm_dqrr_current(p);
+ if (!dqrr)
+ cpu_relax();
+ } while (wait && !dqrr);
+
+ while (dqrr) {
+ if (qm_fqid_get(dqrr) == fqid && (dqrr->stat & s))
+ found = 1;
+ qm_dqrr_cdc_consume_1ptr(p, dqrr, 0);
+ qm_dqrr_pvb_update(p);
+ qm_dqrr_next(p);
+ dqrr = qm_dqrr_current(p);
+ }
+ return found;
+}
+
+#define qm_mr_drain(p, V) \
+ _qm_mr_consume_and_match_verb(p, QM_MR_VERB_##V)
+
+#define qm_dqrr_drain(p, f, S) \
+ _qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, false)
+
+#define qm_dqrr_drain_wait(p, f, S) \
+ _qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, true)
+
+#define qm_dqrr_drain_nomatch(p) \
+ _qm_dqrr_consume_and_match(p, 0, 0, false)
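+
+/*
+ * Editor's note, an expansion example for the wrappers above:
+ * qm_mr_drain(p, FQRN) becomes _qm_mr_consume_and_match_verb(p,
+ * QM_MR_VERB_FQRN), i.e. consume the entire MR and report whether an FQRN
+ * message was seen on the way.
+ */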
+
+int qman_shutdown_fq(u32 fqid)
+{
+ struct qman_portal *p, *channel_portal;
+ struct device *dev;
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ int orl_empty, drain = 0, ret = 0;
+ u32 channel, res;
+ u8 state;
+
+ p = get_affine_portal();
+ dev = p->config->dev;
+ /* Determine the state of the FQID */
+ mcc = qm_mc_start(&p->p);
+ qm_fqid_set(&mcc->fq, fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_err(dev, "QUERYFQ_NP timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+ state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
+ if (state == QM_MCR_NP_STATE_OOS)
+ goto out; /* Already OOS, no need to do anymore checks */
+
+ /* Query which channel the FQ is using */
+ mcc = qm_mc_start(&p->p);
+ qm_fqid_set(&mcc->fq, fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_err(dev, "QUERYFQ timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+	/* Need to store the channel since the MCR gets reused */
+	channel = qm_fqd_get_chan(&mcr->queryfq.fqd);
+
+ if (channel < qm_channel_pool1) {
+ channel_portal = get_portal_for_channel(channel);
+ if (channel_portal == NULL) {
+ dev_err(dev, "Can't find portal for dedicated channel 0x%x\n",
+ channel);
+ ret = -EIO;
+ goto out;
+ }
+ } else
+ channel_portal = p;
+
+ switch (state) {
+ case QM_MCR_NP_STATE_TEN_SCHED:
+ case QM_MCR_NP_STATE_TRU_SCHED:
+ case QM_MCR_NP_STATE_ACTIVE:
+ case QM_MCR_NP_STATE_PARKED:
+ orl_empty = 0;
+ mcc = qm_mc_start(&channel_portal->p);
+ qm_fqid_set(&mcc->fq, fqid);
+ qm_mc_commit(&channel_portal->p, QM_MCC_VERB_ALTER_RETIRE);
+ if (!qm_mc_result_timeout(&channel_portal->p, &mcr)) {
+ dev_err(dev, "ALTER_RETIRE timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_RETIRE);
+ res = mcr->result; /* Make a copy as we reuse MCR below */
+
+ if (res == QM_MCR_RESULT_OK)
+ drain_mr_fqrni(&channel_portal->p);
+
+ if (res == QM_MCR_RESULT_PENDING) {
+			/*
+			 * Need to wait for the FQRN in the message ring, which
+			 * will only occur once the FQ has been drained. In
+			 * order for the FQ to drain, the portal needs to be
+			 * set to dequeue from the channel the FQ is scheduled
+			 * on.
+			 */
+ int found_fqrn = 0;
+
+ /* Flag that we need to drain FQ */
+ drain = 1;
+
+ if (channel >= qm_channel_pool1 &&
+ channel < qm_channel_pool1 + 15) {
+ /* Pool channel, enable the bit in the portal */
+ } else if (channel < qm_channel_pool1) {
+ /* Dedicated channel */
+ } else {
+ dev_err(dev, "Can't recover FQ 0x%x, ch: 0x%x",
+ fqid, channel);
+ ret = -EBUSY;
+ goto out;
+ }
+ /* Set the sdqcr to drain this channel */
+ if (channel < qm_channel_pool1)
+ qm_dqrr_sdqcr_set(&channel_portal->p,
+ QM_SDQCR_TYPE_ACTIVE |
+ QM_SDQCR_CHANNELS_DEDICATED);
+ else
+ qm_dqrr_sdqcr_set(&channel_portal->p,
+ QM_SDQCR_TYPE_ACTIVE |
+ QM_SDQCR_CHANNELS_POOL_CONV
+ (channel));
+ do {
+				/* Keep draining DQRR while checking the MR */
+ qm_dqrr_drain_nomatch(&channel_portal->p);
+ /* Process message ring too */
+ found_fqrn = qm_mr_drain(&channel_portal->p,
+ FQRN);
+ cpu_relax();
+ } while (!found_fqrn);
+ /* Restore SDQCR */
+ qm_dqrr_sdqcr_set(&channel_portal->p,
+ channel_portal->sdqcr);
+
+ }
+ if (res != QM_MCR_RESULT_OK &&
+ res != QM_MCR_RESULT_PENDING) {
+ dev_err(dev, "retire_fq failed: FQ 0x%x, res=0x%x\n",
+ fqid, res);
+ ret = -EIO;
+ goto out;
+ }
+ if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
+ /*
+ * ORL had no entries, no need to wait until the
+ * ERNs come in
+ */
+ orl_empty = 1;
+ }
+ /*
+ * Retirement succeeded, check to see if FQ needs
+ * to be drained
+ */
+ if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
+ /* FQ is Not Empty, drain using volatile DQ commands */
+ do {
+ u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);
+
+ qm_dqrr_vdqcr_set(&p->p, vdqcr);
+ /*
+ * Wait for a dequeue and process the dequeues,
+ * making sure to empty the ring completely
+ */
+ } while (!qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY));
+ }
+
+ while (!orl_empty) {
+ /* Wait for the ORL to have been completely drained */
+ orl_empty = qm_mr_drain(&p->p, FQRL);
+ cpu_relax();
+ }
+ mcc = qm_mc_start(&p->p);
+ qm_fqid_set(&mcc->fq, fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_OOS);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ dev_err(dev, "OOS after drain fail: FQ 0x%x (0x%x)\n",
+ fqid, mcr->result);
+ ret = -EIO;
+ goto out;
+ }
+ break;
+
+ case QM_MCR_NP_STATE_RETIRED:
+ /* Send OOS Command */
+ mcc = qm_mc_start(&p->p);
+ qm_fqid_set(&mcc->fq, fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_OOS);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n",
+ fqid, mcr->result);
+ ret = -EIO;
+ goto out;
+ }
+ break;
+
+ case QM_MCR_NP_STATE_OOS:
+ /* Done */
+ break;
+
+ default:
+ ret = -EIO;
+ }
+
+out:
+ put_affine_portal();
+ return ret;
+}
+
+const struct qm_portal_config *qman_get_qm_portal_config(
+ struct qman_portal *portal)
+{
+ return portal->config;
+}
+EXPORT_SYMBOL(qman_get_qm_portal_config);
+
+struct gen_pool *qm_fqalloc; /* FQID allocator */
+struct gen_pool *qm_qpalloc; /* pool-channel allocator */
+struct gen_pool *qm_cgralloc; /* CGR ID allocator */
+
+static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
+{
+ unsigned long addr;
+
+ if (!p)
+ return -ENODEV;
+
+ addr = gen_pool_alloc(p, cnt);
+ if (!addr)
+ return -ENOMEM;
+
+ *result = addr & ~DPAA_GENALLOC_OFF;
+
+ return 0;
+}
+
+int qman_alloc_fqid_range(u32 *result, u32 count)
+{
+ return qman_alloc_range(qm_fqalloc, result, count);
+}
+EXPORT_SYMBOL(qman_alloc_fqid_range);
+
+int qman_alloc_pool_range(u32 *result, u32 count)
+{
+ return qman_alloc_range(qm_qpalloc, result, count);
+}
+EXPORT_SYMBOL(qman_alloc_pool_range);
+
+int qman_alloc_cgrid_range(u32 *result, u32 count)
+{
+ return qman_alloc_range(qm_cgralloc, result, count);
+}
+EXPORT_SYMBOL(qman_alloc_cgrid_range);
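+
+/*
+ * Editor's illustrative sketch, not driver code: the range allocators above
+ * hand out the base of a contiguous ID range, e.g. reserving eight FQIDs:
+ *
+ *	u32 base;
+ *
+ *	ret = qman_alloc_fqid_range(&base, 8);
+ *	if (!ret)
+ *		pr_info("FQIDs %u..%u reserved\n", base, base + 7);
+ */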
+
+int qman_release_fqid(u32 fqid)
+{
+ int ret = qman_shutdown_fq(fqid);
+
+ if (ret) {
+ pr_debug("FQID %d leaked\n", fqid);
+ return ret;
+ }
+
+ gen_pool_free(qm_fqalloc, fqid | DPAA_GENALLOC_OFF, 1);
+ return 0;
+}
+EXPORT_SYMBOL(qman_release_fqid);
+
+static int qpool_cleanup(u32 qp)
+{
+	/*
+	 * We query all FQDs starting from FQID 1 until we get an "invalid
+	 * FQID" error, looking for non-OOS FQDs whose destination channel
+	 * is the pool-channel being released. When a non-OOS FQD is found
+	 * we attempt to clean it up.
+	 */
+ struct qman_fq fq = {
+ .fqid = QM_FQID_RANGE_START
+ };
+ int err;
+
+ do {
+ struct qm_mcr_queryfq_np np;
+
+ err = qman_query_fq_np(&fq, &np);
+ if (err == -ERANGE)
+ /* FQID range exceeded, found no problems */
+ return 0;
+ else if (WARN_ON(err))
+ return err;
+
+ if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
+ struct qm_fqd fqd;
+
+ err = qman_query_fq(&fq, &fqd);
+ if (WARN_ON(err))
+ return err;
+ if (qm_fqd_get_chan(&fqd) == qp) {
+ /* The channel is the FQ's target, clean it */
+ err = qman_shutdown_fq(fq.fqid);
+ if (err)
+ /*
+ * Couldn't shut down the FQ
+ * so the pool must be leaked
+ */
+ return err;
+ }
+ }
+ /* Move to the next FQID */
+ fq.fqid++;
+ } while (1);
+}
+
+int qman_release_pool(u32 qp)
+{
+ int ret;
+
+ ret = qpool_cleanup(qp);
+ if (ret) {
+ pr_debug("CHID %d leaked\n", qp);
+ return ret;
+ }
+
+ gen_pool_free(qm_qpalloc, qp | DPAA_GENALLOC_OFF, 1);
+ return 0;
+}
+EXPORT_SYMBOL(qman_release_pool);
+
+static int cgr_cleanup(u32 cgrid)
+{
+ /*
+ * query all FQDs starting from FQID 1 until we get an "invalid FQID"
+ * error, looking for non-OOS FQDs whose CGR is the CGR being released
+ */
+ struct qman_fq fq = {
+ .fqid = QM_FQID_RANGE_START
+ };
+ int err;
+
+ do {
+ struct qm_mcr_queryfq_np np;
+
+ err = qman_query_fq_np(&fq, &np);
+ if (err == -ERANGE)
+ /* FQID range exceeded, found no problems */
+ return 0;
+ else if (WARN_ON(err))
+ return err;
+
+ if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
+ struct qm_fqd fqd;
+
+ err = qman_query_fq(&fq, &fqd);
+ if (WARN_ON(err))
+ return err;
+ if (be16_to_cpu(fqd.fq_ctrl) & QM_FQCTRL_CGE &&
+ fqd.cgid == cgrid) {
+				pr_err("CGRID 0x%x is being used by FQID 0x%x, CGR will be leaked\n",
+ cgrid, fq.fqid);
+ return -EIO;
+ }
+ }
+ /* Move to the next FQID */
+ fq.fqid++;
+ } while (1);
+}
+
+int qman_release_cgrid(u32 cgrid)
+{
+ int ret;
+
+ ret = cgr_cleanup(cgrid);
+ if (ret) {
+ pr_debug("CGRID %d leaked\n", cgrid);
+ return ret;
+ }
+
+ gen_pool_free(qm_cgralloc, cgrid | DPAA_GENALLOC_OFF, 1);
+ return 0;
+}
+EXPORT_SYMBOL(qman_release_cgrid);
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
new file mode 100644
index 0000000000..157659fd03
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -0,0 +1,917 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+u16 qman_ip_rev;
+EXPORT_SYMBOL(qman_ip_rev);
+u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
+EXPORT_SYMBOL(qm_channel_pool1);
+u16 qm_channel_caam = QMAN_CHANNEL_CAAM;
+EXPORT_SYMBOL(qm_channel_caam);
+
+/* Register offsets */
+#define REG_QCSP_LIO_CFG(n) (0x0000 + ((n) * 0x10))
+#define REG_QCSP_IO_CFG(n) (0x0004 + ((n) * 0x10))
+#define REG_QCSP_DD_CFG(n) (0x000c + ((n) * 0x10))
+#define REG_DD_CFG 0x0200
+#define REG_DCP_CFG(n) (0x0300 + ((n) * 0x10))
+#define REG_DCP_DD_CFG(n) (0x0304 + ((n) * 0x10))
+#define REG_DCP_DLM_AVG(n) (0x030c + ((n) * 0x10))
+#define REG_PFDR_FPC 0x0400
+#define REG_PFDR_FP_HEAD 0x0404
+#define REG_PFDR_FP_TAIL 0x0408
+#define REG_PFDR_FP_LWIT 0x0410
+#define REG_PFDR_CFG 0x0414
+#define REG_SFDR_CFG 0x0500
+#define REG_SFDR_IN_USE 0x0504
+#define REG_WQ_CS_CFG(n) (0x0600 + ((n) * 0x04))
+#define REG_WQ_DEF_ENC_WQID 0x0630
+#define REG_WQ_SC_DD_CFG(n) (0x640 + ((n) * 0x04))
+#define REG_WQ_PC_DD_CFG(n) (0x680 + ((n) * 0x04))
+#define REG_WQ_DC0_DD_CFG(n) (0x6c0 + ((n) * 0x04))
+#define REG_WQ_DC1_DD_CFG(n) (0x700 + ((n) * 0x04))
+#define REG_WQ_DCn_DD_CFG(n) (0x6c0 + ((n) * 0x40)) /* n=2,3 */
+#define REG_CM_CFG 0x0800
+#define REG_ECSR 0x0a00
+#define REG_ECIR 0x0a04
+#define REG_EADR 0x0a08
+#define REG_ECIR2 0x0a0c
+#define REG_EDATA(n) (0x0a10 + ((n) * 0x04))
+#define REG_SBEC(n) (0x0a80 + ((n) * 0x04))
+#define REG_MCR 0x0b00
+#define REG_MCP(n) (0x0b04 + ((n) * 0x04))
+#define REG_MISC_CFG 0x0be0
+#define REG_HID_CFG 0x0bf0
+#define REG_IDLE_STAT 0x0bf4
+#define REG_IP_REV_1 0x0bf8
+#define REG_IP_REV_2 0x0bfc
+#define REG_FQD_BARE 0x0c00
+#define REG_PFDR_BARE 0x0c20
+#define REG_offset_BAR 0x0004 /* relative to REG_[FQD|PFDR]_BARE */
+#define REG_offset_AR 0x0010 /* relative to REG_[FQD|PFDR]_BARE */
+#define REG_QCSP_BARE 0x0c80
+#define REG_QCSP_BAR 0x0c84
+#define REG_CI_SCHED_CFG 0x0d00
+#define REG_SRCIDR 0x0d04
+#define REG_LIODNR 0x0d08
+#define REG_CI_RLM_AVG 0x0d14
+#define REG_ERR_ISR 0x0e00
+#define REG_ERR_IER 0x0e04
+#define REG_REV3_QCSP_LIO_CFG(n) (0x1000 + ((n) * 0x10))
+#define REG_REV3_QCSP_IO_CFG(n) (0x1004 + ((n) * 0x10))
+#define REG_REV3_QCSP_DD_CFG(n) (0x100c + ((n) * 0x10))
+
+/* Assists for QMAN_MCR */
+#define MCR_INIT_PFDR 0x01000000
+#define MCR_get_rslt(v) (u8)((v) >> 24)
+#define MCR_rslt_idle(r) (!(r) || ((r) >= 0xf0))
+#define MCR_rslt_ok(r) ((r) == 0xf0)
+#define MCR_rslt_eaccess(r) ((r) == 0xf8)
+#define MCR_rslt_inval(r) ((r) == 0xff)
+
+/*
+ * Corenet initiator settings. Stash request queues are 4-deep to match the
+ * cores' ability to snarf. Stash priority is 3, other priorities are 2.
+ */
+#define QM_CI_SCHED_CFG_SRCCIV 4
+#define QM_CI_SCHED_CFG_SRQ_W 3
+#define QM_CI_SCHED_CFG_RW_W 2
+#define QM_CI_SCHED_CFG_BMAN_W 2
+/* write SRCCIV enable */
+#define QM_CI_SCHED_CFG_SRCCIV_EN BIT(31)
+
+/* Follows WQ_CS_CFG0-5 */
+enum qm_wq_class {
+ qm_wq_portal = 0,
+ qm_wq_pool = 1,
+ qm_wq_fman0 = 2,
+ qm_wq_fman1 = 3,
+ qm_wq_caam = 4,
+ qm_wq_pme = 5,
+ qm_wq_first = qm_wq_portal,
+ qm_wq_last = qm_wq_pme
+};
+
+/* Follows FQD_[BARE|BAR|AR] and PFDR_[BARE|BAR|AR] */
+enum qm_memory {
+ qm_memory_fqd,
+ qm_memory_pfdr
+};
+
+/* Used by all error interrupt registers except 'inhibit' */
+#define QM_EIRQ_CIDE 0x20000000 /* Corenet Initiator Data Error */
+#define QM_EIRQ_CTDE 0x10000000 /* Corenet Target Data Error */
+#define QM_EIRQ_CITT 0x08000000 /* Corenet Invalid Target Transaction */
+#define QM_EIRQ_PLWI 0x04000000 /* PFDR Low Watermark */
+#define QM_EIRQ_MBEI 0x02000000 /* Multi-bit ECC Error */
+#define QM_EIRQ_SBEI 0x01000000 /* Single-bit ECC Error */
+#define QM_EIRQ_PEBI 0x00800000 /* PFDR Enqueues Blocked Interrupt */
+#define QM_EIRQ_IFSI 0x00020000 /* Invalid FQ Flow Control State */
+#define QM_EIRQ_ICVI 0x00010000 /* Invalid Command Verb */
+#define QM_EIRQ_IDDI 0x00000800 /* Invalid Dequeue (Direct-connect) */
+#define QM_EIRQ_IDFI 0x00000400 /* Invalid Dequeue FQ */
+#define QM_EIRQ_IDSI 0x00000200 /* Invalid Dequeue Source */
+#define QM_EIRQ_IDQI 0x00000100 /* Invalid Dequeue Queue */
+#define QM_EIRQ_IECE 0x00000010 /* Invalid Enqueue Configuration */
+#define QM_EIRQ_IEOI 0x00000008 /* Invalid Enqueue Overflow */
+#define QM_EIRQ_IESI 0x00000004 /* Invalid Enqueue State */
+#define QM_EIRQ_IECI 0x00000002 /* Invalid Enqueue Channel */
+#define QM_EIRQ_IEQI 0x00000001 /* Invalid Enqueue Queue */
+
+/* QMAN_ECIR valid error bit */
+#define PORTAL_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IESI | QM_EIRQ_IEOI | \
+ QM_EIRQ_IDQI | QM_EIRQ_IDSI | QM_EIRQ_IDFI | \
+ QM_EIRQ_IDDI | QM_EIRQ_ICVI | QM_EIRQ_IFSI)
+#define FQID_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IECI | QM_EIRQ_IESI | \
+ QM_EIRQ_IEOI | QM_EIRQ_IDQI | QM_EIRQ_IDFI | \
+ QM_EIRQ_IFSI)
+
+struct qm_ecir {
+ u32 info; /* res[30-31], ptyp[29], pnum[24-28], fqid[0-23] */
+};
+
+static bool qm_ecir_is_dcp(const struct qm_ecir *p)
+{
+ return p->info & BIT(29);
+}
+
+static int qm_ecir_get_pnum(const struct qm_ecir *p)
+{
+ return (p->info >> 24) & 0x1f;
+}
+
+static int qm_ecir_get_fqid(const struct qm_ecir *p)
+{
+ return p->info & (BIT(24) - 1);
+}
+
+struct qm_ecir2 {
+ u32 info; /* ptyp[31], res[10-30], pnum[0-9] */
+};
+
+static bool qm_ecir2_is_dcp(const struct qm_ecir2 *p)
+{
+ return p->info & BIT(31);
+}
+
+static int qm_ecir2_get_pnum(const struct qm_ecir2 *p)
+{
+ return p->info & (BIT(10) - 1);
+}
+
+struct qm_eadr {
+ u32 info; /* memid[24-27], eadr[0-11] */
+ /* v3: memid[24-28], eadr[0-15] */
+};
+
+static int qm_eadr_get_memid(const struct qm_eadr *p)
+{
+ return (p->info >> 24) & 0xf;
+}
+
+static int qm_eadr_get_eadr(const struct qm_eadr *p)
+{
+ return p->info & (BIT(12) - 1);
+}
+
+static int qm_eadr_v3_get_memid(const struct qm_eadr *p)
+{
+ return (p->info >> 24) & 0x1f;
+}
+
+static int qm_eadr_v3_get_eadr(const struct qm_eadr *p)
+{
+ return p->info & (BIT(16) - 1);
+}
+
+struct qman_hwerr_txt {
+ u32 mask;
+ const char *txt;
+};
+
+static const struct qman_hwerr_txt qman_hwerr_txts[] = {
+ { QM_EIRQ_CIDE, "Corenet Initiator Data Error" },
+ { QM_EIRQ_CTDE, "Corenet Target Data Error" },
+ { QM_EIRQ_CITT, "Corenet Invalid Target Transaction" },
+ { QM_EIRQ_PLWI, "PFDR Low Watermark" },
+ { QM_EIRQ_MBEI, "Multi-bit ECC Error" },
+ { QM_EIRQ_SBEI, "Single-bit ECC Error" },
+ { QM_EIRQ_PEBI, "PFDR Enqueues Blocked Interrupt" },
+ { QM_EIRQ_ICVI, "Invalid Command Verb" },
+ { QM_EIRQ_IFSI, "Invalid Flow Control State" },
+ { QM_EIRQ_IDDI, "Invalid Dequeue (Direct-connect)" },
+ { QM_EIRQ_IDFI, "Invalid Dequeue FQ" },
+ { QM_EIRQ_IDSI, "Invalid Dequeue Source" },
+ { QM_EIRQ_IDQI, "Invalid Dequeue Queue" },
+ { QM_EIRQ_IECE, "Invalid Enqueue Configuration" },
+ { QM_EIRQ_IEOI, "Invalid Enqueue Overflow" },
+ { QM_EIRQ_IESI, "Invalid Enqueue State" },
+ { QM_EIRQ_IECI, "Invalid Enqueue Channel" },
+ { QM_EIRQ_IEQI, "Invalid Enqueue Queue" },
+};
+
+struct qman_error_info_mdata {
+ u16 addr_mask;
+ u16 bits;
+ const char *txt;
+};
+
+static const struct qman_error_info_mdata error_mdata[] = {
+ { 0x01FF, 24, "FQD cache tag memory 0" },
+ { 0x01FF, 24, "FQD cache tag memory 1" },
+ { 0x01FF, 24, "FQD cache tag memory 2" },
+ { 0x01FF, 24, "FQD cache tag memory 3" },
+ { 0x0FFF, 512, "FQD cache memory" },
+ { 0x07FF, 128, "SFDR memory" },
+ { 0x01FF, 72, "WQ context memory" },
+ { 0x00FF, 240, "CGR memory" },
+ { 0x00FF, 302, "Internal Order Restoration List memory" },
+ { 0x01FF, 256, "SW portal ring memory" },
+};
+
+#define QMAN_ERRS_TO_DISABLE (QM_EIRQ_PLWI | QM_EIRQ_PEBI)
+
+/*
+ * TODO: unimplemented registers
+ *
+ * Keeping a list here of QMan registers I have not yet covered:
+ * QCSP_DD_IHRSR, QCSP_DD_IHRFR, QCSP_DD_HASR,
+ * DCP_DD_IHRSR, DCP_DD_IHRFR, DCP_DD_HASR, CM_CFG,
+ * QMAN_EECC, QMAN_SBET, QMAN_EINJ, QMAN_SBEC0-12
+ */
+
+/* Pointer to the start of the QMan's CCSR space */
+static u32 __iomem *qm_ccsr_start;
+/* An SDQCR mask comprising all the available/visible pool channels */
+static u32 qm_pools_sdqcr;
+static int __qman_probed;
+static int __qman_requires_cleanup;
+
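+/*
+ * The REG_* offsets above are byte offsets, but qm_ccsr_start is a u32
+ * pointer, hence the offset/4 scaling in the accessors below.
+ */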
+static inline u32 qm_ccsr_in(u32 offset)
+{
+ return ioread32be(qm_ccsr_start + offset/4);
+}
+
+static inline void qm_ccsr_out(u32 offset, u32 val)
+{
+ iowrite32be(val, qm_ccsr_start + offset/4);
+}
+
+u32 qm_get_pools_sdqcr(void)
+{
+ return qm_pools_sdqcr;
+}
+
+enum qm_dc_portal {
+ qm_dc_portal_fman0 = 0,
+ qm_dc_portal_fman1 = 1
+};
+
+static void qm_set_dc(enum qm_dc_portal portal, int ed, u8 sernd)
+{
+ DPAA_ASSERT(!ed || portal == qm_dc_portal_fman0 ||
+ portal == qm_dc_portal_fman1);
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ qm_ccsr_out(REG_DCP_CFG(portal),
+ (ed ? 0x1000 : 0) | (sernd & 0x3ff));
+ else
+ qm_ccsr_out(REG_DCP_CFG(portal),
+ (ed ? 0x100 : 0) | (sernd & 0x1f));
+}
+
+static void qm_set_wq_scheduling(enum qm_wq_class wq_class,
+ u8 cs_elev, u8 csw2, u8 csw3, u8 csw4,
+ u8 csw5, u8 csw6, u8 csw7)
+{
+ qm_ccsr_out(REG_WQ_CS_CFG(wq_class), ((cs_elev & 0xff) << 24) |
+ ((csw2 & 0x7) << 20) | ((csw3 & 0x7) << 16) |
+ ((csw4 & 0x7) << 12) | ((csw5 & 0x7) << 8) |
+ ((csw6 & 0x7) << 4) | (csw7 & 0x7));
+}
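+
+/*
+ * For example, qman_init_ccsr() below resets every class to default
+ * scheduling by calling qm_set_wq_scheduling(i, 0, 0, 0, 0, 0, 0, 0) for
+ * each class from qm_wq_first to qm_wq_last.
+ */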
+
+static void qm_set_hid(void)
+{
+ qm_ccsr_out(REG_HID_CFG, 0);
+}
+
+static void qm_set_corenet_initiator(void)
+{
+ qm_ccsr_out(REG_CI_SCHED_CFG, QM_CI_SCHED_CFG_SRCCIV_EN |
+ (QM_CI_SCHED_CFG_SRCCIV << 24) |
+ (QM_CI_SCHED_CFG_SRQ_W << 8) |
+ (QM_CI_SCHED_CFG_RW_W << 4) |
+ QM_CI_SCHED_CFG_BMAN_W);
+}
+
+static void qm_get_version(u16 *id, u8 *major, u8 *minor)
+{
+ u32 v = qm_ccsr_in(REG_IP_REV_1);
+ *id = (v >> 16);
+ *major = (v >> 8) & 0xff;
+ *minor = v & 0xff;
+}
+
+#define PFDR_AR_EN BIT(31)
+static int qm_set_memory(enum qm_memory memory, u64 ba, u32 size)
+{
+ void *ptr;
+ u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE;
+ u32 exp = ilog2(size);
+ u32 bar, bare;
+
+ /* choke if size isn't within range */
+ DPAA_ASSERT((size >= 4096) && (size <= 1024*1024*1024) &&
+ is_power_of_2(size));
+ /* choke if 'ba' has lower-alignment than 'size' */
+ DPAA_ASSERT(!(ba & (size - 1)));
+
+ /* Check to see if QMan has already been initialized */
+ bar = qm_ccsr_in(offset + REG_offset_BAR);
+ if (bar) {
+ /* Make sure ba == what was programmed */
+ bare = qm_ccsr_in(offset);
+ if (bare != upper_32_bits(ba) || bar != lower_32_bits(ba)) {
+ pr_err("Attempted to reinitialize QMan with different BAR, got 0x%llx read BARE=0x%x BAR=0x%x\n",
+ ba, bare, bar);
+ return -ENOMEM;
+ }
+ __qman_requires_cleanup = 1;
+ /* Return 1 to indicate memory was previously programmed */
+ return 1;
+ }
+ /* Need to temporarily map the area to make sure it is zeroed */
+ ptr = memremap(ba, size, MEMREMAP_WB);
+ if (!ptr) {
+ pr_crit("memremap() of QMan private memory failed\n");
+ return -ENOMEM;
+ }
+ memset(ptr, 0, size);
+
+#ifdef CONFIG_PPC
+ /*
+ * PPC doesn't appear to flush the cache on memunmap() but the
+ * cache must be flushed since QMan does non-coherent accesses
+ * to this memory
+ */
+ flush_dcache_range((unsigned long) ptr, (unsigned long) ptr+size);
+#endif
+ memunmap(ptr);
+
+ qm_ccsr_out(offset, upper_32_bits(ba));
+ qm_ccsr_out(offset + REG_offset_BAR, lower_32_bits(ba));
+ qm_ccsr_out(offset + REG_offset_AR, PFDR_AR_EN | (exp - 1));
+ return 0;
+}
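+
+/*
+ * For example, a 4 MiB FQD area gives exp = ilog2(4 MiB) = 22, so the AR
+ * register is written as PFDR_AR_EN | 21: the size field holds
+ * log2(size) - 1, with the enable bit in bit 31.
+ */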
+
+static void qm_set_pfdr_threshold(u32 th, u8 k)
+{
+ qm_ccsr_out(REG_PFDR_FP_LWIT, th & 0xffffff);
+ qm_ccsr_out(REG_PFDR_CFG, k);
+}
+
+static void qm_set_sfdr_threshold(u16 th)
+{
+ qm_ccsr_out(REG_SFDR_CFG, th & 0x3ff);
+}
+
+static int qm_init_pfdr(struct device *dev, u32 pfdr_start, u32 num)
+{
+ u8 rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR));
+
+ DPAA_ASSERT(pfdr_start && !(pfdr_start & 7) && !(num & 7) && num);
+ /* Make sure the command interface is 'idle' */
+ if (!MCR_rslt_idle(rslt)) {
+ dev_crit(dev, "QMAN_MCR isn't idle\n");
+ WARN_ON(1);
+ }
+
+ /* Write the MCR command params then the verb */
+ qm_ccsr_out(REG_MCP(0), pfdr_start);
+ /*
+ * TODO: remove this - it's a workaround for a model bug that is
+ * corrected in more recent versions. We use the workaround until
+ * everyone has upgraded.
+ */
+ qm_ccsr_out(REG_MCP(1), pfdr_start + num - 16);
+ dma_wmb();
+ qm_ccsr_out(REG_MCR, MCR_INIT_PFDR);
+ /* Poll for the result */
+ do {
+ rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR));
+ } while (!MCR_rslt_idle(rslt));
+ if (MCR_rslt_ok(rslt))
+ return 0;
+ if (MCR_rslt_eaccess(rslt))
+ return -EACCES;
+ if (MCR_rslt_inval(rslt))
+ return -EINVAL;
+ dev_crit(dev, "Unexpected result from MCR_INIT_PFDR: %02x\n", rslt);
+ return -ENODEV;
+}
+
+/*
+ * QMan needs two global memory areas initialized at boot time:
+ * 1) FQD: Frame Queue Descriptors used to manage frame queues
+ * 2) PFDR: Packed Frame Queue Descriptor Records used to store frames
+ * Both areas are reserved using the device tree reserved-memory framework,
+ * and the addresses and sizes are initialized when the QMan device is probed.
+ */
+static dma_addr_t fqd_a, pfdr_a;
+static size_t fqd_sz, pfdr_sz;
+
+#ifdef CONFIG_PPC
+/*
+ * Support for PPC Device Tree backward compatibility when compatible
+ * string is set to fsl-qman-fqd and fsl-qman-pfdr
+ */
+static int zero_priv_mem(phys_addr_t addr, size_t sz)
+{
+ /* map as cacheable, non-guarded */
+ void __iomem *tmpp = ioremap_cache(addr, sz);
+
+ if (!tmpp)
+ return -ENOMEM;
+
+ memset_io(tmpp, 0, sz);
+ flush_dcache_range((unsigned long)tmpp,
+ (unsigned long)tmpp + sz);
+ iounmap(tmpp);
+
+ return 0;
+}
+
+static int qman_fqd(struct reserved_mem *rmem)
+{
+ fqd_a = rmem->base;
+ fqd_sz = rmem->size;
+
+ WARN_ON(!(fqd_a && fqd_sz));
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);
+
+static int qman_pfdr(struct reserved_mem *rmem)
+{
+ pfdr_a = rmem->base;
+ pfdr_sz = rmem->size;
+
+ WARN_ON(!(pfdr_a && pfdr_sz));
+
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr);
+
+#endif
+
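+/* Each FQD occupies 64 bytes, so e.g. a 1 MiB FQD area holds 16384 FQDs */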
+unsigned int qm_get_fqid_maxcnt(void)
+{
+ return fqd_sz / 64;
+}
+
+static void log_edata_bits(struct device *dev, u32 bit_count)
+{
+ u32 i, j, mask = 0xffffffff;
+
+ dev_warn(dev, "ErrInt, EDATA:\n");
+ i = bit_count / 32;
+ if (bit_count % 32) {
+ i++;
+ mask = ~(mask << bit_count % 32);
+ }
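+ /*
+ * There are 16 EDATA registers; the failing data occupies the last
+ * bit_count bits, so start at register 16 - i and mask off the
+ * unused top bits of the first (partial) word.
+ */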
+ j = 16 - i;
+ dev_warn(dev, " 0x%08x\n", qm_ccsr_in(REG_EDATA(j)) & mask);
+ j++;
+ for (; j < 16; j++)
+ dev_warn(dev, " 0x%08x\n", qm_ccsr_in(REG_EDATA(j)));
+}
+
+static void log_additional_error_info(struct device *dev, u32 isr_val,
+ u32 ecsr_val)
+{
+ struct qm_ecir ecir_val;
+ struct qm_eadr eadr_val;
+ int memid;
+
+ ecir_val.info = qm_ccsr_in(REG_ECIR);
+ /* Is the portal info valid? */
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
+ struct qm_ecir2 ecir2_val;
+
+ ecir2_val.info = qm_ccsr_in(REG_ECIR2);
+ if (ecsr_val & PORTAL_ECSR_ERR) {
+ dev_warn(dev, "ErrInt: %s id %d\n",
+ qm_ecir2_is_dcp(&ecir2_val) ? "DCP" : "SWP",
+ qm_ecir2_get_pnum(&ecir2_val));
+ }
+ if (ecsr_val & (FQID_ECSR_ERR | QM_EIRQ_IECE))
+ dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n",
+ qm_ecir_get_fqid(&ecir_val));
+
+ if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
+ eadr_val.info = qm_ccsr_in(REG_EADR);
+ memid = qm_eadr_v3_get_memid(&eadr_val);
+ dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n",
+ error_mdata[memid].txt,
+ error_mdata[memid].addr_mask
+ & qm_eadr_v3_get_eadr(&eadr_val));
+ log_edata_bits(dev, error_mdata[memid].bits);
+ }
+ } else {
+ if (ecsr_val & PORTAL_ECSR_ERR) {
+ dev_warn(dev, "ErrInt: %s id %d\n",
+ qm_ecir_is_dcp(&ecir_val) ? "DCP" : "SWP",
+ qm_ecir_get_pnum(&ecir_val));
+ }
+ if (ecsr_val & FQID_ECSR_ERR)
+ dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n",
+ qm_ecir_get_fqid(&ecir_val));
+
+ if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
+ eadr_val.info = qm_ccsr_in(REG_EADR);
+ memid = qm_eadr_get_memid(&eadr_val);
+ dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n",
+ error_mdata[memid].txt,
+ error_mdata[memid].addr_mask
+ & qm_eadr_get_eadr(&eadr_val));
+ log_edata_bits(dev, error_mdata[memid].bits);
+ }
+ }
+}
+
+static irqreturn_t qman_isr(int irq, void *ptr)
+{
+ u32 isr_val, ier_val, ecsr_val, isr_mask, i;
+ struct device *dev = ptr;
+
+ ier_val = qm_ccsr_in(REG_ERR_IER);
+ isr_val = qm_ccsr_in(REG_ERR_ISR);
+ ecsr_val = qm_ccsr_in(REG_ECSR);
+ isr_mask = isr_val & ier_val;
+
+ if (!isr_mask)
+ return IRQ_NONE;
+
+ for (i = 0; i < ARRAY_SIZE(qman_hwerr_txts); i++) {
+ if (qman_hwerr_txts[i].mask & isr_mask) {
+ dev_err_ratelimited(dev, "ErrInt: %s\n",
+ qman_hwerr_txts[i].txt);
+ if (qman_hwerr_txts[i].mask & ecsr_val) {
+ log_additional_error_info(dev, isr_mask,
+ ecsr_val);
+ /* Re-arm error capture registers */
+ qm_ccsr_out(REG_ECSR, ecsr_val);
+ }
+ if (qman_hwerr_txts[i].mask & QMAN_ERRS_TO_DISABLE) {
+ dev_dbg(dev, "Disabling error 0x%x\n",
+ qman_hwerr_txts[i].mask);
+ ier_val &= ~qman_hwerr_txts[i].mask;
+ qm_ccsr_out(REG_ERR_IER, ier_val);
+ }
+ }
+ }
+ qm_ccsr_out(REG_ERR_ISR, isr_val);
+
+ return IRQ_HANDLED;
+}
+
+static int qman_init_ccsr(struct device *dev)
+{
+ int i, err;
+
+ /* FQD memory */
+ err = qm_set_memory(qm_memory_fqd, fqd_a, fqd_sz);
+ if (err < 0)
+ return err;
+ /* PFDR memory */
+ err = qm_set_memory(qm_memory_pfdr, pfdr_a, pfdr_sz);
+ if (err < 0)
+ return err;
+ /* Only initialize PFDRs if the QMan was not initialized before */
+ if (err == 0) {
+ err = qm_init_pfdr(dev, 8, pfdr_sz / 64 - 8);
+ if (err)
+ return err;
+ }
+ /* thresholds */
+ qm_set_pfdr_threshold(512, 64);
+ qm_set_sfdr_threshold(128);
+ /* clear stale PEBI bit from interrupt status register */
+ qm_ccsr_out(REG_ERR_ISR, QM_EIRQ_PEBI);
+ /* corenet initiator settings */
+ qm_set_corenet_initiator();
+ /* HID settings */
+ qm_set_hid();
+ /* Set scheduling weights to defaults */
+ for (i = qm_wq_first; i <= qm_wq_last; i++)
+ qm_set_wq_scheduling(i, 0, 0, 0, 0, 0, 0, 0);
+ /* We are not prepared to accept ERNs for hardware enqueues */
+ qm_set_dc(qm_dc_portal_fman0, 1, 0);
+ qm_set_dc(qm_dc_portal_fman1, 1, 0);
+ return 0;
+}
+
+#define LIO_CFG_LIODN_MASK 0x0fff0000
+void __qman_liodn_fixup(u16 channel)
+{
+ static int done;
+ static u32 liodn_offset;
+ u32 before, after;
+ int idx = channel - QM_CHANNEL_SWPORTAL0;
+
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ before = qm_ccsr_in(REG_REV3_QCSP_LIO_CFG(idx));
+ else
+ before = qm_ccsr_in(REG_QCSP_LIO_CFG(idx));
+ if (!done) {
+ liodn_offset = before & LIO_CFG_LIODN_MASK;
+ done = 1;
+ return;
+ }
+ after = (before & (~LIO_CFG_LIODN_MASK)) | liodn_offset;
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ qm_ccsr_out(REG_REV3_QCSP_LIO_CFG(idx), after);
+ else
+ qm_ccsr_out(REG_QCSP_LIO_CFG(idx), after);
+}
+
+#define IO_CFG_SDEST_MASK 0x00ff0000
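+/*
+ * The SDEST (stash destination) field lives in bits 23:16 of QCSP_IO_CFG.
+ * On rev3.0+ parts each pair of vCPUs shares one SDEST, so e.g. cpu_idx 5
+ * programs SDEST 2.
+ */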
+void qman_set_sdest(u16 channel, unsigned int cpu_idx)
+{
+ int idx = channel - QM_CHANNEL_SWPORTAL0;
+ u32 before, after;
+
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
+ before = qm_ccsr_in(REG_REV3_QCSP_IO_CFG(idx));
+ /* Each pair of vCPUs shares the same SRQ (SDEST) */
+ cpu_idx /= 2;
+ after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
+ qm_ccsr_out(REG_REV3_QCSP_IO_CFG(idx), after);
+ } else {
+ before = qm_ccsr_in(REG_QCSP_IO_CFG(idx));
+ after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
+ qm_ccsr_out(REG_QCSP_IO_CFG(idx), after);
+ }
+}
+
+static int qman_resource_init(struct device *dev)
+{
+ int pool_chan_num, cgrid_num;
+ int ret, i;
+
+ switch (qman_ip_rev >> 8) {
+ case 1:
+ pool_chan_num = 15;
+ cgrid_num = 256;
+ break;
+ case 2:
+ pool_chan_num = 3;
+ cgrid_num = 64;
+ break;
+ case 3:
+ pool_chan_num = 15;
+ cgrid_num = 256;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ ret = gen_pool_add(qm_qpalloc, qm_channel_pool1 | DPAA_GENALLOC_OFF,
+ pool_chan_num, -1);
+ if (ret) {
+ dev_err(dev, "Failed to seed pool channels (%d)\n", ret);
+ return ret;
+ }
+
+ ret = gen_pool_add(qm_cgralloc, DPAA_GENALLOC_OFF, cgrid_num, -1);
+ if (ret) {
+ dev_err(dev, "Failed to seed CGRID range (%d)\n", ret);
+ return ret;
+ }
+
+ /* parse pool channels into the SDQCR mask */
+ for (i = 0; i < cgrid_num; i++)
+ qm_pools_sdqcr |= QM_SDQCR_CHANNELS_POOL_CONV(i);
+
+ ret = gen_pool_add(qm_fqalloc, QM_FQID_RANGE_START | DPAA_GENALLOC_OFF,
+ qm_get_fqid_maxcnt() - QM_FQID_RANGE_START, -1);
+ if (ret) {
+ dev_err(dev, "Failed to seed FQID range (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int qman_is_probed(void)
+{
+ return __qman_probed;
+}
+EXPORT_SYMBOL_GPL(qman_is_probed);
+
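+/*
+ * __qman_requires_cleanup is set by qm_set_memory() when it finds QMan's
+ * private memory already programmed (e.g. after a kexec); the portal driver
+ * then shuts down every frame queue before calling qman_done_cleanup() to
+ * re-enable interrupts.
+ */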
+int qman_requires_cleanup(void)
+{
+ return __qman_requires_cleanup;
+}
+
+void qman_done_cleanup(void)
+{
+ qman_enable_irqs();
+ __qman_requires_cleanup = 0;
+}
+
+static int fsl_qman_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct resource *res;
+ int ret, err_irq;
+ u16 id;
+ u8 major, minor;
+
+ __qman_probed = -1;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "Can't get %pOF property 'IORESOURCE_MEM'\n",
+ node);
+ return -ENXIO;
+ }
+ qm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res));
+ if (!qm_ccsr_start)
+ return -ENXIO;
+
+ qm_get_version(&id, &major, &minor);
+ if (major == 1 && minor == 0) {
+ dev_err(dev, "Rev1.0 on P4080 rev1 is not supported!\n");
+ return -ENODEV;
+ } else if (major == 1 && minor == 1)
+ qman_ip_rev = QMAN_REV11;
+ else if (major == 1 && minor == 2)
+ qman_ip_rev = QMAN_REV12;
+ else if (major == 2 && minor == 0)
+ qman_ip_rev = QMAN_REV20;
+ else if (major == 3 && minor == 0)
+ qman_ip_rev = QMAN_REV30;
+ else if (major == 3 && minor == 1)
+ qman_ip_rev = QMAN_REV31;
+ else if (major == 3 && minor == 2)
+ qman_ip_rev = QMAN_REV32;
+ else {
+ dev_err(dev, "Unknown QMan version\n");
+ return -ENODEV;
+ }
+
+ if ((qman_ip_rev & 0xff00) >= QMAN_REV30) {
+ qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
+ qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
+ }
+
+ if (fqd_a) {
+#ifdef CONFIG_PPC
+ /*
+ * For PPC backward DT compatibility
+ * FQD memory MUST be zero'd by software
+ */
+ zero_priv_mem(fqd_a, fqd_sz);
+#else
+ WARN(1, "Unexpected architecture using non shared-dma-mem reservations");
+#endif
+ } else {
+ /*
+ * The order of the memory regions is assumed to be FQD followed by
+ * PFDR. To ensure allocations come from the correct regions, the
+ * driver initializes and then allocates each piece in order.
+ */
+ ret = qbman_init_private_mem(dev, 0, &fqd_a, &fqd_sz);
+ if (ret) {
+ dev_err(dev, "qbman_init_private_mem() for FQD failed 0x%x\n",
+ ret);
+ return -ENODEV;
+ }
+ }
+ dev_dbg(dev, "Allocated FQD 0x%llx 0x%zx\n", fqd_a, fqd_sz);
+
+ if (!pfdr_a) {
+ /* Setup PFDR memory */
+ ret = qbman_init_private_mem(dev, 1, &pfdr_a, &pfdr_sz);
+ if (ret) {
+ dev_err(dev, "qbman_init_private_mem() for PFDR failed 0x%x\n",
+ ret);
+ return -ENODEV;
+ }
+ }
+ dev_dbg(dev, "Allocated PFDR 0x%llx 0x%zx\n", pfdr_a, pfdr_sz);
+
+ ret = qman_init_ccsr(dev);
+ if (ret) {
+ dev_err(dev, "CCSR setup failed\n");
+ return ret;
+ }
+
+ err_irq = platform_get_irq(pdev, 0);
+ if (err_irq <= 0) {
+ dev_info(dev, "Can't get %pOF property 'interrupts'\n",
+ node);
+ return -ENODEV;
+ }
+ ret = devm_request_irq(dev, err_irq, qman_isr, IRQF_SHARED, "qman-err",
+ dev);
+ if (ret) {
+ dev_err(dev, "devm_request_irq() failed %d for '%pOF'\n",
+ ret, node);
+ return ret;
+ }
+
+ /*
+ * Write-to-clear any stale bits (e.g. starvation being asserted prior
+ * to resource allocation during driver init).
+ */
+ qm_ccsr_out(REG_ERR_ISR, 0xffffffff);
+ /* Enable Error Interrupts */
+ qm_ccsr_out(REG_ERR_IER, 0xffffffff);
+
+ qm_fqalloc = devm_gen_pool_create(dev, 0, -1, "qman-fqalloc");
+ if (IS_ERR(qm_fqalloc)) {
+ ret = PTR_ERR(qm_fqalloc);
+ dev_err(dev, "qman-fqalloc pool init failed (%d)\n", ret);
+ return ret;
+ }
+
+ qm_qpalloc = devm_gen_pool_create(dev, 0, -1, "qman-qpalloc");
+ if (IS_ERR(qm_qpalloc)) {
+ ret = PTR_ERR(qm_qpalloc);
+ dev_err(dev, "qman-qpalloc pool init failed (%d)\n", ret);
+ return ret;
+ }
+
+ qm_cgralloc = devm_gen_pool_create(dev, 0, -1, "qman-cgralloc");
+ if (IS_ERR(qm_cgralloc)) {
+ ret = PTR_ERR(qm_cgralloc);
+ dev_err(dev, "qman-cgralloc pool init failed (%d)\n", ret);
+ return ret;
+ }
+
+ ret = qman_resource_init(dev);
+ if (ret)
+ return ret;
+
+ ret = qman_alloc_fq_table(qm_get_fqid_maxcnt());
+ if (ret)
+ return ret;
+
+ ret = qman_wq_alloc();
+ if (ret)
+ return ret;
+
+ __qman_probed = 1;
+
+ return 0;
+}
+
+static const struct of_device_id fsl_qman_ids[] = {
+ {
+ .compatible = "fsl,qman",
+ },
+ {}
+};
+
+static struct platform_driver fsl_qman_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = fsl_qman_ids,
+ .suppress_bind_attrs = true,
+ },
+ .probe = fsl_qman_probe,
+};
+
+builtin_platform_driver(fsl_qman_driver);
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
new file mode 100644
index 0000000000..e23b60618c
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -0,0 +1,342 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+struct qman_portal *qman_dma_portal;
+EXPORT_SYMBOL(qman_dma_portal);
+
+/* Enable portal interrupts (as opposed to polling mode) */
+#define CONFIG_FSL_DPA_PIRQ_SLOW 1
+#define CONFIG_FSL_DPA_PIRQ_FAST 1
+
+static struct cpumask portal_cpus;
+static int __qman_portals_probed;
+/* protect qman global registers and global data shared among portals */
+static DEFINE_SPINLOCK(qman_lock);
+
+static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
+{
+#ifdef CONFIG_FSL_PAMU
+ struct device *dev = pcfg->dev;
+ int ret;
+
+ pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
+ if (!pcfg->iommu_domain) {
+ dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__);
+ goto no_iommu;
+ }
+ ret = fsl_pamu_configure_l1_stash(pcfg->iommu_domain, cpu);
+ if (ret < 0) {
+ dev_err(dev, "%s(): fsl_pamu_configure_l1_stash() = %d",
+ __func__, ret);
+ goto out_domain_free;
+ }
+ ret = iommu_attach_device(pcfg->iommu_domain, dev);
+ if (ret < 0) {
+ dev_err(dev, "%s(): iommu_device_attach() = %d", __func__,
+ ret);
+ goto out_domain_free;
+ }
+
+no_iommu:
+#endif
+ qman_set_sdest(pcfg->channel, cpu);
+
+ return;
+
+#ifdef CONFIG_FSL_PAMU
+out_domain_free:
+ iommu_domain_free(pcfg->iommu_domain);
+ pcfg->iommu_domain = NULL;
+#endif
+}
+
+static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
+{
+ struct qman_portal *p;
+ u32 irq_sources = 0;
+
+ /* We need the same LIODN offset for all portals */
+ qman_liodn_fixup(pcfg->channel);
+
+ pcfg->iommu_domain = NULL;
+ portal_set_cpu(pcfg, pcfg->cpu);
+
+ p = qman_create_affine_portal(pcfg, NULL);
+ if (!p) {
+ dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n",
+ __func__, pcfg->cpu);
+ return NULL;
+ }
+
+ /* Determine what should be interrupt-vs-poll driven */
+#ifdef CONFIG_FSL_DPA_PIRQ_SLOW
+ irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI |
+ QM_PIRQ_CSCI;
+#endif
+#ifdef CONFIG_FSL_DPA_PIRQ_FAST
+ irq_sources |= QM_PIRQ_DQRI;
+#endif
+ qman_p_irqsource_add(p, irq_sources);
+
+ spin_lock(&qman_lock);
+ if (cpumask_equal(&portal_cpus, cpu_possible_mask)) {
+ /* all assigned portals are initialized now */
+ qman_init_cgr_all();
+ }
+
+ if (!qman_dma_portal)
+ qman_dma_portal = p;
+
+ spin_unlock(&qman_lock);
+
+ dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
+
+ return p;
+}
+
+static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
+ unsigned int cpu)
+{
+#ifdef CONFIG_FSL_PAMU /* TODO */
+ if (pcfg->iommu_domain) {
+ if (fsl_pamu_configure_l1_stash(pcfg->iommu_domain, cpu) < 0) {
+ dev_err(pcfg->dev,
+ "Failed to update pamu stash setting\n");
+ return;
+ }
+ }
+#endif
+ qman_set_sdest(pcfg->channel, cpu);
+}
+
+static int qman_offline_cpu(unsigned int cpu)
+{
+ struct qman_portal *p;
+ const struct qm_portal_config *pcfg;
+
+ p = affine_portals[cpu];
+ if (p) {
+ pcfg = qman_get_qm_portal_config(p);
+ if (pcfg) {
+ /* select any other online CPU */
+ cpu = cpumask_any_but(cpu_online_mask, cpu);
+ irq_set_affinity(pcfg->irq, cpumask_of(cpu));
+ qman_portal_update_sdest(pcfg, cpu);
+ }
+ }
+ return 0;
+}
+
+static int qman_online_cpu(unsigned int cpu)
+{
+ struct qman_portal *p;
+ const struct qm_portal_config *pcfg;
+
+ p = affine_portals[cpu];
+ if (p) {
+ pcfg = qman_get_qm_portal_config(p);
+ if (pcfg) {
+ irq_set_affinity(pcfg->irq, cpumask_of(cpu));
+ qman_portal_update_sdest(pcfg, cpu);
+ }
+ }
+ return 0;
+}
+
+int qman_portals_probed(void)
+{
+ return __qman_portals_probed;
+}
+EXPORT_SYMBOL_GPL(qman_portals_probed);
+
+static int qman_portal_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct qm_portal_config *pcfg;
+ struct resource *addr_phys[2];
+ int irq, cpu, err, i;
+ u32 val;
+
+ err = qman_is_probed();
+ if (!err)
+ return -EPROBE_DEFER;
+ if (err < 0) {
+ dev_err(&pdev->dev, "failing probe due to qman probe error\n");
+ return -ENODEV;
+ }
+
+ pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
+ if (!pcfg) {
+ __qman_portals_probed = -1;
+ return -ENOMEM;
+ }
+
+ pcfg->dev = dev;
+
+ addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM,
+ DPAA_PORTAL_CE);
+ if (!addr_phys[0]) {
+ dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node);
+ goto err_ioremap1;
+ }
+
+ addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
+ DPAA_PORTAL_CI);
+ if (!addr_phys[1]) {
+ dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node);
+ goto err_ioremap1;
+ }
+
+ err = of_property_read_u32(node, "cell-index", &val);
+ if (err) {
+ dev_err(dev, "Can't get %pOF property 'cell-index'\n", node);
+ __qman_portals_probed = -1;
+ return err;
+ }
+ pcfg->channel = val;
+ pcfg->cpu = -1;
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ goto err_ioremap1;
+ pcfg->irq = irq;
+
+ pcfg->addr_virt_ce = memremap(addr_phys[0]->start,
+ resource_size(addr_phys[0]),
+ QBMAN_MEMREMAP_ATTR);
+ if (!pcfg->addr_virt_ce) {
+ dev_err(dev, "memremap::CE failed\n");
+ goto err_ioremap1;
+ }
+
+ pcfg->addr_virt_ci = ioremap(addr_phys[1]->start,
+ resource_size(addr_phys[1]));
+ if (!pcfg->addr_virt_ci) {
+ dev_err(dev, "ioremap::CI failed\n");
+ goto err_ioremap2;
+ }
+
+ pcfg->pools = qm_get_pools_sdqcr();
+
+ spin_lock(&qman_lock);
+ cpu = cpumask_first_zero(&portal_cpus);
+ if (cpu >= nr_cpu_ids) {
+ __qman_portals_probed = 1;
+ /* unassigned portal, skip init */
+ spin_unlock(&qman_lock);
+ goto check_cleanup;
+ }
+
+ cpumask_set_cpu(cpu, &portal_cpus);
+ spin_unlock(&qman_lock);
+ pcfg->cpu = cpu;
+
+ if (dma_set_mask(dev, DMA_BIT_MASK(40))) {
+ dev_err(dev, "dma_set_mask() failed\n");
+ goto err_portal_init;
+ }
+
+ if (!init_pcfg(pcfg)) {
+ dev_err(dev, "portal init failed\n");
+ goto err_portal_init;
+ }
+
+ /* clear irq affinity if assigned cpu is offline */
+ if (!cpu_online(cpu))
+ qman_offline_cpu(cpu);
+
+check_cleanup:
+ if (__qman_portals_probed == 1 && qman_requires_cleanup()) {
+ /*
+ * QMan wasn't reset prior to boot (Kexec for example)
+ * Empty all the frame queues so they are in reset state
+ */
+ for (i = 0; i < qm_get_fqid_maxcnt(); i++) {
+ err = qman_shutdown_fq(i);
+ if (err) {
+ dev_err(dev, "Failed to shutdown frame queue %d\n",
+ i);
+ goto err_portal_init;
+ }
+ }
+ qman_done_cleanup();
+ }
+
+ return 0;
+
+err_portal_init:
+ iounmap(pcfg->addr_virt_ci);
+err_ioremap2:
+ memunmap(pcfg->addr_virt_ce);
+err_ioremap1:
+ __qman_portals_probed = -1;
+
+ return -ENXIO;
+}
+
+static const struct of_device_id qman_portal_ids[] = {
+ {
+ .compatible = "fsl,qman-portal",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qman_portal_ids);
+
+static struct platform_driver qman_portal_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = qman_portal_ids,
+ },
+ .probe = qman_portal_probe,
+};
+
+static int __init qman_portal_driver_register(struct platform_driver *drv)
+{
+ int ret;
+
+ ret = platform_driver_register(drv);
+ if (ret < 0)
+ return ret;
+
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "soc/qman_portal:online",
+ qman_online_cpu, qman_offline_cpu);
+ if (ret < 0) {
+ pr_err("qman: failed to register hotplug callbacks.\n");
+ platform_driver_unregister(drv);
+ return ret;
+ }
+ return 0;
+}
+
+module_driver(qman_portal_driver,
+ qman_portal_driver_register, platform_driver_unregister);
diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h
new file mode 100644
index 0000000000..fd1cf543fb
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_priv.h
@@ -0,0 +1,282 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "dpaa_sys.h"
+
+#include <soc/fsl/qman.h>
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+
+#if defined(CONFIG_FSL_PAMU)
+#include <asm/fsl_pamu_stash.h>
+#endif
+
+struct qm_mcr_querywq {
+ u8 verb;
+ u8 result;
+ u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */
+ u8 __reserved[28];
+ u32 wq_len[8];
+} __packed;
+
+static inline u16 qm_mcr_querywq_get_chan(const struct qm_mcr_querywq *wq)
+{
+ return wq->channel_wq >> 3;
+}
+
+struct __qm_mcr_querycongestion {
+ u32 state[8];
+};
+
+/* "Query Congestion Group State" */
+struct qm_mcr_querycongestion {
+ u8 verb;
+ u8 result;
+ u8 __reserved[30];
+ /* Access this struct using qman_cgrs_get() */
+ struct __qm_mcr_querycongestion state;
+} __packed;
+
+/* "Query CGR" */
+struct qm_mcr_querycgr {
+ u8 verb;
+ u8 result;
+ u16 __reserved1;
+ struct __qm_mc_cgr cgr; /* CGR fields */
+ u8 __reserved2[6];
+ u8 i_bcnt_hi; /* high 8-bits of 40-bit "Instant" */
+ __be32 i_bcnt_lo; /* low 32-bits of 40-bit */
+ u8 __reserved3[3];
+ u8 a_bcnt_hi; /* high 8-bits of 40-bit "Average" */
+ __be32 a_bcnt_lo; /* low 32-bits of 40-bit */
+ __be32 cscn_targ_swp[4];
+} __packed;
+
+static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q)
+{
+ return ((u64)q->i_bcnt_hi << 32) | be32_to_cpu(q->i_bcnt_lo);
+}
+static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
+{
+ return ((u64)q->a_bcnt_hi << 32) | be32_to_cpu(q->a_bcnt_lo);
+}
+
+/* Congestion Groups */
+
+/*
+ * This wrapper represents a bit-array for the state of the 256 QMan congestion
+ * groups. It is also used as a *mask* for congestion groups, e.g. so we ignore
+ * those that don't concern us. We harness the structure and accessor details
+ * already used in the management command to query congestion groups.
+ */
+#define CGR_BITS_PER_WORD 5
+#define CGR_WORD(x) ((x) >> CGR_BITS_PER_WORD)
+#define CGR_BIT(x) (BIT(31) >> ((x) & 0x1f))
+#define CGR_NUM (sizeof(struct __qm_mcr_querycongestion) << 3)
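+
+/*
+ * For example, CGR id 27 maps to state word CGR_WORD(27) = 0 and bit mask
+ * CGR_BIT(27) = BIT(31) >> 27 = BIT(4); bits are numbered MSB-first within
+ * each 32-bit word (CGR_BITS_PER_WORD is the log2 of the 32 bits per word,
+ * hence the shift).
+ */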
+
+struct qman_cgrs {
+ struct __qm_mcr_querycongestion q;
+};
+
+static inline void qman_cgrs_init(struct qman_cgrs *c)
+{
+ memset(c, 0, sizeof(*c));
+}
+
+static inline void qman_cgrs_fill(struct qman_cgrs *c)
+{
+ memset(c, 0xff, sizeof(*c));
+}
+
+static inline int qman_cgrs_get(struct qman_cgrs *c, u8 cgr)
+{
+ return c->q.state[CGR_WORD(cgr)] & CGR_BIT(cgr);
+}
+
+static inline void qman_cgrs_cp(struct qman_cgrs *dest,
+ const struct qman_cgrs *src)
+{
+ *dest = *src;
+}
+
+static inline void qman_cgrs_and(struct qman_cgrs *dest,
+ const struct qman_cgrs *a, const struct qman_cgrs *b)
+{
+ int ret;
+ u32 *_d = dest->q.state;
+ const u32 *_a = a->q.state;
+ const u32 *_b = b->q.state;
+
+ for (ret = 0; ret < 8; ret++)
+ *_d++ = *_a++ & *_b++;
+}
+
+static inline void qman_cgrs_xor(struct qman_cgrs *dest,
+ const struct qman_cgrs *a, const struct qman_cgrs *b)
+{
+ int ret;
+ u32 *_d = dest->q.state;
+ const u32 *_a = a->q.state;
+ const u32 *_b = b->q.state;
+
+ for (ret = 0; ret < 8; ret++)
+ *_d++ = *_a++ ^ *_b++;
+}
+
+void qman_init_cgr_all(void);
+
+struct qm_portal_config {
+ /* Portal addresses */
+ void *addr_virt_ce;
+ void __iomem *addr_virt_ci;
+ struct device *dev;
+ struct iommu_domain *iommu_domain;
+ /* Allow these to be joined in lists */
+ struct list_head list;
+ /* User-visible portal configuration settings */
+ /* portal is affined to this cpu */
+ int cpu;
+ /* portal interrupt line */
+ int irq;
+ /*
+ * the portal's dedicated channel id, used when initialising
+ * frame queues to target this portal when scheduled
+ */
+ u16 channel;
+ /*
+ * mask of pool channels this portal has dequeue access to
+ * (using QM_SDQCR_CHANNELS_POOL(n) for the bitmask)
+ */
+ u32 pools;
+};
+
+/* Revision info (for errata and feature handling) */
+#define QMAN_REV11 0x0101
+#define QMAN_REV12 0x0102
+#define QMAN_REV20 0x0200
+#define QMAN_REV30 0x0300
+#define QMAN_REV31 0x0301
+#define QMAN_REV32 0x0302
+extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
+
+#define QM_FQID_RANGE_START 1 /* FQID 0 reserved for internal use */
+extern struct gen_pool *qm_fqalloc; /* FQID allocator */
+extern struct gen_pool *qm_qpalloc; /* pool-channel allocator */
+extern struct gen_pool *qm_cgralloc; /* CGR ID allocator */
+u32 qm_get_pools_sdqcr(void);
+
+int qman_wq_alloc(void);
+#ifdef CONFIG_FSL_PAMU
+#define qman_liodn_fixup __qman_liodn_fixup
+#else
+static inline void qman_liodn_fixup(u16 channel)
+{
+}
+#endif
+void __qman_liodn_fixup(u16 channel);
+void qman_set_sdest(u16 channel, unsigned int cpu_idx);
+
+struct qman_portal *qman_create_affine_portal(
+ const struct qm_portal_config *config,
+ const struct qman_cgrs *cgrs);
+const struct qm_portal_config *qman_destroy_affine_portal(void);
+
+/*
+ * qman_query_fq - Queries FQD fields (via h/w query command)
+ * @fq: the frame queue object to be queried
+ * @fqd: storage for the queried FQD fields
+ */
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
+
+int qman_alloc_fq_table(u32 num_fqids);
+
+/* QMan s/w corenet portal, low-level i/face */
+
+/*
+ * For qm_dqrr_sdqcr_set(); Choose one SOURCE. Choose one COUNT. Choose one
+ * dequeue TYPE. Choose TOKEN (8-bit).
+ * If SOURCE == CHANNELS,
+ * Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n).
+ * You can choose DEDICATED_PRECEDENCE if the portal channel should have
+ * priority.
+ * If SOURCE == SPECIFICWQ,
+ * Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
+ * channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
+ * work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
+ * same value.
+ */
+#define QM_SDQCR_SOURCE_CHANNELS 0x0
+#define QM_SDQCR_SOURCE_SPECIFICWQ 0x40000000
+#define QM_SDQCR_COUNT_EXACT1 0x0
+#define QM_SDQCR_COUNT_UPTO3 0x20000000
+#define QM_SDQCR_DEDICATED_PRECEDENCE 0x10000000
+#define QM_SDQCR_TYPE_MASK 0x03000000
+#define QM_SDQCR_TYPE_NULL 0x0
+#define QM_SDQCR_TYPE_PRIO_QOS 0x01000000
+#define QM_SDQCR_TYPE_ACTIVE_QOS 0x02000000
+#define QM_SDQCR_TYPE_ACTIVE 0x03000000
+#define QM_SDQCR_TOKEN_MASK 0x00ff0000
+#define QM_SDQCR_TOKEN_SET(v) (((v) & 0xff) << 16)
+#define QM_SDQCR_TOKEN_GET(v) (((v) >> 16) & 0xff)
+#define QM_SDQCR_CHANNELS_DEDICATED 0x00008000
+#define QM_SDQCR_SPECIFICWQ_MASK 0x000000f7
+#define QM_SDQCR_SPECIFICWQ_DEDICATED 0x00000000
+#define QM_SDQCR_SPECIFICWQ_POOL(n) ((n) << 4)
+#define QM_SDQCR_SPECIFICWQ_WQ(n) (n)
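+
+/*
+ * For example, qman_test_api.c composes its portal SDQCR as
+ * QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_TYPE_PRIO_QOS |
+ * QM_SDQCR_TOKEN_SET(0x98) | QM_SDQCR_CHANNELS_DEDICATED |
+ * QM_SDQCR_CHANNELS_POOL(2): one SOURCE, one TYPE, a token and the channel
+ * selection, all OR'd together.
+ */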
+
+/* For qm_dqrr_vdqcr_set(): use FQID(n) to fill in the frame queue ID */
+#define QM_VDQCR_FQID_MASK 0x00ffffff
+#define QM_VDQCR_FQID(n) ((n) & QM_VDQCR_FQID_MASK)
+
+/*
+ * Used by all portal interrupt registers except 'inhibit'.
+ * Channels with frame availability:
+ */
+#define QM_PIRQ_DQAVAIL 0x0000ffff
+
+/* The DQAVAIL interrupt fields break down into these bits; */
+#define QM_DQAVAIL_PORTAL 0x8000 /* Portal channel */
+#define QM_DQAVAIL_POOL(n) (0x8000 >> (n)) /* Pool channel, n==[1..15] */
+#define QM_DQAVAIL_MASK 0xffff
+/* This mask contains all the "irqsource" bits visible to API users */
+#define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI)
+
+extern struct qman_portal *affine_portals[NR_CPUS];
+extern struct qman_portal *qman_dma_portal;
+const struct qm_portal_config *qman_get_qm_portal_config(
+ struct qman_portal *portal);
+
+unsigned int qm_get_fqid_maxcnt(void);
+
+int qman_shutdown_fq(u32 fqid);
+
+int qman_requires_cleanup(void);
+void qman_done_cleanup(void);
+void qman_enable_irqs(void);
diff --git a/drivers/soc/fsl/qbman/qman_test.c b/drivers/soc/fsl/qbman/qman_test.c
new file mode 100644
index 0000000000..18f7f0202f
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_test.c
@@ -0,0 +1,62 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_test.h"
+
+MODULE_AUTHOR("Geoff Thorpe");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("QMan testing");
+
+static int test_init(void)
+{
+ int loop = 1;
+ int err = 0;
+
+ while (loop--) {
+#ifdef CONFIG_FSL_QMAN_TEST_STASH
+ err = qman_test_stash();
+ if (err)
+ break;
+#endif
+#ifdef CONFIG_FSL_QMAN_TEST_API
+ err = qman_test_api();
+ if (err)
+ break;
+#endif
+ }
+ return err;
+}
+
+static void test_exit(void)
+{
+}
+
+module_init(test_init);
+module_exit(test_exit);
diff --git a/drivers/soc/fsl/qbman/qman_test.h b/drivers/soc/fsl/qbman/qman_test.h
new file mode 100644
index 0000000000..41bdbc48ca
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_test.h
@@ -0,0 +1,34 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+int qman_test_stash(void);
+int qman_test_api(void);
diff --git a/drivers/soc/fsl/qbman/qman_test_api.c b/drivers/soc/fsl/qbman/qman_test_api.c
new file mode 100644
index 0000000000..28fbddc3c2
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_test_api.c
@@ -0,0 +1,247 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_test.h"
+
+#define CGR_ID 27
+#define POOL_ID 2
+#define FQ_FLAGS QMAN_FQ_FLAG_DYNAMIC_FQID
+#define NUM_ENQUEUES 10
+#define NUM_PARTIAL 4
+#define PORTAL_SDQCR (QM_SDQCR_SOURCE_CHANNELS | \
+ QM_SDQCR_TYPE_PRIO_QOS | \
+ QM_SDQCR_TOKEN_SET(0x98) | \
+ QM_SDQCR_CHANNELS_DEDICATED | \
+ QM_SDQCR_CHANNELS_POOL(POOL_ID))
+#define PORTAL_OPAQUE ((void *)0xf00dbeef)
+#define VDQCR_FLAGS (QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH)
+
+static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *,
+ struct qman_fq *,
+ const struct qm_dqrr_entry *,
+ bool sched_napi);
+static void cb_ern(struct qman_portal *, struct qman_fq *,
+ const union qm_mr_entry *);
+static void cb_fqs(struct qman_portal *, struct qman_fq *,
+ const union qm_mr_entry *);
+
+static struct qm_fd fd, fd_dq;
+static struct qman_fq fq_base = {
+ .cb.dqrr = cb_dqrr,
+ .cb.ern = cb_ern,
+ .cb.fqs = cb_fqs
+};
+static DECLARE_WAIT_QUEUE_HEAD(waitqueue);
+static int retire_complete, sdqcr_complete;
+
+/* Helpers for initialising and "incrementing" a frame descriptor */
+static void fd_init(struct qm_fd *fd)
+{
+ qm_fd_addr_set64(fd, 0xabdeadbeefLLU);
+ qm_fd_set_contig_big(fd, 0x0000ffff);
+ fd->cmd = cpu_to_be32(0xfeedf00d);
+}
+
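+/*
+ * fd_inc() shifts the address left one bit, shortens the frame by one byte
+ * and bumps the cmd word, so successive FDs form a predictable sequence
+ * that fd_neq() below can check dequeues against.
+ */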
+static void fd_inc(struct qm_fd *fd)
+{
+ u64 t = qm_fd_addr_get64(fd);
+ int z = t >> 40;
+ unsigned int len, off;
+ enum qm_fd_format fmt;
+
+ t <<= 1;
+ if (z)
+ t |= 1;
+ qm_fd_addr_set64(fd, t);
+
+ fmt = qm_fd_get_format(fd);
+ off = qm_fd_get_offset(fd);
+ len = qm_fd_get_length(fd);
+ len--;
+ qm_fd_set_param(fd, fmt, off, len);
+
+ be32_add_cpu(&fd->cmd, 1);
+}
+
+/* The only part of the 'fd' we can't memcmp() is the ppid */
+static bool fd_neq(const struct qm_fd *a, const struct qm_fd *b)
+{
+ bool neq = qm_fd_addr_get64(a) != qm_fd_addr_get64(b);
+
+ neq |= qm_fd_get_format(a) != qm_fd_get_format(b);
+ neq |= a->cfg != b->cfg;
+ neq |= a->cmd != b->cmd;
+
+ return neq;
+}
+
+/* test */
+static int do_enqueues(struct qman_fq *fq)
+{
+ unsigned int loop;
+ int err = 0;
+
+ for (loop = 0; loop < NUM_ENQUEUES; loop++) {
+ if (qman_enqueue(fq, &fd)) {
+ pr_crit("qman_enqueue() failed\n");
+ err = -EIO;
+ }
+ fd_inc(&fd);
+ }
+
+ return err;
+}
+
+int qman_test_api(void)
+{
+ unsigned int flags, frmcnt;
+ int err;
+ struct qman_fq *fq = &fq_base;
+
+ pr_info("%s(): Starting\n", __func__);
+ fd_init(&fd);
+ fd_init(&fd_dq);
+
+ /* Initialise (parked) FQ */
+ err = qman_create_fq(0, FQ_FLAGS, fq);
+ if (err) {
+ pr_crit("qman_create_fq() failed\n");
+ goto failed;
+ }
+ err = qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL);
+ if (err) {
+ pr_crit("qman_init_fq() failed\n");
+ goto failed;
+ }
+ /* Do enqueues + VDQCR, twice. (Parked FQ) */
+ err = do_enqueues(fq);
+ if (err)
+ goto failed;
+ pr_info("VDQCR (till-empty);\n");
+ frmcnt = QM_VDQCR_NUMFRAMES_TILLEMPTY;
+ err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
+ if (err) {
+ pr_crit("qman_volatile_dequeue() failed\n");
+ goto failed;
+ }
+ err = do_enqueues(fq);
+ if (err)
+ goto failed;
+ pr_info("VDQCR (%d of %d);\n", NUM_PARTIAL, NUM_ENQUEUES);
+ frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_PARTIAL);
+ err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
+ if (err) {
+ pr_crit("qman_volatile_dequeue() failed\n");
+ goto failed;
+ }
+ pr_info("VDQCR (%d of %d);\n", NUM_ENQUEUES - NUM_PARTIAL,
+ NUM_ENQUEUES);
+ frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_ENQUEUES - NUM_PARTIAL);
+ err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
+ if (err) {
+ pr_err("qman_volatile_dequeue() failed\n");
+ goto failed;
+ }
+
+ err = do_enqueues(fq);
+ if (err)
+ goto failed;
+ pr_info("scheduled dequeue (till-empty)\n");
+ err = qman_schedule_fq(fq);
+ if (err) {
+ pr_crit("qman_schedule_fq() failed\n");
+ goto failed;
+ }
+ wait_event(waitqueue, sdqcr_complete);
+
+ /* Retire and OOS the FQ */
+ err = qman_retire_fq(fq, &flags);
+ if (err < 0) {
+ pr_crit("qman_retire_fq() failed\n");
+ goto failed;
+ }
+ wait_event(waitqueue, retire_complete);
+ if (flags & QMAN_FQ_STATE_BLOCKOOS) {
+ err = -EIO;
+ pr_crit("leaking frames\n");
+ goto failed;
+ }
+ err = qman_oos_fq(fq);
+ if (err) {
+ pr_crit("qman_oos_fq() failed\n");
+ goto failed;
+ }
+ qman_destroy_fq(fq);
+ pr_info("%s(): Finished\n", __func__);
+ return 0;
+
+failed:
+ WARN_ON(1);
+ return err;
+}
+
+static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq,
+ bool sched_napi)
+{
+ if (WARN_ON(fd_neq(&fd_dq, &dq->fd))) {
+ pr_err("BADNESS: dequeued frame doesn't match;\n");
+ return qman_cb_dqrr_consume;
+ }
+ fd_inc(&fd_dq);
+ if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_neq(&fd_dq, &fd)) {
+ sdqcr_complete = 1;
+ wake_up(&waitqueue);
+ }
+ return qman_cb_dqrr_consume;
+}
+
+static void cb_ern(struct qman_portal *p, struct qman_fq *fq,
+ const union qm_mr_entry *msg)
+{
+ pr_crit("cb_ern() unimplemented");
+ WARN_ON(1);
+}
+
+static void cb_fqs(struct qman_portal *p, struct qman_fq *fq,
+ const union qm_mr_entry *msg)
+{
+ u8 verb = (msg->verb & QM_MR_VERB_TYPE_MASK);
+
+ if ((verb != QM_MR_VERB_FQRN) && (verb != QM_MR_VERB_FQRNI)) {
+ pr_crit("unexpected FQS message");
+ WARN_ON(1);
+ return;
+ }
+ pr_info("Retirement message received\n");
+ retire_complete = 1;
+ wake_up(&waitqueue);
+}
diff --git a/drivers/soc/fsl/qbman/qman_test_stash.c b/drivers/soc/fsl/qbman/qman_test_stash.c
new file mode 100644
index 0000000000..b7e8e5ec88
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_test_stash.c
@@ -0,0 +1,629 @@
+/* Copyright 2009 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_test.h"
+
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+
+/*
+ * Algorithm:
+ *
+ * Each cpu will have HP_PER_CPU "handlers" set up, each of which incorporates
+ * an rx/tx pair of FQ objects (both of which are stashed on dequeue). The
+ * organisation of FQIDs is such that the HP_PER_CPU*NUM_CPUS handlers will
+ * shuttle a "hot potato" frame around them such that every forwarding action
+ * moves it from one cpu to another. (The use of more than one handler per cpu
+ * is to allow enough handlers/FQs to truly test the significance of caching -
+ * ie. when cache-expiries are occurring.)
+ *
+ * The "hot potato" frame content will be HP_NUM_WORDS*4 bytes in size, and the
+ * first and last words of the frame data will undergo a transformation step on
+ * each forwarding action. To achieve this, each handler will be assigned a
+ * 32-bit "mixer", that is produced using a 32-bit LFSR. When a frame is
+ * received by a handler, the mixer of the expected sender is XOR'd into all
+ * words of the entire frame, which is then validated against the original
+ * values. Then, before forwarding, the entire frame is XOR'd with the mixer of
+ * the current handler. Apart from validating that the frame is taking the
+ * expected path, this also provides some quasi-realistic overheads to each
+ * forwarding action - dereferencing *all* the frame data, computation, and
+ * conditional branching. There is a "special" handler designated to act as the
+ * instigator of the test by creating and enqueueing the "hot potato" frame, and
+ * to determine when the test has completed by counting HP_LOOPS iterations.
+ *
+ * Init phases:
+ *
+ * 1. prepare each cpu's 'hp_cpu' struct using on_each_cpu(,,1) and link them
+ * into 'hp_cpu_list'. Specifically, set processor_id, allocate HP_PER_CPU
+ * handlers and link-list them (but do no other handler setup).
+ *
+ * 2. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
+ * hp_cpu's 'iterator' to point to its first handler. With each loop,
+ * allocate rx/tx FQIDs and mixer values to the hp_cpu's iterator handler
+ * and advance the iterator for the next loop. This includes a final fixup,
+ * which connects the last handler to the first (and which is why phase 2
+ * and 3 are separate).
+ *
+ * 3. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
+ * hp_cpu's 'iterator' to point to its first handler. With each loop,
+ * initialise FQ objects and advance the iterator for the next loop.
+ * Moreover, do this initialisation on the cpu it applies to so that Rx FQ
+ * initialisation targets the correct cpu.
+ */
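+
+/*
+ * Example (illustrative): with two cpus and HP_PER_CPU == 2, init phase 2
+ * wires the four handlers into the ring cpu0/h0 -> cpu1/h0 -> cpu0/h1 ->
+ * cpu1/h1 -> cpu0/h0, so every forwarding action moves the frame to the
+ * other cpu.
+ */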
+
+/*
+ * helper to run something on all cpus (can't use on_each_cpu(), as that invokes
+ * the fn from irq context, which is too restrictive).
+ */
+struct bstrap {
+ int (*fn)(void);
+ atomic_t started;
+};
+static int bstrap_fn(void *bs)
+{
+ struct bstrap *bstrap = bs;
+ int err;
+
+ atomic_inc(&bstrap->started);
+ err = bstrap->fn();
+ if (err)
+ return err;
+ while (!kthread_should_stop())
+ msleep(20);
+ return 0;
+}
+static int on_all_cpus(int (*fn)(void))
+{
+ int cpu;
+
+ for_each_cpu(cpu, cpu_online_mask) {
+ struct bstrap bstrap = {
+ .fn = fn,
+ .started = ATOMIC_INIT(0)
+ };
+ struct task_struct *k = kthread_create(bstrap_fn, &bstrap,
+ "hotpotato%d", cpu);
+ int ret;
+
+ if (IS_ERR(k))
+ return -ENOMEM;
+ kthread_bind(k, cpu);
+ wake_up_process(k);
+ /*
+ * If we call kthread_stop() before the "wake up" has had an
+ * effect, then the thread may exit with -EINTR without ever
+ * running the function. So poll until it's started before
+ * requesting it to stop.
+ */
+ while (!atomic_read(&bstrap.started))
+ msleep(20);
+ ret = kthread_stop(k);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+struct hp_handler {
+
+ /* The following data is stashed when 'rx' is dequeued; */
+ /* -------------- */
+ /* The Rx FQ, dequeues of which will stash the entire hp_handler */
+ struct qman_fq rx;
+ /* The Tx FQ we should forward to */
+ struct qman_fq tx;
+ /* The value we XOR post-dequeue, prior to validating */
+ u32 rx_mixer;
+ /* The value we XOR pre-enqueue, after validating */
+ u32 tx_mixer;
+ /* what the hotpotato address should be on dequeue */
+ dma_addr_t addr;
+ u32 *frame_ptr;
+
+ /* The following data isn't (necessarily) stashed on dequeue; */
+ /* -------------- */
+ u32 fqid_rx, fqid_tx;
+ /* list node for linking us into 'hp_cpu' */
+ struct list_head node;
+ /* Just to check ... */
+ unsigned int processor_id;
+} ____cacheline_aligned;
+
+struct hp_cpu {
+ /* identify the cpu we run on; */
+ unsigned int processor_id;
+ /* root node for the per-cpu list of handlers */
+ struct list_head handlers;
+ /* list node for linking us into 'hp_cpu_list' */
+ struct list_head node;
+ /*
+	 * when repeatedly scanning 'hp_cpu_list', each time linking the n'th
+ * handlers together, this is used as per-cpu iterator state
+ */
+ struct hp_handler *iterator;
+};
+
+/* Each cpu has one of these */
+static DEFINE_PER_CPU(struct hp_cpu, hp_cpus);
+
+/* links together the hp_cpu structs, in first-come first-serve order. */
+static LIST_HEAD(hp_cpu_list);
+static DEFINE_SPINLOCK(hp_lock);
+
+static unsigned int hp_cpu_list_length;
+
+/* the "special" handler, that starts and terminates the test. */
+static struct hp_handler *special_handler;
+static int loop_counter;
+
+/* handlers are allocated out of this, so they're properly aligned. */
+static struct kmem_cache *hp_handler_slab;
+
+/* this is the frame data */
+static void *__frame_ptr;
+static u32 *frame_ptr;
+static dma_addr_t frame_dma;
+
+/* needed for dma_map*() */
+static const struct qm_portal_config *pcfg;
+
+/* the main function waits on this */
+static DECLARE_WAIT_QUEUE_HEAD(queue);
+
+#define HP_PER_CPU 2
+#define HP_LOOPS 8
+/* 80 32-bit words (320 bytes) of frame data, spanning several cachelines */
+#define HP_NUM_WORDS 80
+/* First word of the LFSR-based frame data */
+#define HP_FIRST_WORD 0xabbaf00d
+
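+/*
+ * do_lfsr() is a 32-bit Galois LFSR with tap mask 0xd0000001. Worked step
+ * (illustrative): do_lfsr(0xabbaf00d) shifts out a 1 bit, so the result is
+ * (0xabbaf00d >> 1) ^ 0xd0000001 = 0x85dd7807.
+ */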
+static inline u32 do_lfsr(u32 prev)
+{
+ return (prev >> 1) ^ (-(prev & 1u) & 0xd0000001u);
+}
+
+static int allocate_frame_data(void)
+{
+ u32 lfsr = HP_FIRST_WORD;
+ int loop;
+
+ if (!qman_dma_portal) {
+ pr_crit("portal not available\n");
+ return -EIO;
+ }
+
+ pcfg = qman_get_qm_portal_config(qman_dma_portal);
+
+ __frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL);
+ if (!__frame_ptr)
+ return -ENOMEM;
+
+ frame_ptr = PTR_ALIGN(__frame_ptr, 64);
+ for (loop = 0; loop < HP_NUM_WORDS; loop++) {
+ frame_ptr[loop] = lfsr;
+ lfsr = do_lfsr(lfsr);
+ }
+
+ frame_dma = dma_map_single(pcfg->dev, frame_ptr, 4 * HP_NUM_WORDS,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(pcfg->dev, frame_dma)) {
+ pr_crit("dma mapping failure\n");
+ kfree(__frame_ptr);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void deallocate_frame_data(void)
+{
+ dma_unmap_single(pcfg->dev, frame_dma, 4 * HP_NUM_WORDS,
+ DMA_BIDIRECTIONAL);
+ kfree(__frame_ptr);
+}
+
+static inline int process_frame_data(struct hp_handler *handler,
+ const struct qm_fd *fd)
+{
+ u32 *p = handler->frame_ptr;
+ u32 lfsr = HP_FIRST_WORD;
+ int loop;
+
+ if (qm_fd_addr_get64(fd) != handler->addr) {
+ pr_crit("bad frame address, [%llX != %llX]\n",
+ qm_fd_addr_get64(fd), handler->addr);
+ return -EIO;
+ }
+ for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
+ *p ^= handler->rx_mixer;
+ if (*p != lfsr) {
+ pr_crit("corrupt frame data");
+ return -EIO;
+ }
+ *p ^= handler->tx_mixer;
+ lfsr = do_lfsr(lfsr);
+ }
+ return 0;
+}
+
+static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr,
+ bool sched_napi)
+{
+ struct hp_handler *handler = (struct hp_handler *)fq;
+
+ if (process_frame_data(handler, &dqrr->fd)) {
+ WARN_ON(1);
+ goto skip;
+ }
+ if (qman_enqueue(&handler->tx, &dqrr->fd)) {
+ pr_crit("qman_enqueue() failed");
+ WARN_ON(1);
+ }
+skip:
+ return qman_cb_dqrr_consume;
+}
+
+static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr,
+ bool sched_napi)
+{
+ struct hp_handler *handler = (struct hp_handler *)fq;
+
+ process_frame_data(handler, &dqrr->fd);
+ if (++loop_counter < HP_LOOPS) {
+ if (qman_enqueue(&handler->tx, &dqrr->fd)) {
+ pr_crit("qman_enqueue() failed");
+ WARN_ON(1);
+ goto skip;
+ }
+ } else {
+ pr_info("Received final (%dth) frame\n", loop_counter);
+ wake_up(&queue);
+ }
+skip:
+ return qman_cb_dqrr_consume;
+}
+
+static int create_per_cpu_handlers(void)
+{
+ struct hp_handler *handler;
+ int loop;
+ struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);
+
+ hp_cpu->processor_id = smp_processor_id();
+ spin_lock(&hp_lock);
+ list_add_tail(&hp_cpu->node, &hp_cpu_list);
+ hp_cpu_list_length++;
+ spin_unlock(&hp_lock);
+ INIT_LIST_HEAD(&hp_cpu->handlers);
+ for (loop = 0; loop < HP_PER_CPU; loop++) {
+ handler = kmem_cache_alloc(hp_handler_slab, GFP_KERNEL);
+ if (!handler) {
+ pr_crit("kmem_cache_alloc() failed");
+ WARN_ON(1);
+ return -EIO;
+ }
+ handler->processor_id = hp_cpu->processor_id;
+ handler->addr = frame_dma;
+ handler->frame_ptr = frame_ptr;
+ list_add_tail(&handler->node, &hp_cpu->handlers);
+ }
+ return 0;
+}
+
+static int destroy_per_cpu_handlers(void)
+{
+ struct list_head *loop, *tmp;
+ struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);
+
+ spin_lock(&hp_lock);
+ list_del(&hp_cpu->node);
+ spin_unlock(&hp_lock);
+ list_for_each_safe(loop, tmp, &hp_cpu->handlers) {
+ u32 flags = 0;
+ struct hp_handler *handler = list_entry(loop, struct hp_handler,
+ node);
+ if (qman_retire_fq(&handler->rx, &flags) ||
+ (flags & QMAN_FQ_STATE_BLOCKOOS)) {
+ pr_crit("qman_retire_fq(rx) failed, flags: %x", flags);
+ WARN_ON(1);
+ return -EIO;
+ }
+ if (qman_oos_fq(&handler->rx)) {
+ pr_crit("qman_oos_fq(rx) failed");
+ WARN_ON(1);
+ return -EIO;
+ }
+ qman_destroy_fq(&handler->rx);
+ qman_destroy_fq(&handler->tx);
+ qman_release_fqid(handler->fqid_rx);
+ list_del(&handler->node);
+ kmem_cache_free(hp_handler_slab, handler);
+ }
+ return 0;
+}
+
+static inline u8 num_cachelines(u32 offset)
+{
+ u8 res = (offset + (L1_CACHE_BYTES - 1))
+ / (L1_CACHE_BYTES);
+ if (res > 3)
+ return 3;
+ return res;
+}
+#define STASH_DATA_CL \
+ num_cachelines(HP_NUM_WORDS * 4)
+#define STASH_CTX_CL \
+ num_cachelines(offsetof(struct hp_handler, fqid_rx))
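+/*
+ * Illustrative: assuming 64-byte cachelines, STASH_DATA_CL is
+ * min(DIV_ROUND_UP(320, 64), 3) = 3 cachelines of frame data, and
+ * STASH_CTX_CL covers the hp_handler fields up to (but not including)
+ * 'fqid_rx'.
+ */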
+
+static int init_handler(void *h)
+{
+ struct qm_mcc_initfq opts;
+ struct hp_handler *handler = h;
+ int err;
+
+ if (handler->processor_id != smp_processor_id()) {
+ err = -EIO;
+ goto failed;
+ }
+ /* Set up rx */
+ memset(&handler->rx, 0, sizeof(handler->rx));
+ if (handler == special_handler)
+ handler->rx.cb.dqrr = special_dqrr;
+ else
+ handler->rx.cb.dqrr = normal_dqrr;
+ err = qman_create_fq(handler->fqid_rx, 0, &handler->rx);
+ if (err) {
+ pr_crit("qman_create_fq(rx) failed");
+ goto failed;
+ }
+ memset(&opts, 0, sizeof(opts));
+ opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL |
+ QM_INITFQ_WE_CONTEXTA);
+ opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING);
+ qm_fqd_set_stashing(&opts.fqd, 0, STASH_DATA_CL, STASH_CTX_CL);
+ err = qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED |
+ QMAN_INITFQ_FLAG_LOCAL, &opts);
+ if (err) {
+ pr_crit("qman_init_fq(rx) failed");
+ goto failed;
+ }
+ /* Set up tx */
+ memset(&handler->tx, 0, sizeof(handler->tx));
+ err = qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY,
+ &handler->tx);
+ if (err) {
+ pr_crit("qman_create_fq(tx) failed");
+ goto failed;
+ }
+
+ return 0;
+failed:
+ return err;
+}
+
+static void init_handler_cb(void *h)
+{
+ if (init_handler(h))
+ WARN_ON(1);
+}
+
+static int init_phase2(void)
+{
+ int loop;
+ u32 fqid = 0;
+ u32 lfsr = 0xdeadbeef;
+ struct hp_cpu *hp_cpu;
+ struct hp_handler *handler;
+
+ for (loop = 0; loop < HP_PER_CPU; loop++) {
+ list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
+ int err;
+
+ if (!loop)
+ hp_cpu->iterator = list_first_entry(
+ &hp_cpu->handlers,
+ struct hp_handler, node);
+ else
+ hp_cpu->iterator = list_entry(
+ hp_cpu->iterator->node.next,
+ struct hp_handler, node);
+ /* Rx FQID is the previous handler's Tx FQID */
+ hp_cpu->iterator->fqid_rx = fqid;
+ /* Allocate new FQID for Tx */
+ err = qman_alloc_fqid(&fqid);
+ if (err) {
+ pr_crit("qman_alloc_fqid() failed");
+ return err;
+ }
+ hp_cpu->iterator->fqid_tx = fqid;
+ /* Rx mixer is the previous handler's Tx mixer */
+ hp_cpu->iterator->rx_mixer = lfsr;
+ /* Get new mixer for Tx */
+ lfsr = do_lfsr(lfsr);
+ hp_cpu->iterator->tx_mixer = lfsr;
+ }
+ }
+ /* Fix up the first handler (fqid_rx==0, rx_mixer=0xdeadbeef) */
+ hp_cpu = list_first_entry(&hp_cpu_list, struct hp_cpu, node);
+ handler = list_first_entry(&hp_cpu->handlers, struct hp_handler, node);
+ if (handler->fqid_rx != 0 || handler->rx_mixer != 0xdeadbeef)
+ return 1;
+ handler->fqid_rx = fqid;
+ handler->rx_mixer = lfsr;
+ /* and tag it as our "special" handler */
+ special_handler = handler;
+ return 0;
+}
+
+static int init_phase3(void)
+{
+ int loop, err;
+ struct hp_cpu *hp_cpu;
+
+ for (loop = 0; loop < HP_PER_CPU; loop++) {
+ list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
+ if (!loop)
+ hp_cpu->iterator = list_first_entry(
+ &hp_cpu->handlers,
+ struct hp_handler, node);
+ else
+ hp_cpu->iterator = list_entry(
+ hp_cpu->iterator->node.next,
+ struct hp_handler, node);
+ preempt_disable();
+ if (hp_cpu->processor_id == smp_processor_id()) {
+ err = init_handler(hp_cpu->iterator);
+ if (err)
+ return err;
+ } else {
+ smp_call_function_single(hp_cpu->processor_id,
+ init_handler_cb, hp_cpu->iterator, 1);
+ }
+ preempt_enable();
+ }
+ }
+ return 0;
+}
+
+static int send_first_frame(void *ignore)
+{
+ u32 *p = special_handler->frame_ptr;
+ u32 lfsr = HP_FIRST_WORD;
+ int loop, err;
+ struct qm_fd fd;
+
+ if (special_handler->processor_id != smp_processor_id()) {
+ err = -EIO;
+ goto failed;
+ }
+ memset(&fd, 0, sizeof(fd));
+ qm_fd_addr_set64(&fd, special_handler->addr);
+ qm_fd_set_contig_big(&fd, HP_NUM_WORDS * 4);
+ for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
+ if (*p != lfsr) {
+ err = -EIO;
+ pr_crit("corrupt frame data");
+ goto failed;
+ }
+ *p ^= special_handler->tx_mixer;
+ lfsr = do_lfsr(lfsr);
+ }
+ pr_info("Sending first frame\n");
+ err = qman_enqueue(&special_handler->tx, &fd);
+ if (err) {
+ pr_crit("qman_enqueue() failed");
+ goto failed;
+ }
+
+ return 0;
+failed:
+ return err;
+}
+
+static void send_first_frame_cb(void *ignore)
+{
+ if (send_first_frame(NULL))
+ WARN_ON(1);
+}
+
+int qman_test_stash(void)
+{
+ int err;
+
+ if (cpumask_weight(cpu_online_mask) < 2) {
+ pr_info("%s(): skip - only 1 CPU\n", __func__);
+ return 0;
+ }
+
+ pr_info("%s(): Starting\n", __func__);
+
+ hp_cpu_list_length = 0;
+ loop_counter = 0;
+ hp_handler_slab = kmem_cache_create("hp_handler_slab",
+ sizeof(struct hp_handler), L1_CACHE_BYTES,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!hp_handler_slab) {
+ err = -EIO;
+ pr_crit("kmem_cache_create() failed");
+ goto failed;
+ }
+
+ err = allocate_frame_data();
+ if (err)
+ goto failed;
+
+ /* Init phase 1 */
+ pr_info("Creating %d handlers per cpu...\n", HP_PER_CPU);
+ if (on_all_cpus(create_per_cpu_handlers)) {
+ err = -EIO;
+		pr_crit("on_all_cpus() failed");
+ goto failed;
+ }
+ pr_info("Number of cpus: %d, total of %d handlers\n",
+ hp_cpu_list_length, hp_cpu_list_length * HP_PER_CPU);
+
+ err = init_phase2();
+ if (err)
+ goto failed;
+
+ err = init_phase3();
+ if (err)
+ goto failed;
+
+ preempt_disable();
+ if (special_handler->processor_id == smp_processor_id()) {
+ err = send_first_frame(NULL);
+ if (err)
+ goto failed;
+ } else {
+ smp_call_function_single(special_handler->processor_id,
+ send_first_frame_cb, NULL, 1);
+ }
+ preempt_enable();
+
+ wait_event(queue, loop_counter == HP_LOOPS);
+ deallocate_frame_data();
+ if (on_all_cpus(destroy_per_cpu_handlers)) {
+ err = -EIO;
+		pr_crit("on_all_cpus() failed");
+ goto failed;
+ }
+ kmem_cache_destroy(hp_handler_slab);
+ pr_info("%s(): Finished\n", __func__);
+
+ return 0;
+failed:
+ WARN_ON(1);
+ return err;
+}
diff --git a/drivers/soc/fsl/qe/Kconfig b/drivers/soc/fsl/qe/Kconfig
new file mode 100644
index 0000000000..fa9ffbed0e
--- /dev/null
+++ b/drivers/soc/fsl/qe/Kconfig
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# QE Communication options
+#
+
+config QUICC_ENGINE
+ bool "QUICC Engine (QE) framework support"
+ depends on OF && HAS_IOMEM
+ depends on PPC || ARM || ARM64 || COMPILE_TEST
+ select GENERIC_ALLOCATOR
+ select CRC32
+ help
+ The QUICC Engine (QE) is a new generation of communications
+ coprocessors on Freescale embedded CPUs (akin to CPM in older chips).
+ Selecting this option means that you wish to build a kernel
+ for a machine with a QE coprocessor.
+
+config UCC_SLOW
+ bool
+ default y if SERIAL_QE
+ help
+	  This option provides qe_lib support for UCC slow
+	  protocols: UART, BISYNC, QMC.
+
+config UCC_FAST
+ bool
+ default y if UCC_GETH || QE_TDM
+ help
+	  This option provides qe_lib support for UCC fast
+	  protocols: HDLC, Ethernet, ATM, transparent.
+
+config UCC
+ bool
+ default y if UCC_FAST || UCC_SLOW
+
+config CPM_TSA
+ tristate "CPM TSA support"
+ depends on OF && HAS_IOMEM
+ depends on CPM1 || (CPM && COMPILE_TEST)
+ help
+	  Freescale CPM Time Slot Assigner (TSA) controller.
+
+	  This option enables support for this controller.
+
+config CPM_QMC
+ tristate "CPM QMC support"
+ depends on OF && HAS_IOMEM
+ depends on CPM1 || (FSL_SOC && CPM && COMPILE_TEST)
+ depends on CPM_TSA
+ help
+	  Freescale CPM QUICC Multichannel Controller (QMC).
+
+	  This option enables support for this controller.
+
+config QE_TDM
+ bool
+ default y if FSL_UCC_HDLC
+
+config QE_USB
+ bool
+ depends on QUICC_ENGINE
+ default y if USB_FSL_QE
+ help
+ QE USB Controller support
diff --git a/drivers/soc/fsl/qe/Makefile b/drivers/soc/fsl/qe/Makefile
new file mode 100644
index 0000000000..ec8506e131
--- /dev/null
+++ b/drivers/soc/fsl/qe/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the linux ppc-specific parts of QE
+#
+obj-$(CONFIG_QUICC_ENGINE)	+= qe.o qe_common.o qe_ic.o qe_io.o
+obj-$(CONFIG_CPM) += qe_common.o
+obj-$(CONFIG_CPM_TSA) += tsa.o
+obj-$(CONFIG_CPM_QMC) += qmc.o
+obj-$(CONFIG_UCC) += ucc.o
+obj-$(CONFIG_UCC_SLOW) += ucc_slow.o
+obj-$(CONFIG_UCC_FAST) += ucc_fast.o
+obj-$(CONFIG_QE_TDM) += qe_tdm.o
+obj-$(CONFIG_QE_USB) += usb.o
+obj-$(CONFIG_QE_GPIO) += gpio.o
diff --git a/drivers/soc/fsl/qe/gpio.c b/drivers/soc/fsl/qe/gpio.c
new file mode 100644
index 0000000000..3ef24ba024
--- /dev/null
+++ b/drivers/soc/fsl/qe/gpio.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * QUICC Engine GPIOs
+ *
+ * Copyright (c) MontaVista Software, Inc. 2008.
+ *
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/gpio/legacy-of-mm-gpiochip.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/property.h>
+
+#include <soc/fsl/qe/qe.h>
+
+struct qe_gpio_chip {
+ struct of_mm_gpio_chip mm_gc;
+ spinlock_t lock;
+
+ /* shadowed data register to clear/set bits safely */
+ u32 cpdata;
+
+ /* saved_regs used to restore dedicated functions */
+ struct qe_pio_regs saved_regs;
+};
+
+static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
+{
+ struct qe_gpio_chip *qe_gc =
+ container_of(mm_gc, struct qe_gpio_chip, mm_gc);
+ struct qe_pio_regs __iomem *regs = mm_gc->regs;
+
+ qe_gc->cpdata = ioread32be(&regs->cpdata);
+ qe_gc->saved_regs.cpdata = qe_gc->cpdata;
+ qe_gc->saved_regs.cpdir1 = ioread32be(&regs->cpdir1);
+ qe_gc->saved_regs.cpdir2 = ioread32be(&regs->cpdir2);
+ qe_gc->saved_regs.cppar1 = ioread32be(&regs->cppar1);
+ qe_gc->saved_regs.cppar2 = ioread32be(&regs->cppar2);
+ qe_gc->saved_regs.cpodr = ioread32be(&regs->cpodr);
+}
+
+static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct qe_pio_regs __iomem *regs = mm_gc->regs;
+ u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio);
+
+ return !!(ioread32be(&regs->cpdata) & pin_mask);
+}
+
+static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
+ struct qe_pio_regs __iomem *regs = mm_gc->regs;
+ unsigned long flags;
+ u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio);
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+
+ if (val)
+ qe_gc->cpdata |= pin_mask;
+ else
+ qe_gc->cpdata &= ~pin_mask;
+
+ iowrite32be(qe_gc->cpdata, &regs->cpdata);
+
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+}
+
+static void qe_gpio_set_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
+ struct qe_pio_regs __iomem *regs = mm_gc->regs;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+
+ for (i = 0; i < gc->ngpio; i++) {
+ if (*mask == 0)
+ break;
+ if (__test_and_clear_bit(i, mask)) {
+ if (test_bit(i, bits))
+ qe_gc->cpdata |= (1U << (QE_PIO_PINS - 1 - i));
+ else
+ qe_gc->cpdata &= ~(1U << (QE_PIO_PINS - 1 - i));
+ }
+ }
+
+ iowrite32be(qe_gc->cpdata, &regs->cpdata);
+
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+}
+
+static int qe_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+
+ __par_io_config_pin(mm_gc->regs, gpio, QE_PIO_DIR_IN, 0, 0, 0);
+
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+
+ return 0;
+}
+
+static int qe_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
+ unsigned long flags;
+
+ qe_gpio_set(gc, gpio, val);
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+
+ __par_io_config_pin(mm_gc->regs, gpio, QE_PIO_DIR_OUT, 0, 0, 0);
+
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+
+ return 0;
+}
+
+struct qe_pin {
+ /*
+ * The qe_gpio_chip name is unfortunate, we should change that to
+ * something like qe_pio_controller. Someday.
+ */
+ struct qe_gpio_chip *controller;
+ int num;
+};
+
+/**
+ * qe_pin_request - Request a QE pin
+ * @dev: device to get the pin from
+ * @index: index of the pin in the device tree
+ * Context: non-atomic
+ *
+ * This function returns a qe_pin so that you can use it with the rest of
+ * the QE Pin Multiplexing API.
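+ *
+ * Example (illustrative):
+ *	struct qe_pin *pin = qe_pin_request(dev, 0);
+ *	if (!IS_ERR(pin))
+ *		qe_pin_set_gpio(pin);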
+ */
+struct qe_pin *qe_pin_request(struct device *dev, int index)
+{
+ struct qe_pin *qe_pin;
+ struct gpio_chip *gc;
+ struct gpio_desc *gpiod;
+ int gpio_num;
+ int err;
+
+ qe_pin = kzalloc(sizeof(*qe_pin), GFP_KERNEL);
+ if (!qe_pin) {
+ dev_dbg(dev, "%s: can't allocate memory\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+	/*
+	 * Request the gpio as nonexclusive, as it was likely reserved by
+	 * the caller. We are not planning on controlling it; we only need
+	 * the descriptor to get to the gpio chip structure.
+	 */
+ gpiod = gpiod_get_index(dev, NULL, index,
+ GPIOD_ASIS | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
+ err = PTR_ERR_OR_ZERO(gpiod);
+ if (err)
+ goto err0;
+
+ gc = gpiod_to_chip(gpiod);
+ gpio_num = desc_to_gpio(gpiod);
+ /* We no longer need this descriptor */
+ gpiod_put(gpiod);
+
+ if (WARN_ON(!gc)) {
+ err = -ENODEV;
+ goto err0;
+ }
+
+ qe_pin->controller = gpiochip_get_data(gc);
+ /*
+ * FIXME: this gets the local offset on the gpio_chip so that the driver
+ * can manipulate pin control settings through its custom API. The real
+ * solution is to create a real pin control driver for this.
+ */
+ qe_pin->num = gpio_num - gc->base;
+
+ if (!fwnode_device_is_compatible(gc->fwnode, "fsl,mpc8323-qe-pario-bank")) {
+ dev_dbg(dev, "%s: tried to get a non-qe pin\n", __func__);
+ err = -EINVAL;
+ goto err0;
+ }
+ return qe_pin;
+err0:
+ kfree(qe_pin);
+ dev_dbg(dev, "%s failed with status %d\n", __func__, err);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(qe_pin_request);
+
+/**
+ * qe_pin_free - Free a pin
+ * @qe_pin: pointer to the qe_pin structure
+ * Context: any
+ *
+ * This function frees the qe_pin structure and makes a pin available
+ * for further qe_pin_request() calls.
+ */
+void qe_pin_free(struct qe_pin *qe_pin)
+{
+ kfree(qe_pin);
+}
+EXPORT_SYMBOL(qe_pin_free);
+
+/**
+ * qe_pin_set_dedicated - Revert a pin to a dedicated peripheral function mode
+ * @qe_pin: pointer to the qe_pin structure
+ * Context: any
+ *
+ * This function resets a pin to a dedicated peripheral function that
+ * has been set up by the firmware.
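+ *
+ * The direction and pin-assignment fields are two bits per pin, split
+ * across cpdir1/cppar1 and cpdir2/cppar2, while data and open-drain use
+ * one bit per pin. Illustrative, with QE_PIO_PINS == 32: pin 3 uses
+ * mask1 = 1 << 28 and mask2 = 0x3 << 24 in the first register pair.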
+ */
+void qe_pin_set_dedicated(struct qe_pin *qe_pin)
+{
+ struct qe_gpio_chip *qe_gc = qe_pin->controller;
+ struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs;
+ struct qe_pio_regs *sregs = &qe_gc->saved_regs;
+ int pin = qe_pin->num;
+ u32 mask1 = 1 << (QE_PIO_PINS - (pin + 1));
+ u32 mask2 = 0x3 << (QE_PIO_PINS - (pin % (QE_PIO_PINS / 2) + 1) * 2);
+ bool second_reg = pin > (QE_PIO_PINS / 2) - 1;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+
+ if (second_reg) {
+ qe_clrsetbits_be32(&regs->cpdir2, mask2,
+ sregs->cpdir2 & mask2);
+ qe_clrsetbits_be32(&regs->cppar2, mask2,
+ sregs->cppar2 & mask2);
+ } else {
+ qe_clrsetbits_be32(&regs->cpdir1, mask2,
+ sregs->cpdir1 & mask2);
+ qe_clrsetbits_be32(&regs->cppar1, mask2,
+ sregs->cppar1 & mask2);
+ }
+
+ if (sregs->cpdata & mask1)
+ qe_gc->cpdata |= mask1;
+ else
+ qe_gc->cpdata &= ~mask1;
+
+ iowrite32be(qe_gc->cpdata, &regs->cpdata);
+ qe_clrsetbits_be32(&regs->cpodr, mask1, sregs->cpodr & mask1);
+
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+}
+EXPORT_SYMBOL(qe_pin_set_dedicated);
+
+/**
+ * qe_pin_set_gpio - Set a pin to the GPIO mode
+ * @qe_pin: pointer to the qe_pin structure
+ * Context: any
+ *
+ * This function sets a pin to the GPIO mode.
+ */
+void qe_pin_set_gpio(struct qe_pin *qe_pin)
+{
+ struct qe_gpio_chip *qe_gc = qe_pin->controller;
+ struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+
+	/* Make it an input by default; the GPIO API can change that later. */
+ __par_io_config_pin(regs, qe_pin->num, QE_PIO_DIR_IN, 0, 0, 0);
+
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+}
+EXPORT_SYMBOL(qe_pin_set_gpio);
+
+static int __init qe_add_gpiochips(void)
+{
+ struct device_node *np;
+
+ for_each_compatible_node(np, NULL, "fsl,mpc8323-qe-pario-bank") {
+ int ret;
+ struct qe_gpio_chip *qe_gc;
+ struct of_mm_gpio_chip *mm_gc;
+ struct gpio_chip *gc;
+
+ qe_gc = kzalloc(sizeof(*qe_gc), GFP_KERNEL);
+ if (!qe_gc) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ spin_lock_init(&qe_gc->lock);
+
+ mm_gc = &qe_gc->mm_gc;
+ gc = &mm_gc->gc;
+
+ mm_gc->save_regs = qe_gpio_save_regs;
+ gc->ngpio = QE_PIO_PINS;
+ gc->direction_input = qe_gpio_dir_in;
+ gc->direction_output = qe_gpio_dir_out;
+ gc->get = qe_gpio_get;
+ gc->set = qe_gpio_set;
+ gc->set_multiple = qe_gpio_set_multiple;
+
+ ret = of_mm_gpiochip_add_data(np, mm_gc, qe_gc);
+ if (ret)
+ goto err;
+ continue;
+err:
+ pr_err("%pOF: registration failed with status %d\n",
+ np, ret);
+ kfree(qe_gc);
+ /* try others anyway */
+ }
+ return 0;
+}
+arch_initcall(qe_add_gpiochips);
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c
new file mode 100644
index 0000000000..3ee0c7c1e9
--- /dev/null
+++ b/drivers/soc/fsl/qe/qe.c
@@ -0,0 +1,682 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2006-2010 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors: Shlomi Gridish <gridish@freescale.com>
+ * Li Yang <leoli@freescale.com>
+ * Based on cpm2_common.c from Dan Malek (dmalek@jlc.net)
+ *
+ * Description:
+ * General Purpose functions for the global management of the
+ * QUICC Engine (QE).
+ */
+#include <linux/bitmap.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/iopoll.h>
+#include <linux/crc32.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+
+static void qe_snums_init(void);
+static int qe_sdma_init(void);
+
+static DEFINE_SPINLOCK(qe_lock);
+DEFINE_SPINLOCK(cmxgcr_lock);
+EXPORT_SYMBOL(cmxgcr_lock);
+
+/* We allocate this here because it is used almost exclusively for
+ * the communication processor devices.
+ */
+struct qe_immap __iomem *qe_immr;
+EXPORT_SYMBOL(qe_immr);
+
+static u8 snums[QE_NUM_OF_SNUM]; /* Dynamically allocated SNUMs */
+static DECLARE_BITMAP(snum_state, QE_NUM_OF_SNUM);
+static unsigned int qe_num_of_snum;
+
+static phys_addr_t qebase = -1;
+
+static struct device_node *qe_get_device_node(void)
+{
+ struct device_node *qe;
+
+ /*
+ * Newer device trees have an "fsl,qe" compatible property for the QE
+ * node, but we still need to support older device trees.
+ */
+ qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
+ if (qe)
+ return qe;
+ return of_find_node_by_type(NULL, "qe");
+}
+
+static phys_addr_t get_qe_base(void)
+{
+ struct device_node *qe;
+ int ret;
+ struct resource res;
+
+ if (qebase != -1)
+ return qebase;
+
+ qe = qe_get_device_node();
+ if (!qe)
+ return qebase;
+
+ ret = of_address_to_resource(qe, 0, &res);
+ if (!ret)
+ qebase = res.start;
+ of_node_put(qe);
+
+ return qebase;
+}
+
+void qe_reset(void)
+{
+ if (qe_immr == NULL)
+ qe_immr = ioremap(get_qe_base(), QE_IMMAP_SIZE);
+
+ qe_snums_init();
+
+ qe_issue_cmd(QE_RESET, QE_CR_SUBBLOCK_INVALID,
+ QE_CR_PROTOCOL_UNSPECIFIED, 0);
+
+ /* Reclaim the MURAM memory for our use. */
+ qe_muram_init();
+
+ if (qe_sdma_init())
+ panic("sdma init failed!");
+}
+
+int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
+{
+ unsigned long flags;
+ u8 mcn_shift = 0, dev_shift = 0;
+ u32 val;
+ int ret;
+
+ spin_lock_irqsave(&qe_lock, flags);
+ if (cmd == QE_RESET) {
+ iowrite32be((u32)(cmd | QE_CR_FLG), &qe_immr->cp.cecr);
+ } else {
+ if (cmd == QE_ASSIGN_PAGE) {
+ /* Here device is the SNUM, not sub-block */
+ dev_shift = QE_CR_SNUM_SHIFT;
+ } else if (cmd == QE_ASSIGN_RISC) {
+			/* Here device is the SNUM, and mcn_protocol is the
+			 * e_QeCmdRiscAssignment value */
+ dev_shift = QE_CR_SNUM_SHIFT;
+ mcn_shift = QE_CR_MCN_RISC_ASSIGN_SHIFT;
+ } else {
+ if (device == QE_CR_SUBBLOCK_USB)
+ mcn_shift = QE_CR_MCN_USB_SHIFT;
+ else
+ mcn_shift = QE_CR_MCN_NORMAL_SHIFT;
+ }
+
+ iowrite32be(cmd_input, &qe_immr->cp.cecdr);
+ iowrite32be((cmd | QE_CR_FLG | ((u32)device << dev_shift) | (u32)mcn_protocol << mcn_shift),
+ &qe_immr->cp.cecr);
+ }
+
+ /* wait for the QE_CR_FLG to clear */
+ ret = readx_poll_timeout_atomic(ioread32be, &qe_immr->cp.cecr, val,
+ (val & QE_CR_FLG) == 0, 0, 100);
+ /* On timeout, ret is -ETIMEDOUT, otherwise it will be 0. */
+ spin_unlock_irqrestore(&qe_lock, flags);
+
+ return ret == 0;
+}
+EXPORT_SYMBOL(qe_issue_cmd);
+
+/* Set a baud rate generator. This needs lots of work. There are
+ * 16 BRGs, which can be connected to the QE channels or output
+ * as clocks. The BRGs are in two different blocks of internal
+ * memory-mapped space.
+ * The BRG clock is the QE clock divided by 2.
+ * It was set up long ago during the initial boot phase and is
+ * given to us.
+ * Baud rate clocks are zero-based in the driver code (as that maps
+ * to port numbers). Documentation uses 1-based numbering.
+ */
+static unsigned int brg_clk = 0;
+
+#define CLK_GRAN (1000)
+#define CLK_GRAN_LIMIT (5)
+
+unsigned int qe_get_brg_clk(void)
+{
+ struct device_node *qe;
+ u32 brg;
+ unsigned int mod;
+
+ if (brg_clk)
+ return brg_clk;
+
+ qe = qe_get_device_node();
+ if (!qe)
+ return brg_clk;
+
+ if (!of_property_read_u32(qe, "brg-frequency", &brg))
+ brg_clk = brg;
+
+ of_node_put(qe);
+
+	/* round brg_clk if it is close to a multiple of CLK_GRAN */
+ mod = brg_clk % CLK_GRAN;
+ if (mod) {
+ if (mod < CLK_GRAN_LIMIT)
+ brg_clk -= mod;
+ else if (mod > (CLK_GRAN - CLK_GRAN_LIMIT))
+ brg_clk += CLK_GRAN - mod;
+ }
+
+ return brg_clk;
+}
+EXPORT_SYMBOL(qe_get_brg_clk);
+
+#define PVR_VER_836x 0x8083
+#define PVR_VER_832x 0x8084
+
+static bool qe_general4_errata(void)
+{
+#ifdef CONFIG_PPC32
+ return pvr_version_is(PVR_VER_836x) || pvr_version_is(PVR_VER_832x);
+#endif
+ return false;
+}
+
+/* Program the BRG to the given sampling rate and multiplier
+ *
+ * @brg: the BRG, QE_BRG1 - QE_BRG16
+ * @rate: the desired sampling rate
+ * @multiplier: corresponds to the value programmed in GUMR_L[RDCR] or
+ * GUMR_L[TDCR]. E.g., if this BRG is the RX clock, and GUMR_L[RDCR]=01,
+ * then 'multiplier' should be 8.
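+ *
+ * Worked example (illustrative, assuming a 100 MHz BRG clock): for
+ * rate = 9600 and multiplier = 16, divisor = 100000000 / (9600 * 16) = 651,
+ * which fits in the divisor field without divide-by-16 mode.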
+ */
+int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier)
+{
+ u32 divisor, tempval;
+ u32 div16 = 0;
+
+ if ((brg < QE_BRG1) || (brg > QE_BRG16))
+ return -EINVAL;
+
+ divisor = qe_get_brg_clk() / (rate * multiplier);
+
+ if (divisor > QE_BRGC_DIVISOR_MAX + 1) {
+ div16 = QE_BRGC_DIV16;
+ divisor /= 16;
+ }
+
+	/*
+	 * Errata QE_General4, which affects some MPC832x and MPC836x SOCs,
+	 * says that the BRG divisor must be even if you're not using
+	 * divide-by-16 mode.
+	 */
+ if (qe_general4_errata())
+ if (!div16 && (divisor & 1) && (divisor > 3))
+ divisor++;
+
+ tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) |
+ QE_BRGC_ENABLE | div16;
+
+ iowrite32be(tempval, &qe_immr->brg.brgc[brg - QE_BRG1]);
+
+ return 0;
+}
+EXPORT_SYMBOL(qe_setbrg);
+
+/* Convert a string to a QE clock source enum
+ *
+ * This function takes a string, typically from a property in the device
+ * tree, and returns the corresponding "enum qe_clock" value.
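+ *
+ * For example (illustrative), "brg5" maps to QE_BRG5 and "clk21" maps to
+ * QE_CLK21; unrecognized strings yield QE_CLK_DUMMY.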
+ */
+enum qe_clock qe_clock_source(const char *source)
+{
+ unsigned int i;
+
+ if (strcasecmp(source, "none") == 0)
+ return QE_CLK_NONE;
+
+ if (strcmp(source, "tsync_pin") == 0)
+ return QE_TSYNC_PIN;
+
+ if (strcmp(source, "rsync_pin") == 0)
+ return QE_RSYNC_PIN;
+
+ if (strncasecmp(source, "brg", 3) == 0) {
+ i = simple_strtoul(source + 3, NULL, 10);
+ if ((i >= 1) && (i <= 16))
+ return (QE_BRG1 - 1) + i;
+ else
+ return QE_CLK_DUMMY;
+ }
+
+ if (strncasecmp(source, "clk", 3) == 0) {
+ i = simple_strtoul(source + 3, NULL, 10);
+ if ((i >= 1) && (i <= 24))
+ return (QE_CLK1 - 1) + i;
+ else
+ return QE_CLK_DUMMY;
+ }
+
+ return QE_CLK_DUMMY;
+}
+EXPORT_SYMBOL(qe_clock_source);
+
+/* Initialize SNUMs (thread serial numbers) according to
+ * QE Module Control chapter, SNUM table
+ */
+static void qe_snums_init(void)
+{
+ static const u8 snum_init_76[] = {
+ 0x04, 0x05, 0x0C, 0x0D, 0x14, 0x15, 0x1C, 0x1D,
+ 0x24, 0x25, 0x2C, 0x2D, 0x34, 0x35, 0x88, 0x89,
+ 0x98, 0x99, 0xA8, 0xA9, 0xB8, 0xB9, 0xC8, 0xC9,
+ 0xD8, 0xD9, 0xE8, 0xE9, 0x44, 0x45, 0x4C, 0x4D,
+ 0x54, 0x55, 0x5C, 0x5D, 0x64, 0x65, 0x6C, 0x6D,
+ 0x74, 0x75, 0x7C, 0x7D, 0x84, 0x85, 0x8C, 0x8D,
+ 0x94, 0x95, 0x9C, 0x9D, 0xA4, 0xA5, 0xAC, 0xAD,
+ 0xB4, 0xB5, 0xBC, 0xBD, 0xC4, 0xC5, 0xCC, 0xCD,
+ 0xD4, 0xD5, 0xDC, 0xDD, 0xE4, 0xE5, 0xEC, 0xED,
+ 0xF4, 0xF5, 0xFC, 0xFD,
+ };
+ static const u8 snum_init_46[] = {
+ 0x04, 0x05, 0x0C, 0x0D, 0x14, 0x15, 0x1C, 0x1D,
+ 0x24, 0x25, 0x2C, 0x2D, 0x34, 0x35, 0x88, 0x89,
+ 0x98, 0x99, 0xA8, 0xA9, 0xB8, 0xB9, 0xC8, 0xC9,
+ 0xD8, 0xD9, 0xE8, 0xE9, 0x08, 0x09, 0x18, 0x19,
+ 0x28, 0x29, 0x38, 0x39, 0x48, 0x49, 0x58, 0x59,
+ 0x68, 0x69, 0x78, 0x79, 0x80, 0x81,
+ };
+ struct device_node *qe;
+ const u8 *snum_init;
+ int i;
+
+ bitmap_zero(snum_state, QE_NUM_OF_SNUM);
+ qe_num_of_snum = 28; /* The default number of snum for threads is 28 */
+ qe = qe_get_device_node();
+ if (qe) {
+ i = of_property_read_variable_u8_array(qe, "fsl,qe-snums",
+ snums, 1, QE_NUM_OF_SNUM);
+ if (i > 0) {
+ of_node_put(qe);
+ qe_num_of_snum = i;
+ return;
+ }
+ /*
+ * Fall back to legacy binding of using the value of
+ * fsl,qe-num-snums to choose one of the static arrays
+ * above.
+ */
+ of_property_read_u32(qe, "fsl,qe-num-snums", &qe_num_of_snum);
+ of_node_put(qe);
+ }
+
+ if (qe_num_of_snum == 76) {
+ snum_init = snum_init_76;
+ } else if (qe_num_of_snum == 28 || qe_num_of_snum == 46) {
+ snum_init = snum_init_46;
+ } else {
+ pr_err("QE: unsupported value of fsl,qe-num-snums: %u\n", qe_num_of_snum);
+ return;
+ }
+ memcpy(snums, snum_init, qe_num_of_snum);
+}
+
+int qe_get_snum(void)
+{
+ unsigned long flags;
+ int snum = -EBUSY;
+ int i;
+
+ spin_lock_irqsave(&qe_lock, flags);
+ i = find_first_zero_bit(snum_state, qe_num_of_snum);
+ if (i < qe_num_of_snum) {
+ set_bit(i, snum_state);
+ snum = snums[i];
+ }
+ spin_unlock_irqrestore(&qe_lock, flags);
+
+ return snum;
+}
+EXPORT_SYMBOL(qe_get_snum);
+
+void qe_put_snum(u8 snum)
+{
+ const u8 *p = memchr(snums, snum, qe_num_of_snum);
+
+ if (p)
+ clear_bit(p - snums, snum_state);
+}
+EXPORT_SYMBOL(qe_put_snum);
+
+static int qe_sdma_init(void)
+{
+ struct sdma __iomem *sdma = &qe_immr->sdma;
+ static s32 sdma_buf_offset = -ENOMEM;
+
+	/* allocate 2 internal temporary buffers (512 bytes each) for
+	 * the SDMA */
+ if (sdma_buf_offset < 0) {
+ sdma_buf_offset = qe_muram_alloc(512 * 2, 4096);
+ if (sdma_buf_offset < 0)
+ return -ENOMEM;
+ }
+
+ iowrite32be((u32)sdma_buf_offset & QE_SDEBCR_BA_MASK,
+ &sdma->sdebcr);
+ iowrite32be((QE_SDMR_GLB_1_MSK | (0x1 << QE_SDMR_CEN_SHIFT)),
+ &sdma->sdmr);
+
+ return 0;
+}
+
+/* The maximum number of RISCs we support */
+#define MAX_QE_RISC 4
+
+/* Firmware information stored here for qe_get_firmware_info() */
+static struct qe_firmware_info qe_firmware_info;
+
+/*
+ * Set to 1 if QE firmware has been uploaded, and therefore
+ * qe_firmware_info contains valid data.
+ */
+static int qe_firmware_uploaded;
+
+/*
+ * Upload a QE microcode
+ *
+ * This function is a worker function for qe_upload_firmware(). It does
+ * the actual uploading of the microcode.
+ */
+static void qe_upload_microcode(const void *base,
+ const struct qe_microcode *ucode)
+{
+ const __be32 *code = base + be32_to_cpu(ucode->code_offset);
+ unsigned int i;
+
+ if (ucode->major || ucode->minor || ucode->revision)
+ printk(KERN_INFO "qe-firmware: "
+ "uploading microcode '%s' version %u.%u.%u\n",
+ ucode->id, ucode->major, ucode->minor, ucode->revision);
+ else
+ printk(KERN_INFO "qe-firmware: "
+ "uploading microcode '%s'\n", ucode->id);
+
+ /* Use auto-increment */
+ iowrite32be(be32_to_cpu(ucode->iram_offset) | QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR,
+ &qe_immr->iram.iadd);
+
+ for (i = 0; i < be32_to_cpu(ucode->count); i++)
+ iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata);
+
+ /* Set I-RAM Ready Register */
+ iowrite32be(QE_IRAM_READY, &qe_immr->iram.iready);
+}
+
+/*
+ * Upload a microcode to the I-RAM at a specific address.
+ *
+ * See Documentation/powerpc/qe_firmware.rst for information on QE microcode
+ * uploading.
+ *
+ * Currently, only version 1 is supported, so the 'version' field must be
+ * set to 1.
+ *
+ * The SOC model and revision are not validated, they are only displayed for
+ * informational purposes.
+ *
+ * 'calc_size' is the calculated size, in bytes, of the firmware structure and
+ * all of the microcode structures, minus the CRC.
+ *
+ * 'length' is the size that the structure says it is, including the CRC.
+ */
+int qe_upload_firmware(const struct qe_firmware *firmware)
+{
+ unsigned int i;
+ unsigned int j;
+ u32 crc;
+ size_t calc_size;
+ size_t length;
+ const struct qe_header *hdr;
+
+ if (!firmware) {
+ printk(KERN_ERR "qe-firmware: invalid pointer\n");
+ return -EINVAL;
+ }
+
+ hdr = &firmware->header;
+ length = be32_to_cpu(hdr->length);
+
+ /* Check the magic */
+ if ((hdr->magic[0] != 'Q') || (hdr->magic[1] != 'E') ||
+ (hdr->magic[2] != 'F')) {
+ printk(KERN_ERR "qe-firmware: not a microcode\n");
+ return -EPERM;
+ }
+
+ /* Check the version */
+ if (hdr->version != 1) {
+ printk(KERN_ERR "qe-firmware: unsupported version\n");
+ return -EPERM;
+ }
+
+ /* Validate some of the fields */
+ if ((firmware->count < 1) || (firmware->count > MAX_QE_RISC)) {
+ printk(KERN_ERR "qe-firmware: invalid data\n");
+ return -EINVAL;
+ }
+
+ /* Validate the length and check if there's a CRC */
+ calc_size = struct_size(firmware, microcode, firmware->count);
+
+ for (i = 0; i < firmware->count; i++)
+ /*
+ * For situations where the second RISC uses the same microcode
+ * as the first, the 'code_offset' and 'count' fields will be
+ * zero, so it's okay to add those.
+ */
+ calc_size += sizeof(__be32) *
+ be32_to_cpu(firmware->microcode[i].count);
+
+ /* Validate the length */
+ if (length != calc_size + sizeof(__be32)) {
+ printk(KERN_ERR "qe-firmware: invalid length\n");
+ return -EPERM;
+ }
+
+ /* Validate the CRC */
+ crc = be32_to_cpu(*(__be32 *)((void *)firmware + calc_size));
+ if (crc != crc32(0, firmware, calc_size)) {
+ printk(KERN_ERR "qe-firmware: firmware CRC is invalid\n");
+ return -EIO;
+ }
+
+ /*
+ * If the microcode calls for it, split the I-RAM.
+ */
+ if (!firmware->split)
+ qe_setbits_be16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR);
+
+ if (firmware->soc.model)
+ printk(KERN_INFO
+ "qe-firmware: firmware '%s' for %u V%u.%u\n",
+ firmware->id, be16_to_cpu(firmware->soc.model),
+ firmware->soc.major, firmware->soc.minor);
+ else
+ printk(KERN_INFO "qe-firmware: firmware '%s'\n",
+ firmware->id);
+
+ /*
+ * The QE only supports one microcode per RISC, so clear out all the
+ * saved microcode information and put in the new.
+ */
+ memset(&qe_firmware_info, 0, sizeof(qe_firmware_info));
+ strscpy(qe_firmware_info.id, firmware->id, sizeof(qe_firmware_info.id));
+ qe_firmware_info.extended_modes = be64_to_cpu(firmware->extended_modes);
+ memcpy(qe_firmware_info.vtraps, firmware->vtraps,
+ sizeof(firmware->vtraps));
+
+ /* Loop through each microcode. */
+ for (i = 0; i < firmware->count; i++) {
+ const struct qe_microcode *ucode = &firmware->microcode[i];
+
+ /* Upload a microcode if it's present */
+ if (ucode->code_offset)
+ qe_upload_microcode(firmware, ucode);
+
+ /* Program the traps for this processor */
+ for (j = 0; j < 16; j++) {
+ u32 trap = be32_to_cpu(ucode->traps[j]);
+
+ if (trap)
+ iowrite32be(trap,
+ &qe_immr->rsp[i].tibcr[j]);
+ }
+
+ /* Enable traps */
+ iowrite32be(be32_to_cpu(ucode->eccr),
+ &qe_immr->rsp[i].eccr);
+ }
+
+ qe_firmware_uploaded = 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(qe_upload_firmware);
+
+/*
+ * Get info on the currently-loaded firmware
+ *
+ * This function also checks the device tree to see if the boot loader has
+ * uploaded a firmware already.
+ */
+struct qe_firmware_info *qe_get_firmware_info(void)
+{
+ static int initialized;
+ struct device_node *qe;
+ struct device_node *fw = NULL;
+ const char *sprop;
+
+ /*
+ * If we haven't checked yet, and a driver hasn't uploaded a firmware
+ * yet, then check the device tree for information.
+ */
+ if (qe_firmware_uploaded)
+ return &qe_firmware_info;
+
+ if (initialized)
+ return NULL;
+
+ initialized = 1;
+
+ qe = qe_get_device_node();
+ if (!qe)
+ return NULL;
+
+ /* Find the 'firmware' child node */
+ fw = of_get_child_by_name(qe, "firmware");
+ of_node_put(qe);
+
+ /* Did we find the 'firmware' node? */
+ if (!fw)
+ return NULL;
+
+ qe_firmware_uploaded = 1;
+
+	/* Copy the data into qe_firmware_info */
+ sprop = of_get_property(fw, "id", NULL);
+ if (sprop)
+ strscpy(qe_firmware_info.id, sprop,
+ sizeof(qe_firmware_info.id));
+
+ of_property_read_u64(fw, "extended-modes",
+ &qe_firmware_info.extended_modes);
+
+ of_property_read_u32_array(fw, "virtual-traps", qe_firmware_info.vtraps,
+ ARRAY_SIZE(qe_firmware_info.vtraps));
+
+ of_node_put(fw);
+
+ return &qe_firmware_info;
+}
+EXPORT_SYMBOL(qe_get_firmware_info);
+
+unsigned int qe_get_num_of_risc(void)
+{
+ struct device_node *qe;
+ unsigned int num_of_risc = 0;
+
+ qe = qe_get_device_node();
+ if (!qe)
+ return num_of_risc;
+
+ of_property_read_u32(qe, "fsl,qe-num-riscs", &num_of_risc);
+
+ of_node_put(qe);
+
+ return num_of_risc;
+}
+EXPORT_SYMBOL(qe_get_num_of_risc);
+
+unsigned int qe_get_num_of_snums(void)
+{
+ return qe_num_of_snum;
+}
+EXPORT_SYMBOL(qe_get_num_of_snums);
+
+static int __init qe_init(void)
+{
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,qe");
+ if (!np)
+ return -ENODEV;
+ qe_reset();
+ of_node_put(np);
+ return 0;
+}
+subsys_initcall(qe_init);
+
+#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx)
+static int qe_resume(struct platform_device *ofdev)
+{
+ if (!qe_alive_during_sleep())
+ qe_reset();
+ return 0;
+}
+
+static int qe_probe(struct platform_device *ofdev)
+{
+ return 0;
+}
+
+static const struct of_device_id qe_ids[] = {
+ { .compatible = "fsl,qe", },
+ { },
+};
+
+static struct platform_driver qe_driver = {
+ .driver = {
+ .name = "fsl-qe",
+ .of_match_table = qe_ids,
+ },
+ .probe = qe_probe,
+ .resume = qe_resume,
+};
+
+builtin_platform_driver(qe_driver);
+#endif /* defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx) */
diff --git a/drivers/soc/fsl/qe/qe_common.c b/drivers/soc/fsl/qe/qe_common.c
new file mode 100644
index 0000000000..9729ce86db
--- /dev/null
+++ b/drivers/soc/fsl/qe/qe_common.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Common CPM code
+ *
+ * Author: Scott Wood <scottwood@freescale.com>
+ *
+ * Copyright 2007-2008,2010 Freescale Semiconductor, Inc.
+ *
+ * Some parts derived from commproc.c/cpm2_common.c, which is:
+ * Copyright (c) 1997 Dan error_act (dmalek@jlc.net)
+ * Copyright (c) 1999-2001 Dan Malek <dan@embeddedalley.com>
+ * Copyright (c) 2000 MontaVista Software, Inc (source@mvista.com)
+ * 2006 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ */
+#include <linux/genalloc.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/export.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <soc/fsl/qe/qe.h>
+
+static struct gen_pool *muram_pool;
+static DEFINE_SPINLOCK(cpm_muram_lock);
+static void __iomem *muram_vbase;
+static phys_addr_t muram_pbase;
+
+struct muram_block {
+ struct list_head head;
+ s32 start;
+ int size;
+};
+
+static LIST_HEAD(muram_block_list);
+
+/* max address size we deal with */
+#define OF_MAX_ADDR_CELLS 4
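+/*
+ * gen_pool_alloc() returns 0 on failure, and offset 0 is a valid muram
+ * address, so pool addresses are biased by GENPOOL_OFFSET; the bias is
+ * subtracted again in cpm_muram_alloc_common().
+ */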
+#define GENPOOL_OFFSET (4096 * 8)
+
+int cpm_muram_init(void)
+{
+ struct device_node *np;
+ struct resource r;
+ __be32 zero[OF_MAX_ADDR_CELLS] = {};
+ resource_size_t max = 0;
+ int i = 0;
+ int ret = 0;
+
+ if (muram_pbase)
+ return 0;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,cpm-muram-data");
+ if (!np) {
+ /* try legacy bindings */
+ np = of_find_node_by_name(NULL, "data-only");
+ if (!np) {
+ pr_err("Cannot find CPM muram data node");
+ ret = -ENODEV;
+ goto out_muram;
+ }
+ }
+
+ muram_pool = gen_pool_create(0, -1);
+ if (!muram_pool) {
+ pr_err("Cannot allocate memory pool for CPM/QE muram");
+ ret = -ENOMEM;
+ goto out_muram;
+ }
+ muram_pbase = of_translate_address(np, zero);
+ if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) {
+ pr_err("Cannot translate zero through CPM muram node");
+ ret = -ENODEV;
+ goto out_pool;
+ }
+
+ while (of_address_to_resource(np, i++, &r) == 0) {
+ if (r.end > max)
+ max = r.end;
+ ret = gen_pool_add(muram_pool, r.start - muram_pbase +
+ GENPOOL_OFFSET, resource_size(&r), -1);
+ if (ret) {
+ pr_err("QE: couldn't add muram to pool!\n");
+ goto out_pool;
+ }
+ }
+
+ muram_vbase = ioremap(muram_pbase, max - muram_pbase + 1);
+ if (!muram_vbase) {
+ pr_err("Cannot map QE muram");
+ ret = -ENOMEM;
+ goto out_pool;
+ }
+ goto out_muram;
+out_pool:
+ gen_pool_destroy(muram_pool);
+out_muram:
+ of_node_put(np);
+ return ret;
+}
+
+/*
+ * cpm_muram_alloc_common - cpm_muram_alloc common code
+ * @size: number of bytes to allocate
+ * @algo: algorithm for alloc.
+ * @data: data for genalloc's algorithm.
+ *
+ * This function returns a non-negative offset into the muram area, or
+ * a negative errno on failure.
+ */
+static s32 cpm_muram_alloc_common(unsigned long size,
+ genpool_algo_t algo, void *data)
+{
+ struct muram_block *entry;
+ s32 start;
+
+ entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!entry)
+ return -ENOMEM;
+ start = gen_pool_alloc_algo(muram_pool, size, algo, data);
+ if (!start) {
+ kfree(entry);
+ return -ENOMEM;
+ }
+ start = start - GENPOOL_OFFSET;
+ memset_io(cpm_muram_addr(start), 0, size);
+ entry->start = start;
+ entry->size = size;
+ list_add(&entry->head, &muram_block_list);
+
+ return start;
+}
+
+/*
+ * cpm_muram_alloc - allocate the requested size worth of multi-user ram
+ * @size: number of bytes to allocate
+ * @align: requested alignment, in bytes
+ *
+ * This function returns a non-negative offset into the muram area, or
+ * a negative errno on failure.
+ * Use cpm_dpram_addr() to get the virtual address of the area.
+ * Use cpm_muram_free() to free the allocation.
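+ *
+ * Example (illustrative):
+ *	s32 off = cpm_muram_alloc(64, 8);
+ *	if (off >= 0) {
+ *		void __iomem *va = cpm_muram_addr(off);
+ *		...
+ *		cpm_muram_free(off);
+ *	}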
+ */
+s32 cpm_muram_alloc(unsigned long size, unsigned long align)
+{
+ s32 start;
+ unsigned long flags;
+ struct genpool_data_align muram_pool_data;
+
+ spin_lock_irqsave(&cpm_muram_lock, flags);
+ muram_pool_data.align = align;
+ start = cpm_muram_alloc_common(size, gen_pool_first_fit_align,
+ &muram_pool_data);
+ spin_unlock_irqrestore(&cpm_muram_lock, flags);
+ return start;
+}
+EXPORT_SYMBOL(cpm_muram_alloc);
+
+/**
+ * cpm_muram_free - free a chunk of multi-user ram
+ * @offset: The beginning of the chunk as returned by cpm_muram_alloc().
+ */
+void cpm_muram_free(s32 offset)
+{
+ unsigned long flags;
+ int size;
+ struct muram_block *tmp;
+
+ if (offset < 0)
+ return;
+
+ size = 0;
+ spin_lock_irqsave(&cpm_muram_lock, flags);
+ list_for_each_entry(tmp, &muram_block_list, head) {
+ if (tmp->start == offset) {
+ size = tmp->size;
+ list_del(&tmp->head);
+ kfree(tmp);
+ break;
+ }
+ }
+ gen_pool_free(muram_pool, offset + GENPOOL_OFFSET, size);
+ spin_unlock_irqrestore(&cpm_muram_lock, flags);
+}
+EXPORT_SYMBOL(cpm_muram_free);
+
+/*
+ * cpm_muram_alloc_fixed - reserve a specific region of multi-user ram
+ * @offset: offset of allocation start address
+ * @size: number of bytes to allocate
+ * This function returns @offset if the area was available, a negative
+ * errno otherwise.
+ * Use cpm_dpram_addr() to get the virtual address of the area.
+ * Use cpm_muram_free() to free the allocation.
+ */
+s32 cpm_muram_alloc_fixed(unsigned long offset, unsigned long size)
+{
+ s32 start;
+ unsigned long flags;
+ struct genpool_data_fixed muram_pool_data_fixed;
+
+ spin_lock_irqsave(&cpm_muram_lock, flags);
+ muram_pool_data_fixed.offset = offset + GENPOOL_OFFSET;
+ start = cpm_muram_alloc_common(size, gen_pool_fixed_alloc,
+ &muram_pool_data_fixed);
+ spin_unlock_irqrestore(&cpm_muram_lock, flags);
+ return start;
+}
+EXPORT_SYMBOL(cpm_muram_alloc_fixed);
+
+/**
+ * cpm_muram_addr - turn a muram offset into a virtual address
+ * @offset: muram offset to convert
+ */
+void __iomem *cpm_muram_addr(unsigned long offset)
+{
+ return muram_vbase + offset;
+}
+EXPORT_SYMBOL(cpm_muram_addr);
+
+unsigned long cpm_muram_offset(const void __iomem *addr)
+{
+ return addr - muram_vbase;
+}
+EXPORT_SYMBOL(cpm_muram_offset);
+
+/**
+ * cpm_muram_dma - turn a muram virtual address into a DMA address
+ * @addr: virtual address from cpm_muram_addr() to convert
+ */
+dma_addr_t cpm_muram_dma(void __iomem *addr)
+{
+ return muram_pbase + (addr - muram_vbase);
+}
+EXPORT_SYMBOL(cpm_muram_dma);
+
+/*
+ * As cpm_muram_free, but takes the virtual address rather than the
+ * muram offset.
+ */
+void cpm_muram_free_addr(const void __iomem *addr)
+{
+ if (!addr)
+ return;
+ cpm_muram_free(cpm_muram_offset(addr));
+}
+EXPORT_SYMBOL(cpm_muram_free_addr);
diff --git a/drivers/soc/fsl/qe/qe_ic.c b/drivers/soc/fsl/qe/qe_ic.c
new file mode 100644
index 0000000000..bbae3d39c7
--- /dev/null
+++ b/drivers/soc/fsl/qe/qe_ic.c
@@ -0,0 +1,487 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * arch/powerpc/sysdev/qe_lib/qe_ic.c
+ *
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: Li Yang <leoli@freescale.com>
+ * Based on code from Shlomi Gridish <gridish@freescale.com>
+ *
+ * QUICC ENGINE Interrupt Controller
+ */
+
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/irq.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <soc/fsl/qe/qe.h>
+
+#define NR_QE_IC_INTS 64
+
+/* QE IC registers offset */
+#define QEIC_CICR 0x00
+#define QEIC_CIVEC 0x04
+#define QEIC_CIPXCC 0x10
+#define QEIC_CIPYCC 0x14
+#define QEIC_CIPWCC 0x18
+#define QEIC_CIPZCC 0x1c
+#define QEIC_CIMR 0x20
+#define QEIC_CRIMR 0x24
+#define QEIC_CIPRTA 0x30
+#define QEIC_CIPRTB 0x34
+#define QEIC_CHIVEC 0x60
+
+struct qe_ic {
+ /* Control registers offset */
+ __be32 __iomem *regs;
+
+ /* The remapper for this QEIC */
+ struct irq_domain *irqhost;
+
+ /* The "linux" controller struct */
+ struct irq_chip hc_irq;
+
+ /* VIRQ numbers of QE high/low irqs */
+ int virq_high;
+ int virq_low;
+};
+
+/*
+ * QE interrupt controller internal structure
+ */
+struct qe_ic_info {
+ /* Location of this source at the QIMR register */
+ u32 mask;
+
+ /* Mask register offset */
+ u32 mask_reg;
+
+ /*
+ * For grouped interrupts sources - the interrupt code as
+ * appears at the group priority register
+ */
+ u8 pri_code;
+
+ /* Group priority register offset */
+ u32 pri_reg;
+};
+
+static DEFINE_RAW_SPINLOCK(qe_ic_lock);
+
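+/*
+ * Sparse table indexed by hw interrupt number; entries left zeroed are
+ * reserved sources and are rejected in qe_ic_host_map().
+ */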
+static struct qe_ic_info qe_ic_info[] = {
+ [1] = {
+ .mask = 0x00008000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 0,
+ .pri_reg = QEIC_CIPWCC,
+ },
+ [2] = {
+ .mask = 0x00004000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 1,
+ .pri_reg = QEIC_CIPWCC,
+ },
+ [3] = {
+ .mask = 0x00002000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 2,
+ .pri_reg = QEIC_CIPWCC,
+ },
+ [10] = {
+ .mask = 0x00000040,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 1,
+ .pri_reg = QEIC_CIPZCC,
+ },
+ [11] = {
+ .mask = 0x00000020,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 2,
+ .pri_reg = QEIC_CIPZCC,
+ },
+ [12] = {
+ .mask = 0x00000010,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 3,
+ .pri_reg = QEIC_CIPZCC,
+ },
+ [13] = {
+ .mask = 0x00000008,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 4,
+ .pri_reg = QEIC_CIPZCC,
+ },
+ [14] = {
+ .mask = 0x00000004,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 5,
+ .pri_reg = QEIC_CIPZCC,
+ },
+ [15] = {
+ .mask = 0x00000002,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 6,
+ .pri_reg = QEIC_CIPZCC,
+ },
+ [20] = {
+ .mask = 0x10000000,
+ .mask_reg = QEIC_CRIMR,
+ .pri_code = 3,
+ .pri_reg = QEIC_CIPRTA,
+ },
+ [25] = {
+ .mask = 0x00800000,
+ .mask_reg = QEIC_CRIMR,
+ .pri_code = 0,
+ .pri_reg = QEIC_CIPRTB,
+ },
+ [26] = {
+ .mask = 0x00400000,
+ .mask_reg = QEIC_CRIMR,
+ .pri_code = 1,
+ .pri_reg = QEIC_CIPRTB,
+ },
+ [27] = {
+ .mask = 0x00200000,
+ .mask_reg = QEIC_CRIMR,
+ .pri_code = 2,
+ .pri_reg = QEIC_CIPRTB,
+ },
+ [28] = {
+ .mask = 0x00100000,
+ .mask_reg = QEIC_CRIMR,
+ .pri_code = 3,
+ .pri_reg = QEIC_CIPRTB,
+ },
+ [32] = {
+ .mask = 0x80000000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 0,
+ .pri_reg = QEIC_CIPXCC,
+ },
+ [33] = {
+ .mask = 0x40000000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 1,
+ .pri_reg = QEIC_CIPXCC,
+ },
+ [34] = {
+ .mask = 0x20000000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 2,
+ .pri_reg = QEIC_CIPXCC,
+ },
+ [35] = {
+ .mask = 0x10000000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 3,
+ .pri_reg = QEIC_CIPXCC,
+ },
+ [36] = {
+ .mask = 0x08000000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 4,
+ .pri_reg = QEIC_CIPXCC,
+ },
+ [40] = {
+ .mask = 0x00800000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 0,
+ .pri_reg = QEIC_CIPYCC,
+ },
+ [41] = {
+ .mask = 0x00400000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 1,
+ .pri_reg = QEIC_CIPYCC,
+ },
+ [42] = {
+ .mask = 0x00200000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 2,
+ .pri_reg = QEIC_CIPYCC,
+ },
+ [43] = {
+ .mask = 0x00100000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 3,
+ .pri_reg = QEIC_CIPYCC,
+ },
+};
+
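+/* Accessors: 'reg' is a byte offset into the big-endian QEIC register block */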
+static inline u32 qe_ic_read(__be32 __iomem *base, unsigned int reg)
+{
+ return ioread32be(base + (reg >> 2));
+}
+
+static inline void qe_ic_write(__be32 __iomem *base, unsigned int reg,
+ u32 value)
+{
+ iowrite32be(value, base + (reg >> 2));
+}
+
+static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
+{
+ return irq_get_chip_data(virq);
+}
+
+static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d)
+{
+ return irq_data_get_irq_chip_data(d);
+}
+
+static void qe_ic_unmask_irq(struct irq_data *d)
+{
+ struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
+ unsigned int src = irqd_to_hwirq(d);
+ unsigned long flags;
+ u32 temp;
+
+ raw_spin_lock_irqsave(&qe_ic_lock, flags);
+
+ temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
+ qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
+ temp | qe_ic_info[src].mask);
+
+ raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
+}
+
+static void qe_ic_mask_irq(struct irq_data *d)
+{
+ struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
+ unsigned int src = irqd_to_hwirq(d);
+ unsigned long flags;
+ u32 temp;
+
+ raw_spin_lock_irqsave(&qe_ic_lock, flags);
+
+ temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
+ qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
+ temp & ~qe_ic_info[src].mask);
+
+ /* Flush the above write before enabling interrupts; otherwise,
+ * spurious interrupts will sometimes happen. To be 100% sure
+ * that the write has reached the device before interrupts are
+ * enabled, the mask register would have to be read back; however,
+ * this is not required for correctness, only to avoid wasting
+ * time on a large number of spurious interrupts. In testing,
+ * a sync reduced the observed spurious interrupts to zero.
+ */
+ mb();
+
+ raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
+}
+
+static struct irq_chip qe_ic_irq_chip = {
+ .name = "QEIC",
+ .irq_unmask = qe_ic_unmask_irq,
+ .irq_mask = qe_ic_mask_irq,
+ .irq_mask_ack = qe_ic_mask_irq,
+};
+
+static int qe_ic_host_match(struct irq_domain *h, struct device_node *node,
+ enum irq_domain_bus_token bus_token)
+{
+ /* Exact match, unless qe_ic node is NULL */
+	struct device_node *of_node = irq_domain_get_of_node(h);
+
+	return of_node == NULL || of_node == node;
+}
+
+static int qe_ic_host_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct qe_ic *qe_ic = h->host_data;
+ struct irq_chip *chip;
+
+ if (hw >= ARRAY_SIZE(qe_ic_info)) {
+ pr_err("%s: Invalid hw irq number for QEIC\n", __func__);
+ return -EINVAL;
+ }
+
+ if (qe_ic_info[hw].mask == 0) {
+ printk(KERN_ERR "Can't map reserved IRQ\n");
+ return -EINVAL;
+ }
+ /* Default chip */
+ chip = &qe_ic->hc_irq;
+
+ irq_set_chip_data(virq, qe_ic);
+ irq_set_status_flags(virq, IRQ_LEVEL);
+
+ irq_set_chip_and_handler(virq, chip, handle_level_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops qe_ic_host_ops = {
+ .match = qe_ic_host_match,
+ .map = qe_ic_host_map,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+/* Return an interrupt vector or 0 if no interrupt is pending. */
+static unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
+{
+ int irq;
+
+ BUG_ON(qe_ic == NULL);
+
+	/* get the interrupt source vector (top 6 bits of CIVEC) */
+ irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26;
+
+ if (irq == 0)
+ return 0;
+
+ return irq_linear_revmap(qe_ic->irqhost, irq);
+}
+
+/* Return an interrupt vector or 0 if no interrupt is pending. */
+static unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
+{
+ int irq;
+
+ BUG_ON(qe_ic == NULL);
+
+	/* get the interrupt source vector (top 6 bits of CHIVEC) */
+ irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26;
+
+ if (irq == 0)
+ return 0;
+
+ return irq_linear_revmap(qe_ic->irqhost, irq);
+}
+
+static void qe_ic_cascade_low(struct irq_desc *desc)
+{
+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
+ unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ if (cascade_irq != 0)
+ generic_handle_irq(cascade_irq);
+
+ if (chip->irq_eoi)
+ chip->irq_eoi(&desc->irq_data);
+}
+
+static void qe_ic_cascade_high(struct irq_desc *desc)
+{
+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
+ unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ if (cascade_irq != 0)
+ generic_handle_irq(cascade_irq);
+
+ if (chip->irq_eoi)
+ chip->irq_eoi(&desc->irq_data);
+}
+
+static void qe_ic_cascade_muxed_mpic(struct irq_desc *desc)
+{
+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
+ unsigned int cascade_irq;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ cascade_irq = qe_ic_get_high_irq(qe_ic);
+ if (cascade_irq == 0)
+ cascade_irq = qe_ic_get_low_irq(qe_ic);
+
+ if (cascade_irq != 0)
+ generic_handle_irq(cascade_irq);
+
+ chip->irq_eoi(&desc->irq_data);
+}
+
+static int qe_ic_init(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ void (*low_handler)(struct irq_desc *desc);
+ void (*high_handler)(struct irq_desc *desc);
+ struct qe_ic *qe_ic;
+ struct resource *res;
+ struct device_node *node = pdev->dev.of_node;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(dev, "no memory resource defined\n");
+ return -ENODEV;
+ }
+
+ qe_ic = devm_kzalloc(dev, sizeof(*qe_ic), GFP_KERNEL);
+ if (qe_ic == NULL)
+ return -ENOMEM;
+
+ qe_ic->regs = devm_ioremap(dev, res->start, resource_size(res));
+ if (qe_ic->regs == NULL) {
+ dev_err(dev, "failed to ioremap() registers\n");
+ return -ENODEV;
+ }
+
+ qe_ic->hc_irq = qe_ic_irq_chip;
+
+ qe_ic->virq_high = platform_get_irq(pdev, 0);
+ qe_ic->virq_low = platform_get_irq(pdev, 1);
+
+ if (qe_ic->virq_low <= 0)
+ return -ENODEV;
+
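+	/*
+	 * If the high and low interrupts are muxed onto a single parent
+	 * line, use the combined handler that polls both vector registers.
+	 */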
+ if (qe_ic->virq_high > 0 && qe_ic->virq_high != qe_ic->virq_low) {
+ low_handler = qe_ic_cascade_low;
+ high_handler = qe_ic_cascade_high;
+ } else {
+ low_handler = qe_ic_cascade_muxed_mpic;
+ high_handler = NULL;
+ }
+
+ qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
+ &qe_ic_host_ops, qe_ic);
+ if (qe_ic->irqhost == NULL) {
+ dev_err(dev, "failed to add irq domain\n");
+ return -ENODEV;
+ }
+
+ qe_ic_write(qe_ic->regs, QEIC_CICR, 0);
+
+ irq_set_handler_data(qe_ic->virq_low, qe_ic);
+ irq_set_chained_handler(qe_ic->virq_low, low_handler);
+
+ if (high_handler) {
+ irq_set_handler_data(qe_ic->virq_high, qe_ic);
+ irq_set_chained_handler(qe_ic->virq_high, high_handler);
+ }
+ return 0;
+}
+
+static const struct of_device_id qe_ic_ids[] = {
+ { .compatible = "fsl,qe-ic"},
+ { .type = "qeic"},
+ {},
+};
+
+static struct platform_driver qe_ic_driver = {
+ .driver = {
+ .name = "qe-ic",
+ .of_match_table = qe_ic_ids,
+ },
+ .probe = qe_ic_init,
+};
+
+static int __init qe_ic_of_init(void)
+{
+ platform_driver_register(&qe_ic_driver);
+ return 0;
+}
+subsys_initcall(qe_ic_of_init);
diff --git a/drivers/soc/fsl/qe/qe_io.c b/drivers/soc/fsl/qe/qe_io.c
new file mode 100644
index 0000000000..a5e2d0e5ab
--- /dev/null
+++ b/drivers/soc/fsl/qe/qe_io.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * arch/powerpc/sysdev/qe_lib/qe_io.c
+ *
+ * QE Parallel I/O ports configuration routines
+ *
+ * Copyright 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: Li Yang <LeoLi@freescale.com>
+ * Based on code from Shlomi Gridish <gridish@freescale.com>
+ */
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+
+#include <asm/io.h>
+#include <soc/fsl/qe/qe.h>
+
+#undef DEBUG
+
+static struct qe_pio_regs __iomem *par_io;
+static int num_par_io_ports;
+
+int par_io_init(struct device_node *np)
+{
+ struct resource res;
+ int ret;
+ u32 num_ports;
+
+ /* Map Parallel I/O ports registers */
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret)
+ return ret;
+ par_io = ioremap(res.start, resource_size(&res));
+ if (!par_io)
+ return -ENOMEM;
+
+ if (!of_property_read_u32(np, "num-ports", &num_ports))
+ num_par_io_ports = num_ports;
+
+ return 0;
+}
+
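+/*
+ * Each pin owns one bit in the open-drain (cpodr) and data (cpdata)
+ * registers and two bits in the direction (cpdir) and pin-assignment
+ * (cppar) registers; the 2-bit fields for pins 0-15 live in the first
+ * register of each pair and those for pins 16-31 in the second.
+ */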
+void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, int dir,
+ int open_drain, int assignment, int has_irq)
+{
+ u32 pin_mask1bit;
+ u32 pin_mask2bits;
+ u32 new_mask2bits;
+ u32 tmp_val;
+
+	/* calculate the pin location for the single-bit registers */
+ pin_mask1bit = (u32) (1 << (QE_PIO_PINS - (pin + 1)));
+
+ /* Set open drain, if required */
+ tmp_val = ioread32be(&par_io->cpodr);
+ if (open_drain)
+ iowrite32be(pin_mask1bit | tmp_val, &par_io->cpodr);
+ else
+ iowrite32be(~pin_mask1bit & tmp_val, &par_io->cpodr);
+
+ /* define direction */
+ tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
+ ioread32be(&par_io->cpdir2) :
+ ioread32be(&par_io->cpdir1);
+
+	/* build the 2-bit field mask for this pin */
+ pin_mask2bits = (u32) (0x3 << (QE_PIO_PINS -
+ (pin % (QE_PIO_PINS / 2) + 1) * 2));
+
+	/* compute the new 2-bit direction value for this pin */
+ new_mask2bits = (u32) (dir << (QE_PIO_PINS -
+ (pin % (QE_PIO_PINS / 2) + 1) * 2));
+
+ /* clear and set 2 bits mask */
+ if (pin > (QE_PIO_PINS / 2) - 1) {
+ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir2);
+ tmp_val &= ~pin_mask2bits;
+ iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir2);
+ } else {
+ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir1);
+ tmp_val &= ~pin_mask2bits;
+ iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir1);
+ }
+ /* define pin assignment */
+ tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
+ ioread32be(&par_io->cppar2) :
+ ioread32be(&par_io->cppar1);
+
+ new_mask2bits = (u32) (assignment << (QE_PIO_PINS -
+ (pin % (QE_PIO_PINS / 2) + 1) * 2));
+ /* clear and set 2 bits mask */
+ if (pin > (QE_PIO_PINS / 2) - 1) {
+ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar2);
+ tmp_val &= ~pin_mask2bits;
+ iowrite32be(new_mask2bits | tmp_val, &par_io->cppar2);
+ } else {
+ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar1);
+ tmp_val &= ~pin_mask2bits;
+ iowrite32be(new_mask2bits | tmp_val, &par_io->cppar1);
+ }
+}
+EXPORT_SYMBOL(__par_io_config_pin);
+
+int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain,
+ int assignment, int has_irq)
+{
+ if (!par_io || port >= num_par_io_ports)
+ return -EINVAL;
+
+ __par_io_config_pin(&par_io[port], pin, dir, open_drain, assignment,
+ has_irq);
+ return 0;
+}
+EXPORT_SYMBOL(par_io_config_pin);
+
+int par_io_data_set(u8 port, u8 pin, u8 val)
+{
+ u32 pin_mask, tmp_val;
+
+ if (port >= num_par_io_ports)
+ return -EINVAL;
+ if (pin >= QE_PIO_PINS)
+ return -EINVAL;
+ /* calculate pin location */
+ pin_mask = (u32) (1 << (QE_PIO_PINS - 1 - pin));
+
+ tmp_val = ioread32be(&par_io[port].cpdata);
+
+ if (val == 0) /* clear */
+ iowrite32be(~pin_mask & tmp_val, &par_io[port].cpdata);
+ else /* set */
+ iowrite32be(pin_mask | tmp_val, &par_io[port].cpdata);
+
+ return 0;
+}
+EXPORT_SYMBOL(par_io_data_set);
+
+int par_io_of_config(struct device_node *np)
+{
+ struct device_node *pio;
+ int pio_map_len;
+ const __be32 *pio_map;
+
+ if (par_io == NULL) {
+ printk(KERN_ERR "par_io not initialized\n");
+ return -1;
+ }
+
+ pio = of_parse_phandle(np, "pio-handle", 0);
+ if (pio == NULL) {
+ printk(KERN_ERR "pio-handle not available\n");
+ return -1;
+ }
+
+ pio_map = of_get_property(pio, "pio-map", &pio_map_len);
+ if (pio_map == NULL) {
+ printk(KERN_ERR "pio-map is not set!\n");
+ return -1;
+ }
+ pio_map_len /= sizeof(unsigned int);
+ if ((pio_map_len % 6) != 0) {
+ printk(KERN_ERR "pio-map format wrong!\n");
+ return -1;
+ }
+
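+	/*
+	 * Each pio-map entry is six cells:
+	 * <port pin dir open_drain assignment has_irq>
+	 */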
+ while (pio_map_len > 0) {
+ u8 port = be32_to_cpu(pio_map[0]);
+ u8 pin = be32_to_cpu(pio_map[1]);
+ int dir = be32_to_cpu(pio_map[2]);
+ int open_drain = be32_to_cpu(pio_map[3]);
+ int assignment = be32_to_cpu(pio_map[4]);
+ int has_irq = be32_to_cpu(pio_map[5]);
+
+ par_io_config_pin(port, pin, dir, open_drain,
+ assignment, has_irq);
+ pio_map += 6;
+ pio_map_len -= 6;
+ }
+ of_node_put(pio);
+ return 0;
+}
+EXPORT_SYMBOL(par_io_of_config);
diff --git a/drivers/soc/fsl/qe/qe_tdm.c b/drivers/soc/fsl/qe/qe_tdm.c
new file mode 100644
index 0000000000..a3b691875c
--- /dev/null
+++ b/drivers/soc/fsl/qe/qe_tdm.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2015 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors: Zhao Qiang <qiang.zhao@nxp.com>
+ *
+ * Description:
+ * QE TDM API set - TDM-specific routine implementations.
+ */
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <soc/fsl/qe/qe_tdm.h>
+
+static int set_tdm_framer(const char *tdm_framer_type)
+{
+ if (strcmp(tdm_framer_type, "e1") == 0)
+ return TDM_FRAMER_E1;
+ else if (strcmp(tdm_framer_type, "t1") == 0)
+ return TDM_FRAMER_T1;
+ else
+ return -EINVAL;
+}
+
+static void set_si_param(struct ucc_tdm *utdm, struct ucc_tdm_info *ut_info)
+{
+ struct si_mode_info *si_info = &ut_info->si_info;
+
+ if (utdm->tdm_mode == TDM_INTERNAL_LOOPBACK) {
+ si_info->simr_crt = 1;
+ si_info->simr_rfsd = 0;
+ }
+}
+
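+/*
+ * Parse the TDM properties of a UCC node. Illustrative fragment (the
+ * property names come from the code below; the values are made up):
+ *
+ *	fsl,rx-sync-clock = "rsync_pin";
+ *	fsl,tx-sync-clock = "tsync_pin";
+ *	fsl,tx-timeslot-mask = <0xfffffffe>;
+ *	fsl,rx-timeslot-mask = <0xfffffffe>;
+ *	fsl,tdm-id = <0>;
+ *	fsl,tdm-framer-type = "e1";
+ *	fsl,siram-entry-id = <0>;
+ */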
+int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
+ struct ucc_tdm_info *ut_info)
+{
+ const char *sprop;
+ int ret = 0;
+ u32 val;
+
+ sprop = of_get_property(np, "fsl,rx-sync-clock", NULL);
+ if (sprop) {
+ ut_info->uf_info.rx_sync = qe_clock_source(sprop);
+ if ((ut_info->uf_info.rx_sync < QE_CLK_NONE) ||
+ (ut_info->uf_info.rx_sync > QE_RSYNC_PIN)) {
+ pr_err("QE-TDM: Invalid rx-sync-clock property\n");
+ return -EINVAL;
+ }
+ } else {
+ pr_err("QE-TDM: Invalid rx-sync-clock property\n");
+ return -EINVAL;
+ }
+
+ sprop = of_get_property(np, "fsl,tx-sync-clock", NULL);
+ if (sprop) {
+ ut_info->uf_info.tx_sync = qe_clock_source(sprop);
+ if ((ut_info->uf_info.tx_sync < QE_CLK_NONE) ||
+ (ut_info->uf_info.tx_sync > QE_TSYNC_PIN)) {
+ pr_err("QE-TDM: Invalid tx-sync-clock property\n");
+ return -EINVAL;
+ }
+ } else {
+ pr_err("QE-TDM: Invalid tx-sync-clock property\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_index(np, "fsl,tx-timeslot-mask", 0, &val);
+ if (ret) {
+ pr_err("QE-TDM: Invalid tx-timeslot-mask property\n");
+ return -EINVAL;
+ }
+ utdm->tx_ts_mask = val;
+
+ ret = of_property_read_u32_index(np, "fsl,rx-timeslot-mask", 0, &val);
+ if (ret) {
+ ret = -EINVAL;
+ pr_err("QE-TDM: Invalid rx-timeslot-mask property\n");
+ return ret;
+ }
+ utdm->rx_ts_mask = val;
+
+ ret = of_property_read_u32_index(np, "fsl,tdm-id", 0, &val);
+ if (ret) {
+ ret = -EINVAL;
+ pr_err("QE-TDM: No fsl,tdm-id property for this UCC\n");
+ return ret;
+ }
+ utdm->tdm_port = val;
+ ut_info->uf_info.tdm_num = utdm->tdm_port;
+
+ if (of_property_read_bool(np, "fsl,tdm-internal-loopback"))
+ utdm->tdm_mode = TDM_INTERNAL_LOOPBACK;
+ else
+ utdm->tdm_mode = TDM_NORMAL;
+
+ sprop = of_get_property(np, "fsl,tdm-framer-type", NULL);
+ if (!sprop) {
+ ret = -EINVAL;
+ pr_err("QE-TDM: No tdm-framer-type property for UCC\n");
+ return ret;
+ }
+ ret = set_tdm_framer(sprop);
+ if (ret < 0)
+ return -EINVAL;
+ utdm->tdm_framer_type = ret;
+
+ ret = of_property_read_u32_index(np, "fsl,siram-entry-id", 0, &val);
+ if (ret) {
+ ret = -EINVAL;
+ pr_err("QE-TDM: No siram entry id for UCC\n");
+ return ret;
+ }
+ utdm->siram_entry_id = val;
+
+ set_si_param(utdm, ut_info);
+ return ret;
+}
+EXPORT_SYMBOL(ucc_of_parse_tdm);
+
+void ucc_tdm_init(struct ucc_tdm *utdm, struct ucc_tdm_info *ut_info)
+{
+ struct si1 __iomem *si_regs;
+ u16 __iomem *siram;
+ u16 siram_entry_valid;
+ u16 siram_entry_closed;
+ u16 ucc_num;
+ u8 csel;
+ u16 sixmr;
+ u16 tdm_port;
+ u32 siram_entry_id;
+ u32 mask;
+ int i;
+
+ si_regs = utdm->si_regs;
+ siram = utdm->siram;
+ ucc_num = ut_info->uf_info.ucc_num;
+ tdm_port = utdm->tdm_port;
+ siram_entry_id = utdm->siram_entry_id;
+
+ if (utdm->tdm_framer_type == TDM_FRAMER_T1)
+ utdm->num_of_ts = 24;
+ if (utdm->tdm_framer_type == TDM_FRAMER_E1)
+ utdm->num_of_ts = 32;
+
+	/* Set up the SI RAM table; map the UCC number to its channel select code */
+ csel = (ucc_num < 4) ? ucc_num + 9 : ucc_num - 3;
+
+ siram_entry_valid = SIR_CSEL(csel) | SIR_BYTE | SIR_CNT(0);
+ siram_entry_closed = SIR_IDLE | SIR_BYTE | SIR_CNT(0);
+
+ for (i = 0; i < utdm->num_of_ts; i++) {
+ mask = 0x01 << i;
+
+ if (utdm->tx_ts_mask & mask)
+ iowrite16be(siram_entry_valid,
+ &siram[siram_entry_id * 32 + i]);
+ else
+ iowrite16be(siram_entry_closed,
+ &siram[siram_entry_id * 32 + i]);
+
+ if (utdm->rx_ts_mask & mask)
+ iowrite16be(siram_entry_valid,
+ &siram[siram_entry_id * 32 + 0x200 + i]);
+ else
+ iowrite16be(siram_entry_closed,
+ &siram[siram_entry_id * 32 + 0x200 + i]);
+ }
+
+ qe_setbits_be16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)],
+ SIR_LAST);
+ qe_setbits_be16(&siram[(siram_entry_id * 32) + 0x200 + (utdm->num_of_ts - 1)],
+ SIR_LAST);
+
+ /* Set SIxMR register */
+ sixmr = SIMR_SAD(siram_entry_id);
+
+ sixmr &= ~SIMR_SDM_MASK;
+
+ if (utdm->tdm_mode == TDM_INTERNAL_LOOPBACK)
+ sixmr |= SIMR_SDM_INTERNAL_LOOPBACK;
+ else
+ sixmr |= SIMR_SDM_NORMAL;
+
+ sixmr |= SIMR_RFSD(ut_info->si_info.simr_rfsd) |
+ SIMR_TFSD(ut_info->si_info.simr_tfsd);
+
+ if (ut_info->si_info.simr_crt)
+ sixmr |= SIMR_CRT;
+ if (ut_info->si_info.simr_sl)
+ sixmr |= SIMR_SL;
+ if (ut_info->si_info.simr_ce)
+ sixmr |= SIMR_CE;
+ if (ut_info->si_info.simr_fe)
+ sixmr |= SIMR_FE;
+ if (ut_info->si_info.simr_gm)
+ sixmr |= SIMR_GM;
+
+	if (tdm_port < 4)
+		iowrite16be(sixmr, &si_regs->sixmr1[tdm_port]);
+	else
+		pr_err("QE-TDM: can not find tdm sixmr reg\n");
+}
+EXPORT_SYMBOL(ucc_tdm_init);
diff --git a/drivers/soc/fsl/qe/qmc.c b/drivers/soc/fsl/qe/qmc.c
new file mode 100644
index 0000000000..8dc73cc1a8
--- /dev/null
+++ b/drivers/soc/fsl/qe/qmc.c
@@ -0,0 +1,1536 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * QMC driver
+ *
+ * Copyright 2022 CS GROUP France
+ *
+ * Author: Herve Codina <herve.codina@bootlin.com>
+ */
+
+#include <soc/fsl/qe/qmc.h>
+#include <linux/dma-mapping.h>
+#include <linux/hdlc.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <soc/fsl/cpm.h>
+#include <sysdev/fsl_soc.h>
+#include "tsa.h"
+
+/* SCC general mode register low (32 bits) */
+#define SCC_GSMRL 0x00
+#define SCC_GSMRL_ENR (1 << 5)
+#define SCC_GSMRL_ENT (1 << 4)
+#define SCC_GSMRL_MODE_QMC (0x0A << 0)
+
+/* SCC general mode register high (32 bits) */
+#define SCC_GSMRH 0x04
+#define SCC_GSMRH_CTSS (1 << 7)
+#define SCC_GSMRH_CDS (1 << 8)
+#define SCC_GSMRH_CTSP (1 << 9)
+#define SCC_GSMRH_CDP (1 << 10)
+
+/* SCC event register (16 bits) */
+#define SCC_SCCE 0x10
+#define SCC_SCCE_IQOV (1 << 3)
+#define SCC_SCCE_GINT (1 << 2)
+#define SCC_SCCE_GUN (1 << 1)
+#define SCC_SCCE_GOV (1 << 0)
+
+/* SCC mask register (16 bits) */
+#define SCC_SCCM 0x14
+/* Multichannel base pointer (32 bits) */
+#define QMC_GBL_MCBASE 0x00
+/* Multichannel controller state (16 bits) */
+#define QMC_GBL_QMCSTATE 0x04
+/* Maximum receive buffer length (16 bits) */
+#define QMC_GBL_MRBLR 0x06
+/* Tx time-slot assignment table pointer (16 bits) */
+#define QMC_GBL_TX_S_PTR 0x08
+/* Rx pointer (16 bits) */
+#define QMC_GBL_RXPTR 0x0A
+/* Global receive frame threshold (16 bits) */
+#define QMC_GBL_GRFTHR 0x0C
+/* Global receive frame count (16 bits) */
+#define QMC_GBL_GRFCNT 0x0E
+/* Multichannel interrupt base address (32 bits) */
+#define QMC_GBL_INTBASE 0x10
+/* Multichannel interrupt pointer (32 bits) */
+#define QMC_GBL_INTPTR 0x14
+/* Rx time-slot assignment table pointer (16 bits) */
+#define QMC_GBL_RX_S_PTR 0x18
+/* Tx pointer (16 bits) */
+#define QMC_GBL_TXPTR 0x1A
+/* CRC constant (32 bits) */
+#define QMC_GBL_C_MASK32 0x1C
+/* Time slot assignment table Rx (32 x 16 bits) */
+#define QMC_GBL_TSATRX 0x20
+/* Time slot assignment table Tx (32 x 16 bits) */
+#define QMC_GBL_TSATTX 0x60
+/* CRC constant (16 bits) */
+#define QMC_GBL_C_MASK16 0xA0
+
+/* TSA entry (16-bit entry in TSATRX and TSATTX) */
+#define QMC_TSA_VALID (1 << 15)
+#define QMC_TSA_WRAP (1 << 14)
+#define QMC_TSA_MASK (0x303F)
+#define QMC_TSA_CHANNEL(x) ((x) << 6)
+
+/* Tx buffer descriptor base address (16 bits, offset from MCBASE) */
+#define QMC_SPE_TBASE 0x00
+
+/* Channel mode register (16 bits) */
+#define QMC_SPE_CHAMR 0x02
+#define QMC_SPE_CHAMR_MODE_HDLC (1 << 15)
+#define QMC_SPE_CHAMR_MODE_TRANSP ((0 << 15) | (1 << 13))
+#define QMC_SPE_CHAMR_ENT (1 << 12)
+#define QMC_SPE_CHAMR_POL (1 << 8)
+#define QMC_SPE_CHAMR_HDLC_IDLM (1 << 13)
+#define QMC_SPE_CHAMR_HDLC_CRC (1 << 7)
+#define QMC_SPE_CHAMR_HDLC_NOF (0x0f << 0)
+#define QMC_SPE_CHAMR_TRANSP_RD (1 << 14)
+#define QMC_SPE_CHAMR_TRANSP_SYNC (1 << 10)
+
+/* Tx internal state (32 bits) */
+#define QMC_SPE_TSTATE 0x04
+/* Tx buffer descriptor pointer (16 bits) */
+#define QMC_SPE_TBPTR 0x0C
+/* Zero-insertion state (32 bits) */
+#define QMC_SPE_ZISTATE 0x14
+/* Channel's interrupt mask flags (16 bits) */
+#define QMC_SPE_INTMSK 0x1C
+/* Rx buffer descriptor base address (16 bits, offset from MCBASE) */
+#define QMC_SPE_RBASE 0x20
+/* HDLC: Maximum frame length register (16 bits) */
+#define QMC_SPE_MFLR 0x22
+/* TRANSPARENT: Transparent maximum receive length (16 bits) */
+#define QMC_SPE_TMRBLR 0x22
+/* Rx internal state (32 bits) */
+#define QMC_SPE_RSTATE 0x24
+/* Rx buffer descriptor pointer (16 bits) */
+#define QMC_SPE_RBPTR 0x2C
+/* Packs 4 bytes to 1 long word before writing to buffer (32 bits) */
+#define QMC_SPE_RPACK 0x30
+/* Zero deletion state (32 bits) */
+#define QMC_SPE_ZDSTATE 0x34
+
+/* Transparent synchronization (16 bits) */
+#define QMC_SPE_TRNSYNC 0x3C
+#define QMC_SPE_TRNSYNC_RX(x) ((x) << 8)
+#define QMC_SPE_TRNSYNC_TX(x) ((x) << 0)
+
+/* Interrupt related registers bits */
+#define QMC_INT_V (1 << 15)
+#define QMC_INT_W (1 << 14)
+#define QMC_INT_NID (1 << 13)
+#define QMC_INT_IDL (1 << 12)
+#define QMC_INT_GET_CHANNEL(x) (((x) & 0x0FC0) >> 6)
+#define QMC_INT_MRF (1 << 5)
+#define QMC_INT_UN (1 << 4)
+#define QMC_INT_RXF (1 << 3)
+#define QMC_INT_BSY (1 << 2)
+#define QMC_INT_TXB (1 << 1)
+#define QMC_INT_RXB (1 << 0)
+
+/* BD related registers bits */
+#define QMC_BD_RX_E (1 << 15)
+#define QMC_BD_RX_W (1 << 13)
+#define QMC_BD_RX_I (1 << 12)
+#define QMC_BD_RX_L (1 << 11)
+#define QMC_BD_RX_F (1 << 10)
+#define QMC_BD_RX_CM (1 << 9)
+#define QMC_BD_RX_UB (1 << 7)
+#define QMC_BD_RX_LG (1 << 5)
+#define QMC_BD_RX_NO (1 << 4)
+#define QMC_BD_RX_AB (1 << 3)
+#define QMC_BD_RX_CR (1 << 2)
+
+#define QMC_BD_TX_R (1 << 15)
+#define QMC_BD_TX_W (1 << 13)
+#define QMC_BD_TX_I (1 << 12)
+#define QMC_BD_TX_L (1 << 11)
+#define QMC_BD_TX_TC (1 << 10)
+#define QMC_BD_TX_CM (1 << 9)
+#define QMC_BD_TX_UB (1 << 7)
+#define QMC_BD_TX_PAD (0x0f << 0)
+
+/* Number of BDs and interrupt items */
+#define QMC_NB_TXBDS 8
+#define QMC_NB_RXBDS 8
+#define QMC_NB_INTS 128
+
+struct qmc_xfer_desc {
+ union {
+ void (*tx_complete)(void *context);
+ void (*rx_complete)(void *context, size_t length);
+ };
+ void *context;
+};
+
+struct qmc_chan {
+ struct list_head list;
+ unsigned int id;
+ struct qmc *qmc;
+ void __iomem *s_param;
+ enum qmc_mode mode;
+ u64 tx_ts_mask;
+ u64 rx_ts_mask;
+ bool is_reverse_data;
+
+ spinlock_t tx_lock;
+ cbd_t __iomem *txbds;
+ cbd_t __iomem *txbd_free;
+ cbd_t __iomem *txbd_done;
+ struct qmc_xfer_desc tx_desc[QMC_NB_TXBDS];
+ u64 nb_tx_underrun;
+ bool is_tx_stopped;
+
+ spinlock_t rx_lock;
+ cbd_t __iomem *rxbds;
+ cbd_t __iomem *rxbd_free;
+ cbd_t __iomem *rxbd_done;
+ struct qmc_xfer_desc rx_desc[QMC_NB_RXBDS];
+ u64 nb_rx_busy;
+ int rx_pending;
+ bool is_rx_halted;
+ bool is_rx_stopped;
+};
+
+struct qmc {
+ struct device *dev;
+ struct tsa_serial *tsa_serial;
+ void __iomem *scc_regs;
+ void __iomem *scc_pram;
+ void __iomem *dpram;
+ u16 scc_pram_offset;
+ cbd_t __iomem *bd_table;
+ dma_addr_t bd_dma_addr;
+ size_t bd_size;
+ u16 __iomem *int_table;
+ u16 __iomem *int_curr;
+ dma_addr_t int_dma_addr;
+ size_t int_size;
+ struct list_head chan_head;
+ struct qmc_chan *chans[64];
+};
+
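+/* Big-endian accessors for the SCC registers, parameter RAM and BDs */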
+static inline void qmc_write16(void __iomem *addr, u16 val)
+{
+ iowrite16be(val, addr);
+}
+
+static inline u16 qmc_read16(void __iomem *addr)
+{
+ return ioread16be(addr);
+}
+
+static inline void qmc_setbits16(void __iomem *addr, u16 set)
+{
+ qmc_write16(addr, qmc_read16(addr) | set);
+}
+
+static inline void qmc_clrbits16(void __iomem *addr, u16 clr)
+{
+ qmc_write16(addr, qmc_read16(addr) & ~clr);
+}
+
+static inline void qmc_write32(void __iomem *addr, u32 val)
+{
+ iowrite32be(val, addr);
+}
+
+static inline u32 qmc_read32(void __iomem *addr)
+{
+ return ioread32be(addr);
+}
+
+static inline void qmc_setbits32(void __iomem *addr, u32 set)
+{
+ qmc_write32(addr, qmc_read32(addr) | set);
+}
+
+int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info)
+{
+ struct tsa_serial_info tsa_info;
+ int ret;
+
+ /* Retrieve info from the TSA related serial */
+ ret = tsa_serial_get_info(chan->qmc->tsa_serial, &tsa_info);
+ if (ret)
+ return ret;
+
+ info->mode = chan->mode;
+ info->rx_fs_rate = tsa_info.rx_fs_rate;
+ info->rx_bit_rate = tsa_info.rx_bit_rate;
+ info->nb_tx_ts = hweight64(chan->tx_ts_mask);
+ info->tx_fs_rate = tsa_info.tx_fs_rate;
+ info->tx_bit_rate = tsa_info.tx_bit_rate;
+ info->nb_rx_ts = hweight64(chan->rx_ts_mask);
+
+ return 0;
+}
+EXPORT_SYMBOL(qmc_chan_get_info);
+
+int qmc_chan_set_param(struct qmc_chan *chan, const struct qmc_chan_param *param)
+{
+ if (param->mode != chan->mode)
+ return -EINVAL;
+
+ switch (param->mode) {
+ case QMC_HDLC:
+ if ((param->hdlc.max_rx_buf_size % 4) ||
+ (param->hdlc.max_rx_buf_size < 8))
+ return -EINVAL;
+
+ qmc_write16(chan->qmc->scc_pram + QMC_GBL_MRBLR,
+ param->hdlc.max_rx_buf_size - 8);
+ qmc_write16(chan->s_param + QMC_SPE_MFLR,
+ param->hdlc.max_rx_frame_size);
+ if (param->hdlc.is_crc32) {
+ qmc_setbits16(chan->s_param + QMC_SPE_CHAMR,
+ QMC_SPE_CHAMR_HDLC_CRC);
+ } else {
+ qmc_clrbits16(chan->s_param + QMC_SPE_CHAMR,
+ QMC_SPE_CHAMR_HDLC_CRC);
+ }
+ break;
+
+ case QMC_TRANSPARENT:
+ qmc_write16(chan->s_param + QMC_SPE_TMRBLR,
+ param->transp.max_rx_buf_size);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(qmc_chan_set_param);
+
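+/*
+ * Queue a DMA-mapped buffer for transmission. Minimal usage sketch
+ * ('my_tx_done' and 'ctx' are caller-provided, hypothetical names):
+ *
+ *	ret = qmc_chan_write_submit(chan, dma_addr, len, my_tx_done, ctx);
+ *
+ * Returns -EBUSY when all Tx descriptors are in use; 'complete' is
+ * called back once the QMC has sent the buffer.
+ */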
+int qmc_chan_write_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
+ void (*complete)(void *context), void *context)
+{
+ struct qmc_xfer_desc *xfer_desc;
+ unsigned long flags;
+ cbd_t __iomem *bd;
+ u16 ctrl;
+ int ret;
+
+ /*
+ * R bit UB bit
+	 * 0     0    : The BD is free
+	 * 1     1    : The BD is in use, waiting for transfer
+	 * 0     1    : The BD is in use, waiting for completion
+	 * 1     0    : Should not happen
+ */
+
+ spin_lock_irqsave(&chan->tx_lock, flags);
+ bd = chan->txbd_free;
+
+ ctrl = qmc_read16(&bd->cbd_sc);
+ if (ctrl & (QMC_BD_TX_R | QMC_BD_TX_UB)) {
+ /* We are full ... */
+ ret = -EBUSY;
+ goto end;
+ }
+
+ qmc_write16(&bd->cbd_datlen, length);
+ qmc_write32(&bd->cbd_bufaddr, addr);
+
+ xfer_desc = &chan->tx_desc[bd - chan->txbds];
+ xfer_desc->tx_complete = complete;
+ xfer_desc->context = context;
+
+ /* Activate the descriptor */
+ ctrl |= (QMC_BD_TX_R | QMC_BD_TX_UB);
+ wmb(); /* Be sure to flush the descriptor before control update */
+ qmc_write16(&bd->cbd_sc, ctrl);
+
+ if (!chan->is_tx_stopped)
+ qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_POL);
+
+ if (ctrl & QMC_BD_TX_W)
+ chan->txbd_free = chan->txbds;
+ else
+ chan->txbd_free++;
+
+ ret = 0;
+
+end:
+ spin_unlock_irqrestore(&chan->tx_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(qmc_chan_write_submit);
+
+static void qmc_chan_write_done(struct qmc_chan *chan)
+{
+ struct qmc_xfer_desc *xfer_desc;
+ void (*complete)(void *context);
+ unsigned long flags;
+ void *context;
+ cbd_t __iomem *bd;
+ u16 ctrl;
+
+ /*
+ * R bit UB bit
+	 * 0     0    : The BD is free
+	 * 1     1    : The BD is in use, waiting for transfer
+	 * 0     1    : The BD is in use, waiting for completion
+	 * 1     0    : Should not happen
+ */
+
+ spin_lock_irqsave(&chan->tx_lock, flags);
+ bd = chan->txbd_done;
+
+ ctrl = qmc_read16(&bd->cbd_sc);
+ while (!(ctrl & QMC_BD_TX_R)) {
+ if (!(ctrl & QMC_BD_TX_UB))
+ goto end;
+
+ xfer_desc = &chan->tx_desc[bd - chan->txbds];
+ complete = xfer_desc->tx_complete;
+ context = xfer_desc->context;
+ xfer_desc->tx_complete = NULL;
+ xfer_desc->context = NULL;
+
+ qmc_write16(&bd->cbd_sc, ctrl & ~QMC_BD_TX_UB);
+
+ if (ctrl & QMC_BD_TX_W)
+ chan->txbd_done = chan->txbds;
+ else
+ chan->txbd_done++;
+
+ if (complete) {
+ spin_unlock_irqrestore(&chan->tx_lock, flags);
+ complete(context);
+ spin_lock_irqsave(&chan->tx_lock, flags);
+ }
+
+ bd = chan->txbd_done;
+ ctrl = qmc_read16(&bd->cbd_sc);
+ }
+
+end:
+ spin_unlock_irqrestore(&chan->tx_lock, flags);
+}
+
+int qmc_chan_read_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
+ void (*complete)(void *context, size_t length), void *context)
+{
+ struct qmc_xfer_desc *xfer_desc;
+ unsigned long flags;
+ cbd_t __iomem *bd;
+ u16 ctrl;
+ int ret;
+
+ /*
+ * E bit UB bit
+	 * 0     0    : The BD is free
+	 * 1     1    : The BD is in use, waiting for transfer
+	 * 0     1    : The BD is in use, waiting for completion
+	 * 1     0    : Should not happen
+ */
+
+ spin_lock_irqsave(&chan->rx_lock, flags);
+ bd = chan->rxbd_free;
+
+ ctrl = qmc_read16(&bd->cbd_sc);
+ if (ctrl & (QMC_BD_RX_E | QMC_BD_RX_UB)) {
+ /* We are full ... */
+ ret = -EBUSY;
+ goto end;
+ }
+
+ qmc_write16(&bd->cbd_datlen, 0); /* data length is updated by the QMC */
+ qmc_write32(&bd->cbd_bufaddr, addr);
+
+ xfer_desc = &chan->rx_desc[bd - chan->rxbds];
+ xfer_desc->rx_complete = complete;
+ xfer_desc->context = context;
+
+ /* Activate the descriptor */
+ ctrl |= (QMC_BD_RX_E | QMC_BD_RX_UB);
+ wmb(); /* Be sure to flush data before descriptor activation */
+ qmc_write16(&bd->cbd_sc, ctrl);
+
+ /* Restart receiver if needed */
+ if (chan->is_rx_halted && !chan->is_rx_stopped) {
+ /* Restart receiver */
+ if (chan->mode == QMC_TRANSPARENT)
+ qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
+ else
+ qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
+ qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
+ chan->is_rx_halted = false;
+ }
+ chan->rx_pending++;
+
+ if (ctrl & QMC_BD_RX_W)
+ chan->rxbd_free = chan->rxbds;
+ else
+ chan->rxbd_free++;
+
+ ret = 0;
+end:
+ spin_unlock_irqrestore(&chan->rx_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(qmc_chan_read_submit);
+
+static void qmc_chan_read_done(struct qmc_chan *chan)
+{
+ void (*complete)(void *context, size_t size);
+ struct qmc_xfer_desc *xfer_desc;
+ unsigned long flags;
+ cbd_t __iomem *bd;
+ void *context;
+ u16 datalen;
+ u16 ctrl;
+
+ /*
+ * E bit UB bit
+	 * 0     0    : The BD is free
+	 * 1     1    : The BD is in use, waiting for transfer
+	 * 0     1    : The BD is in use, waiting for completion
+	 * 1     0    : Should not happen
+ */
+
+ spin_lock_irqsave(&chan->rx_lock, flags);
+ bd = chan->rxbd_done;
+
+ ctrl = qmc_read16(&bd->cbd_sc);
+ while (!(ctrl & QMC_BD_RX_E)) {
+ if (!(ctrl & QMC_BD_RX_UB))
+ goto end;
+
+ xfer_desc = &chan->rx_desc[bd - chan->rxbds];
+ complete = xfer_desc->rx_complete;
+ context = xfer_desc->context;
+ xfer_desc->rx_complete = NULL;
+ xfer_desc->context = NULL;
+
+ datalen = qmc_read16(&bd->cbd_datlen);
+ qmc_write16(&bd->cbd_sc, ctrl & ~QMC_BD_RX_UB);
+
+ if (ctrl & QMC_BD_RX_W)
+ chan->rxbd_done = chan->rxbds;
+ else
+ chan->rxbd_done++;
+
+ chan->rx_pending--;
+
+ if (complete) {
+ spin_unlock_irqrestore(&chan->rx_lock, flags);
+ complete(context, datalen);
+ spin_lock_irqsave(&chan->rx_lock, flags);
+ }
+
+ bd = chan->rxbd_done;
+ ctrl = qmc_read16(&bd->cbd_sc);
+ }
+
+end:
+ spin_unlock_irqrestore(&chan->rx_lock, flags);
+}
+
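+/*
+ * Issue a CPM command targeting this QMC channel; the channel id and
+ * opcode are packed into the fields expected by cpm_command().
+ */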
+static int qmc_chan_command(struct qmc_chan *chan, u8 qmc_opcode)
+{
+ return cpm_command(chan->id << 2, (qmc_opcode << 4) | 0x0E);
+}
+
+static int qmc_chan_stop_rx(struct qmc_chan *chan)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&chan->rx_lock, flags);
+
+ /* Send STOP RECEIVE command */
+ ret = qmc_chan_command(chan, 0x0);
+ if (ret) {
+ dev_err(chan->qmc->dev, "chan %u: Send STOP RECEIVE failed (%d)\n",
+ chan->id, ret);
+ goto end;
+ }
+
+ chan->is_rx_stopped = true;
+
+end:
+ spin_unlock_irqrestore(&chan->rx_lock, flags);
+ return ret;
+}
+
+static int qmc_chan_stop_tx(struct qmc_chan *chan)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&chan->tx_lock, flags);
+
+ /* Send STOP TRANSMIT command */
+ ret = qmc_chan_command(chan, 0x1);
+ if (ret) {
+ dev_err(chan->qmc->dev, "chan %u: Send STOP TRANSMIT failed (%d)\n",
+ chan->id, ret);
+ goto end;
+ }
+
+ chan->is_tx_stopped = true;
+
+end:
+ spin_unlock_irqrestore(&chan->tx_lock, flags);
+ return ret;
+}
+
+int qmc_chan_stop(struct qmc_chan *chan, int direction)
+{
+ int ret;
+
+ if (direction & QMC_CHAN_READ) {
+ ret = qmc_chan_stop_rx(chan);
+ if (ret)
+ return ret;
+ }
+
+ if (direction & QMC_CHAN_WRITE) {
+ ret = qmc_chan_stop_tx(chan);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(qmc_chan_stop);
+
+static void qmc_chan_start_rx(struct qmc_chan *chan)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->rx_lock, flags);
+
+ /* Restart the receiver */
+ if (chan->mode == QMC_TRANSPARENT)
+ qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
+ else
+ qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
+ qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
+ chan->is_rx_halted = false;
+
+ chan->is_rx_stopped = false;
+
+ spin_unlock_irqrestore(&chan->rx_lock, flags);
+}
+
+static void qmc_chan_start_tx(struct qmc_chan *chan)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->tx_lock, flags);
+
+ /*
+ * Enable channel transmitter as it could be disabled if
+ * qmc_chan_reset() was called.
+ */
+ qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_ENT);
+
+ /* Set the POL bit in the channel mode register */
+ qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_POL);
+
+ chan->is_tx_stopped = false;
+
+ spin_unlock_irqrestore(&chan->tx_lock, flags);
+}
+
+int qmc_chan_start(struct qmc_chan *chan, int direction)
+{
+ if (direction & QMC_CHAN_READ)
+ qmc_chan_start_rx(chan);
+
+ if (direction & QMC_CHAN_WRITE)
+ qmc_chan_start_tx(chan);
+
+ return 0;
+}
+EXPORT_SYMBOL(qmc_chan_start);
+
+static void qmc_chan_reset_rx(struct qmc_chan *chan)
+{
+ struct qmc_xfer_desc *xfer_desc;
+ unsigned long flags;
+ cbd_t __iomem *bd;
+ u16 ctrl;
+
+ spin_lock_irqsave(&chan->rx_lock, flags);
+ bd = chan->rxbds;
+ do {
+ ctrl = qmc_read16(&bd->cbd_sc);
+ qmc_write16(&bd->cbd_sc, ctrl & ~(QMC_BD_RX_UB | QMC_BD_RX_E));
+
+ xfer_desc = &chan->rx_desc[bd - chan->rxbds];
+ xfer_desc->rx_complete = NULL;
+ xfer_desc->context = NULL;
+
+ bd++;
+ } while (!(ctrl & QMC_BD_RX_W));
+
+ chan->rxbd_free = chan->rxbds;
+ chan->rxbd_done = chan->rxbds;
+ qmc_write16(chan->s_param + QMC_SPE_RBPTR,
+ qmc_read16(chan->s_param + QMC_SPE_RBASE));
+
+ chan->rx_pending = 0;
+
+ spin_unlock_irqrestore(&chan->rx_lock, flags);
+}
+
+static void qmc_chan_reset_tx(struct qmc_chan *chan)
+{
+ struct qmc_xfer_desc *xfer_desc;
+ unsigned long flags;
+ cbd_t __iomem *bd;
+ u16 ctrl;
+
+ spin_lock_irqsave(&chan->tx_lock, flags);
+
+	/* Disable the transmitter. It will be re-enabled by qmc_chan_start() */
+ qmc_clrbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_ENT);
+
+ bd = chan->txbds;
+ do {
+ ctrl = qmc_read16(&bd->cbd_sc);
+ qmc_write16(&bd->cbd_sc, ctrl & ~(QMC_BD_TX_UB | QMC_BD_TX_R));
+
+ xfer_desc = &chan->tx_desc[bd - chan->txbds];
+ xfer_desc->tx_complete = NULL;
+ xfer_desc->context = NULL;
+
+ bd++;
+ } while (!(ctrl & QMC_BD_TX_W));
+
+ chan->txbd_free = chan->txbds;
+ chan->txbd_done = chan->txbds;
+ qmc_write16(chan->s_param + QMC_SPE_TBPTR,
+ qmc_read16(chan->s_param + QMC_SPE_TBASE));
+
+ /* Reset TSTATE and ZISTATE to their initial value */
+ qmc_write32(chan->s_param + QMC_SPE_TSTATE, 0x30000000);
+ qmc_write32(chan->s_param + QMC_SPE_ZISTATE, 0x00000100);
+
+ spin_unlock_irqrestore(&chan->tx_lock, flags);
+}
+
+int qmc_chan_reset(struct qmc_chan *chan, int direction)
+{
+ if (direction & QMC_CHAN_READ)
+ qmc_chan_reset_rx(chan);
+
+ if (direction & QMC_CHAN_WRITE)
+ qmc_chan_reset_tx(chan);
+
+ return 0;
+}
+EXPORT_SYMBOL(qmc_chan_reset);
+
+static int qmc_check_chans(struct qmc *qmc)
+{
+ struct tsa_serial_info info;
+ bool is_one_table = false;
+ struct qmc_chan *chan;
+ u64 tx_ts_mask = 0;
+ u64 rx_ts_mask = 0;
+ u64 tx_ts_assigned_mask;
+ u64 rx_ts_assigned_mask;
+ int ret;
+
+ /* Retrieve info from the TSA related serial */
+ ret = tsa_serial_get_info(qmc->tsa_serial, &info);
+ if (ret)
+ return ret;
+
+ if ((info.nb_tx_ts > 64) || (info.nb_rx_ts > 64)) {
+ dev_err(qmc->dev, "Number of TSA Tx/Rx TS assigned not supported\n");
+ return -EINVAL;
+ }
+
+ /*
+ * If more than 32 TS are assigned to this serial, one common table is
+ * used for Tx and Rx and so masks must be equal for all channels.
+ */
+ if ((info.nb_tx_ts > 32) || (info.nb_rx_ts > 32)) {
+ if (info.nb_tx_ts != info.nb_rx_ts) {
+ dev_err(qmc->dev, "Number of TSA Tx/Rx TS assigned are not equal\n");
+ return -EINVAL;
+ }
+ is_one_table = true;
+ }
+
+ tx_ts_assigned_mask = info.nb_tx_ts == 64 ? U64_MAX : (((u64)1) << info.nb_tx_ts) - 1;
+ rx_ts_assigned_mask = info.nb_rx_ts == 64 ? U64_MAX : (((u64)1) << info.nb_rx_ts) - 1;
+
+ list_for_each_entry(chan, &qmc->chan_head, list) {
+ if (chan->tx_ts_mask > tx_ts_assigned_mask) {
+ dev_err(qmc->dev, "chan %u uses TSA unassigned Tx TS\n", chan->id);
+ return -EINVAL;
+ }
+ if (tx_ts_mask & chan->tx_ts_mask) {
+ dev_err(qmc->dev, "chan %u uses an already used Tx TS\n", chan->id);
+ return -EINVAL;
+ }
+
+ if (chan->rx_ts_mask > rx_ts_assigned_mask) {
+ dev_err(qmc->dev, "chan %u uses TSA unassigned Rx TS\n", chan->id);
+ return -EINVAL;
+ }
+ if (rx_ts_mask & chan->rx_ts_mask) {
+ dev_err(qmc->dev, "chan %u uses an already used Rx TS\n", chan->id);
+ return -EINVAL;
+ }
+
+ if (is_one_table && (chan->tx_ts_mask != chan->rx_ts_mask)) {
+ dev_err(qmc->dev, "chan %u uses different Rx and Tx TS\n", chan->id);
+ return -EINVAL;
+ }
+
+ tx_ts_mask |= chan->tx_ts_mask;
+ rx_ts_mask |= chan->rx_ts_mask;
+ }
+
+ return 0;
+}
+
+static unsigned int qmc_nb_chans(struct qmc *qmc)
+{
+ unsigned int count = 0;
+ struct qmc_chan *chan;
+
+ list_for_each_entry(chan, &qmc->chan_head, list)
+ count++;
+
+ return count;
+}
+
+static int qmc_of_parse_chans(struct qmc *qmc, struct device_node *np)
+{
+ struct device_node *chan_np;
+ struct qmc_chan *chan;
+ const char *mode;
+ u32 chan_id;
+ u64 ts_mask;
+ int ret;
+
+ for_each_available_child_of_node(np, chan_np) {
+ ret = of_property_read_u32(chan_np, "reg", &chan_id);
+ if (ret) {
+ dev_err(qmc->dev, "%pOF: failed to read reg\n", chan_np);
+ of_node_put(chan_np);
+ return ret;
+ }
+ if (chan_id > 63) {
+ dev_err(qmc->dev, "%pOF: Invalid chan_id\n", chan_np);
+ of_node_put(chan_np);
+ return -EINVAL;
+ }
+
+ chan = devm_kzalloc(qmc->dev, sizeof(*chan), GFP_KERNEL);
+ if (!chan) {
+ of_node_put(chan_np);
+ return -ENOMEM;
+ }
+
+ chan->id = chan_id;
+ spin_lock_init(&chan->rx_lock);
+ spin_lock_init(&chan->tx_lock);
+
+ ret = of_property_read_u64(chan_np, "fsl,tx-ts-mask", &ts_mask);
+ if (ret) {
+ dev_err(qmc->dev, "%pOF: failed to read fsl,tx-ts-mask\n",
+ chan_np);
+ of_node_put(chan_np);
+ return ret;
+ }
+ chan->tx_ts_mask = ts_mask;
+
+ ret = of_property_read_u64(chan_np, "fsl,rx-ts-mask", &ts_mask);
+ if (ret) {
+ dev_err(qmc->dev, "%pOF: failed to read fsl,rx-ts-mask\n",
+ chan_np);
+ of_node_put(chan_np);
+ return ret;
+ }
+ chan->rx_ts_mask = ts_mask;
+
+ mode = "transparent";
+ ret = of_property_read_string(chan_np, "fsl,operational-mode", &mode);
+ if (ret && ret != -EINVAL) {
+ dev_err(qmc->dev, "%pOF: failed to read fsl,operational-mode\n",
+ chan_np);
+ of_node_put(chan_np);
+ return ret;
+ }
+ if (!strcmp(mode, "transparent")) {
+ chan->mode = QMC_TRANSPARENT;
+ } else if (!strcmp(mode, "hdlc")) {
+ chan->mode = QMC_HDLC;
+ } else {
+ dev_err(qmc->dev, "%pOF: Invalid fsl,operational-mode (%s)\n",
+ chan_np, mode);
+ of_node_put(chan_np);
+ return -EINVAL;
+ }
+
+ chan->is_reverse_data = of_property_read_bool(chan_np,
+ "fsl,reverse-data");
+
+ list_add_tail(&chan->list, &qmc->chan_head);
+ qmc->chans[chan->id] = chan;
+ }
+
+ return qmc_check_chans(qmc);
+}
+
+static int qmc_setup_tsa_64rxtx(struct qmc *qmc, const struct tsa_serial_info *info)
+{
+ struct qmc_chan *chan;
+ unsigned int i;
+ u16 val;
+
+ /*
+	 * Use a common Tx/Rx 64-entry table.
+	 * Everything was previously checked and the Tx and Rx parameters are
+	 * identical, so use the Rx parameters to build the table.
+ */
+
+ /* Invalidate all entries */
+ for (i = 0; i < 64; i++)
+ qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), 0x0000);
+
+	/* Set entries based on the Rx parameters */
+ list_for_each_entry(chan, &qmc->chan_head, list) {
+ for (i = 0; i < info->nb_rx_ts; i++) {
+ if (!(chan->rx_ts_mask & (((u64)1) << i)))
+ continue;
+
+ val = QMC_TSA_VALID | QMC_TSA_MASK |
+ QMC_TSA_CHANNEL(chan->id);
+ qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), val);
+ }
+ }
+
+ /* Set Wrap bit on last entry */
+ qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATRX + ((info->nb_rx_ts - 1) * 2),
+ QMC_TSA_WRAP);
+
+ /* Init pointers to the table */
+ val = qmc->scc_pram_offset + QMC_GBL_TSATRX;
+ qmc_write16(qmc->scc_pram + QMC_GBL_RX_S_PTR, val);
+ qmc_write16(qmc->scc_pram + QMC_GBL_RXPTR, val);
+ qmc_write16(qmc->scc_pram + QMC_GBL_TX_S_PTR, val);
+ qmc_write16(qmc->scc_pram + QMC_GBL_TXPTR, val);
+
+ return 0;
+}
+
+static int qmc_setup_tsa_32rx_32tx(struct qmc *qmc, const struct tsa_serial_info *info)
+{
+ struct qmc_chan *chan;
+ unsigned int i;
+ u16 val;
+
+ /*
+	 * Use a 32-entry Tx table and a 32-entry Rx table.
+ * Everything was previously checked.
+ */
+
+ /* Invalidate all entries */
+ for (i = 0; i < 32; i++) {
+ qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), 0x0000);
+ qmc_write16(qmc->scc_pram + QMC_GBL_TSATTX + (i * 2), 0x0000);
+ }
+
+	/* Set entries based on the Rx and Tx parameters */
+ list_for_each_entry(chan, &qmc->chan_head, list) {
+ /* Rx part */
+ for (i = 0; i < info->nb_rx_ts; i++) {
+ if (!(chan->rx_ts_mask & (((u64)1) << i)))
+ continue;
+
+ val = QMC_TSA_VALID | QMC_TSA_MASK |
+ QMC_TSA_CHANNEL(chan->id);
+ qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), val);
+ }
+ /* Tx part */
+ for (i = 0; i < info->nb_tx_ts; i++) {
+ if (!(chan->tx_ts_mask & (((u64)1) << i)))
+ continue;
+
+ val = QMC_TSA_VALID | QMC_TSA_MASK |
+ QMC_TSA_CHANNEL(chan->id);
+ qmc_write16(qmc->scc_pram + QMC_GBL_TSATTX + (i * 2), val);
+ }
+ }
+
+ /* Set Wrap bit on last entries */
+ qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATRX + ((info->nb_rx_ts - 1) * 2),
+ QMC_TSA_WRAP);
+ qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATTX + ((info->nb_tx_ts - 1) * 2),
+ QMC_TSA_WRAP);
+
+ /* Init Rx pointers ...*/
+ val = qmc->scc_pram_offset + QMC_GBL_TSATRX;
+ qmc_write16(qmc->scc_pram + QMC_GBL_RX_S_PTR, val);
+ qmc_write16(qmc->scc_pram + QMC_GBL_RXPTR, val);
+
+ /* ... and Tx pointers */
+ val = qmc->scc_pram_offset + QMC_GBL_TSATTX;
+ qmc_write16(qmc->scc_pram + QMC_GBL_TX_S_PTR, val);
+ qmc_write16(qmc->scc_pram + QMC_GBL_TXPTR, val);
+
+ return 0;
+}
+
+static int qmc_setup_tsa(struct qmc *qmc)
+{
+ struct tsa_serial_info info;
+ int ret;
+
+ /* Retrieve info from the TSA related serial */
+ ret = tsa_serial_get_info(qmc->tsa_serial, &info);
+ if (ret)
+ return ret;
+
+ /*
+	 * Set up one common 64-entry table or two 32-entry tables (one for
+	 * Tx and one for Rx) depending on the number of assigned TS.
+ */
+ return ((info.nb_tx_ts > 32) || (info.nb_rx_ts > 32)) ?
+ qmc_setup_tsa_64rxtx(qmc, &info) :
+ qmc_setup_tsa_32rx_32tx(qmc, &info);
+}
+
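+/*
+ * Program the transparent-mode synchronization (TRNSYNC) register from
+ * the first Rx and last Tx time slots assigned to the channel.
+ */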
+static int qmc_setup_chan_trnsync(struct qmc *qmc, struct qmc_chan *chan)
+{
+ struct tsa_serial_info info;
+ u16 first_rx, last_tx;
+ u16 trnsync;
+ int ret;
+
+ /* Retrieve info from the TSA related serial */
+ ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
+ if (ret)
+ return ret;
+
+ /* Find the first Rx TS allocated to the channel */
+ first_rx = chan->rx_ts_mask ? __ffs64(chan->rx_ts_mask) + 1 : 0;
+
+ /* Find the last Tx TS allocated to the channel */
+ last_tx = fls64(chan->tx_ts_mask);
+
+ trnsync = 0;
+ if (info.nb_rx_ts)
+ trnsync |= QMC_SPE_TRNSYNC_RX((first_rx % info.nb_rx_ts) * 2);
+ if (info.nb_tx_ts)
+ trnsync |= QMC_SPE_TRNSYNC_TX((last_tx % info.nb_tx_ts) * 2);
+
+ qmc_write16(chan->s_param + QMC_SPE_TRNSYNC, trnsync);
+
+ dev_dbg(qmc->dev, "chan %u: trnsync=0x%04x, rx %u/%u 0x%llx, tx %u/%u 0x%llx\n",
+ chan->id, trnsync,
+ first_rx, info.nb_rx_ts, chan->rx_ts_mask,
+ last_tx, info.nb_tx_ts, chan->tx_ts_mask);
+
+ return 0;
+}
+
+static int qmc_setup_chan(struct qmc *qmc, struct qmc_chan *chan)
+{
+ unsigned int i;
+ cbd_t __iomem *bd;
+ int ret;
+ u16 val;
+
+ chan->qmc = qmc;
+
+ /* Set channel specific parameter base address */
+ chan->s_param = qmc->dpram + (chan->id * 64);
+	/* 16 BDs per channel (8 Rx and 8 Tx) */
+ chan->txbds = qmc->bd_table + (chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS));
+ chan->rxbds = qmc->bd_table + (chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS)) + QMC_NB_TXBDS;
+
+ chan->txbd_free = chan->txbds;
+ chan->txbd_done = chan->txbds;
+ chan->rxbd_free = chan->rxbds;
+ chan->rxbd_done = chan->rxbds;
+
+	/* TBASE and TBPTR */
+ val = chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS) * sizeof(cbd_t);
+ qmc_write16(chan->s_param + QMC_SPE_TBASE, val);
+ qmc_write16(chan->s_param + QMC_SPE_TBPTR, val);
+
+	/* RBASE and RBPTR */
+ val = ((chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS)) + QMC_NB_TXBDS) * sizeof(cbd_t);
+ qmc_write16(chan->s_param + QMC_SPE_RBASE, val);
+ qmc_write16(chan->s_param + QMC_SPE_RBPTR, val);
+ qmc_write32(chan->s_param + QMC_SPE_TSTATE, 0x30000000);
+ qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
+ qmc_write32(chan->s_param + QMC_SPE_ZISTATE, 0x00000100);
+ if (chan->mode == QMC_TRANSPARENT) {
+ qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
+ qmc_write16(chan->s_param + QMC_SPE_TMRBLR, 60);
+ val = QMC_SPE_CHAMR_MODE_TRANSP | QMC_SPE_CHAMR_TRANSP_SYNC;
+ if (chan->is_reverse_data)
+ val |= QMC_SPE_CHAMR_TRANSP_RD;
+ qmc_write16(chan->s_param + QMC_SPE_CHAMR, val);
+ ret = qmc_setup_chan_trnsync(qmc, chan);
+ if (ret)
+ return ret;
+ } else {
+ qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
+ qmc_write16(chan->s_param + QMC_SPE_MFLR, 60);
+ qmc_write16(chan->s_param + QMC_SPE_CHAMR,
+ QMC_SPE_CHAMR_MODE_HDLC | QMC_SPE_CHAMR_HDLC_IDLM);
+ }
+
+ /* Do not enable interrupts now. They will be enabled later */
+ qmc_write16(chan->s_param + QMC_SPE_INTMSK, 0x0000);
+
+ /* Init Rx BDs and set Wrap bit on last descriptor */
+ BUILD_BUG_ON(QMC_NB_RXBDS == 0);
+ val = QMC_BD_RX_I;
+ for (i = 0; i < QMC_NB_RXBDS; i++) {
+ bd = chan->rxbds + i;
+ qmc_write16(&bd->cbd_sc, val);
+ }
+ bd = chan->rxbds + QMC_NB_RXBDS - 1;
+ qmc_write16(&bd->cbd_sc, val | QMC_BD_RX_W);
+
+ /* Init Tx BDs and set Wrap bit on last descriptor */
+ BUILD_BUG_ON(QMC_NB_TXBDS == 0);
+ val = QMC_BD_TX_I;
+ if (chan->mode == QMC_HDLC)
+ val |= QMC_BD_TX_L | QMC_BD_TX_TC;
+ for (i = 0; i < QMC_NB_TXBDS; i++) {
+ bd = chan->txbds + i;
+ qmc_write16(&bd->cbd_sc, val);
+ }
+ bd = chan->txbds + QMC_NB_TXBDS - 1;
+ qmc_write16(&bd->cbd_sc, val | QMC_BD_TX_W);
+
+ return 0;
+}
+
+static int qmc_setup_chans(struct qmc *qmc)
+{
+ struct qmc_chan *chan;
+ int ret;
+
+ list_for_each_entry(chan, &qmc->chan_head, list) {
+ ret = qmc_setup_chan(qmc, chan);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qmc_finalize_chans(struct qmc *qmc)
+{
+ struct qmc_chan *chan;
+ int ret;
+
+ list_for_each_entry(chan, &qmc->chan_head, list) {
+ /* Unmask channel interrupts */
+ if (chan->mode == QMC_HDLC) {
+ qmc_write16(chan->s_param + QMC_SPE_INTMSK,
+ QMC_INT_NID | QMC_INT_IDL | QMC_INT_MRF |
+ QMC_INT_UN | QMC_INT_RXF | QMC_INT_BSY |
+ QMC_INT_TXB | QMC_INT_RXB);
+ } else {
+ qmc_write16(chan->s_param + QMC_SPE_INTMSK,
+ QMC_INT_UN | QMC_INT_BSY |
+ QMC_INT_TXB | QMC_INT_RXB);
+ }
+
+		/* Force-stop the channel */
+ ret = qmc_chan_stop(chan, QMC_CHAN_ALL);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qmc_setup_ints(struct qmc *qmc)
+{
+ unsigned int i;
+ u16 __iomem *last;
+
+	/* Zero all entries */
+ for (i = 0; i < (qmc->int_size / sizeof(u16)); i++)
+ qmc_write16(qmc->int_table + i, 0x0000);
+
+ /* Set Wrap bit on last entry */
+ if (qmc->int_size >= sizeof(u16)) {
+ last = qmc->int_table + (qmc->int_size / sizeof(u16)) - 1;
+ qmc_write16(last, QMC_INT_W);
+ }
+
+ return 0;
+}
+
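+/* Walk the circular interrupt table and dispatch per-channel events */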
+static void qmc_irq_gint(struct qmc *qmc)
+{
+ struct qmc_chan *chan;
+ unsigned int chan_id;
+ unsigned long flags;
+ u16 int_entry;
+
+ int_entry = qmc_read16(qmc->int_curr);
+ while (int_entry & QMC_INT_V) {
+ /* Clear all but the Wrap bit */
+ qmc_write16(qmc->int_curr, int_entry & QMC_INT_W);
+
+ chan_id = QMC_INT_GET_CHANNEL(int_entry);
+ chan = qmc->chans[chan_id];
+ if (!chan) {
+ dev_err(qmc->dev, "interrupt on invalid chan %u\n", chan_id);
+ goto int_next;
+ }
+
+ if (int_entry & QMC_INT_TXB)
+ qmc_chan_write_done(chan);
+
+ if (int_entry & QMC_INT_UN) {
+ dev_info(qmc->dev, "intr chan %u, 0x%04x (UN)\n", chan_id,
+ int_entry);
+ chan->nb_tx_underrun++;
+ }
+
+ if (int_entry & QMC_INT_BSY) {
+ dev_info(qmc->dev, "intr chan %u, 0x%04x (BSY)\n", chan_id,
+ int_entry);
+ chan->nb_rx_busy++;
+ /* Restart the receiver if needed */
+ spin_lock_irqsave(&chan->rx_lock, flags);
+ if (chan->rx_pending && !chan->is_rx_stopped) {
+ if (chan->mode == QMC_TRANSPARENT)
+ qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
+ else
+ qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
+ qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
+ chan->is_rx_halted = false;
+ } else {
+ chan->is_rx_halted = true;
+ }
+ spin_unlock_irqrestore(&chan->rx_lock, flags);
+ }
+
+ if (int_entry & QMC_INT_RXB)
+ qmc_chan_read_done(chan);
+
+int_next:
+ if (int_entry & QMC_INT_W)
+ qmc->int_curr = qmc->int_table;
+ else
+ qmc->int_curr++;
+ int_entry = qmc_read16(qmc->int_curr);
+ }
+}
+
+static irqreturn_t qmc_irq_handler(int irq, void *priv)
+{
+ struct qmc *qmc = (struct qmc *)priv;
+ u16 scce;
+
+ scce = qmc_read16(qmc->scc_regs + SCC_SCCE);
+ qmc_write16(qmc->scc_regs + SCC_SCCE, scce);
+
+ if (unlikely(scce & SCC_SCCE_IQOV))
+ dev_info(qmc->dev, "IRQ queue overflow\n");
+
+ if (unlikely(scce & SCC_SCCE_GUN))
+ dev_err(qmc->dev, "Global transmitter underrun\n");
+
+ if (unlikely(scce & SCC_SCCE_GOV))
+ dev_err(qmc->dev, "Global receiver overrun\n");
+
+ /* normal interrupt */
+ if (likely(scce & SCC_SCCE_GINT))
+ qmc_irq_gint(qmc);
+
+ return IRQ_HANDLED;
+}
+
+static int qmc_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ unsigned int nb_chans;
+ struct resource *res;
+ struct qmc *qmc;
+ int irq;
+ int ret;
+
+ qmc = devm_kzalloc(&pdev->dev, sizeof(*qmc), GFP_KERNEL);
+ if (!qmc)
+ return -ENOMEM;
+
+ qmc->dev = &pdev->dev;
+ INIT_LIST_HEAD(&qmc->chan_head);
+
+ qmc->scc_regs = devm_platform_ioremap_resource_byname(pdev, "scc_regs");
+ if (IS_ERR(qmc->scc_regs))
+ return PTR_ERR(qmc->scc_regs);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "scc_pram");
+ if (!res)
+ return -EINVAL;
+ qmc->scc_pram_offset = res->start - get_immrbase();
+ qmc->scc_pram = devm_ioremap_resource(qmc->dev, res);
+ if (IS_ERR(qmc->scc_pram))
+ return PTR_ERR(qmc->scc_pram);
+
+ qmc->dpram = devm_platform_ioremap_resource_byname(pdev, "dpram");
+ if (IS_ERR(qmc->dpram))
+ return PTR_ERR(qmc->dpram);
+
+ qmc->tsa_serial = devm_tsa_serial_get_byphandle(qmc->dev, np, "fsl,tsa-serial");
+ if (IS_ERR(qmc->tsa_serial)) {
+ return dev_err_probe(qmc->dev, PTR_ERR(qmc->tsa_serial),
+ "Failed to get TSA serial\n");
+ }
+
+ /* Connect the serial (SCC) to TSA */
+ ret = tsa_serial_connect(qmc->tsa_serial);
+ if (ret) {
+ dev_err(qmc->dev, "Failed to connect TSA serial\n");
+ return ret;
+ }
+
+	/* Parse channel information */
+ ret = qmc_of_parse_chans(qmc, np);
+ if (ret)
+ goto err_tsa_serial_disconnect;
+
+ nb_chans = qmc_nb_chans(qmc);
+
+	/* Init GSMRH and GSMRL registers */
+ qmc_write32(qmc->scc_regs + SCC_GSMRH,
+ SCC_GSMRH_CDS | SCC_GSMRH_CTSS | SCC_GSMRH_CDP | SCC_GSMRH_CTSP);
+
+ /* enable QMC mode */
+ qmc_write32(qmc->scc_regs + SCC_GSMRL, SCC_GSMRL_MODE_QMC);
+
+ /*
+ * Allocate the buffer descriptor table
+ * 8 rx and 8 tx descriptors per channel
+ */
+ qmc->bd_size = (nb_chans * (QMC_NB_TXBDS + QMC_NB_RXBDS)) * sizeof(cbd_t);
+ qmc->bd_table = dmam_alloc_coherent(qmc->dev, qmc->bd_size,
+ &qmc->bd_dma_addr, GFP_KERNEL);
+ if (!qmc->bd_table) {
+ dev_err(qmc->dev, "Failed to allocate bd table\n");
+ ret = -ENOMEM;
+ goto err_tsa_serial_disconnect;
+ }
+ memset(qmc->bd_table, 0, qmc->bd_size);
+
+ qmc_write32(qmc->scc_pram + QMC_GBL_MCBASE, qmc->bd_dma_addr);
+
+ /* Allocate the interrupt table */
+ qmc->int_size = QMC_NB_INTS * sizeof(u16);
+ qmc->int_table = dmam_alloc_coherent(qmc->dev, qmc->int_size,
+ &qmc->int_dma_addr, GFP_KERNEL);
+ if (!qmc->int_table) {
+ dev_err(qmc->dev, "Failed to allocate interrupt table\n");
+ ret = -ENOMEM;
+ goto err_tsa_serial_disconnect;
+ }
+ memset(qmc->int_table, 0, qmc->int_size);
+
+ qmc->int_curr = qmc->int_table;
+ qmc_write32(qmc->scc_pram + QMC_GBL_INTBASE, qmc->int_dma_addr);
+ qmc_write32(qmc->scc_pram + QMC_GBL_INTPTR, qmc->int_dma_addr);
+
+	/* Set MRBLR (valid for HDLC only): max MRU + max CRC */
+ qmc_write16(qmc->scc_pram + QMC_GBL_MRBLR, HDLC_MAX_MRU + 4);
+
+ qmc_write16(qmc->scc_pram + QMC_GBL_GRFTHR, 1);
+ qmc_write16(qmc->scc_pram + QMC_GBL_GRFCNT, 1);
+
+ qmc_write32(qmc->scc_pram + QMC_GBL_C_MASK32, 0xDEBB20E3);
+ qmc_write16(qmc->scc_pram + QMC_GBL_C_MASK16, 0xF0B8);
+
+ ret = qmc_setup_tsa(qmc);
+ if (ret)
+ goto err_tsa_serial_disconnect;
+
+ qmc_write16(qmc->scc_pram + QMC_GBL_QMCSTATE, 0x8000);
+
+ ret = qmc_setup_chans(qmc);
+ if (ret)
+ goto err_tsa_serial_disconnect;
+
+ /* Init interrupts table */
+ ret = qmc_setup_ints(qmc);
+ if (ret)
+ goto err_tsa_serial_disconnect;
+
+ /* Disable and clear interrupts, set the irq handler */
+ qmc_write16(qmc->scc_regs + SCC_SCCM, 0x0000);
+ qmc_write16(qmc->scc_regs + SCC_SCCE, 0x000F);
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ goto err_tsa_serial_disconnect;
+ ret = devm_request_irq(qmc->dev, irq, qmc_irq_handler, 0, "qmc", qmc);
+ if (ret < 0)
+ goto err_tsa_serial_disconnect;
+
+ /* Enable interrupts */
+ qmc_write16(qmc->scc_regs + SCC_SCCM,
+ SCC_SCCE_IQOV | SCC_SCCE_GINT | SCC_SCCE_GUN | SCC_SCCE_GOV);
+
+ ret = qmc_finalize_chans(qmc);
+ if (ret < 0)
+ goto err_disable_intr;
+
+	/* Enable the transmitter and receiver */
+ qmc_setbits32(qmc->scc_regs + SCC_GSMRL, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
+
+ platform_set_drvdata(pdev, qmc);
+
+ return 0;
+
+err_disable_intr:
+ qmc_write16(qmc->scc_regs + SCC_SCCM, 0);
+
+err_tsa_serial_disconnect:
+ tsa_serial_disconnect(qmc->tsa_serial);
+ return ret;
+}
+
+static int qmc_remove(struct platform_device *pdev)
+{
+ struct qmc *qmc = platform_get_drvdata(pdev);
+
+	/* Disable the transmitter and receiver */
+	qmc_write32(qmc->scc_regs + SCC_GSMRL,
+		    qmc_read32(qmc->scc_regs + SCC_GSMRL) &
+		    ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT));
+
+ /* Disable interrupts */
+ qmc_write16(qmc->scc_regs + SCC_SCCM, 0);
+
+ /* Disconnect the serial from TSA */
+ tsa_serial_disconnect(qmc->tsa_serial);
+
+ return 0;
+}
+
+static const struct of_device_id qmc_id_table[] = {
+ { .compatible = "fsl,cpm1-scc-qmc" },
+ {} /* sentinel */
+};
+MODULE_DEVICE_TABLE(of, qmc_id_table);
+
+static struct platform_driver qmc_driver = {
+ .driver = {
+ .name = "fsl-qmc",
+ .of_match_table = of_match_ptr(qmc_id_table),
+ },
+ .probe = qmc_probe,
+ .remove = qmc_remove,
+};
+module_platform_driver(qmc_driver);
+
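+/*
+ * Retrieve a QMC channel from a consumer node. Illustrative (assumed)
+ * consumer fragment, where the single phandle argument is the channel
+ * number:
+ *
+ *	consumer {
+ *		fsl,qmc-chan = <&qmc 16>;
+ *	};
+ *
+ * followed by qmc_chan_get_byphandle(np, "fsl,qmc-chan").
+ */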
+struct qmc_chan *qmc_chan_get_byphandle(struct device_node *np, const char *phandle_name)
+{
+ struct of_phandle_args out_args;
+ struct platform_device *pdev;
+ struct qmc_chan *qmc_chan;
+ struct qmc *qmc;
+ int ret;
+
+ ret = of_parse_phandle_with_fixed_args(np, phandle_name, 1, 0,
+ &out_args);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ if (!of_match_node(qmc_driver.driver.of_match_table, out_args.np)) {
+ of_node_put(out_args.np);
+ return ERR_PTR(-EINVAL);
+ }
+
+ pdev = of_find_device_by_node(out_args.np);
+ of_node_put(out_args.np);
+ if (!pdev)
+ return ERR_PTR(-ENODEV);
+
+ qmc = platform_get_drvdata(pdev);
+ if (!qmc) {
+ platform_device_put(pdev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ if (out_args.args_count != 1) {
+ platform_device_put(pdev);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (out_args.args[0] >= ARRAY_SIZE(qmc->chans)) {
+ platform_device_put(pdev);
+ return ERR_PTR(-EINVAL);
+ }
+
+ qmc_chan = qmc->chans[out_args.args[0]];
+ if (!qmc_chan) {
+ platform_device_put(pdev);
+ return ERR_PTR(-ENOENT);
+ }
+
+ return qmc_chan;
+}
+EXPORT_SYMBOL(qmc_chan_get_byphandle);
+
+void qmc_chan_put(struct qmc_chan *chan)
+{
+ put_device(chan->qmc->dev);
+}
+EXPORT_SYMBOL(qmc_chan_put);
+
+static void devm_qmc_chan_release(struct device *dev, void *res)
+{
+ struct qmc_chan **qmc_chan = res;
+
+ qmc_chan_put(*qmc_chan);
+}
+
+struct qmc_chan *devm_qmc_chan_get_byphandle(struct device *dev,
+ struct device_node *np,
+ const char *phandle_name)
+{
+ struct qmc_chan *qmc_chan;
+ struct qmc_chan **dr;
+
+ dr = devres_alloc(devm_qmc_chan_release, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return ERR_PTR(-ENOMEM);
+
+ qmc_chan = qmc_chan_get_byphandle(np, phandle_name);
+ if (!IS_ERR(qmc_chan)) {
+ *dr = qmc_chan;
+ devres_add(dev, dr);
+ } else {
+ devres_free(dr);
+ }
+
+ return qmc_chan;
+}
+EXPORT_SYMBOL(devm_qmc_chan_get_byphandle);
+
+MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
+MODULE_DESCRIPTION("CPM QMC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/fsl/qe/tsa.c b/drivers/soc/fsl/qe/tsa.c
new file mode 100644
index 0000000000..e0527b9efd
--- /dev/null
+++ b/drivers/soc/fsl/qe/tsa.c
@@ -0,0 +1,846 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TSA driver
+ *
+ * Copyright 2022 CS GROUP France
+ *
+ * Author: Herve Codina <herve.codina@bootlin.com>
+ */
+
+#include "tsa.h"
+#include <dt-bindings/soc/cpm1-fsl,tsa.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* TSA SI RAM routing table entry */
+#define TSA_SIRAM_ENTRY_LAST (1 << 16)
+#define TSA_SIRAM_ENTRY_BYTE (1 << 17)
+#define TSA_SIRAM_ENTRY_CNT(x) (((x) & 0x0f) << 18)
+#define TSA_SIRAM_ENTRY_CSEL_MASK (0x7 << 22)
+#define TSA_SIRAM_ENTRY_CSEL_NU (0x0 << 22)
+#define TSA_SIRAM_ENTRY_CSEL_SCC2 (0x2 << 22)
+#define TSA_SIRAM_ENTRY_CSEL_SCC3 (0x3 << 22)
+#define TSA_SIRAM_ENTRY_CSEL_SCC4 (0x4 << 22)
+#define TSA_SIRAM_ENTRY_CSEL_SMC1 (0x5 << 22)
+#define TSA_SIRAM_ENTRY_CSEL_SMC2 (0x6 << 22)
+
+/* SI mode register (32 bits) */
+#define TSA_SIMODE 0x00
+#define TSA_SIMODE_SMC2 0x80000000
+#define TSA_SIMODE_SMC1 0x00008000
+#define TSA_SIMODE_TDMA(x) ((x) << 0)
+#define TSA_SIMODE_TDMB(x) ((x) << 16)
+#define TSA_SIMODE_TDM_MASK 0x0fff
+#define TSA_SIMODE_TDM_SDM_MASK 0x0c00
+#define TSA_SIMODE_TDM_SDM_NORM 0x0000
+#define TSA_SIMODE_TDM_SDM_ECHO 0x0400
+#define TSA_SIMODE_TDM_SDM_INTL_LOOP 0x0800
+#define TSA_SIMODE_TDM_SDM_LOOP_CTRL 0x0c00
+#define TSA_SIMODE_TDM_RFSD(x) ((x) << 8)
+#define TSA_SIMODE_TDM_DSC 0x0080
+#define TSA_SIMODE_TDM_CRT 0x0040
+#define TSA_SIMODE_TDM_STZ 0x0020
+#define TSA_SIMODE_TDM_CE 0x0010
+#define TSA_SIMODE_TDM_FE 0x0008
+#define TSA_SIMODE_TDM_GM 0x0004
+#define TSA_SIMODE_TDM_TFSD(x) ((x) << 0)
+
+/* SI global mode register (8 bits) */
+#define TSA_SIGMR 0x04
+#define TSA_SIGMR_ENB (1 << 3)
+#define TSA_SIGMR_ENA (1 << 2)
+#define TSA_SIGMR_RDM_MASK 0x03
+#define TSA_SIGMR_RDM_STATIC_TDMA 0x00
+#define TSA_SIGMR_RDM_DYN_TDMA 0x01
+#define TSA_SIGMR_RDM_STATIC_TDMAB 0x02
+#define TSA_SIGMR_RDM_DYN_TDMAB 0x03
+
+/* SI status register (8 bits) */
+#define TSA_SISTR 0x06
+
+/* SI command register (8 bits) */
+#define TSA_SICMR 0x07
+
+/* SI clock route register (32 bits) */
+#define TSA_SICR 0x0C
+#define TSA_SICR_SCC2(x) ((x) << 8)
+#define TSA_SICR_SCC3(x) ((x) << 16)
+#define TSA_SICR_SCC4(x) ((x) << 24)
+#define TSA_SICR_SCC_MASK 0x0ff
+#define TSA_SICR_SCC_GRX (1 << 7)
+#define TSA_SICR_SCC_SCX_TSA (1 << 6)
+#define TSA_SICR_SCC_RXCS_MASK (0x7 << 3)
+#define TSA_SICR_SCC_RXCS_BRG1 (0x0 << 3)
+#define TSA_SICR_SCC_RXCS_BRG2 (0x1 << 3)
+#define TSA_SICR_SCC_RXCS_BRG3 (0x2 << 3)
+#define TSA_SICR_SCC_RXCS_BRG4 (0x3 << 3)
+#define TSA_SICR_SCC_RXCS_CLK15 (0x4 << 3)
+#define TSA_SICR_SCC_RXCS_CLK26 (0x5 << 3)
+#define TSA_SICR_SCC_RXCS_CLK37 (0x6 << 3)
+#define TSA_SICR_SCC_RXCS_CLK48 (0x7 << 3)
+#define TSA_SICR_SCC_TXCS_MASK (0x7 << 0)
+#define TSA_SICR_SCC_TXCS_BRG1 (0x0 << 0)
+#define TSA_SICR_SCC_TXCS_BRG2 (0x1 << 0)
+#define TSA_SICR_SCC_TXCS_BRG3 (0x2 << 0)
+#define TSA_SICR_SCC_TXCS_BRG4 (0x3 << 0)
+#define TSA_SICR_SCC_TXCS_CLK15 (0x4 << 0)
+#define TSA_SICR_SCC_TXCS_CLK26 (0x5 << 0)
+#define TSA_SICR_SCC_TXCS_CLK37 (0x6 << 0)
+#define TSA_SICR_SCC_TXCS_CLK48 (0x7 << 0)
+
+/* Serial interface RAM pointer register (32 bits) */
+#define TSA_SIRP 0x10
+
+struct tsa_entries_area {
+ void __iomem *entries_start;
+ void __iomem *entries_next;
+ void __iomem *last_entry;
+};
+
+struct tsa_tdm {
+ bool is_enable;
+ struct clk *l1rclk_clk;
+ struct clk *l1rsync_clk;
+ struct clk *l1tclk_clk;
+ struct clk *l1tsync_clk;
+ u32 simode_tdm;
+};
+
+#define TSA_TDMA 0
+#define TSA_TDMB 1
+
+struct tsa {
+ struct device *dev;
+ void __iomem *si_regs;
+ void __iomem *si_ram;
+ resource_size_t si_ram_sz;
+ spinlock_t lock;
+ int tdms; /* TSA_TDMx ORed */
+ struct tsa_tdm tdm[2]; /* TDMa and TDMb */
+ struct tsa_serial {
+ unsigned int id;
+ struct tsa_serial_info info;
+ } serials[6];
+};
+
+static inline struct tsa *tsa_serial_get_tsa(struct tsa_serial *tsa_serial)
+{
+ /* The serials table is indexed by the serial id */
+ return container_of(tsa_serial, struct tsa, serials[tsa_serial->id]);
+}
+
+static inline void tsa_write32(void __iomem *addr, u32 val)
+{
+ iowrite32be(val, addr);
+}
+
+static inline void tsa_write8(void __iomem *addr, u8 val)
+{
+ iowrite8(val, addr);
+}
+
+static inline u32 tsa_read32(void __iomem *addr)
+{
+ return ioread32be(addr);
+}
+
+static inline void tsa_clrbits32(void __iomem *addr, u32 clr)
+{
+ tsa_write32(addr, tsa_read32(addr) & ~clr);
+}
+
+static inline void tsa_clrsetbits32(void __iomem *addr, u32 clr, u32 set)
+{
+ tsa_write32(addr, (tsa_read32(addr) & ~clr) | set);
+}
+
+int tsa_serial_connect(struct tsa_serial *tsa_serial)
+{
+ struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
+ unsigned long flags;
+ u32 clear;
+ u32 set;
+
+ switch (tsa_serial->id) {
+ case FSL_CPM_TSA_SCC2:
+ clear = TSA_SICR_SCC2(TSA_SICR_SCC_MASK);
+ set = TSA_SICR_SCC2(TSA_SICR_SCC_SCX_TSA);
+ break;
+ case FSL_CPM_TSA_SCC3:
+ clear = TSA_SICR_SCC3(TSA_SICR_SCC_MASK);
+ set = TSA_SICR_SCC3(TSA_SICR_SCC_SCX_TSA);
+ break;
+ case FSL_CPM_TSA_SCC4:
+ clear = TSA_SICR_SCC4(TSA_SICR_SCC_MASK);
+ set = TSA_SICR_SCC4(TSA_SICR_SCC_SCX_TSA);
+ break;
+ default:
+ dev_err(tsa->dev, "Unsupported serial id %u\n", tsa_serial->id);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&tsa->lock, flags);
+ tsa_clrsetbits32(tsa->si_regs + TSA_SICR, clear, set);
+ spin_unlock_irqrestore(&tsa->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(tsa_serial_connect);
+
+int tsa_serial_disconnect(struct tsa_serial *tsa_serial)
+{
+ struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
+ unsigned long flags;
+ u32 clear;
+
+ switch (tsa_serial->id) {
+ case FSL_CPM_TSA_SCC2:
+ clear = TSA_SICR_SCC2(TSA_SICR_SCC_MASK);
+ break;
+ case FSL_CPM_TSA_SCC3:
+ clear = TSA_SICR_SCC3(TSA_SICR_SCC_MASK);
+ break;
+ case FSL_CPM_TSA_SCC4:
+ clear = TSA_SICR_SCC4(TSA_SICR_SCC_MASK);
+ break;
+ default:
+ dev_err(tsa->dev, "Unsupported serial id %u\n", tsa_serial->id);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&tsa->lock, flags);
+ tsa_clrsetbits32(tsa->si_regs + TSA_SICR, clear, 0);
+ spin_unlock_irqrestore(&tsa->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(tsa_serial_disconnect);
+
+int tsa_serial_get_info(struct tsa_serial *tsa_serial, struct tsa_serial_info *info)
+{
+ memcpy(info, &tsa_serial->info, sizeof(*info));
+ return 0;
+}
+EXPORT_SYMBOL(tsa_serial_get_info);
+
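+/*
+ * SI RAM split, as implemented by the function below (a reading aid derived
+ * from the code, not from the reference manual):
+ *
+ *	TDMA only: [ TDMA RX: first half ][ TDMA TX: second half ]
+ *	otherwise: [ TDMA RX ][ TDMB RX ][ TDMA TX ][ TDMB TX ]
+ *		   (one quarter of the SI RAM each)
+ */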
+static void tsa_init_entries_area(struct tsa *tsa, struct tsa_entries_area *area,
+ u32 tdms, u32 tdm_id, bool is_rx)
+{
+ resource_size_t quarter;
+ resource_size_t half;
+
+	quarter = tsa->si_ram_sz / 4;
+	half = tsa->si_ram_sz / 2;
+
+ if (tdms == BIT(TSA_TDMA)) {
+ /* Only TDMA */
+ if (is_rx) {
+ /* First half of si_ram */
+ area->entries_start = tsa->si_ram;
+ area->entries_next = area->entries_start + half;
+ area->last_entry = NULL;
+ } else {
+ /* Second half of si_ram */
+ area->entries_start = tsa->si_ram + half;
+ area->entries_next = area->entries_start + half;
+ area->last_entry = NULL;
+ }
+ } else {
+ /* Only TDMB or both TDMs */
+ if (tdm_id == TSA_TDMA) {
+ if (is_rx) {
+ /* First half of first half of si_ram */
+ area->entries_start = tsa->si_ram;
+ area->entries_next = area->entries_start + quarter;
+ area->last_entry = NULL;
+ } else {
+ /* First half of second half of si_ram */
+ area->entries_start = tsa->si_ram + (2 * quarter);
+ area->entries_next = area->entries_start + quarter;
+ area->last_entry = NULL;
+ }
+ } else {
+ if (is_rx) {
+ /* Second half of first half of si_ram */
+ area->entries_start = tsa->si_ram + quarter;
+ area->entries_next = area->entries_start + quarter;
+ area->last_entry = NULL;
+ } else {
+ /* Second half of second half of si_ram */
+ area->entries_start = tsa->si_ram + (3 * quarter);
+ area->entries_next = area->entries_start + quarter;
+ area->last_entry = NULL;
+ }
+ }
+ }
+}
+
+static const char *tsa_serial_id2name(struct tsa *tsa, u32 serial_id)
+{
+ switch (serial_id) {
+ case FSL_CPM_TSA_NU: return "Not used";
+ case FSL_CPM_TSA_SCC2: return "SCC2";
+ case FSL_CPM_TSA_SCC3: return "SCC3";
+ case FSL_CPM_TSA_SCC4: return "SCC4";
+ case FSL_CPM_TSA_SMC1: return "SMC1";
+ case FSL_CPM_TSA_SMC2: return "SMC2";
+ default:
+ break;
+ }
+ return NULL;
+}
+
+static u32 tsa_serial_id2csel(struct tsa *tsa, u32 serial_id)
+{
+ switch (serial_id) {
+ case FSL_CPM_TSA_SCC2: return TSA_SIRAM_ENTRY_CSEL_SCC2;
+ case FSL_CPM_TSA_SCC3: return TSA_SIRAM_ENTRY_CSEL_SCC3;
+ case FSL_CPM_TSA_SCC4: return TSA_SIRAM_ENTRY_CSEL_SCC4;
+ case FSL_CPM_TSA_SMC1: return TSA_SIRAM_ENTRY_CSEL_SMC1;
+ case FSL_CPM_TSA_SMC2: return TSA_SIRAM_ENTRY_CSEL_SMC2;
+ default:
+ break;
+ }
+ return TSA_SIRAM_ENTRY_CSEL_NU;
+}
+
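+/*
+ * Worked example (derived from the code below): routing 32 consecutive time
+ * slots to one serial emits two SI RAM entries, each encoding 16 byte
+ * granularity slots (CNT = 15); only the final entry carries the LAST flag.
+ * Note that the free-space check is conservative: it reserves one word per
+ * 8 slots even though a single entry can describe up to 16.
+ */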
+static int tsa_add_entry(struct tsa *tsa, struct tsa_entries_area *area,
+ u32 count, u32 serial_id)
+{
+ void __iomem *addr;
+ u32 left;
+ u32 val;
+ u32 cnt;
+ u32 nb;
+
+ addr = area->last_entry ? area->last_entry + 4 : area->entries_start;
+
+ nb = DIV_ROUND_UP(count, 8);
+ if ((addr + (nb * 4)) > area->entries_next) {
+ dev_err(tsa->dev, "si ram area full\n");
+ return -ENOSPC;
+ }
+
+ if (area->last_entry) {
+ /* Clear last flag */
+ tsa_clrbits32(area->last_entry, TSA_SIRAM_ENTRY_LAST);
+ }
+
+ left = count;
+ while (left) {
+ val = TSA_SIRAM_ENTRY_BYTE | tsa_serial_id2csel(tsa, serial_id);
+
+ if (left > 16) {
+ cnt = 16;
+ } else {
+ cnt = left;
+ val |= TSA_SIRAM_ENTRY_LAST;
+ area->last_entry = addr;
+ }
+ val |= TSA_SIRAM_ENTRY_CNT(cnt - 1);
+
+ tsa_write32(addr, val);
+ addr += 4;
+ left -= cnt;
+ }
+
+ return 0;
+}
+
+static int tsa_of_parse_tdm_route(struct tsa *tsa, struct device_node *tdm_np,
+ u32 tdms, u32 tdm_id, bool is_rx)
+{
+ struct tsa_entries_area area;
+ const char *route_name;
+ u32 serial_id;
+ int len, i;
+ u32 count;
+ const char *serial_name;
+ struct tsa_serial_info *serial_info;
+ struct tsa_tdm *tdm;
+ int ret;
+ u32 ts;
+
+ route_name = is_rx ? "fsl,rx-ts-routes" : "fsl,tx-ts-routes";
+
+ len = of_property_count_u32_elems(tdm_np, route_name);
+ if (len < 0) {
+ dev_err(tsa->dev, "%pOF: failed to read %s\n", tdm_np, route_name);
+ return len;
+ }
+ if (len % 2 != 0) {
+ dev_err(tsa->dev, "%pOF: wrong %s format\n", tdm_np, route_name);
+ return -EINVAL;
+ }
+
+ tsa_init_entries_area(tsa, &area, tdms, tdm_id, is_rx);
+ ts = 0;
+ for (i = 0; i < len; i += 2) {
+ of_property_read_u32_index(tdm_np, route_name, i, &count);
+ of_property_read_u32_index(tdm_np, route_name, i + 1, &serial_id);
+
+ if (serial_id >= ARRAY_SIZE(tsa->serials)) {
+ dev_err(tsa->dev, "%pOF: invalid serial id (%u)\n",
+ tdm_np, serial_id);
+ return -EINVAL;
+ }
+
+ serial_name = tsa_serial_id2name(tsa, serial_id);
+ if (!serial_name) {
+ dev_err(tsa->dev, "%pOF: unsupported serial id (%u)\n",
+ tdm_np, serial_id);
+ return -EINVAL;
+ }
+
+ dev_dbg(tsa->dev, "tdm_id=%u, %s ts %u..%u -> %s\n",
+			tdm_id, route_name, ts, ts + count - 1, serial_name);
+ ts += count;
+
+ ret = tsa_add_entry(tsa, &area, count, serial_id);
+ if (ret)
+ return ret;
+
+ serial_info = &tsa->serials[serial_id].info;
+ tdm = &tsa->tdm[tdm_id];
+ if (is_rx) {
+ serial_info->rx_fs_rate = clk_get_rate(tdm->l1rsync_clk);
+ serial_info->rx_bit_rate = clk_get_rate(tdm->l1rclk_clk);
+ serial_info->nb_rx_ts += count;
+ } else {
+ serial_info->tx_fs_rate = tdm->l1tsync_clk ?
+ clk_get_rate(tdm->l1tsync_clk) :
+ clk_get_rate(tdm->l1rsync_clk);
+ serial_info->tx_bit_rate = tdm->l1tclk_clk ?
+ clk_get_rate(tdm->l1tclk_clk) :
+ clk_get_rate(tdm->l1rclk_clk);
+ serial_info->nb_tx_ts += count;
+ }
+ }
+ return 0;
+}
+
+static inline int tsa_of_parse_tdm_rx_route(struct tsa *tsa,
+ struct device_node *tdm_np,
+ u32 tdms, u32 tdm_id)
+{
+ return tsa_of_parse_tdm_route(tsa, tdm_np, tdms, tdm_id, true);
+}
+
+static inline int tsa_of_parse_tdm_tx_route(struct tsa *tsa,
+ struct device_node *tdm_np,
+ u32 tdms, u32 tdm_id)
+{
+ return tsa_of_parse_tdm_route(tsa, tdm_np, tdms, tdm_id, false);
+}
+
+static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np)
+{
+ struct device_node *tdm_np;
+ struct tsa_tdm *tdm;
+ struct clk *clk;
+ u32 tdm_id, val;
+ int ret;
+ int i;
+
+ tsa->tdms = 0;
+ tsa->tdm[0].is_enable = false;
+ tsa->tdm[1].is_enable = false;
+
+ for_each_available_child_of_node(np, tdm_np) {
+ ret = of_property_read_u32(tdm_np, "reg", &tdm_id);
+ if (ret) {
+ dev_err(tsa->dev, "%pOF: failed to read reg\n", tdm_np);
+ of_node_put(tdm_np);
+ return ret;
+ }
+ switch (tdm_id) {
+ case 0:
+ tsa->tdms |= BIT(TSA_TDMA);
+ break;
+ case 1:
+ tsa->tdms |= BIT(TSA_TDMB);
+ break;
+ default:
+ dev_err(tsa->dev, "%pOF: Invalid tdm_id (%u)\n", tdm_np,
+ tdm_id);
+ of_node_put(tdm_np);
+ return -EINVAL;
+ }
+ }
+
+ for_each_available_child_of_node(np, tdm_np) {
+ ret = of_property_read_u32(tdm_np, "reg", &tdm_id);
+ if (ret) {
+ dev_err(tsa->dev, "%pOF: failed to read reg\n", tdm_np);
+ of_node_put(tdm_np);
+ return ret;
+ }
+
+ tdm = &tsa->tdm[tdm_id];
+ tdm->simode_tdm = TSA_SIMODE_TDM_SDM_NORM;
+
+ val = 0;
+ ret = of_property_read_u32(tdm_np, "fsl,rx-frame-sync-delay-bits",
+ &val);
+ if (ret && ret != -EINVAL) {
+ dev_err(tsa->dev,
+ "%pOF: failed to read fsl,rx-frame-sync-delay-bits\n",
+ tdm_np);
+ of_node_put(tdm_np);
+ return ret;
+ }
+ if (val > 3) {
+ dev_err(tsa->dev,
+ "%pOF: Invalid fsl,rx-frame-sync-delay-bits (%u)\n",
+ tdm_np, val);
+ of_node_put(tdm_np);
+ return -EINVAL;
+ }
+ tdm->simode_tdm |= TSA_SIMODE_TDM_RFSD(val);
+
+ val = 0;
+ ret = of_property_read_u32(tdm_np, "fsl,tx-frame-sync-delay-bits",
+ &val);
+ if (ret && ret != -EINVAL) {
+ dev_err(tsa->dev,
+ "%pOF: failed to read fsl,tx-frame-sync-delay-bits\n",
+ tdm_np);
+ of_node_put(tdm_np);
+ return ret;
+ }
+ if (val > 3) {
+ dev_err(tsa->dev,
+ "%pOF: Invalid fsl,tx-frame-sync-delay-bits (%u)\n",
+ tdm_np, val);
+ of_node_put(tdm_np);
+ return -EINVAL;
+ }
+ tdm->simode_tdm |= TSA_SIMODE_TDM_TFSD(val);
+
+ if (of_property_read_bool(tdm_np, "fsl,common-rxtx-pins"))
+ tdm->simode_tdm |= TSA_SIMODE_TDM_CRT;
+
+ if (of_property_read_bool(tdm_np, "fsl,clock-falling-edge"))
+ tdm->simode_tdm |= TSA_SIMODE_TDM_CE;
+
+ if (of_property_read_bool(tdm_np, "fsl,fsync-rising-edge"))
+ tdm->simode_tdm |= TSA_SIMODE_TDM_FE;
+
+ if (of_property_read_bool(tdm_np, "fsl,double-speed-clock"))
+ tdm->simode_tdm |= TSA_SIMODE_TDM_DSC;
+
+ clk = of_clk_get_by_name(tdm_np, "l1rsync");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ of_node_put(tdm_np);
+ goto err;
+ }
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ clk_put(clk);
+ of_node_put(tdm_np);
+ goto err;
+ }
+ tdm->l1rsync_clk = clk;
+
+ clk = of_clk_get_by_name(tdm_np, "l1rclk");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ of_node_put(tdm_np);
+ goto err;
+ }
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ clk_put(clk);
+ of_node_put(tdm_np);
+ goto err;
+ }
+ tdm->l1rclk_clk = clk;
+
+ if (!(tdm->simode_tdm & TSA_SIMODE_TDM_CRT)) {
+ clk = of_clk_get_by_name(tdm_np, "l1tsync");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ of_node_put(tdm_np);
+ goto err;
+ }
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ clk_put(clk);
+ of_node_put(tdm_np);
+ goto err;
+ }
+ tdm->l1tsync_clk = clk;
+
+ clk = of_clk_get_by_name(tdm_np, "l1tclk");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ of_node_put(tdm_np);
+ goto err;
+ }
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ clk_put(clk);
+ of_node_put(tdm_np);
+ goto err;
+ }
+ tdm->l1tclk_clk = clk;
+ }
+
+ ret = tsa_of_parse_tdm_rx_route(tsa, tdm_np, tsa->tdms, tdm_id);
+ if (ret) {
+ of_node_put(tdm_np);
+ goto err;
+ }
+
+ ret = tsa_of_parse_tdm_tx_route(tsa, tdm_np, tsa->tdms, tdm_id);
+ if (ret) {
+ of_node_put(tdm_np);
+ goto err;
+ }
+
+ tdm->is_enable = true;
+ }
+ return 0;
+
+err:
+ for (i = 0; i < 2; i++) {
+ if (tsa->tdm[i].l1rsync_clk) {
+ clk_disable_unprepare(tsa->tdm[i].l1rsync_clk);
+ clk_put(tsa->tdm[i].l1rsync_clk);
+ }
+ if (tsa->tdm[i].l1rclk_clk) {
+ clk_disable_unprepare(tsa->tdm[i].l1rclk_clk);
+ clk_put(tsa->tdm[i].l1rclk_clk);
+ }
+		if (tsa->tdm[i].l1tsync_clk) {
+			clk_disable_unprepare(tsa->tdm[i].l1tsync_clk);
+			clk_put(tsa->tdm[i].l1tsync_clk);
+		}
+		if (tsa->tdm[i].l1tclk_clk) {
+			clk_disable_unprepare(tsa->tdm[i].l1tclk_clk);
+			clk_put(tsa->tdm[i].l1tclk_clk);
+		}
+ }
+ return ret;
+}
+
+static void tsa_init_si_ram(struct tsa *tsa)
+{
+ resource_size_t i;
+
+ /* Fill all entries as the last one */
+ for (i = 0; i < tsa->si_ram_sz; i += 4)
+ tsa_write32(tsa->si_ram + i, TSA_SIRAM_ENTRY_LAST);
+}
+
+static int tsa_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *res;
+ struct tsa *tsa;
+ unsigned int i;
+ u32 val;
+ int ret;
+
+ tsa = devm_kzalloc(&pdev->dev, sizeof(*tsa), GFP_KERNEL);
+ if (!tsa)
+ return -ENOMEM;
+
+ tsa->dev = &pdev->dev;
+
+ for (i = 0; i < ARRAY_SIZE(tsa->serials); i++)
+ tsa->serials[i].id = i;
+
+ spin_lock_init(&tsa->lock);
+
+ tsa->si_regs = devm_platform_ioremap_resource_byname(pdev, "si_regs");
+ if (IS_ERR(tsa->si_regs))
+ return PTR_ERR(tsa->si_regs);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "si_ram");
+ if (!res) {
+ dev_err(tsa->dev, "si_ram resource missing\n");
+ return -EINVAL;
+ }
+ tsa->si_ram_sz = resource_size(res);
+ tsa->si_ram = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(tsa->si_ram))
+ return PTR_ERR(tsa->si_ram);
+
+ tsa_init_si_ram(tsa);
+
+ ret = tsa_of_parse_tdms(tsa, np);
+ if (ret)
+ return ret;
+
+ /* Set SIMODE */
+ val = 0;
+ if (tsa->tdm[0].is_enable)
+ val |= TSA_SIMODE_TDMA(tsa->tdm[0].simode_tdm);
+ if (tsa->tdm[1].is_enable)
+ val |= TSA_SIMODE_TDMB(tsa->tdm[1].simode_tdm);
+
+ tsa_clrsetbits32(tsa->si_regs + TSA_SIMODE,
+ TSA_SIMODE_TDMA(TSA_SIMODE_TDM_MASK) |
+ TSA_SIMODE_TDMB(TSA_SIMODE_TDM_MASK),
+ val);
+
+ /* Set SIGMR */
+ val = (tsa->tdms == BIT(TSA_TDMA)) ?
+ TSA_SIGMR_RDM_STATIC_TDMA : TSA_SIGMR_RDM_STATIC_TDMAB;
+ if (tsa->tdms & BIT(TSA_TDMA))
+ val |= TSA_SIGMR_ENA;
+ if (tsa->tdms & BIT(TSA_TDMB))
+ val |= TSA_SIGMR_ENB;
+ tsa_write8(tsa->si_regs + TSA_SIGMR, val);
+
+ platform_set_drvdata(pdev, tsa);
+
+ return 0;
+}
+
+static int tsa_remove(struct platform_device *pdev)
+{
+ struct tsa *tsa = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ if (tsa->tdm[i].l1rsync_clk) {
+ clk_disable_unprepare(tsa->tdm[i].l1rsync_clk);
+ clk_put(tsa->tdm[i].l1rsync_clk);
+ }
+ if (tsa->tdm[i].l1rclk_clk) {
+ clk_disable_unprepare(tsa->tdm[i].l1rclk_clk);
+ clk_put(tsa->tdm[i].l1rclk_clk);
+ }
+		if (tsa->tdm[i].l1tsync_clk) {
+			clk_disable_unprepare(tsa->tdm[i].l1tsync_clk);
+			clk_put(tsa->tdm[i].l1tsync_clk);
+		}
+		if (tsa->tdm[i].l1tclk_clk) {
+			clk_disable_unprepare(tsa->tdm[i].l1tclk_clk);
+			clk_put(tsa->tdm[i].l1tclk_clk);
+		}
+ }
+ return 0;
+}
+
+static const struct of_device_id tsa_id_table[] = {
+ { .compatible = "fsl,cpm1-tsa" },
+ {} /* sentinel */
+};
+MODULE_DEVICE_TABLE(of, tsa_id_table);
+
+static struct platform_driver tsa_driver = {
+ .driver = {
+ .name = "fsl-tsa",
+ .of_match_table = of_match_ptr(tsa_id_table),
+ },
+ .probe = tsa_probe,
+ .remove = tsa_remove,
+};
+module_platform_driver(tsa_driver);
+
+struct tsa_serial *tsa_serial_get_byphandle(struct device_node *np,
+ const char *phandle_name)
+{
+ struct of_phandle_args out_args;
+ struct platform_device *pdev;
+ struct tsa_serial *tsa_serial;
+ struct tsa *tsa;
+ int ret;
+
+ ret = of_parse_phandle_with_fixed_args(np, phandle_name, 1, 0, &out_args);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ if (!of_match_node(tsa_driver.driver.of_match_table, out_args.np)) {
+ of_node_put(out_args.np);
+ return ERR_PTR(-EINVAL);
+ }
+
+ pdev = of_find_device_by_node(out_args.np);
+ of_node_put(out_args.np);
+ if (!pdev)
+ return ERR_PTR(-ENODEV);
+
+ tsa = platform_get_drvdata(pdev);
+ if (!tsa) {
+ platform_device_put(pdev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ if (out_args.args_count != 1) {
+ platform_device_put(pdev);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (out_args.args[0] >= ARRAY_SIZE(tsa->serials)) {
+ platform_device_put(pdev);
+ return ERR_PTR(-EINVAL);
+ }
+
+ tsa_serial = &tsa->serials[out_args.args[0]];
+
+ /*
+ * Be sure that the serial id matches the phandle arg.
+	 * The serials table is indexed by serial ids. The serial id is set
+ * during the probe() call and needs to be coherent.
+ */
+ if (WARN_ON(tsa_serial->id != out_args.args[0])) {
+ platform_device_put(pdev);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return tsa_serial;
+}
+EXPORT_SYMBOL(tsa_serial_get_byphandle);
+
+void tsa_serial_put(struct tsa_serial *tsa_serial)
+{
+ struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
+
+ put_device(tsa->dev);
+}
+EXPORT_SYMBOL(tsa_serial_put);
+
+static void devm_tsa_serial_release(struct device *dev, void *res)
+{
+ struct tsa_serial **tsa_serial = res;
+
+ tsa_serial_put(*tsa_serial);
+}
+
+struct tsa_serial *devm_tsa_serial_get_byphandle(struct device *dev,
+ struct device_node *np,
+ const char *phandle_name)
+{
+ struct tsa_serial *tsa_serial;
+ struct tsa_serial **dr;
+
+ dr = devres_alloc(devm_tsa_serial_release, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return ERR_PTR(-ENOMEM);
+
+ tsa_serial = tsa_serial_get_byphandle(np, phandle_name);
+ if (!IS_ERR(tsa_serial)) {
+ *dr = tsa_serial;
+ devres_add(dev, dr);
+ } else {
+ devres_free(dr);
+ }
+
+ return tsa_serial;
+}
+EXPORT_SYMBOL(devm_tsa_serial_get_byphandle);
+
+MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
+MODULE_DESCRIPTION("CPM TSA driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/fsl/qe/tsa.h b/drivers/soc/fsl/qe/tsa.h
new file mode 100644
index 0000000000..d9df89b6da
--- /dev/null
+++ b/drivers/soc/fsl/qe/tsa.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * TSA management
+ *
+ * Copyright 2022 CS GROUP France
+ *
+ * Author: Herve Codina <herve.codina@bootlin.com>
+ */
+#ifndef __SOC_FSL_TSA_H__
+#define __SOC_FSL_TSA_H__
+
+#include <linux/types.h>
+
+struct device_node;
+struct device;
+struct tsa_serial;
+
+struct tsa_serial *tsa_serial_get_byphandle(struct device_node *np,
+ const char *phandle_name);
+void tsa_serial_put(struct tsa_serial *tsa_serial);
+struct tsa_serial *devm_tsa_serial_get_byphandle(struct device *dev,
+ struct device_node *np,
+ const char *phandle_name);
+
+/* Connect and disconnect the TSA serial */
+int tsa_serial_connect(struct tsa_serial *tsa_serial);
+int tsa_serial_disconnect(struct tsa_serial *tsa_serial);
+
+/* Cell information */
+struct tsa_serial_info {
+ unsigned long rx_fs_rate;
+ unsigned long rx_bit_rate;
+ u8 nb_rx_ts;
+ unsigned long tx_fs_rate;
+ unsigned long tx_bit_rate;
+ u8 nb_tx_ts;
+};
+
+/* Get information */
+int tsa_serial_get_info(struct tsa_serial *tsa_serial, struct tsa_serial_info *info);
+
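+/*
+ * Usage sketch (hypothetical consumer; the "fsl,tsa-serial" property name
+ * is an assumption and real bindings may differ):
+ *
+ *	struct tsa_serial *serial;
+ *	struct tsa_serial_info info;
+ *
+ *	serial = devm_tsa_serial_get_byphandle(dev, dev->of_node,
+ *					       "fsl,tsa-serial");
+ *	if (IS_ERR(serial))
+ *		return PTR_ERR(serial);
+ *
+ *	tsa_serial_connect(serial);
+ *	tsa_serial_get_info(serial, &info);
+ *	...
+ *	tsa_serial_disconnect(serial);
+ */
+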
+#endif /* __SOC_FSL_TSA_H__ */
diff --git a/drivers/soc/fsl/qe/ucc.c b/drivers/soc/fsl/qe/ucc.c
new file mode 100644
index 0000000000..21dbcd787c
--- /dev/null
+++ b/drivers/soc/fsl/qe/ucc.c
@@ -0,0 +1,657 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * arch/powerpc/sysdev/qe_lib/ucc.c
+ *
+ * QE UCC API Set - UCC specific routines implementations.
+ *
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors: Shlomi Gridish <gridish@freescale.com>
+ * Li Yang <leoli@freescale.com>
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/stddef.h>
+#include <linux/spinlock.h>
+#include <linux/export.h>
+
+#include <asm/io.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+#include <soc/fsl/qe/ucc.h>
+
+#define UCC_TDM_NUM 8
+#define RX_SYNC_SHIFT_BASE 30
+#define TX_SYNC_SHIFT_BASE 14
+#define RX_CLK_SHIFT_BASE 28
+#define TX_CLK_SHIFT_BASE 12
+
+int ucc_set_qe_mux_mii_mng(unsigned int ucc_num)
+{
+ unsigned long flags;
+
+ if (ucc_num > UCC_MAX_NUM - 1)
+ return -EINVAL;
+
+ spin_lock_irqsave(&cmxgcr_lock, flags);
+ qe_clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
+ ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT);
+ spin_unlock_irqrestore(&cmxgcr_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(ucc_set_qe_mux_mii_mng);
+
+/* Configure the UCC to either Slow or Fast.
+ *
+ * A given UCC can be configured to support either "slow" devices (e.g. UART)
+ * or "fast" devices (e.g. Ethernet).
+ *
+ * 'ucc_num' is the UCC number, from 0 - 7.
+ *
+ * This function also sets the UCC_GUEMR_SET_RESERVED3 bit because that bit
+ * must always be set to 1.
+ */
+int ucc_set_type(unsigned int ucc_num, enum ucc_speed_type speed)
+{
+ u8 __iomem *guemr;
+
+	/*
+	 * The GUEMR register is at the same location for both slow and fast
+	 * devices, so we just use uccX.slow.guemr.
+	 */
+ switch (ucc_num) {
+ case 0: guemr = &qe_immr->ucc1.slow.guemr;
+ break;
+ case 1: guemr = &qe_immr->ucc2.slow.guemr;
+ break;
+ case 2: guemr = &qe_immr->ucc3.slow.guemr;
+ break;
+ case 3: guemr = &qe_immr->ucc4.slow.guemr;
+ break;
+ case 4: guemr = &qe_immr->ucc5.slow.guemr;
+ break;
+ case 5: guemr = &qe_immr->ucc6.slow.guemr;
+ break;
+ case 6: guemr = &qe_immr->ucc7.slow.guemr;
+ break;
+ case 7: guemr = &qe_immr->ucc8.slow.guemr;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ qe_clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK,
+ UCC_GUEMR_SET_RESERVED3 | speed);
+
+ return 0;
+}
+
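+/*
+ * CMXUCR register and half-word used per UCC, as computed below (a reading
+ * aid derived from the arithmetic; ucc_num is zero-based):
+ *
+ *	UCC1/UCC3 -> CMXUCR1 (high/low half)
+ *	UCC5/UCC7 -> CMXUCR2 (high/low half)
+ *	UCC2/UCC4 -> CMXUCR3 (high/low half)
+ *	UCC6/UCC8 -> CMXUCR4 (high/low half)
+ */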
+static void get_cmxucr_reg(unsigned int ucc_num, __be32 __iomem **cmxucr,
+ unsigned int *reg_num, unsigned int *shift)
+{
+ unsigned int cmx = ((ucc_num & 1) << 1) + (ucc_num > 3);
+
+ *reg_num = cmx + 1;
+ *cmxucr = &qe_immr->qmx.cmxucr[cmx];
+ *shift = 16 - 8 * (ucc_num & 2);
+}
+
+int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask)
+{
+ __be32 __iomem *cmxucr;
+ unsigned int reg_num;
+ unsigned int shift;
+
+ /* check if the UCC number is in range. */
+ if (ucc_num > UCC_MAX_NUM - 1)
+ return -EINVAL;
+
+ get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
+
+ if (set)
+ qe_setbits_be32(cmxucr, mask << shift);
+ else
+ qe_clrbits_be32(cmxucr, mask << shift);
+
+ return 0;
+}
+
+int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock,
+ enum comm_dir mode)
+{
+ __be32 __iomem *cmxucr;
+ unsigned int reg_num;
+ unsigned int shift;
+ u32 clock_bits = 0;
+
+ /* check if the UCC number is in range. */
+ if (ucc_num > UCC_MAX_NUM - 1)
+ return -EINVAL;
+
+ /* The communications direction must be RX or TX */
+ if (!((mode == COMM_DIR_RX) || (mode == COMM_DIR_TX)))
+ return -EINVAL;
+
+ get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
+
+ switch (reg_num) {
+ case 1:
+ switch (clock) {
+ case QE_BRG1: clock_bits = 1; break;
+ case QE_BRG2: clock_bits = 2; break;
+ case QE_BRG7: clock_bits = 3; break;
+ case QE_BRG8: clock_bits = 4; break;
+ case QE_CLK9: clock_bits = 5; break;
+ case QE_CLK10: clock_bits = 6; break;
+ case QE_CLK11: clock_bits = 7; break;
+ case QE_CLK12: clock_bits = 8; break;
+ case QE_CLK15: clock_bits = 9; break;
+ case QE_CLK16: clock_bits = 10; break;
+ default: break;
+ }
+ break;
+ case 2:
+ switch (clock) {
+ case QE_BRG5: clock_bits = 1; break;
+ case QE_BRG6: clock_bits = 2; break;
+ case QE_BRG7: clock_bits = 3; break;
+ case QE_BRG8: clock_bits = 4; break;
+ case QE_CLK13: clock_bits = 5; break;
+ case QE_CLK14: clock_bits = 6; break;
+ case QE_CLK19: clock_bits = 7; break;
+ case QE_CLK20: clock_bits = 8; break;
+ case QE_CLK15: clock_bits = 9; break;
+ case QE_CLK16: clock_bits = 10; break;
+ default: break;
+ }
+ break;
+ case 3:
+ switch (clock) {
+ case QE_BRG9: clock_bits = 1; break;
+ case QE_BRG10: clock_bits = 2; break;
+ case QE_BRG15: clock_bits = 3; break;
+ case QE_BRG16: clock_bits = 4; break;
+ case QE_CLK3: clock_bits = 5; break;
+ case QE_CLK4: clock_bits = 6; break;
+ case QE_CLK17: clock_bits = 7; break;
+ case QE_CLK18: clock_bits = 8; break;
+ case QE_CLK7: clock_bits = 9; break;
+ case QE_CLK8: clock_bits = 10; break;
+ case QE_CLK16: clock_bits = 11; break;
+ default: break;
+ }
+ break;
+ case 4:
+ switch (clock) {
+ case QE_BRG13: clock_bits = 1; break;
+ case QE_BRG14: clock_bits = 2; break;
+ case QE_BRG15: clock_bits = 3; break;
+ case QE_BRG16: clock_bits = 4; break;
+ case QE_CLK5: clock_bits = 5; break;
+ case QE_CLK6: clock_bits = 6; break;
+ case QE_CLK21: clock_bits = 7; break;
+ case QE_CLK22: clock_bits = 8; break;
+ case QE_CLK7: clock_bits = 9; break;
+ case QE_CLK8: clock_bits = 10; break;
+ case QE_CLK16: clock_bits = 11; break;
+ default: break;
+ }
+ break;
+ default: break;
+ }
+
+ /* Check for invalid combination of clock and UCC number */
+ if (!clock_bits)
+ return -ENOENT;
+
+ if (mode == COMM_DIR_RX)
+ shift += 4;
+
+ qe_clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+ clock_bits << shift);
+
+ return 0;
+}
+
+static int ucc_get_tdm_common_clk(u32 tdm_num, enum qe_clock clock)
+{
+ int clock_bits = -EINVAL;
+
+	/*
+	 * For TDM[0..3], TX and RX use the common clock sources BRG3/BRG4
+	 * and CLK1/CLK2; for TDM[4..7], they use BRG12/BRG13 and CLK23/CLK24.
+	 */
+ switch (tdm_num) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ switch (clock) {
+ case QE_BRG3:
+ clock_bits = 1;
+ break;
+ case QE_BRG4:
+ clock_bits = 2;
+ break;
+ case QE_CLK1:
+ clock_bits = 4;
+ break;
+ case QE_CLK2:
+ clock_bits = 5;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ switch (clock) {
+ case QE_BRG12:
+ clock_bits = 1;
+ break;
+ case QE_BRG13:
+ clock_bits = 2;
+ break;
+ case QE_CLK23:
+ clock_bits = 4;
+ break;
+ case QE_CLK24:
+ clock_bits = 5;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return clock_bits;
+}
+
+static int ucc_get_tdm_rx_clk(u32 tdm_num, enum qe_clock clock)
+{
+ int clock_bits = -EINVAL;
+
+ switch (tdm_num) {
+ case 0:
+ switch (clock) {
+ case QE_CLK3:
+ clock_bits = 6;
+ break;
+ case QE_CLK8:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 1:
+ switch (clock) {
+ case QE_CLK5:
+ clock_bits = 6;
+ break;
+ case QE_CLK10:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 2:
+ switch (clock) {
+ case QE_CLK7:
+ clock_bits = 6;
+ break;
+ case QE_CLK12:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 3:
+ switch (clock) {
+ case QE_CLK9:
+ clock_bits = 6;
+ break;
+ case QE_CLK14:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 4:
+ switch (clock) {
+ case QE_CLK11:
+ clock_bits = 6;
+ break;
+ case QE_CLK16:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 5:
+ switch (clock) {
+ case QE_CLK13:
+ clock_bits = 6;
+ break;
+ case QE_CLK18:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 6:
+ switch (clock) {
+ case QE_CLK15:
+ clock_bits = 6;
+ break;
+ case QE_CLK20:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 7:
+ switch (clock) {
+ case QE_CLK17:
+ clock_bits = 6;
+ break;
+ case QE_CLK22:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+
+ return clock_bits;
+}
+
+static int ucc_get_tdm_tx_clk(u32 tdm_num, enum qe_clock clock)
+{
+ int clock_bits = -EINVAL;
+
+ switch (tdm_num) {
+ case 0:
+ switch (clock) {
+ case QE_CLK4:
+ clock_bits = 6;
+ break;
+ case QE_CLK9:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 1:
+ switch (clock) {
+ case QE_CLK6:
+ clock_bits = 6;
+ break;
+ case QE_CLK11:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 2:
+ switch (clock) {
+ case QE_CLK8:
+ clock_bits = 6;
+ break;
+ case QE_CLK13:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 3:
+ switch (clock) {
+ case QE_CLK10:
+ clock_bits = 6;
+ break;
+ case QE_CLK15:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 4:
+ switch (clock) {
+ case QE_CLK12:
+ clock_bits = 6;
+ break;
+ case QE_CLK17:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 5:
+ switch (clock) {
+ case QE_CLK14:
+ clock_bits = 6;
+ break;
+ case QE_CLK19:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 6:
+ switch (clock) {
+ case QE_CLK16:
+ clock_bits = 6;
+ break;
+ case QE_CLK21:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 7:
+ switch (clock) {
+ case QE_CLK18:
+ clock_bits = 6;
+ break;
+ case QE_CLK3:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+
+ return clock_bits;
+}
+
+/* tdm_num: TDM A-H port num is 0-7 */
+static int ucc_get_tdm_rxtx_clk(enum comm_dir mode, u32 tdm_num,
+ enum qe_clock clock)
+{
+ int clock_bits;
+
+ clock_bits = ucc_get_tdm_common_clk(tdm_num, clock);
+ if (clock_bits > 0)
+ return clock_bits;
+ if (mode == COMM_DIR_RX)
+ clock_bits = ucc_get_tdm_rx_clk(tdm_num, clock);
+ if (mode == COMM_DIR_TX)
+ clock_bits = ucc_get_tdm_tx_clk(tdm_num, clock);
+ return clock_bits;
+}
+
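+/*
+ * Worked example (derived from the code below): for TDM C (tdm_num = 2),
+ * the RX clock field sits at bit 20 (28 - 2 * 4) and the TX clock field at
+ * bit 4 (12 - 2 * 4); TDMs 4..7 get the same offsets within the high
+ * register.
+ */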
+static u32 ucc_get_tdm_clk_shift(enum comm_dir mode, u32 tdm_num)
+{
+ u32 shift;
+
+ shift = (mode == COMM_DIR_RX) ? RX_CLK_SHIFT_BASE : TX_CLK_SHIFT_BASE;
+ if (tdm_num < 4)
+ shift -= tdm_num * 4;
+ else
+ shift -= (tdm_num - 4) * 4;
+
+ return shift;
+}
+
+int ucc_set_tdm_rxtx_clk(u32 tdm_num, enum qe_clock clock,
+ enum comm_dir mode)
+{
+ int clock_bits;
+ u32 shift;
+ struct qe_mux __iomem *qe_mux_reg;
+ __be32 __iomem *cmxs1cr;
+
+ qe_mux_reg = &qe_immr->qmx;
+
+ if (tdm_num > 7)
+ return -EINVAL;
+
+ /* The communications direction must be RX or TX */
+ if (mode != COMM_DIR_RX && mode != COMM_DIR_TX)
+ return -EINVAL;
+
+ clock_bits = ucc_get_tdm_rxtx_clk(mode, tdm_num, clock);
+ if (clock_bits < 0)
+ return -EINVAL;
+
+ shift = ucc_get_tdm_clk_shift(mode, tdm_num);
+
+ cmxs1cr = (tdm_num < 4) ? &qe_mux_reg->cmxsi1cr_l :
+ &qe_mux_reg->cmxsi1cr_h;
+
+ qe_clrsetbits_be32(cmxs1cr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+ clock_bits << shift);
+
+ return 0;
+}
+
+static int ucc_get_tdm_sync_source(u32 tdm_num, enum qe_clock clock,
+ enum comm_dir mode)
+{
+ int source = -EINVAL;
+
+ if (mode == COMM_DIR_RX && clock == QE_RSYNC_PIN) {
+ source = 0;
+ return source;
+ }
+ if (mode == COMM_DIR_TX && clock == QE_TSYNC_PIN) {
+ source = 0;
+ return source;
+ }
+
+ switch (tdm_num) {
+ case 0:
+ case 1:
+ switch (clock) {
+ case QE_BRG9:
+ source = 1;
+ break;
+ case QE_BRG10:
+ source = 2;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 2:
+ case 3:
+ switch (clock) {
+ case QE_BRG9:
+ source = 1;
+ break;
+ case QE_BRG11:
+ source = 2;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 4:
+ case 5:
+ switch (clock) {
+ case QE_BRG13:
+ source = 1;
+ break;
+ case QE_BRG14:
+ source = 2;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 6:
+ case 7:
+ switch (clock) {
+ case QE_BRG13:
+ source = 1;
+ break;
+ case QE_BRG15:
+ source = 2;
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+
+ return source;
+}
+
+static u32 ucc_get_tdm_sync_shift(enum comm_dir mode, u32 tdm_num)
+{
+ u32 shift;
+
+ shift = (mode == COMM_DIR_RX) ? RX_SYNC_SHIFT_BASE : TX_SYNC_SHIFT_BASE;
+ shift -= tdm_num * 2;
+
+ return shift;
+}
+
+int ucc_set_tdm_rxtx_sync(u32 tdm_num, enum qe_clock clock,
+ enum comm_dir mode)
+{
+ int source;
+ u32 shift;
+ struct qe_mux __iomem *qe_mux_reg;
+
+ qe_mux_reg = &qe_immr->qmx;
+
+ if (tdm_num >= UCC_TDM_NUM)
+ return -EINVAL;
+
+ /* The communications direction must be RX or TX */
+ if (mode != COMM_DIR_RX && mode != COMM_DIR_TX)
+ return -EINVAL;
+
+ source = ucc_get_tdm_sync_source(tdm_num, clock, mode);
+ if (source < 0)
+ return -EINVAL;
+
+ shift = ucc_get_tdm_sync_shift(mode, tdm_num);
+
+ qe_clrsetbits_be32(&qe_mux_reg->cmxsi1syr,
+ QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+ source << shift);
+
+ return 0;
+}
diff --git a/drivers/soc/fsl/qe/ucc_fast.c b/drivers/soc/fsl/qe/ucc_fast.c
new file mode 100644
index 0000000000..53d8aafc93
--- /dev/null
+++ b/drivers/soc/fsl/qe/ucc_fast.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors: Shlomi Gridish <gridish@freescale.com>
+ * Li Yang <leoli@freescale.com>
+ *
+ * Description:
+ * QE UCC Fast API Set - UCC Fast specific routines implementations.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/export.h>
+
+#include <asm/io.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+
+#include <soc/fsl/qe/ucc.h>
+#include <soc/fsl/qe/ucc_fast.h>
+
+void ucc_fast_dump_regs(struct ucc_fast_private *uccf)
+{
+ printk(KERN_INFO "UCC%u Fast registers:\n", uccf->uf_info->ucc_num);
+ printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs);
+
+ printk(KERN_INFO "gumr : addr=0x%p, val=0x%08x\n",
+ &uccf->uf_regs->gumr, ioread32be(&uccf->uf_regs->gumr));
+ printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n",
+ &uccf->uf_regs->upsmr, ioread32be(&uccf->uf_regs->upsmr));
+ printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->utodr, ioread16be(&uccf->uf_regs->utodr));
+ printk(KERN_INFO "udsr : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->udsr, ioread16be(&uccf->uf_regs->udsr));
+ printk(KERN_INFO "ucce : addr=0x%p, val=0x%08x\n",
+ &uccf->uf_regs->ucce, ioread32be(&uccf->uf_regs->ucce));
+ printk(KERN_INFO "uccm : addr=0x%p, val=0x%08x\n",
+ &uccf->uf_regs->uccm, ioread32be(&uccf->uf_regs->uccm));
+ printk(KERN_INFO "uccs : addr=0x%p, val=0x%02x\n",
+ &uccf->uf_regs->uccs, ioread8(&uccf->uf_regs->uccs));
+ printk(KERN_INFO "urfb : addr=0x%p, val=0x%08x\n",
+ &uccf->uf_regs->urfb, ioread32be(&uccf->uf_regs->urfb));
+ printk(KERN_INFO "urfs : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->urfs, ioread16be(&uccf->uf_regs->urfs));
+ printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->urfet, ioread16be(&uccf->uf_regs->urfet));
+ printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->urfset,
+ ioread16be(&uccf->uf_regs->urfset));
+ printk(KERN_INFO "utfb : addr=0x%p, val=0x%08x\n",
+ &uccf->uf_regs->utfb, ioread32be(&uccf->uf_regs->utfb));
+ printk(KERN_INFO "utfs : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->utfs, ioread16be(&uccf->uf_regs->utfs));
+ printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->utfet, ioread16be(&uccf->uf_regs->utfet));
+ printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->utftt, ioread16be(&uccf->uf_regs->utftt));
+ printk(KERN_INFO "utpt : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->utpt, ioread16be(&uccf->uf_regs->utpt));
+ printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n",
+ &uccf->uf_regs->urtry, ioread32be(&uccf->uf_regs->urtry));
+ printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n",
+ &uccf->uf_regs->guemr, ioread8(&uccf->uf_regs->guemr));
+}
+EXPORT_SYMBOL(ucc_fast_dump_regs);
+
+u32 ucc_fast_get_qe_cr_subblock(int uccf_num)
+{
+ switch (uccf_num) {
+ case 0: return QE_CR_SUBBLOCK_UCCFAST1;
+ case 1: return QE_CR_SUBBLOCK_UCCFAST2;
+ case 2: return QE_CR_SUBBLOCK_UCCFAST3;
+ case 3: return QE_CR_SUBBLOCK_UCCFAST4;
+ case 4: return QE_CR_SUBBLOCK_UCCFAST5;
+ case 5: return QE_CR_SUBBLOCK_UCCFAST6;
+ case 6: return QE_CR_SUBBLOCK_UCCFAST7;
+ case 7: return QE_CR_SUBBLOCK_UCCFAST8;
+ default: return QE_CR_SUBBLOCK_INVALID;
+ }
+}
+EXPORT_SYMBOL(ucc_fast_get_qe_cr_subblock);
+
+void ucc_fast_transmit_on_demand(struct ucc_fast_private *uccf)
+{
+ iowrite16be(UCC_FAST_TOD, &uccf->uf_regs->utodr);
+}
+EXPORT_SYMBOL(ucc_fast_transmit_on_demand);
+
+void ucc_fast_enable(struct ucc_fast_private *uccf, enum comm_dir mode)
+{
+ struct ucc_fast __iomem *uf_regs;
+ u32 gumr;
+
+ uf_regs = uccf->uf_regs;
+
+ /* Enable reception and/or transmission on this UCC. */
+ gumr = ioread32be(&uf_regs->gumr);
+ if (mode & COMM_DIR_TX) {
+ gumr |= UCC_FAST_GUMR_ENT;
+ uccf->enabled_tx = 1;
+ }
+ if (mode & COMM_DIR_RX) {
+ gumr |= UCC_FAST_GUMR_ENR;
+ uccf->enabled_rx = 1;
+ }
+ iowrite32be(gumr, &uf_regs->gumr);
+}
+EXPORT_SYMBOL(ucc_fast_enable);
+
+void ucc_fast_disable(struct ucc_fast_private *uccf, enum comm_dir mode)
+{
+ struct ucc_fast __iomem *uf_regs;
+ u32 gumr;
+
+ uf_regs = uccf->uf_regs;
+
+ /* Disable reception and/or transmission on this UCC. */
+ gumr = ioread32be(&uf_regs->gumr);
+ if (mode & COMM_DIR_TX) {
+ gumr &= ~UCC_FAST_GUMR_ENT;
+ uccf->enabled_tx = 0;
+ }
+ if (mode & COMM_DIR_RX) {
+ gumr &= ~UCC_FAST_GUMR_ENR;
+ uccf->enabled_rx = 0;
+ }
+ iowrite32be(gumr, &uf_regs->gumr);
+}
+EXPORT_SYMBOL(ucc_fast_disable);
+
+int ucc_fast_init(struct ucc_fast_info *uf_info, struct ucc_fast_private **uccf_ret)
+{
+ struct ucc_fast_private *uccf;
+ struct ucc_fast __iomem *uf_regs;
+ u32 gumr;
+ int ret;
+
+ if (!uf_info)
+ return -EINVAL;
+
+ /* check if the UCC port number is in range. */
+ if ((uf_info->ucc_num < 0) || (uf_info->ucc_num > UCC_MAX_NUM - 1)) {
+ printk(KERN_ERR "%s: illegal UCC number\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Check that 'max_rx_buf_length' is properly aligned (4). */
+ if (uf_info->max_rx_buf_length & (UCC_FAST_MRBLR_ALIGNMENT - 1)) {
+ printk(KERN_ERR "%s: max_rx_buf_length not aligned\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Validate Virtual Fifo register values */
+ if (uf_info->urfs < UCC_FAST_URFS_MIN_VAL) {
+ printk(KERN_ERR "%s: urfs is too small\n", __func__);
+ return -EINVAL;
+ }
+
+ if (uf_info->urfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
+ printk(KERN_ERR "%s: urfs is not aligned\n", __func__);
+ return -EINVAL;
+ }
+
+ if (uf_info->urfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
+ printk(KERN_ERR "%s: urfet is not aligned.\n", __func__);
+ return -EINVAL;
+ }
+
+ if (uf_info->urfset & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
+ printk(KERN_ERR "%s: urfset is not aligned\n", __func__);
+ return -EINVAL;
+ }
+
+ if (uf_info->utfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
+ printk(KERN_ERR "%s: utfs is not aligned\n", __func__);
+ return -EINVAL;
+ }
+
+ if (uf_info->utfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
+ printk(KERN_ERR "%s: utfet is not aligned\n", __func__);
+ return -EINVAL;
+ }
+
+ if (uf_info->utftt & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
+ printk(KERN_ERR "%s: utftt is not aligned\n", __func__);
+ return -EINVAL;
+ }
+
+ uccf = kzalloc(sizeof(struct ucc_fast_private), GFP_KERNEL);
+ if (!uccf) {
+ printk(KERN_ERR "%s: Cannot allocate private data\n",
+ __func__);
+ return -ENOMEM;
+ }
+ uccf->ucc_fast_tx_virtual_fifo_base_offset = -1;
+ uccf->ucc_fast_rx_virtual_fifo_base_offset = -1;
+
+ /* Fill fast UCC structure */
+ uccf->uf_info = uf_info;
+	/* Map the UCC register block */
+ uccf->uf_regs = ioremap(uf_info->regs, sizeof(struct ucc_fast));
+ if (uccf->uf_regs == NULL) {
+ printk(KERN_ERR "%s: Cannot map UCC registers\n", __func__);
+ kfree(uccf);
+ return -ENOMEM;
+ }
+
+ uccf->enabled_tx = 0;
+ uccf->enabled_rx = 0;
+ uccf->stopped_tx = 0;
+ uccf->stopped_rx = 0;
+ uf_regs = uccf->uf_regs;
+ uccf->p_ucce = &uf_regs->ucce;
+ uccf->p_uccm = &uf_regs->uccm;
+#ifdef CONFIG_UGETH_TX_ON_DEMAND
+ uccf->p_utodr = &uf_regs->utodr;
+#endif
+#ifdef STATISTICS
+ uccf->tx_frames = 0;
+ uccf->rx_frames = 0;
+ uccf->rx_discarded = 0;
+#endif /* STATISTICS */
+
+ /* Set UCC to fast type */
+ ret = ucc_set_type(uf_info->ucc_num, UCC_SPEED_TYPE_FAST);
+ if (ret) {
+ printk(KERN_ERR "%s: cannot set UCC type\n", __func__);
+ ucc_fast_free(uccf);
+ return ret;
+ }
+
+ uccf->mrblr = uf_info->max_rx_buf_length;
+
+ /* Set GUMR */
+ /* For more details see the hardware spec. */
+ gumr = uf_info->ttx_trx;
+ if (uf_info->tci)
+ gumr |= UCC_FAST_GUMR_TCI;
+ if (uf_info->cdp)
+ gumr |= UCC_FAST_GUMR_CDP;
+ if (uf_info->ctsp)
+ gumr |= UCC_FAST_GUMR_CTSP;
+ if (uf_info->cds)
+ gumr |= UCC_FAST_GUMR_CDS;
+ if (uf_info->ctss)
+ gumr |= UCC_FAST_GUMR_CTSS;
+ if (uf_info->txsy)
+ gumr |= UCC_FAST_GUMR_TXSY;
+ if (uf_info->rsyn)
+ gumr |= UCC_FAST_GUMR_RSYN;
+ gumr |= uf_info->synl;
+ if (uf_info->rtsm)
+ gumr |= UCC_FAST_GUMR_RTSM;
+ gumr |= uf_info->renc;
+ if (uf_info->revd)
+ gumr |= UCC_FAST_GUMR_REVD;
+ gumr |= uf_info->tenc;
+ gumr |= uf_info->tcrc;
+ gumr |= uf_info->mode;
+ iowrite32be(gumr, &uf_regs->gumr);
+
+ /* Allocate memory for Tx Virtual Fifo */
+ uccf->ucc_fast_tx_virtual_fifo_base_offset =
+ qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
+ if (uccf->ucc_fast_tx_virtual_fifo_base_offset < 0) {
+ printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO\n",
+ __func__);
+ ucc_fast_free(uccf);
+ return -ENOMEM;
+ }
+
+ /* Allocate memory for Rx Virtual Fifo */
+ uccf->ucc_fast_rx_virtual_fifo_base_offset =
+ qe_muram_alloc(uf_info->urfs +
+ UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR,
+ UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
+ if (uccf->ucc_fast_rx_virtual_fifo_base_offset < 0) {
+ printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO\n",
+ __func__);
+ ucc_fast_free(uccf);
+ return -ENOMEM;
+ }
+
+ /* Set Virtual Fifo registers */
+ iowrite16be(uf_info->urfs, &uf_regs->urfs);
+ iowrite16be(uf_info->urfet, &uf_regs->urfet);
+ iowrite16be(uf_info->urfset, &uf_regs->urfset);
+ iowrite16be(uf_info->utfs, &uf_regs->utfs);
+ iowrite16be(uf_info->utfet, &uf_regs->utfet);
+ iowrite16be(uf_info->utftt, &uf_regs->utftt);
+ /* utfb, urfb are offsets from MURAM base */
+ iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset,
+ &uf_regs->utfb);
+ iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset,
+ &uf_regs->urfb);
+
+ /* Mux clocking */
+ /* Grant Support */
+ ucc_set_qe_mux_grant(uf_info->ucc_num, uf_info->grant_support);
+ /* Breakpoint Support */
+ ucc_set_qe_mux_bkpt(uf_info->ucc_num, uf_info->brkpt_support);
+ /* Set Tsa or NMSI mode. */
+ ucc_set_qe_mux_tsa(uf_info->ucc_num, uf_info->tsa);
+ /* If NMSI (not Tsa), set Tx and Rx clock. */
+ if (!uf_info->tsa) {
+ /* Rx clock routing */
+ if ((uf_info->rx_clock != QE_CLK_NONE) &&
+ ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->rx_clock,
+ COMM_DIR_RX)) {
+ printk(KERN_ERR "%s: illegal value for RX clock\n",
+ __func__);
+ ucc_fast_free(uccf);
+ return -EINVAL;
+ }
+ /* Tx clock routing */
+ if ((uf_info->tx_clock != QE_CLK_NONE) &&
+ ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->tx_clock,
+ COMM_DIR_TX)) {
+ printk(KERN_ERR "%s: illegal value for TX clock\n",
+ __func__);
+ ucc_fast_free(uccf);
+ return -EINVAL;
+ }
+ } else {
+ /* tdm Rx clock routing */
+ if ((uf_info->rx_clock != QE_CLK_NONE) &&
+ ucc_set_tdm_rxtx_clk(uf_info->tdm_num, uf_info->rx_clock,
+ COMM_DIR_RX)) {
+ pr_err("%s: illegal value for RX clock", __func__);
+ ucc_fast_free(uccf);
+ return -EINVAL;
+ }
+
+ /* tdm Tx clock routing */
+ if ((uf_info->tx_clock != QE_CLK_NONE) &&
+ ucc_set_tdm_rxtx_clk(uf_info->tdm_num, uf_info->tx_clock,
+ COMM_DIR_TX)) {
+ pr_err("%s: illegal value for TX clock", __func__);
+ ucc_fast_free(uccf);
+ return -EINVAL;
+ }
+
+ /* tdm Rx sync clock routing */
+ if ((uf_info->rx_sync != QE_CLK_NONE) &&
+ ucc_set_tdm_rxtx_sync(uf_info->tdm_num, uf_info->rx_sync,
+ COMM_DIR_RX)) {
+ pr_err("%s: illegal value for RX clock", __func__);
+ ucc_fast_free(uccf);
+ return -EINVAL;
+ }
+
+ /* tdm Tx sync clock routing */
+ if ((uf_info->tx_sync != QE_CLK_NONE) &&
+ ucc_set_tdm_rxtx_sync(uf_info->tdm_num, uf_info->tx_sync,
+ COMM_DIR_TX)) {
+ pr_err("%s: illegal value for TX clock", __func__);
+ ucc_fast_free(uccf);
+ return -EINVAL;
+ }
+ }
+
+ /* Set interrupt mask register at UCC level. */
+ iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
+
+	/*
+	 * First, clear anything pending at UCC level; otherwise, old garbage
+	 * may come through as soon as the dam is opened. Writing '1' clears.
+	 */
+	iowrite32be(0xffffffff, &uf_regs->ucce);
+
+ *uccf_ret = uccf;
+ return 0;
+}
+EXPORT_SYMBOL(ucc_fast_init);
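+
+/*
+ * Minimal initialization sketch (hypothetical caller; the values are
+ * illustrative only, and real users fill in many more ucc_fast_info
+ * fields such as regs, the virtual FIFO sizes and the clock routing):
+ *
+ *	struct ucc_fast_private *uccf;
+ *	struct ucc_fast_info uf_info = {};
+ *
+ *	uf_info.ucc_num = 0;
+ *	uf_info.max_rx_buf_length = 1536;	(must be 4-byte aligned)
+ *	... set regs, urfs/urfet/urfset, utfs/utfet/utftt, clocks ...
+ *
+ *	if (ucc_fast_init(&uf_info, &uccf))
+ *		return -ENODEV;
+ *	ucc_fast_enable(uccf, COMM_DIR_RX | COMM_DIR_TX);
+ */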
+
+void ucc_fast_free(struct ucc_fast_private *uccf)
+{
+ if (!uccf)
+ return;
+
+ qe_muram_free(uccf->ucc_fast_tx_virtual_fifo_base_offset);
+ qe_muram_free(uccf->ucc_fast_rx_virtual_fifo_base_offset);
+
+ if (uccf->uf_regs)
+ iounmap(uccf->uf_regs);
+
+ kfree(uccf);
+}
+EXPORT_SYMBOL(ucc_fast_free);
diff --git a/drivers/soc/fsl/qe/ucc_slow.c b/drivers/soc/fsl/qe/ucc_slow.c
new file mode 100644
index 0000000000..d5ac1ac0ed
--- /dev/null
+++ b/drivers/soc/fsl/qe/ucc_slow.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors: Shlomi Gridish <gridish@freescale.com>
+ * Li Yang <leoli@freescale.com>
+ *
+ * Description:
+ * QE UCC Slow API Set - UCC Slow specific routines implementations.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/export.h>
+
+#include <asm/io.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+
+#include <soc/fsl/qe/ucc.h>
+#include <soc/fsl/qe/ucc_slow.h>
+
+u32 ucc_slow_get_qe_cr_subblock(int uccs_num)
+{
+ switch (uccs_num) {
+ case 0: return QE_CR_SUBBLOCK_UCCSLOW1;
+ case 1: return QE_CR_SUBBLOCK_UCCSLOW2;
+ case 2: return QE_CR_SUBBLOCK_UCCSLOW3;
+ case 3: return QE_CR_SUBBLOCK_UCCSLOW4;
+ case 4: return QE_CR_SUBBLOCK_UCCSLOW5;
+ case 5: return QE_CR_SUBBLOCK_UCCSLOW6;
+ case 6: return QE_CR_SUBBLOCK_UCCSLOW7;
+ case 7: return QE_CR_SUBBLOCK_UCCSLOW8;
+ default: return QE_CR_SUBBLOCK_INVALID;
+ }
+}
+EXPORT_SYMBOL(ucc_slow_get_qe_cr_subblock);
+
+void ucc_slow_graceful_stop_tx(struct ucc_slow_private *uccs)
+{
+ struct ucc_slow_info *us_info = uccs->us_info;
+ u32 id;
+
+ id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
+ qe_issue_cmd(QE_GRACEFUL_STOP_TX, id,
+ QE_CR_PROTOCOL_UNSPECIFIED, 0);
+}
+EXPORT_SYMBOL(ucc_slow_graceful_stop_tx);
+
+void ucc_slow_stop_tx(struct ucc_slow_private *uccs)
+{
+ struct ucc_slow_info *us_info = uccs->us_info;
+ u32 id;
+
+ id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
+ qe_issue_cmd(QE_STOP_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
+}
+EXPORT_SYMBOL(ucc_slow_stop_tx);
+
+void ucc_slow_restart_tx(struct ucc_slow_private *uccs)
+{
+ struct ucc_slow_info *us_info = uccs->us_info;
+ u32 id;
+
+ id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
+ qe_issue_cmd(QE_RESTART_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
+}
+EXPORT_SYMBOL(ucc_slow_restart_tx);
+
+void ucc_slow_enable(struct ucc_slow_private *uccs, enum comm_dir mode)
+{
+ struct ucc_slow __iomem *us_regs;
+ u32 gumr_l;
+
+ us_regs = uccs->us_regs;
+
+ /* Enable reception and/or transmission on this UCC. */
+ gumr_l = ioread32be(&us_regs->gumr_l);
+ if (mode & COMM_DIR_TX) {
+ gumr_l |= UCC_SLOW_GUMR_L_ENT;
+ uccs->enabled_tx = 1;
+ }
+ if (mode & COMM_DIR_RX) {
+ gumr_l |= UCC_SLOW_GUMR_L_ENR;
+ uccs->enabled_rx = 1;
+ }
+ iowrite32be(gumr_l, &us_regs->gumr_l);
+}
+EXPORT_SYMBOL(ucc_slow_enable);
+
+void ucc_slow_disable(struct ucc_slow_private *uccs, enum comm_dir mode)
+{
+ struct ucc_slow __iomem *us_regs;
+ u32 gumr_l;
+
+ us_regs = uccs->us_regs;
+
+ /* Disable reception and/or transmission on this UCC. */
+ gumr_l = ioread32be(&us_regs->gumr_l);
+ if (mode & COMM_DIR_TX) {
+ gumr_l &= ~UCC_SLOW_GUMR_L_ENT;
+ uccs->enabled_tx = 0;
+ }
+ if (mode & COMM_DIR_RX) {
+ gumr_l &= ~UCC_SLOW_GUMR_L_ENR;
+ uccs->enabled_rx = 0;
+ }
+ iowrite32be(gumr_l, &us_regs->gumr_l);
+}
+EXPORT_SYMBOL(ucc_slow_disable);
+
+/*
+ * Initialize the UCC for slow operations.
+ *
+ * The caller must have initialized the relevant us_info fields before
+ * calling this function.
+ */
+int ucc_slow_init(struct ucc_slow_info *us_info, struct ucc_slow_private **uccs_ret)
+{
+ struct ucc_slow_private *uccs;
+ u32 i;
+ struct ucc_slow __iomem *us_regs;
+ u32 gumr;
+ struct qe_bd __iomem *bd;
+ u32 id;
+ u32 command;
+ int ret = 0;
+
+ if (!us_info)
+ return -EINVAL;
+
+ /* check if the UCC port number is in range. */
+ if ((us_info->ucc_num < 0) || (us_info->ucc_num > UCC_MAX_NUM - 1)) {
+ printk(KERN_ERR "%s: illegal UCC number\n", __func__);
+ return -EINVAL;
+ }
+
+ /*
+ * Set mrblr
+ * Check that 'max_rx_buf_length' is properly aligned (4), unless
+	 * rfw is 1, meaning that the QE accepts one byte at a time, unlike the
+	 * normal case where the QE accepts 32 bits at a time.
+ */
+ if ((!us_info->rfw) &&
+ (us_info->max_rx_buf_length & (UCC_SLOW_MRBLR_ALIGNMENT - 1))) {
+ printk(KERN_ERR "max_rx_buf_length not aligned.\n");
+ return -EINVAL;
+ }
+
+ uccs = kzalloc(sizeof(struct ucc_slow_private), GFP_KERNEL);
+ if (!uccs) {
+ printk(KERN_ERR "%s: Cannot allocate private data\n",
+ __func__);
+ return -ENOMEM;
+ }
+ uccs->rx_base_offset = -1;
+ uccs->tx_base_offset = -1;
+ uccs->us_pram_offset = -1;
+
+ /* Fill slow UCC structure */
+ uccs->us_info = us_info;
+	/* Map the UCC register block */
+ uccs->us_regs = ioremap(us_info->regs, sizeof(struct ucc_slow));
+ if (uccs->us_regs == NULL) {
+ printk(KERN_ERR "%s: Cannot map UCC registers\n", __func__);
+ kfree(uccs);
+ return -ENOMEM;
+ }
+
+ us_regs = uccs->us_regs;
+ uccs->p_ucce = &us_regs->ucce;
+ uccs->p_uccm = &us_regs->uccm;
+
+ /* Get PRAM base */
+ uccs->us_pram_offset =
+ qe_muram_alloc(UCC_SLOW_PRAM_SIZE, ALIGNMENT_OF_UCC_SLOW_PRAM);
+ if (uccs->us_pram_offset < 0) {
+ printk(KERN_ERR "%s: cannot allocate MURAM for PRAM", __func__);
+ ucc_slow_free(uccs);
+ return -ENOMEM;
+ }
+ id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
+ qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, id, us_info->protocol,
+ uccs->us_pram_offset);
+
+ uccs->us_pram = qe_muram_addr(uccs->us_pram_offset);
+
+ /* Set UCC to slow type */
+ ret = ucc_set_type(us_info->ucc_num, UCC_SPEED_TYPE_SLOW);
+ if (ret) {
+ printk(KERN_ERR "%s: cannot set UCC type", __func__);
+ ucc_slow_free(uccs);
+ return ret;
+ }
+
+ iowrite16be(us_info->max_rx_buf_length, &uccs->us_pram->mrblr);
+
+ INIT_LIST_HEAD(&uccs->confQ);
+
+ /* Allocate BDs. */
+ uccs->rx_base_offset =
+ qe_muram_alloc(us_info->rx_bd_ring_len * sizeof(struct qe_bd),
+ QE_ALIGNMENT_OF_BD);
+ if (uccs->rx_base_offset < 0) {
+ printk(KERN_ERR "%s: cannot allocate %u RX BDs\n", __func__,
+ us_info->rx_bd_ring_len);
+ ucc_slow_free(uccs);
+ return -ENOMEM;
+ }
+
+ uccs->tx_base_offset =
+ qe_muram_alloc(us_info->tx_bd_ring_len * sizeof(struct qe_bd),
+ QE_ALIGNMENT_OF_BD);
+ if (uccs->tx_base_offset < 0) {
+ printk(KERN_ERR "%s: cannot allocate TX BDs", __func__);
+ ucc_slow_free(uccs);
+ return -ENOMEM;
+ }
+
+ /* Init Tx bds */
+ bd = uccs->confBd = uccs->tx_bd = qe_muram_addr(uccs->tx_base_offset);
+ for (i = 0; i < us_info->tx_bd_ring_len - 1; i++) {
+ /* clear bd buffer */
+ iowrite32be(0, &bd->buf);
+ /* set bd status and length */
+ iowrite32be(0, (u32 __iomem *)bd);
+ bd++;
+ }
+ /* for last BD set Wrap bit */
+ iowrite32be(0, &bd->buf);
+ iowrite32be(T_W, (u32 __iomem *)bd);
+
+ /* Init Rx bds */
+ bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset);
+ for (i = 0; i < us_info->rx_bd_ring_len - 1; i++) {
+ /* set bd status and length */
+ iowrite32be(0, (u32 __iomem *)bd);
+ /* clear bd buffer */
+ iowrite32be(0, &bd->buf);
+ bd++;
+ }
+ /* for last BD set Wrap bit */
+ iowrite32be(R_W, (u32 __iomem *)bd);
+ iowrite32be(0, &bd->buf);
+
+ /* Set GUMR (For more details see the hardware spec.). */
+ /* gumr_h */
+ gumr = us_info->tcrc;
+ if (us_info->cdp)
+ gumr |= UCC_SLOW_GUMR_H_CDP;
+ if (us_info->ctsp)
+ gumr |= UCC_SLOW_GUMR_H_CTSP;
+ if (us_info->cds)
+ gumr |= UCC_SLOW_GUMR_H_CDS;
+ if (us_info->ctss)
+ gumr |= UCC_SLOW_GUMR_H_CTSS;
+ if (us_info->tfl)
+ gumr |= UCC_SLOW_GUMR_H_TFL;
+ if (us_info->rfw)
+ gumr |= UCC_SLOW_GUMR_H_RFW;
+ if (us_info->txsy)
+ gumr |= UCC_SLOW_GUMR_H_TXSY;
+ if (us_info->rtsm)
+ gumr |= UCC_SLOW_GUMR_H_RTSM;
+ iowrite32be(gumr, &us_regs->gumr_h);
+
+ /* gumr_l */
+ gumr = (u32)us_info->tdcr | (u32)us_info->rdcr | (u32)us_info->tenc |
+ (u32)us_info->renc | (u32)us_info->diag | (u32)us_info->mode;
+ if (us_info->tci)
+ gumr |= UCC_SLOW_GUMR_L_TCI;
+ if (us_info->rinv)
+ gumr |= UCC_SLOW_GUMR_L_RINV;
+ if (us_info->tinv)
+ gumr |= UCC_SLOW_GUMR_L_TINV;
+ if (us_info->tend)
+ gumr |= UCC_SLOW_GUMR_L_TEND;
+ iowrite32be(gumr, &us_regs->gumr_l);
+
+ /* Function code registers */
+
+	/*
+	 * If the data is in cacheable memory, the 'global' bit in the
+	 * function code should be set.
+	 */
+ iowrite8(UCC_BMR_BO_BE, &uccs->us_pram->tbmr);
+ iowrite8(UCC_BMR_BO_BE, &uccs->us_pram->rbmr);
+
+ /* rbase, tbase are offsets from MURAM base */
+ iowrite16be(uccs->rx_base_offset, &uccs->us_pram->rbase);
+ iowrite16be(uccs->tx_base_offset, &uccs->us_pram->tbase);
+
+ /* Mux clocking */
+ /* Grant Support */
+ ucc_set_qe_mux_grant(us_info->ucc_num, us_info->grant_support);
+ /* Breakpoint Support */
+ ucc_set_qe_mux_bkpt(us_info->ucc_num, us_info->brkpt_support);
+ /* Set Tsa or NMSI mode. */
+ ucc_set_qe_mux_tsa(us_info->ucc_num, us_info->tsa);
+ /* If NMSI (not Tsa), set Tx and Rx clock. */
+ if (!us_info->tsa) {
+ /* Rx clock routing */
+ if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->rx_clock,
+ COMM_DIR_RX)) {
+ printk(KERN_ERR "%s: illegal value for RX clock\n",
+ __func__);
+ ucc_slow_free(uccs);
+ return -EINVAL;
+ }
+ /* Tx clock routing */
+ if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->tx_clock,
+ COMM_DIR_TX)) {
+ printk(KERN_ERR "%s: illegal value for TX clock\n",
+ __func__);
+ ucc_slow_free(uccs);
+ return -EINVAL;
+ }
+ }
+
+ /* Set interrupt mask register at UCC level. */
+ iowrite16be(us_info->uccm_mask, &us_regs->uccm);
+
+	/*
+	 * First, clear anything pending at UCC level; otherwise, old garbage
+	 * may come through as soon as the dam is opened. Writing '1' clears.
+	 */
+	iowrite16be(0xffff, &us_regs->ucce);
+
+ /* Issue QE Init command */
+ if (us_info->init_tx && us_info->init_rx)
+ command = QE_INIT_TX_RX;
+ else if (us_info->init_tx)
+ command = QE_INIT_TX;
+ else
+ command = QE_INIT_RX; /* We know at least one is TRUE */
+
+ qe_issue_cmd(command, id, us_info->protocol, 0);
+
+ *uccs_ret = uccs;
+ return 0;
+}
+EXPORT_SYMBOL(ucc_slow_init);
+
+void ucc_slow_free(struct ucc_slow_private *uccs)
+{
+ if (!uccs)
+ return;
+
+ qe_muram_free(uccs->rx_base_offset);
+ qe_muram_free(uccs->tx_base_offset);
+ qe_muram_free(uccs->us_pram_offset);
+
+ if (uccs->us_regs)
+ iounmap(uccs->us_regs);
+
+ kfree(uccs);
+}
+EXPORT_SYMBOL(ucc_slow_free);
diff --git a/drivers/soc/fsl/qe/usb.c b/drivers/soc/fsl/qe/usb.c
new file mode 100644
index 0000000000..890f236ea6
--- /dev/null
+++ b/drivers/soc/fsl/qe/usb.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * QE USB routines
+ *
+ * Copyright 2006 Freescale Semiconductor, Inc.
+ * Shlomi Gridish <gridish@freescale.com>
+ * Jerry Huang <Chang-Ming.Huang@freescale.com>
+ * Copyright (c) MontaVista Software, Inc. 2008.
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+
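+/*
+ * Example (hypothetical values): route BRG9, programmed to 48 MHz, to the
+ * USB controller clock input:
+ *
+ *	qe_usb_clock_set(QE_BRG9, 48000000);
+ */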
+int qe_usb_clock_set(enum qe_clock clk, int rate)
+{
+ struct qe_mux __iomem *mux = &qe_immr->qmx;
+ unsigned long flags;
+ u32 val;
+
+ switch (clk) {
+ case QE_CLK3: val = QE_CMXGCR_USBCS_CLK3; break;
+ case QE_CLK5: val = QE_CMXGCR_USBCS_CLK5; break;
+ case QE_CLK7: val = QE_CMXGCR_USBCS_CLK7; break;
+ case QE_CLK9: val = QE_CMXGCR_USBCS_CLK9; break;
+ case QE_CLK13: val = QE_CMXGCR_USBCS_CLK13; break;
+ case QE_CLK17: val = QE_CMXGCR_USBCS_CLK17; break;
+ case QE_CLK19: val = QE_CMXGCR_USBCS_CLK19; break;
+ case QE_CLK21: val = QE_CMXGCR_USBCS_CLK21; break;
+ case QE_BRG9: val = QE_CMXGCR_USBCS_BRG9; break;
+ case QE_BRG10: val = QE_CMXGCR_USBCS_BRG10; break;
+ default:
+ pr_err("%s: requested unknown clock %d\n", __func__, clk);
+ return -EINVAL;
+ }
+
+ if (qe_clock_is_brg(clk))
+ qe_setbrg(clk, rate, 1);
+
+ spin_lock_irqsave(&cmxgcr_lock, flags);
+
+ qe_clrsetbits_be32(&mux->cmxgcr, QE_CMXGCR_USBCS, val);
+
+ spin_unlock_irqrestore(&cmxgcr_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(qe_usb_clock_set);
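+
+/*
+ * Usage sketch (illustration only): a QE USB controller driver that
+ * derives its 48 MHz reference from BRG9 might call:
+ *
+ *	int ret = qe_usb_clock_set(QE_BRG9, 48000000);
+ *	if (ret)
+ *		return ret;
+ *
+ * For an externally supplied clock pin such as QE_CLK3, the rate
+ * argument is not used; only the clock mux is programmed.
+ */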
diff --git a/drivers/soc/fsl/rcpm.c b/drivers/soc/fsl/rcpm.c
new file mode 100644
index 0000000000..3d0cae30c7
--- /dev/null
+++ b/drivers/soc/fsl/rcpm.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// rcpm.c - Freescale QorIQ RCPM driver
+//
+// Copyright 2019-2020 NXP
+//
+// Author: Ran Wang <ran.wang_1@nxp.com>
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <linux/kernel.h>
+#include <linux/acpi.h>
+
+#define RCPM_WAKEUP_CELL_MAX_SIZE 7
+
+struct rcpm {
+ unsigned int wakeup_cells;
+ void __iomem *ippdexpcr_base;
+ bool little_endian;
+};
+
+#define SCFG_SPARECR8 0x051c
+
+static void copy_ippdexpcr1_setting(u32 val)
+{
+ struct device_node *np;
+ void __iomem *regs;
+ u32 reg_val;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,ls1021a-scfg");
+ if (!np)
+ return;
+
+ regs = of_iomap(np, 0);
+ if (!regs)
+ return;
+
+ reg_val = ioread32be(regs + SCFG_SPARECR8);
+ iowrite32be(val | reg_val, regs + SCFG_SPARECR8);
+
+ iounmap(regs);
+}
+
+/**
+ * rcpm_pm_prepare - performs device-level tasks associated with power
+ * management, such as programming the wakeup source control registers.
+ * @dev: Device to handle.
+ */
+static int rcpm_pm_prepare(struct device *dev)
+{
+ int i, ret, idx;
+ void __iomem *base;
+ struct wakeup_source *ws;
+ struct rcpm *rcpm;
+ struct device_node *np = dev->of_node;
+ u32 value[RCPM_WAKEUP_CELL_MAX_SIZE + 1];
+ u32 setting[RCPM_WAKEUP_CELL_MAX_SIZE] = {0};
+
+ rcpm = dev_get_drvdata(dev);
+ if (!rcpm)
+ return -EINVAL;
+
+ base = rcpm->ippdexpcr_base;
+ idx = wakeup_sources_read_lock();
+
+ /* Begin with first registered wakeup source */
+ for_each_wakeup_source(ws) {
+
+		/* Skip wakeup sources that are not attached to a device */
+ if (!ws->dev || !ws->dev->parent)
+ continue;
+
+ ret = device_property_read_u32_array(ws->dev->parent,
+ "fsl,rcpm-wakeup", value,
+ rcpm->wakeup_cells + 1);
+
+ if (ret)
+ continue;
+
+		/*
+		 * In DT mode, we only handle devices whose "fsl,rcpm-wakeup"
+		 * property points to the current RCPM node.
+		 *
+		 * In ACPI mode, we currently assume there is only one
+		 * RCPM controller in the system.
+		 */
+ if (is_of_node(dev->fwnode))
+ if (np->phandle != value[0])
+ continue;
+
+		/*
+		 * The "#fsl,rcpm-wakeup-cells" property of the RCPM node
+		 * defines the number of IPPDEXPCR register cells, and the
+		 * "fsl,rcpm-wakeup" property of a wakeup source IP contains
+		 * an integer array: <phandle to RCPM node, IPPDEXPCR0
+		 * setting, IPPDEXPCR1 setting, IPPDEXPCR2 setting, ...>.
+		 *
+		 * So we go through them to collect the setting data.
+		 */
+ for (i = 0; i < rcpm->wakeup_cells; i++)
+ setting[i] |= value[i + 1];
+ }
+
+ wakeup_sources_read_unlock(idx);
+
+ /* Program all IPPDEXPCRn once */
+ for (i = 0; i < rcpm->wakeup_cells; i++) {
+ u32 tmp = setting[i];
+ void __iomem *address = base + i * 4;
+
+ if (!tmp)
+ continue;
+
+ /* We can only OR related bits */
+ if (rcpm->little_endian) {
+ tmp |= ioread32(address);
+ iowrite32(tmp, address);
+ } else {
+ tmp |= ioread32be(address);
+ iowrite32be(tmp, address);
+ }
+		/*
+		 * Workaround for erratum A-008646 on SoC LS1021A:
+		 * the IPPDEXPCR1 register is broken, and reading
+		 * RCPM_IPPDEXPCR1 always returns zero. So save
+		 * ippdexpcr1's value to register SCFG_SPARECR8, from
+		 * which it will be read back instead.
+		 */
+ if (dev_of_node(dev) && (i == 1))
+ if (of_device_is_compatible(np, "fsl,ls1021a-rcpm"))
+ copy_ippdexpcr1_setting(tmp);
+ }
+
+ return 0;
+}
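+
+/*
+ * A hypothetical device-tree fragment matching the layout that
+ * rcpm_pm_prepare() expects (node names and values are illustrative,
+ * not taken from a real board file):
+ *
+ *	rcpm: wakeup-controller@1e34040 {
+ *		compatible = "fsl,qoriq-rcpm-2.1+";
+ *		reg = <0x0 0x1e34040 0x0 0x18>;
+ *		#fsl,rcpm-wakeup-cells = <6>;
+ *		little-endian;
+ *	};
+ *
+ *	ftm_alarm0: timer@2800000 {
+ *		...
+ *		fsl,rcpm-wakeup = <&rcpm 0x0 0x0 0x0 0x0 0x4000 0x0>;
+ *	};
+ */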
+
+static const struct dev_pm_ops rcpm_pm_ops = {
+ .prepare = rcpm_pm_prepare,
+};
+
+static int rcpm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rcpm *rcpm;
+ int ret;
+
+ rcpm = devm_kzalloc(dev, sizeof(*rcpm), GFP_KERNEL);
+ if (!rcpm)
+ return -ENOMEM;
+
+	rcpm->ippdexpcr_base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(rcpm->ippdexpcr_base))
+		return PTR_ERR(rcpm->ippdexpcr_base);
+
+	rcpm->little_endian = device_property_read_bool(dev, "little-endian");
+
+	ret = device_property_read_u32(dev, "#fsl,rcpm-wakeup-cells",
+				       &rcpm->wakeup_cells);
+	if (ret)
+		return ret;
+
+	dev_set_drvdata(dev, rcpm);
+
+ return 0;
+}
+
+static const struct of_device_id rcpm_of_match[] = {
+ { .compatible = "fsl,qoriq-rcpm-2.1+", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rcpm_of_match);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id rcpm_acpi_ids[] = {
+ {"NXP0015",},
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, rcpm_acpi_ids);
+#endif
+
+static struct platform_driver rcpm_driver = {
+ .driver = {
+ .name = "rcpm",
+ .of_match_table = rcpm_of_match,
+ .acpi_match_table = ACPI_PTR(rcpm_acpi_ids),
+ .pm = &rcpm_pm_ops,
+ },
+ .probe = rcpm_probe,
+};
+
+module_platform_driver(rcpm_driver);