author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
commit    5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch)
tree      a94efe259b9009378be6d90eb30d2b019d95c194 /drivers/mailbox
parent    Initial commit. (diff)
Adding upstream version 5.10.209.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/mailbox')
-rw-r--r--  drivers/mailbox/Kconfig                       257
-rw-r--r--  drivers/mailbox/Makefile                       56
-rw-r--r--  drivers/mailbox/arm_mhu.c                     177
-rw-r--r--  drivers/mailbox/arm_mhu_db.c                  354
-rw-r--r--  drivers/mailbox/armada-37xx-rwtm-mailbox.c    206
-rw-r--r--  drivers/mailbox/bcm-flexrm-mailbox.c         1704
-rw-r--r--  drivers/mailbox/bcm-pdc-mailbox.c            1646
-rw-r--r--  drivers/mailbox/bcm2835-mailbox.c             202
-rw-r--r--  drivers/mailbox/hi3660-mailbox.c              301
-rw-r--r--  drivers/mailbox/hi6220-mailbox.c              376
-rw-r--r--  drivers/mailbox/imx-mailbox.c                 679
-rw-r--r--  drivers/mailbox/mailbox-altera.c              364
-rw-r--r--  drivers/mailbox/mailbox-sti.c                 499
-rw-r--r--  drivers/mailbox/mailbox-test.c                454
-rw-r--r--  drivers/mailbox/mailbox-xgene-slimpro.c       262
-rw-r--r--  drivers/mailbox/mailbox.c                     625
-rw-r--r--  drivers/mailbox/mailbox.h                      10
-rw-r--r--  drivers/mailbox/mtk-cmdq-mailbox.c            636
-rw-r--r--  drivers/mailbox/omap-mailbox.c                933
-rw-r--r--  drivers/mailbox/pcc.c                         616
-rw-r--r--  drivers/mailbox/pl320-ipc.c                   187
-rw-r--r--  drivers/mailbox/platform_mhu.c                187
-rw-r--r--  drivers/mailbox/qcom-apcs-ipc-mailbox.c       191
-rw-r--r--  drivers/mailbox/qcom-ipcc.c                   292
-rw-r--r--  drivers/mailbox/rockchip-mailbox.c            262
-rw-r--r--  drivers/mailbox/sprd-mailbox.c                376
-rw-r--r--  drivers/mailbox/stm32-ipcc.c                  396
-rw-r--r--  drivers/mailbox/sun6i-msgbox.c                326
-rw-r--r--  drivers/mailbox/tegra-hsp.c                   848
-rw-r--r--  drivers/mailbox/ti-msgmgr.c                   845
-rw-r--r--  drivers/mailbox/zynqmp-ipi-mailbox.c          725
31 files changed, 14992 insertions, 0 deletions
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
new file mode 100644
index 000000000..05b1009e2
--- /dev/null
+++ b/drivers/mailbox/Kconfig
@@ -0,0 +1,257 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig MAILBOX
+ bool "Mailbox Hardware Support"
+ help
+ Mailbox is a framework to control hardware communication between
+ on-chip processors through queued messages and interrupt driven
+ signals. Say Y if your platform supports hardware mailboxes.
+
+if MAILBOX
+
+config ARM_MHU
+ tristate "ARM MHU Mailbox"
+ depends on ARM_AMBA
+ help
+ Say Y here if you want to build the ARM MHU controller driver.
+ The controller has 3 mailbox channels, the last of which can be
+ used in Secure mode only.
+
+config IMX_MBOX
+ tristate "i.MX Mailbox"
+ depends on ARCH_MXC || COMPILE_TEST
+ help
+ Mailbox implementation for i.MX Messaging Unit (MU).
+
+config PLATFORM_MHU
+ tristate "Platform MHU Mailbox"
+ depends on OF
+ depends on HAS_IOMEM
+ help
+ Say Y here if you want to build a platform-specific variant of the
+ MHU controller driver.
+ The controller has a maximum of 3 mailbox channels, the last of
+ which can be used in Secure mode only.
+
+config PL320_MBOX
+ bool "ARM PL320 Mailbox"
+ depends on ARM_AMBA
+ help
+ An implementation of the ARM PL320 Interprocessor Communication
+ Mailbox (IPCM), tailored for the Calxeda Highbank. It is used to
+ send short messages between Highbank's A9 cores and the EnergyCore
+ Management Engine, primarily for cpufreq. Say Y here if you want
+ to use the PL320 IPCM support.
+
+config ARMADA_37XX_RWTM_MBOX
+ tristate "Armada 37xx rWTM BIU Mailbox"
+ depends on ARCH_MVEBU || COMPILE_TEST
+ depends on OF
+ help
+ Mailbox implementation for communication with the firmware
+ running on the Cortex-M3 rWTM secure processor of the Armada 37xx
+ SoC. Say Y here if you are building for such a device (for example
+ the Turris Mox router).
+
+config OMAP2PLUS_MBOX
+ tristate "OMAP2+ Mailbox framework support"
+ depends on ARCH_OMAP2PLUS || ARCH_K3
+ help
+ Mailbox implementation for OMAP family chips with hardware for
+ interprocessor communication involving DSP, IVA1.0 and IVA2 in
+ OMAP2/3; or IPU, IVA HD and DSP in OMAP4/5. Say Y here if you
+ want to use OMAP2+ Mailbox framework support.
+
+config OMAP_MBOX_KFIFO_SIZE
+ int "Mailbox kfifo default buffer size (bytes)"
+ depends on OMAP2PLUS_MBOX
+ default 256
+ help
+ Specify the default size of mailbox's kfifo buffers (bytes).
+ This can also be changed at runtime (via the mbox_kfifo_size
+ module parameter).
+
+config ROCKCHIP_MBOX
+ bool "Rockchip Soc Intergrated Mailbox Support"
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ help
+ This driver provides support for inter-processor communication
+ between the CPU cores and the MCU on some Rockchip SoCs.
+ Please check that the SoC you use has mailbox hardware.
+ Say Y here if you want to use the Rockchip Mailbox support.
+
+config PCC
+ bool "Platform Communication Channel Driver"
+ depends on ACPI
+ default n
+ help
+ ACPI 5.0+ spec defines a generic mode of communication
+ between the OS and a platform such as the BMC. This medium
+ (PCC) is typically used by CPPC (ACPI CPU Performance management),
+ RAS (ACPI reliability protocol) and MPST (ACPI Memory power
+ states). Select this driver if your platform implements the
+ PCC clients mentioned above.
+
+config ALTERA_MBOX
+ tristate "Altera Mailbox"
+ depends on HAS_IOMEM
+ help
+ An implementation of the Altera Mailbox soft core. It is used
+ to send messages between processors. Say Y here if you want to use the
+ Altera mailbox support.
+
+config BCM2835_MBOX
+ tristate "BCM2835 Mailbox"
+ depends on ARCH_BCM2835
+ help
+ An implementation of the BCM2835 Mailbox. It is used to invoke
+ the services of the VideoCore. Say Y here if you want to use the
+ BCM2835 Mailbox.
+
+config STI_MBOX
+ tristate "STI Mailbox framework support"
+ depends on ARCH_STI && OF
+ help
+ Mailbox implementation for STMicroelectronics family chips with
+ hardware for interprocessor communication.
+
+config TI_MESSAGE_MANAGER
+ tristate "Texas Instruments Message Manager Driver"
+ depends on ARCH_KEYSTONE || ARCH_K3
+ help
+ An implementation of the Message Manager slave driver for Keystone
+ and K3 architecture SoCs from Texas Instruments. Message Manager
+ is a communication entity found on some of Texas Instruments' Keystone
+ and K3 architecture SoCs. It may be used for communication between
+ multiple processors within the SoC. Select this driver if your
+ platform has support for the hardware block.
+
+config HI3660_MBOX
+ tristate "Hi3660 Mailbox" if EXPERT
+ depends on (ARCH_HISI || COMPILE_TEST)
+ depends on OF
+ default ARCH_HISI
+ help
+ An implementation of the Hi3660 mailbox. It is used to send messages
+ between the application processors and other processors/MCUs/DSPs.
+ Say Y here if you want to use the Hi3660 mailbox controller.
+
+config HI6220_MBOX
+ tristate "Hi6220 Mailbox" if EXPERT
+ depends on (ARCH_HISI || COMPILE_TEST)
+ depends on OF
+ default ARCH_HISI
+ help
+ An implementation of the Hi6220 mailbox. It is used to send messages
+ between the application processors and the MCU. Say Y here if you want
+ to build the Hi6220 mailbox controller driver.
+
+config MAILBOX_TEST
+ tristate "Mailbox Test Client"
+ depends on OF
+ depends on HAS_IOMEM
+ help
+ Test client to help with testing new Controller driver
+ implementations.
+
+config QCOM_APCS_IPC
+ tristate "Qualcomm APCS IPC driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ help
+ Say y here to enable support for the APCS IPC mailbox driver,
+ providing an interface for invoking the inter-process communication
+ signals from the application processor to other masters.
+
+config TEGRA_HSP_MBOX
+ bool "Tegra HSP (Hardware Synchronization Primitives) Driver"
+ depends on ARCH_TEGRA
+ help
+ The Tegra HSP driver is used for the interprocessor communication
+ between different remote processors and host processors on Tegra186
+ and later SoCs. Say Y here if you want to have this support.
+ If unsure say N.
+
+config XGENE_SLIMPRO_MBOX
+ tristate "APM SoC X-Gene SLIMpro Mailbox Controller"
+ depends on ARCH_XGENE
+ help
+ An implementation of the APM X-Gene Interprocessor Communication
+ Mailbox (IPCM) between the ARM 64-bit cores and SLIMpro controller.
+ It is used to send short messages between ARM64-bit cores and
+ the SLIMpro Management Engine, primarily for PM. Say Y here if you
+ want to use the APM X-Gene SLIMpro IPCM support.
+
+config BCM_PDC_MBOX
+ tristate "Broadcom FlexSparx DMA Mailbox"
+ depends on ARCH_BCM_IPROC || COMPILE_TEST
+ help
+ Mailbox implementation for the Broadcom FlexSparx DMA ring manager,
+ which provides access to various offload engines on Broadcom
+ SoCs, including FA2/FA+ on Northstar Plus and PDC on Northstar 2.
+
+config BCM_FLEXRM_MBOX
+ tristate "Broadcom FlexRM Mailbox"
+ depends on ARM64
+ depends on ARCH_BCM_IPROC || COMPILE_TEST
+ select GENERIC_MSI_IRQ_DOMAIN
+ default m if ARCH_BCM_IPROC
+ help
+ Mailbox implementation of the Broadcom FlexRM ring manager,
+ which provides access to various offload engines on Broadcom
+ SoCs. Say Y here if you want to use the Broadcom FlexRM.
+
+config STM32_IPCC
+ tristate "STM32 IPCC Mailbox"
+ depends on MACH_STM32MP157
+ help
+ Mailbox implementation for STMicroelectronics STM32 family chips
+ with hardware for the Inter-Processor Communication Controller (IPCC)
+ between processors. Say Y here if you want to have this support.
+
+config MTK_CMDQ_MBOX
+ tristate "MediaTek CMDQ Mailbox Support"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select MTK_INFRACFG
+ help
+ Say yes here to add support for the MediaTek Command Queue (CMDQ)
+ mailbox driver. The CMDQ is used to help read/write registers with
+ critical timing constraints, such as updating the display
+ configuration during vblank.
+
+config ZYNQMP_IPI_MBOX
+ bool "Xilinx ZynqMP IPI Mailbox"
+ depends on ARCH_ZYNQMP && OF
+ help
+ Say yes here to add support for Xilinx IPI mailbox driver.
+ This mailbox driver is used to send notifications or short messages
+ between processors with Xilinx ZynqMP IPI. It places the
+ message in the IPI buffer and accesses the IPI control
+ registers to kick the other processor or inquire status.
+
+config SUN6I_MSGBOX
+ tristate "Allwinner sun6i/sun8i/sun9i/sun50i Message Box"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ default ARCH_SUNXI
+ help
+ Mailbox implementation for the hardware message box present in
+ various Allwinner SoCs. This mailbox is used for communication
+ between the application CPUs and the power management coprocessor.
+
+config SPRD_MBOX
+ tristate "Spreadtrum Mailbox"
+ depends on ARCH_SPRD || COMPILE_TEST
+ help
+ Mailbox driver implementation for the Spreadtrum platform. It is used
+ to send messages between the application processors and the MCU. Say Y
+ here if you want to build the Spreadtrum mailbox controller driver.
+
+config QCOM_IPCC
+ bool "Qualcomm Technologies, Inc. IPCC driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ help
+ Qualcomm Technologies, Inc. Inter-Processor Communication Controller
+ (IPCC) driver for MSM devices. The driver provides mailbox support for
+ sending interrupts to the clients. On the other hand, the driver also
+ acts as an interrupt controller for receiving interrupts from clients.
+ Say Y here if you want to build this driver.
+
+endif
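The MAILBOX framework configured above is consumed through the client API in include/linux/mailbox_client.h. The sketch below (not part of this commit) shows the usual client flow: fill in a struct mbox_client, request a channel by index from the device's "mboxes" property, and post a message. The driver name, compatible string and u32 payload are hypothetical; the real payload format is defined by whichever controller backs the channel.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct demo_client {
	struct mbox_client cl;
	struct mbox_chan *chan;
};

/* Called by the framework whenever the controller reports received data. */
static void demo_rx_callback(struct mbox_client *cl, void *msg)
{
	dev_info(cl->dev, "reply: 0x%x\n", *(u32 *)msg); /* e.g. the ARM MHU passes a u32 */
}

static int demo_probe(struct platform_device *pdev)
{
	struct demo_client *dc;
	u32 word = 0x1;
	int ret;

	dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL);
	if (!dc)
		return -ENOMEM;

	dc->cl.dev = &pdev->dev;
	dc->cl.rx_callback = demo_rx_callback;
	dc->cl.tx_block = true;		/* sleep until the controller signals TX done */
	dc->cl.tx_tout = 500;		/* milliseconds */

	dc->chan = mbox_request_channel(&dc->cl, 0);	/* index 0 of "mboxes" */
	if (IS_ERR(dc->chan))
		return PTR_ERR(dc->chan);

	ret = mbox_send_message(dc->chan, &word);
	if (ret < 0)
		mbox_free_channel(dc->chan);
	return ret < 0 ? ret : 0;
}

static const struct of_device_id demo_of_match[] = {
	{ .compatible = "vendor,mbox-demo-client" },	/* hypothetical */
	{ }
};
MODULE_DEVICE_TABLE(of, demo_of_match);

static struct platform_driver demo_driver = {
	.probe = demo_probe,
	.driver = {
		.name = "mbox-demo-client",
		.of_match_table = demo_of_match,
	},
};
module_platform_driver(demo_driver);

MODULE_LICENSE("GPL v2");

Because tx_block is set, mbox_send_message() only returns once the controller's TX-done machinery (txdone_irq or txdone_poll/last_tx_done, as the drivers below configure it) reports completion, or after tx_tout milliseconds.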
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
new file mode 100644
index 000000000..2e06e02b2
--- /dev/null
+++ b/drivers/mailbox/Makefile
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: GPL-2.0
+# Generic MAILBOX API
+
+obj-$(CONFIG_MAILBOX) += mailbox.o
+
+obj-$(CONFIG_MAILBOX_TEST) += mailbox-test.o
+
+obj-$(CONFIG_ARM_MHU) += arm_mhu.o arm_mhu_db.o
+
+obj-$(CONFIG_IMX_MBOX) += imx-mailbox.o
+
+obj-$(CONFIG_ARMADA_37XX_RWTM_MBOX) += armada-37xx-rwtm-mailbox.o
+
+obj-$(CONFIG_PLATFORM_MHU) += platform_mhu.o
+
+obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o
+
+obj-$(CONFIG_OMAP2PLUS_MBOX) += omap-mailbox.o
+
+obj-$(CONFIG_ROCKCHIP_MBOX) += rockchip-mailbox.o
+
+obj-$(CONFIG_PCC) += pcc.o
+
+obj-$(CONFIG_ALTERA_MBOX) += mailbox-altera.o
+
+obj-$(CONFIG_BCM2835_MBOX) += bcm2835-mailbox.o
+
+obj-$(CONFIG_STI_MBOX) += mailbox-sti.o
+
+obj-$(CONFIG_TI_MESSAGE_MANAGER) += ti-msgmgr.o
+
+obj-$(CONFIG_XGENE_SLIMPRO_MBOX) += mailbox-xgene-slimpro.o
+
+obj-$(CONFIG_HI3660_MBOX) += hi3660-mailbox.o
+
+obj-$(CONFIG_HI6220_MBOX) += hi6220-mailbox.o
+
+obj-$(CONFIG_BCM_PDC_MBOX) += bcm-pdc-mailbox.o
+
+obj-$(CONFIG_BCM_FLEXRM_MBOX) += bcm-flexrm-mailbox.o
+
+obj-$(CONFIG_QCOM_APCS_IPC) += qcom-apcs-ipc-mailbox.o
+
+obj-$(CONFIG_TEGRA_HSP_MBOX) += tegra-hsp.o
+
+obj-$(CONFIG_STM32_IPCC) += stm32-ipcc.o
+
+obj-$(CONFIG_MTK_CMDQ_MBOX) += mtk-cmdq-mailbox.o
+
+obj-$(CONFIG_ZYNQMP_IPI_MBOX) += zynqmp-ipi-mailbox.o
+
+obj-$(CONFIG_SUN6I_MSGBOX) += sun6i-msgbox.o
+
+obj-$(CONFIG_SPRD_MBOX) += sprd-mailbox.o
+
+obj-$(CONFIG_QCOM_IPCC) += qcom-ipcc.o
diff --git a/drivers/mailbox/arm_mhu.c b/drivers/mailbox/arm_mhu.c
new file mode 100644
index 000000000..b7fbf276e
--- /dev/null
+++ b/drivers/mailbox/arm_mhu.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013-2015 Fujitsu Semiconductor Ltd.
+ * Copyright (C) 2015 Linaro Ltd.
+ * Author: Jassi Brar <jaswinder.singh@linaro.org>
+ */
+
+#include <linux/amba/bus.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+
+#define INTR_STAT_OFS 0x0
+#define INTR_SET_OFS 0x8
+#define INTR_CLR_OFS 0x10
+
+#define MHU_LP_OFFSET 0x0
+#define MHU_HP_OFFSET 0x20
+#define MHU_SEC_OFFSET 0x200
+#define TX_REG_OFFSET 0x100
+
+#define MHU_CHANS 3
+
+struct mhu_link {
+ unsigned irq;
+ void __iomem *tx_reg;
+ void __iomem *rx_reg;
+};
+
+struct arm_mhu {
+ void __iomem *base;
+ struct mhu_link mlink[MHU_CHANS];
+ struct mbox_chan chan[MHU_CHANS];
+ struct mbox_controller mbox;
+};
+
+static irqreturn_t mhu_rx_interrupt(int irq, void *p)
+{
+ struct mbox_chan *chan = p;
+ struct mhu_link *mlink = chan->con_priv;
+ u32 val;
+
+ val = readl_relaxed(mlink->rx_reg + INTR_STAT_OFS);
+ if (!val)
+ return IRQ_NONE;
+
+ mbox_chan_received_data(chan, (void *)&val);
+
+ writel_relaxed(val, mlink->rx_reg + INTR_CLR_OFS);
+
+ return IRQ_HANDLED;
+}
+
+static bool mhu_last_tx_done(struct mbox_chan *chan)
+{
+ struct mhu_link *mlink = chan->con_priv;
+ u32 val = readl_relaxed(mlink->tx_reg + INTR_STAT_OFS);
+
+ return (val == 0);
+}
+
+static int mhu_send_data(struct mbox_chan *chan, void *data)
+{
+ struct mhu_link *mlink = chan->con_priv;
+ u32 *arg = data;
+
+ writel_relaxed(*arg, mlink->tx_reg + INTR_SET_OFS);
+
+ return 0;
+}
+
+static int mhu_startup(struct mbox_chan *chan)
+{
+ struct mhu_link *mlink = chan->con_priv;
+ u32 val;
+ int ret;
+
+ val = readl_relaxed(mlink->tx_reg + INTR_STAT_OFS);
+ writel_relaxed(val, mlink->tx_reg + INTR_CLR_OFS);
+
+ ret = request_irq(mlink->irq, mhu_rx_interrupt,
+ IRQF_SHARED, "mhu_link", chan);
+ if (ret) {
+ dev_err(chan->mbox->dev,
+ "Unable to acquire IRQ %d\n", mlink->irq);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mhu_shutdown(struct mbox_chan *chan)
+{
+ struct mhu_link *mlink = chan->con_priv;
+
+ free_irq(mlink->irq, chan);
+}
+
+static const struct mbox_chan_ops mhu_ops = {
+ .send_data = mhu_send_data,
+ .startup = mhu_startup,
+ .shutdown = mhu_shutdown,
+ .last_tx_done = mhu_last_tx_done,
+};
+
+static int mhu_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ int i, err;
+ struct arm_mhu *mhu;
+ struct device *dev = &adev->dev;
+ int mhu_reg[MHU_CHANS] = {MHU_LP_OFFSET, MHU_HP_OFFSET, MHU_SEC_OFFSET};
+
+ if (!of_device_is_compatible(dev->of_node, "arm,mhu"))
+ return -ENODEV;
+
+ /* Allocate memory for device */
+ mhu = devm_kzalloc(dev, sizeof(*mhu), GFP_KERNEL);
+ if (!mhu)
+ return -ENOMEM;
+
+ mhu->base = devm_ioremap_resource(dev, &adev->res);
+ if (IS_ERR(mhu->base)) {
+ dev_err(dev, "ioremap failed\n");
+ return PTR_ERR(mhu->base);
+ }
+
+ for (i = 0; i < MHU_CHANS; i++) {
+ mhu->chan[i].con_priv = &mhu->mlink[i];
+ mhu->mlink[i].irq = adev->irq[i];
+ mhu->mlink[i].rx_reg = mhu->base + mhu_reg[i];
+ mhu->mlink[i].tx_reg = mhu->mlink[i].rx_reg + TX_REG_OFFSET;
+ }
+
+ mhu->mbox.dev = dev;
+ mhu->mbox.chans = &mhu->chan[0];
+ mhu->mbox.num_chans = MHU_CHANS;
+ mhu->mbox.ops = &mhu_ops;
+ mhu->mbox.txdone_irq = false;
+ mhu->mbox.txdone_poll = true;
+ mhu->mbox.txpoll_period = 1;
+
+ amba_set_drvdata(adev, mhu);
+
+ err = devm_mbox_controller_register(dev, &mhu->mbox);
+ if (err) {
+ dev_err(dev, "Failed to register mailboxes %d\n", err);
+ return err;
+ }
+
+ dev_info(dev, "ARM MHU Mailbox registered\n");
+ return 0;
+}
+
+static struct amba_id mhu_ids[] = {
+ {
+ .id = 0x1bb098,
+ .mask = 0xffffff,
+ },
+ { 0, 0 },
+};
+MODULE_DEVICE_TABLE(amba, mhu_ids);
+
+static struct amba_driver arm_mhu_driver = {
+ .drv = {
+ .name = "mhu",
+ },
+ .id_table = mhu_ids,
+ .probe = mhu_probe,
+};
+module_amba_driver(arm_mhu_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ARM MHU Driver");
+MODULE_AUTHOR("Jassi Brar <jassisinghbrar@gmail.com>");
diff --git a/drivers/mailbox/arm_mhu_db.c b/drivers/mailbox/arm_mhu_db.c
new file mode 100644
index 000000000..8eb66c4ec
--- /dev/null
+++ b/drivers/mailbox/arm_mhu_db.c
@@ -0,0 +1,354 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013-2015 Fujitsu Semiconductor Ltd.
+ * Copyright (C) 2015 Linaro Ltd.
+ * Based on ARM MHU driver by Jassi Brar <jaswinder.singh@linaro.org>
+ * Copyright (C) 2020 ARM Ltd.
+ */
+
+#include <linux/amba/bus.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#define INTR_STAT_OFS 0x0
+#define INTR_SET_OFS 0x8
+#define INTR_CLR_OFS 0x10
+
+#define MHU_LP_OFFSET 0x0
+#define MHU_HP_OFFSET 0x20
+#define MHU_SEC_OFFSET 0x200
+#define TX_REG_OFFSET 0x100
+
+#define MHU_CHANS 3 /* Secure, Non-Secure High and Low Priority */
+#define MHU_CHAN_MAX 20 /* Max channels to save on unused RAM */
+#define MHU_NUM_DOORBELLS 32
+
+struct mhu_db_link {
+ unsigned int irq;
+ void __iomem *tx_reg;
+ void __iomem *rx_reg;
+};
+
+struct arm_mhu {
+ void __iomem *base;
+ struct mhu_db_link mlink[MHU_CHANS];
+ struct mbox_controller mbox;
+ struct device *dev;
+};
+
+/**
+ * ARM MHU Mailbox allocated channel information
+ *
+ * @mhu: Pointer to parent mailbox device
+ * @pchan: Physical channel within which this doorbell resides
+ * @doorbell: doorbell number pertaining to this channel
+ */
+struct mhu_db_channel {
+ struct arm_mhu *mhu;
+ unsigned int pchan;
+ unsigned int doorbell;
+};
+
+static inline struct mbox_chan *
+mhu_db_mbox_to_channel(struct mbox_controller *mbox, unsigned int pchan,
+ unsigned int doorbell)
+{
+ int i;
+ struct mhu_db_channel *chan_info;
+
+ for (i = 0; i < mbox->num_chans; i++) {
+ chan_info = mbox->chans[i].con_priv;
+ if (chan_info && chan_info->pchan == pchan &&
+ chan_info->doorbell == doorbell)
+ return &mbox->chans[i];
+ }
+
+ return NULL;
+}
+
+static void mhu_db_mbox_clear_irq(struct mbox_chan *chan)
+{
+ struct mhu_db_channel *chan_info = chan->con_priv;
+ void __iomem *base = chan_info->mhu->mlink[chan_info->pchan].rx_reg;
+
+ writel_relaxed(BIT(chan_info->doorbell), base + INTR_CLR_OFS);
+}
+
+static unsigned int mhu_db_mbox_irq_to_pchan_num(struct arm_mhu *mhu, int irq)
+{
+ unsigned int pchan;
+
+ for (pchan = 0; pchan < MHU_CHANS; pchan++)
+ if (mhu->mlink[pchan].irq == irq)
+ break;
+ return pchan;
+}
+
+static struct mbox_chan *
+mhu_db_mbox_irq_to_channel(struct arm_mhu *mhu, unsigned int pchan)
+{
+ unsigned long bits;
+ unsigned int doorbell;
+ struct mbox_chan *chan = NULL;
+ struct mbox_controller *mbox = &mhu->mbox;
+ void __iomem *base = mhu->mlink[pchan].rx_reg;
+
+ bits = readl_relaxed(base + INTR_STAT_OFS);
+ if (!bits)
+ /* No IRQs fired in specified physical channel */
+ return NULL;
+
+ /* An IRQ has fired, find the associated channel */
+ for (doorbell = 0; bits; doorbell++) {
+ if (!test_and_clear_bit(doorbell, &bits))
+ continue;
+
+ chan = mhu_db_mbox_to_channel(mbox, pchan, doorbell);
+ if (chan)
+ break;
+ dev_err(mbox->dev,
+ "Channel not registered: pchan: %d doorbell: %d\n",
+ pchan, doorbell);
+ }
+
+ return chan;
+}
+
+static irqreturn_t mhu_db_mbox_rx_handler(int irq, void *data)
+{
+ struct mbox_chan *chan;
+ struct arm_mhu *mhu = data;
+ unsigned int pchan = mhu_db_mbox_irq_to_pchan_num(mhu, irq);
+
+ while (NULL != (chan = mhu_db_mbox_irq_to_channel(mhu, pchan))) {
+ mbox_chan_received_data(chan, NULL);
+ mhu_db_mbox_clear_irq(chan);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static bool mhu_db_last_tx_done(struct mbox_chan *chan)
+{
+ struct mhu_db_channel *chan_info = chan->con_priv;
+ void __iomem *base = chan_info->mhu->mlink[chan_info->pchan].tx_reg;
+
+ if (readl_relaxed(base + INTR_STAT_OFS) & BIT(chan_info->doorbell))
+ return false;
+
+ return true;
+}
+
+static int mhu_db_send_data(struct mbox_chan *chan, void *data)
+{
+ struct mhu_db_channel *chan_info = chan->con_priv;
+ void __iomem *base = chan_info->mhu->mlink[chan_info->pchan].tx_reg;
+
+ /* Send event to co-processor */
+ writel_relaxed(BIT(chan_info->doorbell), base + INTR_SET_OFS);
+
+ return 0;
+}
+
+static int mhu_db_startup(struct mbox_chan *chan)
+{
+ mhu_db_mbox_clear_irq(chan);
+ return 0;
+}
+
+static void mhu_db_shutdown(struct mbox_chan *chan)
+{
+ struct mhu_db_channel *chan_info = chan->con_priv;
+ struct mbox_controller *mbox = &chan_info->mhu->mbox;
+ int i;
+
+ for (i = 0; i < mbox->num_chans; i++)
+ if (chan == &mbox->chans[i])
+ break;
+
+ if (mbox->num_chans == i) {
+ dev_warn(mbox->dev, "Request to free non-existent channel\n");
+ return;
+ }
+
+ /* Reset channel */
+ mhu_db_mbox_clear_irq(chan);
+ devm_kfree(mbox->dev, chan->con_priv);
+ chan->con_priv = NULL;
+}
+
+static struct mbox_chan *mhu_db_mbox_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *spec)
+{
+ struct arm_mhu *mhu = dev_get_drvdata(mbox->dev);
+ struct mhu_db_channel *chan_info;
+ struct mbox_chan *chan;
+ unsigned int pchan = spec->args[0];
+ unsigned int doorbell = spec->args[1];
+ int i;
+
+ /* Bounds checking */
+ if (pchan >= MHU_CHANS || doorbell >= MHU_NUM_DOORBELLS) {
+ dev_err(mbox->dev,
+ "Invalid channel requested pchan: %d doorbell: %d\n",
+ pchan, doorbell);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Is requested channel free? */
+ chan = mhu_db_mbox_to_channel(mbox, pchan, doorbell);
+ if (chan) {
+ dev_err(mbox->dev, "Channel in use: pchan: %d doorbell: %d\n",
+ pchan, doorbell);
+ return ERR_PTR(-EBUSY);
+ }
+
+ /* Find the first free slot */
+ for (i = 0; i < mbox->num_chans; i++)
+ if (!mbox->chans[i].con_priv)
+ break;
+
+ if (mbox->num_chans == i) {
+ dev_err(mbox->dev, "No free channels left\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ chan = &mbox->chans[i];
+
+ chan_info = devm_kzalloc(mbox->dev, sizeof(*chan_info), GFP_KERNEL);
+ if (!chan_info)
+ return ERR_PTR(-ENOMEM);
+
+ chan_info->mhu = mhu;
+ chan_info->pchan = pchan;
+ chan_info->doorbell = doorbell;
+
+ chan->con_priv = chan_info;
+
+ dev_dbg(mbox->dev, "mbox: created channel phys: %d doorbell: %d\n",
+ pchan, doorbell);
+
+ return chan;
+}
+
+static const struct mbox_chan_ops mhu_db_ops = {
+ .send_data = mhu_db_send_data,
+ .startup = mhu_db_startup,
+ .shutdown = mhu_db_shutdown,
+ .last_tx_done = mhu_db_last_tx_done,
+};
+
+static int mhu_db_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ u32 cell_count;
+ int i, err, max_chans;
+ struct arm_mhu *mhu;
+ struct mbox_chan *chans;
+ struct device *dev = &adev->dev;
+ struct device_node *np = dev->of_node;
+ int mhu_reg[MHU_CHANS] = {
+ MHU_LP_OFFSET, MHU_HP_OFFSET, MHU_SEC_OFFSET,
+ };
+
+ if (!of_device_is_compatible(np, "arm,mhu-doorbell"))
+ return -ENODEV;
+
+ err = of_property_read_u32(np, "#mbox-cells", &cell_count);
+ if (err) {
+ dev_err(dev, "failed to read #mbox-cells in '%pOF'\n", np);
+ return err;
+ }
+
+ if (cell_count == 2) {
+ max_chans = MHU_CHAN_MAX;
+ } else {
+ dev_err(dev, "incorrect value of #mbox-cells in '%pOF'\n", np);
+ return -EINVAL;
+ }
+
+ mhu = devm_kzalloc(dev, sizeof(*mhu), GFP_KERNEL);
+ if (!mhu)
+ return -ENOMEM;
+
+ mhu->base = devm_ioremap_resource(dev, &adev->res);
+ if (IS_ERR(mhu->base)) {
+ dev_err(dev, "ioremap failed\n");
+ return PTR_ERR(mhu->base);
+ }
+
+ chans = devm_kcalloc(dev, max_chans, sizeof(*chans), GFP_KERNEL);
+ if (!chans)
+ return -ENOMEM;
+
+ mhu->dev = dev;
+ mhu->mbox.dev = dev;
+ mhu->mbox.chans = chans;
+ mhu->mbox.num_chans = max_chans;
+ mhu->mbox.txdone_irq = false;
+ mhu->mbox.txdone_poll = true;
+ mhu->mbox.txpoll_period = 1;
+
+ mhu->mbox.of_xlate = mhu_db_mbox_xlate;
+ amba_set_drvdata(adev, mhu);
+
+ mhu->mbox.ops = &mhu_db_ops;
+
+ err = devm_mbox_controller_register(dev, &mhu->mbox);
+ if (err) {
+ dev_err(dev, "Failed to register mailboxes %d\n", err);
+ return err;
+ }
+
+ for (i = 0; i < MHU_CHANS; i++) {
+ int irq = mhu->mlink[i].irq = adev->irq[i];
+
+ if (irq <= 0) {
+ dev_dbg(dev, "No IRQ found for Channel %d\n", i);
+ continue;
+ }
+
+ mhu->mlink[i].rx_reg = mhu->base + mhu_reg[i];
+ mhu->mlink[i].tx_reg = mhu->mlink[i].rx_reg + TX_REG_OFFSET;
+
+ err = devm_request_threaded_irq(dev, irq, NULL,
+ mhu_db_mbox_rx_handler,
+ IRQF_ONESHOT, "mhu_db_link", mhu);
+ if (err) {
+ dev_err(dev, "Can't claim IRQ %d\n", irq);
+ mbox_controller_unregister(&mhu->mbox);
+ return err;
+ }
+ }
+
+ dev_info(dev, "ARM MHU Doorbell mailbox registered\n");
+ return 0;
+}
+
+static struct amba_id mhu_ids[] = {
+ {
+ .id = 0x1bb098,
+ .mask = 0xffffff,
+ },
+ { 0, 0 },
+};
+MODULE_DEVICE_TABLE(amba, mhu_ids);
+
+static struct amba_driver arm_mhu_db_driver = {
+ .drv = {
+ .name = "mhu-doorbell",
+ },
+ .id_table = mhu_ids,
+ .probe = mhu_db_probe,
+};
+module_amba_driver(arm_mhu_db_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ARM MHU Doorbell Driver");
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
diff --git a/drivers/mailbox/armada-37xx-rwtm-mailbox.c b/drivers/mailbox/armada-37xx-rwtm-mailbox.c
new file mode 100644
index 000000000..9f2ce7f03
--- /dev/null
+++ b/drivers/mailbox/armada-37xx-rwtm-mailbox.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * rWTM BIU Mailbox driver for Armada 37xx
+ *
+ * Author: Marek Behun <marek.behun@nic.cz>
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/armada-37xx-rwtm-mailbox.h>
+
+#define DRIVER_NAME "armada-37xx-rwtm-mailbox"
+
+/* relative to rWTM BIU Mailbox Registers */
+#define RWTM_MBOX_PARAM(i) (0x0 + ((i) << 2))
+#define RWTM_MBOX_COMMAND 0x40
+#define RWTM_MBOX_RETURN_STATUS 0x80
+#define RWTM_MBOX_STATUS(i) (0x84 + ((i) << 2))
+#define RWTM_MBOX_FIFO_STATUS 0xc4
+#define FIFO_STS_RDY 0x100
+#define FIFO_STS_CNTR_MASK 0x7
+#define FIFO_STS_CNTR_MAX 4
+
+#define RWTM_HOST_INT_RESET 0xc8
+#define RWTM_HOST_INT_MASK 0xcc
+#define SP_CMD_COMPLETE BIT(0)
+#define SP_CMD_QUEUE_FULL_ACCESS BIT(17)
+#define SP_CMD_QUEUE_FULL BIT(18)
+
+struct a37xx_mbox {
+ struct device *dev;
+ struct mbox_controller controller;
+ void __iomem *base;
+ int irq;
+};
+
+static void a37xx_mbox_receive(struct mbox_chan *chan)
+{
+ struct a37xx_mbox *mbox = chan->con_priv;
+ struct armada_37xx_rwtm_rx_msg rx_msg;
+ int i;
+
+ rx_msg.retval = readl(mbox->base + RWTM_MBOX_RETURN_STATUS);
+ for (i = 0; i < 16; ++i)
+ rx_msg.status[i] = readl(mbox->base + RWTM_MBOX_STATUS(i));
+
+ mbox_chan_received_data(chan, &rx_msg);
+}
+
+static irqreturn_t a37xx_mbox_irq_handler(int irq, void *data)
+{
+ struct mbox_chan *chan = data;
+ struct a37xx_mbox *mbox = chan->con_priv;
+ u32 reg;
+
+ reg = readl(mbox->base + RWTM_HOST_INT_RESET);
+
+ if (reg & SP_CMD_COMPLETE)
+ a37xx_mbox_receive(chan);
+
+ if (reg & (SP_CMD_QUEUE_FULL_ACCESS | SP_CMD_QUEUE_FULL))
+ dev_err(mbox->dev, "Secure processor command queue full\n");
+
+ writel(reg, mbox->base + RWTM_HOST_INT_RESET);
+ if (reg)
+ mbox_chan_txdone(chan, 0);
+
+ return reg ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int a37xx_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct a37xx_mbox *mbox = chan->con_priv;
+ struct armada_37xx_rwtm_tx_msg *msg = data;
+ int i;
+ u32 reg;
+
+ if (!data)
+ return -EINVAL;
+
+ reg = readl(mbox->base + RWTM_MBOX_FIFO_STATUS);
+ if (!(reg & FIFO_STS_RDY))
+ dev_warn(mbox->dev, "Secure processor not ready\n");
+
+ if ((reg & FIFO_STS_CNTR_MASK) >= FIFO_STS_CNTR_MAX) {
+ dev_err(mbox->dev, "Secure processor command queue full\n");
+ return -EBUSY;
+ }
+
+ for (i = 0; i < 16; ++i)
+ writel(msg->args[i], mbox->base + RWTM_MBOX_PARAM(i));
+ writel(msg->command, mbox->base + RWTM_MBOX_COMMAND);
+
+ return 0;
+}
+
+static int a37xx_mbox_startup(struct mbox_chan *chan)
+{
+ struct a37xx_mbox *mbox = chan->con_priv;
+ u32 reg;
+ int ret;
+
+ ret = devm_request_irq(mbox->dev, mbox->irq, a37xx_mbox_irq_handler, 0,
+ DRIVER_NAME, chan);
+ if (ret < 0) {
+ dev_err(mbox->dev, "Cannot request irq\n");
+ return ret;
+ }
+
+ /* enable IRQ generation */
+ reg = readl(mbox->base + RWTM_HOST_INT_MASK);
+ reg &= ~(SP_CMD_COMPLETE | SP_CMD_QUEUE_FULL_ACCESS | SP_CMD_QUEUE_FULL);
+ writel(reg, mbox->base + RWTM_HOST_INT_MASK);
+
+ return 0;
+}
+
+static void a37xx_mbox_shutdown(struct mbox_chan *chan)
+{
+ u32 reg;
+ struct a37xx_mbox *mbox = chan->con_priv;
+
+ /* disable interrupt generation */
+ reg = readl(mbox->base + RWTM_HOST_INT_MASK);
+ reg |= SP_CMD_COMPLETE | SP_CMD_QUEUE_FULL_ACCESS | SP_CMD_QUEUE_FULL;
+ writel(reg, mbox->base + RWTM_HOST_INT_MASK);
+
+ devm_free_irq(mbox->dev, mbox->irq, chan);
+}
+
+static const struct mbox_chan_ops a37xx_mbox_ops = {
+ .send_data = a37xx_mbox_send_data,
+ .startup = a37xx_mbox_startup,
+ .shutdown = a37xx_mbox_shutdown,
+};
+
+static int armada_37xx_mbox_probe(struct platform_device *pdev)
+{
+ struct a37xx_mbox *mbox;
+ struct mbox_chan *chans;
+ int ret;
+
+ mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ /* Allocate one channel */
+ chans = devm_kzalloc(&pdev->dev, sizeof(*chans), GFP_KERNEL);
+ if (!chans)
+ return -ENOMEM;
+
+ mbox->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mbox->base))
+ return PTR_ERR(mbox->base);
+
+ mbox->irq = platform_get_irq(pdev, 0);
+ if (mbox->irq < 0)
+ return mbox->irq;
+
+ mbox->dev = &pdev->dev;
+
+ /* Hardware supports only one channel. */
+ chans[0].con_priv = mbox;
+ mbox->controller.dev = mbox->dev;
+ mbox->controller.num_chans = 1;
+ mbox->controller.chans = chans;
+ mbox->controller.ops = &a37xx_mbox_ops;
+ mbox->controller.txdone_irq = true;
+
+ ret = devm_mbox_controller_register(mbox->dev, &mbox->controller);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not register mailbox controller\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, mbox);
+ return ret;
+}
+
+
+static const struct of_device_id armada_37xx_mbox_match[] = {
+ { .compatible = "marvell,armada-3700-rwtm-mailbox" },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, armada_37xx_mbox_match);
+
+static struct platform_driver armada_37xx_mbox_driver = {
+ .probe = armada_37xx_mbox_probe,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = armada_37xx_mbox_match,
+ },
+};
+
+module_platform_driver(armada_37xx_mbox_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("rWTM BIU Mailbox driver for Armada 37xx");
+MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c
new file mode 100644
index 000000000..e913ed1e3
--- /dev/null
+++ b/drivers/mailbox/bcm-flexrm-mailbox.c
@@ -0,0 +1,1704 @@
+/*
+ * Copyright (C) 2017 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Broadcom FlexRM Mailbox Driver
+ *
+ * Each Broadcom FlexSparx4 offload engine is implemented as an
+ * extension to Broadcom FlexRM ring manager. The FlexRM ring
+ * manager provides a set of rings which can be used to submit
+ * work to a FlexSparx4 offload engine.
+ *
+ * This driver creates a mailbox controller using a set of FlexRM
+ * rings where each mailbox channel represents a separate FlexRM ring.
+ */
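For orientation (this example is not part of the commit), a client of one of these ring channels fills a struct brcm_message from include/linux/mailbox/brcm-message.h and posts it with the generic client API; flexrm_dma_map() and flexrm_spu_write_descs() below then turn the scatterlists into ring descriptors. The helper is hypothetical, and the message must stay allocated until the rx_callback reports completion with msg->error filled in.

#include <linux/mailbox_client.h>
#include <linux/mailbox/brcm-message.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

/* Fill a persistent SPU request and hand it to a FlexRM ring channel. */
static int spu_submit(struct mbox_chan *chan, struct brcm_message *msg,
		      struct scatterlist *src, struct scatterlist *dst)
{
	memset(msg, 0, sizeof(*msg));
	msg->type = BRCM_MESSAGE_SPU;
	msg->spu.src = src;
	msg->spu.dst = dst;

	/* The controller maps the scatterlists and writes the ring descriptors. */
	return mbox_send_message(chan, msg);
}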
+
+#include <asm/barrier.h>
+#include <asm/byteorder.h>
+#include <linux/atomic.h>
+#include <linux/bitmap.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox_client.h>
+#include <linux/mailbox/brcm-message.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+/* ====== FlexRM register defines ===== */
+
+/* FlexRM configuration */
+#define RING_REGS_SIZE 0x10000
+#define RING_DESC_SIZE 8
+#define RING_DESC_INDEX(offset) \
+ ((offset) / RING_DESC_SIZE)
+#define RING_DESC_OFFSET(index) \
+ ((index) * RING_DESC_SIZE)
+#define RING_MAX_REQ_COUNT 1024
+#define RING_BD_ALIGN_ORDER 12
+#define RING_BD_ALIGN_CHECK(addr) \
+ (!((addr) & ((0x1 << RING_BD_ALIGN_ORDER) - 1)))
+#define RING_BD_TOGGLE_INVALID(offset) \
+ (((offset) >> RING_BD_ALIGN_ORDER) & 0x1)
+#define RING_BD_TOGGLE_VALID(offset) \
+ (!RING_BD_TOGGLE_INVALID(offset))
+#define RING_BD_DESC_PER_REQ 32
+#define RING_BD_DESC_COUNT \
+ (RING_MAX_REQ_COUNT * RING_BD_DESC_PER_REQ)
+#define RING_BD_SIZE \
+ (RING_BD_DESC_COUNT * RING_DESC_SIZE)
+#define RING_CMPL_ALIGN_ORDER 13
+#define RING_CMPL_DESC_COUNT RING_MAX_REQ_COUNT
+#define RING_CMPL_SIZE \
+ (RING_CMPL_DESC_COUNT * RING_DESC_SIZE)
+#define RING_VER_MAGIC 0x76303031
+
+/* Per-Ring register offsets */
+#define RING_VER 0x000
+#define RING_BD_START_ADDR 0x004
+#define RING_BD_READ_PTR 0x008
+#define RING_BD_WRITE_PTR 0x00c
+#define RING_BD_READ_PTR_DDR_LS 0x010
+#define RING_BD_READ_PTR_DDR_MS 0x014
+#define RING_CMPL_START_ADDR 0x018
+#define RING_CMPL_WRITE_PTR 0x01c
+#define RING_NUM_REQ_RECV_LS 0x020
+#define RING_NUM_REQ_RECV_MS 0x024
+#define RING_NUM_REQ_TRANS_LS 0x028
+#define RING_NUM_REQ_TRANS_MS 0x02c
+#define RING_NUM_REQ_OUTSTAND 0x030
+#define RING_CONTROL 0x034
+#define RING_FLUSH_DONE 0x038
+#define RING_MSI_ADDR_LS 0x03c
+#define RING_MSI_ADDR_MS 0x040
+#define RING_MSI_CONTROL 0x048
+#define RING_BD_READ_PTR_DDR_CONTROL 0x04c
+#define RING_MSI_DATA_VALUE 0x064
+
+/* Register RING_BD_START_ADDR fields */
+#define BD_LAST_UPDATE_HW_SHIFT 28
+#define BD_LAST_UPDATE_HW_MASK 0x1
+#define BD_START_ADDR_VALUE(pa) \
+ ((u32)((((dma_addr_t)(pa)) >> RING_BD_ALIGN_ORDER) & 0x0fffffff))
+#define BD_START_ADDR_DECODE(val) \
+ ((dma_addr_t)((val) & 0x0fffffff) << RING_BD_ALIGN_ORDER)
+
+/* Register RING_CMPL_START_ADDR fields */
+#define CMPL_START_ADDR_VALUE(pa) \
+ ((u32)((((u64)(pa)) >> RING_CMPL_ALIGN_ORDER) & 0x07ffffff))
+
+/* Register RING_CONTROL fields */
+#define CONTROL_MASK_DISABLE_CONTROL 12
+#define CONTROL_FLUSH_SHIFT 5
+#define CONTROL_ACTIVE_SHIFT 4
+#define CONTROL_RATE_ADAPT_MASK 0xf
+#define CONTROL_RATE_DYNAMIC 0x0
+#define CONTROL_RATE_FAST 0x8
+#define CONTROL_RATE_MEDIUM 0x9
+#define CONTROL_RATE_SLOW 0xa
+#define CONTROL_RATE_IDLE 0xb
+
+/* Register RING_FLUSH_DONE fields */
+#define FLUSH_DONE_MASK 0x1
+
+/* Register RING_MSI_CONTROL fields */
+#define MSI_TIMER_VAL_SHIFT 16
+#define MSI_TIMER_VAL_MASK 0xffff
+#define MSI_ENABLE_SHIFT 15
+#define MSI_ENABLE_MASK 0x1
+#define MSI_COUNT_SHIFT 0
+#define MSI_COUNT_MASK 0x3ff
+
+/* Register RING_BD_READ_PTR_DDR_CONTROL fields */
+#define BD_READ_PTR_DDR_TIMER_VAL_SHIFT 16
+#define BD_READ_PTR_DDR_TIMER_VAL_MASK 0xffff
+#define BD_READ_PTR_DDR_ENABLE_SHIFT 15
+#define BD_READ_PTR_DDR_ENABLE_MASK 0x1
+
+/* ====== FlexRM ring descriptor defines ===== */
+
+/* Completion descriptor format */
+#define CMPL_OPAQUE_SHIFT 0
+#define CMPL_OPAQUE_MASK 0xffff
+#define CMPL_ENGINE_STATUS_SHIFT 16
+#define CMPL_ENGINE_STATUS_MASK 0xffff
+#define CMPL_DME_STATUS_SHIFT 32
+#define CMPL_DME_STATUS_MASK 0xffff
+#define CMPL_RM_STATUS_SHIFT 48
+#define CMPL_RM_STATUS_MASK 0xffff
+
+/* Completion DME status code */
+#define DME_STATUS_MEM_COR_ERR BIT(0)
+#define DME_STATUS_MEM_UCOR_ERR BIT(1)
+#define DME_STATUS_FIFO_UNDERFLOW BIT(2)
+#define DME_STATUS_FIFO_OVERFLOW BIT(3)
+#define DME_STATUS_RRESP_ERR BIT(4)
+#define DME_STATUS_BRESP_ERR BIT(5)
+#define DME_STATUS_ERROR_MASK (DME_STATUS_MEM_COR_ERR | \
+ DME_STATUS_MEM_UCOR_ERR | \
+ DME_STATUS_FIFO_UNDERFLOW | \
+ DME_STATUS_FIFO_OVERFLOW | \
+ DME_STATUS_RRESP_ERR | \
+ DME_STATUS_BRESP_ERR)
+
+/* Completion RM status code */
+#define RM_STATUS_CODE_SHIFT 0
+#define RM_STATUS_CODE_MASK 0x3ff
+#define RM_STATUS_CODE_GOOD 0x0
+#define RM_STATUS_CODE_AE_TIMEOUT 0x3ff
+
+/* General descriptor format */
+#define DESC_TYPE_SHIFT 60
+#define DESC_TYPE_MASK 0xf
+#define DESC_PAYLOAD_SHIFT 0
+#define DESC_PAYLOAD_MASK 0x0fffffffffffffff
+
+/* Null descriptor format */
+#define NULL_TYPE 0
+#define NULL_TOGGLE_SHIFT 58
+#define NULL_TOGGLE_MASK 0x1
+
+/* Header descriptor format */
+#define HEADER_TYPE 1
+#define HEADER_TOGGLE_SHIFT 58
+#define HEADER_TOGGLE_MASK 0x1
+#define HEADER_ENDPKT_SHIFT 57
+#define HEADER_ENDPKT_MASK 0x1
+#define HEADER_STARTPKT_SHIFT 56
+#define HEADER_STARTPKT_MASK 0x1
+#define HEADER_BDCOUNT_SHIFT 36
+#define HEADER_BDCOUNT_MASK 0x1f
+#define HEADER_BDCOUNT_MAX HEADER_BDCOUNT_MASK
+#define HEADER_FLAGS_SHIFT 16
+#define HEADER_FLAGS_MASK 0xffff
+#define HEADER_OPAQUE_SHIFT 0
+#define HEADER_OPAQUE_MASK 0xffff
+
+/* Source (SRC) descriptor format */
+#define SRC_TYPE 2
+#define SRC_LENGTH_SHIFT 44
+#define SRC_LENGTH_MASK 0xffff
+#define SRC_ADDR_SHIFT 0
+#define SRC_ADDR_MASK 0x00000fffffffffff
+
+/* Destination (DST) descriptor format */
+#define DST_TYPE 3
+#define DST_LENGTH_SHIFT 44
+#define DST_LENGTH_MASK 0xffff
+#define DST_ADDR_SHIFT 0
+#define DST_ADDR_MASK 0x00000fffffffffff
+
+/* Immediate (IMM) descriptor format */
+#define IMM_TYPE 4
+#define IMM_DATA_SHIFT 0
+#define IMM_DATA_MASK 0x0fffffffffffffff
+
+/* Next pointer (NPTR) descriptor format */
+#define NPTR_TYPE 5
+#define NPTR_TOGGLE_SHIFT 58
+#define NPTR_TOGGLE_MASK 0x1
+#define NPTR_ADDR_SHIFT 0
+#define NPTR_ADDR_MASK 0x00000fffffffffff
+
+/* Mega source (MSRC) descriptor format */
+#define MSRC_TYPE 6
+#define MSRC_LENGTH_SHIFT 44
+#define MSRC_LENGTH_MASK 0xffff
+#define MSRC_ADDR_SHIFT 0
+#define MSRC_ADDR_MASK 0x00000fffffffffff
+
+/* Mega destination (MDST) descriptor format */
+#define MDST_TYPE 7
+#define MDST_LENGTH_SHIFT 44
+#define MDST_LENGTH_MASK 0xffff
+#define MDST_ADDR_SHIFT 0
+#define MDST_ADDR_MASK 0x00000fffffffffff
+
+/* Source with tlast (SRCT) descriptor format */
+#define SRCT_TYPE 8
+#define SRCT_LENGTH_SHIFT 44
+#define SRCT_LENGTH_MASK 0xffff
+#define SRCT_ADDR_SHIFT 0
+#define SRCT_ADDR_MASK 0x00000fffffffffff
+
+/* Destination with tlast (DSTT) descriptor format */
+#define DSTT_TYPE 9
+#define DSTT_LENGTH_SHIFT 44
+#define DSTT_LENGTH_MASK 0xffff
+#define DSTT_ADDR_SHIFT 0
+#define DSTT_ADDR_MASK 0x00000fffffffffff
+
+/* Immediate with tlast (IMMT) descriptor format */
+#define IMMT_TYPE 10
+#define IMMT_DATA_SHIFT 0
+#define IMMT_DATA_MASK 0x0fffffffffffffff
+
+/* Descriptor helper macros */
+#define DESC_DEC(_d, _s, _m) (((_d) >> (_s)) & (_m))
+#define DESC_ENC(_d, _v, _s, _m) \
+ do { \
+ (_d) &= ~((u64)(_m) << (_s)); \
+ (_d) |= (((u64)(_v) & (_m)) << (_s)); \
+ } while (0)
+
+/* ====== FlexRM data structures ===== */
+
+struct flexrm_ring {
+ /* Unprotected members */
+ int num;
+ struct flexrm_mbox *mbox;
+ void __iomem *regs;
+ bool irq_requested;
+ unsigned int irq;
+ cpumask_t irq_aff_hint;
+ unsigned int msi_timer_val;
+ unsigned int msi_count_threshold;
+ struct brcm_message *requests[RING_MAX_REQ_COUNT];
+ void *bd_base;
+ dma_addr_t bd_dma_base;
+ u32 bd_write_offset;
+ void *cmpl_base;
+ dma_addr_t cmpl_dma_base;
+ /* Atomic stats */
+ atomic_t msg_send_count;
+ atomic_t msg_cmpl_count;
+ /* Protected members */
+ spinlock_t lock;
+ DECLARE_BITMAP(requests_bmap, RING_MAX_REQ_COUNT);
+ u32 cmpl_read_offset;
+};
+
+struct flexrm_mbox {
+ struct device *dev;
+ void __iomem *regs;
+ u32 num_rings;
+ struct flexrm_ring *rings;
+ struct dma_pool *bd_pool;
+ struct dma_pool *cmpl_pool;
+ struct dentry *root;
+ struct mbox_controller controller;
+};
+
+/* ====== FlexRM ring descriptor helper routines ===== */
+
+static u64 flexrm_read_desc(void *desc_ptr)
+{
+ return le64_to_cpu(*((u64 *)desc_ptr));
+}
+
+static void flexrm_write_desc(void *desc_ptr, u64 desc)
+{
+ *((u64 *)desc_ptr) = cpu_to_le64(desc);
+}
+
+static u32 flexrm_cmpl_desc_to_reqid(u64 cmpl_desc)
+{
+ return (u32)(cmpl_desc & CMPL_OPAQUE_MASK);
+}
+
+static int flexrm_cmpl_desc_to_error(u64 cmpl_desc)
+{
+ u32 status;
+
+ status = DESC_DEC(cmpl_desc, CMPL_DME_STATUS_SHIFT,
+ CMPL_DME_STATUS_MASK);
+ if (status & DME_STATUS_ERROR_MASK)
+ return -EIO;
+
+ status = DESC_DEC(cmpl_desc, CMPL_RM_STATUS_SHIFT,
+ CMPL_RM_STATUS_MASK);
+ status &= RM_STATUS_CODE_MASK;
+ if (status == RM_STATUS_CODE_AE_TIMEOUT)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static bool flexrm_is_next_table_desc(void *desc_ptr)
+{
+ u64 desc = flexrm_read_desc(desc_ptr);
+ u32 type = DESC_DEC(desc, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+
+ return (type == NPTR_TYPE) ? true : false;
+}
+
+static u64 flexrm_next_table_desc(u32 toggle, dma_addr_t next_addr)
+{
+ u64 desc = 0;
+
+ DESC_ENC(desc, NPTR_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+ DESC_ENC(desc, toggle, NPTR_TOGGLE_SHIFT, NPTR_TOGGLE_MASK);
+ DESC_ENC(desc, next_addr, NPTR_ADDR_SHIFT, NPTR_ADDR_MASK);
+
+ return desc;
+}
+
+static u64 flexrm_null_desc(u32 toggle)
+{
+ u64 desc = 0;
+
+ DESC_ENC(desc, NULL_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+ DESC_ENC(desc, toggle, NULL_TOGGLE_SHIFT, NULL_TOGGLE_MASK);
+
+ return desc;
+}
+
+static u32 flexrm_estimate_header_desc_count(u32 nhcnt)
+{
+ u32 hcnt = nhcnt / HEADER_BDCOUNT_MAX;
+
+ if (!(nhcnt % HEADER_BDCOUNT_MAX))
+ hcnt += 1;
+
+ return hcnt;
+}
+
+static void flexrm_flip_header_toggle(void *desc_ptr)
+{
+ u64 desc = flexrm_read_desc(desc_ptr);
+
+ if (desc & ((u64)0x1 << HEADER_TOGGLE_SHIFT))
+ desc &= ~((u64)0x1 << HEADER_TOGGLE_SHIFT);
+ else
+ desc |= ((u64)0x1 << HEADER_TOGGLE_SHIFT);
+
+ flexrm_write_desc(desc_ptr, desc);
+}
+
+static u64 flexrm_header_desc(u32 toggle, u32 startpkt, u32 endpkt,
+ u32 bdcount, u32 flags, u32 opaque)
+{
+ u64 desc = 0;
+
+ DESC_ENC(desc, HEADER_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+ DESC_ENC(desc, toggle, HEADER_TOGGLE_SHIFT, HEADER_TOGGLE_MASK);
+ DESC_ENC(desc, startpkt, HEADER_STARTPKT_SHIFT, HEADER_STARTPKT_MASK);
+ DESC_ENC(desc, endpkt, HEADER_ENDPKT_SHIFT, HEADER_ENDPKT_MASK);
+ DESC_ENC(desc, bdcount, HEADER_BDCOUNT_SHIFT, HEADER_BDCOUNT_MASK);
+ DESC_ENC(desc, flags, HEADER_FLAGS_SHIFT, HEADER_FLAGS_MASK);
+ DESC_ENC(desc, opaque, HEADER_OPAQUE_SHIFT, HEADER_OPAQUE_MASK);
+
+ return desc;
+}
+
+static void flexrm_enqueue_desc(u32 nhpos, u32 nhcnt, u32 reqid,
+ u64 desc, void **desc_ptr, u32 *toggle,
+ void *start_desc, void *end_desc)
+{
+ u64 d;
+ u32 nhavail, _toggle, _startpkt, _endpkt, _bdcount;
+
+ /* Sanity check */
+ if (nhcnt <= nhpos)
+ return;
+
+ /*
+ * Each request or packet starts with a HEADER descriptor followed
+ * by one or more non-HEADER descriptors (SRC, SRCT, MSRC, DST,
+ * DSTT, MDST, IMM, and IMMT). The number of non-HEADER descriptors
+ * following a HEADER descriptor is represented by BDCOUNT field
+ * of HEADER descriptor. The max value of BDCOUNT field is 31 which
+ * means we can only have 31 non-HEADER descriptors following one
+ * HEADER descriptor.
+ *
+ * In general use, the number of non-HEADER descriptors can easily go
+ * beyond 31. To tackle this situation, we have packet (or request)
+ * extension bits (STARTPKT and ENDPKT) in the HEADER descriptor.
+ *
+ * To use packet extension, the first HEADER descriptor of request
+ * (or packet) will have STARTPKT=1 and ENDPKT=0. The intermediate
+ * HEADER descriptors will have STARTPKT=0 and ENDPKT=0. The last
+ * HEADER descriptor will have STARTPKT=0 and ENDPKT=1. Also, the
+ * TOGGLE bit of the first HEADER will be set to invalid state to
+ * ensure that FlexRM does not start fetching descriptors till all
+ * descriptors are enqueued. The user of this function will flip
+ * the TOGGLE bit of first HEADER after all descriptors are
+ * enqueued.
+ */
+
+ if ((nhpos % HEADER_BDCOUNT_MAX == 0) && (nhcnt - nhpos)) {
+ /* Prepare the header descriptor */
+ nhavail = (nhcnt - nhpos);
+ _toggle = (nhpos == 0) ? !(*toggle) : (*toggle);
+ _startpkt = (nhpos == 0) ? 0x1 : 0x0;
+ _endpkt = (nhavail <= HEADER_BDCOUNT_MAX) ? 0x1 : 0x0;
+ _bdcount = (nhavail <= HEADER_BDCOUNT_MAX) ?
+ nhavail : HEADER_BDCOUNT_MAX;
+ d = flexrm_header_desc(_toggle, _startpkt, _endpkt,
+ _bdcount, 0x0, reqid);
+
+ /* Write header descriptor */
+ flexrm_write_desc(*desc_ptr, d);
+
+ /* Point to next descriptor */
+ *desc_ptr += sizeof(desc);
+ if (*desc_ptr == end_desc)
+ *desc_ptr = start_desc;
+
+ /* Skip next pointer descriptors */
+ while (flexrm_is_next_table_desc(*desc_ptr)) {
+ *toggle = (*toggle) ? 0 : 1;
+ *desc_ptr += sizeof(desc);
+ if (*desc_ptr == end_desc)
+ *desc_ptr = start_desc;
+ }
+ }
+
+ /* Write desired descriptor */
+ flexrm_write_desc(*desc_ptr, desc);
+
+ /* Point to next descriptor */
+ *desc_ptr += sizeof(desc);
+ if (*desc_ptr == end_desc)
+ *desc_ptr = start_desc;
+
+ /* Skip next pointer descriptors */
+ while (flexrm_is_next_table_desc(*desc_ptr)) {
+ *toggle = (*toggle) ? 0 : 1;
+ *desc_ptr += sizeof(desc);
+ if (*desc_ptr == end_desc)
+ *desc_ptr = start_desc;
+ }
+}
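As a worked example of the chaining described in the comment above (not part of the commit): a request with 40 non-header descriptors is split into two stretches. At nhpos 0 this function emits a HEADER with STARTPKT=1, ENDPKT=0, BDCOUNT=31 and the TOGGLE bit deliberately left in the invalid state; at nhpos 31 it emits a second HEADER with STARTPKT=0, ENDPKT=1, BDCOUNT=9 and a valid TOGGLE. Only after every descriptor is in place does the caller (flexrm_spu_write_descs() or flexrm_sba_write_descs()) call flexrm_flip_header_toggle() on the first HEADER, so the hardware never starts fetching a half-written chain.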
+
+static u64 flexrm_src_desc(dma_addr_t addr, unsigned int length)
+{
+ u64 desc = 0;
+
+ DESC_ENC(desc, SRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+ DESC_ENC(desc, length, SRC_LENGTH_SHIFT, SRC_LENGTH_MASK);
+ DESC_ENC(desc, addr, SRC_ADDR_SHIFT, SRC_ADDR_MASK);
+
+ return desc;
+}
+
+static u64 flexrm_msrc_desc(dma_addr_t addr, unsigned int length_div_16)
+{
+ u64 desc = 0;
+
+ DESC_ENC(desc, MSRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+ DESC_ENC(desc, length_div_16, MSRC_LENGTH_SHIFT, MSRC_LENGTH_MASK);
+ DESC_ENC(desc, addr, MSRC_ADDR_SHIFT, MSRC_ADDR_MASK);
+
+ return desc;
+}
+
+static u64 flexrm_dst_desc(dma_addr_t addr, unsigned int length)
+{
+ u64 desc = 0;
+
+ DESC_ENC(desc, DST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+ DESC_ENC(desc, length, DST_LENGTH_SHIFT, DST_LENGTH_MASK);
+ DESC_ENC(desc, addr, DST_ADDR_SHIFT, DST_ADDR_MASK);
+
+ return desc;
+}
+
+static u64 flexrm_mdst_desc(dma_addr_t addr, unsigned int length_div_16)
+{
+ u64 desc = 0;
+
+ DESC_ENC(desc, MDST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+ DESC_ENC(desc, length_div_16, MDST_LENGTH_SHIFT, MDST_LENGTH_MASK);
+ DESC_ENC(desc, addr, MDST_ADDR_SHIFT, MDST_ADDR_MASK);
+
+ return desc;
+}
+
+static u64 flexrm_imm_desc(u64 data)
+{
+ u64 desc = 0;
+
+ DESC_ENC(desc, IMM_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+ DESC_ENC(desc, data, IMM_DATA_SHIFT, IMM_DATA_MASK);
+
+ return desc;
+}
+
+static u64 flexrm_srct_desc(dma_addr_t addr, unsigned int length)
+{
+ u64 desc = 0;
+
+ DESC_ENC(desc, SRCT_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+ DESC_ENC(desc, length, SRCT_LENGTH_SHIFT, SRCT_LENGTH_MASK);
+ DESC_ENC(desc, addr, SRCT_ADDR_SHIFT, SRCT_ADDR_MASK);
+
+ return desc;
+}
+
+static u64 flexrm_dstt_desc(dma_addr_t addr, unsigned int length)
+{
+ u64 desc = 0;
+
+ DESC_ENC(desc, DSTT_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+ DESC_ENC(desc, length, DSTT_LENGTH_SHIFT, DSTT_LENGTH_MASK);
+ DESC_ENC(desc, addr, DSTT_ADDR_SHIFT, DSTT_ADDR_MASK);
+
+ return desc;
+}
+
+static u64 flexrm_immt_desc(u64 data)
+{
+ u64 desc = 0;
+
+ DESC_ENC(desc, IMMT_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+ DESC_ENC(desc, data, IMMT_DATA_SHIFT, IMMT_DATA_MASK);
+
+ return desc;
+}
+
+static bool flexrm_spu_sanity_check(struct brcm_message *msg)
+{
+ struct scatterlist *sg;
+
+ if (!msg->spu.src || !msg->spu.dst)
+ return false;
+ for (sg = msg->spu.src; sg; sg = sg_next(sg)) {
+ if (sg->length & 0xf) {
+ if (sg->length > SRC_LENGTH_MASK)
+ return false;
+ } else {
+ if (sg->length > (MSRC_LENGTH_MASK * 16))
+ return false;
+ }
+ }
+ for (sg = msg->spu.dst; sg; sg = sg_next(sg)) {
+ if (sg->length & 0xf) {
+ if (sg->length > DST_LENGTH_MASK)
+ return false;
+ } else {
+ if (sg->length > (MDST_LENGTH_MASK * 16))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static u32 flexrm_spu_estimate_nonheader_desc_count(struct brcm_message *msg)
+{
+ u32 cnt = 0;
+ unsigned int dst_target = 0;
+ struct scatterlist *src_sg = msg->spu.src, *dst_sg = msg->spu.dst;
+
+ while (src_sg || dst_sg) {
+ if (src_sg) {
+ cnt++;
+ dst_target = src_sg->length;
+ src_sg = sg_next(src_sg);
+ } else
+ dst_target = UINT_MAX;
+
+ while (dst_target && dst_sg) {
+ cnt++;
+ if (dst_sg->length < dst_target)
+ dst_target -= dst_sg->length;
+ else
+ dst_target = 0;
+ dst_sg = sg_next(dst_sg);
+ }
+ }
+
+ return cnt;
+}
+
+static int flexrm_spu_dma_map(struct device *dev, struct brcm_message *msg)
+{
+ int rc;
+
+ rc = dma_map_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
+ DMA_TO_DEVICE);
+ if (!rc)
+ return -EIO;
+
+ rc = dma_map_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst),
+ DMA_FROM_DEVICE);
+ if (!rc) {
+ dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
+ DMA_TO_DEVICE);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void flexrm_spu_dma_unmap(struct device *dev, struct brcm_message *msg)
+{
+ dma_unmap_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst),
+ DMA_FROM_DEVICE);
+ dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
+ DMA_TO_DEVICE);
+}
+
+static void *flexrm_spu_write_descs(struct brcm_message *msg, u32 nhcnt,
+ u32 reqid, void *desc_ptr, u32 toggle,
+ void *start_desc, void *end_desc)
+{
+ u64 d;
+ u32 nhpos = 0;
+ void *orig_desc_ptr = desc_ptr;
+ unsigned int dst_target = 0;
+ struct scatterlist *src_sg = msg->spu.src, *dst_sg = msg->spu.dst;
+
+ while (src_sg || dst_sg) {
+ if (src_sg) {
+ if (sg_dma_len(src_sg) & 0xf)
+ d = flexrm_src_desc(sg_dma_address(src_sg),
+ sg_dma_len(src_sg));
+ else
+ d = flexrm_msrc_desc(sg_dma_address(src_sg),
+ sg_dma_len(src_sg)/16);
+ flexrm_enqueue_desc(nhpos, nhcnt, reqid,
+ d, &desc_ptr, &toggle,
+ start_desc, end_desc);
+ nhpos++;
+ dst_target = sg_dma_len(src_sg);
+ src_sg = sg_next(src_sg);
+ } else
+ dst_target = UINT_MAX;
+
+ while (dst_target && dst_sg) {
+ if (sg_dma_len(dst_sg) & 0xf)
+ d = flexrm_dst_desc(sg_dma_address(dst_sg),
+ sg_dma_len(dst_sg));
+ else
+ d = flexrm_mdst_desc(sg_dma_address(dst_sg),
+ sg_dma_len(dst_sg)/16);
+ flexrm_enqueue_desc(nhpos, nhcnt, reqid,
+ d, &desc_ptr, &toggle,
+ start_desc, end_desc);
+ nhpos++;
+ if (sg_dma_len(dst_sg) < dst_target)
+ dst_target -= sg_dma_len(dst_sg);
+ else
+ dst_target = 0;
+ dst_sg = sg_next(dst_sg);
+ }
+ }
+
+ /* Null descriptor with invalid toggle bit */
+ flexrm_write_desc(desc_ptr, flexrm_null_desc(!toggle));
+
+ /* Ensure that descriptors have been written to memory */
+ wmb();
+
+ /* Flip toggle bit in header */
+ flexrm_flip_header_toggle(orig_desc_ptr);
+
+ return desc_ptr;
+}
+
+static bool flexrm_sba_sanity_check(struct brcm_message *msg)
+{
+ u32 i;
+
+ if (!msg->sba.cmds || !msg->sba.cmds_count)
+ return false;
+
+ for (i = 0; i < msg->sba.cmds_count; i++) {
+ if (((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) ||
+ (msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C)) &&
+ (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT))
+ return false;
+ if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) &&
+ (msg->sba.cmds[i].data_len > SRCT_LENGTH_MASK))
+ return false;
+ if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C) &&
+ (msg->sba.cmds[i].data_len > SRCT_LENGTH_MASK))
+ return false;
+ if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_RESP) &&
+ (msg->sba.cmds[i].resp_len > DSTT_LENGTH_MASK))
+ return false;
+ if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT) &&
+ (msg->sba.cmds[i].data_len > DSTT_LENGTH_MASK))
+ return false;
+ }
+
+ return true;
+}
+
+static u32 flexrm_sba_estimate_nonheader_desc_count(struct brcm_message *msg)
+{
+ u32 i, cnt;
+
+ cnt = 0;
+ for (i = 0; i < msg->sba.cmds_count; i++) {
+ cnt++;
+
+ if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) ||
+ (msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C))
+ cnt++;
+
+ if (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_RESP)
+ cnt++;
+
+ if (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT)
+ cnt++;
+ }
+
+ return cnt;
+}
+
+static void *flexrm_sba_write_descs(struct brcm_message *msg, u32 nhcnt,
+ u32 reqid, void *desc_ptr, u32 toggle,
+ void *start_desc, void *end_desc)
+{
+ u64 d;
+ u32 i, nhpos = 0;
+ struct brcm_sba_command *c;
+ void *orig_desc_ptr = desc_ptr;
+
+ /* Convert SBA commands into descriptors */
+ for (i = 0; i < msg->sba.cmds_count; i++) {
+ c = &msg->sba.cmds[i];
+
+ if ((c->flags & BRCM_SBA_CMD_HAS_RESP) &&
+ (c->flags & BRCM_SBA_CMD_HAS_OUTPUT)) {
+ /* Destination response descriptor */
+ d = flexrm_dst_desc(c->resp, c->resp_len);
+ flexrm_enqueue_desc(nhpos, nhcnt, reqid,
+ d, &desc_ptr, &toggle,
+ start_desc, end_desc);
+ nhpos++;
+ } else if (c->flags & BRCM_SBA_CMD_HAS_RESP) {
+ /* Destination response with tlast descriptor */
+ d = flexrm_dstt_desc(c->resp, c->resp_len);
+ flexrm_enqueue_desc(nhpos, nhcnt, reqid,
+ d, &desc_ptr, &toggle,
+ start_desc, end_desc);
+ nhpos++;
+ }
+
+ if (c->flags & BRCM_SBA_CMD_HAS_OUTPUT) {
+ /* Destination with tlast descriptor */
+ d = flexrm_dstt_desc(c->data, c->data_len);
+ flexrm_enqueue_desc(nhpos, nhcnt, reqid,
+ d, &desc_ptr, &toggle,
+ start_desc, end_desc);
+ nhpos++;
+ }
+
+ if (c->flags & BRCM_SBA_CMD_TYPE_B) {
+ /* Command as immediate descriptor */
+ d = flexrm_imm_desc(c->cmd);
+ flexrm_enqueue_desc(nhpos, nhcnt, reqid,
+ d, &desc_ptr, &toggle,
+ start_desc, end_desc);
+ nhpos++;
+ } else {
+ /* Command as immediate descriptor with tlast */
+ d = flexrm_immt_desc(c->cmd);
+ flexrm_enqueue_desc(nhpos, nhcnt, reqid,
+ d, &desc_ptr, &toggle,
+ start_desc, end_desc);
+ nhpos++;
+ }
+
+ if ((c->flags & BRCM_SBA_CMD_TYPE_B) ||
+ (c->flags & BRCM_SBA_CMD_TYPE_C)) {
+ /* Source with tlast descriptor */
+ d = flexrm_srct_desc(c->data, c->data_len);
+ flexrm_enqueue_desc(nhpos, nhcnt, reqid,
+ d, &desc_ptr, &toggle,
+ start_desc, end_desc);
+ nhpos++;
+ }
+ }
+
+ /* Null descriptor with invalid toggle bit */
+ flexrm_write_desc(desc_ptr, flexrm_null_desc(!toggle));
+
+ /* Ensure that descriptors have been written to memory */
+ wmb();
+
+ /* Flip toggle bit in header */
+ flexrm_flip_header_toggle(orig_desc_ptr);
+
+ return desc_ptr;
+}
+
+static bool flexrm_sanity_check(struct brcm_message *msg)
+{
+ if (!msg)
+ return false;
+
+ switch (msg->type) {
+ case BRCM_MESSAGE_SPU:
+ return flexrm_spu_sanity_check(msg);
+ case BRCM_MESSAGE_SBA:
+ return flexrm_sba_sanity_check(msg);
+ default:
+ return false;
+ };
+}
+
+static u32 flexrm_estimate_nonheader_desc_count(struct brcm_message *msg)
+{
+ if (!msg)
+ return 0;
+
+ switch (msg->type) {
+ case BRCM_MESSAGE_SPU:
+ return flexrm_spu_estimate_nonheader_desc_count(msg);
+ case BRCM_MESSAGE_SBA:
+ return flexrm_sba_estimate_nonheader_desc_count(msg);
+ default:
+ return 0;
+ };
+}
+
+static int flexrm_dma_map(struct device *dev, struct brcm_message *msg)
+{
+ if (!dev || !msg)
+ return -EINVAL;
+
+ switch (msg->type) {
+ case BRCM_MESSAGE_SPU:
+ return flexrm_spu_dma_map(dev, msg);
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static void flexrm_dma_unmap(struct device *dev, struct brcm_message *msg)
+{
+ if (!dev || !msg)
+ return;
+
+ switch (msg->type) {
+ case BRCM_MESSAGE_SPU:
+ flexrm_spu_dma_unmap(dev, msg);
+ break;
+ default:
+ break;
+ }
+}
+
+static void *flexrm_write_descs(struct brcm_message *msg, u32 nhcnt,
+ u32 reqid, void *desc_ptr, u32 toggle,
+ void *start_desc, void *end_desc)
+{
+ if (!msg || !desc_ptr || !start_desc || !end_desc)
+ return ERR_PTR(-ENOTSUPP);
+
+ if ((desc_ptr < start_desc) || (end_desc <= desc_ptr))
+ return ERR_PTR(-ERANGE);
+
+ switch (msg->type) {
+ case BRCM_MESSAGE_SPU:
+ return flexrm_spu_write_descs(msg, nhcnt, reqid,
+ desc_ptr, toggle,
+ start_desc, end_desc);
+ case BRCM_MESSAGE_SBA:
+ return flexrm_sba_write_descs(msg, nhcnt, reqid,
+ desc_ptr, toggle,
+ start_desc, end_desc);
+ default:
+ return ERR_PTR(-ENOTSUPP);
+ }
+}
+
+/* ====== FlexRM driver helper routines ===== */
+
+static void flexrm_write_config_in_seqfile(struct flexrm_mbox *mbox,
+ struct seq_file *file)
+{
+ int i;
+ const char *state;
+ struct flexrm_ring *ring;
+
+ seq_printf(file, "%-5s %-9s %-18s %-10s %-18s %-10s\n",
+ "Ring#", "State", "BD_Addr", "BD_Size",
+ "Cmpl_Addr", "Cmpl_Size");
+
+ for (i = 0; i < mbox->num_rings; i++) {
+ ring = &mbox->rings[i];
+ if (readl(ring->regs + RING_CONTROL) &
+ BIT(CONTROL_ACTIVE_SHIFT))
+ state = "active";
+ else
+ state = "inactive";
+ seq_printf(file,
+ "%-5d %-9s 0x%016llx 0x%08x 0x%016llx 0x%08x\n",
+ ring->num, state,
+ (unsigned long long)ring->bd_dma_base,
+ (u32)RING_BD_SIZE,
+ (unsigned long long)ring->cmpl_dma_base,
+ (u32)RING_CMPL_SIZE);
+ }
+}
+
+static void flexrm_write_stats_in_seqfile(struct flexrm_mbox *mbox,
+ struct seq_file *file)
+{
+ int i;
+ u32 val, bd_read_offset;
+ struct flexrm_ring *ring;
+
+ seq_printf(file, "%-5s %-10s %-10s %-10s %-11s %-11s\n",
+ "Ring#", "BD_Read", "BD_Write",
+ "Cmpl_Read", "Submitted", "Completed");
+
+ for (i = 0; i < mbox->num_rings; i++) {
+ ring = &mbox->rings[i];
+ bd_read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR);
+ val = readl_relaxed(ring->regs + RING_BD_START_ADDR);
+ bd_read_offset *= RING_DESC_SIZE;
+ bd_read_offset += (u32)(BD_START_ADDR_DECODE(val) -
+ ring->bd_dma_base);
+ seq_printf(file, "%-5d 0x%08x 0x%08x 0x%08x %-11d %-11d\n",
+ ring->num,
+ (u32)bd_read_offset,
+ (u32)ring->bd_write_offset,
+ (u32)ring->cmpl_read_offset,
+ (u32)atomic_read(&ring->msg_send_count),
+ (u32)atomic_read(&ring->msg_cmpl_count));
+ }
+}
+
+static int flexrm_new_request(struct flexrm_ring *ring,
+ struct brcm_message *batch_msg,
+ struct brcm_message *msg)
+{
+ void *next;
+ unsigned long flags;
+ u32 val, count, nhcnt;
+ u32 read_offset, write_offset;
+ bool exit_cleanup = false;
+ int ret = 0, reqid;
+
+ /* Do sanity check on message */
+ if (!flexrm_sanity_check(msg))
+ return -EIO;
+ msg->error = 0;
+
+ /* Reserve a free request id; if none is available, bail out. */
+ spin_lock_irqsave(&ring->lock, flags);
+ reqid = bitmap_find_free_region(ring->requests_bmap,
+ RING_MAX_REQ_COUNT, 0);
+ spin_unlock_irqrestore(&ring->lock, flags);
+ if (reqid < 0)
+ return -ENOSPC;
+ ring->requests[reqid] = msg;
+
+ /* Do DMA mappings for the message */
+ ret = flexrm_dma_map(ring->mbox->dev, msg);
+ if (ret < 0) {
+ ring->requests[reqid] = NULL;
+ spin_lock_irqsave(&ring->lock, flags);
+ bitmap_release_region(ring->requests_bmap, reqid, 0);
+ spin_unlock_irqrestore(&ring->lock, flags);
+ return ret;
+ }
+
+ /* Determine current HW BD read offset */
+ read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR);
+ val = readl_relaxed(ring->regs + RING_BD_START_ADDR);
+ read_offset *= RING_DESC_SIZE;
+ read_offset += (u32)(BD_START_ADDR_DECODE(val) - ring->bd_dma_base);
+
+ /*
+ * Number of required descriptors = number of non-header descriptors +
+ * number of header descriptors +
+ * 1x null descriptor
+ */
+ nhcnt = flexrm_estimate_nonheader_desc_count(msg);
+ count = flexrm_estimate_header_desc_count(nhcnt) + nhcnt + 1;
+
+ /* Check for available descriptor space. */
+ write_offset = ring->bd_write_offset;
+ while (count) {
+ if (!flexrm_is_next_table_desc(ring->bd_base + write_offset))
+ count--;
+ write_offset += RING_DESC_SIZE;
+ if (write_offset == RING_BD_SIZE)
+ write_offset = 0x0;
+ if (write_offset == read_offset)
+ break;
+ }
+ if (count) {
+ ret = -ENOSPC;
+ exit_cleanup = true;
+ goto exit;
+ }
+
+ /* Write descriptors to ring */
+ next = flexrm_write_descs(msg, nhcnt, reqid,
+ ring->bd_base + ring->bd_write_offset,
+ RING_BD_TOGGLE_VALID(ring->bd_write_offset),
+ ring->bd_base, ring->bd_base + RING_BD_SIZE);
+ if (IS_ERR(next)) {
+ ret = PTR_ERR(next);
+ exit_cleanup = true;
+ goto exit;
+ }
+
+ /* Save ring BD write offset */
+ ring->bd_write_offset = (unsigned long)(next - ring->bd_base);
+
+ /* Increment number of messages sent */
+ atomic_inc_return(&ring->msg_send_count);
+
+exit:
+ /* Update error status in message */
+ msg->error = ret;
+
+ /* Cleanup if we failed */
+ if (exit_cleanup) {
+ flexrm_dma_unmap(ring->mbox->dev, msg);
+ ring->requests[reqid] = NULL;
+ spin_lock_irqsave(&ring->lock, flags);
+ bitmap_release_region(ring->requests_bmap, reqid, 0);
+ spin_unlock_irqrestore(&ring->lock, flags);
+ }
+
+ return ret;
+}
+
+static int flexrm_process_completions(struct flexrm_ring *ring)
+{
+ u64 desc;
+ int err, count = 0;
+ unsigned long flags;
+ struct brcm_message *msg = NULL;
+ u32 reqid, cmpl_read_offset, cmpl_write_offset;
+ struct mbox_chan *chan = &ring->mbox->controller.chans[ring->num];
+
+ spin_lock_irqsave(&ring->lock, flags);
+
+ /*
+ * Get current completion read and write offset
+ *
+ * Note: We should read the completion write pointer at least once
+ * after we get a MSI interrupt because HW maintains internal
+ * MSI status which will allow next MSI interrupt only after
+ * completion write pointer is read.
+ */
+ cmpl_write_offset = readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR);
+ cmpl_write_offset *= RING_DESC_SIZE;
+ cmpl_read_offset = ring->cmpl_read_offset;
+ ring->cmpl_read_offset = cmpl_write_offset;
+
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ /* For each completed request notify mailbox clients */
+ reqid = 0;
+ while (cmpl_read_offset != cmpl_write_offset) {
+ /* Dequeue next completion descriptor */
+ desc = *((u64 *)(ring->cmpl_base + cmpl_read_offset));
+
+ /* Next read offset */
+ cmpl_read_offset += RING_DESC_SIZE;
+ if (cmpl_read_offset == RING_CMPL_SIZE)
+ cmpl_read_offset = 0;
+
+ /* Decode error from completion descriptor */
+ err = flexrm_cmpl_desc_to_error(desc);
+ if (err < 0) {
+ dev_warn(ring->mbox->dev,
+ "ring%d got completion desc=0x%lx with error %d\n",
+ ring->num, (unsigned long)desc, err);
+ }
+
+ /* Determine request id from completion descriptor */
+ reqid = flexrm_cmpl_desc_to_reqid(desc);
+
+ /* Determine message pointer based on reqid */
+ msg = ring->requests[reqid];
+ if (!msg) {
+ dev_warn(ring->mbox->dev,
+ "ring%d null msg pointer for completion desc=0x%lx\n",
+ ring->num, (unsigned long)desc);
+ continue;
+ }
+
+ /* Release reqid for recycling */
+ ring->requests[reqid] = NULL;
+ spin_lock_irqsave(&ring->lock, flags);
+ bitmap_release_region(ring->requests_bmap, reqid, 0);
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ /* Unmap DMA mappings */
+ flexrm_dma_unmap(ring->mbox->dev, msg);
+
+ /* Give-back message to mailbox client */
+ msg->error = err;
+ mbox_chan_received_data(chan, msg);
+
+ /* Increment number of completions processed */
+ atomic_inc_return(&ring->msg_cmpl_count);
+ count++;
+ }
+
+ return count;
+}
+
+/* ====== FlexRM Debugfs callbacks ====== */
+
+static int flexrm_debugfs_conf_show(struct seq_file *file, void *offset)
+{
+ struct flexrm_mbox *mbox = dev_get_drvdata(file->private);
+
+ /* Write config in file */
+ flexrm_write_config_in_seqfile(mbox, file);
+
+ return 0;
+}
+
+static int flexrm_debugfs_stats_show(struct seq_file *file, void *offset)
+{
+ struct flexrm_mbox *mbox = dev_get_drvdata(file->private);
+
+ /* Write stats in file */
+ flexrm_write_stats_in_seqfile(mbox, file);
+
+ return 0;
+}
+
+/* ====== FlexRM interrupt handler ===== */
+
+static irqreturn_t flexrm_irq_event(int irq, void *dev_id)
+{
+ /* We only have MSI for completions so just wake up the IRQ thread */
+ /* Ring related errors will be informed via completion descriptors */
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t flexrm_irq_thread(int irq, void *dev_id)
+{
+ flexrm_process_completions(dev_id);
+
+ return IRQ_HANDLED;
+}
+
+/* ====== FlexRM mailbox callbacks ===== */
+
+static int flexrm_send_data(struct mbox_chan *chan, void *data)
+{
+ int i, rc;
+ struct flexrm_ring *ring = chan->con_priv;
+ struct brcm_message *msg = data;
+
+ if (msg->type == BRCM_MESSAGE_BATCH) {
+ for (i = msg->batch.msgs_queued;
+ i < msg->batch.msgs_count; i++) {
+ rc = flexrm_new_request(ring, msg,
+ &msg->batch.msgs[i]);
+ if (rc) {
+ msg->error = rc;
+ return rc;
+ }
+ msg->batch.msgs_queued++;
+ }
+ return 0;
+ }
+
+ return flexrm_new_request(ring, NULL, data);
+}
+
+static bool flexrm_peek_data(struct mbox_chan *chan)
+{
+ int cnt = flexrm_process_completions(chan->con_priv);
+
+ return (cnt > 0) ? true : false;
+}
+
+static int flexrm_startup(struct mbox_chan *chan)
+{
+ u64 d;
+ u32 val, off;
+ int ret = 0;
+ dma_addr_t next_addr;
+ struct flexrm_ring *ring = chan->con_priv;
+
+ /* Allocate BD memory */
+ ring->bd_base = dma_pool_alloc(ring->mbox->bd_pool,
+ GFP_KERNEL, &ring->bd_dma_base);
+ if (!ring->bd_base) {
+ dev_err(ring->mbox->dev,
+ "can't allocate BD memory for ring%d\n",
+ ring->num);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ /* Configure next table pointer entries in BD memory */
+ for (off = 0; off < RING_BD_SIZE; off += RING_DESC_SIZE) {
+ next_addr = off + RING_DESC_SIZE;
+ if (next_addr == RING_BD_SIZE)
+ next_addr = 0;
+ next_addr += ring->bd_dma_base;
+ if (RING_BD_ALIGN_CHECK(next_addr))
+ d = flexrm_next_table_desc(RING_BD_TOGGLE_VALID(off),
+ next_addr);
+ else
+ d = flexrm_null_desc(RING_BD_TOGGLE_INVALID(off));
+ flexrm_write_desc(ring->bd_base + off, d);
+ }
+
+ /* Allocate completion memory */
+ ring->cmpl_base = dma_pool_zalloc(ring->mbox->cmpl_pool,
+ GFP_KERNEL, &ring->cmpl_dma_base);
+ if (!ring->cmpl_base) {
+ dev_err(ring->mbox->dev,
+ "can't allocate completion memory for ring%d\n",
+ ring->num);
+ ret = -ENOMEM;
+ goto fail_free_bd_memory;
+ }
+
+ /* Request IRQ */
+ if (ring->irq == UINT_MAX) {
+ dev_err(ring->mbox->dev,
+ "ring%d IRQ not available\n", ring->num);
+ ret = -ENODEV;
+ goto fail_free_cmpl_memory;
+ }
+ ret = request_threaded_irq(ring->irq,
+ flexrm_irq_event,
+ flexrm_irq_thread,
+ 0, dev_name(ring->mbox->dev), ring);
+ if (ret) {
+ dev_err(ring->mbox->dev,
+ "failed to request ring%d IRQ\n", ring->num);
+ goto fail_free_cmpl_memory;
+ }
+ ring->irq_requested = true;
+
+ /* Set IRQ affinity hint */
+ ring->irq_aff_hint = CPU_MASK_NONE;
+ val = ring->mbox->num_rings;
+ val = (num_online_cpus() < val) ? val / num_online_cpus() : 1;
+ cpumask_set_cpu((ring->num / val) % num_online_cpus(),
+ &ring->irq_aff_hint);
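+ /*
+ * Example of the spreading above: with 32 rings and 8 online CPUs,
+ * val becomes 4, so rings 0-3 hint CPU0, rings 4-7 hint CPU1, and so on.
+ */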
+ ret = irq_set_affinity_hint(ring->irq, &ring->irq_aff_hint);
+ if (ret) {
+ dev_err(ring->mbox->dev,
+ "failed to set IRQ affinity hint for ring%d\n",
+ ring->num);
+ goto fail_free_irq;
+ }
+
+ /* Disable/inactivate ring */
+ writel_relaxed(0x0, ring->regs + RING_CONTROL);
+
+ /* Program BD start address */
+ val = BD_START_ADDR_VALUE(ring->bd_dma_base);
+ writel_relaxed(val, ring->regs + RING_BD_START_ADDR);
+
+ /* BD write pointer will be the same as the HW write pointer */
+ ring->bd_write_offset =
+ readl_relaxed(ring->regs + RING_BD_WRITE_PTR);
+ ring->bd_write_offset *= RING_DESC_SIZE;
+
+ /* Program completion start address */
+ val = CMPL_START_ADDR_VALUE(ring->cmpl_dma_base);
+ writel_relaxed(val, ring->regs + RING_CMPL_START_ADDR);
+
+ /* Completion read pointer will be the same as the HW write pointer */
+ ring->cmpl_read_offset =
+ readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR);
+ ring->cmpl_read_offset *= RING_DESC_SIZE;
+
+ /* Read ring Tx, Rx, and Outstanding counts to clear */
+ readl_relaxed(ring->regs + RING_NUM_REQ_RECV_LS);
+ readl_relaxed(ring->regs + RING_NUM_REQ_RECV_MS);
+ readl_relaxed(ring->regs + RING_NUM_REQ_TRANS_LS);
+ readl_relaxed(ring->regs + RING_NUM_REQ_TRANS_MS);
+ readl_relaxed(ring->regs + RING_NUM_REQ_OUTSTAND);
+
+ /* Configure RING_MSI_CONTROL */
+ val = 0;
+ val |= (ring->msi_timer_val << MSI_TIMER_VAL_SHIFT);
+ val |= BIT(MSI_ENABLE_SHIFT);
+ val |= (ring->msi_count_threshold & MSI_COUNT_MASK) << MSI_COUNT_SHIFT;
+ writel_relaxed(val, ring->regs + RING_MSI_CONTROL);
+
+ /* Enable/activate ring */
+ val = BIT(CONTROL_ACTIVE_SHIFT);
+ writel_relaxed(val, ring->regs + RING_CONTROL);
+
+ /* Reset stats to zero */
+ atomic_set(&ring->msg_send_count, 0);
+ atomic_set(&ring->msg_cmpl_count, 0);
+
+ return 0;
+
+fail_free_irq:
+ free_irq(ring->irq, ring);
+ ring->irq_requested = false;
+fail_free_cmpl_memory:
+ dma_pool_free(ring->mbox->cmpl_pool,
+ ring->cmpl_base, ring->cmpl_dma_base);
+ ring->cmpl_base = NULL;
+fail_free_bd_memory:
+ dma_pool_free(ring->mbox->bd_pool,
+ ring->bd_base, ring->bd_dma_base);
+ ring->bd_base = NULL;
+fail:
+ return ret;
+}
+
+static void flexrm_shutdown(struct mbox_chan *chan)
+{
+ u32 reqid;
+ unsigned int timeout;
+ struct brcm_message *msg;
+ struct flexrm_ring *ring = chan->con_priv;
+
+ /* Disable/inactivate ring */
+ writel_relaxed(0x0, ring->regs + RING_CONTROL);
+
+ /* Set ring flush state */
+ timeout = 1000; /* timeout of 1s */
+ writel_relaxed(BIT(CONTROL_FLUSH_SHIFT),
+ ring->regs + RING_CONTROL);
+ do {
+ if (readl_relaxed(ring->regs + RING_FLUSH_DONE) &
+ FLUSH_DONE_MASK)
+ break;
+ mdelay(1);
+ } while (--timeout);
+ if (!timeout)
+ dev_err(ring->mbox->dev,
+ "setting ring%d flush state timedout\n", ring->num);
+
+ /* Clear ring flush state */
+ timeout = 1000; /* timeout of 1s */
+ writel_relaxed(0x0, ring->regs + RING_CONTROL);
+ do {
+ if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) &
+ FLUSH_DONE_MASK))
+ break;
+ mdelay(1);
+ } while (--timeout);
+ if (!timeout)
+ dev_err(ring->mbox->dev,
+ "clearing ring%d flush state timedout\n", ring->num);
+
+ /* Abort all in-flight requests */
+ for (reqid = 0; reqid < RING_MAX_REQ_COUNT; reqid++) {
+ msg = ring->requests[reqid];
+ if (!msg)
+ continue;
+
+ /* Release reqid for recycling */
+ ring->requests[reqid] = NULL;
+
+ /* Unmap DMA mappings */
+ flexrm_dma_unmap(ring->mbox->dev, msg);
+
+ /* Give-back message to mailbox client */
+ msg->error = -EIO;
+ mbox_chan_received_data(chan, msg);
+ }
+
+ /* Clear requests bitmap */
+ bitmap_zero(ring->requests_bmap, RING_MAX_REQ_COUNT);
+
+ /* Release IRQ */
+ if (ring->irq_requested) {
+ irq_set_affinity_hint(ring->irq, NULL);
+ free_irq(ring->irq, ring);
+ ring->irq_requested = false;
+ }
+
+ /* Free-up completion descriptor ring */
+ if (ring->cmpl_base) {
+ dma_pool_free(ring->mbox->cmpl_pool,
+ ring->cmpl_base, ring->cmpl_dma_base);
+ ring->cmpl_base = NULL;
+ }
+
+ /* Free-up BD descriptor ring */
+ if (ring->bd_base) {
+ dma_pool_free(ring->mbox->bd_pool,
+ ring->bd_base, ring->bd_dma_base);
+ ring->bd_base = NULL;
+ }
+}
+
+static const struct mbox_chan_ops flexrm_mbox_chan_ops = {
+ .send_data = flexrm_send_data,
+ .startup = flexrm_startup,
+ .shutdown = flexrm_shutdown,
+ .peek_data = flexrm_peek_data,
+};
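+
+/*
+ * For reference, a mailbox client reaches the callbacks above through the
+ * generic mailbox API. A minimal sketch, assuming a hypothetical client
+ * device "dev" and an already prepared struct brcm_message "msg":
+ *
+ *	struct mbox_client cl = {
+ *		.dev          = dev,
+ *		.tx_block     = false,
+ *		.knows_txdone = false,
+ *	};
+ *	struct mbox_chan *chan = mbox_request_channel(&cl, 0);
+ *
+ *	if (!IS_ERR(chan))
+ *		mbox_send_message(chan, &msg);
+ */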
+
+static struct mbox_chan *flexrm_mbox_of_xlate(struct mbox_controller *cntlr,
+ const struct of_phandle_args *pa)
+{
+ struct mbox_chan *chan;
+ struct flexrm_ring *ring;
+
+ if (pa->args_count < 3)
+ return ERR_PTR(-EINVAL);
+
+ if (pa->args[0] >= cntlr->num_chans)
+ return ERR_PTR(-ENOENT);
+
+ if (pa->args[1] > MSI_COUNT_MASK)
+ return ERR_PTR(-EINVAL);
+
+ if (pa->args[2] > MSI_TIMER_VAL_MASK)
+ return ERR_PTR(-EINVAL);
+
+ chan = &cntlr->chans[pa->args[0]];
+ ring = chan->con_priv;
+ ring->msi_count_threshold = pa->args[1];
+ ring->msi_timer_val = pa->args[2];
+
+ return chan;
+}
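+
+/*
+ * The three specifier cells validated above map to: cell 0 the ring number,
+ * cell 1 the MSI count threshold and cell 2 the MSI timer value. A client
+ * device tree node would therefore carry something like (hypothetical
+ * values):
+ *
+ *	mboxes = <&flexrm_mbox 0 0x1 0xffff>;
+ */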
+
+/* ====== FlexRM platform driver ===== */
+
+static void flexrm_mbox_msi_write(struct msi_desc *desc, struct msi_msg *msg)
+{
+ struct device *dev = msi_desc_to_dev(desc);
+ struct flexrm_mbox *mbox = dev_get_drvdata(dev);
+ struct flexrm_ring *ring = &mbox->rings[desc->platform.msi_index];
+
+ /* Configure per-Ring MSI registers */
+ writel_relaxed(msg->address_lo, ring->regs + RING_MSI_ADDR_LS);
+ writel_relaxed(msg->address_hi, ring->regs + RING_MSI_ADDR_MS);
+ writel_relaxed(msg->data, ring->regs + RING_MSI_DATA_VALUE);
+}
+
+static int flexrm_mbox_probe(struct platform_device *pdev)
+{
+ int index, ret = 0;
+ void __iomem *regs;
+ void __iomem *regs_end;
+ struct msi_desc *desc;
+ struct resource *iomem;
+ struct flexrm_ring *ring;
+ struct flexrm_mbox *mbox;
+ struct device *dev = &pdev->dev;
+
+ /* Allocate driver mailbox struct */
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ mbox->dev = dev;
+ platform_set_drvdata(pdev, mbox);
+
+ /* Get resource for registers */
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iomem || (resource_size(iomem) < RING_REGS_SIZE)) {
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ /* Map registers of all rings */
+ mbox->regs = devm_ioremap_resource(&pdev->dev, iomem);
+ if (IS_ERR(mbox->regs)) {
+ ret = PTR_ERR(mbox->regs);
+ dev_err(&pdev->dev, "Failed to remap mailbox regs: %d\n", ret);
+ goto fail;
+ }
+ regs_end = mbox->regs + resource_size(iomem);
+
+ /* Scan and count available rings */
+ mbox->num_rings = 0;
+ for (regs = mbox->regs; regs < regs_end; regs += RING_REGS_SIZE) {
+ if (readl_relaxed(regs + RING_VER) == RING_VER_MAGIC)
+ mbox->num_rings++;
+ }
+ if (!mbox->num_rings) {
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ /* Allocate driver ring structs */
+ ring = devm_kcalloc(dev, mbox->num_rings, sizeof(*ring), GFP_KERNEL);
+ if (!ring) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ mbox->rings = ring;
+
+ /* Initialize members of driver ring structs */
+ regs = mbox->regs;
+ for (index = 0; index < mbox->num_rings; index++) {
+ ring = &mbox->rings[index];
+ ring->num = index;
+ ring->mbox = mbox;
+ while ((regs < regs_end) &&
+ (readl_relaxed(regs + RING_VER) != RING_VER_MAGIC))
+ regs += RING_REGS_SIZE;
+ if (regs_end <= regs) {
+ ret = -ENODEV;
+ goto fail;
+ }
+ ring->regs = regs;
+ regs += RING_REGS_SIZE;
+ ring->irq = UINT_MAX;
+ ring->irq_requested = false;
+ ring->msi_timer_val = MSI_TIMER_VAL_MASK;
+ ring->msi_count_threshold = 0x1;
+ memset(ring->requests, 0, sizeof(ring->requests));
+ ring->bd_base = NULL;
+ ring->bd_dma_base = 0;
+ ring->cmpl_base = NULL;
+ ring->cmpl_dma_base = 0;
+ atomic_set(&ring->msg_send_count, 0);
+ atomic_set(&ring->msg_cmpl_count, 0);
+ spin_lock_init(&ring->lock);
+ bitmap_zero(ring->requests_bmap, RING_MAX_REQ_COUNT);
+ ring->cmpl_read_offset = 0;
+ }
+
+ /* FlexRM is capable of 40-bit physical addresses only */
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ if (ret) {
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto fail;
+ }
+
+ /* Create DMA pool for ring BD memory */
+ mbox->bd_pool = dma_pool_create("bd", dev, RING_BD_SIZE,
+ 1 << RING_BD_ALIGN_ORDER, 0);
+ if (!mbox->bd_pool) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ /* Create DMA pool for ring completion memory */
+ mbox->cmpl_pool = dma_pool_create("cmpl", dev, RING_CMPL_SIZE,
+ 1 << RING_CMPL_ALIGN_ORDER, 0);
+ if (!mbox->cmpl_pool) {
+ ret = -ENOMEM;
+ goto fail_destroy_bd_pool;
+ }
+
+ /* Allocate platform MSIs for each ring */
+ ret = platform_msi_domain_alloc_irqs(dev, mbox->num_rings,
+ flexrm_mbox_msi_write);
+ if (ret)
+ goto fail_destroy_cmpl_pool;
+
+ /* Save alloced IRQ numbers for each ring */
+ for_each_msi_entry(desc, dev) {
+ ring = &mbox->rings[desc->platform.msi_index];
+ ring->irq = desc->irq;
+ }
+
+ /* Check availability of debugfs */
+ if (!debugfs_initialized())
+ goto skip_debugfs;
+
+ /* Create debugfs root entry */
+ mbox->root = debugfs_create_dir(dev_name(mbox->dev), NULL);
+
+ /* Create debugfs config entry */
+ debugfs_create_devm_seqfile(mbox->dev, "config", mbox->root,
+ flexrm_debugfs_conf_show);
+
+ /* Create debugfs stats entry */
+ debugfs_create_devm_seqfile(mbox->dev, "stats", mbox->root,
+ flexrm_debugfs_stats_show);
+
+skip_debugfs:
+
+ /* Initialize mailbox controller */
+ mbox->controller.txdone_irq = false;
+ mbox->controller.txdone_poll = false;
+ mbox->controller.ops = &flexrm_mbox_chan_ops;
+ mbox->controller.dev = dev;
+ mbox->controller.num_chans = mbox->num_rings;
+ mbox->controller.of_xlate = flexrm_mbox_of_xlate;
+ mbox->controller.chans = devm_kcalloc(dev, mbox->num_rings,
+ sizeof(*mbox->controller.chans), GFP_KERNEL);
+ if (!mbox->controller.chans) {
+ ret = -ENOMEM;
+ goto fail_free_debugfs_root;
+ }
+ for (index = 0; index < mbox->num_rings; index++)
+ mbox->controller.chans[index].con_priv = &mbox->rings[index];
+
+ /* Register mailbox controller */
+ ret = devm_mbox_controller_register(dev, &mbox->controller);
+ if (ret)
+ goto fail_free_debugfs_root;
+
+ dev_info(dev, "registered flexrm mailbox with %d channels\n",
+ mbox->controller.num_chans);
+
+ return 0;
+
+fail_free_debugfs_root:
+ debugfs_remove_recursive(mbox->root);
+ platform_msi_domain_free_irqs(dev);
+fail_destroy_cmpl_pool:
+ dma_pool_destroy(mbox->cmpl_pool);
+fail_destroy_bd_pool:
+ dma_pool_destroy(mbox->bd_pool);
+fail:
+ return ret;
+}
+
+static int flexrm_mbox_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct flexrm_mbox *mbox = platform_get_drvdata(pdev);
+
+ debugfs_remove_recursive(mbox->root);
+
+ platform_msi_domain_free_irqs(dev);
+
+ dma_pool_destroy(mbox->cmpl_pool);
+ dma_pool_destroy(mbox->bd_pool);
+
+ return 0;
+}
+
+static const struct of_device_id flexrm_mbox_of_match[] = {
+ { .compatible = "brcm,iproc-flexrm-mbox", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, flexrm_mbox_of_match);
+
+static struct platform_driver flexrm_mbox_driver = {
+ .driver = {
+ .name = "brcm-flexrm-mbox",
+ .of_match_table = flexrm_mbox_of_match,
+ },
+ .probe = flexrm_mbox_probe,
+ .remove = flexrm_mbox_remove,
+};
+module_platform_driver(flexrm_mbox_driver);
+
+MODULE_AUTHOR("Anup Patel <anup.patel@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom FlexRM mailbox driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/bcm-pdc-mailbox.c b/drivers/mailbox/bcm-pdc-mailbox.c
new file mode 100644
index 000000000..5b375985f
--- /dev/null
+++ b/drivers/mailbox/bcm-pdc-mailbox.c
@@ -0,0 +1,1646 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2016 Broadcom
+ */
+
+/*
+ * Broadcom PDC Mailbox Driver
+ * The PDC provides a ring based programming interface to one or more hardware
+ * offload engines. For example, the PDC driver works with both SPU-M and SPU2
+ * cryptographic offload hardware. In some chips the PDC is referred to as MDE,
+ * and in others the FA2/FA+ hardware is used with this PDC driver.
+ *
+ * The PDC driver registers with the Linux mailbox framework as a mailbox
+ * controller, once for each PDC instance. Ring 0 for each PDC is registered as
+ * a mailbox channel. The PDC driver uses interrupts to determine when data
+ * transfers to and from an offload engine are complete. Response processing
+ * is deferred to a tasklet so that it happens outside of the hard interrupt
+ * handler.
+ *
+ * The PDC driver allows multiple messages to be pending in the descriptor
+ * rings. The tx_msg_start descriptor index indicates where the last message
+ * starts. The txin_numd value at this index indicates how many descriptor
+ * indexes make up the message. Similar state is kept on the receive side. When
+ * an rx interrupt indicates a response is ready, the PDC driver processes numd
+ * descriptors from the tx and rx ring, thus processing one response at a time.
+ */
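+
+/*
+ * As an illustration of the bookkeeping described above (hypothetical sizes):
+ * a request whose source scatterlist maps to three tx descriptors and whose
+ * destination scatterlist maps to two rx descriptors occupies tx indexes
+ * tx_msg_start .. tx_msg_start + 2 with txin_numd[tx_msg_start] = 3, and rx
+ * indexes rx_msg_start .. rx_msg_start + 2 with rxin_numd = 3, since one
+ * extra rx descriptor is always posted first for the response metadata
+ * header.
+ */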
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox/brcm-message.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+
+#define PDC_SUCCESS 0
+
+#define RING_ENTRY_SIZE sizeof(struct dma64dd)
+
+/* # entries in PDC dma ring */
+#define PDC_RING_ENTRIES 512
+/*
+ * Minimum number of ring descriptor entries that must be free to tell mailbox
+ * framework that it can submit another request
+ */
+#define PDC_RING_SPACE_MIN 15
+
+#define PDC_RING_SIZE (PDC_RING_ENTRIES * RING_ENTRY_SIZE)
+/* Rings are 8k aligned */
+#define RING_ALIGN_ORDER 13
+#define RING_ALIGN BIT(RING_ALIGN_ORDER)
+
+#define RX_BUF_ALIGN_ORDER 5
+#define RX_BUF_ALIGN BIT(RX_BUF_ALIGN_ORDER)
+
+/* descriptor bumping macros */
+#define XXD(x, max_mask) ((x) & (max_mask))
+#define TXD(x, max_mask) XXD((x), (max_mask))
+#define RXD(x, max_mask) XXD((x), (max_mask))
+#define NEXTTXD(i, max_mask) TXD((i) + 1, (max_mask))
+#define PREVTXD(i, max_mask) TXD((i) - 1, (max_mask))
+#define NEXTRXD(i, max_mask) RXD((i) + 1, (max_mask))
+#define PREVRXD(i, max_mask) RXD((i) - 1, (max_mask))
+#define NTXDACTIVE(h, t, max_mask) TXD((t) - (h), (max_mask))
+#define NRXDACTIVE(h, t, max_mask) RXD((t) - (h), (max_mask))
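+
+/*
+ * Worked example of the bumping macros above, assuming the ring index mask
+ * is PDC_RING_ENTRIES - 1 = 511: with txin = 510 and txout = 2,
+ * NTXDACTIVE(510, 2, 511) = (2 - 510) & 511 = 4 descriptors in flight, and
+ * NEXTTXD(511, 511) wraps around to index 0.
+ */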
+
+/* Length of BCM header at start of SPU msg, in bytes */
+#define BCM_HDR_LEN 8
+
+/*
+ * PDC driver reserves ringset 0 on each SPU for its own use. The driver does
+ * not currently support use of multiple ringsets on a single PDC engine.
+ */
+#define PDC_RINGSET 0
+
+/*
+ * Interrupt mask and status definitions. Enable interrupts for tx and rx on
+ * ring 0
+ */
+#define PDC_RCVINT_0 (16 + PDC_RINGSET)
+#define PDC_RCVINTEN_0 BIT(PDC_RCVINT_0)
+#define PDC_INTMASK (PDC_RCVINTEN_0)
+#define PDC_LAZY_FRAMECOUNT 1
+#define PDC_LAZY_TIMEOUT 10000
+#define PDC_LAZY_INT (PDC_LAZY_TIMEOUT | (PDC_LAZY_FRAMECOUNT << 24))
+#define PDC_INTMASK_OFFSET 0x24
+#define PDC_INTSTATUS_OFFSET 0x20
+#define PDC_RCVLAZY0_OFFSET (0x30 + 4 * PDC_RINGSET)
+#define FA_RCVLAZY0_OFFSET 0x100
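+
+/*
+ * With the values above, PDC_LAZY_INT evaluates to 10000 | (1 << 24) =
+ * 0x01002710: the frame count of 1 sits at bit 24 and above, and the
+ * 10000-tick timeout occupies the low bits.
+ */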
+
+/*
+ * For SPU2, configure MDE_CKSUM_CONTROL to write 17 bytes of metadata
+ * before frame
+ */
+#define PDC_SPU2_RESP_HDR_LEN 17
+#define PDC_CKSUM_CTRL BIT(27)
+#define PDC_CKSUM_CTRL_OFFSET 0x400
+
+#define PDC_SPUM_RESP_HDR_LEN 32
+
+/*
+ * Sets the following bits for write to transmit control reg:
+ * 11 - PtyChkDisable - parity check is disabled
+ * 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
+ */
+#define PDC_TX_CTL 0x000C0800
+
+/* Bit in tx control reg to enable tx channel */
+#define PDC_TX_ENABLE 0x1
+
+/*
+ * Sets the following bits for write to receive control reg:
+ * 7:1 - RcvOffset - size in bytes of status region at start of rx frame buf
+ * 9 - SepRxHdrDescEn - place start of new frames only in descriptors
+ * that have StartOfFrame set
+ * 10 - OflowContinue - on rx FIFO overflow, clear rx fifo, discard all
+ * remaining bytes in current frame, report error
+ * in rx frame status for current frame
+ * 11 - PtyChkDisable - parity check is disabled
+ * 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
+ */
+#define PDC_RX_CTL 0x000C0E00
+
+/* Bit in rx control reg to enable rx channel */
+#define PDC_RX_ENABLE 0x1
+
+#define CRYPTO_D64_RS0_CD_MASK ((PDC_RING_ENTRIES * RING_ENTRY_SIZE) - 1)
+
+/* descriptor flags */
+#define D64_CTRL1_EOT BIT(28) /* end of descriptor table */
+#define D64_CTRL1_IOC BIT(29) /* interrupt on complete */
+#define D64_CTRL1_EOF BIT(30) /* end of frame */
+#define D64_CTRL1_SOF BIT(31) /* start of frame */
+
+#define RX_STATUS_OVERFLOW 0x00800000
+#define RX_STATUS_LEN 0x0000FFFF
+
+#define PDC_TXREGS_OFFSET 0x200
+#define PDC_RXREGS_OFFSET 0x220
+
+/* Maximum size buffer the DMA engine can handle */
+#define PDC_DMA_BUF_MAX 16384
+
+enum pdc_hw {
+ FA_HW, /* FA2/FA+ hardware (i.e. Northstar Plus) */
+ PDC_HW /* PDC/MDE hardware (i.e. Northstar 2, Pegasus) */
+};
+
+struct pdc_dma_map {
+ void *ctx; /* opaque context associated with frame */
+};
+
+/* dma descriptor */
+struct dma64dd {
+ u32 ctrl1; /* misc control bits */
+ u32 ctrl2; /* buffer count and address extension */
+ u32 addrlow; /* memory address of the data buffer, bits 31:0 */
+ u32 addrhigh; /* memory address of the data buffer, bits 63:32 */
+};
+
+/* dma registers per channel(xmt or rcv) */
+struct dma64_regs {
+ u32 control; /* enable, et al */
+ u32 ptr; /* last descriptor posted to chip */
+ u32 addrlow; /* descriptor ring base address low 32-bits */
+ u32 addrhigh; /* descriptor ring base address bits 63:32 */
+ u32 status0; /* last rx descriptor written by hw */
+ u32 status1; /* driver does not use */
+};
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif /* PAD */
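+
+/*
+ * The macros above paste the current source line number into the identifier,
+ * so each "u32 PAD[...]" member below expands to a uniquely named field,
+ * e.g. a PAD member on line 173 would become "u32 pad173[...]".
+ */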
+
+/* dma registers. matches hw layout. */
+struct dma64 {
+ struct dma64_regs dmaxmt; /* dma tx */
+ u32 PAD[2];
+ struct dma64_regs dmarcv; /* dma rx */
+ u32 PAD[2];
+};
+
+/* PDC registers */
+struct pdc_regs {
+ u32 devcontrol; /* 0x000 */
+ u32 devstatus; /* 0x004 */
+ u32 PAD;
+ u32 biststatus; /* 0x00c */
+ u32 PAD[4];
+ u32 intstatus; /* 0x020 */
+ u32 intmask; /* 0x024 */
+ u32 gptimer; /* 0x028 */
+
+ u32 PAD;
+ u32 intrcvlazy_0; /* 0x030 (Only in PDC, not FA2) */
+ u32 intrcvlazy_1; /* 0x034 (Only in PDC, not FA2) */
+ u32 intrcvlazy_2; /* 0x038 (Only in PDC, not FA2) */
+ u32 intrcvlazy_3; /* 0x03c (Only in PDC, not FA2) */
+
+ u32 PAD[48];
+ u32 fa_intrecvlazy; /* 0x100 (Only in FA2, not PDC) */
+ u32 flowctlthresh; /* 0x104 */
+ u32 wrrthresh; /* 0x108 */
+ u32 gmac_idle_cnt_thresh; /* 0x10c */
+
+ u32 PAD[4];
+ u32 ifioaccessaddr; /* 0x120 */
+ u32 ifioaccessbyte; /* 0x124 */
+ u32 ifioaccessdata; /* 0x128 */
+
+ u32 PAD[21];
+ u32 phyaccess; /* 0x180 */
+ u32 PAD;
+ u32 phycontrol; /* 0x188 */
+ u32 txqctl; /* 0x18c */
+ u32 rxqctl; /* 0x190 */
+ u32 gpioselect; /* 0x194 */
+ u32 gpio_output_en; /* 0x198 */
+ u32 PAD; /* 0x19c */
+ u32 txq_rxq_mem_ctl; /* 0x1a0 */
+ u32 memory_ecc_status; /* 0x1a4 */
+ u32 serdes_ctl; /* 0x1a8 */
+ u32 serdes_status0; /* 0x1ac */
+ u32 serdes_status1; /* 0x1b0 */
+ u32 PAD[11]; /* 0x1b4-1dc */
+ u32 clk_ctl_st; /* 0x1e0 */
+ u32 hw_war; /* 0x1e4 (Only in PDC, not FA2) */
+ u32 pwrctl; /* 0x1e8 */
+ u32 PAD[5];
+
+#define PDC_NUM_DMA_RINGS 4
+ struct dma64 dmaregs[PDC_NUM_DMA_RINGS]; /* 0x0200 - 0x2fc */
+
+ /* more registers follow, but we don't use them */
+};
+
+/* structure for allocating/freeing DMA rings */
+struct pdc_ring_alloc {
+ dma_addr_t dmabase; /* DMA address of start of ring */
+ void *vbase; /* base kernel virtual address of ring */
+ u32 size; /* ring allocation size in bytes */
+};
+
+/*
+ * context associated with a receive descriptor.
+ * @rxp_ctx: opaque context associated with frame that starts at each
+ * rx ring index.
+ * @dst_sg: Scatterlist used to form reply frames beginning at a given ring
+ * index. Retained in order to unmap each sg after reply is processed.
+ * @rxin_numd: Number of rx descriptors associated with the message that starts
+ * at a descriptor index. Not set for every index. For example,
+ * if descriptor index i points to a scatterlist with 4 entries,
+ * then the next three descriptor indexes don't have a value set.
+ * @resp_hdr: Virtual address of buffer used to catch DMA rx status
+ * @resp_hdr_daddr: physical address of DMA rx status buffer
+ */
+struct pdc_rx_ctx {
+ void *rxp_ctx;
+ struct scatterlist *dst_sg;
+ u32 rxin_numd;
+ void *resp_hdr;
+ dma_addr_t resp_hdr_daddr;
+};
+
+/* PDC state structure */
+struct pdc_state {
+ /* Index of the PDC whose state is in this structure instance */
+ u8 pdc_idx;
+
+ /* Platform device for this PDC instance */
+ struct platform_device *pdev;
+
+ /*
+ * Each PDC instance has a mailbox controller. PDC receives request
+ * messages through mailboxes, and sends response messages through the
+ * mailbox framework.
+ */
+ struct mbox_controller mbc;
+
+ unsigned int pdc_irq;
+
+ /* tasklet for deferred processing after DMA rx interrupt */
+ struct tasklet_struct rx_tasklet;
+
+ /* Number of bytes of receive status prior to each rx frame */
+ u32 rx_status_len;
+ /* Whether a BCM header is prepended to each frame */
+ bool use_bcm_hdr;
+ /* Sum of length of BCM header and rx status header */
+ u32 pdc_resp_hdr_len;
+
+ /* The base virtual address of DMA hw registers */
+ void __iomem *pdc_reg_vbase;
+
+ /* Pool for allocation of DMA rings */
+ struct dma_pool *ring_pool;
+
+ /* Pool for allocation of metadata buffers for response messages */
+ struct dma_pool *rx_buf_pool;
+
+ /*
+ * The base virtual address of DMA tx/rx descriptor rings. Corresponding
+ * DMA address and size of ring allocation.
+ */
+ struct pdc_ring_alloc tx_ring_alloc;
+ struct pdc_ring_alloc rx_ring_alloc;
+
+ struct pdc_regs *regs; /* start of PDC registers */
+
+ struct dma64_regs *txregs_64; /* dma tx engine registers */
+ struct dma64_regs *rxregs_64; /* dma rx engine registers */
+
+ /*
+ * Arrays of PDC_RING_ENTRIES descriptors
+ * To use multiple ringsets, this needs to be extended
+ */
+ struct dma64dd *txd_64; /* tx descriptor ring */
+ struct dma64dd *rxd_64; /* rx descriptor ring */
+
+ /* descriptor ring sizes */
+ u32 ntxd; /* # tx descriptors */
+ u32 nrxd; /* # rx descriptors */
+ u32 nrxpost; /* # rx buffers to keep posted */
+ u32 ntxpost; /* max number of tx buffers that can be posted */
+
+ /*
+ * Index of next tx descriptor to reclaim. That is, the descriptor
+ * index of the oldest tx buffer for which the host has yet to process
+ * the corresponding response.
+ */
+ u32 txin;
+
+ /*
+ * Index of the first transmit descriptor for the sequence of
+ * message fragments currently under construction. Used to build up
+ * the txin_numd count for a message. Updated to txout when the host
+ * starts a new sequence of tx buffers for a new message.
+ */
+ u32 tx_msg_start;
+
+ /* Index of next tx descriptor to post. */
+ u32 txout;
+
+ /*
+ * Number of tx descriptors associated with the message that starts
+ * at this tx descriptor index.
+ */
+ u32 txin_numd[PDC_RING_ENTRIES];
+
+ /*
+ * Index of next rx descriptor to reclaim. This is the index of
+ * the next descriptor whose data has yet to be processed by the host.
+ */
+ u32 rxin;
+
+ /*
+ * Index of the first receive descriptor for the sequence of
+ * message fragments currently under construction. Used to build up
+ * the rxin_numd count for a message. Updated to rxout when the host
+ * starts a new sequence of rx buffers for a new message.
+ */
+ u32 rx_msg_start;
+
+ /*
+ * Saved value of current hardware rx descriptor index.
+ * The last rx buffer written by the hw is the index previous to
+ * this one.
+ */
+ u32 last_rx_curr;
+
+ /* Index of next rx descriptor to post. */
+ u32 rxout;
+
+ struct pdc_rx_ctx rx_ctx[PDC_RING_ENTRIES];
+
+ /*
+ * Scatterlists used to form request and reply frames beginning at a
+ * given ring index. Retained in order to unmap each sg after reply
+ * is processed
+ */
+ struct scatterlist *src_sg[PDC_RING_ENTRIES];
+
+ /* counters */
+ u32 pdc_requests; /* number of request messages submitted */
+ u32 pdc_replies; /* number of reply messages received */
+ u32 last_tx_not_done; /* too few tx descriptors to indicate done */
+ u32 tx_ring_full; /* unable to accept msg because tx ring full */
+ u32 rx_ring_full; /* unable to accept msg because rx ring full */
+ u32 txnobuf; /* unable to create tx descriptor */
+ u32 rxnobuf; /* unable to create rx descriptor */
+ u32 rx_oflow; /* count of rx overflows */
+
+ /* hardware type - FA2 or PDC/MDE */
+ enum pdc_hw hw_type;
+};
+
+/* Global variables */
+
+struct pdc_globals {
+ /* Actual number of SPUs in hardware, as reported by device tree */
+ u32 num_spu;
+};
+
+static struct pdc_globals pdcg;
+
+/* top level debug FS directory for PDC driver */
+static struct dentry *debugfs_dir;
+
+static ssize_t pdc_debugfs_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *offp)
+{
+ struct pdc_state *pdcs;
+ char *buf;
+ ssize_t ret, out_offset, out_count;
+
+ out_count = 512;
+
+ buf = kmalloc(out_count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pdcs = filp->private_data;
+ out_offset = 0;
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
+ "SPU %u stats:\n", pdcs->pdc_idx);
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
+ "PDC requests....................%u\n",
+ pdcs->pdc_requests);
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
+ "PDC responses...................%u\n",
+ pdcs->pdc_replies);
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
+ "Tx not done.....................%u\n",
+ pdcs->last_tx_not_done);
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
+ "Tx ring full....................%u\n",
+ pdcs->tx_ring_full);
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
+ "Rx ring full....................%u\n",
+ pdcs->rx_ring_full);
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
+ "Tx desc write fail. Ring full...%u\n",
+ pdcs->txnobuf);
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
+ "Rx desc write fail. Ring full...%u\n",
+ pdcs->rxnobuf);
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
+ "Receive overflow................%u\n",
+ pdcs->rx_oflow);
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
+ "Num frags in rx ring............%u\n",
+ NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr,
+ pdcs->nrxpost));
+
+ if (out_offset > out_count)
+ out_offset = out_count;
+
+ ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
+ kfree(buf);
+ return ret;
+}
+
+static const struct file_operations pdc_debugfs_stats = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = pdc_debugfs_read,
+};
+
+/**
+ * pdc_setup_debugfs() - Create the debug FS directories. If the top-level
+ * directory has not yet been created, create it now. Create a stats file in
+ * this directory for a SPU.
+ * @pdcs: PDC state structure
+ */
+static void pdc_setup_debugfs(struct pdc_state *pdcs)
+{
+ char spu_stats_name[16];
+
+ if (!debugfs_initialized())
+ return;
+
+ snprintf(spu_stats_name, 16, "pdc%d_stats", pdcs->pdc_idx);
+ if (!debugfs_dir)
+ debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+
+ /* S_IRUSR == 0400 */
+ debugfs_create_file(spu_stats_name, 0400, debugfs_dir, pdcs,
+ &pdc_debugfs_stats);
+}
+
+static void pdc_free_debugfs(void)
+{
+ debugfs_remove_recursive(debugfs_dir);
+ debugfs_dir = NULL;
+}
+
+/**
+ * pdc_build_rxd() - Build DMA descriptor to receive SPU result.
+ * @pdcs: PDC state for SPU that will generate result
+ * @dma_addr: DMA address of buffer that descriptor is being built for
+ * @buf_len: Length of the receive buffer, in bytes
+ * @flags: Flags to be stored in descriptor
+ */
+static inline void
+pdc_build_rxd(struct pdc_state *pdcs, dma_addr_t dma_addr,
+ u32 buf_len, u32 flags)
+{
+ struct device *dev = &pdcs->pdev->dev;
+ struct dma64dd *rxd = &pdcs->rxd_64[pdcs->rxout];
+
+ dev_dbg(dev,
+ "Writing rx descriptor for PDC %u at index %u with length %u. flags %#x\n",
+ pdcs->pdc_idx, pdcs->rxout, buf_len, flags);
+
+ rxd->addrlow = cpu_to_le32(lower_32_bits(dma_addr));
+ rxd->addrhigh = cpu_to_le32(upper_32_bits(dma_addr));
+ rxd->ctrl1 = cpu_to_le32(flags);
+ rxd->ctrl2 = cpu_to_le32(buf_len);
+
+ /* bump ring index and return */
+ pdcs->rxout = NEXTRXD(pdcs->rxout, pdcs->nrxpost);
+}
+
+/**
+ * pdc_build_txd() - Build a DMA descriptor to transmit a SPU request to
+ * hardware.
+ * @pdcs: PDC state for the SPU that will process this request
+ * @dma_addr: DMA address of packet to be transmitted
+ * @buf_len: Length of tx buffer, in bytes
+ * @flags: Flags to be stored in descriptor
+ */
+static inline void
+pdc_build_txd(struct pdc_state *pdcs, dma_addr_t dma_addr, u32 buf_len,
+ u32 flags)
+{
+ struct device *dev = &pdcs->pdev->dev;
+ struct dma64dd *txd = &pdcs->txd_64[pdcs->txout];
+
+ dev_dbg(dev,
+ "Writing tx descriptor for PDC %u at index %u with length %u, flags %#x\n",
+ pdcs->pdc_idx, pdcs->txout, buf_len, flags);
+
+ txd->addrlow = cpu_to_le32(lower_32_bits(dma_addr));
+ txd->addrhigh = cpu_to_le32(upper_32_bits(dma_addr));
+ txd->ctrl1 = cpu_to_le32(flags);
+ txd->ctrl2 = cpu_to_le32(buf_len);
+
+ /* bump ring index and return */
+ pdcs->txout = NEXTTXD(pdcs->txout, pdcs->ntxpost);
+}
+
+/**
+ * pdc_receive_one() - Receive a response message from a given SPU.
+ * @pdcs: PDC state for the SPU to receive from
+ *
+ * When the return code indicates success, the response message is available in
+ * the receive buffers provided prior to submission of the request.
+ *
+ * Return: PDC_SUCCESS if one or more receive descriptors was processed
+ * -EAGAIN indicates that no response message is available
+ * -EIO an error occurred
+ */
+static int
+pdc_receive_one(struct pdc_state *pdcs)
+{
+ struct device *dev = &pdcs->pdev->dev;
+ struct mbox_controller *mbc;
+ struct mbox_chan *chan;
+ struct brcm_message mssg;
+ u32 len, rx_status;
+ u32 num_frags;
+ u8 *resp_hdr; /* virtual addr of start of resp message DMA header */
+ u32 frags_rdy; /* number of fragments ready to read */
+ u32 rx_idx; /* ring index of start of receive frame */
+ dma_addr_t resp_hdr_daddr;
+ struct pdc_rx_ctx *rx_ctx;
+
+ mbc = &pdcs->mbc;
+ chan = &mbc->chans[0];
+ mssg.type = BRCM_MESSAGE_SPU;
+
+ /*
+ * return if a complete response message is not yet ready.
+ * rx_ctx[rxin].rxin_numd is the number of fragments in the next msg
+ * to read.
+ */
+ frags_rdy = NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr, pdcs->nrxpost);
+ if ((frags_rdy == 0) ||
+ (frags_rdy < pdcs->rx_ctx[pdcs->rxin].rxin_numd))
+ /* No response ready */
+ return -EAGAIN;
+
+ num_frags = pdcs->txin_numd[pdcs->txin];
+ WARN_ON(num_frags == 0);
+
+ dma_unmap_sg(dev, pdcs->src_sg[pdcs->txin],
+ sg_nents(pdcs->src_sg[pdcs->txin]), DMA_TO_DEVICE);
+
+ pdcs->txin = (pdcs->txin + num_frags) & pdcs->ntxpost;
+
+ dev_dbg(dev, "PDC %u reclaimed %d tx descriptors",
+ pdcs->pdc_idx, num_frags);
+
+ rx_idx = pdcs->rxin;
+ rx_ctx = &pdcs->rx_ctx[rx_idx];
+ num_frags = rx_ctx->rxin_numd;
+ /* Return opaque context with result */
+ mssg.ctx = rx_ctx->rxp_ctx;
+ rx_ctx->rxp_ctx = NULL;
+ resp_hdr = rx_ctx->resp_hdr;
+ resp_hdr_daddr = rx_ctx->resp_hdr_daddr;
+ dma_unmap_sg(dev, rx_ctx->dst_sg, sg_nents(rx_ctx->dst_sg),
+ DMA_FROM_DEVICE);
+
+ pdcs->rxin = (pdcs->rxin + num_frags) & pdcs->nrxpost;
+
+ dev_dbg(dev, "PDC %u reclaimed %d rx descriptors",
+ pdcs->pdc_idx, num_frags);
+
+ dev_dbg(dev,
+ "PDC %u txin %u, txout %u, rxin %u, rxout %u, last_rx_curr %u\n",
+ pdcs->pdc_idx, pdcs->txin, pdcs->txout, pdcs->rxin,
+ pdcs->rxout, pdcs->last_rx_curr);
+
+ if (pdcs->pdc_resp_hdr_len == PDC_SPUM_RESP_HDR_LEN) {
+ /*
+ * For SPU-M, get length of response msg and rx overflow status.
+ */
+ rx_status = *((u32 *)resp_hdr);
+ len = rx_status & RX_STATUS_LEN;
+ dev_dbg(dev,
+ "SPU response length %u bytes", len);
+ if (unlikely(((rx_status & RX_STATUS_OVERFLOW) || (!len)))) {
+ if (rx_status & RX_STATUS_OVERFLOW) {
+ dev_err_ratelimited(dev,
+ "crypto receive overflow");
+ pdcs->rx_oflow++;
+ } else {
+ dev_info_ratelimited(dev, "crypto rx len = 0");
+ }
+ return -EIO;
+ }
+ }
+
+ dma_pool_free(pdcs->rx_buf_pool, resp_hdr, resp_hdr_daddr);
+
+ mbox_chan_received_data(chan, &mssg);
+
+ pdcs->pdc_replies++;
+ return PDC_SUCCESS;
+}
+
+/**
+ * pdc_receive() - Process as many responses as are available in the rx ring.
+ * @pdcs: PDC state
+ *
+ * Called from the rx tasklet after a receive interrupt.
+ *
+ * Return: 0
+ */
+static int
+pdc_receive(struct pdc_state *pdcs)
+{
+ int rx_status;
+
+ /* read last_rx_curr from register once */
+ pdcs->last_rx_curr =
+ (ioread32((const void __iomem *)&pdcs->rxregs_64->status0) &
+ CRYPTO_D64_RS0_CD_MASK) / RING_ENTRY_SIZE;
+
+ do {
+ /* Could be many frames ready */
+ rx_status = pdc_receive_one(pdcs);
+ } while (rx_status == PDC_SUCCESS);
+
+ return 0;
+}
+
+/**
+ * pdc_tx_list_sg_add() - Add the buffers in a scatterlist to the transmit
+ * descriptors for a given SPU. The scatterlist buffers contain the data for a
+ * SPU request message.
+ * @pdcs: PDC state for the SPU that will process this request
+ * @sg: Scatterlist whose buffers contain part of the SPU request
+ *
+ * If a scatterlist buffer is larger than PDC_DMA_BUF_MAX, multiple descriptors
+ * are written for that buffer, each <= PDC_DMA_BUF_MAX bytes in length.
+ *
+ * Return: PDC_SUCCESS if successful
+ * < 0 otherwise
+ */
+static int pdc_tx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
+{
+ u32 flags = 0;
+ u32 eot;
+ u32 tx_avail;
+
+ /*
+ * Num descriptors needed. Conservatively assume we need a descriptor
+ * for every entry in sg.
+ */
+ u32 num_desc;
+ u32 desc_w = 0; /* Number of tx descriptors written */
+ u32 bufcnt; /* Number of bytes of buffer pointed to by descriptor */
+ dma_addr_t databufptr; /* DMA address to put in descriptor */
+
+ num_desc = (u32)sg_nents(sg);
+
+ /* check whether enough tx descriptors are available */
+ tx_avail = pdcs->ntxpost - NTXDACTIVE(pdcs->txin, pdcs->txout,
+ pdcs->ntxpost);
+ if (unlikely(num_desc > tx_avail)) {
+ pdcs->txnobuf++;
+ return -ENOSPC;
+ }
+
+ /* build tx descriptors */
+ if (pdcs->tx_msg_start == pdcs->txout) {
+ /* Start of frame */
+ pdcs->txin_numd[pdcs->tx_msg_start] = 0;
+ pdcs->src_sg[pdcs->txout] = sg;
+ flags = D64_CTRL1_SOF;
+ }
+
+ while (sg) {
+ if (unlikely(pdcs->txout == (pdcs->ntxd - 1)))
+ eot = D64_CTRL1_EOT;
+ else
+ eot = 0;
+
+ /*
+ * If sg buffer larger than PDC limit, split across
+ * multiple descriptors
+ */
+ bufcnt = sg_dma_len(sg);
+ databufptr = sg_dma_address(sg);
+ while (bufcnt > PDC_DMA_BUF_MAX) {
+ pdc_build_txd(pdcs, databufptr, PDC_DMA_BUF_MAX,
+ flags | eot);
+ desc_w++;
+ bufcnt -= PDC_DMA_BUF_MAX;
+ databufptr += PDC_DMA_BUF_MAX;
+ if (unlikely(pdcs->txout == (pdcs->ntxd - 1)))
+ eot = D64_CTRL1_EOT;
+ else
+ eot = 0;
+ }
+ sg = sg_next(sg);
+ if (!sg)
+ /* Writing last descriptor for frame */
+ flags |= (D64_CTRL1_EOF | D64_CTRL1_IOC);
+ pdc_build_txd(pdcs, databufptr, bufcnt, flags | eot);
+ desc_w++;
+ /* Clear start of frame after first descriptor */
+ flags &= ~D64_CTRL1_SOF;
+ }
+ pdcs->txin_numd[pdcs->tx_msg_start] += desc_w;
+
+ return PDC_SUCCESS;
+}
+
+/**
+ * pdc_tx_list_final() - Initiate DMA transfer of last frame written to tx
+ * ring.
+ * @pdcs: PDC state for SPU to process the request
+ *
+ * Sets the index of the last descriptor written in both the rx and tx ring.
+ * The hardware ptr registers take byte offsets, so each descriptor index is
+ * shifted left by 4 (multiplied by the 16-byte descriptor size) before it is
+ * written.
+ *
+ * Return: PDC_SUCCESS
+ */
+static int pdc_tx_list_final(struct pdc_state *pdcs)
+{
+ /*
+ * write barrier to ensure all register writes are complete
+ * before chip starts to process new request
+ */
+ wmb();
+ iowrite32(pdcs->rxout << 4, &pdcs->rxregs_64->ptr);
+ iowrite32(pdcs->txout << 4, &pdcs->txregs_64->ptr);
+ pdcs->pdc_requests++;
+
+ return PDC_SUCCESS;
+}
+
+/**
+ * pdc_rx_list_init() - Start a new receive descriptor list for a given PDC.
+ * @pdcs: PDC state for SPU handling request
+ * @dst_sg: scatterlist providing rx buffers for response to be returned to
+ * mailbox client
+ * @ctx: Opaque context for this request
+ *
+ * Posts a single receive descriptor to hold the metadata that precedes a
+ * response. For example, with SPU-M, the metadata is a 32-byte DMA header and
+ * an 8-byte BCM header. Moves the msg_start descriptor indexes for both tx and
+ * rx to indicate the start of a new message.
+ *
+ * Return: PDC_SUCCESS if successful
+ * < 0 if an error (e.g., rx ring is full)
+ */
+static int pdc_rx_list_init(struct pdc_state *pdcs, struct scatterlist *dst_sg,
+ void *ctx)
+{
+ u32 flags = 0;
+ u32 rx_avail;
+ u32 rx_pkt_cnt = 1; /* Adding a single rx buffer */
+ dma_addr_t daddr;
+ void *vaddr;
+ struct pdc_rx_ctx *rx_ctx;
+
+ rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
+ pdcs->nrxpost);
+ if (unlikely(rx_pkt_cnt > rx_avail)) {
+ pdcs->rxnobuf++;
+ return -ENOSPC;
+ }
+
+ /* allocate a buffer for the dma rx status */
+ vaddr = dma_pool_zalloc(pdcs->rx_buf_pool, GFP_ATOMIC, &daddr);
+ if (unlikely(!vaddr))
+ return -ENOMEM;
+
+ /*
+ * Update msg_start indexes for both tx and rx to indicate the start
+ * of a new sequence of descriptor indexes that contain the fragments
+ * of the same message.
+ */
+ pdcs->rx_msg_start = pdcs->rxout;
+ pdcs->tx_msg_start = pdcs->txout;
+
+ /* This is always the first descriptor in the receive sequence */
+ flags = D64_CTRL1_SOF;
+ pdcs->rx_ctx[pdcs->rx_msg_start].rxin_numd = 1;
+
+ if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
+ flags |= D64_CTRL1_EOT;
+
+ rx_ctx = &pdcs->rx_ctx[pdcs->rxout];
+ rx_ctx->rxp_ctx = ctx;
+ rx_ctx->dst_sg = dst_sg;
+ rx_ctx->resp_hdr = vaddr;
+ rx_ctx->resp_hdr_daddr = daddr;
+ pdc_build_rxd(pdcs, daddr, pdcs->pdc_resp_hdr_len, flags);
+ return PDC_SUCCESS;
+}
+
+/**
+ * pdc_rx_list_sg_add() - Add the buffers in a scatterlist to the receive
+ * descriptors for a given SPU. The caller must have already DMA mapped the
+ * scatterlist.
+ * @pdcs: PDC state for the SPU whose rx ring receives the buffers
+ * @sg: Scatterlist whose buffers are added to the receive ring
+ *
+ * If a receive buffer in the scatterlist is larger than PDC_DMA_BUF_MAX,
+ * multiple receive descriptors are written, each with a buffer <=
+ * PDC_DMA_BUF_MAX.
+ *
+ * Return: PDC_SUCCESS if successful
+ * < 0 otherwise (e.g., receive ring is full)
+ */
+static int pdc_rx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
+{
+ u32 flags = 0;
+ u32 rx_avail;
+
+ /*
+ * Num descriptors needed. Conservatively assume we need a descriptor
+ * for every entry from our starting point in the scatterlist.
+ */
+ u32 num_desc;
+ u32 desc_w = 0; /* Number of rx descriptors written */
+ u32 bufcnt; /* Number of bytes of buffer pointed to by descriptor */
+ dma_addr_t databufptr; /* DMA address to put in descriptor */
+
+ num_desc = (u32)sg_nents(sg);
+
+ rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
+ pdcs->nrxpost);
+ if (unlikely(num_desc > rx_avail)) {
+ pdcs->rxnobuf++;
+ return -ENOSPC;
+ }
+
+ while (sg) {
+ if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
+ flags = D64_CTRL1_EOT;
+ else
+ flags = 0;
+
+ /*
+ * If sg buffer larger than PDC limit, split across
+ * multiple descriptors
+ */
+ bufcnt = sg_dma_len(sg);
+ databufptr = sg_dma_address(sg);
+ while (bufcnt > PDC_DMA_BUF_MAX) {
+ pdc_build_rxd(pdcs, databufptr, PDC_DMA_BUF_MAX, flags);
+ desc_w++;
+ bufcnt -= PDC_DMA_BUF_MAX;
+ databufptr += PDC_DMA_BUF_MAX;
+ if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
+ flags = D64_CTRL1_EOT;
+ else
+ flags = 0;
+ }
+ pdc_build_rxd(pdcs, databufptr, bufcnt, flags);
+ desc_w++;
+ sg = sg_next(sg);
+ }
+ pdcs->rx_ctx[pdcs->rx_msg_start].rxin_numd += desc_w;
+
+ return PDC_SUCCESS;
+}
+
+/**
+ * pdc_irq_handler() - Interrupt handler called in interrupt context.
+ * @irq: Interrupt number that has fired
+ * @data: device struct for DMA engine that generated the interrupt
+ *
+ * We have to clear the device interrupt status flags here, mask further
+ * interrupts, and then schedule the rx tasklet to do the deferred response
+ * processing. The tasklet re-enables the interrupts when it is done.
+ *
+ * Return: IRQ_HANDLED if the interrupt was ours
+ * IRQ_NONE otherwise
+ */
+static irqreturn_t pdc_irq_handler(int irq, void *data)
+{
+ struct device *dev = (struct device *)data;
+ struct pdc_state *pdcs = dev_get_drvdata(dev);
+ u32 intstatus = ioread32(pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);
+
+ if (unlikely(intstatus == 0))
+ return IRQ_NONE;
+
+ /* Disable interrupts until soft handler runs */
+ iowrite32(0, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);
+
+ /* Clear interrupt flags in device */
+ iowrite32(intstatus, pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);
+
+ /* Wakeup IRQ thread */
+ tasklet_schedule(&pdcs->rx_tasklet);
+ return IRQ_HANDLED;
+}
+
+/**
+ * pdc_tasklet_cb() - Tasklet callback that runs the deferred processing after
+ * a DMA receive interrupt. Reenables the receive interrupt.
+ * @t: Pointer to the rx tasklet embedded in the PDC state structure
+ */
+static void pdc_tasklet_cb(struct tasklet_struct *t)
+{
+ struct pdc_state *pdcs = from_tasklet(pdcs, t, rx_tasklet);
+
+ pdc_receive(pdcs);
+
+ /* reenable interrupts */
+ iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);
+}
+
+/**
+ * pdc_ring_init() - Allocate DMA rings and initialize constant fields of
+ * descriptors in one ringset.
+ * @pdcs: PDC instance state
+ * @ringset: index of ringset being used
+ *
+ * Return: PDC_SUCCESS if ring initialized
+ * < 0 otherwise
+ */
+static int pdc_ring_init(struct pdc_state *pdcs, int ringset)
+{
+ int i;
+ int err = PDC_SUCCESS;
+ struct dma64 *dma_reg;
+ struct device *dev = &pdcs->pdev->dev;
+ struct pdc_ring_alloc tx;
+ struct pdc_ring_alloc rx;
+
+ /* Allocate tx ring */
+ tx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &tx.dmabase);
+ if (unlikely(!tx.vbase)) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ /* Allocate rx ring */
+ rx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &rx.dmabase);
+ if (unlikely(!rx.vbase)) {
+ err = -ENOMEM;
+ goto fail_dealloc;
+ }
+
+ dev_dbg(dev, " - base DMA addr of tx ring %pad", &tx.dmabase);
+ dev_dbg(dev, " - base virtual addr of tx ring %p", tx.vbase);
+ dev_dbg(dev, " - base DMA addr of rx ring %pad", &rx.dmabase);
+ dev_dbg(dev, " - base virtual addr of rx ring %p", rx.vbase);
+
+ memcpy(&pdcs->tx_ring_alloc, &tx, sizeof(tx));
+ memcpy(&pdcs->rx_ring_alloc, &rx, sizeof(rx));
+
+ pdcs->rxin = 0;
+ pdcs->rx_msg_start = 0;
+ pdcs->last_rx_curr = 0;
+ pdcs->rxout = 0;
+ pdcs->txin = 0;
+ pdcs->tx_msg_start = 0;
+ pdcs->txout = 0;
+
+ /* Set descriptor array base addresses */
+ pdcs->txd_64 = (struct dma64dd *)pdcs->tx_ring_alloc.vbase;
+ pdcs->rxd_64 = (struct dma64dd *)pdcs->rx_ring_alloc.vbase;
+
+ /* Tell device the base DMA address of each ring */
+ dma_reg = &pdcs->regs->dmaregs[ringset];
+
+ /* But first disable DMA and set curptr to 0 for both TX & RX */
+ iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
+ iowrite32((PDC_RX_CTL + (pdcs->rx_status_len << 1)),
+ &dma_reg->dmarcv.control);
+ iowrite32(0, &dma_reg->dmaxmt.ptr);
+ iowrite32(0, &dma_reg->dmarcv.ptr);
+
+ /* Set base DMA addresses */
+ iowrite32(lower_32_bits(pdcs->tx_ring_alloc.dmabase),
+ &dma_reg->dmaxmt.addrlow);
+ iowrite32(upper_32_bits(pdcs->tx_ring_alloc.dmabase),
+ &dma_reg->dmaxmt.addrhigh);
+
+ iowrite32(lower_32_bits(pdcs->rx_ring_alloc.dmabase),
+ &dma_reg->dmarcv.addrlow);
+ iowrite32(upper_32_bits(pdcs->rx_ring_alloc.dmabase),
+ &dma_reg->dmarcv.addrhigh);
+
+ /* Re-enable DMA */
+ iowrite32(PDC_TX_CTL | PDC_TX_ENABLE, &dma_reg->dmaxmt.control);
+ iowrite32((PDC_RX_CTL | PDC_RX_ENABLE | (pdcs->rx_status_len << 1)),
+ &dma_reg->dmarcv.control);
+
+ /* Initialize descriptors */
+ for (i = 0; i < PDC_RING_ENTRIES; i++) {
+ /* Every tx descriptor can be used for start of frame. */
+ if (i != pdcs->ntxpost) {
+ iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF,
+ &pdcs->txd_64[i].ctrl1);
+ } else {
+ /* Last descriptor in ringset. Set End of Table. */
+ iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF |
+ D64_CTRL1_EOT, &pdcs->txd_64[i].ctrl1);
+ }
+
+ /* Every rx descriptor can be used for start of frame */
+ if (i != pdcs->nrxpost) {
+ iowrite32(D64_CTRL1_SOF,
+ &pdcs->rxd_64[i].ctrl1);
+ } else {
+ /* Last descriptor in ringset. Set End of Table. */
+ iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOT,
+ &pdcs->rxd_64[i].ctrl1);
+ }
+ }
+ return PDC_SUCCESS;
+
+fail_dealloc:
+ dma_pool_free(pdcs->ring_pool, tx.vbase, tx.dmabase);
+done:
+ return err;
+}
+
+static void pdc_ring_free(struct pdc_state *pdcs)
+{
+ if (pdcs->tx_ring_alloc.vbase) {
+ dma_pool_free(pdcs->ring_pool, pdcs->tx_ring_alloc.vbase,
+ pdcs->tx_ring_alloc.dmabase);
+ pdcs->tx_ring_alloc.vbase = NULL;
+ }
+
+ if (pdcs->rx_ring_alloc.vbase) {
+ dma_pool_free(pdcs->ring_pool, pdcs->rx_ring_alloc.vbase,
+ pdcs->rx_ring_alloc.dmabase);
+ pdcs->rx_ring_alloc.vbase = NULL;
+ }
+}
+
+/**
+ * pdc_desc_count() - Count the number of DMA descriptors that will be required
+ * for a given scatterlist. Account for the max length of a DMA buffer.
+ * @sg: Scatterlist to be DMA'd
+ * Return: Number of descriptors required
+ */
+static u32 pdc_desc_count(struct scatterlist *sg)
+{
+ u32 cnt = 0;
+
+ while (sg) {
+ cnt += ((sg->length / PDC_DMA_BUF_MAX) + 1);
+ sg = sg_next(sg);
+ }
+ return cnt;
+}
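+
+/*
+ * For example (hypothetical lengths): a two-entry scatterlist of 40000 and
+ * 100 bytes counts as (40000 / 16384 + 1) + (100 / 16384 + 1) = 3 + 1 = 4
+ * descriptors, since each descriptor covers at most PDC_DMA_BUF_MAX bytes.
+ */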
+
+/**
+ * pdc_rings_full() - Check whether the tx ring has room for tx_cnt descriptors
+ * and the rx ring has room for rx_cnt descriptors.
+ * @pdcs: PDC state
+ * @tx_cnt: The number of descriptors required in the tx ring
+ * @rx_cnt: The number of descriptors required in the rx ring
+ *
+ * Return: true if one of the rings does not have enough space
+ * false if sufficient space is available in both rings
+ */
+static bool pdc_rings_full(struct pdc_state *pdcs, int tx_cnt, int rx_cnt)
+{
+ u32 rx_avail;
+ u32 tx_avail;
+ bool full = false;
+
+ /* Check if the tx and rx rings are likely to have enough space */
+ rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
+ pdcs->nrxpost);
+ if (unlikely(rx_cnt > rx_avail)) {
+ pdcs->rx_ring_full++;
+ full = true;
+ }
+
+ if (likely(!full)) {
+ tx_avail = pdcs->ntxpost - NTXDACTIVE(pdcs->txin, pdcs->txout,
+ pdcs->ntxpost);
+ if (unlikely(tx_cnt > tx_avail)) {
+ pdcs->tx_ring_full++;
+ full = true;
+ }
+ }
+ return full;
+}
+
+/**
+ * pdc_last_tx_done() - If both the tx and rx rings have at least
+ * PDC_RING_SPACE_MIN descriptors available, then indicate that the mailbox
+ * framework can submit another message.
+ * @chan: mailbox channel to check
+ * Return: true if PDC can accept another message on this channel
+ */
+static bool pdc_last_tx_done(struct mbox_chan *chan)
+{
+ struct pdc_state *pdcs = chan->con_priv;
+ bool ret;
+
+ if (unlikely(pdc_rings_full(pdcs, PDC_RING_SPACE_MIN,
+ PDC_RING_SPACE_MIN))) {
+ pdcs->last_tx_not_done++;
+ ret = false;
+ } else {
+ ret = true;
+ }
+ return ret;
+}
+
+/**
+ * pdc_send_data() - mailbox send_data function
+ * @chan: The mailbox channel on which the data is sent. The channel
+ * corresponds to a DMA ringset.
+ * @data: The mailbox message to be sent. The message must be a
+ * brcm_message structure.
+ *
+ * This function is registered as the send_data function for the mailbox
+ * controller. From the destination scatterlist in the mailbox message, it
+ * creates a sequence of receive descriptors in the rx ring. From the source
+ * scatterlist, it creates a sequence of transmit descriptors in the tx ring.
+ * After creating the descriptors, it writes the rx ptr and tx ptr registers to
+ * initiate the DMA transfer.
+ *
+ * This function does the DMA map and unmap of the src and dst scatterlists in
+ * the mailbox message.
+ *
+ * Return: 0 if successful
+ * -ENOTSUPP if the mailbox message is a type this driver does not
+ * support
+ * < 0 if an error
+ */
+static int pdc_send_data(struct mbox_chan *chan, void *data)
+{
+ struct pdc_state *pdcs = chan->con_priv;
+ struct device *dev = &pdcs->pdev->dev;
+ struct brcm_message *mssg = data;
+ int err = PDC_SUCCESS;
+ int src_nent;
+ int dst_nent;
+ int nent;
+ u32 tx_desc_req;
+ u32 rx_desc_req;
+
+ if (unlikely(mssg->type != BRCM_MESSAGE_SPU))
+ return -ENOTSUPP;
+
+ src_nent = sg_nents(mssg->spu.src);
+ if (likely(src_nent)) {
+ nent = dma_map_sg(dev, mssg->spu.src, src_nent, DMA_TO_DEVICE);
+ if (unlikely(nent == 0))
+ return -EIO;
+ }
+
+ dst_nent = sg_nents(mssg->spu.dst);
+ if (likely(dst_nent)) {
+ nent = dma_map_sg(dev, mssg->spu.dst, dst_nent,
+ DMA_FROM_DEVICE);
+ if (unlikely(nent == 0)) {
+ dma_unmap_sg(dev, mssg->spu.src, src_nent,
+ DMA_TO_DEVICE);
+ return -EIO;
+ }
+ }
+
+ /*
+	 * Check if the tx and rx rings have enough space. Do this prior to
+	 * writing any tx or rx descriptors, to ensure that we never write a
+	 * partial set of descriptors, or write the rx descriptors when the
+	 * corresponding tx descriptors don't fit. This check and the entire
+	 * sequence of descriptor writes must happen without another thread
+	 * getting in; the channel spin lock in the mailbox framework ensures
+	 * this.
+ */
+ tx_desc_req = pdc_desc_count(mssg->spu.src);
+ rx_desc_req = pdc_desc_count(mssg->spu.dst);
+ if (unlikely(pdc_rings_full(pdcs, tx_desc_req, rx_desc_req + 1)))
+ return -ENOSPC;
+
+	/* Create rx descriptors to catch the SPU response */
+ err = pdc_rx_list_init(pdcs, mssg->spu.dst, mssg->ctx);
+ err |= pdc_rx_list_sg_add(pdcs, mssg->spu.dst);
+
+ /* Create tx descriptors to submit SPU request */
+ err |= pdc_tx_list_sg_add(pdcs, mssg->spu.src);
+ err |= pdc_tx_list_final(pdcs); /* initiate transfer */
+
+ if (unlikely(err))
+ dev_err(&pdcs->pdev->dev,
+ "%s failed with error %d", __func__, err);
+
+ return err;
+}
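+
+/*
+ * Illustrative client usage (not part of this driver; all names below are
+ * placeholders): a mailbox client such as the Broadcom SPU crypto driver,
+ * having obtained a channel with mbox_request_channel(), would submit a
+ * request roughly as follows:
+ *
+ *	struct brcm_message msg = {
+ *		.type    = BRCM_MESSAGE_SPU,
+ *		.spu.src = src_sg,
+ *		.spu.dst = dst_sg,
+ *		.ctx     = my_req,
+ *	};
+ *	err = mbox_send_message(chan, &msg);
+ *
+ * where src_sg/dst_sg are the request/response scatterlists and my_req is an
+ * opaque cookie handed back to the client along with the response.
+ */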
+
+static int pdc_startup(struct mbox_chan *chan)
+{
+ return pdc_ring_init(chan->con_priv, PDC_RINGSET);
+}
+
+static void pdc_shutdown(struct mbox_chan *chan)
+{
+ struct pdc_state *pdcs = chan->con_priv;
+
+ if (!pdcs)
+ return;
+
+ dev_dbg(&pdcs->pdev->dev,
+ "Shutdown mailbox channel for PDC %u", pdcs->pdc_idx);
+ pdc_ring_free(pdcs);
+}
+
+/**
+ * pdc_hw_init() - Use the given initialization parameters to initialize the
+ * state for one of the PDCs.
+ * @pdcs: state of the PDC
+ */
+static
+void pdc_hw_init(struct pdc_state *pdcs)
+{
+ struct platform_device *pdev;
+ struct device *dev;
+ struct dma64 *dma_reg;
+ int ringset = PDC_RINGSET;
+
+ pdev = pdcs->pdev;
+ dev = &pdev->dev;
+
+ dev_dbg(dev, "PDC %u initial values:", pdcs->pdc_idx);
+ dev_dbg(dev, "state structure: %p",
+ pdcs);
+ dev_dbg(dev, " - base virtual addr of hw regs %p",
+ pdcs->pdc_reg_vbase);
+
+ /* initialize data structures */
+ pdcs->regs = (struct pdc_regs *)pdcs->pdc_reg_vbase;
+ pdcs->txregs_64 = (struct dma64_regs *)
+ (((u8 *)pdcs->pdc_reg_vbase) +
+ PDC_TXREGS_OFFSET + (sizeof(struct dma64) * ringset));
+ pdcs->rxregs_64 = (struct dma64_regs *)
+ (((u8 *)pdcs->pdc_reg_vbase) +
+ PDC_RXREGS_OFFSET + (sizeof(struct dma64) * ringset));
+
+ pdcs->ntxd = PDC_RING_ENTRIES;
+ pdcs->nrxd = PDC_RING_ENTRIES;
+ pdcs->ntxpost = PDC_RING_ENTRIES - 1;
+ pdcs->nrxpost = PDC_RING_ENTRIES - 1;
+ iowrite32(0, &pdcs->regs->intmask);
+
+ dma_reg = &pdcs->regs->dmaregs[ringset];
+
+ /* Configure DMA but will enable later in pdc_ring_init() */
+ iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
+
+ iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1),
+ &dma_reg->dmarcv.control);
+
+ /* Reset current index pointers after making sure DMA is disabled */
+ iowrite32(0, &dma_reg->dmaxmt.ptr);
+ iowrite32(0, &dma_reg->dmarcv.ptr);
+
+ if (pdcs->pdc_resp_hdr_len == PDC_SPU2_RESP_HDR_LEN)
+ iowrite32(PDC_CKSUM_CTRL,
+ pdcs->pdc_reg_vbase + PDC_CKSUM_CTRL_OFFSET);
+}
+
+/**
+ * pdc_hw_disable() - Disable the tx and rx control in the hw.
+ * @pdcs: PDC state structure
+ *
+ */
+static void pdc_hw_disable(struct pdc_state *pdcs)
+{
+ struct dma64 *dma_reg;
+
+ dma_reg = &pdcs->regs->dmaregs[PDC_RINGSET];
+ iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
+ iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1),
+ &dma_reg->dmarcv.control);
+}
+
+/**
+ * pdc_rx_buf_pool_create() - Create a pool of receive buffers used to catch
+ * the metadata header returned with each response message.
+ * @pdcs: PDC state structure
+ *
+ * The metadata is not returned to the mailbox client. So the PDC driver
+ * manages these buffers.
+ *
+ * Return: PDC_SUCCESS
+ * -ENOMEM if pool creation fails
+ */
+static int pdc_rx_buf_pool_create(struct pdc_state *pdcs)
+{
+ struct platform_device *pdev;
+ struct device *dev;
+
+ pdev = pdcs->pdev;
+ dev = &pdev->dev;
+
+ pdcs->pdc_resp_hdr_len = pdcs->rx_status_len;
+ if (pdcs->use_bcm_hdr)
+ pdcs->pdc_resp_hdr_len += BCM_HDR_LEN;
+
+ pdcs->rx_buf_pool = dma_pool_create("pdc rx bufs", dev,
+ pdcs->pdc_resp_hdr_len,
+ RX_BUF_ALIGN, 0);
+ if (!pdcs->rx_buf_pool)
+ return -ENOMEM;
+
+ return PDC_SUCCESS;
+}
+
+/**
+ * pdc_interrupts_init() - Initialize the interrupt configuration for a PDC and
+ * register its IRQ handler. Deferred rx processing is done by a tasklet rather
+ * than in hard interrupt context.
+ * @pdcs: PDC state
+ *
+ * Set the interrupt mask for transmit and receive done.
+ * Set the lazy interrupt frame count to generate an interrupt for just one pkt.
+ *
+ * Return: PDC_SUCCESS
+ * <0 if threaded irq request fails
+ */
+static int pdc_interrupts_init(struct pdc_state *pdcs)
+{
+ struct platform_device *pdev = pdcs->pdev;
+ struct device *dev = &pdev->dev;
+ struct device_node *dn = pdev->dev.of_node;
+ int err;
+
+ /* interrupt configuration */
+ iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);
+
+ if (pdcs->hw_type == FA_HW)
+ iowrite32(PDC_LAZY_INT, pdcs->pdc_reg_vbase +
+ FA_RCVLAZY0_OFFSET);
+ else
+ iowrite32(PDC_LAZY_INT, pdcs->pdc_reg_vbase +
+ PDC_RCVLAZY0_OFFSET);
+
+ /* read irq from device tree */
+ pdcs->pdc_irq = irq_of_parse_and_map(dn, 0);
+ dev_dbg(dev, "pdc device %s irq %u for pdcs %p",
+ dev_name(dev), pdcs->pdc_irq, pdcs);
+
+ err = devm_request_irq(dev, pdcs->pdc_irq, pdc_irq_handler, 0,
+ dev_name(dev), dev);
+ if (err) {
+ dev_err(dev, "IRQ %u request failed with err %d\n",
+ pdcs->pdc_irq, err);
+ return err;
+ }
+ return PDC_SUCCESS;
+}
+
+static const struct mbox_chan_ops pdc_mbox_chan_ops = {
+ .send_data = pdc_send_data,
+ .last_tx_done = pdc_last_tx_done,
+ .startup = pdc_startup,
+ .shutdown = pdc_shutdown
+};
+
+/**
+ * pdc_mb_init() - Initialize the mailbox controller.
+ * @pdcs: PDC state
+ *
+ * Each PDC is a mailbox controller. Each ringset is a mailbox channel. The
+ * kernel driver uses only one ringset and thus one mailbox channel. The PDC
+ * uses tx-done polling (last_tx_done) rather than a tx-done interrupt to
+ * decide when another mailbox message may be submitted.
+ *
+ * Return: 0 on success
+ * < 0 if there is an allocation or registration failure
+ */
+static int pdc_mb_init(struct pdc_state *pdcs)
+{
+ struct device *dev = &pdcs->pdev->dev;
+ struct mbox_controller *mbc;
+ int chan_index;
+ int err;
+
+ mbc = &pdcs->mbc;
+ mbc->dev = dev;
+ mbc->ops = &pdc_mbox_chan_ops;
+ mbc->num_chans = 1;
+ mbc->chans = devm_kcalloc(dev, mbc->num_chans, sizeof(*mbc->chans),
+ GFP_KERNEL);
+ if (!mbc->chans)
+ return -ENOMEM;
+
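+	/* No tx-done interrupt: the framework polls last_tx_done() every 1 ms */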
+ mbc->txdone_irq = false;
+ mbc->txdone_poll = true;
+ mbc->txpoll_period = 1;
+ for (chan_index = 0; chan_index < mbc->num_chans; chan_index++)
+ mbc->chans[chan_index].con_priv = pdcs;
+
+ /* Register mailbox controller */
+ err = devm_mbox_controller_register(dev, mbc);
+ if (err) {
+ dev_crit(dev,
+ "Failed to register PDC mailbox controller. Error %d.",
+ err);
+ return err;
+ }
+ return 0;
+}
+
+/* Device tree API */
+static const int pdc_hw = PDC_HW;
+static const int fa_hw = FA_HW;
+
+static const struct of_device_id pdc_mbox_of_match[] = {
+ {.compatible = "brcm,iproc-pdc-mbox", .data = &pdc_hw},
+ {.compatible = "brcm,iproc-fa2-mbox", .data = &fa_hw},
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, pdc_mbox_of_match);
+
+/**
+ * pdc_dt_read() - Read application-specific data from device tree.
+ * @pdev: Platform device
+ * @pdcs: PDC state
+ *
+ * Reads the number of bytes of receive status that precede each received frame.
+ * Reads whether transmitted and received frames should be preceded by an
+ * 8-byte BCM header.
+ *
+ * Return: 0 if successful
+ * -ENODEV if device not available
+ */
+static int pdc_dt_read(struct platform_device *pdev, struct pdc_state *pdcs)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *dn = pdev->dev.of_node;
+ const struct of_device_id *match;
+ const int *hw_type;
+ int err;
+
+ err = of_property_read_u32(dn, "brcm,rx-status-len",
+ &pdcs->rx_status_len);
+ if (err < 0)
+ dev_err(dev,
+ "%s failed to get DMA receive status length from device tree",
+ __func__);
+
+ pdcs->use_bcm_hdr = of_property_read_bool(dn, "brcm,use-bcm-hdr");
+
+ pdcs->hw_type = PDC_HW;
+
+ match = of_match_device(of_match_ptr(pdc_mbox_of_match), dev);
+ if (match != NULL) {
+ hw_type = match->data;
+ pdcs->hw_type = *hw_type;
+ }
+
+ return 0;
+}
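+
+/*
+ * Example of the properties read above, as they might appear in a PDC/FA2
+ * mailbox node (values are illustrative only; see the brcm,iproc-pdc-mbox
+ * binding for the authoritative format):
+ *
+ *	brcm,rx-status-len = <32>;
+ *	brcm,use-bcm-hdr;
+ */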
+
+/**
+ * pdc_probe() - Probe function for PDC driver.
+ * @pdev: PDC platform device
+ *
+ * Reserve and map register regions defined in device tree.
+ * Allocate and initialize tx and rx DMA rings.
+ * Initialize a mailbox controller for each PDC.
+ *
+ * Return: 0 if successful
+ * < 0 if an error
+ */
+static int pdc_probe(struct platform_device *pdev)
+{
+ int err = 0;
+ struct device *dev = &pdev->dev;
+ struct resource *pdc_regs;
+ struct pdc_state *pdcs;
+
+ /* PDC state for one SPU */
+ pdcs = devm_kzalloc(dev, sizeof(*pdcs), GFP_KERNEL);
+ if (!pdcs) {
+ err = -ENOMEM;
+ goto cleanup;
+ }
+
+ pdcs->pdev = pdev;
+ platform_set_drvdata(pdev, pdcs);
+ pdcs->pdc_idx = pdcg.num_spu;
+ pdcg.num_spu++;
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(39));
+ if (err) {
+ dev_warn(dev, "PDC device cannot perform DMA. Error %d.", err);
+ goto cleanup;
+ }
+
+ /* Create DMA pool for tx ring */
+ pdcs->ring_pool = dma_pool_create("pdc rings", dev, PDC_RING_SIZE,
+ RING_ALIGN, 0);
+ if (!pdcs->ring_pool) {
+ err = -ENOMEM;
+ goto cleanup;
+ }
+
+ err = pdc_dt_read(pdev, pdcs);
+ if (err)
+ goto cleanup_ring_pool;
+
+ pdc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!pdc_regs) {
+ err = -ENODEV;
+ goto cleanup_ring_pool;
+ }
+ dev_dbg(dev, "PDC register region res.start = %pa, res.end = %pa",
+ &pdc_regs->start, &pdc_regs->end);
+
+ pdcs->pdc_reg_vbase = devm_ioremap_resource(&pdev->dev, pdc_regs);
+ if (IS_ERR(pdcs->pdc_reg_vbase)) {
+ err = PTR_ERR(pdcs->pdc_reg_vbase);
+ dev_err(&pdev->dev, "Failed to map registers: %d\n", err);
+ goto cleanup_ring_pool;
+ }
+
+ /* create rx buffer pool after dt read to know how big buffers are */
+ err = pdc_rx_buf_pool_create(pdcs);
+ if (err)
+ goto cleanup_ring_pool;
+
+ pdc_hw_init(pdcs);
+
+ /* Init tasklet for deferred DMA rx processing */
+ tasklet_setup(&pdcs->rx_tasklet, pdc_tasklet_cb);
+
+ err = pdc_interrupts_init(pdcs);
+ if (err)
+ goto cleanup_buf_pool;
+
+ /* Initialize mailbox controller */
+ err = pdc_mb_init(pdcs);
+ if (err)
+ goto cleanup_buf_pool;
+
+ pdc_setup_debugfs(pdcs);
+
+ dev_dbg(dev, "pdc_probe() successful");
+ return PDC_SUCCESS;
+
+cleanup_buf_pool:
+ tasklet_kill(&pdcs->rx_tasklet);
+ dma_pool_destroy(pdcs->rx_buf_pool);
+
+cleanup_ring_pool:
+ dma_pool_destroy(pdcs->ring_pool);
+
+cleanup:
+ return err;
+}
+
+static int pdc_remove(struct platform_device *pdev)
+{
+ struct pdc_state *pdcs = platform_get_drvdata(pdev);
+
+ pdc_free_debugfs();
+
+ tasklet_kill(&pdcs->rx_tasklet);
+
+ pdc_hw_disable(pdcs);
+
+ dma_pool_destroy(pdcs->rx_buf_pool);
+ dma_pool_destroy(pdcs->ring_pool);
+ return 0;
+}
+
+static struct platform_driver pdc_mbox_driver = {
+ .probe = pdc_probe,
+ .remove = pdc_remove,
+ .driver = {
+ .name = "brcm-iproc-pdc-mbox",
+ .of_match_table = of_match_ptr(pdc_mbox_of_match),
+ },
+};
+module_platform_driver(pdc_mbox_driver);
+
+MODULE_AUTHOR("Rob Rice <rob.rice@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom PDC mailbox driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/bcm2835-mailbox.c b/drivers/mailbox/bcm2835-mailbox.c
new file mode 100644
index 000000000..39761d190
--- /dev/null
+++ b/drivers/mailbox/bcm2835-mailbox.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2010,2015 Broadcom
+ * Copyright (C) 2013-2014 Lubomir Rintel
+ * Copyright (C) 2013 Craig McGeachie
+ *
+ * Parts of the driver are based on:
+ * - arch/arm/mach-bcm2708/vcio.c file written by Gray Girling that was
+ * obtained from branch "rpi-3.6.y" of git://github.com/raspberrypi/
+ * linux.git
+ * - drivers/mailbox/bcm2835-ipc.c by Lubomir Rintel at
+ * https://github.com/hackerspace/rpi-linux/blob/lr-raspberry-pi/drivers/
+ * mailbox/bcm2835-ipc.c
+ * - documentation available on the following web site:
+ * https://github.com/raspberrypi/firmware/wiki/Mailbox-property-interface
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+/* Mailboxes */
+#define ARM_0_MAIL0 0x00
+#define ARM_0_MAIL1 0x20
+
+/*
+ * Mailbox registers. We basically only support mailbox 0 & 1. We
+ * deliver to the VC in mailbox 1; it delivers to us in mailbox 0. See
+ * BCM2835-ARM-Peripherals.pdf section 1.3 for an explanation about
+ * the placement of memory barriers.
+ */
+#define MAIL0_RD (ARM_0_MAIL0 + 0x00)
+#define MAIL0_POL (ARM_0_MAIL0 + 0x10)
+#define MAIL0_STA (ARM_0_MAIL0 + 0x18)
+#define MAIL0_CNF (ARM_0_MAIL0 + 0x1C)
+#define MAIL1_WRT (ARM_0_MAIL1 + 0x00)
+#define MAIL1_STA (ARM_0_MAIL1 + 0x18)
+
+/* Status register: FIFO state. */
+#define ARM_MS_FULL BIT(31)
+#define ARM_MS_EMPTY BIT(30)
+
+/* Configuration register: Enable interrupts. */
+#define ARM_MC_IHAVEDATAIRQEN BIT(0)
+
+struct bcm2835_mbox {
+ void __iomem *regs;
+ spinlock_t lock;
+ struct mbox_controller controller;
+};
+
+static struct bcm2835_mbox *bcm2835_link_mbox(struct mbox_chan *link)
+{
+ return container_of(link->mbox, struct bcm2835_mbox, controller);
+}
+
+static irqreturn_t bcm2835_mbox_irq(int irq, void *dev_id)
+{
+ struct bcm2835_mbox *mbox = dev_id;
+ struct device *dev = mbox->controller.dev;
+ struct mbox_chan *link = &mbox->controller.chans[0];
+
+ while (!(readl(mbox->regs + MAIL0_STA) & ARM_MS_EMPTY)) {
+ u32 msg = readl(mbox->regs + MAIL0_RD);
+ dev_dbg(dev, "Reply 0x%08X\n", msg);
+ mbox_chan_received_data(link, &msg);
+ }
+ return IRQ_HANDLED;
+}
+
+static int bcm2835_send_data(struct mbox_chan *link, void *data)
+{
+ struct bcm2835_mbox *mbox = bcm2835_link_mbox(link);
+ u32 msg = *(u32 *)data;
+
+ spin_lock(&mbox->lock);
+ writel(msg, mbox->regs + MAIL1_WRT);
+ dev_dbg(mbox->controller.dev, "Request 0x%08X\n", msg);
+ spin_unlock(&mbox->lock);
+ return 0;
+}
+
+static int bcm2835_startup(struct mbox_chan *link)
+{
+ struct bcm2835_mbox *mbox = bcm2835_link_mbox(link);
+
+ /* Enable the interrupt on data reception */
+ writel(ARM_MC_IHAVEDATAIRQEN, mbox->regs + MAIL0_CNF);
+
+ return 0;
+}
+
+static void bcm2835_shutdown(struct mbox_chan *link)
+{
+ struct bcm2835_mbox *mbox = bcm2835_link_mbox(link);
+
+ writel(0, mbox->regs + MAIL0_CNF);
+}
+
+static bool bcm2835_last_tx_done(struct mbox_chan *link)
+{
+ struct bcm2835_mbox *mbox = bcm2835_link_mbox(link);
+ bool ret;
+
+ spin_lock(&mbox->lock);
+ ret = !(readl(mbox->regs + MAIL1_STA) & ARM_MS_FULL);
+ spin_unlock(&mbox->lock);
+ return ret;
+}
+
+static const struct mbox_chan_ops bcm2835_mbox_chan_ops = {
+ .send_data = bcm2835_send_data,
+ .startup = bcm2835_startup,
+ .shutdown = bcm2835_shutdown,
+ .last_tx_done = bcm2835_last_tx_done
+};
+
+static struct mbox_chan *bcm2835_mbox_index_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *sp)
+{
+ if (sp->args_count != 0)
+ return ERR_PTR(-EINVAL);
+
+ return &mbox->chans[0];
+}
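+
+/*
+ * The xlate above accepts no phandle arguments, so a client node would
+ * reference the single channel without cells, e.g. (illustrative):
+ *
+ *	mboxes = <&mailbox>;
+ */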
+
+static int bcm2835_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int ret = 0;
+ struct resource *iomem;
+ struct bcm2835_mbox *mbox;
+
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (mbox == NULL)
+ return -ENOMEM;
+ spin_lock_init(&mbox->lock);
+
+ ret = devm_request_irq(dev, irq_of_parse_and_map(dev->of_node, 0),
+ bcm2835_mbox_irq, 0, dev_name(dev), mbox);
+ if (ret) {
+ dev_err(dev, "Failed to register a mailbox IRQ handler: %d\n",
+ ret);
+ return -ENODEV;
+ }
+
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mbox->regs = devm_ioremap_resource(&pdev->dev, iomem);
+ if (IS_ERR(mbox->regs)) {
+ ret = PTR_ERR(mbox->regs);
+ dev_err(&pdev->dev, "Failed to remap mailbox regs: %d\n", ret);
+ return ret;
+ }
+
+ mbox->controller.txdone_poll = true;
+ mbox->controller.txpoll_period = 5;
+ mbox->controller.ops = &bcm2835_mbox_chan_ops;
+ mbox->controller.of_xlate = &bcm2835_mbox_index_xlate;
+ mbox->controller.dev = dev;
+ mbox->controller.num_chans = 1;
+ mbox->controller.chans = devm_kzalloc(dev,
+ sizeof(*mbox->controller.chans), GFP_KERNEL);
+ if (!mbox->controller.chans)
+ return -ENOMEM;
+
+ ret = devm_mbox_controller_register(dev, &mbox->controller);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, mbox);
+ dev_info(dev, "mailbox enabled\n");
+
+ return ret;
+}
+
+static const struct of_device_id bcm2835_mbox_of_match[] = {
+ { .compatible = "brcm,bcm2835-mbox", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcm2835_mbox_of_match);
+
+static struct platform_driver bcm2835_mbox_driver = {
+ .driver = {
+ .name = "bcm2835-mbox",
+ .of_match_table = bcm2835_mbox_of_match,
+ },
+ .probe = bcm2835_mbox_probe,
+};
+module_platform_driver(bcm2835_mbox_driver);
+
+MODULE_AUTHOR("Lubomir Rintel <lkundrak@v3.sk>");
+MODULE_DESCRIPTION("BCM2835 mailbox IPC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/hi3660-mailbox.c b/drivers/mailbox/hi3660-mailbox.c
new file mode 100644
index 000000000..53f4bc248
--- /dev/null
+++ b/drivers/mailbox/hi3660-mailbox.c
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017-2018 Hisilicon Limited.
+// Copyright (c) 2017-2018 Linaro Limited.
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "mailbox.h"
+
+#define MBOX_CHAN_MAX 32
+
+#define MBOX_RX 0x0
+#define MBOX_TX 0x1
+
+#define MBOX_BASE(mbox, ch) ((mbox)->base + ((ch) * 0x40))
+#define MBOX_SRC_REG 0x00
+#define MBOX_DST_REG 0x04
+#define MBOX_DCLR_REG 0x08
+#define MBOX_DSTAT_REG 0x0c
+#define MBOX_MODE_REG 0x10
+#define MBOX_IMASK_REG 0x14
+#define MBOX_ICLR_REG 0x18
+#define MBOX_SEND_REG 0x1c
+#define MBOX_DATA_REG 0x20
+
+#define MBOX_IPC_LOCK_REG 0xa00
+#define MBOX_IPC_UNLOCK 0x1acce551
+
+#define MBOX_AUTOMATIC_ACK 1
+
+#define MBOX_STATE_IDLE BIT(4)
+#define MBOX_STATE_READY BIT(5)
+#define MBOX_STATE_ACK BIT(7)
+
+#define MBOX_MSG_LEN 8
+
+/**
+ * struct hi3660_chan_info - Hi3660 mailbox channel information
+ *
+ * A channel can be used for TX or RX. It can trigger a remote processor
+ * interrupt to notify the remote processor, and it can receive an
+ * interrupt when an incoming message arrives.
+ *
+ * @dst_irq: Interrupt vector for remote processor
+ * @ack_irq: Interrupt vector for local processor
+ */
+struct hi3660_chan_info {
+ unsigned int dst_irq;
+ unsigned int ack_irq;
+};
+
+/**
+ * struct hi3660_mbox - Hi3660 mailbox controller data
+ *
+ * The mailbox controller includes 32 channels and can allocate a
+ * channel for message transfer.
+ *
+ * @dev: Device to which it is attached
+ * @base: Base address of the register mapping region
+ * @chan: Representation of channels in mailbox controller
+ * @mchan: Representation of channel info
+ * @controller: Representation of a communication channel controller
+ */
+struct hi3660_mbox {
+ struct device *dev;
+ void __iomem *base;
+ struct mbox_chan chan[MBOX_CHAN_MAX];
+ struct hi3660_chan_info mchan[MBOX_CHAN_MAX];
+ struct mbox_controller controller;
+};
+
+static struct hi3660_mbox *to_hi3660_mbox(struct mbox_controller *mbox)
+{
+ return container_of(mbox, struct hi3660_mbox, controller);
+}
+
+static int hi3660_mbox_check_state(struct mbox_chan *chan)
+{
+ unsigned long ch = (unsigned long)chan->con_priv;
+ struct hi3660_mbox *mbox = to_hi3660_mbox(chan->mbox);
+ struct hi3660_chan_info *mchan = &mbox->mchan[ch];
+ void __iomem *base = MBOX_BASE(mbox, ch);
+ unsigned long val;
+ unsigned int ret;
+
+ /* Mailbox is ready to use */
+ if (readl(base + MBOX_MODE_REG) & MBOX_STATE_READY)
+ return 0;
+
+ /* Wait for acknowledge from remote */
+ ret = readx_poll_timeout_atomic(readl, base + MBOX_MODE_REG,
+ val, (val & MBOX_STATE_ACK), 1000, 300000);
+ if (ret) {
+ dev_err(mbox->dev, "%s: timeout for receiving ack\n", __func__);
+ return ret;
+ }
+
+ /* clear ack state, mailbox will get back to ready state */
+ writel(BIT(mchan->ack_irq), base + MBOX_ICLR_REG);
+
+ return 0;
+}
+
+static int hi3660_mbox_unlock(struct mbox_chan *chan)
+{
+ struct hi3660_mbox *mbox = to_hi3660_mbox(chan->mbox);
+ unsigned int val, retry = 3;
+
+ do {
+ writel(MBOX_IPC_UNLOCK, mbox->base + MBOX_IPC_LOCK_REG);
+
+ val = readl(mbox->base + MBOX_IPC_LOCK_REG);
+ if (!val)
+ break;
+
+ udelay(10);
+ } while (retry--);
+
+ if (val)
+ dev_err(mbox->dev, "%s: failed to unlock mailbox\n", __func__);
+
+ return (!val) ? 0 : -ETIMEDOUT;
+}
+
+static int hi3660_mbox_acquire_channel(struct mbox_chan *chan)
+{
+ unsigned long ch = (unsigned long)chan->con_priv;
+ struct hi3660_mbox *mbox = to_hi3660_mbox(chan->mbox);
+ struct hi3660_chan_info *mchan = &mbox->mchan[ch];
+ void __iomem *base = MBOX_BASE(mbox, ch);
+ unsigned int val, retry;
+
+ for (retry = 10; retry; retry--) {
+ /* Check if channel is in idle state */
+ if (readl(base + MBOX_MODE_REG) & MBOX_STATE_IDLE) {
+ writel(BIT(mchan->ack_irq), base + MBOX_SRC_REG);
+
+ /* Check ack bit has been set successfully */
+ val = readl(base + MBOX_SRC_REG);
+ if (val & BIT(mchan->ack_irq))
+ break;
+ }
+ }
+
+ if (!retry)
+ dev_err(mbox->dev, "%s: failed to acquire channel\n", __func__);
+
+ return retry ? 0 : -ETIMEDOUT;
+}
+
+static int hi3660_mbox_startup(struct mbox_chan *chan)
+{
+ int ret;
+
+ ret = hi3660_mbox_unlock(chan);
+ if (ret)
+ return ret;
+
+ ret = hi3660_mbox_acquire_channel(chan);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int hi3660_mbox_send_data(struct mbox_chan *chan, void *msg)
+{
+ unsigned long ch = (unsigned long)chan->con_priv;
+ struct hi3660_mbox *mbox = to_hi3660_mbox(chan->mbox);
+ struct hi3660_chan_info *mchan = &mbox->mchan[ch];
+ void __iomem *base = MBOX_BASE(mbox, ch);
+ u32 *buf = msg;
+ unsigned int i;
+ int ret;
+
+ ret = hi3660_mbox_check_state(chan);
+ if (ret)
+ return ret;
+
+ /* Clear mask for destination interrupt */
+ writel_relaxed(~BIT(mchan->dst_irq), base + MBOX_IMASK_REG);
+
+ /* Config destination for interrupt vector */
+ writel_relaxed(BIT(mchan->dst_irq), base + MBOX_DST_REG);
+
+ /* Automatic acknowledge mode */
+ writel_relaxed(MBOX_AUTOMATIC_ACK, base + MBOX_MODE_REG);
+
+ /* Fill message data */
+ for (i = 0; i < MBOX_MSG_LEN; i++)
+ writel_relaxed(buf[i], base + MBOX_DATA_REG + i * 4);
+
+ /* Trigger data transferring */
+ writel(BIT(mchan->ack_irq), base + MBOX_SEND_REG);
+ return 0;
+}
+
+static const struct mbox_chan_ops hi3660_mbox_ops = {
+ .startup = hi3660_mbox_startup,
+ .send_data = hi3660_mbox_send_data,
+};
+
+static struct mbox_chan *hi3660_mbox_xlate(struct mbox_controller *controller,
+ const struct of_phandle_args *spec)
+{
+ struct hi3660_mbox *mbox = to_hi3660_mbox(controller);
+ struct hi3660_chan_info *mchan;
+ unsigned int ch = spec->args[0];
+
+ if (ch >= MBOX_CHAN_MAX) {
+ dev_err(mbox->dev, "Invalid channel idx %d\n", ch);
+ return ERR_PTR(-EINVAL);
+ }
+
+ mchan = &mbox->mchan[ch];
+ mchan->dst_irq = spec->args[1];
+ mchan->ack_irq = spec->args[2];
+
+ return &mbox->chan[ch];
+}
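+
+/*
+ * Per the xlate above, a client selects a channel with three cells: channel
+ * index, destination interrupt vector and ack interrupt vector, e.g.
+ * (illustrative values only):
+ *
+ *	mboxes = <&mailbox 28 1 7>;
+ */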
+
+static const struct of_device_id hi3660_mbox_of_match[] = {
+ { .compatible = "hisilicon,hi3660-mbox", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, hi3660_mbox_of_match);
+
+static int hi3660_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct hi3660_mbox *mbox;
+ struct mbox_chan *chan;
+ struct resource *res;
+ unsigned long ch;
+ int err;
+
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mbox->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mbox->base))
+ return PTR_ERR(mbox->base);
+
+ mbox->dev = dev;
+ mbox->controller.dev = dev;
+ mbox->controller.chans = mbox->chan;
+ mbox->controller.num_chans = MBOX_CHAN_MAX;
+ mbox->controller.ops = &hi3660_mbox_ops;
+ mbox->controller.of_xlate = hi3660_mbox_xlate;
+
+ /* Initialize mailbox channel data */
+ chan = mbox->chan;
+ for (ch = 0; ch < MBOX_CHAN_MAX; ch++)
+ chan[ch].con_priv = (void *)ch;
+
+ err = devm_mbox_controller_register(dev, &mbox->controller);
+ if (err) {
+ dev_err(dev, "Failed to register mailbox %d\n", err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, mbox);
+ dev_info(dev, "Mailbox enabled\n");
+ return 0;
+}
+
+static struct platform_driver hi3660_mbox_driver = {
+ .probe = hi3660_mbox_probe,
+ .driver = {
+ .name = "hi3660-mbox",
+ .of_match_table = hi3660_mbox_of_match,
+ },
+};
+
+static int __init hi3660_mbox_init(void)
+{
+ return platform_driver_register(&hi3660_mbox_driver);
+}
+core_initcall(hi3660_mbox_init);
+
+static void __exit hi3660_mbox_exit(void)
+{
+ platform_driver_unregister(&hi3660_mbox_driver);
+}
+module_exit(hi3660_mbox_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Hisilicon Hi3660 Mailbox Controller");
+MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>");
diff --git a/drivers/mailbox/hi6220-mailbox.c b/drivers/mailbox/hi6220-mailbox.c
new file mode 100644
index 000000000..cc236ac7a
--- /dev/null
+++ b/drivers/mailbox/hi6220-mailbox.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Hisilicon's Hi6220 mailbox driver
+ *
+ * Copyright (c) 2015 Hisilicon Limited.
+ * Copyright (c) 2015 Linaro Limited.
+ *
+ * Author: Leo Yan <leo.yan@linaro.org>
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kfifo.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define MBOX_CHAN_MAX 32
+
+#define MBOX_TX 0x1
+
+/* Mailbox message length: 8 words */
+#define MBOX_MSG_LEN 8
+
+/* Mailbox Registers */
+#define MBOX_OFF(m) (0x40 * (m))
+#define MBOX_MODE_REG(m) (MBOX_OFF(m) + 0x0)
+#define MBOX_DATA_REG(m) (MBOX_OFF(m) + 0x4)
+
+#define MBOX_STATE_MASK (0xF << 4)
+#define MBOX_STATE_IDLE (0x1 << 4)
+#define MBOX_STATE_TX (0x2 << 4)
+#define MBOX_STATE_RX (0x4 << 4)
+#define MBOX_STATE_ACK (0x8 << 4)
+#define MBOX_ACK_CONFIG_MASK (0x1 << 0)
+#define MBOX_ACK_AUTOMATIC (0x1 << 0)
+#define MBOX_ACK_IRQ (0x0 << 0)
+
+/* IPC registers */
+#define ACK_INT_RAW_REG(i) ((i) + 0x400)
+#define ACK_INT_MSK_REG(i) ((i) + 0x404)
+#define ACK_INT_STAT_REG(i) ((i) + 0x408)
+#define ACK_INT_CLR_REG(i) ((i) + 0x40c)
+#define ACK_INT_ENA_REG(i) ((i) + 0x500)
+#define ACK_INT_DIS_REG(i) ((i) + 0x504)
+#define DST_INT_RAW_REG(i) ((i) + 0x420)
+
+
+struct hi6220_mbox_chan {
+
+ /*
+ * Description for channel's hardware info:
+ * - direction: tx or rx
+ * - dst irq: peer core's irq number
+ * - ack irq: local irq number
+ * - slot number
+ */
+ unsigned int dir, dst_irq, ack_irq;
+ unsigned int slot;
+
+ struct hi6220_mbox *parent;
+};
+
+struct hi6220_mbox {
+ struct device *dev;
+
+ int irq;
+
+ /* flag of enabling tx's irq mode */
+ bool tx_irq_mode;
+
+ /* region for ipc event */
+ void __iomem *ipc;
+
+ /* region for mailbox */
+ void __iomem *base;
+
+ unsigned int chan_num;
+ struct hi6220_mbox_chan *mchan;
+
+ void *irq_map_chan[MBOX_CHAN_MAX];
+ struct mbox_chan *chan;
+ struct mbox_controller controller;
+};
+
+static void mbox_set_state(struct hi6220_mbox *mbox,
+ unsigned int slot, u32 val)
+{
+ u32 status;
+
+ status = readl(mbox->base + MBOX_MODE_REG(slot));
+ status = (status & ~MBOX_STATE_MASK) | val;
+ writel(status, mbox->base + MBOX_MODE_REG(slot));
+}
+
+static void mbox_set_mode(struct hi6220_mbox *mbox,
+ unsigned int slot, u32 val)
+{
+ u32 mode;
+
+ mode = readl(mbox->base + MBOX_MODE_REG(slot));
+ mode = (mode & ~MBOX_ACK_CONFIG_MASK) | val;
+ writel(mode, mbox->base + MBOX_MODE_REG(slot));
+}
+
+static bool hi6220_mbox_last_tx_done(struct mbox_chan *chan)
+{
+ struct hi6220_mbox_chan *mchan = chan->con_priv;
+ struct hi6220_mbox *mbox = mchan->parent;
+ u32 state;
+
+	/* last_tx_done() is only used in polling (non-irq) tx-done mode */
+ BUG_ON(mbox->tx_irq_mode);
+
+ state = readl(mbox->base + MBOX_MODE_REG(mchan->slot));
+ return ((state & MBOX_STATE_MASK) == MBOX_STATE_IDLE);
+}
+
+static int hi6220_mbox_send_data(struct mbox_chan *chan, void *msg)
+{
+ struct hi6220_mbox_chan *mchan = chan->con_priv;
+ struct hi6220_mbox *mbox = mchan->parent;
+ unsigned int slot = mchan->slot;
+ u32 *buf = msg;
+ int i;
+
+ /* indicate as a TX channel */
+ mchan->dir = MBOX_TX;
+
+ mbox_set_state(mbox, slot, MBOX_STATE_TX);
+
+ if (mbox->tx_irq_mode)
+ mbox_set_mode(mbox, slot, MBOX_ACK_IRQ);
+ else
+ mbox_set_mode(mbox, slot, MBOX_ACK_AUTOMATIC);
+
+ for (i = 0; i < MBOX_MSG_LEN; i++)
+ writel(buf[i], mbox->base + MBOX_DATA_REG(slot) + i * 4);
+
+ /* trigger remote request */
+ writel(BIT(mchan->dst_irq), DST_INT_RAW_REG(mbox->ipc));
+ return 0;
+}
+
+static irqreturn_t hi6220_mbox_interrupt(int irq, void *p)
+{
+ struct hi6220_mbox *mbox = p;
+ struct hi6220_mbox_chan *mchan;
+ struct mbox_chan *chan;
+ unsigned int state, intr_bit, i;
+ u32 msg[MBOX_MSG_LEN];
+
+ state = readl(ACK_INT_STAT_REG(mbox->ipc));
+ if (!state) {
+ dev_warn(mbox->dev, "%s: spurious interrupt\n",
+ __func__);
+ return IRQ_HANDLED;
+ }
+
+ while (state) {
+ intr_bit = __ffs(state);
+ state &= (state - 1);
+
+ chan = mbox->irq_map_chan[intr_bit];
+ if (!chan) {
+ dev_warn(mbox->dev, "%s: unexpected irq vector %d\n",
+ __func__, intr_bit);
+ continue;
+ }
+
+ mchan = chan->con_priv;
+ if (mchan->dir == MBOX_TX)
+ mbox_chan_txdone(chan, 0);
+ else {
+ for (i = 0; i < MBOX_MSG_LEN; i++)
+ msg[i] = readl(mbox->base +
+ MBOX_DATA_REG(mchan->slot) + i * 4);
+
+ mbox_chan_received_data(chan, (void *)msg);
+ }
+
+ /* clear IRQ source */
+ writel(BIT(mchan->ack_irq), ACK_INT_CLR_REG(mbox->ipc));
+ mbox_set_state(mbox, mchan->slot, MBOX_STATE_IDLE);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int hi6220_mbox_startup(struct mbox_chan *chan)
+{
+ struct hi6220_mbox_chan *mchan = chan->con_priv;
+ struct hi6220_mbox *mbox = mchan->parent;
+
+ mchan->dir = 0;
+
+ /* enable interrupt */
+ writel(BIT(mchan->ack_irq), ACK_INT_ENA_REG(mbox->ipc));
+ return 0;
+}
+
+static void hi6220_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct hi6220_mbox_chan *mchan = chan->con_priv;
+ struct hi6220_mbox *mbox = mchan->parent;
+
+ /* disable interrupt */
+ writel(BIT(mchan->ack_irq), ACK_INT_DIS_REG(mbox->ipc));
+ mbox->irq_map_chan[mchan->ack_irq] = NULL;
+}
+
+static const struct mbox_chan_ops hi6220_mbox_ops = {
+ .send_data = hi6220_mbox_send_data,
+ .startup = hi6220_mbox_startup,
+ .shutdown = hi6220_mbox_shutdown,
+ .last_tx_done = hi6220_mbox_last_tx_done,
+};
+
+static struct mbox_chan *hi6220_mbox_xlate(struct mbox_controller *controller,
+ const struct of_phandle_args *spec)
+{
+ struct hi6220_mbox *mbox = dev_get_drvdata(controller->dev);
+ struct hi6220_mbox_chan *mchan;
+ struct mbox_chan *chan;
+ unsigned int i = spec->args[0];
+ unsigned int dst_irq = spec->args[1];
+ unsigned int ack_irq = spec->args[2];
+
+ /* Bounds checking */
+ if (i >= mbox->chan_num || dst_irq >= mbox->chan_num ||
+ ack_irq >= mbox->chan_num) {
+ dev_err(mbox->dev,
+ "Invalid channel idx %d dst_irq %d ack_irq %d\n",
+ i, dst_irq, ack_irq);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Is requested channel free? */
+ chan = &mbox->chan[i];
+ if (mbox->irq_map_chan[ack_irq] == (void *)chan) {
+ dev_err(mbox->dev, "Channel in use\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ mchan = chan->con_priv;
+ mchan->dst_irq = dst_irq;
+ mchan->ack_irq = ack_irq;
+
+ mbox->irq_map_chan[ack_irq] = (void *)chan;
+ return chan;
+}
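+
+/*
+ * The xlate above likewise takes three cells: slot index, remote (dst)
+ * interrupt number and local (ack) interrupt number, e.g. (illustrative
+ * values only):
+ *
+ *	mboxes = <&mailbox 1 0 11>;
+ */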
+
+static const struct of_device_id hi6220_mbox_of_match[] = {
+ { .compatible = "hisilicon,hi6220-mbox", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, hi6220_mbox_of_match);
+
+static int hi6220_mbox_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct hi6220_mbox *mbox;
+ struct resource *res;
+ int i, err;
+
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ mbox->dev = dev;
+ mbox->chan_num = MBOX_CHAN_MAX;
+ mbox->mchan = devm_kcalloc(dev,
+ mbox->chan_num, sizeof(*mbox->mchan), GFP_KERNEL);
+ if (!mbox->mchan)
+ return -ENOMEM;
+
+ mbox->chan = devm_kcalloc(dev,
+ mbox->chan_num, sizeof(*mbox->chan), GFP_KERNEL);
+ if (!mbox->chan)
+ return -ENOMEM;
+
+ mbox->irq = platform_get_irq(pdev, 0);
+ if (mbox->irq < 0)
+ return mbox->irq;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mbox->ipc = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mbox->ipc)) {
+ dev_err(dev, "ioremap ipc failed\n");
+ return PTR_ERR(mbox->ipc);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ mbox->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mbox->base)) {
+ dev_err(dev, "ioremap buffer failed\n");
+ return PTR_ERR(mbox->base);
+ }
+
+ err = devm_request_irq(dev, mbox->irq, hi6220_mbox_interrupt, 0,
+ dev_name(dev), mbox);
+ if (err) {
+ dev_err(dev, "Failed to register a mailbox IRQ handler: %d\n",
+ err);
+ return -ENODEV;
+ }
+
+ mbox->controller.dev = dev;
+ mbox->controller.chans = &mbox->chan[0];
+ mbox->controller.num_chans = mbox->chan_num;
+ mbox->controller.ops = &hi6220_mbox_ops;
+ mbox->controller.of_xlate = hi6220_mbox_xlate;
+
+ for (i = 0; i < mbox->chan_num; i++) {
+ mbox->chan[i].con_priv = &mbox->mchan[i];
+ mbox->irq_map_chan[i] = NULL;
+
+ mbox->mchan[i].parent = mbox;
+ mbox->mchan[i].slot = i;
+ }
+
+ /* mask and clear all interrupt vectors */
+ writel(0x0, ACK_INT_MSK_REG(mbox->ipc));
+ writel(~0x0, ACK_INT_CLR_REG(mbox->ipc));
+
+ /* use interrupt for tx's ack */
+ if (of_find_property(node, "hi6220,mbox-tx-noirq", NULL))
+ mbox->tx_irq_mode = false;
+ else
+ mbox->tx_irq_mode = true;
+
+ if (mbox->tx_irq_mode)
+ mbox->controller.txdone_irq = true;
+ else {
+ mbox->controller.txdone_poll = true;
+ mbox->controller.txpoll_period = 5;
+ }
+
+ err = devm_mbox_controller_register(dev, &mbox->controller);
+ if (err) {
+ dev_err(dev, "Failed to register mailbox %d\n", err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, mbox);
+ dev_info(dev, "Mailbox enabled\n");
+ return 0;
+}
+
+static struct platform_driver hi6220_mbox_driver = {
+ .driver = {
+ .name = "hi6220-mbox",
+ .of_match_table = hi6220_mbox_of_match,
+ },
+ .probe = hi6220_mbox_probe,
+};
+
+static int __init hi6220_mbox_init(void)
+{
+ return platform_driver_register(&hi6220_mbox_driver);
+}
+core_initcall(hi6220_mbox_init);
+
+static void __exit hi6220_mbox_exit(void)
+{
+ platform_driver_unregister(&hi6220_mbox_driver);
+}
+module_exit(hi6220_mbox_exit);
+
+MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>");
+MODULE_DESCRIPTION("Hi6220 mailbox driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
new file mode 100644
index 000000000..c5663398c
--- /dev/null
+++ b/drivers/mailbox/imx-mailbox.c
@@ -0,0 +1,679 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 Pengutronix, Oleksij Rempel <o.rempel@pengutronix.de>
+ */
+
+#include <linux/clk.h>
+#include <linux/firmware/imx/ipc.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/suspend.h>
+#include <linux/slab.h>
+
+#define IMX_MU_xSR_GIPn(x) BIT(28 + (3 - (x)))
+#define IMX_MU_xSR_RFn(x) BIT(24 + (3 - (x)))
+#define IMX_MU_xSR_TEn(x) BIT(20 + (3 - (x)))
+#define IMX_MU_xSR_BRDIP BIT(9)
+
+/* General Purpose Interrupt Enable */
+#define IMX_MU_xCR_GIEn(x) BIT(28 + (3 - (x)))
+/* Receive Interrupt Enable */
+#define IMX_MU_xCR_RIEn(x) BIT(24 + (3 - (x)))
+/* Transmit Interrupt Enable */
+#define IMX_MU_xCR_TIEn(x) BIT(20 + (3 - (x)))
+/* General Purpose Interrupt Request */
+#define IMX_MU_xCR_GIRn(x) BIT(16 + (3 - (x)))
+
+#define IMX_MU_CHANS 16
+/* TX0/RX0/RXDB[0-3] */
+#define IMX_MU_SCU_CHANS 6
+#define IMX_MU_CHAN_NAME_SIZE 20
+
+enum imx_mu_chan_type {
+ IMX_MU_TYPE_TX, /* Tx */
+ IMX_MU_TYPE_RX, /* Rx */
+ IMX_MU_TYPE_TXDB, /* Tx doorbell */
+ IMX_MU_TYPE_RXDB, /* Rx doorbell */
+};
+
+struct imx_sc_rpc_msg_max {
+ struct imx_sc_rpc_msg hdr;
+ u32 data[7];
+};
+
+struct imx_mu_con_priv {
+ unsigned int idx;
+ char irq_desc[IMX_MU_CHAN_NAME_SIZE];
+ enum imx_mu_chan_type type;
+ struct mbox_chan *chan;
+ struct tasklet_struct txdb_tasklet;
+};
+
+struct imx_mu_priv {
+ struct device *dev;
+ void __iomem *base;
+ spinlock_t xcr_lock; /* control register lock */
+
+ struct mbox_controller mbox;
+ struct mbox_chan mbox_chans[IMX_MU_CHANS];
+
+ struct imx_mu_con_priv con_priv[IMX_MU_CHANS];
+ const struct imx_mu_dcfg *dcfg;
+ struct clk *clk;
+ int irq;
+ bool suspend;
+
+ u32 xcr;
+
+ bool side_b;
+};
+
+struct imx_mu_dcfg {
+ int (*tx)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp, void *data);
+ int (*rx)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp);
+ void (*init)(struct imx_mu_priv *priv);
+ u32 xTR[4]; /* Transmit Registers */
+ u32 xRR[4]; /* Receive Registers */
+ u32 xSR; /* Status Register */
+ u32 xCR; /* Control Register */
+};
+
+static struct imx_mu_priv *to_imx_mu_priv(struct mbox_controller *mbox)
+{
+ return container_of(mbox, struct imx_mu_priv, mbox);
+}
+
+static void imx_mu_write(struct imx_mu_priv *priv, u32 val, u32 offs)
+{
+ iowrite32(val, priv->base + offs);
+}
+
+static u32 imx_mu_read(struct imx_mu_priv *priv, u32 offs)
+{
+ return ioread32(priv->base + offs);
+}
+
+static u32 imx_mu_xcr_rmw(struct imx_mu_priv *priv, u32 set, u32 clr)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&priv->xcr_lock, flags);
+ val = imx_mu_read(priv, priv->dcfg->xCR);
+ val &= ~clr;
+ val |= set;
+ imx_mu_write(priv, val, priv->dcfg->xCR);
+ spin_unlock_irqrestore(&priv->xcr_lock, flags);
+
+ return val;
+}
+
+static int imx_mu_generic_tx(struct imx_mu_priv *priv,
+ struct imx_mu_con_priv *cp,
+ void *data)
+{
+ u32 *arg = data;
+
+ switch (cp->type) {
+ case IMX_MU_TYPE_TX:
+ imx_mu_write(priv, *arg, priv->dcfg->xTR[cp->idx]);
+ imx_mu_xcr_rmw(priv, IMX_MU_xCR_TIEn(cp->idx), 0);
+ break;
+ case IMX_MU_TYPE_TXDB:
+ imx_mu_xcr_rmw(priv, IMX_MU_xCR_GIRn(cp->idx), 0);
+ tasklet_schedule(&cp->txdb_tasklet);
+ break;
+ default:
+ dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int imx_mu_generic_rx(struct imx_mu_priv *priv,
+ struct imx_mu_con_priv *cp)
+{
+ u32 dat;
+
+ dat = imx_mu_read(priv, priv->dcfg->xRR[cp->idx]);
+ mbox_chan_received_data(cp->chan, (void *)&dat);
+
+ return 0;
+}
+
+static int imx_mu_scu_tx(struct imx_mu_priv *priv,
+ struct imx_mu_con_priv *cp,
+ void *data)
+{
+ struct imx_sc_rpc_msg_max *msg = data;
+ u32 *arg = data;
+ int i, ret;
+ u32 xsr;
+
+ switch (cp->type) {
+ case IMX_MU_TYPE_TX:
+ /*
+ * msg->hdr.size specifies the number of u32 words while
+ * sizeof yields bytes.
+ */
+
+ if (msg->hdr.size > sizeof(*msg) / 4) {
+ /*
+			 * The real message size can differ from the
+			 * size of struct imx_sc_rpc_msg_max
+ */
+ dev_err(priv->dev, "Maximal message size (%zu bytes) exceeded on TX; got: %i bytes\n", sizeof(*msg), msg->hdr.size << 2);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < 4 && i < msg->hdr.size; i++)
+ imx_mu_write(priv, *arg++, priv->dcfg->xTR[i % 4]);
+ for (; i < msg->hdr.size; i++) {
+ ret = readl_poll_timeout(priv->base + priv->dcfg->xSR,
+ xsr,
+ xsr & IMX_MU_xSR_TEn(i % 4),
+ 0, 100);
+ if (ret) {
+ dev_err(priv->dev, "Send data index: %d timeout\n", i);
+ return ret;
+ }
+ imx_mu_write(priv, *arg++, priv->dcfg->xTR[i % 4]);
+ }
+
+ imx_mu_xcr_rmw(priv, IMX_MU_xCR_TIEn(cp->idx), 0);
+ break;
+ default:
+ dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int imx_mu_scu_rx(struct imx_mu_priv *priv,
+ struct imx_mu_con_priv *cp)
+{
+ struct imx_sc_rpc_msg_max msg;
+ u32 *data = (u32 *)&msg;
+ int i, ret;
+ u32 xsr;
+
+ imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_RIEn(0));
+ *data++ = imx_mu_read(priv, priv->dcfg->xRR[0]);
+
+ if (msg.hdr.size > sizeof(msg) / 4) {
+ dev_err(priv->dev, "Maximal message size (%zu bytes) exceeded on RX; got: %i bytes\n", sizeof(msg), msg.hdr.size << 2);
+ return -EINVAL;
+ }
+
+ for (i = 1; i < msg.hdr.size; i++) {
+ ret = readl_poll_timeout(priv->base + priv->dcfg->xSR, xsr,
+ xsr & IMX_MU_xSR_RFn(i % 4), 0, 100);
+ if (ret) {
+ dev_err(priv->dev, "timeout read idx %d\n", i);
+ return ret;
+ }
+ *data++ = imx_mu_read(priv, priv->dcfg->xRR[i % 4]);
+ }
+
+ imx_mu_xcr_rmw(priv, IMX_MU_xCR_RIEn(0), 0);
+ mbox_chan_received_data(cp->chan, (void *)&msg);
+
+ return 0;
+}
+
+static void imx_mu_txdb_tasklet(unsigned long data)
+{
+ struct imx_mu_con_priv *cp = (struct imx_mu_con_priv *)data;
+
+ mbox_chan_txdone(cp->chan, 0);
+}
+
+static irqreturn_t imx_mu_isr(int irq, void *p)
+{
+ struct mbox_chan *chan = p;
+ struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
+ struct imx_mu_con_priv *cp = chan->con_priv;
+ u32 val, ctrl;
+
+ ctrl = imx_mu_read(priv, priv->dcfg->xCR);
+ val = imx_mu_read(priv, priv->dcfg->xSR);
+
+ switch (cp->type) {
+ case IMX_MU_TYPE_TX:
+ val &= IMX_MU_xSR_TEn(cp->idx) &
+ (ctrl & IMX_MU_xCR_TIEn(cp->idx));
+ break;
+ case IMX_MU_TYPE_RX:
+ val &= IMX_MU_xSR_RFn(cp->idx) &
+ (ctrl & IMX_MU_xCR_RIEn(cp->idx));
+ break;
+ case IMX_MU_TYPE_RXDB:
+ val &= IMX_MU_xSR_GIPn(cp->idx) &
+ (ctrl & IMX_MU_xCR_GIEn(cp->idx));
+ break;
+ default:
+ break;
+ }
+
+ if (!val)
+ return IRQ_NONE;
+
+ if (val == IMX_MU_xSR_TEn(cp->idx)) {
+ imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_TIEn(cp->idx));
+ mbox_chan_txdone(chan, 0);
+ } else if (val == IMX_MU_xSR_RFn(cp->idx)) {
+ priv->dcfg->rx(priv, cp);
+ } else if (val == IMX_MU_xSR_GIPn(cp->idx)) {
+ imx_mu_write(priv, IMX_MU_xSR_GIPn(cp->idx), priv->dcfg->xSR);
+ mbox_chan_received_data(chan, NULL);
+ } else {
+ dev_warn_ratelimited(priv->dev, "Not handled interrupt\n");
+ return IRQ_NONE;
+ }
+
+ if (priv->suspend)
+ pm_system_wakeup();
+
+ return IRQ_HANDLED;
+}
+
+static int imx_mu_send_data(struct mbox_chan *chan, void *data)
+{
+ struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
+ struct imx_mu_con_priv *cp = chan->con_priv;
+
+ return priv->dcfg->tx(priv, cp, data);
+}
+
+static int imx_mu_startup(struct mbox_chan *chan)
+{
+ struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
+ struct imx_mu_con_priv *cp = chan->con_priv;
+ unsigned long irq_flag = IRQF_SHARED;
+ int ret;
+
+ pm_runtime_get_sync(priv->dev);
+ if (cp->type == IMX_MU_TYPE_TXDB) {
+		/* Tx doorbell channels don't have ACK support */
+ tasklet_init(&cp->txdb_tasklet, imx_mu_txdb_tasklet,
+ (unsigned long)cp);
+ return 0;
+ }
+
+	/* The IPC MU interrupt should be requested with IRQF_NO_SUSPEND set */
+ if (!priv->dev->pm_domain)
+ irq_flag |= IRQF_NO_SUSPEND;
+
+ ret = request_irq(priv->irq, imx_mu_isr, irq_flag,
+ cp->irq_desc, chan);
+ if (ret) {
+ dev_err(priv->dev,
+ "Unable to acquire IRQ %d\n", priv->irq);
+ return ret;
+ }
+
+ switch (cp->type) {
+ case IMX_MU_TYPE_RX:
+ imx_mu_xcr_rmw(priv, IMX_MU_xCR_RIEn(cp->idx), 0);
+ break;
+ case IMX_MU_TYPE_RXDB:
+ imx_mu_xcr_rmw(priv, IMX_MU_xCR_GIEn(cp->idx), 0);
+ break;
+ default:
+ break;
+ }
+
+ priv->suspend = true;
+
+ return 0;
+}
+
+static void imx_mu_shutdown(struct mbox_chan *chan)
+{
+ struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
+ struct imx_mu_con_priv *cp = chan->con_priv;
+
+ if (cp->type == IMX_MU_TYPE_TXDB) {
+ tasklet_kill(&cp->txdb_tasklet);
+ pm_runtime_put_sync(priv->dev);
+ return;
+ }
+
+ switch (cp->type) {
+ case IMX_MU_TYPE_TX:
+ imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_TIEn(cp->idx));
+ break;
+ case IMX_MU_TYPE_RX:
+ imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_RIEn(cp->idx));
+ break;
+ case IMX_MU_TYPE_RXDB:
+ imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_GIEn(cp->idx));
+ break;
+ default:
+ break;
+ }
+
+ free_irq(priv->irq, chan);
+ pm_runtime_put_sync(priv->dev);
+}
+
+static const struct mbox_chan_ops imx_mu_ops = {
+ .send_data = imx_mu_send_data,
+ .startup = imx_mu_startup,
+ .shutdown = imx_mu_shutdown,
+};
+
+static struct mbox_chan *imx_mu_scu_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *sp)
+{
+ u32 type, idx, chan;
+
+ if (sp->args_count != 2) {
+ dev_err(mbox->dev, "Invalid argument count %d\n", sp->args_count);
+ return ERR_PTR(-EINVAL);
+ }
+
+ type = sp->args[0]; /* channel type */
+ idx = sp->args[1]; /* index */
+
+ switch (type) {
+ case IMX_MU_TYPE_TX:
+ case IMX_MU_TYPE_RX:
+ if (idx != 0)
+ dev_err(mbox->dev, "Invalid chan idx: %d\n", idx);
+ chan = type;
+ break;
+ case IMX_MU_TYPE_RXDB:
+ chan = 2 + idx;
+ break;
+ default:
+ dev_err(mbox->dev, "Invalid chan type: %d\n", type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (chan >= mbox->num_chans) {
+ dev_err(mbox->dev, "Not supported channel number: %d. (type: %d, idx: %d)\n", chan, type, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &mbox->chans[chan];
+}
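+
+/*
+ * SCU channel numbering implied above: chan 0 = TX0, chan 1 = RX0 and
+ * chans 2..5 = RXDB0..RXDB3. A client references them with <type idx>
+ * cells, e.g. (illustrative, &lsio_mu1 being a placeholder label):
+ *
+ *	mboxes = <&lsio_mu1 0 0>, <&lsio_mu1 1 0>, <&lsio_mu1 3 0>;
+ */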
+
+static struct mbox_chan *imx_mu_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *sp)
+{
+ u32 type, idx, chan;
+
+ if (sp->args_count != 2) {
+ dev_err(mbox->dev, "Invalid argument count %d\n", sp->args_count);
+ return ERR_PTR(-EINVAL);
+ }
+
+ type = sp->args[0]; /* channel type */
+ idx = sp->args[1]; /* index */
+ chan = type * 4 + idx;
+
+ if (chan >= mbox->num_chans) {
+ dev_err(mbox->dev, "Not supported channel number: %d. (type: %d, idx: %d)\n", chan, type, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &mbox->chans[chan];
+}
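+
+/*
+ * For the generic (non-SCU) MU the channel is simply type * 4 + idx, so
+ * e.g. <0 1> selects TX1 and <1 1> selects RX1 (values illustrative).
+ */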
+
+static void imx_mu_init_generic(struct imx_mu_priv *priv)
+{
+ unsigned int i;
+
+ for (i = 0; i < IMX_MU_CHANS; i++) {
+ struct imx_mu_con_priv *cp = &priv->con_priv[i];
+
+ cp->idx = i % 4;
+ cp->type = i >> 2;
+ cp->chan = &priv->mbox_chans[i];
+ priv->mbox_chans[i].con_priv = cp;
+ snprintf(cp->irq_desc, sizeof(cp->irq_desc),
+ "imx_mu_chan[%i-%i]", cp->type, cp->idx);
+ }
+
+ priv->mbox.num_chans = IMX_MU_CHANS;
+ priv->mbox.of_xlate = imx_mu_xlate;
+
+ if (priv->side_b)
+ return;
+
+ /* Set default MU configuration */
+ imx_mu_write(priv, 0, priv->dcfg->xCR);
+}
+
+static void imx_mu_init_scu(struct imx_mu_priv *priv)
+{
+ unsigned int i;
+
+ for (i = 0; i < IMX_MU_SCU_CHANS; i++) {
+ struct imx_mu_con_priv *cp = &priv->con_priv[i];
+
+ cp->idx = i < 2 ? 0 : i - 2;
+ cp->type = i < 2 ? i : IMX_MU_TYPE_RXDB;
+ cp->chan = &priv->mbox_chans[i];
+ priv->mbox_chans[i].con_priv = cp;
+ snprintf(cp->irq_desc, sizeof(cp->irq_desc),
+ "imx_mu_chan[%i-%i]", cp->type, cp->idx);
+ }
+
+ priv->mbox.num_chans = IMX_MU_SCU_CHANS;
+ priv->mbox.of_xlate = imx_mu_scu_xlate;
+
+ /* Set default MU configuration */
+ imx_mu_write(priv, 0, priv->dcfg->xCR);
+}
+
+static int imx_mu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct imx_mu_priv *priv;
+ const struct imx_mu_dcfg *dcfg;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->irq = platform_get_irq(pdev, 0);
+ if (priv->irq < 0)
+ return priv->irq;
+
+ dcfg = of_device_get_match_data(dev);
+ if (!dcfg)
+ return -EINVAL;
+ priv->dcfg = dcfg;
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ if (PTR_ERR(priv->clk) != -ENOENT)
+ return PTR_ERR(priv->clk);
+
+ priv->clk = NULL;
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable clock\n");
+ return ret;
+ }
+
+ priv->side_b = of_property_read_bool(np, "fsl,mu-side-b");
+
+ priv->dcfg->init(priv);
+
+ spin_lock_init(&priv->xcr_lock);
+
+ priv->mbox.dev = dev;
+ priv->mbox.ops = &imx_mu_ops;
+ priv->mbox.chans = priv->mbox_chans;
+ priv->mbox.txdone_irq = true;
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = devm_mbox_controller_register(dev, &priv->mbox);
+ if (ret) {
+ clk_disable_unprepare(priv->clk);
+ return ret;
+ }
+
+ pm_runtime_enable(dev);
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ goto disable_runtime_pm;
+ }
+
+ ret = pm_runtime_put_sync(dev);
+ if (ret < 0)
+ goto disable_runtime_pm;
+
+ clk_disable_unprepare(priv->clk);
+
+ priv->suspend = false;
+
+ return 0;
+
+disable_runtime_pm:
+ pm_runtime_disable(dev);
+ clk_disable_unprepare(priv->clk);
+ return ret;
+}
+
+static int imx_mu_remove(struct platform_device *pdev)
+{
+ struct imx_mu_priv *priv = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(priv->dev);
+
+ return 0;
+}
+
+static const struct imx_mu_dcfg imx_mu_cfg_imx6sx = {
+ .tx = imx_mu_generic_tx,
+ .rx = imx_mu_generic_rx,
+ .init = imx_mu_init_generic,
+ .xTR = {0x0, 0x4, 0x8, 0xc},
+ .xRR = {0x10, 0x14, 0x18, 0x1c},
+ .xSR = 0x20,
+ .xCR = 0x24,
+};
+
+static const struct imx_mu_dcfg imx_mu_cfg_imx7ulp = {
+ .tx = imx_mu_generic_tx,
+ .rx = imx_mu_generic_rx,
+ .init = imx_mu_init_generic,
+ .xTR = {0x20, 0x24, 0x28, 0x2c},
+ .xRR = {0x40, 0x44, 0x48, 0x4c},
+ .xSR = 0x60,
+ .xCR = 0x64,
+};
+
+static const struct imx_mu_dcfg imx_mu_cfg_imx8_scu = {
+ .tx = imx_mu_scu_tx,
+ .rx = imx_mu_scu_rx,
+ .init = imx_mu_init_scu,
+ .xTR = {0x0, 0x4, 0x8, 0xc},
+ .xRR = {0x10, 0x14, 0x18, 0x1c},
+ .xSR = 0x20,
+ .xCR = 0x24,
+};
+
+static const struct of_device_id imx_mu_dt_ids[] = {
+ { .compatible = "fsl,imx7ulp-mu", .data = &imx_mu_cfg_imx7ulp },
+ { .compatible = "fsl,imx6sx-mu", .data = &imx_mu_cfg_imx6sx },
+ { .compatible = "fsl,imx8-mu-scu", .data = &imx_mu_cfg_imx8_scu },
+ { },
+};
+MODULE_DEVICE_TABLE(of, imx_mu_dt_ids);
+
+static int __maybe_unused imx_mu_suspend_noirq(struct device *dev)
+{
+ struct imx_mu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv->clk)
+ priv->xcr = imx_mu_read(priv, priv->dcfg->xCR);
+
+ return 0;
+}
+
+static int __maybe_unused imx_mu_resume_noirq(struct device *dev)
+{
+ struct imx_mu_priv *priv = dev_get_drvdata(dev);
+
+ /*
+	 * Only restore the MU configuration when the context has been lost.
+	 * The TIE bits may legitimately be set during noirq resume if MU data
+	 * communication is still in progress; blindly restoring the saved
+	 * value would overwrite the TIE bits and make MU data transmission
+	 * fail, which may lead to a system freeze. This issue was observed
+	 * while testing freeze-mode suspend.
+ */
+ if (!imx_mu_read(priv, priv->dcfg->xCR) && !priv->clk)
+ imx_mu_write(priv, priv->xcr, priv->dcfg->xCR);
+
+ return 0;
+}
+
+static int __maybe_unused imx_mu_runtime_suspend(struct device *dev)
+{
+ struct imx_mu_priv *priv = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static int __maybe_unused imx_mu_runtime_resume(struct device *dev)
+{
+ struct imx_mu_priv *priv = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ dev_err(dev, "failed to enable clock\n");
+
+ return ret;
+}
+
+static const struct dev_pm_ops imx_mu_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_mu_suspend_noirq,
+ imx_mu_resume_noirq)
+ SET_RUNTIME_PM_OPS(imx_mu_runtime_suspend,
+ imx_mu_runtime_resume, NULL)
+};
+
+static struct platform_driver imx_mu_driver = {
+ .probe = imx_mu_probe,
+ .remove = imx_mu_remove,
+ .driver = {
+ .name = "imx_mu",
+ .of_match_table = imx_mu_dt_ids,
+ .pm = &imx_mu_pm_ops,
+ },
+};
+module_platform_driver(imx_mu_driver);
+
+MODULE_AUTHOR("Oleksij Rempel <o.rempel@pengutronix.de>");
+MODULE_DESCRIPTION("Message Unit driver for i.MX");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/mailbox-altera.c b/drivers/mailbox/mailbox-altera.c
new file mode 100644
index 000000000..75282666f
--- /dev/null
+++ b/drivers/mailbox/mailbox-altera.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright Altera Corporation (C) 2013-2014. All rights reserved
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#define DRIVER_NAME "altera-mailbox"
+
+#define MAILBOX_CMD_REG 0x00
+#define MAILBOX_PTR_REG 0x04
+#define MAILBOX_STS_REG 0x08
+#define MAILBOX_INTMASK_REG 0x0C
+
+#define INT_PENDING_MSK 0x1
+#define INT_SPACE_MSK 0x2
+
+#define STS_PENDING_MSK 0x1
+#define STS_FULL_MSK 0x2
+#define STS_FULL_OFT 0x1
+
+#define MBOX_PENDING(status) (((status) & STS_PENDING_MSK))
+#define MBOX_FULL(status) (((status) & STS_FULL_MSK) >> STS_FULL_OFT)
+
+enum altera_mbox_msg {
+ MBOX_CMD = 0,
+ MBOX_PTR,
+};
+
+#define MBOX_POLLING_MS 5 /* polling interval 5ms */
+
+struct altera_mbox {
+ bool is_sender; /* 1-sender, 0-receiver */
+ bool intr_mode;
+ int irq;
+ void __iomem *mbox_base;
+ struct device *dev;
+ struct mbox_controller controller;
+
+ /* If the controller supports only RX polling mode */
+ struct timer_list rxpoll_timer;
+ struct mbox_chan *chan;
+};
+
+static struct altera_mbox *mbox_chan_to_altera_mbox(struct mbox_chan *chan)
+{
+ if (!chan || !chan->con_priv)
+ return NULL;
+
+ return (struct altera_mbox *)chan->con_priv;
+}
+
+static inline int altera_mbox_full(struct altera_mbox *mbox)
+{
+ u32 status;
+
+ status = readl_relaxed(mbox->mbox_base + MAILBOX_STS_REG);
+ return MBOX_FULL(status);
+}
+
+static inline int altera_mbox_pending(struct altera_mbox *mbox)
+{
+ u32 status;
+
+ status = readl_relaxed(mbox->mbox_base + MAILBOX_STS_REG);
+ return MBOX_PENDING(status);
+}
+
+static void altera_mbox_rx_intmask(struct altera_mbox *mbox, bool enable)
+{
+ u32 mask;
+
+ mask = readl_relaxed(mbox->mbox_base + MAILBOX_INTMASK_REG);
+ if (enable)
+ mask |= INT_PENDING_MSK;
+ else
+ mask &= ~INT_PENDING_MSK;
+ writel_relaxed(mask, mbox->mbox_base + MAILBOX_INTMASK_REG);
+}
+
+static void altera_mbox_tx_intmask(struct altera_mbox *mbox, bool enable)
+{
+ u32 mask;
+
+ mask = readl_relaxed(mbox->mbox_base + MAILBOX_INTMASK_REG);
+ if (enable)
+ mask |= INT_SPACE_MSK;
+ else
+ mask &= ~INT_SPACE_MSK;
+ writel_relaxed(mask, mbox->mbox_base + MAILBOX_INTMASK_REG);
+}
+
+static bool altera_mbox_is_sender(struct altera_mbox *mbox)
+{
+ u32 reg;
+ /* Write a magic number to PTR register and read back this register.
+ * This register is read-write if it is a sender.
+ */
+ #define MBOX_MAGIC 0xA5A5AA55
+ writel_relaxed(MBOX_MAGIC, mbox->mbox_base + MAILBOX_PTR_REG);
+ reg = readl_relaxed(mbox->mbox_base + MAILBOX_PTR_REG);
+ if (reg == MBOX_MAGIC) {
+ /* Clear to 0 */
+ writel_relaxed(0, mbox->mbox_base + MAILBOX_PTR_REG);
+ return true;
+ }
+ return false;
+}
+
+static void altera_mbox_rx_data(struct mbox_chan *chan)
+{
+ struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+ u32 data[2];
+
+ if (altera_mbox_pending(mbox)) {
+ data[MBOX_PTR] =
+ readl_relaxed(mbox->mbox_base + MAILBOX_PTR_REG);
+ data[MBOX_CMD] =
+ readl_relaxed(mbox->mbox_base + MAILBOX_CMD_REG);
+ mbox_chan_received_data(chan, (void *)data);
+ }
+}
+
+static void altera_mbox_poll_rx(struct timer_list *t)
+{
+ struct altera_mbox *mbox = from_timer(mbox, t, rxpoll_timer);
+
+ altera_mbox_rx_data(mbox->chan);
+
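+	/* Re-arm the timer for the next polling interval */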
+ mod_timer(&mbox->rxpoll_timer,
+ jiffies + msecs_to_jiffies(MBOX_POLLING_MS));
+}
+
+static irqreturn_t altera_mbox_tx_interrupt(int irq, void *p)
+{
+ struct mbox_chan *chan = (struct mbox_chan *)p;
+ struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+
+ altera_mbox_tx_intmask(mbox, false);
+ mbox_chan_txdone(chan, 0);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t altera_mbox_rx_interrupt(int irq, void *p)
+{
+ struct mbox_chan *chan = (struct mbox_chan *)p;
+
+ altera_mbox_rx_data(chan);
+ return IRQ_HANDLED;
+}
+
+static int altera_mbox_startup_sender(struct mbox_chan *chan)
+{
+ int ret;
+ struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+
+ if (mbox->intr_mode) {
+ ret = request_irq(mbox->irq, altera_mbox_tx_interrupt, 0,
+ DRIVER_NAME, chan);
+ if (unlikely(ret)) {
+ dev_err(mbox->dev,
+ "failed to register mailbox interrupt:%d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int altera_mbox_startup_receiver(struct mbox_chan *chan)
+{
+ int ret;
+ struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+
+ if (mbox->intr_mode) {
+ ret = request_irq(mbox->irq, altera_mbox_rx_interrupt, 0,
+ DRIVER_NAME, chan);
+ if (unlikely(ret)) {
+ mbox->intr_mode = false;
+ goto polling; /* use polling if failed */
+ }
+
+ altera_mbox_rx_intmask(mbox, true);
+ return 0;
+ }
+
+polling:
+ /* Setup polling timer */
+ mbox->chan = chan;
+ timer_setup(&mbox->rxpoll_timer, altera_mbox_poll_rx, 0);
+ mod_timer(&mbox->rxpoll_timer,
+ jiffies + msecs_to_jiffies(MBOX_POLLING_MS));
+
+ return 0;
+}
+
+static int altera_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+ u32 *udata = (u32 *)data;
+
+ if (!mbox || !data)
+ return -EINVAL;
+ if (!mbox->is_sender) {
+ dev_warn(mbox->dev,
+			 "failed to send. This is a receiver mailbox.\n");
+ return -EINVAL;
+ }
+
+ if (altera_mbox_full(mbox))
+ return -EBUSY;
+
+ /* Enable interrupt before send */
+ if (mbox->intr_mode)
+ altera_mbox_tx_intmask(mbox, true);
+
+	/* The pointer register must be written before the command register */
+ writel_relaxed(udata[MBOX_PTR], mbox->mbox_base + MAILBOX_PTR_REG);
+ writel_relaxed(udata[MBOX_CMD], mbox->mbox_base + MAILBOX_CMD_REG);
+
+ return 0;
+}
+
+static bool altera_mbox_last_tx_done(struct mbox_chan *chan)
+{
+ struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+
+ /* Return false if mailbox is full */
+ return altera_mbox_full(mbox) ? false : true;
+}
+
+static bool altera_mbox_peek_data(struct mbox_chan *chan)
+{
+ struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+
+ return altera_mbox_pending(mbox) ? true : false;
+}
+
+static int altera_mbox_startup(struct mbox_chan *chan)
+{
+ struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+ int ret = 0;
+
+ if (!mbox)
+ return -EINVAL;
+
+ if (mbox->is_sender)
+ ret = altera_mbox_startup_sender(chan);
+ else
+ ret = altera_mbox_startup_receiver(chan);
+
+ return ret;
+}
+
+static void altera_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+
+ if (mbox->intr_mode) {
+ /* Unmask all interrupt masks */
+ writel_relaxed(~0, mbox->mbox_base + MAILBOX_INTMASK_REG);
+ free_irq(mbox->irq, chan);
+ } else if (!mbox->is_sender) {
+ del_timer_sync(&mbox->rxpoll_timer);
+ }
+}
+
+static const struct mbox_chan_ops altera_mbox_ops = {
+ .send_data = altera_mbox_send_data,
+ .startup = altera_mbox_startup,
+ .shutdown = altera_mbox_shutdown,
+ .last_tx_done = altera_mbox_last_tx_done,
+ .peek_data = altera_mbox_peek_data,
+};
+
+static int altera_mbox_probe(struct platform_device *pdev)
+{
+ struct altera_mbox *mbox;
+ struct resource *regs;
+ struct mbox_chan *chans;
+ int ret;
+
+ mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox),
+ GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+	/* Allocate one channel */
+ chans = devm_kzalloc(&pdev->dev, sizeof(*chans), GFP_KERNEL);
+ if (!chans)
+ return -ENOMEM;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ mbox->mbox_base = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(mbox->mbox_base))
+ return PTR_ERR(mbox->mbox_base);
+
+	/* Check whether it is a sender or a receiver */
+ mbox->is_sender = altera_mbox_is_sender(mbox);
+
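+	/* Use interrupt mode if an IRQ is provided, else poll */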
+ mbox->irq = platform_get_irq(pdev, 0);
+ if (mbox->irq >= 0)
+ mbox->intr_mode = true;
+
+ mbox->dev = &pdev->dev;
+
+ /* Hardware supports only one channel. */
+ chans[0].con_priv = mbox;
+ mbox->controller.dev = mbox->dev;
+ mbox->controller.num_chans = 1;
+ mbox->controller.chans = chans;
+ mbox->controller.ops = &altera_mbox_ops;
+
+ if (mbox->is_sender) {
+ if (mbox->intr_mode) {
+ mbox->controller.txdone_irq = true;
+ } else {
+ mbox->controller.txdone_poll = true;
+ mbox->controller.txpoll_period = MBOX_POLLING_MS;
+ }
+ }
+
+ ret = devm_mbox_controller_register(&pdev->dev, &mbox->controller);
+ if (ret) {
+ dev_err(&pdev->dev, "Register mailbox failed\n");
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, mbox);
+err:
+ return ret;
+}
+
+static const struct of_device_id altera_mbox_match[] = {
+ { .compatible = "altr,mailbox-1.0" },
+ { /* Sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, altera_mbox_match);
+
+static struct platform_driver altera_mbox_driver = {
+ .probe = altera_mbox_probe,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = altera_mbox_match,
+ },
+};
+
+module_platform_driver(altera_mbox_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Altera mailbox specific functions");
+MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>");
+MODULE_ALIAS("platform:altera-mailbox");
diff --git a/drivers/mailbox/mailbox-sti.c b/drivers/mailbox/mailbox-sti.c
new file mode 100644
index 000000000..2baf69a0b
--- /dev/null
+++ b/drivers/mailbox/mailbox-sti.c
@@ -0,0 +1,499 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * STi Mailbox
+ *
+ * Copyright (C) 2015 ST Microelectronics
+ *
+ * Author: Lee Jones <lee.jones@linaro.org> for ST Microelectronics
+ *
+ * Based on the original driver written by:
+ * Alexandre Torgue, Olivier Lebreton and Loic Pallardy
+ */
+
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "mailbox.h"
+
+#define STI_MBOX_INST_MAX 4 /* RAM saving: Max supported instances */
+#define STI_MBOX_CHAN_MAX 20 /* RAM saving: Max supported channels */
+
+#define STI_IRQ_VAL_OFFSET 0x04 /* Read interrupt status */
+#define STI_IRQ_SET_OFFSET 0x24 /* Generate a Tx channel interrupt */
+#define STI_IRQ_CLR_OFFSET 0x44 /* Clear pending Rx interrupts */
+#define STI_ENA_VAL_OFFSET 0x64 /* Read enable status */
+#define STI_ENA_SET_OFFSET 0x84 /* Enable a channel */
+#define STI_ENA_CLR_OFFSET 0xa4 /* Disable a channel */
+
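+/* Per-instance registers are interleaved at a 4-byte stride */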
+#define MBOX_BASE(mdev, inst) ((mdev)->base + ((inst) * 4))
+
+/**
+ * STi Mailbox device data
+ *
+ * An IP Mailbox is currently composed of 4 instances
+ * Each instance is currently composed of 32 channels
+ * This means that we have 128 channels per Mailbox
+ * A channel can be used for TX or RX
+ *
+ * @dev: Device to which it is attached
+ * @mbox: Representation of a communication channel controller
+ * @base: Base address of the register mapping region
+ * @name: Name of the mailbox
+ * @enabled: Local copy of enabled channels
+ * @lock: Mutex protecting enabled status
+ */
+struct sti_mbox_device {
+ struct device *dev;
+ struct mbox_controller *mbox;
+ void __iomem *base;
+ const char *name;
+ u32 enabled[STI_MBOX_INST_MAX];
+ spinlock_t lock;
+};
+
+/**
+ * STi Mailbox platform specific configuration
+ *
+ * @num_inst: Maximum number of instances in one HW Mailbox
+ * @num_chan: Maximum number of channel per instance
+ */
+struct sti_mbox_pdata {
+ unsigned int num_inst;
+ unsigned int num_chan;
+};
+
+/**
+ * STi Mailbox allocated channel information
+ *
+ * @mdev: Pointer to parent Mailbox device
+ * @instance: Instance number channel resides in
+ * @channel: Channel number pertaining to this container
+ */
+struct sti_channel {
+ struct sti_mbox_device *mdev;
+ unsigned int instance;
+ unsigned int channel;
+};
+
+static inline bool sti_mbox_channel_is_enabled(struct mbox_chan *chan)
+{
+ struct sti_channel *chan_info = chan->con_priv;
+ struct sti_mbox_device *mdev = chan_info->mdev;
+ unsigned int instance = chan_info->instance;
+ unsigned int channel = chan_info->channel;
+
+ return mdev->enabled[instance] & BIT(channel);
+}
+
+static inline
+struct mbox_chan *sti_mbox_to_channel(struct mbox_controller *mbox,
+ unsigned int instance,
+ unsigned int channel)
+{
+ struct sti_channel *chan_info;
+ int i;
+
+ for (i = 0; i < mbox->num_chans; i++) {
+ chan_info = mbox->chans[i].con_priv;
+ if (chan_info &&
+ chan_info->instance == instance &&
+ chan_info->channel == channel)
+ return &mbox->chans[i];
+ }
+
+ dev_err(mbox->dev,
+ "Channel not registered: instance: %d channel: %d\n",
+ instance, channel);
+
+ return NULL;
+}
+
+static void sti_mbox_enable_channel(struct mbox_chan *chan)
+{
+ struct sti_channel *chan_info = chan->con_priv;
+ struct sti_mbox_device *mdev = chan_info->mdev;
+ unsigned int instance = chan_info->instance;
+ unsigned int channel = chan_info->channel;
+ unsigned long flags;
+ void __iomem *base = MBOX_BASE(mdev, instance);
+
+ spin_lock_irqsave(&mdev->lock, flags);
+ mdev->enabled[instance] |= BIT(channel);
+ writel_relaxed(BIT(channel), base + STI_ENA_SET_OFFSET);
+ spin_unlock_irqrestore(&mdev->lock, flags);
+}
+
+static void sti_mbox_disable_channel(struct mbox_chan *chan)
+{
+ struct sti_channel *chan_info = chan->con_priv;
+ struct sti_mbox_device *mdev = chan_info->mdev;
+ unsigned int instance = chan_info->instance;
+ unsigned int channel = chan_info->channel;
+ unsigned long flags;
+ void __iomem *base = MBOX_BASE(mdev, instance);
+
+ spin_lock_irqsave(&mdev->lock, flags);
+ mdev->enabled[instance] &= ~BIT(channel);
+ writel_relaxed(BIT(channel), base + STI_ENA_CLR_OFFSET);
+ spin_unlock_irqrestore(&mdev->lock, flags);
+}
+
+static void sti_mbox_clear_irq(struct mbox_chan *chan)
+{
+ struct sti_channel *chan_info = chan->con_priv;
+ struct sti_mbox_device *mdev = chan_info->mdev;
+ unsigned int instance = chan_info->instance;
+ unsigned int channel = chan_info->channel;
+ void __iomem *base = MBOX_BASE(mdev, instance);
+
+ writel_relaxed(BIT(channel), base + STI_IRQ_CLR_OFFSET);
+}
+
+static struct mbox_chan *sti_mbox_irq_to_channel(struct sti_mbox_device *mdev,
+ unsigned int instance)
+{
+ struct mbox_controller *mbox = mdev->mbox;
+ struct mbox_chan *chan = NULL;
+ unsigned int channel;
+ unsigned long bits;
+ void __iomem *base = MBOX_BASE(mdev, instance);
+
+ bits = readl_relaxed(base + STI_IRQ_VAL_OFFSET);
+ if (!bits)
+ /* No IRQs fired in specified instance */
+ return NULL;
+
+ /* An IRQ has fired, find the associated channel */
+ for (channel = 0; bits; channel++) {
+ if (!test_and_clear_bit(channel, &bits))
+ continue;
+
+ chan = sti_mbox_to_channel(mbox, instance, channel);
+ if (chan) {
+ dev_dbg(mbox->dev,
+ "IRQ fired on instance: %d channel: %d\n",
+ instance, channel);
+ break;
+ }
+ }
+
+ return chan;
+}
+
+static irqreturn_t sti_mbox_thread_handler(int irq, void *data)
+{
+ struct sti_mbox_device *mdev = data;
+ struct sti_mbox_pdata *pdata = dev_get_platdata(mdev->dev);
+ struct mbox_chan *chan;
+ unsigned int instance;
+
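+	/* Walk every instance, draining all of its pending channels */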
+ for (instance = 0; instance < pdata->num_inst; instance++) {
+keep_looking:
+ chan = sti_mbox_irq_to_channel(mdev, instance);
+ if (!chan)
+ continue;
+
+ mbox_chan_received_data(chan, NULL);
+ sti_mbox_clear_irq(chan);
+ sti_mbox_enable_channel(chan);
+ goto keep_looking;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t sti_mbox_irq_handler(int irq, void *data)
+{
+ struct sti_mbox_device *mdev = data;
+ struct sti_mbox_pdata *pdata = dev_get_platdata(mdev->dev);
+ struct sti_channel *chan_info;
+ struct mbox_chan *chan;
+ unsigned int instance;
+ int ret = IRQ_NONE;
+
+ for (instance = 0; instance < pdata->num_inst; instance++) {
+ chan = sti_mbox_irq_to_channel(mdev, instance);
+ if (!chan)
+ continue;
+ chan_info = chan->con_priv;
+
+ if (!sti_mbox_channel_is_enabled(chan)) {
+ dev_warn(mdev->dev,
+ "Unexpected IRQ: %s\n"
+ " instance: %d: channel: %d [enabled: %x]\n",
+ mdev->name, chan_info->instance,
+ chan_info->channel, mdev->enabled[instance]);
+
+ /* Only handle IRQ if no other valid IRQs were found */
+ if (ret == IRQ_NONE)
+ ret = IRQ_HANDLED;
+ continue;
+ }
+
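+		/*
+		 * Mask the channel; the threaded handler re-enables it
+		 * after consuming the data.
+		 */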
+ sti_mbox_disable_channel(chan);
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ if (ret == IRQ_NONE)
+ dev_err(mdev->dev, "Spurious IRQ - was a channel requested?\n");
+
+ return ret;
+}
+
+static bool sti_mbox_tx_is_ready(struct mbox_chan *chan)
+{
+ struct sti_channel *chan_info = chan->con_priv;
+ struct sti_mbox_device *mdev = chan_info->mdev;
+ unsigned int instance = chan_info->instance;
+ unsigned int channel = chan_info->channel;
+ void __iomem *base = MBOX_BASE(mdev, instance);
+
+ if (!(readl_relaxed(base + STI_ENA_VAL_OFFSET) & BIT(channel))) {
+ dev_dbg(mdev->dev, "Mbox: %s: inst: %d, chan: %d disabled\n",
+ mdev->name, instance, channel);
+ return false;
+ }
+
+ if (readl_relaxed(base + STI_IRQ_VAL_OFFSET) & BIT(channel)) {
+ dev_dbg(mdev->dev, "Mbox: %s: inst: %d, chan: %d not ready\n",
+ mdev->name, instance, channel);
+ return false;
+ }
+
+ return true;
+}
+
+static int sti_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct sti_channel *chan_info = chan->con_priv;
+ struct sti_mbox_device *mdev = chan_info->mdev;
+ unsigned int instance = chan_info->instance;
+ unsigned int channel = chan_info->channel;
+ void __iomem *base = MBOX_BASE(mdev, instance);
+
+ /* Send event to co-processor */
+ writel_relaxed(BIT(channel), base + STI_IRQ_SET_OFFSET);
+
+ dev_dbg(mdev->dev,
+ "Sent via Mailbox %s: instance: %d channel: %d\n",
+ mdev->name, instance, channel);
+
+ return 0;
+}
+
+static int sti_mbox_startup_chan(struct mbox_chan *chan)
+{
+ sti_mbox_clear_irq(chan);
+ sti_mbox_enable_channel(chan);
+
+ return 0;
+}
+
+static void sti_mbox_shutdown_chan(struct mbox_chan *chan)
+{
+ struct sti_channel *chan_info = chan->con_priv;
+ struct mbox_controller *mbox = chan_info->mdev->mbox;
+ int i;
+
+ for (i = 0; i < mbox->num_chans; i++)
+ if (chan == &mbox->chans[i])
+ break;
+
+ if (mbox->num_chans == i) {
+ dev_warn(mbox->dev, "Request to free non-existent channel\n");
+ return;
+ }
+
+ /* Reset channel */
+ sti_mbox_disable_channel(chan);
+ sti_mbox_clear_irq(chan);
+ chan->con_priv = NULL;
+}
+
+static struct mbox_chan *sti_mbox_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *spec)
+{
+ struct sti_mbox_device *mdev = dev_get_drvdata(mbox->dev);
+ struct sti_mbox_pdata *pdata = dev_get_platdata(mdev->dev);
+ struct sti_channel *chan_info;
+ struct mbox_chan *chan = NULL;
+ unsigned int instance = spec->args[0];
+ unsigned int channel = spec->args[1];
+ int i;
+
+ /* Bounds checking */
+ if (instance >= pdata->num_inst || channel >= pdata->num_chan) {
+ dev_err(mbox->dev,
+ "Invalid channel requested instance: %d channel: %d\n",
+ instance, channel);
+ return ERR_PTR(-EINVAL);
+ }
+
+ for (i = 0; i < mbox->num_chans; i++) {
+ chan_info = mbox->chans[i].con_priv;
+
+ /* Is requested channel free? */
+ if (chan_info &&
+ mbox->dev == chan_info->mdev->dev &&
+ instance == chan_info->instance &&
+ channel == chan_info->channel) {
+
+ dev_err(mbox->dev, "Channel in use\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ /*
+ * Find the first free slot, then continue checking
+ * to see if requested channel is in use
+ */
+ if (!chan && !chan_info)
+ chan = &mbox->chans[i];
+ }
+
+ if (!chan) {
+ dev_err(mbox->dev, "No free channels left\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ chan_info = devm_kzalloc(mbox->dev, sizeof(*chan_info), GFP_KERNEL);
+ if (!chan_info)
+ return ERR_PTR(-ENOMEM);
+
+ chan_info->mdev = mdev;
+ chan_info->instance = instance;
+ chan_info->channel = channel;
+
+ chan->con_priv = chan_info;
+
+ dev_info(mbox->dev,
+ "Mbox: %s: Created channel: instance: %d channel: %d\n",
+ mdev->name, instance, channel);
+
+ return chan;
+}
+
+static const struct mbox_chan_ops sti_mbox_ops = {
+ .startup = sti_mbox_startup_chan,
+ .shutdown = sti_mbox_shutdown_chan,
+ .send_data = sti_mbox_send_data,
+ .last_tx_done = sti_mbox_tx_is_ready,
+};
+
+static const struct sti_mbox_pdata mbox_stih407_pdata = {
+ .num_inst = 4,
+ .num_chan = 32,
+};
+
+static const struct of_device_id sti_mailbox_match[] = {
+ {
+ .compatible = "st,stih407-mailbox",
+ .data = (void *)&mbox_stih407_pdata
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sti_mailbox_match);
+
+static int sti_mbox_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ struct mbox_controller *mbox;
+ struct sti_mbox_device *mdev;
+ struct device_node *np = pdev->dev.of_node;
+ struct mbox_chan *chans;
+ struct resource *res;
+ int irq;
+ int ret;
+
+ match = of_match_device(sti_mailbox_match, &pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "No configuration found\n");
+ return -ENODEV;
+ }
+ pdev->dev.platform_data = (struct sti_mbox_pdata *) match->data;
+
+ mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
+ if (!mdev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, mdev);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mdev->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mdev->base))
+ return PTR_ERR(mdev->base);
+
+ ret = of_property_read_string(np, "mbox-name", &mdev->name);
+ if (ret)
+ mdev->name = np->full_name;
+
+ mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ chans = devm_kcalloc(&pdev->dev,
+ STI_MBOX_CHAN_MAX, sizeof(*chans), GFP_KERNEL);
+ if (!chans)
+ return -ENOMEM;
+
+ mdev->dev = &pdev->dev;
+ mdev->mbox = mbox;
+
+ spin_lock_init(&mdev->lock);
+
+ /* STi Mailbox does not have a Tx-Done or Tx-Ready IRQ */
+ mbox->txdone_irq = false;
+ mbox->txdone_poll = true;
+ mbox->txpoll_period = 100;
+ mbox->ops = &sti_mbox_ops;
+ mbox->dev = mdev->dev;
+ mbox->of_xlate = sti_mbox_xlate;
+ mbox->chans = chans;
+ mbox->num_chans = STI_MBOX_CHAN_MAX;
+
+ ret = devm_mbox_controller_register(&pdev->dev, mbox);
+ if (ret)
+ return ret;
+
+ /* It's okay for Tx Mailboxes to not supply IRQs */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_info(&pdev->dev,
+ "%s: Registered Tx only Mailbox\n", mdev->name);
+ return 0;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq,
+ sti_mbox_irq_handler,
+ sti_mbox_thread_handler,
+ IRQF_ONESHOT, mdev->name, mdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't claim IRQ %d\n", irq);
+ return -EINVAL;
+ }
+
+ dev_info(&pdev->dev, "%s: Registered Tx/Rx Mailbox\n", mdev->name);
+
+ return 0;
+}
+
+static struct platform_driver sti_mbox_driver = {
+ .probe = sti_mbox_probe,
+ .driver = {
+ .name = "sti-mailbox",
+ .of_match_table = sti_mailbox_match,
+ },
+};
+module_platform_driver(sti_mbox_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("STMicroelectronics Mailbox Controller");
+MODULE_AUTHOR("Lee Jones <lee.jones@linaro.org>");
+MODULE_ALIAS("platform:mailbox-sti");
diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c
new file mode 100644
index 000000000..abcee58e8
--- /dev/null
+++ b/drivers/mailbox/mailbox-test.c
@@ -0,0 +1,454 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2015 ST Microelectronics
+ *
+ * Author: Lee Jones <lee.jones@linaro.org>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/sched/signal.h>
+
+#define MBOX_MAX_SIG_LEN 8
+#define MBOX_MAX_MSG_LEN 128
+#define MBOX_BYTES_PER_LINE 16
+#define MBOX_HEXDUMP_LINE_LEN ((MBOX_BYTES_PER_LINE * 4) + 2)
+#define MBOX_HEXDUMP_MAX_LEN (MBOX_HEXDUMP_LINE_LEN * \
+ (MBOX_MAX_MSG_LEN / MBOX_BYTES_PER_LINE))
+
+static bool mbox_data_ready;
+
+struct mbox_test_device {
+ struct device *dev;
+ void __iomem *tx_mmio;
+ void __iomem *rx_mmio;
+ struct mbox_chan *tx_channel;
+ struct mbox_chan *rx_channel;
+ char *rx_buffer;
+ char *signal;
+ char *message;
+ spinlock_t lock;
+ struct mutex mutex;
+ wait_queue_head_t waitq;
+ struct fasync_struct *async_queue;
+ struct dentry *root_debugfs_dir;
+};
+
+static ssize_t mbox_test_signal_write(struct file *filp,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct mbox_test_device *tdev = filp->private_data;
+
+ if (!tdev->tx_channel) {
+ dev_err(tdev->dev, "Channel cannot do Tx\n");
+ return -EINVAL;
+ }
+
+ if (count > MBOX_MAX_SIG_LEN) {
+ dev_err(tdev->dev,
+ "Signal length %zd greater than max allowed %d\n",
+ count, MBOX_MAX_SIG_LEN);
+ return -EINVAL;
+ }
+
+ /* Only allocate memory if we need to */
+ if (!tdev->signal) {
+ tdev->signal = kzalloc(MBOX_MAX_SIG_LEN, GFP_KERNEL);
+ if (!tdev->signal)
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(tdev->signal, userbuf, count)) {
+ kfree(tdev->signal);
+ tdev->signal = NULL;
+ return -EFAULT;
+ }
+
+ return count;
+}
+
+static const struct file_operations mbox_test_signal_ops = {
+ .write = mbox_test_signal_write,
+ .open = simple_open,
+ .llseek = generic_file_llseek,
+};
+
+static int mbox_test_message_fasync(int fd, struct file *filp, int on)
+{
+ struct mbox_test_device *tdev = filp->private_data;
+
+ return fasync_helper(fd, filp, on, &tdev->async_queue);
+}
+
+static ssize_t mbox_test_message_write(struct file *filp,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct mbox_test_device *tdev = filp->private_data;
+ char *message;
+ void *data;
+ int ret;
+
+ if (!tdev->tx_channel) {
+ dev_err(tdev->dev, "Channel cannot do Tx\n");
+ return -EINVAL;
+ }
+
+ if (count > MBOX_MAX_MSG_LEN) {
+ dev_err(tdev->dev,
+ "Message length %zd greater than max allowed %d\n",
+ count, MBOX_MAX_MSG_LEN);
+ return -EINVAL;
+ }
+
+ message = kzalloc(MBOX_MAX_MSG_LEN, GFP_KERNEL);
+ if (!message)
+ return -ENOMEM;
+
+ mutex_lock(&tdev->mutex);
+
+ tdev->message = message;
+ ret = copy_from_user(tdev->message, userbuf, count);
+ if (ret) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ /*
+ * A separate signal is only of use if there is
+ * MMIO to subsequently pass the message through
+ */
+ if (tdev->tx_mmio && tdev->signal) {
+ print_hex_dump_bytes("Client: Sending: Signal: ", DUMP_PREFIX_ADDRESS,
+ tdev->signal, MBOX_MAX_SIG_LEN);
+
+ data = tdev->signal;
+ } else
+ data = tdev->message;
+
+ print_hex_dump_bytes("Client: Sending: Message: ", DUMP_PREFIX_ADDRESS,
+ tdev->message, MBOX_MAX_MSG_LEN);
+
+ ret = mbox_send_message(tdev->tx_channel, data);
+ if (ret < 0)
+ dev_err(tdev->dev, "Failed to send message via mailbox\n");
+
+out:
+ kfree(tdev->signal);
+ kfree(tdev->message);
+ tdev->signal = NULL;
+
+ mutex_unlock(&tdev->mutex);
+
+ return ret < 0 ? ret : count;
+}
+
+static bool mbox_test_message_data_ready(struct mbox_test_device *tdev)
+{
+ bool data_ready;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tdev->lock, flags);
+ data_ready = mbox_data_ready;
+ spin_unlock_irqrestore(&tdev->lock, flags);
+
+ return data_ready;
+}
+
+static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct mbox_test_device *tdev = filp->private_data;
+ unsigned long flags;
+ char *touser, *ptr;
+ int l = 0;
+ int ret;
+
+ DECLARE_WAITQUEUE(wait, current);
+
+ touser = kzalloc(MBOX_HEXDUMP_MAX_LEN + 1, GFP_KERNEL);
+ if (!touser)
+ return -ENOMEM;
+
+ if (!tdev->rx_channel) {
+ ret = snprintf(touser, 20, "<NO RX CAPABILITY>\n");
+ ret = simple_read_from_buffer(userbuf, count, ppos,
+ touser, ret);
+ goto kfree_err;
+ }
+
+ add_wait_queue(&tdev->waitq, &wait);
+
+ do {
+ __set_current_state(TASK_INTERRUPTIBLE);
+
+ if (mbox_test_message_data_ready(tdev))
+ break;
+
+ if (filp->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ goto waitq_err;
+ }
+
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ goto waitq_err;
+ }
+ schedule();
+
+ } while (1);
+
+ spin_lock_irqsave(&tdev->lock, flags);
+
+ ptr = tdev->rx_buffer;
+ while (l < MBOX_HEXDUMP_MAX_LEN) {
+ hex_dump_to_buffer(ptr,
+ MBOX_BYTES_PER_LINE,
+ MBOX_BYTES_PER_LINE, 1, touser + l,
+ MBOX_HEXDUMP_LINE_LEN, true);
+
+ ptr += MBOX_BYTES_PER_LINE;
+ l += MBOX_HEXDUMP_LINE_LEN;
+ *(touser + (l - 1)) = '\n';
+ }
+ *(touser + l) = '\0';
+
+ memset(tdev->rx_buffer, 0, MBOX_MAX_MSG_LEN);
+ mbox_data_ready = false;
+
+ spin_unlock_irqrestore(&tdev->lock, flags);
+
+ ret = simple_read_from_buffer(userbuf, count, ppos, touser, MBOX_HEXDUMP_MAX_LEN);
+waitq_err:
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&tdev->waitq, &wait);
+kfree_err:
+ kfree(touser);
+ return ret;
+}
+
+static __poll_t
+mbox_test_message_poll(struct file *filp, struct poll_table_struct *wait)
+{
+ struct mbox_test_device *tdev = filp->private_data;
+
+ poll_wait(filp, &tdev->waitq, wait);
+
+ if (mbox_test_message_data_ready(tdev))
+ return EPOLLIN | EPOLLRDNORM;
+ return 0;
+}
+
+static const struct file_operations mbox_test_message_ops = {
+ .write = mbox_test_message_write,
+ .read = mbox_test_message_read,
+ .fasync = mbox_test_message_fasync,
+ .poll = mbox_test_message_poll,
+ .open = simple_open,
+ .llseek = generic_file_llseek,
+};
+
+static int mbox_test_add_debugfs(struct platform_device *pdev,
+ struct mbox_test_device *tdev)
+{
+ if (!debugfs_initialized())
+ return 0;
+
+ tdev->root_debugfs_dir = debugfs_create_dir(dev_name(&pdev->dev), NULL);
+ if (!tdev->root_debugfs_dir) {
+ dev_err(&pdev->dev, "Failed to create Mailbox debugfs\n");
+ return -EINVAL;
+ }
+
+ debugfs_create_file("message", 0600, tdev->root_debugfs_dir,
+ tdev, &mbox_test_message_ops);
+
+ debugfs_create_file("signal", 0200, tdev->root_debugfs_dir,
+ tdev, &mbox_test_signal_ops);
+
+ return 0;
+}
+
+static void mbox_test_receive_message(struct mbox_client *client, void *message)
+{
+ struct mbox_test_device *tdev = dev_get_drvdata(client->dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&tdev->lock, flags);
+ if (tdev->rx_mmio) {
+ memcpy_fromio(tdev->rx_buffer, tdev->rx_mmio, MBOX_MAX_MSG_LEN);
+ print_hex_dump_bytes("Client: Received [MMIO]: ", DUMP_PREFIX_ADDRESS,
+ tdev->rx_buffer, MBOX_MAX_MSG_LEN);
+ } else if (message) {
+ print_hex_dump_bytes("Client: Received [API]: ", DUMP_PREFIX_ADDRESS,
+ message, MBOX_MAX_MSG_LEN);
+ memcpy(tdev->rx_buffer, message, MBOX_MAX_MSG_LEN);
+ }
+ mbox_data_ready = true;
+ spin_unlock_irqrestore(&tdev->lock, flags);
+
+ wake_up_interruptible(&tdev->waitq);
+
+ kill_fasync(&tdev->async_queue, SIGIO, POLL_IN);
+}
+
+static void mbox_test_prepare_message(struct mbox_client *client, void *message)
+{
+ struct mbox_test_device *tdev = dev_get_drvdata(client->dev);
+
+ if (tdev->tx_mmio) {
+ if (tdev->signal)
+ memcpy_toio(tdev->tx_mmio, tdev->message, MBOX_MAX_MSG_LEN);
+ else
+ memcpy_toio(tdev->tx_mmio, message, MBOX_MAX_MSG_LEN);
+ }
+}
+
+static void mbox_test_message_sent(struct mbox_client *client,
+ void *message, int r)
+{
+ if (r)
+ dev_warn(client->dev,
+ "Client: Message could not be sent: %d\n", r);
+ else
+ dev_info(client->dev,
+ "Client: Message sent\n");
+}
+
+static struct mbox_chan *
+mbox_test_request_channel(struct platform_device *pdev, const char *name)
+{
+ struct mbox_client *client;
+ struct mbox_chan *channel;
+
+ client = devm_kzalloc(&pdev->dev, sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return ERR_PTR(-ENOMEM);
+
+ client->dev = &pdev->dev;
+ client->rx_callback = mbox_test_receive_message;
+ client->tx_prepare = mbox_test_prepare_message;
+ client->tx_done = mbox_test_message_sent;
+ client->tx_block = true;
+ client->knows_txdone = false;
+ client->tx_tout = 500;
+
+ channel = mbox_request_channel_byname(client, name);
+ if (IS_ERR(channel)) {
+ dev_warn(&pdev->dev, "Failed to request %s channel\n", name);
+ return NULL;
+ }
+
+ return channel;
+}
+
+static int mbox_test_probe(struct platform_device *pdev)
+{
+ struct mbox_test_device *tdev;
+ struct resource *res;
+ resource_size_t size;
+ int ret;
+
+ tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);
+ if (!tdev)
+ return -ENOMEM;
+
+ /* It's okay for MMIO to be NULL */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ tdev->tx_mmio = devm_ioremap_resource(&pdev->dev, res);
+ if (PTR_ERR(tdev->tx_mmio) == -EBUSY) {
+ /* if reserved area in SRAM, try just ioremap */
+ size = resource_size(res);
+ tdev->tx_mmio = devm_ioremap(&pdev->dev, res->start, size);
+ } else if (IS_ERR(tdev->tx_mmio)) {
+ tdev->tx_mmio = NULL;
+ }
+
+ /* If specified, second reg entry is Rx MMIO */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ tdev->rx_mmio = devm_ioremap_resource(&pdev->dev, res);
+ if (PTR_ERR(tdev->rx_mmio) == -EBUSY) {
+ size = resource_size(res);
+ tdev->rx_mmio = devm_ioremap(&pdev->dev, res->start, size);
+ } else if (IS_ERR(tdev->rx_mmio)) {
+ tdev->rx_mmio = tdev->tx_mmio;
+ }
+
+ tdev->tx_channel = mbox_test_request_channel(pdev, "tx");
+ tdev->rx_channel = mbox_test_request_channel(pdev, "rx");
+
+ if (!tdev->tx_channel && !tdev->rx_channel)
+ return -EPROBE_DEFER;
+
+ /* If Rx is not specified but has Rx MMIO, then Rx = Tx */
+ if (!tdev->rx_channel && (tdev->rx_mmio != tdev->tx_mmio))
+ tdev->rx_channel = tdev->tx_channel;
+
+ tdev->dev = &pdev->dev;
+ platform_set_drvdata(pdev, tdev);
+
+ spin_lock_init(&tdev->lock);
+ mutex_init(&tdev->mutex);
+
+ if (tdev->rx_channel) {
+ tdev->rx_buffer = devm_kzalloc(&pdev->dev,
+ MBOX_MAX_MSG_LEN, GFP_KERNEL);
+ if (!tdev->rx_buffer)
+ return -ENOMEM;
+ }
+
+ ret = mbox_test_add_debugfs(pdev, tdev);
+ if (ret)
+ return ret;
+
+ init_waitqueue_head(&tdev->waitq);
+ dev_info(&pdev->dev, "Successfully registered\n");
+
+ return 0;
+}
+
+static int mbox_test_remove(struct platform_device *pdev)
+{
+ struct mbox_test_device *tdev = platform_get_drvdata(pdev);
+
+ debugfs_remove_recursive(tdev->root_debugfs_dir);
+
+ if (tdev->tx_channel)
+ mbox_free_channel(tdev->tx_channel);
+ if (tdev->rx_channel)
+ mbox_free_channel(tdev->rx_channel);
+
+ return 0;
+}
+
+static const struct of_device_id mbox_test_match[] = {
+ { .compatible = "mailbox-test" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mbox_test_match);
+
+static struct platform_driver mbox_test_driver = {
+ .driver = {
+ .name = "mailbox_test",
+ .of_match_table = mbox_test_match,
+ },
+ .probe = mbox_test_probe,
+ .remove = mbox_test_remove,
+};
+module_platform_driver(mbox_test_driver);
+
+MODULE_DESCRIPTION("Generic Mailbox Testing Facility");
+MODULE_AUTHOR("Lee Jones <lee.jones@linaro.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/mailbox-xgene-slimpro.c b/drivers/mailbox/mailbox-xgene-slimpro.c
new file mode 100644
index 000000000..de260799f
--- /dev/null
+++ b/drivers/mailbox/mailbox-xgene-slimpro.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * APM X-Gene SLIMpro MailBox Driver
+ *
+ * Copyright (c) 2015, Applied Micro Circuits Corporation
+ * Author: Feng Kan <fkan@apm.com>
+ */
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#define MBOX_CON_NAME "slimpro-mbox"
+#define MBOX_REG_SET_OFFSET 0x1000
+#define MBOX_CNT 8
+#define MBOX_STATUS_AVAIL_MASK BIT(16)
+#define MBOX_STATUS_ACK_MASK BIT(0)
+
+/* Configuration and Status Registers */
+#define REG_DB_IN 0x00
+#define REG_DB_DIN0 0x04
+#define REG_DB_DIN1 0x08
+#define REG_DB_OUT 0x10
+#define REG_DB_DOUT0 0x14
+#define REG_DB_DOUT1 0x18
+#define REG_DB_STAT 0x20
+#define REG_DB_STATMASK 0x24
+
+/**
+ * X-Gene SlimPRO mailbox channel information
+ *
+ * @dev: Device to which it is attached
+ * @chan: Pointer to mailbox communication channel
+ * @reg: Base address to access channel registers
+ * @irq: Interrupt number of the channel
+ * @rx_msg: Received message storage
+ */
+struct slimpro_mbox_chan {
+ struct device *dev;
+ struct mbox_chan *chan;
+ void __iomem *reg;
+ int irq;
+ u32 rx_msg[3];
+};
+
+/**
+ * X-Gene SlimPRO Mailbox controller data
+ *
+ * X-Gene SlimPRO Mailbox controller has 8 communication channels.
+ * Each channel has a separate IRQ number assigned to it.
+ *
+ * @mb_ctrl: Representation of the communication channel controller
+ * @mc: Array of SlimPRO mailbox channels of the controller
+ * @chans: Array of mailbox communication channels
+ *
+ */
+struct slimpro_mbox {
+ struct mbox_controller mb_ctrl;
+ struct slimpro_mbox_chan mc[MBOX_CNT];
+ struct mbox_chan chans[MBOX_CNT];
+};
+
+static void mb_chan_send_msg(struct slimpro_mbox_chan *mb_chan, u32 *msg)
+{
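+	/* Payload first; writing REG_DB_OUT last rings the doorbell */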
+ writel(msg[1], mb_chan->reg + REG_DB_DOUT0);
+ writel(msg[2], mb_chan->reg + REG_DB_DOUT1);
+ writel(msg[0], mb_chan->reg + REG_DB_OUT);
+}
+
+static void mb_chan_recv_msg(struct slimpro_mbox_chan *mb_chan)
+{
+ mb_chan->rx_msg[1] = readl(mb_chan->reg + REG_DB_DIN0);
+ mb_chan->rx_msg[2] = readl(mb_chan->reg + REG_DB_DIN1);
+ mb_chan->rx_msg[0] = readl(mb_chan->reg + REG_DB_IN);
+}
+
+static int mb_chan_status_ack(struct slimpro_mbox_chan *mb_chan)
+{
+ u32 val = readl(mb_chan->reg + REG_DB_STAT);
+
+ if (val & MBOX_STATUS_ACK_MASK) {
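+		/* Clear the ACK status by writing the bit back */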
+ writel(MBOX_STATUS_ACK_MASK, mb_chan->reg + REG_DB_STAT);
+ return 1;
+ }
+ return 0;
+}
+
+static int mb_chan_status_avail(struct slimpro_mbox_chan *mb_chan)
+{
+ u32 val = readl(mb_chan->reg + REG_DB_STAT);
+
+ if (val & MBOX_STATUS_AVAIL_MASK) {
+ mb_chan_recv_msg(mb_chan);
+ writel(MBOX_STATUS_AVAIL_MASK, mb_chan->reg + REG_DB_STAT);
+ return 1;
+ }
+ return 0;
+}
+
+static irqreturn_t slimpro_mbox_irq(int irq, void *id)
+{
+ struct slimpro_mbox_chan *mb_chan = id;
+
+ if (mb_chan_status_ack(mb_chan))
+ mbox_chan_txdone(mb_chan->chan, 0);
+
+ if (mb_chan_status_avail(mb_chan))
+ mbox_chan_received_data(mb_chan->chan, mb_chan->rx_msg);
+
+ return IRQ_HANDLED;
+}
+
+static int slimpro_mbox_send_data(struct mbox_chan *chan, void *msg)
+{
+ struct slimpro_mbox_chan *mb_chan = chan->con_priv;
+
+ mb_chan_send_msg(mb_chan, msg);
+ return 0;
+}
+
+static int slimpro_mbox_startup(struct mbox_chan *chan)
+{
+ struct slimpro_mbox_chan *mb_chan = chan->con_priv;
+ int rc;
+ u32 val;
+
+ rc = devm_request_irq(mb_chan->dev, mb_chan->irq, slimpro_mbox_irq, 0,
+ MBOX_CON_NAME, mb_chan);
+ if (unlikely(rc)) {
+ dev_err(mb_chan->dev, "failed to register mailbox interrupt %d\n",
+ mb_chan->irq);
+ return rc;
+ }
+
+ /* Enable HW interrupt */
+ writel(MBOX_STATUS_ACK_MASK | MBOX_STATUS_AVAIL_MASK,
+ mb_chan->reg + REG_DB_STAT);
+ /* Unmask doorbell status interrupt */
+ val = readl(mb_chan->reg + REG_DB_STATMASK);
+ val &= ~(MBOX_STATUS_ACK_MASK | MBOX_STATUS_AVAIL_MASK);
+ writel(val, mb_chan->reg + REG_DB_STATMASK);
+
+ return 0;
+}
+
+static void slimpro_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct slimpro_mbox_chan *mb_chan = chan->con_priv;
+ u32 val;
+
+ /* Mask doorbell status interrupt */
+ val = readl(mb_chan->reg + REG_DB_STATMASK);
+ val |= (MBOX_STATUS_ACK_MASK | MBOX_STATUS_AVAIL_MASK);
+ writel(val, mb_chan->reg + REG_DB_STATMASK);
+
+ devm_free_irq(mb_chan->dev, mb_chan->irq, mb_chan);
+}
+
+static const struct mbox_chan_ops slimpro_mbox_ops = {
+ .send_data = slimpro_mbox_send_data,
+ .startup = slimpro_mbox_startup,
+ .shutdown = slimpro_mbox_shutdown,
+};
+
+static int slimpro_mbox_probe(struct platform_device *pdev)
+{
+ struct slimpro_mbox *ctx;
+ struct resource *regs;
+ void __iomem *mb_base;
+ int rc;
+ int i;
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(struct slimpro_mbox), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ctx);
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mb_base = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(mb_base))
+ return PTR_ERR(mb_base);
+
+ /* Setup mailbox links */
+ for (i = 0; i < MBOX_CNT; i++) {
+ ctx->mc[i].irq = platform_get_irq(pdev, i);
+ if (ctx->mc[i].irq < 0) {
+ if (i == 0) {
+ dev_err(&pdev->dev, "no available IRQ\n");
+ return -EINVAL;
+ }
+ dev_info(&pdev->dev, "no IRQ for channel %d\n", i);
+ break;
+ }
+
+ ctx->mc[i].dev = &pdev->dev;
+ ctx->mc[i].reg = mb_base + i * MBOX_REG_SET_OFFSET;
+ ctx->mc[i].chan = &ctx->chans[i];
+ ctx->chans[i].con_priv = &ctx->mc[i];
+ }
+
+ /* Setup mailbox controller */
+ ctx->mb_ctrl.dev = &pdev->dev;
+ ctx->mb_ctrl.chans = ctx->chans;
+ ctx->mb_ctrl.txdone_irq = true;
+ ctx->mb_ctrl.ops = &slimpro_mbox_ops;
+ ctx->mb_ctrl.num_chans = i;
+
+ rc = devm_mbox_controller_register(&pdev->dev, &ctx->mb_ctrl);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "APM X-Gene SLIMpro MailBox register failed:%d\n", rc);
+ return rc;
+ }
+
+ dev_info(&pdev->dev, "APM X-Gene SLIMpro MailBox registered\n");
+ return 0;
+}
+
+static const struct of_device_id slimpro_of_match[] = {
+ {.compatible = "apm,xgene-slimpro-mbox" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, slimpro_of_match);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id slimpro_acpi_ids[] = {
+ {"APMC0D01", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, slimpro_acpi_ids);
+#endif
+
+static struct platform_driver slimpro_mbox_driver = {
+ .probe = slimpro_mbox_probe,
+ .driver = {
+ .name = "xgene-slimpro-mbox",
+ .of_match_table = of_match_ptr(slimpro_of_match),
+ .acpi_match_table = ACPI_PTR(slimpro_acpi_ids)
+ },
+};
+
+static int __init slimpro_mbox_init(void)
+{
+ return platform_driver_register(&slimpro_mbox_driver);
+}
+
+static void __exit slimpro_mbox_exit(void)
+{
+ platform_driver_unregister(&slimpro_mbox_driver);
+}
+
+subsys_initcall(slimpro_mbox_init);
+module_exit(slimpro_mbox_exit);
+
+MODULE_DESCRIPTION("APM X-Gene SLIMpro Mailbox Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
new file mode 100644
index 000000000..4229b9b5d
--- /dev/null
+++ b/drivers/mailbox/mailbox.c
@@ -0,0 +1,625 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Mailbox: Common code for Mailbox controllers and users
+ *
+ * Copyright (C) 2013-2014 Linaro Ltd.
+ * Author: Jassi Brar <jassisinghbrar@gmail.com>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/bitops.h>
+#include <linux/mailbox_client.h>
+#include <linux/mailbox_controller.h>
+
+#include "mailbox.h"
+
+static LIST_HEAD(mbox_cons);
+static DEFINE_MUTEX(con_mutex);
+
+static int add_to_rbuf(struct mbox_chan *chan, void *mssg)
+{
+ int idx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ /* See if there is any space left */
+ if (chan->msg_count == MBOX_TX_QUEUE_LEN) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -ENOBUFS;
+ }
+
+ idx = chan->msg_free;
+ chan->msg_data[idx] = mssg;
+ chan->msg_count++;
+
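+	/* Advance the free-slot index, wrapping around the fixed-size ring */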
+ if (idx == MBOX_TX_QUEUE_LEN - 1)
+ chan->msg_free = 0;
+ else
+ chan->msg_free++;
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return idx;
+}
+
+static void msg_submit(struct mbox_chan *chan)
+{
+ unsigned count, idx;
+ unsigned long flags;
+ void *data;
+ int err = -EBUSY;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ if (!chan->msg_count || chan->active_req)
+ goto exit;
+
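+	/*
+	 * msg_free points one past the newest entry; step back 'count'
+	 * slots (with wrap-around) to find the oldest queued message.
+	 */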
+ count = chan->msg_count;
+ idx = chan->msg_free;
+ if (idx >= count)
+ idx -= count;
+ else
+ idx += MBOX_TX_QUEUE_LEN - count;
+
+ data = chan->msg_data[idx];
+
+ if (chan->cl->tx_prepare)
+ chan->cl->tx_prepare(chan->cl, data);
+ /* Try to submit a message to the MBOX controller */
+ err = chan->mbox->ops->send_data(chan, data);
+ if (!err) {
+ chan->active_req = data;
+ chan->msg_count--;
+ }
+exit:
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ if (!err && (chan->txdone_method & TXDONE_BY_POLL)) {
+ /* kick start the timer immediately to avoid delays */
+ spin_lock_irqsave(&chan->mbox->poll_hrt_lock, flags);
+ hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
+ spin_unlock_irqrestore(&chan->mbox->poll_hrt_lock, flags);
+ }
+}
+
+static void tx_tick(struct mbox_chan *chan, int r)
+{
+ unsigned long flags;
+ void *mssg;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ mssg = chan->active_req;
+ chan->active_req = NULL;
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ /* Submit next message */
+ msg_submit(chan);
+
+ if (!mssg)
+ return;
+
+ /* Notify the client */
+ if (chan->cl->tx_done)
+ chan->cl->tx_done(chan->cl, mssg, r);
+
+ if (r != -ETIME && chan->cl->tx_block)
+ complete(&chan->tx_complete);
+}
+
+static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer)
+{
+ struct mbox_controller *mbox =
+ container_of(hrtimer, struct mbox_controller, poll_hrt);
+ bool txdone, resched = false;
+ int i;
+ unsigned long flags;
+
+ for (i = 0; i < mbox->num_chans; i++) {
+ struct mbox_chan *chan = &mbox->chans[i];
+
+ if (chan->active_req && chan->cl) {
+ txdone = chan->mbox->ops->last_tx_done(chan);
+ if (txdone)
+ tx_tick(chan, 0);
+ else
+ resched = true;
+ }
+ }
+
+ if (resched) {
+ spin_lock_irqsave(&mbox->poll_hrt_lock, flags);
+ if (!hrtimer_is_queued(hrtimer))
+ hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period));
+ spin_unlock_irqrestore(&mbox->poll_hrt_lock, flags);
+
+ return HRTIMER_RESTART;
+ }
+ return HRTIMER_NORESTART;
+}
+
+/**
+ * mbox_chan_received_data - A way for controller driver to push data
+ * received from remote to the upper layer.
+ * @chan: Pointer to the mailbox channel on which RX happened.
+ * @mssg: Client specific message typecasted as void *
+ *
+ * After startup and before shutdown any data received on the chan
+ * is passed on to the API via atomic mbox_chan_received_data().
+ * The controller should ACK the RX only after this call returns.
+ */
+void mbox_chan_received_data(struct mbox_chan *chan, void *mssg)
+{
+	/* The received data is not buffered */
+ if (chan->cl->rx_callback)
+ chan->cl->rx_callback(chan->cl, mssg);
+}
+EXPORT_SYMBOL_GPL(mbox_chan_received_data);
+
+/**
+ * mbox_chan_txdone - A way for controller driver to notify the
+ * framework that the last TX has completed.
+ * @chan: Pointer to the mailbox chan on which TX happened.
+ * @r: Status of last TX - OK or ERROR
+ *
+ * The controller that has IRQ for TX ACK calls this atomic API
+ * to tick the TX state machine. It works only if txdone_irq
+ * is set by the controller.
+ */
+void mbox_chan_txdone(struct mbox_chan *chan, int r)
+{
+ if (unlikely(!(chan->txdone_method & TXDONE_BY_IRQ))) {
+ dev_err(chan->mbox->dev,
+ "Controller can't run the TX ticker\n");
+ return;
+ }
+
+ tx_tick(chan, r);
+}
+EXPORT_SYMBOL_GPL(mbox_chan_txdone);
+
+/**
+ * mbox_client_txdone - The way for a client to run the TX state machine.
+ * @chan: Mailbox channel assigned to this client.
+ * @r: Success status of last transmission.
+ *
+ * The client/protocol had received some 'ACK' packet and it notifies
+ * the API that the last packet was sent successfully. This only works
+ * if the controller can't sense TX-Done.
+ */
+void mbox_client_txdone(struct mbox_chan *chan, int r)
+{
+ if (unlikely(!(chan->txdone_method & TXDONE_BY_ACK))) {
+ dev_err(chan->mbox->dev, "Client can't run the TX ticker\n");
+ return;
+ }
+
+ tx_tick(chan, r);
+}
+EXPORT_SYMBOL_GPL(mbox_client_txdone);
+
+/**
+ * mbox_client_peek_data - A way for client driver to pull data
+ * received from remote by the controller.
+ * @chan: Mailbox channel assigned to this client.
+ *
+ * A poke to controller driver for any received data.
+ * The data is actually passed onto client via the
+ * mbox_chan_received_data()
+ * The call can be made from atomic context, so the controller's
+ * implementation of peek_data() must not sleep.
+ *
+ * Return: True, if controller has, and is going to push after this,
+ * some data.
+ * False, if controller doesn't have any data to be read.
+ */
+bool mbox_client_peek_data(struct mbox_chan *chan)
+{
+ if (chan->mbox->ops->peek_data)
+ return chan->mbox->ops->peek_data(chan);
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(mbox_client_peek_data);
+
+/**
+ * mbox_send_message - For client to submit a message to be
+ * sent to the remote.
+ * @chan: Mailbox channel assigned to this client.
+ * @mssg: Client specific message typecasted.
+ *
+ * For client to submit data to the controller destined for a remote
+ * processor. If the client had set 'tx_block', the call will return
+ * either when the remote receives the data or when 'tx_tout' millisecs
+ * run out.
+ * In non-blocking mode, the requests are buffered by the API and a
+ * non-negative token is returned for each queued request. If the request
+ * is not queued, a negative token is returned. Upon failure or successful
+ * TX, the API calls 'tx_done' from atomic context, from which the client
+ * could submit yet another request.
+ * The pointer to message should be preserved until it is sent
+ * over the chan, i.e, tx_done() is made.
+ * This function could be called from atomic context as it simply
+ * queues the data and returns a token against the request.
+ *
+ * Return: Non-negative integer for successful submission (non-blocking mode)
+ * or transmission over chan (blocking mode).
+ * Negative value denotes failure.
+ */
+int mbox_send_message(struct mbox_chan *chan, void *mssg)
+{
+ int t;
+
+ if (!chan || !chan->cl)
+ return -EINVAL;
+
+ t = add_to_rbuf(chan, mssg);
+ if (t < 0) {
+ dev_err(chan->mbox->dev, "Try increasing MBOX_TX_QUEUE_LEN\n");
+ return t;
+ }
+
+ msg_submit(chan);
+
+ if (chan->cl->tx_block) {
+ unsigned long wait;
+ int ret;
+
+ if (!chan->cl->tx_tout) /* wait forever */
+ wait = msecs_to_jiffies(3600000);
+ else
+ wait = msecs_to_jiffies(chan->cl->tx_tout);
+
+ ret = wait_for_completion_timeout(&chan->tx_complete, wait);
+ if (ret == 0) {
+ t = -ETIME;
+ tx_tick(chan, t);
+ }
+ }
+
+ return t;
+}
+EXPORT_SYMBOL_GPL(mbox_send_message);
+
+/**
+ * mbox_flush - flush a mailbox channel
+ * @chan: mailbox channel to flush
+ * @timeout: time, in milliseconds, to allow the flush operation to succeed
+ *
+ * Mailbox controllers that need to work in atomic context can implement the
+ * ->flush() callback to busy loop until a transmission has been completed.
+ * The implementation must call mbox_chan_txdone() upon success. Clients can
+ * call the mbox_flush() function at any time after mbox_send_message() to
+ * flush the transmission. After the function returns success, the mailbox
+ * transmission is guaranteed to have completed.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int mbox_flush(struct mbox_chan *chan, unsigned long timeout)
+{
+ int ret;
+
+ if (!chan->mbox->ops->flush)
+ return -ENOTSUPP;
+
+ ret = chan->mbox->ops->flush(chan, timeout);
+ if (ret < 0)
+ tx_tick(chan, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mbox_flush);
+
+/**
+ * mbox_request_channel - Request a mailbox channel.
+ * @cl: Identity of the client requesting the channel.
+ * @index: Index of mailbox specifier in 'mboxes' property.
+ *
+ * The Client specifies its requirements and capabilities while asking for
+ * a mailbox channel. It can't be called from atomic context.
+ * The channel is exclusively allocated and can't be used by another
+ * client before the owner calls mbox_free_channel.
+ * After assignment, any packet received on this channel will be
+ * handed over to the client via the 'rx_callback'.
+ * The framework holds a reference to the client, so the mbox_client
+ * structure shouldn't be modified until mbox_free_channel() returns.
+ *
+ * Return: Pointer to the channel assigned to the client if successful.
+ * ERR_PTR for request failure.
+ */
+struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
+{
+ struct device *dev = cl->dev;
+ struct mbox_controller *mbox;
+ struct of_phandle_args spec;
+ struct mbox_chan *chan;
+ unsigned long flags;
+ int ret;
+
+ if (!dev || !dev->of_node) {
+ pr_debug("%s: No owner device node\n", __func__);
+ return ERR_PTR(-ENODEV);
+ }
+
+ mutex_lock(&con_mutex);
+
+ if (of_parse_phandle_with_args(dev->of_node, "mboxes",
+ "#mbox-cells", index, &spec)) {
+ dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__);
+ mutex_unlock(&con_mutex);
+ return ERR_PTR(-ENODEV);
+ }
+
+ chan = ERR_PTR(-EPROBE_DEFER);
+ list_for_each_entry(mbox, &mbox_cons, node)
+ if (mbox->dev->of_node == spec.np) {
+ chan = mbox->of_xlate(mbox, &spec);
+ if (!IS_ERR(chan))
+ break;
+ }
+
+ of_node_put(spec.np);
+
+ if (IS_ERR(chan)) {
+ mutex_unlock(&con_mutex);
+ return chan;
+ }
+
+ if (chan->cl || !try_module_get(mbox->dev->driver->owner)) {
+ dev_dbg(dev, "%s: mailbox not free\n", __func__);
+ mutex_unlock(&con_mutex);
+ return ERR_PTR(-EBUSY);
+ }
+
+ spin_lock_irqsave(&chan->lock, flags);
+ chan->msg_free = 0;
+ chan->msg_count = 0;
+ chan->active_req = NULL;
+ chan->cl = cl;
+ init_completion(&chan->tx_complete);
+
+ if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
+ chan->txdone_method = TXDONE_BY_ACK;
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ if (chan->mbox->ops->startup) {
+ ret = chan->mbox->ops->startup(chan);
+
+ if (ret) {
+ dev_err(dev, "Unable to startup the chan (%d)\n", ret);
+ mbox_free_channel(chan);
+ chan = ERR_PTR(ret);
+ }
+ }
+
+ mutex_unlock(&con_mutex);
+ return chan;
+}
+EXPORT_SYMBOL_GPL(mbox_request_channel);
+
+struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
+ const char *name)
+{
+ struct device_node *np = cl->dev->of_node;
+ struct property *prop;
+ const char *mbox_name;
+ int index = 0;
+
+ if (!np) {
+ dev_err(cl->dev, "%s() currently only supports DT\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!of_get_property(np, "mbox-names", NULL)) {
+ dev_err(cl->dev,
+ "%s() requires an \"mbox-names\" property\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
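+	/*
+	 * Note: strncmp() with strlen(name) matches any registered name
+	 * that begins with the requested 'name'.
+	 */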
+ of_property_for_each_string(np, "mbox-names", prop, mbox_name) {
+ if (!strncmp(name, mbox_name, strlen(name)))
+ return mbox_request_channel(cl, index);
+ index++;
+ }
+
+ dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
+ __func__, name);
+ return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(mbox_request_channel_byname);
+
+/**
+ * mbox_free_channel - The client relinquishes control of a mailbox
+ * channel by this call.
+ * @chan: The mailbox channel to be freed.
+ */
+void mbox_free_channel(struct mbox_chan *chan)
+{
+ unsigned long flags;
+
+ if (!chan || !chan->cl)
+ return;
+
+ if (chan->mbox->ops->shutdown)
+ chan->mbox->ops->shutdown(chan);
+
+ /* The queued TX requests are simply aborted, no callbacks are made */
+ spin_lock_irqsave(&chan->lock, flags);
+ chan->cl = NULL;
+ chan->active_req = NULL;
+ if (chan->txdone_method == TXDONE_BY_ACK)
+ chan->txdone_method = TXDONE_BY_POLL;
+
+ module_put(chan->mbox->dev->driver->owner);
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+EXPORT_SYMBOL_GPL(mbox_free_channel);
+
+static struct mbox_chan *
+of_mbox_index_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *sp)
+{
+ int ind = sp->args[0];
+
+ if (ind >= mbox->num_chans)
+ return ERR_PTR(-EINVAL);
+
+ return &mbox->chans[ind];
+}
+
+/**
+ * mbox_controller_register - Register the mailbox controller
+ * @mbox: Pointer to the mailbox controller.
+ *
+ * The controller driver registers its communication channels
+ */
+int mbox_controller_register(struct mbox_controller *mbox)
+{
+ int i, txdone;
+
+ /* Sanity check */
+ if (!mbox || !mbox->dev || !mbox->ops || !mbox->num_chans)
+ return -EINVAL;
+
+ if (mbox->txdone_irq)
+ txdone = TXDONE_BY_IRQ;
+ else if (mbox->txdone_poll)
+ txdone = TXDONE_BY_POLL;
+ else /* It has to be ACK then */
+ txdone = TXDONE_BY_ACK;
+
+ if (txdone == TXDONE_BY_POLL) {
+
+ if (!mbox->ops->last_tx_done) {
+ dev_err(mbox->dev, "last_tx_done method is absent\n");
+ return -EINVAL;
+ }
+
+ hrtimer_init(&mbox->poll_hrt, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ mbox->poll_hrt.function = txdone_hrtimer;
+ spin_lock_init(&mbox->poll_hrt_lock);
+ }
+
+ for (i = 0; i < mbox->num_chans; i++) {
+ struct mbox_chan *chan = &mbox->chans[i];
+
+ chan->cl = NULL;
+ chan->mbox = mbox;
+ chan->txdone_method = txdone;
+ spin_lock_init(&chan->lock);
+ }
+
+ if (!mbox->of_xlate)
+ mbox->of_xlate = of_mbox_index_xlate;
+
+ mutex_lock(&con_mutex);
+ list_add_tail(&mbox->node, &mbox_cons);
+ mutex_unlock(&con_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mbox_controller_register);
+
+/**
+ * mbox_controller_unregister - Unregister the mailbox controller
+ * @mbox: Pointer to the mailbox controller.
+ */
+void mbox_controller_unregister(struct mbox_controller *mbox)
+{
+ int i;
+
+ if (!mbox)
+ return;
+
+ mutex_lock(&con_mutex);
+
+ list_del(&mbox->node);
+
+ for (i = 0; i < mbox->num_chans; i++)
+ mbox_free_channel(&mbox->chans[i]);
+
+ if (mbox->txdone_poll)
+ hrtimer_cancel(&mbox->poll_hrt);
+
+ mutex_unlock(&con_mutex);
+}
+EXPORT_SYMBOL_GPL(mbox_controller_unregister);
+
+static void __devm_mbox_controller_unregister(struct device *dev, void *res)
+{
+ struct mbox_controller **mbox = res;
+
+ mbox_controller_unregister(*mbox);
+}
+
+static int devm_mbox_controller_match(struct device *dev, void *res, void *data)
+{
+ struct mbox_controller **mbox = res;
+
+ if (WARN_ON(!mbox || !*mbox))
+ return 0;
+
+ return *mbox == data;
+}
+
+/**
+ * devm_mbox_controller_register() - managed mbox_controller_register()
+ * @dev: device owning the mailbox controller being registered
+ * @mbox: mailbox controller being registered
+ *
+ * This function adds a device-managed resource that will make sure that the
+ * mailbox controller, which is registered using mbox_controller_register()
+ * as part of this function, will be unregistered along with the rest of
+ * device-managed resources upon driver probe failure or driver removal.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int devm_mbox_controller_register(struct device *dev,
+ struct mbox_controller *mbox)
+{
+ struct mbox_controller **ptr;
+ int err;
+
+ ptr = devres_alloc(__devm_mbox_controller_unregister, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ err = mbox_controller_register(mbox);
+ if (err < 0) {
+ devres_free(ptr);
+ return err;
+ }
+
+ devres_add(dev, ptr);
+ *ptr = mbox;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_mbox_controller_register);
+
+/**
+ * devm_mbox_controller_unregister() - managed mbox_controller_unregister()
+ * @dev: device owning the mailbox controller being unregistered
+ * @mbox: mailbox controller being unregistered
+ *
+ * This function unregisters the mailbox controller and removes the device-
+ * managed resource that was set up to automatically unregister the mailbox
+ * controller on driver probe failure or driver removal. It's typically not
+ * necessary to call this function.
+ */
+void devm_mbox_controller_unregister(struct device *dev, struct mbox_controller *mbox)
+{
+ WARN_ON(devres_release(dev, __devm_mbox_controller_unregister,
+ devm_mbox_controller_match, mbox));
+}
+EXPORT_SYMBOL_GPL(devm_mbox_controller_unregister);
diff --git a/drivers/mailbox/mailbox.h b/drivers/mailbox/mailbox.h
new file mode 100644
index 000000000..4e3cc4426
--- /dev/null
+++ b/drivers/mailbox/mailbox.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __MAILBOX_H
+#define __MAILBOX_H
+
+#define TXDONE_BY_IRQ BIT(0) /* controller has remote RTR irq */
+#define TXDONE_BY_POLL BIT(1) /* controller can read status of last TX */
+#define TXDONE_BY_ACK	BIT(2) /* S/W ACK received by Client ticks the TX */
+
+#endif /* __MAILBOX_H */
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
new file mode 100644
index 000000000..75378e35c
--- /dev/null
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -0,0 +1,636 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox/mtk-cmdq-mailbox.h>
+#include <linux/of_device.h>
+
+#define CMDQ_OP_CODE_MASK (0xff << CMDQ_OP_CODE_SHIFT)
+#define CMDQ_NUM_CMD(t) (t->cmd_buf_size / CMDQ_INST_SIZE)
+
+#define CMDQ_CURR_IRQ_STATUS 0x10
+#define CMDQ_SYNC_TOKEN_UPDATE 0x68
+#define CMDQ_THR_SLOT_CYCLES 0x30
+#define CMDQ_THR_BASE 0x100
+#define CMDQ_THR_SIZE 0x80
+#define CMDQ_THR_WARM_RESET 0x00
+#define CMDQ_THR_ENABLE_TASK 0x04
+#define CMDQ_THR_SUSPEND_TASK 0x08
+#define CMDQ_THR_CURR_STATUS 0x0c
+#define CMDQ_THR_IRQ_STATUS 0x10
+#define CMDQ_THR_IRQ_ENABLE 0x14
+#define CMDQ_THR_CURR_ADDR 0x20
+#define CMDQ_THR_END_ADDR 0x24
+#define CMDQ_THR_WAIT_TOKEN 0x30
+#define CMDQ_THR_PRIORITY 0x40
+
+#define CMDQ_THR_ACTIVE_SLOT_CYCLES 0x3200
+#define CMDQ_THR_ENABLED 0x1
+#define CMDQ_THR_DISABLED 0x0
+#define CMDQ_THR_SUSPEND 0x1
+#define CMDQ_THR_RESUME 0x0
+#define CMDQ_THR_STATUS_SUSPENDED BIT(1)
+#define CMDQ_THR_DO_WARM_RESET BIT(0)
+#define CMDQ_THR_IRQ_DONE 0x1
+#define CMDQ_THR_IRQ_ERROR 0x12
+#define CMDQ_THR_IRQ_EN (CMDQ_THR_IRQ_ERROR | CMDQ_THR_IRQ_DONE)
+#define CMDQ_THR_IS_WAITING BIT(31)
+
+#define CMDQ_JUMP_BY_OFFSET 0x10000000
+#define CMDQ_JUMP_BY_PA 0x10000001
+
+struct cmdq_thread {
+ struct mbox_chan *chan;
+ void __iomem *base;
+ struct list_head task_busy_list;
+ u32 priority;
+};
+
+struct cmdq_task {
+ struct cmdq *cmdq;
+ struct list_head list_entry;
+ dma_addr_t pa_base;
+ struct cmdq_thread *thread;
+ struct cmdq_pkt *pkt; /* the packet sent from mailbox client */
+};
+
+struct cmdq {
+ struct mbox_controller mbox;
+ void __iomem *base;
+ int irq;
+ u32 thread_nr;
+ u32 irq_mask;
+ struct cmdq_thread *thread;
+ struct clk *clock;
+ bool suspended;
+ u8 shift_pa;
+};
+
+struct gce_plat {
+ u32 thread_nr;
+ u8 shift;
+};
+
+u8 cmdq_get_shift_pa(struct mbox_chan *chan)
+{
+ struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox);
+
+ return cmdq->shift_pa;
+}
+EXPORT_SYMBOL(cmdq_get_shift_pa);
+
+static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread)
+{
+ u32 status;
+
+ writel(CMDQ_THR_SUSPEND, thread->base + CMDQ_THR_SUSPEND_TASK);
+
+ /* If already disabled, treat the suspend as successful. */
+ if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
+ return 0;
+
+ if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_CURR_STATUS,
+ status, status & CMDQ_THR_STATUS_SUSPENDED, 0, 10)) {
+ dev_err(cmdq->mbox.dev, "suspend GCE thread 0x%x failed\n",
+ (u32)(thread->base - cmdq->base));
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static void cmdq_thread_resume(struct cmdq_thread *thread)
+{
+ writel(CMDQ_THR_RESUME, thread->base + CMDQ_THR_SUSPEND_TASK);
+}
+
+static void cmdq_init(struct cmdq *cmdq)
+{
+ int i;
+
+ WARN_ON(clk_enable(cmdq->clock) < 0);
+ writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
+ for (i = 0; i <= CMDQ_MAX_EVENT; i++)
+ writel(i, cmdq->base + CMDQ_SYNC_TOKEN_UPDATE);
+ clk_disable(cmdq->clock);
+}
+
+static int cmdq_thread_reset(struct cmdq *cmdq, struct cmdq_thread *thread)
+{
+ u32 warm_reset;
+
+ writel(CMDQ_THR_DO_WARM_RESET, thread->base + CMDQ_THR_WARM_RESET);
+ if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_WARM_RESET,
+ warm_reset, !(warm_reset & CMDQ_THR_DO_WARM_RESET),
+ 0, 10)) {
+ dev_err(cmdq->mbox.dev, "reset GCE thread 0x%x failed\n",
+ (u32)(thread->base - cmdq->base));
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static void cmdq_thread_disable(struct cmdq *cmdq, struct cmdq_thread *thread)
+{
+ cmdq_thread_reset(cmdq, thread);
+ writel(CMDQ_THR_DISABLED, thread->base + CMDQ_THR_ENABLE_TASK);
+}
+
+/* notify GCE to re-fetch commands by setting GCE thread PC */
+static void cmdq_thread_invalidate_fetched_data(struct cmdq_thread *thread)
+{
+ writel(readl(thread->base + CMDQ_THR_CURR_ADDR),
+ thread->base + CMDQ_THR_CURR_ADDR);
+}
+
+static void cmdq_task_insert_into_thread(struct cmdq_task *task)
+{
+ struct device *dev = task->cmdq->mbox.dev;
+ struct cmdq_thread *thread = task->thread;
+ struct cmdq_task *prev_task = list_last_entry(
+ &thread->task_busy_list, typeof(*task), list_entry);
+ u64 *prev_task_base = prev_task->pkt->va_base;
+
+ /* let previous task jump to this task */
+ dma_sync_single_for_cpu(dev, prev_task->pa_base,
+ prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
+ prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] =
+ (u64)CMDQ_JUMP_BY_PA << 32 |
+ (task->pa_base >> task->cmdq->shift_pa);
+ dma_sync_single_for_device(dev, prev_task->pa_base,
+ prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
+
+ cmdq_thread_invalidate_fetched_data(thread);
+}
+
+static bool cmdq_thread_is_in_wfe(struct cmdq_thread *thread)
+{
+ return readl(thread->base + CMDQ_THR_WAIT_TOKEN) & CMDQ_THR_IS_WAITING;
+}
+
+static void cmdq_task_exec_done(struct cmdq_task *task, enum cmdq_cb_status sta)
+{
+ struct cmdq_task_cb *cb = &task->pkt->async_cb;
+ struct cmdq_cb_data data;
+
+ WARN_ON(cb->cb == (cmdq_async_flush_cb)NULL);
+ data.sta = sta;
+ data.data = cb->data;
+ cb->cb(data);
+
+ list_del(&task->list_entry);
+}
+
+static void cmdq_task_handle_error(struct cmdq_task *task)
+{
+ struct cmdq_thread *thread = task->thread;
+ struct cmdq_task *next_task;
+ struct cmdq *cmdq = task->cmdq;
+
+ dev_err(cmdq->mbox.dev, "task 0x%p error\n", task);
+ WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
+ next_task = list_first_entry_or_null(&thread->task_busy_list,
+ struct cmdq_task, list_entry);
+ if (next_task)
+ writel(next_task->pa_base >> cmdq->shift_pa,
+ thread->base + CMDQ_THR_CURR_ADDR);
+ cmdq_thread_resume(thread);
+}
+
+static void cmdq_thread_irq_handler(struct cmdq *cmdq,
+ struct cmdq_thread *thread)
+{
+ struct cmdq_task *task, *tmp, *curr_task = NULL;
+ u32 curr_pa, irq_flag, task_end_pa;
+ bool err;
+
+ irq_flag = readl(thread->base + CMDQ_THR_IRQ_STATUS);
+ writel(~irq_flag, thread->base + CMDQ_THR_IRQ_STATUS);
+
+ /*
+ * When the ISR calls this function, another CPU core could run
+ * "release task" right before we acquire the spin lock, and thus
+ * reset / disable this GCE thread, so we need to check the enable
+ * bit of this GCE thread.
+ */
+ if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
+ return;
+
+ if (irq_flag & CMDQ_THR_IRQ_ERROR)
+ err = true;
+ else if (irq_flag & CMDQ_THR_IRQ_DONE)
+ err = false;
+ else
+ return;
+
+ curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) << cmdq->shift_pa;
+
+ list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
+ list_entry) {
+ task_end_pa = task->pa_base + task->pkt->cmd_buf_size;
+ if (curr_pa >= task->pa_base && curr_pa < task_end_pa)
+ curr_task = task;
+
+ if (!curr_task || curr_pa == task_end_pa - CMDQ_INST_SIZE) {
+ cmdq_task_exec_done(task, CMDQ_CB_NORMAL);
+ kfree(task);
+ } else if (err) {
+ cmdq_task_exec_done(task, CMDQ_CB_ERROR);
+ cmdq_task_handle_error(curr_task);
+ kfree(task);
+ }
+
+ if (curr_task)
+ break;
+ }
+
+ if (list_empty(&thread->task_busy_list)) {
+ cmdq_thread_disable(cmdq, thread);
+ clk_disable(cmdq->clock);
+ }
+}
+
+static irqreturn_t cmdq_irq_handler(int irq, void *dev)
+{
+ struct cmdq *cmdq = dev;
+ unsigned long irq_status, flags = 0L;
+ int bit;
+
+ irq_status = readl(cmdq->base + CMDQ_CURR_IRQ_STATUS) & cmdq->irq_mask;
+ if (!(irq_status ^ cmdq->irq_mask))
+ return IRQ_NONE;
+
+ for_each_clear_bit(bit, &irq_status, cmdq->thread_nr) {
+ struct cmdq_thread *thread = &cmdq->thread[bit];
+
+ spin_lock_irqsave(&thread->chan->lock, flags);
+ cmdq_thread_irq_handler(cmdq, thread);
+ spin_unlock_irqrestore(&thread->chan->lock, flags);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int cmdq_suspend(struct device *dev)
+{
+ struct cmdq *cmdq = dev_get_drvdata(dev);
+ struct cmdq_thread *thread;
+ int i;
+ bool task_running = false;
+
+ cmdq->suspended = true;
+
+ for (i = 0; i < cmdq->thread_nr; i++) {
+ thread = &cmdq->thread[i];
+ if (!list_empty(&thread->task_busy_list)) {
+ task_running = true;
+ break;
+ }
+ }
+
+ if (task_running)
+ dev_warn(dev, "exist running task(s) in suspend\n");
+
+ clk_unprepare(cmdq->clock);
+
+ return 0;
+}
+
+static int cmdq_resume(struct device *dev)
+{
+ struct cmdq *cmdq = dev_get_drvdata(dev);
+
+ WARN_ON(clk_prepare(cmdq->clock) < 0);
+ cmdq->suspended = false;
+ return 0;
+}
+
+static int cmdq_remove(struct platform_device *pdev)
+{
+ struct cmdq *cmdq = platform_get_drvdata(pdev);
+
+ clk_unprepare(cmdq->clock);
+
+ return 0;
+}
+
+static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct cmdq_pkt *pkt = (struct cmdq_pkt *)data;
+ struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
+ struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
+ struct cmdq_task *task;
+ unsigned long curr_pa, end_pa;
+
+ /* Client should not flush new tasks if suspended. */
+ WARN_ON(cmdq->suspended);
+
+ task = kzalloc(sizeof(*task), GFP_ATOMIC);
+ if (!task)
+ return -ENOMEM;
+
+ task->cmdq = cmdq;
+ INIT_LIST_HEAD(&task->list_entry);
+ task->pa_base = pkt->pa_base;
+ task->thread = thread;
+ task->pkt = pkt;
+
+ if (list_empty(&thread->task_busy_list)) {
+ WARN_ON(clk_enable(cmdq->clock) < 0);
+ /*
+ * The thread reset will clear the thread-related registers to 0,
+ * including pc, end, priority, irq, suspend and enable. Thus,
+ * writing CMDQ_THR_ENABLED to CMDQ_THR_ENABLE_TASK will enable
+ * the thread and start it running.
+ */
+ WARN_ON(cmdq_thread_reset(cmdq, thread) < 0);
+
+ writel(task->pa_base >> cmdq->shift_pa,
+ thread->base + CMDQ_THR_CURR_ADDR);
+ writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->shift_pa,
+ thread->base + CMDQ_THR_END_ADDR);
+
+ writel(thread->priority, thread->base + CMDQ_THR_PRIORITY);
+ writel(CMDQ_THR_IRQ_EN, thread->base + CMDQ_THR_IRQ_ENABLE);
+ writel(CMDQ_THR_ENABLED, thread->base + CMDQ_THR_ENABLE_TASK);
+ } else {
+ WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
+ curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) <<
+ cmdq->shift_pa;
+ end_pa = readl(thread->base + CMDQ_THR_END_ADDR) <<
+ cmdq->shift_pa;
+ /* check boundary */
+ if (curr_pa == end_pa - CMDQ_INST_SIZE ||
+ curr_pa == end_pa) {
+ /* set to this task directly */
+ writel(task->pa_base >> cmdq->shift_pa,
+ thread->base + CMDQ_THR_CURR_ADDR);
+ } else {
+ cmdq_task_insert_into_thread(task);
+ smp_mb(); /* modify jump before enable thread */
+ }
+ writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->shift_pa,
+ thread->base + CMDQ_THR_END_ADDR);
+ cmdq_thread_resume(thread);
+ }
+ list_move_tail(&task->list_entry, &thread->task_busy_list);
+
+ return 0;
+}
+
+static int cmdq_mbox_startup(struct mbox_chan *chan)
+{
+ return 0;
+}
+
+static void cmdq_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
+ struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
+ struct cmdq_task *task, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&thread->chan->lock, flags);
+ if (list_empty(&thread->task_busy_list))
+ goto done;
+
+ WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
+
+ /* make sure executed tasks have success callback */
+ cmdq_thread_irq_handler(cmdq, thread);
+ if (list_empty(&thread->task_busy_list))
+ goto done;
+
+ list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
+ list_entry) {
+ cmdq_task_exec_done(task, CMDQ_CB_ERROR);
+ kfree(task);
+ }
+
+ cmdq_thread_disable(cmdq, thread);
+ clk_disable(cmdq->clock);
+done:
+ /*
+ * An empty thread->task_busy_list means the thread is already disabled.
+ * cmdq_mbox_send_data() always resets the thread, which clears the disable
+ * and suspend status, when the first packet is sent to the channel, so
+ * there is nothing to do here; just unlock and leave.
+ */
+ spin_unlock_irqrestore(&thread->chan->lock, flags);
+}
+
+static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
+{
+ struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
+ struct cmdq_task_cb *cb;
+ struct cmdq_cb_data data;
+ struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
+ struct cmdq_task *task, *tmp;
+ unsigned long flags;
+ u32 enable;
+
+ spin_lock_irqsave(&thread->chan->lock, flags);
+ if (list_empty(&thread->task_busy_list))
+ goto out;
+
+ WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
+ if (!cmdq_thread_is_in_wfe(thread))
+ goto wait;
+
+ list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
+ list_entry) {
+ cb = &task->pkt->async_cb;
+ if (cb->cb) {
+ data.sta = CMDQ_CB_ERROR;
+ data.data = cb->data;
+ cb->cb(data);
+ }
+ list_del(&task->list_entry);
+ kfree(task);
+ }
+
+ cmdq_thread_resume(thread);
+ cmdq_thread_disable(cmdq, thread);
+ clk_disable(cmdq->clock);
+
+out:
+ spin_unlock_irqrestore(&thread->chan->lock, flags);
+ return 0;
+
+wait:
+ cmdq_thread_resume(thread);
+ spin_unlock_irqrestore(&thread->chan->lock, flags);
+ if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_ENABLE_TASK,
+ enable, enable == 0, 1, timeout)) {
+ dev_err(cmdq->mbox.dev, "Fail to wait GCE thread 0x%x done\n",
+ (u32)(thread->base - cmdq->base));
+
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static const struct mbox_chan_ops cmdq_mbox_chan_ops = {
+ .send_data = cmdq_mbox_send_data,
+ .startup = cmdq_mbox_startup,
+ .shutdown = cmdq_mbox_shutdown,
+ .flush = cmdq_mbox_flush,
+};
+
+static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *sp)
+{
+ int ind = sp->args[0];
+ struct cmdq_thread *thread;
+
+ if (ind >= mbox->num_chans)
+ return ERR_PTR(-EINVAL);
+
+ thread = (struct cmdq_thread *)mbox->chans[ind].con_priv;
+ thread->priority = sp->args[1];
+ thread->chan = &mbox->chans[ind];
+
+ return &mbox->chans[ind];
+}
+
+static int cmdq_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct cmdq *cmdq;
+ int err, i;
+ struct gce_plat *plat_data;
+
+ cmdq = devm_kzalloc(dev, sizeof(*cmdq), GFP_KERNEL);
+ if (!cmdq)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ cmdq->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cmdq->base)) {
+ dev_err(dev, "failed to ioremap gce\n");
+ return PTR_ERR(cmdq->base);
+ }
+
+ cmdq->irq = platform_get_irq(pdev, 0);
+ if (cmdq->irq < 0)
+ return cmdq->irq;
+
+ plat_data = (struct gce_plat *)of_device_get_match_data(dev);
+ if (!plat_data) {
+ dev_err(dev, "failed to get match data\n");
+ return -EINVAL;
+ }
+
+ cmdq->thread_nr = plat_data->thread_nr;
+ cmdq->shift_pa = plat_data->shift;
+ cmdq->irq_mask = GENMASK(cmdq->thread_nr - 1, 0);
+ err = devm_request_irq(dev, cmdq->irq, cmdq_irq_handler, IRQF_SHARED,
+ "mtk_cmdq", cmdq);
+ if (err < 0) {
+ dev_err(dev, "failed to register ISR (%d)\n", err);
+ return err;
+ }
+
+ dev_dbg(dev, "cmdq device: addr:0x%p, va:0x%p, irq:%d\n",
+ dev, cmdq->base, cmdq->irq);
+
+ cmdq->clock = devm_clk_get(dev, "gce");
+ if (IS_ERR(cmdq->clock)) {
+ dev_err(dev, "failed to get gce clk\n");
+ return PTR_ERR(cmdq->clock);
+ }
+
+ cmdq->mbox.dev = dev;
+ cmdq->mbox.chans = devm_kcalloc(dev, cmdq->thread_nr,
+ sizeof(*cmdq->mbox.chans), GFP_KERNEL);
+ if (!cmdq->mbox.chans)
+ return -ENOMEM;
+
+ cmdq->mbox.num_chans = cmdq->thread_nr;
+ cmdq->mbox.ops = &cmdq_mbox_chan_ops;
+ cmdq->mbox.of_xlate = cmdq_xlate;
+
+ /* make use of TXDONE_BY_ACK */
+ cmdq->mbox.txdone_irq = false;
+ cmdq->mbox.txdone_poll = false;
+
+ cmdq->thread = devm_kcalloc(dev, cmdq->thread_nr,
+ sizeof(*cmdq->thread), GFP_KERNEL);
+ if (!cmdq->thread)
+ return -ENOMEM;
+
+ for (i = 0; i < cmdq->thread_nr; i++) {
+ cmdq->thread[i].base = cmdq->base + CMDQ_THR_BASE +
+ CMDQ_THR_SIZE * i;
+ INIT_LIST_HEAD(&cmdq->thread[i].task_busy_list);
+ cmdq->mbox.chans[i].con_priv = (void *)&cmdq->thread[i];
+ }
+
+ err = devm_mbox_controller_register(dev, &cmdq->mbox);
+ if (err < 0) {
+ dev_err(dev, "failed to register mailbox: %d\n", err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, cmdq);
+ WARN_ON(clk_prepare(cmdq->clock) < 0);
+
+ cmdq_init(cmdq);
+
+ return 0;
+}
+
+static const struct dev_pm_ops cmdq_pm_ops = {
+ .suspend = cmdq_suspend,
+ .resume = cmdq_resume,
+};
+
+static const struct gce_plat gce_plat_v2 = {.thread_nr = 16};
+static const struct gce_plat gce_plat_v3 = {.thread_nr = 24};
+static const struct gce_plat gce_plat_v4 = {.thread_nr = 24, .shift = 3};
+
+static const struct of_device_id cmdq_of_ids[] = {
+ {.compatible = "mediatek,mt8173-gce", .data = (void *)&gce_plat_v2},
+ {.compatible = "mediatek,mt8183-gce", .data = (void *)&gce_plat_v3},
+ {.compatible = "mediatek,mt6779-gce", .data = (void *)&gce_plat_v4},
+ {}
+};
+
+static struct platform_driver cmdq_drv = {
+ .probe = cmdq_probe,
+ .remove = cmdq_remove,
+ .driver = {
+ .name = "mtk_cmdq",
+ .pm = &cmdq_pm_ops,
+ .of_match_table = cmdq_of_ids,
+ }
+};
+
+static int __init cmdq_drv_init(void)
+{
+ return platform_driver_register(&cmdq_drv);
+}
+
+static void __exit cmdq_drv_exit(void)
+{
+ platform_driver_unregister(&cmdq_drv);
+}
+
+subsys_initcall(cmdq_drv_init);
+module_exit(cmdq_drv_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c
new file mode 100644
index 000000000..93fe08aef
--- /dev/null
+++ b/drivers/mailbox/omap-mailbox.c
@@ -0,0 +1,933 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * OMAP mailbox driver
+ *
+ * Copyright (C) 2006-2009 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2013-2019 Texas Instruments Incorporated - https://www.ti.com
+ *
+ * Contact: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
+ * Suman Anna <s-anna@ti.com>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/kfifo.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/omap-mailbox.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox_client.h>
+
+#include "mailbox.h"
+
+#define MAILBOX_REVISION 0x000
+#define MAILBOX_MESSAGE(m) (0x040 + 4 * (m))
+#define MAILBOX_FIFOSTATUS(m) (0x080 + 4 * (m))
+#define MAILBOX_MSGSTATUS(m) (0x0c0 + 4 * (m))
+
+#define OMAP2_MAILBOX_IRQSTATUS(u) (0x100 + 8 * (u))
+#define OMAP2_MAILBOX_IRQENABLE(u) (0x104 + 8 * (u))
+
+#define OMAP4_MAILBOX_IRQSTATUS(u) (0x104 + 0x10 * (u))
+#define OMAP4_MAILBOX_IRQENABLE(u) (0x108 + 0x10 * (u))
+#define OMAP4_MAILBOX_IRQENABLE_CLR(u) (0x10c + 0x10 * (u))
+
+#define MAILBOX_IRQSTATUS(type, u) (type ? OMAP4_MAILBOX_IRQSTATUS(u) : \
+ OMAP2_MAILBOX_IRQSTATUS(u))
+#define MAILBOX_IRQENABLE(type, u) (type ? OMAP4_MAILBOX_IRQENABLE(u) : \
+ OMAP2_MAILBOX_IRQENABLE(u))
+#define MAILBOX_IRQDISABLE(type, u) (type ? OMAP4_MAILBOX_IRQENABLE_CLR(u) \
+ : OMAP2_MAILBOX_IRQENABLE(u))
+
+#define MAILBOX_IRQ_NEWMSG(m) (1 << (2 * (m)))
+#define MAILBOX_IRQ_NOTFULL(m) (1 << (2 * (m) + 1))
+
+/* Interrupt register configuration types */
+#define MBOX_INTR_CFG_TYPE1 0
+#define MBOX_INTR_CFG_TYPE2 1
+
+struct omap_mbox_fifo {
+ unsigned long msg;
+ unsigned long fifo_stat;
+ unsigned long msg_stat;
+ unsigned long irqenable;
+ unsigned long irqstatus;
+ unsigned long irqdisable;
+ u32 intr_bit;
+};
+
+struct omap_mbox_queue {
+ spinlock_t lock;
+ struct kfifo fifo;
+ struct work_struct work;
+ struct omap_mbox *mbox;
+ bool full;
+};
+
+struct omap_mbox_match_data {
+ u32 intr_type;
+};
+
+struct omap_mbox_device {
+ struct device *dev;
+ struct mutex cfg_lock;
+ void __iomem *mbox_base;
+ u32 *irq_ctx;
+ u32 num_users;
+ u32 num_fifos;
+ u32 intr_type;
+ struct omap_mbox **mboxes;
+ struct mbox_controller controller;
+ struct list_head elem;
+};
+
+struct omap_mbox_fifo_info {
+ int tx_id;
+ int tx_usr;
+ int tx_irq;
+
+ int rx_id;
+ int rx_usr;
+ int rx_irq;
+
+ const char *name;
+ bool send_no_irq;
+};
+
+struct omap_mbox {
+ const char *name;
+ int irq;
+ struct omap_mbox_queue *rxq;
+ struct device *dev;
+ struct omap_mbox_device *parent;
+ struct omap_mbox_fifo tx_fifo;
+ struct omap_mbox_fifo rx_fifo;
+ u32 intr_type;
+ struct mbox_chan *chan;
+ bool send_no_irq;
+};
+
+/* global variables for the mailbox devices */
+static DEFINE_MUTEX(omap_mbox_devices_lock);
+static LIST_HEAD(omap_mbox_devices);
+
+static unsigned int mbox_kfifo_size = CONFIG_OMAP_MBOX_KFIFO_SIZE;
+module_param(mbox_kfifo_size, uint, S_IRUGO);
+MODULE_PARM_DESC(mbox_kfifo_size, "Size of omap's mailbox kfifo (bytes)");
+
+static struct omap_mbox *mbox_chan_to_omap_mbox(struct mbox_chan *chan)
+{
+ if (!chan || !chan->con_priv)
+ return NULL;
+
+ return (struct omap_mbox *)chan->con_priv;
+}
+
+static inline
+unsigned int mbox_read_reg(struct omap_mbox_device *mdev, size_t ofs)
+{
+ return __raw_readl(mdev->mbox_base + ofs);
+}
+
+static inline
+void mbox_write_reg(struct omap_mbox_device *mdev, u32 val, size_t ofs)
+{
+ __raw_writel(val, mdev->mbox_base + ofs);
+}
+
+/* Mailbox FIFO handle functions */
+static u32 mbox_fifo_read(struct omap_mbox *mbox)
+{
+ struct omap_mbox_fifo *fifo = &mbox->rx_fifo;
+
+ return mbox_read_reg(mbox->parent, fifo->msg);
+}
+
+static void mbox_fifo_write(struct omap_mbox *mbox, u32 msg)
+{
+ struct omap_mbox_fifo *fifo = &mbox->tx_fifo;
+
+ mbox_write_reg(mbox->parent, msg, fifo->msg);
+}
+
+static int mbox_fifo_empty(struct omap_mbox *mbox)
+{
+ struct omap_mbox_fifo *fifo = &mbox->rx_fifo;
+
+ return (mbox_read_reg(mbox->parent, fifo->msg_stat) == 0);
+}
+
+static int mbox_fifo_full(struct omap_mbox *mbox)
+{
+ struct omap_mbox_fifo *fifo = &mbox->tx_fifo;
+
+ return mbox_read_reg(mbox->parent, fifo->fifo_stat);
+}
+
+/* Mailbox IRQ handle functions */
+static void ack_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
+{
+ struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
+ &mbox->tx_fifo : &mbox->rx_fifo;
+ u32 bit = fifo->intr_bit;
+ u32 irqstatus = fifo->irqstatus;
+
+ mbox_write_reg(mbox->parent, bit, irqstatus);
+
+ /* Flush posted write for irq status to avoid spurious interrupts */
+ mbox_read_reg(mbox->parent, irqstatus);
+}
+
+static int is_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
+{
+ struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
+ &mbox->tx_fifo : &mbox->rx_fifo;
+ u32 bit = fifo->intr_bit;
+ u32 irqenable = fifo->irqenable;
+ u32 irqstatus = fifo->irqstatus;
+
+ u32 enable = mbox_read_reg(mbox->parent, irqenable);
+ u32 status = mbox_read_reg(mbox->parent, irqstatus);
+
+ return (int)(enable & status & bit);
+}
+
+static void _omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
+{
+ u32 l;
+ struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
+ &mbox->tx_fifo : &mbox->rx_fifo;
+ u32 bit = fifo->intr_bit;
+ u32 irqenable = fifo->irqenable;
+
+ l = mbox_read_reg(mbox->parent, irqenable);
+ l |= bit;
+ mbox_write_reg(mbox->parent, l, irqenable);
+}
+
+static void _omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
+{
+ struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
+ &mbox->tx_fifo : &mbox->rx_fifo;
+ u32 bit = fifo->intr_bit;
+ u32 irqdisable = fifo->irqdisable;
+
+ /*
+ * Read and update the interrupt configuration register for pre-OMAP4.
+ * OMAP4 and later SoCs have a dedicated interrupt disabling register.
+ */
+ if (!mbox->intr_type)
+ bit = mbox_read_reg(mbox->parent, irqdisable) & ~bit;
+
+ mbox_write_reg(mbox->parent, bit, irqdisable);
+}
+
+void omap_mbox_enable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq)
+{
+ struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
+
+ if (WARN_ON(!mbox))
+ return;
+
+ _omap_mbox_enable_irq(mbox, irq);
+}
+EXPORT_SYMBOL(omap_mbox_enable_irq);
+
+void omap_mbox_disable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq)
+{
+ struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
+
+ if (WARN_ON(!mbox))
+ return;
+
+ _omap_mbox_disable_irq(mbox, irq);
+}
+EXPORT_SYMBOL(omap_mbox_disable_irq);
+
+/*
+ * Message receiver (workqueue)
+ */
+static void mbox_rx_work(struct work_struct *work)
+{
+ struct omap_mbox_queue *mq =
+ container_of(work, struct omap_mbox_queue, work);
+ mbox_msg_t data;
+ u32 msg;
+ int len;
+
+ while (kfifo_len(&mq->fifo) >= sizeof(msg)) {
+ len = kfifo_out(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
+ WARN_ON(len != sizeof(msg));
+ data = msg;
+
+ mbox_chan_received_data(mq->mbox->chan, (void *)data);
+ spin_lock_irq(&mq->lock);
+ if (mq->full) {
+ mq->full = false;
+ _omap_mbox_enable_irq(mq->mbox, IRQ_RX);
+ }
+ spin_unlock_irq(&mq->lock);
+ }
+}
+
+/*
+ * Mailbox interrupt handler
+ */
+static void __mbox_tx_interrupt(struct omap_mbox *mbox)
+{
+ _omap_mbox_disable_irq(mbox, IRQ_TX);
+ ack_mbox_irq(mbox, IRQ_TX);
+ mbox_chan_txdone(mbox->chan, 0);
+}
+
+static void __mbox_rx_interrupt(struct omap_mbox *mbox)
+{
+ struct omap_mbox_queue *mq = mbox->rxq;
+ u32 msg;
+ int len;
+
+ while (!mbox_fifo_empty(mbox)) {
+ if (unlikely(kfifo_avail(&mq->fifo) < sizeof(msg))) {
+ _omap_mbox_disable_irq(mbox, IRQ_RX);
+ mq->full = true;
+ goto nomem;
+ }
+
+ msg = mbox_fifo_read(mbox);
+
+ len = kfifo_in(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
+ WARN_ON(len != sizeof(msg));
+ }
+
+ /* no more messages in the fifo. clear IRQ source. */
+ ack_mbox_irq(mbox, IRQ_RX);
+nomem:
+ schedule_work(&mbox->rxq->work);
+}
+
+static irqreturn_t mbox_interrupt(int irq, void *p)
+{
+ struct omap_mbox *mbox = p;
+
+ if (is_mbox_irq(mbox, IRQ_TX))
+ __mbox_tx_interrupt(mbox);
+
+ if (is_mbox_irq(mbox, IRQ_RX))
+ __mbox_rx_interrupt(mbox);
+
+ return IRQ_HANDLED;
+}
+
+static struct omap_mbox_queue *mbox_queue_alloc(struct omap_mbox *mbox,
+ void (*work)(struct work_struct *))
+{
+ struct omap_mbox_queue *mq;
+
+ if (!work)
+ return NULL;
+
+ mq = kzalloc(sizeof(*mq), GFP_KERNEL);
+ if (!mq)
+ return NULL;
+
+ spin_lock_init(&mq->lock);
+
+ if (kfifo_alloc(&mq->fifo, mbox_kfifo_size, GFP_KERNEL))
+ goto error;
+
+ INIT_WORK(&mq->work, work);
+ return mq;
+
+error:
+ kfree(mq);
+ return NULL;
+}
+
+static void mbox_queue_free(struct omap_mbox_queue *q)
+{
+ kfifo_free(&q->fifo);
+ kfree(q);
+}
+
+static int omap_mbox_startup(struct omap_mbox *mbox)
+{
+ int ret = 0;
+ struct omap_mbox_queue *mq;
+
+ mq = mbox_queue_alloc(mbox, mbox_rx_work);
+ if (!mq)
+ return -ENOMEM;
+ mbox->rxq = mq;
+ mq->mbox = mbox;
+
+ ret = request_irq(mbox->irq, mbox_interrupt, IRQF_SHARED,
+ mbox->name, mbox);
+ if (unlikely(ret)) {
+ pr_err("failed to register mailbox interrupt:%d\n", ret);
+ goto fail_request_irq;
+ }
+
+ if (mbox->send_no_irq)
+ mbox->chan->txdone_method = TXDONE_BY_ACK;
+
+ _omap_mbox_enable_irq(mbox, IRQ_RX);
+
+ return 0;
+
+fail_request_irq:
+ mbox_queue_free(mbox->rxq);
+ return ret;
+}
+
+static void omap_mbox_fini(struct omap_mbox *mbox)
+{
+ _omap_mbox_disable_irq(mbox, IRQ_RX);
+ free_irq(mbox->irq, mbox);
+ flush_work(&mbox->rxq->work);
+ mbox_queue_free(mbox->rxq);
+}
+
+static struct omap_mbox *omap_mbox_device_find(struct omap_mbox_device *mdev,
+ const char *mbox_name)
+{
+ struct omap_mbox *_mbox, *mbox = NULL;
+ struct omap_mbox **mboxes = mdev->mboxes;
+ int i;
+
+ if (!mboxes)
+ return NULL;
+
+ for (i = 0; (_mbox = mboxes[i]); i++) {
+ if (!strcmp(_mbox->name, mbox_name)) {
+ mbox = _mbox;
+ break;
+ }
+ }
+ return mbox;
+}
+
+struct mbox_chan *omap_mbox_request_channel(struct mbox_client *cl,
+ const char *chan_name)
+{
+ struct device *dev = cl->dev;
+ struct omap_mbox *mbox = NULL;
+ struct omap_mbox_device *mdev;
+ struct mbox_chan *chan;
+ unsigned long flags;
+ int ret;
+
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ if (dev->of_node) {
+ pr_err("%s: please use mbox_request_channel(), this API is supported only for OMAP non-DT usage\n",
+ __func__);
+ return ERR_PTR(-ENODEV);
+ }
+
+ mutex_lock(&omap_mbox_devices_lock);
+ list_for_each_entry(mdev, &omap_mbox_devices, elem) {
+ mbox = omap_mbox_device_find(mdev, chan_name);
+ if (mbox)
+ break;
+ }
+ mutex_unlock(&omap_mbox_devices_lock);
+
+ if (!mbox || !mbox->chan)
+ return ERR_PTR(-ENOENT);
+
+ chan = mbox->chan;
+ spin_lock_irqsave(&chan->lock, flags);
+ chan->msg_free = 0;
+ chan->msg_count = 0;
+ chan->active_req = NULL;
+ chan->cl = cl;
+ init_completion(&chan->tx_complete);
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ ret = chan->mbox->ops->startup(chan);
+ if (ret) {
+ pr_err("Unable to startup the chan (%d)\n", ret);
+ mbox_free_channel(chan);
+ chan = ERR_PTR(ret);
+ }
+
+ return chan;
+}
+EXPORT_SYMBOL(omap_mbox_request_channel);
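+
+/*
+ * Illustrative sketch with hypothetical foo_* names (not part of this file):
+ * on DT-based platforms a client binds to an OMAP mailbox through the
+ * generic framework (mbox_request_channel()) rather than
+ * omap_mbox_request_channel(). <linux/mailbox_client.h> is assumed.
+ */
+static void foo_rx_callback(struct mbox_client *cl, void *data)
+{
+ /* data carries the 32-bit mailbox message */
+ dev_dbg(cl->dev, "received msg 0x%x\n", omap_mbox_message(data));
+}
+
+static struct mbox_client foo_client = {
+ .rx_callback = foo_rx_callback,
+ .tx_block = true,
+};
+
+static int foo_client_attach(struct device *dev)
+{
+ struct mbox_chan *chan;
+ int ret;
+
+ foo_client.dev = dev;
+ chan = mbox_request_channel(&foo_client, 0); /* index into "mboxes" */
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
+
+ ret = mbox_send_message(chan, (void *)(uintptr_t)0xdeadbeef);
+ return ret < 0 ? ret : 0;
+}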
+
+static struct class omap_mbox_class = { .name = "mbox", };
+
+static int omap_mbox_register(struct omap_mbox_device *mdev)
+{
+ int ret;
+ int i;
+ struct omap_mbox **mboxes;
+
+ if (!mdev || !mdev->mboxes)
+ return -EINVAL;
+
+ mboxes = mdev->mboxes;
+ for (i = 0; mboxes[i]; i++) {
+ struct omap_mbox *mbox = mboxes[i];
+
+ mbox->dev = device_create(&omap_mbox_class, mdev->dev,
+ 0, mbox, "%s", mbox->name);
+ if (IS_ERR(mbox->dev)) {
+ ret = PTR_ERR(mbox->dev);
+ goto err_out;
+ }
+ }
+
+ mutex_lock(&omap_mbox_devices_lock);
+ list_add(&mdev->elem, &omap_mbox_devices);
+ mutex_unlock(&omap_mbox_devices_lock);
+
+ ret = devm_mbox_controller_register(mdev->dev, &mdev->controller);
+
+err_out:
+ if (ret) {
+ while (i--)
+ device_unregister(mboxes[i]->dev);
+ }
+ return ret;
+}
+
+static int omap_mbox_unregister(struct omap_mbox_device *mdev)
+{
+ int i;
+ struct omap_mbox **mboxes;
+
+ if (!mdev || !mdev->mboxes)
+ return -EINVAL;
+
+ mutex_lock(&omap_mbox_devices_lock);
+ list_del(&mdev->elem);
+ mutex_unlock(&omap_mbox_devices_lock);
+
+ mboxes = mdev->mboxes;
+ for (i = 0; mboxes[i]; i++)
+ device_unregister(mboxes[i]->dev);
+ return 0;
+}
+
+static int omap_mbox_chan_startup(struct mbox_chan *chan)
+{
+ struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
+ struct omap_mbox_device *mdev = mbox->parent;
+ int ret = 0;
+
+ mutex_lock(&mdev->cfg_lock);
+ pm_runtime_get_sync(mdev->dev);
+ ret = omap_mbox_startup(mbox);
+ if (ret)
+ pm_runtime_put_sync(mdev->dev);
+ mutex_unlock(&mdev->cfg_lock);
+ return ret;
+}
+
+static void omap_mbox_chan_shutdown(struct mbox_chan *chan)
+{
+ struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
+ struct omap_mbox_device *mdev = mbox->parent;
+
+ mutex_lock(&mdev->cfg_lock);
+ omap_mbox_fini(mbox);
+ pm_runtime_put_sync(mdev->dev);
+ mutex_unlock(&mdev->cfg_lock);
+}
+
+static int omap_mbox_chan_send_noirq(struct omap_mbox *mbox, u32 msg)
+{
+ int ret = -EBUSY;
+
+ if (!mbox_fifo_full(mbox)) {
+ _omap_mbox_enable_irq(mbox, IRQ_RX);
+ mbox_fifo_write(mbox, msg);
+ ret = 0;
+ _omap_mbox_disable_irq(mbox, IRQ_RX);
+
+ /* we must read and ack the interrupt directly from here */
+ mbox_fifo_read(mbox);
+ ack_mbox_irq(mbox, IRQ_RX);
+ }
+
+ return ret;
+}
+
+static int omap_mbox_chan_send(struct omap_mbox *mbox, u32 msg)
+{
+ int ret = -EBUSY;
+
+ if (!mbox_fifo_full(mbox)) {
+ mbox_fifo_write(mbox, msg);
+ ret = 0;
+ }
+
+ /* always enable the interrupt */
+ _omap_mbox_enable_irq(mbox, IRQ_TX);
+ return ret;
+}
+
+static int omap_mbox_chan_send_data(struct mbox_chan *chan, void *data)
+{
+ struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
+ int ret;
+ u32 msg = omap_mbox_message(data);
+
+ if (!mbox)
+ return -EINVAL;
+
+ if (mbox->send_no_irq)
+ ret = omap_mbox_chan_send_noirq(mbox, msg);
+ else
+ ret = omap_mbox_chan_send(mbox, msg);
+
+ return ret;
+}
+
+static const struct mbox_chan_ops omap_mbox_chan_ops = {
+ .startup = omap_mbox_chan_startup,
+ .send_data = omap_mbox_chan_send_data,
+ .shutdown = omap_mbox_chan_shutdown,
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int omap_mbox_suspend(struct device *dev)
+{
+ struct omap_mbox_device *mdev = dev_get_drvdata(dev);
+ u32 usr, fifo, reg;
+
+ if (pm_runtime_status_suspended(dev))
+ return 0;
+
+ for (fifo = 0; fifo < mdev->num_fifos; fifo++) {
+ if (mbox_read_reg(mdev, MAILBOX_MSGSTATUS(fifo))) {
+ dev_err(mdev->dev, "fifo %d has unexpected unread messages\n",
+ fifo);
+ return -EBUSY;
+ }
+ }
+
+ for (usr = 0; usr < mdev->num_users; usr++) {
+ reg = MAILBOX_IRQENABLE(mdev->intr_type, usr);
+ mdev->irq_ctx[usr] = mbox_read_reg(mdev, reg);
+ }
+
+ return 0;
+}
+
+static int omap_mbox_resume(struct device *dev)
+{
+ struct omap_mbox_device *mdev = dev_get_drvdata(dev);
+ u32 usr, reg;
+
+ if (pm_runtime_status_suspended(dev))
+ return 0;
+
+ for (usr = 0; usr < mdev->num_users; usr++) {
+ reg = MAILBOX_IRQENABLE(mdev->intr_type, usr);
+ mbox_write_reg(mdev, mdev->irq_ctx[usr], reg);
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops omap_mbox_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(omap_mbox_suspend, omap_mbox_resume)
+};
+
+static const struct omap_mbox_match_data omap2_data = { MBOX_INTR_CFG_TYPE1 };
+static const struct omap_mbox_match_data omap4_data = { MBOX_INTR_CFG_TYPE2 };
+
+static const struct of_device_id omap_mailbox_of_match[] = {
+ {
+ .compatible = "ti,omap2-mailbox",
+ .data = &omap2_data,
+ },
+ {
+ .compatible = "ti,omap3-mailbox",
+ .data = &omap2_data,
+ },
+ {
+ .compatible = "ti,omap4-mailbox",
+ .data = &omap4_data,
+ },
+ {
+ .compatible = "ti,am654-mailbox",
+ .data = &omap4_data,
+ },
+ {
+ /* end */
+ },
+};
+MODULE_DEVICE_TABLE(of, omap_mailbox_of_match);
+
+static struct mbox_chan *omap_mbox_of_xlate(struct mbox_controller *controller,
+ const struct of_phandle_args *sp)
+{
+ phandle phandle = sp->args[0];
+ struct device_node *node;
+ struct omap_mbox_device *mdev;
+ struct omap_mbox *mbox;
+
+ mdev = container_of(controller, struct omap_mbox_device, controller);
+ if (WARN_ON(!mdev))
+ return ERR_PTR(-EINVAL);
+
+ node = of_find_node_by_phandle(phandle);
+ if (!node) {
+ pr_err("%s: could not find node phandle 0x%x\n",
+ __func__, phandle);
+ return ERR_PTR(-ENODEV);
+ }
+
+ mbox = omap_mbox_device_find(mdev, node->name);
+ of_node_put(node);
+ return mbox ? mbox->chan : ERR_PTR(-ENOENT);
+}
+
+static int omap_mbox_probe(struct platform_device *pdev)
+{
+ struct resource *mem;
+ int ret;
+ struct mbox_chan *chnls;
+ struct omap_mbox **list, *mbox, *mboxblk;
+ struct omap_mbox_fifo_info *finfo, *finfoblk;
+ struct omap_mbox_device *mdev;
+ struct omap_mbox_fifo *fifo;
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *child;
+ const struct omap_mbox_match_data *match_data;
+ u32 intr_type, info_count;
+ u32 num_users, num_fifos;
+ u32 tmp[3];
+ u32 l;
+ int i;
+
+ if (!node) {
+ pr_err("%s: only DT-based devices are supported\n", __func__);
+ return -ENODEV;
+ }
+
+ match_data = of_device_get_match_data(&pdev->dev);
+ if (!match_data)
+ return -ENODEV;
+ intr_type = match_data->intr_type;
+
+ if (of_property_read_u32(node, "ti,mbox-num-users", &num_users))
+ return -ENODEV;
+
+ if (of_property_read_u32(node, "ti,mbox-num-fifos", &num_fifos))
+ return -ENODEV;
+
+ info_count = of_get_available_child_count(node);
+ if (!info_count) {
+ dev_err(&pdev->dev, "no available mbox devices found\n");
+ return -ENODEV;
+ }
+
+ finfoblk = devm_kcalloc(&pdev->dev, info_count, sizeof(*finfoblk),
+ GFP_KERNEL);
+ if (!finfoblk)
+ return -ENOMEM;
+
+ finfo = finfoblk;
+ child = NULL;
+ for (i = 0; i < info_count; i++, finfo++) {
+ child = of_get_next_available_child(node, child);
+ ret = of_property_read_u32_array(child, "ti,mbox-tx", tmp,
+ ARRAY_SIZE(tmp));
+ if (ret)
+ return ret;
+ finfo->tx_id = tmp[0];
+ finfo->tx_irq = tmp[1];
+ finfo->tx_usr = tmp[2];
+
+ ret = of_property_read_u32_array(child, "ti,mbox-rx", tmp,
+ ARRAY_SIZE(tmp));
+ if (ret)
+ return ret;
+ finfo->rx_id = tmp[0];
+ finfo->rx_irq = tmp[1];
+ finfo->rx_usr = tmp[2];
+
+ finfo->name = child->name;
+
+ if (of_find_property(child, "ti,mbox-send-noirq", NULL))
+ finfo->send_no_irq = true;
+
+ if (finfo->tx_id >= num_fifos || finfo->rx_id >= num_fifos ||
+ finfo->tx_usr >= num_users || finfo->rx_usr >= num_users)
+ return -EINVAL;
+ }
+
+ mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
+ if (!mdev)
+ return -ENOMEM;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mdev->mbox_base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(mdev->mbox_base))
+ return PTR_ERR(mdev->mbox_base);
+
+ mdev->irq_ctx = devm_kcalloc(&pdev->dev, num_users, sizeof(u32),
+ GFP_KERNEL);
+ if (!mdev->irq_ctx)
+ return -ENOMEM;
+
+ /* allocate one extra for marking end of list */
+ list = devm_kcalloc(&pdev->dev, info_count + 1, sizeof(*list),
+ GFP_KERNEL);
+ if (!list)
+ return -ENOMEM;
+
+ chnls = devm_kcalloc(&pdev->dev, info_count + 1, sizeof(*chnls),
+ GFP_KERNEL);
+ if (!chnls)
+ return -ENOMEM;
+
+ mboxblk = devm_kcalloc(&pdev->dev, info_count, sizeof(*mbox),
+ GFP_KERNEL);
+ if (!mboxblk)
+ return -ENOMEM;
+
+ mbox = mboxblk;
+ finfo = finfoblk;
+ for (i = 0; i < info_count; i++, finfo++) {
+ fifo = &mbox->tx_fifo;
+ fifo->msg = MAILBOX_MESSAGE(finfo->tx_id);
+ fifo->fifo_stat = MAILBOX_FIFOSTATUS(finfo->tx_id);
+ fifo->intr_bit = MAILBOX_IRQ_NOTFULL(finfo->tx_id);
+ fifo->irqenable = MAILBOX_IRQENABLE(intr_type, finfo->tx_usr);
+ fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, finfo->tx_usr);
+ fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, finfo->tx_usr);
+
+ fifo = &mbox->rx_fifo;
+ fifo->msg = MAILBOX_MESSAGE(finfo->rx_id);
+ fifo->msg_stat = MAILBOX_MSGSTATUS(finfo->rx_id);
+ fifo->intr_bit = MAILBOX_IRQ_NEWMSG(finfo->rx_id);
+ fifo->irqenable = MAILBOX_IRQENABLE(intr_type, finfo->rx_usr);
+ fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, finfo->rx_usr);
+ fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, finfo->rx_usr);
+
+ mbox->send_no_irq = finfo->send_no_irq;
+ mbox->intr_type = intr_type;
+
+ mbox->parent = mdev;
+ mbox->name = finfo->name;
+ mbox->irq = platform_get_irq(pdev, finfo->tx_irq);
+ if (mbox->irq < 0)
+ return mbox->irq;
+ mbox->chan = &chnls[i];
+ chnls[i].con_priv = mbox;
+ list[i] = mbox++;
+ }
+
+ mutex_init(&mdev->cfg_lock);
+ mdev->dev = &pdev->dev;
+ mdev->num_users = num_users;
+ mdev->num_fifos = num_fifos;
+ mdev->intr_type = intr_type;
+ mdev->mboxes = list;
+
+ /*
+ * OMAP/K3 Mailbox IP does not have a Tx-Done IRQ, but rather a Tx-Ready
+ * IRQ, which is needed to run the Tx state machine.
+ */
+ mdev->controller.txdone_irq = true;
+ mdev->controller.dev = mdev->dev;
+ mdev->controller.ops = &omap_mbox_chan_ops;
+ mdev->controller.chans = chnls;
+ mdev->controller.num_chans = info_count;
+ mdev->controller.of_xlate = omap_mbox_of_xlate;
+ ret = omap_mbox_register(mdev);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, mdev);
+ pm_runtime_enable(mdev->dev);
+
+ ret = pm_runtime_get_sync(mdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(mdev->dev);
+ goto unregister;
+ }
+
+ /*
+ * just print the raw revision register, the format is not
+ * uniform across all SoCs
+ */
+ l = mbox_read_reg(mdev, MAILBOX_REVISION);
+ dev_info(mdev->dev, "omap mailbox rev 0x%x\n", l);
+
+ ret = pm_runtime_put_sync(mdev->dev);
+ if (ret < 0 && ret != -ENOSYS)
+ goto unregister;
+
+ devm_kfree(&pdev->dev, finfoblk);
+ return 0;
+
+unregister:
+ pm_runtime_disable(mdev->dev);
+ omap_mbox_unregister(mdev);
+ return ret;
+}
+
+static int omap_mbox_remove(struct platform_device *pdev)
+{
+ struct omap_mbox_device *mdev = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(mdev->dev);
+ omap_mbox_unregister(mdev);
+
+ return 0;
+}
+
+static struct platform_driver omap_mbox_driver = {
+ .probe = omap_mbox_probe,
+ .remove = omap_mbox_remove,
+ .driver = {
+ .name = "omap-mailbox",
+ .pm = &omap_mbox_pm_ops,
+ .of_match_table = of_match_ptr(omap_mailbox_of_match),
+ },
+};
+
+static int __init omap_mbox_init(void)
+{
+ int err;
+
+ err = class_register(&omap_mbox_class);
+ if (err)
+ return err;
+
+ /* kfifo size sanity check: alignment and minimal size */
+ mbox_kfifo_size = ALIGN(mbox_kfifo_size, sizeof(u32));
+ mbox_kfifo_size = max_t(unsigned int, mbox_kfifo_size, sizeof(u32));
+
+ err = platform_driver_register(&omap_mbox_driver);
+ if (err)
+ class_unregister(&omap_mbox_class);
+
+ return err;
+}
+subsys_initcall(omap_mbox_init);
+
+static void __exit omap_mbox_exit(void)
+{
+ platform_driver_unregister(&omap_mbox_driver);
+ class_unregister(&omap_mbox_class);
+}
+module_exit(omap_mbox_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("omap mailbox: interrupt driven messaging");
+MODULE_AUTHOR("Toshihiro Kobayashi");
+MODULE_AUTHOR("Hiroshi DOYU");
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
new file mode 100644
index 000000000..ef9ecd1f5
--- /dev/null
+++ b/drivers/mailbox/pcc.c
@@ -0,0 +1,616 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2014 Linaro Ltd.
+ * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
+ *
+ * PCC (Platform Communication Channel) is defined in the ACPI 5.0+
+ * specification. It is a mailbox like mechanism to allow clients
+ * such as CPPC (Collaborative Processor Performance Control), RAS
+ * (Reliability, Availability and Serviceability) and MPST (Memory
+ * Node Power State Table) to talk to the platform (e.g. BMC) through
+ * shared memory regions as defined in the PCC table entries. The PCC
+ * specification supports a Doorbell mechanism for the PCC clients
+ * to notify the platform about new data. This Doorbell information
+ * is also specified in each PCC table entry.
+ *
+ * Typical high level flow of operation is:
+ *
+ * PCC Reads:
+ * * Client tries to acquire a channel lock.
+ * * After it is acquired it writes READ cmd in communication region cmd
+ * address.
+ * * Client issues mbox_send_message() which rings the PCC doorbell
+ * for its PCC channel.
+ * * If the command completes, then the client has control over the
+ * channel and can proceed with its reads.
+ * * Client releases lock.
+ *
+ * PCC Writes:
+ * * Client tries to acquire channel lock.
+ * * Client writes to its communication region after it acquires a
+ * channel lock.
+ * * Client writes WRITE cmd in communication region cmd address.
+ * * Client issues mbox_send_message() which rings the PCC doorbell
+ * for its PCC channel.
+ * * If the command completes, then the writes have succeeded and it can
+ * release the channel lock.
+ *
+ * There is a Nominal latency defined for each channel which indicates
+ * how long to wait until a command completes. If the command is not complete,
+ * the client needs to retry or assume failure.
+ *
+ * For more details about PCC, please see the ACPI specification from
+ * http://www.uefi.org/ACPIv5.1 Section 14.
+ *
+ * This file implements PCC as a Mailbox controller and allows for PCC
+ * clients to be implemented as its Mailbox Client Channels.
+ */
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox_client.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <acpi/pcc.h>
+
+#include "mailbox.h"
+
+#define MBOX_IRQ_NAME "pcc-mbox"
+
+static struct mbox_chan *pcc_mbox_channels;
+
+/* Array of cached virtual address for doorbell registers */
+static void __iomem **pcc_doorbell_vaddr;
+/* Array of cached virtual address for doorbell ack registers */
+static void __iomem **pcc_doorbell_ack_vaddr;
+/* Array of doorbell interrupts */
+static int *pcc_doorbell_irq;
+
+static struct mbox_controller pcc_mbox_ctrl = {};
+/**
+ * get_pcc_channel - Given a PCC subspace idx, get
+ * the respective mbox_channel.
+ * @id: PCC subspace index.
+ *
+ * Return: ERR_PTR(errno) if error, else pointer
+ * to mbox channel.
+ */
+static struct mbox_chan *get_pcc_channel(int id)
+{
+ if (id < 0 || id >= pcc_mbox_ctrl.num_chans)
+ return ERR_PTR(-ENOENT);
+
+ return &pcc_mbox_channels[id];
+}
+
+/*
+ * PCC can be used with perf-critical drivers such as CPPC,
+ * so it makes sense to locally cache the virtual address and
+ * use it to read/write to PCC registers such as the doorbell register.
+ *
+ * The read_register() and write_register() helpers below are used to read
+ * from and write to perf-critical registers such as the PCC doorbell register.
+ */
+static int read_register(void __iomem *vaddr, u64 *val, unsigned int bit_width)
+{
+ int ret_val = 0;
+
+ switch (bit_width) {
+ case 8:
+ *val = readb(vaddr);
+ break;
+ case 16:
+ *val = readw(vaddr);
+ break;
+ case 32:
+ *val = readl(vaddr);
+ break;
+ case 64:
+ *val = readq(vaddr);
+ break;
+ default:
+ pr_debug("Error: Cannot read register of %u bit width",
+ bit_width);
+ ret_val = -EFAULT;
+ break;
+ }
+ return ret_val;
+}
+
+static int write_register(void __iomem *vaddr, u64 val, unsigned int bit_width)
+{
+ int ret_val = 0;
+
+ switch (bit_width) {
+ case 8:
+ writeb(val, vaddr);
+ break;
+ case 16:
+ writew(val, vaddr);
+ break;
+ case 32:
+ writel(val, vaddr);
+ break;
+ case 64:
+ writeq(val, vaddr);
+ break;
+ default:
+ pr_debug("Error: Cannot write register of %u bit width",
+ bit_width);
+ ret_val = -EFAULT;
+ break;
+ }
+ return ret_val;
+}
+
+/**
+ * pcc_map_interrupt - Map a PCC subspace GSI to a linux IRQ number
+ * @interrupt: GSI number.
+ * @flags: interrupt flags
+ *
+ * Returns: a valid linux IRQ number on success
+ * 0 or -EINVAL on failure
+ */
+static int pcc_map_interrupt(u32 interrupt, u32 flags)
+{
+ int trigger, polarity;
+
+ if (!interrupt)
+ return 0;
+
+ trigger = (flags & ACPI_PCCT_INTERRUPT_MODE) ? ACPI_EDGE_SENSITIVE
+ : ACPI_LEVEL_SENSITIVE;
+
+ polarity = (flags & ACPI_PCCT_INTERRUPT_POLARITY) ? ACPI_ACTIVE_LOW
+ : ACPI_ACTIVE_HIGH;
+
+ return acpi_register_gsi(NULL, interrupt, trigger, polarity);
+}
+
+/**
+ * pcc_mbox_irq - PCC mailbox interrupt handler
+ */
+static irqreturn_t pcc_mbox_irq(int irq, void *p)
+{
+ struct acpi_generic_address *doorbell_ack;
+ struct acpi_pcct_hw_reduced *pcct_ss;
+ struct mbox_chan *chan = p;
+ u64 doorbell_ack_preserve;
+ u64 doorbell_ack_write;
+ u64 doorbell_ack_val;
+ int ret;
+
+ pcct_ss = chan->con_priv;
+
+ mbox_chan_received_data(chan, NULL);
+
+ if (pcct_ss->header.type == ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) {
+ struct acpi_pcct_hw_reduced_type2 *pcct2_ss = chan->con_priv;
+ u32 id = chan - pcc_mbox_channels;
+
+ doorbell_ack = &pcct2_ss->platform_ack_register;
+ doorbell_ack_preserve = pcct2_ss->ack_preserve_mask;
+ doorbell_ack_write = pcct2_ss->ack_write_mask;
+
+ ret = read_register(pcc_doorbell_ack_vaddr[id],
+ &doorbell_ack_val,
+ doorbell_ack->bit_width);
+ if (ret)
+ return IRQ_NONE;
+
+ ret = write_register(pcc_doorbell_ack_vaddr[id],
+ (doorbell_ack_val & doorbell_ack_preserve)
+ | doorbell_ack_write,
+ doorbell_ack->bit_width);
+ if (ret)
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * pcc_mbox_request_channel - PCC clients call this function to
+ * request a pointer to their PCC subspace, from which they
+ * can get the details of communicating with the remote.
+ * @cl: Pointer to Mailbox client, so we know where to bind the
+ * Channel.
+ * @subspace_id: The PCC Subspace index as parsed in the PCC client
+ * ACPI package. This is used to lookup the array of PCC
+ * subspaces as parsed by the PCC Mailbox controller.
+ *
+ * Return: Pointer to the Mailbox Channel if successful or
+ * ERR_PTR.
+ */
+struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
+ int subspace_id)
+{
+ struct device *dev = pcc_mbox_ctrl.dev;
+ struct mbox_chan *chan;
+ unsigned long flags;
+
+ /*
+ * Each PCC Subspace is a Mailbox Channel.
+ * The PCC Clients get their PCC Subspace ID
+ * from their own tables and pass it here.
+ * This returns a pointer to the PCC subspace
+ * for the Client to operate on.
+ */
+ chan = get_pcc_channel(subspace_id);
+
+ if (IS_ERR(chan) || chan->cl) {
+ dev_err(dev, "Channel not found for idx: %d\n", subspace_id);
+ return ERR_PTR(-EBUSY);
+ }
+
+ spin_lock_irqsave(&chan->lock, flags);
+ chan->msg_free = 0;
+ chan->msg_count = 0;
+ chan->active_req = NULL;
+ chan->cl = cl;
+ init_completion(&chan->tx_complete);
+
+ if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
+ chan->txdone_method = TXDONE_BY_ACK;
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ if (pcc_doorbell_irq[subspace_id] > 0) {
+ int rc;
+
+ rc = devm_request_irq(dev, pcc_doorbell_irq[subspace_id],
+ pcc_mbox_irq, 0, MBOX_IRQ_NAME, chan);
+ if (unlikely(rc)) {
+ dev_err(dev, "failed to register PCC interrupt %d\n",
+ pcc_doorbell_irq[subspace_id]);
+ pcc_mbox_free_channel(chan);
+ chan = ERR_PTR(rc);
+ }
+ }
+
+ return chan;
+}
+EXPORT_SYMBOL_GPL(pcc_mbox_request_channel);
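+
+/*
+ * Illustrative usage sketch with hypothetical foo_* names (not part of this
+ * file): a PCC client requests its subspace channel, writes its command into
+ * the shared communication region, and rings the doorbell via
+ * mbox_send_message(). <acpi/pcc.h> and <linux/mailbox_client.h> are assumed.
+ */
+static struct mbox_client foo_pcc_client = {
+ .knows_txdone = true, /* client polls the command-complete bit itself */
+};
+
+static int foo_pcc_kick(struct device *dev, int subspace_id)
+{
+ struct mbox_chan *chan;
+ int ret;
+
+ foo_pcc_client.dev = dev;
+ chan = pcc_mbox_request_channel(&foo_pcc_client, subspace_id);
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
+
+ /* ... fill the command field of the shared communication region ... */
+
+ ret = mbox_send_message(chan, NULL); /* rings the PCC doorbell */
+ if (ret < 0)
+ pcc_mbox_free_channel(chan);
+
+ return ret;
+}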
+
+/**
+ * pcc_mbox_free_channel - Clients call this to free their Channel.
+ *
+ * @chan: Pointer to the mailbox channel as returned by
+ * pcc_mbox_request_channel()
+ */
+void pcc_mbox_free_channel(struct mbox_chan *chan)
+{
+ u32 id = chan - pcc_mbox_channels;
+ unsigned long flags;
+
+ if (!chan || !chan->cl)
+ return;
+
+ if (id >= pcc_mbox_ctrl.num_chans) {
+ pr_debug("pcc_mbox_free_channel: Invalid mbox_chan passed\n");
+ return;
+ }
+
+ if (pcc_doorbell_irq[id] > 0)
+ devm_free_irq(chan->mbox->dev, pcc_doorbell_irq[id], chan);
+
+ spin_lock_irqsave(&chan->lock, flags);
+ chan->cl = NULL;
+ chan->active_req = NULL;
+ if (chan->txdone_method == TXDONE_BY_ACK)
+ chan->txdone_method = TXDONE_BY_POLL;
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
+
+/**
+ * pcc_send_data - Called from Mailbox Controller code. Used
+ * here only to ring the channel doorbell. The PCC client
+ * specific read/write is done in the client driver in
+ * order to maintain atomicity over PCC channel once
+ * OS has control over it. See above for flow of operations.
+ * @chan: Pointer to Mailbox channel over which to send data.
+ * @data: Client specific data written over channel. Used here
+ * only for debug after PCC transaction completes.
+ *
+ * Return: Err if something failed else 0 for success.
+ */
+static int pcc_send_data(struct mbox_chan *chan, void *data)
+{
+ struct acpi_pcct_hw_reduced *pcct_ss = chan->con_priv;
+ struct acpi_generic_address *doorbell;
+ u64 doorbell_preserve;
+ u64 doorbell_val;
+ u64 doorbell_write;
+ u32 id = chan - pcc_mbox_channels;
+ int ret = 0;
+
+ if (id >= pcc_mbox_ctrl.num_chans) {
+ pr_debug("pcc_send_data: Invalid mbox_chan passed\n");
+ return -ENOENT;
+ }
+
+ doorbell = &pcct_ss->doorbell_register;
+ doorbell_preserve = pcct_ss->preserve_mask;
+ doorbell_write = pcct_ss->write_mask;
+
+ /* Sync notification from OS to Platform. */
+ if (pcc_doorbell_vaddr[id]) {
+ ret = read_register(pcc_doorbell_vaddr[id], &doorbell_val,
+ doorbell->bit_width);
+ if (ret)
+ return ret;
+ ret = write_register(pcc_doorbell_vaddr[id],
+ (doorbell_val & doorbell_preserve) | doorbell_write,
+ doorbell->bit_width);
+ } else {
+ ret = acpi_read(&doorbell_val, doorbell);
+ if (ret)
+ return ret;
+ ret = acpi_write((doorbell_val & doorbell_preserve) | doorbell_write,
+ doorbell);
+ }
+ return ret;
+}
+
+static const struct mbox_chan_ops pcc_chan_ops = {
+ .send_data = pcc_send_data,
+};
+
+/**
+ * parse_pcc_subspace -- Count PCC subspaces defined
+ * @header: Pointer to the ACPI subtable header under the PCCT.
+ * @end: End of subtable entry.
+ *
+ * Return: If we find a PCC subspace entry of a valid type, return 0.
+ * Otherwise, return -EINVAL.
+ *
+ * This gets called for each entry in the PCC table.
+ */
+static int parse_pcc_subspace(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_pcct_subspace *ss = (struct acpi_pcct_subspace *) header;
+
+ if (ss->header.type < ACPI_PCCT_TYPE_RESERVED)
+ return 0;
+
+ return -EINVAL;
+}
+
+/**
+ * pcc_parse_subspace_irq - Parse the PCC IRQ and PCC ACK register
+ * There should be one entry per PCC client.
+ * @id: PCC subspace index.
+ * @pcct_ss: Pointer to the ACPI subtable header under the PCCT.
+ *
+ * Return: 0 for Success, else errno.
+ *
+ * This gets called for each entry in the PCC table.
+ */
+static int pcc_parse_subspace_irq(int id,
+ struct acpi_pcct_hw_reduced *pcct_ss)
+{
+ pcc_doorbell_irq[id] = pcc_map_interrupt(pcct_ss->platform_interrupt,
+ (u32)pcct_ss->flags);
+ if (pcc_doorbell_irq[id] <= 0) {
+ pr_err("PCC GSI %d not registered\n",
+ pcct_ss->platform_interrupt);
+ return -EINVAL;
+ }
+
+ if (pcct_ss->header.type
+ == ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) {
+ struct acpi_pcct_hw_reduced_type2 *pcct2_ss = (void *)pcct_ss;
+
+ pcc_doorbell_ack_vaddr[id] = acpi_os_ioremap(
+ pcct2_ss->platform_ack_register.address,
+ pcct2_ss->platform_ack_register.bit_width / 8);
+ if (!pcc_doorbell_ack_vaddr[id]) {
+ pr_err("Failed to ioremap PCC ACK register\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * acpi_pcc_probe - Parse the ACPI tree for the PCCT.
+ *
+ * Return: 0 for Success, else errno.
+ */
+static int __init acpi_pcc_probe(void)
+{
+ struct acpi_table_header *pcct_tbl;
+ struct acpi_subtable_header *pcct_entry;
+ struct acpi_table_pcct *acpi_pcct_tbl;
+ struct acpi_subtable_proc proc[ACPI_PCCT_TYPE_RESERVED];
+ int count, i, rc;
+ acpi_status status = AE_OK;
+
+ /* Search for PCCT */
+ status = acpi_get_table(ACPI_SIG_PCCT, 0, &pcct_tbl);
+
+ if (ACPI_FAILURE(status) || !pcct_tbl)
+ return -ENODEV;
+
+ /* Set up the subtable handlers */
+ for (i = ACPI_PCCT_TYPE_GENERIC_SUBSPACE;
+ i < ACPI_PCCT_TYPE_RESERVED; i++) {
+ proc[i].id = i;
+ proc[i].count = 0;
+ proc[i].handler = parse_pcc_subspace;
+ }
+
+ count = acpi_table_parse_entries_array(ACPI_SIG_PCCT,
+ sizeof(struct acpi_table_pcct), proc,
+ ACPI_PCCT_TYPE_RESERVED, MAX_PCC_SUBSPACES);
+ if (count <= 0 || count > MAX_PCC_SUBSPACES) {
+ if (count < 0)
+ pr_warn("Error parsing PCC subspaces from PCCT\n");
+ else
+ pr_warn("Invalid PCCT: %d PCC subspaces\n", count);
+
+ rc = -EINVAL;
+ goto err_put_pcct;
+ }
+
+ pcc_mbox_channels = kcalloc(count, sizeof(struct mbox_chan),
+ GFP_KERNEL);
+ if (!pcc_mbox_channels) {
+ pr_err("Could not allocate space for PCC mbox channels\n");
+ rc = -ENOMEM;
+ goto err_put_pcct;
+ }
+
+ pcc_doorbell_vaddr = kcalloc(count, sizeof(void *), GFP_KERNEL);
+ if (!pcc_doorbell_vaddr) {
+ rc = -ENOMEM;
+ goto err_free_mbox;
+ }
+
+ pcc_doorbell_ack_vaddr = kcalloc(count, sizeof(void *), GFP_KERNEL);
+ if (!pcc_doorbell_ack_vaddr) {
+ rc = -ENOMEM;
+ goto err_free_db_vaddr;
+ }
+
+ pcc_doorbell_irq = kcalloc(count, sizeof(int), GFP_KERNEL);
+ if (!pcc_doorbell_irq) {
+ rc = -ENOMEM;
+ goto err_free_db_ack_vaddr;
+ }
+
+ /* Point to the first PCC subspace entry */
+ pcct_entry = (struct acpi_subtable_header *) (
+ (unsigned long) pcct_tbl + sizeof(struct acpi_table_pcct));
+
+ acpi_pcct_tbl = (struct acpi_table_pcct *) pcct_tbl;
+ if (acpi_pcct_tbl->flags & ACPI_PCCT_DOORBELL)
+ pcc_mbox_ctrl.txdone_irq = true;
+
+ for (i = 0; i < count; i++) {
+ struct acpi_generic_address *db_reg;
+ struct acpi_pcct_subspace *pcct_ss;
+ pcc_mbox_channels[i].con_priv = pcct_entry;
+
+ if (pcct_entry->type == ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE ||
+ pcct_entry->type == ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) {
+ struct acpi_pcct_hw_reduced *pcct_hrss;
+
+ pcct_hrss = (struct acpi_pcct_hw_reduced *) pcct_entry;
+
+ if (pcc_mbox_ctrl.txdone_irq) {
+ rc = pcc_parse_subspace_irq(i, pcct_hrss);
+ if (rc < 0)
+ goto err;
+ }
+ }
+ pcct_ss = (struct acpi_pcct_subspace *) pcct_entry;
+
+ /* If the doorbell is in system memory, cache the virt address */
+ db_reg = &pcct_ss->doorbell_register;
+ if (db_reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ pcc_doorbell_vaddr[i] = acpi_os_ioremap(db_reg->address,
+ db_reg->bit_width/8);
+ pcct_entry = (struct acpi_subtable_header *)
+ ((unsigned long) pcct_entry + pcct_entry->length);
+ }
+
+ pcc_mbox_ctrl.num_chans = count;
+
+ pr_info("Detected %d PCC Subspaces\n", pcc_mbox_ctrl.num_chans);
+
+ return 0;
+
+err:
+ kfree(pcc_doorbell_irq);
+err_free_db_ack_vaddr:
+ kfree(pcc_doorbell_ack_vaddr);
+err_free_db_vaddr:
+ kfree(pcc_doorbell_vaddr);
+err_free_mbox:
+ kfree(pcc_mbox_channels);
+err_put_pcct:
+ acpi_put_table(pcct_tbl);
+ return rc;
+}
+
+/**
+ * pcc_mbox_probe - Called when we find a match for the
+ * PCCT platform device. This is purely used to represent
+ * the PCCT as a virtual device for registering with the
+ * generic Mailbox framework.
+ *
+ * @pdev: Pointer to platform device returned when a match
+ * is found.
+ *
+ * Return: 0 for Success, else errno.
+ */
+static int pcc_mbox_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+
+ pcc_mbox_ctrl.chans = pcc_mbox_channels;
+ pcc_mbox_ctrl.ops = &pcc_chan_ops;
+ pcc_mbox_ctrl.dev = &pdev->dev;
+
+ pr_info("Registering PCC driver as Mailbox controller\n");
+ ret = mbox_controller_register(&pcc_mbox_ctrl);
+
+ if (ret) {
+ pr_err("Err registering PCC as Mailbox controller: %d\n", ret);
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
+static struct platform_driver pcc_mbox_driver = {
+ .probe = pcc_mbox_probe,
+ .driver = {
+ .name = "PCCT",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init pcc_init(void)
+{
+ int ret;
+ struct platform_device *pcc_pdev;
+
+ if (acpi_disabled)
+ return -ENODEV;
+
+ /* Check if PCC support is available. */
+ ret = acpi_pcc_probe();
+
+ if (ret) {
+ pr_debug("ACPI PCC probe failed.\n");
+ return -ENODEV;
+ }
+
+ pcc_pdev = platform_create_bundle(&pcc_mbox_driver,
+ pcc_mbox_probe, NULL, 0, NULL, 0);
+
+ if (IS_ERR(pcc_pdev)) {
+ pr_debug("Err creating PCC platform bundle\n");
+ return PTR_ERR(pcc_pdev);
+ }
+
+ return 0;
+}
+
+/*
+ * Make PCC init postcore so that users of this mailbox
+ * such as the ACPI Processor driver have it available
+ * at their init.
+ */
+postcore_initcall(pcc_init);
diff --git a/drivers/mailbox/pl320-ipc.c b/drivers/mailbox/pl320-ipc.c
new file mode 100644
index 000000000..25e0b6f7a
--- /dev/null
+++ b/drivers/mailbox/pl320-ipc.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2012 Calxeda, Inc.
+ */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/amba/bus.h>
+
+#include <linux/pl320-ipc.h>
+
+#define IPCMxSOURCE(m) ((m) * 0x40)
+#define IPCMxDSET(m) (((m) * 0x40) + 0x004)
+#define IPCMxDCLEAR(m) (((m) * 0x40) + 0x008)
+#define IPCMxDSTATUS(m) (((m) * 0x40) + 0x00C)
+#define IPCMxMODE(m) (((m) * 0x40) + 0x010)
+#define IPCMxMSET(m) (((m) * 0x40) + 0x014)
+#define IPCMxMCLEAR(m) (((m) * 0x40) + 0x018)
+#define IPCMxMSTATUS(m) (((m) * 0x40) + 0x01C)
+#define IPCMxSEND(m) (((m) * 0x40) + 0x020)
+#define IPCMxDR(m, dr) (((m) * 0x40) + ((dr) * 4) + 0x024)
+
+#define IPCMMIS(irq) (((irq) * 8) + 0x800)
+#define IPCMRIS(irq) (((irq) * 8) + 0x804)
+
+#define MBOX_MASK(n) (1 << (n))
+#define IPC_TX_MBOX 1
+#define IPC_RX_MBOX 2
+
+#define CHAN_MASK(n) (1 << (n))
+#define A9_SOURCE 1
+#define M3_SOURCE 0
+
+static void __iomem *ipc_base;
+static int ipc_irq;
+static DEFINE_MUTEX(ipc_m1_lock);
+static DECLARE_COMPLETION(ipc_completion);
+static ATOMIC_NOTIFIER_HEAD(ipc_notifier);
+
+static inline void set_destination(int source, int mbox)
+{
+ writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxDSET(mbox));
+ writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxMSET(mbox));
+}
+
+static inline void clear_destination(int source, int mbox)
+{
+ writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxDCLEAR(mbox));
+ writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxMCLEAR(mbox));
+}
+
+static void __ipc_send(int mbox, u32 *data)
+{
+ int i;
+ for (i = 0; i < 7; i++)
+ writel_relaxed(data[i], ipc_base + IPCMxDR(mbox, i));
+ writel_relaxed(0x1, ipc_base + IPCMxSEND(mbox));
+}
+
+static u32 __ipc_rcv(int mbox, u32 *data)
+{
+ int i;
+ for (i = 0; i < 7; i++)
+ data[i] = readl_relaxed(ipc_base + IPCMxDR(mbox, i));
+ return data[1];
+}
+
+/* Blocking implementation from the A9 side, not usable in interrupt context! */
+int pl320_ipc_transmit(u32 *data)
+{
+ int ret;
+
+ mutex_lock(&ipc_m1_lock);
+
+ init_completion(&ipc_completion);
+ __ipc_send(IPC_TX_MBOX, data);
+ ret = wait_for_completion_timeout(&ipc_completion,
+ msecs_to_jiffies(1000));
+ if (ret == 0) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ ret = __ipc_rcv(IPC_TX_MBOX, data);
+out:
+ mutex_unlock(&ipc_m1_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pl320_ipc_transmit);
+
+static irqreturn_t ipc_handler(int irq, void *dev)
+{
+ u32 irq_stat;
+ u32 data[7];
+
+ irq_stat = readl_relaxed(ipc_base + IPCMMIS(1));
+ if (irq_stat & MBOX_MASK(IPC_TX_MBOX)) {
+ writel_relaxed(0, ipc_base + IPCMxSEND(IPC_TX_MBOX));
+ complete(&ipc_completion);
+ }
+ if (irq_stat & MBOX_MASK(IPC_RX_MBOX)) {
+ __ipc_rcv(IPC_RX_MBOX, data);
+ atomic_notifier_call_chain(&ipc_notifier, data[0], data + 1);
+ writel_relaxed(2, ipc_base + IPCMxSEND(IPC_RX_MBOX));
+ }
+
+ return IRQ_HANDLED;
+}
+
+int pl320_ipc_register_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&ipc_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(pl320_ipc_register_notifier);
+
+int pl320_ipc_unregister_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&ipc_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(pl320_ipc_unregister_notifier);
+
+static int pl320_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ int ret;
+
+ ipc_base = ioremap(adev->res.start, resource_size(&adev->res));
+ if (ipc_base == NULL)
+ return -ENOMEM;
+
+ writel_relaxed(0, ipc_base + IPCMxSEND(IPC_TX_MBOX));
+
+ ipc_irq = adev->irq[0];
+ ret = request_irq(ipc_irq, ipc_handler, 0, dev_name(&adev->dev), NULL);
+ if (ret < 0)
+ goto err;
+
+ /* Init slow mailbox */
+ writel_relaxed(CHAN_MASK(A9_SOURCE),
+ ipc_base + IPCMxSOURCE(IPC_TX_MBOX));
+ writel_relaxed(CHAN_MASK(M3_SOURCE),
+ ipc_base + IPCMxDSET(IPC_TX_MBOX));
+ writel_relaxed(CHAN_MASK(M3_SOURCE) | CHAN_MASK(A9_SOURCE),
+ ipc_base + IPCMxMSET(IPC_TX_MBOX));
+
+ /* Init receive mailbox */
+ writel_relaxed(CHAN_MASK(M3_SOURCE),
+ ipc_base + IPCMxSOURCE(IPC_RX_MBOX));
+ writel_relaxed(CHAN_MASK(A9_SOURCE),
+ ipc_base + IPCMxDSET(IPC_RX_MBOX));
+ writel_relaxed(CHAN_MASK(M3_SOURCE) | CHAN_MASK(A9_SOURCE),
+ ipc_base + IPCMxMSET(IPC_RX_MBOX));
+
+ return 0;
+err:
+ iounmap(ipc_base);
+ return ret;
+}
+
+static struct amba_id pl320_ids[] = {
+ {
+ .id = 0x00041320,
+ .mask = 0x000fffff,
+ },
+ { 0, 0 },
+};
+
+static struct amba_driver pl320_driver = {
+ .drv = {
+ .name = "pl320",
+ },
+ .id_table = pl320_ids,
+ .probe = pl320_probe,
+};
+
+static int __init ipc_init(void)
+{
+ return amba_driver_register(&pl320_driver);
+}
+subsys_initcall(ipc_init);
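
Below is a brief, hypothetical consumer of the two interfaces this file exports, pl320_ipc_transmit() and the RX notifier chain. The seven-word payload layout, and the fact that word 1 carries the reply on the transmit side, follow __ipc_send()/__ipc_rcv() above; the command word and the demo_* names are made up.

#include <linux/types.h>
#include <linux/notifier.h>
#include <linux/pl320-ipc.h>

static int demo_rx_event(struct notifier_block *nb, unsigned long cmd, void *data)
{
	/* 'cmd' is word 0 of the message, 'data' points at the remaining six words. */
	return NOTIFY_OK;
}

static struct notifier_block demo_nb = { .notifier_call = demo_rx_event };

static int demo_pl320_roundtrip(void)
{
	u32 msg[7] = { 0x1 /* hypothetical command */, 0, 0, 0, 0, 0, 0 };
	int ret;

	ret = pl320_ipc_register_notifier(&demo_nb);
	if (ret)
		return ret;

	/* Sleeps for up to one second; must not be called from atomic context. */
	ret = pl320_ipc_transmit(msg);

	pl320_ipc_unregister_notifier(&demo_nb);
	return ret < 0 ? ret : 0;
}
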
diff --git a/drivers/mailbox/platform_mhu.c b/drivers/mailbox/platform_mhu.c
new file mode 100644
index 000000000..b6e349522
--- /dev/null
+++ b/drivers/mailbox/platform_mhu.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2016 BayLibre SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Synchronised with arm_mhu.c from :
+ * Copyright (C) 2013-2015 Fujitsu Semiconductor Ltd.
+ * Copyright (C) 2015 Linaro Ltd.
+ * Author: Jassi Brar <jaswinder.singh@linaro.org>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mailbox_controller.h>
+
+#define INTR_SET_OFS 0x0
+#define INTR_STAT_OFS 0x4
+#define INTR_CLR_OFS 0x8
+
+#define MHU_SEC_OFFSET 0x0
+#define MHU_LP_OFFSET 0xc
+#define MHU_HP_OFFSET 0x18
+#define TX_REG_OFFSET 0x24
+
+#define MHU_CHANS 3
+
+struct platform_mhu_link {
+ int irq;
+ void __iomem *tx_reg;
+ void __iomem *rx_reg;
+};
+
+struct platform_mhu {
+ void __iomem *base;
+ struct platform_mhu_link mlink[MHU_CHANS];
+ struct mbox_chan chan[MHU_CHANS];
+ struct mbox_controller mbox;
+};
+
+static irqreturn_t platform_mhu_rx_interrupt(int irq, void *p)
+{
+ struct mbox_chan *chan = p;
+ struct platform_mhu_link *mlink = chan->con_priv;
+ u32 val;
+
+ val = readl_relaxed(mlink->rx_reg + INTR_STAT_OFS);
+ if (!val)
+ return IRQ_NONE;
+
+ mbox_chan_received_data(chan, (void *)&val);
+
+ writel_relaxed(val, mlink->rx_reg + INTR_CLR_OFS);
+
+ return IRQ_HANDLED;
+}
+
+static bool platform_mhu_last_tx_done(struct mbox_chan *chan)
+{
+ struct platform_mhu_link *mlink = chan->con_priv;
+ u32 val = readl_relaxed(mlink->tx_reg + INTR_STAT_OFS);
+
+ return (val == 0);
+}
+
+static int platform_mhu_send_data(struct mbox_chan *chan, void *data)
+{
+ struct platform_mhu_link *mlink = chan->con_priv;
+ u32 *arg = data;
+
+ writel_relaxed(*arg, mlink->tx_reg + INTR_SET_OFS);
+
+ return 0;
+}
+
+static int platform_mhu_startup(struct mbox_chan *chan)
+{
+ struct platform_mhu_link *mlink = chan->con_priv;
+ u32 val;
+ int ret;
+
+ val = readl_relaxed(mlink->tx_reg + INTR_STAT_OFS);
+ writel_relaxed(val, mlink->tx_reg + INTR_CLR_OFS);
+
+ ret = request_irq(mlink->irq, platform_mhu_rx_interrupt,
+ IRQF_SHARED, "platform_mhu_link", chan);
+ if (ret) {
+ dev_err(chan->mbox->dev,
+ "Unable to acquire IRQ %d\n", mlink->irq);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void platform_mhu_shutdown(struct mbox_chan *chan)
+{
+ struct platform_mhu_link *mlink = chan->con_priv;
+
+ free_irq(mlink->irq, chan);
+}
+
+static const struct mbox_chan_ops platform_mhu_ops = {
+ .send_data = platform_mhu_send_data,
+ .startup = platform_mhu_startup,
+ .shutdown = platform_mhu_shutdown,
+ .last_tx_done = platform_mhu_last_tx_done,
+};
+
+static int platform_mhu_probe(struct platform_device *pdev)
+{
+ int i, err;
+ struct platform_mhu *mhu;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int platform_mhu_reg[MHU_CHANS] = {
+ MHU_SEC_OFFSET, MHU_LP_OFFSET, MHU_HP_OFFSET
+ };
+
+ /* Allocate memory for device */
+ mhu = devm_kzalloc(dev, sizeof(*mhu), GFP_KERNEL);
+ if (!mhu)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mhu->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mhu->base)) {
+ dev_err(dev, "ioremap failed\n");
+ return PTR_ERR(mhu->base);
+ }
+
+ for (i = 0; i < MHU_CHANS; i++) {
+ mhu->chan[i].con_priv = &mhu->mlink[i];
+ mhu->mlink[i].irq = platform_get_irq(pdev, i);
+ if (mhu->mlink[i].irq < 0) {
+ dev_err(dev, "failed to get irq%d\n", i);
+ return mhu->mlink[i].irq;
+ }
+ mhu->mlink[i].rx_reg = mhu->base + platform_mhu_reg[i];
+ mhu->mlink[i].tx_reg = mhu->mlink[i].rx_reg + TX_REG_OFFSET;
+ }
+
+ mhu->mbox.dev = dev;
+ mhu->mbox.chans = &mhu->chan[0];
+ mhu->mbox.num_chans = MHU_CHANS;
+ mhu->mbox.ops = &platform_mhu_ops;
+ mhu->mbox.txdone_irq = false;
+ mhu->mbox.txdone_poll = true;
+ mhu->mbox.txpoll_period = 1;
+
+ platform_set_drvdata(pdev, mhu);
+
+ err = devm_mbox_controller_register(dev, &mhu->mbox);
+ if (err) {
+ dev_err(dev, "Failed to register mailboxes %d\n", err);
+ return err;
+ }
+
+ dev_info(dev, "Platform MHU Mailbox registered\n");
+ return 0;
+}
+
+static const struct of_device_id platform_mhu_dt_ids[] = {
+ { .compatible = "amlogic,meson-gxbb-mhu", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, platform_mhu_dt_ids);
+
+static struct platform_driver platform_mhu_driver = {
+ .probe = platform_mhu_probe,
+ .driver = {
+ .name = "platform-mhu",
+ .of_match_table = platform_mhu_dt_ids,
+ },
+};
+
+module_platform_driver(platform_mhu_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:platform-mhu");
+MODULE_DESCRIPTION("Platform MHU Driver");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
new file mode 100644
index 000000000..3d100a004
--- /dev/null
+++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017, Linaro Ltd
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mailbox_controller.h>
+
+#define QCOM_APCS_IPC_BITS 32
+
+struct qcom_apcs_ipc {
+ struct mbox_controller mbox;
+ struct mbox_chan mbox_chans[QCOM_APCS_IPC_BITS];
+
+ struct regmap *regmap;
+ unsigned long offset;
+ struct platform_device *clk;
+};
+
+struct qcom_apcs_ipc_data {
+ int offset;
+ char *clk_name;
+};
+
+static const struct qcom_apcs_ipc_data ipq6018_apcs_data = {
+ .offset = 8, .clk_name = "qcom,apss-ipq6018-clk"
+};
+
+static const struct qcom_apcs_ipc_data ipq8074_apcs_data = {
+ .offset = 8, .clk_name = NULL
+};
+
+static const struct qcom_apcs_ipc_data msm8916_apcs_data = {
+ .offset = 8, .clk_name = "qcom-apcs-msm8916-clk"
+};
+
+static const struct qcom_apcs_ipc_data msm8994_apcs_data = {
+ .offset = 8, .clk_name = NULL
+};
+
+static const struct qcom_apcs_ipc_data msm8996_apcs_data = {
+ .offset = 16, .clk_name = NULL
+};
+
+static const struct qcom_apcs_ipc_data msm8998_apcs_data = {
+ .offset = 8, .clk_name = NULL
+};
+
+static const struct qcom_apcs_ipc_data sdm660_apcs_data = {
+ .offset = 8, .clk_name = NULL
+};
+
+static const struct qcom_apcs_ipc_data apps_shared_apcs_data = {
+ .offset = 12, .clk_name = NULL
+};
+
+static const struct regmap_config apcs_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xFFC,
+ .fast_io = true,
+};
+
+static int qcom_apcs_ipc_send_data(struct mbox_chan *chan, void *data)
+{
+ struct qcom_apcs_ipc *apcs = container_of(chan->mbox,
+ struct qcom_apcs_ipc, mbox);
+ unsigned long idx = (unsigned long)chan->con_priv;
+
+ return regmap_write(apcs->regmap, apcs->offset, BIT(idx));
+}
+
+static const struct mbox_chan_ops qcom_apcs_ipc_ops = {
+ .send_data = qcom_apcs_ipc_send_data,
+};
+
+static int qcom_apcs_ipc_probe(struct platform_device *pdev)
+{
+ struct qcom_apcs_ipc *apcs;
+ const struct qcom_apcs_ipc_data *apcs_data;
+ struct regmap *regmap;
+ struct resource *res;
+ void __iomem *base;
+ unsigned long i;
+ int ret;
+
+ apcs = devm_kzalloc(&pdev->dev, sizeof(*apcs), GFP_KERNEL);
+ if (!apcs)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio(&pdev->dev, base, &apcs_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ apcs_data = of_device_get_match_data(&pdev->dev);
+
+ apcs->regmap = regmap;
+ apcs->offset = apcs_data->offset;
+
+ /* Initialize channel identifiers */
+ for (i = 0; i < ARRAY_SIZE(apcs->mbox_chans); i++)
+ apcs->mbox_chans[i].con_priv = (void *)i;
+
+ apcs->mbox.dev = &pdev->dev;
+ apcs->mbox.ops = &qcom_apcs_ipc_ops;
+ apcs->mbox.chans = apcs->mbox_chans;
+ apcs->mbox.num_chans = ARRAY_SIZE(apcs->mbox_chans);
+
+ ret = devm_mbox_controller_register(&pdev->dev, &apcs->mbox);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register APCS IPC controller\n");
+ return ret;
+ }
+
+ if (apcs_data->clk_name) {
+ apcs->clk = platform_device_register_data(&pdev->dev,
+ apcs_data->clk_name,
+ PLATFORM_DEVID_AUTO,
+ NULL, 0);
+ if (IS_ERR(apcs->clk))
+ dev_err(&pdev->dev, "failed to register APCS clk\n");
+ }
+
+ platform_set_drvdata(pdev, apcs);
+
+ return 0;
+}
+
+static int qcom_apcs_ipc_remove(struct platform_device *pdev)
+{
+ struct qcom_apcs_ipc *apcs = platform_get_drvdata(pdev);
+ struct platform_device *clk = apcs->clk;
+
+ platform_device_unregister(clk);
+
+ return 0;
+}
+
+/* .data is the offset of the ipc register within the global block */
+static const struct of_device_id qcom_apcs_ipc_of_match[] = {
+ { .compatible = "qcom,ipq6018-apcs-apps-global", .data = &ipq6018_apcs_data },
+ { .compatible = "qcom,ipq8074-apcs-apps-global", .data = &ipq8074_apcs_data },
+ { .compatible = "qcom,msm8916-apcs-kpss-global", .data = &msm8916_apcs_data },
+ { .compatible = "qcom,msm8994-apcs-kpss-global", .data = &msm8994_apcs_data },
+ { .compatible = "qcom,msm8996-apcs-hmss-global", .data = &msm8996_apcs_data },
+ { .compatible = "qcom,msm8998-apcs-hmss-global", .data = &msm8998_apcs_data },
+ { .compatible = "qcom,qcs404-apcs-apps-global", .data = &msm8916_apcs_data },
+ { .compatible = "qcom,sc7180-apss-shared", .data = &apps_shared_apcs_data },
+ { .compatible = "qcom,sdm660-apcs-hmss-global", .data = &sdm660_apcs_data },
+ { .compatible = "qcom,sdm845-apss-shared", .data = &apps_shared_apcs_data },
+ { .compatible = "qcom,sm8150-apss-shared", .data = &apps_shared_apcs_data },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_apcs_ipc_of_match);
+
+static struct platform_driver qcom_apcs_ipc_driver = {
+ .probe = qcom_apcs_ipc_probe,
+ .remove = qcom_apcs_ipc_remove,
+ .driver = {
+ .name = "qcom_apcs_ipc",
+ .of_match_table = qcom_apcs_ipc_of_match,
+ },
+};
+
+static int __init qcom_apcs_ipc_init(void)
+{
+ return platform_driver_register(&qcom_apcs_ipc_driver);
+}
+postcore_initcall(qcom_apcs_ipc_init);
+
+static void __exit qcom_apcs_ipc_exit(void)
+{
+ platform_driver_unregister(&qcom_apcs_ipc_driver);
+}
+module_exit(qcom_apcs_ipc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm APCS IPC driver");
diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c
new file mode 100644
index 000000000..584700cd1
--- /dev/null
+++ b/drivers/mailbox/qcom-ipcc.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/mailbox/qcom-ipcc.h>
+
+#define IPCC_MBOX_MAX_CHAN 48
+
+/* IPCC Register offsets */
+#define IPCC_REG_SEND_ID 0x0c
+#define IPCC_REG_RECV_ID 0x10
+#define IPCC_REG_RECV_SIGNAL_ENABLE 0x14
+#define IPCC_REG_RECV_SIGNAL_DISABLE 0x18
+#define IPCC_REG_RECV_SIGNAL_CLEAR 0x1c
+#define IPCC_REG_CLIENT_CLEAR 0x38
+
+#define IPCC_SIGNAL_ID_MASK GENMASK(15, 0)
+#define IPCC_CLIENT_ID_MASK GENMASK(31, 16)
+
+#define IPCC_NO_PENDING_IRQ GENMASK(31, 0)
+
+/**
+ * struct qcom_ipcc_chan_info - Per-mailbox-channel info
+ * @client_id: The client ID for which the interrupt is to be triggered
+ * @signal_id: The signal ID for which the interrupt is to be triggered
+ */
+struct qcom_ipcc_chan_info {
+ u16 client_id;
+ u16 signal_id;
+};
+
+/**
+ * struct qcom_ipcc - Holder for the mailbox driver
+ * @dev: Device associated with this instance
+ * @base: Base address of the IPCC frame associated to APSS
+ * @irq_domain: The irq_domain associated with this instance
+ * @chan: The mailbox channels array
+ * @mchan: The per-mailbox channel info array
+ * @mbox: The mailbox controller
+ * @irq: Summary irq
+ */
+struct qcom_ipcc {
+ struct device *dev;
+ void __iomem *base;
+ struct irq_domain *irq_domain;
+ struct mbox_chan chan[IPCC_MBOX_MAX_CHAN];
+ struct qcom_ipcc_chan_info mchan[IPCC_MBOX_MAX_CHAN];
+ struct mbox_controller mbox;
+ int irq;
+};
+
+static inline struct qcom_ipcc *to_qcom_ipcc(struct mbox_controller *mbox)
+{
+ return container_of(mbox, struct qcom_ipcc, mbox);
+}
+
+static inline u32 qcom_ipcc_get_hwirq(u16 client_id, u16 signal_id)
+{
+ return FIELD_PREP(IPCC_CLIENT_ID_MASK, client_id) |
+ FIELD_PREP(IPCC_SIGNAL_ID_MASK, signal_id);
+}
+
+static irqreturn_t qcom_ipcc_irq_fn(int irq, void *data)
+{
+ struct qcom_ipcc *ipcc = data;
+ u32 hwirq;
+ int virq;
+
+ for (;;) {
+ hwirq = readl(ipcc->base + IPCC_REG_RECV_ID);
+ if (hwirq == IPCC_NO_PENDING_IRQ)
+ break;
+
+ virq = irq_find_mapping(ipcc->irq_domain, hwirq);
+ writel(hwirq, ipcc->base + IPCC_REG_RECV_SIGNAL_CLEAR);
+ generic_handle_irq(virq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void qcom_ipcc_mask_irq(struct irq_data *irqd)
+{
+ struct qcom_ipcc *ipcc = irq_data_get_irq_chip_data(irqd);
+ irq_hw_number_t hwirq = irqd_to_hwirq(irqd);
+
+ writel(hwirq, ipcc->base + IPCC_REG_RECV_SIGNAL_DISABLE);
+}
+
+static void qcom_ipcc_unmask_irq(struct irq_data *irqd)
+{
+ struct qcom_ipcc *ipcc = irq_data_get_irq_chip_data(irqd);
+ irq_hw_number_t hwirq = irqd_to_hwirq(irqd);
+
+ writel(hwirq, ipcc->base + IPCC_REG_RECV_SIGNAL_ENABLE);
+}
+
+static struct irq_chip qcom_ipcc_irq_chip = {
+ .name = "ipcc",
+ .irq_mask = qcom_ipcc_mask_irq,
+ .irq_unmask = qcom_ipcc_unmask_irq,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+};
+
+static int qcom_ipcc_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct qcom_ipcc *ipcc = d->host_data;
+
+ irq_set_chip_and_handler(irq, &qcom_ipcc_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, ipcc);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static int qcom_ipcc_domain_xlate(struct irq_domain *d,
+ struct device_node *node, const u32 *intspec,
+ unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ if (intsize != 3)
+ return -EINVAL;
+
+ *out_hwirq = qcom_ipcc_get_hwirq(intspec[0], intspec[1]);
+ *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
+
+ return 0;
+}
+
+static const struct irq_domain_ops qcom_ipcc_irq_ops = {
+ .map = qcom_ipcc_domain_map,
+ .xlate = qcom_ipcc_domain_xlate,
+};
+
+static int qcom_ipcc_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct qcom_ipcc *ipcc = to_qcom_ipcc(chan->mbox);
+ struct qcom_ipcc_chan_info *mchan = chan->con_priv;
+ u32 hwirq;
+
+ hwirq = qcom_ipcc_get_hwirq(mchan->client_id, mchan->signal_id);
+ writel(hwirq, ipcc->base + IPCC_REG_SEND_ID);
+
+ return 0;
+}
+
+static void qcom_ipcc_mbox_shutdown(struct mbox_chan *chan)
+{
+ chan->con_priv = NULL;
+}
+
+static struct mbox_chan *qcom_ipcc_mbox_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *ph)
+{
+ struct qcom_ipcc *ipcc = to_qcom_ipcc(mbox);
+ struct qcom_ipcc_chan_info *mchan;
+ struct mbox_chan *chan;
+ unsigned int i;
+
+ if (ph->args_count != 2)
+ return ERR_PTR(-EINVAL);
+
+ for (i = 0; i < IPCC_MBOX_MAX_CHAN; i++) {
+ chan = &ipcc->chan[i];
+ if (!chan->con_priv) {
+ mchan = &ipcc->mchan[i];
+ mchan->client_id = ph->args[0];
+ mchan->signal_id = ph->args[1];
+ chan->con_priv = mchan;
+ break;
+ }
+
+ chan = NULL;
+ }
+
+ return chan ?: ERR_PTR(-EBUSY);
+}
+
+static const struct mbox_chan_ops ipcc_mbox_chan_ops = {
+ .send_data = qcom_ipcc_mbox_send_data,
+ .shutdown = qcom_ipcc_mbox_shutdown,
+};
+
+static int qcom_ipcc_setup_mbox(struct qcom_ipcc *ipcc)
+{
+ struct mbox_controller *mbox;
+ struct device *dev = ipcc->dev;
+
+ mbox = &ipcc->mbox;
+ mbox->dev = dev;
+ mbox->num_chans = IPCC_MBOX_MAX_CHAN;
+ mbox->chans = ipcc->chan;
+ mbox->ops = &ipcc_mbox_chan_ops;
+ mbox->of_xlate = qcom_ipcc_mbox_xlate;
+ mbox->txdone_irq = false;
+ mbox->txdone_poll = false;
+
+ return devm_mbox_controller_register(dev, mbox);
+}
+
+static int qcom_ipcc_probe(struct platform_device *pdev)
+{
+ struct qcom_ipcc *ipcc;
+ int ret;
+
+ ipcc = devm_kzalloc(&pdev->dev, sizeof(*ipcc), GFP_KERNEL);
+ if (!ipcc)
+ return -ENOMEM;
+
+ ipcc->dev = &pdev->dev;
+
+ ipcc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ipcc->base))
+ return PTR_ERR(ipcc->base);
+
+ ipcc->irq = platform_get_irq(pdev, 0);
+ if (ipcc->irq < 0)
+ return ipcc->irq;
+
+ ipcc->irq_domain = irq_domain_add_tree(pdev->dev.of_node,
+ &qcom_ipcc_irq_ops, ipcc);
+ if (!ipcc->irq_domain)
+ return -ENOMEM;
+
+ ret = qcom_ipcc_setup_mbox(ipcc);
+ if (ret)
+ goto err_mbox;
+
+ ret = devm_request_irq(&pdev->dev, ipcc->irq, qcom_ipcc_irq_fn,
+ IRQF_TRIGGER_HIGH, "ipcc", ipcc);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register the irq: %d\n", ret);
+ goto err_mbox;
+ }
+
+ enable_irq_wake(ipcc->irq);
+ platform_set_drvdata(pdev, ipcc);
+
+ return 0;
+
+err_mbox:
+ irq_domain_remove(ipcc->irq_domain);
+
+ return ret;
+}
+
+static int qcom_ipcc_remove(struct platform_device *pdev)
+{
+ struct qcom_ipcc *ipcc = platform_get_drvdata(pdev);
+
+ disable_irq_wake(ipcc->irq);
+ irq_domain_remove(ipcc->irq_domain);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_ipcc_of_match[] = {
+ { .compatible = "qcom,ipcc"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_ipcc_of_match);
+
+static struct platform_driver qcom_ipcc_driver = {
+ .probe = qcom_ipcc_probe,
+ .remove = qcom_ipcc_remove,
+ .driver = {
+ .name = "qcom-ipcc",
+ .of_match_table = qcom_ipcc_of_match,
+ },
+};
+
+static int __init qcom_ipcc_init(void)
+{
+ return platform_driver_register(&qcom_ipcc_driver);
+}
+arch_initcall(qcom_ipcc_init);
+
+MODULE_AUTHOR("Venkata Narendra Kumar Gutta <vnkgutta@codeaurora.org>");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. IPCC driver");
+MODULE_LICENSE("GPL v2");
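
A small worked example of the hwirq encoding implemented by qcom_ipcc_get_hwirq() above: the client ID is packed into bits 31:16 and the signal ID into bits 15:0 of a single 32-bit word, which is what the controller expects in IPCC_REG_SEND_ID and what the irq domain uses as the hardware interrupt number. The ID values below are arbitrary.

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>

static u32 demo_ipcc_hwirq(void)
{
	u16 client = 6, signal = 3;	/* hypothetical client/signal IDs */

	/* FIELD_PREP(GENMASK(31, 16), 6) | FIELD_PREP(GENMASK(15, 0), 3) == 0x00060003 */
	return FIELD_PREP(GENMASK(31, 16), client) |
	       FIELD_PREP(GENMASK(15, 0), signal);
}
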
diff --git a/drivers/mailbox/rockchip-mailbox.c b/drivers/mailbox/rockchip-mailbox.c
new file mode 100644
index 000000000..979acc810
--- /dev/null
+++ b/drivers/mailbox/rockchip-mailbox.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#define MAILBOX_A2B_INTEN 0x00
+#define MAILBOX_A2B_STATUS 0x04
+#define MAILBOX_A2B_CMD(x) (0x08 + (x) * 8)
+#define MAILBOX_A2B_DAT(x) (0x0c + (x) * 8)
+
+#define MAILBOX_B2A_INTEN 0x28
+#define MAILBOX_B2A_STATUS 0x2C
+#define MAILBOX_B2A_CMD(x) (0x30 + (x) * 8)
+#define MAILBOX_B2A_DAT(x) (0x34 + (x) * 8)
+
+struct rockchip_mbox_msg {
+ u32 cmd;
+ int rx_size;
+};
+
+struct rockchip_mbox_data {
+ int num_chans;
+};
+
+struct rockchip_mbox_chan {
+ int idx;
+ int irq;
+ struct rockchip_mbox_msg *msg;
+ struct rockchip_mbox *mb;
+};
+
+struct rockchip_mbox {
+ struct mbox_controller mbox;
+ struct clk *pclk;
+ void __iomem *mbox_base;
+
+ /* The maximum size of buf for each channel */
+ u32 buf_size;
+
+ struct rockchip_mbox_chan *chans;
+};
+
+static int rockchip_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct rockchip_mbox *mb = dev_get_drvdata(chan->mbox->dev);
+ struct rockchip_mbox_msg *msg = data;
+ struct rockchip_mbox_chan *chans = mb->chans;
+
+ if (!msg)
+ return -EINVAL;
+
+ if (msg->rx_size > mb->buf_size) {
+ dev_err(mb->mbox.dev, "Transmit size over buf size(%d)\n",
+ mb->buf_size);
+ return -EINVAL;
+ }
+
+ dev_dbg(mb->mbox.dev, "Chan[%d]: A2B message, cmd 0x%08x\n",
+ chans->idx, msg->cmd);
+
+ mb->chans[chans->idx].msg = msg;
+
+ writel_relaxed(msg->cmd, mb->mbox_base + MAILBOX_A2B_CMD(chans->idx));
+ writel_relaxed(msg->rx_size, mb->mbox_base +
+ MAILBOX_A2B_DAT(chans->idx));
+
+ return 0;
+}
+
+static int rockchip_mbox_startup(struct mbox_chan *chan)
+{
+ struct rockchip_mbox *mb = dev_get_drvdata(chan->mbox->dev);
+
+ /* Enable all B2A interrupts */
+ writel_relaxed((1 << mb->mbox.num_chans) - 1,
+ mb->mbox_base + MAILBOX_B2A_INTEN);
+
+ return 0;
+}
+
+static void rockchip_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct rockchip_mbox *mb = dev_get_drvdata(chan->mbox->dev);
+ struct rockchip_mbox_chan *chans = mb->chans;
+
+ /* Disable all B2A interrupts */
+ writel_relaxed(0, mb->mbox_base + MAILBOX_B2A_INTEN);
+
+ mb->chans[chans->idx].msg = NULL;
+}
+
+static const struct mbox_chan_ops rockchip_mbox_chan_ops = {
+ .send_data = rockchip_mbox_send_data,
+ .startup = rockchip_mbox_startup,
+ .shutdown = rockchip_mbox_shutdown,
+};
+
+static irqreturn_t rockchip_mbox_irq(int irq, void *dev_id)
+{
+ int idx;
+ struct rockchip_mbox *mb = (struct rockchip_mbox *)dev_id;
+ u32 status = readl_relaxed(mb->mbox_base + MAILBOX_B2A_STATUS);
+
+ for (idx = 0; idx < mb->mbox.num_chans; idx++) {
+ if ((status & (1 << idx)) && (irq == mb->chans[idx].irq)) {
+ /* Clear mbox interrupt */
+ writel_relaxed(1 << idx,
+ mb->mbox_base + MAILBOX_B2A_STATUS);
+ return IRQ_WAKE_THREAD;
+ }
+ }
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t rockchip_mbox_isr(int irq, void *dev_id)
+{
+ int idx;
+ struct rockchip_mbox_msg *msg = NULL;
+ struct rockchip_mbox *mb = (struct rockchip_mbox *)dev_id;
+
+ for (idx = 0; idx < mb->mbox.num_chans; idx++) {
+ if (irq != mb->chans[idx].irq)
+ continue;
+
+ msg = mb->chans[idx].msg;
+ if (!msg) {
+ dev_err(mb->mbox.dev,
+ "Chan[%d]: B2A message is NULL\n", idx);
+ break; /* spurious */
+ }
+
+ mbox_chan_received_data(&mb->mbox.chans[idx], msg);
+ mb->chans[idx].msg = NULL;
+
+ dev_dbg(mb->mbox.dev, "Chan[%d]: B2A message, cmd 0x%08x\n",
+ idx, msg->cmd);
+
+ break;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static const struct rockchip_mbox_data rk3368_drv_data = {
+ .num_chans = 4,
+};
+
+static const struct of_device_id rockchip_mbox_of_match[] = {
+ { .compatible = "rockchip,rk3368-mailbox", .data = &rk3368_drv_data},
+ { },
+};
+MODULE_DEVICE_TABLE(of, rockchip_mbox_of_match);
+
+static int rockchip_mbox_probe(struct platform_device *pdev)
+{
+ struct rockchip_mbox *mb;
+ const struct of_device_id *match;
+ const struct rockchip_mbox_data *drv_data;
+ struct resource *res;
+ int ret, irq, i;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ match = of_match_node(rockchip_mbox_of_match, pdev->dev.of_node);
+ drv_data = (const struct rockchip_mbox_data *)match->data;
+
+ mb = devm_kzalloc(&pdev->dev, sizeof(*mb), GFP_KERNEL);
+ if (!mb)
+ return -ENOMEM;
+
+ mb->chans = devm_kcalloc(&pdev->dev, drv_data->num_chans,
+ sizeof(*mb->chans), GFP_KERNEL);
+ if (!mb->chans)
+ return -ENOMEM;
+
+ mb->mbox.chans = devm_kcalloc(&pdev->dev, drv_data->num_chans,
+ sizeof(*mb->mbox.chans), GFP_KERNEL);
+ if (!mb->mbox.chans)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, mb);
+
+ mb->mbox.dev = &pdev->dev;
+ mb->mbox.num_chans = drv_data->num_chans;
+ mb->mbox.ops = &rockchip_mbox_chan_ops;
+ mb->mbox.txdone_irq = true;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ mb->mbox_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mb->mbox_base))
+ return PTR_ERR(mb->mbox_base);
+
+ /* Each channel has two buffers for A2B and B2A */
+ mb->buf_size = (size_t)resource_size(res) / (drv_data->num_chans * 2);
+
+ mb->pclk = devm_clk_get(&pdev->dev, "pclk_mailbox");
+ if (IS_ERR(mb->pclk)) {
+ ret = PTR_ERR(mb->pclk);
+ dev_err(&pdev->dev, "failed to get pclk_mailbox clock: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(mb->pclk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable pclk: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < mb->mbox.num_chans; i++) {
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq,
+ rockchip_mbox_irq,
+ rockchip_mbox_isr, IRQF_ONESHOT,
+ dev_name(&pdev->dev), mb);
+ if (ret < 0)
+ return ret;
+
+ mb->chans[i].idx = i;
+ mb->chans[i].irq = irq;
+ mb->chans[i].mb = mb;
+ mb->chans[i].msg = NULL;
+ }
+
+ ret = devm_mbox_controller_register(&pdev->dev, &mb->mbox);
+ if (ret < 0)
+ dev_err(&pdev->dev, "Failed to register mailbox: %d\n", ret);
+
+ return ret;
+}
+
+static struct platform_driver rockchip_mbox_driver = {
+ .probe = rockchip_mbox_probe,
+ .driver = {
+ .name = "rockchip-mailbox",
+ .of_match_table = of_match_ptr(rockchip_mbox_of_match),
+ },
+};
+
+module_platform_driver(rockchip_mbox_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Rockchip mailbox: communicate between CPU cores and MCU");
+MODULE_AUTHOR("Addy Ke <addy.ke@rock-chips.com>");
+MODULE_AUTHOR("Caesar Wang <wxt@rock-chips.com>");
diff --git a/drivers/mailbox/sprd-mailbox.c b/drivers/mailbox/sprd-mailbox.c
new file mode 100644
index 000000000..94d9067dc
--- /dev/null
+++ b/drivers/mailbox/sprd-mailbox.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Spreadtrum mailbox driver
+ *
+ * Copyright (c) 2020 Spreadtrum Communications Inc.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+#define SPRD_MBOX_ID 0x0
+#define SPRD_MBOX_MSG_LOW 0x4
+#define SPRD_MBOX_MSG_HIGH 0x8
+#define SPRD_MBOX_TRIGGER 0xc
+#define SPRD_MBOX_FIFO_RST 0x10
+#define SPRD_MBOX_FIFO_STS 0x14
+#define SPRD_MBOX_IRQ_STS 0x18
+#define SPRD_MBOX_IRQ_MSK 0x1c
+#define SPRD_MBOX_LOCK 0x20
+#define SPRD_MBOX_FIFO_DEPTH 0x24
+
+/* Bit and mask definition for inbox's SPRD_MBOX_FIFO_STS register */
+#define SPRD_INBOX_FIFO_DELIVER_MASK GENMASK(23, 16)
+#define SPRD_INBOX_FIFO_OVERLOW_MASK GENMASK(15, 8)
+#define SPRD_INBOX_FIFO_DELIVER_SHIFT 16
+#define SPRD_INBOX_FIFO_BUSY_MASK GENMASK(7, 0)
+
+/* Bit and mask definition for SPRD_MBOX_IRQ_STS register */
+#define SPRD_MBOX_IRQ_CLR BIT(0)
+
+/* Bit and mask definition for outbox's SPRD_MBOX_FIFO_STS register */
+#define SPRD_OUTBOX_FIFO_FULL BIT(2)
+#define SPRD_OUTBOX_FIFO_WR_SHIFT 16
+#define SPRD_OUTBOX_FIFO_RD_SHIFT 24
+#define SPRD_OUTBOX_FIFO_POS_MASK GENMASK(7, 0)
+
+/* Bit and mask definition for inbox's SPRD_MBOX_IRQ_MSK register */
+#define SPRD_INBOX_FIFO_BLOCK_IRQ BIT(0)
+#define SPRD_INBOX_FIFO_OVERFLOW_IRQ BIT(1)
+#define SPRD_INBOX_FIFO_DELIVER_IRQ BIT(2)
+#define SPRD_INBOX_FIFO_IRQ_MASK GENMASK(2, 0)
+
+/* Bit and mask definition for outbox's SPRD_MBOX_IRQ_MSK register */
+#define SPRD_OUTBOX_FIFO_NOT_EMPTY_IRQ BIT(0)
+#define SPRD_OUTBOX_FIFO_IRQ_MASK GENMASK(4, 0)
+
+#define SPRD_MBOX_CHAN_MAX 8
+
+struct sprd_mbox_priv {
+ struct mbox_controller mbox;
+ struct device *dev;
+ void __iomem *inbox_base;
+ void __iomem *outbox_base;
+ struct clk *clk;
+ u32 outbox_fifo_depth;
+
+ struct mutex lock;
+ u32 refcnt;
+ struct mbox_chan chan[SPRD_MBOX_CHAN_MAX];
+};
+
+static struct sprd_mbox_priv *to_sprd_mbox_priv(struct mbox_controller *mbox)
+{
+ return container_of(mbox, struct sprd_mbox_priv, mbox);
+}
+
+static u32 sprd_mbox_get_fifo_len(struct sprd_mbox_priv *priv, u32 fifo_sts)
+{
+ u32 wr_pos = (fifo_sts >> SPRD_OUTBOX_FIFO_WR_SHIFT) &
+ SPRD_OUTBOX_FIFO_POS_MASK;
+ u32 rd_pos = (fifo_sts >> SPRD_OUTBOX_FIFO_RD_SHIFT) &
+ SPRD_OUTBOX_FIFO_POS_MASK;
+ u32 fifo_len;
+
+ /*
+	 * If the read pointer equals the write pointer, the FIFO is either
+	 * completely full or completely empty.
+ */
+ if (wr_pos == rd_pos) {
+ if (fifo_sts & SPRD_OUTBOX_FIFO_FULL)
+ fifo_len = priv->outbox_fifo_depth;
+ else
+ fifo_len = 0;
+ } else if (wr_pos > rd_pos) {
+ fifo_len = wr_pos - rd_pos;
+ } else {
+ fifo_len = priv->outbox_fifo_depth - rd_pos + wr_pos;
+ }
+
+ return fifo_len;
+}
+
+static irqreturn_t sprd_mbox_outbox_isr(int irq, void *data)
+{
+ struct sprd_mbox_priv *priv = data;
+ struct mbox_chan *chan;
+ u32 fifo_sts, fifo_len, msg[2];
+ int i, id;
+
+ fifo_sts = readl(priv->outbox_base + SPRD_MBOX_FIFO_STS);
+
+ fifo_len = sprd_mbox_get_fifo_len(priv, fifo_sts);
+ if (!fifo_len) {
+ dev_warn_ratelimited(priv->dev, "spurious outbox interrupt\n");
+ return IRQ_NONE;
+ }
+
+ for (i = 0; i < fifo_len; i++) {
+ msg[0] = readl(priv->outbox_base + SPRD_MBOX_MSG_LOW);
+ msg[1] = readl(priv->outbox_base + SPRD_MBOX_MSG_HIGH);
+ id = readl(priv->outbox_base + SPRD_MBOX_ID);
+
+ chan = &priv->chan[id];
+ if (chan->cl)
+ mbox_chan_received_data(chan, (void *)msg);
+ else
+ dev_warn_ratelimited(priv->dev,
+ "message's been dropped at ch[%d]\n", id);
+
+ /* Trigger to update outbox FIFO pointer */
+ writel(0x1, priv->outbox_base + SPRD_MBOX_TRIGGER);
+ }
+
+	/* Clear irq status after reading all messages. */
+ writel(SPRD_MBOX_IRQ_CLR, priv->outbox_base + SPRD_MBOX_IRQ_STS);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t sprd_mbox_inbox_isr(int irq, void *data)
+{
+ struct sprd_mbox_priv *priv = data;
+ struct mbox_chan *chan;
+ u32 fifo_sts, send_sts, busy, id;
+
+ fifo_sts = readl(priv->inbox_base + SPRD_MBOX_FIFO_STS);
+
+ /* Get the inbox data delivery status */
+ send_sts = (fifo_sts & SPRD_INBOX_FIFO_DELIVER_MASK) >>
+ SPRD_INBOX_FIFO_DELIVER_SHIFT;
+ if (!send_sts) {
+ dev_warn_ratelimited(priv->dev, "spurious inbox interrupt\n");
+ return IRQ_NONE;
+ }
+
+ while (send_sts) {
+ id = __ffs(send_sts);
+ send_sts &= (send_sts - 1);
+
+ chan = &priv->chan[id];
+
+ /*
+		 * Check whether the message has been fetched by the remote target;
+		 * if so, the transmission has completed.
+ */
+ busy = fifo_sts & SPRD_INBOX_FIFO_BUSY_MASK;
+ if (!(busy & BIT(id)))
+ mbox_chan_txdone(chan, 0);
+ }
+
+ /* Clear FIFO delivery and overflow status */
+ writel(fifo_sts &
+ (SPRD_INBOX_FIFO_DELIVER_MASK | SPRD_INBOX_FIFO_OVERLOW_MASK),
+ priv->inbox_base + SPRD_MBOX_FIFO_RST);
+
+ /* Clear irq status */
+ writel(SPRD_MBOX_IRQ_CLR, priv->inbox_base + SPRD_MBOX_IRQ_STS);
+
+ return IRQ_HANDLED;
+}
+
+static int sprd_mbox_send_data(struct mbox_chan *chan, void *msg)
+{
+ struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
+ unsigned long id = (unsigned long)chan->con_priv;
+ u32 *data = msg;
+
+	/* Write data into the inbox FIFO; the hardware supports only 8 bytes per transfer */
+ writel(data[0], priv->inbox_base + SPRD_MBOX_MSG_LOW);
+ writel(data[1], priv->inbox_base + SPRD_MBOX_MSG_HIGH);
+
+ /* Set target core id */
+ writel(id, priv->inbox_base + SPRD_MBOX_ID);
+
+ /* Trigger remote request */
+ writel(0x1, priv->inbox_base + SPRD_MBOX_TRIGGER);
+
+ return 0;
+}
+
+static int sprd_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
+{
+ struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
+ unsigned long id = (unsigned long)chan->con_priv;
+ u32 busy;
+
+ timeout = jiffies + msecs_to_jiffies(timeout);
+
+ while (time_before(jiffies, timeout)) {
+ busy = readl(priv->inbox_base + SPRD_MBOX_FIFO_STS) &
+ SPRD_INBOX_FIFO_BUSY_MASK;
+ if (!(busy & BIT(id))) {
+ mbox_chan_txdone(chan, 0);
+ return 0;
+ }
+
+ udelay(1);
+ }
+
+ return -ETIME;
+}
+
+static int sprd_mbox_startup(struct mbox_chan *chan)
+{
+ struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
+ u32 val;
+
+ mutex_lock(&priv->lock);
+ if (priv->refcnt++ == 0) {
+ /* Select outbox FIFO mode and reset the outbox FIFO status */
+ writel(0x0, priv->outbox_base + SPRD_MBOX_FIFO_RST);
+
+ /* Enable inbox FIFO overflow and delivery interrupt */
+ val = readl(priv->inbox_base + SPRD_MBOX_IRQ_MSK);
+ val &= ~(SPRD_INBOX_FIFO_OVERFLOW_IRQ | SPRD_INBOX_FIFO_DELIVER_IRQ);
+ writel(val, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
+
+ /* Enable outbox FIFO not empty interrupt */
+ val = readl(priv->outbox_base + SPRD_MBOX_IRQ_MSK);
+ val &= ~SPRD_OUTBOX_FIFO_NOT_EMPTY_IRQ;
+ writel(val, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
+ }
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static void sprd_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
+
+ mutex_lock(&priv->lock);
+ if (--priv->refcnt == 0) {
+ /* Disable inbox & outbox interrupt */
+ writel(SPRD_INBOX_FIFO_IRQ_MASK, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
+ writel(SPRD_OUTBOX_FIFO_IRQ_MASK, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
+ }
+ mutex_unlock(&priv->lock);
+}
+
+static const struct mbox_chan_ops sprd_mbox_ops = {
+ .send_data = sprd_mbox_send_data,
+ .flush = sprd_mbox_flush,
+ .startup = sprd_mbox_startup,
+ .shutdown = sprd_mbox_shutdown,
+};
+
+static void sprd_mbox_disable(void *data)
+{
+ struct sprd_mbox_priv *priv = data;
+
+ clk_disable_unprepare(priv->clk);
+}
+
+static int sprd_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sprd_mbox_priv *priv;
+ int ret, inbox_irq, outbox_irq;
+ unsigned long id;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ mutex_init(&priv->lock);
+
+ /*
+ * The Spreadtrum mailbox uses an inbox to send messages to the target
+ * core, and uses an outbox to receive messages from other cores.
+ *
+ * Thus the mailbox controller supplies 2 different register addresses
+ * and IRQ numbers for inbox and outbox.
+ */
+ priv->inbox_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->inbox_base))
+ return PTR_ERR(priv->inbox_base);
+
+ priv->outbox_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(priv->outbox_base))
+ return PTR_ERR(priv->outbox_base);
+
+ priv->clk = devm_clk_get(dev, "enable");
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "failed to get mailbox clock\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, sprd_mbox_disable, priv);
+ if (ret) {
+ dev_err(dev, "failed to add mailbox disable action\n");
+ return ret;
+ }
+
+ inbox_irq = platform_get_irq(pdev, 0);
+ if (inbox_irq < 0)
+ return inbox_irq;
+
+ ret = devm_request_irq(dev, inbox_irq, sprd_mbox_inbox_isr,
+ IRQF_NO_SUSPEND, dev_name(dev), priv);
+ if (ret) {
+ dev_err(dev, "failed to request inbox IRQ: %d\n", ret);
+ return ret;
+ }
+
+ outbox_irq = platform_get_irq(pdev, 1);
+ if (outbox_irq < 0)
+ return outbox_irq;
+
+ ret = devm_request_irq(dev, outbox_irq, sprd_mbox_outbox_isr,
+ IRQF_NO_SUSPEND, dev_name(dev), priv);
+ if (ret) {
+ dev_err(dev, "failed to request outbox IRQ: %d\n", ret);
+ return ret;
+ }
+
+ /* Get the default outbox FIFO depth */
+ priv->outbox_fifo_depth =
+ readl(priv->outbox_base + SPRD_MBOX_FIFO_DEPTH) + 1;
+ priv->mbox.dev = dev;
+ priv->mbox.chans = &priv->chan[0];
+ priv->mbox.num_chans = SPRD_MBOX_CHAN_MAX;
+ priv->mbox.ops = &sprd_mbox_ops;
+ priv->mbox.txdone_irq = true;
+
+ for (id = 0; id < SPRD_MBOX_CHAN_MAX; id++)
+ priv->chan[id].con_priv = (void *)id;
+
+ ret = devm_mbox_controller_register(dev, &priv->mbox);
+ if (ret) {
+ dev_err(dev, "failed to register mailbox: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id sprd_mbox_of_match[] = {
+ { .compatible = "sprd,sc9860-mailbox", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sprd_mbox_of_match);
+
+static struct platform_driver sprd_mbox_driver = {
+ .driver = {
+ .name = "sprd-mailbox",
+ .of_match_table = sprd_mbox_of_match,
+ },
+ .probe = sprd_mbox_probe,
+};
+module_platform_driver(sprd_mbox_driver);
+
+MODULE_AUTHOR("Baolin Wang <baolin.wang@unisoc.com>");
+MODULE_DESCRIPTION("Spreadtrum mailbox driver");
+MODULE_LICENSE("GPL v2");
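
The FIFO occupancy rule in sprd_mbox_get_fifo_len() deserves a stand-alone restatement: when the hardware read and write pointers coincide, the FULL status bit decides between "completely empty" and "completely full"; otherwise the count is the pointer distance, wrapped around the FIFO depth when the write pointer has lapped the read pointer. A minimal sketch with example numbers (the depth and pointer values are made up):

#include <linux/types.h>

static unsigned int demo_fifo_len(unsigned int wr, unsigned int rd,
				  unsigned int depth, bool full_flag)
{
	if (wr == rd)
		return full_flag ? depth : 0;	/* same position: full or empty */
	if (wr > rd)
		return wr - rd;			/* no wrap-around */
	return depth - rd + wr;			/* write pointer has wrapped */
}

/* e.g. depth = 64, rd = 60, wr = 4  ->  64 - 60 + 4 = 8 pending messages */
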
diff --git a/drivers/mailbox/stm32-ipcc.c b/drivers/mailbox/stm32-ipcc.c
new file mode 100644
index 000000000..ef966887a
--- /dev/null
+++ b/drivers/mailbox/stm32-ipcc.c
@@ -0,0 +1,396 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
+ * Authors: Ludovic Barre <ludovic.barre@st.com> for STMicroelectronics.
+ * Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
+
+#define IPCC_XCR 0x000
+#define XCR_RXOIE BIT(0)
+#define XCR_TXOIE BIT(16)
+
+#define IPCC_XMR 0x004
+#define IPCC_XSCR 0x008
+#define IPCC_XTOYSR 0x00c
+
+#define IPCC_PROC_OFFST 0x010
+
+#define IPCC_HWCFGR 0x3f0
+#define IPCFGR_CHAN_MASK GENMASK(7, 0)
+
+#define IPCC_VER 0x3f4
+#define VER_MINREV_MASK GENMASK(3, 0)
+#define VER_MAJREV_MASK GENMASK(7, 4)
+
+#define RX_BIT_MASK GENMASK(15, 0)
+#define RX_BIT_CHAN(chan) BIT(chan)
+#define TX_BIT_SHIFT 16
+#define TX_BIT_MASK GENMASK(31, 16)
+#define TX_BIT_CHAN(chan) BIT(TX_BIT_SHIFT + (chan))
+
+#define STM32_MAX_PROCS 2
+
+enum {
+ IPCC_IRQ_RX,
+ IPCC_IRQ_TX,
+ IPCC_IRQ_NUM,
+};
+
+struct stm32_ipcc {
+ struct mbox_controller controller;
+ void __iomem *reg_base;
+ void __iomem *reg_proc;
+ struct clk *clk;
+ spinlock_t lock; /* protect access to IPCC registers */
+ int irqs[IPCC_IRQ_NUM];
+ u32 proc_id;
+ u32 n_chans;
+ u32 xcr;
+ u32 xmr;
+};
+
+static inline void stm32_ipcc_set_bits(spinlock_t *lock, void __iomem *reg,
+ u32 mask)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(lock, flags);
+ writel_relaxed(readl_relaxed(reg) | mask, reg);
+ spin_unlock_irqrestore(lock, flags);
+}
+
+static inline void stm32_ipcc_clr_bits(spinlock_t *lock, void __iomem *reg,
+ u32 mask)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(lock, flags);
+ writel_relaxed(readl_relaxed(reg) & ~mask, reg);
+ spin_unlock_irqrestore(lock, flags);
+}
+
+static irqreturn_t stm32_ipcc_rx_irq(int irq, void *data)
+{
+ struct stm32_ipcc *ipcc = data;
+ struct device *dev = ipcc->controller.dev;
+ u32 status, mr, tosr, chan;
+ irqreturn_t ret = IRQ_NONE;
+ int proc_offset;
+
+ /* read 'channel occupied' status from other proc */
+ proc_offset = ipcc->proc_id ? -IPCC_PROC_OFFST : IPCC_PROC_OFFST;
+ tosr = readl_relaxed(ipcc->reg_proc + proc_offset + IPCC_XTOYSR);
+ mr = readl_relaxed(ipcc->reg_proc + IPCC_XMR);
+
+ /* search for unmasked 'channel occupied' */
+ status = tosr & FIELD_GET(RX_BIT_MASK, ~mr);
+
+ for (chan = 0; chan < ipcc->n_chans; chan++) {
+ if (!(status & (1 << chan)))
+ continue;
+
+ dev_dbg(dev, "%s: chan:%d rx\n", __func__, chan);
+
+ mbox_chan_received_data(&ipcc->controller.chans[chan], NULL);
+
+ stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XSCR,
+ RX_BIT_CHAN(chan));
+
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static irqreturn_t stm32_ipcc_tx_irq(int irq, void *data)
+{
+ struct stm32_ipcc *ipcc = data;
+ struct device *dev = ipcc->controller.dev;
+ u32 status, mr, tosr, chan;
+ irqreturn_t ret = IRQ_NONE;
+
+ tosr = readl_relaxed(ipcc->reg_proc + IPCC_XTOYSR);
+ mr = readl_relaxed(ipcc->reg_proc + IPCC_XMR);
+
+ /* search for unmasked 'channel free' */
+ status = ~tosr & FIELD_GET(TX_BIT_MASK, ~mr);
+
+ for (chan = 0; chan < ipcc->n_chans ; chan++) {
+ if (!(status & (1 << chan)))
+ continue;
+
+ dev_dbg(dev, "%s: chan:%d tx\n", __func__, chan);
+
+ /* mask 'tx channel free' interrupt */
+ stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR,
+ TX_BIT_CHAN(chan));
+
+ mbox_chan_txdone(&ipcc->controller.chans[chan], 0);
+
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static int stm32_ipcc_send_data(struct mbox_chan *link, void *data)
+{
+ unsigned int chan = (unsigned int)link->con_priv;
+ struct stm32_ipcc *ipcc = container_of(link->mbox, struct stm32_ipcc,
+ controller);
+
+ dev_dbg(ipcc->controller.dev, "%s: chan:%d\n", __func__, chan);
+
+ /* set channel n occupied */
+ stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XSCR,
+ TX_BIT_CHAN(chan));
+
+ /* unmask 'tx channel free' interrupt */
+ stm32_ipcc_clr_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR,
+ TX_BIT_CHAN(chan));
+
+ return 0;
+}
+
+static int stm32_ipcc_startup(struct mbox_chan *link)
+{
+ unsigned int chan = (unsigned int)link->con_priv;
+ struct stm32_ipcc *ipcc = container_of(link->mbox, struct stm32_ipcc,
+ controller);
+ int ret;
+
+ ret = clk_prepare_enable(ipcc->clk);
+ if (ret) {
+ dev_err(ipcc->controller.dev, "can not enable the clock\n");
+ return ret;
+ }
+
+ /* unmask 'rx channel occupied' interrupt */
+ stm32_ipcc_clr_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR,
+ RX_BIT_CHAN(chan));
+
+ return 0;
+}
+
+static void stm32_ipcc_shutdown(struct mbox_chan *link)
+{
+ unsigned int chan = (unsigned int)link->con_priv;
+ struct stm32_ipcc *ipcc = container_of(link->mbox, struct stm32_ipcc,
+ controller);
+
+ /* mask rx/tx interrupt */
+ stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR,
+ RX_BIT_CHAN(chan) | TX_BIT_CHAN(chan));
+
+ clk_disable_unprepare(ipcc->clk);
+}
+
+static const struct mbox_chan_ops stm32_ipcc_ops = {
+ .send_data = stm32_ipcc_send_data,
+ .startup = stm32_ipcc_startup,
+ .shutdown = stm32_ipcc_shutdown,
+};
+
+static int stm32_ipcc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct stm32_ipcc *ipcc;
+ struct resource *res;
+ unsigned int i;
+ int ret;
+ u32 ip_ver;
+ static const char * const irq_name[] = {"rx", "tx"};
+ irq_handler_t irq_thread[] = {stm32_ipcc_rx_irq, stm32_ipcc_tx_irq};
+
+ if (!np) {
+ dev_err(dev, "No DT found\n");
+ return -ENODEV;
+ }
+
+ ipcc = devm_kzalloc(dev, sizeof(*ipcc), GFP_KERNEL);
+ if (!ipcc)
+ return -ENOMEM;
+
+ spin_lock_init(&ipcc->lock);
+
+ /* proc_id */
+ if (of_property_read_u32(np, "st,proc-id", &ipcc->proc_id)) {
+ dev_err(dev, "Missing st,proc-id\n");
+ return -ENODEV;
+ }
+
+ if (ipcc->proc_id >= STM32_MAX_PROCS) {
+ dev_err(dev, "Invalid proc_id (%d)\n", ipcc->proc_id);
+ return -EINVAL;
+ }
+
+ /* regs */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ipcc->reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ipcc->reg_base))
+ return PTR_ERR(ipcc->reg_base);
+
+ ipcc->reg_proc = ipcc->reg_base + ipcc->proc_id * IPCC_PROC_OFFST;
+
+ /* clock */
+ ipcc->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ipcc->clk))
+ return PTR_ERR(ipcc->clk);
+
+ ret = clk_prepare_enable(ipcc->clk);
+ if (ret) {
+ dev_err(dev, "can not enable the clock\n");
+ return ret;
+ }
+
+ /* irq */
+ for (i = 0; i < IPCC_IRQ_NUM; i++) {
+ ipcc->irqs[i] = platform_get_irq_byname(pdev, irq_name[i]);
+ if (ipcc->irqs[i] < 0) {
+ if (ipcc->irqs[i] != -EPROBE_DEFER)
+ dev_err(dev, "no IRQ specified %s\n",
+ irq_name[i]);
+ ret = ipcc->irqs[i];
+ goto err_clk;
+ }
+
+ ret = devm_request_threaded_irq(dev, ipcc->irqs[i], NULL,
+ irq_thread[i], IRQF_ONESHOT,
+ dev_name(dev), ipcc);
+ if (ret) {
+ dev_err(dev, "failed to request irq %d (%d)\n", i, ret);
+ goto err_clk;
+ }
+ }
+
+ /* mask and enable rx/tx irq */
+ stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR,
+ RX_BIT_MASK | TX_BIT_MASK);
+ stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XCR,
+ XCR_RXOIE | XCR_TXOIE);
+
+ /* wakeup */
+ if (of_property_read_bool(np, "wakeup-source")) {
+ device_set_wakeup_capable(dev, true);
+
+ ret = dev_pm_set_wake_irq(dev, ipcc->irqs[IPCC_IRQ_RX]);
+ if (ret) {
+ dev_err(dev, "Failed to set wake up irq\n");
+ goto err_init_wkp;
+ }
+ }
+
+ /* mailbox controller */
+ ipcc->n_chans = readl_relaxed(ipcc->reg_base + IPCC_HWCFGR);
+ ipcc->n_chans &= IPCFGR_CHAN_MASK;
+
+ ipcc->controller.dev = dev;
+ ipcc->controller.txdone_irq = true;
+ ipcc->controller.ops = &stm32_ipcc_ops;
+ ipcc->controller.num_chans = ipcc->n_chans;
+ ipcc->controller.chans = devm_kcalloc(dev, ipcc->controller.num_chans,
+ sizeof(*ipcc->controller.chans),
+ GFP_KERNEL);
+ if (!ipcc->controller.chans) {
+ ret = -ENOMEM;
+ goto err_irq_wkp;
+ }
+
+ for (i = 0; i < ipcc->controller.num_chans; i++)
+ ipcc->controller.chans[i].con_priv = (void *)i;
+
+ ret = devm_mbox_controller_register(dev, &ipcc->controller);
+ if (ret)
+ goto err_irq_wkp;
+
+ platform_set_drvdata(pdev, ipcc);
+
+ ip_ver = readl_relaxed(ipcc->reg_base + IPCC_VER);
+
+ dev_info(dev, "ipcc rev:%ld.%ld enabled, %d chans, proc %d\n",
+ FIELD_GET(VER_MAJREV_MASK, ip_ver),
+ FIELD_GET(VER_MINREV_MASK, ip_ver),
+ ipcc->controller.num_chans, ipcc->proc_id);
+
+ clk_disable_unprepare(ipcc->clk);
+ return 0;
+
+err_irq_wkp:
+ if (of_property_read_bool(np, "wakeup-source"))
+ dev_pm_clear_wake_irq(dev);
+err_init_wkp:
+ device_set_wakeup_capable(dev, false);
+err_clk:
+ clk_disable_unprepare(ipcc->clk);
+ return ret;
+}
+
+static int stm32_ipcc_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ if (of_property_read_bool(dev->of_node, "wakeup-source"))
+ dev_pm_clear_wake_irq(&pdev->dev);
+
+ device_set_wakeup_capable(dev, false);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int stm32_ipcc_suspend(struct device *dev)
+{
+ struct stm32_ipcc *ipcc = dev_get_drvdata(dev);
+
+ ipcc->xmr = readl_relaxed(ipcc->reg_proc + IPCC_XMR);
+ ipcc->xcr = readl_relaxed(ipcc->reg_proc + IPCC_XCR);
+
+ return 0;
+}
+
+static int stm32_ipcc_resume(struct device *dev)
+{
+ struct stm32_ipcc *ipcc = dev_get_drvdata(dev);
+
+ writel_relaxed(ipcc->xmr, ipcc->reg_proc + IPCC_XMR);
+ writel_relaxed(ipcc->xcr, ipcc->reg_proc + IPCC_XCR);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(stm32_ipcc_pm_ops,
+ stm32_ipcc_suspend, stm32_ipcc_resume);
+
+static const struct of_device_id stm32_ipcc_of_match[] = {
+ { .compatible = "st,stm32mp1-ipcc" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_ipcc_of_match);
+
+static struct platform_driver stm32_ipcc_driver = {
+ .driver = {
+ .name = "stm32-ipcc",
+ .pm = &stm32_ipcc_pm_ops,
+ .of_match_table = stm32_ipcc_of_match,
+ },
+ .probe = stm32_ipcc_probe,
+ .remove = stm32_ipcc_remove,
+};
+
+module_platform_driver(stm32_ipcc_driver);
+
+MODULE_AUTHOR("Ludovic Barre <ludovic.barre@st.com>");
+MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
+MODULE_DESCRIPTION("STM32 IPCC driver");
+MODULE_LICENSE("GPL v2");
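
As a quick illustration of the bit layout this driver relies on: within the XMR, XSCR and XTOYSR registers, the per-channel RX flags occupy bits 15:0 and the TX flags bits 31:16, so RX_BIT_CHAN() and TX_BIT_CHAN() differ only by the 16-bit shift. The channel number below is an arbitrary example.

#include <linux/types.h>
#include <linux/bits.h>

static void demo_stm32_ipcc_bits(void)
{
	unsigned int chan = 2;		/* example channel number */
	u32 rx = BIT(chan);		/* RX_BIT_CHAN(2) == 0x00000004 */
	u32 tx = BIT(16 + chan);	/* TX_BIT_CHAN(2) == 0x00040000 */

	(void)rx;			/* values shown for illustration only */
	(void)tx;
}
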
diff --git a/drivers/mailbox/sun6i-msgbox.c b/drivers/mailbox/sun6i-msgbox.c
new file mode 100644
index 000000000..ccecf2e59
--- /dev/null
+++ b/drivers/mailbox/sun6i-msgbox.c
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2017-2019 Samuel Holland <samuel@sholland.org>
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+
+#define NUM_CHANS 8
+
+#define CTRL_REG(n) (0x0000 + 0x4 * ((n) / 4))
+#define CTRL_RX(n) BIT(0 + 8 * ((n) % 4))
+#define CTRL_TX(n) BIT(4 + 8 * ((n) % 4))
+
+#define REMOTE_IRQ_EN_REG 0x0040
+#define REMOTE_IRQ_STAT_REG 0x0050
+#define LOCAL_IRQ_EN_REG 0x0060
+#define LOCAL_IRQ_STAT_REG 0x0070
+
+#define RX_IRQ(n) BIT(0 + 2 * (n))
+#define RX_IRQ_MASK 0x5555
+#define TX_IRQ(n) BIT(1 + 2 * (n))
+#define TX_IRQ_MASK 0xaaaa
+
+#define FIFO_STAT_REG(n) (0x0100 + 0x4 * (n))
+#define FIFO_STAT_MASK GENMASK(0, 0)
+
+#define MSG_STAT_REG(n) (0x0140 + 0x4 * (n))
+#define MSG_STAT_MASK GENMASK(2, 0)
+
+#define MSG_DATA_REG(n) (0x0180 + 0x4 * (n))
+
+#define mbox_dbg(mbox, ...) dev_dbg((mbox)->controller.dev, __VA_ARGS__)
+
+struct sun6i_msgbox {
+ struct mbox_controller controller;
+ struct clk *clk;
+ spinlock_t lock;
+ void __iomem *regs;
+};
+
+static bool sun6i_msgbox_last_tx_done(struct mbox_chan *chan);
+static bool sun6i_msgbox_peek_data(struct mbox_chan *chan);
+
+static inline int channel_number(struct mbox_chan *chan)
+{
+ return chan - chan->mbox->chans;
+}
+
+static inline struct sun6i_msgbox *to_sun6i_msgbox(struct mbox_chan *chan)
+{
+ return chan->con_priv;
+}
+
+static irqreturn_t sun6i_msgbox_irq(int irq, void *dev_id)
+{
+ struct sun6i_msgbox *mbox = dev_id;
+ uint32_t status;
+ int n;
+
+ /* Only examine channels that are currently enabled. */
+ status = readl(mbox->regs + LOCAL_IRQ_EN_REG) &
+ readl(mbox->regs + LOCAL_IRQ_STAT_REG);
+
+ if (!(status & RX_IRQ_MASK))
+ return IRQ_NONE;
+
+ for (n = 0; n < NUM_CHANS; ++n) {
+ struct mbox_chan *chan = &mbox->controller.chans[n];
+
+ if (!(status & RX_IRQ(n)))
+ continue;
+
+ while (sun6i_msgbox_peek_data(chan)) {
+ uint32_t msg = readl(mbox->regs + MSG_DATA_REG(n));
+
+ mbox_dbg(mbox, "Channel %d received 0x%08x\n", n, msg);
+ mbox_chan_received_data(chan, &msg);
+ }
+
+ /* The IRQ can be cleared only once the FIFO is empty. */
+ writel(RX_IRQ(n), mbox->regs + LOCAL_IRQ_STAT_REG);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int sun6i_msgbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct sun6i_msgbox *mbox = to_sun6i_msgbox(chan);
+ int n = channel_number(chan);
+ uint32_t msg = *(uint32_t *)data;
+
+ /* Using a channel backwards gets the hardware into a bad state. */
+ if (WARN_ON_ONCE(!(readl(mbox->regs + CTRL_REG(n)) & CTRL_TX(n))))
+ return 0;
+
+ writel(msg, mbox->regs + MSG_DATA_REG(n));
+ mbox_dbg(mbox, "Channel %d sent 0x%08x\n", n, msg);
+
+ return 0;
+}
+
+static int sun6i_msgbox_startup(struct mbox_chan *chan)
+{
+ struct sun6i_msgbox *mbox = to_sun6i_msgbox(chan);
+ int n = channel_number(chan);
+
+ /* The coprocessor is responsible for setting channel directions. */
+ if (readl(mbox->regs + CTRL_REG(n)) & CTRL_RX(n)) {
+ /* Flush the receive FIFO. */
+ while (sun6i_msgbox_peek_data(chan))
+ readl(mbox->regs + MSG_DATA_REG(n));
+ writel(RX_IRQ(n), mbox->regs + LOCAL_IRQ_STAT_REG);
+
+ /* Enable the receive IRQ. */
+ spin_lock(&mbox->lock);
+ writel(readl(mbox->regs + LOCAL_IRQ_EN_REG) | RX_IRQ(n),
+ mbox->regs + LOCAL_IRQ_EN_REG);
+ spin_unlock(&mbox->lock);
+ }
+
+ mbox_dbg(mbox, "Channel %d startup complete\n", n);
+
+ return 0;
+}
+
+static void sun6i_msgbox_shutdown(struct mbox_chan *chan)
+{
+ struct sun6i_msgbox *mbox = to_sun6i_msgbox(chan);
+ int n = channel_number(chan);
+
+ if (readl(mbox->regs + CTRL_REG(n)) & CTRL_RX(n)) {
+ /* Disable the receive IRQ. */
+ spin_lock(&mbox->lock);
+ writel(readl(mbox->regs + LOCAL_IRQ_EN_REG) & ~RX_IRQ(n),
+ mbox->regs + LOCAL_IRQ_EN_REG);
+ spin_unlock(&mbox->lock);
+
+ /* Attempt to flush the FIFO until the IRQ is cleared. */
+ do {
+ while (sun6i_msgbox_peek_data(chan))
+ readl(mbox->regs + MSG_DATA_REG(n));
+ writel(RX_IRQ(n), mbox->regs + LOCAL_IRQ_STAT_REG);
+ } while (readl(mbox->regs + LOCAL_IRQ_STAT_REG) & RX_IRQ(n));
+ }
+
+ mbox_dbg(mbox, "Channel %d shutdown complete\n", n);
+}
+
+static bool sun6i_msgbox_last_tx_done(struct mbox_chan *chan)
+{
+ struct sun6i_msgbox *mbox = to_sun6i_msgbox(chan);
+ int n = channel_number(chan);
+
+ /*
+ * The hardware allows snooping on the remote user's IRQ statuses.
+ * We consider a message to be acknowledged only once the receive IRQ
+ * for that channel is cleared. Since the receive IRQ for a channel
+ * cannot be cleared until the FIFO for that channel is empty, this
+ * ensures that the message has actually been read. It also gives the
+ * recipient an opportunity to perform minimal processing before
+ * acknowledging the message.
+ */
+ return !(readl(mbox->regs + REMOTE_IRQ_STAT_REG) & RX_IRQ(n));
+}
+
+static bool sun6i_msgbox_peek_data(struct mbox_chan *chan)
+{
+ struct sun6i_msgbox *mbox = to_sun6i_msgbox(chan);
+ int n = channel_number(chan);
+
+ return readl(mbox->regs + MSG_STAT_REG(n)) & MSG_STAT_MASK;
+}
+
+static const struct mbox_chan_ops sun6i_msgbox_chan_ops = {
+ .send_data = sun6i_msgbox_send_data,
+ .startup = sun6i_msgbox_startup,
+ .shutdown = sun6i_msgbox_shutdown,
+ .last_tx_done = sun6i_msgbox_last_tx_done,
+ .peek_data = sun6i_msgbox_peek_data,
+};
+
+static int sun6i_msgbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mbox_chan *chans;
+ struct reset_control *reset;
+ struct resource *res;
+ struct sun6i_msgbox *mbox;
+ int i, ret;
+
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ chans = devm_kcalloc(dev, NUM_CHANS, sizeof(*chans), GFP_KERNEL);
+ if (!chans)
+ return -ENOMEM;
+
+ for (i = 0; i < NUM_CHANS; ++i)
+ chans[i].con_priv = mbox;
+
+ mbox->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(mbox->clk)) {
+ ret = PTR_ERR(mbox->clk);
+ dev_err(dev, "Failed to get clock: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(mbox->clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable clock: %d\n", ret);
+ return ret;
+ }
+
+ reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(reset)) {
+ ret = PTR_ERR(reset);
+ dev_err(dev, "Failed to get reset control: %d\n", ret);
+ goto err_disable_unprepare;
+ }
+
+ /*
+ * NOTE: We rely on platform firmware to preconfigure the channel
+ * directions, and we share this hardware block with other firmware
+ * that runs concurrently with Linux (e.g. a trusted monitor).
+ *
+ * Therefore, we do *not* assert the reset line if probing fails or
+ * when removing the device.
+ */
+ ret = reset_control_deassert(reset);
+ if (ret) {
+ dev_err(dev, "Failed to deassert reset: %d\n", ret);
+ goto err_disable_unprepare;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENODEV;
+ goto err_disable_unprepare;
+ }
+
+ mbox->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mbox->regs)) {
+ ret = PTR_ERR(mbox->regs);
+ dev_err(dev, "Failed to map MMIO resource: %d\n", ret);
+ goto err_disable_unprepare;
+ }
+
+ /* Disable all IRQs for this end of the msgbox. */
+ writel(0, mbox->regs + LOCAL_IRQ_EN_REG);
+
+ ret = devm_request_irq(dev, irq_of_parse_and_map(dev->of_node, 0),
+ sun6i_msgbox_irq, 0, dev_name(dev), mbox);
+ if (ret) {
+ dev_err(dev, "Failed to register IRQ handler: %d\n", ret);
+ goto err_disable_unprepare;
+ }
+
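+	/*
+	 * The driver does not use a TX-done interrupt; completion is instead
+	 * detected by polling the remote RX IRQ status every 5 ms (see
+	 * sun6i_msgbox_last_tx_done).
+	 */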
+ mbox->controller.dev = dev;
+ mbox->controller.ops = &sun6i_msgbox_chan_ops;
+ mbox->controller.chans = chans;
+ mbox->controller.num_chans = NUM_CHANS;
+ mbox->controller.txdone_irq = false;
+ mbox->controller.txdone_poll = true;
+ mbox->controller.txpoll_period = 5;
+
+ spin_lock_init(&mbox->lock);
+ platform_set_drvdata(pdev, mbox);
+
+ ret = mbox_controller_register(&mbox->controller);
+ if (ret) {
+ dev_err(dev, "Failed to register controller: %d\n", ret);
+ goto err_disable_unprepare;
+ }
+
+ return 0;
+
+err_disable_unprepare:
+ clk_disable_unprepare(mbox->clk);
+
+ return ret;
+}
+
+static int sun6i_msgbox_remove(struct platform_device *pdev)
+{
+ struct sun6i_msgbox *mbox = platform_get_drvdata(pdev);
+
+ mbox_controller_unregister(&mbox->controller);
+ /* See the comment in sun6i_msgbox_probe about the reset line. */
+ clk_disable_unprepare(mbox->clk);
+
+ return 0;
+}
+
+static const struct of_device_id sun6i_msgbox_of_match[] = {
+ { .compatible = "allwinner,sun6i-a31-msgbox", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sun6i_msgbox_of_match);
+
+static struct platform_driver sun6i_msgbox_driver = {
+ .driver = {
+ .name = "sun6i-msgbox",
+ .of_match_table = sun6i_msgbox_of_match,
+ },
+ .probe = sun6i_msgbox_probe,
+ .remove = sun6i_msgbox_remove,
+};
+module_platform_driver(sun6i_msgbox_driver);
+
+MODULE_AUTHOR("Samuel Holland <samuel@sholland.org>");
+MODULE_DESCRIPTION("Allwinner sun6i/sun8i/sun9i/sun50i Message Box");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c
new file mode 100644
index 000000000..4895d8074
--- /dev/null
+++ b/drivers/mailbox/tegra-hsp.c
@@ -0,0 +1,848 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mailbox_controller.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+
+#include <soc/tegra/fuse.h>
+
+#include <dt-bindings/mailbox/tegra186-hsp.h>
+
+#include "mailbox.h"
+
+#define HSP_INT_IE(x) (0x100 + ((x) * 4))
+#define HSP_INT_IV 0x300
+#define HSP_INT_IR 0x304
+
+#define HSP_INT_EMPTY_SHIFT 0
+#define HSP_INT_EMPTY_MASK 0xff
+#define HSP_INT_FULL_SHIFT 8
+#define HSP_INT_FULL_MASK 0xff
+
+#define HSP_INT_DIMENSIONING 0x380
+#define HSP_nSM_SHIFT 0
+#define HSP_nSS_SHIFT 4
+#define HSP_nAS_SHIFT 8
+#define HSP_nDB_SHIFT 12
+#define HSP_nSI_SHIFT 16
+#define HSP_nINT_MASK 0xf
+
+#define HSP_DB_TRIGGER 0x0
+#define HSP_DB_ENABLE 0x4
+#define HSP_DB_RAW 0x8
+#define HSP_DB_PENDING 0xc
+
+#define HSP_SM_SHRD_MBOX 0x0
+#define HSP_SM_SHRD_MBOX_FULL BIT(31)
+#define HSP_SM_SHRD_MBOX_FULL_INT_IE 0x04
+#define HSP_SM_SHRD_MBOX_EMPTY_INT_IE 0x08
+
+#define HSP_DB_CCPLEX 1
+#define HSP_DB_BPMP 3
+#define HSP_DB_MAX 7
+
+struct tegra_hsp_channel;
+struct tegra_hsp;
+
+struct tegra_hsp_channel {
+ struct tegra_hsp *hsp;
+ struct mbox_chan *chan;
+ void __iomem *regs;
+};
+
+struct tegra_hsp_doorbell {
+ struct tegra_hsp_channel channel;
+ struct list_head list;
+ const char *name;
+ unsigned int master;
+ unsigned int index;
+};
+
+struct tegra_hsp_mailbox {
+ struct tegra_hsp_channel channel;
+ unsigned int index;
+ bool producer;
+};
+
+struct tegra_hsp_db_map {
+ const char *name;
+ unsigned int master;
+ unsigned int index;
+};
+
+struct tegra_hsp_soc {
+ const struct tegra_hsp_db_map *map;
+ bool has_per_mb_ie;
+};
+
+struct tegra_hsp {
+ struct device *dev;
+ const struct tegra_hsp_soc *soc;
+ struct mbox_controller mbox_db;
+ struct mbox_controller mbox_sm;
+ void __iomem *regs;
+ unsigned int doorbell_irq;
+ unsigned int *shared_irqs;
+ unsigned int shared_irq;
+ unsigned int num_sm;
+ unsigned int num_as;
+ unsigned int num_ss;
+ unsigned int num_db;
+ unsigned int num_si;
+ spinlock_t lock;
+
+ struct list_head doorbells;
+ struct tegra_hsp_mailbox *mailboxes;
+
+ unsigned long mask;
+};
+
+static inline u32 tegra_hsp_readl(struct tegra_hsp *hsp, unsigned int offset)
+{
+ return readl(hsp->regs + offset);
+}
+
+static inline void tegra_hsp_writel(struct tegra_hsp *hsp, u32 value,
+ unsigned int offset)
+{
+ writel(value, hsp->regs + offset);
+}
+
+static inline u32 tegra_hsp_channel_readl(struct tegra_hsp_channel *channel,
+ unsigned int offset)
+{
+ return readl(channel->regs + offset);
+}
+
+static inline void tegra_hsp_channel_writel(struct tegra_hsp_channel *channel,
+ u32 value, unsigned int offset)
+{
+ writel(value, channel->regs + offset);
+}
+
+static bool tegra_hsp_doorbell_can_ring(struct tegra_hsp_doorbell *db)
+{
+ u32 value;
+
+ value = tegra_hsp_channel_readl(&db->channel, HSP_DB_ENABLE);
+
+ return (value & BIT(TEGRA_HSP_DB_MASTER_CCPLEX)) != 0;
+}
+
+static struct tegra_hsp_doorbell *
+__tegra_hsp_doorbell_get(struct tegra_hsp *hsp, unsigned int master)
+{
+ struct tegra_hsp_doorbell *entry;
+
+ list_for_each_entry(entry, &hsp->doorbells, list)
+ if (entry->master == master)
+ return entry;
+
+ return NULL;
+}
+
+static struct tegra_hsp_doorbell *
+tegra_hsp_doorbell_get(struct tegra_hsp *hsp, unsigned int master)
+{
+ struct tegra_hsp_doorbell *db;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsp->lock, flags);
+ db = __tegra_hsp_doorbell_get(hsp, master);
+ spin_unlock_irqrestore(&hsp->lock, flags);
+
+ return db;
+}
+
+static irqreturn_t tegra_hsp_doorbell_irq(int irq, void *data)
+{
+ struct tegra_hsp *hsp = data;
+ struct tegra_hsp_doorbell *db;
+ unsigned long master, value;
+
+ db = tegra_hsp_doorbell_get(hsp, TEGRA_HSP_DB_MASTER_CCPLEX);
+ if (!db)
+ return IRQ_NONE;
+
+ value = tegra_hsp_channel_readl(&db->channel, HSP_DB_PENDING);
+ tegra_hsp_channel_writel(&db->channel, value, HSP_DB_PENDING);
+
+ spin_lock(&hsp->lock);
+
+ for_each_set_bit(master, &value, hsp->mbox_db.num_chans) {
+ struct tegra_hsp_doorbell *db;
+
+ db = __tegra_hsp_doorbell_get(hsp, master);
+ /*
+		 * Depending on the bootloader chain, some doorbells may already
+		 * be enabled when the CCPLEX doorbell interrupt is requested,
+		 * so the interrupt can fire immediately.
+ *
+ * In that case, db->channel.chan will still be NULL here and
+ * cause a crash if not properly guarded.
+ *
+ * It remains to be seen if ignoring the doorbell in that case
+ * is the correct solution.
+ */
+ if (db && db->channel.chan)
+ mbox_chan_received_data(db->channel.chan, NULL);
+ }
+
+ spin_unlock(&hsp->lock);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tegra_hsp_shared_irq(int irq, void *data)
+{
+ struct tegra_hsp *hsp = data;
+ unsigned long bit, mask;
+ u32 status, value;
+ void *msg;
+
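+	/* Consider only the interrupt sources enabled on this shared interrupt. */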
+ status = tegra_hsp_readl(hsp, HSP_INT_IR) & hsp->mask;
+
+ /* process EMPTY interrupts first */
+ mask = (status >> HSP_INT_EMPTY_SHIFT) & HSP_INT_EMPTY_MASK;
+
+ for_each_set_bit(bit, &mask, hsp->num_sm) {
+ struct tegra_hsp_mailbox *mb = &hsp->mailboxes[bit];
+
+ if (mb->producer) {
+ /*
+ * Disable EMPTY interrupts until data is sent with
+ * the next message. These interrupts are level-
+ * triggered, so if we kept them enabled they would
+ * constantly trigger until we next write data into
+ * the message.
+ */
+ spin_lock(&hsp->lock);
+
+ hsp->mask &= ~BIT(HSP_INT_EMPTY_SHIFT + mb->index);
+ tegra_hsp_writel(hsp, hsp->mask,
+ HSP_INT_IE(hsp->shared_irq));
+
+ spin_unlock(&hsp->lock);
+
+ mbox_chan_txdone(mb->channel.chan, 0);
+ }
+ }
+
+ /* process FULL interrupts */
+ mask = (status >> HSP_INT_FULL_SHIFT) & HSP_INT_FULL_MASK;
+
+ for_each_set_bit(bit, &mask, hsp->num_sm) {
+ struct tegra_hsp_mailbox *mb = &hsp->mailboxes[bit];
+
+ if (!mb->producer) {
+ value = tegra_hsp_channel_readl(&mb->channel,
+ HSP_SM_SHRD_MBOX);
+ value &= ~HSP_SM_SHRD_MBOX_FULL;
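+			/*
+			 * The remaining 31 payload bits are passed to the
+			 * client cast to a pointer-sized value rather than
+			 * through a buffer.
+			 */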
+ msg = (void *)(unsigned long)value;
+ mbox_chan_received_data(mb->channel.chan, msg);
+
+ /*
+ * Need to clear all bits here since some producers,
+ * such as TCU, depend on fields in the register
+ * getting cleared by the consumer.
+ *
+ * The mailbox API doesn't give the consumers a way
+ * of doing that explicitly, so we have to make sure
+ * we cover all possible cases.
+ */
+ tegra_hsp_channel_writel(&mb->channel, 0x0,
+ HSP_SM_SHRD_MBOX);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct tegra_hsp_channel *
+tegra_hsp_doorbell_create(struct tegra_hsp *hsp, const char *name,
+ unsigned int master, unsigned int index)
+{
+ struct tegra_hsp_doorbell *db;
+ unsigned int offset;
+ unsigned long flags;
+
+ db = devm_kzalloc(hsp->dev, sizeof(*db), GFP_KERNEL);
+ if (!db)
+ return ERR_PTR(-ENOMEM);
+
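+	/*
+	 * The doorbell registers follow the common page, the shared mailbox
+	 * pages (two 32 KiB mailboxes per 64 KiB page) and the shared and
+	 * arbitrated semaphore pages; each doorbell occupies 0x100 bytes.
+	 */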
+ offset = (1 + (hsp->num_sm / 2) + hsp->num_ss + hsp->num_as) * SZ_64K;
+ offset += index * 0x100;
+
+ db->channel.regs = hsp->regs + offset;
+ db->channel.hsp = hsp;
+
+ db->name = devm_kstrdup_const(hsp->dev, name, GFP_KERNEL);
+ db->master = master;
+ db->index = index;
+
+ spin_lock_irqsave(&hsp->lock, flags);
+ list_add_tail(&db->list, &hsp->doorbells);
+ spin_unlock_irqrestore(&hsp->lock, flags);
+
+ return &db->channel;
+}
+
+static int tegra_hsp_doorbell_send_data(struct mbox_chan *chan, void *data)
+{
+ struct tegra_hsp_doorbell *db = chan->con_priv;
+
+ tegra_hsp_channel_writel(&db->channel, 1, HSP_DB_TRIGGER);
+
+ return 0;
+}
+
+static int tegra_hsp_doorbell_startup(struct mbox_chan *chan)
+{
+ struct tegra_hsp_doorbell *db = chan->con_priv;
+ struct tegra_hsp *hsp = db->channel.hsp;
+ struct tegra_hsp_doorbell *ccplex;
+ unsigned long flags;
+ u32 value;
+
+ if (db->master >= chan->mbox->num_chans) {
+ dev_err(chan->mbox->dev,
+ "invalid master ID %u for HSP channel\n",
+ db->master);
+ return -EINVAL;
+ }
+
+ ccplex = tegra_hsp_doorbell_get(hsp, TEGRA_HSP_DB_MASTER_CCPLEX);
+ if (!ccplex)
+ return -ENODEV;
+
+ /*
+ * On simulation platforms the BPMP hasn't had a chance yet to mark
+ * the doorbell as ringable by the CCPLEX, so we want to skip extra
+ * checks here.
+ */
+ if (tegra_is_silicon() && !tegra_hsp_doorbell_can_ring(db))
+ return -ENODEV;
+
+ spin_lock_irqsave(&hsp->lock, flags);
+
+ value = tegra_hsp_channel_readl(&ccplex->channel, HSP_DB_ENABLE);
+ value |= BIT(db->master);
+ tegra_hsp_channel_writel(&ccplex->channel, value, HSP_DB_ENABLE);
+
+ spin_unlock_irqrestore(&hsp->lock, flags);
+
+ return 0;
+}
+
+static void tegra_hsp_doorbell_shutdown(struct mbox_chan *chan)
+{
+ struct tegra_hsp_doorbell *db = chan->con_priv;
+ struct tegra_hsp *hsp = db->channel.hsp;
+ struct tegra_hsp_doorbell *ccplex;
+ unsigned long flags;
+ u32 value;
+
+ ccplex = tegra_hsp_doorbell_get(hsp, TEGRA_HSP_DB_MASTER_CCPLEX);
+ if (!ccplex)
+ return;
+
+ spin_lock_irqsave(&hsp->lock, flags);
+
+ value = tegra_hsp_channel_readl(&ccplex->channel, HSP_DB_ENABLE);
+ value &= ~BIT(db->master);
+ tegra_hsp_channel_writel(&ccplex->channel, value, HSP_DB_ENABLE);
+
+ spin_unlock_irqrestore(&hsp->lock, flags);
+}
+
+static const struct mbox_chan_ops tegra_hsp_db_ops = {
+ .send_data = tegra_hsp_doorbell_send_data,
+ .startup = tegra_hsp_doorbell_startup,
+ .shutdown = tegra_hsp_doorbell_shutdown,
+};
+
+static int tegra_hsp_mailbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct tegra_hsp_mailbox *mb = chan->con_priv;
+ struct tegra_hsp *hsp = mb->channel.hsp;
+ unsigned long flags;
+ u32 value;
+
+ if (WARN_ON(!mb->producer))
+ return -EPERM;
+
+ /* copy data and mark mailbox full */
+ value = (u32)(unsigned long)data;
+ value |= HSP_SM_SHRD_MBOX_FULL;
+
+ tegra_hsp_channel_writel(&mb->channel, value, HSP_SM_SHRD_MBOX);
+
+ /* enable EMPTY interrupt for the shared mailbox */
+ spin_lock_irqsave(&hsp->lock, flags);
+
+ hsp->mask |= BIT(HSP_INT_EMPTY_SHIFT + mb->index);
+ tegra_hsp_writel(hsp, hsp->mask, HSP_INT_IE(hsp->shared_irq));
+
+ spin_unlock_irqrestore(&hsp->lock, flags);
+
+ return 0;
+}
+
+static int tegra_hsp_mailbox_flush(struct mbox_chan *chan,
+ unsigned long timeout)
+{
+ struct tegra_hsp_mailbox *mb = chan->con_priv;
+ struct tegra_hsp_channel *ch = &mb->channel;
+ u32 value;
+
+ timeout = jiffies + msecs_to_jiffies(timeout);
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_hsp_channel_readl(ch, HSP_SM_SHRD_MBOX);
+ if ((value & HSP_SM_SHRD_MBOX_FULL) == 0) {
+ mbox_chan_txdone(chan, 0);
+
+ /* Wait until channel is empty */
+ if (chan->active_req != NULL)
+ continue;
+
+ return 0;
+ }
+
+ udelay(1);
+ }
+
+ return -ETIME;
+}
+
+static int tegra_hsp_mailbox_startup(struct mbox_chan *chan)
+{
+ struct tegra_hsp_mailbox *mb = chan->con_priv;
+ struct tegra_hsp_channel *ch = &mb->channel;
+ struct tegra_hsp *hsp = mb->channel.hsp;
+ unsigned long flags;
+
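+	/* TX completion is signalled from the EMPTY interrupt in tegra_hsp_shared_irq(). */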
+ chan->txdone_method = TXDONE_BY_IRQ;
+
+ /*
+ * Shared mailboxes start out as consumers by default. FULL and EMPTY
+ * interrupts are coalesced at the same shared interrupt.
+ *
+ * Keep EMPTY interrupts disabled at startup and only enable them when
+ * the mailbox is actually full. This is required because the FULL and
+ * EMPTY interrupts are level-triggered, so keeping EMPTY interrupts
+ * enabled all the time would cause an interrupt storm while mailboxes
+ * are idle.
+ */
+
+ spin_lock_irqsave(&hsp->lock, flags);
+
+ if (mb->producer)
+ hsp->mask &= ~BIT(HSP_INT_EMPTY_SHIFT + mb->index);
+ else
+ hsp->mask |= BIT(HSP_INT_FULL_SHIFT + mb->index);
+
+ tegra_hsp_writel(hsp, hsp->mask, HSP_INT_IE(hsp->shared_irq));
+
+ spin_unlock_irqrestore(&hsp->lock, flags);
+
+ if (hsp->soc->has_per_mb_ie) {
+ if (mb->producer)
+ tegra_hsp_channel_writel(ch, 0x0,
+ HSP_SM_SHRD_MBOX_EMPTY_INT_IE);
+ else
+ tegra_hsp_channel_writel(ch, 0x1,
+ HSP_SM_SHRD_MBOX_FULL_INT_IE);
+ }
+
+ return 0;
+}
+
+static void tegra_hsp_mailbox_shutdown(struct mbox_chan *chan)
+{
+ struct tegra_hsp_mailbox *mb = chan->con_priv;
+ struct tegra_hsp_channel *ch = &mb->channel;
+ struct tegra_hsp *hsp = mb->channel.hsp;
+ unsigned long flags;
+
+ if (hsp->soc->has_per_mb_ie) {
+ if (mb->producer)
+ tegra_hsp_channel_writel(ch, 0x0,
+ HSP_SM_SHRD_MBOX_EMPTY_INT_IE);
+ else
+ tegra_hsp_channel_writel(ch, 0x0,
+ HSP_SM_SHRD_MBOX_FULL_INT_IE);
+ }
+
+ spin_lock_irqsave(&hsp->lock, flags);
+
+ if (mb->producer)
+ hsp->mask &= ~BIT(HSP_INT_EMPTY_SHIFT + mb->index);
+ else
+ hsp->mask &= ~BIT(HSP_INT_FULL_SHIFT + mb->index);
+
+ tegra_hsp_writel(hsp, hsp->mask, HSP_INT_IE(hsp->shared_irq));
+
+ spin_unlock_irqrestore(&hsp->lock, flags);
+}
+
+static const struct mbox_chan_ops tegra_hsp_sm_ops = {
+ .send_data = tegra_hsp_mailbox_send_data,
+ .flush = tegra_hsp_mailbox_flush,
+ .startup = tegra_hsp_mailbox_startup,
+ .shutdown = tegra_hsp_mailbox_shutdown,
+};
+
+static struct mbox_chan *tegra_hsp_db_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *args)
+{
+ struct tegra_hsp *hsp = container_of(mbox, struct tegra_hsp, mbox_db);
+ unsigned int type = args->args[0], master = args->args[1];
+ struct tegra_hsp_channel *channel = ERR_PTR(-ENODEV);
+ struct tegra_hsp_doorbell *db;
+ struct mbox_chan *chan;
+ unsigned long flags;
+ unsigned int i;
+
+ if (type != TEGRA_HSP_MBOX_TYPE_DB || !hsp->doorbell_irq)
+ return ERR_PTR(-ENODEV);
+
+ db = tegra_hsp_doorbell_get(hsp, master);
+ if (db)
+ channel = &db->channel;
+
+ if (IS_ERR(channel))
+ return ERR_CAST(channel);
+
+ spin_lock_irqsave(&hsp->lock, flags);
+
+ for (i = 0; i < mbox->num_chans; i++) {
+ chan = &mbox->chans[i];
+ if (!chan->con_priv) {
+ channel->chan = chan;
+ chan->con_priv = db;
+ break;
+ }
+
+ chan = NULL;
+ }
+
+ spin_unlock_irqrestore(&hsp->lock, flags);
+
+ return chan ?: ERR_PTR(-EBUSY);
+}
+
+static struct mbox_chan *tegra_hsp_sm_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *args)
+{
+ struct tegra_hsp *hsp = container_of(mbox, struct tegra_hsp, mbox_sm);
+ unsigned int type = args->args[0], index;
+ struct tegra_hsp_mailbox *mb;
+
+ index = args->args[1] & TEGRA_HSP_SM_MASK;
+
+ if (type != TEGRA_HSP_MBOX_TYPE_SM || !hsp->shared_irqs ||
+ index >= hsp->num_sm)
+ return ERR_PTR(-ENODEV);
+
+ mb = &hsp->mailboxes[index];
+
+ if ((args->args[1] & TEGRA_HSP_SM_FLAG_TX) == 0)
+ mb->producer = false;
+ else
+ mb->producer = true;
+
+ return mb->channel.chan;
+}
+
+static int tegra_hsp_add_doorbells(struct tegra_hsp *hsp)
+{
+ const struct tegra_hsp_db_map *map = hsp->soc->map;
+ struct tegra_hsp_channel *channel;
+
+ while (map->name) {
+ channel = tegra_hsp_doorbell_create(hsp, map->name,
+ map->master, map->index);
+ if (IS_ERR(channel))
+ return PTR_ERR(channel);
+
+ map++;
+ }
+
+ return 0;
+}
+
+static int tegra_hsp_add_mailboxes(struct tegra_hsp *hsp, struct device *dev)
+{
+ int i;
+
+ hsp->mailboxes = devm_kcalloc(dev, hsp->num_sm, sizeof(*hsp->mailboxes),
+ GFP_KERNEL);
+ if (!hsp->mailboxes)
+ return -ENOMEM;
+
+ for (i = 0; i < hsp->num_sm; i++) {
+ struct tegra_hsp_mailbox *mb = &hsp->mailboxes[i];
+
+ mb->index = i;
+
+ mb->channel.hsp = hsp;
+ mb->channel.regs = hsp->regs + SZ_64K + i * SZ_32K;
+ mb->channel.chan = &hsp->mbox_sm.chans[i];
+ mb->channel.chan->con_priv = mb;
+ }
+
+ return 0;
+}
+
+static int tegra_hsp_request_shared_irq(struct tegra_hsp *hsp)
+{
+ unsigned int i, irq = 0;
+ int err;
+
+ for (i = 0; i < hsp->num_si; i++) {
+ irq = hsp->shared_irqs[i];
+ if (irq <= 0)
+ continue;
+
+ err = devm_request_irq(hsp->dev, irq, tegra_hsp_shared_irq, 0,
+ dev_name(hsp->dev), hsp);
+ if (err < 0) {
+ dev_err(hsp->dev, "failed to request interrupt: %d\n",
+ err);
+ continue;
+ }
+
+ hsp->shared_irq = i;
+
+ /* disable all interrupts */
+ tegra_hsp_writel(hsp, 0, HSP_INT_IE(hsp->shared_irq));
+
+ dev_dbg(hsp->dev, "interrupt requested: %u\n", irq);
+
+ break;
+ }
+
+ if (i == hsp->num_si) {
+ dev_err(hsp->dev, "failed to find available interrupt\n");
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+static int tegra_hsp_probe(struct platform_device *pdev)
+{
+ struct tegra_hsp *hsp;
+ struct resource *res;
+ unsigned int i;
+ u32 value;
+ int err;
+
+ hsp = devm_kzalloc(&pdev->dev, sizeof(*hsp), GFP_KERNEL);
+ if (!hsp)
+ return -ENOMEM;
+
+ hsp->dev = &pdev->dev;
+ hsp->soc = of_device_get_match_data(&pdev->dev);
+ INIT_LIST_HEAD(&hsp->doorbells);
+ spin_lock_init(&hsp->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hsp->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hsp->regs))
+ return PTR_ERR(hsp->regs);
+
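+	/* The dimensioning register reports how many of each resource this HSP instance provides. */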
+ value = tegra_hsp_readl(hsp, HSP_INT_DIMENSIONING);
+ hsp->num_sm = (value >> HSP_nSM_SHIFT) & HSP_nINT_MASK;
+ hsp->num_ss = (value >> HSP_nSS_SHIFT) & HSP_nINT_MASK;
+ hsp->num_as = (value >> HSP_nAS_SHIFT) & HSP_nINT_MASK;
+ hsp->num_db = (value >> HSP_nDB_SHIFT) & HSP_nINT_MASK;
+ hsp->num_si = (value >> HSP_nSI_SHIFT) & HSP_nINT_MASK;
+
+ err = platform_get_irq_byname_optional(pdev, "doorbell");
+ if (err >= 0)
+ hsp->doorbell_irq = err;
+
+ if (hsp->num_si > 0) {
+ unsigned int count = 0;
+
+ hsp->shared_irqs = devm_kcalloc(&pdev->dev, hsp->num_si,
+ sizeof(*hsp->shared_irqs),
+ GFP_KERNEL);
+ if (!hsp->shared_irqs)
+ return -ENOMEM;
+
+ for (i = 0; i < hsp->num_si; i++) {
+ char *name;
+
+ name = kasprintf(GFP_KERNEL, "shared%u", i);
+ if (!name)
+ return -ENOMEM;
+
+ err = platform_get_irq_byname_optional(pdev, name);
+ if (err >= 0) {
+ hsp->shared_irqs[i] = err;
+ count++;
+ }
+
+ kfree(name);
+ }
+
+ if (count == 0) {
+ devm_kfree(&pdev->dev, hsp->shared_irqs);
+ hsp->shared_irqs = NULL;
+ }
+ }
+
+ /* setup the doorbell controller */
+ hsp->mbox_db.of_xlate = tegra_hsp_db_xlate;
+ hsp->mbox_db.num_chans = 32;
+ hsp->mbox_db.dev = &pdev->dev;
+ hsp->mbox_db.ops = &tegra_hsp_db_ops;
+
+ hsp->mbox_db.chans = devm_kcalloc(&pdev->dev, hsp->mbox_db.num_chans,
+ sizeof(*hsp->mbox_db.chans),
+ GFP_KERNEL);
+ if (!hsp->mbox_db.chans)
+ return -ENOMEM;
+
+ if (hsp->doorbell_irq) {
+ err = tegra_hsp_add_doorbells(hsp);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to add doorbells: %d\n",
+ err);
+ return err;
+ }
+ }
+
+ err = devm_mbox_controller_register(&pdev->dev, &hsp->mbox_db);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register doorbell mailbox: %d\n",
+ err);
+ return err;
+ }
+
+ /* setup the shared mailbox controller */
+ hsp->mbox_sm.of_xlate = tegra_hsp_sm_xlate;
+ hsp->mbox_sm.num_chans = hsp->num_sm;
+ hsp->mbox_sm.dev = &pdev->dev;
+ hsp->mbox_sm.ops = &tegra_hsp_sm_ops;
+
+ hsp->mbox_sm.chans = devm_kcalloc(&pdev->dev, hsp->mbox_sm.num_chans,
+ sizeof(*hsp->mbox_sm.chans),
+ GFP_KERNEL);
+ if (!hsp->mbox_sm.chans)
+ return -ENOMEM;
+
+ if (hsp->shared_irqs) {
+ err = tegra_hsp_add_mailboxes(hsp, &pdev->dev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to add mailboxes: %d\n",
+ err);
+ return err;
+ }
+ }
+
+ err = devm_mbox_controller_register(&pdev->dev, &hsp->mbox_sm);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register shared mailbox: %d\n",
+ err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, hsp);
+
+ if (hsp->doorbell_irq) {
+ err = devm_request_irq(&pdev->dev, hsp->doorbell_irq,
+ tegra_hsp_doorbell_irq, IRQF_NO_SUSPEND,
+ dev_name(&pdev->dev), hsp);
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "failed to request doorbell IRQ#%u: %d\n",
+ hsp->doorbell_irq, err);
+ return err;
+ }
+ }
+
+ if (hsp->shared_irqs) {
+ err = tegra_hsp_request_shared_irq(hsp);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused tegra_hsp_resume(struct device *dev)
+{
+ struct tegra_hsp *hsp = dev_get_drvdata(dev);
+ unsigned int i;
+ struct tegra_hsp_doorbell *db;
+
+ list_for_each_entry(db, &hsp->doorbells, list) {
+ if (db && db->channel.chan)
+ tegra_hsp_doorbell_startup(db->channel.chan);
+ }
+
+ if (hsp->mailboxes) {
+ for (i = 0; i < hsp->num_sm; i++) {
+ struct tegra_hsp_mailbox *mb = &hsp->mailboxes[i];
+
+ if (mb->channel.chan->cl)
+ tegra_hsp_mailbox_startup(mb->channel.chan);
+ }
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops tegra_hsp_pm_ops = {
+ .resume_noirq = tegra_hsp_resume,
+};
+
+static const struct tegra_hsp_db_map tegra186_hsp_db_map[] = {
+ { "ccplex", TEGRA_HSP_DB_MASTER_CCPLEX, HSP_DB_CCPLEX, },
+ { "bpmp", TEGRA_HSP_DB_MASTER_BPMP, HSP_DB_BPMP, },
+ { /* sentinel */ }
+};
+
+static const struct tegra_hsp_soc tegra186_hsp_soc = {
+ .map = tegra186_hsp_db_map,
+ .has_per_mb_ie = false,
+};
+
+static const struct tegra_hsp_soc tegra194_hsp_soc = {
+ .map = tegra186_hsp_db_map,
+ .has_per_mb_ie = true,
+};
+
+static const struct of_device_id tegra_hsp_match[] = {
+ { .compatible = "nvidia,tegra186-hsp", .data = &tegra186_hsp_soc },
+ { .compatible = "nvidia,tegra194-hsp", .data = &tegra194_hsp_soc },
+ { }
+};
+
+static struct platform_driver tegra_hsp_driver = {
+ .driver = {
+ .name = "tegra-hsp",
+ .of_match_table = tegra_hsp_match,
+ .pm = &tegra_hsp_pm_ops,
+ },
+ .probe = tegra_hsp_probe,
+};
+
+static int __init tegra_hsp_init(void)
+{
+ return platform_driver_register(&tegra_hsp_driver);
+}
+core_initcall(tegra_hsp_init);
diff --git a/drivers/mailbox/ti-msgmgr.c b/drivers/mailbox/ti-msgmgr.c
new file mode 100644
index 000000000..535fe73ce
--- /dev/null
+++ b/drivers/mailbox/ti-msgmgr.c
@@ -0,0 +1,845 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments' Message Manager Driver
+ *
+ * Copyright (C) 2015-2017 Texas Instruments Incorporated - https://www.ti.com/
+ * Nishanth Menon
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/soc/ti/ti-msgmgr.h>
+
+#define Q_DATA_OFFSET(proxy, queue, reg) \
+ ((0x10000 * (proxy)) + (0x80 * (queue)) + ((reg) * 4))
+#define Q_STATE_OFFSET(queue) ((queue) * 0x4)
+#define Q_STATE_ENTRY_COUNT_MASK (0xFFF000)
+
+#define SPROXY_THREAD_OFFSET(tid) (0x1000 * (tid))
+#define SPROXY_THREAD_DATA_OFFSET(tid, reg) \
+ (SPROXY_THREAD_OFFSET(tid) + ((reg) * 0x4) + 0x4)
+
+#define SPROXY_THREAD_STATUS_OFFSET(tid) (SPROXY_THREAD_OFFSET(tid))
+
+#define SPROXY_THREAD_STATUS_COUNT_MASK (0xFF)
+
+#define SPROXY_THREAD_CTRL_OFFSET(tid) (0x1000 + SPROXY_THREAD_OFFSET(tid))
+#define SPROXY_THREAD_CTRL_DIR_MASK (0x1 << 31)
+
+/**
+ * struct ti_msgmgr_valid_queue_desc - SoC valid queues meant for this processor
+ * @queue_id: Queue Number for this path
+ * @proxy_id: Proxy ID representing the processor in SoC
+ * @is_tx:	Is this a transmit path?
+ */
+struct ti_msgmgr_valid_queue_desc {
+ u8 queue_id;
+ u8 proxy_id;
+ bool is_tx;
+};
+
+/**
+ * struct ti_msgmgr_desc - Description of message manager integration
+ * @queue_count: Number of Queues
+ * @max_message_size: Message size in bytes
+ * @max_messages: Number of messages
+ * @data_first_reg: First data register for proxy data region
+ * @data_last_reg: Last data register for proxy data region
+ * @status_cnt_mask: Mask for getting the status value
+ * @status_err_mask: Mask for getting the error value, if applicable
+ * @tx_polled: Do I need to use polled mechanism for tx
+ * @tx_poll_timeout_ms: Timeout in ms if polled
+ * @valid_queues: List of Valid queues that the processor can access
+ * @data_region_name: Name of the proxy data region
+ * @status_region_name: Name of the proxy status region
+ * @ctrl_region_name: Name of the proxy control region
+ * @num_valid_queues: Number of valid queues
+ * @is_sproxy:	Is this a Secure Proxy instance?
+ *
+ * This structure is used in of match data to describe how integration
+ * for a specific compatible SoC is done.
+ */
+struct ti_msgmgr_desc {
+ u8 queue_count;
+ u8 max_message_size;
+ u8 max_messages;
+ u8 data_first_reg;
+ u8 data_last_reg;
+ u32 status_cnt_mask;
+ u32 status_err_mask;
+ bool tx_polled;
+ int tx_poll_timeout_ms;
+ const struct ti_msgmgr_valid_queue_desc *valid_queues;
+ const char *data_region_name;
+ const char *status_region_name;
+ const char *ctrl_region_name;
+ int num_valid_queues;
+ bool is_sproxy;
+};
+
+/**
+ * struct ti_queue_inst - Description of a queue instance
+ * @name: Queue Name
+ * @queue_id: Queue Identifier as mapped on SoC
+ * @proxy_id: Proxy Identifier as mapped on SoC
+ * @irq: IRQ for Rx Queue
+ * @is_tx:	'true' if transmit queue, else 'false'
+ * @queue_buff_start: First register of Data Buffer
+ * @queue_buff_end: Last (or confirmation) register of Data buffer
+ * @queue_state: Queue status register
+ * @queue_ctrl: Queue Control register
+ * @chan: Mailbox channel
+ * @rx_buff: Receive buffer pointer allocated at probe, max_message_size
+ */
+struct ti_queue_inst {
+ char name[30];
+ u8 queue_id;
+ u8 proxy_id;
+ int irq;
+ bool is_tx;
+ void __iomem *queue_buff_start;
+ void __iomem *queue_buff_end;
+ void __iomem *queue_state;
+ void __iomem *queue_ctrl;
+ struct mbox_chan *chan;
+ u32 *rx_buff;
+};
+
+/**
+ * struct ti_msgmgr_inst - Description of a Message Manager Instance
+ * @dev: device pointer corresponding to the Message Manager instance
+ * @desc: Description of the SoC integration
+ * @queue_proxy_region: Queue proxy region where queue buffers are located
+ * @queue_state_debug_region: Queue status register regions
+ * @queue_ctrl_region: Queue Control register regions
+ * @num_valid_queues: Number of valid queues defined for the processor
+ * Note: other queues are probably reserved for other processors
+ * in the SoC.
+ * @qinsts: Array of valid Queue Instances for the Processor
+ * @mbox: Mailbox Controller
+ * @chans: Array for channels corresponding to the Queue Instances.
+ */
+struct ti_msgmgr_inst {
+ struct device *dev;
+ const struct ti_msgmgr_desc *desc;
+ void __iomem *queue_proxy_region;
+ void __iomem *queue_state_debug_region;
+ void __iomem *queue_ctrl_region;
+ u8 num_valid_queues;
+ struct ti_queue_inst *qinsts;
+ struct mbox_controller mbox;
+ struct mbox_chan *chans;
+};
+
+/**
+ * ti_msgmgr_queue_get_num_messages() - Get the number of pending messages
+ * @d: Description of message manager
+ * @qinst: Queue instance for which we check the number of pending messages
+ *
+ * Return: number of messages pending in the queue (0 == no pending messages)
+ */
+static inline int
+ti_msgmgr_queue_get_num_messages(const struct ti_msgmgr_desc *d,
+ struct ti_queue_inst *qinst)
+{
+ u32 val;
+ u32 status_cnt_mask = d->status_cnt_mask;
+
+ /*
+ * We cannot use relaxed operation here - update may happen
+ * real-time.
+ */
+ val = readl(qinst->queue_state) & status_cnt_mask;
+ val >>= __ffs(status_cnt_mask);
+
+ return val;
+}
+
+/**
+ * ti_msgmgr_queue_is_error() - Check to see if there is queue error
+ * @d: Description of message manager
+ * @qinst: Queue instance for which we check the number of pending messages
+ *
+ * Return: true if error, else false
+ */
+static inline bool ti_msgmgr_queue_is_error(const struct ti_msgmgr_desc *d,
+ struct ti_queue_inst *qinst)
+{
+ u32 val;
+
+ /* Msgmgr has no error detection */
+ if (!d->is_sproxy)
+ return false;
+
+ /*
+ * We cannot use relaxed operation here - update may happen
+ * real-time.
+ */
+ val = readl(qinst->queue_state) & d->status_err_mask;
+
+ return val ? true : false;
+}
+
+/**
+ * ti_msgmgr_queue_rx_interrupt() - Interrupt handler for receive Queue
+ * @irq: Interrupt number
+ * @p: Channel Pointer
+ *
+ * Return: -EINVAL if there is no instance
+ * IRQ_NONE if the interrupt is not ours.
+ * IRQ_HANDLED if the rx interrupt was successfully handled.
+ */
+static irqreturn_t ti_msgmgr_queue_rx_interrupt(int irq, void *p)
+{
+ struct mbox_chan *chan = p;
+ struct device *dev = chan->mbox->dev;
+ struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
+ struct ti_queue_inst *qinst = chan->con_priv;
+ const struct ti_msgmgr_desc *desc;
+ int msg_count, num_words;
+ struct ti_msgmgr_message message;
+ void __iomem *data_reg;
+ u32 *word_data;
+
+ if (WARN_ON(!inst)) {
+ dev_err(dev, "no platform drv data??\n");
+ return -EINVAL;
+ }
+
+ /* Do I have an invalid interrupt source? */
+ if (qinst->is_tx) {
+ dev_err(dev, "Cannot handle rx interrupt on tx channel %s\n",
+ qinst->name);
+ return IRQ_NONE;
+ }
+
+ desc = inst->desc;
+ if (ti_msgmgr_queue_is_error(desc, qinst)) {
+ dev_err(dev, "Error on Rx channel %s\n", qinst->name);
+ return IRQ_NONE;
+ }
+
+ /* Do I actually have messages to read? */
+ msg_count = ti_msgmgr_queue_get_num_messages(desc, qinst);
+ if (!msg_count) {
+ /* Shared IRQ? */
+ dev_dbg(dev, "Spurious event - 0 pending data!\n");
+ return IRQ_NONE;
+ }
+
+ /*
+ * I have no idea about the protocol being used to communicate with the
+ * remote producer - 0 could be valid data, so I wont make a judgement
+ * of how many bytes I should be reading. Let the client figure this
+ * out.. I just read the full message and pass it on..
+ */
+ message.len = desc->max_message_size;
+ message.buf = (u8 *)qinst->rx_buff;
+
+ /*
+ * NOTE about register access involved here:
+ * the hardware block is implemented with 32bit access operations and no
+ * support for data splitting. We don't want the hardware to misbehave
+ * with sub 32bit access - For example: if the last register read is
+ * split into byte wise access, it can result in the queue getting
+ * stuck or indeterminate behavior. An out of order read operation may
+ * result in weird data results as well.
+ * Hence, we do not use memcpy_fromio or __ioread32_copy here, instead
+ * we depend on readl for the purpose.
+ *
+ * Also note that the final register read automatically marks the
+ * queue message as read.
+ */
+ for (data_reg = qinst->queue_buff_start, word_data = qinst->rx_buff,
+ num_words = (desc->max_message_size / sizeof(u32));
+ num_words; num_words--, data_reg += sizeof(u32), word_data++)
+ *word_data = readl(data_reg);
+
+ /*
+ * Last register read automatically clears the IRQ if only 1 message
+ * is pending - so send the data up the stack..
+ * NOTE: Client is expected to be as optimal as possible, since
+ * we invoke the handler in IRQ context.
+ */
+ mbox_chan_received_data(chan, (void *)&message);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ti_msgmgr_queue_peek_data() - Peek to see if there are any rx messages.
+ * @chan: Channel Pointer
+ *
+ * Return: 'true' if there is pending rx data, 'false' if there is none.
+ */
+static bool ti_msgmgr_queue_peek_data(struct mbox_chan *chan)
+{
+ struct ti_queue_inst *qinst = chan->con_priv;
+ struct device *dev = chan->mbox->dev;
+ struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
+ const struct ti_msgmgr_desc *desc = inst->desc;
+ int msg_count;
+
+ if (qinst->is_tx)
+ return false;
+
+ if (ti_msgmgr_queue_is_error(desc, qinst)) {
+ dev_err(dev, "Error on channel %s\n", qinst->name);
+ return false;
+ }
+
+ msg_count = ti_msgmgr_queue_get_num_messages(desc, qinst);
+
+ return msg_count ? true : false;
+}
+
+/**
+ * ti_msgmgr_last_tx_done() - See if all the tx messages are sent
+ * @chan: Channel pointer
+ *
+ * Return: 'true' if no pending tx data, 'false' if there are any.
+ */
+static bool ti_msgmgr_last_tx_done(struct mbox_chan *chan)
+{
+ struct ti_queue_inst *qinst = chan->con_priv;
+ struct device *dev = chan->mbox->dev;
+ struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
+ const struct ti_msgmgr_desc *desc = inst->desc;
+ int msg_count;
+
+ if (!qinst->is_tx)
+ return false;
+
+ if (ti_msgmgr_queue_is_error(desc, qinst)) {
+ dev_err(dev, "Error on channel %s\n", qinst->name);
+ return false;
+ }
+
+ msg_count = ti_msgmgr_queue_get_num_messages(desc, qinst);
+
+ if (desc->is_sproxy) {
+ /* In secure proxy, msg_count indicates how many we can send */
+ return msg_count ? true : false;
+ }
+
+ /* if we have any messages pending.. */
+ return msg_count ? false : true;
+}
+
+/**
+ * ti_msgmgr_send_data() - Send data
+ * @chan: Channel Pointer
+ * @data: ti_msgmgr_message * Message Pointer
+ *
+ * Return: 0 if all goes good, else appropriate error messages.
+ */
+static int ti_msgmgr_send_data(struct mbox_chan *chan, void *data)
+{
+ struct device *dev = chan->mbox->dev;
+ struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
+ const struct ti_msgmgr_desc *desc;
+ struct ti_queue_inst *qinst = chan->con_priv;
+ int num_words, trail_bytes;
+ struct ti_msgmgr_message *message = data;
+ void __iomem *data_reg;
+ u32 *word_data;
+
+ if (WARN_ON(!inst)) {
+ dev_err(dev, "no platform drv data??\n");
+ return -EINVAL;
+ }
+ desc = inst->desc;
+
+ if (ti_msgmgr_queue_is_error(desc, qinst)) {
+ dev_err(dev, "Error on channel %s\n", qinst->name);
+		return -EIO;
+ }
+
+ if (desc->max_message_size < message->len) {
+ dev_err(dev, "Queue %s message length %zu > max %d\n",
+ qinst->name, message->len, desc->max_message_size);
+ return -EINVAL;
+ }
+
+ /* NOTE: Constraints similar to rx path exists here as well */
+ for (data_reg = qinst->queue_buff_start,
+ num_words = message->len / sizeof(u32),
+ word_data = (u32 *)message->buf;
+ num_words; num_words--, data_reg += sizeof(u32), word_data++)
+ writel(*word_data, data_reg);
+
+ trail_bytes = message->len % sizeof(u32);
+ if (trail_bytes) {
+ u32 data_trail = *word_data;
+
+ /* Ensure all unused data is 0 */
+ data_trail &= 0xFFFFFFFF >> (8 * (sizeof(u32) - trail_bytes));
+ writel(data_trail, data_reg);
+ data_reg += sizeof(u32);
+ }
+
+ /*
+ * 'data_reg' indicates next register to write. If we did not already
+ * write on tx complete reg(last reg), we must do so for transmit
+ * In addition, we also need to make sure all intermediate data
+ * registers(if any required), are reset to 0 for TISCI backward
+ * compatibility to be maintained.
+ */
+ while (data_reg <= qinst->queue_buff_end) {
+ writel(0, data_reg);
+ data_reg += sizeof(u32);
+ }
+
+ return 0;
+}
+
+/**
+ * ti_msgmgr_queue_rx_irq_req() - RX IRQ request
+ * @dev: device pointer
+ * @d: descriptor for ti_msgmgr
+ * @qinst: Queue instance
+ * @chan: Channel pointer
+ */
+static int ti_msgmgr_queue_rx_irq_req(struct device *dev,
+ const struct ti_msgmgr_desc *d,
+ struct ti_queue_inst *qinst,
+ struct mbox_chan *chan)
+{
+ int ret = 0;
+ char of_rx_irq_name[7];
+ struct device_node *np;
+
+ snprintf(of_rx_irq_name, sizeof(of_rx_irq_name),
+ "rx_%03d", d->is_sproxy ? qinst->proxy_id : qinst->queue_id);
+
+ /* Get the IRQ if not found */
+ if (qinst->irq < 0) {
+ np = of_node_get(dev->of_node);
+ if (!np)
+ return -ENODATA;
+ qinst->irq = of_irq_get_byname(np, of_rx_irq_name);
+ of_node_put(np);
+
+ if (qinst->irq < 0) {
+ dev_err(dev,
+ "QID %d PID %d:No IRQ[%s]: %d\n",
+ qinst->queue_id, qinst->proxy_id,
+ of_rx_irq_name, qinst->irq);
+ return qinst->irq;
+ }
+ }
+
+ /* With the expectation that the IRQ might be shared in SoC */
+ ret = request_irq(qinst->irq, ti_msgmgr_queue_rx_interrupt,
+ IRQF_SHARED, qinst->name, chan);
+ if (ret) {
+ dev_err(dev, "Unable to get IRQ %d on %s(res=%d)\n",
+ qinst->irq, qinst->name, ret);
+ }
+
+ return ret;
+}
+
+/**
+ * ti_msgmgr_queue_startup() - Startup queue
+ * @chan: Channel pointer
+ *
+ * Return: 0 if all goes good, else return corresponding error message
+ */
+static int ti_msgmgr_queue_startup(struct mbox_chan *chan)
+{
+ struct device *dev = chan->mbox->dev;
+ struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
+ struct ti_queue_inst *qinst = chan->con_priv;
+ const struct ti_msgmgr_desc *d = inst->desc;
+ int ret;
+ int msg_count;
+
+ /*
+ * If sproxy is starting and can send messages, we are a Tx thread,
+ * else Rx
+ */
+ if (d->is_sproxy) {
+ qinst->is_tx = (readl(qinst->queue_ctrl) &
+ SPROXY_THREAD_CTRL_DIR_MASK) ? false : true;
+
+ msg_count = ti_msgmgr_queue_get_num_messages(d, qinst);
+
+ if (!msg_count && qinst->is_tx) {
+ dev_err(dev, "%s: Cannot transmit with 0 credits!\n",
+ qinst->name);
+ return -EINVAL;
+ }
+ }
+
+ if (!qinst->is_tx) {
+ /* Allocate usage buffer for rx */
+ qinst->rx_buff = kzalloc(d->max_message_size, GFP_KERNEL);
+ if (!qinst->rx_buff)
+ return -ENOMEM;
+ /* Request IRQ */
+ ret = ti_msgmgr_queue_rx_irq_req(dev, d, qinst, chan);
+ if (ret) {
+ kfree(qinst->rx_buff);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ti_msgmgr_queue_shutdown() - Shutdown the queue
+ * @chan: Channel pointer
+ */
+static void ti_msgmgr_queue_shutdown(struct mbox_chan *chan)
+{
+ struct ti_queue_inst *qinst = chan->con_priv;
+
+ if (!qinst->is_tx) {
+ free_irq(qinst->irq, chan);
+ kfree(qinst->rx_buff);
+ }
+}
+
+/**
+ * ti_msgmgr_of_xlate() - Translation of phandle to queue
+ * @mbox: Mailbox controller
+ * @p: phandle pointer
+ *
+ * Return: Mailbox channel corresponding to the queue, else return error
+ * pointer.
+ */
+static struct mbox_chan *ti_msgmgr_of_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *p)
+{
+ struct ti_msgmgr_inst *inst;
+ int req_qid, req_pid;
+ struct ti_queue_inst *qinst;
+ const struct ti_msgmgr_desc *d;
+ int i, ncells;
+
+ inst = container_of(mbox, struct ti_msgmgr_inst, mbox);
+ if (WARN_ON(!inst))
+ return ERR_PTR(-EINVAL);
+
+ d = inst->desc;
+
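+	/*
+	 * Secure Proxy phandles carry a single cell (the proxy/thread ID),
+	 * while Message Manager phandles carry two cells (queue ID, proxy ID).
+	 */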
+ if (d->is_sproxy)
+ ncells = 1;
+ else
+ ncells = 2;
+ if (p->args_count != ncells) {
+ dev_err(inst->dev, "Invalid arguments in dt[%d]. Must be %d\n",
+ p->args_count, ncells);
+ return ERR_PTR(-EINVAL);
+ }
+ if (ncells == 1) {
+ req_qid = 0;
+ req_pid = p->args[0];
+ } else {
+ req_qid = p->args[0];
+ req_pid = p->args[1];
+ }
+
+ if (d->is_sproxy) {
+ if (req_pid >= d->num_valid_queues)
+ goto err;
+ qinst = &inst->qinsts[req_pid];
+ return qinst->chan;
+ }
+
+ for (qinst = inst->qinsts, i = 0; i < inst->num_valid_queues;
+ i++, qinst++) {
+ if (req_qid == qinst->queue_id && req_pid == qinst->proxy_id)
+ return qinst->chan;
+ }
+
+err:
+ dev_err(inst->dev, "Queue ID %d, Proxy ID %d is wrong on %pOFn\n",
+ req_qid, req_pid, p->np);
+ return ERR_PTR(-ENOENT);
+}
+
+/**
+ * ti_msgmgr_queue_setup() - Setup data structures for each queue instance
+ * @idx: index of the queue
+ * @dev: pointer to the message manager device
+ * @np: pointer to the of node
+ * @inst: Queue instance pointer
+ * @d: Message Manager instance description data
+ * @qd: Queue description data
+ * @qinst: Queue instance pointer
+ * @chan: pointer to mailbox channel
+ *
+ * Return: 0 if all went well, else return corresponding error
+ */
+static int ti_msgmgr_queue_setup(int idx, struct device *dev,
+ struct device_node *np,
+ struct ti_msgmgr_inst *inst,
+ const struct ti_msgmgr_desc *d,
+ const struct ti_msgmgr_valid_queue_desc *qd,
+ struct ti_queue_inst *qinst,
+ struct mbox_chan *chan)
+{
+ char *dir;
+
+ qinst->proxy_id = qd->proxy_id;
+ qinst->queue_id = qd->queue_id;
+
+ if (qinst->queue_id > d->queue_count) {
+ dev_err(dev, "Queue Data [idx=%d] queuid %d > %d\n",
+ idx, qinst->queue_id, d->queue_count);
+ return -ERANGE;
+ }
+
+ if (d->is_sproxy) {
+ qinst->queue_buff_start = inst->queue_proxy_region +
+ SPROXY_THREAD_DATA_OFFSET(qinst->proxy_id,
+ d->data_first_reg);
+ qinst->queue_buff_end = inst->queue_proxy_region +
+ SPROXY_THREAD_DATA_OFFSET(qinst->proxy_id,
+ d->data_last_reg);
+ qinst->queue_state = inst->queue_state_debug_region +
+ SPROXY_THREAD_STATUS_OFFSET(qinst->proxy_id);
+ qinst->queue_ctrl = inst->queue_ctrl_region +
+ SPROXY_THREAD_CTRL_OFFSET(qinst->proxy_id);
+
+		/* XXX: Do NOT read registers here! Some may be unusable. */
+ dir = "thr";
+ snprintf(qinst->name, sizeof(qinst->name), "%s %s_%03d",
+ dev_name(dev), dir, qinst->proxy_id);
+ } else {
+ qinst->queue_buff_start = inst->queue_proxy_region +
+ Q_DATA_OFFSET(qinst->proxy_id, qinst->queue_id,
+ d->data_first_reg);
+ qinst->queue_buff_end = inst->queue_proxy_region +
+ Q_DATA_OFFSET(qinst->proxy_id, qinst->queue_id,
+ d->data_last_reg);
+ qinst->queue_state =
+ inst->queue_state_debug_region +
+ Q_STATE_OFFSET(qinst->queue_id);
+ qinst->is_tx = qd->is_tx;
+ dir = qinst->is_tx ? "tx" : "rx";
+ snprintf(qinst->name, sizeof(qinst->name), "%s %s_%03d_%03d",
+ dev_name(dev), dir, qinst->queue_id, qinst->proxy_id);
+ }
+
+ qinst->chan = chan;
+
+ /* Setup an error value for IRQ - Lazy allocation */
+ qinst->irq = -EINVAL;
+
+ chan->con_priv = qinst;
+
+ dev_dbg(dev, "[%d] qidx=%d pidx=%d irq=%d q_s=%p q_e = %p\n",
+ idx, qinst->queue_id, qinst->proxy_id, qinst->irq,
+ qinst->queue_buff_start, qinst->queue_buff_end);
+ return 0;
+}
+
+/* Queue operations */
+static const struct mbox_chan_ops ti_msgmgr_chan_ops = {
+ .startup = ti_msgmgr_queue_startup,
+ .shutdown = ti_msgmgr_queue_shutdown,
+ .peek_data = ti_msgmgr_queue_peek_data,
+ .last_tx_done = ti_msgmgr_last_tx_done,
+ .send_data = ti_msgmgr_send_data,
+};
+
+/* Keystone K2G SoC integration details */
+static const struct ti_msgmgr_valid_queue_desc k2g_valid_queues[] = {
+ {.queue_id = 0, .proxy_id = 0, .is_tx = true,},
+ {.queue_id = 1, .proxy_id = 0, .is_tx = true,},
+ {.queue_id = 2, .proxy_id = 0, .is_tx = true,},
+ {.queue_id = 3, .proxy_id = 0, .is_tx = true,},
+ {.queue_id = 5, .proxy_id = 2, .is_tx = false,},
+ {.queue_id = 56, .proxy_id = 1, .is_tx = true,},
+ {.queue_id = 57, .proxy_id = 2, .is_tx = false,},
+ {.queue_id = 58, .proxy_id = 3, .is_tx = true,},
+ {.queue_id = 59, .proxy_id = 4, .is_tx = true,},
+ {.queue_id = 60, .proxy_id = 5, .is_tx = true,},
+ {.queue_id = 61, .proxy_id = 6, .is_tx = true,},
+};
+
+static const struct ti_msgmgr_desc k2g_desc = {
+ .queue_count = 64,
+ .max_message_size = 64,
+ .max_messages = 128,
+ .data_region_name = "queue_proxy_region",
+ .status_region_name = "queue_state_debug_region",
+ .data_first_reg = 16,
+ .data_last_reg = 31,
+ .status_cnt_mask = Q_STATE_ENTRY_COUNT_MASK,
+ .tx_polled = false,
+ .valid_queues = k2g_valid_queues,
+ .num_valid_queues = ARRAY_SIZE(k2g_valid_queues),
+ .is_sproxy = false,
+};
+
+static const struct ti_msgmgr_desc am654_desc = {
+ .queue_count = 190,
+ .num_valid_queues = 190,
+ .max_message_size = 60,
+ .data_region_name = "target_data",
+ .status_region_name = "rt",
+ .ctrl_region_name = "scfg",
+ .data_first_reg = 0,
+ .data_last_reg = 14,
+ .status_cnt_mask = SPROXY_THREAD_STATUS_COUNT_MASK,
+ .tx_polled = false,
+ .is_sproxy = true,
+};
+
+static const struct of_device_id ti_msgmgr_of_match[] = {
+ {.compatible = "ti,k2g-message-manager", .data = &k2g_desc},
+ {.compatible = "ti,am654-secure-proxy", .data = &am654_desc},
+ { /* Sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, ti_msgmgr_of_match);
+
+static int ti_msgmgr_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *of_id;
+ struct device_node *np;
+ struct resource *res;
+ const struct ti_msgmgr_desc *desc;
+ struct ti_msgmgr_inst *inst;
+ struct ti_queue_inst *qinst;
+ struct mbox_controller *mbox;
+ struct mbox_chan *chans;
+ int queue_count;
+ int i;
+ int ret = -EINVAL;
+ const struct ti_msgmgr_valid_queue_desc *queue_desc;
+
+ if (!dev->of_node) {
+ dev_err(dev, "no OF information\n");
+ return -EINVAL;
+ }
+ np = dev->of_node;
+
+ of_id = of_match_device(ti_msgmgr_of_match, dev);
+ if (!of_id) {
+ dev_err(dev, "OF data missing\n");
+ return -EINVAL;
+ }
+ desc = of_id->data;
+
+ inst = devm_kzalloc(dev, sizeof(*inst), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ inst->dev = dev;
+ inst->desc = desc;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ desc->data_region_name);
+ inst->queue_proxy_region = devm_ioremap_resource(dev, res);
+ if (IS_ERR(inst->queue_proxy_region))
+ return PTR_ERR(inst->queue_proxy_region);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ desc->status_region_name);
+ inst->queue_state_debug_region = devm_ioremap_resource(dev, res);
+ if (IS_ERR(inst->queue_state_debug_region))
+ return PTR_ERR(inst->queue_state_debug_region);
+
+ if (desc->is_sproxy) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ desc->ctrl_region_name);
+ inst->queue_ctrl_region = devm_ioremap_resource(dev, res);
+ if (IS_ERR(inst->queue_ctrl_region))
+ return PTR_ERR(inst->queue_ctrl_region);
+ }
+
+ dev_dbg(dev, "proxy region=%p, queue_state=%p\n",
+ inst->queue_proxy_region, inst->queue_state_debug_region);
+
+ queue_count = desc->num_valid_queues;
+ if (!queue_count || queue_count > desc->queue_count) {
+ dev_crit(dev, "Invalid Number of queues %d. Max %d\n",
+ queue_count, desc->queue_count);
+ return -ERANGE;
+ }
+ inst->num_valid_queues = queue_count;
+
+ qinst = devm_kcalloc(dev, queue_count, sizeof(*qinst), GFP_KERNEL);
+ if (!qinst)
+ return -ENOMEM;
+ inst->qinsts = qinst;
+
+ chans = devm_kcalloc(dev, queue_count, sizeof(*chans), GFP_KERNEL);
+ if (!chans)
+ return -ENOMEM;
+ inst->chans = chans;
+
+ if (desc->is_sproxy) {
+ struct ti_msgmgr_valid_queue_desc sproxy_desc;
+
+ /* All proxies may be valid in Secure Proxy instance */
+ for (i = 0; i < queue_count; i++, qinst++, chans++) {
+ sproxy_desc.queue_id = 0;
+ sproxy_desc.proxy_id = i;
+ ret = ti_msgmgr_queue_setup(i, dev, np, inst,
+ desc, &sproxy_desc, qinst,
+ chans);
+ if (ret)
+ return ret;
+ }
+ } else {
+ /* Only Some proxies are valid in Message Manager */
+ for (i = 0, queue_desc = desc->valid_queues;
+ i < queue_count; i++, qinst++, chans++, queue_desc++) {
+ ret = ti_msgmgr_queue_setup(i, dev, np, inst,
+ desc, queue_desc, qinst,
+ chans);
+ if (ret)
+ return ret;
+ }
+ }
+
+ mbox = &inst->mbox;
+ mbox->dev = dev;
+ mbox->ops = &ti_msgmgr_chan_ops;
+ mbox->chans = inst->chans;
+ mbox->num_chans = inst->num_valid_queues;
+ mbox->txdone_irq = false;
+ mbox->txdone_poll = desc->tx_polled;
+ if (desc->tx_polled)
+ mbox->txpoll_period = desc->tx_poll_timeout_ms;
+ mbox->of_xlate = ti_msgmgr_of_xlate;
+
+ platform_set_drvdata(pdev, inst);
+ ret = devm_mbox_controller_register(dev, mbox);
+ if (ret)
+ dev_err(dev, "Failed to register mbox_controller(%d)\n", ret);
+
+ return ret;
+}
+
+static struct platform_driver ti_msgmgr_driver = {
+ .probe = ti_msgmgr_probe,
+ .driver = {
+ .name = "ti-msgmgr",
+ .of_match_table = of_match_ptr(ti_msgmgr_of_match),
+ },
+};
+module_platform_driver(ti_msgmgr_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI message manager driver");
+MODULE_AUTHOR("Nishanth Menon");
+MODULE_ALIAS("platform:ti-msgmgr");
diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
new file mode 100644
index 000000000..be06de791
--- /dev/null
+++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
@@ -0,0 +1,725 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Inter Processor Interrupt(IPI) Mailbox Driver
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox/zynqmp-ipi-message.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+
+/* IPI agent ID any */
+#define IPI_ID_ANY 0xFFUL
+
+/* indicate if ZynqMP IPI mailbox driver uses SMC calls or HVC calls */
+#define USE_SMC 0
+#define USE_HVC 1
+
+/* Default IPI SMC function IDs */
+#define SMC_IPI_MAILBOX_OPEN 0x82001000U
+#define SMC_IPI_MAILBOX_RELEASE 0x82001001U
+#define SMC_IPI_MAILBOX_STATUS_ENQUIRY 0x82001002U
+#define SMC_IPI_MAILBOX_NOTIFY 0x82001003U
+#define SMC_IPI_MAILBOX_ACK 0x82001004U
+#define SMC_IPI_MAILBOX_ENABLE_IRQ 0x82001005U
+#define SMC_IPI_MAILBOX_DISABLE_IRQ 0x82001006U
+
+/* IPI SMC Macros */
+#define IPI_SMC_ENQUIRY_DIRQ_MASK	0x00000001UL /* Flag to indicate if the
+						      * notification interrupt
+						      * is to be disabled.
+						      */
+#define IPI_SMC_ACK_EIRQ_MASK	0x00000001UL /* Flag to indicate if the
+						      * notification interrupt
+						      * is to be enabled.
+						      */
+
+/* IPI mailbox status */
+#define IPI_MB_STATUS_IDLE 0
+#define IPI_MB_STATUS_SEND_PENDING 1
+#define IPI_MB_STATUS_RECV_PENDING 2
+
+#define IPI_MB_CHNL_TX 0 /* IPI mailbox TX channel */
+#define IPI_MB_CHNL_RX 1 /* IPI mailbox RX channel */
+
+/**
+ * struct zynqmp_ipi_mchan - Description of a Xilinx ZynqMP IPI mailbox channel
+ * @is_opened: indicate if the IPI channel is opened
+ * @req_buf: local to remote request buffer start address
+ * @resp_buf: local to remote response buffer start address
+ * @req_buf_size: request buffer size
+ * @resp_buf_size: response buffer size
+ * @rx_buf: receive buffer to pass received message to client
+ * @chan_type: channel type
+ */
+struct zynqmp_ipi_mchan {
+ int is_opened;
+ void __iomem *req_buf;
+ void __iomem *resp_buf;
+ void *rx_buf;
+ size_t req_buf_size;
+ size_t resp_buf_size;
+ unsigned int chan_type;
+};
+
+/**
+ * struct zynqmp_ipi_mbox - Description of a ZynqMP IPI mailbox
+ * platform data.
+ * @pdata: pointer to the IPI private data
+ * @dev: device pointer corresponding to the Xilinx ZynqMP
+ * IPI mailbox
+ * @remote_id: remote IPI agent ID
+ * @mbox: mailbox Controller
+ * @mchans: array for channels, tx channel and rx channel.
+ */
+struct zynqmp_ipi_mbox {
+ struct zynqmp_ipi_pdata *pdata;
+ struct device dev;
+ u32 remote_id;
+ struct mbox_controller mbox;
+ struct zynqmp_ipi_mchan mchans[2];
+};
+
+/**
+ * struct zynqmp_ipi_pdata - Description of a ZynqMP IPI agent platform data.
+ *
+ * @dev: device pointer corresponding to the Xilinx ZynqMP
+ * IPI agent
+ * @irq: IPI agent interrupt ID
+ * @method: IPI SMC or HVC is going to be used
+ * @local_id: local IPI agent ID
+ * @num_mboxes: number of mailboxes of this IPI agent
+ * @ipi_mboxes: IPI mailboxes of this IPI agent
+ */
+struct zynqmp_ipi_pdata {
+ struct device *dev;
+ int irq;
+ unsigned int method;
+ u32 local_id;
+ int num_mboxes;
+ struct zynqmp_ipi_mbox ipi_mboxes[];
+};
+
+static struct device_driver zynqmp_ipi_mbox_driver = {
+ .owner = THIS_MODULE,
+ .name = "zynqmp-ipi-mbox",
+};
+
+static void zynqmp_ipi_fw_call(struct zynqmp_ipi_mbox *ipi_mbox,
+ unsigned long a0, unsigned long a3,
+ struct arm_smccc_res *res)
+{
+ struct zynqmp_ipi_pdata *pdata = ipi_mbox->pdata;
+ unsigned long a1, a2;
+
+ a1 = pdata->local_id;
+ a2 = ipi_mbox->remote_id;
+ if (pdata->method == USE_SMC)
+ arm_smccc_smc(a0, a1, a2, a3, 0, 0, 0, 0, res);
+ else
+ arm_smccc_hvc(a0, a1, a2, a3, 0, 0, 0, 0, res);
+}
+
+/**
+ * zynqmp_ipi_interrupt - Interrupt handler for IPI notification
+ *
+ * @irq: Interrupt number
+ * @data: ZynqMP IPI mailbox platform data.
+ *
+ * Return: -EINVAL if there is no instance
+ * IRQ_NONE if the interrupt is not ours.
+ * IRQ_HANDLED if the rx interrupt was successfully handled.
+ */
+static irqreturn_t zynqmp_ipi_interrupt(int irq, void *data)
+{
+ struct zynqmp_ipi_pdata *pdata = data;
+ struct mbox_chan *chan;
+ struct zynqmp_ipi_mbox *ipi_mbox;
+ struct zynqmp_ipi_mchan *mchan;
+ struct zynqmp_ipi_message *msg;
+ u64 arg0, arg3;
+ struct arm_smccc_res res;
+ int ret, i, status = IRQ_NONE;
+
+ (void)irq;
+ arg0 = SMC_IPI_MAILBOX_STATUS_ENQUIRY;
+ arg3 = IPI_SMC_ENQUIRY_DIRQ_MASK;
+ for (i = 0; i < pdata->num_mboxes; i++) {
+ ipi_mbox = &pdata->ipi_mboxes[i];
+ mchan = &ipi_mbox->mchans[IPI_MB_CHNL_RX];
+ chan = &ipi_mbox->mbox.chans[IPI_MB_CHNL_RX];
+ zynqmp_ipi_fw_call(ipi_mbox, arg0, arg3, &res);
+ ret = (int)(res.a0 & 0xFFFFFFFF);
+ if (ret > 0 && ret & IPI_MB_STATUS_RECV_PENDING) {
+ if (mchan->is_opened) {
+ msg = mchan->rx_buf;
+ msg->len = mchan->req_buf_size;
+ memcpy_fromio(msg->data, mchan->req_buf,
+ msg->len);
+ mbox_chan_received_data(chan, (void *)msg);
+ status = IRQ_HANDLED;
+ }
+ }
+ }
+ return status;
+}
+
+/**
+ * zynqmp_ipi_peek_data - Peek to see if there are any rx messages.
+ *
+ * @chan: Channel Pointer
+ *
+ * Return: 'true' if there is pending rx data, 'false' if there is none.
+ */
+static bool zynqmp_ipi_peek_data(struct mbox_chan *chan)
+{
+ struct device *dev = chan->mbox->dev;
+ struct zynqmp_ipi_mbox *ipi_mbox = dev_get_drvdata(dev);
+ struct zynqmp_ipi_mchan *mchan = chan->con_priv;
+ int ret;
+ u64 arg0;
+ struct arm_smccc_res res;
+
+ if (WARN_ON(!ipi_mbox)) {
+ dev_err(dev, "no platform drv data??\n");
+ return false;
+ }
+
+ arg0 = SMC_IPI_MAILBOX_STATUS_ENQUIRY;
+ zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
+ ret = (int)(res.a0 & 0xFFFFFFFF);
+
+ if (mchan->chan_type == IPI_MB_CHNL_TX) {
+ /* TX channel, check if the message has been acked
+ * by the remote, if yes, response is available.
+ */
+ if (ret < 0 || ret & IPI_MB_STATUS_SEND_PENDING)
+ return false;
+ else
+ return true;
+ } else if (ret > 0 && ret & IPI_MB_STATUS_RECV_PENDING) {
+ /* RX channel, check if there is message arrived. */
+ return true;
+ }
+ return false;
+}
+
+/**
+ * zynqmp_ipi_last_tx_done - See if the last tx message is sent
+ *
+ * @chan: Channel pointer
+ *
+ * Return: 'true' if no pending tx data, 'false' if there are any.
+ */
+static bool zynqmp_ipi_last_tx_done(struct mbox_chan *chan)
+{
+ struct device *dev = chan->mbox->dev;
+ struct zynqmp_ipi_mbox *ipi_mbox = dev_get_drvdata(dev);
+ struct zynqmp_ipi_mchan *mchan = chan->con_priv;
+ int ret;
+ u64 arg0;
+ struct arm_smccc_res res;
+
+ if (WARN_ON(!ipi_mbox)) {
+ dev_err(dev, "no platform drv data??\n");
+ return false;
+ }
+
+ if (mchan->chan_type == IPI_MB_CHNL_TX) {
+ /* We only need to check if the message been taken
+ * by the remote in the TX channel
+ */
+ arg0 = SMC_IPI_MAILBOX_STATUS_ENQUIRY;
+ zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
+ /* Check the SMC call status, a0 of the result */
+ ret = (int)(res.a0 & 0xFFFFFFFF);
+ if (ret < 0 || ret & IPI_MB_STATUS_SEND_PENDING)
+ return false;
+ return true;
+ }
+ /* Always true for the response message in RX channel */
+ return true;
+}
+
+/**
+ * zynqmp_ipi_send_data - Send data
+ *
+ * @chan: Channel Pointer
+ * @data: Message Pointer
+ *
+ * Return: 0 if all goes good, else appropriate error messages.
+ */
+static int zynqmp_ipi_send_data(struct mbox_chan *chan, void *data)
+{
+ struct device *dev = chan->mbox->dev;
+ struct zynqmp_ipi_mbox *ipi_mbox = dev_get_drvdata(dev);
+ struct zynqmp_ipi_mchan *mchan = chan->con_priv;
+ struct zynqmp_ipi_message *msg = data;
+ u64 arg0;
+ struct arm_smccc_res res;
+
+ if (WARN_ON(!ipi_mbox)) {
+ dev_err(dev, "no platform drv data??\n");
+ return -EINVAL;
+ }
+
+ if (mchan->chan_type == IPI_MB_CHNL_TX) {
+ /* Send request message */
+ if (msg && msg->len > mchan->req_buf_size) {
+ dev_err(dev, "channel %d message length %u > max %lu\n",
+ mchan->chan_type, (unsigned int)msg->len,
+ mchan->req_buf_size);
+ return -EINVAL;
+ }
+ if (msg && msg->len)
+ memcpy_toio(mchan->req_buf, msg->data, msg->len);
+ /* Kick IPI mailbox to send message */
+ arg0 = SMC_IPI_MAILBOX_NOTIFY;
+ zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
+ } else {
+ /* Send response message */
+ if (msg && msg->len > mchan->resp_buf_size) {
+ dev_err(dev, "channel %d message length %u > max %lu\n",
+ mchan->chan_type, (unsigned int)msg->len,
+ mchan->resp_buf_size);
+ return -EINVAL;
+ }
+ if (msg && msg->len)
+ memcpy_toio(mchan->resp_buf, msg->data, msg->len);
+ arg0 = SMC_IPI_MAILBOX_ACK;
+ zynqmp_ipi_fw_call(ipi_mbox, arg0, IPI_SMC_ACK_EIRQ_MASK,
+ &res);
+ }
+ return 0;
+}
+
+/**
+ * zynqmp_ipi_startup - Startup the IPI channel
+ *
+ * @chan: Channel pointer
+ *
+ * Return: 0 on success, else the corresponding error code
+ */
+static int zynqmp_ipi_startup(struct mbox_chan *chan)
+{
+ struct device *dev = chan->mbox->dev;
+ struct zynqmp_ipi_mbox *ipi_mbox = dev_get_drvdata(dev);
+ struct zynqmp_ipi_mchan *mchan = chan->con_priv;
+ u64 arg0;
+ struct arm_smccc_res res;
+ int ret = 0;
+ unsigned int nchan_type;
+
+ if (mchan->is_opened)
+ return 0;
+
+ /* If the other channel has not been opened yet, open the IPI mailbox */
+ nchan_type = (mchan->chan_type + 1) % 2;
+ if (!ipi_mbox->mchans[nchan_type].is_opened) {
+ arg0 = SMC_IPI_MAILBOX_OPEN;
+ zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
+ /* Check the SMC call status, a0 of the result */
+ ret = (int)(res.a0 & 0xFFFFFFFF);
+ if (ret < 0) {
+ dev_err(dev, "SMC to open the IPI channel failed.\n");
+ return ret;
+ }
+ ret = 0;
+ }
+
+ /* If it is the RX channel, enable the IPI notification interrupt */
+ if (mchan->chan_type == IPI_MB_CHNL_RX) {
+ arg0 = SMC_IPI_MAILBOX_ENABLE_IRQ;
+ zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
+ }
+ mchan->is_opened = 1;
+
+ return ret;
+}
+
+/**
+ * zynqmp_ipi_shutdown - Shutdown the IPI channel
+ *
+ * @chan: Channel pointer
+ */
+static void zynqmp_ipi_shutdown(struct mbox_chan *chan)
+{
+ struct device *dev = chan->mbox->dev;
+ struct zynqmp_ipi_mbox *ipi_mbox = dev_get_drvdata(dev);
+ struct zynqmp_ipi_mchan *mchan = chan->con_priv;
+ u64 arg0;
+ struct arm_smccc_res res;
+ unsigned int chan_type;
+
+ if (!mchan->is_opened)
+ return;
+
+ /* If it is the RX channel, disable the notification interrupt */
+ chan_type = mchan->chan_type;
+ if (chan_type == IPI_MB_CHNL_RX) {
+ arg0 = SMC_IPI_MAILBOX_DISABLE_IRQ;
+ zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
+ }
+ /* Release IPI mailbox if no other channel is opened */
+ chan_type = (chan_type + 1) % 2;
+ if (!ipi_mbox->mchans[chan_type].is_opened) {
+ arg0 = SMC_IPI_MAILBOX_RELEASE;
+ zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
+ }
+
+ mchan->is_opened = 0;
+}
+
+/* ZynqMP IPI mailbox operations */
+static const struct mbox_chan_ops zynqmp_ipi_chan_ops = {
+ .startup = zynqmp_ipi_startup,
+ .shutdown = zynqmp_ipi_shutdown,
+ .peek_data = zynqmp_ipi_peek_data,
+ .last_tx_done = zynqmp_ipi_last_tx_done,
+ .send_data = zynqmp_ipi_send_data,
+};
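+
+/*
+ * Illustrative client-side usage (not part of this driver): consumers of
+ * these channels go through the generic mailbox API rather than calling the
+ * channel ops above directly. The callback and channel names below are
+ * hypothetical and depend on the client's DT node; the flow is roughly:
+ *
+ *	static void my_rx_callback(struct mbox_client *cl, void *data)
+ *	{
+ *		struct zynqmp_ipi_message *msg = data;
+ *		// msg->len bytes of request data are in msg->data
+ *	}
+ *
+ *	struct mbox_client cl = {
+ *		.dev = &pdev->dev,
+ *		.rx_callback = my_rx_callback,
+ *		.tx_block = true,
+ *		.tx_tout = 100,
+ *	};
+ *	struct mbox_chan *tx = mbox_request_channel_byname(&cl, "tx");
+ *	struct mbox_chan *rx = mbox_request_channel_byname(&cl, "rx");
+ *
+ *	ret = mbox_send_message(tx, ipi_msg);	// ends up in .send_data above
+ *	...
+ *	mbox_free_channel(tx);
+ *	mbox_free_channel(rx);
+ */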
+
+/**
+ * zynqmp_ipi_of_xlate - Translate a DT phandle to an IPI mailbox channel
+ *
+ * @mbox: mailbox controller pointer
+ * @p: phandle pointer
+ *
+ * Return: Mailbox channel on success, else an error pointer.
+ */
+static struct mbox_chan *zynqmp_ipi_of_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *p)
+{
+ struct mbox_chan *chan;
+ struct device *dev = mbox->dev;
+ unsigned int chan_type;
+
+ /* Only supports TX and RX channels */
+ chan_type = p->args[0];
+ if (chan_type != IPI_MB_CHNL_TX && chan_type != IPI_MB_CHNL_RX) {
+ dev_err(dev, "req chnl failure: invalid chnl type %u.\n",
+ chan_type);
+ return ERR_PTR(-EINVAL);
+ }
+ chan = &mbox->chans[chan_type];
+ return chan;
+}
+
+static const struct of_device_id zynqmp_ipi_of_match[] = {
+ { .compatible = "xlnx,zynqmp-ipi-mailbox" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, zynqmp_ipi_of_match);
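+
+/*
+ * Illustrative devicetree layout (example values only; the addresses,
+ * interrupt and IPI agent IDs are board specific). Each child node describes
+ * one remote agent and provides the message buffer regions referenced by
+ * name in zynqmp_ipi_mbox_get_buf_res(); clients select the TX or RX channel
+ * with the single mbox cell (assumed here to be 0 = TX, 1 = RX):
+ *
+ *	zynqmp_ipi {
+ *		compatible = "xlnx,zynqmp-ipi-mailbox";
+ *		interrupts = <0 29 4>;
+ *		xlnx,ipi-id = <0>;
+ *		#address-cells = <2>;
+ *		#size-cells = <2>;
+ *		ranges;
+ *
+ *		ipi_mailbox_rpu0: mailbox@ff990400 {
+ *			reg = <0x0 0xff990400 0x0 0x20>,
+ *			      <0x0 0xff990420 0x0 0x20>,
+ *			      <0x0 0xff990080 0x0 0x20>,
+ *			      <0x0 0xff9900a0 0x0 0x20>;
+ *			reg-names = "local_request_region",
+ *				    "local_response_region",
+ *				    "remote_request_region",
+ *				    "remote_response_region";
+ *			#mbox-cells = <1>;
+ *			xlnx,ipi-id = <1>;
+ *		};
+ *	};
+ *
+ *	client {
+ *		mboxes = <&ipi_mailbox_rpu0 0>, <&ipi_mailbox_rpu0 1>;
+ *		mbox-names = "tx", "rx";
+ *	};
+ */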
+
+/**
+ * zynqmp_ipi_mbox_get_buf_res - Get buffer resource from the IPI dev node
+ *
+ * @node: IPI mbox device child node
+ * @name: name of the IPI buffer
+ * @res: pointer to where the resource information will be stored.
+ *
+ * Return: 0 for success, negative value for failure
+ */
+static int zynqmp_ipi_mbox_get_buf_res(struct device_node *node,
+ const char *name,
+ struct resource *res)
+{
+ int ret, index;
+
+ index = of_property_match_string(node, "reg-names", name);
+ if (index >= 0) {
+ ret = of_address_to_resource(node, index, res);
+ if (ret < 0)
+ return -EINVAL;
+ return 0;
+ }
+ return -ENODEV;
+}
+
+/**
+ * zynqmp_ipi_mbox_dev_release() - release callback for an IPI mbox device
+ *
+ * @dev: the ipi mailbox device
+ *
+ * This empty callback avoids the kernel warning about unregistering a
+ * device that has no release() function.
+ *
+ */
+static void zynqmp_ipi_mbox_dev_release(struct device *dev)
+{
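+ /*
+ * Intentionally empty: the struct device is embedded in driver data that
+ * is devm-allocated against the parent platform device, so there is
+ * nothing to free here.
+ */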
+ (void)dev;
+}
+
+/**
+ * zynqmp_ipi_mbox_probe - probe IPI mailbox resource from device node
+ *
+ * @ipi_mbox: pointer to IPI mailbox private data structure
+ * @node: IPI mailbox device node
+ *
+ * Return: 0 for success, negative value for failure
+ */
+static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox,
+ struct device_node *node)
+{
+ struct zynqmp_ipi_mchan *mchan;
+ struct mbox_chan *chans;
+ struct mbox_controller *mbox;
+ struct resource res;
+ struct device *dev, *mdev;
+ const char *name;
+ int ret;
+
+ dev = ipi_mbox->pdata->dev;
+ /* Initialize dev for IPI mailbox */
+ ipi_mbox->dev.parent = dev;
+ ipi_mbox->dev.release = NULL;
+ ipi_mbox->dev.of_node = node;
+ dev_set_name(&ipi_mbox->dev, "%s", of_node_full_name(node));
+ dev_set_drvdata(&ipi_mbox->dev, ipi_mbox);
+ ipi_mbox->dev.release = zynqmp_ipi_mbox_dev_release;
+ ipi_mbox->dev.driver = &zynqmp_ipi_mbox_driver;
+ ret = device_register(&ipi_mbox->dev);
+ if (ret) {
+ dev_err(dev, "Failed to register ipi mbox dev.\n");
+ put_device(&ipi_mbox->dev);
+ return ret;
+ }
+ mdev = &ipi_mbox->dev;
+
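+ /*
+ * TX channel: the local request region carries requests we send to the
+ * remote; the remote response region carries the remote's reply data.
+ */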
+ mchan = &ipi_mbox->mchans[IPI_MB_CHNL_TX];
+ name = "local_request_region";
+ ret = zynqmp_ipi_mbox_get_buf_res(node, name, &res);
+ if (!ret) {
+ mchan->req_buf_size = resource_size(&res);
+ mchan->req_buf = devm_ioremap(mdev, res.start,
+ mchan->req_buf_size);
+ if (!mchan->req_buf) {
+ dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
+ return -ENOMEM;
+ }
+ } else if (ret != -ENODEV) {
+ dev_err(mdev, "Unmatched resource %s, %d.\n", name, ret);
+ return ret;
+ }
+
+ name = "remote_response_region";
+ ret = zynqmp_ipi_mbox_get_buf_res(node, name, &res);
+ if (!ret) {
+ mchan->resp_buf_size = resource_size(&res);
+ mchan->resp_buf = devm_ioremap(mdev, res.start,
+ mchan->resp_buf_size);
+ if (!mchan->resp_buf) {
+ dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
+ return -ENOMEM;
+ }
+ } else if (ret != -ENODEV) {
+ dev_err(mdev, "Unmatched resource %s.\n", name);
+ return ret;
+ }
+ mchan->rx_buf = devm_kzalloc(mdev,
+ mchan->resp_buf_size +
+ sizeof(struct zynqmp_ipi_message),
+ GFP_KERNEL);
+ if (!mchan->rx_buf)
+ return -ENOMEM;
+
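+ /*
+ * RX channel: the remote request region carries requests arriving from
+ * the remote; the local response region carries the reply data we send
+ * back when acking.
+ */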
+ mchan = &ipi_mbox->mchans[IPI_MB_CHNL_RX];
+ name = "remote_request_region";
+ ret = zynqmp_ipi_mbox_get_buf_res(node, name, &res);
+ if (!ret) {
+ mchan->req_buf_size = resource_size(&res);
+ mchan->req_buf = devm_ioremap(mdev, res.start,
+ mchan->req_buf_size);
+ if (!mchan->req_buf) {
+ dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
+ return -ENOMEM;
+ }
+ } else if (ret != -ENODEV) {
+ dev_err(mdev, "Unmatched resource %s.\n", name);
+ return ret;
+ }
+
+ name = "local_response_region";
+ ret = zynqmp_ipi_mbox_get_buf_res(node, name, &res);
+ if (!ret) {
+ mchan->resp_buf_size = resource_size(&res);
+ mchan->resp_buf = devm_ioremap(mdev, res.start,
+ mchan->resp_buf_size);
+ if (!mchan->resp_buf) {
+ dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
+ return -ENOMEM;
+ }
+ } else if (ret != -ENODEV) {
+ dev_err(mdev, "Unmatched resource %s.\n", name);
+ return ret;
+ }
+ mchan->rx_buf = devm_kzalloc(mdev,
+ mchan->resp_buf_size +
+ sizeof(struct zynqmp_ipi_message),
+ GFP_KERNEL);
+ if (!mchan->rx_buf)
+ return -ENOMEM;
+
+ /* Get the IPI remote agent ID */
+ ret = of_property_read_u32(node, "xlnx,ipi-id", &ipi_mbox->remote_id);
+ if (ret < 0) {
+ dev_err(dev, "No IPI remote ID is specified.\n");
+ return ret;
+ }
+
+ mbox = &ipi_mbox->mbox;
+ mbox->dev = mdev;
+ mbox->ops = &zynqmp_ipi_chan_ops;
+ mbox->num_chans = 2;
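+ /*
+ * No TX-done interrupt is used; the mailbox core polls last_tx_done()
+ * every txpoll_period milliseconds to detect completion.
+ */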
+ mbox->txdone_irq = false;
+ mbox->txdone_poll = true;
+ mbox->txpoll_period = 5;
+ mbox->of_xlate = zynqmp_ipi_of_xlate;
+ chans = devm_kzalloc(mdev, 2 * sizeof(*chans), GFP_KERNEL);
+ if (!chans)
+ return -ENOMEM;
+ mbox->chans = chans;
+ chans[IPI_MB_CHNL_TX].con_priv = &ipi_mbox->mchans[IPI_MB_CHNL_TX];
+ chans[IPI_MB_CHNL_RX].con_priv = &ipi_mbox->mchans[IPI_MB_CHNL_RX];
+ ipi_mbox->mchans[IPI_MB_CHNL_TX].chan_type = IPI_MB_CHNL_TX;
+ ipi_mbox->mchans[IPI_MB_CHNL_RX].chan_type = IPI_MB_CHNL_RX;
+ ret = devm_mbox_controller_register(mdev, mbox);
+ if (ret)
+ dev_err(mdev,
+ "Failed to register mbox_controller(%d)\n", ret);
+ else
+ dev_info(mdev,
+ "Registered ZynqMP IPI mbox with TX/RX channels.\n");
+ return ret;
+}
+
+/**
+ * zynqmp_ipi_free_mboxes - Free IPI mailbox devices
+ *
+ * @pdata: IPI private data
+ */
+static void zynqmp_ipi_free_mboxes(struct zynqmp_ipi_pdata *pdata)
+{
+ struct zynqmp_ipi_mbox *ipi_mbox;
+ int i;
+
+ /* Walk the num_mboxes entries in reverse */
+ for (i = pdata->num_mboxes - 1; i >= 0; i--) {
+ ipi_mbox = &pdata->ipi_mboxes[i];
+ if (ipi_mbox->dev.parent) {
+ mbox_controller_unregister(&ipi_mbox->mbox);
+ if (device_is_registered(&ipi_mbox->dev))
+ device_unregister(&ipi_mbox->dev);
+ }
+ }
+}
+
+static int zynqmp_ipi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *nc, *np = pdev->dev.of_node;
+ struct zynqmp_ipi_pdata *pdata;
+ struct zynqmp_ipi_mbox *mbox;
+ int num_mboxes, ret = -EINVAL;
+
+ num_mboxes = of_get_available_child_count(np);
+ if (num_mboxes == 0) {
+ dev_err(dev, "mailbox nodes not available\n");
+ return -EINVAL;
+ }
+
+ pdata = devm_kzalloc(dev, struct_size(pdata, ipi_mboxes, num_mboxes),
+ GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+ pdata->dev = dev;
+
+ /* Get the IPI local agent ID */
+ ret = of_property_read_u32(np, "xlnx,ipi-id", &pdata->local_id);
+ if (ret < 0) {
+ dev_err(dev, "No IPI local ID is specified.\n");
+ return ret;
+ }
+
+ pdata->num_mboxes = num_mboxes;
+
+ mbox = pdata->ipi_mboxes;
+ for_each_available_child_of_node(np, nc) {
+ mbox->pdata = pdata;
+ ret = zynqmp_ipi_mbox_probe(mbox, nc);
+ if (ret) {
+ of_node_put(nc);
+ dev_err(dev, "failed to probe subdev.\n");
+ ret = -EINVAL;
+ goto free_mbox_dev;
+ }
+ mbox++;
+ }
+
+ /* IPI IRQ */
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto free_mbox_dev;
+
+ pdata->irq = ret;
+ ret = devm_request_irq(dev, pdata->irq, zynqmp_ipi_interrupt,
+ IRQF_SHARED, dev_name(dev), pdata);
+ if (ret) {
+ dev_err(dev, "IRQ %d is not requested successfully.\n",
+ pdata->irq);
+ goto free_mbox_dev;
+ }
+
+ platform_set_drvdata(pdev, pdata);
+ return ret;
+
+free_mbox_dev:
+ zynqmp_ipi_free_mboxes(pdata);
+ return ret;
+}
+
+static int zynqmp_ipi_remove(struct platform_device *pdev)
+{
+ struct zynqmp_ipi_pdata *pdata;
+
+ pdata = platform_get_drvdata(pdev);
+ zynqmp_ipi_free_mboxes(pdata);
+
+ return 0;
+}
+
+static struct platform_driver zynqmp_ipi_driver = {
+ .probe = zynqmp_ipi_probe,
+ .remove = zynqmp_ipi_remove,
+ .driver = {
+ .name = "zynqmp-ipi",
+ .of_match_table = of_match_ptr(zynqmp_ipi_of_match),
+ },
+};
+
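+/*
+ * Registered at subsys_initcall time so the mailbox controller is available
+ * before client drivers (e.g. remoteproc) that depend on it probe.
+ */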
+static int __init zynqmp_ipi_init(void)
+{
+ return platform_driver_register(&zynqmp_ipi_driver);
+}
+subsys_initcall(zynqmp_ipi_init);
+
+static void __exit zynqmp_ipi_exit(void)
+{
+ platform_driver_unregister(&zynqmp_ipi_driver);
+}
+module_exit(zynqmp_ipi_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Xilinx ZynqMP IPI Mailbox driver");
+MODULE_AUTHOR("Xilinx Inc.");