Diffstat (limited to 'drivers/mailbox')
-rw-r--r--  drivers/mailbox/Kconfig  298
-rw-r--r--  drivers/mailbox/Makefile  64
-rw-r--r--  drivers/mailbox/apple-mailbox.c  441
-rw-r--r--  drivers/mailbox/arm_mhu.c  176
-rw-r--r--  drivers/mailbox/arm_mhu_db.c  351
-rw-r--r--  drivers/mailbox/arm_mhuv2.c  1137
-rw-r--r--  drivers/mailbox/armada-37xx-rwtm-mailbox.c  206
-rw-r--r--  drivers/mailbox/bcm-flexrm-mailbox.c  1686
-rw-r--r--  drivers/mailbox/bcm-pdc-mailbox.c  1639
-rw-r--r--  drivers/mailbox/bcm2835-mailbox.c  199
-rw-r--r--  drivers/mailbox/hi3660-mailbox.c  298
-rw-r--r--  drivers/mailbox/hi6220-mailbox.c  371
-rw-r--r--  drivers/mailbox/imx-mailbox.c  1058
-rw-r--r--  drivers/mailbox/mailbox-altera.c  361
-rw-r--r--  drivers/mailbox/mailbox-mpfs.c  287
-rw-r--r--  drivers/mailbox/mailbox-sti.c  497
-rw-r--r--  drivers/mailbox/mailbox-test.c  453
-rw-r--r--  drivers/mailbox/mailbox-xgene-slimpro.c  260
-rw-r--r--  drivers/mailbox/mailbox.c  666
-rw-r--r--  drivers/mailbox/mailbox.h  10
-rw-r--r--  drivers/mailbox/mtk-adsp-mailbox.c  185
-rw-r--r--  drivers/mailbox/mtk-cmdq-mailbox.c  729
-rw-r--r--  drivers/mailbox/omap-mailbox.c  918
-rw-r--r--  drivers/mailbox/pcc.c  764
-rw-r--r--  drivers/mailbox/pl320-ipc.c  187
-rw-r--r--  drivers/mailbox/platform_mhu.c  184
-rw-r--r--  drivers/mailbox/qcom-apcs-ipc-mailbox.c  192
-rw-r--r--  drivers/mailbox/qcom-ipcc.c  369
-rw-r--r--  drivers/mailbox/rockchip-mailbox.c  255
-rw-r--r--  drivers/mailbox/sprd-mailbox.c  430
-rw-r--r--  drivers/mailbox/stm32-ipcc.c  392
-rw-r--r--  drivers/mailbox/sun6i-msgbox.c  319
-rw-r--r--  drivers/mailbox/tegra-hsp.c  963
-rw-r--r--  drivers/mailbox/ti-msgmgr.c  940
-rw-r--r--  drivers/mailbox/zynqmp-ipi-mailbox.c  724
35 files changed, 18009 insertions, 0 deletions
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
new file mode 100644
index 000000000..bc2e265cb
--- /dev/null
+++ b/drivers/mailbox/Kconfig
@@ -0,0 +1,298 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig MAILBOX
+ bool "Mailbox Hardware Support"
+ help
+ Mailbox is a framework to control hardware communication between
+ on-chip processors through queued messages and interrupt driven
+ signals. Say Y if your platform supports hardware mailboxes.
+
+if MAILBOX
+
+config APPLE_MAILBOX
+ tristate "Apple Mailbox driver"
+ depends on ARCH_APPLE || (ARM64 && COMPILE_TEST)
+ default ARCH_APPLE
+ help
+ Apple SoCs have various co-processors required for certain
+ peripherals to work (NVMe, display controller, etc.). This
+ driver adds support for the mailbox controller used to
+ communicate with those.
+
+ Say Y here if you have an Apple SoC.
+
+config ARM_MHU
+ tristate "ARM MHU Mailbox"
+ depends on ARM_AMBA
+ help
+ Say Y here if you want to build the ARM MHU controller driver.
+ The controller has 3 mailbox channels, the last of which can be
+ used in Secure mode only.
+
+config ARM_MHU_V2
+ tristate "ARM MHUv2 Mailbox"
+ depends on ARM_AMBA
+ help
+ Say Y here if you want to build the ARM MHUv2 controller driver,
+ which provides unidirectional mailboxes between processing elements.
+
+config IMX_MBOX
+ tristate "i.MX Mailbox"
+ depends on ARCH_MXC || COMPILE_TEST
+ help
+ Mailbox implementation for i.MX Messaging Unit (MU).
+
+config PLATFORM_MHU
+ tristate "Platform MHU Mailbox"
+ depends on OF
+ depends on HAS_IOMEM
+ help
+ Say Y here if you want to build a platform-specific variant of the
+ MHU controller driver.
+ The controller has a maximum of 3 mailbox channels, the last of
+ which can be used in Secure mode only.
+
+config PL320_MBOX
+ bool "ARM PL320 Mailbox"
+ depends on ARM_AMBA
+ help
+ An implementation of the ARM PL320 Interprocessor Communication
+ Mailbox (IPCM), tailored for the Calxeda Highbank. It is used to
+ send short messages between Highbank's A9 cores and the EnergyCore
+ Management Engine, primarily for cpufreq. Say Y here if you want
+ to use the PL320 IPCM support.
+
+config ARMADA_37XX_RWTM_MBOX
+ tristate "Armada 37xx rWTM BIU Mailbox"
+ depends on ARCH_MVEBU || COMPILE_TEST
+ depends on OF
+ help
+ Mailbox implementation for communication with the firmware
+ running on the Cortex-M3 rWTM secure processor of the Armada 37xx
+ SoC. Say Y here if you are building for such a device (for example
+ the Turris Mox router).
+
+config OMAP2PLUS_MBOX
+ tristate "OMAP2+ Mailbox framework support"
+ depends on ARCH_OMAP2PLUS || ARCH_K3
+ help
+ Mailbox implementation for OMAP family chips with hardware for
+ interprocessor communication involving DSP, IVA1.0 and IVA2 in
+ OMAP2/3; or IPU, IVA HD and DSP in OMAP4/5. Say Y here if you
+ want to use OMAP2+ Mailbox framework support.
+
+config OMAP_MBOX_KFIFO_SIZE
+ int "Mailbox kfifo default buffer size (bytes)"
+ depends on OMAP2PLUS_MBOX
+ default 256
+ help
+ Specify the default size of mailbox's kfifo buffers (bytes).
+ This can also be changed at runtime (via the mbox_kfifo_size
+ module parameter).
+
+config ROCKCHIP_MBOX
+ bool "Rockchip Soc Integrated Mailbox Support"
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ help
+ This driver provides support for inter-processor communication
+ between CPU cores and the MCU processor on some Rockchip SoCs.
+ Please check that the SoC you use has mailbox hardware.
+ Say Y here if you want to use the Rockchip mailbox support.
+
+config PCC
+ bool "Platform Communication Channel Driver"
+ depends on ACPI
+ default n
+ help
+ ACPI 5.0+ spec defines a generic mode of communication
+ between the OS and a platform such as the BMC. This medium
+ (PCC) is typically used by CPPC (ACPI CPU Performance management),
+ RAS (ACPI reliability protocol) and MPST (ACPI Memory power
+ states). Select this driver if your platform implements the
+ PCC clients mentioned above.
+
+config ALTERA_MBOX
+ tristate "Altera Mailbox"
+ depends on HAS_IOMEM
+ help
+ An implementation of the Altera Mailbox soft core. It is used
+ to send messages between processors. Say Y here if you want to use the
+ Altera mailbox support.
+
+config BCM2835_MBOX
+ tristate "BCM2835 Mailbox"
+ depends on ARCH_BCM2835
+ help
+ An implementation of the BCM2835 Mailbox. It is used to invoke
+ the services of the Videocore. Say Y here if you want to use the
+ BCM2835 Mailbox.
+
+config STI_MBOX
+ tristate "STI Mailbox framework support"
+ depends on ARCH_STI && OF
+ help
+ Mailbox implementation for STMicroelectronics family chips with
+ hardware for interprocessor communication.
+
+config TI_MESSAGE_MANAGER
+ tristate "Texas Instruments Message Manager Driver"
+ depends on ARCH_KEYSTONE || ARCH_K3
+ default ARCH_K3
+ help
+ An implementation of Message Manager slave driver for Keystone
+ and K3 architecture SoCs from Texas Instruments. Message Manager
+ is a communication entity found on some of Texas Instruments' Keystone
+ and K3 architecture SoCs. These may be used for communication between
+ multiple processors within the SoC. Select this driver if your
+ platform has support for the hardware block.
+
+config HI3660_MBOX
+ tristate "Hi3660 Mailbox" if EXPERT
+ depends on (ARCH_HISI || COMPILE_TEST)
+ depends on OF
+ default ARCH_HISI
+ help
+ An implementation of the hi3660 mailbox. It is used to send messages
+ between application processors and other processors/MCU/DSP. Say
+ Y here if you want to use the Hi3660 mailbox controller.
+
+config HI6220_MBOX
+ tristate "Hi6220 Mailbox" if EXPERT
+ depends on (ARCH_HISI || COMPILE_TEST)
+ depends on OF
+ default ARCH_HISI
+ help
+ An implementation of the hi6220 mailbox. It is used to send messages
+ between application processors and MCU. Say Y here if you want to
+ build Hi6220 mailbox controller driver.
+
+config MAILBOX_TEST
+ tristate "Mailbox Test Client"
+ depends on OF
+ depends on HAS_IOMEM
+ help
+ Test client to help with testing new Controller driver
+ implementations.
+
+config POLARFIRE_SOC_MAILBOX
+ tristate "PolarFire SoC (MPFS) Mailbox"
+ depends on HAS_IOMEM
+ depends on ARCH_MICROCHIP_POLARFIRE || COMPILE_TEST
+ help
+ This driver adds support for the PolarFire SoC (MPFS) mailbox controller.
+
+ To compile this driver as a module, choose M here. The
+ module will be called mailbox-mpfs.
+
+ If unsure, say N.
+
+config QCOM_APCS_IPC
+ tristate "Qualcomm APCS IPC driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ help
+ Say y here to enable support for the APCS IPC mailbox driver,
+ providing an interface for invoking the inter-process communication
+ signals from the application processor to other masters.
+
+config TEGRA_HSP_MBOX
+ bool "Tegra HSP (Hardware Synchronization Primitives) Driver"
+ depends on ARCH_TEGRA
+ help
+ The Tegra HSP driver is used for the interprocessor communication
+ between different remote processors and host processors on Tegra186
+ and later SoCs. Say Y here if you want to have this support.
+ If unsure say N.
+
+config XGENE_SLIMPRO_MBOX
+ tristate "APM SoC X-Gene SLIMpro Mailbox Controller"
+ depends on ARCH_XGENE
+ help
+ An implementation of the APM X-Gene Interprocessor Communication
+ Mailbox (IPCM) between the ARM 64-bit cores and SLIMpro controller.
+ It is used to send short messages between the ARM 64-bit cores and
+ the SLIMpro Management Engine, primarily for PM. Say Y here if you
+ want to use the APM X-Gene SLIMpro IPCM support.
+
+config BCM_PDC_MBOX
+ tristate "Broadcom FlexSparx DMA Mailbox"
+ depends on ARCH_BCM_IPROC || COMPILE_TEST
+ help
+ Mailbox implementation for the Broadcom FlexSparx DMA ring manager,
+ which provides access to various offload engines on Broadcom
+ SoCs, including FA2/FA+ on Northstar Plus and PDC on Northstar 2.
+
+config BCM_FLEXRM_MBOX
+ tristate "Broadcom FlexRM Mailbox"
+ depends on ARM64
+ depends on ARCH_BCM_IPROC || COMPILE_TEST
+ select GENERIC_MSI_IRQ
+ default m if ARCH_BCM_IPROC
+ help
+ Mailbox implementation of the Broadcom FlexRM ring manager,
+ which provides access to various offload engines on Broadcom
+ SoCs. Say Y here if you want to use the Broadcom FlexRM.
+
+config STM32_IPCC
+ tristate "STM32 IPCC Mailbox"
+ depends on MACH_STM32MP157 || COMPILE_TEST
+ help
+ Mailbox implementation for STMicroelectronics STM32 family chips
+ with hardware for Inter-Processor Communication Controller (IPCC)
+ between processors. Say Y here if you want to have this support.
+
+config MTK_ADSP_MBOX
+ tristate "MediaTek ADSP Mailbox Controller"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ help
+ Say yes here to add support for "MediaTek ADSP Mailbox Controller.
+ This mailbox driver is used to send notification or short message
+ between processors with ADSP. It will place the message to share
+ buffer and will access the ipc control.
+
+config MTK_CMDQ_MBOX
+ tristate "MediaTek CMDQ Mailbox Support"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select MTK_INFRACFG
+ help
+ Say yes here to add support for the MediaTek Command Queue (CMDQ)
+ mailbox driver. The CMDQ is used to help read/write registers with
+ critical timing constraints, such as updating the display configuration
+ during vblank.
+
+config ZYNQMP_IPI_MBOX
+ tristate "Xilinx ZynqMP IPI Mailbox"
+ depends on ARCH_ZYNQMP && OF
+ help
+ Say yes here to add support for the Xilinx IPI mailbox driver.
+ This mailbox driver is used to send notifications or short messages
+ between processors using the Xilinx ZynqMP IPI. It places the
+ message in the IPI buffer and accesses the IPI control
+ registers to kick the other processor or enquire status.
+
+config SUN6I_MSGBOX
+ tristate "Allwinner sun6i/sun8i/sun9i/sun50i Message Box"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ default ARCH_SUNXI
+ help
+ Mailbox implementation for the hardware message box present in
+ various Allwinner SoCs. This mailbox is used for communication
+ between the application CPUs and the power management coprocessor.
+
+config SPRD_MBOX
+ tristate "Spreadtrum Mailbox"
+ depends on ARCH_SPRD || COMPILE_TEST
+ help
+ Mailbox driver implementation for the Spreadtrum platform. It is used
+ to send messages between application processors and the MCU. Say Y here
+ if you want to build the Spreadtrum mailbox controller driver.
+
+config QCOM_IPCC
+ tristate "Qualcomm Technologies, Inc. IPCC driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ help
+ Qualcomm Technologies, Inc. Inter-Processor Communication Controller
+ (IPCC) driver for MSM devices. The driver provides mailbox support for
+ sending interrupts to the clients. The driver also acts as an
+ interrupt controller for receiving interrupts from clients.
+ Say Y here if you want to build this driver.
+
+endif
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
new file mode 100644
index 000000000..fc9376117
--- /dev/null
+++ b/drivers/mailbox/Makefile
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: GPL-2.0
+# Generic MAILBOX API
+
+obj-$(CONFIG_MAILBOX) += mailbox.o
+
+obj-$(CONFIG_MAILBOX_TEST) += mailbox-test.o
+
+obj-$(CONFIG_ARM_MHU) += arm_mhu.o arm_mhu_db.o
+
+obj-$(CONFIG_ARM_MHU_V2) += arm_mhuv2.o
+
+obj-$(CONFIG_IMX_MBOX) += imx-mailbox.o
+
+obj-$(CONFIG_ARMADA_37XX_RWTM_MBOX) += armada-37xx-rwtm-mailbox.o
+
+obj-$(CONFIG_PLATFORM_MHU) += platform_mhu.o
+
+obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o
+
+obj-$(CONFIG_OMAP2PLUS_MBOX) += omap-mailbox.o
+
+obj-$(CONFIG_ROCKCHIP_MBOX) += rockchip-mailbox.o
+
+obj-$(CONFIG_PCC) += pcc.o
+
+obj-$(CONFIG_ALTERA_MBOX) += mailbox-altera.o
+
+obj-$(CONFIG_BCM2835_MBOX) += bcm2835-mailbox.o
+
+obj-$(CONFIG_STI_MBOX) += mailbox-sti.o
+
+obj-$(CONFIG_TI_MESSAGE_MANAGER) += ti-msgmgr.o
+
+obj-$(CONFIG_XGENE_SLIMPRO_MBOX) += mailbox-xgene-slimpro.o
+
+obj-$(CONFIG_HI3660_MBOX) += hi3660-mailbox.o
+
+obj-$(CONFIG_HI6220_MBOX) += hi6220-mailbox.o
+
+obj-$(CONFIG_BCM_PDC_MBOX) += bcm-pdc-mailbox.o
+
+obj-$(CONFIG_BCM_FLEXRM_MBOX) += bcm-flexrm-mailbox.o
+
+obj-$(CONFIG_POLARFIRE_SOC_MAILBOX) += mailbox-mpfs.o
+
+obj-$(CONFIG_QCOM_APCS_IPC) += qcom-apcs-ipc-mailbox.o
+
+obj-$(CONFIG_TEGRA_HSP_MBOX) += tegra-hsp.o
+
+obj-$(CONFIG_STM32_IPCC) += stm32-ipcc.o
+
+obj-$(CONFIG_MTK_ADSP_MBOX) += mtk-adsp-mailbox.o
+
+obj-$(CONFIG_MTK_CMDQ_MBOX) += mtk-cmdq-mailbox.o
+
+obj-$(CONFIG_ZYNQMP_IPI_MBOX) += zynqmp-ipi-mailbox.o
+
+obj-$(CONFIG_SUN6I_MSGBOX) += sun6i-msgbox.o
+
+obj-$(CONFIG_SPRD_MBOX) += sprd-mailbox.o
+
+obj-$(CONFIG_QCOM_IPCC) += qcom-ipcc.o
+
+obj-$(CONFIG_APPLE_MAILBOX) += apple-mailbox.o
diff --git a/drivers/mailbox/apple-mailbox.c b/drivers/mailbox/apple-mailbox.c
new file mode 100644
index 000000000..2a3e8d8ff
--- /dev/null
+++ b/drivers/mailbox/apple-mailbox.c
@@ -0,0 +1,441 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple mailbox driver
+ *
+ * Copyright (C) 2021 The Asahi Linux Contributors
+ *
+ * This driver adds support for two mailbox variants (called ASC and M3 by
+ * Apple) found in Apple SoCs such as the M1. It consists of two FIFOs used to
+ * exchange 64+32 bit messages between the main CPU and a co-processor.
+ * Various coprocessors implement different IPC protocols based on these simple
+ * messages and shared memory buffers.
+ *
+ * Both the main CPU and the co-processor see the same set of registers but
+ * the first FIFO (A2I) is always used to transfer messages from the application
+ * processor (us) to the I/O processor and the second one (I2A) for the
+ * other direction.
+ */
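+/*
+ * For reference, the payload exchanged through these FIFOs is described by
+ * struct apple_mbox_msg from <linux/apple-mailbox.h>; at the time of writing
+ * it is essentially:
+ *
+ *   struct apple_mbox_msg {
+ *           u64 msg0;   <- full 64-bit word written to SEND0 / read from RECV0
+ *           u32 msg1;   <- 32-bit value packed into APPLE_MBOX_MSG1_MSG
+ *   };
+ */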
+
+#include <linux/apple-mailbox.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#define APPLE_ASC_MBOX_CONTROL_FULL BIT(16)
+#define APPLE_ASC_MBOX_CONTROL_EMPTY BIT(17)
+
+#define APPLE_ASC_MBOX_A2I_CONTROL 0x110
+#define APPLE_ASC_MBOX_A2I_SEND0 0x800
+#define APPLE_ASC_MBOX_A2I_SEND1 0x808
+#define APPLE_ASC_MBOX_A2I_RECV0 0x810
+#define APPLE_ASC_MBOX_A2I_RECV1 0x818
+
+#define APPLE_ASC_MBOX_I2A_CONTROL 0x114
+#define APPLE_ASC_MBOX_I2A_SEND0 0x820
+#define APPLE_ASC_MBOX_I2A_SEND1 0x828
+#define APPLE_ASC_MBOX_I2A_RECV0 0x830
+#define APPLE_ASC_MBOX_I2A_RECV1 0x838
+
+#define APPLE_M3_MBOX_CONTROL_FULL BIT(16)
+#define APPLE_M3_MBOX_CONTROL_EMPTY BIT(17)
+
+#define APPLE_M3_MBOX_A2I_CONTROL 0x50
+#define APPLE_M3_MBOX_A2I_SEND0 0x60
+#define APPLE_M3_MBOX_A2I_SEND1 0x68
+#define APPLE_M3_MBOX_A2I_RECV0 0x70
+#define APPLE_M3_MBOX_A2I_RECV1 0x78
+
+#define APPLE_M3_MBOX_I2A_CONTROL 0x80
+#define APPLE_M3_MBOX_I2A_SEND0 0x90
+#define APPLE_M3_MBOX_I2A_SEND1 0x98
+#define APPLE_M3_MBOX_I2A_RECV0 0xa0
+#define APPLE_M3_MBOX_I2A_RECV1 0xa8
+
+#define APPLE_M3_MBOX_IRQ_ENABLE 0x48
+#define APPLE_M3_MBOX_IRQ_ACK 0x4c
+#define APPLE_M3_MBOX_IRQ_A2I_EMPTY BIT(0)
+#define APPLE_M3_MBOX_IRQ_A2I_NOT_EMPTY BIT(1)
+#define APPLE_M3_MBOX_IRQ_I2A_EMPTY BIT(2)
+#define APPLE_M3_MBOX_IRQ_I2A_NOT_EMPTY BIT(3)
+
+#define APPLE_MBOX_MSG1_OUTCNT GENMASK(56, 52)
+#define APPLE_MBOX_MSG1_INCNT GENMASK(51, 48)
+#define APPLE_MBOX_MSG1_OUTPTR GENMASK(47, 44)
+#define APPLE_MBOX_MSG1_INPTR GENMASK(43, 40)
+#define APPLE_MBOX_MSG1_MSG GENMASK(31, 0)
+
+struct apple_mbox_hw {
+ unsigned int control_full;
+ unsigned int control_empty;
+
+ unsigned int a2i_control;
+ unsigned int a2i_send0;
+ unsigned int a2i_send1;
+
+ unsigned int i2a_control;
+ unsigned int i2a_recv0;
+ unsigned int i2a_recv1;
+
+ bool has_irq_controls;
+ unsigned int irq_enable;
+ unsigned int irq_ack;
+ unsigned int irq_bit_recv_not_empty;
+ unsigned int irq_bit_send_empty;
+};
+
+struct apple_mbox {
+ void __iomem *regs;
+ const struct apple_mbox_hw *hw;
+
+ int irq_recv_not_empty;
+ int irq_send_empty;
+
+ struct mbox_chan chan;
+
+ struct device *dev;
+ struct mbox_controller controller;
+ spinlock_t rx_lock;
+};
+
+static const struct of_device_id apple_mbox_of_match[];
+
+static bool apple_mbox_hw_can_send(struct apple_mbox *apple_mbox)
+{
+ u32 mbox_ctrl =
+ readl_relaxed(apple_mbox->regs + apple_mbox->hw->a2i_control);
+
+ return !(mbox_ctrl & apple_mbox->hw->control_full);
+}
+
+static bool apple_mbox_hw_send_empty(struct apple_mbox *apple_mbox)
+{
+ u32 mbox_ctrl =
+ readl_relaxed(apple_mbox->regs + apple_mbox->hw->a2i_control);
+
+ return mbox_ctrl & apple_mbox->hw->control_empty;
+}
+
+static int apple_mbox_hw_send(struct apple_mbox *apple_mbox,
+ struct apple_mbox_msg *msg)
+{
+ if (!apple_mbox_hw_can_send(apple_mbox))
+ return -EBUSY;
+
+ dev_dbg(apple_mbox->dev, "> TX %016llx %08x\n", msg->msg0, msg->msg1);
+
+ writeq_relaxed(msg->msg0, apple_mbox->regs + apple_mbox->hw->a2i_send0);
+ writeq_relaxed(FIELD_PREP(APPLE_MBOX_MSG1_MSG, msg->msg1),
+ apple_mbox->regs + apple_mbox->hw->a2i_send1);
+
+ return 0;
+}
+
+static bool apple_mbox_hw_can_recv(struct apple_mbox *apple_mbox)
+{
+ u32 mbox_ctrl =
+ readl_relaxed(apple_mbox->regs + apple_mbox->hw->i2a_control);
+
+ return !(mbox_ctrl & apple_mbox->hw->control_empty);
+}
+
+static int apple_mbox_hw_recv(struct apple_mbox *apple_mbox,
+ struct apple_mbox_msg *msg)
+{
+ if (!apple_mbox_hw_can_recv(apple_mbox))
+ return -ENOMSG;
+
+ msg->msg0 = readq_relaxed(apple_mbox->regs + apple_mbox->hw->i2a_recv0);
+ msg->msg1 = FIELD_GET(
+ APPLE_MBOX_MSG1_MSG,
+ readq_relaxed(apple_mbox->regs + apple_mbox->hw->i2a_recv1));
+
+ dev_dbg(apple_mbox->dev, "< RX %016llx %08x\n", msg->msg0, msg->msg1);
+
+ return 0;
+}
+
+static int apple_mbox_chan_send_data(struct mbox_chan *chan, void *data)
+{
+ struct apple_mbox *apple_mbox = chan->con_priv;
+ struct apple_mbox_msg *msg = data;
+ int ret;
+
+ ret = apple_mbox_hw_send(apple_mbox, msg);
+ if (ret)
+ return ret;
+
+ /*
+ * The interrupt is level triggered and will keep firing as long as the
+ * FIFO is empty. It will also keep firing if the FIFO was empty
+ * at any point in the past until it has been acknowledged at the
+ * mailbox level. By acknowledging it here we can ensure that we will
+ * only get the interrupt once the FIFO has been cleared again.
+ * If the FIFO is already empty before the ack it will fire again
+ * immediately after the ack.
+ */
+ if (apple_mbox->hw->has_irq_controls) {
+ writel_relaxed(apple_mbox->hw->irq_bit_send_empty,
+ apple_mbox->regs + apple_mbox->hw->irq_ack);
+ }
+ enable_irq(apple_mbox->irq_send_empty);
+
+ return 0;
+}
+
+static irqreturn_t apple_mbox_send_empty_irq(int irq, void *data)
+{
+ struct apple_mbox *apple_mbox = data;
+
+ /*
+ * We don't need to acknowledge the interrupt at the mailbox level
+ * here even if supported by the hardware. It will keep firing but that
+ * doesn't matter since it's disabled at the main interrupt controller.
+ * apple_mbox_chan_send_data will acknowledge it before enabling
+ * it at the main controller again.
+ */
+ disable_irq_nosync(apple_mbox->irq_send_empty);
+ mbox_chan_txdone(&apple_mbox->chan, 0);
+ return IRQ_HANDLED;
+}
+
+static int apple_mbox_poll(struct apple_mbox *apple_mbox)
+{
+ struct apple_mbox_msg msg;
+ int ret = 0;
+
+ while (apple_mbox_hw_recv(apple_mbox, &msg) == 0) {
+ mbox_chan_received_data(&apple_mbox->chan, (void *)&msg);
+ ret++;
+ }
+
+ /*
+ * The interrupt will keep firing even if there are no more messages
+ * unless we also acknowledge it at the mailbox level here.
+ * There's no race if a message comes in between the check in the while
+ * loop above and the ack below: if a new message arrives in between
+ * those two the interrupt will just fire again immediately after the
+ * ack since it's level triggered.
+ */
+ if (apple_mbox->hw->has_irq_controls) {
+ writel_relaxed(apple_mbox->hw->irq_bit_recv_not_empty,
+ apple_mbox->regs + apple_mbox->hw->irq_ack);
+ }
+
+ return ret;
+}
+
+static irqreturn_t apple_mbox_recv_irq(int irq, void *data)
+{
+ struct apple_mbox *apple_mbox = data;
+
+ spin_lock(&apple_mbox->rx_lock);
+ apple_mbox_poll(apple_mbox);
+ spin_unlock(&apple_mbox->rx_lock);
+
+ return IRQ_HANDLED;
+}
+
+static bool apple_mbox_chan_peek_data(struct mbox_chan *chan)
+{
+ struct apple_mbox *apple_mbox = chan->con_priv;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&apple_mbox->rx_lock, flags);
+ ret = apple_mbox_poll(apple_mbox);
+ spin_unlock_irqrestore(&apple_mbox->rx_lock, flags);
+
+ return ret > 0;
+}
+
+static int apple_mbox_chan_flush(struct mbox_chan *chan, unsigned long timeout)
+{
+ struct apple_mbox *apple_mbox = chan->con_priv;
+ unsigned long deadline = jiffies + msecs_to_jiffies(timeout);
+
+ while (time_before(jiffies, deadline)) {
+ if (apple_mbox_hw_send_empty(apple_mbox)) {
+ mbox_chan_txdone(&apple_mbox->chan, 0);
+ return 0;
+ }
+
+ udelay(1);
+ }
+
+ return -ETIME;
+}
+
+static int apple_mbox_chan_startup(struct mbox_chan *chan)
+{
+ struct apple_mbox *apple_mbox = chan->con_priv;
+
+ /*
+ * Only some variants of this mailbox HW provide interrupt control
+ * at the mailbox level. We therefore need to handle enabling/disabling
+ * interrupts at the main interrupt controller anyway for hardware that
+ * doesn't. Just always keep the interrupts we care about enabled at
+ * the mailbox level so that both hardware revisions behave almost
+ * the same.
+ */
+ if (apple_mbox->hw->has_irq_controls) {
+ writel_relaxed(apple_mbox->hw->irq_bit_recv_not_empty |
+ apple_mbox->hw->irq_bit_send_empty,
+ apple_mbox->regs + apple_mbox->hw->irq_enable);
+ }
+
+ enable_irq(apple_mbox->irq_recv_not_empty);
+ return 0;
+}
+
+static void apple_mbox_chan_shutdown(struct mbox_chan *chan)
+{
+ struct apple_mbox *apple_mbox = chan->con_priv;
+
+ disable_irq(apple_mbox->irq_recv_not_empty);
+}
+
+static const struct mbox_chan_ops apple_mbox_ops = {
+ .send_data = apple_mbox_chan_send_data,
+ .peek_data = apple_mbox_chan_peek_data,
+ .flush = apple_mbox_chan_flush,
+ .startup = apple_mbox_chan_startup,
+ .shutdown = apple_mbox_chan_shutdown,
+};
+
+static struct mbox_chan *apple_mbox_of_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *args)
+{
+ if (args->args_count != 0)
+ return ERR_PTR(-EINVAL);
+
+ return &mbox->chans[0];
+}
+
+static int apple_mbox_probe(struct platform_device *pdev)
+{
+ int ret;
+ const struct of_device_id *match;
+ char *irqname;
+ struct apple_mbox *mbox;
+ struct device *dev = &pdev->dev;
+
+ match = of_match_node(apple_mbox_of_match, pdev->dev.of_node);
+ if (!match)
+ return -EINVAL;
+ if (!match->data)
+ return -EINVAL;
+
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, mbox);
+
+ mbox->dev = dev;
+ mbox->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mbox->regs))
+ return PTR_ERR(mbox->regs);
+
+ mbox->hw = match->data;
+ mbox->irq_recv_not_empty =
+ platform_get_irq_byname(pdev, "recv-not-empty");
+ if (mbox->irq_recv_not_empty < 0)
+ return -ENODEV;
+
+ mbox->irq_send_empty = platform_get_irq_byname(pdev, "send-empty");
+ if (mbox->irq_send_empty < 0)
+ return -ENODEV;
+
+ mbox->controller.dev = mbox->dev;
+ mbox->controller.num_chans = 1;
+ mbox->controller.chans = &mbox->chan;
+ mbox->controller.ops = &apple_mbox_ops;
+ mbox->controller.txdone_irq = true;
+ mbox->controller.of_xlate = apple_mbox_of_xlate;
+ mbox->chan.con_priv = mbox;
+ spin_lock_init(&mbox->rx_lock);
+
+ irqname = devm_kasprintf(dev, GFP_KERNEL, "%s-recv", dev_name(dev));
+ if (!irqname)
+ return -ENOMEM;
+
+ ret = devm_request_threaded_irq(dev, mbox->irq_recv_not_empty, NULL,
+ apple_mbox_recv_irq,
+ IRQF_NO_AUTOEN | IRQF_ONESHOT, irqname,
+ mbox);
+ if (ret)
+ return ret;
+
+ irqname = devm_kasprintf(dev, GFP_KERNEL, "%s-send", dev_name(dev));
+ if (!irqname)
+ return -ENOMEM;
+
+ ret = devm_request_irq(dev, mbox->irq_send_empty,
+ apple_mbox_send_empty_irq, IRQF_NO_AUTOEN,
+ irqname, mbox);
+ if (ret)
+ return ret;
+
+ return devm_mbox_controller_register(dev, &mbox->controller);
+}
+
+static const struct apple_mbox_hw apple_mbox_asc_hw = {
+ .control_full = APPLE_ASC_MBOX_CONTROL_FULL,
+ .control_empty = APPLE_ASC_MBOX_CONTROL_EMPTY,
+
+ .a2i_control = APPLE_ASC_MBOX_A2I_CONTROL,
+ .a2i_send0 = APPLE_ASC_MBOX_A2I_SEND0,
+ .a2i_send1 = APPLE_ASC_MBOX_A2I_SEND1,
+
+ .i2a_control = APPLE_ASC_MBOX_I2A_CONTROL,
+ .i2a_recv0 = APPLE_ASC_MBOX_I2A_RECV0,
+ .i2a_recv1 = APPLE_ASC_MBOX_I2A_RECV1,
+
+ .has_irq_controls = false,
+};
+
+static const struct apple_mbox_hw apple_mbox_m3_hw = {
+ .control_full = APPLE_M3_MBOX_CONTROL_FULL,
+ .control_empty = APPLE_M3_MBOX_CONTROL_EMPTY,
+
+ .a2i_control = APPLE_M3_MBOX_A2I_CONTROL,
+ .a2i_send0 = APPLE_M3_MBOX_A2I_SEND0,
+ .a2i_send1 = APPLE_M3_MBOX_A2I_SEND1,
+
+ .i2a_control = APPLE_M3_MBOX_I2A_CONTROL,
+ .i2a_recv0 = APPLE_M3_MBOX_I2A_RECV0,
+ .i2a_recv1 = APPLE_M3_MBOX_I2A_RECV1,
+
+ .has_irq_controls = true,
+ .irq_enable = APPLE_M3_MBOX_IRQ_ENABLE,
+ .irq_ack = APPLE_M3_MBOX_IRQ_ACK,
+ .irq_bit_recv_not_empty = APPLE_M3_MBOX_IRQ_I2A_NOT_EMPTY,
+ .irq_bit_send_empty = APPLE_M3_MBOX_IRQ_A2I_EMPTY,
+};
+
+static const struct of_device_id apple_mbox_of_match[] = {
+ { .compatible = "apple,asc-mailbox-v4", .data = &apple_mbox_asc_hw },
+ { .compatible = "apple,m3-mailbox-v2", .data = &apple_mbox_m3_hw },
+ {}
+};
+MODULE_DEVICE_TABLE(of, apple_mbox_of_match);
+
+static struct platform_driver apple_mbox_driver = {
+ .driver = {
+ .name = "apple-mailbox",
+ .of_match_table = apple_mbox_of_match,
+ },
+ .probe = apple_mbox_probe,
+};
+module_platform_driver(apple_mbox_driver);
+
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
+MODULE_DESCRIPTION("Apple Mailbox driver");
diff --git a/drivers/mailbox/arm_mhu.c b/drivers/mailbox/arm_mhu.c
new file mode 100644
index 000000000..537f7bfb7
--- /dev/null
+++ b/drivers/mailbox/arm_mhu.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013-2015 Fujitsu Semiconductor Ltd.
+ * Copyright (C) 2015 Linaro Ltd.
+ * Author: Jassi Brar <jaswinder.singh@linaro.org>
+ */
+
+#include <linux/amba/bus.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#define INTR_STAT_OFS 0x0
+#define INTR_SET_OFS 0x8
+#define INTR_CLR_OFS 0x10
+
+#define MHU_LP_OFFSET 0x0
+#define MHU_HP_OFFSET 0x20
+#define MHU_SEC_OFFSET 0x200
+#define TX_REG_OFFSET 0x100
+
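+/*
+ * Three channels are exposed: non-secure low priority (MHU_LP_OFFSET),
+ * non-secure high priority (MHU_HP_OFFSET) and secure (MHU_SEC_OFFSET).
+ * Each channel's TX register block sits TX_REG_OFFSET above its RX block.
+ */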
+#define MHU_CHANS 3
+
+struct mhu_link {
+ unsigned irq;
+ void __iomem *tx_reg;
+ void __iomem *rx_reg;
+};
+
+struct arm_mhu {
+ void __iomem *base;
+ struct mhu_link mlink[MHU_CHANS];
+ struct mbox_chan chan[MHU_CHANS];
+ struct mbox_controller mbox;
+};
+
+static irqreturn_t mhu_rx_interrupt(int irq, void *p)
+{
+ struct mbox_chan *chan = p;
+ struct mhu_link *mlink = chan->con_priv;
+ u32 val;
+
+ val = readl_relaxed(mlink->rx_reg + INTR_STAT_OFS);
+ if (!val)
+ return IRQ_NONE;
+
+ mbox_chan_received_data(chan, (void *)&val);
+
+ writel_relaxed(val, mlink->rx_reg + INTR_CLR_OFS);
+
+ return IRQ_HANDLED;
+}
+
+static bool mhu_last_tx_done(struct mbox_chan *chan)
+{
+ struct mhu_link *mlink = chan->con_priv;
+ u32 val = readl_relaxed(mlink->tx_reg + INTR_STAT_OFS);
+
+ return (val == 0);
+}
+
+static int mhu_send_data(struct mbox_chan *chan, void *data)
+{
+ struct mhu_link *mlink = chan->con_priv;
+ u32 *arg = data;
+
+ writel_relaxed(*arg, mlink->tx_reg + INTR_SET_OFS);
+
+ return 0;
+}
+
+static int mhu_startup(struct mbox_chan *chan)
+{
+ struct mhu_link *mlink = chan->con_priv;
+ u32 val;
+ int ret;
+
+ val = readl_relaxed(mlink->tx_reg + INTR_STAT_OFS);
+ writel_relaxed(val, mlink->tx_reg + INTR_CLR_OFS);
+
+ ret = request_irq(mlink->irq, mhu_rx_interrupt,
+ IRQF_SHARED, "mhu_link", chan);
+ if (ret) {
+ dev_err(chan->mbox->dev,
+ "Unable to acquire IRQ %d\n", mlink->irq);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mhu_shutdown(struct mbox_chan *chan)
+{
+ struct mhu_link *mlink = chan->con_priv;
+
+ free_irq(mlink->irq, chan);
+}
+
+static const struct mbox_chan_ops mhu_ops = {
+ .send_data = mhu_send_data,
+ .startup = mhu_startup,
+ .shutdown = mhu_shutdown,
+ .last_tx_done = mhu_last_tx_done,
+};
+
+static int mhu_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ int i, err;
+ struct arm_mhu *mhu;
+ struct device *dev = &adev->dev;
+ int mhu_reg[MHU_CHANS] = {MHU_LP_OFFSET, MHU_HP_OFFSET, MHU_SEC_OFFSET};
+
+ if (!of_device_is_compatible(dev->of_node, "arm,mhu"))
+ return -ENODEV;
+
+ /* Allocate memory for device */
+ mhu = devm_kzalloc(dev, sizeof(*mhu), GFP_KERNEL);
+ if (!mhu)
+ return -ENOMEM;
+
+ mhu->base = devm_ioremap_resource(dev, &adev->res);
+ if (IS_ERR(mhu->base))
+ return PTR_ERR(mhu->base);
+
+ for (i = 0; i < MHU_CHANS; i++) {
+ mhu->chan[i].con_priv = &mhu->mlink[i];
+ mhu->mlink[i].irq = adev->irq[i];
+ mhu->mlink[i].rx_reg = mhu->base + mhu_reg[i];
+ mhu->mlink[i].tx_reg = mhu->mlink[i].rx_reg + TX_REG_OFFSET;
+ }
+
+ mhu->mbox.dev = dev;
+ mhu->mbox.chans = &mhu->chan[0];
+ mhu->mbox.num_chans = MHU_CHANS;
+ mhu->mbox.ops = &mhu_ops;
+ mhu->mbox.txdone_irq = false;
+ mhu->mbox.txdone_poll = true;
+ mhu->mbox.txpoll_period = 1;
+
+ amba_set_drvdata(adev, mhu);
+
+ err = devm_mbox_controller_register(dev, &mhu->mbox);
+ if (err) {
+ dev_err(dev, "Failed to register mailboxes %d\n", err);
+ return err;
+ }
+
+ dev_info(dev, "ARM MHU Mailbox registered\n");
+ return 0;
+}
+
+static struct amba_id mhu_ids[] = {
+ {
+ .id = 0x1bb098,
+ .mask = 0xffffff,
+ },
+ { 0, 0 },
+};
+MODULE_DEVICE_TABLE(amba, mhu_ids);
+
+static struct amba_driver arm_mhu_driver = {
+ .drv = {
+ .name = "mhu",
+ },
+ .id_table = mhu_ids,
+ .probe = mhu_probe,
+};
+module_amba_driver(arm_mhu_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ARM MHU Driver");
+MODULE_AUTHOR("Jassi Brar <jassisinghbrar@gmail.com>");
diff --git a/drivers/mailbox/arm_mhu_db.c b/drivers/mailbox/arm_mhu_db.c
new file mode 100644
index 000000000..27a510d46
--- /dev/null
+++ b/drivers/mailbox/arm_mhu_db.c
@@ -0,0 +1,351 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013-2015 Fujitsu Semiconductor Ltd.
+ * Copyright (C) 2015 Linaro Ltd.
+ * Based on ARM MHU driver by Jassi Brar <jaswinder.singh@linaro.org>
+ * Copyright (C) 2020 ARM Ltd.
+ */
+
+#include <linux/amba/bus.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#define INTR_STAT_OFS 0x0
+#define INTR_SET_OFS 0x8
+#define INTR_CLR_OFS 0x10
+
+#define MHU_LP_OFFSET 0x0
+#define MHU_HP_OFFSET 0x20
+#define MHU_SEC_OFFSET 0x200
+#define TX_REG_OFFSET 0x100
+
+#define MHU_CHANS 3 /* Secure, Non-Secure High and Low Priority */
+#define MHU_CHAN_MAX 20 /* Max channels to save on unused RAM */
+#define MHU_NUM_DOORBELLS 32
+
+struct mhu_db_link {
+ unsigned int irq;
+ void __iomem *tx_reg;
+ void __iomem *rx_reg;
+};
+
+struct arm_mhu {
+ void __iomem *base;
+ struct mhu_db_link mlink[MHU_CHANS];
+ struct mbox_controller mbox;
+ struct device *dev;
+};
+
+/**
+ * struct mhu_db_channel - ARM MHU Mailbox allocated channel information
+ *
+ * @mhu: Pointer to parent mailbox device
+ * @pchan: Physical channel within which this doorbell resides
+ * @doorbell: doorbell number pertaining to this channel
+ */
+struct mhu_db_channel {
+ struct arm_mhu *mhu;
+ unsigned int pchan;
+ unsigned int doorbell;
+};
+
+static inline struct mbox_chan *
+mhu_db_mbox_to_channel(struct mbox_controller *mbox, unsigned int pchan,
+ unsigned int doorbell)
+{
+ int i;
+ struct mhu_db_channel *chan_info;
+
+ for (i = 0; i < mbox->num_chans; i++) {
+ chan_info = mbox->chans[i].con_priv;
+ if (chan_info && chan_info->pchan == pchan &&
+ chan_info->doorbell == doorbell)
+ return &mbox->chans[i];
+ }
+
+ return NULL;
+}
+
+static void mhu_db_mbox_clear_irq(struct mbox_chan *chan)
+{
+ struct mhu_db_channel *chan_info = chan->con_priv;
+ void __iomem *base = chan_info->mhu->mlink[chan_info->pchan].rx_reg;
+
+ writel_relaxed(BIT(chan_info->doorbell), base + INTR_CLR_OFS);
+}
+
+static unsigned int mhu_db_mbox_irq_to_pchan_num(struct arm_mhu *mhu, int irq)
+{
+ unsigned int pchan;
+
+ for (pchan = 0; pchan < MHU_CHANS; pchan++)
+ if (mhu->mlink[pchan].irq == irq)
+ break;
+ return pchan;
+}
+
+static struct mbox_chan *
+mhu_db_mbox_irq_to_channel(struct arm_mhu *mhu, unsigned int pchan)
+{
+ unsigned long bits;
+ unsigned int doorbell;
+ struct mbox_chan *chan = NULL;
+ struct mbox_controller *mbox = &mhu->mbox;
+ void __iomem *base = mhu->mlink[pchan].rx_reg;
+
+ bits = readl_relaxed(base + INTR_STAT_OFS);
+ if (!bits)
+ /* No IRQs fired in specified physical channel */
+ return NULL;
+
+ /* An IRQ has fired, find the associated channel */
+ for (doorbell = 0; bits; doorbell++) {
+ if (!test_and_clear_bit(doorbell, &bits))
+ continue;
+
+ chan = mhu_db_mbox_to_channel(mbox, pchan, doorbell);
+ if (chan)
+ break;
+ dev_err(mbox->dev,
+ "Channel not registered: pchan: %d doorbell: %d\n",
+ pchan, doorbell);
+ }
+
+ return chan;
+}
+
+static irqreturn_t mhu_db_mbox_rx_handler(int irq, void *data)
+{
+ struct mbox_chan *chan;
+ struct arm_mhu *mhu = data;
+ unsigned int pchan = mhu_db_mbox_irq_to_pchan_num(mhu, irq);
+
+ while (NULL != (chan = mhu_db_mbox_irq_to_channel(mhu, pchan))) {
+ mbox_chan_received_data(chan, NULL);
+ mhu_db_mbox_clear_irq(chan);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static bool mhu_db_last_tx_done(struct mbox_chan *chan)
+{
+ struct mhu_db_channel *chan_info = chan->con_priv;
+ void __iomem *base = chan_info->mhu->mlink[chan_info->pchan].tx_reg;
+
+ if (readl_relaxed(base + INTR_STAT_OFS) & BIT(chan_info->doorbell))
+ return false;
+
+ return true;
+}
+
+static int mhu_db_send_data(struct mbox_chan *chan, void *data)
+{
+ struct mhu_db_channel *chan_info = chan->con_priv;
+ void __iomem *base = chan_info->mhu->mlink[chan_info->pchan].tx_reg;
+
+ /* Send event to co-processor */
+ writel_relaxed(BIT(chan_info->doorbell), base + INTR_SET_OFS);
+
+ return 0;
+}
+
+static int mhu_db_startup(struct mbox_chan *chan)
+{
+ mhu_db_mbox_clear_irq(chan);
+ return 0;
+}
+
+static void mhu_db_shutdown(struct mbox_chan *chan)
+{
+ struct mhu_db_channel *chan_info = chan->con_priv;
+ struct mbox_controller *mbox = &chan_info->mhu->mbox;
+ int i;
+
+ for (i = 0; i < mbox->num_chans; i++)
+ if (chan == &mbox->chans[i])
+ break;
+
+ if (mbox->num_chans == i) {
+ dev_warn(mbox->dev, "Request to free non-existent channel\n");
+ return;
+ }
+
+ /* Reset channel */
+ mhu_db_mbox_clear_irq(chan);
+ devm_kfree(mbox->dev, chan->con_priv);
+ chan->con_priv = NULL;
+}
+
+static struct mbox_chan *mhu_db_mbox_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *spec)
+{
+ struct arm_mhu *mhu = dev_get_drvdata(mbox->dev);
+ struct mhu_db_channel *chan_info;
+ struct mbox_chan *chan;
+ unsigned int pchan = spec->args[0];
+ unsigned int doorbell = spec->args[1];
+ int i;
+
+ /* Bounds checking */
+ if (pchan >= MHU_CHANS || doorbell >= MHU_NUM_DOORBELLS) {
+ dev_err(mbox->dev,
+ "Invalid channel requested pchan: %d doorbell: %d\n",
+ pchan, doorbell);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Is requested channel free? */
+ chan = mhu_db_mbox_to_channel(mbox, pchan, doorbell);
+ if (chan) {
+ dev_err(mbox->dev, "Channel in use: pchan: %d doorbell: %d\n",
+ pchan, doorbell);
+ return ERR_PTR(-EBUSY);
+ }
+
+ /* Find the first free slot */
+ for (i = 0; i < mbox->num_chans; i++)
+ if (!mbox->chans[i].con_priv)
+ break;
+
+ if (mbox->num_chans == i) {
+ dev_err(mbox->dev, "No free channels left\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ chan = &mbox->chans[i];
+
+ chan_info = devm_kzalloc(mbox->dev, sizeof(*chan_info), GFP_KERNEL);
+ if (!chan_info)
+ return ERR_PTR(-ENOMEM);
+
+ chan_info->mhu = mhu;
+ chan_info->pchan = pchan;
+ chan_info->doorbell = doorbell;
+
+ chan->con_priv = chan_info;
+
+ dev_dbg(mbox->dev, "mbox: created channel phys: %d doorbell: %d\n",
+ pchan, doorbell);
+
+ return chan;
+}
+
+static const struct mbox_chan_ops mhu_db_ops = {
+ .send_data = mhu_db_send_data,
+ .startup = mhu_db_startup,
+ .shutdown = mhu_db_shutdown,
+ .last_tx_done = mhu_db_last_tx_done,
+};
+
+static int mhu_db_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ u32 cell_count;
+ int i, err, max_chans;
+ struct arm_mhu *mhu;
+ struct mbox_chan *chans;
+ struct device *dev = &adev->dev;
+ struct device_node *np = dev->of_node;
+ int mhu_reg[MHU_CHANS] = {
+ MHU_LP_OFFSET, MHU_HP_OFFSET, MHU_SEC_OFFSET,
+ };
+
+ if (!of_device_is_compatible(np, "arm,mhu-doorbell"))
+ return -ENODEV;
+
+ err = of_property_read_u32(np, "#mbox-cells", &cell_count);
+ if (err) {
+ dev_err(dev, "failed to read #mbox-cells in '%pOF'\n", np);
+ return err;
+ }
+
+ if (cell_count == 2) {
+ max_chans = MHU_CHAN_MAX;
+ } else {
+ dev_err(dev, "incorrect value of #mbox-cells in '%pOF'\n", np);
+ return -EINVAL;
+ }
+
+ mhu = devm_kzalloc(dev, sizeof(*mhu), GFP_KERNEL);
+ if (!mhu)
+ return -ENOMEM;
+
+ mhu->base = devm_ioremap_resource(dev, &adev->res);
+ if (IS_ERR(mhu->base))
+ return PTR_ERR(mhu->base);
+
+ chans = devm_kcalloc(dev, max_chans, sizeof(*chans), GFP_KERNEL);
+ if (!chans)
+ return -ENOMEM;
+
+ mhu->dev = dev;
+ mhu->mbox.dev = dev;
+ mhu->mbox.chans = chans;
+ mhu->mbox.num_chans = max_chans;
+ mhu->mbox.txdone_irq = false;
+ mhu->mbox.txdone_poll = true;
+ mhu->mbox.txpoll_period = 1;
+
+ mhu->mbox.of_xlate = mhu_db_mbox_xlate;
+ amba_set_drvdata(adev, mhu);
+
+ mhu->mbox.ops = &mhu_db_ops;
+
+ err = devm_mbox_controller_register(dev, &mhu->mbox);
+ if (err) {
+ dev_err(dev, "Failed to register mailboxes %d\n", err);
+ return err;
+ }
+
+ for (i = 0; i < MHU_CHANS; i++) {
+ int irq = mhu->mlink[i].irq = adev->irq[i];
+
+ if (irq <= 0) {
+ dev_dbg(dev, "No IRQ found for Channel %d\n", i);
+ continue;
+ }
+
+ mhu->mlink[i].rx_reg = mhu->base + mhu_reg[i];
+ mhu->mlink[i].tx_reg = mhu->mlink[i].rx_reg + TX_REG_OFFSET;
+
+ err = devm_request_threaded_irq(dev, irq, NULL,
+ mhu_db_mbox_rx_handler,
+ IRQF_ONESHOT, "mhu_db_link", mhu);
+ if (err) {
+ dev_err(dev, "Can't claim IRQ %d\n", irq);
+ mbox_controller_unregister(&mhu->mbox);
+ return err;
+ }
+ }
+
+ dev_info(dev, "ARM MHU Doorbell mailbox registered\n");
+ return 0;
+}
+
+static struct amba_id mhu_ids[] = {
+ {
+ .id = 0x1bb098,
+ .mask = 0xffffff,
+ },
+ { 0, 0 },
+};
+MODULE_DEVICE_TABLE(amba, mhu_ids);
+
+static struct amba_driver arm_mhu_db_driver = {
+ .drv = {
+ .name = "mhu-doorbell",
+ },
+ .id_table = mhu_ids,
+ .probe = mhu_db_probe,
+};
+module_amba_driver(arm_mhu_db_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ARM MHU Doorbell Driver");
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
diff --git a/drivers/mailbox/arm_mhuv2.c b/drivers/mailbox/arm_mhuv2.c
new file mode 100644
index 000000000..c6d4957c4
--- /dev/null
+++ b/drivers/mailbox/arm_mhuv2.c
@@ -0,0 +1,1137 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARM Message Handling Unit Version 2 (MHUv2) driver.
+ *
+ * Copyright (C) 2020 ARM Ltd.
+ * Copyright (C) 2020 Linaro Ltd.
+ *
+ * An MHUv2 mailbox controller can provide up to 124 channel windows (each 32
+ * bits wide) and the driver allows any combination of the two transport
+ * protocol modes: data-transfer and doorbell, to be used on those channel
+ * windows.
+ *
+ * The transport protocols should be specified in the device tree entry for the
+ * device. The transport protocols determine how the underlying hardware
+ * resources of the device are utilized when transmitting data. Refer to the
+ * device tree bindings of the ARM MHUv2 controller for more details.
+ *
+ * The number of registered mailbox channels depends both on the underlying
+ * hardware - mainly the number of channel windows implemented by the platform -
+ * and on the selected transport protocols.
+ *
+ * The MHUv2 controller can work both as a sender and receiver, but the driver
+ * and the DT bindings support unidirectional transfers for better allocation of
+ * the channels. That is, this driver will be probed for two separate devices
+ * for each mailbox controller, a sender device and a receiver device.
+ */
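+
+/*
+ * As a rough illustration (the DT binding is authoritative), the
+ * "arm,mhuv2-protocols" property is parsed below as a flat list of
+ * <protocol window-count> pairs, for example:
+ *
+ *   arm,mhuv2-protocols = <1 4>, <0 1>;
+ *
+ * i.e. one data-transfer channel spanning four channel windows, followed by
+ * one window operated in doorbell mode.
+ */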
+
+#include <linux/amba/bus.h>
+#include <linux/interrupt.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox/arm_mhuv2_message.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/spinlock.h>
+
+/* ====== MHUv2 Registers ====== */
+
+/* Maximum number of channel windows */
+#define MHUV2_CH_WN_MAX 124
+/* Number of combined interrupt status registers */
+#define MHUV2_CMB_INT_ST_REG_CNT 4
+#define MHUV2_STAT_BYTES (sizeof(u32))
+#define MHUV2_STAT_BITS (MHUV2_STAT_BYTES * __CHAR_BIT__)
+
+#define LSB_MASK(n) ((1 << (n * __CHAR_BIT__)) - 1)
+#define MHUV2_PROTOCOL_PROP "arm,mhuv2-protocols"
+
+/* Register Message Handling Unit Configuration fields */
+struct mhu_cfg_t {
+ u32 num_ch : 7;
+ u32 pad : 25;
+} __packed;
+
+/* Register Interrupt Status fields */
+struct int_st_t {
+ u32 nr2r : 1;
+ u32 r2nr : 1;
+ u32 pad : 30;
+} __packed;
+
+/* Register Interrupt Clear fields */
+struct int_clr_t {
+ u32 nr2r : 1;
+ u32 r2nr : 1;
+ u32 pad : 30;
+} __packed;
+
+/* Register Interrupt Enable fields */
+struct int_en_t {
+ u32 r2nr : 1;
+ u32 nr2r : 1;
+ u32 chcomb : 1;
+ u32 pad : 29;
+} __packed;
+
+/* Register Implementer Identification fields */
+struct iidr_t {
+ u32 implementer : 12;
+ u32 revision : 4;
+ u32 variant : 4;
+ u32 product_id : 12;
+} __packed;
+
+/* Register Architecture Identification Register fields */
+struct aidr_t {
+ u32 arch_minor_rev : 4;
+ u32 arch_major_rev : 4;
+ u32 pad : 24;
+} __packed;
+
+/* Sender Channel Window fields */
+struct mhu2_send_ch_wn_reg {
+ u32 stat;
+ u8 pad1[0x0C - 0x04];
+ u32 stat_set;
+ u32 int_st;
+ u32 int_clr;
+ u32 int_en;
+ u8 pad2[0x20 - 0x1C];
+} __packed;
+
+/* Sender frame register fields */
+struct mhu2_send_frame_reg {
+ struct mhu2_send_ch_wn_reg ch_wn[MHUV2_CH_WN_MAX];
+ struct mhu_cfg_t mhu_cfg;
+ u32 resp_cfg;
+ u32 access_request;
+ u32 access_ready;
+ struct int_st_t int_st;
+ struct int_clr_t int_clr;
+ struct int_en_t int_en;
+ u32 reserved0;
+ u32 chcomb_int_st[MHUV2_CMB_INT_ST_REG_CNT];
+ u8 pad[0xFC8 - 0xFB0];
+ struct iidr_t iidr;
+ struct aidr_t aidr;
+} __packed;
+
+/* Receiver Channel Window fields */
+struct mhu2_recv_ch_wn_reg {
+ u32 stat;
+ u32 stat_masked;
+ u32 stat_clear;
+ u8 reserved0[0x10 - 0x0C];
+ u32 mask;
+ u32 mask_set;
+ u32 mask_clear;
+ u8 pad[0x20 - 0x1C];
+} __packed;
+
+/* Receiver frame register fields */
+struct mhu2_recv_frame_reg {
+ struct mhu2_recv_ch_wn_reg ch_wn[MHUV2_CH_WN_MAX];
+ struct mhu_cfg_t mhu_cfg;
+ u8 reserved0[0xF90 - 0xF84];
+ struct int_st_t int_st;
+ struct int_clr_t int_clr;
+ struct int_en_t int_en;
+ u32 pad;
+ u32 chcomb_int_st[MHUV2_CMB_INT_ST_REG_CNT];
+ u8 reserved2[0xFC8 - 0xFB0];
+ struct iidr_t iidr;
+ struct aidr_t aidr;
+} __packed;
+
+
+/* ====== MHUv2 data structures ====== */
+
+enum mhuv2_transport_protocol {
+ DOORBELL = 0,
+ DATA_TRANSFER = 1
+};
+
+enum mhuv2_frame {
+ RECEIVER_FRAME,
+ SENDER_FRAME
+};
+
+/**
+ * struct mhuv2 - MHUv2 mailbox controller data
+ *
+ * @mbox: Mailbox controller belonging to the MHU frame.
+ * @send: Base address of the sender frame register map.
+ * @recv: Base address of the receiver frame register map.
+ * @frame: Frame type: RECEIVER_FRAME or SENDER_FRAME.
+ * @irq: Interrupt.
+ * @windows: Channel windows implemented by the platform.
+ * @minor: Minor version of the controller.
+ * @length: Length of the protocols array in bytes.
+ * @protocols: Raw protocol information, derived from device tree.
+ * @doorbell_pending_lock: spinlock required for correct operation of Tx
+ * interrupt for doorbells.
+ */
+struct mhuv2 {
+ struct mbox_controller mbox;
+ union {
+ struct mhu2_send_frame_reg __iomem *send;
+ struct mhu2_recv_frame_reg __iomem *recv;
+ };
+ enum mhuv2_frame frame;
+ unsigned int irq;
+ unsigned int windows;
+ unsigned int minor;
+ unsigned int length;
+ u32 *protocols;
+
+ spinlock_t doorbell_pending_lock;
+};
+
+#define mhu_from_mbox(_mbox) container_of(_mbox, struct mhuv2, mbox)
+
+/**
+ * struct mhuv2_protocol_ops - MHUv2 operations
+ *
+ * Each transport protocol must provide an implementation of the operations
+ * provided here.
+ *
+ * @rx_startup: Startup callback for receiver.
+ * @rx_shutdown: Shutdown callback for receiver.
+ * @read_data: Reads and clears newly available data.
+ * @tx_startup: Startup callback for sender.
+ * @tx_shutdown: Shutdown callback for sender.
+ * @last_tx_done: Report back if the last tx is completed or not.
+ * @send_data: Send data to the receiver.
+ */
+struct mhuv2_protocol_ops {
+ int (*rx_startup)(struct mhuv2 *mhu, struct mbox_chan *chan);
+ void (*rx_shutdown)(struct mhuv2 *mhu, struct mbox_chan *chan);
+ void *(*read_data)(struct mhuv2 *mhu, struct mbox_chan *chan);
+
+ void (*tx_startup)(struct mhuv2 *mhu, struct mbox_chan *chan);
+ void (*tx_shutdown)(struct mhuv2 *mhu, struct mbox_chan *chan);
+ int (*last_tx_done)(struct mhuv2 *mhu, struct mbox_chan *chan);
+ int (*send_data)(struct mhuv2 *mhu, struct mbox_chan *chan, void *arg);
+};
+
+/*
+ * MHUv2 mailbox channel's private information
+ *
+ * @ops: protocol specific ops for the channel.
+ * @ch_wn_idx: Channel window index allocated to the channel.
+ * @windows: Total number of windows consumed by the channel, only relevant
+ * in DATA_TRANSFER protocol.
+ * @doorbell: Doorbell bit number within the ch_wn_idx window, only relevant
+ * in DOORBELL protocol.
+ * @pending: Flag indicating pending doorbell interrupt, only relevant in
+ * DOORBELL protocol.
+ */
+struct mhuv2_mbox_chan_priv {
+ const struct mhuv2_protocol_ops *ops;
+ u32 ch_wn_idx;
+ union {
+ u32 windows;
+ struct {
+ u32 doorbell;
+ u32 pending;
+ };
+ };
+};
+
+/* Macro for reading a bitfield within a physically mapped packed struct */
+#define readl_relaxed_bitfield(_regptr, _type, _field) \
+ ({ \
+ u32 _regval; \
+ _regval = readl_relaxed((_regptr)); \
+ (*(_type *)(&_regval))._field; \
+ })
+
+/* Macro for writing a bitfield within a physically mapped packed struct */
+#define writel_relaxed_bitfield(_value, _regptr, _type, _field) \
+ ({ \
+ u32 _regval; \
+ _regval = readl_relaxed(_regptr); \
+ (*(_type *)(&_regval))._field = _value; \
+ writel_relaxed(_regval, _regptr); \
+ })
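+
+/*
+ * Example use (a sketch): reading the number of implemented channel windows
+ * from the MHU configuration register of a sender frame:
+ *
+ *   windows = readl_relaxed_bitfield(&mhu->send->mhu_cfg,
+ *                                    struct mhu_cfg_t, num_ch);
+ */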
+
+
+/* =================== Doorbell transport protocol operations =============== */
+
+static int mhuv2_doorbell_rx_startup(struct mhuv2 *mhu, struct mbox_chan *chan)
+{
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+
+ writel_relaxed(BIT(priv->doorbell),
+ &mhu->recv->ch_wn[priv->ch_wn_idx].mask_clear);
+ return 0;
+}
+
+static void mhuv2_doorbell_rx_shutdown(struct mhuv2 *mhu,
+ struct mbox_chan *chan)
+{
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+
+ writel_relaxed(BIT(priv->doorbell),
+ &mhu->recv->ch_wn[priv->ch_wn_idx].mask_set);
+}
+
+static void *mhuv2_doorbell_read_data(struct mhuv2 *mhu, struct mbox_chan *chan)
+{
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+
+ writel_relaxed(BIT(priv->doorbell),
+ &mhu->recv->ch_wn[priv->ch_wn_idx].stat_clear);
+ return NULL;
+}
+
+static int mhuv2_doorbell_last_tx_done(struct mhuv2 *mhu,
+ struct mbox_chan *chan)
+{
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+
+ return !(readl_relaxed(&mhu->send->ch_wn[priv->ch_wn_idx].stat) &
+ BIT(priv->doorbell));
+}
+
+static int mhuv2_doorbell_send_data(struct mhuv2 *mhu, struct mbox_chan *chan,
+ void *arg)
+{
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mhu->doorbell_pending_lock, flags);
+
+ priv->pending = 1;
+ writel_relaxed(BIT(priv->doorbell),
+ &mhu->send->ch_wn[priv->ch_wn_idx].stat_set);
+
+ spin_unlock_irqrestore(&mhu->doorbell_pending_lock, flags);
+
+ return 0;
+}
+
+static const struct mhuv2_protocol_ops mhuv2_doorbell_ops = {
+ .rx_startup = mhuv2_doorbell_rx_startup,
+ .rx_shutdown = mhuv2_doorbell_rx_shutdown,
+ .read_data = mhuv2_doorbell_read_data,
+ .last_tx_done = mhuv2_doorbell_last_tx_done,
+ .send_data = mhuv2_doorbell_send_data,
+};
+#define IS_PROTOCOL_DOORBELL(_priv) (_priv->ops == &mhuv2_doorbell_ops)
+
+/* ============= Data transfer transport protocol operations ================ */
+
+static int mhuv2_data_transfer_rx_startup(struct mhuv2 *mhu,
+ struct mbox_chan *chan)
+{
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+ int i = priv->ch_wn_idx + priv->windows - 1;
+
+ /*
+ * The protocol mandates that all but the last status register must be
+ * masked.
+ */
+ writel_relaxed(0xFFFFFFFF, &mhu->recv->ch_wn[i].mask_clear);
+ return 0;
+}
+
+static void mhuv2_data_transfer_rx_shutdown(struct mhuv2 *mhu,
+ struct mbox_chan *chan)
+{
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+ int i = priv->ch_wn_idx + priv->windows - 1;
+
+ writel_relaxed(0xFFFFFFFF, &mhu->recv->ch_wn[i].mask_set);
+}
+
+static void *mhuv2_data_transfer_read_data(struct mhuv2 *mhu,
+ struct mbox_chan *chan)
+{
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+ const int windows = priv->windows;
+ struct arm_mhuv2_mbox_msg *msg;
+ u32 *data;
+ int i, idx;
+
+ msg = kzalloc(sizeof(*msg) + windows * MHUV2_STAT_BYTES, GFP_KERNEL);
+ if (!msg)
+ return ERR_PTR(-ENOMEM);
+
+ data = msg->data = msg + 1;
+ msg->len = windows * MHUV2_STAT_BYTES;
+
+ /*
+ * Messages are expected in order of most significant word to least
+ * significant word. Refer to mhuv2_data_transfer_send_data() for more
+ * details.
+ *
+ * We also need to read the stat register instead of stat_masked, as we
+ * masked all but the last window.
+ *
+ * Last channel window must be cleared as the final operation. Upon
+ * clearing the last channel window register, which is unmasked in
+ * data-transfer protocol, the interrupt is de-asserted.
+ */
+ for (i = 0; i < windows; i++) {
+ idx = priv->ch_wn_idx + i;
+ data[windows - 1 - i] = readl_relaxed(&mhu->recv->ch_wn[idx].stat);
+ writel_relaxed(0xFFFFFFFF, &mhu->recv->ch_wn[idx].stat_clear);
+ }
+
+ return msg;
+}
+
+static void mhuv2_data_transfer_tx_startup(struct mhuv2 *mhu,
+ struct mbox_chan *chan)
+{
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+ int i = priv->ch_wn_idx + priv->windows - 1;
+
+ /* Enable interrupts only for the last window */
+ if (mhu->minor) {
+ writel_relaxed(0x1, &mhu->send->ch_wn[i].int_clr);
+ writel_relaxed(0x1, &mhu->send->ch_wn[i].int_en);
+ }
+}
+
+static void mhuv2_data_transfer_tx_shutdown(struct mhuv2 *mhu,
+ struct mbox_chan *chan)
+{
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+ int i = priv->ch_wn_idx + priv->windows - 1;
+
+ if (mhu->minor)
+ writel_relaxed(0x0, &mhu->send->ch_wn[i].int_en);
+}
+
+static int mhuv2_data_transfer_last_tx_done(struct mhuv2 *mhu,
+ struct mbox_chan *chan)
+{
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+ int i = priv->ch_wn_idx + priv->windows - 1;
+
+ /* Just checking the last channel window should be enough */
+ return !readl_relaxed(&mhu->send->ch_wn[i].stat);
+}
+
+/*
+ * Message will be transmitted from most significant to least significant word.
+ * This is to allow for messages shorter than channel windows to still trigger
+ * the receiver interrupt which gets activated when the last stat register is
+ * written. As an example, a 6-word message is to be written on a 4-channel MHU
+ * connection: Registers marked with '*' are masked, and will not generate an
+ * interrupt on the receiver side once written.
+ *
+ * u32 *data = [0x00000001], [0x00000002], [0x00000003], [0x00000004],
+ * [0x00000005], [0x00000006]
+ *
+ * ROUND 1:
+ * stat reg To write Write sequence
+ * [ stat 3 ] <- [0x00000001] 4 <- triggers interrupt on receiver
+ * [ stat 2 ] <- [0x00000002] 3
+ * [ stat 1 ] <- [0x00000003] 2
+ * [ stat 0 ] <- [0x00000004] 1
+ *
+ * data += 4 // Increment data pointer by number of stat regs
+ *
+ * ROUND 2:
+ * stat reg To write Write sequence
+ * [ stat 3 ] <- [0x00000005] 2 <- triggers interrupt on receiver
+ * [ stat 2 ] <- [0x00000006] 1
+ * [ stat 1 ] <- [0x00000000]
+ * [ stat 0 ] <- [0x00000000]
+ */
+static int mhuv2_data_transfer_send_data(struct mhuv2 *mhu,
+ struct mbox_chan *chan, void *arg)
+{
+ const struct arm_mhuv2_mbox_msg *msg = arg;
+ int bytes_left = msg->len, bytes_to_send, bytes_in_round, i;
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+ int windows = priv->windows;
+ u32 *data = msg->data, word;
+
+ while (bytes_left) {
+ if (!data[0]) {
+ dev_err(mhu->mbox.dev, "Data aligned at first window can't be zero to guarantee interrupt generation at receiver");
+ return -EINVAL;
+ }
+
+ while (!mhuv2_data_transfer_last_tx_done(mhu, chan))
+ continue;
+
+ bytes_in_round = min(bytes_left, (int)(windows * MHUV2_STAT_BYTES));
+
+ for (i = windows - 1; i >= 0; i--) {
+ /* Is there no data left for this window in this round? */
+ if (unlikely(bytes_in_round <= i * MHUV2_STAT_BYTES))
+ continue;
+
+ word = data[i];
+ bytes_to_send = bytes_in_round & (MHUV2_STAT_BYTES - 1);
+ if (unlikely(bytes_to_send))
+ word &= LSB_MASK(bytes_to_send);
+ else
+ bytes_to_send = MHUV2_STAT_BYTES;
+
+ writel_relaxed(word, &mhu->send->ch_wn[priv->ch_wn_idx + windows - 1 - i].stat_set);
+ bytes_left -= bytes_to_send;
+ bytes_in_round -= bytes_to_send;
+ }
+
+ data += windows;
+ }
+
+ return 0;
+}
+
+static const struct mhuv2_protocol_ops mhuv2_data_transfer_ops = {
+ .rx_startup = mhuv2_data_transfer_rx_startup,
+ .rx_shutdown = mhuv2_data_transfer_rx_shutdown,
+ .read_data = mhuv2_data_transfer_read_data,
+ .tx_startup = mhuv2_data_transfer_tx_startup,
+ .tx_shutdown = mhuv2_data_transfer_tx_shutdown,
+ .last_tx_done = mhuv2_data_transfer_last_tx_done,
+ .send_data = mhuv2_data_transfer_send_data,
+};
+
+/* Interrupt handlers */
+
+static struct mbox_chan *get_irq_chan_comb(struct mhuv2 *mhu, u32 __iomem *reg)
+{
+ struct mbox_chan *chans = mhu->mbox.chans;
+ int channel = 0, i, offset = 0, windows, protocol, ch_wn;
+ u32 stat;
+
+ for (i = 0; i < MHUV2_CMB_INT_ST_REG_CNT; i++) {
+ stat = readl_relaxed(reg + i);
+ if (!stat)
+ continue;
+
+ ch_wn = i * MHUV2_STAT_BITS + __builtin_ctz(stat);
+
+ for (i = 0; i < mhu->length; i += 2) {
+ protocol = mhu->protocols[i];
+ windows = mhu->protocols[i + 1];
+
+ if (ch_wn >= offset + windows) {
+ if (protocol == DOORBELL)
+ channel += MHUV2_STAT_BITS * windows;
+ else
+ channel++;
+
+ offset += windows;
+ continue;
+ }
+
+ /* Return first chan of the window in doorbell mode */
+ if (protocol == DOORBELL)
+ channel += MHUV2_STAT_BITS * (ch_wn - offset);
+
+ return &chans[channel];
+ }
+ }
+
+ return ERR_PTR(-EIO);
+}
+
+static irqreturn_t mhuv2_sender_interrupt(int irq, void *data)
+{
+ struct mhuv2 *mhu = data;
+ struct device *dev = mhu->mbox.dev;
+ struct mhuv2_mbox_chan_priv *priv;
+ struct mbox_chan *chan;
+ unsigned long flags;
+ int i, found = 0;
+ u32 stat;
+
+ chan = get_irq_chan_comb(mhu, mhu->send->chcomb_int_st);
+ if (IS_ERR(chan)) {
+ dev_warn(dev, "Failed to find channel for the Tx interrupt\n");
+ return IRQ_NONE;
+ }
+ priv = chan->con_priv;
+
+ if (!IS_PROTOCOL_DOORBELL(priv)) {
+ writel_relaxed(1, &mhu->send->ch_wn[priv->ch_wn_idx + priv->windows - 1].int_clr);
+
+ if (chan->cl) {
+ mbox_chan_txdone(chan, 0);
+ return IRQ_HANDLED;
+ }
+
+ dev_warn(dev, "Tx interrupt Received on channel (%u) not currently attached to a mailbox client\n",
+ priv->ch_wn_idx);
+ return IRQ_NONE;
+ }
+
+ /* Clear the interrupt first, so we don't miss any doorbell later */
+ writel_relaxed(1, &mhu->send->ch_wn[priv->ch_wn_idx].int_clr);
+
+ /*
+ * In Doorbell mode, make sure no new transitions happen while the
+ * interrupt handler is trying to find the finished doorbell tx
+ * operations, else we may think a few of the transfers were complete
+ * before they actually were.
+ */
+ spin_lock_irqsave(&mhu->doorbell_pending_lock, flags);
+
+ /*
+ * In case of doorbell mode, the first channel of the window is returned
+ * by get_irq_chan_comb(). Find all the pending channels here.
+ */
+ stat = readl_relaxed(&mhu->send->ch_wn[priv->ch_wn_idx].stat);
+
+ for (i = 0; i < MHUV2_STAT_BITS; i++) {
+ priv = chan[i].con_priv;
+
+ /* Find cases where pending was 1, but stat's bit is cleared */
+ if (priv->pending ^ ((stat >> i) & 0x1)) {
+ BUG_ON(!priv->pending);
+
+ if (!chan[i].cl) {
+ dev_warn(dev, "Tx interrupt received on doorbell (%u : %u) channel not currently attached to a mailbox client\n",
+ priv->ch_wn_idx, i);
+ continue;
+ }
+
+ mbox_chan_txdone(&chan[i], 0);
+ priv->pending = 0;
+ found++;
+ }
+ }
+
+ spin_unlock_irqrestore(&mhu->doorbell_pending_lock, flags);
+
+ if (!found) {
+ /*
+ * We may have already processed the doorbell in the previous
+ * iteration if the interrupt came right after we cleared it but
+ * before we read the stat register.
+ */
+ dev_dbg(dev, "Couldn't find the doorbell (%u) for the Tx interrupt interrupt\n",
+ priv->ch_wn_idx);
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
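+
+/*
+ * Worked example for the doorbell scan above (editorial addition): viewed as
+ * bitmaps over one window, the logic is equivalent to
+ *
+ *	u32 pending = BIT(0) | BIT(3) | BIT(5);	// doorbells with a tx in flight
+ *	u32 stat = BIT(3);			// receiver has not cleared 3 yet
+ *	u32 done = pending & ~stat;		// == BIT(0) | BIT(5)
+ *
+ * so only doorbells 0 and 5 get mbox_chan_txdone() here, while doorbell 3
+ * stays pending until a later interrupt. The per-channel priv->pending flags
+ * play the role of the "pending" bitmap.
+ */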
+
+static struct mbox_chan *get_irq_chan_comb_rx(struct mhuv2 *mhu)
+{
+ struct mhuv2_mbox_chan_priv *priv;
+ struct mbox_chan *chan;
+ u32 stat;
+
+ chan = get_irq_chan_comb(mhu, mhu->recv->chcomb_int_st);
+ if (IS_ERR(chan))
+ return chan;
+
+ priv = chan->con_priv;
+ if (!IS_PROTOCOL_DOORBELL(priv))
+ return chan;
+
+ /*
+ * In case of doorbell mode, the first channel of the window is returned
+ * by the routine. Find the exact channel here.
+ */
+ stat = readl_relaxed(&mhu->recv->ch_wn[priv->ch_wn_idx].stat_masked);
+ BUG_ON(!stat);
+
+ return chan + __builtin_ctz(stat);
+}
+
+static struct mbox_chan *get_irq_chan_stat_rx(struct mhuv2 *mhu)
+{
+ struct mbox_chan *chans = mhu->mbox.chans;
+ struct mhuv2_mbox_chan_priv *priv;
+ u32 stat;
+ int i = 0;
+
+ while (i < mhu->mbox.num_chans) {
+ priv = chans[i].con_priv;
+ stat = readl_relaxed(&mhu->recv->ch_wn[priv->ch_wn_idx].stat_masked);
+
+ if (stat) {
+ if (IS_PROTOCOL_DOORBELL(priv))
+ i += __builtin_ctz(stat);
+ return &chans[i];
+ }
+
+ i += IS_PROTOCOL_DOORBELL(priv) ? MHUV2_STAT_BITS : 1;
+ }
+
+ return ERR_PTR(-EIO);
+}
+
+static struct mbox_chan *get_irq_chan_rx(struct mhuv2 *mhu)
+{
+ if (!mhu->minor)
+ return get_irq_chan_stat_rx(mhu);
+
+ return get_irq_chan_comb_rx(mhu);
+}
+
+static irqreturn_t mhuv2_receiver_interrupt(int irq, void *arg)
+{
+ struct mhuv2 *mhu = arg;
+ struct mbox_chan *chan = get_irq_chan_rx(mhu);
+ struct device *dev = mhu->mbox.dev;
+ struct mhuv2_mbox_chan_priv *priv;
+ int ret = IRQ_NONE;
+ void *data;
+
+ if (IS_ERR(chan)) {
+ dev_warn(dev, "Failed to find channel for the rx interrupt\n");
+ return IRQ_NONE;
+ }
+ priv = chan->con_priv;
+
+ /* Read and clear the data first */
+ data = priv->ops->read_data(mhu, chan);
+
+ if (!chan->cl) {
+ dev_warn(dev, "Received data on channel (%u) not currently attached to a mailbox client\n",
+ priv->ch_wn_idx);
+ } else if (IS_ERR(data)) {
+ dev_err(dev, "Failed to read data: %lu\n", PTR_ERR(data));
+ } else {
+ mbox_chan_received_data(chan, data);
+ ret = IRQ_HANDLED;
+ }
+
+ if (!IS_ERR(data))
+ kfree(data);
+
+ return ret;
+}
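+
+/*
+ * Illustrative receive-side sketch (editorial addition): the message passed
+ * to mbox_chan_received_data() above is kfree()d right after the client
+ * callback returns, so a client must copy whatever it needs inside its
+ * rx_callback. This assumes a data-transfer channel, where the callback
+ * argument is a struct arm_mhuv2_mbox_msg; demo_rx() and demo_buf are
+ * hypothetical.
+ *
+ *	static u8 demo_buf[64];
+ *
+ *	static void demo_rx(struct mbox_client *cl, void *mssg)
+ *	{
+ *		struct arm_mhuv2_mbox_msg *msg = mssg;
+ *		size_t len = min_t(size_t, msg->len, sizeof(demo_buf));
+ *
+ *		memcpy(demo_buf, msg->data, len);	// msg is freed after return
+ *		dev_info(cl->dev, "received %zu bytes\n", msg->len);
+ *	}
+ *
+ *	static struct mbox_client demo_cl = {
+ *		.rx_callback = demo_rx,
+ *	};
+ */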
+
+/* Sender and receiver ops */
+static bool mhuv2_sender_last_tx_done(struct mbox_chan *chan)
+{
+ struct mhuv2 *mhu = mhu_from_mbox(chan->mbox);
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+
+ return priv->ops->last_tx_done(mhu, chan);
+}
+
+static int mhuv2_sender_send_data(struct mbox_chan *chan, void *data)
+{
+ struct mhuv2 *mhu = mhu_from_mbox(chan->mbox);
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+
+ if (!priv->ops->last_tx_done(mhu, chan))
+ return -EBUSY;
+
+ return priv->ops->send_data(mhu, chan, data);
+}
+
+static int mhuv2_sender_startup(struct mbox_chan *chan)
+{
+ struct mhuv2 *mhu = mhu_from_mbox(chan->mbox);
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+
+ if (priv->ops->tx_startup)
+ priv->ops->tx_startup(mhu, chan);
+ return 0;
+}
+
+static void mhuv2_sender_shutdown(struct mbox_chan *chan)
+{
+ struct mhuv2 *mhu = mhu_from_mbox(chan->mbox);
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+
+ if (priv->ops->tx_shutdown)
+ priv->ops->tx_shutdown(mhu, chan);
+}
+
+static const struct mbox_chan_ops mhuv2_sender_ops = {
+ .send_data = mhuv2_sender_send_data,
+ .startup = mhuv2_sender_startup,
+ .shutdown = mhuv2_sender_shutdown,
+ .last_tx_done = mhuv2_sender_last_tx_done,
+};
+
+static int mhuv2_receiver_startup(struct mbox_chan *chan)
+{
+ struct mhuv2 *mhu = mhu_from_mbox(chan->mbox);
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+
+ return priv->ops->rx_startup(mhu, chan);
+}
+
+static void mhuv2_receiver_shutdown(struct mbox_chan *chan)
+{
+ struct mhuv2 *mhu = mhu_from_mbox(chan->mbox);
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+
+ priv->ops->rx_shutdown(mhu, chan);
+}
+
+static int mhuv2_receiver_send_data(struct mbox_chan *chan, void *data)
+{
+ dev_err(chan->mbox->dev,
+ "Trying to transmit on a receiver MHU frame\n");
+ return -EIO;
+}
+
+static bool mhuv2_receiver_last_tx_done(struct mbox_chan *chan)
+{
+ dev_err(chan->mbox->dev, "Trying to Tx poll on a receiver MHU frame\n");
+ return true;
+}
+
+static const struct mbox_chan_ops mhuv2_receiver_ops = {
+ .send_data = mhuv2_receiver_send_data,
+ .startup = mhuv2_receiver_startup,
+ .shutdown = mhuv2_receiver_shutdown,
+ .last_tx_done = mhuv2_receiver_last_tx_done,
+};
+
+static struct mbox_chan *mhuv2_mbox_of_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *pa)
+{
+ struct mhuv2 *mhu = mhu_from_mbox(mbox);
+ struct mbox_chan *chans = mbox->chans;
+ int channel = 0, i, offset, doorbell, protocol, windows;
+
+ if (pa->args_count != 2)
+ return ERR_PTR(-EINVAL);
+
+ offset = pa->args[0];
+ doorbell = pa->args[1];
+ if (doorbell >= MHUV2_STAT_BITS)
+ goto out;
+
+ for (i = 0; i < mhu->length; i += 2) {
+ protocol = mhu->protocols[i];
+ windows = mhu->protocols[i + 1];
+
+ if (protocol == DOORBELL) {
+ if (offset < windows)
+ return &chans[channel + MHUV2_STAT_BITS * offset + doorbell];
+
+ channel += MHUV2_STAT_BITS * windows;
+ offset -= windows;
+ } else {
+ if (offset == 0) {
+ if (doorbell)
+ goto out;
+
+ return &chans[channel];
+ }
+
+ channel++;
+ offset--;
+ }
+ }
+
+out:
+ dev_err(mbox->dev, "Couldn't xlate to a valid channel (%d: %d)\n",
+ pa->args[0], doorbell);
+ return ERR_PTR(-ENODEV);
+}
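+
+/*
+ * Worked example for the two-cell mbox specifier above (editorial addition,
+ * example values are hypothetical): with a protocols property describing
+ *
+ *	<DOORBELL 2>, <DATA_TRANSFER 4>, <DOORBELL 1>
+ *
+ * the controller exposes 32 * 2 + 1 + 32 * 1 = 97 channels (MHUV2_STAT_BITS
+ * doorbells per window). A client specifier <offset doorbell> then maps as
+ *
+ *	<0 5>  -> channel 5	(doorbell 5 of the first doorbell window)
+ *	<1 3>  -> channel 35	(doorbell 3 of the second doorbell window)
+ *	<2 0>  -> channel 64	(the single data-transfer channel)
+ *	<3 7>  -> channel 72	(doorbell 7 of the last doorbell window)
+ *
+ * For data-transfer entries the doorbell cell must be 0, and each such entry
+ * consumes exactly one offset regardless of how many windows it spans.
+ */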
+
+static int mhuv2_verify_protocol(struct mhuv2 *mhu)
+{
+ struct device *dev = mhu->mbox.dev;
+ int protocol, windows, channels = 0, total_windows = 0, i;
+
+ for (i = 0; i < mhu->length; i += 2) {
+ protocol = mhu->protocols[i];
+ windows = mhu->protocols[i + 1];
+
+ if (!windows) {
+ dev_err(dev, "Window size can't be zero (%d)\n", i);
+ return -EINVAL;
+ }
+ total_windows += windows;
+
+ if (protocol == DOORBELL) {
+ channels += MHUV2_STAT_BITS * windows;
+ } else if (protocol == DATA_TRANSFER) {
+ channels++;
+ } else {
+ dev_err(dev, "Invalid protocol (%d) present in %s property at index %d\n",
+ protocol, MHUV2_PROTOCOL_PROP, i);
+ return -EINVAL;
+ }
+ }
+
+ if (total_windows > mhu->windows) {
+ dev_err(dev, "Channel windows can't be more than what's implemented by the hardware ( %d: %d)\n",
+ total_windows, mhu->windows);
+ return -EINVAL;
+ }
+
+ mhu->mbox.num_chans = channels;
+ return 0;
+}
+
+static int mhuv2_allocate_channels(struct mhuv2 *mhu)
+{
+ struct mbox_controller *mbox = &mhu->mbox;
+ struct mhuv2_mbox_chan_priv *priv;
+ struct device *dev = mbox->dev;
+ struct mbox_chan *chans;
+ int protocol, windows = 0, next_window = 0, i, j, k;
+
+ chans = devm_kcalloc(dev, mbox->num_chans, sizeof(*chans), GFP_KERNEL);
+ if (!chans)
+ return -ENOMEM;
+
+ mbox->chans = chans;
+
+ for (i = 0; i < mhu->length; i += 2) {
+ next_window += windows;
+
+ protocol = mhu->protocols[i];
+ windows = mhu->protocols[i + 1];
+
+ if (protocol == DATA_TRANSFER) {
+ priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->ch_wn_idx = next_window;
+ priv->ops = &mhuv2_data_transfer_ops;
+ priv->windows = windows;
+ chans++->con_priv = priv;
+ continue;
+ }
+
+ for (j = 0; j < windows; j++) {
+ for (k = 0; k < MHUV2_STAT_BITS; k++) {
+ priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->ch_wn_idx = next_window + j;
+ priv->ops = &mhuv2_doorbell_ops;
+ priv->doorbell = k;
+ chans++->con_priv = priv;
+ }
+
+ /*
+ * Permanently enable interrupt as we can't
+ * control it per doorbell.
+ */
+ if (mhu->frame == SENDER_FRAME && mhu->minor)
+ writel_relaxed(0x1, &mhu->send->ch_wn[priv->ch_wn_idx].int_en);
+ }
+ }
+
+ /* Make sure we have initialized all channels */
+ BUG_ON(chans - mbox->chans != mbox->num_chans);
+
+ return 0;
+}
+
+static int mhuv2_parse_channels(struct mhuv2 *mhu)
+{
+ struct device *dev = mhu->mbox.dev;
+ const struct device_node *np = dev->of_node;
+ int ret, count;
+ u32 *protocols;
+
+ count = of_property_count_u32_elems(np, MHUV2_PROTOCOL_PROP);
+ if (count <= 0 || count % 2) {
+ dev_err(dev, "Invalid %s property (%d)\n", MHUV2_PROTOCOL_PROP,
+ count);
+ return -EINVAL;
+ }
+
+ protocols = devm_kmalloc_array(dev, count, sizeof(*protocols), GFP_KERNEL);
+ if (!protocols)
+ return -ENOMEM;
+
+ ret = of_property_read_u32_array(np, MHUV2_PROTOCOL_PROP, protocols, count);
+ if (ret) {
+ dev_err(dev, "Failed to read %s property: %d\n",
+ MHUV2_PROTOCOL_PROP, ret);
+ return ret;
+ }
+
+ mhu->protocols = protocols;
+ mhu->length = count;
+
+ ret = mhuv2_verify_protocol(mhu);
+ if (ret)
+ return ret;
+
+ return mhuv2_allocate_channels(mhu);
+}
+
+static int mhuv2_tx_init(struct amba_device *adev, struct mhuv2 *mhu,
+ void __iomem *reg)
+{
+ struct device *dev = mhu->mbox.dev;
+ int ret, i;
+
+ mhu->frame = SENDER_FRAME;
+ mhu->mbox.ops = &mhuv2_sender_ops;
+ mhu->send = reg;
+
+ mhu->windows = readl_relaxed_bitfield(&mhu->send->mhu_cfg, struct mhu_cfg_t, num_ch);
+ mhu->minor = readl_relaxed_bitfield(&mhu->send->aidr, struct aidr_t, arch_minor_rev);
+
+ spin_lock_init(&mhu->doorbell_pending_lock);
+
+ /*
+ * From minor version 1 onward, the tx interrupt is provided by
+ * the controller.
+ */
+ if (mhu->minor && adev->irq[0]) {
+ ret = devm_request_threaded_irq(dev, adev->irq[0], NULL,
+ mhuv2_sender_interrupt,
+ IRQF_ONESHOT, "mhuv2-tx", mhu);
+ if (ret) {
+ dev_err(dev, "Failed to request tx IRQ, fallback to polling mode: %d\n",
+ ret);
+ } else {
+ mhu->mbox.txdone_irq = true;
+ mhu->mbox.txdone_poll = false;
+ mhu->irq = adev->irq[0];
+
+ writel_relaxed_bitfield(1, &mhu->send->int_en, struct int_en_t, chcomb);
+
+ /* Disable all channel interrupts */
+ for (i = 0; i < mhu->windows; i++)
+ writel_relaxed(0x0, &mhu->send->ch_wn[i].int_en);
+
+ goto out;
+ }
+ }
+
+ mhu->mbox.txdone_irq = false;
+ mhu->mbox.txdone_poll = true;
+ mhu->mbox.txpoll_period = 1;
+
+out:
+ /* Wait for receiver to be ready */
+ writel_relaxed(0x1, &mhu->send->access_request);
+ while (!readl_relaxed(&mhu->send->access_ready))
+ continue;
+
+ return 0;
+}
+
+static int mhuv2_rx_init(struct amba_device *adev, struct mhuv2 *mhu,
+ void __iomem *reg)
+{
+ struct device *dev = mhu->mbox.dev;
+ int ret, i;
+
+ mhu->frame = RECEIVER_FRAME;
+ mhu->mbox.ops = &mhuv2_receiver_ops;
+ mhu->recv = reg;
+
+ mhu->windows = readl_relaxed_bitfield(&mhu->recv->mhu_cfg, struct mhu_cfg_t, num_ch);
+ mhu->minor = readl_relaxed_bitfield(&mhu->recv->aidr, struct aidr_t, arch_minor_rev);
+
+ mhu->irq = adev->irq[0];
+ if (!mhu->irq) {
+ dev_err(dev, "Missing receiver IRQ\n");
+ return -EINVAL;
+ }
+
+ ret = devm_request_threaded_irq(dev, mhu->irq, NULL,
+ mhuv2_receiver_interrupt, IRQF_ONESHOT,
+ "mhuv2-rx", mhu);
+ if (ret) {
+ dev_err(dev, "Failed to request rx IRQ\n");
+ return ret;
+ }
+
+ /* Mask all the channel windows */
+ for (i = 0; i < mhu->windows; i++)
+ writel_relaxed(0xFFFFFFFF, &mhu->recv->ch_wn[i].mask_set);
+
+ if (mhu->minor)
+ writel_relaxed_bitfield(1, &mhu->recv->int_en, struct int_en_t, chcomb);
+
+ return 0;
+}
+
+static int mhuv2_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ struct device *dev = &adev->dev;
+ const struct device_node *np = dev->of_node;
+ struct mhuv2 *mhu;
+ void __iomem *reg;
+ int ret = -EINVAL;
+
+ reg = devm_of_iomap(dev, dev->of_node, 0, NULL);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ mhu = devm_kzalloc(dev, sizeof(*mhu), GFP_KERNEL);
+ if (!mhu)
+ return -ENOMEM;
+
+ mhu->mbox.dev = dev;
+ mhu->mbox.of_xlate = mhuv2_mbox_of_xlate;
+
+ if (of_device_is_compatible(np, "arm,mhuv2-tx"))
+ ret = mhuv2_tx_init(adev, mhu, reg);
+ else if (of_device_is_compatible(np, "arm,mhuv2-rx"))
+ ret = mhuv2_rx_init(adev, mhu, reg);
+ else
+ dev_err(dev, "Invalid compatible property\n");
+
+ if (ret)
+ return ret;
+
+ /* Channel windows can't be 0 */
+ BUG_ON(!mhu->windows);
+
+ ret = mhuv2_parse_channels(mhu);
+ if (ret)
+ return ret;
+
+ amba_set_drvdata(adev, mhu);
+
+ ret = devm_mbox_controller_register(dev, &mhu->mbox);
+ if (ret)
+ dev_err(dev, "failed to register ARM MHUv2 driver %d\n", ret);
+
+ return ret;
+}
+
+static void mhuv2_remove(struct amba_device *adev)
+{
+ struct mhuv2 *mhu = amba_get_drvdata(adev);
+
+ if (mhu->frame == SENDER_FRAME)
+ writel_relaxed(0x0, &mhu->send->access_request);
+}
+
+static struct amba_id mhuv2_ids[] = {
+ {
+ /* 2.0 */
+ .id = 0xbb0d1,
+ .mask = 0xfffff,
+ },
+ {
+ /* 2.1 */
+ .id = 0xbb076,
+ .mask = 0xfffff,
+ },
+ { 0, 0 },
+};
+MODULE_DEVICE_TABLE(amba, mhuv2_ids);
+
+static struct amba_driver mhuv2_driver = {
+ .drv = {
+ .name = "arm-mhuv2",
+ },
+ .id_table = mhuv2_ids,
+ .probe = mhuv2_probe,
+ .remove = mhuv2_remove,
+};
+module_amba_driver(mhuv2_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ARM MHUv2 Driver");
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
+MODULE_AUTHOR("Tushar Khandelwal <tushar.khandelwal@arm.com>");
diff --git a/drivers/mailbox/armada-37xx-rwtm-mailbox.c b/drivers/mailbox/armada-37xx-rwtm-mailbox.c
new file mode 100644
index 000000000..456a117a6
--- /dev/null
+++ b/drivers/mailbox/armada-37xx-rwtm-mailbox.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * rWTM BIU Mailbox driver for Armada 37xx
+ *
+ * Author: Marek Behún <kabel@kernel.org>
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/armada-37xx-rwtm-mailbox.h>
+
+#define DRIVER_NAME "armada-37xx-rwtm-mailbox"
+
+/* relative to rWTM BIU Mailbox Registers */
+#define RWTM_MBOX_PARAM(i) (0x0 + ((i) << 2))
+#define RWTM_MBOX_COMMAND 0x40
+#define RWTM_MBOX_RETURN_STATUS 0x80
+#define RWTM_MBOX_STATUS(i) (0x84 + ((i) << 2))
+#define RWTM_MBOX_FIFO_STATUS 0xc4
+#define FIFO_STS_RDY 0x100
+#define FIFO_STS_CNTR_MASK 0x7
+#define FIFO_STS_CNTR_MAX 4
+
+#define RWTM_HOST_INT_RESET 0xc8
+#define RWTM_HOST_INT_MASK 0xcc
+#define SP_CMD_COMPLETE BIT(0)
+#define SP_CMD_QUEUE_FULL_ACCESS BIT(17)
+#define SP_CMD_QUEUE_FULL BIT(18)
+
+struct a37xx_mbox {
+ struct device *dev;
+ struct mbox_controller controller;
+ void __iomem *base;
+ int irq;
+};
+
+static void a37xx_mbox_receive(struct mbox_chan *chan)
+{
+ struct a37xx_mbox *mbox = chan->con_priv;
+ struct armada_37xx_rwtm_rx_msg rx_msg;
+ int i;
+
+ rx_msg.retval = readl(mbox->base + RWTM_MBOX_RETURN_STATUS);
+ for (i = 0; i < 16; ++i)
+ rx_msg.status[i] = readl(mbox->base + RWTM_MBOX_STATUS(i));
+
+ mbox_chan_received_data(chan, &rx_msg);
+}
+
+static irqreturn_t a37xx_mbox_irq_handler(int irq, void *data)
+{
+ struct mbox_chan *chan = data;
+ struct a37xx_mbox *mbox = chan->con_priv;
+ u32 reg;
+
+ reg = readl(mbox->base + RWTM_HOST_INT_RESET);
+
+ if (reg & SP_CMD_COMPLETE)
+ a37xx_mbox_receive(chan);
+
+ if (reg & (SP_CMD_QUEUE_FULL_ACCESS | SP_CMD_QUEUE_FULL))
+ dev_err(mbox->dev, "Secure processor command queue full\n");
+
+ writel(reg, mbox->base + RWTM_HOST_INT_RESET);
+ if (reg)
+ mbox_chan_txdone(chan, 0);
+
+ return reg ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int a37xx_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct a37xx_mbox *mbox = chan->con_priv;
+ struct armada_37xx_rwtm_tx_msg *msg = data;
+ int i;
+ u32 reg;
+
+ if (!data)
+ return -EINVAL;
+
+ reg = readl(mbox->base + RWTM_MBOX_FIFO_STATUS);
+ if (!(reg & FIFO_STS_RDY))
+ dev_warn(mbox->dev, "Secure processor not ready\n");
+
+ if ((reg & FIFO_STS_CNTR_MASK) >= FIFO_STS_CNTR_MAX) {
+ dev_err(mbox->dev, "Secure processor command queue full\n");
+ return -EBUSY;
+ }
+
+ for (i = 0; i < 16; ++i)
+ writel(msg->args[i], mbox->base + RWTM_MBOX_PARAM(i));
+ writel(msg->command, mbox->base + RWTM_MBOX_COMMAND);
+
+ return 0;
+}
+
+static int a37xx_mbox_startup(struct mbox_chan *chan)
+{
+ struct a37xx_mbox *mbox = chan->con_priv;
+ u32 reg;
+ int ret;
+
+ ret = devm_request_irq(mbox->dev, mbox->irq, a37xx_mbox_irq_handler, 0,
+ DRIVER_NAME, chan);
+ if (ret < 0) {
+ dev_err(mbox->dev, "Cannot request irq\n");
+ return ret;
+ }
+
+ /* enable IRQ generation */
+ reg = readl(mbox->base + RWTM_HOST_INT_MASK);
+ reg &= ~(SP_CMD_COMPLETE | SP_CMD_QUEUE_FULL_ACCESS | SP_CMD_QUEUE_FULL);
+ writel(reg, mbox->base + RWTM_HOST_INT_MASK);
+
+ return 0;
+}
+
+static void a37xx_mbox_shutdown(struct mbox_chan *chan)
+{
+ u32 reg;
+ struct a37xx_mbox *mbox = chan->con_priv;
+
+ /* disable interrupt generation */
+ reg = readl(mbox->base + RWTM_HOST_INT_MASK);
+ reg |= SP_CMD_COMPLETE | SP_CMD_QUEUE_FULL_ACCESS | SP_CMD_QUEUE_FULL;
+ writel(reg, mbox->base + RWTM_HOST_INT_MASK);
+
+ devm_free_irq(mbox->dev, mbox->irq, chan);
+}
+
+static const struct mbox_chan_ops a37xx_mbox_ops = {
+ .send_data = a37xx_mbox_send_data,
+ .startup = a37xx_mbox_startup,
+ .shutdown = a37xx_mbox_shutdown,
+};
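+
+/*
+ * Illustrative client-side sketch (editorial addition): issuing one command
+ * to the secure processor. The request is a struct armada_37xx_rwtm_tx_msg
+ * (command plus 16 argument words, written by a37xx_mbox_send_data() above)
+ * and the reply arrives through the client's rx_callback as a struct
+ * armada_37xx_rwtm_rx_msg, as filled in by a37xx_mbox_receive(). demo_cmd(),
+ * DEMO_CMD and demo_rx() are hypothetical names.
+ *
+ *	static void demo_rx(struct mbox_client *cl, void *data)
+ *	{
+ *		struct armada_37xx_rwtm_rx_msg *rx = data;
+ *
+ *		dev_info(cl->dev, "retval 0x%08x status[0] 0x%08x\n",
+ *			 rx->retval, rx->status[0]);
+ *	}
+ *
+ *	static int demo_cmd(struct device *dev)
+ *	{
+ *		struct armada_37xx_rwtm_tx_msg tx = { .command = DEMO_CMD };
+ *		struct mbox_client cl = {
+ *			.dev = dev,
+ *			.rx_callback = demo_rx,
+ *			.tx_block = true,	// wait for the txdone interrupt
+ *		};
+ *		struct mbox_chan *chan;
+ *		int ret;
+ *
+ *		chan = mbox_request_channel(&cl, 0);
+ *		if (IS_ERR(chan))
+ *			return PTR_ERR(chan);
+ *
+ *		ret = mbox_send_message(chan, &tx);
+ *		mbox_free_channel(chan);
+ *		return ret < 0 ? ret : 0;
+ *	}
+ */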
+
+static int armada_37xx_mbox_probe(struct platform_device *pdev)
+{
+ struct a37xx_mbox *mbox;
+ struct mbox_chan *chans;
+ int ret;
+
+ mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ /* Allocate the single channel */
+ chans = devm_kzalloc(&pdev->dev, sizeof(*chans), GFP_KERNEL);
+ if (!chans)
+ return -ENOMEM;
+
+ mbox->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mbox->base))
+ return PTR_ERR(mbox->base);
+
+ mbox->irq = platform_get_irq(pdev, 0);
+ if (mbox->irq < 0)
+ return mbox->irq;
+
+ mbox->dev = &pdev->dev;
+
+ /* Hardware supports only one channel. */
+ chans[0].con_priv = mbox;
+ mbox->controller.dev = mbox->dev;
+ mbox->controller.num_chans = 1;
+ mbox->controller.chans = chans;
+ mbox->controller.ops = &a37xx_mbox_ops;
+ mbox->controller.txdone_irq = true;
+
+ ret = devm_mbox_controller_register(mbox->dev, &mbox->controller);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not register mailbox controller\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, mbox);
+ return ret;
+}
+
+
+static const struct of_device_id armada_37xx_mbox_match[] = {
+ { .compatible = "marvell,armada-3700-rwtm-mailbox" },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, armada_37xx_mbox_match);
+
+static struct platform_driver armada_37xx_mbox_driver = {
+ .probe = armada_37xx_mbox_probe,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = armada_37xx_mbox_match,
+ },
+};
+
+module_platform_driver(armada_37xx_mbox_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("rWTM BIU Mailbox driver for Armada 37xx");
+MODULE_AUTHOR("Marek Behun <kabel@kernel.org>");
diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c
new file mode 100644
index 000000000..a2b8839d4
--- /dev/null
+++ b/drivers/mailbox/bcm-flexrm-mailbox.c
@@ -0,0 +1,1686 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2017 Broadcom
+
+/*
+ * Broadcom FlexRM Mailbox Driver
+ *
+ * Each Broadcom FlexSparx4 offload engine is implemented as an
+ * extension to Broadcom FlexRM ring manager. The FlexRM ring
+ * manager provides a set of rings which can be used to submit
+ * work to a FlexSparx4 offload engine.
+ *
+ * This driver creates a mailbox controller using a set of FlexRM
+ * rings where each mailbox channel represents a separate FlexRM ring.
+ */
+
+#include <asm/barrier.h>
+#include <asm/byteorder.h>
+#include <linux/atomic.h>
+#include <linux/bitmap.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox_client.h>
+#include <linux/mailbox/brcm-message.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+/* ====== FlexRM register defines ===== */
+
+/* FlexRM configuration */
+#define RING_REGS_SIZE 0x10000
+#define RING_DESC_SIZE 8
+#define RING_DESC_INDEX(offset) \
+ ((offset) / RING_DESC_SIZE)
+#define RING_DESC_OFFSET(index) \
+ ((index) * RING_DESC_SIZE)
+#define RING_MAX_REQ_COUNT 1024
+#define RING_BD_ALIGN_ORDER 12
+#define RING_BD_ALIGN_CHECK(addr) \
+ (!((addr) & ((0x1 << RING_BD_ALIGN_ORDER) - 1)))
+#define RING_BD_TOGGLE_INVALID(offset) \
+ (((offset) >> RING_BD_ALIGN_ORDER) & 0x1)
+#define RING_BD_TOGGLE_VALID(offset) \
+ (!RING_BD_TOGGLE_INVALID(offset))
+#define RING_BD_DESC_PER_REQ 32
+#define RING_BD_DESC_COUNT \
+ (RING_MAX_REQ_COUNT * RING_BD_DESC_PER_REQ)
+#define RING_BD_SIZE \
+ (RING_BD_DESC_COUNT * RING_DESC_SIZE)
+#define RING_CMPL_ALIGN_ORDER 13
+#define RING_CMPL_DESC_COUNT RING_MAX_REQ_COUNT
+#define RING_CMPL_SIZE \
+ (RING_CMPL_DESC_COUNT * RING_DESC_SIZE)
+#define RING_VER_MAGIC 0x76303031
+
+/* Per-Ring register offsets */
+#define RING_VER 0x000
+#define RING_BD_START_ADDR 0x004
+#define RING_BD_READ_PTR 0x008
+#define RING_BD_WRITE_PTR 0x00c
+#define RING_BD_READ_PTR_DDR_LS 0x010
+#define RING_BD_READ_PTR_DDR_MS 0x014
+#define RING_CMPL_START_ADDR 0x018
+#define RING_CMPL_WRITE_PTR 0x01c
+#define RING_NUM_REQ_RECV_LS 0x020
+#define RING_NUM_REQ_RECV_MS 0x024
+#define RING_NUM_REQ_TRANS_LS 0x028
+#define RING_NUM_REQ_TRANS_MS 0x02c
+#define RING_NUM_REQ_OUTSTAND 0x030
+#define RING_CONTROL 0x034
+#define RING_FLUSH_DONE 0x038
+#define RING_MSI_ADDR_LS 0x03c
+#define RING_MSI_ADDR_MS 0x040
+#define RING_MSI_CONTROL 0x048
+#define RING_BD_READ_PTR_DDR_CONTROL 0x04c
+#define RING_MSI_DATA_VALUE 0x064
+
+/* Register RING_BD_START_ADDR fields */
+#define BD_LAST_UPDATE_HW_SHIFT 28
+#define BD_LAST_UPDATE_HW_MASK 0x1
+#define BD_START_ADDR_VALUE(pa) \
+ ((u32)((((dma_addr_t)(pa)) >> RING_BD_ALIGN_ORDER) & 0x0fffffff))
+#define BD_START_ADDR_DECODE(val) \
+ ((dma_addr_t)((val) & 0x0fffffff) << RING_BD_ALIGN_ORDER)
+
+/* Register RING_CMPL_START_ADDR fields */
+#define CMPL_START_ADDR_VALUE(pa) \
+ ((u32)((((u64)(pa)) >> RING_CMPL_ALIGN_ORDER) & 0x07ffffff))
+
+/* Register RING_CONTROL fields */
+#define CONTROL_MASK_DISABLE_CONTROL 12
+#define CONTROL_FLUSH_SHIFT 5
+#define CONTROL_ACTIVE_SHIFT 4
+#define CONTROL_RATE_ADAPT_MASK 0xf
+#define CONTROL_RATE_DYNAMIC 0x0
+#define CONTROL_RATE_FAST 0x8
+#define CONTROL_RATE_MEDIUM 0x9
+#define CONTROL_RATE_SLOW 0xa
+#define CONTROL_RATE_IDLE 0xb
+
+/* Register RING_FLUSH_DONE fields */
+#define FLUSH_DONE_MASK 0x1
+
+/* Register RING_MSI_CONTROL fields */
+#define MSI_TIMER_VAL_SHIFT 16
+#define MSI_TIMER_VAL_MASK 0xffff
+#define MSI_ENABLE_SHIFT 15
+#define MSI_ENABLE_MASK 0x1
+#define MSI_COUNT_SHIFT 0
+#define MSI_COUNT_MASK 0x3ff
+
+/* Register RING_BD_READ_PTR_DDR_CONTROL fields */
+#define BD_READ_PTR_DDR_TIMER_VAL_SHIFT 16
+#define BD_READ_PTR_DDR_TIMER_VAL_MASK 0xffff
+#define BD_READ_PTR_DDR_ENABLE_SHIFT 15
+#define BD_READ_PTR_DDR_ENABLE_MASK 0x1
+
+/* ====== FlexRM ring descriptor defines ===== */
+
+/* Completion descriptor format */
+#define CMPL_OPAQUE_SHIFT 0
+#define CMPL_OPAQUE_MASK 0xffff
+#define CMPL_ENGINE_STATUS_SHIFT 16
+#define CMPL_ENGINE_STATUS_MASK 0xffff
+#define CMPL_DME_STATUS_SHIFT 32
+#define CMPL_DME_STATUS_MASK 0xffff
+#define CMPL_RM_STATUS_SHIFT 48
+#define CMPL_RM_STATUS_MASK 0xffff
+
+/* Completion DME status code */
+#define DME_STATUS_MEM_COR_ERR BIT(0)
+#define DME_STATUS_MEM_UCOR_ERR BIT(1)
+#define DME_STATUS_FIFO_UNDERFLOW BIT(2)
+#define DME_STATUS_FIFO_OVERFLOW BIT(3)
+#define DME_STATUS_RRESP_ERR BIT(4)
+#define DME_STATUS_BRESP_ERR BIT(5)
+#define DME_STATUS_ERROR_MASK (DME_STATUS_MEM_COR_ERR | \
+ DME_STATUS_MEM_UCOR_ERR | \
+ DME_STATUS_FIFO_UNDERFLOW | \
+ DME_STATUS_FIFO_OVERFLOW | \
+ DME_STATUS_RRESP_ERR | \
+ DME_STATUS_BRESP_ERR)
+
+/* Completion RM status code */
+#define RM_STATUS_CODE_SHIFT 0
+#define RM_STATUS_CODE_MASK 0x3ff
+#define RM_STATUS_CODE_GOOD 0x0
+#define RM_STATUS_CODE_AE_TIMEOUT 0x3ff
+
+/* General descriptor format */
+#define DESC_TYPE_SHIFT 60
+#define DESC_TYPE_MASK 0xf
+#define DESC_PAYLOAD_SHIFT 0
+#define DESC_PAYLOAD_MASK 0x0fffffffffffffff
+
+/* Null descriptor format */
+#define NULL_TYPE 0
+#define NULL_TOGGLE_SHIFT 58
+#define NULL_TOGGLE_MASK 0x1
+
+/* Header descriptor format */
+#define HEADER_TYPE 1
+#define HEADER_TOGGLE_SHIFT 58
+#define HEADER_TOGGLE_MASK 0x1
+#define HEADER_ENDPKT_SHIFT 57
+#define HEADER_ENDPKT_MASK 0x1
+#define HEADER_STARTPKT_SHIFT 56
+#define HEADER_STARTPKT_MASK 0x1
+#define HEADER_BDCOUNT_SHIFT 36
+#define HEADER_BDCOUNT_MASK 0x1f
+#define HEADER_BDCOUNT_MAX HEADER_BDCOUNT_MASK
+#define HEADER_FLAGS_SHIFT 16
+#define HEADER_FLAGS_MASK 0xffff
+#define HEADER_OPAQUE_SHIFT 0
+#define HEADER_OPAQUE_MASK 0xffff
+
+/* Source (SRC) descriptor format */
+#define SRC_TYPE 2
+#define SRC_LENGTH_SHIFT 44
+#define SRC_LENGTH_MASK 0xffff
+#define SRC_ADDR_SHIFT 0
+#define SRC_ADDR_MASK 0x00000fffffffffff
+
+/* Destination (DST) descriptor format */
+#define DST_TYPE 3
+#define DST_LENGTH_SHIFT 44
+#define DST_LENGTH_MASK 0xffff
+#define DST_ADDR_SHIFT 0
+#define DST_ADDR_MASK 0x00000fffffffffff
+
+/* Immediate (IMM) descriptor format */
+#define IMM_TYPE 4
+#define IMM_DATA_SHIFT 0
+#define IMM_DATA_MASK 0x0fffffffffffffff
+
+/* Next pointer (NPTR) descriptor format */
+#define NPTR_TYPE 5
+#define NPTR_TOGGLE_SHIFT 58
+#define NPTR_TOGGLE_MASK 0x1
+#define NPTR_ADDR_SHIFT 0
+#define NPTR_ADDR_MASK 0x00000fffffffffff
+
+/* Mega source (MSRC) descriptor format */
+#define MSRC_TYPE 6
+#define MSRC_LENGTH_SHIFT 44
+#define MSRC_LENGTH_MASK 0xffff
+#define MSRC_ADDR_SHIFT 0
+#define MSRC_ADDR_MASK 0x00000fffffffffff
+
+/* Mega destination (MDST) descriptor format */
+#define MDST_TYPE 7
+#define MDST_LENGTH_SHIFT 44
+#define MDST_LENGTH_MASK 0xffff
+#define MDST_ADDR_SHIFT 0
+#define MDST_ADDR_MASK 0x00000fffffffffff
+
+/* Source with tlast (SRCT) descriptor format */
+#define SRCT_TYPE 8
+#define SRCT_LENGTH_SHIFT 44
+#define SRCT_LENGTH_MASK 0xffff
+#define SRCT_ADDR_SHIFT 0
+#define SRCT_ADDR_MASK 0x00000fffffffffff
+
+/* Destination with tlast (DSTT) descriptor format */
+#define DSTT_TYPE 9
+#define DSTT_LENGTH_SHIFT 44
+#define DSTT_LENGTH_MASK 0xffff
+#define DSTT_ADDR_SHIFT 0
+#define DSTT_ADDR_MASK 0x00000fffffffffff
+
+/* Immediate with tlast (IMMT) descriptor format */
+#define IMMT_TYPE 10
+#define IMMT_DATA_SHIFT 0
+#define IMMT_DATA_MASK 0x0fffffffffffffff
+
+/* Descriptor helper macros */
+#define DESC_DEC(_d, _s, _m) (((_d) >> (_s)) & (_m))
+#define DESC_ENC(_d, _v, _s, _m) \
+ do { \
+ (_d) &= ~((u64)(_m) << (_s)); \
+ (_d) |= (((u64)(_v) & (_m)) << (_s)); \
+ } while (0)
+
+/* ====== FlexRM data structures ===== */
+
+struct flexrm_ring {
+ /* Unprotected members */
+ int num;
+ struct flexrm_mbox *mbox;
+ void __iomem *regs;
+ bool irq_requested;
+ unsigned int irq;
+ cpumask_t irq_aff_hint;
+ unsigned int msi_timer_val;
+ unsigned int msi_count_threshold;
+ struct brcm_message *requests[RING_MAX_REQ_COUNT];
+ void *bd_base;
+ dma_addr_t bd_dma_base;
+ u32 bd_write_offset;
+ void *cmpl_base;
+ dma_addr_t cmpl_dma_base;
+ /* Atomic stats */
+ atomic_t msg_send_count;
+ atomic_t msg_cmpl_count;
+ /* Protected members */
+ spinlock_t lock;
+ DECLARE_BITMAP(requests_bmap, RING_MAX_REQ_COUNT);
+ u32 cmpl_read_offset;
+};
+
+struct flexrm_mbox {
+ struct device *dev;
+ void __iomem *regs;
+ u32 num_rings;
+ struct flexrm_ring *rings;
+ struct dma_pool *bd_pool;
+ struct dma_pool *cmpl_pool;
+ struct dentry *root;
+ struct mbox_controller controller;
+};
+
+/* ====== FlexRM ring descriptor helper routines ===== */
+
+static u64 flexrm_read_desc(void *desc_ptr)
+{
+ return le64_to_cpu(*((u64 *)desc_ptr));
+}
+
+static void flexrm_write_desc(void *desc_ptr, u64 desc)
+{
+ *((u64 *)desc_ptr) = cpu_to_le64(desc);
+}
+
+static u32 flexrm_cmpl_desc_to_reqid(u64 cmpl_desc)
+{
+ return (u32)(cmpl_desc & CMPL_OPAQUE_MASK);
+}
+
+static int flexrm_cmpl_desc_to_error(u64 cmpl_desc)
+{
+ u32 status;
+
+ status = DESC_DEC(cmpl_desc, CMPL_DME_STATUS_SHIFT,
+ CMPL_DME_STATUS_MASK);
+ if (status & DME_STATUS_ERROR_MASK)
+ return -EIO;
+
+ status = DESC_DEC(cmpl_desc, CMPL_RM_STATUS_SHIFT,
+ CMPL_RM_STATUS_MASK);
+ status &= RM_STATUS_CODE_MASK;
+ if (status == RM_STATUS_CODE_AE_TIMEOUT)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static bool flexrm_is_next_table_desc(void *desc_ptr)
+{
+ u64 desc = flexrm_read_desc(desc_ptr);
+ u32 type = DESC_DEC(desc, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+
+ return (type == NPTR_TYPE) ? true : false;
+}
+
+static u64 flexrm_next_table_desc(u32 toggle, dma_addr_t next_addr)
+{
+ u64 desc = 0;
+
+ DESC_ENC(desc, NPTR_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+ DESC_ENC(desc, toggle, NPTR_TOGGLE_SHIFT, NPTR_TOGGLE_MASK);
+ DESC_ENC(desc, next_addr, NPTR_ADDR_SHIFT, NPTR_ADDR_MASK);
+
+ return desc;
+}
+
+static u64 flexrm_null_desc(u32 toggle)
+{
+ u64 desc = 0;
+
+ DESC_ENC(desc, NULL_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+ DESC_ENC(desc, toggle, NULL_TOGGLE_SHIFT, NULL_TOGGLE_MASK);
+
+ return desc;
+}
+
+static u32 flexrm_estimate_header_desc_count(u32 nhcnt)
+{
+ u32 hcnt = nhcnt / HEADER_BDCOUNT_MAX;
+
+ if (nhcnt % HEADER_BDCOUNT_MAX)
+ hcnt += 1;
+
+ return hcnt;
+}
+
+static void flexrm_flip_header_toggle(void *desc_ptr)
+{
+ u64 desc = flexrm_read_desc(desc_ptr);
+
+ if (desc & ((u64)0x1 << HEADER_TOGGLE_SHIFT))
+ desc &= ~((u64)0x1 << HEADER_TOGGLE_SHIFT);
+ else
+ desc |= ((u64)0x1 << HEADER_TOGGLE_SHIFT);
+
+ flexrm_write_desc(desc_ptr, desc);
+}
+
+static u64 flexrm_header_desc(u32 toggle, u32 startpkt, u32 endpkt,
+ u32 bdcount, u32 flags, u32 opaque)
+{
+ u64 desc = 0;
+
+ DESC_ENC(desc, HEADER_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+ DESC_ENC(desc, toggle, HEADER_TOGGLE_SHIFT, HEADER_TOGGLE_MASK);
+ DESC_ENC(desc, startpkt, HEADER_STARTPKT_SHIFT, HEADER_STARTPKT_MASK);
+ DESC_ENC(desc, endpkt, HEADER_ENDPKT_SHIFT, HEADER_ENDPKT_MASK);
+ DESC_ENC(desc, bdcount, HEADER_BDCOUNT_SHIFT, HEADER_BDCOUNT_MASK);
+ DESC_ENC(desc, flags, HEADER_FLAGS_SHIFT, HEADER_FLAGS_MASK);
+ DESC_ENC(desc, opaque, HEADER_OPAQUE_SHIFT, HEADER_OPAQUE_MASK);
+
+ return desc;
+}
+
+static void flexrm_enqueue_desc(u32 nhpos, u32 nhcnt, u32 reqid,
+ u64 desc, void **desc_ptr, u32 *toggle,
+ void *start_desc, void *end_desc)
+{
+ u64 d;
+ u32 nhavail, _toggle, _startpkt, _endpkt, _bdcount;
+
+ /* Sanity check */
+ if (nhcnt <= nhpos)
+ return;
+
+ /*
+ * Each request or packet starts with a HEADER descriptor followed
+ * by one or more non-HEADER descriptors (SRC, SRCT, MSRC, DST,
+ * DSTT, MDST, IMM, and IMMT). The number of non-HEADER descriptors
+ * following a HEADER descriptor is represented by BDCOUNT field
+ * of HEADER descriptor. The max value of BDCOUNT field is 31 which
+ * means we can only have 31 non-HEADER descriptors following one
+ * HEADER descriptor.
+ *
+ * In general use, the number of non-HEADER descriptors can easily go
+ * beyond 31. To tackle this situation, we have packet (or request)
+ * extension bits (STARTPKT and ENDPKT) in the HEADER descriptor.
+ *
+ * To use packet extension, the first HEADER descriptor of request
+ * (or packet) will have STARTPKT=1 and ENDPKT=0. The intermediate
+ * HEADER descriptors will have STARTPKT=0 and ENDPKT=0. The last
+ * HEADER descriptor will have STARTPKT=0 and ENDPKT=1. Also, the
+ * TOGGLE bit of the first HEADER will be set to invalid state to
+ * ensure that FlexRM does not start fetching descriptors till all
+ * descriptors are enqueued. The user of this function will flip
+ * the TOGGLE bit of first HEADER after all descriptors are
+ * enqueued.
+ */
+
+ if ((nhpos % HEADER_BDCOUNT_MAX == 0) && (nhcnt - nhpos)) {
+ /* Prepare the header descriptor */
+ nhavail = (nhcnt - nhpos);
+ _toggle = (nhpos == 0) ? !(*toggle) : (*toggle);
+ _startpkt = (nhpos == 0) ? 0x1 : 0x0;
+ _endpkt = (nhavail <= HEADER_BDCOUNT_MAX) ? 0x1 : 0x0;
+ _bdcount = (nhavail <= HEADER_BDCOUNT_MAX) ?
+ nhavail : HEADER_BDCOUNT_MAX;
+ d = flexrm_header_desc(_toggle, _startpkt, _endpkt,
+ _bdcount, 0x0, reqid);
+
+ /* Write header descriptor */
+ flexrm_write_desc(*desc_ptr, d);
+
+ /* Point to next descriptor */
+ *desc_ptr += sizeof(desc);
+ if (*desc_ptr == end_desc)
+ *desc_ptr = start_desc;
+
+ /* Skip next pointer descriptors */
+ while (flexrm_is_next_table_desc(*desc_ptr)) {
+ *toggle = (*toggle) ? 0 : 1;
+ *desc_ptr += sizeof(desc);
+ if (*desc_ptr == end_desc)
+ *desc_ptr = start_desc;
+ }
+ }
+
+ /* Write desired descriptor */
+ flexrm_write_desc(*desc_ptr, desc);
+
+ /* Point to next descriptor */
+ *desc_ptr += sizeof(desc);
+ if (*desc_ptr == end_desc)
+ *desc_ptr = start_desc;
+
+ /* Skip next pointer descriptors */
+ while (flexrm_is_next_table_desc(*desc_ptr)) {
+ *toggle = (*toggle) ? 0 : 1;
+ *desc_ptr += sizeof(desc);
+ if (*desc_ptr == end_desc)
+ *desc_ptr = start_desc;
+ }
+}
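+
+/*
+ * Worked example for the packet-extension scheme described above (editorial
+ * addition): a request with nhcnt = 40 non-HEADER descriptors is enqueued as
+ *
+ *	nhpos  0: HEADER with STARTPKT=1, ENDPKT=0, BDCOUNT=31 and an invalid
+ *	          TOGGLE, followed by 31 non-HEADER descriptors
+ *	nhpos 31: HEADER with STARTPKT=0, ENDPKT=1, BDCOUNT=9, followed by the
+ *	          remaining 9 non-HEADER descriptors
+ *
+ * so two HEADER descriptors are needed and flexrm_new_request() below must
+ * reserve 2 + 40 + 1 = 43 ring slots, the last one being the NULL descriptor
+ * with an invalid toggle. Only once everything is in place does
+ * flexrm_flip_header_toggle() make the first HEADER's toggle valid, at which
+ * point the hardware starts fetching the request.
+ */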
+
+static u64 flexrm_src_desc(dma_addr_t addr, unsigned int length)
+{
+ u64 desc = 0;
+
+ DESC_ENC(desc, SRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+ DESC_ENC(desc, length, SRC_LENGTH_SHIFT, SRC_LENGTH_MASK);
+ DESC_ENC(desc, addr, SRC_ADDR_SHIFT, SRC_ADDR_MASK);
+
+ return desc;
+}
+
+static u64 flexrm_msrc_desc(dma_addr_t addr, unsigned int length_div_16)
+{
+ u64 desc = 0;
+
+ DESC_ENC(desc, MSRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+ DESC_ENC(desc, length_div_16, MSRC_LENGTH_SHIFT, MSRC_LENGTH_MASK);
+ DESC_ENC(desc, addr, MSRC_ADDR_SHIFT, MSRC_ADDR_MASK);
+
+ return desc;
+}
+
+static u64 flexrm_dst_desc(dma_addr_t addr, unsigned int length)
+{
+ u64 desc = 0;
+
+ DESC_ENC(desc, DST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+ DESC_ENC(desc, length, DST_LENGTH_SHIFT, DST_LENGTH_MASK);
+ DESC_ENC(desc, addr, DST_ADDR_SHIFT, DST_ADDR_MASK);
+
+ return desc;
+}
+
+static u64 flexrm_mdst_desc(dma_addr_t addr, unsigned int length_div_16)
+{
+ u64 desc = 0;
+
+ DESC_ENC(desc, MDST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+ DESC_ENC(desc, length_div_16, MDST_LENGTH_SHIFT, MDST_LENGTH_MASK);
+ DESC_ENC(desc, addr, MDST_ADDR_SHIFT, MDST_ADDR_MASK);
+
+ return desc;
+}
+
+static u64 flexrm_imm_desc(u64 data)
+{
+ u64 desc = 0;
+
+ DESC_ENC(desc, IMM_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+ DESC_ENC(desc, data, IMM_DATA_SHIFT, IMM_DATA_MASK);
+
+ return desc;
+}
+
+static u64 flexrm_srct_desc(dma_addr_t addr, unsigned int length)
+{
+ u64 desc = 0;
+
+ DESC_ENC(desc, SRCT_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+ DESC_ENC(desc, length, SRCT_LENGTH_SHIFT, SRCT_LENGTH_MASK);
+ DESC_ENC(desc, addr, SRCT_ADDR_SHIFT, SRCT_ADDR_MASK);
+
+ return desc;
+}
+
+static u64 flexrm_dstt_desc(dma_addr_t addr, unsigned int length)
+{
+ u64 desc = 0;
+
+ DESC_ENC(desc, DSTT_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+ DESC_ENC(desc, length, DSTT_LENGTH_SHIFT, DSTT_LENGTH_MASK);
+ DESC_ENC(desc, addr, DSTT_ADDR_SHIFT, DSTT_ADDR_MASK);
+
+ return desc;
+}
+
+static u64 flexrm_immt_desc(u64 data)
+{
+ u64 desc = 0;
+
+ DESC_ENC(desc, IMMT_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
+ DESC_ENC(desc, data, IMMT_DATA_SHIFT, IMMT_DATA_MASK);
+
+ return desc;
+}
+
+static bool flexrm_spu_sanity_check(struct brcm_message *msg)
+{
+ struct scatterlist *sg;
+
+ if (!msg->spu.src || !msg->spu.dst)
+ return false;
+ for (sg = msg->spu.src; sg; sg = sg_next(sg)) {
+ if (sg->length & 0xf) {
+ if (sg->length > SRC_LENGTH_MASK)
+ return false;
+ } else {
+ if (sg->length > (MSRC_LENGTH_MASK * 16))
+ return false;
+ }
+ }
+ for (sg = msg->spu.dst; sg; sg = sg_next(sg)) {
+ if (sg->length & 0xf) {
+ if (sg->length > DST_LENGTH_MASK)
+ return false;
+ } else {
+ if (sg->length > (MDST_LENGTH_MASK * 16))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static u32 flexrm_spu_estimate_nonheader_desc_count(struct brcm_message *msg)
+{
+ u32 cnt = 0;
+ unsigned int dst_target = 0;
+ struct scatterlist *src_sg = msg->spu.src, *dst_sg = msg->spu.dst;
+
+ while (src_sg || dst_sg) {
+ if (src_sg) {
+ cnt++;
+ dst_target = src_sg->length;
+ src_sg = sg_next(src_sg);
+ } else
+ dst_target = UINT_MAX;
+
+ while (dst_target && dst_sg) {
+ cnt++;
+ if (dst_sg->length < dst_target)
+ dst_target -= dst_sg->length;
+ else
+ dst_target = 0;
+ dst_sg = sg_next(dst_sg);
+ }
+ }
+
+ return cnt;
+}
+
+static int flexrm_spu_dma_map(struct device *dev, struct brcm_message *msg)
+{
+ int rc;
+
+ rc = dma_map_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
+ DMA_TO_DEVICE);
+ if (!rc)
+ return -EIO;
+
+ rc = dma_map_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst),
+ DMA_FROM_DEVICE);
+ if (!rc) {
+ dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
+ DMA_TO_DEVICE);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void flexrm_spu_dma_unmap(struct device *dev, struct brcm_message *msg)
+{
+ dma_unmap_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst),
+ DMA_FROM_DEVICE);
+ dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
+ DMA_TO_DEVICE);
+}
+
+static void *flexrm_spu_write_descs(struct brcm_message *msg, u32 nhcnt,
+ u32 reqid, void *desc_ptr, u32 toggle,
+ void *start_desc, void *end_desc)
+{
+ u64 d;
+ u32 nhpos = 0;
+ void *orig_desc_ptr = desc_ptr;
+ unsigned int dst_target = 0;
+ struct scatterlist *src_sg = msg->spu.src, *dst_sg = msg->spu.dst;
+
+ while (src_sg || dst_sg) {
+ if (src_sg) {
+ if (sg_dma_len(src_sg) & 0xf)
+ d = flexrm_src_desc(sg_dma_address(src_sg),
+ sg_dma_len(src_sg));
+ else
+ d = flexrm_msrc_desc(sg_dma_address(src_sg),
+ sg_dma_len(src_sg)/16);
+ flexrm_enqueue_desc(nhpos, nhcnt, reqid,
+ d, &desc_ptr, &toggle,
+ start_desc, end_desc);
+ nhpos++;
+ dst_target = sg_dma_len(src_sg);
+ src_sg = sg_next(src_sg);
+ } else
+ dst_target = UINT_MAX;
+
+ while (dst_target && dst_sg) {
+ if (sg_dma_len(dst_sg) & 0xf)
+ d = flexrm_dst_desc(sg_dma_address(dst_sg),
+ sg_dma_len(dst_sg));
+ else
+ d = flexrm_mdst_desc(sg_dma_address(dst_sg),
+ sg_dma_len(dst_sg)/16);
+ flexrm_enqueue_desc(nhpos, nhcnt, reqid,
+ d, &desc_ptr, &toggle,
+ start_desc, end_desc);
+ nhpos++;
+ if (sg_dma_len(dst_sg) < dst_target)
+ dst_target -= sg_dma_len(dst_sg);
+ else
+ dst_target = 0;
+ dst_sg = sg_next(dst_sg);
+ }
+ }
+
+ /* Null descriptor with invalid toggle bit */
+ flexrm_write_desc(desc_ptr, flexrm_null_desc(!toggle));
+
+ /* Ensure that descriptors have been written to memory */
+ wmb();
+
+ /* Flip toggle bit in header */
+ flexrm_flip_header_toggle(orig_desc_ptr);
+
+ return desc_ptr;
+}
+
+static bool flexrm_sba_sanity_check(struct brcm_message *msg)
+{
+ u32 i;
+
+ if (!msg->sba.cmds || !msg->sba.cmds_count)
+ return false;
+
+ for (i = 0; i < msg->sba.cmds_count; i++) {
+ if (((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) ||
+ (msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C)) &&
+ (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT))
+ return false;
+ if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) &&
+ (msg->sba.cmds[i].data_len > SRCT_LENGTH_MASK))
+ return false;
+ if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C) &&
+ (msg->sba.cmds[i].data_len > SRCT_LENGTH_MASK))
+ return false;
+ if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_RESP) &&
+ (msg->sba.cmds[i].resp_len > DSTT_LENGTH_MASK))
+ return false;
+ if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT) &&
+ (msg->sba.cmds[i].data_len > DSTT_LENGTH_MASK))
+ return false;
+ }
+
+ return true;
+}
+
+static u32 flexrm_sba_estimate_nonheader_desc_count(struct brcm_message *msg)
+{
+ u32 i, cnt;
+
+ cnt = 0;
+ for (i = 0; i < msg->sba.cmds_count; i++) {
+ cnt++;
+
+ if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) ||
+ (msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C))
+ cnt++;
+
+ if (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_RESP)
+ cnt++;
+
+ if (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT)
+ cnt++;
+ }
+
+ return cnt;
+}
+
+static void *flexrm_sba_write_descs(struct brcm_message *msg, u32 nhcnt,
+ u32 reqid, void *desc_ptr, u32 toggle,
+ void *start_desc, void *end_desc)
+{
+ u64 d;
+ u32 i, nhpos = 0;
+ struct brcm_sba_command *c;
+ void *orig_desc_ptr = desc_ptr;
+
+ /* Convert SBA commands into descriptors */
+ for (i = 0; i < msg->sba.cmds_count; i++) {
+ c = &msg->sba.cmds[i];
+
+ if ((c->flags & BRCM_SBA_CMD_HAS_RESP) &&
+ (c->flags & BRCM_SBA_CMD_HAS_OUTPUT)) {
+ /* Destination response descriptor */
+ d = flexrm_dst_desc(c->resp, c->resp_len);
+ flexrm_enqueue_desc(nhpos, nhcnt, reqid,
+ d, &desc_ptr, &toggle,
+ start_desc, end_desc);
+ nhpos++;
+ } else if (c->flags & BRCM_SBA_CMD_HAS_RESP) {
+ /* Destination response with tlast descriptor */
+ d = flexrm_dstt_desc(c->resp, c->resp_len);
+ flexrm_enqueue_desc(nhpos, nhcnt, reqid,
+ d, &desc_ptr, &toggle,
+ start_desc, end_desc);
+ nhpos++;
+ }
+
+ if (c->flags & BRCM_SBA_CMD_HAS_OUTPUT) {
+ /* Destination with tlast descriptor */
+ d = flexrm_dstt_desc(c->data, c->data_len);
+ flexrm_enqueue_desc(nhpos, nhcnt, reqid,
+ d, &desc_ptr, &toggle,
+ start_desc, end_desc);
+ nhpos++;
+ }
+
+ if (c->flags & BRCM_SBA_CMD_TYPE_B) {
+ /* Command as immediate descriptor */
+ d = flexrm_imm_desc(c->cmd);
+ flexrm_enqueue_desc(nhpos, nhcnt, reqid,
+ d, &desc_ptr, &toggle,
+ start_desc, end_desc);
+ nhpos++;
+ } else {
+ /* Command as immediate descriptor with tlast */
+ d = flexrm_immt_desc(c->cmd);
+ flexrm_enqueue_desc(nhpos, nhcnt, reqid,
+ d, &desc_ptr, &toggle,
+ start_desc, end_desc);
+ nhpos++;
+ }
+
+ if ((c->flags & BRCM_SBA_CMD_TYPE_B) ||
+ (c->flags & BRCM_SBA_CMD_TYPE_C)) {
+ /* Source with tlast descriptor */
+ d = flexrm_srct_desc(c->data, c->data_len);
+ flexrm_enqueue_desc(nhpos, nhcnt, reqid,
+ d, &desc_ptr, &toggle,
+ start_desc, end_desc);
+ nhpos++;
+ }
+ }
+
+ /* Null descriptor with invalid toggle bit */
+ flexrm_write_desc(desc_ptr, flexrm_null_desc(!toggle));
+
+ /* Ensure that descriptors have been written to memory */
+ wmb();
+
+ /* Flip toggle bit in header */
+ flexrm_flip_header_toggle(orig_desc_ptr);
+
+ return desc_ptr;
+}
+
+static bool flexrm_sanity_check(struct brcm_message *msg)
+{
+ if (!msg)
+ return false;
+
+ switch (msg->type) {
+ case BRCM_MESSAGE_SPU:
+ return flexrm_spu_sanity_check(msg);
+ case BRCM_MESSAGE_SBA:
+ return flexrm_sba_sanity_check(msg);
+ default:
+ return false;
+ };
+}
+
+static u32 flexrm_estimate_nonheader_desc_count(struct brcm_message *msg)
+{
+ if (!msg)
+ return 0;
+
+ switch (msg->type) {
+ case BRCM_MESSAGE_SPU:
+ return flexrm_spu_estimate_nonheader_desc_count(msg);
+ case BRCM_MESSAGE_SBA:
+ return flexrm_sba_estimate_nonheader_desc_count(msg);
+ default:
+ return 0;
+ };
+}
+
+static int flexrm_dma_map(struct device *dev, struct brcm_message *msg)
+{
+ if (!dev || !msg)
+ return -EINVAL;
+
+ switch (msg->type) {
+ case BRCM_MESSAGE_SPU:
+ return flexrm_spu_dma_map(dev, msg);
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static void flexrm_dma_unmap(struct device *dev, struct brcm_message *msg)
+{
+ if (!dev || !msg)
+ return;
+
+ switch (msg->type) {
+ case BRCM_MESSAGE_SPU:
+ flexrm_spu_dma_unmap(dev, msg);
+ break;
+ default:
+ break;
+ }
+}
+
+static void *flexrm_write_descs(struct brcm_message *msg, u32 nhcnt,
+ u32 reqid, void *desc_ptr, u32 toggle,
+ void *start_desc, void *end_desc)
+{
+ if (!msg || !desc_ptr || !start_desc || !end_desc)
+ return ERR_PTR(-ENOTSUPP);
+
+ if ((desc_ptr < start_desc) || (end_desc <= desc_ptr))
+ return ERR_PTR(-ERANGE);
+
+ switch (msg->type) {
+ case BRCM_MESSAGE_SPU:
+ return flexrm_spu_write_descs(msg, nhcnt, reqid,
+ desc_ptr, toggle,
+ start_desc, end_desc);
+ case BRCM_MESSAGE_SBA:
+ return flexrm_sba_write_descs(msg, nhcnt, reqid,
+ desc_ptr, toggle,
+ start_desc, end_desc);
+ default:
+ return ERR_PTR(-ENOTSUPP);
+ };
+}
+
+/* ====== FlexRM driver helper routines ===== */
+
+static void flexrm_write_config_in_seqfile(struct flexrm_mbox *mbox,
+ struct seq_file *file)
+{
+ int i;
+ const char *state;
+ struct flexrm_ring *ring;
+
+ seq_printf(file, "%-5s %-9s %-18s %-10s %-18s %-10s\n",
+ "Ring#", "State", "BD_Addr", "BD_Size",
+ "Cmpl_Addr", "Cmpl_Size");
+
+ for (i = 0; i < mbox->num_rings; i++) {
+ ring = &mbox->rings[i];
+ if (readl(ring->regs + RING_CONTROL) &
+ BIT(CONTROL_ACTIVE_SHIFT))
+ state = "active";
+ else
+ state = "inactive";
+ seq_printf(file,
+ "%-5d %-9s 0x%016llx 0x%08x 0x%016llx 0x%08x\n",
+ ring->num, state,
+ (unsigned long long)ring->bd_dma_base,
+ (u32)RING_BD_SIZE,
+ (unsigned long long)ring->cmpl_dma_base,
+ (u32)RING_CMPL_SIZE);
+ }
+}
+
+static void flexrm_write_stats_in_seqfile(struct flexrm_mbox *mbox,
+ struct seq_file *file)
+{
+ int i;
+ u32 val, bd_read_offset;
+ struct flexrm_ring *ring;
+
+ seq_printf(file, "%-5s %-10s %-10s %-10s %-11s %-11s\n",
+ "Ring#", "BD_Read", "BD_Write",
+ "Cmpl_Read", "Submitted", "Completed");
+
+ for (i = 0; i < mbox->num_rings; i++) {
+ ring = &mbox->rings[i];
+ bd_read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR);
+ val = readl_relaxed(ring->regs + RING_BD_START_ADDR);
+ bd_read_offset *= RING_DESC_SIZE;
+ bd_read_offset += (u32)(BD_START_ADDR_DECODE(val) -
+ ring->bd_dma_base);
+ seq_printf(file, "%-5d 0x%08x 0x%08x 0x%08x %-11d %-11d\n",
+ ring->num,
+ (u32)bd_read_offset,
+ (u32)ring->bd_write_offset,
+ (u32)ring->cmpl_read_offset,
+ (u32)atomic_read(&ring->msg_send_count),
+ (u32)atomic_read(&ring->msg_cmpl_count));
+ }
+}
+
+static int flexrm_new_request(struct flexrm_ring *ring,
+ struct brcm_message *batch_msg,
+ struct brcm_message *msg)
+{
+ void *next;
+ unsigned long flags;
+ u32 val, count, nhcnt;
+ u32 read_offset, write_offset;
+ bool exit_cleanup = false;
+ int ret = 0, reqid;
+
+ /* Do sanity check on message */
+ if (!flexrm_sanity_check(msg))
+ return -EIO;
+ msg->error = 0;
+
+ /* Reserve a request id; if none are free the ring is full, so bail out. */
+ spin_lock_irqsave(&ring->lock, flags);
+ reqid = bitmap_find_free_region(ring->requests_bmap,
+ RING_MAX_REQ_COUNT, 0);
+ spin_unlock_irqrestore(&ring->lock, flags);
+ if (reqid < 0)
+ return -ENOSPC;
+ ring->requests[reqid] = msg;
+
+ /* Do DMA mappings for the message */
+ ret = flexrm_dma_map(ring->mbox->dev, msg);
+ if (ret < 0) {
+ ring->requests[reqid] = NULL;
+ spin_lock_irqsave(&ring->lock, flags);
+ bitmap_release_region(ring->requests_bmap, reqid, 0);
+ spin_unlock_irqrestore(&ring->lock, flags);
+ return ret;
+ }
+
+ /* Determine current HW BD read offset */
+ read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR);
+ val = readl_relaxed(ring->regs + RING_BD_START_ADDR);
+ read_offset *= RING_DESC_SIZE;
+ read_offset += (u32)(BD_START_ADDR_DECODE(val) - ring->bd_dma_base);
+
+ /*
+ * Number of required descriptors = number of non-header descriptors +
+ * number of header descriptors +
+ * 1x null descriptor
+ */
+ nhcnt = flexrm_estimate_nonheader_desc_count(msg);
+ count = flexrm_estimate_header_desc_count(nhcnt) + nhcnt + 1;
+
+ /* Check for available descriptor space. */
+ write_offset = ring->bd_write_offset;
+ while (count) {
+ if (!flexrm_is_next_table_desc(ring->bd_base + write_offset))
+ count--;
+ write_offset += RING_DESC_SIZE;
+ if (write_offset == RING_BD_SIZE)
+ write_offset = 0x0;
+ if (write_offset == read_offset)
+ break;
+ }
+ if (count) {
+ ret = -ENOSPC;
+ exit_cleanup = true;
+ goto exit;
+ }
+
+ /* Write descriptors to ring */
+ next = flexrm_write_descs(msg, nhcnt, reqid,
+ ring->bd_base + ring->bd_write_offset,
+ RING_BD_TOGGLE_VALID(ring->bd_write_offset),
+ ring->bd_base, ring->bd_base + RING_BD_SIZE);
+ if (IS_ERR(next)) {
+ ret = PTR_ERR(next);
+ exit_cleanup = true;
+ goto exit;
+ }
+
+ /* Save ring BD write offset */
+ ring->bd_write_offset = (unsigned long)(next - ring->bd_base);
+
+ /* Increment number of messages sent */
+ atomic_inc_return(&ring->msg_send_count);
+
+exit:
+ /* Update error status in message */
+ msg->error = ret;
+
+ /* Cleanup if we failed */
+ if (exit_cleanup) {
+ flexrm_dma_unmap(ring->mbox->dev, msg);
+ ring->requests[reqid] = NULL;
+ spin_lock_irqsave(&ring->lock, flags);
+ bitmap_release_region(ring->requests_bmap, reqid, 0);
+ spin_unlock_irqrestore(&ring->lock, flags);
+ }
+
+ return ret;
+}
+
+static int flexrm_process_completions(struct flexrm_ring *ring)
+{
+ u64 desc;
+ int err, count = 0;
+ unsigned long flags;
+ struct brcm_message *msg = NULL;
+ u32 reqid, cmpl_read_offset, cmpl_write_offset;
+ struct mbox_chan *chan = &ring->mbox->controller.chans[ring->num];
+
+ spin_lock_irqsave(&ring->lock, flags);
+
+ /*
+ * Get current completion read and write offset
+ *
+ * Note: We should read completion write pointer at least once
+ * after we get a MSI interrupt because HW maintains internal
+ * MSI status which will allow next MSI interrupt only after
+ * completion write pointer is read.
+ */
+ cmpl_write_offset = readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR);
+ cmpl_write_offset *= RING_DESC_SIZE;
+ cmpl_read_offset = ring->cmpl_read_offset;
+ ring->cmpl_read_offset = cmpl_write_offset;
+
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ /* For each completed request notify mailbox clients */
+ reqid = 0;
+ while (cmpl_read_offset != cmpl_write_offset) {
+ /* Dequeue next completion descriptor */
+ desc = *((u64 *)(ring->cmpl_base + cmpl_read_offset));
+
+ /* Next read offset */
+ cmpl_read_offset += RING_DESC_SIZE;
+ if (cmpl_read_offset == RING_CMPL_SIZE)
+ cmpl_read_offset = 0;
+
+ /* Decode error from completion descriptor */
+ err = flexrm_cmpl_desc_to_error(desc);
+ if (err < 0) {
+ dev_warn(ring->mbox->dev,
+ "ring%d got completion desc=0x%lx with error %d\n",
+ ring->num, (unsigned long)desc, err);
+ }
+
+ /* Determine request id from completion descriptor */
+ reqid = flexrm_cmpl_desc_to_reqid(desc);
+
+ /* Determine message pointer based on reqid */
+ msg = ring->requests[reqid];
+ if (!msg) {
+ dev_warn(ring->mbox->dev,
+ "ring%d null msg pointer for completion desc=0x%lx\n",
+ ring->num, (unsigned long)desc);
+ continue;
+ }
+
+ /* Release reqid for recycling */
+ ring->requests[reqid] = NULL;
+ spin_lock_irqsave(&ring->lock, flags);
+ bitmap_release_region(ring->requests_bmap, reqid, 0);
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ /* Unmap DMA mappings */
+ flexrm_dma_unmap(ring->mbox->dev, msg);
+
+ /* Give-back message to mailbox client */
+ msg->error = err;
+ mbox_chan_received_data(chan, msg);
+
+ /* Increment number of completions processed */
+ atomic_inc_return(&ring->msg_cmpl_count);
+ count++;
+ }
+
+ return count;
+}
+
+/* ====== FlexRM Debugfs callbacks ====== */
+
+static int flexrm_debugfs_conf_show(struct seq_file *file, void *offset)
+{
+ struct flexrm_mbox *mbox = dev_get_drvdata(file->private);
+
+ /* Write config in file */
+ flexrm_write_config_in_seqfile(mbox, file);
+
+ return 0;
+}
+
+static int flexrm_debugfs_stats_show(struct seq_file *file, void *offset)
+{
+ struct flexrm_mbox *mbox = dev_get_drvdata(file->private);
+
+ /* Write stats in file */
+ flexrm_write_stats_in_seqfile(mbox, file);
+
+ return 0;
+}
+
+/* ====== FlexRM interrupt handler ===== */
+
+static irqreturn_t flexrm_irq_event(int irq, void *dev_id)
+{
+ /* We only have MSI for completions, so just wake up the IRQ thread. */
+ /* Ring-related errors are reported via completion descriptors. */
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t flexrm_irq_thread(int irq, void *dev_id)
+{
+ flexrm_process_completions(dev_id);
+
+ return IRQ_HANDLED;
+}
+
+/* ====== FlexRM mailbox callbacks ===== */
+
+static int flexrm_send_data(struct mbox_chan *chan, void *data)
+{
+ int i, rc;
+ struct flexrm_ring *ring = chan->con_priv;
+ struct brcm_message *msg = data;
+
+ if (msg->type == BRCM_MESSAGE_BATCH) {
+ for (i = msg->batch.msgs_queued;
+ i < msg->batch.msgs_count; i++) {
+ rc = flexrm_new_request(ring, msg,
+ &msg->batch.msgs[i]);
+ if (rc) {
+ msg->error = rc;
+ return rc;
+ }
+ msg->batch.msgs_queued++;
+ }
+ return 0;
+ }
+
+ return flexrm_new_request(ring, NULL, data);
+}
+
+static bool flexrm_peek_data(struct mbox_chan *chan)
+{
+ int cnt = flexrm_process_completions(chan->con_priv);
+
+ return (cnt > 0) ? true : false;
+}
+
+static int flexrm_startup(struct mbox_chan *chan)
+{
+ u64 d;
+ u32 val, off;
+ int ret = 0;
+ dma_addr_t next_addr;
+ struct flexrm_ring *ring = chan->con_priv;
+
+ /* Allocate BD memory */
+ ring->bd_base = dma_pool_alloc(ring->mbox->bd_pool,
+ GFP_KERNEL, &ring->bd_dma_base);
+ if (!ring->bd_base) {
+ dev_err(ring->mbox->dev,
+ "can't allocate BD memory for ring%d\n",
+ ring->num);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ /* Configure next table pointer entries in BD memory */
+ for (off = 0; off < RING_BD_SIZE; off += RING_DESC_SIZE) {
+ next_addr = off + RING_DESC_SIZE;
+ if (next_addr == RING_BD_SIZE)
+ next_addr = 0;
+ next_addr += ring->bd_dma_base;
+ if (RING_BD_ALIGN_CHECK(next_addr))
+ d = flexrm_next_table_desc(RING_BD_TOGGLE_VALID(off),
+ next_addr);
+ else
+ d = flexrm_null_desc(RING_BD_TOGGLE_INVALID(off));
+ flexrm_write_desc(ring->bd_base + off, d);
+ }
+
+ /* Allocate completion memory */
+ ring->cmpl_base = dma_pool_zalloc(ring->mbox->cmpl_pool,
+ GFP_KERNEL, &ring->cmpl_dma_base);
+ if (!ring->cmpl_base) {
+ dev_err(ring->mbox->dev,
+ "can't allocate completion memory for ring%d\n",
+ ring->num);
+ ret = -ENOMEM;
+ goto fail_free_bd_memory;
+ }
+
+ /* Request IRQ */
+ if (ring->irq == UINT_MAX) {
+ dev_err(ring->mbox->dev,
+ "ring%d IRQ not available\n", ring->num);
+ ret = -ENODEV;
+ goto fail_free_cmpl_memory;
+ }
+ ret = request_threaded_irq(ring->irq,
+ flexrm_irq_event,
+ flexrm_irq_thread,
+ 0, dev_name(ring->mbox->dev), ring);
+ if (ret) {
+ dev_err(ring->mbox->dev,
+ "failed to request ring%d IRQ\n", ring->num);
+ goto fail_free_cmpl_memory;
+ }
+ ring->irq_requested = true;
+
+ /* Set IRQ affinity hint */
+ ring->irq_aff_hint = CPU_MASK_NONE;
+ val = ring->mbox->num_rings;
+ val = (num_online_cpus() < val) ? val / num_online_cpus() : 1;
+ cpumask_set_cpu((ring->num / val) % num_online_cpus(),
+ &ring->irq_aff_hint);
+ ret = irq_update_affinity_hint(ring->irq, &ring->irq_aff_hint);
+ if (ret) {
+ dev_err(ring->mbox->dev,
+ "failed to set IRQ affinity hint for ring%d\n",
+ ring->num);
+ goto fail_free_irq;
+ }
+
+ /* Disable/inactivate ring */
+ writel_relaxed(0x0, ring->regs + RING_CONTROL);
+
+ /* Program BD start address */
+ val = BD_START_ADDR_VALUE(ring->bd_dma_base);
+ writel_relaxed(val, ring->regs + RING_BD_START_ADDR);
+
+ /* BD write pointer will be same as HW write pointer */
+ ring->bd_write_offset =
+ readl_relaxed(ring->regs + RING_BD_WRITE_PTR);
+ ring->bd_write_offset *= RING_DESC_SIZE;
+
+ /* Program completion start address */
+ val = CMPL_START_ADDR_VALUE(ring->cmpl_dma_base);
+ writel_relaxed(val, ring->regs + RING_CMPL_START_ADDR);
+
+ /* Completion read pointer will be same as HW write pointer */
+ ring->cmpl_read_offset =
+ readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR);
+ ring->cmpl_read_offset *= RING_DESC_SIZE;
+
+ /* Read ring Tx, Rx, and Outstanding counts to clear */
+ readl_relaxed(ring->regs + RING_NUM_REQ_RECV_LS);
+ readl_relaxed(ring->regs + RING_NUM_REQ_RECV_MS);
+ readl_relaxed(ring->regs + RING_NUM_REQ_TRANS_LS);
+ readl_relaxed(ring->regs + RING_NUM_REQ_TRANS_MS);
+ readl_relaxed(ring->regs + RING_NUM_REQ_OUTSTAND);
+
+ /* Configure RING_MSI_CONTROL */
+ val = 0;
+ val |= (ring->msi_timer_val << MSI_TIMER_VAL_SHIFT);
+ val |= BIT(MSI_ENABLE_SHIFT);
+ val |= (ring->msi_count_threshold & MSI_COUNT_MASK) << MSI_COUNT_SHIFT;
+ writel_relaxed(val, ring->regs + RING_MSI_CONTROL);
+
+ /* Enable/activate ring */
+ val = BIT(CONTROL_ACTIVE_SHIFT);
+ writel_relaxed(val, ring->regs + RING_CONTROL);
+
+ /* Reset stats to zero */
+ atomic_set(&ring->msg_send_count, 0);
+ atomic_set(&ring->msg_cmpl_count, 0);
+
+ return 0;
+
+fail_free_irq:
+ free_irq(ring->irq, ring);
+ ring->irq_requested = false;
+fail_free_cmpl_memory:
+ dma_pool_free(ring->mbox->cmpl_pool,
+ ring->cmpl_base, ring->cmpl_dma_base);
+ ring->cmpl_base = NULL;
+fail_free_bd_memory:
+ dma_pool_free(ring->mbox->bd_pool,
+ ring->bd_base, ring->bd_dma_base);
+ ring->bd_base = NULL;
+fail:
+ return ret;
+}
+
+static void flexrm_shutdown(struct mbox_chan *chan)
+{
+ u32 reqid;
+ unsigned int timeout;
+ struct brcm_message *msg;
+ struct flexrm_ring *ring = chan->con_priv;
+
+ /* Disable/inactivate ring */
+ writel_relaxed(0x0, ring->regs + RING_CONTROL);
+
+ /* Set ring flush state */
+ timeout = 1000; /* timeout of 1s */
+ writel_relaxed(BIT(CONTROL_FLUSH_SHIFT),
+ ring->regs + RING_CONTROL);
+ do {
+ if (readl_relaxed(ring->regs + RING_FLUSH_DONE) &
+ FLUSH_DONE_MASK)
+ break;
+ mdelay(1);
+ } while (--timeout);
+ if (!timeout)
+ dev_err(ring->mbox->dev,
+ "setting ring%d flush state timedout\n", ring->num);
+
+ /* Clear ring flush state */
+ timeout = 1000; /* timeout of 1s */
+ writel_relaxed(0x0, ring->regs + RING_CONTROL);
+ do {
+ if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) &
+ FLUSH_DONE_MASK))
+ break;
+ mdelay(1);
+ } while (--timeout);
+ if (!timeout)
+ dev_err(ring->mbox->dev,
+ "clearing ring%d flush state timedout\n", ring->num);
+
+ /* Abort all in-flight requests */
+ for (reqid = 0; reqid < RING_MAX_REQ_COUNT; reqid++) {
+ msg = ring->requests[reqid];
+ if (!msg)
+ continue;
+
+ /* Release reqid for recycling */
+ ring->requests[reqid] = NULL;
+
+ /* Unmap DMA mappings */
+ flexrm_dma_unmap(ring->mbox->dev, msg);
+
+ /* Give-back message to mailbox client */
+ msg->error = -EIO;
+ mbox_chan_received_data(chan, msg);
+ }
+
+ /* Clear requests bitmap */
+ bitmap_zero(ring->requests_bmap, RING_MAX_REQ_COUNT);
+
+ /* Release IRQ */
+ if (ring->irq_requested) {
+ irq_update_affinity_hint(ring->irq, NULL);
+ free_irq(ring->irq, ring);
+ ring->irq_requested = false;
+ }
+
+ /* Free-up completion descriptor ring */
+ if (ring->cmpl_base) {
+ dma_pool_free(ring->mbox->cmpl_pool,
+ ring->cmpl_base, ring->cmpl_dma_base);
+ ring->cmpl_base = NULL;
+ }
+
+ /* Free-up BD descriptor ring */
+ if (ring->bd_base) {
+ dma_pool_free(ring->mbox->bd_pool,
+ ring->bd_base, ring->bd_dma_base);
+ ring->bd_base = NULL;
+ }
+}
+
+static const struct mbox_chan_ops flexrm_mbox_chan_ops = {
+ .send_data = flexrm_send_data,
+ .startup = flexrm_startup,
+ .shutdown = flexrm_shutdown,
+ .peek_data = flexrm_peek_data,
+};
+
+static struct mbox_chan *flexrm_mbox_of_xlate(struct mbox_controller *cntlr,
+ const struct of_phandle_args *pa)
+{
+ struct mbox_chan *chan;
+ struct flexrm_ring *ring;
+
+ if (pa->args_count < 3)
+ return ERR_PTR(-EINVAL);
+
+ if (pa->args[0] >= cntlr->num_chans)
+ return ERR_PTR(-ENOENT);
+
+ if (pa->args[1] > MSI_COUNT_MASK)
+ return ERR_PTR(-EINVAL);
+
+ if (pa->args[2] > MSI_TIMER_VAL_MASK)
+ return ERR_PTR(-EINVAL);
+
+ chan = &cntlr->chans[pa->args[0]];
+ ring = chan->con_priv;
+ ring->msi_count_threshold = pa->args[1];
+ ring->msi_timer_val = pa->args[2];
+
+ return chan;
+}
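+
+/*
+ * Device tree usage sketch (illustrative values): a client references a ring
+ * with three cells, e.g.
+ *
+ *	mboxes = <&flexrm 12 0x1 0xffff>;
+ *
+ * meaning ring 12, an MSI count threshold of 1 and an MSI timer value of
+ * 0xffff, matching the three args validated above.
+ */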
+
+/* ====== FlexRM platform driver ===== */
+
+static void flexrm_mbox_msi_write(struct msi_desc *desc, struct msi_msg *msg)
+{
+ struct device *dev = msi_desc_to_dev(desc);
+ struct flexrm_mbox *mbox = dev_get_drvdata(dev);
+ struct flexrm_ring *ring = &mbox->rings[desc->msi_index];
+
+ /* Configure per-Ring MSI registers */
+ writel_relaxed(msg->address_lo, ring->regs + RING_MSI_ADDR_LS);
+ writel_relaxed(msg->address_hi, ring->regs + RING_MSI_ADDR_MS);
+ writel_relaxed(msg->data, ring->regs + RING_MSI_DATA_VALUE);
+}
+
+static int flexrm_mbox_probe(struct platform_device *pdev)
+{
+ int index, ret = 0;
+ void __iomem *regs;
+ void __iomem *regs_end;
+ struct resource *iomem;
+ struct flexrm_ring *ring;
+ struct flexrm_mbox *mbox;
+ struct device *dev = &pdev->dev;
+
+ /* Allocate driver mailbox struct */
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ mbox->dev = dev;
+ platform_set_drvdata(pdev, mbox);
+
+ /* Get resource for registers and map registers of all rings */
+ mbox->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &iomem);
+ if (!iomem || (resource_size(iomem) < RING_REGS_SIZE)) {
+ ret = -ENODEV;
+ goto fail;
+ } else if (IS_ERR(mbox->regs)) {
+ ret = PTR_ERR(mbox->regs);
+ goto fail;
+ }
+ regs_end = mbox->regs + resource_size(iomem);
+
+ /* Scan and count available rings */
+ mbox->num_rings = 0;
+ for (regs = mbox->regs; regs < regs_end; regs += RING_REGS_SIZE) {
+ if (readl_relaxed(regs + RING_VER) == RING_VER_MAGIC)
+ mbox->num_rings++;
+ }
+ if (!mbox->num_rings) {
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ /* Allocate driver ring structs */
+ ring = devm_kcalloc(dev, mbox->num_rings, sizeof(*ring), GFP_KERNEL);
+ if (!ring) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ mbox->rings = ring;
+
+ /* Initialize members of driver ring structs */
+ regs = mbox->regs;
+ for (index = 0; index < mbox->num_rings; index++) {
+ ring = &mbox->rings[index];
+ ring->num = index;
+ ring->mbox = mbox;
+ while ((regs < regs_end) &&
+ (readl_relaxed(regs + RING_VER) != RING_VER_MAGIC))
+ regs += RING_REGS_SIZE;
+ if (regs_end <= regs) {
+ ret = -ENODEV;
+ goto fail;
+ }
+ ring->regs = regs;
+ regs += RING_REGS_SIZE;
+ ring->irq = UINT_MAX;
+ ring->irq_requested = false;
+ ring->msi_timer_val = MSI_TIMER_VAL_MASK;
+ ring->msi_count_threshold = 0x1;
+ memset(ring->requests, 0, sizeof(ring->requests));
+ ring->bd_base = NULL;
+ ring->bd_dma_base = 0;
+ ring->cmpl_base = NULL;
+ ring->cmpl_dma_base = 0;
+ atomic_set(&ring->msg_send_count, 0);
+ atomic_set(&ring->msg_cmpl_count, 0);
+ spin_lock_init(&ring->lock);
+ bitmap_zero(ring->requests_bmap, RING_MAX_REQ_COUNT);
+ ring->cmpl_read_offset = 0;
+ }
+
+ /* FlexRM is capable of 40-bit physical addresses only */
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ if (ret) {
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto fail;
+ }
+
+ /* Create DMA pool for ring BD memory */
+ mbox->bd_pool = dma_pool_create("bd", dev, RING_BD_SIZE,
+ 1 << RING_BD_ALIGN_ORDER, 0);
+ if (!mbox->bd_pool) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ /* Create DMA pool for ring completion memory */
+ mbox->cmpl_pool = dma_pool_create("cmpl", dev, RING_CMPL_SIZE,
+ 1 << RING_CMPL_ALIGN_ORDER, 0);
+ if (!mbox->cmpl_pool) {
+ ret = -ENOMEM;
+ goto fail_destroy_bd_pool;
+ }
+
+ /* Allocate platform MSIs for each ring */
+ ret = platform_msi_domain_alloc_irqs(dev, mbox->num_rings,
+ flexrm_mbox_msi_write);
+ if (ret)
+ goto fail_destroy_cmpl_pool;
+
+	/* Save allocated IRQ numbers for each ring */
+ for (index = 0; index < mbox->num_rings; index++)
+ mbox->rings[index].irq = msi_get_virq(dev, index);
+
+ /* Check availability of debugfs */
+ if (!debugfs_initialized())
+ goto skip_debugfs;
+
+ /* Create debugfs root entry */
+ mbox->root = debugfs_create_dir(dev_name(mbox->dev), NULL);
+
+ /* Create debugfs config entry */
+ debugfs_create_devm_seqfile(mbox->dev, "config", mbox->root,
+ flexrm_debugfs_conf_show);
+
+ /* Create debugfs stats entry */
+ debugfs_create_devm_seqfile(mbox->dev, "stats", mbox->root,
+ flexrm_debugfs_stats_show);
+
+skip_debugfs:
+
+ /* Initialize mailbox controller */
+ mbox->controller.txdone_irq = false;
+ mbox->controller.txdone_poll = false;
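+	/*
+	 * With both txdone_irq and txdone_poll left false, the mailbox core
+	 * falls back to client-acknowledged tx completion (TXDONE_BY_ACK).
+	 */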
+ mbox->controller.ops = &flexrm_mbox_chan_ops;
+ mbox->controller.dev = dev;
+ mbox->controller.num_chans = mbox->num_rings;
+ mbox->controller.of_xlate = flexrm_mbox_of_xlate;
+ mbox->controller.chans = devm_kcalloc(dev, mbox->num_rings,
+ sizeof(*mbox->controller.chans), GFP_KERNEL);
+ if (!mbox->controller.chans) {
+ ret = -ENOMEM;
+ goto fail_free_debugfs_root;
+ }
+ for (index = 0; index < mbox->num_rings; index++)
+ mbox->controller.chans[index].con_priv = &mbox->rings[index];
+
+ /* Register mailbox controller */
+ ret = devm_mbox_controller_register(dev, &mbox->controller);
+ if (ret)
+ goto fail_free_debugfs_root;
+
+ dev_info(dev, "registered flexrm mailbox with %d channels\n",
+ mbox->controller.num_chans);
+
+ return 0;
+
+fail_free_debugfs_root:
+ debugfs_remove_recursive(mbox->root);
+ platform_msi_domain_free_irqs(dev);
+fail_destroy_cmpl_pool:
+ dma_pool_destroy(mbox->cmpl_pool);
+fail_destroy_bd_pool:
+ dma_pool_destroy(mbox->bd_pool);
+fail:
+ return ret;
+}
+
+static int flexrm_mbox_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct flexrm_mbox *mbox = platform_get_drvdata(pdev);
+
+ debugfs_remove_recursive(mbox->root);
+
+ platform_msi_domain_free_irqs(dev);
+
+ dma_pool_destroy(mbox->cmpl_pool);
+ dma_pool_destroy(mbox->bd_pool);
+
+ return 0;
+}
+
+static const struct of_device_id flexrm_mbox_of_match[] = {
+ { .compatible = "brcm,iproc-flexrm-mbox", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, flexrm_mbox_of_match);
+
+static struct platform_driver flexrm_mbox_driver = {
+ .driver = {
+ .name = "brcm-flexrm-mbox",
+ .of_match_table = flexrm_mbox_of_match,
+ },
+ .probe = flexrm_mbox_probe,
+ .remove = flexrm_mbox_remove,
+};
+module_platform_driver(flexrm_mbox_driver);
+
+MODULE_AUTHOR("Anup Patel <anup.patel@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom FlexRM mailbox driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/bcm-pdc-mailbox.c b/drivers/mailbox/bcm-pdc-mailbox.c
new file mode 100644
index 000000000..d67db63b4
--- /dev/null
+++ b/drivers/mailbox/bcm-pdc-mailbox.c
@@ -0,0 +1,1639 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2016 Broadcom
+ */
+
+/*
+ * Broadcom PDC Mailbox Driver
+ * The PDC provides a ring based programming interface to one or more hardware
+ * offload engines. For example, the PDC driver works with both SPU-M and SPU2
+ * cryptographic offload hardware. In some chips the PDC is referred to as MDE,
+ * and in others the FA2/FA+ hardware is used with this PDC driver.
+ *
+ * The PDC driver registers with the Linux mailbox framework as a mailbox
+ * controller, once for each PDC instance. Ring 0 for each PDC is registered as
+ * a mailbox channel. The PDC driver uses interrupts to determine when data
+ * transfers to and from an offload engine are complete. The PDC driver
+ * defers response handling to a tasklet so that response messages are
+ * processed outside of interrupt context.
+ *
+ * The PDC driver allows multiple messages to be pending in the descriptor
+ * rings. The tx_msg_start descriptor index indicates where the last message
+ * starts. The txin_numd value at this index indicates how many descriptor
+ * indexes make up the message. Similar state is kept on the receive side. When
+ * an rx interrupt indicates a response is ready, the PDC driver processes numd
+ * descriptors from the tx and rx ring, thus processing one response at a time.
+ */
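+
+/*
+ * Illustrative example: if a request's source scatterlist is written out as
+ * three tx descriptors starting at index tx_msg_start, then
+ * txin_numd[tx_msg_start] is 3, and once the rx interrupt reports the reply
+ * the driver reclaims the message by advancing txin past those three
+ * descriptors.
+ */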
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox/brcm-message.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+
+#define PDC_SUCCESS 0
+
+#define RING_ENTRY_SIZE sizeof(struct dma64dd)
+
+/* # entries in PDC dma ring */
+#define PDC_RING_ENTRIES 512
+/*
+ * Minimum number of ring descriptor entries that must be free to tell mailbox
+ * framework that it can submit another request
+ */
+#define PDC_RING_SPACE_MIN 15
+
+#define PDC_RING_SIZE (PDC_RING_ENTRIES * RING_ENTRY_SIZE)
+/* Rings are 8k aligned */
+#define RING_ALIGN_ORDER 13
+#define RING_ALIGN BIT(RING_ALIGN_ORDER)
+
+#define RX_BUF_ALIGN_ORDER 5
+#define RX_BUF_ALIGN BIT(RX_BUF_ALIGN_ORDER)
+
+/* descriptor bumping macros */
+#define XXD(x, max_mask) ((x) & (max_mask))
+#define TXD(x, max_mask) XXD((x), (max_mask))
+#define RXD(x, max_mask) XXD((x), (max_mask))
+#define NEXTTXD(i, max_mask) TXD((i) + 1, (max_mask))
+#define PREVTXD(i, max_mask) TXD((i) - 1, (max_mask))
+#define NEXTRXD(i, max_mask) RXD((i) + 1, (max_mask))
+#define PREVRXD(i, max_mask) RXD((i) - 1, (max_mask))
+#define NTXDACTIVE(h, t, max_mask) TXD((t) - (h), (max_mask))
+#define NRXDACTIVE(h, t, max_mask) RXD((t) - (h), (max_mask))
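+
+/*
+ * Worked example of the ring arithmetic above (hypothetical indexes): with a
+ * mask of 511, a head (txin) of 510 and a tail (txout) of 3,
+ * NTXDACTIVE(510, 3, 511) = (3 - 510) & 511 = 5, i.e. descriptors 510, 511,
+ * 0, 1 and 2 are in flight.
+ */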
+
+/* Length of BCM header at start of SPU msg, in bytes */
+#define BCM_HDR_LEN 8
+
+/*
+ * PDC driver reserves ringset 0 on each SPU for its own use. The driver does
+ * not currently support use of multiple ringsets on a single PDC engine.
+ */
+#define PDC_RINGSET 0
+
+/*
+ * Interrupt mask and status definitions. Enable interrupts for tx and rx on
+ * ring 0
+ */
+#define PDC_RCVINT_0 (16 + PDC_RINGSET)
+#define PDC_RCVINTEN_0 BIT(PDC_RCVINT_0)
+#define PDC_INTMASK (PDC_RCVINTEN_0)
+#define PDC_LAZY_FRAMECOUNT 1
+#define PDC_LAZY_TIMEOUT 10000
+#define PDC_LAZY_INT (PDC_LAZY_TIMEOUT | (PDC_LAZY_FRAMECOUNT << 24))
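+/*
+ * Per the macro above, the timeout occupies the low bits of the intrcvlazy
+ * register and the frame count sits in bits 31:24; with a frame count of 1
+ * the hardware is expected to raise an rx interrupt for every received frame.
+ */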
+#define PDC_INTMASK_OFFSET 0x24
+#define PDC_INTSTATUS_OFFSET 0x20
+#define PDC_RCVLAZY0_OFFSET (0x30 + 4 * PDC_RINGSET)
+#define FA_RCVLAZY0_OFFSET 0x100
+
+/*
+ * For SPU2, configure MDE_CKSUM_CONTROL to write 17 bytes of metadata
+ * before frame
+ */
+#define PDC_SPU2_RESP_HDR_LEN 17
+#define PDC_CKSUM_CTRL BIT(27)
+#define PDC_CKSUM_CTRL_OFFSET 0x400
+
+#define PDC_SPUM_RESP_HDR_LEN 32
+
+/*
+ * Sets the following bits for write to transmit control reg:
+ * 11 - PtyChkDisable - parity check is disabled
+ * 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
+ */
+#define PDC_TX_CTL 0x000C0800
+
+/* Bit in tx control reg to enable tx channel */
+#define PDC_TX_ENABLE 0x1
+
+/*
+ * Sets the following bits for write to receive control reg:
+ * 7:1 - RcvOffset - size in bytes of status region at start of rx frame buf
+ * 9 - SepRxHdrDescEn - place start of new frames only in descriptors
+ * that have StartOfFrame set
+ * 10 - OflowContinue - on rx FIFO overflow, clear rx fifo, discard all
+ * remaining bytes in current frame, report error
+ * in rx frame status for current frame
+ * 11 - PtyChkDisable - parity check is disabled
+ * 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
+ */
+#define PDC_RX_CTL 0x000C0E00
+
+/* Bit in rx control reg to enable rx channel */
+#define PDC_RX_ENABLE 0x1
+
+#define CRYPTO_D64_RS0_CD_MASK ((PDC_RING_ENTRIES * RING_ENTRY_SIZE) - 1)
+
+/* descriptor flags */
+#define D64_CTRL1_EOT BIT(28) /* end of descriptor table */
+#define D64_CTRL1_IOC BIT(29) /* interrupt on complete */
+#define D64_CTRL1_EOF BIT(30) /* end of frame */
+#define D64_CTRL1_SOF BIT(31) /* start of frame */
+
+#define RX_STATUS_OVERFLOW 0x00800000
+#define RX_STATUS_LEN 0x0000FFFF
+
+#define PDC_TXREGS_OFFSET 0x200
+#define PDC_RXREGS_OFFSET 0x220
+
+/* Maximum size buffer the DMA engine can handle */
+#define PDC_DMA_BUF_MAX 16384
+
+enum pdc_hw {
+ FA_HW, /* FA2/FA+ hardware (i.e. Northstar Plus) */
+ PDC_HW /* PDC/MDE hardware (i.e. Northstar 2, Pegasus) */
+};
+
+struct pdc_dma_map {
+ void *ctx; /* opaque context associated with frame */
+};
+
+/* dma descriptor */
+struct dma64dd {
+ u32 ctrl1; /* misc control bits */
+ u32 ctrl2; /* buffer count and address extension */
+	u32 addrlow;	/* memory address of the data buffer, bits 31:0 */
+	u32 addrhigh;	/* memory address of the data buffer, bits 63:32 */
+};
+
+/* dma registers per channel(xmt or rcv) */
+struct dma64_regs {
+ u32 control; /* enable, et al */
+ u32 ptr; /* last descriptor posted to chip */
+ u32 addrlow; /* descriptor ring base address low 32-bits */
+ u32 addrhigh; /* descriptor ring base address bits 63:32 */
+ u32 status0; /* last rx descriptor written by hw */
+ u32 status1; /* driver does not use */
+};
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif /* PAD */
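+/*
+ * Example: if the PAD macro is used on (hypothetical) source line 123,
+ * "u32 PAD[2];" expands to "u32 pad123[2];", giving each padding field a
+ * unique name.
+ */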
+
+/* dma registers. matches hw layout. */
+struct dma64 {
+ struct dma64_regs dmaxmt; /* dma tx */
+ u32 PAD[2];
+ struct dma64_regs dmarcv; /* dma rx */
+ u32 PAD[2];
+};
+
+/* PDC registers */
+struct pdc_regs {
+ u32 devcontrol; /* 0x000 */
+ u32 devstatus; /* 0x004 */
+ u32 PAD;
+ u32 biststatus; /* 0x00c */
+ u32 PAD[4];
+ u32 intstatus; /* 0x020 */
+ u32 intmask; /* 0x024 */
+ u32 gptimer; /* 0x028 */
+
+ u32 PAD;
+ u32 intrcvlazy_0; /* 0x030 (Only in PDC, not FA2) */
+ u32 intrcvlazy_1; /* 0x034 (Only in PDC, not FA2) */
+ u32 intrcvlazy_2; /* 0x038 (Only in PDC, not FA2) */
+ u32 intrcvlazy_3; /* 0x03c (Only in PDC, not FA2) */
+
+ u32 PAD[48];
+ u32 fa_intrecvlazy; /* 0x100 (Only in FA2, not PDC) */
+ u32 flowctlthresh; /* 0x104 */
+ u32 wrrthresh; /* 0x108 */
+ u32 gmac_idle_cnt_thresh; /* 0x10c */
+
+ u32 PAD[4];
+ u32 ifioaccessaddr; /* 0x120 */
+ u32 ifioaccessbyte; /* 0x124 */
+ u32 ifioaccessdata; /* 0x128 */
+
+ u32 PAD[21];
+ u32 phyaccess; /* 0x180 */
+ u32 PAD;
+ u32 phycontrol; /* 0x188 */
+ u32 txqctl; /* 0x18c */
+ u32 rxqctl; /* 0x190 */
+ u32 gpioselect; /* 0x194 */
+ u32 gpio_output_en; /* 0x198 */
+ u32 PAD; /* 0x19c */
+ u32 txq_rxq_mem_ctl; /* 0x1a0 */
+ u32 memory_ecc_status; /* 0x1a4 */
+ u32 serdes_ctl; /* 0x1a8 */
+ u32 serdes_status0; /* 0x1ac */
+ u32 serdes_status1; /* 0x1b0 */
+ u32 PAD[11]; /* 0x1b4-1dc */
+ u32 clk_ctl_st; /* 0x1e0 */
+ u32 hw_war; /* 0x1e4 (Only in PDC, not FA2) */
+ u32 pwrctl; /* 0x1e8 */
+ u32 PAD[5];
+
+#define PDC_NUM_DMA_RINGS 4
+ struct dma64 dmaregs[PDC_NUM_DMA_RINGS]; /* 0x0200 - 0x2fc */
+
+ /* more registers follow, but we don't use them */
+};
+
+/* structure for allocating/freeing DMA rings */
+struct pdc_ring_alloc {
+ dma_addr_t dmabase; /* DMA address of start of ring */
+ void *vbase; /* base kernel virtual address of ring */
+ u32 size; /* ring allocation size in bytes */
+};
+
+/*
+ * context associated with a receive descriptor.
+ * @rxp_ctx: opaque context associated with frame that starts at each
+ * rx ring index.
+ * @dst_sg: Scatterlist used to form reply frames beginning at a given ring
+ * index. Retained in order to unmap each sg after reply is processed.
+ * @rxin_numd: Number of rx descriptors associated with the message that starts
+ * at a descriptor index. Not set for every index. For example,
+ * if descriptor index i points to a scatterlist with 4 entries,
+ * then the next three descriptor indexes don't have a value set.
+ * @resp_hdr: Virtual address of buffer used to catch DMA rx status
+ * @resp_hdr_daddr: physical address of DMA rx status buffer
+ */
+struct pdc_rx_ctx {
+ void *rxp_ctx;
+ struct scatterlist *dst_sg;
+ u32 rxin_numd;
+ void *resp_hdr;
+ dma_addr_t resp_hdr_daddr;
+};
+
+/* PDC state structure */
+struct pdc_state {
+ /* Index of the PDC whose state is in this structure instance */
+ u8 pdc_idx;
+
+ /* Platform device for this PDC instance */
+ struct platform_device *pdev;
+
+ /*
+ * Each PDC instance has a mailbox controller. PDC receives request
+ * messages through mailboxes, and sends response messages through the
+ * mailbox framework.
+ */
+ struct mbox_controller mbc;
+
+ unsigned int pdc_irq;
+
+ /* tasklet for deferred processing after DMA rx interrupt */
+ struct tasklet_struct rx_tasklet;
+
+ /* Number of bytes of receive status prior to each rx frame */
+ u32 rx_status_len;
+ /* Whether a BCM header is prepended to each frame */
+ bool use_bcm_hdr;
+ /* Sum of length of BCM header and rx status header */
+ u32 pdc_resp_hdr_len;
+
+ /* The base virtual address of DMA hw registers */
+ void __iomem *pdc_reg_vbase;
+
+ /* Pool for allocation of DMA rings */
+ struct dma_pool *ring_pool;
+
+ /* Pool for allocation of metadata buffers for response messages */
+ struct dma_pool *rx_buf_pool;
+
+ /*
+ * The base virtual address of DMA tx/rx descriptor rings. Corresponding
+ * DMA address and size of ring allocation.
+ */
+ struct pdc_ring_alloc tx_ring_alloc;
+ struct pdc_ring_alloc rx_ring_alloc;
+
+ struct pdc_regs *regs; /* start of PDC registers */
+
+ struct dma64_regs *txregs_64; /* dma tx engine registers */
+ struct dma64_regs *rxregs_64; /* dma rx engine registers */
+
+ /*
+ * Arrays of PDC_RING_ENTRIES descriptors
+ * To use multiple ringsets, this needs to be extended
+ */
+ struct dma64dd *txd_64; /* tx descriptor ring */
+ struct dma64dd *rxd_64; /* rx descriptor ring */
+
+ /* descriptor ring sizes */
+ u32 ntxd; /* # tx descriptors */
+ u32 nrxd; /* # rx descriptors */
+ u32 nrxpost; /* # rx buffers to keep posted */
+ u32 ntxpost; /* max number of tx buffers that can be posted */
+
+ /*
+ * Index of next tx descriptor to reclaim. That is, the descriptor
+ * index of the oldest tx buffer for which the host has yet to process
+ * the corresponding response.
+ */
+ u32 txin;
+
+ /*
+	 * Index of the first transmit descriptor for the sequence of
+	 * message fragments currently under construction. Used to build up
+	 * the txin_numd count for a message. Updated to txout when the host
+	 * starts a new sequence of tx buffers for a new message.
+ */
+ u32 tx_msg_start;
+
+ /* Index of next tx descriptor to post. */
+ u32 txout;
+
+ /*
+ * Number of tx descriptors associated with the message that starts
+ * at this tx descriptor index.
+ */
+ u32 txin_numd[PDC_RING_ENTRIES];
+
+ /*
+ * Index of next rx descriptor to reclaim. This is the index of
+ * the next descriptor whose data has yet to be processed by the host.
+ */
+ u32 rxin;
+
+ /*
+ * Index of the first receive descriptor for the sequence of
+ * message fragments currently under construction. Used to build up
+ * the rxin_numd count for a message. Updated to rxout when the host
+ * starts a new sequence of rx buffers for a new message.
+ */
+ u32 rx_msg_start;
+
+ /*
+ * Saved value of current hardware rx descriptor index.
+ * The last rx buffer written by the hw is the index previous to
+ * this one.
+ */
+ u32 last_rx_curr;
+
+ /* Index of next rx descriptor to post. */
+ u32 rxout;
+
+ struct pdc_rx_ctx rx_ctx[PDC_RING_ENTRIES];
+
+ /*
+ * Scatterlists used to form request and reply frames beginning at a
+ * given ring index. Retained in order to unmap each sg after reply
+ * is processed
+ */
+ struct scatterlist *src_sg[PDC_RING_ENTRIES];
+
+ /* counters */
+ u32 pdc_requests; /* number of request messages submitted */
+ u32 pdc_replies; /* number of reply messages received */
+ u32 last_tx_not_done; /* too few tx descriptors to indicate done */
+ u32 tx_ring_full; /* unable to accept msg because tx ring full */
+ u32 rx_ring_full; /* unable to accept msg because rx ring full */
+ u32 txnobuf; /* unable to create tx descriptor */
+ u32 rxnobuf; /* unable to create rx descriptor */
+ u32 rx_oflow; /* count of rx overflows */
+
+ /* hardware type - FA2 or PDC/MDE */
+ enum pdc_hw hw_type;
+};
+
+/* Global variables */
+
+struct pdc_globals {
+ /* Actual number of SPUs in hardware, as reported by device tree */
+ u32 num_spu;
+};
+
+static struct pdc_globals pdcg;
+
+/* top level debug FS directory for PDC driver */
+static struct dentry *debugfs_dir;
+
+static ssize_t pdc_debugfs_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *offp)
+{
+ struct pdc_state *pdcs;
+ char *buf;
+ ssize_t ret, out_offset, out_count;
+
+ out_count = 512;
+
+ buf = kmalloc(out_count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pdcs = filp->private_data;
+ out_offset = 0;
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
+ "SPU %u stats:\n", pdcs->pdc_idx);
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
+ "PDC requests....................%u\n",
+ pdcs->pdc_requests);
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
+ "PDC responses...................%u\n",
+ pdcs->pdc_replies);
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
+ "Tx not done.....................%u\n",
+ pdcs->last_tx_not_done);
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
+ "Tx ring full....................%u\n",
+ pdcs->tx_ring_full);
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
+ "Rx ring full....................%u\n",
+ pdcs->rx_ring_full);
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
+ "Tx desc write fail. Ring full...%u\n",
+ pdcs->txnobuf);
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
+ "Rx desc write fail. Ring full...%u\n",
+ pdcs->rxnobuf);
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
+ "Receive overflow................%u\n",
+ pdcs->rx_oflow);
+ out_offset += scnprintf(buf + out_offset, out_count - out_offset,
+ "Num frags in rx ring............%u\n",
+ NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr,
+ pdcs->nrxpost));
+
+ if (out_offset > out_count)
+ out_offset = out_count;
+
+ ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
+ kfree(buf);
+ return ret;
+}
+
+static const struct file_operations pdc_debugfs_stats = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = pdc_debugfs_read,
+};
+
+/**
+ * pdc_setup_debugfs() - Create the debug FS directories. If the top-level
+ * directory has not yet been created, create it now. Create a stats file in
+ * this directory for a SPU.
+ * @pdcs: PDC state structure
+ */
+static void pdc_setup_debugfs(struct pdc_state *pdcs)
+{
+ char spu_stats_name[16];
+
+ if (!debugfs_initialized())
+ return;
+
+ snprintf(spu_stats_name, 16, "pdc%d_stats", pdcs->pdc_idx);
+ if (!debugfs_dir)
+ debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+
+ /* S_IRUSR == 0400 */
+ debugfs_create_file(spu_stats_name, 0400, debugfs_dir, pdcs,
+ &pdc_debugfs_stats);
+}
+
+static void pdc_free_debugfs(void)
+{
+ debugfs_remove_recursive(debugfs_dir);
+ debugfs_dir = NULL;
+}
+
+/**
+ * pdc_build_rxd() - Build DMA descriptor to receive SPU result.
+ * @pdcs: PDC state for SPU that will generate result
+ * @dma_addr: DMA address of buffer that descriptor is being built for
+ * @buf_len: Length of the receive buffer, in bytes
+ * @flags: Flags to be stored in descriptor
+ */
+static inline void
+pdc_build_rxd(struct pdc_state *pdcs, dma_addr_t dma_addr,
+ u32 buf_len, u32 flags)
+{
+ struct device *dev = &pdcs->pdev->dev;
+ struct dma64dd *rxd = &pdcs->rxd_64[pdcs->rxout];
+
+ dev_dbg(dev,
+ "Writing rx descriptor for PDC %u at index %u with length %u. flags %#x\n",
+ pdcs->pdc_idx, pdcs->rxout, buf_len, flags);
+
+ rxd->addrlow = cpu_to_le32(lower_32_bits(dma_addr));
+ rxd->addrhigh = cpu_to_le32(upper_32_bits(dma_addr));
+ rxd->ctrl1 = cpu_to_le32(flags);
+ rxd->ctrl2 = cpu_to_le32(buf_len);
+
+ /* bump ring index and return */
+ pdcs->rxout = NEXTRXD(pdcs->rxout, pdcs->nrxpost);
+}
+
+/**
+ * pdc_build_txd() - Build a DMA descriptor to transmit a SPU request to
+ * hardware.
+ * @pdcs: PDC state for the SPU that will process this request
+ * @dma_addr: DMA address of packet to be transmitted
+ * @buf_len: Length of tx buffer, in bytes
+ * @flags: Flags to be stored in descriptor
+ */
+static inline void
+pdc_build_txd(struct pdc_state *pdcs, dma_addr_t dma_addr, u32 buf_len,
+ u32 flags)
+{
+ struct device *dev = &pdcs->pdev->dev;
+ struct dma64dd *txd = &pdcs->txd_64[pdcs->txout];
+
+ dev_dbg(dev,
+ "Writing tx descriptor for PDC %u at index %u with length %u, flags %#x\n",
+ pdcs->pdc_idx, pdcs->txout, buf_len, flags);
+
+ txd->addrlow = cpu_to_le32(lower_32_bits(dma_addr));
+ txd->addrhigh = cpu_to_le32(upper_32_bits(dma_addr));
+ txd->ctrl1 = cpu_to_le32(flags);
+ txd->ctrl2 = cpu_to_le32(buf_len);
+
+ /* bump ring index and return */
+ pdcs->txout = NEXTTXD(pdcs->txout, pdcs->ntxpost);
+}
+
+/**
+ * pdc_receive_one() - Receive a response message from a given SPU.
+ * @pdcs: PDC state for the SPU to receive from
+ *
+ * When the return code indicates success, the response message is available in
+ * the receive buffers provided prior to submission of the request.
+ *
+ * Return: PDC_SUCCESS if one or more receive descriptors were processed
+ * -EAGAIN indicates that no response message is available
+ * -EIO an error occurred
+ */
+static int
+pdc_receive_one(struct pdc_state *pdcs)
+{
+ struct device *dev = &pdcs->pdev->dev;
+ struct mbox_controller *mbc;
+ struct mbox_chan *chan;
+ struct brcm_message mssg;
+ u32 len, rx_status;
+ u32 num_frags;
+ u8 *resp_hdr; /* virtual addr of start of resp message DMA header */
+ u32 frags_rdy; /* number of fragments ready to read */
+ u32 rx_idx; /* ring index of start of receive frame */
+ dma_addr_t resp_hdr_daddr;
+ struct pdc_rx_ctx *rx_ctx;
+
+ mbc = &pdcs->mbc;
+ chan = &mbc->chans[0];
+ mssg.type = BRCM_MESSAGE_SPU;
+
+ /*
+ * return if a complete response message is not yet ready.
+ * rxin_numd[rxin] is the number of fragments in the next msg
+ * to read.
+ */
+ frags_rdy = NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr, pdcs->nrxpost);
+ if ((frags_rdy == 0) ||
+ (frags_rdy < pdcs->rx_ctx[pdcs->rxin].rxin_numd))
+ /* No response ready */
+ return -EAGAIN;
+
+ num_frags = pdcs->txin_numd[pdcs->txin];
+ WARN_ON(num_frags == 0);
+
+ dma_unmap_sg(dev, pdcs->src_sg[pdcs->txin],
+ sg_nents(pdcs->src_sg[pdcs->txin]), DMA_TO_DEVICE);
+
+ pdcs->txin = (pdcs->txin + num_frags) & pdcs->ntxpost;
+
+ dev_dbg(dev, "PDC %u reclaimed %d tx descriptors",
+ pdcs->pdc_idx, num_frags);
+
+ rx_idx = pdcs->rxin;
+ rx_ctx = &pdcs->rx_ctx[rx_idx];
+ num_frags = rx_ctx->rxin_numd;
+ /* Return opaque context with result */
+ mssg.ctx = rx_ctx->rxp_ctx;
+ rx_ctx->rxp_ctx = NULL;
+ resp_hdr = rx_ctx->resp_hdr;
+ resp_hdr_daddr = rx_ctx->resp_hdr_daddr;
+ dma_unmap_sg(dev, rx_ctx->dst_sg, sg_nents(rx_ctx->dst_sg),
+ DMA_FROM_DEVICE);
+
+ pdcs->rxin = (pdcs->rxin + num_frags) & pdcs->nrxpost;
+
+ dev_dbg(dev, "PDC %u reclaimed %d rx descriptors",
+ pdcs->pdc_idx, num_frags);
+
+ dev_dbg(dev,
+ "PDC %u txin %u, txout %u, rxin %u, rxout %u, last_rx_curr %u\n",
+ pdcs->pdc_idx, pdcs->txin, pdcs->txout, pdcs->rxin,
+ pdcs->rxout, pdcs->last_rx_curr);
+
+ if (pdcs->pdc_resp_hdr_len == PDC_SPUM_RESP_HDR_LEN) {
+ /*
+ * For SPU-M, get length of response msg and rx overflow status.
+ */
+ rx_status = *((u32 *)resp_hdr);
+ len = rx_status & RX_STATUS_LEN;
+ dev_dbg(dev,
+ "SPU response length %u bytes", len);
+ if (unlikely(((rx_status & RX_STATUS_OVERFLOW) || (!len)))) {
+ if (rx_status & RX_STATUS_OVERFLOW) {
+ dev_err_ratelimited(dev,
+ "crypto receive overflow");
+ pdcs->rx_oflow++;
+ } else {
+ dev_info_ratelimited(dev, "crypto rx len = 0");
+ }
+ return -EIO;
+ }
+ }
+
+ dma_pool_free(pdcs->rx_buf_pool, resp_hdr, resp_hdr_daddr);
+
+ mbox_chan_received_data(chan, &mssg);
+
+ pdcs->pdc_replies++;
+ return PDC_SUCCESS;
+}
+
+/**
+ * pdc_receive() - Process as many responses as are available in the rx ring.
+ * @pdcs: PDC state
+ *
+ * Called from the rx tasklet scheduled by the interrupt handler.
+ * Return: 0
+ */
+static int
+pdc_receive(struct pdc_state *pdcs)
+{
+ int rx_status;
+
+ /* read last_rx_curr from register once */
+ pdcs->last_rx_curr =
+ (ioread32((const void __iomem *)&pdcs->rxregs_64->status0) &
+ CRYPTO_D64_RS0_CD_MASK) / RING_ENTRY_SIZE;
+
+ do {
+ /* Could be many frames ready */
+ rx_status = pdc_receive_one(pdcs);
+ } while (rx_status == PDC_SUCCESS);
+
+ return 0;
+}
+
+/**
+ * pdc_tx_list_sg_add() - Add the buffers in a scatterlist to the transmit
+ * descriptors for a given SPU. The scatterlist buffers contain the data for a
+ * SPU request message.
+ * @pdcs: PDC state for the SPU that will process this request
+ * @sg: Scatterlist whose buffers contain part of the SPU request
+ *
+ * If a scatterlist buffer is larger than PDC_DMA_BUF_MAX, multiple descriptors
+ * are written for that buffer, each <= PDC_DMA_BUF_MAX bytes in length.
+ *
+ * Return: PDC_SUCCESS if successful
+ * < 0 otherwise
+ */
+static int pdc_tx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
+{
+ u32 flags = 0;
+ u32 eot;
+ u32 tx_avail;
+
+ /*
+ * Num descriptors needed. Conservatively assume we need a descriptor
+ * for every entry in sg.
+ */
+ u32 num_desc;
+ u32 desc_w = 0; /* Number of tx descriptors written */
+ u32 bufcnt; /* Number of bytes of buffer pointed to by descriptor */
+ dma_addr_t databufptr; /* DMA address to put in descriptor */
+
+ num_desc = (u32)sg_nents(sg);
+
+ /* check whether enough tx descriptors are available */
+ tx_avail = pdcs->ntxpost - NTXDACTIVE(pdcs->txin, pdcs->txout,
+ pdcs->ntxpost);
+ if (unlikely(num_desc > tx_avail)) {
+ pdcs->txnobuf++;
+ return -ENOSPC;
+ }
+
+ /* build tx descriptors */
+ if (pdcs->tx_msg_start == pdcs->txout) {
+ /* Start of frame */
+ pdcs->txin_numd[pdcs->tx_msg_start] = 0;
+ pdcs->src_sg[pdcs->txout] = sg;
+ flags = D64_CTRL1_SOF;
+ }
+
+ while (sg) {
+ if (unlikely(pdcs->txout == (pdcs->ntxd - 1)))
+ eot = D64_CTRL1_EOT;
+ else
+ eot = 0;
+
+ /*
+ * If sg buffer larger than PDC limit, split across
+ * multiple descriptors
+ */
+ bufcnt = sg_dma_len(sg);
+ databufptr = sg_dma_address(sg);
+ while (bufcnt > PDC_DMA_BUF_MAX) {
+ pdc_build_txd(pdcs, databufptr, PDC_DMA_BUF_MAX,
+ flags | eot);
+ desc_w++;
+ bufcnt -= PDC_DMA_BUF_MAX;
+ databufptr += PDC_DMA_BUF_MAX;
+ if (unlikely(pdcs->txout == (pdcs->ntxd - 1)))
+ eot = D64_CTRL1_EOT;
+ else
+ eot = 0;
+ }
+ sg = sg_next(sg);
+ if (!sg)
+ /* Writing last descriptor for frame */
+ flags |= (D64_CTRL1_EOF | D64_CTRL1_IOC);
+ pdc_build_txd(pdcs, databufptr, bufcnt, flags | eot);
+ desc_w++;
+ /* Clear start of frame after first descriptor */
+ flags &= ~D64_CTRL1_SOF;
+ }
+ pdcs->txin_numd[pdcs->tx_msg_start] += desc_w;
+
+ return PDC_SUCCESS;
+}
+
+/**
+ * pdc_tx_list_final() - Initiate DMA transfer of last frame written to tx
+ * ring.
+ * @pdcs: PDC state for SPU to process the request
+ *
+ * Sets the index of the last descriptor written in both the rx and tx ring.
+ *
+ * Return: PDC_SUCCESS
+ */
+static int pdc_tx_list_final(struct pdc_state *pdcs)
+{
+ /*
+	 * write barrier to ensure the descriptor writes are visible in memory
+	 * before the chip is told to start processing the new request
+ */
+ wmb();
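+	/*
+	 * The ptr registers take byte offsets within the ring; each
+	 * descriptor is sizeof(struct dma64dd) == 16 bytes, hence the
+	 * index << 4 below.
+	 */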
+ iowrite32(pdcs->rxout << 4, &pdcs->rxregs_64->ptr);
+ iowrite32(pdcs->txout << 4, &pdcs->txregs_64->ptr);
+ pdcs->pdc_requests++;
+
+ return PDC_SUCCESS;
+}
+
+/**
+ * pdc_rx_list_init() - Start a new receive descriptor list for a given PDC.
+ * @pdcs: PDC state for SPU handling request
+ * @dst_sg: scatterlist providing rx buffers for response to be returned to
+ * mailbox client
+ * @ctx: Opaque context for this request
+ *
+ * Posts a single receive descriptor to hold the metadata that precedes a
+ * response. For example, with SPU-M, the metadata is a 32-byte DMA header and
+ * an 8-byte BCM header. Moves the msg_start descriptor indexes for both tx and
+ * rx to indicate the start of a new message.
+ *
+ * Return: PDC_SUCCESS if successful
+ * < 0 if an error (e.g., rx ring is full)
+ */
+static int pdc_rx_list_init(struct pdc_state *pdcs, struct scatterlist *dst_sg,
+ void *ctx)
+{
+ u32 flags = 0;
+ u32 rx_avail;
+ u32 rx_pkt_cnt = 1; /* Adding a single rx buffer */
+ dma_addr_t daddr;
+ void *vaddr;
+ struct pdc_rx_ctx *rx_ctx;
+
+ rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
+ pdcs->nrxpost);
+ if (unlikely(rx_pkt_cnt > rx_avail)) {
+ pdcs->rxnobuf++;
+ return -ENOSPC;
+ }
+
+ /* allocate a buffer for the dma rx status */
+ vaddr = dma_pool_zalloc(pdcs->rx_buf_pool, GFP_ATOMIC, &daddr);
+ if (unlikely(!vaddr))
+ return -ENOMEM;
+
+ /*
+ * Update msg_start indexes for both tx and rx to indicate the start
+ * of a new sequence of descriptor indexes that contain the fragments
+ * of the same message.
+ */
+ pdcs->rx_msg_start = pdcs->rxout;
+ pdcs->tx_msg_start = pdcs->txout;
+
+ /* This is always the first descriptor in the receive sequence */
+ flags = D64_CTRL1_SOF;
+ pdcs->rx_ctx[pdcs->rx_msg_start].rxin_numd = 1;
+
+ if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
+ flags |= D64_CTRL1_EOT;
+
+ rx_ctx = &pdcs->rx_ctx[pdcs->rxout];
+ rx_ctx->rxp_ctx = ctx;
+ rx_ctx->dst_sg = dst_sg;
+ rx_ctx->resp_hdr = vaddr;
+ rx_ctx->resp_hdr_daddr = daddr;
+ pdc_build_rxd(pdcs, daddr, pdcs->pdc_resp_hdr_len, flags);
+ return PDC_SUCCESS;
+}
+
+/**
+ * pdc_rx_list_sg_add() - Add the buffers in a scatterlist to the receive
+ * descriptors for a given SPU. The caller must have already DMA mapped the
+ * scatterlist.
+ * @pdcs: PDC state for the SPU that will process this request
+ * @sg: Scatterlist whose buffers are added to the receive ring
+ *
+ * If a receive buffer in the scatterlist is larger than PDC_DMA_BUF_MAX,
+ * multiple receive descriptors are written, each with a buffer <=
+ * PDC_DMA_BUF_MAX.
+ *
+ * Return: PDC_SUCCESS if successful
+ * < 0 otherwise (e.g., receive ring is full)
+ */
+static int pdc_rx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
+{
+ u32 flags = 0;
+ u32 rx_avail;
+
+ /*
+ * Num descriptors needed. Conservatively assume we need a descriptor
+ * for every entry from our starting point in the scatterlist.
+ */
+ u32 num_desc;
+ u32 desc_w = 0; /* Number of tx descriptors written */
+ u32 bufcnt; /* Number of bytes of buffer pointed to by descriptor */
+ dma_addr_t databufptr; /* DMA address to put in descriptor */
+
+ num_desc = (u32)sg_nents(sg);
+
+ rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
+ pdcs->nrxpost);
+ if (unlikely(num_desc > rx_avail)) {
+ pdcs->rxnobuf++;
+ return -ENOSPC;
+ }
+
+ while (sg) {
+ if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
+ flags = D64_CTRL1_EOT;
+ else
+ flags = 0;
+
+ /*
+ * If sg buffer larger than PDC limit, split across
+ * multiple descriptors
+ */
+ bufcnt = sg_dma_len(sg);
+ databufptr = sg_dma_address(sg);
+ while (bufcnt > PDC_DMA_BUF_MAX) {
+ pdc_build_rxd(pdcs, databufptr, PDC_DMA_BUF_MAX, flags);
+ desc_w++;
+ bufcnt -= PDC_DMA_BUF_MAX;
+ databufptr += PDC_DMA_BUF_MAX;
+ if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
+ flags = D64_CTRL1_EOT;
+ else
+ flags = 0;
+ }
+ pdc_build_rxd(pdcs, databufptr, bufcnt, flags);
+ desc_w++;
+ sg = sg_next(sg);
+ }
+ pdcs->rx_ctx[pdcs->rx_msg_start].rxin_numd += desc_w;
+
+ return PDC_SUCCESS;
+}
+
+/**
+ * pdc_irq_handler() - Interrupt handler called in interrupt context.
+ * @irq: Interrupt number that has fired
+ * @data: device struct for DMA engine that generated the interrupt
+ *
+ * Clears the device interrupt status flags, masks further interrupts, and
+ * schedules the rx tasklet to do the deferred processing outside of
+ * interrupt context.
+ *
+ * Return: IRQ_HANDLED if interrupt is ours
+ *         IRQ_NONE otherwise
+ */
+static irqreturn_t pdc_irq_handler(int irq, void *data)
+{
+ struct device *dev = (struct device *)data;
+ struct pdc_state *pdcs = dev_get_drvdata(dev);
+ u32 intstatus = ioread32(pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);
+
+ if (unlikely(intstatus == 0))
+ return IRQ_NONE;
+
+ /* Disable interrupts until soft handler runs */
+ iowrite32(0, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);
+
+ /* Clear interrupt flags in device */
+ iowrite32(intstatus, pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);
+
+ /* Wakeup IRQ thread */
+ tasklet_schedule(&pdcs->rx_tasklet);
+ return IRQ_HANDLED;
+}
+
+/**
+ * pdc_tasklet_cb() - Tasklet callback that runs the deferred processing after
+ * a DMA receive interrupt. Reenables the receive interrupt.
+ * @t: Pointer to the rx_tasklet embedded in the pdc_state structure
+ */
+static void pdc_tasklet_cb(struct tasklet_struct *t)
+{
+ struct pdc_state *pdcs = from_tasklet(pdcs, t, rx_tasklet);
+
+ pdc_receive(pdcs);
+
+ /* reenable interrupts */
+ iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);
+}
+
+/**
+ * pdc_ring_init() - Allocate DMA rings and initialize constant fields of
+ * descriptors in one ringset.
+ * @pdcs: PDC instance state
+ * @ringset: index of ringset being used
+ *
+ * Return: PDC_SUCCESS if ring initialized
+ * < 0 otherwise
+ */
+static int pdc_ring_init(struct pdc_state *pdcs, int ringset)
+{
+ int i;
+ int err = PDC_SUCCESS;
+ struct dma64 *dma_reg;
+ struct device *dev = &pdcs->pdev->dev;
+ struct pdc_ring_alloc tx;
+ struct pdc_ring_alloc rx;
+
+ /* Allocate tx ring */
+ tx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &tx.dmabase);
+ if (unlikely(!tx.vbase)) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ /* Allocate rx ring */
+ rx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &rx.dmabase);
+ if (unlikely(!rx.vbase)) {
+ err = -ENOMEM;
+ goto fail_dealloc;
+ }
+
+ dev_dbg(dev, " - base DMA addr of tx ring %pad", &tx.dmabase);
+ dev_dbg(dev, " - base virtual addr of tx ring %p", tx.vbase);
+ dev_dbg(dev, " - base DMA addr of rx ring %pad", &rx.dmabase);
+ dev_dbg(dev, " - base virtual addr of rx ring %p", rx.vbase);
+
+ memcpy(&pdcs->tx_ring_alloc, &tx, sizeof(tx));
+ memcpy(&pdcs->rx_ring_alloc, &rx, sizeof(rx));
+
+ pdcs->rxin = 0;
+ pdcs->rx_msg_start = 0;
+ pdcs->last_rx_curr = 0;
+ pdcs->rxout = 0;
+ pdcs->txin = 0;
+ pdcs->tx_msg_start = 0;
+ pdcs->txout = 0;
+
+ /* Set descriptor array base addresses */
+ pdcs->txd_64 = (struct dma64dd *)pdcs->tx_ring_alloc.vbase;
+ pdcs->rxd_64 = (struct dma64dd *)pdcs->rx_ring_alloc.vbase;
+
+ /* Tell device the base DMA address of each ring */
+ dma_reg = &pdcs->regs->dmaregs[ringset];
+
+ /* But first disable DMA and set curptr to 0 for both TX & RX */
+ iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
+ iowrite32((PDC_RX_CTL + (pdcs->rx_status_len << 1)),
+ &dma_reg->dmarcv.control);
+ iowrite32(0, &dma_reg->dmaxmt.ptr);
+ iowrite32(0, &dma_reg->dmarcv.ptr);
+
+ /* Set base DMA addresses */
+ iowrite32(lower_32_bits(pdcs->tx_ring_alloc.dmabase),
+ &dma_reg->dmaxmt.addrlow);
+ iowrite32(upper_32_bits(pdcs->tx_ring_alloc.dmabase),
+ &dma_reg->dmaxmt.addrhigh);
+
+ iowrite32(lower_32_bits(pdcs->rx_ring_alloc.dmabase),
+ &dma_reg->dmarcv.addrlow);
+ iowrite32(upper_32_bits(pdcs->rx_ring_alloc.dmabase),
+ &dma_reg->dmarcv.addrhigh);
+
+ /* Re-enable DMA */
+ iowrite32(PDC_TX_CTL | PDC_TX_ENABLE, &dma_reg->dmaxmt.control);
+ iowrite32((PDC_RX_CTL | PDC_RX_ENABLE | (pdcs->rx_status_len << 1)),
+ &dma_reg->dmarcv.control);
+
+ /* Initialize descriptors */
+ for (i = 0; i < PDC_RING_ENTRIES; i++) {
+ /* Every tx descriptor can be used for start of frame. */
+ if (i != pdcs->ntxpost) {
+ iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF,
+ &pdcs->txd_64[i].ctrl1);
+ } else {
+ /* Last descriptor in ringset. Set End of Table. */
+ iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF |
+ D64_CTRL1_EOT, &pdcs->txd_64[i].ctrl1);
+ }
+
+ /* Every rx descriptor can be used for start of frame */
+ if (i != pdcs->nrxpost) {
+ iowrite32(D64_CTRL1_SOF,
+ &pdcs->rxd_64[i].ctrl1);
+ } else {
+ /* Last descriptor in ringset. Set End of Table. */
+ iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOT,
+ &pdcs->rxd_64[i].ctrl1);
+ }
+ }
+ return PDC_SUCCESS;
+
+fail_dealloc:
+ dma_pool_free(pdcs->ring_pool, tx.vbase, tx.dmabase);
+done:
+ return err;
+}
+
+static void pdc_ring_free(struct pdc_state *pdcs)
+{
+ if (pdcs->tx_ring_alloc.vbase) {
+ dma_pool_free(pdcs->ring_pool, pdcs->tx_ring_alloc.vbase,
+ pdcs->tx_ring_alloc.dmabase);
+ pdcs->tx_ring_alloc.vbase = NULL;
+ }
+
+ if (pdcs->rx_ring_alloc.vbase) {
+ dma_pool_free(pdcs->ring_pool, pdcs->rx_ring_alloc.vbase,
+ pdcs->rx_ring_alloc.dmabase);
+ pdcs->rx_ring_alloc.vbase = NULL;
+ }
+}
+
+/**
+ * pdc_desc_count() - Count the number of DMA descriptors that will be required
+ * for a given scatterlist. Account for the max length of a DMA buffer.
+ * @sg: Scatterlist to be DMA'd
+ * Return: Number of descriptors required
+ */
+static u32 pdc_desc_count(struct scatterlist *sg)
+{
+ u32 cnt = 0;
+
+ while (sg) {
+ cnt += ((sg->length / PDC_DMA_BUF_MAX) + 1);
+ sg = sg_next(sg);
+ }
+ return cnt;
+}
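+
+/*
+ * Illustrative example: a 20000-byte sg entry counts as
+ * 20000 / PDC_DMA_BUF_MAX + 1 = 2 descriptors (split as 16384 + 3616 bytes).
+ * An entry that is an exact multiple of PDC_DMA_BUF_MAX is over-counted by
+ * one, which only makes the subsequent ring-space checks more conservative.
+ */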
+
+/**
+ * pdc_rings_full() - Check whether the tx ring has room for tx_cnt descriptors
+ * and the rx ring has room for rx_cnt descriptors.
+ * @pdcs: PDC state
+ * @tx_cnt: The number of descriptors required in the tx ring
+ * @rx_cnt: The number of descriptors required in the rx ring
+ *
+ * Return: true if one of the rings does not have enough space
+ * false if sufficient space is available in both rings
+ */
+static bool pdc_rings_full(struct pdc_state *pdcs, int tx_cnt, int rx_cnt)
+{
+ u32 rx_avail;
+ u32 tx_avail;
+ bool full = false;
+
+ /* Check if the tx and rx rings are likely to have enough space */
+ rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
+ pdcs->nrxpost);
+ if (unlikely(rx_cnt > rx_avail)) {
+ pdcs->rx_ring_full++;
+ full = true;
+ }
+
+ if (likely(!full)) {
+ tx_avail = pdcs->ntxpost - NTXDACTIVE(pdcs->txin, pdcs->txout,
+ pdcs->ntxpost);
+ if (unlikely(tx_cnt > tx_avail)) {
+ pdcs->tx_ring_full++;
+ full = true;
+ }
+ }
+ return full;
+}
+
+/**
+ * pdc_last_tx_done() - If both the tx and rx rings have at least
+ * PDC_RING_SPACE_MIN descriptors available, then indicate that the mailbox
+ * framework can submit another message.
+ * @chan: mailbox channel to check
+ * Return: true if PDC can accept another message on this channel
+ */
+static bool pdc_last_tx_done(struct mbox_chan *chan)
+{
+ struct pdc_state *pdcs = chan->con_priv;
+ bool ret;
+
+ if (unlikely(pdc_rings_full(pdcs, PDC_RING_SPACE_MIN,
+ PDC_RING_SPACE_MIN))) {
+ pdcs->last_tx_not_done++;
+ ret = false;
+ } else {
+ ret = true;
+ }
+ return ret;
+}
+
+/**
+ * pdc_send_data() - mailbox send_data function
+ * @chan: The mailbox channel on which the data is sent. The channel
+ * corresponds to a DMA ringset.
+ * @data: The mailbox message to be sent. The message must be a
+ * brcm_message structure.
+ *
+ * This function is registered as the send_data function for the mailbox
+ * controller. From the destination scatterlist in the mailbox message, it
+ * creates a sequence of receive descriptors in the rx ring. From the source
+ * scatterlist, it creates a sequence of transmit descriptors in the tx ring.
+ * After creating the descriptors, it writes the rx ptr and tx ptr registers to
+ * initiate the DMA transfer.
+ *
+ * This function does the DMA map and unmap of the src and dst scatterlists in
+ * the mailbox message.
+ *
+ * Return: 0 if successful
+ * -ENOTSUPP if the mailbox message is a type this driver does not
+ * support
+ * < 0 if an error
+ */
+static int pdc_send_data(struct mbox_chan *chan, void *data)
+{
+ struct pdc_state *pdcs = chan->con_priv;
+ struct device *dev = &pdcs->pdev->dev;
+ struct brcm_message *mssg = data;
+ int err = PDC_SUCCESS;
+ int src_nent;
+ int dst_nent;
+ int nent;
+ u32 tx_desc_req;
+ u32 rx_desc_req;
+
+ if (unlikely(mssg->type != BRCM_MESSAGE_SPU))
+ return -ENOTSUPP;
+
+ src_nent = sg_nents(mssg->spu.src);
+ if (likely(src_nent)) {
+ nent = dma_map_sg(dev, mssg->spu.src, src_nent, DMA_TO_DEVICE);
+ if (unlikely(nent == 0))
+ return -EIO;
+ }
+
+ dst_nent = sg_nents(mssg->spu.dst);
+ if (likely(dst_nent)) {
+ nent = dma_map_sg(dev, mssg->spu.dst, dst_nent,
+ DMA_FROM_DEVICE);
+ if (unlikely(nent == 0)) {
+ dma_unmap_sg(dev, mssg->spu.src, src_nent,
+ DMA_TO_DEVICE);
+ return -EIO;
+ }
+ }
+
+ /*
+ * Check if the tx and rx rings have enough space. Do this prior to
+ * writing any tx or rx descriptors. Need to ensure that we do not write
+	 * a partial set of descriptors, or write just the rx descriptors when
+	 * the corresponding tx descriptors don't fit. Note that we want this
+	 * check and the entire sequence of descriptor writes to happen without
+ * thread getting in. The channel spin lock in the mailbox framework
+ * ensures this.
+ */
+ tx_desc_req = pdc_desc_count(mssg->spu.src);
+ rx_desc_req = pdc_desc_count(mssg->spu.dst);
+ if (unlikely(pdc_rings_full(pdcs, tx_desc_req, rx_desc_req + 1)))
+ return -ENOSPC;
+
+	/* Create rx descriptors to catch the SPU response */
+ err = pdc_rx_list_init(pdcs, mssg->spu.dst, mssg->ctx);
+ err |= pdc_rx_list_sg_add(pdcs, mssg->spu.dst);
+
+ /* Create tx descriptors to submit SPU request */
+ err |= pdc_tx_list_sg_add(pdcs, mssg->spu.src);
+ err |= pdc_tx_list_final(pdcs); /* initiate transfer */
+
+ if (unlikely(err))
+ dev_err(&pdcs->pdev->dev,
+ "%s failed with error %d", __func__, err);
+
+ return err;
+}
+
+static int pdc_startup(struct mbox_chan *chan)
+{
+ return pdc_ring_init(chan->con_priv, PDC_RINGSET);
+}
+
+static void pdc_shutdown(struct mbox_chan *chan)
+{
+ struct pdc_state *pdcs = chan->con_priv;
+
+ if (!pdcs)
+ return;
+
+ dev_dbg(&pdcs->pdev->dev,
+ "Shutdown mailbox channel for PDC %u", pdcs->pdc_idx);
+ pdc_ring_free(pdcs);
+}
+
+/**
+ * pdc_hw_init() - Use the given initialization parameters to initialize the
+ * state for one of the PDCs.
+ * @pdcs: state of the PDC
+ */
+static
+void pdc_hw_init(struct pdc_state *pdcs)
+{
+ struct platform_device *pdev;
+ struct device *dev;
+ struct dma64 *dma_reg;
+ int ringset = PDC_RINGSET;
+
+ pdev = pdcs->pdev;
+ dev = &pdev->dev;
+
+ dev_dbg(dev, "PDC %u initial values:", pdcs->pdc_idx);
+ dev_dbg(dev, "state structure: %p",
+ pdcs);
+ dev_dbg(dev, " - base virtual addr of hw regs %p",
+ pdcs->pdc_reg_vbase);
+
+ /* initialize data structures */
+ pdcs->regs = (struct pdc_regs *)pdcs->pdc_reg_vbase;
+ pdcs->txregs_64 = (struct dma64_regs *)
+ (((u8 *)pdcs->pdc_reg_vbase) +
+ PDC_TXREGS_OFFSET + (sizeof(struct dma64) * ringset));
+ pdcs->rxregs_64 = (struct dma64_regs *)
+ (((u8 *)pdcs->pdc_reg_vbase) +
+ PDC_RXREGS_OFFSET + (sizeof(struct dma64) * ringset));
+
+ pdcs->ntxd = PDC_RING_ENTRIES;
+ pdcs->nrxd = PDC_RING_ENTRIES;
+ pdcs->ntxpost = PDC_RING_ENTRIES - 1;
+ pdcs->nrxpost = PDC_RING_ENTRIES - 1;
+ iowrite32(0, &pdcs->regs->intmask);
+
+ dma_reg = &pdcs->regs->dmaregs[ringset];
+
+	/* Configure DMA now, but enable it later in pdc_ring_init() */
+ iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
+
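+	/* rx_status_len lands in the RcvOffset field (bits 7:1), hence the << 1 */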
+ iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1),
+ &dma_reg->dmarcv.control);
+
+ /* Reset current index pointers after making sure DMA is disabled */
+ iowrite32(0, &dma_reg->dmaxmt.ptr);
+ iowrite32(0, &dma_reg->dmarcv.ptr);
+
+ if (pdcs->pdc_resp_hdr_len == PDC_SPU2_RESP_HDR_LEN)
+ iowrite32(PDC_CKSUM_CTRL,
+ pdcs->pdc_reg_vbase + PDC_CKSUM_CTRL_OFFSET);
+}
+
+/**
+ * pdc_hw_disable() - Disable the tx and rx control in the hw.
+ * @pdcs: PDC state structure
+ *
+ */
+static void pdc_hw_disable(struct pdc_state *pdcs)
+{
+ struct dma64 *dma_reg;
+
+ dma_reg = &pdcs->regs->dmaregs[PDC_RINGSET];
+ iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
+ iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1),
+ &dma_reg->dmarcv.control);
+}
+
+/**
+ * pdc_rx_buf_pool_create() - Create a pool of receive buffers used to catch
+ * the metadata header returned with each response message.
+ * @pdcs: PDC state structure
+ *
+ * The metadata is not returned to the mailbox client. So the PDC driver
+ * manages these buffers.
+ *
+ * Return: PDC_SUCCESS
+ * -ENOMEM if pool creation fails
+ */
+static int pdc_rx_buf_pool_create(struct pdc_state *pdcs)
+{
+ struct platform_device *pdev;
+ struct device *dev;
+
+ pdev = pdcs->pdev;
+ dev = &pdev->dev;
+
+ pdcs->pdc_resp_hdr_len = pdcs->rx_status_len;
+ if (pdcs->use_bcm_hdr)
+ pdcs->pdc_resp_hdr_len += BCM_HDR_LEN;
+
+ pdcs->rx_buf_pool = dma_pool_create("pdc rx bufs", dev,
+ pdcs->pdc_resp_hdr_len,
+ RX_BUF_ALIGN, 0);
+ if (!pdcs->rx_buf_pool)
+ return -ENOMEM;
+
+ return PDC_SUCCESS;
+}
+
+/**
+ * pdc_interrupts_init() - Initialize the interrupt configuration for a PDC
+ * and register its interrupt handler; deferred handling of receive
+ * interrupts is done in the rx tasklet, outside of interrupt context.
+ * @pdcs: PDC state
+ *
+ * Set the interrupt mask for transmit and receive done.
+ * Set the lazy interrupt frame count to generate an interrupt for just one pkt.
+ *
+ * Return: PDC_SUCCESS
+ *         <0 if the irq request fails
+ */
+static int pdc_interrupts_init(struct pdc_state *pdcs)
+{
+ struct platform_device *pdev = pdcs->pdev;
+ struct device *dev = &pdev->dev;
+ struct device_node *dn = pdev->dev.of_node;
+ int err;
+
+ /* interrupt configuration */
+ iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);
+
+ if (pdcs->hw_type == FA_HW)
+ iowrite32(PDC_LAZY_INT, pdcs->pdc_reg_vbase +
+ FA_RCVLAZY0_OFFSET);
+ else
+ iowrite32(PDC_LAZY_INT, pdcs->pdc_reg_vbase +
+ PDC_RCVLAZY0_OFFSET);
+
+ /* read irq from device tree */
+ pdcs->pdc_irq = irq_of_parse_and_map(dn, 0);
+ dev_dbg(dev, "pdc device %s irq %u for pdcs %p",
+ dev_name(dev), pdcs->pdc_irq, pdcs);
+
+ err = devm_request_irq(dev, pdcs->pdc_irq, pdc_irq_handler, 0,
+ dev_name(dev), dev);
+ if (err) {
+ dev_err(dev, "IRQ %u request failed with err %d\n",
+ pdcs->pdc_irq, err);
+ return err;
+ }
+ return PDC_SUCCESS;
+}
+
+static const struct mbox_chan_ops pdc_mbox_chan_ops = {
+ .send_data = pdc_send_data,
+ .last_tx_done = pdc_last_tx_done,
+ .startup = pdc_startup,
+ .shutdown = pdc_shutdown
+};
+
+/**
+ * pdc_mb_init() - Initialize the mailbox controller.
+ * @pdcs: PDC state
+ *
+ * Each PDC is a mailbox controller. Each ringset is a mailbox channel. Kernel
+ * driver only uses one ringset and thus one mb channel. PDC uses the transmit
+ * complete interrupt to determine when a mailbox message has successfully been
+ * transmitted.
+ *
+ * Return: 0 on success
+ * < 0 if there is an allocation or registration failure
+ */
+static int pdc_mb_init(struct pdc_state *pdcs)
+{
+ struct device *dev = &pdcs->pdev->dev;
+ struct mbox_controller *mbc;
+ int chan_index;
+ int err;
+
+ mbc = &pdcs->mbc;
+ mbc->dev = dev;
+ mbc->ops = &pdc_mbox_chan_ops;
+ mbc->num_chans = 1;
+ mbc->chans = devm_kcalloc(dev, mbc->num_chans, sizeof(*mbc->chans),
+ GFP_KERNEL);
+ if (!mbc->chans)
+ return -ENOMEM;
+
+ mbc->txdone_irq = false;
+ mbc->txdone_poll = true;
+ mbc->txpoll_period = 1;
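+	/*
+	 * With txdone_poll set, the mailbox core polls pdc_last_tx_done()
+	 * every txpoll_period (1 ms here) to decide when the next message
+	 * can be sent.
+	 */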
+ for (chan_index = 0; chan_index < mbc->num_chans; chan_index++)
+ mbc->chans[chan_index].con_priv = pdcs;
+
+ /* Register mailbox controller */
+ err = devm_mbox_controller_register(dev, mbc);
+ if (err) {
+ dev_crit(dev,
+ "Failed to register PDC mailbox controller. Error %d.",
+ err);
+ return err;
+ }
+ return 0;
+}
+
+/* Device tree API */
+static const int pdc_hw = PDC_HW;
+static const int fa_hw = FA_HW;
+
+static const struct of_device_id pdc_mbox_of_match[] = {
+ {.compatible = "brcm,iproc-pdc-mbox", .data = &pdc_hw},
+ {.compatible = "brcm,iproc-fa2-mbox", .data = &fa_hw},
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, pdc_mbox_of_match);
+
+/**
+ * pdc_dt_read() - Read application-specific data from device tree.
+ * @pdev: Platform device
+ * @pdcs: PDC state
+ *
+ * Reads the number of bytes of receive status that precede each received frame.
+ * Reads whether transmitted and received frames should be preceded by an 8-byte
+ * BCM header.
+ *
+ * Return: 0 if successful
+ * -ENODEV if device not available
+ */
+static int pdc_dt_read(struct platform_device *pdev, struct pdc_state *pdcs)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *dn = pdev->dev.of_node;
+ const struct of_device_id *match;
+ const int *hw_type;
+ int err;
+
+ err = of_property_read_u32(dn, "brcm,rx-status-len",
+ &pdcs->rx_status_len);
+ if (err < 0)
+ dev_err(dev,
+ "%s failed to get DMA receive status length from device tree",
+ __func__);
+
+ pdcs->use_bcm_hdr = of_property_read_bool(dn, "brcm,use-bcm-hdr");
+
+ pdcs->hw_type = PDC_HW;
+
+ match = of_match_device(of_match_ptr(pdc_mbox_of_match), dev);
+ if (match != NULL) {
+ hw_type = match->data;
+ pdcs->hw_type = *hw_type;
+ }
+
+ return 0;
+}
+
+/**
+ * pdc_probe() - Probe function for PDC driver.
+ * @pdev: PDC platform device
+ *
+ * Reserve and map register regions defined in device tree.
+ * Allocate and initialize tx and rx DMA rings.
+ * Initialize a mailbox controller for each PDC.
+ *
+ * Return: 0 if successful
+ * < 0 if an error
+ */
+static int pdc_probe(struct platform_device *pdev)
+{
+ int err = 0;
+ struct device *dev = &pdev->dev;
+ struct resource *pdc_regs;
+ struct pdc_state *pdcs;
+
+ /* PDC state for one SPU */
+ pdcs = devm_kzalloc(dev, sizeof(*pdcs), GFP_KERNEL);
+ if (!pdcs) {
+ err = -ENOMEM;
+ goto cleanup;
+ }
+
+ pdcs->pdev = pdev;
+ platform_set_drvdata(pdev, pdcs);
+ pdcs->pdc_idx = pdcg.num_spu;
+ pdcg.num_spu++;
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(39));
+ if (err) {
+ dev_warn(dev, "PDC device cannot perform DMA. Error %d.", err);
+ goto cleanup;
+ }
+
+ /* Create DMA pool for tx ring */
+ pdcs->ring_pool = dma_pool_create("pdc rings", dev, PDC_RING_SIZE,
+ RING_ALIGN, 0);
+ if (!pdcs->ring_pool) {
+ err = -ENOMEM;
+ goto cleanup;
+ }
+
+ err = pdc_dt_read(pdev, pdcs);
+ if (err)
+ goto cleanup_ring_pool;
+
+ pdcs->pdc_reg_vbase = devm_platform_get_and_ioremap_resource(pdev, 0, &pdc_regs);
+ if (IS_ERR(pdcs->pdc_reg_vbase)) {
+ err = PTR_ERR(pdcs->pdc_reg_vbase);
+ goto cleanup_ring_pool;
+ }
+ dev_dbg(dev, "PDC register region res.start = %pa, res.end = %pa",
+ &pdc_regs->start, &pdc_regs->end);
+
+ /* create rx buffer pool after dt read to know how big buffers are */
+ err = pdc_rx_buf_pool_create(pdcs);
+ if (err)
+ goto cleanup_ring_pool;
+
+ pdc_hw_init(pdcs);
+
+ /* Init tasklet for deferred DMA rx processing */
+ tasklet_setup(&pdcs->rx_tasklet, pdc_tasklet_cb);
+
+ err = pdc_interrupts_init(pdcs);
+ if (err)
+ goto cleanup_buf_pool;
+
+ /* Initialize mailbox controller */
+ err = pdc_mb_init(pdcs);
+ if (err)
+ goto cleanup_buf_pool;
+
+ pdc_setup_debugfs(pdcs);
+
+ dev_dbg(dev, "pdc_probe() successful");
+ return PDC_SUCCESS;
+
+cleanup_buf_pool:
+ tasklet_kill(&pdcs->rx_tasklet);
+ dma_pool_destroy(pdcs->rx_buf_pool);
+
+cleanup_ring_pool:
+ dma_pool_destroy(pdcs->ring_pool);
+
+cleanup:
+ return err;
+}
+
+static int pdc_remove(struct platform_device *pdev)
+{
+ struct pdc_state *pdcs = platform_get_drvdata(pdev);
+
+ pdc_free_debugfs();
+
+ tasklet_kill(&pdcs->rx_tasklet);
+
+ pdc_hw_disable(pdcs);
+
+ dma_pool_destroy(pdcs->rx_buf_pool);
+ dma_pool_destroy(pdcs->ring_pool);
+ return 0;
+}
+
+static struct platform_driver pdc_mbox_driver = {
+ .probe = pdc_probe,
+ .remove = pdc_remove,
+ .driver = {
+ .name = "brcm-iproc-pdc-mbox",
+ .of_match_table = pdc_mbox_of_match,
+ },
+};
+module_platform_driver(pdc_mbox_driver);
+
+MODULE_AUTHOR("Rob Rice <rob.rice@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom PDC mailbox driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/bcm2835-mailbox.c b/drivers/mailbox/bcm2835-mailbox.c
new file mode 100644
index 000000000..fbfd02020
--- /dev/null
+++ b/drivers/mailbox/bcm2835-mailbox.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2010,2015 Broadcom
+ * Copyright (C) 2013-2014 Lubomir Rintel
+ * Copyright (C) 2013 Craig McGeachie
+ *
+ * Parts of the driver are based on:
+ * - arch/arm/mach-bcm2708/vcio.c file written by Gray Girling that was
+ * obtained from branch "rpi-3.6.y" of git://github.com/raspberrypi/
+ * linux.git
+ * - drivers/mailbox/bcm2835-ipc.c by Lubomir Rintel at
+ * https://github.com/hackerspace/rpi-linux/blob/lr-raspberry-pi/drivers/
+ * mailbox/bcm2835-ipc.c
+ * - documentation available on the following web site:
+ * https://github.com/raspberrypi/firmware/wiki/Mailbox-property-interface
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+/* Mailboxes */
+#define ARM_0_MAIL0 0x00
+#define ARM_0_MAIL1 0x20
+
+/*
+ * Mailbox registers. We basically only support mailbox 0 & 1. We
+ * deliver to the VC in mailbox 1; it delivers to us in mailbox 0. See
+ * BCM2835-ARM-Peripherals.pdf section 1.3 for an explanation about
+ * the placement of memory barriers.
+ */
+#define MAIL0_RD (ARM_0_MAIL0 + 0x00)
+#define MAIL0_POL (ARM_0_MAIL0 + 0x10)
+#define MAIL0_STA (ARM_0_MAIL0 + 0x18)
+#define MAIL0_CNF (ARM_0_MAIL0 + 0x1C)
+#define MAIL1_WRT (ARM_0_MAIL1 + 0x00)
+#define MAIL1_STA (ARM_0_MAIL1 + 0x18)
+
+/* Status register: FIFO state. */
+#define ARM_MS_FULL BIT(31)
+#define ARM_MS_EMPTY BIT(30)
+
+/* Configuration register: Enable interrupts. */
+#define ARM_MC_IHAVEDATAIRQEN BIT(0)
+
+struct bcm2835_mbox {
+ void __iomem *regs;
+ spinlock_t lock;
+ struct mbox_controller controller;
+};
+
+static struct bcm2835_mbox *bcm2835_link_mbox(struct mbox_chan *link)
+{
+ return container_of(link->mbox, struct bcm2835_mbox, controller);
+}
+
+static irqreturn_t bcm2835_mbox_irq(int irq, void *dev_id)
+{
+ struct bcm2835_mbox *mbox = dev_id;
+ struct device *dev = mbox->controller.dev;
+ struct mbox_chan *link = &mbox->controller.chans[0];
+
+ while (!(readl(mbox->regs + MAIL0_STA) & ARM_MS_EMPTY)) {
+ u32 msg = readl(mbox->regs + MAIL0_RD);
+ dev_dbg(dev, "Reply 0x%08X\n", msg);
+ mbox_chan_received_data(link, &msg);
+ }
+ return IRQ_HANDLED;
+}
+
+static int bcm2835_send_data(struct mbox_chan *link, void *data)
+{
+ struct bcm2835_mbox *mbox = bcm2835_link_mbox(link);
+ u32 msg = *(u32 *)data;
+
+ spin_lock(&mbox->lock);
+ writel(msg, mbox->regs + MAIL1_WRT);
+ dev_dbg(mbox->controller.dev, "Request 0x%08X\n", msg);
+ spin_unlock(&mbox->lock);
+ return 0;
+}
+
+static int bcm2835_startup(struct mbox_chan *link)
+{
+ struct bcm2835_mbox *mbox = bcm2835_link_mbox(link);
+
+ /* Enable the interrupt on data reception */
+ writel(ARM_MC_IHAVEDATAIRQEN, mbox->regs + MAIL0_CNF);
+
+ return 0;
+}
+
+static void bcm2835_shutdown(struct mbox_chan *link)
+{
+ struct bcm2835_mbox *mbox = bcm2835_link_mbox(link);
+
+ writel(0, mbox->regs + MAIL0_CNF);
+}
+
+static bool bcm2835_last_tx_done(struct mbox_chan *link)
+{
+ struct bcm2835_mbox *mbox = bcm2835_link_mbox(link);
+ bool ret;
+
+ spin_lock(&mbox->lock);
+ ret = !(readl(mbox->regs + MAIL1_STA) & ARM_MS_FULL);
+ spin_unlock(&mbox->lock);
+ return ret;
+}
+
+static const struct mbox_chan_ops bcm2835_mbox_chan_ops = {
+ .send_data = bcm2835_send_data,
+ .startup = bcm2835_startup,
+ .shutdown = bcm2835_shutdown,
+ .last_tx_done = bcm2835_last_tx_done
+};
+
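+/*
+ * The controller exposes a single channel and takes no specifier cells
+ * (args_count must be 0 below), so a consumer node would reference it
+ * simply as, e.g. (phandle name illustrative only):
+ *
+ *	mboxes = <&mailbox>;
+ */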
+static struct mbox_chan *bcm2835_mbox_index_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *sp)
+{
+ if (sp->args_count != 0)
+ return ERR_PTR(-EINVAL);
+
+ return &mbox->chans[0];
+}
+
+static int bcm2835_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int ret = 0;
+ struct bcm2835_mbox *mbox;
+
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (mbox == NULL)
+ return -ENOMEM;
+ spin_lock_init(&mbox->lock);
+
+ ret = devm_request_irq(dev, irq_of_parse_and_map(dev->of_node, 0),
+ bcm2835_mbox_irq, 0, dev_name(dev), mbox);
+ if (ret) {
+ dev_err(dev, "Failed to register a mailbox IRQ handler: %d\n",
+ ret);
+ return -ENODEV;
+ }
+
+ mbox->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mbox->regs)) {
+ ret = PTR_ERR(mbox->regs);
+ return ret;
+ }
+
+ mbox->controller.txdone_poll = true;
+ mbox->controller.txpoll_period = 5;
+ mbox->controller.ops = &bcm2835_mbox_chan_ops;
+ mbox->controller.of_xlate = &bcm2835_mbox_index_xlate;
+ mbox->controller.dev = dev;
+ mbox->controller.num_chans = 1;
+ mbox->controller.chans = devm_kzalloc(dev,
+ sizeof(*mbox->controller.chans), GFP_KERNEL);
+ if (!mbox->controller.chans)
+ return -ENOMEM;
+
+ ret = devm_mbox_controller_register(dev, &mbox->controller);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, mbox);
+ dev_info(dev, "mailbox enabled\n");
+
+ return ret;
+}
+
+static const struct of_device_id bcm2835_mbox_of_match[] = {
+ { .compatible = "brcm,bcm2835-mbox", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcm2835_mbox_of_match);
+
+static struct platform_driver bcm2835_mbox_driver = {
+ .driver = {
+ .name = "bcm2835-mbox",
+ .of_match_table = bcm2835_mbox_of_match,
+ },
+ .probe = bcm2835_mbox_probe,
+};
+module_platform_driver(bcm2835_mbox_driver);
+
+MODULE_AUTHOR("Lubomir Rintel <lkundrak@v3.sk>");
+MODULE_DESCRIPTION("BCM2835 mailbox IPC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/hi3660-mailbox.c b/drivers/mailbox/hi3660-mailbox.c
new file mode 100644
index 000000000..17c29e960
--- /dev/null
+++ b/drivers/mailbox/hi3660-mailbox.c
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017-2018 HiSilicon Limited.
+// Copyright (c) 2017-2018 Linaro Limited.
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "mailbox.h"
+
+#define MBOX_CHAN_MAX 32
+
+#define MBOX_RX 0x0
+#define MBOX_TX 0x1
+
+#define MBOX_BASE(mbox, ch) ((mbox)->base + ((ch) * 0x40))
+#define MBOX_SRC_REG 0x00
+#define MBOX_DST_REG 0x04
+#define MBOX_DCLR_REG 0x08
+#define MBOX_DSTAT_REG 0x0c
+#define MBOX_MODE_REG 0x10
+#define MBOX_IMASK_REG 0x14
+#define MBOX_ICLR_REG 0x18
+#define MBOX_SEND_REG 0x1c
+#define MBOX_DATA_REG 0x20
+
+#define MBOX_IPC_LOCK_REG 0xa00
+#define MBOX_IPC_UNLOCK 0x1acce551
+
+#define MBOX_AUTOMATIC_ACK 1
+
+#define MBOX_STATE_IDLE BIT(4)
+#define MBOX_STATE_READY BIT(5)
+#define MBOX_STATE_ACK BIT(7)
+
+#define MBOX_MSG_LEN 8
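+
+/*
+ * Each channel occupies a 0x40-byte register window starting at
+ * MBOX_BASE(mbox, ch). For example, channel 2's MBOX_SEND_REG sits at
+ * offset 0x80 + 0x1c = 0x9c from the controller base.
+ */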
+
+/**
+ * struct hi3660_chan_info - Hi3660 mailbox channel information
+ * @dst_irq: Interrupt vector for remote processor
+ * @ack_irq: Interrupt vector for local processor
+ *
+ * A channel can be used for TX or RX; it can trigger an interrupt to
+ * notify the remote processor and can receive an interrupt when an
+ * incoming message arrives.
+ */
+struct hi3660_chan_info {
+ unsigned int dst_irq;
+ unsigned int ack_irq;
+};
+
+/**
+ * struct hi3660_mbox - Hi3660 mailbox controller data
+ * @dev: Device to which it is attached
+ * @base: Base address of the register mapping region
+ * @chan: Representation of channels in mailbox controller
+ * @mchan: Representation of channel info
+ * @controller: Representation of a communication channel controller
+ *
+ * The mailbox controller includes 32 channels and can allocate a
+ * channel for message transfer.
+ */
+struct hi3660_mbox {
+ struct device *dev;
+ void __iomem *base;
+ struct mbox_chan chan[MBOX_CHAN_MAX];
+ struct hi3660_chan_info mchan[MBOX_CHAN_MAX];
+ struct mbox_controller controller;
+};
+
+static struct hi3660_mbox *to_hi3660_mbox(struct mbox_controller *mbox)
+{
+ return container_of(mbox, struct hi3660_mbox, controller);
+}
+
+static int hi3660_mbox_check_state(struct mbox_chan *chan)
+{
+ unsigned long ch = (unsigned long)chan->con_priv;
+ struct hi3660_mbox *mbox = to_hi3660_mbox(chan->mbox);
+ struct hi3660_chan_info *mchan = &mbox->mchan[ch];
+ void __iomem *base = MBOX_BASE(mbox, ch);
+ unsigned long val;
+ unsigned int ret;
+
+ /* Mailbox is ready to use */
+ if (readl(base + MBOX_MODE_REG) & MBOX_STATE_READY)
+ return 0;
+
+ /* Wait for acknowledge from remote */
+ ret = readx_poll_timeout_atomic(readl, base + MBOX_MODE_REG,
+ val, (val & MBOX_STATE_ACK), 1000, 300000);
+ if (ret) {
+ dev_err(mbox->dev, "%s: timeout for receiving ack\n", __func__);
+ return ret;
+ }
+
+	/* Clear the ack state; the mailbox will go back to the ready state */
+ writel(BIT(mchan->ack_irq), base + MBOX_ICLR_REG);
+
+ return 0;
+}
+
+static int hi3660_mbox_unlock(struct mbox_chan *chan)
+{
+ struct hi3660_mbox *mbox = to_hi3660_mbox(chan->mbox);
+ unsigned int val, retry = 3;
+
+ do {
+ writel(MBOX_IPC_UNLOCK, mbox->base + MBOX_IPC_LOCK_REG);
+
+ val = readl(mbox->base + MBOX_IPC_LOCK_REG);
+ if (!val)
+ break;
+
+ udelay(10);
+ } while (retry--);
+
+ if (val)
+ dev_err(mbox->dev, "%s: failed to unlock mailbox\n", __func__);
+
+ return (!val) ? 0 : -ETIMEDOUT;
+}
+
+static int hi3660_mbox_acquire_channel(struct mbox_chan *chan)
+{
+ unsigned long ch = (unsigned long)chan->con_priv;
+ struct hi3660_mbox *mbox = to_hi3660_mbox(chan->mbox);
+ struct hi3660_chan_info *mchan = &mbox->mchan[ch];
+ void __iomem *base = MBOX_BASE(mbox, ch);
+ unsigned int val, retry;
+
+ for (retry = 10; retry; retry--) {
+ /* Check if channel is in idle state */
+ if (readl(base + MBOX_MODE_REG) & MBOX_STATE_IDLE) {
+ writel(BIT(mchan->ack_irq), base + MBOX_SRC_REG);
+
+			/* Check that the ack bit has been set successfully */
+ val = readl(base + MBOX_SRC_REG);
+ if (val & BIT(mchan->ack_irq))
+ break;
+ }
+ }
+
+ if (!retry)
+ dev_err(mbox->dev, "%s: failed to acquire channel\n", __func__);
+
+ return retry ? 0 : -ETIMEDOUT;
+}
+
+static int hi3660_mbox_startup(struct mbox_chan *chan)
+{
+ int ret;
+
+ ret = hi3660_mbox_unlock(chan);
+ if (ret)
+ return ret;
+
+ ret = hi3660_mbox_acquire_channel(chan);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int hi3660_mbox_send_data(struct mbox_chan *chan, void *msg)
+{
+ unsigned long ch = (unsigned long)chan->con_priv;
+ struct hi3660_mbox *mbox = to_hi3660_mbox(chan->mbox);
+ struct hi3660_chan_info *mchan = &mbox->mchan[ch];
+ void __iomem *base = MBOX_BASE(mbox, ch);
+ u32 *buf = msg;
+ unsigned int i;
+ int ret;
+
+ ret = hi3660_mbox_check_state(chan);
+ if (ret)
+ return ret;
+
+ /* Clear mask for destination interrupt */
+ writel_relaxed(~BIT(mchan->dst_irq), base + MBOX_IMASK_REG);
+
+	/* Configure the destination interrupt vector */
+ writel_relaxed(BIT(mchan->dst_irq), base + MBOX_DST_REG);
+
+ /* Automatic acknowledge mode */
+ writel_relaxed(MBOX_AUTOMATIC_ACK, base + MBOX_MODE_REG);
+
+ /* Fill message data */
+ for (i = 0; i < MBOX_MSG_LEN; i++)
+ writel_relaxed(buf[i], base + MBOX_DATA_REG + i * 4);
+
+	/* Trigger the data transfer */
+ writel(BIT(mchan->ack_irq), base + MBOX_SEND_REG);
+ return 0;
+}
+
+static const struct mbox_chan_ops hi3660_mbox_ops = {
+ .startup = hi3660_mbox_startup,
+ .send_data = hi3660_mbox_send_data,
+};
+
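+/*
+ * The mbox specifier for this controller carries three cells: the channel
+ * index, the destination interrupt vector and the ack interrupt vector,
+ * as parsed below. A hypothetical consumer (all values illustrative only)
+ * would therefore reference it as:
+ *
+ *	mboxes = <&mailbox 28 7 4>;
+ */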
+static struct mbox_chan *hi3660_mbox_xlate(struct mbox_controller *controller,
+ const struct of_phandle_args *spec)
+{
+ struct hi3660_mbox *mbox = to_hi3660_mbox(controller);
+ struct hi3660_chan_info *mchan;
+ unsigned int ch = spec->args[0];
+
+ if (ch >= MBOX_CHAN_MAX) {
+ dev_err(mbox->dev, "Invalid channel idx %d\n", ch);
+ return ERR_PTR(-EINVAL);
+ }
+
+ mchan = &mbox->mchan[ch];
+ mchan->dst_irq = spec->args[1];
+ mchan->ack_irq = spec->args[2];
+
+ return &mbox->chan[ch];
+}
+
+static const struct of_device_id hi3660_mbox_of_match[] = {
+ { .compatible = "hisilicon,hi3660-mbox", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, hi3660_mbox_of_match);
+
+static int hi3660_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct hi3660_mbox *mbox;
+ struct mbox_chan *chan;
+ unsigned long ch;
+ int err;
+
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ mbox->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mbox->base))
+ return PTR_ERR(mbox->base);
+
+ mbox->dev = dev;
+ mbox->controller.dev = dev;
+ mbox->controller.chans = mbox->chan;
+ mbox->controller.num_chans = MBOX_CHAN_MAX;
+ mbox->controller.ops = &hi3660_mbox_ops;
+ mbox->controller.of_xlate = hi3660_mbox_xlate;
+
+ /* Initialize mailbox channel data */
+ chan = mbox->chan;
+ for (ch = 0; ch < MBOX_CHAN_MAX; ch++)
+ chan[ch].con_priv = (void *)ch;
+
+ err = devm_mbox_controller_register(dev, &mbox->controller);
+ if (err) {
+ dev_err(dev, "Failed to register mailbox %d\n", err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, mbox);
+ dev_info(dev, "Mailbox enabled\n");
+ return 0;
+}
+
+static struct platform_driver hi3660_mbox_driver = {
+ .probe = hi3660_mbox_probe,
+ .driver = {
+ .name = "hi3660-mbox",
+ .of_match_table = hi3660_mbox_of_match,
+ },
+};
+
+static int __init hi3660_mbox_init(void)
+{
+ return platform_driver_register(&hi3660_mbox_driver);
+}
+core_initcall(hi3660_mbox_init);
+
+static void __exit hi3660_mbox_exit(void)
+{
+ platform_driver_unregister(&hi3660_mbox_driver);
+}
+module_exit(hi3660_mbox_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Hisilicon Hi3660 Mailbox Controller");
+MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>");
diff --git a/drivers/mailbox/hi6220-mailbox.c b/drivers/mailbox/hi6220-mailbox.c
new file mode 100644
index 000000000..f77741ce4
--- /dev/null
+++ b/drivers/mailbox/hi6220-mailbox.c
@@ -0,0 +1,371 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Hisilicon's Hi6220 mailbox driver
+ *
+ * Copyright (c) 2015 HiSilicon Limited.
+ * Copyright (c) 2015 Linaro Limited.
+ *
+ * Author: Leo Yan <leo.yan@linaro.org>
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kfifo.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define MBOX_CHAN_MAX 32
+
+#define MBOX_TX 0x1
+
+/* Mailbox message length: 8 words */
+#define MBOX_MSG_LEN 8
+
+/* Mailbox Registers */
+#define MBOX_OFF(m) (0x40 * (m))
+#define MBOX_MODE_REG(m) (MBOX_OFF(m) + 0x0)
+#define MBOX_DATA_REG(m) (MBOX_OFF(m) + 0x4)
+
+#define MBOX_STATE_MASK (0xF << 4)
+#define MBOX_STATE_IDLE (0x1 << 4)
+#define MBOX_STATE_TX (0x2 << 4)
+#define MBOX_STATE_RX (0x4 << 4)
+#define MBOX_STATE_ACK (0x8 << 4)
+#define MBOX_ACK_CONFIG_MASK (0x1 << 0)
+#define MBOX_ACK_AUTOMATIC (0x1 << 0)
+#define MBOX_ACK_IRQ (0x0 << 0)
+
+/* IPC registers */
+#define ACK_INT_RAW_REG(i) ((i) + 0x400)
+#define ACK_INT_MSK_REG(i) ((i) + 0x404)
+#define ACK_INT_STAT_REG(i) ((i) + 0x408)
+#define ACK_INT_CLR_REG(i) ((i) + 0x40c)
+#define ACK_INT_ENA_REG(i) ((i) + 0x500)
+#define ACK_INT_DIS_REG(i) ((i) + 0x504)
+#define DST_INT_RAW_REG(i) ((i) + 0x420)
+
+
+struct hi6220_mbox_chan {
+
+ /*
+ * Description for channel's hardware info:
+ * - direction: tx or rx
+ * - dst irq: peer core's irq number
+ * - ack irq: local irq number
+ * - slot number
+ */
+ unsigned int dir, dst_irq, ack_irq;
+ unsigned int slot;
+
+ struct hi6220_mbox *parent;
+};
+
+struct hi6220_mbox {
+ struct device *dev;
+
+ int irq;
+
+ /* flag of enabling tx's irq mode */
+ bool tx_irq_mode;
+
+ /* region for ipc event */
+ void __iomem *ipc;
+
+ /* region for mailbox */
+ void __iomem *base;
+
+ unsigned int chan_num;
+ struct hi6220_mbox_chan *mchan;
+
+ void *irq_map_chan[MBOX_CHAN_MAX];
+ struct mbox_chan *chan;
+ struct mbox_controller controller;
+};
+
+static void mbox_set_state(struct hi6220_mbox *mbox,
+ unsigned int slot, u32 val)
+{
+ u32 status;
+
+ status = readl(mbox->base + MBOX_MODE_REG(slot));
+ status = (status & ~MBOX_STATE_MASK) | val;
+ writel(status, mbox->base + MBOX_MODE_REG(slot));
+}
+
+static void mbox_set_mode(struct hi6220_mbox *mbox,
+ unsigned int slot, u32 val)
+{
+ u32 mode;
+
+ mode = readl(mbox->base + MBOX_MODE_REG(slot));
+ mode = (mode & ~MBOX_ACK_CONFIG_MASK) | val;
+ writel(mode, mbox->base + MBOX_MODE_REG(slot));
+}
+
+static bool hi6220_mbox_last_tx_done(struct mbox_chan *chan)
+{
+ struct hi6220_mbox_chan *mchan = chan->con_priv;
+ struct hi6220_mbox *mbox = mchan->parent;
+ u32 state;
+
+ /* Only set idle state for polling mode */
+ BUG_ON(mbox->tx_irq_mode);
+
+ state = readl(mbox->base + MBOX_MODE_REG(mchan->slot));
+ return ((state & MBOX_STATE_MASK) == MBOX_STATE_IDLE);
+}
+
+static int hi6220_mbox_send_data(struct mbox_chan *chan, void *msg)
+{
+ struct hi6220_mbox_chan *mchan = chan->con_priv;
+ struct hi6220_mbox *mbox = mchan->parent;
+ unsigned int slot = mchan->slot;
+ u32 *buf = msg;
+ int i;
+
+ /* indicate as a TX channel */
+ mchan->dir = MBOX_TX;
+
+ mbox_set_state(mbox, slot, MBOX_STATE_TX);
+
+ if (mbox->tx_irq_mode)
+ mbox_set_mode(mbox, slot, MBOX_ACK_IRQ);
+ else
+ mbox_set_mode(mbox, slot, MBOX_ACK_AUTOMATIC);
+
+ for (i = 0; i < MBOX_MSG_LEN; i++)
+ writel(buf[i], mbox->base + MBOX_DATA_REG(slot) + i * 4);
+
+ /* trigger remote request */
+ writel(BIT(mchan->dst_irq), DST_INT_RAW_REG(mbox->ipc));
+ return 0;
+}
+
+static irqreturn_t hi6220_mbox_interrupt(int irq, void *p)
+{
+ struct hi6220_mbox *mbox = p;
+ struct hi6220_mbox_chan *mchan;
+ struct mbox_chan *chan;
+ unsigned int state, intr_bit, i;
+ u32 msg[MBOX_MSG_LEN];
+
+ state = readl(ACK_INT_STAT_REG(mbox->ipc));
+ if (!state) {
+ dev_warn(mbox->dev, "%s: spurious interrupt\n",
+ __func__);
+ return IRQ_HANDLED;
+ }
+
+ while (state) {
+ intr_bit = __ffs(state);
+ state &= (state - 1);
+
+ chan = mbox->irq_map_chan[intr_bit];
+ if (!chan) {
+ dev_warn(mbox->dev, "%s: unexpected irq vector %d\n",
+ __func__, intr_bit);
+ continue;
+ }
+
+ mchan = chan->con_priv;
+ if (mchan->dir == MBOX_TX)
+ mbox_chan_txdone(chan, 0);
+ else {
+ for (i = 0; i < MBOX_MSG_LEN; i++)
+ msg[i] = readl(mbox->base +
+ MBOX_DATA_REG(mchan->slot) + i * 4);
+
+ mbox_chan_received_data(chan, (void *)msg);
+ }
+
+ /* clear IRQ source */
+ writel(BIT(mchan->ack_irq), ACK_INT_CLR_REG(mbox->ipc));
+ mbox_set_state(mbox, mchan->slot, MBOX_STATE_IDLE);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int hi6220_mbox_startup(struct mbox_chan *chan)
+{
+ struct hi6220_mbox_chan *mchan = chan->con_priv;
+ struct hi6220_mbox *mbox = mchan->parent;
+
+ mchan->dir = 0;
+
+ /* enable interrupt */
+ writel(BIT(mchan->ack_irq), ACK_INT_ENA_REG(mbox->ipc));
+ return 0;
+}
+
+static void hi6220_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct hi6220_mbox_chan *mchan = chan->con_priv;
+ struct hi6220_mbox *mbox = mchan->parent;
+
+ /* disable interrupt */
+ writel(BIT(mchan->ack_irq), ACK_INT_DIS_REG(mbox->ipc));
+ mbox->irq_map_chan[mchan->ack_irq] = NULL;
+}
+
+static const struct mbox_chan_ops hi6220_mbox_ops = {
+ .send_data = hi6220_mbox_send_data,
+ .startup = hi6220_mbox_startup,
+ .shutdown = hi6220_mbox_shutdown,
+ .last_tx_done = hi6220_mbox_last_tx_done,
+};
+
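+/*
+ * The mbox specifier carries three cells: the channel (slot) index, the
+ * remote (dst) interrupt vector and the local (ack) interrupt vector.
+ * A hypothetical consumer (values illustrative only) would reference the
+ * controller as:
+ *
+ *	mboxes = <&mailbox 1 0 11>;
+ */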
+static struct mbox_chan *hi6220_mbox_xlate(struct mbox_controller *controller,
+ const struct of_phandle_args *spec)
+{
+ struct hi6220_mbox *mbox = dev_get_drvdata(controller->dev);
+ struct hi6220_mbox_chan *mchan;
+ struct mbox_chan *chan;
+ unsigned int i = spec->args[0];
+ unsigned int dst_irq = spec->args[1];
+ unsigned int ack_irq = spec->args[2];
+
+ /* Bounds checking */
+ if (i >= mbox->chan_num || dst_irq >= mbox->chan_num ||
+ ack_irq >= mbox->chan_num) {
+ dev_err(mbox->dev,
+ "Invalid channel idx %d dst_irq %d ack_irq %d\n",
+ i, dst_irq, ack_irq);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Is requested channel free? */
+ chan = &mbox->chan[i];
+ if (mbox->irq_map_chan[ack_irq] == (void *)chan) {
+ dev_err(mbox->dev, "Channel in use\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ mchan = chan->con_priv;
+ mchan->dst_irq = dst_irq;
+ mchan->ack_irq = ack_irq;
+
+ mbox->irq_map_chan[ack_irq] = (void *)chan;
+ return chan;
+}
+
+static const struct of_device_id hi6220_mbox_of_match[] = {
+ { .compatible = "hisilicon,hi6220-mbox", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, hi6220_mbox_of_match);
+
+static int hi6220_mbox_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct hi6220_mbox *mbox;
+ int i, err;
+
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ mbox->dev = dev;
+ mbox->chan_num = MBOX_CHAN_MAX;
+ mbox->mchan = devm_kcalloc(dev,
+ mbox->chan_num, sizeof(*mbox->mchan), GFP_KERNEL);
+ if (!mbox->mchan)
+ return -ENOMEM;
+
+ mbox->chan = devm_kcalloc(dev,
+ mbox->chan_num, sizeof(*mbox->chan), GFP_KERNEL);
+ if (!mbox->chan)
+ return -ENOMEM;
+
+ mbox->irq = platform_get_irq(pdev, 0);
+ if (mbox->irq < 0)
+ return mbox->irq;
+
+ mbox->ipc = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mbox->ipc)) {
+ dev_err(dev, "ioremap ipc failed\n");
+ return PTR_ERR(mbox->ipc);
+ }
+
+ mbox->base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(mbox->base)) {
+ dev_err(dev, "ioremap buffer failed\n");
+ return PTR_ERR(mbox->base);
+ }
+
+ err = devm_request_irq(dev, mbox->irq, hi6220_mbox_interrupt, 0,
+ dev_name(dev), mbox);
+ if (err) {
+ dev_err(dev, "Failed to register a mailbox IRQ handler: %d\n",
+ err);
+ return -ENODEV;
+ }
+
+ mbox->controller.dev = dev;
+ mbox->controller.chans = &mbox->chan[0];
+ mbox->controller.num_chans = mbox->chan_num;
+ mbox->controller.ops = &hi6220_mbox_ops;
+ mbox->controller.of_xlate = hi6220_mbox_xlate;
+
+ for (i = 0; i < mbox->chan_num; i++) {
+ mbox->chan[i].con_priv = &mbox->mchan[i];
+ mbox->irq_map_chan[i] = NULL;
+
+ mbox->mchan[i].parent = mbox;
+ mbox->mchan[i].slot = i;
+ }
+
+ /* mask and clear all interrupt vectors */
+ writel(0x0, ACK_INT_MSK_REG(mbox->ipc));
+ writel(~0x0, ACK_INT_CLR_REG(mbox->ipc));
+
+ /* use interrupt for tx's ack */
+ mbox->tx_irq_mode = !of_property_read_bool(node, "hi6220,mbox-tx-noirq");
+
+ if (mbox->tx_irq_mode)
+ mbox->controller.txdone_irq = true;
+ else {
+ mbox->controller.txdone_poll = true;
+ mbox->controller.txpoll_period = 5;
+ }
+
+ err = devm_mbox_controller_register(dev, &mbox->controller);
+ if (err) {
+ dev_err(dev, "Failed to register mailbox %d\n", err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, mbox);
+ dev_info(dev, "Mailbox enabled\n");
+ return 0;
+}
+
+static struct platform_driver hi6220_mbox_driver = {
+ .driver = {
+ .name = "hi6220-mbox",
+ .of_match_table = hi6220_mbox_of_match,
+ },
+ .probe = hi6220_mbox_probe,
+};
+
+static int __init hi6220_mbox_init(void)
+{
+ return platform_driver_register(&hi6220_mbox_driver);
+}
+core_initcall(hi6220_mbox_init);
+
+static void __exit hi6220_mbox_exit(void)
+{
+ platform_driver_unregister(&hi6220_mbox_driver);
+}
+module_exit(hi6220_mbox_exit);
+
+MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>");
+MODULE_DESCRIPTION("Hi6220 mailbox driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
new file mode 100644
index 000000000..3ef4dd8ad
--- /dev/null
+++ b/drivers/mailbox/imx-mailbox.c
@@ -0,0 +1,1058 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 Pengutronix, Oleksij Rempel <o.rempel@pengutronix.de>
+ * Copyright 2022 NXP, Peng Fan <peng.fan@nxp.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/firmware/imx/ipc.h>
+#include <linux/firmware/imx/s4.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/suspend.h>
+#include <linux/slab.h>
+
+#define IMX_MU_CHANS 17
+/* TX0/RX0/RXDB[0-3] */
+#define IMX_MU_SCU_CHANS 6
+/* TX0/RX0 */
+#define IMX_MU_S4_CHANS 2
+#define IMX_MU_CHAN_NAME_SIZE 20
+
+#define IMX_MU_NUM_RR 4
+
+#define IMX_MU_SECO_TX_TOUT (msecs_to_jiffies(3000))
+#define IMX_MU_SECO_RX_TOUT (msecs_to_jiffies(3000))
+
+/* Please do not change TX & RX */
+enum imx_mu_chan_type {
+ IMX_MU_TYPE_TX = 0, /* Tx */
+ IMX_MU_TYPE_RX = 1, /* Rx */
+ IMX_MU_TYPE_TXDB = 2, /* Tx doorbell */
+ IMX_MU_TYPE_RXDB = 3, /* Rx doorbell */
+ IMX_MU_TYPE_RST = 4, /* Reset */
+};
+
+enum imx_mu_xcr {
+ IMX_MU_CR,
+ IMX_MU_GIER,
+ IMX_MU_GCR,
+ IMX_MU_TCR,
+ IMX_MU_RCR,
+ IMX_MU_xCR_MAX,
+};
+
+enum imx_mu_xsr {
+ IMX_MU_SR,
+ IMX_MU_GSR,
+ IMX_MU_TSR,
+ IMX_MU_RSR,
+ IMX_MU_xSR_MAX,
+};
+
+struct imx_sc_rpc_msg_max {
+ struct imx_sc_rpc_msg hdr;
+ u32 data[30];
+};
+
+struct imx_s4_rpc_msg_max {
+ struct imx_s4_rpc_msg hdr;
+ u32 data[254];
+};
+
+struct imx_mu_con_priv {
+ unsigned int idx;
+ char irq_desc[IMX_MU_CHAN_NAME_SIZE];
+ enum imx_mu_chan_type type;
+ struct mbox_chan *chan;
+ struct tasklet_struct txdb_tasklet;
+};
+
+struct imx_mu_priv {
+ struct device *dev;
+ void __iomem *base;
+ void *msg;
+ spinlock_t xcr_lock; /* control register lock */
+
+ struct mbox_controller mbox;
+ struct mbox_chan mbox_chans[IMX_MU_CHANS];
+
+ struct imx_mu_con_priv con_priv[IMX_MU_CHANS];
+ const struct imx_mu_dcfg *dcfg;
+ struct clk *clk;
+ int irq[IMX_MU_CHANS];
+ bool suspend;
+
+ u32 xcr[IMX_MU_xCR_MAX];
+
+ bool side_b;
+};
+
+enum imx_mu_type {
+ IMX_MU_V1,
+ IMX_MU_V2 = BIT(1),
+ IMX_MU_V2_S4 = BIT(15),
+ IMX_MU_V2_IRQ = BIT(16),
+};
+
+struct imx_mu_dcfg {
+ int (*tx)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp, void *data);
+ int (*rx)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp);
+ int (*rxdb)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp);
+ void (*init)(struct imx_mu_priv *priv);
+ enum imx_mu_type type;
+ u32 xTR; /* Transmit Register0 */
+ u32 xRR; /* Receive Register0 */
+ u32 xSR[IMX_MU_xSR_MAX]; /* Status Registers */
+ u32 xCR[IMX_MU_xCR_MAX]; /* Control Registers */
+};
+
+#define IMX_MU_xSR_GIPn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(28 + (3 - (x))))
+#define IMX_MU_xSR_RFn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x))))
+#define IMX_MU_xSR_TEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(20 + (3 - (x))))
+
+/* General Purpose Interrupt Enable */
+#define IMX_MU_xCR_GIEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(28 + (3 - (x))))
+/* Receive Interrupt Enable */
+#define IMX_MU_xCR_RIEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x))))
+/* Transmit Interrupt Enable */
+#define IMX_MU_xCR_TIEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(20 + (3 - (x))))
+/* General Purpose Interrupt Request */
+#define IMX_MU_xCR_GIRn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(16 + (3 - (x))))
+/* MU reset */
+#define IMX_MU_xCR_RST(type) (type & IMX_MU_V2 ? BIT(0) : BIT(5))
+#define IMX_MU_xSR_RST(type) (type & IMX_MU_V2 ? BIT(0) : BIT(7))
+
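+/*
+ * For illustration: with the original MU (IMX_MU_V1) layout the per-index
+ * bits are packed from the top of the register, e.g. IMX_MU_xSR_RFn(V1, 0)
+ * evaluates to BIT(27) and IMX_MU_xCR_TIEn(V1, 3) to BIT(20), whereas the
+ * MUv2 layout simply uses BIT(idx).
+ */
+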
+
+static struct imx_mu_priv *to_imx_mu_priv(struct mbox_controller *mbox)
+{
+ return container_of(mbox, struct imx_mu_priv, mbox);
+}
+
+static void imx_mu_write(struct imx_mu_priv *priv, u32 val, u32 offs)
+{
+ iowrite32(val, priv->base + offs);
+}
+
+static u32 imx_mu_read(struct imx_mu_priv *priv, u32 offs)
+{
+ return ioread32(priv->base + offs);
+}
+
+static int imx_mu_tx_waiting_write(struct imx_mu_priv *priv, u32 val, u32 idx)
+{
+ u64 timeout_time = get_jiffies_64() + IMX_MU_SECO_TX_TOUT;
+ u32 status;
+ u32 can_write;
+
+ dev_dbg(priv->dev, "Trying to write %.8x to idx %d\n", val, idx);
+
+ do {
+ status = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_TSR]);
+ can_write = status & IMX_MU_xSR_TEn(priv->dcfg->type, idx % 4);
+ } while (!can_write && time_is_after_jiffies64(timeout_time));
+
+ if (!can_write) {
+ dev_err(priv->dev, "timeout trying to write %.8x at %d(%.8x)\n",
+ val, idx, status);
+ return -ETIME;
+ }
+
+ imx_mu_write(priv, val, priv->dcfg->xTR + (idx % 4) * 4);
+
+ return 0;
+}
+
+static int imx_mu_rx_waiting_read(struct imx_mu_priv *priv, u32 *val, u32 idx)
+{
+ u64 timeout_time = get_jiffies_64() + IMX_MU_SECO_RX_TOUT;
+ u32 status;
+ u32 can_read;
+
+ dev_dbg(priv->dev, "Trying to read from idx %d\n", idx);
+
+ do {
+ status = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_RSR]);
+ can_read = status & IMX_MU_xSR_RFn(priv->dcfg->type, idx % 4);
+ } while (!can_read && time_is_after_jiffies64(timeout_time));
+
+ if (!can_read) {
+ dev_err(priv->dev, "timeout trying to read idx %d (%.8x)\n",
+ idx, status);
+ return -ETIME;
+ }
+
+ *val = imx_mu_read(priv, priv->dcfg->xRR + (idx % 4) * 4);
+ dev_dbg(priv->dev, "Read %.8x\n", *val);
+
+ return 0;
+}
+
+static u32 imx_mu_xcr_rmw(struct imx_mu_priv *priv, enum imx_mu_xcr type, u32 set, u32 clr)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&priv->xcr_lock, flags);
+ val = imx_mu_read(priv, priv->dcfg->xCR[type]);
+ val &= ~clr;
+ val |= set;
+ imx_mu_write(priv, val, priv->dcfg->xCR[type]);
+ spin_unlock_irqrestore(&priv->xcr_lock, flags);
+
+ return val;
+}
+
+static int imx_mu_generic_tx(struct imx_mu_priv *priv,
+ struct imx_mu_con_priv *cp,
+ void *data)
+{
+ u32 *arg = data;
+
+ switch (cp->type) {
+ case IMX_MU_TYPE_TX:
+ imx_mu_write(priv, *arg, priv->dcfg->xTR + cp->idx * 4);
+ imx_mu_xcr_rmw(priv, IMX_MU_TCR, IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx), 0);
+ break;
+ case IMX_MU_TYPE_TXDB:
+ imx_mu_xcr_rmw(priv, IMX_MU_GCR, IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx), 0);
+ tasklet_schedule(&cp->txdb_tasklet);
+ break;
+ default:
+ dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int imx_mu_generic_rx(struct imx_mu_priv *priv,
+ struct imx_mu_con_priv *cp)
+{
+ u32 dat;
+
+ dat = imx_mu_read(priv, priv->dcfg->xRR + (cp->idx) * 4);
+ mbox_chan_received_data(cp->chan, (void *)&dat);
+
+ return 0;
+}
+
+static int imx_mu_generic_rxdb(struct imx_mu_priv *priv,
+ struct imx_mu_con_priv *cp)
+{
+ imx_mu_write(priv, IMX_MU_xSR_GIPn(priv->dcfg->type, cp->idx),
+ priv->dcfg->xSR[IMX_MU_GSR]);
+ mbox_chan_received_data(cp->chan, NULL);
+
+ return 0;
+}
+
+static int imx_mu_specific_tx(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp, void *data)
+{
+ u32 *arg = data;
+ int i, ret;
+ u32 xsr;
+ u32 size, max_size, num_tr;
+
+ if (priv->dcfg->type & IMX_MU_V2_S4) {
+ size = ((struct imx_s4_rpc_msg_max *)data)->hdr.size;
+ max_size = sizeof(struct imx_s4_rpc_msg_max);
+ num_tr = 8;
+ } else {
+ size = ((struct imx_sc_rpc_msg_max *)data)->hdr.size;
+ max_size = sizeof(struct imx_sc_rpc_msg_max);
+ num_tr = 4;
+ }
+
+ switch (cp->type) {
+ case IMX_MU_TYPE_TX:
+ /*
+ * msg->hdr.size specifies the number of u32 words while
+ * sizeof yields bytes.
+ */
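+		/*
+		 * For example, assuming the RPC header itself is one u32
+		 * word, a 124-byte imx_sc_rpc_msg_max allows at most 31
+		 * words here.
+		 */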
+
+ if (size > max_size / 4) {
+ /*
+			 * The real message size can be different from the
+			 * struct imx_sc_rpc_msg_max/imx_s4_rpc_msg_max size
+ */
+ dev_err(priv->dev, "Maximal message size (%u bytes) exceeded on TX; got: %i bytes\n", max_size, size << 2);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_tr && i < size; i++)
+ imx_mu_write(priv, *arg++, priv->dcfg->xTR + (i % num_tr) * 4);
+ for (; i < size; i++) {
+ ret = readl_poll_timeout(priv->base + priv->dcfg->xSR[IMX_MU_TSR],
+ xsr,
+ xsr & IMX_MU_xSR_TEn(priv->dcfg->type, i % num_tr),
+ 0, 5 * USEC_PER_SEC);
+ if (ret) {
+ dev_err(priv->dev, "Send data index: %d timeout\n", i);
+ return ret;
+ }
+ imx_mu_write(priv, *arg++, priv->dcfg->xTR + (i % num_tr) * 4);
+ }
+
+ imx_mu_xcr_rmw(priv, IMX_MU_TCR, IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx), 0);
+ break;
+ default:
+ dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int imx_mu_specific_rx(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp)
+{
+ u32 *data;
+ int i, ret;
+ u32 xsr;
+ u32 size, max_size;
+
+ data = (u32 *)priv->msg;
+
+ imx_mu_xcr_rmw(priv, IMX_MU_RCR, 0, IMX_MU_xCR_RIEn(priv->dcfg->type, 0));
+ *data++ = imx_mu_read(priv, priv->dcfg->xRR);
+
+ if (priv->dcfg->type & IMX_MU_V2_S4) {
+ size = ((struct imx_s4_rpc_msg_max *)priv->msg)->hdr.size;
+ max_size = sizeof(struct imx_s4_rpc_msg_max);
+ } else {
+ size = ((struct imx_sc_rpc_msg_max *)priv->msg)->hdr.size;
+ max_size = sizeof(struct imx_sc_rpc_msg_max);
+ }
+
+ if (size > max_size / 4) {
+ dev_err(priv->dev, "Maximal message size (%u bytes) exceeded on RX; got: %i bytes\n", max_size, size << 2);
+ return -EINVAL;
+ }
+
+ for (i = 1; i < size; i++) {
+ ret = readl_poll_timeout(priv->base + priv->dcfg->xSR[IMX_MU_RSR], xsr,
+ xsr & IMX_MU_xSR_RFn(priv->dcfg->type, i % 4), 0,
+ 5 * USEC_PER_SEC);
+ if (ret) {
+ dev_err(priv->dev, "timeout read idx %d\n", i);
+ return ret;
+ }
+ *data++ = imx_mu_read(priv, priv->dcfg->xRR + (i % 4) * 4);
+ }
+
+ imx_mu_xcr_rmw(priv, IMX_MU_RCR, IMX_MU_xCR_RIEn(priv->dcfg->type, 0), 0);
+ mbox_chan_received_data(cp->chan, (void *)priv->msg);
+
+ return 0;
+}
+
+static int imx_mu_seco_tx(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp,
+ void *data)
+{
+ struct imx_sc_rpc_msg_max *msg = data;
+ u32 *arg = data;
+ u32 byte_size;
+ int err;
+ int i;
+
+ dev_dbg(priv->dev, "Sending message\n");
+
+ switch (cp->type) {
+ case IMX_MU_TYPE_TXDB:
+ byte_size = msg->hdr.size * sizeof(u32);
+ if (byte_size > sizeof(*msg)) {
+ /*
+			 * The real message size can be different from the
+			 * struct imx_sc_rpc_msg_max size
+ */
+ dev_err(priv->dev,
+ "Exceed max msg size (%zu) on TX, got: %i\n",
+ sizeof(*msg), byte_size);
+ return -EINVAL;
+ }
+
+ print_hex_dump_debug("from client ", DUMP_PREFIX_OFFSET, 4, 4,
+ data, byte_size, false);
+
+ /* Send first word */
+ dev_dbg(priv->dev, "Sending header\n");
+ imx_mu_write(priv, *arg++, priv->dcfg->xTR);
+
+ /* Send signaling */
+ dev_dbg(priv->dev, "Sending signaling\n");
+ imx_mu_xcr_rmw(priv, IMX_MU_GCR,
+ IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx), 0);
+
+ /* Send words to fill the mailbox */
+ for (i = 1; i < 4 && i < msg->hdr.size; i++) {
+ dev_dbg(priv->dev, "Sending word %d\n", i);
+ imx_mu_write(priv, *arg++,
+ priv->dcfg->xTR + (i % 4) * 4);
+ }
+
+ /* Send rest of message waiting for remote read */
+ for (; i < msg->hdr.size; i++) {
+ dev_dbg(priv->dev, "Sending word %d\n", i);
+ err = imx_mu_tx_waiting_write(priv, *arg++, i);
+ if (err) {
+ dev_err(priv->dev, "Timeout tx %d\n", i);
+ return err;
+ }
+ }
+
+		/* Schedule the txdb tasklet to fake a TX-done for the mbox framework */
+ tasklet_schedule(&cp->txdb_tasklet);
+
+ break;
+ default:
+ dev_warn_ratelimited(priv->dev,
+ "Send data on wrong channel type: %d\n",
+ cp->type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int imx_mu_seco_rxdb(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp)
+{
+ struct imx_sc_rpc_msg_max msg;
+ u32 *data = (u32 *)&msg;
+ u32 byte_size;
+ int err = 0;
+ int i;
+
+ dev_dbg(priv->dev, "Receiving message\n");
+
+ /* Read header */
+ dev_dbg(priv->dev, "Receiving header\n");
+ *data++ = imx_mu_read(priv, priv->dcfg->xRR);
+ byte_size = msg.hdr.size * sizeof(u32);
+ if (byte_size > sizeof(msg)) {
+ dev_err(priv->dev, "Exceed max msg size (%zu) on RX, got: %i\n",
+ sizeof(msg), byte_size);
+ err = -EINVAL;
+ goto error;
+ }
+
+	/* Read the remaining message words, waiting until they are written */
+ for (i = 1; i < msg.hdr.size; i++) {
+ dev_dbg(priv->dev, "Receiving word %d\n", i);
+ err = imx_mu_rx_waiting_read(priv, data++, i);
+ if (err) {
+ dev_err(priv->dev, "Timeout rx %d\n", i);
+ goto error;
+ }
+ }
+
+ /* Clear GIP */
+ imx_mu_write(priv, IMX_MU_xSR_GIPn(priv->dcfg->type, cp->idx),
+ priv->dcfg->xSR[IMX_MU_GSR]);
+
+ print_hex_dump_debug("to client ", DUMP_PREFIX_OFFSET, 4, 4,
+ &msg, byte_size, false);
+
+ /* send data to client */
+ dev_dbg(priv->dev, "Sending message to client\n");
+ mbox_chan_received_data(cp->chan, (void *)&msg);
+
+ goto exit;
+
+error:
+ mbox_chan_received_data(cp->chan, ERR_PTR(err));
+
+exit:
+ return err;
+}
+
+static void imx_mu_txdb_tasklet(unsigned long data)
+{
+ struct imx_mu_con_priv *cp = (struct imx_mu_con_priv *)data;
+
+ mbox_chan_txdone(cp->chan, 0);
+}
+
+static irqreturn_t imx_mu_isr(int irq, void *p)
+{
+ struct mbox_chan *chan = p;
+ struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
+ struct imx_mu_con_priv *cp = chan->con_priv;
+ u32 val, ctrl;
+
+ switch (cp->type) {
+ case IMX_MU_TYPE_TX:
+ ctrl = imx_mu_read(priv, priv->dcfg->xCR[IMX_MU_TCR]);
+ val = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_TSR]);
+ val &= IMX_MU_xSR_TEn(priv->dcfg->type, cp->idx) &
+ (ctrl & IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx));
+ break;
+ case IMX_MU_TYPE_RX:
+ ctrl = imx_mu_read(priv, priv->dcfg->xCR[IMX_MU_RCR]);
+ val = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_RSR]);
+ val &= IMX_MU_xSR_RFn(priv->dcfg->type, cp->idx) &
+ (ctrl & IMX_MU_xCR_RIEn(priv->dcfg->type, cp->idx));
+ break;
+ case IMX_MU_TYPE_RXDB:
+ ctrl = imx_mu_read(priv, priv->dcfg->xCR[IMX_MU_GIER]);
+ val = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_GSR]);
+ val &= IMX_MU_xSR_GIPn(priv->dcfg->type, cp->idx) &
+ (ctrl & IMX_MU_xCR_GIEn(priv->dcfg->type, cp->idx));
+ break;
+ case IMX_MU_TYPE_RST:
+ return IRQ_NONE;
+ default:
+ dev_warn_ratelimited(priv->dev, "Unhandled channel type %d\n",
+ cp->type);
+ return IRQ_NONE;
+ }
+
+ if (!val)
+ return IRQ_NONE;
+
+ if ((val == IMX_MU_xSR_TEn(priv->dcfg->type, cp->idx)) &&
+ (cp->type == IMX_MU_TYPE_TX)) {
+ imx_mu_xcr_rmw(priv, IMX_MU_TCR, 0, IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx));
+ mbox_chan_txdone(chan, 0);
+ } else if ((val == IMX_MU_xSR_RFn(priv->dcfg->type, cp->idx)) &&
+ (cp->type == IMX_MU_TYPE_RX)) {
+ priv->dcfg->rx(priv, cp);
+ } else if ((val == IMX_MU_xSR_GIPn(priv->dcfg->type, cp->idx)) &&
+ (cp->type == IMX_MU_TYPE_RXDB)) {
+ priv->dcfg->rxdb(priv, cp);
+ } else {
+		dev_warn_ratelimited(priv->dev, "Unhandled interrupt\n");
+ return IRQ_NONE;
+ }
+
+ if (priv->suspend)
+ pm_system_wakeup();
+
+ return IRQ_HANDLED;
+}
+
+static int imx_mu_send_data(struct mbox_chan *chan, void *data)
+{
+ struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
+ struct imx_mu_con_priv *cp = chan->con_priv;
+
+ return priv->dcfg->tx(priv, cp, data);
+}
+
+static int imx_mu_startup(struct mbox_chan *chan)
+{
+ struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
+ struct imx_mu_con_priv *cp = chan->con_priv;
+ unsigned long irq_flag = 0;
+ int ret;
+
+ pm_runtime_get_sync(priv->dev);
+ if (cp->type == IMX_MU_TYPE_TXDB) {
+		/* Tx doorbell channels don't have ACK support */
+ tasklet_init(&cp->txdb_tasklet, imx_mu_txdb_tasklet,
+ (unsigned long)cp);
+ return 0;
+ }
+
+	/* The IPC MU IRQ should be requested with IRQF_NO_SUSPEND set */
+ if (!priv->dev->pm_domain)
+ irq_flag |= IRQF_NO_SUSPEND;
+
+ if (!(priv->dcfg->type & IMX_MU_V2_IRQ))
+ irq_flag |= IRQF_SHARED;
+
+ ret = request_irq(priv->irq[cp->type], imx_mu_isr, irq_flag, cp->irq_desc, chan);
+ if (ret) {
+ dev_err(priv->dev, "Unable to acquire IRQ %d\n", priv->irq[cp->type]);
+ return ret;
+ }
+
+ switch (cp->type) {
+ case IMX_MU_TYPE_RX:
+ imx_mu_xcr_rmw(priv, IMX_MU_RCR, IMX_MU_xCR_RIEn(priv->dcfg->type, cp->idx), 0);
+ break;
+ case IMX_MU_TYPE_RXDB:
+ imx_mu_xcr_rmw(priv, IMX_MU_GIER, IMX_MU_xCR_GIEn(priv->dcfg->type, cp->idx), 0);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static void imx_mu_shutdown(struct mbox_chan *chan)
+{
+ struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
+ struct imx_mu_con_priv *cp = chan->con_priv;
+ int ret;
+ u32 sr;
+
+ if (cp->type == IMX_MU_TYPE_TXDB) {
+ tasklet_kill(&cp->txdb_tasklet);
+ pm_runtime_put_sync(priv->dev);
+ return;
+ }
+
+ switch (cp->type) {
+ case IMX_MU_TYPE_TX:
+ imx_mu_xcr_rmw(priv, IMX_MU_TCR, 0, IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx));
+ break;
+ case IMX_MU_TYPE_RX:
+ imx_mu_xcr_rmw(priv, IMX_MU_RCR, 0, IMX_MU_xCR_RIEn(priv->dcfg->type, cp->idx));
+ break;
+ case IMX_MU_TYPE_RXDB:
+ imx_mu_xcr_rmw(priv, IMX_MU_GIER, 0, IMX_MU_xCR_GIEn(priv->dcfg->type, cp->idx));
+ break;
+ case IMX_MU_TYPE_RST:
+ imx_mu_xcr_rmw(priv, IMX_MU_CR, IMX_MU_xCR_RST(priv->dcfg->type), 0);
+ ret = readl_poll_timeout(priv->base + priv->dcfg->xSR[IMX_MU_SR], sr,
+ !(sr & IMX_MU_xSR_RST(priv->dcfg->type)), 1, 5);
+ if (ret)
+ dev_warn(priv->dev, "RST channel timeout\n");
+ break;
+ default:
+ break;
+ }
+
+ free_irq(priv->irq[cp->type], chan);
+ pm_runtime_put_sync(priv->dev);
+}
+
+static const struct mbox_chan_ops imx_mu_ops = {
+ .send_data = imx_mu_send_data,
+ .startup = imx_mu_startup,
+ .shutdown = imx_mu_shutdown,
+};
+
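+/*
+ * The xlate helpers below expect a two-cell specifier: the first cell is
+ * the channel type from enum imx_mu_chan_type and the second is the index.
+ * With the generic mapping (chan = type * 4 + idx), a hypothetical consumer
+ * asking for RX doorbell 2 would use, e.g. (phandle name illustrative):
+ *
+ *	mboxes = <&mu 3 2>;	(IMX_MU_TYPE_RXDB, index 2 -> channel 14)
+ */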
+static struct mbox_chan *imx_mu_specific_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *sp)
+{
+ u32 type, idx, chan;
+
+ if (sp->args_count != 2) {
+ dev_err(mbox->dev, "Invalid argument count %d\n", sp->args_count);
+ return ERR_PTR(-EINVAL);
+ }
+
+ type = sp->args[0]; /* channel type */
+ idx = sp->args[1]; /* index */
+
+ switch (type) {
+ case IMX_MU_TYPE_TX:
+ case IMX_MU_TYPE_RX:
+ if (idx != 0)
+ dev_err(mbox->dev, "Invalid chan idx: %d\n", idx);
+ chan = type;
+ break;
+ case IMX_MU_TYPE_RXDB:
+ chan = 2 + idx;
+ break;
+ default:
+ dev_err(mbox->dev, "Invalid chan type: %d\n", type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (chan >= mbox->num_chans) {
+		dev_err(mbox->dev, "Unsupported channel number: %d (type: %d, idx: %d)\n", chan, type, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &mbox->chans[chan];
+}
+
+static struct mbox_chan *imx_mu_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *sp)
+{
+ u32 type, idx, chan;
+
+ if (sp->args_count != 2) {
+ dev_err(mbox->dev, "Invalid argument count %d\n", sp->args_count);
+ return ERR_PTR(-EINVAL);
+ }
+
+ type = sp->args[0]; /* channel type */
+ idx = sp->args[1]; /* index */
+ chan = type * 4 + idx;
+
+ if (chan >= mbox->num_chans) {
+		dev_err(mbox->dev, "Unsupported channel number: %d (type: %d, idx: %d)\n", chan, type, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &mbox->chans[chan];
+}
+
+static struct mbox_chan *imx_mu_seco_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *sp)
+{
+ u32 type;
+
+ if (sp->args_count < 1) {
+ dev_err(mbox->dev, "Invalid argument count %d\n", sp->args_count);
+ return ERR_PTR(-EINVAL);
+ }
+
+ type = sp->args[0]; /* channel type */
+
+ /* Only supports TXDB and RXDB */
+ if (type == IMX_MU_TYPE_TX || type == IMX_MU_TYPE_RX) {
+ dev_err(mbox->dev, "Invalid type: %d\n", type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return imx_mu_xlate(mbox, sp);
+}
+
+static void imx_mu_init_generic(struct imx_mu_priv *priv)
+{
+ unsigned int i;
+ unsigned int val;
+
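+	/*
+	 * Generic channel layout: channels 0-3 are TX0-3, 4-7 are RX0-3,
+	 * 8-11 are TX doorbells, 12-15 are RX doorbells and channel 16 is
+	 * the reset channel (idx = i % 4, type = i >> 2).
+	 */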
+ for (i = 0; i < IMX_MU_CHANS; i++) {
+ struct imx_mu_con_priv *cp = &priv->con_priv[i];
+
+ cp->idx = i % 4;
+ cp->type = i >> 2;
+ cp->chan = &priv->mbox_chans[i];
+ priv->mbox_chans[i].con_priv = cp;
+ snprintf(cp->irq_desc, sizeof(cp->irq_desc),
+ "imx_mu_chan[%i-%i]", cp->type, cp->idx);
+ }
+
+ priv->mbox.num_chans = IMX_MU_CHANS;
+ priv->mbox.of_xlate = imx_mu_xlate;
+
+ if (priv->side_b)
+ return;
+
+ /* Set default MU configuration */
+ for (i = 0; i < IMX_MU_xCR_MAX; i++)
+ imx_mu_write(priv, 0, priv->dcfg->xCR[i]);
+
+ /* Clear any pending GIP */
+ val = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_GSR]);
+ imx_mu_write(priv, val, priv->dcfg->xSR[IMX_MU_GSR]);
+
+ /* Clear any pending RSR */
+ for (i = 0; i < IMX_MU_NUM_RR; i++)
+ imx_mu_read(priv, priv->dcfg->xRR + (i % 4) * 4);
+}
+
+static void imx_mu_init_specific(struct imx_mu_priv *priv)
+{
+ unsigned int i;
+ int num_chans = priv->dcfg->type & IMX_MU_V2_S4 ? IMX_MU_S4_CHANS : IMX_MU_SCU_CHANS;
+
+ for (i = 0; i < num_chans; i++) {
+ struct imx_mu_con_priv *cp = &priv->con_priv[i];
+
+ cp->idx = i < 2 ? 0 : i - 2;
+ cp->type = i < 2 ? i : IMX_MU_TYPE_RXDB;
+ cp->chan = &priv->mbox_chans[i];
+ priv->mbox_chans[i].con_priv = cp;
+ snprintf(cp->irq_desc, sizeof(cp->irq_desc),
+ "imx_mu_chan[%i-%i]", cp->type, cp->idx);
+ }
+
+ priv->mbox.num_chans = num_chans;
+ priv->mbox.of_xlate = imx_mu_specific_xlate;
+
+ /* Set default MU configuration */
+ for (i = 0; i < IMX_MU_xCR_MAX; i++)
+ imx_mu_write(priv, 0, priv->dcfg->xCR[i]);
+}
+
+static void imx_mu_init_seco(struct imx_mu_priv *priv)
+{
+ imx_mu_init_generic(priv);
+ priv->mbox.of_xlate = imx_mu_seco_xlate;
+}
+
+static int imx_mu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct imx_mu_priv *priv;
+ const struct imx_mu_dcfg *dcfg;
+ int i, ret;
+ u32 size;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ dcfg = of_device_get_match_data(dev);
+ if (!dcfg)
+ return -EINVAL;
+ priv->dcfg = dcfg;
+ if (priv->dcfg->type & IMX_MU_V2_IRQ) {
+ priv->irq[IMX_MU_TYPE_TX] = platform_get_irq_byname(pdev, "tx");
+ if (priv->irq[IMX_MU_TYPE_TX] < 0)
+ return priv->irq[IMX_MU_TYPE_TX];
+ priv->irq[IMX_MU_TYPE_RX] = platform_get_irq_byname(pdev, "rx");
+ if (priv->irq[IMX_MU_TYPE_RX] < 0)
+ return priv->irq[IMX_MU_TYPE_RX];
+ } else {
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < IMX_MU_CHANS; i++)
+ priv->irq[i] = ret;
+ }
+
+ if (priv->dcfg->type & IMX_MU_V2_S4)
+ size = sizeof(struct imx_s4_rpc_msg_max);
+ else
+ size = sizeof(struct imx_sc_rpc_msg_max);
+
+ priv->msg = devm_kzalloc(dev, size, GFP_KERNEL);
+ if (!priv->msg)
+ return -ENOMEM;
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ if (PTR_ERR(priv->clk) != -ENOENT)
+ return PTR_ERR(priv->clk);
+
+ priv->clk = NULL;
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable clock\n");
+ return ret;
+ }
+
+ priv->side_b = of_property_read_bool(np, "fsl,mu-side-b");
+
+ priv->dcfg->init(priv);
+
+ spin_lock_init(&priv->xcr_lock);
+
+ priv->mbox.dev = dev;
+ priv->mbox.ops = &imx_mu_ops;
+ priv->mbox.chans = priv->mbox_chans;
+ priv->mbox.txdone_irq = true;
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = devm_mbox_controller_register(dev, &priv->mbox);
+ if (ret) {
+ clk_disable_unprepare(priv->clk);
+ return ret;
+ }
+
+ pm_runtime_enable(dev);
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ goto disable_runtime_pm;
+
+ ret = pm_runtime_put_sync(dev);
+ if (ret < 0)
+ goto disable_runtime_pm;
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+
+disable_runtime_pm:
+ pm_runtime_disable(dev);
+ clk_disable_unprepare(priv->clk);
+ return ret;
+}
+
+static int imx_mu_remove(struct platform_device *pdev)
+{
+ struct imx_mu_priv *priv = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(priv->dev);
+
+ return 0;
+}
+
+static const struct imx_mu_dcfg imx_mu_cfg_imx6sx = {
+ .tx = imx_mu_generic_tx,
+ .rx = imx_mu_generic_rx,
+ .rxdb = imx_mu_generic_rxdb,
+ .init = imx_mu_init_generic,
+ .xTR = 0x0,
+ .xRR = 0x10,
+ .xSR = {0x20, 0x20, 0x20, 0x20},
+ .xCR = {0x24, 0x24, 0x24, 0x24, 0x24},
+};
+
+static const struct imx_mu_dcfg imx_mu_cfg_imx7ulp = {
+ .tx = imx_mu_generic_tx,
+ .rx = imx_mu_generic_rx,
+ .rxdb = imx_mu_generic_rxdb,
+ .init = imx_mu_init_generic,
+ .xTR = 0x20,
+ .xRR = 0x40,
+ .xSR = {0x60, 0x60, 0x60, 0x60},
+ .xCR = {0x64, 0x64, 0x64, 0x64, 0x64},
+};
+
+static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp = {
+ .tx = imx_mu_generic_tx,
+ .rx = imx_mu_generic_rx,
+ .rxdb = imx_mu_generic_rxdb,
+ .init = imx_mu_init_generic,
+ .type = IMX_MU_V2,
+ .xTR = 0x200,
+ .xRR = 0x280,
+ .xSR = {0xC, 0x118, 0x124, 0x12C},
+ .xCR = {0x8, 0x110, 0x114, 0x120, 0x128},
+};
+
+static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp_s4 = {
+ .tx = imx_mu_specific_tx,
+ .rx = imx_mu_specific_rx,
+ .init = imx_mu_init_specific,
+ .type = IMX_MU_V2 | IMX_MU_V2_S4,
+ .xTR = 0x200,
+ .xRR = 0x280,
+ .xSR = {0xC, 0x118, 0x124, 0x12C},
+ .xCR = {0x8, 0x110, 0x114, 0x120, 0x128},
+};
+
+static const struct imx_mu_dcfg imx_mu_cfg_imx93_s4 = {
+ .tx = imx_mu_specific_tx,
+ .rx = imx_mu_specific_rx,
+ .init = imx_mu_init_specific,
+ .type = IMX_MU_V2 | IMX_MU_V2_S4 | IMX_MU_V2_IRQ,
+ .xTR = 0x200,
+ .xRR = 0x280,
+ .xSR = {0xC, 0x118, 0x124, 0x12C},
+ .xCR = {0x8, 0x110, 0x114, 0x120, 0x128},
+};
+
+static const struct imx_mu_dcfg imx_mu_cfg_imx8_scu = {
+ .tx = imx_mu_specific_tx,
+ .rx = imx_mu_specific_rx,
+ .init = imx_mu_init_specific,
+ .rxdb = imx_mu_generic_rxdb,
+ .xTR = 0x0,
+ .xRR = 0x10,
+ .xSR = {0x20, 0x20, 0x20, 0x20},
+ .xCR = {0x24, 0x24, 0x24, 0x24, 0x24},
+};
+
+static const struct imx_mu_dcfg imx_mu_cfg_imx8_seco = {
+ .tx = imx_mu_seco_tx,
+ .rx = imx_mu_generic_rx,
+ .rxdb = imx_mu_seco_rxdb,
+ .init = imx_mu_init_seco,
+ .xTR = 0x0,
+ .xRR = 0x10,
+ .xSR = {0x20, 0x20, 0x20, 0x20},
+ .xCR = {0x24, 0x24, 0x24, 0x24, 0x24},
+};
+
+static const struct of_device_id imx_mu_dt_ids[] = {
+ { .compatible = "fsl,imx7ulp-mu", .data = &imx_mu_cfg_imx7ulp },
+ { .compatible = "fsl,imx6sx-mu", .data = &imx_mu_cfg_imx6sx },
+ { .compatible = "fsl,imx8ulp-mu", .data = &imx_mu_cfg_imx8ulp },
+ { .compatible = "fsl,imx8ulp-mu-s4", .data = &imx_mu_cfg_imx8ulp_s4 },
+ { .compatible = "fsl,imx93-mu-s4", .data = &imx_mu_cfg_imx93_s4 },
+ { .compatible = "fsl,imx8-mu-scu", .data = &imx_mu_cfg_imx8_scu },
+ { .compatible = "fsl,imx8-mu-seco", .data = &imx_mu_cfg_imx8_seco },
+ { },
+};
+MODULE_DEVICE_TABLE(of, imx_mu_dt_ids);
+
+static int __maybe_unused imx_mu_suspend_noirq(struct device *dev)
+{
+ struct imx_mu_priv *priv = dev_get_drvdata(dev);
+ int i;
+
+ if (!priv->clk) {
+ for (i = 0; i < IMX_MU_xCR_MAX; i++)
+ priv->xcr[i] = imx_mu_read(priv, priv->dcfg->xCR[i]);
+ }
+
+ priv->suspend = true;
+
+ return 0;
+}
+
+static int __maybe_unused imx_mu_resume_noirq(struct device *dev)
+{
+ struct imx_mu_priv *priv = dev_get_drvdata(dev);
+ int i;
+
+	/*
+	 * Only restore the MU registers when the context has been lost.
+	 * The TIE bits could be set during noirq resume while MU data
+	 * communication is going on; restoring the saved value would
+	 * overwrite the TIE bits and cause MU data sends to fail, which
+	 * may lead to a system freeze. This issue was observed when
+	 * testing freeze-mode suspend.
+	 */
+ if (!priv->clk && !imx_mu_read(priv, priv->dcfg->xCR[0])) {
+ for (i = 0; i < IMX_MU_xCR_MAX; i++)
+ imx_mu_write(priv, priv->xcr[i], priv->dcfg->xCR[i]);
+ }
+
+ priv->suspend = false;
+
+ return 0;
+}
+
+static int __maybe_unused imx_mu_runtime_suspend(struct device *dev)
+{
+ struct imx_mu_priv *priv = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static int __maybe_unused imx_mu_runtime_resume(struct device *dev)
+{
+ struct imx_mu_priv *priv = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ dev_err(dev, "failed to enable clock\n");
+
+ return ret;
+}
+
+static const struct dev_pm_ops imx_mu_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_mu_suspend_noirq,
+ imx_mu_resume_noirq)
+ SET_RUNTIME_PM_OPS(imx_mu_runtime_suspend,
+ imx_mu_runtime_resume, NULL)
+};
+
+static struct platform_driver imx_mu_driver = {
+ .probe = imx_mu_probe,
+ .remove = imx_mu_remove,
+ .driver = {
+ .name = "imx_mu",
+ .of_match_table = imx_mu_dt_ids,
+ .pm = &imx_mu_pm_ops,
+ },
+};
+module_platform_driver(imx_mu_driver);
+
+MODULE_AUTHOR("Oleksij Rempel <o.rempel@pengutronix.de>");
+MODULE_DESCRIPTION("Message Unit driver for i.MX");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/mailbox-altera.c b/drivers/mailbox/mailbox-altera.c
new file mode 100644
index 000000000..afb320e9d
--- /dev/null
+++ b/drivers/mailbox/mailbox-altera.c
@@ -0,0 +1,361 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright Altera Corporation (C) 2013-2014. All rights reserved
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#define DRIVER_NAME "altera-mailbox"
+
+#define MAILBOX_CMD_REG 0x00
+#define MAILBOX_PTR_REG 0x04
+#define MAILBOX_STS_REG 0x08
+#define MAILBOX_INTMASK_REG 0x0C
+
+#define INT_PENDING_MSK 0x1
+#define INT_SPACE_MSK 0x2
+
+#define STS_PENDING_MSK 0x1
+#define STS_FULL_MSK 0x2
+#define STS_FULL_OFT 0x1
+
+#define MBOX_PENDING(status) (((status) & STS_PENDING_MSK))
+#define MBOX_FULL(status) (((status) & STS_FULL_MSK) >> STS_FULL_OFT)
+
+enum altera_mbox_msg {
+ MBOX_CMD = 0,
+ MBOX_PTR,
+};
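+
+/*
+ * A mailbox message for this controller is two u32 words: clients pass a
+ * u32[2] array where data[MBOX_CMD] goes to the command register and
+ * data[MBOX_PTR] to the pointer register (pointer written first, see
+ * altera_mbox_send_data()).
+ */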
+
+#define MBOX_POLLING_MS 5 /* polling interval 5ms */
+
+struct altera_mbox {
+ bool is_sender; /* 1-sender, 0-receiver */
+ bool intr_mode;
+ int irq;
+ void __iomem *mbox_base;
+ struct device *dev;
+ struct mbox_controller controller;
+
+ /* If the controller supports only RX polling mode */
+ struct timer_list rxpoll_timer;
+ struct mbox_chan *chan;
+};
+
+static struct altera_mbox *mbox_chan_to_altera_mbox(struct mbox_chan *chan)
+{
+ if (!chan || !chan->con_priv)
+ return NULL;
+
+ return (struct altera_mbox *)chan->con_priv;
+}
+
+static inline int altera_mbox_full(struct altera_mbox *mbox)
+{
+ u32 status;
+
+ status = readl_relaxed(mbox->mbox_base + MAILBOX_STS_REG);
+ return MBOX_FULL(status);
+}
+
+static inline int altera_mbox_pending(struct altera_mbox *mbox)
+{
+ u32 status;
+
+ status = readl_relaxed(mbox->mbox_base + MAILBOX_STS_REG);
+ return MBOX_PENDING(status);
+}
+
+static void altera_mbox_rx_intmask(struct altera_mbox *mbox, bool enable)
+{
+ u32 mask;
+
+ mask = readl_relaxed(mbox->mbox_base + MAILBOX_INTMASK_REG);
+ if (enable)
+ mask |= INT_PENDING_MSK;
+ else
+ mask &= ~INT_PENDING_MSK;
+ writel_relaxed(mask, mbox->mbox_base + MAILBOX_INTMASK_REG);
+}
+
+static void altera_mbox_tx_intmask(struct altera_mbox *mbox, bool enable)
+{
+ u32 mask;
+
+ mask = readl_relaxed(mbox->mbox_base + MAILBOX_INTMASK_REG);
+ if (enable)
+ mask |= INT_SPACE_MSK;
+ else
+ mask &= ~INT_SPACE_MSK;
+ writel_relaxed(mask, mbox->mbox_base + MAILBOX_INTMASK_REG);
+}
+
+static bool altera_mbox_is_sender(struct altera_mbox *mbox)
+{
+ u32 reg;
+	/* Write a magic number to the PTR register and read it back.
+	 * This register is read-write only if this is the sender.
+ */
+ #define MBOX_MAGIC 0xA5A5AA55
+ writel_relaxed(MBOX_MAGIC, mbox->mbox_base + MAILBOX_PTR_REG);
+ reg = readl_relaxed(mbox->mbox_base + MAILBOX_PTR_REG);
+ if (reg == MBOX_MAGIC) {
+ /* Clear to 0 */
+ writel_relaxed(0, mbox->mbox_base + MAILBOX_PTR_REG);
+ return true;
+ }
+ return false;
+}
+
+static void altera_mbox_rx_data(struct mbox_chan *chan)
+{
+ struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+ u32 data[2];
+
+ if (altera_mbox_pending(mbox)) {
+ data[MBOX_PTR] =
+ readl_relaxed(mbox->mbox_base + MAILBOX_PTR_REG);
+ data[MBOX_CMD] =
+ readl_relaxed(mbox->mbox_base + MAILBOX_CMD_REG);
+ mbox_chan_received_data(chan, (void *)data);
+ }
+}
+
+static void altera_mbox_poll_rx(struct timer_list *t)
+{
+ struct altera_mbox *mbox = from_timer(mbox, t, rxpoll_timer);
+
+ altera_mbox_rx_data(mbox->chan);
+
+ mod_timer(&mbox->rxpoll_timer,
+ jiffies + msecs_to_jiffies(MBOX_POLLING_MS));
+}
+
+static irqreturn_t altera_mbox_tx_interrupt(int irq, void *p)
+{
+ struct mbox_chan *chan = (struct mbox_chan *)p;
+ struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+
+ altera_mbox_tx_intmask(mbox, false);
+ mbox_chan_txdone(chan, 0);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t altera_mbox_rx_interrupt(int irq, void *p)
+{
+ struct mbox_chan *chan = (struct mbox_chan *)p;
+
+ altera_mbox_rx_data(chan);
+ return IRQ_HANDLED;
+}
+
+static int altera_mbox_startup_sender(struct mbox_chan *chan)
+{
+ int ret;
+ struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+
+ if (mbox->intr_mode) {
+ ret = request_irq(mbox->irq, altera_mbox_tx_interrupt, 0,
+ DRIVER_NAME, chan);
+ if (unlikely(ret)) {
+ dev_err(mbox->dev,
+ "failed to register mailbox interrupt:%d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int altera_mbox_startup_receiver(struct mbox_chan *chan)
+{
+ int ret;
+ struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+
+ if (mbox->intr_mode) {
+ ret = request_irq(mbox->irq, altera_mbox_rx_interrupt, 0,
+ DRIVER_NAME, chan);
+ if (unlikely(ret)) {
+ mbox->intr_mode = false;
+ goto polling; /* use polling if failed */
+ }
+
+ altera_mbox_rx_intmask(mbox, true);
+ return 0;
+ }
+
+polling:
+ /* Setup polling timer */
+ mbox->chan = chan;
+ timer_setup(&mbox->rxpoll_timer, altera_mbox_poll_rx, 0);
+ mod_timer(&mbox->rxpoll_timer,
+ jiffies + msecs_to_jiffies(MBOX_POLLING_MS));
+
+ return 0;
+}
+
+static int altera_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+ u32 *udata = (u32 *)data;
+
+ if (!mbox || !data)
+ return -EINVAL;
+ if (!mbox->is_sender) {
+ dev_warn(mbox->dev,
+ "failed to send: this is a receiver mailbox\n");
+ return -EINVAL;
+ }
+
+ if (altera_mbox_full(mbox))
+ return -EBUSY;
+
+ /* Enable interrupt before send */
+ if (mbox->intr_mode)
+ altera_mbox_tx_intmask(mbox, true);
+
+ /* The pointer register must be written before the command register */
+ writel_relaxed(udata[MBOX_PTR], mbox->mbox_base + MAILBOX_PTR_REG);
+ writel_relaxed(udata[MBOX_CMD], mbox->mbox_base + MAILBOX_CMD_REG);
+
+ return 0;
+}
+
+static bool altera_mbox_last_tx_done(struct mbox_chan *chan)
+{
+ struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+
+ /* Return false if mailbox is full */
+ return !altera_mbox_full(mbox);
+}
+
+static bool altera_mbox_peek_data(struct mbox_chan *chan)
+{
+ struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+
+ return altera_mbox_pending(mbox);
+}
+
+static int altera_mbox_startup(struct mbox_chan *chan)
+{
+ struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+ int ret = 0;
+
+ if (!mbox)
+ return -EINVAL;
+
+ if (mbox->is_sender)
+ ret = altera_mbox_startup_sender(chan);
+ else
+ ret = altera_mbox_startup_receiver(chan);
+
+ return ret;
+}
+
+static void altera_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+
+ if (mbox->intr_mode) {
+ /* Unmask all interrupt masks */
+ writel_relaxed(~0, mbox->mbox_base + MAILBOX_INTMASK_REG);
+ free_irq(mbox->irq, chan);
+ } else if (!mbox->is_sender) {
+ del_timer_sync(&mbox->rxpoll_timer);
+ }
+}
+
+static const struct mbox_chan_ops altera_mbox_ops = {
+ .send_data = altera_mbox_send_data,
+ .startup = altera_mbox_startup,
+ .shutdown = altera_mbox_shutdown,
+ .last_tx_done = altera_mbox_last_tx_done,
+ .peek_data = altera_mbox_peek_data,
+};
+
+static int altera_mbox_probe(struct platform_device *pdev)
+{
+ struct altera_mbox *mbox;
+ struct mbox_chan *chans;
+ int ret;
+
+ mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox),
+ GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ /* Allocate one channel */
+ chans = devm_kzalloc(&pdev->dev, sizeof(*chans), GFP_KERNEL);
+ if (!chans)
+ return -ENOMEM;
+
+ mbox->mbox_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mbox->mbox_base))
+ return PTR_ERR(mbox->mbox_base);
+
+ /* Check whether this is a sender or a receiver */
+ mbox->is_sender = altera_mbox_is_sender(mbox);
+
+ mbox->irq = platform_get_irq(pdev, 0);
+ if (mbox->irq >= 0)
+ mbox->intr_mode = true;
+
+ mbox->dev = &pdev->dev;
+
+ /* Hardware supports only one channel. */
+ chans[0].con_priv = mbox;
+ mbox->controller.dev = mbox->dev;
+ mbox->controller.num_chans = 1;
+ mbox->controller.chans = chans;
+ mbox->controller.ops = &altera_mbox_ops;
+
+ if (mbox->is_sender) {
+ if (mbox->intr_mode) {
+ mbox->controller.txdone_irq = true;
+ } else {
+ mbox->controller.txdone_poll = true;
+ mbox->controller.txpoll_period = MBOX_POLLING_MS;
+ }
+ }
+
+ ret = devm_mbox_controller_register(&pdev->dev, &mbox->controller);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register mailbox\n");
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, mbox);
+err:
+ return ret;
+}
+
+static const struct of_device_id altera_mbox_match[] = {
+ { .compatible = "altr,mailbox-1.0" },
+ { /* Sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, altera_mbox_match);
+
+static struct platform_driver altera_mbox_driver = {
+ .probe = altera_mbox_probe,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = altera_mbox_match,
+ },
+};
+
+module_platform_driver(altera_mbox_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Altera mailbox specific functions");
+MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>");
+MODULE_ALIAS("platform:altera-mailbox");
diff --git a/drivers/mailbox/mailbox-mpfs.c b/drivers/mailbox/mailbox-mpfs.c
new file mode 100644
index 000000000..20ee283a0
--- /dev/null
+++ b/drivers/mailbox/mailbox-mpfs.c
@@ -0,0 +1,287 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip PolarFire SoC (MPFS) system controller/mailbox controller driver
+ *
+ * Copyright (c) 2020-2022 Microchip Corporation. All rights reserved.
+ *
+ * Author: Conor Dooley <conor.dooley@microchip.com>
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/mailbox_controller.h>
+#include <soc/microchip/mpfs.h>
+
+#define SERVICES_CR_OFFSET 0x50u
+#define SERVICES_SR_OFFSET 0x54u
+#define MAILBOX_REG_OFFSET 0x800u
+#define MSS_SYS_MAILBOX_DATA_OFFSET 0u
+#define SCB_MASK_WIDTH 16u
+
+/* SCBCTRL service control register */
+
+#define SCB_CTRL_REQ (0)
+#define SCB_CTRL_REQ_MASK BIT(SCB_CTRL_REQ)
+
+#define SCB_CTRL_BUSY (1)
+#define SCB_CTRL_BUSY_MASK BIT(SCB_CTRL_BUSY)
+
+#define SCB_CTRL_ABORT (2)
+#define SCB_CTRL_ABORT_MASK BIT(SCB_CTRL_ABORT)
+
+#define SCB_CTRL_NOTIFY (3)
+#define SCB_CTRL_NOTIFY_MASK BIT(SCB_CTRL_NOTIFY)
+
+#define SCB_CTRL_POS (16)
+#define SCB_CTRL_MASK GENMASK(SCB_CTRL_POS + SCB_MASK_WIDTH - 1, SCB_CTRL_POS)
+
+/* SCBCTRL service status register */
+
+#define SCB_STATUS_REQ (0)
+#define SCB_STATUS_REQ_MASK BIT(SCB_STATUS_REQ)
+
+#define SCB_STATUS_BUSY (1)
+#define SCB_STATUS_BUSY_MASK BIT(SCB_STATUS_BUSY)
+
+#define SCB_STATUS_ABORT (2)
+#define SCB_STATUS_ABORT_MASK BIT(SCB_STATUS_ABORT)
+
+#define SCB_STATUS_NOTIFY (3)
+#define SCB_STATUS_NOTIFY_MASK BIT(SCB_STATUS_NOTIFY)
+
+#define SCB_STATUS_POS (16)
+#define SCB_STATUS_MASK GENMASK(SCB_STATUS_POS + SCB_MASK_WIDTH - 1, SCB_STATUS_POS)
+
+struct mpfs_mbox {
+ struct mbox_controller controller;
+ struct device *dev;
+ int irq;
+ void __iomem *ctrl_base;
+ void __iomem *mbox_base;
+ void __iomem *int_reg;
+ struct mbox_chan chans[1];
+ struct mpfs_mss_response *response;
+ u16 resp_offset;
+};
+
+static bool mpfs_mbox_busy(struct mpfs_mbox *mbox)
+{
+ u32 status;
+
+ status = readl_relaxed(mbox->ctrl_base + SERVICES_SR_OFFSET);
+
+ return status & SCB_STATUS_BUSY_MASK;
+}
+
+static bool mpfs_mbox_last_tx_done(struct mbox_chan *chan)
+{
+ struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv;
+ struct mpfs_mss_response *response = mbox->response;
+ u32 val;
+
+ if (mpfs_mbox_busy(mbox))
+ return false;
+
+ /*
+ * The service status is stored in bits 31:16 of the SERVICES_SR
+ * register & is only valid when the system controller is not busy.
+ * Failed services are intended to generate interrupts, but in reality
+ * this does not happen, so the status must be checked here.
+ */
+ val = readl_relaxed(mbox->ctrl_base + SERVICES_SR_OFFSET);
+ response->resp_status = (val & SCB_STATUS_MASK) >> SCB_STATUS_POS;
+
+ return true;
+}
+
+static int mpfs_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv;
+ struct mpfs_mss_msg *msg = data;
+ u32 tx_trigger;
+ u16 opt_sel;
+ u32 val = 0u;
+
+ mbox->response = msg->response;
+ mbox->resp_offset = msg->resp_offset;
+
+ if (mpfs_mbox_busy(mbox))
+ return -EBUSY;
+
+ if (msg->cmd_data_size) {
+ u32 index;
+ u8 extra_bits = msg->cmd_data_size & 3;
+ u32 *word_buf = (u32 *)msg->cmd_data;
+
+ for (index = 0; index < (msg->cmd_data_size / 4); index++)
+ writel_relaxed(word_buf[index],
+ mbox->mbox_base + msg->mbox_offset + index * 0x4);
+ if (extra_bits) {
+ u8 i;
+ u8 byte_off = ALIGN_DOWN(msg->cmd_data_size, 4);
+ u8 *byte_buf = msg->cmd_data + byte_off;
+
+ val = readl_relaxed(mbox->mbox_base + msg->mbox_offset + index * 0x4);
+
+ for (i = 0u; i < extra_bits; i++) {
+ val &= ~(0xffu << (i * 8u));
+ val |= (byte_buf[i] << (i * 8u));
+ }
+
+ writel_relaxed(val, mbox->mbox_base + msg->mbox_offset + index * 0x4);
+ }
+ }
+
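+ /*
+ * The 16-bit service descriptor appears to pack the mailbox offset
+ * into bits 15:7 and the command opcode into bits 6:0; the
+ * SCB_CTRL_POS shift below then places it in the upper half of the
+ * SERVICES_CR register.
+ */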
+ opt_sel = ((msg->mbox_offset << 7u) | (msg->cmd_opcode & 0x7fu));
+
+ tx_trigger = (opt_sel << SCB_CTRL_POS) & SCB_CTRL_MASK;
+ tx_trigger |= SCB_CTRL_REQ_MASK | SCB_STATUS_NOTIFY_MASK;
+ writel_relaxed(tx_trigger, mbox->ctrl_base + SERVICES_CR_OFFSET);
+
+ return 0;
+}
+
+static void mpfs_mbox_rx_data(struct mbox_chan *chan)
+{
+ struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv;
+ struct mpfs_mss_response *response = mbox->response;
+ u16 num_words = ALIGN((response->resp_size), (4)) / 4U;
+ u32 i;
+
+ if (!response->resp_msg) {
+ dev_err(mbox->dev, "failed to assign memory for response %d\n", -ENOMEM);
+ return;
+ }
+
+ /*
+ * We should *never* get an interrupt while the controller is
+ * still in the busy state. If we do, something has gone badly
+ * wrong & the content of the mailbox would not be valid.
+ */
+ if (mpfs_mbox_busy(mbox)) {
+ dev_err(mbox->dev, "got an interrupt but system controller is busy\n");
+ response->resp_status = 0xDEAD;
+ return;
+ }
+
+ for (i = 0; i < num_words; i++) {
+ response->resp_msg[i] =
+ readl_relaxed(mbox->mbox_base
+ + mbox->resp_offset + i * 0x4);
+ }
+
+ mbox_chan_received_data(chan, response);
+}
+
+static irqreturn_t mpfs_mbox_inbox_isr(int irq, void *data)
+{
+ struct mbox_chan *chan = data;
+ struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv;
+
+ writel_relaxed(0, mbox->int_reg);
+
+ mpfs_mbox_rx_data(chan);
+
+ return IRQ_HANDLED;
+}
+
+static int mpfs_mbox_startup(struct mbox_chan *chan)
+{
+ struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv;
+ int ret = 0;
+
+ if (!mbox)
+ return -EINVAL;
+
+ ret = devm_request_irq(mbox->dev, mbox->irq, mpfs_mbox_inbox_isr, 0, "mpfs-mailbox", chan);
+ if (ret)
+ dev_err(mbox->dev, "failed to register mailbox interrupt:%d\n", ret);
+
+ return ret;
+}
+
+static void mpfs_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv;
+
+ devm_free_irq(mbox->dev, mbox->irq, chan);
+}
+
+static const struct mbox_chan_ops mpfs_mbox_ops = {
+ .send_data = mpfs_mbox_send_data,
+ .startup = mpfs_mbox_startup,
+ .shutdown = mpfs_mbox_shutdown,
+ .last_tx_done = mpfs_mbox_last_tx_done,
+};
+
+static int mpfs_mbox_probe(struct platform_device *pdev)
+{
+ struct mpfs_mbox *mbox;
+ struct resource *regs;
+ int ret;
+
+ mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ mbox->ctrl_base = devm_platform_get_and_ioremap_resource(pdev, 0, &regs);
+ if (IS_ERR(mbox->ctrl_base))
+ return PTR_ERR(mbox->ctrl_base);
+
+ mbox->int_reg = devm_platform_get_and_ioremap_resource(pdev, 1, &regs);
+ if (IS_ERR(mbox->int_reg))
+ return PTR_ERR(mbox->int_reg);
+
+ mbox->mbox_base = devm_platform_get_and_ioremap_resource(pdev, 2, &regs);
+ if (IS_ERR(mbox->mbox_base)) // account for the old dt-binding w/ 2 regs
+ mbox->mbox_base = mbox->ctrl_base + MAILBOX_REG_OFFSET;
+
+ mbox->irq = platform_get_irq(pdev, 0);
+ if (mbox->irq < 0)
+ return mbox->irq;
+
+ mbox->dev = &pdev->dev;
+
+ mbox->chans[0].con_priv = mbox;
+ mbox->controller.dev = mbox->dev;
+ mbox->controller.num_chans = 1;
+ mbox->controller.chans = mbox->chans;
+ mbox->controller.ops = &mpfs_mbox_ops;
+ mbox->controller.txdone_poll = true;
+ mbox->controller.txpoll_period = 10u;
+
+ ret = devm_mbox_controller_register(&pdev->dev, &mbox->controller);
+ if (ret) {
+ dev_err(&pdev->dev, "Registering MPFS mailbox controller failed\n");
+ return ret;
+ }
+ dev_info(&pdev->dev, "Registered MPFS mailbox controller driver\n");
+
+ return 0;
+}
+
+static const struct of_device_id mpfs_mbox_of_match[] = {
+ {.compatible = "microchip,mpfs-mailbox", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mpfs_mbox_of_match);
+
+static struct platform_driver mpfs_mbox_driver = {
+ .driver = {
+ .name = "mpfs-mailbox",
+ .of_match_table = mpfs_mbox_of_match,
+ },
+ .probe = mpfs_mbox_probe,
+};
+module_platform_driver(mpfs_mbox_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
+MODULE_DESCRIPTION("MPFS mailbox controller driver");
diff --git a/drivers/mailbox/mailbox-sti.c b/drivers/mailbox/mailbox-sti.c
new file mode 100644
index 000000000..823061dd8
--- /dev/null
+++ b/drivers/mailbox/mailbox-sti.c
@@ -0,0 +1,497 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * STi Mailbox
+ *
+ * Copyright (C) 2015 ST Microelectronics
+ *
+ * Author: Lee Jones <lee.jones@linaro.org> for ST Microelectronics
+ *
+ * Based on the original driver written by;
+ * Alexandre Torgue, Olivier Lebreton and Loic Pallardy
+ */
+
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "mailbox.h"
+
+#define STI_MBOX_INST_MAX 4 /* RAM saving: Max supported instances */
+#define STI_MBOX_CHAN_MAX 20 /* RAM saving: Max supported channels */
+
+#define STI_IRQ_VAL_OFFSET 0x04 /* Read interrupt status */
+#define STI_IRQ_SET_OFFSET 0x24 /* Generate a Tx channel interrupt */
+#define STI_IRQ_CLR_OFFSET 0x44 /* Clear pending Rx interrupts */
+#define STI_ENA_VAL_OFFSET 0x64 /* Read enable status */
+#define STI_ENA_SET_OFFSET 0x84 /* Enable a channel */
+#define STI_ENA_CLR_OFFSET 0xa4 /* Disable a channel */
+
+#define MBOX_BASE(mdev, inst) ((mdev)->base + ((inst) * 4))
+
+/**
+ * struct sti_mbox_device - STi Mailbox device data
+ *
+ * @dev: Device to which it is attached
+ * @mbox: Representation of a communication channel controller
+ * @base: Base address of the register mapping region
+ * @name: Name of the mailbox
+ * @enabled: Local copy of enabled channels
+ * @lock: Mutex protecting enabled status
+ *
+ * An IP Mailbox is currently composed of 4 instances.
+ * Each instance is currently composed of 32 channels,
+ * which means there are 128 channels per Mailbox.
+ * A channel can be used for TX or RX.
+ */
+struct sti_mbox_device {
+ struct device *dev;
+ struct mbox_controller *mbox;
+ void __iomem *base;
+ const char *name;
+ u32 enabled[STI_MBOX_INST_MAX];
+ spinlock_t lock;
+};
+
+/**
+ * struct sti_mbox_pdata - STi Mailbox platform specific configuration
+ *
+ * @num_inst: Maximum number of instances in one HW Mailbox
+ * @num_chan: Maximum number of channel per instance
+ */
+struct sti_mbox_pdata {
+ unsigned int num_inst;
+ unsigned int num_chan;
+};
+
+/**
+ * struct sti_channel - STi Mailbox allocated channel information
+ *
+ * @mdev: Pointer to parent Mailbox device
+ * @instance: Instance number channel resides in
+ * @channel: Channel number pertaining to this container
+ */
+struct sti_channel {
+ struct sti_mbox_device *mdev;
+ unsigned int instance;
+ unsigned int channel;
+};
+
+static inline bool sti_mbox_channel_is_enabled(struct mbox_chan *chan)
+{
+ struct sti_channel *chan_info = chan->con_priv;
+ struct sti_mbox_device *mdev = chan_info->mdev;
+ unsigned int instance = chan_info->instance;
+ unsigned int channel = chan_info->channel;
+
+ return mdev->enabled[instance] & BIT(channel);
+}
+
+static inline
+struct mbox_chan *sti_mbox_to_channel(struct mbox_controller *mbox,
+ unsigned int instance,
+ unsigned int channel)
+{
+ struct sti_channel *chan_info;
+ int i;
+
+ for (i = 0; i < mbox->num_chans; i++) {
+ chan_info = mbox->chans[i].con_priv;
+ if (chan_info &&
+ chan_info->instance == instance &&
+ chan_info->channel == channel)
+ return &mbox->chans[i];
+ }
+
+ dev_err(mbox->dev,
+ "Channel not registered: instance: %d channel: %d\n",
+ instance, channel);
+
+ return NULL;
+}
+
+static void sti_mbox_enable_channel(struct mbox_chan *chan)
+{
+ struct sti_channel *chan_info = chan->con_priv;
+ struct sti_mbox_device *mdev = chan_info->mdev;
+ unsigned int instance = chan_info->instance;
+ unsigned int channel = chan_info->channel;
+ unsigned long flags;
+ void __iomem *base = MBOX_BASE(mdev, instance);
+
+ spin_lock_irqsave(&mdev->lock, flags);
+ mdev->enabled[instance] |= BIT(channel);
+ writel_relaxed(BIT(channel), base + STI_ENA_SET_OFFSET);
+ spin_unlock_irqrestore(&mdev->lock, flags);
+}
+
+static void sti_mbox_disable_channel(struct mbox_chan *chan)
+{
+ struct sti_channel *chan_info = chan->con_priv;
+ struct sti_mbox_device *mdev = chan_info->mdev;
+ unsigned int instance = chan_info->instance;
+ unsigned int channel = chan_info->channel;
+ unsigned long flags;
+ void __iomem *base = MBOX_BASE(mdev, instance);
+
+ spin_lock_irqsave(&mdev->lock, flags);
+ mdev->enabled[instance] &= ~BIT(channel);
+ writel_relaxed(BIT(channel), base + STI_ENA_CLR_OFFSET);
+ spin_unlock_irqrestore(&mdev->lock, flags);
+}
+
+static void sti_mbox_clear_irq(struct mbox_chan *chan)
+{
+ struct sti_channel *chan_info = chan->con_priv;
+ struct sti_mbox_device *mdev = chan_info->mdev;
+ unsigned int instance = chan_info->instance;
+ unsigned int channel = chan_info->channel;
+ void __iomem *base = MBOX_BASE(mdev, instance);
+
+ writel_relaxed(BIT(channel), base + STI_IRQ_CLR_OFFSET);
+}
+
+static struct mbox_chan *sti_mbox_irq_to_channel(struct sti_mbox_device *mdev,
+ unsigned int instance)
+{
+ struct mbox_controller *mbox = mdev->mbox;
+ struct mbox_chan *chan = NULL;
+ unsigned int channel;
+ unsigned long bits;
+ void __iomem *base = MBOX_BASE(mdev, instance);
+
+ bits = readl_relaxed(base + STI_IRQ_VAL_OFFSET);
+ if (!bits)
+ /* No IRQs fired in specified instance */
+ return NULL;
+
+ /* An IRQ has fired, find the associated channel */
+ for (channel = 0; bits; channel++) {
+ if (!test_and_clear_bit(channel, &bits))
+ continue;
+
+ chan = sti_mbox_to_channel(mbox, instance, channel);
+ if (chan) {
+ dev_dbg(mbox->dev,
+ "IRQ fired on instance: %d channel: %d\n",
+ instance, channel);
+ break;
+ }
+ }
+
+ return chan;
+}
+
+static irqreturn_t sti_mbox_thread_handler(int irq, void *data)
+{
+ struct sti_mbox_device *mdev = data;
+ struct sti_mbox_pdata *pdata = dev_get_platdata(mdev->dev);
+ struct mbox_chan *chan;
+ unsigned int instance;
+
+ for (instance = 0; instance < pdata->num_inst; instance++) {
+keep_looking:
+ chan = sti_mbox_irq_to_channel(mdev, instance);
+ if (!chan)
+ continue;
+
+ mbox_chan_received_data(chan, NULL);
+ sti_mbox_clear_irq(chan);
+ sti_mbox_enable_channel(chan);
+ goto keep_looking;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t sti_mbox_irq_handler(int irq, void *data)
+{
+ struct sti_mbox_device *mdev = data;
+ struct sti_mbox_pdata *pdata = dev_get_platdata(mdev->dev);
+ struct sti_channel *chan_info;
+ struct mbox_chan *chan;
+ unsigned int instance;
+ int ret = IRQ_NONE;
+
+ for (instance = 0; instance < pdata->num_inst; instance++) {
+ chan = sti_mbox_irq_to_channel(mdev, instance);
+ if (!chan)
+ continue;
+ chan_info = chan->con_priv;
+
+ if (!sti_mbox_channel_is_enabled(chan)) {
+ dev_warn(mdev->dev,
+ "Unexpected IRQ: %s\n"
+ " instance: %d: channel: %d [enabled: %x]\n",
+ mdev->name, chan_info->instance,
+ chan_info->channel, mdev->enabled[instance]);
+
+ /* Only handle IRQ if no other valid IRQs were found */
+ if (ret == IRQ_NONE)
+ ret = IRQ_HANDLED;
+ continue;
+ }
+
+ sti_mbox_disable_channel(chan);
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ if (ret == IRQ_NONE)
+ dev_err(mdev->dev, "Spurious IRQ - was a channel requested?\n");
+
+ return ret;
+}
+
+static bool sti_mbox_tx_is_ready(struct mbox_chan *chan)
+{
+ struct sti_channel *chan_info = chan->con_priv;
+ struct sti_mbox_device *mdev = chan_info->mdev;
+ unsigned int instance = chan_info->instance;
+ unsigned int channel = chan_info->channel;
+ void __iomem *base = MBOX_BASE(mdev, instance);
+
+ if (!(readl_relaxed(base + STI_ENA_VAL_OFFSET) & BIT(channel))) {
+ dev_dbg(mdev->dev, "Mbox: %s: inst: %d, chan: %d disabled\n",
+ mdev->name, instance, channel);
+ return false;
+ }
+
+ if (readl_relaxed(base + STI_IRQ_VAL_OFFSET) & BIT(channel)) {
+ dev_dbg(mdev->dev, "Mbox: %s: inst: %d, chan: %d not ready\n",
+ mdev->name, instance, channel);
+ return false;
+ }
+
+ return true;
+}
+
+static int sti_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct sti_channel *chan_info = chan->con_priv;
+ struct sti_mbox_device *mdev = chan_info->mdev;
+ unsigned int instance = chan_info->instance;
+ unsigned int channel = chan_info->channel;
+ void __iomem *base = MBOX_BASE(mdev, instance);
+
+ /* Send event to co-processor */
+ writel_relaxed(BIT(channel), base + STI_IRQ_SET_OFFSET);
+
+ dev_dbg(mdev->dev,
+ "Sent via Mailbox %s: instance: %d channel: %d\n",
+ mdev->name, instance, channel);
+
+ return 0;
+}
+
+static int sti_mbox_startup_chan(struct mbox_chan *chan)
+{
+ sti_mbox_clear_irq(chan);
+ sti_mbox_enable_channel(chan);
+
+ return 0;
+}
+
+static void sti_mbox_shutdown_chan(struct mbox_chan *chan)
+{
+ struct sti_channel *chan_info = chan->con_priv;
+ struct mbox_controller *mbox = chan_info->mdev->mbox;
+ int i;
+
+ for (i = 0; i < mbox->num_chans; i++)
+ if (chan == &mbox->chans[i])
+ break;
+
+ if (mbox->num_chans == i) {
+ dev_warn(mbox->dev, "Request to free non-existent channel\n");
+ return;
+ }
+
+ /* Reset channel */
+ sti_mbox_disable_channel(chan);
+ sti_mbox_clear_irq(chan);
+ chan->con_priv = NULL;
+}
+
+static struct mbox_chan *sti_mbox_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *spec)
+{
+ struct sti_mbox_device *mdev = dev_get_drvdata(mbox->dev);
+ struct sti_mbox_pdata *pdata = dev_get_platdata(mdev->dev);
+ struct sti_channel *chan_info;
+ struct mbox_chan *chan = NULL;
+ unsigned int instance = spec->args[0];
+ unsigned int channel = spec->args[1];
+ int i;
+
+ /* Bounds checking */
+ if (instance >= pdata->num_inst || channel >= pdata->num_chan) {
+ dev_err(mbox->dev,
+ "Invalid channel requested instance: %d channel: %d\n",
+ instance, channel);
+ return ERR_PTR(-EINVAL);
+ }
+
+ for (i = 0; i < mbox->num_chans; i++) {
+ chan_info = mbox->chans[i].con_priv;
+
+ /* Is requested channel free? */
+ if (chan_info &&
+ mbox->dev == chan_info->mdev->dev &&
+ instance == chan_info->instance &&
+ channel == chan_info->channel) {
+
+ dev_err(mbox->dev, "Channel in use\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ /*
+ * Find the first free slot, then continue checking
+ * to see if requested channel is in use
+ */
+ if (!chan && !chan_info)
+ chan = &mbox->chans[i];
+ }
+
+ if (!chan) {
+ dev_err(mbox->dev, "No free channels left\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ chan_info = devm_kzalloc(mbox->dev, sizeof(*chan_info), GFP_KERNEL);
+ if (!chan_info)
+ return ERR_PTR(-ENOMEM);
+
+ chan_info->mdev = mdev;
+ chan_info->instance = instance;
+ chan_info->channel = channel;
+
+ chan->con_priv = chan_info;
+
+ dev_info(mbox->dev,
+ "Mbox: %s: Created channel: instance: %d channel: %d\n",
+ mdev->name, instance, channel);
+
+ return chan;
+}
+
+static const struct mbox_chan_ops sti_mbox_ops = {
+ .startup = sti_mbox_startup_chan,
+ .shutdown = sti_mbox_shutdown_chan,
+ .send_data = sti_mbox_send_data,
+ .last_tx_done = sti_mbox_tx_is_ready,
+};
+
+static const struct sti_mbox_pdata mbox_stih407_pdata = {
+ .num_inst = 4,
+ .num_chan = 32,
+};
+
+static const struct of_device_id sti_mailbox_match[] = {
+ {
+ .compatible = "st,stih407-mailbox",
+ .data = (void *)&mbox_stih407_pdata
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sti_mailbox_match);
+
+static int sti_mbox_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ struct mbox_controller *mbox;
+ struct sti_mbox_device *mdev;
+ struct device_node *np = pdev->dev.of_node;
+ struct mbox_chan *chans;
+ int irq;
+ int ret;
+
+ match = of_match_device(sti_mailbox_match, &pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "No configuration found\n");
+ return -ENODEV;
+ }
+ pdev->dev.platform_data = (struct sti_mbox_pdata *) match->data;
+
+ mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
+ if (!mdev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, mdev);
+
+ mdev->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mdev->base))
+ return PTR_ERR(mdev->base);
+
+ ret = of_property_read_string(np, "mbox-name", &mdev->name);
+ if (ret)
+ mdev->name = np->full_name;
+
+ mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ chans = devm_kcalloc(&pdev->dev,
+ STI_MBOX_CHAN_MAX, sizeof(*chans), GFP_KERNEL);
+ if (!chans)
+ return -ENOMEM;
+
+ mdev->dev = &pdev->dev;
+ mdev->mbox = mbox;
+
+ spin_lock_init(&mdev->lock);
+
+ /* STi Mailbox does not have a Tx-Done or Tx-Ready IRQ */
+ mbox->txdone_irq = false;
+ mbox->txdone_poll = true;
+ mbox->txpoll_period = 100;
+ mbox->ops = &sti_mbox_ops;
+ mbox->dev = mdev->dev;
+ mbox->of_xlate = sti_mbox_xlate;
+ mbox->chans = chans;
+ mbox->num_chans = STI_MBOX_CHAN_MAX;
+
+ ret = devm_mbox_controller_register(&pdev->dev, mbox);
+ if (ret)
+ return ret;
+
+ /* It's okay for Tx Mailboxes to not supply IRQs */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_info(&pdev->dev,
+ "%s: Registered Tx only Mailbox\n", mdev->name);
+ return 0;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq,
+ sti_mbox_irq_handler,
+ sti_mbox_thread_handler,
+ IRQF_ONESHOT, mdev->name, mdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't claim IRQ %d\n", irq);
+ return -EINVAL;
+ }
+
+ dev_info(&pdev->dev, "%s: Registered Tx/Rx Mailbox\n", mdev->name);
+
+ return 0;
+}
+
+static struct platform_driver sti_mbox_driver = {
+ .probe = sti_mbox_probe,
+ .driver = {
+ .name = "sti-mailbox",
+ .of_match_table = sti_mailbox_match,
+ },
+};
+module_platform_driver(sti_mbox_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("STMicroelectronics Mailbox Controller");
+MODULE_AUTHOR("Lee Jones <lee.jones@linaro.org");
+MODULE_ALIAS("platform:mailbox-sti");
diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c
new file mode 100644
index 000000000..22d6018ce
--- /dev/null
+++ b/drivers/mailbox/mailbox-test.c
@@ -0,0 +1,453 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2015 ST Microelectronics
+ *
+ * Author: Lee Jones <lee.jones@linaro.org>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/sched/signal.h>
+
+#define MBOX_MAX_SIG_LEN 8
+#define MBOX_MAX_MSG_LEN 128
+#define MBOX_BYTES_PER_LINE 16
+#define MBOX_HEXDUMP_LINE_LEN ((MBOX_BYTES_PER_LINE * 4) + 2)
+#define MBOX_HEXDUMP_MAX_LEN (MBOX_HEXDUMP_LINE_LEN * \
+ (MBOX_MAX_MSG_LEN / MBOX_BYTES_PER_LINE))
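+/*
+ * With the sizes above, each hexdump line occupies (16 * 4) + 2 = 66 bytes
+ * and a full 128-byte message needs 128 / 16 = 8 such lines, i.e. 528 bytes
+ * in total for MBOX_HEXDUMP_MAX_LEN.
+ */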
+
+static bool mbox_data_ready;
+
+struct mbox_test_device {
+ struct device *dev;
+ void __iomem *tx_mmio;
+ void __iomem *rx_mmio;
+ struct mbox_chan *tx_channel;
+ struct mbox_chan *rx_channel;
+ char *rx_buffer;
+ char *signal;
+ char *message;
+ spinlock_t lock;
+ struct mutex mutex;
+ wait_queue_head_t waitq;
+ struct fasync_struct *async_queue;
+ struct dentry *root_debugfs_dir;
+};
+
+static ssize_t mbox_test_signal_write(struct file *filp,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct mbox_test_device *tdev = filp->private_data;
+
+ if (!tdev->tx_channel) {
+ dev_err(tdev->dev, "Channel cannot do Tx\n");
+ return -EINVAL;
+ }
+
+ if (count > MBOX_MAX_SIG_LEN) {
+ dev_err(tdev->dev,
+ "Signal length %zd greater than max allowed %d\n",
+ count, MBOX_MAX_SIG_LEN);
+ return -EINVAL;
+ }
+
+ /* Only allocate memory if we need to */
+ if (!tdev->signal) {
+ tdev->signal = kzalloc(MBOX_MAX_SIG_LEN, GFP_KERNEL);
+ if (!tdev->signal)
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(tdev->signal, userbuf, count)) {
+ kfree(tdev->signal);
+ tdev->signal = NULL;
+ return -EFAULT;
+ }
+
+ return count;
+}
+
+static const struct file_operations mbox_test_signal_ops = {
+ .write = mbox_test_signal_write,
+ .open = simple_open,
+ .llseek = generic_file_llseek,
+};
+
+static int mbox_test_message_fasync(int fd, struct file *filp, int on)
+{
+ struct mbox_test_device *tdev = filp->private_data;
+
+ return fasync_helper(fd, filp, on, &tdev->async_queue);
+}
+
+static ssize_t mbox_test_message_write(struct file *filp,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct mbox_test_device *tdev = filp->private_data;
+ char *message;
+ void *data;
+ int ret;
+
+ if (!tdev->tx_channel) {
+ dev_err(tdev->dev, "Channel cannot do Tx\n");
+ return -EINVAL;
+ }
+
+ if (count > MBOX_MAX_MSG_LEN) {
+ dev_err(tdev->dev,
+ "Message length %zd greater than max allowed %d\n",
+ count, MBOX_MAX_MSG_LEN);
+ return -EINVAL;
+ }
+
+ message = kzalloc(MBOX_MAX_MSG_LEN, GFP_KERNEL);
+ if (!message)
+ return -ENOMEM;
+
+ mutex_lock(&tdev->mutex);
+
+ tdev->message = message;
+ ret = copy_from_user(tdev->message, userbuf, count);
+ if (ret) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ /*
+ * A separate signal is only of use if there is
+ * MMIO to subsequently pass the message through
+ */
+ if (tdev->tx_mmio && tdev->signal) {
+ print_hex_dump_bytes("Client: Sending: Signal: ", DUMP_PREFIX_ADDRESS,
+ tdev->signal, MBOX_MAX_SIG_LEN);
+
+ data = tdev->signal;
+ } else
+ data = tdev->message;
+
+ print_hex_dump_bytes("Client: Sending: Message: ", DUMP_PREFIX_ADDRESS,
+ tdev->message, MBOX_MAX_MSG_LEN);
+
+ ret = mbox_send_message(tdev->tx_channel, data);
+ if (ret < 0)
+ dev_err(tdev->dev, "Failed to send message via mailbox\n");
+
+out:
+ kfree(tdev->signal);
+ kfree(tdev->message);
+ tdev->signal = NULL;
+
+ mutex_unlock(&tdev->mutex);
+
+ return ret < 0 ? ret : count;
+}
+
+static bool mbox_test_message_data_ready(struct mbox_test_device *tdev)
+{
+ bool data_ready;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tdev->lock, flags);
+ data_ready = mbox_data_ready;
+ spin_unlock_irqrestore(&tdev->lock, flags);
+
+ return data_ready;
+}
+
+static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct mbox_test_device *tdev = filp->private_data;
+ unsigned long flags;
+ char *touser, *ptr;
+ int l = 0;
+ int ret;
+
+ DECLARE_WAITQUEUE(wait, current);
+
+ touser = kzalloc(MBOX_HEXDUMP_MAX_LEN + 1, GFP_KERNEL);
+ if (!touser)
+ return -ENOMEM;
+
+ if (!tdev->rx_channel) {
+ ret = snprintf(touser, 20, "<NO RX CAPABILITY>\n");
+ ret = simple_read_from_buffer(userbuf, count, ppos,
+ touser, ret);
+ goto kfree_err;
+ }
+
+ add_wait_queue(&tdev->waitq, &wait);
+
+ do {
+ __set_current_state(TASK_INTERRUPTIBLE);
+
+ if (mbox_test_message_data_ready(tdev))
+ break;
+
+ if (filp->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ goto waitq_err;
+ }
+
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ goto waitq_err;
+ }
+ schedule();
+
+ } while (1);
+
+ spin_lock_irqsave(&tdev->lock, flags);
+
+ ptr = tdev->rx_buffer;
+ while (l < MBOX_HEXDUMP_MAX_LEN) {
+ hex_dump_to_buffer(ptr,
+ MBOX_BYTES_PER_LINE,
+ MBOX_BYTES_PER_LINE, 1, touser + l,
+ MBOX_HEXDUMP_LINE_LEN, true);
+
+ ptr += MBOX_BYTES_PER_LINE;
+ l += MBOX_HEXDUMP_LINE_LEN;
+ *(touser + (l - 1)) = '\n';
+ }
+ *(touser + l) = '\0';
+
+ memset(tdev->rx_buffer, 0, MBOX_MAX_MSG_LEN);
+ mbox_data_ready = false;
+
+ spin_unlock_irqrestore(&tdev->lock, flags);
+
+ ret = simple_read_from_buffer(userbuf, count, ppos, touser, MBOX_HEXDUMP_MAX_LEN);
+waitq_err:
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&tdev->waitq, &wait);
+kfree_err:
+ kfree(touser);
+ return ret;
+}
+
+static __poll_t
+mbox_test_message_poll(struct file *filp, struct poll_table_struct *wait)
+{
+ struct mbox_test_device *tdev = filp->private_data;
+
+ poll_wait(filp, &tdev->waitq, wait);
+
+ if (mbox_test_message_data_ready(tdev))
+ return EPOLLIN | EPOLLRDNORM;
+ return 0;
+}
+
+static const struct file_operations mbox_test_message_ops = {
+ .write = mbox_test_message_write,
+ .read = mbox_test_message_read,
+ .fasync = mbox_test_message_fasync,
+ .poll = mbox_test_message_poll,
+ .open = simple_open,
+ .llseek = generic_file_llseek,
+};
+
+static int mbox_test_add_debugfs(struct platform_device *pdev,
+ struct mbox_test_device *tdev)
+{
+ if (!debugfs_initialized())
+ return 0;
+
+ tdev->root_debugfs_dir = debugfs_create_dir(dev_name(&pdev->dev), NULL);
+ if (!tdev->root_debugfs_dir) {
+ dev_err(&pdev->dev, "Failed to create Mailbox debugfs\n");
+ return -EINVAL;
+ }
+
+ debugfs_create_file("message", 0600, tdev->root_debugfs_dir,
+ tdev, &mbox_test_message_ops);
+
+ debugfs_create_file("signal", 0200, tdev->root_debugfs_dir,
+ tdev, &mbox_test_signal_ops);
+
+ return 0;
+}
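+
+/*
+ * The two debugfs entries created above form the user-space interface of
+ * this test client: writing to "message" (optionally after writing "signal")
+ * queues a transmission on the Tx channel, while reading "message" blocks
+ * until a hexdump of the next received buffer is available. The files live
+ * under the device-named directory, typically below /sys/kernel/debug/.
+ */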
+
+static void mbox_test_receive_message(struct mbox_client *client, void *message)
+{
+ struct mbox_test_device *tdev = dev_get_drvdata(client->dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&tdev->lock, flags);
+ if (tdev->rx_mmio) {
+ memcpy_fromio(tdev->rx_buffer, tdev->rx_mmio, MBOX_MAX_MSG_LEN);
+ print_hex_dump_bytes("Client: Received [MMIO]: ", DUMP_PREFIX_ADDRESS,
+ tdev->rx_buffer, MBOX_MAX_MSG_LEN);
+ } else if (message) {
+ print_hex_dump_bytes("Client: Received [API]: ", DUMP_PREFIX_ADDRESS,
+ message, MBOX_MAX_MSG_LEN);
+ memcpy(tdev->rx_buffer, message, MBOX_MAX_MSG_LEN);
+ }
+ mbox_data_ready = true;
+ spin_unlock_irqrestore(&tdev->lock, flags);
+
+ wake_up_interruptible(&tdev->waitq);
+
+ kill_fasync(&tdev->async_queue, SIGIO, POLL_IN);
+}
+
+static void mbox_test_prepare_message(struct mbox_client *client, void *message)
+{
+ struct mbox_test_device *tdev = dev_get_drvdata(client->dev);
+
+ if (tdev->tx_mmio) {
+ if (tdev->signal)
+ memcpy_toio(tdev->tx_mmio, tdev->message, MBOX_MAX_MSG_LEN);
+ else
+ memcpy_toio(tdev->tx_mmio, message, MBOX_MAX_MSG_LEN);
+ }
+}
+
+static void mbox_test_message_sent(struct mbox_client *client,
+ void *message, int r)
+{
+ if (r)
+ dev_warn(client->dev,
+ "Client: Message could not be sent: %d\n", r);
+ else
+ dev_info(client->dev,
+ "Client: Message sent\n");
+}
+
+static struct mbox_chan *
+mbox_test_request_channel(struct platform_device *pdev, const char *name)
+{
+ struct mbox_client *client;
+ struct mbox_chan *channel;
+
+ client = devm_kzalloc(&pdev->dev, sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return ERR_PTR(-ENOMEM);
+
+ client->dev = &pdev->dev;
+ client->rx_callback = mbox_test_receive_message;
+ client->tx_prepare = mbox_test_prepare_message;
+ client->tx_done = mbox_test_message_sent;
+ client->tx_block = true;
+ client->knows_txdone = false;
+ client->tx_tout = 500;
+
+ channel = mbox_request_channel_byname(client, name);
+ if (IS_ERR(channel)) {
+ dev_warn(&pdev->dev, "Failed to request %s channel\n", name);
+ return NULL;
+ }
+
+ return channel;
+}
+
+static int mbox_test_probe(struct platform_device *pdev)
+{
+ struct mbox_test_device *tdev;
+ struct resource *res;
+ resource_size_t size;
+ int ret;
+
+ tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);
+ if (!tdev)
+ return -ENOMEM;
+
+ /* It's okay for MMIO to be NULL */
+ tdev->tx_mmio = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (PTR_ERR(tdev->tx_mmio) == -EBUSY) {
+ /* if reserved area in SRAM, try just ioremap */
+ size = resource_size(res);
+ tdev->tx_mmio = devm_ioremap(&pdev->dev, res->start, size);
+ } else if (IS_ERR(tdev->tx_mmio)) {
+ tdev->tx_mmio = NULL;
+ }
+
+ /* If specified, second reg entry is Rx MMIO */
+ tdev->rx_mmio = devm_platform_get_and_ioremap_resource(pdev, 1, &res);
+ if (PTR_ERR(tdev->rx_mmio) == -EBUSY) {
+ size = resource_size(res);
+ tdev->rx_mmio = devm_ioremap(&pdev->dev, res->start, size);
+ } else if (IS_ERR(tdev->rx_mmio)) {
+ tdev->rx_mmio = tdev->tx_mmio;
+ }
+
+ tdev->tx_channel = mbox_test_request_channel(pdev, "tx");
+ tdev->rx_channel = mbox_test_request_channel(pdev, "rx");
+
+ if (IS_ERR_OR_NULL(tdev->tx_channel) && IS_ERR_OR_NULL(tdev->rx_channel))
+ return -EPROBE_DEFER;
+
+ /* If Rx is not specified but has Rx MMIO, then Rx = Tx */
+ if (!tdev->rx_channel && (tdev->rx_mmio != tdev->tx_mmio))
+ tdev->rx_channel = tdev->tx_channel;
+
+ tdev->dev = &pdev->dev;
+ platform_set_drvdata(pdev, tdev);
+
+ spin_lock_init(&tdev->lock);
+ mutex_init(&tdev->mutex);
+
+ if (tdev->rx_channel) {
+ tdev->rx_buffer = devm_kzalloc(&pdev->dev,
+ MBOX_MAX_MSG_LEN, GFP_KERNEL);
+ if (!tdev->rx_buffer)
+ return -ENOMEM;
+ }
+
+ ret = mbox_test_add_debugfs(pdev, tdev);
+ if (ret)
+ return ret;
+
+ init_waitqueue_head(&tdev->waitq);
+ dev_info(&pdev->dev, "Successfully registered\n");
+
+ return 0;
+}
+
+static int mbox_test_remove(struct platform_device *pdev)
+{
+ struct mbox_test_device *tdev = platform_get_drvdata(pdev);
+
+ debugfs_remove_recursive(tdev->root_debugfs_dir);
+
+ if (tdev->tx_channel)
+ mbox_free_channel(tdev->tx_channel);
+ if (tdev->rx_channel)
+ mbox_free_channel(tdev->rx_channel);
+
+ return 0;
+}
+
+static const struct of_device_id mbox_test_match[] = {
+ { .compatible = "mailbox-test" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mbox_test_match);
+
+static struct platform_driver mbox_test_driver = {
+ .driver = {
+ .name = "mailbox_test",
+ .of_match_table = mbox_test_match,
+ },
+ .probe = mbox_test_probe,
+ .remove = mbox_test_remove,
+};
+module_platform_driver(mbox_test_driver);
+
+MODULE_DESCRIPTION("Generic Mailbox Testing Facility");
+MODULE_AUTHOR("Lee Jones <lee.jones@linaro.org");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/mailbox-xgene-slimpro.c b/drivers/mailbox/mailbox-xgene-slimpro.c
new file mode 100644
index 000000000..946ea773e
--- /dev/null
+++ b/drivers/mailbox/mailbox-xgene-slimpro.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * APM X-Gene SLIMpro MailBox Driver
+ *
+ * Copyright (c) 2015, Applied Micro Circuits Corporation
+ * Author: Feng Kan fkan@apm.com
+ */
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#define MBOX_CON_NAME "slimpro-mbox"
+#define MBOX_REG_SET_OFFSET 0x1000
+#define MBOX_CNT 8
+#define MBOX_STATUS_AVAIL_MASK BIT(16)
+#define MBOX_STATUS_ACK_MASK BIT(0)
+
+/* Configuration and Status Registers */
+#define REG_DB_IN 0x00
+#define REG_DB_DIN0 0x04
+#define REG_DB_DIN1 0x08
+#define REG_DB_OUT 0x10
+#define REG_DB_DOUT0 0x14
+#define REG_DB_DOUT1 0x18
+#define REG_DB_STAT 0x20
+#define REG_DB_STATMASK 0x24
+
+/**
+ * struct slimpro_mbox_chan - X-Gene SlimPRO mailbox channel information
+ *
+ * @dev: Device to which it is attached
+ * @chan: Pointer to mailbox communication channel
+ * @reg: Base address to access channel registers
+ * @irq: Interrupt number of the channel
+ * @rx_msg: Received message storage
+ */
+struct slimpro_mbox_chan {
+ struct device *dev;
+ struct mbox_chan *chan;
+ void __iomem *reg;
+ int irq;
+ u32 rx_msg[3];
+};
+
+/**
+ * struct slimpro_mbox - X-Gene SlimPRO Mailbox controller data
+ *
+ * X-Gene SlimPRO Mailbox controller has 8 communication channels.
+ * Each channel has a separate IRQ number assigned to it.
+ *
+ * @mb_ctrl: Representation of the communication channel controller
+ * @mc: Array of SlimPRO mailbox channels of the controller
+ * @chans: Array of mailbox communication channels
+ *
+ */
+struct slimpro_mbox {
+ struct mbox_controller mb_ctrl;
+ struct slimpro_mbox_chan mc[MBOX_CNT];
+ struct mbox_chan chans[MBOX_CNT];
+};
+
+static void mb_chan_send_msg(struct slimpro_mbox_chan *mb_chan, u32 *msg)
+{
+ writel(msg[1], mb_chan->reg + REG_DB_DOUT0);
+ writel(msg[2], mb_chan->reg + REG_DB_DOUT1);
+ writel(msg[0], mb_chan->reg + REG_DB_OUT);
+}
+
+static void mb_chan_recv_msg(struct slimpro_mbox_chan *mb_chan)
+{
+ mb_chan->rx_msg[1] = readl(mb_chan->reg + REG_DB_DIN0);
+ mb_chan->rx_msg[2] = readl(mb_chan->reg + REG_DB_DIN1);
+ mb_chan->rx_msg[0] = readl(mb_chan->reg + REG_DB_IN);
+}
+
+static int mb_chan_status_ack(struct slimpro_mbox_chan *mb_chan)
+{
+ u32 val = readl(mb_chan->reg + REG_DB_STAT);
+
+ if (val & MBOX_STATUS_ACK_MASK) {
+ writel(MBOX_STATUS_ACK_MASK, mb_chan->reg + REG_DB_STAT);
+ return 1;
+ }
+ return 0;
+}
+
+static int mb_chan_status_avail(struct slimpro_mbox_chan *mb_chan)
+{
+ u32 val = readl(mb_chan->reg + REG_DB_STAT);
+
+ if (val & MBOX_STATUS_AVAIL_MASK) {
+ mb_chan_recv_msg(mb_chan);
+ writel(MBOX_STATUS_AVAIL_MASK, mb_chan->reg + REG_DB_STAT);
+ return 1;
+ }
+ return 0;
+}
+
+static irqreturn_t slimpro_mbox_irq(int irq, void *id)
+{
+ struct slimpro_mbox_chan *mb_chan = id;
+
+ if (mb_chan_status_ack(mb_chan))
+ mbox_chan_txdone(mb_chan->chan, 0);
+
+ if (mb_chan_status_avail(mb_chan))
+ mbox_chan_received_data(mb_chan->chan, mb_chan->rx_msg);
+
+ return IRQ_HANDLED;
+}
+
+static int slimpro_mbox_send_data(struct mbox_chan *chan, void *msg)
+{
+ struct slimpro_mbox_chan *mb_chan = chan->con_priv;
+
+ mb_chan_send_msg(mb_chan, msg);
+ return 0;
+}
+
+static int slimpro_mbox_startup(struct mbox_chan *chan)
+{
+ struct slimpro_mbox_chan *mb_chan = chan->con_priv;
+ int rc;
+ u32 val;
+
+ rc = devm_request_irq(mb_chan->dev, mb_chan->irq, slimpro_mbox_irq, 0,
+ MBOX_CON_NAME, mb_chan);
+ if (unlikely(rc)) {
+ dev_err(mb_chan->dev, "failed to register mailbox interrupt %d\n",
+ mb_chan->irq);
+ return rc;
+ }
+
+ /* Enable HW interrupt */
+ writel(MBOX_STATUS_ACK_MASK | MBOX_STATUS_AVAIL_MASK,
+ mb_chan->reg + REG_DB_STAT);
+ /* Unmask doorbell status interrupt */
+ val = readl(mb_chan->reg + REG_DB_STATMASK);
+ val &= ~(MBOX_STATUS_ACK_MASK | MBOX_STATUS_AVAIL_MASK);
+ writel(val, mb_chan->reg + REG_DB_STATMASK);
+
+ return 0;
+}
+
+static void slimpro_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct slimpro_mbox_chan *mb_chan = chan->con_priv;
+ u32 val;
+
+ /* Mask doorbell status interrupt */
+ val = readl(mb_chan->reg + REG_DB_STATMASK);
+ val |= (MBOX_STATUS_ACK_MASK | MBOX_STATUS_AVAIL_MASK);
+ writel(val, mb_chan->reg + REG_DB_STATMASK);
+
+ devm_free_irq(mb_chan->dev, mb_chan->irq, mb_chan);
+}
+
+static const struct mbox_chan_ops slimpro_mbox_ops = {
+ .send_data = slimpro_mbox_send_data,
+ .startup = slimpro_mbox_startup,
+ .shutdown = slimpro_mbox_shutdown,
+};
+
+static int slimpro_mbox_probe(struct platform_device *pdev)
+{
+ struct slimpro_mbox *ctx;
+ void __iomem *mb_base;
+ int rc;
+ int i;
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(struct slimpro_mbox), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ctx);
+
+ mb_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mb_base))
+ return PTR_ERR(mb_base);
+
+ /* Setup mailbox links */
+ for (i = 0; i < MBOX_CNT; i++) {
+ ctx->mc[i].irq = platform_get_irq(pdev, i);
+ if (ctx->mc[i].irq < 0) {
+ if (i == 0) {
+ dev_err(&pdev->dev, "no available IRQ\n");
+ return -EINVAL;
+ }
+ dev_info(&pdev->dev, "no IRQ for channel %d\n", i);
+ break;
+ }
+
+ ctx->mc[i].dev = &pdev->dev;
+ ctx->mc[i].reg = mb_base + i * MBOX_REG_SET_OFFSET;
+ ctx->mc[i].chan = &ctx->chans[i];
+ ctx->chans[i].con_priv = &ctx->mc[i];
+ }
+
+ /* Setup mailbox controller */
+ ctx->mb_ctrl.dev = &pdev->dev;
+ ctx->mb_ctrl.chans = ctx->chans;
+ ctx->mb_ctrl.txdone_irq = true;
+ ctx->mb_ctrl.ops = &slimpro_mbox_ops;
+ ctx->mb_ctrl.num_chans = i;
+
+ rc = devm_mbox_controller_register(&pdev->dev, &ctx->mb_ctrl);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "APM X-Gene SLIMpro MailBox registration failed: %d\n", rc);
+ return rc;
+ }
+
+ dev_info(&pdev->dev, "APM X-Gene SLIMpro MailBox registered\n");
+ return 0;
+}
+
+static const struct of_device_id slimpro_of_match[] = {
+ {.compatible = "apm,xgene-slimpro-mbox" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, slimpro_of_match);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id slimpro_acpi_ids[] = {
+ {"APMC0D01", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, slimpro_acpi_ids);
+#endif
+
+static struct platform_driver slimpro_mbox_driver = {
+ .probe = slimpro_mbox_probe,
+ .driver = {
+ .name = "xgene-slimpro-mbox",
+ .of_match_table = of_match_ptr(slimpro_of_match),
+ .acpi_match_table = ACPI_PTR(slimpro_acpi_ids)
+ },
+};
+
+static int __init slimpro_mbox_init(void)
+{
+ return platform_driver_register(&slimpro_mbox_driver);
+}
+
+static void __exit slimpro_mbox_exit(void)
+{
+ platform_driver_unregister(&slimpro_mbox_driver);
+}
+
+subsys_initcall(slimpro_mbox_init);
+module_exit(slimpro_mbox_exit);
+
+MODULE_DESCRIPTION("APM X-Gene SLIMpro Mailbox Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
new file mode 100644
index 000000000..ebff3baf3
--- /dev/null
+++ b/drivers/mailbox/mailbox.c
@@ -0,0 +1,666 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Mailbox: Common code for Mailbox controllers and users
+ *
+ * Copyright (C) 2013-2014 Linaro Ltd.
+ * Author: Jassi Brar <jassisinghbrar@gmail.com>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/bitops.h>
+#include <linux/mailbox_client.h>
+#include <linux/mailbox_controller.h>
+#include <linux/of.h>
+
+#include "mailbox.h"
+
+static LIST_HEAD(mbox_cons);
+static DEFINE_MUTEX(con_mutex);
+
+static int add_to_rbuf(struct mbox_chan *chan, void *mssg)
+{
+ int idx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ /* See if there is any space left */
+ if (chan->msg_count == MBOX_TX_QUEUE_LEN) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -ENOBUFS;
+ }
+
+ idx = chan->msg_free;
+ chan->msg_data[idx] = mssg;
+ chan->msg_count++;
+
+ if (idx == MBOX_TX_QUEUE_LEN - 1)
+ chan->msg_free = 0;
+ else
+ chan->msg_free++;
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return idx;
+}
+
+static void msg_submit(struct mbox_chan *chan)
+{
+ unsigned count, idx;
+ unsigned long flags;
+ void *data;
+ int err = -EBUSY;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ if (!chan->msg_count || chan->active_req)
+ goto exit;
+
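+ /*
+ * chan->msg_free points at the next free slot, so the oldest queued
+ * message sits msg_count slots behind it, modulo MBOX_TX_QUEUE_LEN;
+ * the index arithmetic below computes that wrap-around by hand.
+ */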
+ count = chan->msg_count;
+ idx = chan->msg_free;
+ if (idx >= count)
+ idx -= count;
+ else
+ idx += MBOX_TX_QUEUE_LEN - count;
+
+ data = chan->msg_data[idx];
+
+ if (chan->cl->tx_prepare)
+ chan->cl->tx_prepare(chan->cl, data);
+ /* Try to submit a message to the MBOX controller */
+ err = chan->mbox->ops->send_data(chan, data);
+ if (!err) {
+ chan->active_req = data;
+ chan->msg_count--;
+ }
+exit:
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ if (!err && (chan->txdone_method & TXDONE_BY_POLL)) {
+ /* kick start the timer immediately to avoid delays */
+ spin_lock_irqsave(&chan->mbox->poll_hrt_lock, flags);
+ hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
+ spin_unlock_irqrestore(&chan->mbox->poll_hrt_lock, flags);
+ }
+}
+
+static void tx_tick(struct mbox_chan *chan, int r)
+{
+ unsigned long flags;
+ void *mssg;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ mssg = chan->active_req;
+ chan->active_req = NULL;
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ /* Submit next message */
+ msg_submit(chan);
+
+ if (!mssg)
+ return;
+
+ /* Notify the client */
+ if (chan->cl->tx_done)
+ chan->cl->tx_done(chan->cl, mssg, r);
+
+ if (r != -ETIME && chan->cl->tx_block)
+ complete(&chan->tx_complete);
+}
+
+static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer)
+{
+ struct mbox_controller *mbox =
+ container_of(hrtimer, struct mbox_controller, poll_hrt);
+ bool txdone, resched = false;
+ int i;
+ unsigned long flags;
+
+ for (i = 0; i < mbox->num_chans; i++) {
+ struct mbox_chan *chan = &mbox->chans[i];
+
+ if (chan->active_req && chan->cl) {
+ txdone = chan->mbox->ops->last_tx_done(chan);
+ if (txdone)
+ tx_tick(chan, 0);
+ else
+ resched = true;
+ }
+ }
+
+ if (resched) {
+ spin_lock_irqsave(&mbox->poll_hrt_lock, flags);
+ if (!hrtimer_is_queued(hrtimer))
+ hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period));
+ spin_unlock_irqrestore(&mbox->poll_hrt_lock, flags);
+
+ return HRTIMER_RESTART;
+ }
+ return HRTIMER_NORESTART;
+}
+
+/**
+ * mbox_chan_received_data - A way for controller driver to push data
+ * received from remote to the upper layer.
+ * @chan: Pointer to the mailbox channel on which RX happened.
+ * @mssg: Client specific message typecasted as void *
+ *
+ * After startup and before shutdown any data received on the chan
+ * is passed on to the API via atomic mbox_chan_received_data().
+ * The controller should ACK the RX only after this call returns.
+ */
+void mbox_chan_received_data(struct mbox_chan *chan, void *mssg)
+{
+ /* No buffering of the received data */
+ if (chan->cl->rx_callback)
+ chan->cl->rx_callback(chan->cl, mssg);
+}
+EXPORT_SYMBOL_GPL(mbox_chan_received_data);
+
+/**
+ * mbox_chan_txdone - A way for controller driver to notify the
+ * framework that the last TX has completed.
+ * @chan: Pointer to the mailbox chan on which TX happened.
+ * @r: Status of last TX - OK or ERROR
+ *
+ * The controller that has IRQ for TX ACK calls this atomic API
+ * to tick the TX state machine. It works only if txdone_irq
+ * is set by the controller.
+ */
+void mbox_chan_txdone(struct mbox_chan *chan, int r)
+{
+ if (unlikely(!(chan->txdone_method & TXDONE_BY_IRQ))) {
+ dev_err(chan->mbox->dev,
+ "Controller can't run the TX ticker\n");
+ return;
+ }
+
+ tx_tick(chan, r);
+}
+EXPORT_SYMBOL_GPL(mbox_chan_txdone);
+
+/**
+ * mbox_client_txdone - The way for a client to run the TX state machine.
+ * @chan: Mailbox channel assigned to this client.
+ * @r: Success status of last transmission.
+ *
+ * The client/protocol has received an 'ACK' packet and notifies the
+ * API that the last packet was sent successfully. This only works
+ * if the controller can't sense TX-Done itself.
+ */
+void mbox_client_txdone(struct mbox_chan *chan, int r)
+{
+ if (unlikely(!(chan->txdone_method & TXDONE_BY_ACK))) {
+ dev_err(chan->mbox->dev, "Client can't run the TX ticker\n");
+ return;
+ }
+
+ tx_tick(chan, r);
+}
+EXPORT_SYMBOL_GPL(mbox_client_txdone);
+
+/**
+ * mbox_client_peek_data - A way for client driver to pull data
+ * received from remote by the controller.
+ * @chan: Mailbox channel assigned to this client.
+ *
+ * A poke to the controller driver to check for any received data.
+ * The data is actually passed on to the client via
+ * mbox_chan_received_data().
+ * The call can be made from atomic context, so the controller's
+ * implementation of peek_data() must not sleep.
+ *
+ * Return: True, if controller has, and is going to push after this,
+ * some data.
+ * False, if controller doesn't have any data to be read.
+ */
+bool mbox_client_peek_data(struct mbox_chan *chan)
+{
+ if (chan->mbox->ops->peek_data)
+ return chan->mbox->ops->peek_data(chan);
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(mbox_client_peek_data);
+
+/**
+ * mbox_send_message - For client to submit a message to be
+ * sent to the remote.
+ * @chan: Mailbox channel assigned to this client.
+ * @mssg: Client specific message typecasted as void *.
+ *
+ * For client to submit data to the controller destined for a remote
+ * processor. If the client had set 'tx_block', the call will return
+ * either when the remote receives the data or when 'tx_tout' millisecs
+ * run out.
+ * In non-blocking mode, the requests are buffered by the API and a
+ * non-negative token is returned for each queued request. If the request
+ * is not queued, a negative token is returned. Upon failure or successful
+ * TX, the API calls 'tx_done' from atomic context, from which the client
+ * could submit yet another request.
+ * The pointer to the message should be preserved until it has been sent
+ * over the chan, i.e., until tx_done() is called.
+ * This function could be called from atomic context as it simply
+ * queues the data and returns a token against the request.
+ *
+ * Return: Non-negative integer for successful submission (non-blocking mode)
+ * or transmission over chan (blocking mode).
+ * Negative value denotes failure.
+ */
+int mbox_send_message(struct mbox_chan *chan, void *mssg)
+{
+ int t;
+
+ if (!chan || !chan->cl)
+ return -EINVAL;
+
+ t = add_to_rbuf(chan, mssg);
+ if (t < 0) {
+ dev_err(chan->mbox->dev, "Try increasing MBOX_TX_QUEUE_LEN\n");
+ return t;
+ }
+
+ msg_submit(chan);
+
+ if (chan->cl->tx_block) {
+ unsigned long wait;
+ int ret;
+
+ if (!chan->cl->tx_tout) /* wait forever */
+ wait = msecs_to_jiffies(3600000);
+ else
+ wait = msecs_to_jiffies(chan->cl->tx_tout);
+
+ ret = wait_for_completion_timeout(&chan->tx_complete, wait);
+ if (ret == 0) {
+ t = -ETIME;
+ tx_tick(chan, t);
+ }
+ }
+
+ return t;
+}
+EXPORT_SYMBOL_GPL(mbox_send_message);
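+
+/*
+ * Illustrative client-side usage only (not part of this file); the names
+ * demo_attach and demo_rx_callback are hypothetical. A typical client fills
+ * in a struct mbox_client, requests a channel and then submits messages,
+ * much like the mailbox-test driver does:
+ *
+ *	static void demo_rx_callback(struct mbox_client *cl, void *msg)
+ *	{
+ *		dev_info(cl->dev, "received a message\n");
+ *	}
+ *
+ *	static int demo_attach(struct device *dev, struct mbox_client *cl,
+ *			       void *data)
+ *	{
+ *		struct mbox_chan *chan;
+ *		int ret;
+ *
+ *		cl->dev = dev;
+ *		cl->rx_callback = demo_rx_callback;
+ *		cl->tx_block = true;
+ *		cl->tx_tout = 500;	// milliseconds
+ *
+ *		chan = mbox_request_channel(cl, 0);
+ *		if (IS_ERR(chan))
+ *			return PTR_ERR(chan);
+ *
+ *		ret = mbox_send_message(chan, data);
+ *		mbox_free_channel(chan);
+ *		return ret < 0 ? ret : 0;
+ *	}
+ */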
+
+/**
+ * mbox_flush - flush a mailbox channel
+ * @chan: mailbox channel to flush
+ * @timeout: time, in milliseconds, to allow the flush operation to succeed
+ *
+ * Mailbox controllers that need to work in atomic context can implement the
+ * ->flush() callback to busy loop until a transmission has been completed.
+ * The implementation must call mbox_chan_txdone() upon success. Clients can
+ * call the mbox_flush() function at any time after mbox_send_message() to
+ * flush the transmission. After the function returns success, the mailbox
+ * transmission is guaranteed to have completed.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int mbox_flush(struct mbox_chan *chan, unsigned long timeout)
+{
+ int ret;
+
+ if (!chan->mbox->ops->flush)
+ return -ENOTSUPP;
+
+ ret = chan->mbox->ops->flush(chan, timeout);
+ if (ret < 0)
+ tx_tick(chan, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mbox_flush);
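+
+/*
+ * A minimal sketch of a controller-side flush callback, assuming the
+ * hardware exposes some way to tell that the last transmission has
+ * completed (my_chan_tx_done() below is hypothetical). It busy-loops for
+ * at most 'timeout' milliseconds and reports completion via
+ * mbox_chan_txdone(), as required by the documentation above:
+ *
+ *	static int my_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
+ *	{
+ *		while (!my_chan_tx_done(chan)) {
+ *			if (!timeout--)
+ *				return -ETIME;
+ *			udelay(1000);
+ *		}
+ *		mbox_chan_txdone(chan, 0);
+ *		return 0;
+ *	}
+ */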
+
+static int __mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
+{
+ struct device *dev = cl->dev;
+ unsigned long flags;
+ int ret;
+
+ if (chan->cl || !try_module_get(chan->mbox->dev->driver->owner)) {
+ dev_dbg(dev, "%s: mailbox not free\n", __func__);
+ return -EBUSY;
+ }
+
+ spin_lock_irqsave(&chan->lock, flags);
+ chan->msg_free = 0;
+ chan->msg_count = 0;
+ chan->active_req = NULL;
+ chan->cl = cl;
+ init_completion(&chan->tx_complete);
+
+ if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
+ chan->txdone_method = TXDONE_BY_ACK;
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ if (chan->mbox->ops->startup) {
+ ret = chan->mbox->ops->startup(chan);
+
+ if (ret) {
+ dev_err(dev, "Unable to startup the chan (%d)\n", ret);
+ mbox_free_channel(chan);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * mbox_bind_client - Request a mailbox channel.
+ * @chan: The mailbox channel to bind the client to.
+ * @cl: Identity of the client requesting the channel.
+ *
+ * The Client specifies its requirements and capabilities while asking for
+ * a mailbox channel. It can't be called from atomic context.
+ * The channel is exclusively allocated and can't be used by another
+ * client before the owner calls mbox_free_channel.
+ * After assignment, any packet received on this channel will be
+ * handed over to the client via the 'rx_callback'.
+ * The framework holds a reference to the client, so the mbox_client
+ * structure shouldn't be modified until mbox_free_channel() returns.
+ *
+ * Return: 0 if the channel was assigned to the client successfully.
+ * <0 for request failure.
+ */
+int mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
+{
+ int ret;
+
+ mutex_lock(&con_mutex);
+ ret = __mbox_bind_client(chan, cl);
+ mutex_unlock(&con_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mbox_bind_client);
+
+/**
+ * mbox_request_channel - Request a mailbox channel.
+ * @cl: Identity of the client requesting the channel.
+ * @index: Index of mailbox specifier in 'mboxes' property.
+ *
+ * The Client specifies its requirements and capabilities while asking for
+ * a mailbox channel. It can't be called from atomic context.
+ * The channel is exclusively allocated and can't be used by another
+ * client before the owner calls mbox_free_channel.
+ * After assignment, any packet received on this channel will be
+ * handed over to the client via the 'rx_callback'.
+ * The framework holds a reference to the client, so the mbox_client
+ * structure shouldn't be modified until mbox_free_channel() returns.
+ *
+ * Return: Pointer to the channel assigned to the client if successful.
+ * ERR_PTR for request failure.
+ */
+struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
+{
+ struct device *dev = cl->dev;
+ struct mbox_controller *mbox;
+ struct of_phandle_args spec;
+ struct mbox_chan *chan;
+ int ret;
+
+ if (!dev || !dev->of_node) {
+ pr_debug("%s: No owner device node\n", __func__);
+ return ERR_PTR(-ENODEV);
+ }
+
+ mutex_lock(&con_mutex);
+
+ if (of_parse_phandle_with_args(dev->of_node, "mboxes",
+ "#mbox-cells", index, &spec)) {
+ dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__);
+ mutex_unlock(&con_mutex);
+ return ERR_PTR(-ENODEV);
+ }
+
+ chan = ERR_PTR(-EPROBE_DEFER);
+ list_for_each_entry(mbox, &mbox_cons, node)
+ if (mbox->dev->of_node == spec.np) {
+ chan = mbox->of_xlate(mbox, &spec);
+ if (!IS_ERR(chan))
+ break;
+ }
+
+ of_node_put(spec.np);
+
+ if (IS_ERR(chan)) {
+ mutex_unlock(&con_mutex);
+ return chan;
+ }
+
+ ret = __mbox_bind_client(chan, cl);
+ if (ret)
+ chan = ERR_PTR(ret);
+
+ mutex_unlock(&con_mutex);
+ return chan;
+}
+EXPORT_SYMBOL_GPL(mbox_request_channel);
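For DT-based clients this request-by-index path is the usual entry point. A hedged sketch of a probe routine that requests the first specifier in its "mboxes" property, posts one message and releases the channel (demo_probe and the message value are hypothetical; assumes <linux/mailbox_client.h> and <linux/platform_device.h>):

static int demo_probe(struct platform_device *pdev)
{
	struct mbox_client *cl;
	struct mbox_chan *chan;
	u32 msg = 0x1;
	int ret;

	cl = devm_kzalloc(&pdev->dev, sizeof(*cl), GFP_KERNEL);
	if (!cl)
		return -ENOMEM;

	cl->dev = &pdev->dev;
	cl->tx_block = true;
	cl->tx_tout = 100;	/* ms */

	/* Index 0: the first specifier in this device's "mboxes" property */
	chan = mbox_request_channel(cl, 0);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = mbox_send_message(chan, &msg);
	if (ret < 0)
		dev_err(&pdev->dev, "failed to send message: %d\n", ret);

	mbox_free_channel(chan);
	return ret < 0 ? ret : 0;
}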
+
+struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
+ const char *name)
+{
+ struct device_node *np = cl->dev->of_node;
+ struct property *prop;
+ const char *mbox_name;
+ int index = 0;
+
+ if (!np) {
+ dev_err(cl->dev, "%s() currently only supports DT\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!of_get_property(np, "mbox-names", NULL)) {
+ dev_err(cl->dev,
+ "%s() requires an \"mbox-names\" property\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ of_property_for_each_string(np, "mbox-names", prop, mbox_name) {
+ if (!strncmp(name, mbox_name, strlen(name)))
+ return mbox_request_channel(cl, index);
+ index++;
+ }
+
+ dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
+ __func__, name);
+ return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(mbox_request_channel_byname);
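When the client's node also carries an "mbox-names" property, the same channel can be requested by name instead of by position. Building on the previous sketch (same hypothetical cl), with purely illustrative property values:

	/*
	 * Assuming the client's DT node contains something like:
	 *     mboxes = <&mbox 0>, <&mbox 1>;
	 *     mbox-names = "tx", "rx";
	 * the "rx" channel can be requested directly by name:
	 */
	chan = mbox_request_channel_byname(cl, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);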
+
+/**
+ * mbox_free_channel - The client relinquishes control of a mailbox
+ * channel by this call.
+ * @chan: The mailbox channel to be freed.
+ */
+void mbox_free_channel(struct mbox_chan *chan)
+{
+ unsigned long flags;
+
+ if (!chan || !chan->cl)
+ return;
+
+ if (chan->mbox->ops->shutdown)
+ chan->mbox->ops->shutdown(chan);
+
+ /* The queued TX requests are simply aborted, no callbacks are made */
+ spin_lock_irqsave(&chan->lock, flags);
+ chan->cl = NULL;
+ chan->active_req = NULL;
+ if (chan->txdone_method == TXDONE_BY_ACK)
+ chan->txdone_method = TXDONE_BY_POLL;
+
+ module_put(chan->mbox->dev->driver->owner);
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+EXPORT_SYMBOL_GPL(mbox_free_channel);
+
+static struct mbox_chan *
+of_mbox_index_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *sp)
+{
+ int ind = sp->args[0];
+
+ if (ind >= mbox->num_chans)
+ return ERR_PTR(-EINVAL);
+
+ return &mbox->chans[ind];
+}
+
+/**
+ * mbox_controller_register - Register the mailbox controller
+ * @mbox: Pointer to the mailbox controller.
+ *
+ * The controller driver registers its communication channels with the
+ * framework, making them available for clients to request.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+int mbox_controller_register(struct mbox_controller *mbox)
+{
+ int i, txdone;
+
+ /* Sanity check */
+ if (!mbox || !mbox->dev || !mbox->ops || !mbox->num_chans)
+ return -EINVAL;
+
+ if (mbox->txdone_irq)
+ txdone = TXDONE_BY_IRQ;
+ else if (mbox->txdone_poll)
+ txdone = TXDONE_BY_POLL;
+ else /* It has to be ACK then */
+ txdone = TXDONE_BY_ACK;
+
+ if (txdone == TXDONE_BY_POLL) {
+
+ if (!mbox->ops->last_tx_done) {
+ dev_err(mbox->dev, "last_tx_done method is absent\n");
+ return -EINVAL;
+ }
+
+ hrtimer_init(&mbox->poll_hrt, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ mbox->poll_hrt.function = txdone_hrtimer;
+ spin_lock_init(&mbox->poll_hrt_lock);
+ }
+
+ for (i = 0; i < mbox->num_chans; i++) {
+ struct mbox_chan *chan = &mbox->chans[i];
+
+ chan->cl = NULL;
+ chan->mbox = mbox;
+ chan->txdone_method = txdone;
+ spin_lock_init(&chan->lock);
+ }
+
+ if (!mbox->of_xlate)
+ mbox->of_xlate = of_mbox_index_xlate;
+
+ mutex_lock(&con_mutex);
+ list_add_tail(&mbox->node, &mbox_cons);
+ mutex_unlock(&con_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mbox_controller_register);
+
+/**
+ * mbox_controller_unregister - Unregister the mailbox controller
+ * @mbox: Pointer to the mailbox controller.
+ */
+void mbox_controller_unregister(struct mbox_controller *mbox)
+{
+ int i;
+
+ if (!mbox)
+ return;
+
+ mutex_lock(&con_mutex);
+
+ list_del(&mbox->node);
+
+ for (i = 0; i < mbox->num_chans; i++)
+ mbox_free_channel(&mbox->chans[i]);
+
+ if (mbox->txdone_poll)
+ hrtimer_cancel(&mbox->poll_hrt);
+
+ mutex_unlock(&con_mutex);
+}
+EXPORT_SYMBOL_GPL(mbox_controller_unregister);
+
+static void __devm_mbox_controller_unregister(struct device *dev, void *res)
+{
+ struct mbox_controller **mbox = res;
+
+ mbox_controller_unregister(*mbox);
+}
+
+static int devm_mbox_controller_match(struct device *dev, void *res, void *data)
+{
+ struct mbox_controller **mbox = res;
+
+ if (WARN_ON(!mbox || !*mbox))
+ return 0;
+
+ return *mbox == data;
+}
+
+/**
+ * devm_mbox_controller_register() - managed mbox_controller_register()
+ * @dev: device owning the mailbox controller being registered
+ * @mbox: mailbox controller being registered
+ *
+ * This function adds a device-managed resource that will make sure that the
+ * mailbox controller, which is registered using mbox_controller_register()
+ * as part of this function, will be unregistered along with the rest of
+ * device-managed resources upon driver probe failure or driver removal.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int devm_mbox_controller_register(struct device *dev,
+ struct mbox_controller *mbox)
+{
+ struct mbox_controller **ptr;
+ int err;
+
+ ptr = devres_alloc(__devm_mbox_controller_unregister, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ err = mbox_controller_register(mbox);
+ if (err < 0) {
+ devres_free(ptr);
+ return err;
+ }
+
+ devres_add(dev, ptr);
+ *ptr = mbox;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_mbox_controller_register);
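On the provider side, a controller driver typically fills in a struct mbox_controller during probe and registers it through this managed helper so teardown is automatic. A rough sketch for a polled-TX controller follows; the demo_* ops are hypothetical, but note that last_tx_done is mandatory whenever txdone_poll is set (see the check in mbox_controller_register() above):

static const struct mbox_chan_ops demo_chan_ops = {
	.send_data	= demo_send_data,	/* hypothetical: writes the message to HW */
	.last_tx_done	= demo_last_tx_done,	/* required because txdone_poll is set */
};

static int demo_ctrl_probe(struct platform_device *pdev)
{
	struct mbox_controller *mbox;

	mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	mbox->dev = &pdev->dev;
	mbox->ops = &demo_chan_ops;
	mbox->num_chans = 4;
	mbox->chans = devm_kcalloc(&pdev->dev, mbox->num_chans,
				   sizeof(*mbox->chans), GFP_KERNEL);
	if (!mbox->chans)
		return -ENOMEM;

	/* TX completion is detected by polling last_tx_done() */
	mbox->txdone_irq = false;
	mbox->txdone_poll = true;
	mbox->txpoll_period = 1;	/* ms */

	return devm_mbox_controller_register(&pdev->dev, mbox);
}

Because of_xlate is left unset in this sketch, the framework falls back to the index-based translation shown in of_mbox_index_xlate() above.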
+
+/**
+ * devm_mbox_controller_unregister() - managed mbox_controller_unregister()
+ * @dev: device owning the mailbox controller being unregistered
+ * @mbox: mailbox controller being unregistered
+ *
+ * This function unregisters the mailbox controller and removes the device-
+ * managed resource that was set up to automatically unregister the mailbox
+ * controller on driver probe failure or driver removal. It's typically not
+ * necessary to call this function.
+ */
+void devm_mbox_controller_unregister(struct device *dev, struct mbox_controller *mbox)
+{
+ WARN_ON(devres_release(dev, __devm_mbox_controller_unregister,
+ devm_mbox_controller_match, mbox));
+}
+EXPORT_SYMBOL_GPL(devm_mbox_controller_unregister);
diff --git a/drivers/mailbox/mailbox.h b/drivers/mailbox/mailbox.h
new file mode 100644
index 000000000..046d6d258
--- /dev/null
+++ b/drivers/mailbox/mailbox.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __MAILBOX_H
+#define __MAILBOX_H
+
+#define TXDONE_BY_IRQ BIT(0) /* controller has remote RTR irq */
+#define TXDONE_BY_POLL BIT(1) /* controller can read status of last TX */
+#define TXDONE_BY_ACK BIT(2) /* S/W ACK received by Client ticks the TX */
+
+#endif /* __MAILBOX_H */
diff --git a/drivers/mailbox/mtk-adsp-mailbox.c b/drivers/mailbox/mtk-adsp-mailbox.c
new file mode 100644
index 000000000..91487aa4d
--- /dev/null
+++ b/drivers/mailbox/mtk-adsp-mailbox.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 MediaTek Corporation. All rights reserved.
+ * Author: Allen-KH Cheng <allen-kh.cheng@mediatek.com>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+struct mtk_adsp_mbox_priv {
+ struct device *dev;
+ struct mbox_controller mbox;
+ void __iomem *va_mboxreg;
+ const struct mtk_adsp_mbox_cfg *cfg;
+};
+
+struct mtk_adsp_mbox_cfg {
+ u32 set_in;
+ u32 set_out;
+ u32 clr_in;
+ u32 clr_out;
+};
+
+static inline struct mtk_adsp_mbox_priv *get_mtk_adsp_mbox_priv(struct mbox_controller *mbox)
+{
+ return container_of(mbox, struct mtk_adsp_mbox_priv, mbox);
+}
+
+static irqreturn_t mtk_adsp_mbox_irq(int irq, void *data)
+{
+ struct mbox_chan *chan = data;
+ struct mtk_adsp_mbox_priv *priv = get_mtk_adsp_mbox_priv(chan->mbox);
+ u32 op = readl(priv->va_mboxreg + priv->cfg->set_out);
+
+ writel(op, priv->va_mboxreg + priv->cfg->clr_out);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t mtk_adsp_mbox_isr(int irq, void *data)
+{
+ struct mbox_chan *chan = data;
+
+ mbox_chan_received_data(chan, NULL);
+
+ return IRQ_HANDLED;
+}
+
+static struct mbox_chan *mtk_adsp_mbox_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *sp)
+{
+ return mbox->chans;
+}
+
+static int mtk_adsp_mbox_startup(struct mbox_chan *chan)
+{
+ struct mtk_adsp_mbox_priv *priv = get_mtk_adsp_mbox_priv(chan->mbox);
+
+ /* Clear ADSP mbox command */
+ writel(0xFFFFFFFF, priv->va_mboxreg + priv->cfg->clr_in);
+ writel(0xFFFFFFFF, priv->va_mboxreg + priv->cfg->clr_out);
+
+ return 0;
+}
+
+static void mtk_adsp_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct mtk_adsp_mbox_priv *priv = get_mtk_adsp_mbox_priv(chan->mbox);
+
+ /* Clear ADSP mbox command */
+ writel(0xFFFFFFFF, priv->va_mboxreg + priv->cfg->clr_in);
+ writel(0xFFFFFFFF, priv->va_mboxreg + priv->cfg->clr_out);
+}
+
+static int mtk_adsp_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct mtk_adsp_mbox_priv *priv = get_mtk_adsp_mbox_priv(chan->mbox);
+ u32 *msg = data;
+
+ writel(*msg, priv->va_mboxreg + priv->cfg->set_in);
+
+ return 0;
+}
+
+static bool mtk_adsp_mbox_last_tx_done(struct mbox_chan *chan)
+{
+ struct mtk_adsp_mbox_priv *priv = get_mtk_adsp_mbox_priv(chan->mbox);
+
+ return readl(priv->va_mboxreg + priv->cfg->set_in) == 0;
+}
+
+static const struct mbox_chan_ops mtk_adsp_mbox_chan_ops = {
+ .send_data = mtk_adsp_mbox_send_data,
+ .startup = mtk_adsp_mbox_startup,
+ .shutdown = mtk_adsp_mbox_shutdown,
+ .last_tx_done = mtk_adsp_mbox_last_tx_done,
+};
+
+static int mtk_adsp_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_adsp_mbox_priv *priv;
+ const struct mtk_adsp_mbox_cfg *cfg;
+ struct mbox_controller *mbox;
+ int ret, irq;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mbox = &priv->mbox;
+ mbox->dev = dev;
+ mbox->ops = &mtk_adsp_mbox_chan_ops;
+ mbox->txdone_irq = false;
+ mbox->txdone_poll = true;
+ mbox->of_xlate = mtk_adsp_mbox_xlate;
+ mbox->num_chans = 1;
+ mbox->chans = devm_kzalloc(dev, sizeof(*mbox->chans), GFP_KERNEL);
+ if (!mbox->chans)
+ return -ENOMEM;
+
+ priv->va_mboxreg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->va_mboxreg))
+ return PTR_ERR(priv->va_mboxreg);
+
+ cfg = of_device_get_match_data(dev);
+ if (!cfg)
+ return -EINVAL;
+ priv->cfg = cfg;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_threaded_irq(dev, irq, mtk_adsp_mbox_irq,
+ mtk_adsp_mbox_isr, IRQF_TRIGGER_NONE,
+ dev_name(dev), mbox->chans);
+ if (ret < 0)
+ return ret;
+
+ platform_set_drvdata(pdev, priv);
+
+ return devm_mbox_controller_register(dev, &priv->mbox);
+}
+
+static const struct mtk_adsp_mbox_cfg mt8186_adsp_mbox_cfg = {
+ .set_in = 0x00,
+ .set_out = 0x04,
+ .clr_in = 0x08,
+ .clr_out = 0x0C,
+};
+
+static const struct mtk_adsp_mbox_cfg mt8195_adsp_mbox_cfg = {
+ .set_in = 0x00,
+ .set_out = 0x1c,
+ .clr_in = 0x04,
+ .clr_out = 0x20,
+};
+
+static const struct of_device_id mtk_adsp_mbox_of_match[] = {
+ { .compatible = "mediatek,mt8186-adsp-mbox", .data = &mt8186_adsp_mbox_cfg },
+ { .compatible = "mediatek,mt8195-adsp-mbox", .data = &mt8195_adsp_mbox_cfg },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtk_adsp_mbox_of_match);
+
+static struct platform_driver mtk_adsp_mbox_driver = {
+ .probe = mtk_adsp_mbox_probe,
+ .driver = {
+ .name = "mtk_adsp_mbox",
+ .of_match_table = mtk_adsp_mbox_of_match,
+ },
+};
+module_platform_driver(mtk_adsp_mbox_driver);
+
+MODULE_AUTHOR("Allen-KH Cheng <Allen-KH.Cheng@mediatek.com>");
+MODULE_DESCRIPTION("MTK ADSP Mailbox Controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
new file mode 100644
index 000000000..4d62b07c1
--- /dev/null
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -0,0 +1,729 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox/mtk-cmdq-mailbox.h>
+#include <linux/of.h>
+
+#define CMDQ_OP_CODE_MASK (0xff << CMDQ_OP_CODE_SHIFT)
+#define CMDQ_NUM_CMD(t) (t->cmd_buf_size / CMDQ_INST_SIZE)
+#define CMDQ_GCE_NUM_MAX (2)
+
+#define CMDQ_CURR_IRQ_STATUS 0x10
+#define CMDQ_SYNC_TOKEN_UPDATE 0x68
+#define CMDQ_THR_SLOT_CYCLES 0x30
+#define CMDQ_THR_BASE 0x100
+#define CMDQ_THR_SIZE 0x80
+#define CMDQ_THR_WARM_RESET 0x00
+#define CMDQ_THR_ENABLE_TASK 0x04
+#define CMDQ_THR_SUSPEND_TASK 0x08
+#define CMDQ_THR_CURR_STATUS 0x0c
+#define CMDQ_THR_IRQ_STATUS 0x10
+#define CMDQ_THR_IRQ_ENABLE 0x14
+#define CMDQ_THR_CURR_ADDR 0x20
+#define CMDQ_THR_END_ADDR 0x24
+#define CMDQ_THR_WAIT_TOKEN 0x30
+#define CMDQ_THR_PRIORITY 0x40
+
+#define GCE_GCTL_VALUE 0x48
+#define GCE_CTRL_BY_SW GENMASK(2, 0)
+#define GCE_DDR_EN GENMASK(18, 16)
+
+#define CMDQ_THR_ACTIVE_SLOT_CYCLES 0x3200
+#define CMDQ_THR_ENABLED 0x1
+#define CMDQ_THR_DISABLED 0x0
+#define CMDQ_THR_SUSPEND 0x1
+#define CMDQ_THR_RESUME 0x0
+#define CMDQ_THR_STATUS_SUSPENDED BIT(1)
+#define CMDQ_THR_DO_WARM_RESET BIT(0)
+#define CMDQ_THR_IRQ_DONE 0x1
+#define CMDQ_THR_IRQ_ERROR 0x12
+#define CMDQ_THR_IRQ_EN (CMDQ_THR_IRQ_ERROR | CMDQ_THR_IRQ_DONE)
+#define CMDQ_THR_IS_WAITING BIT(31)
+
+#define CMDQ_JUMP_BY_OFFSET 0x10000000
+#define CMDQ_JUMP_BY_PA 0x10000001
+
+struct cmdq_thread {
+ struct mbox_chan *chan;
+ void __iomem *base;
+ struct list_head task_busy_list;
+ u32 priority;
+};
+
+struct cmdq_task {
+ struct cmdq *cmdq;
+ struct list_head list_entry;
+ dma_addr_t pa_base;
+ struct cmdq_thread *thread;
+ struct cmdq_pkt *pkt; /* the packet sent from mailbox client */
+};
+
+struct cmdq {
+ struct mbox_controller mbox;
+ void __iomem *base;
+ int irq;
+ u32 irq_mask;
+ const struct gce_plat *pdata;
+ struct cmdq_thread *thread;
+ struct clk_bulk_data clocks[CMDQ_GCE_NUM_MAX];
+ bool suspended;
+};
+
+struct gce_plat {
+ u32 thread_nr;
+ u8 shift;
+ bool control_by_sw;
+ bool sw_ddr_en;
+ u32 gce_num;
+};
+
+static void cmdq_sw_ddr_enable(struct cmdq *cmdq, bool enable)
+{
+ WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));
+
+ if (enable)
+ writel(GCE_DDR_EN | GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE);
+ else
+ writel(GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE);
+
+ clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
+}
+
+u8 cmdq_get_shift_pa(struct mbox_chan *chan)
+{
+ struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox);
+
+ return cmdq->pdata->shift;
+}
+EXPORT_SYMBOL(cmdq_get_shift_pa);
+
+static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread)
+{
+ u32 status;
+
+ writel(CMDQ_THR_SUSPEND, thread->base + CMDQ_THR_SUSPEND_TASK);
+
+ /* If already disabled, treat it as successfully suspended. */
+ if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
+ return 0;
+
+ if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_CURR_STATUS,
+ status, status & CMDQ_THR_STATUS_SUSPENDED, 0, 10)) {
+ dev_err(cmdq->mbox.dev, "suspend GCE thread 0x%x failed\n",
+ (u32)(thread->base - cmdq->base));
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static void cmdq_thread_resume(struct cmdq_thread *thread)
+{
+ writel(CMDQ_THR_RESUME, thread->base + CMDQ_THR_SUSPEND_TASK);
+}
+
+static void cmdq_init(struct cmdq *cmdq)
+{
+ int i;
+ u32 gctl_regval = 0;
+
+ WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));
+ if (cmdq->pdata->control_by_sw)
+ gctl_regval = GCE_CTRL_BY_SW;
+ if (cmdq->pdata->sw_ddr_en)
+ gctl_regval |= GCE_DDR_EN;
+
+ if (gctl_regval)
+ writel(gctl_regval, cmdq->base + GCE_GCTL_VALUE);
+
+ writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
+ for (i = 0; i <= CMDQ_MAX_EVENT; i++)
+ writel(i, cmdq->base + CMDQ_SYNC_TOKEN_UPDATE);
+ clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
+}
+
+static int cmdq_thread_reset(struct cmdq *cmdq, struct cmdq_thread *thread)
+{
+ u32 warm_reset;
+
+ writel(CMDQ_THR_DO_WARM_RESET, thread->base + CMDQ_THR_WARM_RESET);
+ if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_WARM_RESET,
+ warm_reset, !(warm_reset & CMDQ_THR_DO_WARM_RESET),
+ 0, 10)) {
+ dev_err(cmdq->mbox.dev, "reset GCE thread 0x%x failed\n",
+ (u32)(thread->base - cmdq->base));
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static void cmdq_thread_disable(struct cmdq *cmdq, struct cmdq_thread *thread)
+{
+ cmdq_thread_reset(cmdq, thread);
+ writel(CMDQ_THR_DISABLED, thread->base + CMDQ_THR_ENABLE_TASK);
+}
+
+/* notify GCE to re-fetch commands by setting GCE thread PC */
+static void cmdq_thread_invalidate_fetched_data(struct cmdq_thread *thread)
+{
+ writel(readl(thread->base + CMDQ_THR_CURR_ADDR),
+ thread->base + CMDQ_THR_CURR_ADDR);
+}
+
+static void cmdq_task_insert_into_thread(struct cmdq_task *task)
+{
+ struct device *dev = task->cmdq->mbox.dev;
+ struct cmdq_thread *thread = task->thread;
+ struct cmdq_task *prev_task = list_last_entry(
+ &thread->task_busy_list, typeof(*task), list_entry);
+ u64 *prev_task_base = prev_task->pkt->va_base;
+
+ /* let previous task jump to this task */
+ dma_sync_single_for_cpu(dev, prev_task->pa_base,
+ prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
+ prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] =
+ (u64)CMDQ_JUMP_BY_PA << 32 |
+ (task->pa_base >> task->cmdq->pdata->shift);
+ dma_sync_single_for_device(dev, prev_task->pa_base,
+ prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
+
+ cmdq_thread_invalidate_fetched_data(thread);
+}
+
+static bool cmdq_thread_is_in_wfe(struct cmdq_thread *thread)
+{
+ return readl(thread->base + CMDQ_THR_WAIT_TOKEN) & CMDQ_THR_IS_WAITING;
+}
+
+static void cmdq_task_exec_done(struct cmdq_task *task, int sta)
+{
+ struct cmdq_cb_data data;
+
+ data.sta = sta;
+ data.pkt = task->pkt;
+ mbox_chan_received_data(task->thread->chan, &data);
+
+ list_del(&task->list_entry);
+}
+
+static void cmdq_task_handle_error(struct cmdq_task *task)
+{
+ struct cmdq_thread *thread = task->thread;
+ struct cmdq_task *next_task;
+ struct cmdq *cmdq = task->cmdq;
+
+ dev_err(cmdq->mbox.dev, "task 0x%p error\n", task);
+ WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
+ next_task = list_first_entry_or_null(&thread->task_busy_list,
+ struct cmdq_task, list_entry);
+ if (next_task)
+ writel(next_task->pa_base >> cmdq->pdata->shift,
+ thread->base + CMDQ_THR_CURR_ADDR);
+ cmdq_thread_resume(thread);
+}
+
+static void cmdq_thread_irq_handler(struct cmdq *cmdq,
+ struct cmdq_thread *thread)
+{
+ struct cmdq_task *task, *tmp, *curr_task = NULL;
+ u32 curr_pa, irq_flag, task_end_pa;
+ bool err;
+
+ irq_flag = readl(thread->base + CMDQ_THR_IRQ_STATUS);
+ writel(~irq_flag, thread->base + CMDQ_THR_IRQ_STATUS);
+
+ /*
+ * When the ISR calls this function, another CPU core could run
+ * "release task" right before we acquire the spin lock, and thus
+ * reset / disable this GCE thread, so we need to check the enable
+ * bit of this GCE thread.
+ */
+ if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
+ return;
+
+ if (irq_flag & CMDQ_THR_IRQ_ERROR)
+ err = true;
+ else if (irq_flag & CMDQ_THR_IRQ_DONE)
+ err = false;
+ else
+ return;
+
+ curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) << cmdq->pdata->shift;
+
+ list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
+ list_entry) {
+ task_end_pa = task->pa_base + task->pkt->cmd_buf_size;
+ if (curr_pa >= task->pa_base && curr_pa < task_end_pa)
+ curr_task = task;
+
+ if (!curr_task || curr_pa == task_end_pa - CMDQ_INST_SIZE) {
+ cmdq_task_exec_done(task, 0);
+ kfree(task);
+ } else if (err) {
+ cmdq_task_exec_done(task, -ENOEXEC);
+ cmdq_task_handle_error(curr_task);
+ kfree(task);
+ }
+
+ if (curr_task)
+ break;
+ }
+
+ if (list_empty(&thread->task_busy_list)) {
+ cmdq_thread_disable(cmdq, thread);
+ clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
+ }
+}
+
+static irqreturn_t cmdq_irq_handler(int irq, void *dev)
+{
+ struct cmdq *cmdq = dev;
+ unsigned long irq_status, flags = 0L;
+ int bit;
+
+ irq_status = readl(cmdq->base + CMDQ_CURR_IRQ_STATUS) & cmdq->irq_mask;
+ if (!(irq_status ^ cmdq->irq_mask))
+ return IRQ_NONE;
+
+ for_each_clear_bit(bit, &irq_status, cmdq->pdata->thread_nr) {
+ struct cmdq_thread *thread = &cmdq->thread[bit];
+
+ spin_lock_irqsave(&thread->chan->lock, flags);
+ cmdq_thread_irq_handler(cmdq, thread);
+ spin_unlock_irqrestore(&thread->chan->lock, flags);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int cmdq_suspend(struct device *dev)
+{
+ struct cmdq *cmdq = dev_get_drvdata(dev);
+ struct cmdq_thread *thread;
+ int i;
+ bool task_running = false;
+
+ cmdq->suspended = true;
+
+ for (i = 0; i < cmdq->pdata->thread_nr; i++) {
+ thread = &cmdq->thread[i];
+ if (!list_empty(&thread->task_busy_list)) {
+ task_running = true;
+ break;
+ }
+ }
+
+ if (task_running)
+ dev_warn(dev, "exist running task(s) in suspend\n");
+
+ if (cmdq->pdata->sw_ddr_en)
+ cmdq_sw_ddr_enable(cmdq, false);
+
+ clk_bulk_unprepare(cmdq->pdata->gce_num, cmdq->clocks);
+
+ return 0;
+}
+
+static int cmdq_resume(struct device *dev)
+{
+ struct cmdq *cmdq = dev_get_drvdata(dev);
+
+ WARN_ON(clk_bulk_prepare(cmdq->pdata->gce_num, cmdq->clocks));
+ cmdq->suspended = false;
+
+ if (cmdq->pdata->sw_ddr_en)
+ cmdq_sw_ddr_enable(cmdq, true);
+
+ return 0;
+}
+
+static int cmdq_remove(struct platform_device *pdev)
+{
+ struct cmdq *cmdq = platform_get_drvdata(pdev);
+
+ if (cmdq->pdata->sw_ddr_en)
+ cmdq_sw_ddr_enable(cmdq, false);
+
+ clk_bulk_unprepare(cmdq->pdata->gce_num, cmdq->clocks);
+ return 0;
+}
+
+static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct cmdq_pkt *pkt = (struct cmdq_pkt *)data;
+ struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
+ struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
+ struct cmdq_task *task;
+ unsigned long curr_pa, end_pa;
+
+ /* Client should not flush new tasks if suspended. */
+ WARN_ON(cmdq->suspended);
+
+ task = kzalloc(sizeof(*task), GFP_ATOMIC);
+ if (!task)
+ return -ENOMEM;
+
+ task->cmdq = cmdq;
+ INIT_LIST_HEAD(&task->list_entry);
+ task->pa_base = pkt->pa_base;
+ task->thread = thread;
+ task->pkt = pkt;
+
+ if (list_empty(&thread->task_busy_list)) {
+ WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));
+
+ /*
+ * The thread reset clears the thread-related registers to 0,
+ * including pc, end, priority, irq, suspend and enable. Thus,
+ * writing CMDQ_THR_ENABLED to CMDQ_THR_ENABLE_TASK enables
+ * the thread and starts it running.
+ */
+ WARN_ON(cmdq_thread_reset(cmdq, thread) < 0);
+
+ writel(task->pa_base >> cmdq->pdata->shift,
+ thread->base + CMDQ_THR_CURR_ADDR);
+ writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->pdata->shift,
+ thread->base + CMDQ_THR_END_ADDR);
+
+ writel(thread->priority, thread->base + CMDQ_THR_PRIORITY);
+ writel(CMDQ_THR_IRQ_EN, thread->base + CMDQ_THR_IRQ_ENABLE);
+ writel(CMDQ_THR_ENABLED, thread->base + CMDQ_THR_ENABLE_TASK);
+ } else {
+ WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
+ curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) <<
+ cmdq->pdata->shift;
+ end_pa = readl(thread->base + CMDQ_THR_END_ADDR) <<
+ cmdq->pdata->shift;
+ /* check boundary */
+ if (curr_pa == end_pa - CMDQ_INST_SIZE ||
+ curr_pa == end_pa) {
+ /* set to this task directly */
+ writel(task->pa_base >> cmdq->pdata->shift,
+ thread->base + CMDQ_THR_CURR_ADDR);
+ } else {
+ cmdq_task_insert_into_thread(task);
+ smp_mb(); /* modify jump before enable thread */
+ }
+ writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->pdata->shift,
+ thread->base + CMDQ_THR_END_ADDR);
+ cmdq_thread_resume(thread);
+ }
+ list_move_tail(&task->list_entry, &thread->task_busy_list);
+
+ return 0;
+}
+
+static int cmdq_mbox_startup(struct mbox_chan *chan)
+{
+ return 0;
+}
+
+static void cmdq_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
+ struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
+ struct cmdq_task *task, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&thread->chan->lock, flags);
+ if (list_empty(&thread->task_busy_list))
+ goto done;
+
+ WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
+
+ /* make sure executed tasks get their success callbacks */
+ cmdq_thread_irq_handler(cmdq, thread);
+ if (list_empty(&thread->task_busy_list))
+ goto done;
+
+ list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
+ list_entry) {
+ cmdq_task_exec_done(task, -ECONNABORTED);
+ kfree(task);
+ }
+
+ cmdq_thread_disable(cmdq, thread);
+ clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
+
+done:
+ /*
+ * An empty thread->task_busy_list means the thread is already
+ * disabled. cmdq_mbox_send_data() always resets the thread, which
+ * clears the disable and suspend status when the first packet is
+ * sent to the channel, so there is nothing to do here; just unlock
+ * and leave.
+ */
+ spin_unlock_irqrestore(&thread->chan->lock, flags);
+}
+
+static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
+{
+ struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
+ struct cmdq_cb_data data;
+ struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
+ struct cmdq_task *task, *tmp;
+ unsigned long flags;
+ u32 enable;
+
+ spin_lock_irqsave(&thread->chan->lock, flags);
+ if (list_empty(&thread->task_busy_list))
+ goto out;
+
+ WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
+ if (!cmdq_thread_is_in_wfe(thread))
+ goto wait;
+
+ list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
+ list_entry) {
+ data.sta = -ECONNABORTED;
+ data.pkt = task->pkt;
+ mbox_chan_received_data(task->thread->chan, &data);
+ list_del(&task->list_entry);
+ kfree(task);
+ }
+
+ cmdq_thread_resume(thread);
+ cmdq_thread_disable(cmdq, thread);
+ clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
+
+out:
+ spin_unlock_irqrestore(&thread->chan->lock, flags);
+ return 0;
+
+wait:
+ cmdq_thread_resume(thread);
+ spin_unlock_irqrestore(&thread->chan->lock, flags);
+ if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_ENABLE_TASK,
+ enable, enable == 0, 1, timeout)) {
+ dev_err(cmdq->mbox.dev, "Fail to wait GCE thread 0x%x done\n",
+ (u32)(thread->base - cmdq->base));
+
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static const struct mbox_chan_ops cmdq_mbox_chan_ops = {
+ .send_data = cmdq_mbox_send_data,
+ .startup = cmdq_mbox_startup,
+ .shutdown = cmdq_mbox_shutdown,
+ .flush = cmdq_mbox_flush,
+};
+
+static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *sp)
+{
+ int ind = sp->args[0];
+ struct cmdq_thread *thread;
+
+ if (ind >= mbox->num_chans)
+ return ERR_PTR(-EINVAL);
+
+ thread = (struct cmdq_thread *)mbox->chans[ind].con_priv;
+ thread->priority = sp->args[1];
+ thread->chan = &mbox->chans[ind];
+
+ return &mbox->chans[ind];
+}
+
+static int cmdq_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cmdq *cmdq;
+ int err, i;
+ struct device_node *phandle = dev->of_node;
+ struct device_node *node;
+ int alias_id = 0;
+ static const char * const clk_name = "gce";
+ static const char * const clk_names[] = { "gce0", "gce1" };
+
+ cmdq = devm_kzalloc(dev, sizeof(*cmdq), GFP_KERNEL);
+ if (!cmdq)
+ return -ENOMEM;
+
+ cmdq->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(cmdq->base))
+ return PTR_ERR(cmdq->base);
+
+ cmdq->irq = platform_get_irq(pdev, 0);
+ if (cmdq->irq < 0)
+ return cmdq->irq;
+
+ cmdq->pdata = device_get_match_data(dev);
+ if (!cmdq->pdata) {
+ dev_err(dev, "failed to get match data\n");
+ return -EINVAL;
+ }
+
+ cmdq->irq_mask = GENMASK(cmdq->pdata->thread_nr - 1, 0);
+
+ dev_dbg(dev, "cmdq device: addr:0x%p, va:0x%p, irq:%d\n",
+ dev, cmdq->base, cmdq->irq);
+
+ if (cmdq->pdata->gce_num > 1) {
+ for_each_child_of_node(phandle->parent, node) {
+ alias_id = of_alias_get_id(node, clk_name);
+ if (alias_id >= 0 && alias_id < cmdq->pdata->gce_num) {
+ cmdq->clocks[alias_id].id = clk_names[alias_id];
+ cmdq->clocks[alias_id].clk = of_clk_get(node, 0);
+ if (IS_ERR(cmdq->clocks[alias_id].clk)) {
+ of_node_put(node);
+ return dev_err_probe(dev,
+ PTR_ERR(cmdq->clocks[alias_id].clk),
+ "failed to get gce clk: %d\n",
+ alias_id);
+ }
+ }
+ }
+ } else {
+ cmdq->clocks[alias_id].id = clk_name;
+ cmdq->clocks[alias_id].clk = devm_clk_get(&pdev->dev, clk_name);
+ if (IS_ERR(cmdq->clocks[alias_id].clk)) {
+ return dev_err_probe(dev, PTR_ERR(cmdq->clocks[alias_id].clk),
+ "failed to get gce clk\n");
+ }
+ }
+
+ cmdq->mbox.dev = dev;
+ cmdq->mbox.chans = devm_kcalloc(dev, cmdq->pdata->thread_nr,
+ sizeof(*cmdq->mbox.chans), GFP_KERNEL);
+ if (!cmdq->mbox.chans)
+ return -ENOMEM;
+
+ cmdq->mbox.num_chans = cmdq->pdata->thread_nr;
+ cmdq->mbox.ops = &cmdq_mbox_chan_ops;
+ cmdq->mbox.of_xlate = cmdq_xlate;
+
+ /* make use of TXDONE_BY_ACK */
+ cmdq->mbox.txdone_irq = false;
+ cmdq->mbox.txdone_poll = false;
+
+ cmdq->thread = devm_kcalloc(dev, cmdq->pdata->thread_nr,
+ sizeof(*cmdq->thread), GFP_KERNEL);
+ if (!cmdq->thread)
+ return -ENOMEM;
+
+ for (i = 0; i < cmdq->pdata->thread_nr; i++) {
+ cmdq->thread[i].base = cmdq->base + CMDQ_THR_BASE +
+ CMDQ_THR_SIZE * i;
+ INIT_LIST_HEAD(&cmdq->thread[i].task_busy_list);
+ cmdq->mbox.chans[i].con_priv = (void *)&cmdq->thread[i];
+ }
+
+ err = devm_mbox_controller_register(dev, &cmdq->mbox);
+ if (err < 0) {
+ dev_err(dev, "failed to register mailbox: %d\n", err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, cmdq);
+
+ WARN_ON(clk_bulk_prepare(cmdq->pdata->gce_num, cmdq->clocks));
+
+ cmdq_init(cmdq);
+
+ err = devm_request_irq(dev, cmdq->irq, cmdq_irq_handler, IRQF_SHARED,
+ "mtk_cmdq", cmdq);
+ if (err < 0) {
+ dev_err(dev, "failed to register ISR (%d)\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops cmdq_pm_ops = {
+ .suspend = cmdq_suspend,
+ .resume = cmdq_resume,
+};
+
+static const struct gce_plat gce_plat_v2 = {
+ .thread_nr = 16,
+ .shift = 0,
+ .control_by_sw = false,
+ .gce_num = 1
+};
+
+static const struct gce_plat gce_plat_v3 = {
+ .thread_nr = 24,
+ .shift = 0,
+ .control_by_sw = false,
+ .gce_num = 1
+};
+
+static const struct gce_plat gce_plat_v4 = {
+ .thread_nr = 24,
+ .shift = 3,
+ .control_by_sw = false,
+ .gce_num = 1
+};
+
+static const struct gce_plat gce_plat_v5 = {
+ .thread_nr = 24,
+ .shift = 3,
+ .control_by_sw = true,
+ .gce_num = 1
+};
+
+static const struct gce_plat gce_plat_v6 = {
+ .thread_nr = 24,
+ .shift = 3,
+ .control_by_sw = true,
+ .gce_num = 2
+};
+
+static const struct gce_plat gce_plat_v7 = {
+ .thread_nr = 24,
+ .shift = 3,
+ .control_by_sw = true,
+ .sw_ddr_en = true,
+ .gce_num = 1
+};
+
+static const struct of_device_id cmdq_of_ids[] = {
+ {.compatible = "mediatek,mt8173-gce", .data = (void *)&gce_plat_v2},
+ {.compatible = "mediatek,mt8183-gce", .data = (void *)&gce_plat_v3},
+ {.compatible = "mediatek,mt8186-gce", .data = (void *)&gce_plat_v7},
+ {.compatible = "mediatek,mt6779-gce", .data = (void *)&gce_plat_v4},
+ {.compatible = "mediatek,mt8192-gce", .data = (void *)&gce_plat_v5},
+ {.compatible = "mediatek,mt8195-gce", .data = (void *)&gce_plat_v6},
+ {}
+};
+
+static struct platform_driver cmdq_drv = {
+ .probe = cmdq_probe,
+ .remove = cmdq_remove,
+ .driver = {
+ .name = "mtk_cmdq",
+ .pm = &cmdq_pm_ops,
+ .of_match_table = cmdq_of_ids,
+ }
+};
+
+static int __init cmdq_drv_init(void)
+{
+ return platform_driver_register(&cmdq_drv);
+}
+
+static void __exit cmdq_drv_exit(void)
+{
+ platform_driver_unregister(&cmdq_drv);
+}
+
+subsys_initcall(cmdq_drv_init);
+module_exit(cmdq_drv_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c
new file mode 100644
index 000000000..792bcaebb
--- /dev/null
+++ b/drivers/mailbox/omap-mailbox.c
@@ -0,0 +1,918 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * OMAP mailbox driver
+ *
+ * Copyright (C) 2006-2009 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2013-2021 Texas Instruments Incorporated - https://www.ti.com
+ *
+ * Contact: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
+ * Suman Anna <s-anna@ti.com>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/kfifo.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/omap-mailbox.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox_client.h>
+
+#include "mailbox.h"
+
+#define MAILBOX_REVISION 0x000
+#define MAILBOX_MESSAGE(m) (0x040 + 4 * (m))
+#define MAILBOX_FIFOSTATUS(m) (0x080 + 4 * (m))
+#define MAILBOX_MSGSTATUS(m) (0x0c0 + 4 * (m))
+
+#define OMAP2_MAILBOX_IRQSTATUS(u) (0x100 + 8 * (u))
+#define OMAP2_MAILBOX_IRQENABLE(u) (0x104 + 8 * (u))
+
+#define OMAP4_MAILBOX_IRQSTATUS(u) (0x104 + 0x10 * (u))
+#define OMAP4_MAILBOX_IRQENABLE(u) (0x108 + 0x10 * (u))
+#define OMAP4_MAILBOX_IRQENABLE_CLR(u) (0x10c + 0x10 * (u))
+
+#define MAILBOX_IRQSTATUS(type, u) (type ? OMAP4_MAILBOX_IRQSTATUS(u) : \
+ OMAP2_MAILBOX_IRQSTATUS(u))
+#define MAILBOX_IRQENABLE(type, u) (type ? OMAP4_MAILBOX_IRQENABLE(u) : \
+ OMAP2_MAILBOX_IRQENABLE(u))
+#define MAILBOX_IRQDISABLE(type, u) (type ? OMAP4_MAILBOX_IRQENABLE_CLR(u) \
+ : OMAP2_MAILBOX_IRQENABLE(u))
+
+#define MAILBOX_IRQ_NEWMSG(m) (1 << (2 * (m)))
+#define MAILBOX_IRQ_NOTFULL(m) (1 << (2 * (m) + 1))
+
+/* Interrupt register configuration types */
+#define MBOX_INTR_CFG_TYPE1 0
+#define MBOX_INTR_CFG_TYPE2 1
+
+struct omap_mbox_fifo {
+ unsigned long msg;
+ unsigned long fifo_stat;
+ unsigned long msg_stat;
+ unsigned long irqenable;
+ unsigned long irqstatus;
+ unsigned long irqdisable;
+ u32 intr_bit;
+};
+
+struct omap_mbox_queue {
+ spinlock_t lock;
+ struct kfifo fifo;
+ struct work_struct work;
+ struct omap_mbox *mbox;
+ bool full;
+};
+
+struct omap_mbox_match_data {
+ u32 intr_type;
+};
+
+struct omap_mbox_device {
+ struct device *dev;
+ struct mutex cfg_lock;
+ void __iomem *mbox_base;
+ u32 *irq_ctx;
+ u32 num_users;
+ u32 num_fifos;
+ u32 intr_type;
+ struct omap_mbox **mboxes;
+ struct mbox_controller controller;
+ struct list_head elem;
+};
+
+struct omap_mbox_fifo_info {
+ int tx_id;
+ int tx_usr;
+ int tx_irq;
+
+ int rx_id;
+ int rx_usr;
+ int rx_irq;
+
+ const char *name;
+ bool send_no_irq;
+};
+
+struct omap_mbox {
+ const char *name;
+ int irq;
+ struct omap_mbox_queue *rxq;
+ struct device *dev;
+ struct omap_mbox_device *parent;
+ struct omap_mbox_fifo tx_fifo;
+ struct omap_mbox_fifo rx_fifo;
+ u32 intr_type;
+ struct mbox_chan *chan;
+ bool send_no_irq;
+};
+
+/* global variables for the mailbox devices */
+static DEFINE_MUTEX(omap_mbox_devices_lock);
+static LIST_HEAD(omap_mbox_devices);
+
+static unsigned int mbox_kfifo_size = CONFIG_OMAP_MBOX_KFIFO_SIZE;
+module_param(mbox_kfifo_size, uint, S_IRUGO);
+MODULE_PARM_DESC(mbox_kfifo_size, "Size of omap's mailbox kfifo (bytes)");
+
+static struct omap_mbox *mbox_chan_to_omap_mbox(struct mbox_chan *chan)
+{
+ if (!chan || !chan->con_priv)
+ return NULL;
+
+ return (struct omap_mbox *)chan->con_priv;
+}
+
+static inline
+unsigned int mbox_read_reg(struct omap_mbox_device *mdev, size_t ofs)
+{
+ return __raw_readl(mdev->mbox_base + ofs);
+}
+
+static inline
+void mbox_write_reg(struct omap_mbox_device *mdev, u32 val, size_t ofs)
+{
+ __raw_writel(val, mdev->mbox_base + ofs);
+}
+
+/* Mailbox FIFO handle functions */
+static u32 mbox_fifo_read(struct omap_mbox *mbox)
+{
+ struct omap_mbox_fifo *fifo = &mbox->rx_fifo;
+
+ return mbox_read_reg(mbox->parent, fifo->msg);
+}
+
+static void mbox_fifo_write(struct omap_mbox *mbox, u32 msg)
+{
+ struct omap_mbox_fifo *fifo = &mbox->tx_fifo;
+
+ mbox_write_reg(mbox->parent, msg, fifo->msg);
+}
+
+static int mbox_fifo_empty(struct omap_mbox *mbox)
+{
+ struct omap_mbox_fifo *fifo = &mbox->rx_fifo;
+
+ return (mbox_read_reg(mbox->parent, fifo->msg_stat) == 0);
+}
+
+static int mbox_fifo_full(struct omap_mbox *mbox)
+{
+ struct omap_mbox_fifo *fifo = &mbox->tx_fifo;
+
+ return mbox_read_reg(mbox->parent, fifo->fifo_stat);
+}
+
+/* Mailbox IRQ handle functions */
+static void ack_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
+{
+ struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
+ &mbox->tx_fifo : &mbox->rx_fifo;
+ u32 bit = fifo->intr_bit;
+ u32 irqstatus = fifo->irqstatus;
+
+ mbox_write_reg(mbox->parent, bit, irqstatus);
+
+ /* Flush posted write for irq status to avoid spurious interrupts */
+ mbox_read_reg(mbox->parent, irqstatus);
+}
+
+static int is_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
+{
+ struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
+ &mbox->tx_fifo : &mbox->rx_fifo;
+ u32 bit = fifo->intr_bit;
+ u32 irqenable = fifo->irqenable;
+ u32 irqstatus = fifo->irqstatus;
+
+ u32 enable = mbox_read_reg(mbox->parent, irqenable);
+ u32 status = mbox_read_reg(mbox->parent, irqstatus);
+
+ return (int)(enable & status & bit);
+}
+
+static void _omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
+{
+ u32 l;
+ struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
+ &mbox->tx_fifo : &mbox->rx_fifo;
+ u32 bit = fifo->intr_bit;
+ u32 irqenable = fifo->irqenable;
+
+ l = mbox_read_reg(mbox->parent, irqenable);
+ l |= bit;
+ mbox_write_reg(mbox->parent, l, irqenable);
+}
+
+static void _omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
+{
+ struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
+ &mbox->tx_fifo : &mbox->rx_fifo;
+ u32 bit = fifo->intr_bit;
+ u32 irqdisable = fifo->irqdisable;
+
+ /*
+ * Read and update the interrupt configuration register for pre-OMAP4.
+ * OMAP4 and later SoCs have a dedicated interrupt disabling register.
+ */
+ if (!mbox->intr_type)
+ bit = mbox_read_reg(mbox->parent, irqdisable) & ~bit;
+
+ mbox_write_reg(mbox->parent, bit, irqdisable);
+}
+
+void omap_mbox_enable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq)
+{
+ struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
+
+ if (WARN_ON(!mbox))
+ return;
+
+ _omap_mbox_enable_irq(mbox, irq);
+}
+EXPORT_SYMBOL(omap_mbox_enable_irq);
+
+void omap_mbox_disable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq)
+{
+ struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
+
+ if (WARN_ON(!mbox))
+ return;
+
+ _omap_mbox_disable_irq(mbox, irq);
+}
+EXPORT_SYMBOL(omap_mbox_disable_irq);
+
+/*
+ * Message receiver(workqueue)
+ */
+static void mbox_rx_work(struct work_struct *work)
+{
+ struct omap_mbox_queue *mq =
+ container_of(work, struct omap_mbox_queue, work);
+ mbox_msg_t data;
+ u32 msg;
+ int len;
+
+ while (kfifo_len(&mq->fifo) >= sizeof(msg)) {
+ len = kfifo_out(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
+ WARN_ON(len != sizeof(msg));
+ data = msg;
+
+ mbox_chan_received_data(mq->mbox->chan, (void *)data);
+ spin_lock_irq(&mq->lock);
+ if (mq->full) {
+ mq->full = false;
+ _omap_mbox_enable_irq(mq->mbox, IRQ_RX);
+ }
+ spin_unlock_irq(&mq->lock);
+ }
+}
+
+/*
+ * Mailbox interrupt handler
+ */
+static void __mbox_tx_interrupt(struct omap_mbox *mbox)
+{
+ _omap_mbox_disable_irq(mbox, IRQ_TX);
+ ack_mbox_irq(mbox, IRQ_TX);
+ mbox_chan_txdone(mbox->chan, 0);
+}
+
+static void __mbox_rx_interrupt(struct omap_mbox *mbox)
+{
+ struct omap_mbox_queue *mq = mbox->rxq;
+ u32 msg;
+ int len;
+
+ while (!mbox_fifo_empty(mbox)) {
+ if (unlikely(kfifo_avail(&mq->fifo) < sizeof(msg))) {
+ _omap_mbox_disable_irq(mbox, IRQ_RX);
+ mq->full = true;
+ goto nomem;
+ }
+
+ msg = mbox_fifo_read(mbox);
+
+ len = kfifo_in(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
+ WARN_ON(len != sizeof(msg));
+ }
+
+ /* no more messages in the fifo. clear IRQ source. */
+ ack_mbox_irq(mbox, IRQ_RX);
+nomem:
+ schedule_work(&mbox->rxq->work);
+}
+
+static irqreturn_t mbox_interrupt(int irq, void *p)
+{
+ struct omap_mbox *mbox = p;
+
+ if (is_mbox_irq(mbox, IRQ_TX))
+ __mbox_tx_interrupt(mbox);
+
+ if (is_mbox_irq(mbox, IRQ_RX))
+ __mbox_rx_interrupt(mbox);
+
+ return IRQ_HANDLED;
+}
+
+static struct omap_mbox_queue *mbox_queue_alloc(struct omap_mbox *mbox,
+ void (*work)(struct work_struct *))
+{
+ struct omap_mbox_queue *mq;
+
+ if (!work)
+ return NULL;
+
+ mq = kzalloc(sizeof(*mq), GFP_KERNEL);
+ if (!mq)
+ return NULL;
+
+ spin_lock_init(&mq->lock);
+
+ if (kfifo_alloc(&mq->fifo, mbox_kfifo_size, GFP_KERNEL))
+ goto error;
+
+ INIT_WORK(&mq->work, work);
+ return mq;
+
+error:
+ kfree(mq);
+ return NULL;
+}
+
+static void mbox_queue_free(struct omap_mbox_queue *q)
+{
+ kfifo_free(&q->fifo);
+ kfree(q);
+}
+
+static int omap_mbox_startup(struct omap_mbox *mbox)
+{
+ int ret = 0;
+ struct omap_mbox_queue *mq;
+
+ mq = mbox_queue_alloc(mbox, mbox_rx_work);
+ if (!mq)
+ return -ENOMEM;
+ mbox->rxq = mq;
+ mq->mbox = mbox;
+
+ ret = request_irq(mbox->irq, mbox_interrupt, IRQF_SHARED,
+ mbox->name, mbox);
+ if (unlikely(ret)) {
+ pr_err("failed to register mailbox interrupt:%d\n", ret);
+ goto fail_request_irq;
+ }
+
+ if (mbox->send_no_irq)
+ mbox->chan->txdone_method = TXDONE_BY_ACK;
+
+ _omap_mbox_enable_irq(mbox, IRQ_RX);
+
+ return 0;
+
+fail_request_irq:
+ mbox_queue_free(mbox->rxq);
+ return ret;
+}
+
+static void omap_mbox_fini(struct omap_mbox *mbox)
+{
+ _omap_mbox_disable_irq(mbox, IRQ_RX);
+ free_irq(mbox->irq, mbox);
+ flush_work(&mbox->rxq->work);
+ mbox_queue_free(mbox->rxq);
+}
+
+static struct omap_mbox *omap_mbox_device_find(struct omap_mbox_device *mdev,
+ const char *mbox_name)
+{
+ struct omap_mbox *_mbox, *mbox = NULL;
+ struct omap_mbox **mboxes = mdev->mboxes;
+ int i;
+
+ if (!mboxes)
+ return NULL;
+
+ for (i = 0; (_mbox = mboxes[i]); i++) {
+ if (!strcmp(_mbox->name, mbox_name)) {
+ mbox = _mbox;
+ break;
+ }
+ }
+ return mbox;
+}
+
+struct mbox_chan *omap_mbox_request_channel(struct mbox_client *cl,
+ const char *chan_name)
+{
+ struct device *dev = cl->dev;
+ struct omap_mbox *mbox = NULL;
+ struct omap_mbox_device *mdev;
+ int ret;
+
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ if (dev->of_node) {
+ pr_err("%s: please use mbox_request_channel(), this API is supported only for OMAP non-DT usage\n",
+ __func__);
+ return ERR_PTR(-ENODEV);
+ }
+
+ mutex_lock(&omap_mbox_devices_lock);
+ list_for_each_entry(mdev, &omap_mbox_devices, elem) {
+ mbox = omap_mbox_device_find(mdev, chan_name);
+ if (mbox)
+ break;
+ }
+ mutex_unlock(&omap_mbox_devices_lock);
+
+ if (!mbox || !mbox->chan)
+ return ERR_PTR(-ENOENT);
+
+ ret = mbox_bind_client(mbox->chan, cl);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return mbox->chan;
+}
+EXPORT_SYMBOL(omap_mbox_request_channel);
+
+static struct class omap_mbox_class = { .name = "mbox", };
+
+static int omap_mbox_register(struct omap_mbox_device *mdev)
+{
+ int ret;
+ int i;
+ struct omap_mbox **mboxes;
+
+ if (!mdev || !mdev->mboxes)
+ return -EINVAL;
+
+ mboxes = mdev->mboxes;
+ for (i = 0; mboxes[i]; i++) {
+ struct omap_mbox *mbox = mboxes[i];
+
+ mbox->dev = device_create(&omap_mbox_class, mdev->dev,
+ 0, mbox, "%s", mbox->name);
+ if (IS_ERR(mbox->dev)) {
+ ret = PTR_ERR(mbox->dev);
+ goto err_out;
+ }
+ }
+
+ mutex_lock(&omap_mbox_devices_lock);
+ list_add(&mdev->elem, &omap_mbox_devices);
+ mutex_unlock(&omap_mbox_devices_lock);
+
+ ret = devm_mbox_controller_register(mdev->dev, &mdev->controller);
+
+err_out:
+ if (ret) {
+ while (i--)
+ device_unregister(mboxes[i]->dev);
+ }
+ return ret;
+}
+
+static int omap_mbox_unregister(struct omap_mbox_device *mdev)
+{
+ int i;
+ struct omap_mbox **mboxes;
+
+ if (!mdev || !mdev->mboxes)
+ return -EINVAL;
+
+ mutex_lock(&omap_mbox_devices_lock);
+ list_del(&mdev->elem);
+ mutex_unlock(&omap_mbox_devices_lock);
+
+ mboxes = mdev->mboxes;
+ for (i = 0; mboxes[i]; i++)
+ device_unregister(mboxes[i]->dev);
+ return 0;
+}
+
+static int omap_mbox_chan_startup(struct mbox_chan *chan)
+{
+ struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
+ struct omap_mbox_device *mdev = mbox->parent;
+ int ret = 0;
+
+ mutex_lock(&mdev->cfg_lock);
+ pm_runtime_get_sync(mdev->dev);
+ ret = omap_mbox_startup(mbox);
+ if (ret)
+ pm_runtime_put_sync(mdev->dev);
+ mutex_unlock(&mdev->cfg_lock);
+ return ret;
+}
+
+static void omap_mbox_chan_shutdown(struct mbox_chan *chan)
+{
+ struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
+ struct omap_mbox_device *mdev = mbox->parent;
+
+ mutex_lock(&mdev->cfg_lock);
+ omap_mbox_fini(mbox);
+ pm_runtime_put_sync(mdev->dev);
+ mutex_unlock(&mdev->cfg_lock);
+}
+
+static int omap_mbox_chan_send_noirq(struct omap_mbox *mbox, u32 msg)
+{
+ int ret = -EBUSY;
+
+ if (!mbox_fifo_full(mbox)) {
+ _omap_mbox_enable_irq(mbox, IRQ_RX);
+ mbox_fifo_write(mbox, msg);
+ ret = 0;
+ _omap_mbox_disable_irq(mbox, IRQ_RX);
+
+ /* we must read and ack the interrupt directly from here */
+ mbox_fifo_read(mbox);
+ ack_mbox_irq(mbox, IRQ_RX);
+ }
+
+ return ret;
+}
+
+static int omap_mbox_chan_send(struct omap_mbox *mbox, u32 msg)
+{
+ int ret = -EBUSY;
+
+ if (!mbox_fifo_full(mbox)) {
+ mbox_fifo_write(mbox, msg);
+ ret = 0;
+ }
+
+ /* always enable the interrupt */
+ _omap_mbox_enable_irq(mbox, IRQ_TX);
+ return ret;
+}
+
+static int omap_mbox_chan_send_data(struct mbox_chan *chan, void *data)
+{
+ struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
+ int ret;
+ u32 msg = omap_mbox_message(data);
+
+ if (!mbox)
+ return -EINVAL;
+
+ if (mbox->send_no_irq)
+ ret = omap_mbox_chan_send_noirq(mbox, msg);
+ else
+ ret = omap_mbox_chan_send(mbox, msg);
+
+ return ret;
+}
+
+static const struct mbox_chan_ops omap_mbox_chan_ops = {
+ .startup = omap_mbox_chan_startup,
+ .send_data = omap_mbox_chan_send_data,
+ .shutdown = omap_mbox_chan_shutdown,
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int omap_mbox_suspend(struct device *dev)
+{
+ struct omap_mbox_device *mdev = dev_get_drvdata(dev);
+ u32 usr, fifo, reg;
+
+ if (pm_runtime_status_suspended(dev))
+ return 0;
+
+ for (fifo = 0; fifo < mdev->num_fifos; fifo++) {
+ if (mbox_read_reg(mdev, MAILBOX_MSGSTATUS(fifo))) {
+ dev_err(mdev->dev, "fifo %d has unexpected unread messages\n",
+ fifo);
+ return -EBUSY;
+ }
+ }
+
+ for (usr = 0; usr < mdev->num_users; usr++) {
+ reg = MAILBOX_IRQENABLE(mdev->intr_type, usr);
+ mdev->irq_ctx[usr] = mbox_read_reg(mdev, reg);
+ }
+
+ return 0;
+}
+
+static int omap_mbox_resume(struct device *dev)
+{
+ struct omap_mbox_device *mdev = dev_get_drvdata(dev);
+ u32 usr, reg;
+
+ if (pm_runtime_status_suspended(dev))
+ return 0;
+
+ for (usr = 0; usr < mdev->num_users; usr++) {
+ reg = MAILBOX_IRQENABLE(mdev->intr_type, usr);
+ mbox_write_reg(mdev, mdev->irq_ctx[usr], reg);
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops omap_mbox_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(omap_mbox_suspend, omap_mbox_resume)
+};
+
+static const struct omap_mbox_match_data omap2_data = { MBOX_INTR_CFG_TYPE1 };
+static const struct omap_mbox_match_data omap4_data = { MBOX_INTR_CFG_TYPE2 };
+
+static const struct of_device_id omap_mailbox_of_match[] = {
+ {
+ .compatible = "ti,omap2-mailbox",
+ .data = &omap2_data,
+ },
+ {
+ .compatible = "ti,omap3-mailbox",
+ .data = &omap2_data,
+ },
+ {
+ .compatible = "ti,omap4-mailbox",
+ .data = &omap4_data,
+ },
+ {
+ .compatible = "ti,am654-mailbox",
+ .data = &omap4_data,
+ },
+ {
+ .compatible = "ti,am64-mailbox",
+ .data = &omap4_data,
+ },
+ {
+ /* end */
+ },
+};
+MODULE_DEVICE_TABLE(of, omap_mailbox_of_match);
+
+static struct mbox_chan *omap_mbox_of_xlate(struct mbox_controller *controller,
+ const struct of_phandle_args *sp)
+{
+ phandle phandle = sp->args[0];
+ struct device_node *node;
+ struct omap_mbox_device *mdev;
+ struct omap_mbox *mbox;
+
+ mdev = container_of(controller, struct omap_mbox_device, controller);
+ if (WARN_ON(!mdev))
+ return ERR_PTR(-EINVAL);
+
+ node = of_find_node_by_phandle(phandle);
+ if (!node) {
+ pr_err("%s: could not find node phandle 0x%x\n",
+ __func__, phandle);
+ return ERR_PTR(-ENODEV);
+ }
+
+ mbox = omap_mbox_device_find(mdev, node->name);
+ of_node_put(node);
+ return mbox ? mbox->chan : ERR_PTR(-ENOENT);
+}
+
+static int omap_mbox_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct mbox_chan *chnls;
+ struct omap_mbox **list, *mbox, *mboxblk;
+ struct omap_mbox_fifo_info *finfo, *finfoblk;
+ struct omap_mbox_device *mdev;
+ struct omap_mbox_fifo *fifo;
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *child;
+ const struct omap_mbox_match_data *match_data;
+ u32 intr_type, info_count;
+ u32 num_users, num_fifos;
+ u32 tmp[3];
+ u32 l;
+ int i;
+
+ if (!node) {
+ pr_err("%s: only DT-based devices are supported\n", __func__);
+ return -ENODEV;
+ }
+
+ match_data = of_device_get_match_data(&pdev->dev);
+ if (!match_data)
+ return -ENODEV;
+ intr_type = match_data->intr_type;
+
+ if (of_property_read_u32(node, "ti,mbox-num-users", &num_users))
+ return -ENODEV;
+
+ if (of_property_read_u32(node, "ti,mbox-num-fifos", &num_fifos))
+ return -ENODEV;
+
+ info_count = of_get_available_child_count(node);
+ if (!info_count) {
+ dev_err(&pdev->dev, "no available mbox devices found\n");
+ return -ENODEV;
+ }
+
+ finfoblk = devm_kcalloc(&pdev->dev, info_count, sizeof(*finfoblk),
+ GFP_KERNEL);
+ if (!finfoblk)
+ return -ENOMEM;
+
+ finfo = finfoblk;
+ child = NULL;
+ for (i = 0; i < info_count; i++, finfo++) {
+ child = of_get_next_available_child(node, child);
+ ret = of_property_read_u32_array(child, "ti,mbox-tx", tmp,
+ ARRAY_SIZE(tmp));
+ if (ret)
+ return ret;
+ finfo->tx_id = tmp[0];
+ finfo->tx_irq = tmp[1];
+ finfo->tx_usr = tmp[2];
+
+ ret = of_property_read_u32_array(child, "ti,mbox-rx", tmp,
+ ARRAY_SIZE(tmp));
+ if (ret)
+ return ret;
+ finfo->rx_id = tmp[0];
+ finfo->rx_irq = tmp[1];
+ finfo->rx_usr = tmp[2];
+
+ finfo->name = child->name;
+
+ finfo->send_no_irq = of_property_read_bool(child, "ti,mbox-send-noirq");
+
+ if (finfo->tx_id >= num_fifos || finfo->rx_id >= num_fifos ||
+ finfo->tx_usr >= num_users || finfo->rx_usr >= num_users)
+ return -EINVAL;
+ }
+
+ mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
+ if (!mdev)
+ return -ENOMEM;
+
+ mdev->mbox_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mdev->mbox_base))
+ return PTR_ERR(mdev->mbox_base);
+
+ mdev->irq_ctx = devm_kcalloc(&pdev->dev, num_users, sizeof(u32),
+ GFP_KERNEL);
+ if (!mdev->irq_ctx)
+ return -ENOMEM;
+
+ /* allocate one extra for marking end of list */
+ list = devm_kcalloc(&pdev->dev, info_count + 1, sizeof(*list),
+ GFP_KERNEL);
+ if (!list)
+ return -ENOMEM;
+
+ chnls = devm_kcalloc(&pdev->dev, info_count + 1, sizeof(*chnls),
+ GFP_KERNEL);
+ if (!chnls)
+ return -ENOMEM;
+
+ mboxblk = devm_kcalloc(&pdev->dev, info_count, sizeof(*mbox),
+ GFP_KERNEL);
+ if (!mboxblk)
+ return -ENOMEM;
+
+ mbox = mboxblk;
+ finfo = finfoblk;
+ for (i = 0; i < info_count; i++, finfo++) {
+ fifo = &mbox->tx_fifo;
+ fifo->msg = MAILBOX_MESSAGE(finfo->tx_id);
+ fifo->fifo_stat = MAILBOX_FIFOSTATUS(finfo->tx_id);
+ fifo->intr_bit = MAILBOX_IRQ_NOTFULL(finfo->tx_id);
+ fifo->irqenable = MAILBOX_IRQENABLE(intr_type, finfo->tx_usr);
+ fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, finfo->tx_usr);
+ fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, finfo->tx_usr);
+
+ fifo = &mbox->rx_fifo;
+ fifo->msg = MAILBOX_MESSAGE(finfo->rx_id);
+ fifo->msg_stat = MAILBOX_MSGSTATUS(finfo->rx_id);
+ fifo->intr_bit = MAILBOX_IRQ_NEWMSG(finfo->rx_id);
+ fifo->irqenable = MAILBOX_IRQENABLE(intr_type, finfo->rx_usr);
+ fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, finfo->rx_usr);
+ fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, finfo->rx_usr);
+
+ mbox->send_no_irq = finfo->send_no_irq;
+ mbox->intr_type = intr_type;
+
+ mbox->parent = mdev;
+ mbox->name = finfo->name;
+ mbox->irq = platform_get_irq(pdev, finfo->tx_irq);
+ if (mbox->irq < 0)
+ return mbox->irq;
+ mbox->chan = &chnls[i];
+ chnls[i].con_priv = mbox;
+ list[i] = mbox++;
+ }
+
+ mutex_init(&mdev->cfg_lock);
+ mdev->dev = &pdev->dev;
+ mdev->num_users = num_users;
+ mdev->num_fifos = num_fifos;
+ mdev->intr_type = intr_type;
+ mdev->mboxes = list;
+
+ /*
+ * The OMAP/K3 Mailbox IP does not have a Tx-Done IRQ, but rather a
+ * Tx-Ready IRQ, which is needed to run the Tx state machine.
+ */
+ mdev->controller.txdone_irq = true;
+ mdev->controller.dev = mdev->dev;
+ mdev->controller.ops = &omap_mbox_chan_ops;
+ mdev->controller.chans = chnls;
+ mdev->controller.num_chans = info_count;
+ mdev->controller.of_xlate = omap_mbox_of_xlate;
+ ret = omap_mbox_register(mdev);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, mdev);
+ pm_runtime_enable(mdev->dev);
+
+ ret = pm_runtime_resume_and_get(mdev->dev);
+ if (ret < 0)
+ goto unregister;
+
+ /*
+ * just print the raw revision register, the format is not
+ * uniform across all SoCs
+ */
+ l = mbox_read_reg(mdev, MAILBOX_REVISION);
+ dev_info(mdev->dev, "omap mailbox rev 0x%x\n", l);
+
+ ret = pm_runtime_put_sync(mdev->dev);
+ if (ret < 0 && ret != -ENOSYS)
+ goto unregister;
+
+ devm_kfree(&pdev->dev, finfoblk);
+ return 0;
+
+unregister:
+ pm_runtime_disable(mdev->dev);
+ omap_mbox_unregister(mdev);
+ return ret;
+}
+
+static int omap_mbox_remove(struct platform_device *pdev)
+{
+ struct omap_mbox_device *mdev = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(mdev->dev);
+ omap_mbox_unregister(mdev);
+
+ return 0;
+}
+
+static struct platform_driver omap_mbox_driver = {
+ .probe = omap_mbox_probe,
+ .remove = omap_mbox_remove,
+ .driver = {
+ .name = "omap-mailbox",
+ .pm = &omap_mbox_pm_ops,
+ .of_match_table = of_match_ptr(omap_mailbox_of_match),
+ },
+};
+
+static int __init omap_mbox_init(void)
+{
+ int err;
+
+ err = class_register(&omap_mbox_class);
+ if (err)
+ return err;
+
+ /* kfifo size sanity check: alignment and minimal size */
+ mbox_kfifo_size = ALIGN(mbox_kfifo_size, sizeof(u32));
+ mbox_kfifo_size = max_t(unsigned int, mbox_kfifo_size, sizeof(u32));
+
+ err = platform_driver_register(&omap_mbox_driver);
+ if (err)
+ class_unregister(&omap_mbox_class);
+
+ return err;
+}
+subsys_initcall(omap_mbox_init);
+
+static void __exit omap_mbox_exit(void)
+{
+ platform_driver_unregister(&omap_mbox_driver);
+ class_unregister(&omap_mbox_class);
+}
+module_exit(omap_mbox_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("omap mailbox: interrupt driven messaging");
+MODULE_AUTHOR("Toshihiro Kobayashi");
+MODULE_AUTHOR("Hiroshi DOYU");
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
new file mode 100644
index 000000000..a44d4b3e5
--- /dev/null
+++ b/drivers/mailbox/pcc.c
@@ -0,0 +1,764 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2014 Linaro Ltd.
+ * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
+ *
+ * PCC (Platform Communication Channel) is defined in the ACPI 5.0+
+ * specification. It is a mailbox like mechanism to allow clients
+ * such as CPPC (Collaborative Processor Performance Control), RAS
+ * (Reliability, Availability and Serviceability) and MPST (Memory
+ * Node Power State Table) to talk to the platform (e.g. BMC) through
+ * shared memory regions as defined in the PCC table entries. The PCC
+ * specification supports a Doorbell mechanism for the PCC clients
+ * to notify the platform about new data. This Doorbell information
+ * is also specified in each PCC table entry.
+ *
+ * Typical high level flow of operation is:
+ *
+ * PCC Reads:
+ * * Client tries to acquire a channel lock.
+ * * After it is acquired it writes READ cmd in communication region cmd
+ * address.
+ * * Client issues mbox_send_message() which rings the PCC doorbell
+ * for its PCC channel.
+ * * If command completes, then client has control over channel and
+ * it can proceed with its reads.
+ * * Client releases lock.
+ *
+ * PCC Writes:
+ * * Client tries to acquire channel lock.
+ * * Client writes to its communication region after it acquires a
+ * channel lock.
+ * * Client writes WRITE cmd in communication region cmd address.
+ * * Client issues mbox_send_message() which rings the PCC doorbell
+ * for its PCC channel.
+ * * If command completes, then writes have succeeded and it can release
+ * the channel lock.
+ *
+ * There is a Nominal latency defined for each channel, which indicates
+ * how long to wait until a command completes. If the command is not
+ * complete by then, the client needs to retry or assume failure.
+ *
+ * For more details about PCC, please see the ACPI specification from
+ * http://www.uefi.org/ACPIv5.1 Section 14.
+ *
+ * This file implements PCC as a Mailbox controller and allows for PCC
+ * clients to be implemented as its Mailbox Client Channels.
+ */
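To make the write flow above concrete, a client-side sketch is shown below. It is only illustrative: the subspace id, command value, busy-wait (assuming the nominal latency is in microseconds) and the ioremap of the shared region are assumptions, and real clients such as CPPC add locking, retries and proper timeout handling. It relies on pcc_mbox_request_channel()/pcc_mbox_free_channel() from <acpi/pcc.h>, the struct acpi_pcct_shared_memory header layout, and the usual delay/io helpers.

static int demo_pcc_write(struct mbox_client *cl, int subspace_id, u16 cmd)
{
	struct pcc_mbox_chan *pchan;
	struct acpi_pcct_shared_memory __iomem *shmem;
	unsigned int us;
	int ret;

	pchan = pcc_mbox_request_channel(cl, subspace_id);
	if (IS_ERR(pchan))
		return PTR_ERR(pchan);

	shmem = ioremap(pchan->shmem_base_addr, pchan->shmem_size);
	if (!shmem) {
		ret = -ENOMEM;
		goto out_free;
	}

	/* ... fill the communication space beyond the header here ... */

	writew(cmd, &shmem->command);	/* client-defined WRITE command value */
	writew(0, &shmem->status);	/* clear the command-complete bit */

	ret = mbox_send_message(pchan->mchan, &cmd);	/* rings the doorbell */
	if (ret < 0)
		goto out_unmap;

	/* Busy-wait up to the channel's nominal latency for command completion */
	for (us = pchan->latency, ret = -EIO; us; us--) {
		if (readw(&shmem->status) & BIT(0)) {	/* command complete */
			ret = 0;
			break;
		}
		udelay(1);
	}

out_unmap:
	iounmap(shmem);
out_free:
	pcc_mbox_free_channel(pchan);
	return ret;
}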
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/log2.h>
+#include <linux/platform_device.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox_client.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <acpi/pcc.h>
+
+#include "mailbox.h"
+
+#define MBOX_IRQ_NAME "pcc-mbox"
+
+/**
+ * struct pcc_chan_reg - PCC register bundle
+ *
+ * @vaddr: cached virtual address for this register
+ * @gas: pointer to the generic address structure for this register
+ * @preserve_mask: bitmask to preserve when writing to this register
+ * @set_mask: bitmask to set when writing to this register
+ * @status_mask: bitmask to determine and/or update the status for this register
+ */
+struct pcc_chan_reg {
+ void __iomem *vaddr;
+ struct acpi_generic_address *gas;
+ u64 preserve_mask;
+ u64 set_mask;
+ u64 status_mask;
+};
+
+/**
+ * struct pcc_chan_info - PCC channel specific information
+ *
+ * @chan: PCC channel information with Shared Memory Region info
+ * @db: PCC register bundle for the doorbell register
+ * @plat_irq_ack: PCC register bundle for the platform interrupt acknowledge
+ * register
+ * @cmd_complete: PCC register bundle for the command complete check register
+ * @cmd_update: PCC register bundle for the command complete update register
+ * @error: PCC register bundle for the error status register
+ * @plat_irq: platform interrupt
+ */
+struct pcc_chan_info {
+ struct pcc_mbox_chan chan;
+ struct pcc_chan_reg db;
+ struct pcc_chan_reg plat_irq_ack;
+ struct pcc_chan_reg cmd_complete;
+ struct pcc_chan_reg cmd_update;
+ struct pcc_chan_reg error;
+ int plat_irq;
+};
+
+#define to_pcc_chan_info(c) container_of(c, struct pcc_chan_info, chan)
+static struct pcc_chan_info *chan_info;
+static int pcc_chan_count;
+
+/*
+ * PCC can be used with performance-critical drivers such as CPPC,
+ * so it makes sense to locally cache the virtual address and
+ * use it to read/write PCC registers such as the doorbell register.
+ *
+ * The read_register() and write_register() helpers below are used to
+ * read from and write to performance-critical registers such as the
+ * PCC doorbell register.
+ */
+static void read_register(void __iomem *vaddr, u64 *val, unsigned int bit_width)
+{
+ switch (bit_width) {
+ case 8:
+ *val = readb(vaddr);
+ break;
+ case 16:
+ *val = readw(vaddr);
+ break;
+ case 32:
+ *val = readl(vaddr);
+ break;
+ case 64:
+ *val = readq(vaddr);
+ break;
+ }
+}
+
+static void write_register(void __iomem *vaddr, u64 val, unsigned int bit_width)
+{
+ switch (bit_width) {
+ case 8:
+ writeb(val, vaddr);
+ break;
+ case 16:
+ writew(val, vaddr);
+ break;
+ case 32:
+ writel(val, vaddr);
+ break;
+ case 64:
+ writeq(val, vaddr);
+ break;
+ }
+}
+
+static int pcc_chan_reg_read(struct pcc_chan_reg *reg, u64 *val)
+{
+ int ret = 0;
+
+ if (!reg->gas) {
+ *val = 0;
+ return 0;
+ }
+
+ if (reg->vaddr)
+ read_register(reg->vaddr, val, reg->gas->bit_width);
+ else
+ ret = acpi_read(val, reg->gas);
+
+ return ret;
+}
+
+static int pcc_chan_reg_write(struct pcc_chan_reg *reg, u64 val)
+{
+ int ret = 0;
+
+ if (!reg->gas)
+ return 0;
+
+ if (reg->vaddr)
+ write_register(reg->vaddr, val, reg->gas->bit_width);
+ else
+ ret = acpi_write(val, reg->gas);
+
+ return ret;
+}
+
+static int pcc_chan_reg_read_modify_write(struct pcc_chan_reg *reg)
+{
+ int ret = 0;
+ u64 val;
+
+ ret = pcc_chan_reg_read(reg, &val);
+ if (ret)
+ return ret;
+
+ val &= reg->preserve_mask;
+ val |= reg->set_mask;
+
+ return pcc_chan_reg_write(reg, val);
+}
+
+/**
+ * pcc_map_interrupt - Map a PCC subspace GSI to a linux IRQ number
+ * @interrupt: GSI number.
+ * @flags: interrupt flags
+ *
+ * Returns: a valid linux IRQ number on success
+ * 0 or -EINVAL on failure
+ */
+static int pcc_map_interrupt(u32 interrupt, u32 flags)
+{
+ int trigger, polarity;
+
+ if (!interrupt)
+ return 0;
+
+ trigger = (flags & ACPI_PCCT_INTERRUPT_MODE) ? ACPI_EDGE_SENSITIVE
+ : ACPI_LEVEL_SENSITIVE;
+
+ polarity = (flags & ACPI_PCCT_INTERRUPT_POLARITY) ? ACPI_ACTIVE_LOW
+ : ACPI_ACTIVE_HIGH;
+
+ return acpi_register_gsi(NULL, interrupt, trigger, polarity);
+}
+
+/**
+ * pcc_mbox_irq - PCC mailbox interrupt handler
+ * @irq: interrupt number
+ * @p: data/cookie passed from the caller to identify the channel
+ *
+ * Returns: IRQ_HANDLED if interrupt is handled or IRQ_NONE if not
+ */
+static irqreturn_t pcc_mbox_irq(int irq, void *p)
+{
+ struct pcc_chan_info *pchan;
+ struct mbox_chan *chan = p;
+ u64 val;
+ int ret;
+
+ pchan = chan->con_priv;
+
+ ret = pcc_chan_reg_read(&pchan->cmd_complete, &val);
+ if (ret)
+ return IRQ_NONE;
+
+ if (val) { /* Ensure GAS exists and value is non-zero */
+ val &= pchan->cmd_complete.status_mask;
+ if (!val)
+ return IRQ_NONE;
+ }
+
+ ret = pcc_chan_reg_read(&pchan->error, &val);
+ if (ret)
+ return IRQ_NONE;
+ val &= pchan->error.status_mask;
+ if (val) {
+ val &= ~pchan->error.status_mask;
+ pcc_chan_reg_write(&pchan->error, val);
+ return IRQ_NONE;
+ }
+
+ if (pcc_chan_reg_read_modify_write(&pchan->plat_irq_ack))
+ return IRQ_NONE;
+
+ mbox_chan_received_data(chan, NULL);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * pcc_mbox_request_channel - PCC clients call this function to
+ * request a pointer to their PCC subspace, from which they
+ * can get the details needed to communicate with the remote.
+ * @cl: Pointer to Mailbox client, so we know where to bind the
+ * Channel.
+ * @subspace_id: The PCC Subspace index as parsed in the PCC client
+ * ACPI package. This is used to lookup the array of PCC
+ * subspaces as parsed by the PCC Mailbox controller.
+ *
+ * Return: Pointer to the PCC Mailbox Channel if successful or ERR_PTR.
+ */
+struct pcc_mbox_chan *
+pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id)
+{
+ struct pcc_chan_info *pchan;
+ struct mbox_chan *chan;
+ int rc;
+
+ if (subspace_id < 0 || subspace_id >= pcc_chan_count)
+ return ERR_PTR(-ENOENT);
+
+ pchan = chan_info + subspace_id;
+ chan = pchan->chan.mchan;
+ if (IS_ERR(chan) || chan->cl) {
+ pr_err("Channel not found for idx: %d\n", subspace_id);
+ return ERR_PTR(-EBUSY);
+ }
+
+ rc = mbox_bind_client(chan, cl);
+ if (rc)
+ return ERR_PTR(rc);
+
+ return &pchan->chan;
+}
+EXPORT_SYMBOL_GPL(pcc_mbox_request_channel);
+
+/**
+ * pcc_mbox_free_channel - Clients call this to free their Channel.
+ *
+ * @pchan: Pointer to the PCC mailbox channel as returned by
+ * pcc_mbox_request_channel()
+ */
+void pcc_mbox_free_channel(struct pcc_mbox_chan *pchan)
+{
+ struct mbox_chan *chan = pchan->mchan;
+
+ if (!chan || !chan->cl)
+ return;
+
+ mbox_free_channel(chan);
+}
+EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
+
+/**
+ * pcc_send_data - Called from Mailbox Controller code. Used
+ * here only to ring the channel doorbell. The PCC client
+ * specific read/write is done in the client driver in
+ * order to maintain atomicity over the PCC channel once the
+ * OS has control over it. See above for the flow of operations.
+ * @chan: Pointer to Mailbox channel over which to send data.
+ * @data: Client specific data written over channel. Used here
+ * only for debug after PCC transaction completes.
+ *
+ * Return: Err if something failed else 0 for success.
+ */
+static int pcc_send_data(struct mbox_chan *chan, void *data)
+{
+ int ret;
+ struct pcc_chan_info *pchan = chan->con_priv;
+
+ ret = pcc_chan_reg_read_modify_write(&pchan->cmd_update);
+ if (ret)
+ return ret;
+
+ return pcc_chan_reg_read_modify_write(&pchan->db);
+}
+
+/**
+ * pcc_startup - Called from Mailbox Controller code. Used here
+ * to request the interrupt.
+ * @chan: Pointer to Mailbox channel to startup.
+ *
+ * Return: Err if something failed else 0 for success.
+ */
+static int pcc_startup(struct mbox_chan *chan)
+{
+ struct pcc_chan_info *pchan = chan->con_priv;
+ int rc;
+
+ if (pchan->plat_irq > 0) {
+ rc = devm_request_irq(chan->mbox->dev, pchan->plat_irq, pcc_mbox_irq, 0,
+ MBOX_IRQ_NAME, chan);
+ if (unlikely(rc)) {
+ dev_err(chan->mbox->dev, "failed to register PCC interrupt %d\n",
+ pchan->plat_irq);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * pcc_shutdown - Called from Mailbox Controller code. Used here
+ * to free the interrupt.
+ * @chan: Pointer to Mailbox channel to shutdown.
+ */
+static void pcc_shutdown(struct mbox_chan *chan)
+{
+ struct pcc_chan_info *pchan = chan->con_priv;
+
+ if (pchan->plat_irq > 0)
+ devm_free_irq(chan->mbox->dev, pchan->plat_irq, chan);
+}
+
+static const struct mbox_chan_ops pcc_chan_ops = {
+ .send_data = pcc_send_data,
+ .startup = pcc_startup,
+ .shutdown = pcc_shutdown,
+};
+
+/**
+ * parse_pcc_subspace - Count PCC subspaces defined
+ * @header: Pointer to the ACPI subtable header under the PCCT.
+ * @end: End of subtable entry.
+ *
+ * Return: If we find a PCC subspace entry of a valid type, return 0.
+ * Otherwise, return -EINVAL.
+ *
+ * This gets called for each entry in the PCC table.
+ */
+static int parse_pcc_subspace(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_pcct_subspace *ss = (struct acpi_pcct_subspace *) header;
+
+ if (ss->header.type < ACPI_PCCT_TYPE_RESERVED)
+ return 0;
+
+ return -EINVAL;
+}
+
+static int
+pcc_chan_reg_init(struct pcc_chan_reg *reg, struct acpi_generic_address *gas,
+ u64 preserve_mask, u64 set_mask, u64 status_mask, char *name)
+{
+ if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ if (!(gas->bit_width >= 8 && gas->bit_width <= 64 &&
+ is_power_of_2(gas->bit_width))) {
+			pr_err("Error: Cannot access register of %u bit width\n",
+ gas->bit_width);
+ return -EFAULT;
+ }
+
+ reg->vaddr = acpi_os_ioremap(gas->address, gas->bit_width / 8);
+ if (!reg->vaddr) {
+ pr_err("Failed to ioremap PCC %s register\n", name);
+ return -ENOMEM;
+ }
+ }
+ reg->gas = gas;
+ reg->preserve_mask = preserve_mask;
+ reg->set_mask = set_mask;
+ reg->status_mask = status_mask;
+ return 0;
+}
+
+/**
+ * pcc_parse_subspace_irq - Parse the PCC IRQ and PCC ACK register
+ *
+ * @pchan: Pointer to the PCC channel info structure.
+ * @pcct_entry: Pointer to the ACPI subtable header.
+ *
+ * Return: 0 for Success, else errno.
+ *
+ * There should be one entry per PCC channel. This gets called for each
+ * entry in the PCC table. This uses the PCCT Type 1 structure for all
+ * applicable types (Type 1-4) to fetch the irq.
+ */
+static int pcc_parse_subspace_irq(struct pcc_chan_info *pchan,
+ struct acpi_subtable_header *pcct_entry)
+{
+ int ret = 0;
+ struct acpi_pcct_hw_reduced *pcct_ss;
+
+ if (pcct_entry->type < ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE ||
+ pcct_entry->type > ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE)
+ return 0;
+
+ pcct_ss = (struct acpi_pcct_hw_reduced *)pcct_entry;
+ pchan->plat_irq = pcc_map_interrupt(pcct_ss->platform_interrupt,
+ (u32)pcct_ss->flags);
+ if (pchan->plat_irq <= 0) {
+ pr_err("PCC GSI %d not registered\n",
+ pcct_ss->platform_interrupt);
+ return -EINVAL;
+ }
+
+ if (pcct_ss->header.type == ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) {
+ struct acpi_pcct_hw_reduced_type2 *pcct2_ss = (void *)pcct_ss;
+
+ ret = pcc_chan_reg_init(&pchan->plat_irq_ack,
+ &pcct2_ss->platform_ack_register,
+ pcct2_ss->ack_preserve_mask,
+ pcct2_ss->ack_write_mask, 0,
+ "PLAT IRQ ACK");
+
+ } else if (pcct_ss->header.type == ACPI_PCCT_TYPE_EXT_PCC_MASTER_SUBSPACE ||
+ pcct_ss->header.type == ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE) {
+ struct acpi_pcct_ext_pcc_master *pcct_ext = (void *)pcct_ss;
+
+ ret = pcc_chan_reg_init(&pchan->plat_irq_ack,
+ &pcct_ext->platform_ack_register,
+ pcct_ext->ack_preserve_mask,
+ pcct_ext->ack_set_mask, 0,
+ "PLAT IRQ ACK");
+ }
+
+ return ret;
+}
+
+/**
+ * pcc_parse_subspace_db_reg - Parse the PCC doorbell register
+ *
+ * @pchan: Pointer to the PCC channel info structure.
+ * @pcct_entry: Pointer to the ACPI subtable header.
+ *
+ * Return: 0 for Success, else errno.
+ */
+static int pcc_parse_subspace_db_reg(struct pcc_chan_info *pchan,
+ struct acpi_subtable_header *pcct_entry)
+{
+ int ret = 0;
+
+ if (pcct_entry->type <= ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) {
+ struct acpi_pcct_subspace *pcct_ss;
+
+ pcct_ss = (struct acpi_pcct_subspace *)pcct_entry;
+
+ ret = pcc_chan_reg_init(&pchan->db,
+ &pcct_ss->doorbell_register,
+ pcct_ss->preserve_mask,
+ pcct_ss->write_mask, 0, "Doorbell");
+
+ } else {
+ struct acpi_pcct_ext_pcc_master *pcct_ext;
+
+ pcct_ext = (struct acpi_pcct_ext_pcc_master *)pcct_entry;
+
+ ret = pcc_chan_reg_init(&pchan->db,
+ &pcct_ext->doorbell_register,
+ pcct_ext->preserve_mask,
+ pcct_ext->write_mask, 0, "Doorbell");
+ if (ret)
+ return ret;
+
+ ret = pcc_chan_reg_init(&pchan->cmd_complete,
+ &pcct_ext->cmd_complete_register,
+ 0, 0, pcct_ext->cmd_complete_mask,
+ "Command Complete Check");
+ if (ret)
+ return ret;
+
+ ret = pcc_chan_reg_init(&pchan->cmd_update,
+ &pcct_ext->cmd_update_register,
+ pcct_ext->cmd_update_preserve_mask,
+ pcct_ext->cmd_update_set_mask, 0,
+ "Command Complete Update");
+ if (ret)
+ return ret;
+
+ ret = pcc_chan_reg_init(&pchan->error,
+ &pcct_ext->error_status_register,
+ 0, 0, pcct_ext->error_status_mask,
+ "Error Status");
+ }
+ return ret;
+}
+
+/**
+ * pcc_parse_subspace_shmem - Parse the PCC Shared Memory Region information
+ *
+ * @pchan: Pointer to the PCC channel info structure.
+ * @pcct_entry: Pointer to the ACPI subtable header.
+ *
+ */
+static void pcc_parse_subspace_shmem(struct pcc_chan_info *pchan,
+ struct acpi_subtable_header *pcct_entry)
+{
+ if (pcct_entry->type <= ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) {
+ struct acpi_pcct_subspace *pcct_ss =
+ (struct acpi_pcct_subspace *)pcct_entry;
+
+ pchan->chan.shmem_base_addr = pcct_ss->base_address;
+ pchan->chan.shmem_size = pcct_ss->length;
+ pchan->chan.latency = pcct_ss->latency;
+ pchan->chan.max_access_rate = pcct_ss->max_access_rate;
+ pchan->chan.min_turnaround_time = pcct_ss->min_turnaround_time;
+ } else {
+ struct acpi_pcct_ext_pcc_master *pcct_ext =
+ (struct acpi_pcct_ext_pcc_master *)pcct_entry;
+
+ pchan->chan.shmem_base_addr = pcct_ext->base_address;
+ pchan->chan.shmem_size = pcct_ext->length;
+ pchan->chan.latency = pcct_ext->latency;
+ pchan->chan.max_access_rate = pcct_ext->max_access_rate;
+ pchan->chan.min_turnaround_time = pcct_ext->min_turnaround_time;
+ }
+}
+
+/**
+ * acpi_pcc_probe - Parse the ACPI tree for the PCCT.
+ *
+ * Return: 0 for Success, else errno.
+ */
+static int __init acpi_pcc_probe(void)
+{
+ int count, i, rc = 0;
+ acpi_status status;
+ struct acpi_table_header *pcct_tbl;
+ struct acpi_subtable_proc proc[ACPI_PCCT_TYPE_RESERVED];
+
+ status = acpi_get_table(ACPI_SIG_PCCT, 0, &pcct_tbl);
+ if (ACPI_FAILURE(status) || !pcct_tbl)
+ return -ENODEV;
+
+ /* Set up the subtable handlers */
+ for (i = ACPI_PCCT_TYPE_GENERIC_SUBSPACE;
+ i < ACPI_PCCT_TYPE_RESERVED; i++) {
+ proc[i].id = i;
+ proc[i].count = 0;
+ proc[i].handler = parse_pcc_subspace;
+ }
+
+ count = acpi_table_parse_entries_array(ACPI_SIG_PCCT,
+ sizeof(struct acpi_table_pcct), proc,
+ ACPI_PCCT_TYPE_RESERVED, MAX_PCC_SUBSPACES);
+ if (count <= 0 || count > MAX_PCC_SUBSPACES) {
+ if (count < 0)
+ pr_warn("Error parsing PCC subspaces from PCCT\n");
+ else
+ pr_warn("Invalid PCCT: %d PCC subspaces\n", count);
+
+ rc = -EINVAL;
+ } else {
+ pcc_chan_count = count;
+ }
+
+ acpi_put_table(pcct_tbl);
+
+ return rc;
+}
+
+/**
+ * pcc_mbox_probe - Called when we find a match for the
+ * PCCT platform device. This is purely used to represent
+ * the PCCT as a virtual device for registering with the
+ * generic Mailbox framework.
+ *
+ * @pdev: Pointer to platform device returned when a match
+ * is found.
+ *
+ * Return: 0 for Success, else errno.
+ */
+static int pcc_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mbox_controller *pcc_mbox_ctrl;
+ struct mbox_chan *pcc_mbox_channels;
+ struct acpi_table_header *pcct_tbl;
+ struct acpi_subtable_header *pcct_entry;
+ struct acpi_table_pcct *acpi_pcct_tbl;
+ acpi_status status = AE_OK;
+ int i, rc, count = pcc_chan_count;
+
+ /* Search for PCCT */
+ status = acpi_get_table(ACPI_SIG_PCCT, 0, &pcct_tbl);
+
+ if (ACPI_FAILURE(status) || !pcct_tbl)
+ return -ENODEV;
+
+ pcc_mbox_channels = devm_kcalloc(dev, count, sizeof(*pcc_mbox_channels),
+ GFP_KERNEL);
+ if (!pcc_mbox_channels) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ chan_info = devm_kcalloc(dev, count, sizeof(*chan_info), GFP_KERNEL);
+ if (!chan_info) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ pcc_mbox_ctrl = devm_kzalloc(dev, sizeof(*pcc_mbox_ctrl), GFP_KERNEL);
+ if (!pcc_mbox_ctrl) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ /* Point to the first PCC subspace entry */
+ pcct_entry = (struct acpi_subtable_header *) (
+ (unsigned long) pcct_tbl + sizeof(struct acpi_table_pcct));
+
+ acpi_pcct_tbl = (struct acpi_table_pcct *) pcct_tbl;
+ if (acpi_pcct_tbl->flags & ACPI_PCCT_DOORBELL)
+ pcc_mbox_ctrl->txdone_irq = true;
+
+ for (i = 0; i < count; i++) {
+ struct pcc_chan_info *pchan = chan_info + i;
+
+ pcc_mbox_channels[i].con_priv = pchan;
+ pchan->chan.mchan = &pcc_mbox_channels[i];
+
+ if (pcct_entry->type == ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE &&
+ !pcc_mbox_ctrl->txdone_irq) {
+			pr_err("Platform Interrupt flag must be set to 1\n");
+ rc = -EINVAL;
+ goto err;
+ }
+
+ if (pcc_mbox_ctrl->txdone_irq) {
+ rc = pcc_parse_subspace_irq(pchan, pcct_entry);
+ if (rc < 0)
+ goto err;
+ }
+ rc = pcc_parse_subspace_db_reg(pchan, pcct_entry);
+ if (rc < 0)
+ goto err;
+
+ pcc_parse_subspace_shmem(pchan, pcct_entry);
+
+ pcct_entry = (struct acpi_subtable_header *)
+ ((unsigned long) pcct_entry + pcct_entry->length);
+ }
+
+ pcc_mbox_ctrl->num_chans = count;
+
+ pr_info("Detected %d PCC Subspaces\n", pcc_mbox_ctrl->num_chans);
+
+ pcc_mbox_ctrl->chans = pcc_mbox_channels;
+ pcc_mbox_ctrl->ops = &pcc_chan_ops;
+ pcc_mbox_ctrl->dev = dev;
+
+ pr_info("Registering PCC driver as Mailbox controller\n");
+ rc = mbox_controller_register(pcc_mbox_ctrl);
+ if (rc)
+ pr_err("Err registering PCC as Mailbox controller: %d\n", rc);
+ else
+ return 0;
+err:
+ acpi_put_table(pcct_tbl);
+ return rc;
+}
+
+static struct platform_driver pcc_mbox_driver = {
+ .probe = pcc_mbox_probe,
+ .driver = {
+ .name = "PCCT",
+ },
+};
+
+static int __init pcc_init(void)
+{
+ int ret;
+ struct platform_device *pcc_pdev;
+
+ if (acpi_disabled)
+ return -ENODEV;
+
+ /* Check if PCC support is available. */
+ ret = acpi_pcc_probe();
+
+ if (ret) {
+ pr_debug("ACPI PCC probe failed.\n");
+ return -ENODEV;
+ }
+
+ pcc_pdev = platform_create_bundle(&pcc_mbox_driver,
+ pcc_mbox_probe, NULL, 0, NULL, 0);
+
+ if (IS_ERR(pcc_pdev)) {
+ pr_debug("Err creating PCC platform bundle\n");
+ pcc_chan_count = 0;
+ return PTR_ERR(pcc_pdev);
+ }
+
+ return 0;
+}
+
+/*
+ * Make PCC init postcore so that users of this mailbox
+ * such as the ACPI Processor driver have it available
+ * at their init.
+ */
+postcore_initcall(pcc_init);
diff --git a/drivers/mailbox/pl320-ipc.c b/drivers/mailbox/pl320-ipc.c
new file mode 100644
index 000000000..fbcf07930
--- /dev/null
+++ b/drivers/mailbox/pl320-ipc.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2012 Calxeda, Inc.
+ */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/amba/bus.h>
+
+#include <linux/pl320-ipc.h>
+
+#define IPCMxSOURCE(m) ((m) * 0x40)
+#define IPCMxDSET(m) (((m) * 0x40) + 0x004)
+#define IPCMxDCLEAR(m) (((m) * 0x40) + 0x008)
+#define IPCMxDSTATUS(m) (((m) * 0x40) + 0x00C)
+#define IPCMxMODE(m) (((m) * 0x40) + 0x010)
+#define IPCMxMSET(m) (((m) * 0x40) + 0x014)
+#define IPCMxMCLEAR(m) (((m) * 0x40) + 0x018)
+#define IPCMxMSTATUS(m) (((m) * 0x40) + 0x01C)
+#define IPCMxSEND(m) (((m) * 0x40) + 0x020)
+#define IPCMxDR(m, dr) (((m) * 0x40) + ((dr) * 4) + 0x024)
+
+#define IPCMMIS(irq) (((irq) * 8) + 0x800)
+#define IPCMRIS(irq) (((irq) * 8) + 0x804)
+
+#define MBOX_MASK(n) (1 << (n))
+#define IPC_TX_MBOX 1
+#define IPC_RX_MBOX 2
+
+#define CHAN_MASK(n) (1 << (n))
+#define A9_SOURCE 1
+#define M3_SOURCE 0
+
+static void __iomem *ipc_base;
+static int ipc_irq;
+static DEFINE_MUTEX(ipc_m1_lock);
+static DECLARE_COMPLETION(ipc_completion);
+static ATOMIC_NOTIFIER_HEAD(ipc_notifier);
+
+static inline void set_destination(int source, int mbox)
+{
+ writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxDSET(mbox));
+ writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxMSET(mbox));
+}
+
+static inline void clear_destination(int source, int mbox)
+{
+ writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxDCLEAR(mbox));
+ writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxMCLEAR(mbox));
+}
+
+static void __ipc_send(int mbox, u32 *data)
+{
+ int i;
+ for (i = 0; i < 7; i++)
+ writel_relaxed(data[i], ipc_base + IPCMxDR(mbox, i));
+ writel_relaxed(0x1, ipc_base + IPCMxSEND(mbox));
+}
+
+static u32 __ipc_rcv(int mbox, u32 *data)
+{
+ int i;
+ for (i = 0; i < 7; i++)
+ data[i] = readl_relaxed(ipc_base + IPCMxDR(mbox, i));
+ return data[1];
+}
+
+/* blocking implementation from the A9 side, not usable in interrupts! */
+int pl320_ipc_transmit(u32 *data)
+{
+ int ret;
+
+ mutex_lock(&ipc_m1_lock);
+
+ init_completion(&ipc_completion);
+ __ipc_send(IPC_TX_MBOX, data);
+ ret = wait_for_completion_timeout(&ipc_completion,
+ msecs_to_jiffies(1000));
+ if (ret == 0) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ ret = __ipc_rcv(IPC_TX_MBOX, data);
+out:
+ mutex_unlock(&ipc_m1_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pl320_ipc_transmit);
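+
+/*
+ * Usage sketch (illustrative only): a caller on the A9 fills a 7-word
+ * buffer and transmits it from process context; on success the buffer is
+ * overwritten with the M3's reply and the reply's second word is returned.
+ * The message layout (MY_CMD, arg0, arg1) is a caller-defined assumption:
+ *
+ *	u32 msg[7] = { MY_CMD, arg0, arg1, 0, 0, 0, 0 };
+ *	int ret = pl320_ipc_transmit(msg);	// sleeps; -ETIMEDOUT after 1s
+ */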
+
+static irqreturn_t ipc_handler(int irq, void *dev)
+{
+ u32 irq_stat;
+ u32 data[7];
+
+ irq_stat = readl_relaxed(ipc_base + IPCMMIS(1));
+ if (irq_stat & MBOX_MASK(IPC_TX_MBOX)) {
+ writel_relaxed(0, ipc_base + IPCMxSEND(IPC_TX_MBOX));
+ complete(&ipc_completion);
+ }
+ if (irq_stat & MBOX_MASK(IPC_RX_MBOX)) {
+ __ipc_rcv(IPC_RX_MBOX, data);
+ atomic_notifier_call_chain(&ipc_notifier, data[0], data + 1);
+ writel_relaxed(2, ipc_base + IPCMxSEND(IPC_RX_MBOX));
+ }
+
+ return IRQ_HANDLED;
+}
+
+int pl320_ipc_register_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&ipc_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(pl320_ipc_register_notifier);
+
+int pl320_ipc_unregister_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&ipc_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(pl320_ipc_unregister_notifier);
+
+static int pl320_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ int ret;
+
+ ipc_base = ioremap(adev->res.start, resource_size(&adev->res));
+ if (ipc_base == NULL)
+ return -ENOMEM;
+
+ writel_relaxed(0, ipc_base + IPCMxSEND(IPC_TX_MBOX));
+
+ ipc_irq = adev->irq[0];
+ ret = request_irq(ipc_irq, ipc_handler, 0, dev_name(&adev->dev), NULL);
+ if (ret < 0)
+ goto err;
+
+ /* Init slow mailbox */
+ writel_relaxed(CHAN_MASK(A9_SOURCE),
+ ipc_base + IPCMxSOURCE(IPC_TX_MBOX));
+ writel_relaxed(CHAN_MASK(M3_SOURCE),
+ ipc_base + IPCMxDSET(IPC_TX_MBOX));
+ writel_relaxed(CHAN_MASK(M3_SOURCE) | CHAN_MASK(A9_SOURCE),
+ ipc_base + IPCMxMSET(IPC_TX_MBOX));
+
+ /* Init receive mailbox */
+ writel_relaxed(CHAN_MASK(M3_SOURCE),
+ ipc_base + IPCMxSOURCE(IPC_RX_MBOX));
+ writel_relaxed(CHAN_MASK(A9_SOURCE),
+ ipc_base + IPCMxDSET(IPC_RX_MBOX));
+ writel_relaxed(CHAN_MASK(M3_SOURCE) | CHAN_MASK(A9_SOURCE),
+ ipc_base + IPCMxMSET(IPC_RX_MBOX));
+
+ return 0;
+err:
+ iounmap(ipc_base);
+ return ret;
+}
+
+static struct amba_id pl320_ids[] = {
+ {
+ .id = 0x00041320,
+ .mask = 0x000fffff,
+ },
+ { 0, 0 },
+};
+
+static struct amba_driver pl320_driver = {
+ .drv = {
+ .name = "pl320",
+ },
+ .id_table = pl320_ids,
+ .probe = pl320_probe,
+};
+
+static int __init ipc_init(void)
+{
+ return amba_driver_register(&pl320_driver);
+}
+subsys_initcall(ipc_init);
diff --git a/drivers/mailbox/platform_mhu.c b/drivers/mailbox/platform_mhu.c
new file mode 100644
index 000000000..834aecd72
--- /dev/null
+++ b/drivers/mailbox/platform_mhu.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2016 BayLibre SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Synchronised with arm_mhu.c from:
+ * Copyright (C) 2013-2015 Fujitsu Semiconductor Ltd.
+ * Copyright (C) 2015 Linaro Ltd.
+ * Author: Jassi Brar <jaswinder.singh@linaro.org>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mailbox_controller.h>
+
+#define INTR_SET_OFS 0x0
+#define INTR_STAT_OFS 0x4
+#define INTR_CLR_OFS 0x8
+
+#define MHU_SEC_OFFSET 0x0
+#define MHU_LP_OFFSET 0xc
+#define MHU_HP_OFFSET 0x18
+#define TX_REG_OFFSET 0x24
+
+#define MHU_CHANS 3
+
+struct platform_mhu_link {
+ int irq;
+ void __iomem *tx_reg;
+ void __iomem *rx_reg;
+};
+
+struct platform_mhu {
+ void __iomem *base;
+ struct platform_mhu_link mlink[MHU_CHANS];
+ struct mbox_chan chan[MHU_CHANS];
+ struct mbox_controller mbox;
+};
+
+static irqreturn_t platform_mhu_rx_interrupt(int irq, void *p)
+{
+ struct mbox_chan *chan = p;
+ struct platform_mhu_link *mlink = chan->con_priv;
+ u32 val;
+
+ val = readl_relaxed(mlink->rx_reg + INTR_STAT_OFS);
+ if (!val)
+ return IRQ_NONE;
+
+ mbox_chan_received_data(chan, (void *)&val);
+
+ writel_relaxed(val, mlink->rx_reg + INTR_CLR_OFS);
+
+ return IRQ_HANDLED;
+}
+
+static bool platform_mhu_last_tx_done(struct mbox_chan *chan)
+{
+ struct platform_mhu_link *mlink = chan->con_priv;
+ u32 val = readl_relaxed(mlink->tx_reg + INTR_STAT_OFS);
+
+ return (val == 0);
+}
+
+static int platform_mhu_send_data(struct mbox_chan *chan, void *data)
+{
+ struct platform_mhu_link *mlink = chan->con_priv;
+ u32 *arg = data;
+
+ writel_relaxed(*arg, mlink->tx_reg + INTR_SET_OFS);
+
+ return 0;
+}
+
+static int platform_mhu_startup(struct mbox_chan *chan)
+{
+ struct platform_mhu_link *mlink = chan->con_priv;
+ u32 val;
+ int ret;
+
+ val = readl_relaxed(mlink->tx_reg + INTR_STAT_OFS);
+ writel_relaxed(val, mlink->tx_reg + INTR_CLR_OFS);
+
+ ret = request_irq(mlink->irq, platform_mhu_rx_interrupt,
+ IRQF_SHARED, "platform_mhu_link", chan);
+ if (ret) {
+ dev_err(chan->mbox->dev,
+ "Unable to acquire IRQ %d\n", mlink->irq);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void platform_mhu_shutdown(struct mbox_chan *chan)
+{
+ struct platform_mhu_link *mlink = chan->con_priv;
+
+ free_irq(mlink->irq, chan);
+}
+
+static const struct mbox_chan_ops platform_mhu_ops = {
+ .send_data = platform_mhu_send_data,
+ .startup = platform_mhu_startup,
+ .shutdown = platform_mhu_shutdown,
+ .last_tx_done = platform_mhu_last_tx_done,
+};
+
+static int platform_mhu_probe(struct platform_device *pdev)
+{
+ int i, err;
+ struct platform_mhu *mhu;
+ struct device *dev = &pdev->dev;
+ int platform_mhu_reg[MHU_CHANS] = {
+ MHU_SEC_OFFSET, MHU_LP_OFFSET, MHU_HP_OFFSET
+ };
+
+ /* Allocate memory for device */
+ mhu = devm_kzalloc(dev, sizeof(*mhu), GFP_KERNEL);
+ if (!mhu)
+ return -ENOMEM;
+
+ mhu->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mhu->base)) {
+ dev_err(dev, "ioremap failed\n");
+ return PTR_ERR(mhu->base);
+ }
+
+ for (i = 0; i < MHU_CHANS; i++) {
+ mhu->chan[i].con_priv = &mhu->mlink[i];
+ mhu->mlink[i].irq = platform_get_irq(pdev, i);
+ if (mhu->mlink[i].irq < 0)
+ return mhu->mlink[i].irq;
+ mhu->mlink[i].rx_reg = mhu->base + platform_mhu_reg[i];
+ mhu->mlink[i].tx_reg = mhu->mlink[i].rx_reg + TX_REG_OFFSET;
+ }
+
+ mhu->mbox.dev = dev;
+ mhu->mbox.chans = &mhu->chan[0];
+ mhu->mbox.num_chans = MHU_CHANS;
+ mhu->mbox.ops = &platform_mhu_ops;
+ mhu->mbox.txdone_irq = false;
+ mhu->mbox.txdone_poll = true;
+ mhu->mbox.txpoll_period = 1;
+
+ platform_set_drvdata(pdev, mhu);
+
+ err = devm_mbox_controller_register(dev, &mhu->mbox);
+ if (err) {
+ dev_err(dev, "Failed to register mailboxes %d\n", err);
+ return err;
+ }
+
+ dev_info(dev, "Platform MHU Mailbox registered\n");
+ return 0;
+}
+
+static const struct of_device_id platform_mhu_dt_ids[] = {
+ { .compatible = "amlogic,meson-gxbb-mhu", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, platform_mhu_dt_ids);
+
+static struct platform_driver platform_mhu_driver = {
+ .probe = platform_mhu_probe,
+ .driver = {
+ .name = "platform-mhu",
+ .of_match_table = platform_mhu_dt_ids,
+ },
+};
+
+module_platform_driver(platform_mhu_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:platform-mhu");
+MODULE_DESCRIPTION("Platform MHU Driver");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
new file mode 100644
index 000000000..002a135ee
--- /dev/null
+++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017, Linaro Ltd
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mailbox_controller.h>
+
+#define QCOM_APCS_IPC_BITS 32
+
+struct qcom_apcs_ipc {
+ struct mbox_controller mbox;
+ struct mbox_chan mbox_chans[QCOM_APCS_IPC_BITS];
+
+ struct regmap *regmap;
+ unsigned long offset;
+ struct platform_device *clk;
+};
+
+struct qcom_apcs_ipc_data {
+ int offset;
+ char *clk_name;
+};
+
+static const struct qcom_apcs_ipc_data ipq6018_apcs_data = {
+ .offset = 8, .clk_name = "qcom,apss-ipq6018-clk"
+};
+
+static const struct qcom_apcs_ipc_data msm8916_apcs_data = {
+ .offset = 8, .clk_name = "qcom-apcs-msm8916-clk"
+};
+
+static const struct qcom_apcs_ipc_data msm8994_apcs_data = {
+ .offset = 8, .clk_name = NULL
+};
+
+static const struct qcom_apcs_ipc_data msm8996_apcs_data = {
+ .offset = 16, .clk_name = "qcom-apcs-msm8996-clk"
+};
+
+static const struct qcom_apcs_ipc_data apps_shared_apcs_data = {
+ .offset = 12, .clk_name = NULL
+};
+
+static const struct qcom_apcs_ipc_data sdx55_apcs_data = {
+ .offset = 0x1008, .clk_name = "qcom-sdx55-acps-clk"
+};
+
+static const struct regmap_config apcs_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1008,
+ .fast_io = true,
+};
+
+static int qcom_apcs_ipc_send_data(struct mbox_chan *chan, void *data)
+{
+ struct qcom_apcs_ipc *apcs = container_of(chan->mbox,
+ struct qcom_apcs_ipc, mbox);
+ unsigned long idx = (unsigned long)chan->con_priv;
+
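+	/*
+	 * Each mailbox channel maps to one bit of the APCS IPC register;
+	 * writing that bit raises the IPC interrupt towards the remote
+	 * processor associated with this channel.
+	 */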
+ return regmap_write(apcs->regmap, apcs->offset, BIT(idx));
+}
+
+static const struct mbox_chan_ops qcom_apcs_ipc_ops = {
+ .send_data = qcom_apcs_ipc_send_data,
+};
+
+static int qcom_apcs_ipc_probe(struct platform_device *pdev)
+{
+ struct qcom_apcs_ipc *apcs;
+ const struct qcom_apcs_ipc_data *apcs_data;
+ struct regmap *regmap;
+ void __iomem *base;
+ unsigned long i;
+ int ret;
+
+ apcs = devm_kzalloc(&pdev->dev, sizeof(*apcs), GFP_KERNEL);
+ if (!apcs)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio(&pdev->dev, base, &apcs_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ apcs_data = of_device_get_match_data(&pdev->dev);
+
+ apcs->regmap = regmap;
+ apcs->offset = apcs_data->offset;
+
+ /* Initialize channel identifiers */
+ for (i = 0; i < ARRAY_SIZE(apcs->mbox_chans); i++)
+ apcs->mbox_chans[i].con_priv = (void *)i;
+
+ apcs->mbox.dev = &pdev->dev;
+ apcs->mbox.ops = &qcom_apcs_ipc_ops;
+ apcs->mbox.chans = apcs->mbox_chans;
+ apcs->mbox.num_chans = ARRAY_SIZE(apcs->mbox_chans);
+
+ ret = devm_mbox_controller_register(&pdev->dev, &apcs->mbox);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register APCS IPC controller\n");
+ return ret;
+ }
+
+ if (apcs_data->clk_name) {
+ apcs->clk = platform_device_register_data(&pdev->dev,
+ apcs_data->clk_name,
+ PLATFORM_DEVID_AUTO,
+ NULL, 0);
+ if (IS_ERR(apcs->clk))
+ dev_err(&pdev->dev, "failed to register APCS clk\n");
+ }
+
+ platform_set_drvdata(pdev, apcs);
+
+ return 0;
+}
+
+static int qcom_apcs_ipc_remove(struct platform_device *pdev)
+{
+ struct qcom_apcs_ipc *apcs = platform_get_drvdata(pdev);
+ struct platform_device *clk = apcs->clk;
+
+ platform_device_unregister(clk);
+
+ return 0;
+}
+
+/* .data is the offset of the ipc register within the global block */
+static const struct of_device_id qcom_apcs_ipc_of_match[] = {
+ { .compatible = "qcom,ipq6018-apcs-apps-global", .data = &ipq6018_apcs_data },
+ { .compatible = "qcom,msm8916-apcs-kpss-global", .data = &msm8916_apcs_data },
+ { .compatible = "qcom,msm8939-apcs-kpss-global", .data = &msm8916_apcs_data },
+ { .compatible = "qcom,msm8953-apcs-kpss-global", .data = &msm8994_apcs_data },
+ { .compatible = "qcom,msm8976-apcs-kpss-global", .data = &msm8994_apcs_data },
+ { .compatible = "qcom,msm8994-apcs-kpss-global", .data = &msm8994_apcs_data },
+ { .compatible = "qcom,msm8996-apcs-hmss-global", .data = &msm8996_apcs_data },
+ { .compatible = "qcom,msm8998-apcs-hmss-global", .data = &msm8994_apcs_data },
+ { .compatible = "qcom,qcm2290-apcs-hmss-global", .data = &msm8994_apcs_data },
+ { .compatible = "qcom,qcs404-apcs-apps-global", .data = &msm8916_apcs_data },
+ { .compatible = "qcom,sdm660-apcs-hmss-global", .data = &msm8994_apcs_data },
+ { .compatible = "qcom,sdm845-apss-shared", .data = &apps_shared_apcs_data },
+ { .compatible = "qcom,sm4250-apcs-hmss-global", .data = &msm8994_apcs_data },
+ { .compatible = "qcom,sm6125-apcs-hmss-global", .data = &msm8994_apcs_data },
+ { .compatible = "qcom,sm6115-apcs-hmss-global", .data = &msm8994_apcs_data },
+ { .compatible = "qcom,sdx55-apcs-gcc", .data = &sdx55_apcs_data },
+ /* Do not add any more entries using existing driver data */
+ { .compatible = "qcom,ipq5332-apcs-apps-global", .data = &ipq6018_apcs_data },
+ { .compatible = "qcom,ipq8074-apcs-apps-global", .data = &ipq6018_apcs_data },
+ { .compatible = "qcom,sc7180-apss-shared", .data = &apps_shared_apcs_data },
+ { .compatible = "qcom,sc8180x-apss-shared", .data = &apps_shared_apcs_data },
+ { .compatible = "qcom,sm8150-apss-shared", .data = &apps_shared_apcs_data },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_apcs_ipc_of_match);
+
+static struct platform_driver qcom_apcs_ipc_driver = {
+ .probe = qcom_apcs_ipc_probe,
+ .remove = qcom_apcs_ipc_remove,
+ .driver = {
+ .name = "qcom_apcs_ipc",
+ .of_match_table = qcom_apcs_ipc_of_match,
+ },
+};
+
+static int __init qcom_apcs_ipc_init(void)
+{
+ return platform_driver_register(&qcom_apcs_ipc_driver);
+}
+postcore_initcall(qcom_apcs_ipc_init);
+
+static void __exit qcom_apcs_ipc_exit(void)
+{
+ platform_driver_unregister(&qcom_apcs_ipc_driver);
+}
+module_exit(qcom_apcs_ipc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm APCS IPC driver");
diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c
new file mode 100644
index 000000000..f597a1bd5
--- /dev/null
+++ b/drivers/mailbox/qcom-ipcc.c
@@ -0,0 +1,369 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/mailbox/qcom-ipcc.h>
+
+/* IPCC Register offsets */
+#define IPCC_REG_SEND_ID 0x0c
+#define IPCC_REG_RECV_ID 0x10
+#define IPCC_REG_RECV_SIGNAL_ENABLE 0x14
+#define IPCC_REG_RECV_SIGNAL_DISABLE 0x18
+#define IPCC_REG_RECV_SIGNAL_CLEAR 0x1c
+#define IPCC_REG_CLIENT_CLEAR 0x38
+
+#define IPCC_SIGNAL_ID_MASK GENMASK(15, 0)
+#define IPCC_CLIENT_ID_MASK GENMASK(31, 16)
+
+#define IPCC_NO_PENDING_IRQ GENMASK(31, 0)
+
+/**
+ * struct qcom_ipcc_chan_info - Per-mailbox-channel info
+ * @client_id: The client-id to which the interrupt has to be triggered
+ * @signal_id: The signal-id to which the interrupt has to be triggered
+ */
+struct qcom_ipcc_chan_info {
+ u16 client_id;
+ u16 signal_id;
+};
+
+/**
+ * struct qcom_ipcc - Holder for the mailbox driver
+ * @dev: Device associated with this instance
+ * @base: Base address of the IPCC frame associated to APSS
+ * @irq_domain: The irq_domain associated with this instance
+ * @chans: The mailbox channels array
+ * @mchan: The per-mailbox channel info array
+ * @mbox: The mailbox controller
+ * @num_chans: Number of @chans elements
+ * @irq: Summary irq
+ */
+struct qcom_ipcc {
+ struct device *dev;
+ void __iomem *base;
+ struct irq_domain *irq_domain;
+ struct mbox_chan *chans;
+ struct qcom_ipcc_chan_info *mchan;
+ struct mbox_controller mbox;
+ int num_chans;
+ int irq;
+};
+
+static inline struct qcom_ipcc *to_qcom_ipcc(struct mbox_controller *mbox)
+{
+ return container_of(mbox, struct qcom_ipcc, mbox);
+}
+
+static inline u32 qcom_ipcc_get_hwirq(u16 client_id, u16 signal_id)
+{
+ return FIELD_PREP(IPCC_CLIENT_ID_MASK, client_id) |
+ FIELD_PREP(IPCC_SIGNAL_ID_MASK, signal_id);
+}
+
+static irqreturn_t qcom_ipcc_irq_fn(int irq, void *data)
+{
+ struct qcom_ipcc *ipcc = data;
+ u32 hwirq;
+ int virq;
+
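+	/*
+	 * RECV_ID reports one pending (client-id, signal-id) pair at a time
+	 * and reads back as all-ones (IPCC_NO_PENDING_IRQ) once nothing is
+	 * pending; each pair is cleared and then dispatched to its virq.
+	 */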
+ for (;;) {
+ hwirq = readl(ipcc->base + IPCC_REG_RECV_ID);
+ if (hwirq == IPCC_NO_PENDING_IRQ)
+ break;
+
+ virq = irq_find_mapping(ipcc->irq_domain, hwirq);
+ writel(hwirq, ipcc->base + IPCC_REG_RECV_SIGNAL_CLEAR);
+ generic_handle_irq(virq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void qcom_ipcc_mask_irq(struct irq_data *irqd)
+{
+ struct qcom_ipcc *ipcc = irq_data_get_irq_chip_data(irqd);
+ irq_hw_number_t hwirq = irqd_to_hwirq(irqd);
+
+ writel(hwirq, ipcc->base + IPCC_REG_RECV_SIGNAL_DISABLE);
+}
+
+static void qcom_ipcc_unmask_irq(struct irq_data *irqd)
+{
+ struct qcom_ipcc *ipcc = irq_data_get_irq_chip_data(irqd);
+ irq_hw_number_t hwirq = irqd_to_hwirq(irqd);
+
+ writel(hwirq, ipcc->base + IPCC_REG_RECV_SIGNAL_ENABLE);
+}
+
+static struct irq_chip qcom_ipcc_irq_chip = {
+ .name = "ipcc",
+ .irq_mask = qcom_ipcc_mask_irq,
+ .irq_unmask = qcom_ipcc_unmask_irq,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+};
+
+static int qcom_ipcc_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct qcom_ipcc *ipcc = d->host_data;
+
+ irq_set_chip_and_handler(irq, &qcom_ipcc_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, ipcc);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static int qcom_ipcc_domain_xlate(struct irq_domain *d,
+ struct device_node *node, const u32 *intspec,
+ unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ if (intsize != 3)
+ return -EINVAL;
+
+ *out_hwirq = qcom_ipcc_get_hwirq(intspec[0], intspec[1]);
+ *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
+
+ return 0;
+}
+
+static const struct irq_domain_ops qcom_ipcc_irq_ops = {
+ .map = qcom_ipcc_domain_map,
+ .xlate = qcom_ipcc_domain_xlate,
+};
+
+static int qcom_ipcc_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct qcom_ipcc *ipcc = to_qcom_ipcc(chan->mbox);
+ struct qcom_ipcc_chan_info *mchan = chan->con_priv;
+ u32 hwirq;
+
+ hwirq = qcom_ipcc_get_hwirq(mchan->client_id, mchan->signal_id);
+ writel(hwirq, ipcc->base + IPCC_REG_SEND_ID);
+
+ return 0;
+}
+
+static void qcom_ipcc_mbox_shutdown(struct mbox_chan *chan)
+{
+ chan->con_priv = NULL;
+}
+
+static struct mbox_chan *qcom_ipcc_mbox_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *ph)
+{
+ struct qcom_ipcc *ipcc = to_qcom_ipcc(mbox);
+ struct qcom_ipcc_chan_info *mchan;
+ struct mbox_chan *chan;
+ struct device *dev;
+ int chan_id;
+
+ dev = ipcc->dev;
+
+ if (ph->args_count != 2)
+ return ERR_PTR(-EINVAL);
+
+ for (chan_id = 0; chan_id < mbox->num_chans; chan_id++) {
+ chan = &ipcc->chans[chan_id];
+ mchan = chan->con_priv;
+
+ if (!mchan)
+ break;
+ else if (mchan->client_id == ph->args[0] &&
+ mchan->signal_id == ph->args[1])
+ return ERR_PTR(-EBUSY);
+ }
+
+ if (chan_id >= mbox->num_chans)
+ return ERR_PTR(-EBUSY);
+
+ mchan = devm_kzalloc(dev, sizeof(*mchan), GFP_KERNEL);
+ if (!mchan)
+ return ERR_PTR(-ENOMEM);
+
+ mchan->client_id = ph->args[0];
+ mchan->signal_id = ph->args[1];
+ chan->con_priv = mchan;
+
+ return chan;
+}
+
+static const struct mbox_chan_ops ipcc_mbox_chan_ops = {
+ .send_data = qcom_ipcc_mbox_send_data,
+ .shutdown = qcom_ipcc_mbox_shutdown,
+};
+
+static int qcom_ipcc_setup_mbox(struct qcom_ipcc *ipcc,
+ struct device_node *controller_dn)
+{
+ struct of_phandle_args curr_ph;
+ struct device_node *client_dn;
+ struct mbox_controller *mbox;
+ struct device *dev = ipcc->dev;
+ int i, j, ret;
+
+ /*
+ * Find out the number of clients interested in this mailbox
+ * and create channels accordingly.
+ */
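+	/*
+	 * For reference, a client node typically points at this controller
+	 * with two cells, e.g. (constants from dt-bindings/mailbox/qcom-ipcc.h,
+	 * shown for illustration only):
+	 *
+	 *	mboxes = <&ipcc IPCC_CLIENT_AOP IPCC_MPROC_SIGNAL_GLINK_QMP>;
+	 */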
+ ipcc->num_chans = 0;
+ for_each_node_with_property(client_dn, "mboxes") {
+ if (!of_device_is_available(client_dn))
+ continue;
+ i = of_count_phandle_with_args(client_dn,
+ "mboxes", "#mbox-cells");
+ for (j = 0; j < i; j++) {
+ ret = of_parse_phandle_with_args(client_dn, "mboxes",
+ "#mbox-cells", j, &curr_ph);
+ of_node_put(curr_ph.np);
+ if (!ret && curr_ph.np == controller_dn)
+ ipcc->num_chans++;
+ }
+ }
+
+ /* If no clients are found, skip registering as a mbox controller */
+ if (!ipcc->num_chans)
+ return 0;
+
+ ipcc->chans = devm_kcalloc(dev, ipcc->num_chans,
+ sizeof(struct mbox_chan), GFP_KERNEL);
+ if (!ipcc->chans)
+ return -ENOMEM;
+
+ mbox = &ipcc->mbox;
+ mbox->dev = dev;
+ mbox->num_chans = ipcc->num_chans;
+ mbox->chans = ipcc->chans;
+ mbox->ops = &ipcc_mbox_chan_ops;
+ mbox->of_xlate = qcom_ipcc_mbox_xlate;
+ mbox->txdone_irq = false;
+ mbox->txdone_poll = false;
+
+ return devm_mbox_controller_register(dev, mbox);
+}
+
+static int qcom_ipcc_pm_resume(struct device *dev)
+{
+ struct qcom_ipcc *ipcc = dev_get_drvdata(dev);
+ u32 hwirq;
+ int virq;
+
+ hwirq = readl(ipcc->base + IPCC_REG_RECV_ID);
+ if (hwirq == IPCC_NO_PENDING_IRQ)
+ return 0;
+
+ virq = irq_find_mapping(ipcc->irq_domain, hwirq);
+
+ dev_dbg(dev, "virq: %d triggered client-id: %ld; signal-id: %ld\n", virq,
+ FIELD_GET(IPCC_CLIENT_ID_MASK, hwirq), FIELD_GET(IPCC_SIGNAL_ID_MASK, hwirq));
+
+ return 0;
+}
+
+static int qcom_ipcc_probe(struct platform_device *pdev)
+{
+ struct qcom_ipcc *ipcc;
+ static int id;
+ char *name;
+ int ret;
+
+ ipcc = devm_kzalloc(&pdev->dev, sizeof(*ipcc), GFP_KERNEL);
+ if (!ipcc)
+ return -ENOMEM;
+
+ ipcc->dev = &pdev->dev;
+
+ ipcc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ipcc->base))
+ return PTR_ERR(ipcc->base);
+
+ ipcc->irq = platform_get_irq(pdev, 0);
+ if (ipcc->irq < 0)
+ return ipcc->irq;
+
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "ipcc_%d", id++);
+ if (!name)
+ return -ENOMEM;
+
+ ipcc->irq_domain = irq_domain_add_tree(pdev->dev.of_node,
+ &qcom_ipcc_irq_ops, ipcc);
+ if (!ipcc->irq_domain)
+ return -ENOMEM;
+
+ ret = qcom_ipcc_setup_mbox(ipcc, pdev->dev.of_node);
+ if (ret)
+ goto err_mbox;
+
+ ret = devm_request_irq(&pdev->dev, ipcc->irq, qcom_ipcc_irq_fn,
+ IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND |
+ IRQF_NO_THREAD, name, ipcc);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register the irq: %d\n", ret);
+ goto err_req_irq;
+ }
+
+ platform_set_drvdata(pdev, ipcc);
+
+ return 0;
+
+err_req_irq:
+ if (ipcc->num_chans)
+ mbox_controller_unregister(&ipcc->mbox);
+err_mbox:
+ irq_domain_remove(ipcc->irq_domain);
+
+ return ret;
+}
+
+static int qcom_ipcc_remove(struct platform_device *pdev)
+{
+ struct qcom_ipcc *ipcc = platform_get_drvdata(pdev);
+
+ disable_irq_wake(ipcc->irq);
+ irq_domain_remove(ipcc->irq_domain);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_ipcc_of_match[] = {
+ { .compatible = "qcom,ipcc"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_ipcc_of_match);
+
+static const struct dev_pm_ops qcom_ipcc_dev_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, qcom_ipcc_pm_resume)
+};
+
+static struct platform_driver qcom_ipcc_driver = {
+ .probe = qcom_ipcc_probe,
+ .remove = qcom_ipcc_remove,
+ .driver = {
+ .name = "qcom-ipcc",
+ .of_match_table = qcom_ipcc_of_match,
+ .suppress_bind_attrs = true,
+ .pm = pm_sleep_ptr(&qcom_ipcc_dev_pm_ops),
+ },
+};
+
+static int __init qcom_ipcc_init(void)
+{
+ return platform_driver_register(&qcom_ipcc_driver);
+}
+arch_initcall(qcom_ipcc_init);
+
+MODULE_AUTHOR("Venkata Narendra Kumar Gutta <vnkgutta@codeaurora.org>");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. IPCC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/rockchip-mailbox.c b/drivers/mailbox/rockchip-mailbox.c
new file mode 100644
index 000000000..8ffad059e
--- /dev/null
+++ b/drivers/mailbox/rockchip-mailbox.c
@@ -0,0 +1,255 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#define MAILBOX_A2B_INTEN 0x00
+#define MAILBOX_A2B_STATUS 0x04
+#define MAILBOX_A2B_CMD(x) (0x08 + (x) * 8)
+#define MAILBOX_A2B_DAT(x) (0x0c + (x) * 8)
+
+#define MAILBOX_B2A_INTEN 0x28
+#define MAILBOX_B2A_STATUS 0x2C
+#define MAILBOX_B2A_CMD(x) (0x30 + (x) * 8)
+#define MAILBOX_B2A_DAT(x) (0x34 + (x) * 8)
+
+struct rockchip_mbox_msg {
+ u32 cmd;
+ int rx_size;
+};
+
+struct rockchip_mbox_data {
+ int num_chans;
+};
+
+struct rockchip_mbox_chan {
+ int idx;
+ int irq;
+ struct rockchip_mbox_msg *msg;
+ struct rockchip_mbox *mb;
+};
+
+struct rockchip_mbox {
+ struct mbox_controller mbox;
+ struct clk *pclk;
+ void __iomem *mbox_base;
+
+ /* The maximum size of buf for each channel */
+ u32 buf_size;
+
+ struct rockchip_mbox_chan *chans;
+};
+
+static int rockchip_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct rockchip_mbox *mb = dev_get_drvdata(chan->mbox->dev);
+ struct rockchip_mbox_msg *msg = data;
+ struct rockchip_mbox_chan *chans = mb->chans;
+
+ if (!msg)
+ return -EINVAL;
+
+ if (msg->rx_size > mb->buf_size) {
+		dev_err(mb->mbox.dev, "Transmit size over buf size (%d)\n",
+ mb->buf_size);
+ return -EINVAL;
+ }
+
+ dev_dbg(mb->mbox.dev, "Chan[%d]: A2B message, cmd 0x%08x\n",
+ chans->idx, msg->cmd);
+
+ mb->chans[chans->idx].msg = msg;
+
+ writel_relaxed(msg->cmd, mb->mbox_base + MAILBOX_A2B_CMD(chans->idx));
+ writel_relaxed(msg->rx_size, mb->mbox_base +
+ MAILBOX_A2B_DAT(chans->idx));
+
+ return 0;
+}
+
+static int rockchip_mbox_startup(struct mbox_chan *chan)
+{
+ struct rockchip_mbox *mb = dev_get_drvdata(chan->mbox->dev);
+
+ /* Enable all B2A interrupts */
+ writel_relaxed((1 << mb->mbox.num_chans) - 1,
+ mb->mbox_base + MAILBOX_B2A_INTEN);
+
+ return 0;
+}
+
+static void rockchip_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct rockchip_mbox *mb = dev_get_drvdata(chan->mbox->dev);
+ struct rockchip_mbox_chan *chans = mb->chans;
+
+ /* Disable all B2A interrupts */
+ writel_relaxed(0, mb->mbox_base + MAILBOX_B2A_INTEN);
+
+ mb->chans[chans->idx].msg = NULL;
+}
+
+static const struct mbox_chan_ops rockchip_mbox_chan_ops = {
+ .send_data = rockchip_mbox_send_data,
+ .startup = rockchip_mbox_startup,
+ .shutdown = rockchip_mbox_shutdown,
+};
+
+static irqreturn_t rockchip_mbox_irq(int irq, void *dev_id)
+{
+ int idx;
+ struct rockchip_mbox *mb = (struct rockchip_mbox *)dev_id;
+ u32 status = readl_relaxed(mb->mbox_base + MAILBOX_B2A_STATUS);
+
+ for (idx = 0; idx < mb->mbox.num_chans; idx++) {
+ if ((status & (1 << idx)) && (irq == mb->chans[idx].irq)) {
+ /* Clear mbox interrupt */
+ writel_relaxed(1 << idx,
+ mb->mbox_base + MAILBOX_B2A_STATUS);
+ return IRQ_WAKE_THREAD;
+ }
+ }
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t rockchip_mbox_isr(int irq, void *dev_id)
+{
+ int idx;
+ struct rockchip_mbox_msg *msg = NULL;
+ struct rockchip_mbox *mb = (struct rockchip_mbox *)dev_id;
+
+ for (idx = 0; idx < mb->mbox.num_chans; idx++) {
+ if (irq != mb->chans[idx].irq)
+ continue;
+
+ msg = mb->chans[idx].msg;
+ if (!msg) {
+ dev_err(mb->mbox.dev,
+ "Chan[%d]: B2A message is NULL\n", idx);
+ break; /* spurious */
+ }
+
+ mbox_chan_received_data(&mb->mbox.chans[idx], msg);
+ mb->chans[idx].msg = NULL;
+
+ dev_dbg(mb->mbox.dev, "Chan[%d]: B2A message, cmd 0x%08x\n",
+ idx, msg->cmd);
+
+ break;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static const struct rockchip_mbox_data rk3368_drv_data = {
+ .num_chans = 4,
+};
+
+static const struct of_device_id rockchip_mbox_of_match[] = {
+ { .compatible = "rockchip,rk3368-mailbox", .data = &rk3368_drv_data},
+ { },
+};
+MODULE_DEVICE_TABLE(of, rockchip_mbox_of_match);
+
+static int rockchip_mbox_probe(struct platform_device *pdev)
+{
+ struct rockchip_mbox *mb;
+ const struct rockchip_mbox_data *drv_data;
+ struct resource *res;
+ int ret, irq, i;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ drv_data = (const struct rockchip_mbox_data *) device_get_match_data(&pdev->dev);
+
+ mb = devm_kzalloc(&pdev->dev, sizeof(*mb), GFP_KERNEL);
+ if (!mb)
+ return -ENOMEM;
+
+ mb->chans = devm_kcalloc(&pdev->dev, drv_data->num_chans,
+ sizeof(*mb->chans), GFP_KERNEL);
+ if (!mb->chans)
+ return -ENOMEM;
+
+ mb->mbox.chans = devm_kcalloc(&pdev->dev, drv_data->num_chans,
+ sizeof(*mb->mbox.chans), GFP_KERNEL);
+ if (!mb->mbox.chans)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, mb);
+
+ mb->mbox.dev = &pdev->dev;
+ mb->mbox.num_chans = drv_data->num_chans;
+ mb->mbox.ops = &rockchip_mbox_chan_ops;
+ mb->mbox.txdone_irq = true;
+
+ mb->mbox_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(mb->mbox_base))
+ return PTR_ERR(mb->mbox_base);
+
+ /* Each channel has two buffers for A2B and B2A */
+ mb->buf_size = (size_t)resource_size(res) / (drv_data->num_chans * 2);
+
+ mb->pclk = devm_clk_get(&pdev->dev, "pclk_mailbox");
+ if (IS_ERR(mb->pclk)) {
+ ret = PTR_ERR(mb->pclk);
+ dev_err(&pdev->dev, "failed to get pclk_mailbox clock: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(mb->pclk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable pclk: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < mb->mbox.num_chans; i++) {
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq,
+ rockchip_mbox_irq,
+ rockchip_mbox_isr, IRQF_ONESHOT,
+ dev_name(&pdev->dev), mb);
+ if (ret < 0)
+ return ret;
+
+ mb->chans[i].idx = i;
+ mb->chans[i].irq = irq;
+ mb->chans[i].mb = mb;
+ mb->chans[i].msg = NULL;
+ }
+
+ ret = devm_mbox_controller_register(&pdev->dev, &mb->mbox);
+ if (ret < 0)
+ dev_err(&pdev->dev, "Failed to register mailbox: %d\n", ret);
+
+ return ret;
+}
+
+static struct platform_driver rockchip_mbox_driver = {
+ .probe = rockchip_mbox_probe,
+ .driver = {
+ .name = "rockchip-mailbox",
+ .of_match_table = rockchip_mbox_of_match,
+ },
+};
+
+module_platform_driver(rockchip_mbox_driver);
+
+MODULE_DESCRIPTION("Rockchip mailbox: communicate between CPU cores and MCU");
+MODULE_AUTHOR("Addy Ke <addy.ke@rock-chips.com>");
+MODULE_AUTHOR("Caesar Wang <wxt@rock-chips.com>");
diff --git a/drivers/mailbox/sprd-mailbox.c b/drivers/mailbox/sprd-mailbox.c
new file mode 100644
index 000000000..9ae57de77
--- /dev/null
+++ b/drivers/mailbox/sprd-mailbox.c
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Spreadtrum mailbox driver
+ *
+ * Copyright (c) 2020 Spreadtrum Communications Inc.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+#define SPRD_MBOX_ID 0x0
+#define SPRD_MBOX_MSG_LOW 0x4
+#define SPRD_MBOX_MSG_HIGH 0x8
+#define SPRD_MBOX_TRIGGER 0xc
+#define SPRD_MBOX_FIFO_RST 0x10
+#define SPRD_MBOX_FIFO_STS 0x14
+#define SPRD_MBOX_IRQ_STS 0x18
+#define SPRD_MBOX_IRQ_MSK 0x1c
+#define SPRD_MBOX_LOCK 0x20
+#define SPRD_MBOX_FIFO_DEPTH 0x24
+
+/* Bit and mask definition for inbox's SPRD_MBOX_FIFO_STS register */
+#define SPRD_INBOX_FIFO_DELIVER_MASK GENMASK(23, 16)
+#define SPRD_INBOX_FIFO_OVERLOW_MASK GENMASK(15, 8)
+#define SPRD_INBOX_FIFO_DELIVER_SHIFT 16
+#define SPRD_INBOX_FIFO_BUSY_MASK GENMASK(7, 0)
+
+/* Bit and mask definition for SPRD_MBOX_IRQ_STS register */
+#define SPRD_MBOX_IRQ_CLR BIT(0)
+
+/* Bit and mask definition for outbox's SPRD_MBOX_FIFO_STS register */
+#define SPRD_OUTBOX_FIFO_FULL BIT(2)
+#define SPRD_OUTBOX_FIFO_WR_SHIFT 16
+#define SPRD_OUTBOX_FIFO_RD_SHIFT 24
+#define SPRD_OUTBOX_FIFO_POS_MASK GENMASK(7, 0)
+
+/* Bit and mask definition for inbox's SPRD_MBOX_IRQ_MSK register */
+#define SPRD_INBOX_FIFO_BLOCK_IRQ BIT(0)
+#define SPRD_INBOX_FIFO_OVERFLOW_IRQ BIT(1)
+#define SPRD_INBOX_FIFO_DELIVER_IRQ BIT(2)
+#define SPRD_INBOX_FIFO_IRQ_MASK GENMASK(2, 0)
+
+/* Bit and mask definition for outbox's SPRD_MBOX_IRQ_MSK register */
+#define SPRD_OUTBOX_FIFO_NOT_EMPTY_IRQ BIT(0)
+#define SPRD_OUTBOX_FIFO_IRQ_MASK GENMASK(4, 0)
+
+#define SPRD_OUTBOX_BASE_SPAN 0x1000
+#define SPRD_MBOX_CHAN_MAX 8
+#define SPRD_SUPP_INBOX_ID_SC9863A 7
+
+struct sprd_mbox_priv {
+ struct mbox_controller mbox;
+ struct device *dev;
+ void __iomem *inbox_base;
+ void __iomem *outbox_base;
+ /* Base register address for supplementary outbox */
+ void __iomem *supp_base;
+ struct clk *clk;
+ u32 outbox_fifo_depth;
+
+ struct mutex lock;
+ u32 refcnt;
+ struct mbox_chan chan[SPRD_MBOX_CHAN_MAX];
+};
+
+static struct sprd_mbox_priv *to_sprd_mbox_priv(struct mbox_controller *mbox)
+{
+ return container_of(mbox, struct sprd_mbox_priv, mbox);
+}
+
+static u32 sprd_mbox_get_fifo_len(struct sprd_mbox_priv *priv, u32 fifo_sts)
+{
+ u32 wr_pos = (fifo_sts >> SPRD_OUTBOX_FIFO_WR_SHIFT) &
+ SPRD_OUTBOX_FIFO_POS_MASK;
+ u32 rd_pos = (fifo_sts >> SPRD_OUTBOX_FIFO_RD_SHIFT) &
+ SPRD_OUTBOX_FIFO_POS_MASK;
+ u32 fifo_len;
+
+ /*
+	 * If the read pointer equals the write pointer, the FIFO is either
+	 * full or empty; the FULL status bit distinguishes the two cases.
+	 * Otherwise the occupancy is the wrapped distance between them, e.g.
+	 * with depth 64, wr = 2 and rd = 60 give 64 - 60 + 2 = 6 entries.
+ */
+ if (wr_pos == rd_pos) {
+ if (fifo_sts & SPRD_OUTBOX_FIFO_FULL)
+ fifo_len = priv->outbox_fifo_depth;
+ else
+ fifo_len = 0;
+ } else if (wr_pos > rd_pos) {
+ fifo_len = wr_pos - rd_pos;
+ } else {
+ fifo_len = priv->outbox_fifo_depth - rd_pos + wr_pos;
+ }
+
+ return fifo_len;
+}
+
+static irqreturn_t do_outbox_isr(void __iomem *base, struct sprd_mbox_priv *priv)
+{
+ struct mbox_chan *chan;
+ u32 fifo_sts, fifo_len, msg[2];
+ int i, id;
+
+ fifo_sts = readl(base + SPRD_MBOX_FIFO_STS);
+
+ fifo_len = sprd_mbox_get_fifo_len(priv, fifo_sts);
+ if (!fifo_len) {
+ dev_warn_ratelimited(priv->dev, "spurious outbox interrupt\n");
+ return IRQ_NONE;
+ }
+
+ for (i = 0; i < fifo_len; i++) {
+ msg[0] = readl(base + SPRD_MBOX_MSG_LOW);
+ msg[1] = readl(base + SPRD_MBOX_MSG_HIGH);
+ id = readl(base + SPRD_MBOX_ID);
+
+ chan = &priv->chan[id];
+ if (chan->cl)
+ mbox_chan_received_data(chan, (void *)msg);
+ else
+ dev_warn_ratelimited(priv->dev,
+					"message has been dropped at ch[%d]\n", id);
+
+ /* Trigger to update outbox FIFO pointer */
+ writel(0x1, base + SPRD_MBOX_TRIGGER);
+ }
+
+	/* Clear irq status after reading all messages. */
+ writel(SPRD_MBOX_IRQ_CLR, base + SPRD_MBOX_IRQ_STS);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t sprd_mbox_outbox_isr(int irq, void *data)
+{
+ struct sprd_mbox_priv *priv = data;
+
+ return do_outbox_isr(priv->outbox_base, priv);
+}
+
+static irqreturn_t sprd_mbox_supp_isr(int irq, void *data)
+{
+ struct sprd_mbox_priv *priv = data;
+
+ return do_outbox_isr(priv->supp_base, priv);
+}
+
+static irqreturn_t sprd_mbox_inbox_isr(int irq, void *data)
+{
+ struct sprd_mbox_priv *priv = data;
+ struct mbox_chan *chan;
+ u32 fifo_sts, send_sts, busy, id;
+
+ fifo_sts = readl(priv->inbox_base + SPRD_MBOX_FIFO_STS);
+
+ /* Get the inbox data delivery status */
+ send_sts = (fifo_sts & SPRD_INBOX_FIFO_DELIVER_MASK) >>
+ SPRD_INBOX_FIFO_DELIVER_SHIFT;
+ if (!send_sts) {
+ dev_warn_ratelimited(priv->dev, "spurious inbox interrupt\n");
+ return IRQ_NONE;
+ }
+
+ while (send_sts) {
+ id = __ffs(send_sts);
+ send_sts &= (send_sts - 1);
+
+ chan = &priv->chan[id];
+
+ /*
+		 * Check whether the message has been fetched by the remote
+		 * target; if so, the transmission is complete.
+ */
+ busy = fifo_sts & SPRD_INBOX_FIFO_BUSY_MASK;
+ if (!(busy & BIT(id)))
+ mbox_chan_txdone(chan, 0);
+ }
+
+ /* Clear FIFO delivery and overflow status */
+ writel(fifo_sts &
+ (SPRD_INBOX_FIFO_DELIVER_MASK | SPRD_INBOX_FIFO_OVERLOW_MASK),
+ priv->inbox_base + SPRD_MBOX_FIFO_RST);
+
+ /* Clear irq status */
+ writel(SPRD_MBOX_IRQ_CLR, priv->inbox_base + SPRD_MBOX_IRQ_STS);
+
+ return IRQ_HANDLED;
+}
+
+static int sprd_mbox_send_data(struct mbox_chan *chan, void *msg)
+{
+ struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
+ unsigned long id = (unsigned long)chan->con_priv;
+ u32 *data = msg;
+
+	/* Write data into the inbox FIFO; only 8 bytes are supported per message */
+ writel(data[0], priv->inbox_base + SPRD_MBOX_MSG_LOW);
+ writel(data[1], priv->inbox_base + SPRD_MBOX_MSG_HIGH);
+
+ /* Set target core id */
+ writel(id, priv->inbox_base + SPRD_MBOX_ID);
+
+ /* Trigger remote request */
+ writel(0x1, priv->inbox_base + SPRD_MBOX_TRIGGER);
+
+ return 0;
+}
+
+static int sprd_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
+{
+ struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
+ unsigned long id = (unsigned long)chan->con_priv;
+ u32 busy;
+
+ timeout = jiffies + msecs_to_jiffies(timeout);
+
+ while (time_before(jiffies, timeout)) {
+ busy = readl(priv->inbox_base + SPRD_MBOX_FIFO_STS) &
+ SPRD_INBOX_FIFO_BUSY_MASK;
+ if (!(busy & BIT(id))) {
+ mbox_chan_txdone(chan, 0);
+ return 0;
+ }
+
+ udelay(1);
+ }
+
+ return -ETIME;
+}
+
+static int sprd_mbox_startup(struct mbox_chan *chan)
+{
+ struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
+ u32 val;
+
+ mutex_lock(&priv->lock);
+ if (priv->refcnt++ == 0) {
+ /* Select outbox FIFO mode and reset the outbox FIFO status */
+ writel(0x0, priv->outbox_base + SPRD_MBOX_FIFO_RST);
+
+ /* Enable inbox FIFO overflow and delivery interrupt */
+ val = readl(priv->inbox_base + SPRD_MBOX_IRQ_MSK);
+ val &= ~(SPRD_INBOX_FIFO_OVERFLOW_IRQ | SPRD_INBOX_FIFO_DELIVER_IRQ);
+ writel(val, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
+
+ /* Enable outbox FIFO not empty interrupt */
+ val = readl(priv->outbox_base + SPRD_MBOX_IRQ_MSK);
+ val &= ~SPRD_OUTBOX_FIFO_NOT_EMPTY_IRQ;
+ writel(val, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
+
+		/* Enable the supplementary outbox in the same way as the fundamental one */
+ if (priv->supp_base) {
+ writel(0x0, priv->supp_base + SPRD_MBOX_FIFO_RST);
+ val = readl(priv->supp_base + SPRD_MBOX_IRQ_MSK);
+ val &= ~SPRD_OUTBOX_FIFO_NOT_EMPTY_IRQ;
+ writel(val, priv->supp_base + SPRD_MBOX_IRQ_MSK);
+ }
+ }
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static void sprd_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
+
+ mutex_lock(&priv->lock);
+ if (--priv->refcnt == 0) {
+ /* Disable inbox & outbox interrupt */
+ writel(SPRD_INBOX_FIFO_IRQ_MASK, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
+ writel(SPRD_OUTBOX_FIFO_IRQ_MASK, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
+
+ if (priv->supp_base)
+ writel(SPRD_OUTBOX_FIFO_IRQ_MASK,
+ priv->supp_base + SPRD_MBOX_IRQ_MSK);
+ }
+ mutex_unlock(&priv->lock);
+}
+
+static const struct mbox_chan_ops sprd_mbox_ops = {
+ .send_data = sprd_mbox_send_data,
+ .flush = sprd_mbox_flush,
+ .startup = sprd_mbox_startup,
+ .shutdown = sprd_mbox_shutdown,
+};
+
+static void sprd_mbox_disable(void *data)
+{
+ struct sprd_mbox_priv *priv = data;
+
+ clk_disable_unprepare(priv->clk);
+}
+
+static int sprd_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sprd_mbox_priv *priv;
+ int ret, inbox_irq, outbox_irq, supp_irq;
+ unsigned long id, supp;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ mutex_init(&priv->lock);
+
+ /*
+	 * The Unisoc mailbox uses an inbox to send messages to the target
+	 * core, and one or more outboxes to receive messages from other
+	 * cores.
+	 *
+	 * Thus, in general, the mailbox controller supplies two different
+	 * register addresses and IRQ numbers for the inbox and the outbox.
+	 *
+	 * Optionally, a supplementary outbox can be enabled with an
+	 * independent FIFO and an extra interrupt.
+ */
+ priv->inbox_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->inbox_base))
+ return PTR_ERR(priv->inbox_base);
+
+ priv->outbox_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(priv->outbox_base))
+ return PTR_ERR(priv->outbox_base);
+
+ priv->clk = devm_clk_get(dev, "enable");
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "failed to get mailbox clock\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, sprd_mbox_disable, priv);
+ if (ret) {
+ dev_err(dev, "failed to add mailbox disable action\n");
+ return ret;
+ }
+
+ inbox_irq = platform_get_irq_byname(pdev, "inbox");
+ if (inbox_irq < 0)
+ return inbox_irq;
+
+ ret = devm_request_irq(dev, inbox_irq, sprd_mbox_inbox_isr,
+ IRQF_NO_SUSPEND, dev_name(dev), priv);
+ if (ret) {
+ dev_err(dev, "failed to request inbox IRQ: %d\n", ret);
+ return ret;
+ }
+
+ outbox_irq = platform_get_irq_byname(pdev, "outbox");
+ if (outbox_irq < 0)
+ return outbox_irq;
+
+ ret = devm_request_irq(dev, outbox_irq, sprd_mbox_outbox_isr,
+ IRQF_NO_SUSPEND, dev_name(dev), priv);
+ if (ret) {
+ dev_err(dev, "failed to request outbox IRQ: %d\n", ret);
+ return ret;
+ }
+
+ /* Supplementary outbox IRQ is optional */
+ supp_irq = platform_get_irq_byname(pdev, "supp-outbox");
+ if (supp_irq > 0) {
+ ret = devm_request_irq(dev, supp_irq, sprd_mbox_supp_isr,
+ IRQF_NO_SUSPEND, dev_name(dev), priv);
+ if (ret) {
+			dev_err(dev, "failed to request supplementary outbox IRQ: %d\n", ret);
+ return ret;
+ }
+
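+		/*
+		 * The supplementary outbox lies at a fixed offset from the
+		 * fundamental outbox; the per-SoC index comes from the match
+		 * data.
+		 */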
+ supp = (unsigned long) of_device_get_match_data(dev);
+ if (!supp) {
+ dev_err(dev, "no supplementary outbox specified\n");
+ return -ENODEV;
+ }
+ priv->supp_base = priv->outbox_base + (SPRD_OUTBOX_BASE_SPAN * supp);
+ }
+
+ /* Get the default outbox FIFO depth */
+ priv->outbox_fifo_depth =
+ readl(priv->outbox_base + SPRD_MBOX_FIFO_DEPTH) + 1;
+ priv->mbox.dev = dev;
+ priv->mbox.chans = &priv->chan[0];
+ priv->mbox.num_chans = SPRD_MBOX_CHAN_MAX;
+ priv->mbox.ops = &sprd_mbox_ops;
+ priv->mbox.txdone_irq = true;
+
+ for (id = 0; id < SPRD_MBOX_CHAN_MAX; id++)
+ priv->chan[id].con_priv = (void *)id;
+
+ ret = devm_mbox_controller_register(dev, &priv->mbox);
+ if (ret) {
+ dev_err(dev, "failed to register mailbox: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id sprd_mbox_of_match[] = {
+ { .compatible = "sprd,sc9860-mailbox" },
+ { .compatible = "sprd,sc9863a-mailbox",
+ .data = (void *)SPRD_SUPP_INBOX_ID_SC9863A },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sprd_mbox_of_match);
+
+static struct platform_driver sprd_mbox_driver = {
+ .driver = {
+ .name = "sprd-mailbox",
+ .of_match_table = sprd_mbox_of_match,
+ },
+ .probe = sprd_mbox_probe,
+};
+module_platform_driver(sprd_mbox_driver);
+
+MODULE_AUTHOR("Baolin Wang <baolin.wang@unisoc.com>");
+MODULE_DESCRIPTION("Spreadtrum mailbox driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/stm32-ipcc.c b/drivers/mailbox/stm32-ipcc.c
new file mode 100644
index 000000000..4ad3653f3
--- /dev/null
+++ b/drivers/mailbox/stm32-ipcc.c
@@ -0,0 +1,392 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
+ * Authors: Ludovic Barre <ludovic.barre@st.com> for STMicroelectronics.
+ * Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
+
+#define IPCC_XCR 0x000
+#define XCR_RXOIE BIT(0)
+#define XCR_TXOIE BIT(16)
+
+#define IPCC_XMR 0x004
+#define IPCC_XSCR 0x008
+#define IPCC_XTOYSR 0x00c
+
+#define IPCC_PROC_OFFST 0x010
+
+#define IPCC_HWCFGR 0x3f0
+#define IPCFGR_CHAN_MASK GENMASK(7, 0)
+
+#define IPCC_VER 0x3f4
+#define VER_MINREV_MASK GENMASK(3, 0)
+#define VER_MAJREV_MASK GENMASK(7, 4)
+
+#define RX_BIT_MASK GENMASK(15, 0)
+#define RX_BIT_CHAN(chan) BIT(chan)
+#define TX_BIT_SHIFT 16
+#define TX_BIT_MASK GENMASK(31, 16)
+#define TX_BIT_CHAN(chan) BIT(TX_BIT_SHIFT + (chan))
+
+#define STM32_MAX_PROCS 2
+
+enum {
+ IPCC_IRQ_RX,
+ IPCC_IRQ_TX,
+ IPCC_IRQ_NUM,
+};
+
+struct stm32_ipcc {
+ struct mbox_controller controller;
+ void __iomem *reg_base;
+ void __iomem *reg_proc;
+ struct clk *clk;
+ spinlock_t lock; /* protect access to IPCC registers */
+ int irqs[IPCC_IRQ_NUM];
+ u32 proc_id;
+ u32 n_chans;
+ u32 xcr;
+ u32 xmr;
+};
+
+static inline void stm32_ipcc_set_bits(spinlock_t *lock, void __iomem *reg,
+ u32 mask)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(lock, flags);
+ writel_relaxed(readl_relaxed(reg) | mask, reg);
+ spin_unlock_irqrestore(lock, flags);
+}
+
+static inline void stm32_ipcc_clr_bits(spinlock_t *lock, void __iomem *reg,
+ u32 mask)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(lock, flags);
+ writel_relaxed(readl_relaxed(reg) & ~mask, reg);
+ spin_unlock_irqrestore(lock, flags);
+}
+
+static irqreturn_t stm32_ipcc_rx_irq(int irq, void *data)
+{
+ struct stm32_ipcc *ipcc = data;
+ struct device *dev = ipcc->controller.dev;
+ u32 status, mr, tosr, chan;
+ irqreturn_t ret = IRQ_NONE;
+ int proc_offset;
+
+ /* read 'channel occupied' status from other proc */
+ proc_offset = ipcc->proc_id ? -IPCC_PROC_OFFST : IPCC_PROC_OFFST;
+ tosr = readl_relaxed(ipcc->reg_proc + proc_offset + IPCC_XTOYSR);
+ mr = readl_relaxed(ipcc->reg_proc + IPCC_XMR);
+
+ /* search for unmasked 'channel occupied' */
+ status = tosr & FIELD_GET(RX_BIT_MASK, ~mr);
+
+ for (chan = 0; chan < ipcc->n_chans; chan++) {
+ if (!(status & (1 << chan)))
+ continue;
+
+ dev_dbg(dev, "%s: chan:%d rx\n", __func__, chan);
+
+ mbox_chan_received_data(&ipcc->controller.chans[chan], NULL);
+
+ stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XSCR,
+ RX_BIT_CHAN(chan));
+
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static irqreturn_t stm32_ipcc_tx_irq(int irq, void *data)
+{
+ struct stm32_ipcc *ipcc = data;
+ struct device *dev = ipcc->controller.dev;
+ u32 status, mr, tosr, chan;
+ irqreturn_t ret = IRQ_NONE;
+
+ tosr = readl_relaxed(ipcc->reg_proc + IPCC_XTOYSR);
+ mr = readl_relaxed(ipcc->reg_proc + IPCC_XMR);
+
+ /* search for unmasked 'channel free' */
+ status = ~tosr & FIELD_GET(TX_BIT_MASK, ~mr);
+
+ for (chan = 0; chan < ipcc->n_chans ; chan++) {
+ if (!(status & (1 << chan)))
+ continue;
+
+ dev_dbg(dev, "%s: chan:%d tx\n", __func__, chan);
+
+ /* mask 'tx channel free' interrupt */
+ stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR,
+ TX_BIT_CHAN(chan));
+
+ mbox_chan_txdone(&ipcc->controller.chans[chan], 0);
+
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static int stm32_ipcc_send_data(struct mbox_chan *link, void *data)
+{
+ unsigned long chan = (unsigned long)link->con_priv;
+ struct stm32_ipcc *ipcc = container_of(link->mbox, struct stm32_ipcc,
+ controller);
+
+ dev_dbg(ipcc->controller.dev, "%s: chan:%lu\n", __func__, chan);
+
+ /* set channel n occupied */
+ stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XSCR,
+ TX_BIT_CHAN(chan));
+
+ /* unmask 'tx channel free' interrupt */
+ stm32_ipcc_clr_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR,
+ TX_BIT_CHAN(chan));
+
+ return 0;
+}
+
+static int stm32_ipcc_startup(struct mbox_chan *link)
+{
+ unsigned long chan = (unsigned long)link->con_priv;
+ struct stm32_ipcc *ipcc = container_of(link->mbox, struct stm32_ipcc,
+ controller);
+ int ret;
+
+ ret = clk_prepare_enable(ipcc->clk);
+ if (ret) {
+		dev_err(ipcc->controller.dev, "cannot enable the clock\n");
+ return ret;
+ }
+
+ /* unmask 'rx channel occupied' interrupt */
+ stm32_ipcc_clr_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR,
+ RX_BIT_CHAN(chan));
+
+ return 0;
+}
+
+static void stm32_ipcc_shutdown(struct mbox_chan *link)
+{
+ unsigned long chan = (unsigned long)link->con_priv;
+ struct stm32_ipcc *ipcc = container_of(link->mbox, struct stm32_ipcc,
+ controller);
+
+ /* mask rx/tx interrupt */
+ stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR,
+ RX_BIT_CHAN(chan) | TX_BIT_CHAN(chan));
+
+ clk_disable_unprepare(ipcc->clk);
+}
+
+static const struct mbox_chan_ops stm32_ipcc_ops = {
+ .send_data = stm32_ipcc_send_data,
+ .startup = stm32_ipcc_startup,
+ .shutdown = stm32_ipcc_shutdown,
+};
+
+static int stm32_ipcc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct stm32_ipcc *ipcc;
+ unsigned long i;
+ int ret;
+ u32 ip_ver;
+ static const char * const irq_name[] = {"rx", "tx"};
+ irq_handler_t irq_thread[] = {stm32_ipcc_rx_irq, stm32_ipcc_tx_irq};
+
+ if (!np) {
+ dev_err(dev, "No DT found\n");
+ return -ENODEV;
+ }
+
+ ipcc = devm_kzalloc(dev, sizeof(*ipcc), GFP_KERNEL);
+ if (!ipcc)
+ return -ENOMEM;
+
+ spin_lock_init(&ipcc->lock);
+
+ /* proc_id */
+ if (of_property_read_u32(np, "st,proc-id", &ipcc->proc_id)) {
+ dev_err(dev, "Missing st,proc-id\n");
+ return -ENODEV;
+ }
+
+ if (ipcc->proc_id >= STM32_MAX_PROCS) {
+		dev_err(dev, "Invalid proc_id (%u)\n", ipcc->proc_id);
+ return -EINVAL;
+ }
+
+ /* regs */
+ ipcc->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ipcc->reg_base))
+ return PTR_ERR(ipcc->reg_base);
+
+ ipcc->reg_proc = ipcc->reg_base + ipcc->proc_id * IPCC_PROC_OFFST;
+
+ /* clock */
+ ipcc->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ipcc->clk))
+ return PTR_ERR(ipcc->clk);
+
+ ret = clk_prepare_enable(ipcc->clk);
+ if (ret) {
+		dev_err(dev, "cannot enable the clock\n");
+ return ret;
+ }
+
+ /* irq */
+ for (i = 0; i < IPCC_IRQ_NUM; i++) {
+ ipcc->irqs[i] = platform_get_irq_byname(pdev, irq_name[i]);
+ if (ipcc->irqs[i] < 0) {
+ ret = ipcc->irqs[i];
+ goto err_clk;
+ }
+
+ ret = devm_request_threaded_irq(dev, ipcc->irqs[i], NULL,
+ irq_thread[i], IRQF_ONESHOT,
+ dev_name(dev), ipcc);
+ if (ret) {
+ dev_err(dev, "failed to request irq %lu (%d)\n", i, ret);
+ goto err_clk;
+ }
+ }
+
+ /* mask and enable rx/tx irq */
+ stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XMR,
+ RX_BIT_MASK | TX_BIT_MASK);
+ stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XCR,
+ XCR_RXOIE | XCR_TXOIE);
+
+ /* wakeup */
+ if (of_property_read_bool(np, "wakeup-source")) {
+ device_set_wakeup_capable(dev, true);
+
+ ret = dev_pm_set_wake_irq(dev, ipcc->irqs[IPCC_IRQ_RX]);
+ if (ret) {
+ dev_err(dev, "Failed to set wake up irq\n");
+ goto err_init_wkp;
+ }
+ }
+
+ /* mailbox controller */
+ ipcc->n_chans = readl_relaxed(ipcc->reg_base + IPCC_HWCFGR);
+ ipcc->n_chans &= IPCFGR_CHAN_MASK;
+
+ ipcc->controller.dev = dev;
+ ipcc->controller.txdone_irq = true;
+ ipcc->controller.ops = &stm32_ipcc_ops;
+ ipcc->controller.num_chans = ipcc->n_chans;
+ ipcc->controller.chans = devm_kcalloc(dev, ipcc->controller.num_chans,
+ sizeof(*ipcc->controller.chans),
+ GFP_KERNEL);
+ if (!ipcc->controller.chans) {
+ ret = -ENOMEM;
+ goto err_irq_wkp;
+ }
+
+ for (i = 0; i < ipcc->controller.num_chans; i++)
+ ipcc->controller.chans[i].con_priv = (void *)i;
+
+ ret = devm_mbox_controller_register(dev, &ipcc->controller);
+ if (ret)
+ goto err_irq_wkp;
+
+ platform_set_drvdata(pdev, ipcc);
+
+ ip_ver = readl_relaxed(ipcc->reg_base + IPCC_VER);
+
+ dev_info(dev, "ipcc rev:%ld.%ld enabled, %d chans, proc %d\n",
+ FIELD_GET(VER_MAJREV_MASK, ip_ver),
+ FIELD_GET(VER_MINREV_MASK, ip_ver),
+ ipcc->controller.num_chans, ipcc->proc_id);
+
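+	/*
+	 * Gate the clock while idle; each channel re-enables it in
+	 * stm32_ipcc_startup().
+	 */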
+ clk_disable_unprepare(ipcc->clk);
+ return 0;
+
+err_irq_wkp:
+ if (of_property_read_bool(np, "wakeup-source"))
+ dev_pm_clear_wake_irq(dev);
+err_init_wkp:
+ device_set_wakeup_capable(dev, false);
+err_clk:
+ clk_disable_unprepare(ipcc->clk);
+ return ret;
+}
+
+static int stm32_ipcc_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ if (of_property_read_bool(dev->of_node, "wakeup-source"))
+ dev_pm_clear_wake_irq(&pdev->dev);
+
+ device_set_wakeup_capable(dev, false);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int stm32_ipcc_suspend(struct device *dev)
+{
+ struct stm32_ipcc *ipcc = dev_get_drvdata(dev);
+
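+	/* Save the interrupt mask and control registers for restore on resume */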
+ ipcc->xmr = readl_relaxed(ipcc->reg_proc + IPCC_XMR);
+ ipcc->xcr = readl_relaxed(ipcc->reg_proc + IPCC_XCR);
+
+ return 0;
+}
+
+static int stm32_ipcc_resume(struct device *dev)
+{
+ struct stm32_ipcc *ipcc = dev_get_drvdata(dev);
+
+ writel_relaxed(ipcc->xmr, ipcc->reg_proc + IPCC_XMR);
+ writel_relaxed(ipcc->xcr, ipcc->reg_proc + IPCC_XCR);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(stm32_ipcc_pm_ops,
+ stm32_ipcc_suspend, stm32_ipcc_resume);
+
+static const struct of_device_id stm32_ipcc_of_match[] = {
+ { .compatible = "st,stm32mp1-ipcc" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_ipcc_of_match);
+
+static struct platform_driver stm32_ipcc_driver = {
+ .driver = {
+ .name = "stm32-ipcc",
+ .pm = &stm32_ipcc_pm_ops,
+ .of_match_table = stm32_ipcc_of_match,
+ },
+ .probe = stm32_ipcc_probe,
+ .remove = stm32_ipcc_remove,
+};
+
+module_platform_driver(stm32_ipcc_driver);
+
+MODULE_AUTHOR("Ludovic Barre <ludovic.barre@st.com>");
+MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
+MODULE_DESCRIPTION("STM32 IPCC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/sun6i-msgbox.c b/drivers/mailbox/sun6i-msgbox.c
new file mode 100644
index 000000000..7f8d93104
--- /dev/null
+++ b/drivers/mailbox/sun6i-msgbox.c
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2017-2019 Samuel Holland <samuel@sholland.org>
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+
+#define NUM_CHANS 8
+
+#define CTRL_REG(n) (0x0000 + 0x4 * ((n) / 4))
+#define CTRL_RX(n) BIT(0 + 8 * ((n) % 4))
+#define CTRL_TX(n) BIT(4 + 8 * ((n) % 4))
+
+#define REMOTE_IRQ_EN_REG 0x0040
+#define REMOTE_IRQ_STAT_REG 0x0050
+#define LOCAL_IRQ_EN_REG 0x0060
+#define LOCAL_IRQ_STAT_REG 0x0070
+
+#define RX_IRQ(n) BIT(0 + 2 * (n))
+#define RX_IRQ_MASK 0x5555
+#define TX_IRQ(n) BIT(1 + 2 * (n))
+#define TX_IRQ_MASK 0xaaaa
+
+#define FIFO_STAT_REG(n) (0x0100 + 0x4 * (n))
+#define FIFO_STAT_MASK GENMASK(0, 0)
+
+#define MSG_STAT_REG(n) (0x0140 + 0x4 * (n))
+#define MSG_STAT_MASK GENMASK(2, 0)
+
+#define MSG_DATA_REG(n) (0x0180 + 0x4 * (n))
+
+#define mbox_dbg(mbox, ...) dev_dbg((mbox)->controller.dev, __VA_ARGS__)
+
+struct sun6i_msgbox {
+ struct mbox_controller controller;
+ struct clk *clk;
+ spinlock_t lock;
+ void __iomem *regs;
+};
+
+static bool sun6i_msgbox_last_tx_done(struct mbox_chan *chan);
+static bool sun6i_msgbox_peek_data(struct mbox_chan *chan);
+
+static inline int channel_number(struct mbox_chan *chan)
+{
+ return chan - chan->mbox->chans;
+}
+
+static inline struct sun6i_msgbox *to_sun6i_msgbox(struct mbox_chan *chan)
+{
+ return chan->con_priv;
+}
+
+static irqreturn_t sun6i_msgbox_irq(int irq, void *dev_id)
+{
+ struct sun6i_msgbox *mbox = dev_id;
+ uint32_t status;
+ int n;
+
+ /* Only examine channels that are currently enabled. */
+ status = readl(mbox->regs + LOCAL_IRQ_EN_REG) &
+ readl(mbox->regs + LOCAL_IRQ_STAT_REG);
+
+ if (!(status & RX_IRQ_MASK))
+ return IRQ_NONE;
+
+ for (n = 0; n < NUM_CHANS; ++n) {
+ struct mbox_chan *chan = &mbox->controller.chans[n];
+
+ if (!(status & RX_IRQ(n)))
+ continue;
+
+ while (sun6i_msgbox_peek_data(chan)) {
+ uint32_t msg = readl(mbox->regs + MSG_DATA_REG(n));
+
+ mbox_dbg(mbox, "Channel %d received 0x%08x\n", n, msg);
+ mbox_chan_received_data(chan, &msg);
+ }
+
+ /* The IRQ can be cleared only once the FIFO is empty. */
+ writel(RX_IRQ(n), mbox->regs + LOCAL_IRQ_STAT_REG);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int sun6i_msgbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct sun6i_msgbox *mbox = to_sun6i_msgbox(chan);
+ int n = channel_number(chan);
+ uint32_t msg = *(uint32_t *)data;
+
+ /* Using a channel backwards gets the hardware into a bad state. */
+ if (WARN_ON_ONCE(!(readl(mbox->regs + CTRL_REG(n)) & CTRL_TX(n))))
+ return 0;
+
+ writel(msg, mbox->regs + MSG_DATA_REG(n));
+ mbox_dbg(mbox, "Channel %d sent 0x%08x\n", n, msg);
+
+ return 0;
+}
+
+static int sun6i_msgbox_startup(struct mbox_chan *chan)
+{
+ struct sun6i_msgbox *mbox = to_sun6i_msgbox(chan);
+ int n = channel_number(chan);
+
+ /* The coprocessor is responsible for setting channel directions. */
+ if (readl(mbox->regs + CTRL_REG(n)) & CTRL_RX(n)) {
+ /* Flush the receive FIFO. */
+ while (sun6i_msgbox_peek_data(chan))
+ readl(mbox->regs + MSG_DATA_REG(n));
+ writel(RX_IRQ(n), mbox->regs + LOCAL_IRQ_STAT_REG);
+
+ /* Enable the receive IRQ. */
+ spin_lock(&mbox->lock);
+ writel(readl(mbox->regs + LOCAL_IRQ_EN_REG) | RX_IRQ(n),
+ mbox->regs + LOCAL_IRQ_EN_REG);
+ spin_unlock(&mbox->lock);
+ }
+
+ mbox_dbg(mbox, "Channel %d startup complete\n", n);
+
+ return 0;
+}
+
+static void sun6i_msgbox_shutdown(struct mbox_chan *chan)
+{
+ struct sun6i_msgbox *mbox = to_sun6i_msgbox(chan);
+ int n = channel_number(chan);
+
+ if (readl(mbox->regs + CTRL_REG(n)) & CTRL_RX(n)) {
+ /* Disable the receive IRQ. */
+ spin_lock(&mbox->lock);
+ writel(readl(mbox->regs + LOCAL_IRQ_EN_REG) & ~RX_IRQ(n),
+ mbox->regs + LOCAL_IRQ_EN_REG);
+ spin_unlock(&mbox->lock);
+
+ /* Attempt to flush the FIFO until the IRQ is cleared. */
+ do {
+ while (sun6i_msgbox_peek_data(chan))
+ readl(mbox->regs + MSG_DATA_REG(n));
+ writel(RX_IRQ(n), mbox->regs + LOCAL_IRQ_STAT_REG);
+ } while (readl(mbox->regs + LOCAL_IRQ_STAT_REG) & RX_IRQ(n));
+ }
+
+ mbox_dbg(mbox, "Channel %d shutdown complete\n", n);
+}
+
+static bool sun6i_msgbox_last_tx_done(struct mbox_chan *chan)
+{
+ struct sun6i_msgbox *mbox = to_sun6i_msgbox(chan);
+ int n = channel_number(chan);
+
+ /*
+ * The hardware allows snooping on the remote user's IRQ statuses.
+ * We consider a message to be acknowledged only once the receive IRQ
+ * for that channel is cleared. Since the receive IRQ for a channel
+ * cannot be cleared until the FIFO for that channel is empty, this
+ * ensures that the message has actually been read. It also gives the
+ * recipient an opportunity to perform minimal processing before
+ * acknowledging the message.
+ */
+ return !(readl(mbox->regs + REMOTE_IRQ_STAT_REG) & RX_IRQ(n));
+}
+
+static bool sun6i_msgbox_peek_data(struct mbox_chan *chan)
+{
+ struct sun6i_msgbox *mbox = to_sun6i_msgbox(chan);
+ int n = channel_number(chan);
+
+ return readl(mbox->regs + MSG_STAT_REG(n)) & MSG_STAT_MASK;
+}
+
+static const struct mbox_chan_ops sun6i_msgbox_chan_ops = {
+ .send_data = sun6i_msgbox_send_data,
+ .startup = sun6i_msgbox_startup,
+ .shutdown = sun6i_msgbox_shutdown,
+ .last_tx_done = sun6i_msgbox_last_tx_done,
+ .peek_data = sun6i_msgbox_peek_data,
+};
+
+static int sun6i_msgbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mbox_chan *chans;
+ struct reset_control *reset;
+ struct sun6i_msgbox *mbox;
+ int i, ret;
+
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ chans = devm_kcalloc(dev, NUM_CHANS, sizeof(*chans), GFP_KERNEL);
+ if (!chans)
+ return -ENOMEM;
+
+ for (i = 0; i < NUM_CHANS; ++i)
+ chans[i].con_priv = mbox;
+
+ mbox->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(mbox->clk)) {
+ ret = PTR_ERR(mbox->clk);
+ dev_err(dev, "Failed to get clock: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(mbox->clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable clock: %d\n", ret);
+ return ret;
+ }
+
+ reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(reset)) {
+ ret = PTR_ERR(reset);
+ dev_err(dev, "Failed to get reset control: %d\n", ret);
+ goto err_disable_unprepare;
+ }
+
+ /*
+ * NOTE: We rely on platform firmware to preconfigure the channel
+ * directions, and we share this hardware block with other firmware
+ * that runs concurrently with Linux (e.g. a trusted monitor).
+ *
+ * Therefore, we do *not* assert the reset line if probing fails or
+ * when removing the device.
+ */
+ ret = reset_control_deassert(reset);
+ if (ret) {
+ dev_err(dev, "Failed to deassert reset: %d\n", ret);
+ goto err_disable_unprepare;
+ }
+
+ mbox->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mbox->regs)) {
+ ret = PTR_ERR(mbox->regs);
+ dev_err(dev, "Failed to map MMIO resource: %d\n", ret);
+ goto err_disable_unprepare;
+ }
+
+ /* Disable all IRQs for this end of the msgbox. */
+ writel(0, mbox->regs + LOCAL_IRQ_EN_REG);
+
+ ret = devm_request_irq(dev, irq_of_parse_and_map(dev->of_node, 0),
+ sun6i_msgbox_irq, 0, dev_name(dev), mbox);
+ if (ret) {
+ dev_err(dev, "Failed to register IRQ handler: %d\n", ret);
+ goto err_disable_unprepare;
+ }
+
+ mbox->controller.dev = dev;
+ mbox->controller.ops = &sun6i_msgbox_chan_ops;
+ mbox->controller.chans = chans;
+ mbox->controller.num_chans = NUM_CHANS;
+ mbox->controller.txdone_irq = false;
+ mbox->controller.txdone_poll = true;
+ mbox->controller.txpoll_period = 5;
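+	/* TX completion is detected by polling last_tx_done() every 5 ms */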
+
+ spin_lock_init(&mbox->lock);
+ platform_set_drvdata(pdev, mbox);
+
+ ret = mbox_controller_register(&mbox->controller);
+ if (ret) {
+ dev_err(dev, "Failed to register controller: %d\n", ret);
+ goto err_disable_unprepare;
+ }
+
+ return 0;
+
+err_disable_unprepare:
+ clk_disable_unprepare(mbox->clk);
+
+ return ret;
+}
+
+static int sun6i_msgbox_remove(struct platform_device *pdev)
+{
+ struct sun6i_msgbox *mbox = platform_get_drvdata(pdev);
+
+ mbox_controller_unregister(&mbox->controller);
+ /* See the comment in sun6i_msgbox_probe about the reset line. */
+ clk_disable_unprepare(mbox->clk);
+
+ return 0;
+}
+
+static const struct of_device_id sun6i_msgbox_of_match[] = {
+ { .compatible = "allwinner,sun6i-a31-msgbox", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sun6i_msgbox_of_match);
+
+static struct platform_driver sun6i_msgbox_driver = {
+ .driver = {
+ .name = "sun6i-msgbox",
+ .of_match_table = sun6i_msgbox_of_match,
+ },
+ .probe = sun6i_msgbox_probe,
+ .remove = sun6i_msgbox_remove,
+};
+module_platform_driver(sun6i_msgbox_driver);
+
+MODULE_AUTHOR("Samuel Holland <samuel@sholland.org>");
+MODULE_DESCRIPTION("Allwinner sun6i/sun8i/sun9i/sun50i Message Box");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c
new file mode 100644
index 000000000..fe29fc2ca
--- /dev/null
+++ b/drivers/mailbox/tegra-hsp.c
@@ -0,0 +1,963 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016-2023, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mailbox_controller.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+
+#include <soc/tegra/fuse.h>
+
+#include <dt-bindings/mailbox/tegra186-hsp.h>
+
+#include "mailbox.h"
+
+#define HSP_INT_IE(x) (0x100 + ((x) * 4))
+#define HSP_INT_IV 0x300
+#define HSP_INT_IR 0x304
+
+#define HSP_INT_EMPTY_SHIFT 0
+#define HSP_INT_EMPTY_MASK 0xff
+#define HSP_INT_FULL_SHIFT 8
+#define HSP_INT_FULL_MASK 0xff
+
+#define HSP_INT_DIMENSIONING 0x380
+#define HSP_nSM_SHIFT 0
+#define HSP_nSS_SHIFT 4
+#define HSP_nAS_SHIFT 8
+#define HSP_nDB_SHIFT 12
+#define HSP_nSI_SHIFT 16
+#define HSP_nINT_MASK 0xf
+
+#define HSP_DB_TRIGGER 0x0
+#define HSP_DB_ENABLE 0x4
+#define HSP_DB_RAW 0x8
+#define HSP_DB_PENDING 0xc
+
+#define HSP_SM_SHRD_MBOX 0x0
+#define HSP_SM_SHRD_MBOX_FULL BIT(31)
+#define HSP_SM_SHRD_MBOX_FULL_INT_IE 0x04
+#define HSP_SM_SHRD_MBOX_EMPTY_INT_IE 0x08
+
+#define HSP_SHRD_MBOX_TYPE1_TAG 0x40
+#define HSP_SHRD_MBOX_TYPE1_DATA0 0x48
+#define HSP_SHRD_MBOX_TYPE1_DATA1 0x4c
+#define HSP_SHRD_MBOX_TYPE1_DATA2 0x50
+#define HSP_SHRD_MBOX_TYPE1_DATA3 0x54
+
+#define HSP_DB_CCPLEX 1
+#define HSP_DB_BPMP 3
+#define HSP_DB_MAX 7
+
+#define HSP_MBOX_TYPE_MASK 0xff
+
+struct tegra_hsp_channel;
+struct tegra_hsp;
+
+struct tegra_hsp_channel {
+ struct tegra_hsp *hsp;
+ struct mbox_chan *chan;
+ void __iomem *regs;
+};
+
+struct tegra_hsp_doorbell {
+ struct tegra_hsp_channel channel;
+ struct list_head list;
+ const char *name;
+ unsigned int master;
+ unsigned int index;
+};
+
+struct tegra_hsp_sm_ops {
+ void (*send)(struct tegra_hsp_channel *channel, void *data);
+ void (*recv)(struct tegra_hsp_channel *channel);
+};
+
+struct tegra_hsp_mailbox {
+ struct tegra_hsp_channel channel;
+ const struct tegra_hsp_sm_ops *ops;
+ unsigned int index;
+ bool producer;
+};
+
+struct tegra_hsp_db_map {
+ const char *name;
+ unsigned int master;
+ unsigned int index;
+};
+
+struct tegra_hsp_soc {
+ const struct tegra_hsp_db_map *map;
+ bool has_per_mb_ie;
+ bool has_128_bit_mb;
+ unsigned int reg_stride;
+};
+
+struct tegra_hsp {
+ struct device *dev;
+ const struct tegra_hsp_soc *soc;
+ struct mbox_controller mbox_db;
+ struct mbox_controller mbox_sm;
+ void __iomem *regs;
+ unsigned int doorbell_irq;
+ unsigned int *shared_irqs;
+ unsigned int shared_irq;
+ unsigned int num_sm;
+ unsigned int num_as;
+ unsigned int num_ss;
+ unsigned int num_db;
+ unsigned int num_si;
+
+ spinlock_t lock;
+ struct lock_class_key lock_key;
+
+ struct list_head doorbells;
+ struct tegra_hsp_mailbox *mailboxes;
+
+ unsigned long mask;
+};
+
+static inline u32 tegra_hsp_readl(struct tegra_hsp *hsp, unsigned int offset)
+{
+ return readl(hsp->regs + offset);
+}
+
+static inline void tegra_hsp_writel(struct tegra_hsp *hsp, u32 value,
+ unsigned int offset)
+{
+ writel(value, hsp->regs + offset);
+}
+
+static inline u32 tegra_hsp_channel_readl(struct tegra_hsp_channel *channel,
+ unsigned int offset)
+{
+ return readl(channel->regs + offset);
+}
+
+static inline void tegra_hsp_channel_writel(struct tegra_hsp_channel *channel,
+ u32 value, unsigned int offset)
+{
+ writel(value, channel->regs + offset);
+}
+
+static bool tegra_hsp_doorbell_can_ring(struct tegra_hsp_doorbell *db)
+{
+ u32 value;
+
+ value = tegra_hsp_channel_readl(&db->channel, HSP_DB_ENABLE);
+
+ return (value & BIT(TEGRA_HSP_DB_MASTER_CCPLEX)) != 0;
+}
+
+static struct tegra_hsp_doorbell *
+__tegra_hsp_doorbell_get(struct tegra_hsp *hsp, unsigned int master)
+{
+ struct tegra_hsp_doorbell *entry;
+
+ list_for_each_entry(entry, &hsp->doorbells, list)
+ if (entry->master == master)
+ return entry;
+
+ return NULL;
+}
+
+static struct tegra_hsp_doorbell *
+tegra_hsp_doorbell_get(struct tegra_hsp *hsp, unsigned int master)
+{
+ struct tegra_hsp_doorbell *db;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsp->lock, flags);
+ db = __tegra_hsp_doorbell_get(hsp, master);
+ spin_unlock_irqrestore(&hsp->lock, flags);
+
+ return db;
+}
+
+static irqreturn_t tegra_hsp_doorbell_irq(int irq, void *data)
+{
+ struct tegra_hsp *hsp = data;
+ struct tegra_hsp_doorbell *db;
+ unsigned long master, value;
+
+ db = tegra_hsp_doorbell_get(hsp, TEGRA_HSP_DB_MASTER_CCPLEX);
+ if (!db)
+ return IRQ_NONE;
+
+ value = tegra_hsp_channel_readl(&db->channel, HSP_DB_PENDING);
+ tegra_hsp_channel_writel(&db->channel, value, HSP_DB_PENDING);
+
+ spin_lock(&hsp->lock);
+
+ for_each_set_bit(master, &value, hsp->mbox_db.num_chans) {
+ struct tegra_hsp_doorbell *db;
+
+ db = __tegra_hsp_doorbell_get(hsp, master);
+ /*
+ * Depending on the bootloader chain, the CCPLEX doorbell will
+ * have some doorbells enabled, which means that requesting an
+ * interrupt will immediately fire.
+ *
+ * In that case, db->channel.chan will still be NULL here and
+ * cause a crash if not properly guarded.
+ *
+ * It remains to be seen if ignoring the doorbell in that case
+ * is the correct solution.
+ */
+ if (db && db->channel.chan)
+ mbox_chan_received_data(db->channel.chan, NULL);
+ }
+
+ spin_unlock(&hsp->lock);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tegra_hsp_shared_irq(int irq, void *data)
+{
+ struct tegra_hsp *hsp = data;
+ unsigned long bit, mask;
+ u32 status;
+
+ status = tegra_hsp_readl(hsp, HSP_INT_IR) & hsp->mask;
+
+ /* process EMPTY interrupts first */
+ mask = (status >> HSP_INT_EMPTY_SHIFT) & HSP_INT_EMPTY_MASK;
+
+ for_each_set_bit(bit, &mask, hsp->num_sm) {
+ struct tegra_hsp_mailbox *mb = &hsp->mailboxes[bit];
+
+ if (mb->producer) {
+ /*
+ * Disable EMPTY interrupts until data is sent with
+ * the next message. These interrupts are level-
+ * triggered, so if we kept them enabled they would
+ * constantly trigger until we next write data into
+ * the message.
+ */
+ spin_lock(&hsp->lock);
+
+ hsp->mask &= ~BIT(HSP_INT_EMPTY_SHIFT + mb->index);
+ tegra_hsp_writel(hsp, hsp->mask,
+ HSP_INT_IE(hsp->shared_irq));
+
+ spin_unlock(&hsp->lock);
+
+ mbox_chan_txdone(mb->channel.chan, 0);
+ }
+ }
+
+ /* process FULL interrupts */
+ mask = (status >> HSP_INT_FULL_SHIFT) & HSP_INT_FULL_MASK;
+
+ for_each_set_bit(bit, &mask, hsp->num_sm) {
+ struct tegra_hsp_mailbox *mb = &hsp->mailboxes[bit];
+
+ if (!mb->producer)
+ mb->ops->recv(&mb->channel);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct tegra_hsp_channel *
+tegra_hsp_doorbell_create(struct tegra_hsp *hsp, const char *name,
+ unsigned int master, unsigned int index)
+{
+ struct tegra_hsp_doorbell *db;
+ unsigned int offset;
+ unsigned long flags;
+
+ db = devm_kzalloc(hsp->dev, sizeof(*db), GFP_KERNEL);
+ if (!db)
+ return ERR_PTR(-ENOMEM);
+
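+	/*
+	 * Doorbell registers live past the common page, the shared mailbox
+	 * pages (two mailboxes per 64 KiB page) and the semaphore pages.
+	 */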
+ offset = (1 + (hsp->num_sm / 2) + hsp->num_ss + hsp->num_as) * SZ_64K;
+ offset += index * hsp->soc->reg_stride;
+
+ db->channel.regs = hsp->regs + offset;
+ db->channel.hsp = hsp;
+
+ db->name = devm_kstrdup_const(hsp->dev, name, GFP_KERNEL);
+ db->master = master;
+ db->index = index;
+
+ spin_lock_irqsave(&hsp->lock, flags);
+ list_add_tail(&db->list, &hsp->doorbells);
+ spin_unlock_irqrestore(&hsp->lock, flags);
+
+ return &db->channel;
+}
+
+static int tegra_hsp_doorbell_send_data(struct mbox_chan *chan, void *data)
+{
+ struct tegra_hsp_doorbell *db = chan->con_priv;
+
+ tegra_hsp_channel_writel(&db->channel, 1, HSP_DB_TRIGGER);
+
+ return 0;
+}
+
+static int tegra_hsp_doorbell_startup(struct mbox_chan *chan)
+{
+ struct tegra_hsp_doorbell *db = chan->con_priv;
+ struct tegra_hsp *hsp = db->channel.hsp;
+ struct tegra_hsp_doorbell *ccplex;
+ unsigned long flags;
+ u32 value;
+
+ if (db->master >= chan->mbox->num_chans) {
+ dev_err(chan->mbox->dev,
+ "invalid master ID %u for HSP channel\n",
+ db->master);
+ return -EINVAL;
+ }
+
+ ccplex = tegra_hsp_doorbell_get(hsp, TEGRA_HSP_DB_MASTER_CCPLEX);
+ if (!ccplex)
+ return -ENODEV;
+
+ /*
+ * On simulation platforms the BPMP hasn't had a chance yet to mark
+ * the doorbell as ringable by the CCPLEX, so we want to skip extra
+ * checks here.
+ */
+ if (tegra_is_silicon() && !tegra_hsp_doorbell_can_ring(db))
+ return -ENODEV;
+
+ spin_lock_irqsave(&hsp->lock, flags);
+
+ value = tegra_hsp_channel_readl(&ccplex->channel, HSP_DB_ENABLE);
+ value |= BIT(db->master);
+ tegra_hsp_channel_writel(&ccplex->channel, value, HSP_DB_ENABLE);
+
+ spin_unlock_irqrestore(&hsp->lock, flags);
+
+ return 0;
+}
+
+static void tegra_hsp_doorbell_shutdown(struct mbox_chan *chan)
+{
+ struct tegra_hsp_doorbell *db = chan->con_priv;
+ struct tegra_hsp *hsp = db->channel.hsp;
+ struct tegra_hsp_doorbell *ccplex;
+ unsigned long flags;
+ u32 value;
+
+ ccplex = tegra_hsp_doorbell_get(hsp, TEGRA_HSP_DB_MASTER_CCPLEX);
+ if (!ccplex)
+ return;
+
+ spin_lock_irqsave(&hsp->lock, flags);
+
+ value = tegra_hsp_channel_readl(&ccplex->channel, HSP_DB_ENABLE);
+ value &= ~BIT(db->master);
+ tegra_hsp_channel_writel(&ccplex->channel, value, HSP_DB_ENABLE);
+
+ spin_unlock_irqrestore(&hsp->lock, flags);
+}
+
+static const struct mbox_chan_ops tegra_hsp_db_ops = {
+ .send_data = tegra_hsp_doorbell_send_data,
+ .startup = tegra_hsp_doorbell_startup,
+ .shutdown = tegra_hsp_doorbell_shutdown,
+};
+
+static void tegra_hsp_sm_send32(struct tegra_hsp_channel *channel, void *data)
+{
+ u32 value;
+
+ /* copy data and mark mailbox full */
+ value = (u32)(unsigned long)data;
+ value |= HSP_SM_SHRD_MBOX_FULL;
+
+ tegra_hsp_channel_writel(channel, value, HSP_SM_SHRD_MBOX);
+}
+
+static void tegra_hsp_sm_recv32(struct tegra_hsp_channel *channel)
+{
+ u32 value;
+ void *msg;
+
+ value = tegra_hsp_channel_readl(channel, HSP_SM_SHRD_MBOX);
+ value &= ~HSP_SM_SHRD_MBOX_FULL;
+ msg = (void *)(unsigned long)value;
+ mbox_chan_received_data(channel->chan, msg);
+
+ /*
+ * Need to clear all bits here since some producers, such as TCU, depend
+ * on fields in the register getting cleared by the consumer.
+ *
+ * The mailbox API doesn't give the consumers a way of doing that
+ * explicitly, so we have to make sure we cover all possible cases.
+ */
+ tegra_hsp_channel_writel(channel, 0x0, HSP_SM_SHRD_MBOX);
+}
+
+static const struct tegra_hsp_sm_ops tegra_hsp_sm_32bit_ops = {
+ .send = tegra_hsp_sm_send32,
+ .recv = tegra_hsp_sm_recv32,
+};
+
+static void tegra_hsp_sm_send128(struct tegra_hsp_channel *channel, void *data)
+{
+ u32 value[4];
+
+ memcpy(value, data, sizeof(value));
+
+ /* Copy data */
+ tegra_hsp_channel_writel(channel, value[0], HSP_SHRD_MBOX_TYPE1_DATA0);
+ tegra_hsp_channel_writel(channel, value[1], HSP_SHRD_MBOX_TYPE1_DATA1);
+ tegra_hsp_channel_writel(channel, value[2], HSP_SHRD_MBOX_TYPE1_DATA2);
+ tegra_hsp_channel_writel(channel, value[3], HSP_SHRD_MBOX_TYPE1_DATA3);
+
+ /* Update tag to mark mailbox full */
+ tegra_hsp_channel_writel(channel, HSP_SM_SHRD_MBOX_FULL,
+ HSP_SHRD_MBOX_TYPE1_TAG);
+}
+
+static void tegra_hsp_sm_recv128(struct tegra_hsp_channel *channel)
+{
+ u32 value[4];
+ void *msg;
+
+ value[0] = tegra_hsp_channel_readl(channel, HSP_SHRD_MBOX_TYPE1_DATA0);
+ value[1] = tegra_hsp_channel_readl(channel, HSP_SHRD_MBOX_TYPE1_DATA1);
+ value[2] = tegra_hsp_channel_readl(channel, HSP_SHRD_MBOX_TYPE1_DATA2);
+ value[3] = tegra_hsp_channel_readl(channel, HSP_SHRD_MBOX_TYPE1_DATA3);
+
+ msg = (void *)(unsigned long)value;
+ mbox_chan_received_data(channel->chan, msg);
+
+ /*
+ * Clear data registers and tag.
+ */
+ tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_DATA0);
+ tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_DATA1);
+ tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_DATA2);
+ tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_DATA3);
+ tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_TAG);
+}
+
+static const struct tegra_hsp_sm_ops tegra_hsp_sm_128bit_ops = {
+ .send = tegra_hsp_sm_send128,
+ .recv = tegra_hsp_sm_recv128,
+};
+
+static int tegra_hsp_mailbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct tegra_hsp_mailbox *mb = chan->con_priv;
+ struct tegra_hsp *hsp = mb->channel.hsp;
+ unsigned long flags;
+
+ if (WARN_ON(!mb->producer))
+ return -EPERM;
+
+ mb->ops->send(&mb->channel, data);
+
+ /* enable EMPTY interrupt for the shared mailbox */
+ spin_lock_irqsave(&hsp->lock, flags);
+
+ hsp->mask |= BIT(HSP_INT_EMPTY_SHIFT + mb->index);
+ tegra_hsp_writel(hsp, hsp->mask, HSP_INT_IE(hsp->shared_irq));
+
+ spin_unlock_irqrestore(&hsp->lock, flags);
+
+ return 0;
+}
+
+static int tegra_hsp_mailbox_flush(struct mbox_chan *chan,
+ unsigned long timeout)
+{
+ struct tegra_hsp_mailbox *mb = chan->con_priv;
+ struct tegra_hsp_channel *ch = &mb->channel;
+ u32 value;
+
+ timeout = jiffies + msecs_to_jiffies(timeout);
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_hsp_channel_readl(ch, HSP_SM_SHRD_MBOX);
+ if ((value & HSP_SM_SHRD_MBOX_FULL) == 0) {
+ mbox_chan_txdone(chan, 0);
+
+ /* Wait until channel is empty */
+ if (chan->active_req != NULL)
+ continue;
+
+ return 0;
+ }
+
+ udelay(1);
+ }
+
+ return -ETIME;
+}
+
+static int tegra_hsp_mailbox_startup(struct mbox_chan *chan)
+{
+ struct tegra_hsp_mailbox *mb = chan->con_priv;
+ struct tegra_hsp_channel *ch = &mb->channel;
+ struct tegra_hsp *hsp = mb->channel.hsp;
+ unsigned long flags;
+
+ chan->txdone_method = TXDONE_BY_IRQ;
+
+ /*
+ * Shared mailboxes start out as consumers by default. FULL and EMPTY
+ * interrupts are coalesced at the same shared interrupt.
+ *
+ * Keep EMPTY interrupts disabled at startup and only enable them when
+ * the mailbox is actually full. This is required because the FULL and
+ * EMPTY interrupts are level-triggered, so keeping EMPTY interrupts
+ * enabled all the time would cause an interrupt storm while mailboxes
+ * are idle.
+ */
+
+ spin_lock_irqsave(&hsp->lock, flags);
+
+ if (mb->producer)
+ hsp->mask &= ~BIT(HSP_INT_EMPTY_SHIFT + mb->index);
+ else
+ hsp->mask |= BIT(HSP_INT_FULL_SHIFT + mb->index);
+
+ tegra_hsp_writel(hsp, hsp->mask, HSP_INT_IE(hsp->shared_irq));
+
+ spin_unlock_irqrestore(&hsp->lock, flags);
+
+ if (hsp->soc->has_per_mb_ie) {
+ if (mb->producer)
+ tegra_hsp_channel_writel(ch, 0x0,
+ HSP_SM_SHRD_MBOX_EMPTY_INT_IE);
+ else
+ tegra_hsp_channel_writel(ch, 0x1,
+ HSP_SM_SHRD_MBOX_FULL_INT_IE);
+ }
+
+ return 0;
+}
+
+static void tegra_hsp_mailbox_shutdown(struct mbox_chan *chan)
+{
+ struct tegra_hsp_mailbox *mb = chan->con_priv;
+ struct tegra_hsp_channel *ch = &mb->channel;
+ struct tegra_hsp *hsp = mb->channel.hsp;
+ unsigned long flags;
+
+ if (hsp->soc->has_per_mb_ie) {
+ if (mb->producer)
+ tegra_hsp_channel_writel(ch, 0x0,
+ HSP_SM_SHRD_MBOX_EMPTY_INT_IE);
+ else
+ tegra_hsp_channel_writel(ch, 0x0,
+ HSP_SM_SHRD_MBOX_FULL_INT_IE);
+ }
+
+ spin_lock_irqsave(&hsp->lock, flags);
+
+ if (mb->producer)
+ hsp->mask &= ~BIT(HSP_INT_EMPTY_SHIFT + mb->index);
+ else
+ hsp->mask &= ~BIT(HSP_INT_FULL_SHIFT + mb->index);
+
+ tegra_hsp_writel(hsp, hsp->mask, HSP_INT_IE(hsp->shared_irq));
+
+ spin_unlock_irqrestore(&hsp->lock, flags);
+}
+
+static const struct mbox_chan_ops tegra_hsp_sm_ops = {
+ .send_data = tegra_hsp_mailbox_send_data,
+ .flush = tegra_hsp_mailbox_flush,
+ .startup = tegra_hsp_mailbox_startup,
+ .shutdown = tegra_hsp_mailbox_shutdown,
+};
+
+static struct mbox_chan *tegra_hsp_db_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *args)
+{
+ struct tegra_hsp *hsp = container_of(mbox, struct tegra_hsp, mbox_db);
+ unsigned int type = args->args[0], master = args->args[1];
+ struct tegra_hsp_channel *channel = ERR_PTR(-ENODEV);
+ struct tegra_hsp_doorbell *db;
+ struct mbox_chan *chan;
+ unsigned long flags;
+ unsigned int i;
+
+ if (type != TEGRA_HSP_MBOX_TYPE_DB || !hsp->doorbell_irq)
+ return ERR_PTR(-ENODEV);
+
+ db = tegra_hsp_doorbell_get(hsp, master);
+ if (db)
+ channel = &db->channel;
+
+ if (IS_ERR(channel))
+ return ERR_CAST(channel);
+
+ spin_lock_irqsave(&hsp->lock, flags);
+
+ for (i = 0; i < mbox->num_chans; i++) {
+ chan = &mbox->chans[i];
+ if (!chan->con_priv) {
+ channel->chan = chan;
+ chan->con_priv = db;
+ break;
+ }
+
+ chan = NULL;
+ }
+
+ spin_unlock_irqrestore(&hsp->lock, flags);
+
+ return chan ?: ERR_PTR(-EBUSY);
+}
+
+static struct mbox_chan *tegra_hsp_sm_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *args)
+{
+ struct tegra_hsp *hsp = container_of(mbox, struct tegra_hsp, mbox_sm);
+ unsigned int type = args->args[0], index;
+ struct tegra_hsp_mailbox *mb;
+
+ index = args->args[1] & TEGRA_HSP_SM_MASK;
+
+ if ((type & HSP_MBOX_TYPE_MASK) != TEGRA_HSP_MBOX_TYPE_SM ||
+ !hsp->shared_irqs || index >= hsp->num_sm)
+ return ERR_PTR(-ENODEV);
+
+ mb = &hsp->mailboxes[index];
+
+ if (type & TEGRA_HSP_MBOX_TYPE_SM_128BIT) {
+ if (!hsp->soc->has_128_bit_mb)
+ return ERR_PTR(-ENODEV);
+
+ mb->ops = &tegra_hsp_sm_128bit_ops;
+ } else {
+ mb->ops = &tegra_hsp_sm_32bit_ops;
+ }
+
+ if ((args->args[1] & TEGRA_HSP_SM_FLAG_TX) == 0)
+ mb->producer = false;
+ else
+ mb->producer = true;
+
+ return mb->channel.chan;
+}
+
+static int tegra_hsp_add_doorbells(struct tegra_hsp *hsp)
+{
+ const struct tegra_hsp_db_map *map = hsp->soc->map;
+ struct tegra_hsp_channel *channel;
+
+ while (map->name) {
+ channel = tegra_hsp_doorbell_create(hsp, map->name,
+ map->master, map->index);
+ if (IS_ERR(channel))
+ return PTR_ERR(channel);
+
+ map++;
+ }
+
+ return 0;
+}
+
+static int tegra_hsp_add_mailboxes(struct tegra_hsp *hsp, struct device *dev)
+{
+ int i;
+
+ hsp->mailboxes = devm_kcalloc(dev, hsp->num_sm, sizeof(*hsp->mailboxes),
+ GFP_KERNEL);
+ if (!hsp->mailboxes)
+ return -ENOMEM;
+
+ for (i = 0; i < hsp->num_sm; i++) {
+ struct tegra_hsp_mailbox *mb = &hsp->mailboxes[i];
+
+ mb->index = i;
+
+ mb->channel.hsp = hsp;
+ mb->channel.regs = hsp->regs + SZ_64K + i * SZ_32K;
+ mb->channel.chan = &hsp->mbox_sm.chans[i];
+ mb->channel.chan->con_priv = mb;
+ }
+
+ return 0;
+}
+
+static int tegra_hsp_request_shared_irq(struct tegra_hsp *hsp)
+{
+ unsigned int i, irq = 0;
+ int err;
+
+ for (i = 0; i < hsp->num_si; i++) {
+ irq = hsp->shared_irqs[i];
+ if (irq <= 0)
+ continue;
+
+ err = devm_request_irq(hsp->dev, irq, tegra_hsp_shared_irq, 0,
+ dev_name(hsp->dev), hsp);
+ if (err < 0) {
+ dev_err(hsp->dev, "failed to request interrupt: %d\n",
+ err);
+ continue;
+ }
+
+ hsp->shared_irq = i;
+
+ /* disable all interrupts */
+ tegra_hsp_writel(hsp, 0, HSP_INT_IE(hsp->shared_irq));
+
+ dev_dbg(hsp->dev, "interrupt requested: %u\n", irq);
+
+ break;
+ }
+
+ if (i == hsp->num_si) {
+ dev_err(hsp->dev, "failed to find available interrupt\n");
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+static int tegra_hsp_probe(struct platform_device *pdev)
+{
+ struct tegra_hsp *hsp;
+ unsigned int i;
+ u32 value;
+ int err;
+
+ hsp = devm_kzalloc(&pdev->dev, sizeof(*hsp), GFP_KERNEL);
+ if (!hsp)
+ return -ENOMEM;
+
+ hsp->dev = &pdev->dev;
+ hsp->soc = of_device_get_match_data(&pdev->dev);
+ INIT_LIST_HEAD(&hsp->doorbells);
+ spin_lock_init(&hsp->lock);
+
+ hsp->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(hsp->regs))
+ return PTR_ERR(hsp->regs);
+
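+	/*
+	 * The dimensioning register reports how many shared mailboxes,
+	 * semaphores, doorbells and shared interrupts this instance provides.
+	 */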
+ value = tegra_hsp_readl(hsp, HSP_INT_DIMENSIONING);
+ hsp->num_sm = (value >> HSP_nSM_SHIFT) & HSP_nINT_MASK;
+ hsp->num_ss = (value >> HSP_nSS_SHIFT) & HSP_nINT_MASK;
+ hsp->num_as = (value >> HSP_nAS_SHIFT) & HSP_nINT_MASK;
+ hsp->num_db = (value >> HSP_nDB_SHIFT) & HSP_nINT_MASK;
+ hsp->num_si = (value >> HSP_nSI_SHIFT) & HSP_nINT_MASK;
+
+ err = platform_get_irq_byname_optional(pdev, "doorbell");
+ if (err >= 0)
+ hsp->doorbell_irq = err;
+
+ if (hsp->num_si > 0) {
+ unsigned int count = 0;
+
+ hsp->shared_irqs = devm_kcalloc(&pdev->dev, hsp->num_si,
+ sizeof(*hsp->shared_irqs),
+ GFP_KERNEL);
+ if (!hsp->shared_irqs)
+ return -ENOMEM;
+
+ for (i = 0; i < hsp->num_si; i++) {
+ char *name;
+
+ name = kasprintf(GFP_KERNEL, "shared%u", i);
+ if (!name)
+ return -ENOMEM;
+
+ err = platform_get_irq_byname_optional(pdev, name);
+ if (err >= 0) {
+ hsp->shared_irqs[i] = err;
+ count++;
+ }
+
+ kfree(name);
+ }
+
+ if (count == 0) {
+ devm_kfree(&pdev->dev, hsp->shared_irqs);
+ hsp->shared_irqs = NULL;
+ }
+ }
+
+ /* setup the doorbell controller */
+ hsp->mbox_db.of_xlate = tegra_hsp_db_xlate;
+ hsp->mbox_db.num_chans = 32;
+ hsp->mbox_db.dev = &pdev->dev;
+ hsp->mbox_db.ops = &tegra_hsp_db_ops;
+
+ hsp->mbox_db.chans = devm_kcalloc(&pdev->dev, hsp->mbox_db.num_chans,
+ sizeof(*hsp->mbox_db.chans),
+ GFP_KERNEL);
+ if (!hsp->mbox_db.chans)
+ return -ENOMEM;
+
+ if (hsp->doorbell_irq) {
+ err = tegra_hsp_add_doorbells(hsp);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to add doorbells: %d\n",
+ err);
+ return err;
+ }
+ }
+
+ err = devm_mbox_controller_register(&pdev->dev, &hsp->mbox_db);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register doorbell mailbox: %d\n",
+ err);
+ return err;
+ }
+
+ /* setup the shared mailbox controller */
+ hsp->mbox_sm.of_xlate = tegra_hsp_sm_xlate;
+ hsp->mbox_sm.num_chans = hsp->num_sm;
+ hsp->mbox_sm.dev = &pdev->dev;
+ hsp->mbox_sm.ops = &tegra_hsp_sm_ops;
+
+ hsp->mbox_sm.chans = devm_kcalloc(&pdev->dev, hsp->mbox_sm.num_chans,
+ sizeof(*hsp->mbox_sm.chans),
+ GFP_KERNEL);
+ if (!hsp->mbox_sm.chans)
+ return -ENOMEM;
+
+ if (hsp->shared_irqs) {
+ err = tegra_hsp_add_mailboxes(hsp, &pdev->dev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to add mailboxes: %d\n",
+ err);
+ return err;
+ }
+ }
+
+ err = devm_mbox_controller_register(&pdev->dev, &hsp->mbox_sm);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register shared mailbox: %d\n",
+ err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, hsp);
+
+ if (hsp->doorbell_irq) {
+ err = devm_request_irq(&pdev->dev, hsp->doorbell_irq,
+ tegra_hsp_doorbell_irq, IRQF_NO_SUSPEND,
+ dev_name(&pdev->dev), hsp);
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "failed to request doorbell IRQ#%u: %d\n",
+ hsp->doorbell_irq, err);
+ return err;
+ }
+ }
+
+ if (hsp->shared_irqs) {
+ err = tegra_hsp_request_shared_irq(hsp);
+ if (err < 0)
+ return err;
+ }
+
+ lockdep_register_key(&hsp->lock_key);
+ lockdep_set_class(&hsp->lock, &hsp->lock_key);
+
+ return 0;
+}
+
+static int tegra_hsp_remove(struct platform_device *pdev)
+{
+ struct tegra_hsp *hsp = platform_get_drvdata(pdev);
+
+ lockdep_unregister_key(&hsp->lock_key);
+
+ return 0;
+}
+
+static int __maybe_unused tegra_hsp_resume(struct device *dev)
+{
+ struct tegra_hsp *hsp = dev_get_drvdata(dev);
+ unsigned int i;
+ struct tegra_hsp_doorbell *db;
+
+ list_for_each_entry(db, &hsp->doorbells, list) {
+ if (db->channel.chan)
+ tegra_hsp_doorbell_startup(db->channel.chan);
+ }
+
+ if (hsp->mailboxes) {
+ for (i = 0; i < hsp->num_sm; i++) {
+ struct tegra_hsp_mailbox *mb = &hsp->mailboxes[i];
+
+ if (mb->channel.chan->cl)
+ tegra_hsp_mailbox_startup(mb->channel.chan);
+ }
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops tegra_hsp_pm_ops = {
+ .resume_noirq = tegra_hsp_resume,
+};
+
+static const struct tegra_hsp_db_map tegra186_hsp_db_map[] = {
+ { "ccplex", TEGRA_HSP_DB_MASTER_CCPLEX, HSP_DB_CCPLEX, },
+ { "bpmp", TEGRA_HSP_DB_MASTER_BPMP, HSP_DB_BPMP, },
+ { /* sentinel */ }
+};
+
+static const struct tegra_hsp_soc tegra186_hsp_soc = {
+ .map = tegra186_hsp_db_map,
+ .has_per_mb_ie = false,
+ .has_128_bit_mb = false,
+ .reg_stride = 0x100,
+};
+
+static const struct tegra_hsp_soc tegra194_hsp_soc = {
+ .map = tegra186_hsp_db_map,
+ .has_per_mb_ie = true,
+ .has_128_bit_mb = false,
+ .reg_stride = 0x100,
+};
+
+static const struct tegra_hsp_soc tegra234_hsp_soc = {
+ .map = tegra186_hsp_db_map,
+ .has_per_mb_ie = false,
+ .has_128_bit_mb = true,
+ .reg_stride = 0x100,
+};
+
+static const struct tegra_hsp_soc tegra264_hsp_soc = {
+ .map = tegra186_hsp_db_map,
+ .has_per_mb_ie = false,
+ .has_128_bit_mb = true,
+ .reg_stride = 0x1000,
+};
+
+static const struct of_device_id tegra_hsp_match[] = {
+ { .compatible = "nvidia,tegra186-hsp", .data = &tegra186_hsp_soc },
+ { .compatible = "nvidia,tegra194-hsp", .data = &tegra194_hsp_soc },
+ { .compatible = "nvidia,tegra234-hsp", .data = &tegra234_hsp_soc },
+ { .compatible = "nvidia,tegra264-hsp", .data = &tegra264_hsp_soc },
+ { }
+};
+
+static struct platform_driver tegra_hsp_driver = {
+ .driver = {
+ .name = "tegra-hsp",
+ .of_match_table = tegra_hsp_match,
+ .pm = &tegra_hsp_pm_ops,
+ },
+ .probe = tegra_hsp_probe,
+ .remove = tegra_hsp_remove,
+};
+
+static int __init tegra_hsp_init(void)
+{
+ return platform_driver_register(&tegra_hsp_driver);
+}
+core_initcall(tegra_hsp_init);
diff --git a/drivers/mailbox/ti-msgmgr.c b/drivers/mailbox/ti-msgmgr.c
new file mode 100644
index 000000000..a94577f16
--- /dev/null
+++ b/drivers/mailbox/ti-msgmgr.c
@@ -0,0 +1,940 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments' Message Manager Driver
+ *
+ * Copyright (C) 2015-2022 Texas Instruments Incorporated - https://www.ti.com/
+ * Nishanth Menon
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/soc/ti/ti-msgmgr.h>
+
+#define Q_DATA_OFFSET(proxy, queue, reg) \
+ ((0x10000 * (proxy)) + (0x80 * (queue)) + ((reg) * 4))
+#define Q_STATE_OFFSET(queue) ((queue) * 0x4)
+#define Q_STATE_ENTRY_COUNT_MASK (0xFFF000)
+
+#define SPROXY_THREAD_OFFSET(tid) (0x1000 * (tid))
+#define SPROXY_THREAD_DATA_OFFSET(tid, reg) \
+ (SPROXY_THREAD_OFFSET(tid) + ((reg) * 0x4) + 0x4)
+
+#define SPROXY_THREAD_STATUS_OFFSET(tid) (SPROXY_THREAD_OFFSET(tid))
+
+#define SPROXY_THREAD_STATUS_COUNT_MASK (0xFF)
+
+#define SPROXY_THREAD_CTRL_OFFSET(tid) (0x1000 + SPROXY_THREAD_OFFSET(tid))
+#define SPROXY_THREAD_CTRL_DIR_MASK (0x1 << 31)
+
+/**
+ * struct ti_msgmgr_valid_queue_desc - SoC valid queues meant for this processor
+ * @queue_id: Queue Number for this path
+ * @proxy_id: Proxy ID representing the processor in SoC
+ * @is_tx: Is this a receive path?
+ */
+struct ti_msgmgr_valid_queue_desc {
+ u8 queue_id;
+ u8 proxy_id;
+ bool is_tx;
+};
+
+/**
+ * struct ti_msgmgr_desc - Description of message manager integration
+ * @queue_count: Number of Queues
+ * @max_message_size: Message size in bytes
+ * @max_messages: Number of messages
+ * @data_first_reg: First data register for proxy data region
+ * @data_last_reg: Last data register for proxy data region
+ * @status_cnt_mask: Mask for getting the status value
+ * @status_err_mask: Mask for getting the error value, if applicable
+ * @tx_polled: Whether a polled mechanism is used for tx
+ * @tx_poll_timeout_ms: Timeout in ms if polled
+ * @valid_queues: List of Valid queues that the processor can access
+ * @data_region_name: Name of the proxy data region
+ * @status_region_name: Name of the proxy status region
+ * @ctrl_region_name: Name of the proxy control region
+ * @num_valid_queues: Number of valid queues
+ * @is_sproxy: Is this a Secure Proxy instance?
+ *
+ * This structure is used in of match data to describe how integration
+ * for a specific compatible SoC is done.
+ */
+struct ti_msgmgr_desc {
+ u8 queue_count;
+ u8 max_message_size;
+ u8 max_messages;
+ u8 data_first_reg;
+ u8 data_last_reg;
+ u32 status_cnt_mask;
+ u32 status_err_mask;
+ bool tx_polled;
+ int tx_poll_timeout_ms;
+ const struct ti_msgmgr_valid_queue_desc *valid_queues;
+ const char *data_region_name;
+ const char *status_region_name;
+ const char *ctrl_region_name;
+ int num_valid_queues;
+ bool is_sproxy;
+};
+
+/**
+ * struct ti_queue_inst - Description of a queue instance
+ * @name: Queue Name
+ * @queue_id: Queue Identifier as mapped on SoC
+ * @proxy_id: Proxy Identifier as mapped on SoC
+ * @irq: IRQ for Rx Queue
+ * @is_tx: 'true' if transmit queue, else 'false'
+ * @queue_buff_start: First register of Data Buffer
+ * @queue_buff_end: Last (or confirmation) register of Data buffer
+ * @queue_state: Queue status register
+ * @queue_ctrl: Queue Control register
+ * @chan: Mailbox channel
+ * @rx_buff: Receive buffer pointer allocated at probe, max_message_size
+ * @polled_rx_mode: Use polling for rx instead of interrupts
+ */
+struct ti_queue_inst {
+ char name[30];
+ u8 queue_id;
+ u8 proxy_id;
+ int irq;
+ bool is_tx;
+ void __iomem *queue_buff_start;
+ void __iomem *queue_buff_end;
+ void __iomem *queue_state;
+ void __iomem *queue_ctrl;
+ struct mbox_chan *chan;
+ u32 *rx_buff;
+ bool polled_rx_mode;
+};
+
+/**
+ * struct ti_msgmgr_inst - Description of a Message Manager Instance
+ * @dev: device pointer corresponding to the Message Manager instance
+ * @desc: Description of the SoC integration
+ * @queue_proxy_region: Queue proxy region where queue buffers are located
+ * @queue_state_debug_region: Queue status register regions
+ * @queue_ctrl_region: Queue Control register regions
+ * @num_valid_queues: Number of valid queues defined for the processor
+ * Note: other queues are probably reserved for other processors
+ * in the SoC.
+ * @qinsts: Array of valid Queue Instances for the Processor
+ * @mbox: Mailbox Controller
+ * @chans: Array for channels corresponding to the Queue Instances.
+ */
+struct ti_msgmgr_inst {
+ struct device *dev;
+ const struct ti_msgmgr_desc *desc;
+ void __iomem *queue_proxy_region;
+ void __iomem *queue_state_debug_region;
+ void __iomem *queue_ctrl_region;
+ u8 num_valid_queues;
+ struct ti_queue_inst *qinsts;
+ struct mbox_controller mbox;
+ struct mbox_chan *chans;
+};
+
+/**
+ * ti_msgmgr_queue_get_num_messages() - Get the number of pending messages
+ * @d: Description of message manager
+ * @qinst: Queue instance for which we check the number of pending messages
+ *
+ * Return: number of messages pending in the queue (0 == no pending messages)
+ */
+static inline int
+ti_msgmgr_queue_get_num_messages(const struct ti_msgmgr_desc *d,
+ struct ti_queue_inst *qinst)
+{
+ u32 val;
+ u32 status_cnt_mask = d->status_cnt_mask;
+
+ /*
+	 * We cannot use relaxed operations here - the update may happen in
+	 * real time.
+ */
+ val = readl(qinst->queue_state) & status_cnt_mask;
+ val >>= __ffs(status_cnt_mask);
+
+ return val;
+}
+
+/**
+ * ti_msgmgr_queue_is_error() - Check to see if there is queue error
+ * @d: Description of message manager
+ * @qinst: Queue instance for which we check the number of pending messages
+ *
+ * Return: true if error, else false
+ */
+static inline bool ti_msgmgr_queue_is_error(const struct ti_msgmgr_desc *d,
+ struct ti_queue_inst *qinst)
+{
+ u32 val;
+
+ /* Msgmgr has no error detection */
+ if (!d->is_sproxy)
+ return false;
+
+ /*
+	 * We cannot use relaxed operations here - the update may happen in
+	 * real time.
+ */
+ val = readl(qinst->queue_state) & d->status_err_mask;
+
+ return val ? true : false;
+}
+
+static int ti_msgmgr_queue_rx_data(struct mbox_chan *chan, struct ti_queue_inst *qinst,
+ const struct ti_msgmgr_desc *desc)
+{
+ int num_words;
+ struct ti_msgmgr_message message;
+ void __iomem *data_reg;
+ u32 *word_data;
+
+ /*
+	 * The protocol used to communicate with the remote producer is not
+	 * known here - 0 could be valid data, so no assumption is made about
+	 * how many bytes should be read. Read the full message and let the
+	 * client figure out the length.
+ */
+ message.len = desc->max_message_size;
+ message.buf = (u8 *)qinst->rx_buff;
+
+ /*
+ * NOTE about register access involved here:
+ * the hardware block is implemented with 32bit access operations and no
+ * support for data splitting. We don't want the hardware to misbehave
+ * with sub 32bit access - For example: if the last register read is
+ * split into byte wise access, it can result in the queue getting
+ * stuck or indeterminate behavior. An out of order read operation may
+ * result in weird data results as well.
+ * Hence, we do not use memcpy_fromio or __ioread32_copy here, instead
+ * we depend on readl for the purpose.
+ *
+ * Also note that the final register read automatically marks the
+ * queue message as read.
+ */
+ for (data_reg = qinst->queue_buff_start, word_data = qinst->rx_buff,
+ num_words = (desc->max_message_size / sizeof(u32));
+ num_words; num_words--, data_reg += sizeof(u32), word_data++)
+ *word_data = readl(data_reg);
+
+ /*
+ * Last register read automatically clears the IRQ if only 1 message
+ * is pending - so send the data up the stack..
+ * NOTE: Client is expected to be as optimal as possible, since
+ * we invoke the handler in IRQ context.
+ */
+ mbox_chan_received_data(chan, (void *)&message);
+
+ return 0;
+}
+
+static int ti_msgmgr_queue_rx_poll_timeout(struct mbox_chan *chan, int timeout_us)
+{
+ struct device *dev = chan->mbox->dev;
+ struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
+ struct ti_queue_inst *qinst = chan->con_priv;
+ const struct ti_msgmgr_desc *desc = inst->desc;
+ int msg_count;
+ int ret;
+
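+ /*
+ * Poll the queue state register every 10 microseconds until the
+ * pending-message count bits become non-zero or timeout_us expires,
+ * then drain the message exactly like the interrupt path does.
+ */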
+ ret = readl_poll_timeout_atomic(qinst->queue_state, msg_count,
+ (msg_count & desc->status_cnt_mask),
+ 10, timeout_us);
+ if (ret != 0)
+ return ret;
+
+ ti_msgmgr_queue_rx_data(chan, qinst, desc);
+
+ return 0;
+}
+
+/**
+ * ti_msgmgr_queue_rx_interrupt() - Interrupt handler for receive Queue
+ * @irq: Interrupt number
+ * @p: Channel Pointer
+ *
+ * Return: -EINVAL if there is no instance
+ * IRQ_NONE if the interrupt is not ours.
+ * IRQ_HANDLED if the rx interrupt was successfully handled.
+ */
+static irqreturn_t ti_msgmgr_queue_rx_interrupt(int irq, void *p)
+{
+ struct mbox_chan *chan = p;
+ struct device *dev = chan->mbox->dev;
+ struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
+ struct ti_queue_inst *qinst = chan->con_priv;
+ const struct ti_msgmgr_desc *desc;
+ int msg_count;
+
+ if (WARN_ON(!inst)) {
+ dev_err(dev, "no platform drv data??\n");
+ return -EINVAL;
+ }
+
+ /* Do I have an invalid interrupt source? */
+ if (qinst->is_tx) {
+ dev_err(dev, "Cannot handle rx interrupt on tx channel %s\n",
+ qinst->name);
+ return IRQ_NONE;
+ }
+
+ desc = inst->desc;
+ if (ti_msgmgr_queue_is_error(desc, qinst)) {
+ dev_err(dev, "Error on Rx channel %s\n", qinst->name);
+ return IRQ_NONE;
+ }
+
+ /* Do I actually have messages to read? */
+ msg_count = ti_msgmgr_queue_get_num_messages(desc, qinst);
+ if (!msg_count) {
+ /* Shared IRQ? */
+ dev_dbg(dev, "Spurious event - 0 pending data!\n");
+ return IRQ_NONE;
+ }
+
+ ti_msgmgr_queue_rx_data(chan, qinst, desc);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ti_msgmgr_queue_peek_data() - Peek to see if there are any rx messages.
+ * @chan: Channel Pointer
+ *
+ * Return: 'true' if there is pending rx data, 'false' if there is none.
+ */
+static bool ti_msgmgr_queue_peek_data(struct mbox_chan *chan)
+{
+ struct ti_queue_inst *qinst = chan->con_priv;
+ struct device *dev = chan->mbox->dev;
+ struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
+ const struct ti_msgmgr_desc *desc = inst->desc;
+ int msg_count;
+
+ if (qinst->is_tx)
+ return false;
+
+ if (ti_msgmgr_queue_is_error(desc, qinst)) {
+ dev_err(dev, "Error on channel %s\n", qinst->name);
+ return false;
+ }
+
+ msg_count = ti_msgmgr_queue_get_num_messages(desc, qinst);
+
+ return msg_count ? true : false;
+}
+
+/**
+ * ti_msgmgr_last_tx_done() - See if all the tx messages are sent
+ * @chan: Channel pointer
+ *
+ * Return: 'true' if no pending tx data, 'false' if there are any.
+ */
+static bool ti_msgmgr_last_tx_done(struct mbox_chan *chan)
+{
+ struct ti_queue_inst *qinst = chan->con_priv;
+ struct device *dev = chan->mbox->dev;
+ struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
+ const struct ti_msgmgr_desc *desc = inst->desc;
+ int msg_count;
+
+ if (!qinst->is_tx)
+ return false;
+
+ if (ti_msgmgr_queue_is_error(desc, qinst)) {
+ dev_err(dev, "Error on channel %s\n", qinst->name);
+ return false;
+ }
+
+ msg_count = ti_msgmgr_queue_get_num_messages(desc, qinst);
+
+ if (desc->is_sproxy) {
+ /* In secure proxy, msg_count indicates how many we can send */
+ return msg_count ? true : false;
+ }
+
+ /* if we have any messages pending.. */
+ return msg_count ? false : true;
+}
+
+static bool ti_msgmgr_chan_has_polled_queue_rx(struct mbox_chan *chan)
+{
+ struct ti_queue_inst *qinst;
+
+ if (!chan)
+ return false;
+
+ qinst = chan->con_priv;
+ return qinst->polled_rx_mode;
+}
+
+/**
+ * ti_msgmgr_send_data() - Send data
+ * @chan: Channel Pointer
+ * @data: ti_msgmgr_message * Message Pointer
+ *
+ * Return: 0 if all goes well, else an appropriate error value.
+ */
+static int ti_msgmgr_send_data(struct mbox_chan *chan, void *data)
+{
+ struct device *dev = chan->mbox->dev;
+ struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
+ const struct ti_msgmgr_desc *desc;
+ struct ti_queue_inst *qinst = chan->con_priv;
+ int num_words, trail_bytes;
+ struct ti_msgmgr_message *message = data;
+ void __iomem *data_reg;
+ u32 *word_data;
+ int ret = 0;
+
+ if (WARN_ON(!inst)) {
+ dev_err(dev, "no platform drv data??\n");
+ return -EINVAL;
+ }
+ desc = inst->desc;
+
+ if (ti_msgmgr_queue_is_error(desc, qinst)) {
+ dev_err(dev, "Error on channel %s\n", qinst->name);
+ return -EIO;
+ }
+
+ if (desc->max_message_size < message->len) {
+ dev_err(dev, "Queue %s message length %zu > max %d\n",
+ qinst->name, message->len, desc->max_message_size);
+ return -EINVAL;
+ }
+
+ /* NOTE: Constraints similar to rx path exists here as well */
+ for (data_reg = qinst->queue_buff_start,
+ num_words = message->len / sizeof(u32),
+ word_data = (u32 *)message->buf;
+ num_words; num_words--, data_reg += sizeof(u32), word_data++)
+ writel(*word_data, data_reg);
+
+ trail_bytes = message->len % sizeof(u32);
+ if (trail_bytes) {
+ u32 data_trail = *word_data;
+
+ /* Ensure all unused data is 0 */
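+ /* e.g. trail_bytes == 2: mask = 0xFFFFFFFF >> 16 = 0x0000FFFF */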
+ data_trail &= 0xFFFFFFFF >> (8 * (sizeof(u32) - trail_bytes));
+ writel(data_trail, data_reg);
+ data_reg += sizeof(u32);
+ }
+
+ /*
+ * 'data_reg' indicates next register to write. If we did not already
+ * write on tx complete reg(last reg), we must do so for transmit
+ * In addition, we also need to make sure all intermediate data
+ * registers(if any required), are reset to 0 for TISCI backward
+ * compatibility to be maintained.
+ */
+ while (data_reg <= qinst->queue_buff_end) {
+ writel(0, data_reg);
+ data_reg += sizeof(u32);
+ }
+
+ /* If we are in polled mode, wait for a response before proceeding */
+ if (ti_msgmgr_chan_has_polled_queue_rx(message->chan_rx))
+ ret = ti_msgmgr_queue_rx_poll_timeout(message->chan_rx,
+ message->timeout_rx_ms * 1000);
+
+ return ret;
+}
+
+/**
+ * ti_msgmgr_queue_rx_irq_req() - RX IRQ request
+ * @dev: device pointer
+ * @d: descriptor for ti_msgmgr
+ * @qinst: Queue instance
+ * @chan: Channel pointer
+ *
+ * Return: 0 if all goes well, else corresponding error value
+ */
+static int ti_msgmgr_queue_rx_irq_req(struct device *dev,
+ const struct ti_msgmgr_desc *d,
+ struct ti_queue_inst *qinst,
+ struct mbox_chan *chan)
+{
+ int ret = 0;
+ char of_rx_irq_name[7];
+ struct device_node *np;
+
+ snprintf(of_rx_irq_name, sizeof(of_rx_irq_name),
+ "rx_%03d", d->is_sproxy ? qinst->proxy_id : qinst->queue_id);
+
+ /* Look up the IRQ if we do not already have it */
+ if (qinst->irq < 0) {
+ np = of_node_get(dev->of_node);
+ if (!np)
+ return -ENODATA;
+ qinst->irq = of_irq_get_byname(np, of_rx_irq_name);
+ of_node_put(np);
+
+ if (qinst->irq < 0) {
+ dev_err(dev,
+ "QID %d PID %d:No IRQ[%s]: %d\n",
+ qinst->queue_id, qinst->proxy_id,
+ of_rx_irq_name, qinst->irq);
+ return qinst->irq;
+ }
+ }
+
+ /* With the expectation that the IRQ might be shared in SoC */
+ ret = request_irq(qinst->irq, ti_msgmgr_queue_rx_interrupt,
+ IRQF_SHARED, qinst->name, chan);
+ if (ret) {
+ dev_err(dev, "Unable to get IRQ %d on %s(res=%d)\n",
+ qinst->irq, qinst->name, ret);
+ }
+
+ return ret;
+}
+
+/**
+ * ti_msgmgr_queue_startup() - Startup queue
+ * @chan: Channel pointer
+ *
+ * Return: 0 if all goes well, else return corresponding error value
+ */
+static int ti_msgmgr_queue_startup(struct mbox_chan *chan)
+{
+ struct device *dev = chan->mbox->dev;
+ struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
+ struct ti_queue_inst *qinst = chan->con_priv;
+ const struct ti_msgmgr_desc *d = inst->desc;
+ int ret;
+ int msg_count;
+
+ /*
+ * If sproxy is starting and can send messages, we are a Tx thread,
+ * else Rx
+ */
+ if (d->is_sproxy) {
+ qinst->is_tx = (readl(qinst->queue_ctrl) &
+ SPROXY_THREAD_CTRL_DIR_MASK) ? false : true;
+
+ msg_count = ti_msgmgr_queue_get_num_messages(d, qinst);
+
+ if (!msg_count && qinst->is_tx) {
+ dev_err(dev, "%s: Cannot transmit with 0 credits!\n",
+ qinst->name);
+ return -EINVAL;
+ }
+ }
+
+ if (!qinst->is_tx) {
+ /* Allocate usage buffer for rx */
+ qinst->rx_buff = kzalloc(d->max_message_size, GFP_KERNEL);
+ if (!qinst->rx_buff)
+ return -ENOMEM;
+ /* Request IRQ */
+ ret = ti_msgmgr_queue_rx_irq_req(dev, d, qinst, chan);
+ if (ret) {
+ kfree(qinst->rx_buff);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ti_msgmgr_queue_shutdown() - Shutdown the queue
+ * @chan: Channel pointer
+ */
+static void ti_msgmgr_queue_shutdown(struct mbox_chan *chan)
+{
+ struct ti_queue_inst *qinst = chan->con_priv;
+
+ if (!qinst->is_tx) {
+ free_irq(qinst->irq, chan);
+ kfree(qinst->rx_buff);
+ }
+}
+
+/**
+ * ti_msgmgr_of_xlate() - Translation of phandle to queue
+ * @mbox: Mailbox controller
+ * @p: phandle pointer
+ *
+ * Return: Mailbox channel corresponding to the queue, else return error
+ * pointer.
+ */
+static struct mbox_chan *ti_msgmgr_of_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *p)
+{
+ struct ti_msgmgr_inst *inst;
+ int req_qid, req_pid;
+ struct ti_queue_inst *qinst;
+ const struct ti_msgmgr_desc *d;
+ int i, ncells;
+
+ inst = container_of(mbox, struct ti_msgmgr_inst, mbox);
+ if (WARN_ON(!inst))
+ return ERR_PTR(-EINVAL);
+
+ d = inst->desc;
+
+ if (d->is_sproxy)
+ ncells = 1;
+ else
+ ncells = 2;
+ if (p->args_count != ncells) {
+ dev_err(inst->dev, "Invalid arguments in dt[%d]. Must be %d\n",
+ p->args_count, ncells);
+ return ERR_PTR(-EINVAL);
+ }
+ if (ncells == 1) {
+ req_qid = 0;
+ req_pid = p->args[0];
+ } else {
+ req_qid = p->args[0];
+ req_pid = p->args[1];
+ }
+
+ if (d->is_sproxy) {
+ if (req_pid >= d->num_valid_queues)
+ goto err;
+ qinst = &inst->qinsts[req_pid];
+ return qinst->chan;
+ }
+
+ for (qinst = inst->qinsts, i = 0; i < inst->num_valid_queues;
+ i++, qinst++) {
+ if (req_qid == qinst->queue_id && req_pid == qinst->proxy_id)
+ return qinst->chan;
+ }
+
+err:
+ dev_err(inst->dev, "Queue ID %d, Proxy ID %d is wrong on %pOFn\n",
+ req_qid, req_pid, p->np);
+ return ERR_PTR(-ENOENT);
+}
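+
+/*
+ * Illustrative client arguments for the translation above (a sketch, not
+ * copied from a real device tree):
+ *   Message Manager, two cells:  mboxes = <&msgmgr 5 2>;       queue 5, proxy 2
+ *   Secure Proxy, one cell:      mboxes = <&secure_proxy 11>;  thread 11
+ */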
+
+/**
+ * ti_msgmgr_queue_setup() - Setup data structures for each queue instance
+ * @idx: index of the queue
+ * @dev: pointer to the message manager device
+ * @np: pointer to the of node
+ * @inst: Message Manager instance pointer
+ * @d: Message Manager instance description data
+ * @qd: Queue description data
+ * @qinst: Queue instance pointer
+ * @chan: pointer to mailbox channel
+ *
+ * Return: 0 if all went well, else return corresponding error
+ */
+static int ti_msgmgr_queue_setup(int idx, struct device *dev,
+ struct device_node *np,
+ struct ti_msgmgr_inst *inst,
+ const struct ti_msgmgr_desc *d,
+ const struct ti_msgmgr_valid_queue_desc *qd,
+ struct ti_queue_inst *qinst,
+ struct mbox_chan *chan)
+{
+ char *dir;
+
+ qinst->proxy_id = qd->proxy_id;
+ qinst->queue_id = qd->queue_id;
+
+ if (qinst->queue_id > d->queue_count) {
+ dev_err(dev, "Queue Data [idx=%d] queuid %d > %d\n",
+ idx, qinst->queue_id, d->queue_count);
+ return -ERANGE;
+ }
+
+ if (d->is_sproxy) {
+ qinst->queue_buff_start = inst->queue_proxy_region +
+ SPROXY_THREAD_DATA_OFFSET(qinst->proxy_id,
+ d->data_first_reg);
+ qinst->queue_buff_end = inst->queue_proxy_region +
+ SPROXY_THREAD_DATA_OFFSET(qinst->proxy_id,
+ d->data_last_reg);
+ qinst->queue_state = inst->queue_state_debug_region +
+ SPROXY_THREAD_STATUS_OFFSET(qinst->proxy_id);
+ qinst->queue_ctrl = inst->queue_ctrl_region +
+ SPROXY_THREAD_CTRL_OFFSET(qinst->proxy_id);
+
+ /* XXX: DO NOT read registers here! Some may be unusable */
+ dir = "thr";
+ snprintf(qinst->name, sizeof(qinst->name), "%s %s_%03d",
+ dev_name(dev), dir, qinst->proxy_id);
+ } else {
+ qinst->queue_buff_start = inst->queue_proxy_region +
+ Q_DATA_OFFSET(qinst->proxy_id, qinst->queue_id,
+ d->data_first_reg);
+ qinst->queue_buff_end = inst->queue_proxy_region +
+ Q_DATA_OFFSET(qinst->proxy_id, qinst->queue_id,
+ d->data_last_reg);
+ qinst->queue_state =
+ inst->queue_state_debug_region +
+ Q_STATE_OFFSET(qinst->queue_id);
+ qinst->is_tx = qd->is_tx;
+ dir = qinst->is_tx ? "tx" : "rx";
+ snprintf(qinst->name, sizeof(qinst->name), "%s %s_%03d_%03d",
+ dev_name(dev), dir, qinst->queue_id, qinst->proxy_id);
+ }
+
+ qinst->chan = chan;
+
+ /* Setup an error value for IRQ - Lazy allocation */
+ qinst->irq = -EINVAL;
+
+ chan->con_priv = qinst;
+
+ dev_dbg(dev, "[%d] qidx=%d pidx=%d irq=%d q_s=%p q_e = %p\n",
+ idx, qinst->queue_id, qinst->proxy_id, qinst->irq,
+ qinst->queue_buff_start, qinst->queue_buff_end);
+ return 0;
+}
+
+static int ti_msgmgr_queue_rx_set_polled_mode(struct ti_queue_inst *qinst, bool enable)
+{
+ if (enable) {
+ disable_irq(qinst->irq);
+ qinst->polled_rx_mode = true;
+ } else {
+ enable_irq(qinst->irq);
+ qinst->polled_rx_mode = false;
+ }
+
+ return 0;
+}
+
+static int ti_msgmgr_suspend(struct device *dev)
+{
+ struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
+ struct ti_queue_inst *qinst;
+ int i;
+
+ /*
+ * We must switch operation to polled mode now as drivers and the genpd
+ * layer may make late TI SCI calls to change clock and device states
+ * from the noirq phase of suspend.
+ */
+ for (qinst = inst->qinsts, i = 0; i < inst->num_valid_queues; qinst++, i++) {
+ if (!qinst->is_tx)
+ ti_msgmgr_queue_rx_set_polled_mode(qinst, true);
+ }
+
+ return 0;
+}
+
+static int ti_msgmgr_resume(struct device *dev)
+{
+ struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
+ struct ti_queue_inst *qinst;
+ int i;
+
+ for (qinst = inst->qinsts, i = 0; i < inst->num_valid_queues; qinst++, i++) {
+ if (!qinst->is_tx)
+ ti_msgmgr_queue_rx_set_polled_mode(qinst, false);
+ }
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(ti_msgmgr_pm_ops, ti_msgmgr_suspend, ti_msgmgr_resume);
+
+/* Queue operations */
+static const struct mbox_chan_ops ti_msgmgr_chan_ops = {
+ .startup = ti_msgmgr_queue_startup,
+ .shutdown = ti_msgmgr_queue_shutdown,
+ .peek_data = ti_msgmgr_queue_peek_data,
+ .last_tx_done = ti_msgmgr_last_tx_done,
+ .send_data = ti_msgmgr_send_data,
+};
+
+/* Keystone K2G SoC integration details */
+static const struct ti_msgmgr_valid_queue_desc k2g_valid_queues[] = {
+ {.queue_id = 0, .proxy_id = 0, .is_tx = true,},
+ {.queue_id = 1, .proxy_id = 0, .is_tx = true,},
+ {.queue_id = 2, .proxy_id = 0, .is_tx = true,},
+ {.queue_id = 3, .proxy_id = 0, .is_tx = true,},
+ {.queue_id = 5, .proxy_id = 2, .is_tx = false,},
+ {.queue_id = 56, .proxy_id = 1, .is_tx = true,},
+ {.queue_id = 57, .proxy_id = 2, .is_tx = false,},
+ {.queue_id = 58, .proxy_id = 3, .is_tx = true,},
+ {.queue_id = 59, .proxy_id = 4, .is_tx = true,},
+ {.queue_id = 60, .proxy_id = 5, .is_tx = true,},
+ {.queue_id = 61, .proxy_id = 6, .is_tx = true,},
+};
+
+static const struct ti_msgmgr_desc k2g_desc = {
+ .queue_count = 64,
+ .max_message_size = 64,
+ .max_messages = 128,
+ .data_region_name = "queue_proxy_region",
+ .status_region_name = "queue_state_debug_region",
+ .data_first_reg = 16,
+ .data_last_reg = 31,
+ .status_cnt_mask = Q_STATE_ENTRY_COUNT_MASK,
+ .tx_polled = false,
+ .valid_queues = k2g_valid_queues,
+ .num_valid_queues = ARRAY_SIZE(k2g_valid_queues),
+ .is_sproxy = false,
+};
+
+static const struct ti_msgmgr_desc am654_desc = {
+ .queue_count = 190,
+ .num_valid_queues = 190,
+ .max_message_size = 60,
+ .data_region_name = "target_data",
+ .status_region_name = "rt",
+ .ctrl_region_name = "scfg",
+ .data_first_reg = 0,
+ .data_last_reg = 14,
+ .status_cnt_mask = SPROXY_THREAD_STATUS_COUNT_MASK,
+ .tx_polled = false,
+ .is_sproxy = true,
+};
+
+static const struct of_device_id ti_msgmgr_of_match[] = {
+ {.compatible = "ti,k2g-message-manager", .data = &k2g_desc},
+ {.compatible = "ti,am654-secure-proxy", .data = &am654_desc},
+ { /* Sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, ti_msgmgr_of_match);
+
+static int ti_msgmgr_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *of_id;
+ struct device_node *np;
+ const struct ti_msgmgr_desc *desc;
+ struct ti_msgmgr_inst *inst;
+ struct ti_queue_inst *qinst;
+ struct mbox_controller *mbox;
+ struct mbox_chan *chans;
+ int queue_count;
+ int i;
+ int ret = -EINVAL;
+ const struct ti_msgmgr_valid_queue_desc *queue_desc;
+
+ if (!dev->of_node) {
+ dev_err(dev, "no OF information\n");
+ return -EINVAL;
+ }
+ np = dev->of_node;
+
+ of_id = of_match_device(ti_msgmgr_of_match, dev);
+ if (!of_id) {
+ dev_err(dev, "OF data missing\n");
+ return -EINVAL;
+ }
+ desc = of_id->data;
+
+ inst = devm_kzalloc(dev, sizeof(*inst), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ inst->dev = dev;
+ inst->desc = desc;
+
+ inst->queue_proxy_region =
+ devm_platform_ioremap_resource_byname(pdev, desc->data_region_name);
+ if (IS_ERR(inst->queue_proxy_region))
+ return PTR_ERR(inst->queue_proxy_region);
+
+ inst->queue_state_debug_region =
+ devm_platform_ioremap_resource_byname(pdev, desc->status_region_name);
+ if (IS_ERR(inst->queue_state_debug_region))
+ return PTR_ERR(inst->queue_state_debug_region);
+
+ if (desc->is_sproxy) {
+ inst->queue_ctrl_region =
+ devm_platform_ioremap_resource_byname(pdev, desc->ctrl_region_name);
+ if (IS_ERR(inst->queue_ctrl_region))
+ return PTR_ERR(inst->queue_ctrl_region);
+ }
+
+ dev_dbg(dev, "proxy region=%p, queue_state=%p\n",
+ inst->queue_proxy_region, inst->queue_state_debug_region);
+
+ queue_count = desc->num_valid_queues;
+ if (!queue_count || queue_count > desc->queue_count) {
+ dev_crit(dev, "Invalid Number of queues %d. Max %d\n",
+ queue_count, desc->queue_count);
+ return -ERANGE;
+ }
+ inst->num_valid_queues = queue_count;
+
+ qinst = devm_kcalloc(dev, queue_count, sizeof(*qinst), GFP_KERNEL);
+ if (!qinst)
+ return -ENOMEM;
+ inst->qinsts = qinst;
+
+ chans = devm_kcalloc(dev, queue_count, sizeof(*chans), GFP_KERNEL);
+ if (!chans)
+ return -ENOMEM;
+ inst->chans = chans;
+
+ if (desc->is_sproxy) {
+ struct ti_msgmgr_valid_queue_desc sproxy_desc;
+
+ /* All proxies may be valid in Secure Proxy instance */
+ for (i = 0; i < queue_count; i++, qinst++, chans++) {
+ sproxy_desc.queue_id = 0;
+ sproxy_desc.proxy_id = i;
+ ret = ti_msgmgr_queue_setup(i, dev, np, inst,
+ desc, &sproxy_desc, qinst,
+ chans);
+ if (ret)
+ return ret;
+ }
+ } else {
+ /* Only Some proxies are valid in Message Manager */
+ for (i = 0, queue_desc = desc->valid_queues;
+ i < queue_count; i++, qinst++, chans++, queue_desc++) {
+ ret = ti_msgmgr_queue_setup(i, dev, np, inst,
+ desc, queue_desc, qinst,
+ chans);
+ if (ret)
+ return ret;
+ }
+ }
+
+ mbox = &inst->mbox;
+ mbox->dev = dev;
+ mbox->ops = &ti_msgmgr_chan_ops;
+ mbox->chans = inst->chans;
+ mbox->num_chans = inst->num_valid_queues;
+ mbox->txdone_irq = false;
+ mbox->txdone_poll = desc->tx_polled;
+ if (desc->tx_polled)
+ mbox->txpoll_period = desc->tx_poll_timeout_ms;
+ mbox->of_xlate = ti_msgmgr_of_xlate;
+
+ platform_set_drvdata(pdev, inst);
+ ret = devm_mbox_controller_register(dev, mbox);
+ if (ret)
+ dev_err(dev, "Failed to register mbox_controller(%d)\n", ret);
+
+ return ret;
+}
+
+static struct platform_driver ti_msgmgr_driver = {
+ .probe = ti_msgmgr_probe,
+ .driver = {
+ .name = "ti-msgmgr",
+ .of_match_table = of_match_ptr(ti_msgmgr_of_match),
+ .pm = &ti_msgmgr_pm_ops,
+ },
+};
+module_platform_driver(ti_msgmgr_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI message manager driver");
+MODULE_AUTHOR("Nishanth Menon");
+MODULE_ALIAS("platform:ti-msgmgr");
diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
new file mode 100644
index 000000000..e4fcac97d
--- /dev/null
+++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
@@ -0,0 +1,724 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Inter Processor Interrupt(IPI) Mailbox Driver
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox/zynqmp-ipi-message.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+/* IPI agent ID any */
+#define IPI_ID_ANY 0xFFUL
+
+/* indicate if ZynqMP IPI mailbox driver uses SMC calls or HVC calls */
+#define USE_SMC 0
+#define USE_HVC 1
+
+/* Default IPI SMC function IDs */
+#define SMC_IPI_MAILBOX_OPEN 0x82001000U
+#define SMC_IPI_MAILBOX_RELEASE 0x82001001U
+#define SMC_IPI_MAILBOX_STATUS_ENQUIRY 0x82001002U
+#define SMC_IPI_MAILBOX_NOTIFY 0x82001003U
+#define SMC_IPI_MAILBOX_ACK 0x82001004U
+#define SMC_IPI_MAILBOX_ENABLE_IRQ 0x82001005U
+#define SMC_IPI_MAILBOX_DISABLE_IRQ 0x82001006U
+
+/* IPI SMC Macros */
+#define IPI_SMC_ENQUIRY_DIRQ_MASK 0x00000001UL /* Flag to indicate if the
+ * notification interrupt is
+ * to be disabled.
+ */
+#define IPI_SMC_ACK_EIRQ_MASK 0x00000001UL /* Flag to indicate if the
+ * notification interrupt is
+ * to be enabled.
+ */
+
+/* IPI mailbox status */
+#define IPI_MB_STATUS_IDLE 0
+#define IPI_MB_STATUS_SEND_PENDING 1
+#define IPI_MB_STATUS_RECV_PENDING 2
+
+#define IPI_MB_CHNL_TX 0 /* IPI mailbox TX channel */
+#define IPI_MB_CHNL_RX 1 /* IPI mailbox RX channel */
+
+/**
+ * struct zynqmp_ipi_mchan - Description of a Xilinx ZynqMP IPI mailbox channel
+ * @is_opened: indicate if the IPI channel is opened
+ * @req_buf: local to remote request buffer start address
+ * @resp_buf: local to remote response buffer start address
+ * @req_buf_size: request buffer size
+ * @resp_buf_size: response buffer size
+ * @rx_buf: receive buffer to pass received message to client
+ * @chan_type: channel type
+ */
+struct zynqmp_ipi_mchan {
+ int is_opened;
+ void __iomem *req_buf;
+ void __iomem *resp_buf;
+ void *rx_buf;
+ size_t req_buf_size;
+ size_t resp_buf_size;
+ unsigned int chan_type;
+};
+
+/**
+ * struct zynqmp_ipi_mbox - Description of a ZynqMP IPI mailbox
+ * @pdata: pointer to the IPI private data
+ * @dev: device pointer corresponding to the Xilinx ZynqMP
+ * IPI mailbox
+ * @remote_id: remote IPI agent ID
+ * @mbox: mailbox controller
+ * @mchans: array for channels, tx channel and rx channel.
+ */
+struct zynqmp_ipi_mbox {
+ struct zynqmp_ipi_pdata *pdata;
+ struct device dev;
+ u32 remote_id;
+ struct mbox_controller mbox;
+ struct zynqmp_ipi_mchan mchans[2];
+};
+
+/**
+ * struct zynqmp_ipi_pdata - Description of a ZynqMP IPI agent platform data.
+ *
+ * @dev: device pointer corresponding to the Xilinx ZynqMP
+ * IPI agent
+ * @irq: IPI agent interrupt ID
+ * @method: IPI SMC or HVC is going to be used
+ * @local_id: local IPI agent ID
+ * @num_mboxes: number of mailboxes of this IPI agent
+ * @ipi_mboxes: IPI mailboxes of this IPI agent
+ */
+struct zynqmp_ipi_pdata {
+ struct device *dev;
+ int irq;
+ unsigned int method;
+ u32 local_id;
+ int num_mboxes;
+ struct zynqmp_ipi_mbox ipi_mboxes[];
+};
+
+static struct device_driver zynqmp_ipi_mbox_driver = {
+ .owner = THIS_MODULE,
+ .name = "zynqmp-ipi-mbox",
+};
+
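+/*
+ * Issue an IPI firmware call through SMC or HVC: a0 carries the mailbox
+ * function ID (one of SMC_IPI_MAILBOX_*), a1 the local IPI agent ID, a2 the
+ * remote IPI agent ID and a3 a call-specific flag such as
+ * IPI_SMC_ENQUIRY_DIRQ_MASK or IPI_SMC_ACK_EIRQ_MASK.
+ */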
+static void zynqmp_ipi_fw_call(struct zynqmp_ipi_mbox *ipi_mbox,
+ unsigned long a0, unsigned long a3,
+ struct arm_smccc_res *res)
+{
+ struct zynqmp_ipi_pdata *pdata = ipi_mbox->pdata;
+ unsigned long a1, a2;
+
+ a1 = pdata->local_id;
+ a2 = ipi_mbox->remote_id;
+ if (pdata->method == USE_SMC)
+ arm_smccc_smc(a0, a1, a2, a3, 0, 0, 0, 0, res);
+ else
+ arm_smccc_hvc(a0, a1, a2, a3, 0, 0, 0, 0, res);
+}
+
+/**
+ * zynqmp_ipi_interrupt - Interrupt handler for IPI notification
+ *
+ * @irq: Interrupt number
+ * @data: ZynqMP IPI mailbox platform data.
+ *
+ * Return: IRQ_NONE if the interrupt is not ours.
+ * IRQ_HANDLED if the rx interrupt was successfully handled.
+ */
+static irqreturn_t zynqmp_ipi_interrupt(int irq, void *data)
+{
+ struct zynqmp_ipi_pdata *pdata = data;
+ struct mbox_chan *chan;
+ struct zynqmp_ipi_mbox *ipi_mbox;
+ struct zynqmp_ipi_mchan *mchan;
+ struct zynqmp_ipi_message *msg;
+ u64 arg0, arg3;
+ struct arm_smccc_res res;
+ int ret, i, status = IRQ_NONE;
+
+ (void)irq;
+ arg0 = SMC_IPI_MAILBOX_STATUS_ENQUIRY;
+ arg3 = IPI_SMC_ENQUIRY_DIRQ_MASK;
+ for (i = 0; i < pdata->num_mboxes; i++) {
+ ipi_mbox = &pdata->ipi_mboxes[i];
+ mchan = &ipi_mbox->mchans[IPI_MB_CHNL_RX];
+ chan = &ipi_mbox->mbox.chans[IPI_MB_CHNL_RX];
+ zynqmp_ipi_fw_call(ipi_mbox, arg0, arg3, &res);
+ ret = (int)(res.a0 & 0xFFFFFFFF);
+ if (ret > 0 && ret & IPI_MB_STATUS_RECV_PENDING) {
+ if (mchan->is_opened) {
+ msg = mchan->rx_buf;
+ msg->len = mchan->req_buf_size;
+ memcpy_fromio(msg->data, mchan->req_buf,
+ msg->len);
+ mbox_chan_received_data(chan, (void *)msg);
+ status = IRQ_HANDLED;
+ }
+ }
+ }
+ return status;
+}
+
+/**
+ * zynqmp_ipi_peek_data - Peek to see if there are any rx messages.
+ *
+ * @chan: Channel Pointer
+ *
+ * Return: 'true' if there is pending rx data, 'false' if there is none.
+ */
+static bool zynqmp_ipi_peek_data(struct mbox_chan *chan)
+{
+ struct device *dev = chan->mbox->dev;
+ struct zynqmp_ipi_mbox *ipi_mbox = dev_get_drvdata(dev);
+ struct zynqmp_ipi_mchan *mchan = chan->con_priv;
+ int ret;
+ u64 arg0;
+ struct arm_smccc_res res;
+
+ if (WARN_ON(!ipi_mbox)) {
+ dev_err(dev, "no platform drv data??\n");
+ return false;
+ }
+
+ arg0 = SMC_IPI_MAILBOX_STATUS_ENQUIRY;
+ zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
+ ret = (int)(res.a0 & 0xFFFFFFFF);
+
+ if (mchan->chan_type == IPI_MB_CHNL_TX) {
+ /* TX channel, check if the message has been acked
+ * by the remote, if yes, response is available.
+ */
+ if (ret < 0 || ret & IPI_MB_STATUS_SEND_PENDING)
+ return false;
+ else
+ return true;
+ } else if (ret > 0 && ret & IPI_MB_STATUS_RECV_PENDING) {
+ /* RX channel, check if a message has arrived. */
+ return true;
+ }
+ return false;
+}
+
+/**
+ * zynqmp_ipi_last_tx_done - See if the last tx message is sent
+ *
+ * @chan: Channel pointer
+ *
+ * Return: 'true' if no pending tx data, 'false' if there are any.
+ */
+static bool zynqmp_ipi_last_tx_done(struct mbox_chan *chan)
+{
+ struct device *dev = chan->mbox->dev;
+ struct zynqmp_ipi_mbox *ipi_mbox = dev_get_drvdata(dev);
+ struct zynqmp_ipi_mchan *mchan = chan->con_priv;
+ int ret;
+ u64 arg0;
+ struct arm_smccc_res res;
+
+ if (WARN_ON(!ipi_mbox)) {
+ dev_err(dev, "no platform drv data??\n");
+ return false;
+ }
+
+ if (mchan->chan_type == IPI_MB_CHNL_TX) {
+ /* We only need to check if the message has been taken
+ * by the remote in the TX channel
+ */
+ arg0 = SMC_IPI_MAILBOX_STATUS_ENQUIRY;
+ zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
+ /* Check the SMC call status, a0 of the result */
+ ret = (int)(res.a0 & 0xFFFFFFFF);
+ if (ret < 0 || ret & IPI_MB_STATUS_SEND_PENDING)
+ return false;
+ return true;
+ }
+ /* Always true for the response message in RX channel */
+ return true;
+}
+
+/**
+ * zynqmp_ipi_send_data - Send data
+ *
+ * @chan: Channel Pointer
+ * @data: Message Pointer
+ *
+ * Return: 0 if all goes well, else an appropriate error value.
+ */
+static int zynqmp_ipi_send_data(struct mbox_chan *chan, void *data)
+{
+ struct device *dev = chan->mbox->dev;
+ struct zynqmp_ipi_mbox *ipi_mbox = dev_get_drvdata(dev);
+ struct zynqmp_ipi_mchan *mchan = chan->con_priv;
+ struct zynqmp_ipi_message *msg = data;
+ u64 arg0;
+ struct arm_smccc_res res;
+
+ if (WARN_ON(!ipi_mbox)) {
+ dev_err(dev, "no platform drv data??\n");
+ return -EINVAL;
+ }
+
+ if (mchan->chan_type == IPI_MB_CHNL_TX) {
+ /* Send request message */
+ if (msg && msg->len > mchan->req_buf_size) {
+ dev_err(dev, "channel %d message length %u > max %lu\n",
+ mchan->chan_type, (unsigned int)msg->len,
+ mchan->req_buf_size);
+ return -EINVAL;
+ }
+ if (msg && msg->len)
+ memcpy_toio(mchan->req_buf, msg->data, msg->len);
+ /* Kick IPI mailbox to send message */
+ arg0 = SMC_IPI_MAILBOX_NOTIFY;
+ zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
+ } else {
+ /* Send response message */
+ if (msg && msg->len > mchan->resp_buf_size) {
+ dev_err(dev, "channel %d message length %u > max %lu\n",
+ mchan->chan_type, (unsigned int)msg->len,
+ mchan->resp_buf_size);
+ return -EINVAL;
+ }
+ if (msg && msg->len)
+ memcpy_toio(mchan->resp_buf, msg->data, msg->len);
+ arg0 = SMC_IPI_MAILBOX_ACK;
+ zynqmp_ipi_fw_call(ipi_mbox, arg0, IPI_SMC_ACK_EIRQ_MASK,
+ &res);
+ }
+ return 0;
+}
+
+/**
+ * zynqmp_ipi_startup - Startup the IPI channel
+ *
+ * @chan: Channel pointer
+ *
+ * Return: 0 if all goes well, else return corresponding error value
+ */
+static int zynqmp_ipi_startup(struct mbox_chan *chan)
+{
+ struct device *dev = chan->mbox->dev;
+ struct zynqmp_ipi_mbox *ipi_mbox = dev_get_drvdata(dev);
+ struct zynqmp_ipi_mchan *mchan = chan->con_priv;
+ u64 arg0;
+ struct arm_smccc_res res;
+ int ret = 0;
+ unsigned int nchan_type;
+
+ if (mchan->is_opened)
+ return 0;
+
+ /* If no channel has been opened, open the IPI mailbox */
+ nchan_type = (mchan->chan_type + 1) % 2;
+ if (!ipi_mbox->mchans[nchan_type].is_opened) {
+ arg0 = SMC_IPI_MAILBOX_OPEN;
+ zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
+ /* Check the SMC call status, a0 of the result */
+ ret = (int)(res.a0 & 0xFFFFFFFF);
+ if (ret < 0) {
+ dev_err(dev, "SMC to open the IPI channel failed.\n");
+ return ret;
+ }
+ ret = 0;
+ }
+
+ /* If it is RX channel, enable the IPI notification interrupt */
+ if (mchan->chan_type == IPI_MB_CHNL_RX) {
+ arg0 = SMC_IPI_MAILBOX_ENABLE_IRQ;
+ zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
+ }
+ mchan->is_opened = 1;
+
+ return ret;
+}
+
+/**
+ * zynqmp_ipi_shutdown - Shutdown the IPI channel
+ *
+ * @chan: Channel pointer
+ */
+static void zynqmp_ipi_shutdown(struct mbox_chan *chan)
+{
+ struct device *dev = chan->mbox->dev;
+ struct zynqmp_ipi_mbox *ipi_mbox = dev_get_drvdata(dev);
+ struct zynqmp_ipi_mchan *mchan = chan->con_priv;
+ u64 arg0;
+ struct arm_smccc_res res;
+ unsigned int chan_type;
+
+ if (!mchan->is_opened)
+ return;
+
+ /* If it is RX channel, disable notification interrupt */
+ chan_type = mchan->chan_type;
+ if (chan_type == IPI_MB_CHNL_RX) {
+ arg0 = SMC_IPI_MAILBOX_DISABLE_IRQ;
+ zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
+ }
+ /* Release IPI mailbox if no other channel is opened */
+ chan_type = (chan_type + 1) % 2;
+ if (!ipi_mbox->mchans[chan_type].is_opened) {
+ arg0 = SMC_IPI_MAILBOX_RELEASE;
+ zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res);
+ }
+
+ mchan->is_opened = 0;
+}
+
+/* ZynqMP IPI mailbox operations */
+static const struct mbox_chan_ops zynqmp_ipi_chan_ops = {
+ .startup = zynqmp_ipi_startup,
+ .shutdown = zynqmp_ipi_shutdown,
+ .peek_data = zynqmp_ipi_peek_data,
+ .last_tx_done = zynqmp_ipi_last_tx_done,
+ .send_data = zynqmp_ipi_send_data,
+};
+
+/**
+ * zynqmp_ipi_of_xlate - Translate of phandle to IPI mailbox channel
+ *
+ * @mbox: mailbox controller pointer
+ * @p: phandle pointer
+ *
+ * Return: Mailbox channel, else return error pointer.
+ */
+static struct mbox_chan *zynqmp_ipi_of_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *p)
+{
+ struct mbox_chan *chan;
+ struct device *dev = mbox->dev;
+ unsigned int chan_type;
+
+ /* Only supports TX and RX channels */
+ chan_type = p->args[0];
+ if (chan_type != IPI_MB_CHNL_TX && chan_type != IPI_MB_CHNL_RX) {
+ dev_err(dev, "req chnl failure: invalid chnl type %u.\n",
+ chan_type);
+ return ERR_PTR(-EINVAL);
+ }
+ chan = &mbox->chans[chan_type];
+ return chan;
+}
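+
+/*
+ * Illustrative client arguments for the translation above (a sketch, not
+ * copied from a real device tree), one cell selecting the channel type:
+ *   mboxes = <&ipi_mailbox 0>,    0 == IPI_MB_CHNL_TX
+ *            <&ipi_mailbox 1>;    1 == IPI_MB_CHNL_RX
+ */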
+
+static const struct of_device_id zynqmp_ipi_of_match[] = {
+ { .compatible = "xlnx,zynqmp-ipi-mailbox" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, zynqmp_ipi_of_match);
+
+/**
+ * zynqmp_ipi_mbox_get_buf_res - Get buffer resource from the IPI dev node
+ *
+ * @node: IPI mbox device child node
+ * @name: name of the IPI buffer
+ * @res: pointer to where the resource information will be stored.
+ *
+ * Return: 0 for success, negative value for failure
+ */
+static int zynqmp_ipi_mbox_get_buf_res(struct device_node *node,
+ const char *name,
+ struct resource *res)
+{
+ int ret, index;
+
+ index = of_property_match_string(node, "reg-names", name);
+ if (index >= 0) {
+ ret = of_address_to_resource(node, index, res);
+ if (ret < 0)
+ return -EINVAL;
+ return 0;
+ }
+ return -ENODEV;
+}
+
+/**
+ * zynqmp_ipi_mbox_dev_release() - release the existence of an ipi mbox dev
+ *
+ * @dev: the ipi mailbox device
+ *
+ * This empty release() is provided to avoid the kernel warning about a
+ * device that has no release() function.
+ *
+ */
+static void zynqmp_ipi_mbox_dev_release(struct device *dev)
+{
+ (void)dev;
+}
+
+/**
+ * zynqmp_ipi_mbox_probe - probe IPI mailbox resource from device node
+ *
+ * @ipi_mbox: pointer to IPI mailbox private data structure
+ * @node: IPI mailbox device node
+ *
+ * Return: 0 for success, negative value for failure
+ */
+static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox,
+ struct device_node *node)
+{
+ struct zynqmp_ipi_mchan *mchan;
+ struct mbox_chan *chans;
+ struct mbox_controller *mbox;
+ struct resource res;
+ struct device *dev, *mdev;
+ const char *name;
+ int ret;
+
+ dev = ipi_mbox->pdata->dev;
+ /* Initialize dev for IPI mailbox */
+ ipi_mbox->dev.parent = dev;
+ ipi_mbox->dev.release = NULL;
+ ipi_mbox->dev.of_node = node;
+ dev_set_name(&ipi_mbox->dev, "%s", of_node_full_name(node));
+ dev_set_drvdata(&ipi_mbox->dev, ipi_mbox);
+ ipi_mbox->dev.release = zynqmp_ipi_mbox_dev_release;
+ ipi_mbox->dev.driver = &zynqmp_ipi_mbox_driver;
+ ret = device_register(&ipi_mbox->dev);
+ if (ret) {
+ dev_err(dev, "Failed to register ipi mbox dev.\n");
+ put_device(&ipi_mbox->dev);
+ return ret;
+ }
+ mdev = &ipi_mbox->dev;
+
+ mchan = &ipi_mbox->mchans[IPI_MB_CHNL_TX];
+ name = "local_request_region";
+ ret = zynqmp_ipi_mbox_get_buf_res(node, name, &res);
+ if (!ret) {
+ mchan->req_buf_size = resource_size(&res);
+ mchan->req_buf = devm_ioremap(mdev, res.start,
+ mchan->req_buf_size);
+ if (!mchan->req_buf) {
+ dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
+ return -ENOMEM;
+ }
+ } else if (ret != -ENODEV) {
+ dev_err(mdev, "Unmatched resource %s, %d.\n", name, ret);
+ return ret;
+ }
+
+ name = "remote_response_region";
+ ret = zynqmp_ipi_mbox_get_buf_res(node, name, &res);
+ if (!ret) {
+ mchan->resp_buf_size = resource_size(&res);
+ mchan->resp_buf = devm_ioremap(mdev, res.start,
+ mchan->resp_buf_size);
+ if (!mchan->resp_buf) {
+ dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
+ return -ENOMEM;
+ }
+ } else if (ret != -ENODEV) {
+ dev_err(mdev, "Unmatched resource %s.\n", name);
+ return ret;
+ }
+ mchan->rx_buf = devm_kzalloc(mdev,
+ mchan->resp_buf_size +
+ sizeof(struct zynqmp_ipi_message),
+ GFP_KERNEL);
+ if (!mchan->rx_buf)
+ return -ENOMEM;
+
+ mchan = &ipi_mbox->mchans[IPI_MB_CHNL_RX];
+ name = "remote_request_region";
+ ret = zynqmp_ipi_mbox_get_buf_res(node, name, &res);
+ if (!ret) {
+ mchan->req_buf_size = resource_size(&res);
+ mchan->req_buf = devm_ioremap(mdev, res.start,
+ mchan->req_buf_size);
+ if (!mchan->req_buf) {
+ dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
+ return -ENOMEM;
+ }
+ } else if (ret != -ENODEV) {
+ dev_err(mdev, "Unmatched resource %s.\n", name);
+ return ret;
+ }
+
+ name = "local_response_region";
+ ret = zynqmp_ipi_mbox_get_buf_res(node, name, &res);
+ if (!ret) {
+ mchan->resp_buf_size = resource_size(&res);
+ mchan->resp_buf = devm_ioremap(mdev, res.start,
+ mchan->resp_buf_size);
+ if (!mchan->resp_buf) {
+ dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
+ return -ENOMEM;
+ }
+ } else if (ret != -ENODEV) {
+ dev_err(mdev, "Unmatched resource %s.\n", name);
+ return ret;
+ }
+ mchan->rx_buf = devm_kzalloc(mdev,
+ mchan->resp_buf_size +
+ sizeof(struct zynqmp_ipi_message),
+ GFP_KERNEL);
+ if (!mchan->rx_buf)
+ return -ENOMEM;
+
+ /* Get the IPI remote agent ID */
+ ret = of_property_read_u32(node, "xlnx,ipi-id", &ipi_mbox->remote_id);
+ if (ret < 0) {
+ dev_err(dev, "No IPI remote ID is specified.\n");
+ return ret;
+ }
+
+ mbox = &ipi_mbox->mbox;
+ mbox->dev = mdev;
+ mbox->ops = &zynqmp_ipi_chan_ops;
+ mbox->num_chans = 2;
+ mbox->txdone_irq = false;
+ mbox->txdone_poll = true;
+ mbox->txpoll_period = 5;
+ mbox->of_xlate = zynqmp_ipi_of_xlate;
+ chans = devm_kzalloc(mdev, 2 * sizeof(*chans), GFP_KERNEL);
+ if (!chans)
+ return -ENOMEM;
+ mbox->chans = chans;
+ chans[IPI_MB_CHNL_TX].con_priv = &ipi_mbox->mchans[IPI_MB_CHNL_TX];
+ chans[IPI_MB_CHNL_RX].con_priv = &ipi_mbox->mchans[IPI_MB_CHNL_RX];
+ ipi_mbox->mchans[IPI_MB_CHNL_TX].chan_type = IPI_MB_CHNL_TX;
+ ipi_mbox->mchans[IPI_MB_CHNL_RX].chan_type = IPI_MB_CHNL_RX;
+ ret = devm_mbox_controller_register(mdev, mbox);
+ if (ret)
+ dev_err(mdev,
+ "Failed to register mbox_controller(%d)\n", ret);
+ else
+ dev_info(mdev,
+ "Registered ZynqMP IPI mbox with TX/RX channels.\n");
+ return ret;
+}
+
+/**
+ * zynqmp_ipi_free_mboxes - Free IPI mailboxes devices
+ *
+ * @pdata: IPI private data
+ */
+static void zynqmp_ipi_free_mboxes(struct zynqmp_ipi_pdata *pdata)
+{
+ struct zynqmp_ipi_mbox *ipi_mbox;
+ int i;
+
+ i = pdata->num_mboxes - 1;
+ for (; i >= 0; i--) {
+ ipi_mbox = &pdata->ipi_mboxes[i];
+ if (ipi_mbox->dev.parent) {
+ mbox_controller_unregister(&ipi_mbox->mbox);
+ if (device_is_registered(&ipi_mbox->dev))
+ device_unregister(&ipi_mbox->dev);
+ }
+ }
+}
+
+static int zynqmp_ipi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *nc, *np = pdev->dev.of_node;
+ struct zynqmp_ipi_pdata *pdata;
+ struct zynqmp_ipi_mbox *mbox;
+ int num_mboxes, ret = -EINVAL;
+
+ num_mboxes = of_get_available_child_count(np);
+ if (num_mboxes == 0) {
+ dev_err(dev, "mailbox nodes not available\n");
+ return -EINVAL;
+ }
+
+ pdata = devm_kzalloc(dev, struct_size(pdata, ipi_mboxes, num_mboxes),
+ GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+ pdata->dev = dev;
+
+ /* Get the IPI local agents ID */
+ ret = of_property_read_u32(np, "xlnx,ipi-id", &pdata->local_id);
+ if (ret < 0) {
+ dev_err(dev, "No IPI local ID is specified.\n");
+ return ret;
+ }
+
+ pdata->num_mboxes = num_mboxes;
+
+ mbox = pdata->ipi_mboxes;
+ for_each_available_child_of_node(np, nc) {
+ mbox->pdata = pdata;
+ ret = zynqmp_ipi_mbox_probe(mbox, nc);
+ if (ret) {
+ of_node_put(nc);
+ dev_err(dev, "failed to probe subdev.\n");
+ ret = -EINVAL;
+ goto free_mbox_dev;
+ }
+ mbox++;
+ }
+
+ /* IPI IRQ */
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto free_mbox_dev;
+
+ pdata->irq = ret;
+ ret = devm_request_irq(dev, pdata->irq, zynqmp_ipi_interrupt,
+ IRQF_SHARED, dev_name(dev), pdata);
+ if (ret) {
+ dev_err(dev, "IRQ %d is not requested successfully.\n",
+ pdata->irq);
+ goto free_mbox_dev;
+ }
+
+ platform_set_drvdata(pdev, pdata);
+ return ret;
+
+free_mbox_dev:
+ zynqmp_ipi_free_mboxes(pdata);
+ return ret;
+}
+
+static int zynqmp_ipi_remove(struct platform_device *pdev)
+{
+ struct zynqmp_ipi_pdata *pdata;
+
+ pdata = platform_get_drvdata(pdev);
+ zynqmp_ipi_free_mboxes(pdata);
+
+ return 0;
+}
+
+static struct platform_driver zynqmp_ipi_driver = {
+ .probe = zynqmp_ipi_probe,
+ .remove = zynqmp_ipi_remove,
+ .driver = {
+ .name = "zynqmp-ipi",
+ .of_match_table = of_match_ptr(zynqmp_ipi_of_match),
+ },
+};
+
+static int __init zynqmp_ipi_init(void)
+{
+ return platform_driver_register(&zynqmp_ipi_driver);
+}
+subsys_initcall(zynqmp_ipi_init);
+
+static void __exit zynqmp_ipi_exit(void)
+{
+ platform_driver_unregister(&zynqmp_ipi_driver);
+}
+module_exit(zynqmp_ipi_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Xilinx ZynqMP IPI Mailbox driver");
+MODULE_AUTHOR("Xilinx Inc.");