author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4
tree       848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/remoteproc
parent     Initial commit.
Adding upstream version 6.1.76.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/remoteproc')
-rw-r--r--  drivers/remoteproc/Kconfig                   |  357
-rw-r--r--  drivers/remoteproc/Makefile                  |   40
-rw-r--r--  drivers/remoteproc/da8xx_remoteproc.c        |  399
-rw-r--r--  drivers/remoteproc/imx_dsp_rproc.c           | 1318
-rw-r--r--  drivers/remoteproc/imx_rproc.c               |  877
-rw-r--r--  drivers/remoteproc/imx_rproc.h               |   39
-rw-r--r--  drivers/remoteproc/ingenic_rproc.c           |  254
-rw-r--r--  drivers/remoteproc/keystone_remoteproc.c     |  515
-rw-r--r--  drivers/remoteproc/meson_mx_ao_arc.c         |  261
-rw-r--r--  drivers/remoteproc/mtk_common.h              |  141
-rw-r--r--  drivers/remoteproc/mtk_scp.c                 | 1015
-rw-r--r--  drivers/remoteproc/mtk_scp_ipi.c             |  217
-rw-r--r--  drivers/remoteproc/omap_remoteproc.c         | 1396
-rw-r--r--  drivers/remoteproc/omap_remoteproc.h         |   65
-rw-r--r--  drivers/remoteproc/pru_rproc.c               |  920
-rw-r--r--  drivers/remoteproc/pru_rproc.h               |   46
-rw-r--r--  drivers/remoteproc/qcom_common.c             |  509
-rw-r--r--  drivers/remoteproc/qcom_common.h             |   75
-rw-r--r--  drivers/remoteproc/qcom_pil_info.c           |  129
-rw-r--r--  drivers/remoteproc/qcom_pil_info.h           |    9
-rw-r--r--  drivers/remoteproc/qcom_q6v5.c               |  373
-rw-r--r--  drivers/remoteproc/qcom_q6v5.h               |   56
-rw-r--r--  drivers/remoteproc/qcom_q6v5_adsp.c          |  754
-rw-r--r--  drivers/remoteproc/qcom_q6v5_mss.c           | 2386
-rw-r--r--  drivers/remoteproc/qcom_q6v5_pas.c           | 1008
-rw-r--r--  drivers/remoteproc/qcom_q6v5_wcss.c          | 1129
-rw-r--r--  drivers/remoteproc/qcom_sysmon.c             |  811
-rw-r--r--  drivers/remoteproc/qcom_wcnss.c              |  694
-rw-r--r--  drivers/remoteproc/qcom_wcnss.h              |   25
-rw-r--r--  drivers/remoteproc/qcom_wcnss_iris.c         |  206
-rw-r--r--  drivers/remoteproc/rcar_rproc.c              |  229
-rw-r--r--  drivers/remoteproc/remoteproc_cdev.c         |  126
-rw-r--r--  drivers/remoteproc/remoteproc_core.c         | 2769
-rw-r--r--  drivers/remoteproc/remoteproc_coredump.c     |  469
-rw-r--r--  drivers/remoteproc/remoteproc_debugfs.c      |  430
-rw-r--r--  drivers/remoteproc/remoteproc_elf_helpers.h  |  122
-rw-r--r--  drivers/remoteproc/remoteproc_elf_loader.c   |  395
-rw-r--r--  drivers/remoteproc/remoteproc_internal.h     |  228
-rw-r--r--  drivers/remoteproc/remoteproc_sysfs.c        |  275
-rw-r--r--  drivers/remoteproc/remoteproc_virtio.c       |  603
-rw-r--r--  drivers/remoteproc/st_remoteproc.c           |  481
-rw-r--r--  drivers/remoteproc/st_slim_rproc.c           |  335
-rw-r--r--  drivers/remoteproc/stm32_rproc.c             |  932
-rw-r--r--  drivers/remoteproc/ti_k3_dsp_remoteproc.c    |  908
-rw-r--r--  drivers/remoteproc/ti_k3_r5_remoteproc.c     | 1799
-rw-r--r--  drivers/remoteproc/ti_sci_proc.h             |  104
-rw-r--r--  drivers/remoteproc/wkup_m3_rproc.c           |  268
47 files changed, 26497 insertions, 0 deletions
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
new file mode 100644
index 000000000..166019786
--- /dev/null
+++ b/drivers/remoteproc/Kconfig
@@ -0,0 +1,357 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "Remoteproc drivers"
+
+config REMOTEPROC
+ bool "Support for Remote Processor subsystem"
+ depends on HAS_DMA
+ select CRC32
+ select FW_LOADER
+ select VIRTIO
+ select WANT_DEV_COREDUMP
+ help
+ Support for remote processors (such as DSP coprocessors). These
+ are mainly used on embedded systems.
+
+if REMOTEPROC
+
+config REMOTEPROC_CDEV
+ bool "Remoteproc character device interface"
+ help
+ Say y here to have a character device interface for the remoteproc
+ framework. Userspace can boot/shutdown remote processors through
+ this interface.
+
+ It's safe to say N if you don't want to use this interface.
+
+config IMX_REMOTEPROC
+ tristate "i.MX remoteproc support"
+ depends on ARCH_MXC
+ depends on HAVE_ARM_SMCCC
+ select MAILBOX
+ help
+ Say y here to support i.MX's remote processors via the remote
+ processor framework.
+
+ It's safe to say N here.
+
+config IMX_DSP_REMOTEPROC
+ tristate "i.MX DSP remoteproc support"
+ depends on ARCH_MXC
+ depends on HAVE_ARM_SMCCC
+ select MAILBOX
+ help
+ Say y here to support i.MX's DSP remote processors via the remote
+ processor framework.
+
+ It's safe to say N here.
+
+config INGENIC_VPU_RPROC
+ tristate "Ingenic JZ47xx VPU remoteproc support"
+ depends on MIPS || COMPILE_TEST
+ help
+ Say y or m here to support the VPU in the JZ47xx SoCs from Ingenic.
+
+ This can be either built-in or a loadable module.
+ If unsure say N.
+
+config MTK_SCP
+ tristate "Mediatek SCP support"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select RPMSG_MTK_SCP
+ help
+ Say y here to support Mediatek's System Companion Processor (SCP) via
+ the remote processor framework.
+
+ It's safe to say N here.
+
+config OMAP_REMOTEPROC
+ tristate "OMAP remoteproc support"
+ depends on ARCH_OMAP4 || SOC_OMAP5 || SOC_DRA7XX
+ depends on OMAP_IOMMU
+ select MAILBOX
+ select OMAP2PLUS_MBOX
+ help
+ Say y here to support OMAP's remote processors (dual M3
+ and DSP on OMAP4) via the remote processor framework.
+
+ Currently only supported on OMAP4.
+
+ Usually you want to say Y here, in order to enable multimedia
+ use-cases to run on your platform (multimedia codecs are
+ offloaded to remote DSP processors using this framework).
+
+ It's safe to say N here if you're not interested in multimedia
+ offloading or just want a bare minimum kernel.
+
+config OMAP_REMOTEPROC_WATCHDOG
+ bool "OMAP remoteproc watchdog timer"
+ depends on OMAP_REMOTEPROC
+ default n
+ help
+ Say Y here to enable watchdog timer for remote processors.
+
+ This option controls the watchdog functionality for the remote
+ processors in OMAP. Dedicated OMAP DMTimers are used by the remote
+ processors and trigger the timer interrupt upon a watchdog
+ detection.
+
+config WKUP_M3_RPROC
+ tristate "AMx3xx Wakeup M3 remoteproc support"
+ depends on SOC_AM33XX || SOC_AM43XX
+ help
+ Say y here to support Wakeup M3 remote processor on TI AM33xx
+ and AM43xx family of SoCs.
+
+ Required for Suspend-to-RAM on AM33xx and AM43xx SoCs. Also needed
+ for deep CPUIdle states on AM33xx SoCs. Allows for loading of the
+ firmware onto these remote processors.
+ If unsure say N.
+
+config DA8XX_REMOTEPROC
+ tristate "DA8xx/OMAP-L13x remoteproc support"
+ depends on ARCH_DAVINCI_DA8XX
+ depends on DMA_CMA
+ help
+ Say y here to support DA8xx/OMAP-L13x remote processors via the
+ remote processor framework.
+
+ You want to say y here in order to enable AMP
+ use-cases to run on your platform (multimedia codecs are
+ offloaded to remote DSP processors using this framework).
+
+ This module controls the name of the firmware file that gets
+ loaded on the DSP. This file must reside in the /lib/firmware
+ directory. It can be specified via the module parameter
+ da8xx_fw_name=<filename>, and if not specified will default to
+ "rproc-dsp-fw".
+
+ It's safe to say n here if you're not interested in multimedia
+ offloading.
+
+config KEYSTONE_REMOTEPROC
+ tristate "Keystone Remoteproc support"
+ depends on ARCH_KEYSTONE
+ help
+ Say Y here to support Keystone remote processors (DSP)
+ via the remote processor framework.
+
+ It's safe to say N here if you're not interested in the Keystone
+ DSPs or just want to use a bare minimum kernel.
+
+config MESON_MX_AO_ARC_REMOTEPROC
+ tristate "Amlogic Meson6/8/8b/8m2 AO ARC remote processor support"
+ depends on HAS_IOMEM
+ depends on (ARM && ARCH_MESON) || COMPILE_TEST
+ select GENERIC_ALLOCATOR
+ help
+ Say m or y here to have support for the AO ARC remote processor
+ on Amlogic Meson6/Meson8/Meson8b/Meson8m2 SoCs. This is
+ typically used for system suspend.
+ If unsure say N.
+
+config PRU_REMOTEPROC
+ tristate "TI PRU remoteproc support"
+ depends on TI_PRUSS
+ default TI_PRUSS
+ help
+ Support for TI PRU remote processors present within a PRU-ICSS
+ subsystem via the remote processor framework.
+
+ Say Y or M here to support the Programmable Realtime Unit (PRU)
+ processors on various TI SoCs. It's safe to say N here if you're
+ not interested in the PRU or if you are unsure.
+
+config QCOM_PIL_INFO
+ tristate
+
+config QCOM_RPROC_COMMON
+ tristate
+
+config QCOM_Q6V5_COMMON
+ tristate
+ depends on ARCH_QCOM
+ depends on QCOM_SMEM
+
+config QCOM_Q6V5_ADSP
+ tristate "Qualcomm Technology Inc ADSP Peripheral Image Loader"
+ depends on OF && ARCH_QCOM
+ depends on QCOM_SMEM
+ depends on RPMSG_QCOM_SMD || RPMSG_QCOM_SMD=n
+ depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
+ depends on QCOM_SYSMON || QCOM_SYSMON=n
+ depends on RPMSG_QCOM_GLINK || RPMSG_QCOM_GLINK=n
+ depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
+ select MFD_SYSCON
+ select QCOM_PIL_INFO
+ select QCOM_MDT_LOADER
+ select QCOM_Q6V5_COMMON
+ select QCOM_RPROC_COMMON
+ help
+ Say y here to support the Peripheral Image Loader
+ for the non-TrustZone part of Qualcomm Technology Inc. ADSP and CDSP
+ remote processors. The TrustZone part is handled by QCOM_Q6V5_PAS
+ driver.
+
+config QCOM_Q6V5_MSS
+ tristate "Qualcomm Hexagon V5 self-authenticating modem subsystem support"
+ depends on OF && ARCH_QCOM
+ depends on QCOM_SMEM
+ depends on RPMSG_QCOM_SMD || RPMSG_QCOM_SMD=n
+ depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
+ depends on QCOM_SYSMON || QCOM_SYSMON=n
+ depends on RPMSG_QCOM_GLINK || RPMSG_QCOM_GLINK=n
+ depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
+ select MFD_SYSCON
+ select QCOM_MDT_LOADER
+ select QCOM_PIL_INFO
+ select QCOM_Q6V5_COMMON
+ select QCOM_RPROC_COMMON
+ select QCOM_SCM
+ help
+ Say y here to support the Qualcomm self-authenticating modem
+ subsystem based on Hexagon V5. The TrustZone based system is
+ handled by QCOM_Q6V5_PAS driver.
+
+config QCOM_Q6V5_PAS
+ tristate "Qualcomm Hexagon v5 Peripheral Authentication Service support"
+ depends on OF && ARCH_QCOM
+ depends on QCOM_SMEM
+ depends on RPMSG_QCOM_SMD || RPMSG_QCOM_SMD=n
+ depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
+ depends on QCOM_SYSMON || QCOM_SYSMON=n
+ depends on RPMSG_QCOM_GLINK || RPMSG_QCOM_GLINK=n
+ depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
+ select MFD_SYSCON
+ select QCOM_PIL_INFO
+ select QCOM_MDT_LOADER
+ select QCOM_Q6V5_COMMON
+ select QCOM_RPROC_COMMON
+ select QCOM_SCM
+ help
+ Say y here to support the TrustZone based Peripheral Image Loader
+ for the Qualcomm Hexagon v5 based remote processors. This is commonly
+ used to control subsystems such as ADSP (Audio DSP),
+ CDSP (Compute DSP), MPSS (Modem Peripheral SubSystem), and
+ SLPI (Sensor Low Power Island).
+
+config QCOM_Q6V5_WCSS
+ tristate "Qualcomm Hexagon based WCSS Peripheral Image Loader"
+ depends on OF && ARCH_QCOM
+ depends on QCOM_SMEM
+ depends on RPMSG_QCOM_SMD || RPMSG_QCOM_SMD=n
+ depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
+ depends on QCOM_SYSMON || QCOM_SYSMON=n
+ depends on RPMSG_QCOM_GLINK || RPMSG_QCOM_GLINK=n
+ depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
+ select MFD_SYSCON
+ select QCOM_MDT_LOADER
+ select QCOM_PIL_INFO
+ select QCOM_Q6V5_COMMON
+ select QCOM_RPROC_COMMON
+ select QCOM_SCM
+ help
+ Say y here to support the Qualcomm Peripheral Image Loader for the
+ Hexagon V5 based WCSS remote processors on e.g. IPQ8074. This is
+ a non-TrustZone wireless subsystem.
+
+config QCOM_SYSMON
+ tristate "Qualcomm sysmon driver"
+ depends on RPMSG
+ depends on ARCH_QCOM
+ depends on NET
+ select QCOM_QMI_HELPERS
+ help
+ The sysmon driver implements a sysmon QMI client and a handler for
+ the sys_mon SMD and GLINK channel, which are used for graceful
+ shutdown, retrieving failure information and propagating information
+ about other subsystems being shut down.
+
+ Say y here if your system runs firmware on any other subsystems, e.g.
+ modem or DSP.
+
+config QCOM_WCNSS_PIL
+ tristate "Qualcomm WCNSS Peripheral Image Loader"
+ depends on OF && ARCH_QCOM
+ depends on RPMSG_QCOM_SMD || RPMSG_QCOM_SMD=n
+ depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
+ depends on QCOM_SMEM
+ depends on QCOM_SYSMON || QCOM_SYSMON=n
+ depends on RPMSG_QCOM_GLINK || RPMSG_QCOM_GLINK=n
+ select QCOM_MDT_LOADER
+ select QCOM_PIL_INFO
+ select QCOM_RPROC_COMMON
+ select QCOM_SCM
+ help
+ Say y here to support the Peripheral Image Loader for loading WCNSS
+ firmware and booting the core on e.g. MSM8974 and MSM8916. The firmware is
+ verified and booted with the help of the Peripheral Authentication
+ System (PAS) in TrustZone.
+
+config RCAR_REMOTEPROC
+ tristate "Renesas R-Car Gen3 remoteproc support"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ help
+ Say y here to support the R-Car realtime processor via the
+ remote processor framework. An ELF firmware can be loaded
+ through the remoteproc sysfs entries. The remote processor
+ can be started and stopped.
+ This can be either built-in or a loadable module.
+ If compiled as module (M), the module name is rcar_rproc.
+
+config ST_REMOTEPROC
+ tristate "ST remoteproc support"
+ depends on ARCH_STI
+ select MAILBOX
+ select STI_MBOX
+ help
+ Say y here to support ST's adjunct processors via the remote
+ processor framework.
+ This can be either built-in or a loadable module.
+
+config ST_SLIM_REMOTEPROC
+ tristate
+
+config STM32_RPROC
+ tristate "STM32 remoteproc support"
+ depends on ARCH_STM32
+ depends on REMOTEPROC
+ select MAILBOX
+ help
+ Say y here to support STM32 MCU processors via the
+ remote processor framework.
+
+ You want to say y here in order to enable AMP
+ use-cases to run on your platform (dedicated firmware could be
+ offloaded to remote MCU processors using this framework).
+
+ This can be either built-in or a loadable module.
+
+config TI_K3_DSP_REMOTEPROC
+ tristate "TI K3 DSP remoteproc support"
+ depends on ARCH_K3
+ select MAILBOX
+ select OMAP2PLUS_MBOX
+ help
+ Say m here to support TI's C66x and C71x DSP remote processor
+ subsystems on various SoCs of the TI K3 family through the remote
+ processor framework.
+
+ It's safe to say N here if you're not interested in utilizing
+ the DSP slave processors.
+
+config TI_K3_R5_REMOTEPROC
+ tristate "TI K3 R5 remoteproc support"
+ depends on ARCH_K3
+ select MAILBOX
+ select OMAP2PLUS_MBOX
+ help
+ Say m here to support TI's R5F remote processor subsystems
+ on various SoCs of the TI K3 family through the remote processor
+ framework.
+
+ It's safe to say N here if you're not interested in utilizing
+ a slave processor.
+
+endif # REMOTEPROC
+
+endmenu
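
A minimal configuration fragment exercising this menu might look as follows
(the driver selections are illustrative; pick the entries matching your SoC):

    CONFIG_REMOTEPROC=y
    CONFIG_REMOTEPROC_CDEV=y
    CONFIG_IMX_DSP_REMOTEPROC=m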
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
new file mode 100644
index 000000000..5478c7cb9
--- /dev/null
+++ b/drivers/remoteproc/Makefile
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Generic framework for controlling remote processors
+#
+
+obj-$(CONFIG_REMOTEPROC) += remoteproc.o
+remoteproc-y := remoteproc_core.o
+remoteproc-y += remoteproc_coredump.o
+remoteproc-y += remoteproc_debugfs.o
+remoteproc-y += remoteproc_sysfs.o
+remoteproc-y += remoteproc_virtio.o
+remoteproc-y += remoteproc_elf_loader.o
+obj-$(CONFIG_REMOTEPROC_CDEV) += remoteproc_cdev.o
+obj-$(CONFIG_IMX_REMOTEPROC) += imx_rproc.o
+obj-$(CONFIG_IMX_DSP_REMOTEPROC) += imx_dsp_rproc.o
+obj-$(CONFIG_INGENIC_VPU_RPROC) += ingenic_rproc.o
+obj-$(CONFIG_MTK_SCP) += mtk_scp.o mtk_scp_ipi.o
+obj-$(CONFIG_OMAP_REMOTEPROC) += omap_remoteproc.o
+obj-$(CONFIG_WKUP_M3_RPROC) += wkup_m3_rproc.o
+obj-$(CONFIG_DA8XX_REMOTEPROC) += da8xx_remoteproc.o
+obj-$(CONFIG_KEYSTONE_REMOTEPROC) += keystone_remoteproc.o
+obj-$(CONFIG_MESON_MX_AO_ARC_REMOTEPROC)+= meson_mx_ao_arc.o
+obj-$(CONFIG_PRU_REMOTEPROC) += pru_rproc.o
+obj-$(CONFIG_QCOM_PIL_INFO) += qcom_pil_info.o
+obj-$(CONFIG_QCOM_RPROC_COMMON) += qcom_common.o
+obj-$(CONFIG_QCOM_Q6V5_COMMON) += qcom_q6v5.o
+obj-$(CONFIG_QCOM_Q6V5_ADSP) += qcom_q6v5_adsp.o
+obj-$(CONFIG_QCOM_Q6V5_MSS) += qcom_q6v5_mss.o
+obj-$(CONFIG_QCOM_Q6V5_PAS) += qcom_q6v5_pas.o
+obj-$(CONFIG_QCOM_Q6V5_WCSS) += qcom_q6v5_wcss.o
+obj-$(CONFIG_QCOM_SYSMON) += qcom_sysmon.o
+obj-$(CONFIG_QCOM_WCNSS_PIL) += qcom_wcnss_pil.o
+qcom_wcnss_pil-y += qcom_wcnss.o
+qcom_wcnss_pil-y += qcom_wcnss_iris.o
+obj-$(CONFIG_RCAR_REMOTEPROC) += rcar_rproc.o
+obj-$(CONFIG_ST_REMOTEPROC) += st_remoteproc.o
+obj-$(CONFIG_ST_SLIM_REMOTEPROC) += st_slim_rproc.o
+obj-$(CONFIG_STM32_RPROC) += stm32_rproc.o
+obj-$(CONFIG_TI_K3_DSP_REMOTEPROC) += ti_k3_dsp_remoteproc.o
+obj-$(CONFIG_TI_K3_R5_REMOTEPROC) += ti_k3_r5_remoteproc.o
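
The pattern above is uniform: each driver contributes one object keyed on its
Kconfig symbol, and multi-object modules (such as qcom_wcnss_pil, assembled
from qcom_wcnss.o and qcom_wcnss_iris.o) list their parts on <name>-y lines.
A sketch of how a new driver would be wired in (the symbol and file name
below are hypothetical):

    obj-$(CONFIG_FOO_REMOTEPROC)		+= foo_rproc.o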
diff --git a/drivers/remoteproc/da8xx_remoteproc.c b/drivers/remoteproc/da8xx_remoteproc.c
new file mode 100644
index 000000000..98e0be947
--- /dev/null
+++ b/drivers/remoteproc/da8xx_remoteproc.c
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Remote processor machine-specific module for DA8XX
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+
+#include "remoteproc_internal.h"
+
+static char *da8xx_fw_name;
+module_param(da8xx_fw_name, charp, 0444);
+MODULE_PARM_DESC(da8xx_fw_name,
+ "Name of DSP firmware file in /lib/firmware (if not specified defaults to 'rproc-dsp-fw')");
+
+/*
+ * OMAP-L138 Technical References:
+ * http://www.ti.com/product/omap-l138
+ */
+#define SYSCFG_CHIPSIG0 BIT(0)
+#define SYSCFG_CHIPSIG1 BIT(1)
+#define SYSCFG_CHIPSIG2 BIT(2)
+#define SYSCFG_CHIPSIG3 BIT(3)
+#define SYSCFG_CHIPSIG4 BIT(4)
+
+#define DA8XX_RPROC_LOCAL_ADDRESS_MASK (SZ_16M - 1)
+
+/**
+ * struct da8xx_rproc_mem - internal memory structure
+ * @cpu_addr: MPU virtual address of the memory region
+ * @bus_addr: Bus address used to access the memory region
+ * @dev_addr: Device address of the memory region from DSP view
+ * @size: Size of the memory region
+ */
+struct da8xx_rproc_mem {
+ void __iomem *cpu_addr;
+ phys_addr_t bus_addr;
+ u32 dev_addr;
+ size_t size;
+};
+
+/**
+ * struct da8xx_rproc - da8xx remote processor instance state
+ * @rproc: rproc handle
+ * @mem: internal memory regions data
+ * @num_mems: number of internal memory regions
+ * @dsp_clk: placeholder for platform's DSP clk
+ * @dsp_reset: reset control handle for the DSP
+ * @ack_fxn: chip-specific ack function for ack'ing irq
+ * @irq_data: ack_fxn function parameter
+ * @chipsig: virt ptr to DSP interrupt registers (CHIPSIG & CHIPSIG_CLR)
+ * @bootreg: virt ptr to DSP boot address register (HOST1CFG)
+ * @irq: irq # used by this instance
+ */
+struct da8xx_rproc {
+ struct rproc *rproc;
+ struct da8xx_rproc_mem *mem;
+ int num_mems;
+ struct clk *dsp_clk;
+ struct reset_control *dsp_reset;
+ void (*ack_fxn)(struct irq_data *data);
+ struct irq_data *irq_data;
+ void __iomem *chipsig;
+ void __iomem *bootreg;
+ int irq;
+};
+
+/**
+ * handle_event() - inbound virtqueue message handler (threaded half)
+ * @irq: interrupt number
+ * @p: pointer to the rproc instance
+ *
+ * This function runs as the threaded half of the DSP interrupt; it is
+ * scheduled whenever da8xx_rproc_callback() returns IRQ_WAKE_THREAD.
+ */
+static irqreturn_t handle_event(int irq, void *p)
+{
+ struct rproc *rproc = (struct rproc *)p;
+
+ /* Process incoming buffers on all our vrings */
+ rproc_vq_interrupt(rproc, 0);
+ rproc_vq_interrupt(rproc, 1);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * da8xx_rproc_callback() - inbound virtqueue message handler
+ * @irq: interrupt number
+ * @p: pointer to the rproc instance
+ *
+ * This handler is invoked directly by the kernel whenever the remote
+ * core (DSP) has modified the state of a virtqueue. There is no
+ * "payload" message indicating the virtqueue index as is the case with
+ * mailbox-based implementations on OMAP4. As such, this handler "polls"
+ * each known virtqueue index for every invocation.
+ */
+static irqreturn_t da8xx_rproc_callback(int irq, void *p)
+{
+ struct rproc *rproc = (struct rproc *)p;
+ struct da8xx_rproc *drproc = (struct da8xx_rproc *)rproc->priv;
+ u32 chipsig;
+
+ chipsig = readl(drproc->chipsig);
+ if (chipsig & SYSCFG_CHIPSIG0) {
+ /* Clear interrupt level source */
+ writel(SYSCFG_CHIPSIG0, drproc->chipsig + 4);
+
+ /*
+ * ACK intr to AINTC.
+ *
+ * It has already been ack'ed by the kernel before calling
+ * this function, but since the ARM<->DSP interrupts in the
+ * CHIPSIG register are "level" instead of "pulse" variety,
+ * we need to ack it after taking down the level else we'll
+ * be called again immediately after returning.
+ */
+ drproc->ack_fxn(drproc->irq_data);
+
+ return IRQ_WAKE_THREAD;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int da8xx_rproc_start(struct rproc *rproc)
+{
+ struct device *dev = rproc->dev.parent;
+ struct da8xx_rproc *drproc = (struct da8xx_rproc *)rproc->priv;
+ struct clk *dsp_clk = drproc->dsp_clk;
+ struct reset_control *dsp_reset = drproc->dsp_reset;
+ int ret;
+
+ /* hw requires the start (boot) address be on 1KB boundary */
+ if (rproc->bootaddr & 0x3ff) {
+ dev_err(dev, "invalid boot address: must be aligned to 1KB\n");
+
+ return -EINVAL;
+ }
+
+ writel(rproc->bootaddr, drproc->bootreg);
+
+ ret = clk_prepare_enable(dsp_clk);
+ if (ret) {
+ dev_err(dev, "clk_prepare_enable() failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = reset_control_deassert(dsp_reset);
+ if (ret) {
+ dev_err(dev, "reset_control_deassert() failed: %d\n", ret);
+ clk_disable_unprepare(dsp_clk);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int da8xx_rproc_stop(struct rproc *rproc)
+{
+ struct da8xx_rproc *drproc = rproc->priv;
+ struct device *dev = rproc->dev.parent;
+ int ret;
+
+ ret = reset_control_assert(drproc->dsp_reset);
+ if (ret) {
+ dev_err(dev, "reset_control_assert() failed: %d\n", ret);
+ return ret;
+ }
+
+ clk_disable_unprepare(drproc->dsp_clk);
+
+ return 0;
+}
+
+/* kick a virtqueue */
+static void da8xx_rproc_kick(struct rproc *rproc, int vqid)
+{
+ struct da8xx_rproc *drproc = (struct da8xx_rproc *)rproc->priv;
+
+ /* Interrupt remote proc */
+ writel(SYSCFG_CHIPSIG2, drproc->chipsig);
+}
+
+static const struct rproc_ops da8xx_rproc_ops = {
+ .start = da8xx_rproc_start,
+ .stop = da8xx_rproc_stop,
+ .kick = da8xx_rproc_kick,
+};
+
+static int da8xx_rproc_get_internal_memories(struct platform_device *pdev,
+ struct da8xx_rproc *drproc)
+{
+ static const char * const mem_names[] = {"l2sram", "l1pram", "l1dram"};
+ int num_mems = ARRAY_SIZE(mem_names);
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int i;
+
+ drproc->mem = devm_kcalloc(dev, num_mems, sizeof(*drproc->mem),
+ GFP_KERNEL);
+ if (!drproc->mem)
+ return -ENOMEM;
+
+ for (i = 0; i < num_mems; i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ mem_names[i]);
+ drproc->mem[i].cpu_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(drproc->mem[i].cpu_addr)) {
+ dev_err(dev, "failed to parse and map %s memory\n",
+ mem_names[i]);
+ return PTR_ERR(drproc->mem[i].cpu_addr);
+ }
+ drproc->mem[i].bus_addr = res->start;
+ drproc->mem[i].dev_addr =
+ res->start & DA8XX_RPROC_LOCAL_ADDRESS_MASK;
+ drproc->mem[i].size = resource_size(res);
+
+ dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %p da 0x%x\n",
+ mem_names[i], &drproc->mem[i].bus_addr,
+ drproc->mem[i].size, drproc->mem[i].cpu_addr,
+ drproc->mem[i].dev_addr);
+ }
+ drproc->num_mems = num_mems;
+
+ return 0;
+}
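+
+/*
+ * Illustration of the masking above (figures are illustrative, not taken
+ * from this driver): on OMAP-L138 the DSP L2 RAM is seen by the ARM at
+ * bus address 0x11800000, so the device address becomes
+ * 0x11800000 & (SZ_16M - 1) = 0x00800000, the DSP-local view.
+ */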
+
+static int da8xx_rproc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct da8xx_rproc *drproc;
+ struct rproc *rproc;
+ struct irq_data *irq_data;
+ struct resource *bootreg_res;
+ struct resource *chipsig_res;
+ struct clk *dsp_clk;
+ struct reset_control *dsp_reset;
+ void __iomem *chipsig;
+ void __iomem *bootreg;
+ int irq;
+ int ret;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ irq_data = irq_get_irq_data(irq);
+ if (!irq_data) {
+ dev_err(dev, "irq_get_irq_data(%d): NULL\n", irq);
+ return -EINVAL;
+ }
+
+ bootreg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "host1cfg");
+ bootreg = devm_ioremap_resource(dev, bootreg_res);
+ if (IS_ERR(bootreg))
+ return PTR_ERR(bootreg);
+
+ chipsig_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "chipsig");
+ chipsig = devm_ioremap_resource(dev, chipsig_res);
+ if (IS_ERR(chipsig))
+ return PTR_ERR(chipsig);
+
+ dsp_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(dsp_clk)) {
+ dev_err(dev, "clk_get error: %ld\n", PTR_ERR(dsp_clk));
+
+ return PTR_ERR(dsp_clk);
+ }
+
+ dsp_reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(dsp_reset)) {
+ if (PTR_ERR(dsp_reset) != -EPROBE_DEFER)
+ dev_err(dev, "unable to get reset control: %ld\n",
+ PTR_ERR(dsp_reset));
+
+ return PTR_ERR(dsp_reset);
+ }
+
+ if (dev->of_node) {
+ ret = of_reserved_mem_device_init(dev);
+ if (ret) {
+ dev_err(dev, "device does not have specific CMA pool: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ rproc = rproc_alloc(dev, "dsp", &da8xx_rproc_ops, da8xx_fw_name,
+ sizeof(*drproc));
+ if (!rproc) {
+ ret = -ENOMEM;
+ goto free_mem;
+ }
+
+ /* error recovery is not supported at present */
+ rproc->recovery_disabled = true;
+
+ drproc = rproc->priv;
+ drproc->rproc = rproc;
+ drproc->dsp_clk = dsp_clk;
+ drproc->dsp_reset = dsp_reset;
+ rproc->has_iommu = false;
+
+ ret = da8xx_rproc_get_internal_memories(pdev, drproc);
+ if (ret)
+ goto free_rproc;
+
+ platform_set_drvdata(pdev, rproc);
+
+ /* everything the ISR needs is now setup, so hook it up */
+ ret = devm_request_threaded_irq(dev, irq, da8xx_rproc_callback,
+ handle_event, 0, "da8xx-remoteproc",
+ rproc);
+ if (ret) {
+ dev_err(dev, "devm_request_threaded_irq error: %d\n", ret);
+ goto free_rproc;
+ }
+
+ /*
+ * rproc_add() can end up enabling the DSP's clk with the DSP
+ * *not* in reset, but da8xx_rproc_start() needs the DSP to be
+ * held in reset at the time it is called.
+ */
+ ret = reset_control_assert(dsp_reset);
+ if (ret)
+ goto free_rproc;
+
+ drproc->chipsig = chipsig;
+ drproc->bootreg = bootreg;
+ drproc->ack_fxn = irq_data->chip->irq_ack;
+ drproc->irq_data = irq_data;
+ drproc->irq = irq;
+
+ ret = rproc_add(rproc);
+ if (ret) {
+ dev_err(dev, "rproc_add failed: %d\n", ret);
+ goto free_rproc;
+ }
+
+ return 0;
+
+free_rproc:
+ rproc_free(rproc);
+free_mem:
+ if (dev->of_node)
+ of_reserved_mem_device_release(dev);
+ return ret;
+}
+
+static int da8xx_rproc_remove(struct platform_device *pdev)
+{
+ struct rproc *rproc = platform_get_drvdata(pdev);
+ struct da8xx_rproc *drproc = (struct da8xx_rproc *)rproc->priv;
+ struct device *dev = &pdev->dev;
+
+ /*
+ * The devm subsystem might end up releasing things before
+ * freeing the irq, thus allowing an interrupt to sneak in while
+ * the device is being removed. This should prevent that.
+ */
+ disable_irq(drproc->irq);
+
+ rproc_del(rproc);
+ rproc_free(rproc);
+ if (dev->of_node)
+ of_reserved_mem_device_release(dev);
+
+ return 0;
+}
+
+static const struct of_device_id davinci_rproc_of_match[] __maybe_unused = {
+ { .compatible = "ti,da850-dsp", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, davinci_rproc_of_match);
+
+static struct platform_driver da8xx_rproc_driver = {
+ .probe = da8xx_rproc_probe,
+ .remove = da8xx_rproc_remove,
+ .driver = {
+ .name = "davinci-rproc",
+ .of_match_table = of_match_ptr(davinci_rproc_of_match),
+ },
+};
+
+module_platform_driver(da8xx_rproc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DA8XX Remote Processor control driver");
diff --git a/drivers/remoteproc/imx_dsp_rproc.c b/drivers/remoteproc/imx_dsp_rproc.c
new file mode 100644
index 000000000..dcd07a6a5
--- /dev/null
+++ b/drivers/remoteproc/imx_dsp_rproc.c
@@ -0,0 +1,1318 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2021 NXP */
+
+#include <dt-bindings/firmware/imx/rsrc.h>
+#include <linux/arm-smccc.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/firmware/imx/sci.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_client.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/remoteproc.h>
+#include <linux/slab.h>
+
+#include "imx_rproc.h"
+#include "remoteproc_elf_helpers.h"
+#include "remoteproc_internal.h"
+
+#define DSP_RPROC_CLK_MAX 5
+
+#define REMOTE_IS_READY BIT(0)
+#define REMOTE_READY_WAIT_MAX_RETRIES 500
+
+/* att flags */
+/* DSP own area */
+#define ATT_OWN BIT(31)
+/* DSP instruction area */
+#define ATT_IRAM BIT(30)
+
+/* Definitions for i.MX8MP */
+/* DAP registers */
+#define IMX8M_DAP_DEBUG 0x28800000
+#define IMX8M_DAP_DEBUG_SIZE (64 * 1024)
+#define IMX8M_DAP_PWRCTL (0x4000 + 0x3020)
+#define IMX8M_PWRCTL_CORERESET BIT(16)
+
+/* DSP audio mix registers */
+#define IMX8M_AudioDSP_REG0 0x100
+#define IMX8M_AudioDSP_REG1 0x104
+#define IMX8M_AudioDSP_REG2 0x108
+#define IMX8M_AudioDSP_REG3 0x10c
+
+#define IMX8M_AudioDSP_REG2_RUNSTALL BIT(5)
+#define IMX8M_AudioDSP_REG2_PWAITMODE BIT(1)
+
+/* Definitions for i.MX8ULP */
+#define IMX8ULP_SIM_LPAV_REG_SYSCTRL0 0x8
+#define IMX8ULP_SYSCTRL0_DSP_DBG_RST BIT(25)
+#define IMX8ULP_SYSCTRL0_DSP_PLAT_CLK_EN BIT(19)
+#define IMX8ULP_SYSCTRL0_DSP_PBCLK_EN BIT(18)
+#define IMX8ULP_SYSCTRL0_DSP_CLK_EN BIT(17)
+#define IMX8ULP_SYSCTRL0_DSP_RST BIT(16)
+#define IMX8ULP_SYSCTRL0_DSP_OCD_HALT BIT(14)
+#define IMX8ULP_SYSCTRL0_DSP_STALL BIT(13)
+
+#define IMX8ULP_SIP_HIFI_XRDC 0xc200000e
+
+/*
+ * enum imx_dsp_rp_mbox_messages - predefined mailbox messages
+ *
+ * @RP_MBOX_SUSPEND_SYSTEM: system suspend request for the remote processor
+ *
+ * @RP_MBOX_SUSPEND_ACK: successful response from remote processor for a
+ * suspend request
+ *
+ * @RP_MBOX_RESUME_SYSTEM: system resume request for the remote processor
+ *
+ * @RP_MBOX_RESUME_ACK: successful response from remote processor for a
+ * resume request
+ */
+enum imx_dsp_rp_mbox_messages {
+ RP_MBOX_SUSPEND_SYSTEM = 0xFF11,
+ RP_MBOX_SUSPEND_ACK = 0xFF12,
+ RP_MBOX_RESUME_SYSTEM = 0xFF13,
+ RP_MBOX_RESUME_ACK = 0xFF14,
+};
+
+/**
+ * struct imx_dsp_rproc - DSP remote processor state
+ * @regmap: regmap handler
+ * @rproc: rproc handler
+ * @dsp_dcfg: device configuration pointer
+ * @clks: clocks needed by this device
+ * @cl: mailbox client to request the mailbox channel
+ * @cl_rxdb: mailbox client to request the mailbox channel for doorbell
+ * @tx_ch: mailbox tx channel handle
+ * @rx_ch: mailbox rx channel handle
+ * @rxdb_ch: mailbox rx doorbell channel handle
+ * @pd_dev: power domain device
+ * @pd_dev_link: power domain device link
+ * @ipc_handle: System Control Unit ipc handle
+ * @rproc_work: work for processing virtio interrupts
+ * @pm_comp: completion primitive to sync for suspend response
+ * @num_domains: power domain number
+ * @flags: control flags
+ */
+struct imx_dsp_rproc {
+ struct regmap *regmap;
+ struct rproc *rproc;
+ const struct imx_dsp_rproc_dcfg *dsp_dcfg;
+ struct clk_bulk_data clks[DSP_RPROC_CLK_MAX];
+ struct mbox_client cl;
+ struct mbox_client cl_rxdb;
+ struct mbox_chan *tx_ch;
+ struct mbox_chan *rx_ch;
+ struct mbox_chan *rxdb_ch;
+ struct device **pd_dev;
+ struct device_link **pd_dev_link;
+ struct imx_sc_ipc *ipc_handle;
+ struct work_struct rproc_work;
+ struct completion pm_comp;
+ int num_domains;
+ u32 flags;
+};
+
+/**
+ * struct imx_dsp_rproc_dcfg - DSP remote processor configuration
+ * @dcfg: imx_rproc_dcfg handler
+ * @reset: reset callback function
+ */
+struct imx_dsp_rproc_dcfg {
+ const struct imx_rproc_dcfg *dcfg;
+ int (*reset)(struct imx_dsp_rproc *priv);
+};
+
+static const struct imx_rproc_att imx_dsp_rproc_att_imx8qm[] = {
+ /* dev addr , sys addr , size , flags */
+ { 0x596e8000, 0x556e8000, 0x00008000, ATT_OWN },
+ { 0x596f0000, 0x556f0000, 0x00008000, ATT_OWN },
+ { 0x596f8000, 0x556f8000, 0x00000800, ATT_OWN | ATT_IRAM},
+ { 0x55700000, 0x55700000, 0x00070000, ATT_OWN },
+ /* DDR (Data) */
+ { 0x80000000, 0x80000000, 0x60000000, 0},
+};
+
+static const struct imx_rproc_att imx_dsp_rproc_att_imx8qxp[] = {
+ /* dev addr , sys addr , size , flags */
+ { 0x596e8000, 0x596e8000, 0x00008000, ATT_OWN },
+ { 0x596f0000, 0x596f0000, 0x00008000, ATT_OWN },
+ { 0x596f8000, 0x596f8000, 0x00000800, ATT_OWN | ATT_IRAM},
+ { 0x59700000, 0x59700000, 0x00070000, ATT_OWN },
+ /* DDR (Data) */
+ { 0x80000000, 0x80000000, 0x60000000, 0},
+};
+
+static const struct imx_rproc_att imx_dsp_rproc_att_imx8mp[] = {
+ /* dev addr , sys addr , size , flags */
+ { 0x3b6e8000, 0x3b6e8000, 0x00008000, ATT_OWN },
+ { 0x3b6f0000, 0x3b6f0000, 0x00008000, ATT_OWN },
+ { 0x3b6f8000, 0x3b6f8000, 0x00000800, ATT_OWN | ATT_IRAM},
+ { 0x3b700000, 0x3b700000, 0x00040000, ATT_OWN },
+ /* DDR (Data) */
+ { 0x40000000, 0x40000000, 0x80000000, 0},
+};
+
+static const struct imx_rproc_att imx_dsp_rproc_att_imx8ulp[] = {
+ /* dev addr , sys addr , size , flags */
+ { 0x21170000, 0x21170000, 0x00010000, ATT_OWN | ATT_IRAM},
+ { 0x21180000, 0x21180000, 0x00010000, ATT_OWN },
+ /* DDR (Data) */
+ { 0x0c000000, 0x80000000, 0x10000000, 0},
+ { 0x30000000, 0x90000000, 0x10000000, 0},
+};
+
+/* Reset function for DSP on i.MX8MP */
+static int imx8mp_dsp_reset(struct imx_dsp_rproc *priv)
+{
+ void __iomem *dap = ioremap_wc(IMX8M_DAP_DEBUG, IMX8M_DAP_DEBUG_SIZE);
+ int pwrctl;
+
+ /* Put DSP into reset and stall */
+ pwrctl = readl(dap + IMX8M_DAP_PWRCTL);
+ pwrctl |= IMX8M_PWRCTL_CORERESET;
+ writel(pwrctl, dap + IMX8M_DAP_PWRCTL);
+
+ /* Keep reset asserted for 10 cycles */
+ usleep_range(1, 2);
+
+ regmap_update_bits(priv->regmap, IMX8M_AudioDSP_REG2,
+ IMX8M_AudioDSP_REG2_RUNSTALL,
+ IMX8M_AudioDSP_REG2_RUNSTALL);
+
+ /* Take the DSP out of reset and keep stalled for FW loading */
+ pwrctl = readl(dap + IMX8M_DAP_PWRCTL);
+ pwrctl &= ~IMX8M_PWRCTL_CORERESET;
+ writel(pwrctl, dap + IMX8M_DAP_PWRCTL);
+
+ iounmap(dap);
+ return 0;
+}
+
+/* Reset function for DSP on i.MX8ULP */
+static int imx8ulp_dsp_reset(struct imx_dsp_rproc *priv)
+{
+ struct arm_smccc_res res;
+
+ /* Put DSP into reset and stall */
+ regmap_update_bits(priv->regmap, IMX8ULP_SIM_LPAV_REG_SYSCTRL0,
+ IMX8ULP_SYSCTRL0_DSP_RST, IMX8ULP_SYSCTRL0_DSP_RST);
+ regmap_update_bits(priv->regmap, IMX8ULP_SIM_LPAV_REG_SYSCTRL0,
+ IMX8ULP_SYSCTRL0_DSP_STALL,
+ IMX8ULP_SYSCTRL0_DSP_STALL);
+
+ /* Configure resources of DSP through TFA */
+ arm_smccc_smc(IMX8ULP_SIP_HIFI_XRDC, 0, 0, 0, 0, 0, 0, 0, &res);
+
+ /* Take the DSP out of reset and keep stalled for FW loading */
+ regmap_update_bits(priv->regmap, IMX8ULP_SIM_LPAV_REG_SYSCTRL0,
+ IMX8ULP_SYSCTRL0_DSP_RST, 0);
+ regmap_update_bits(priv->regmap, IMX8ULP_SIM_LPAV_REG_SYSCTRL0,
+ IMX8ULP_SYSCTRL0_DSP_DBG_RST, 0);
+
+ return 0;
+}
+
+/* Specific configuration for i.MX8MP */
+static const struct imx_rproc_dcfg dsp_rproc_cfg_imx8mp = {
+ .src_reg = IMX8M_AudioDSP_REG2,
+ .src_mask = IMX8M_AudioDSP_REG2_RUNSTALL,
+ .src_start = 0,
+ .src_stop = IMX8M_AudioDSP_REG2_RUNSTALL,
+ .att = imx_dsp_rproc_att_imx8mp,
+ .att_size = ARRAY_SIZE(imx_dsp_rproc_att_imx8mp),
+ .method = IMX_RPROC_MMIO,
+};
+
+static const struct imx_dsp_rproc_dcfg imx_dsp_rproc_cfg_imx8mp = {
+ .dcfg = &dsp_rproc_cfg_imx8mp,
+ .reset = imx8mp_dsp_reset,
+};
+
+/* Specific configuration for i.MX8ULP */
+static const struct imx_rproc_dcfg dsp_rproc_cfg_imx8ulp = {
+ .src_reg = IMX8ULP_SIM_LPAV_REG_SYSCTRL0,
+ .src_mask = IMX8ULP_SYSCTRL0_DSP_STALL,
+ .src_start = 0,
+ .src_stop = IMX8ULP_SYSCTRL0_DSP_STALL,
+ .att = imx_dsp_rproc_att_imx8ulp,
+ .att_size = ARRAY_SIZE(imx_dsp_rproc_att_imx8ulp),
+ .method = IMX_RPROC_MMIO,
+};
+
+static const struct imx_dsp_rproc_dcfg imx_dsp_rproc_cfg_imx8ulp = {
+ .dcfg = &dsp_rproc_cfg_imx8ulp,
+ .reset = imx8ulp_dsp_reset,
+};
+
+/* Specific configuration for i.MX8QXP */
+static const struct imx_rproc_dcfg dsp_rproc_cfg_imx8qxp = {
+ .att = imx_dsp_rproc_att_imx8qxp,
+ .att_size = ARRAY_SIZE(imx_dsp_rproc_att_imx8qxp),
+ .method = IMX_RPROC_SCU_API,
+};
+
+static const struct imx_dsp_rproc_dcfg imx_dsp_rproc_cfg_imx8qxp = {
+ .dcfg = &dsp_rproc_cfg_imx8qxp,
+};
+
+/* Specific configuration for i.MX8QM */
+static const struct imx_rproc_dcfg dsp_rproc_cfg_imx8qm = {
+ .att = imx_dsp_rproc_att_imx8qm,
+ .att_size = ARRAY_SIZE(imx_dsp_rproc_att_imx8qm),
+ .method = IMX_RPROC_SCU_API,
+};
+
+static const struct imx_dsp_rproc_dcfg imx_dsp_rproc_cfg_imx8qm = {
+ .dcfg = &dsp_rproc_cfg_imx8qm,
+};
+
+static int imx_dsp_rproc_ready(struct rproc *rproc)
+{
+ struct imx_dsp_rproc *priv = rproc->priv;
+ int i;
+
+ if (!priv->rxdb_ch)
+ return 0;
+
+ for (i = 0; i < REMOTE_READY_WAIT_MAX_RETRIES; i++) {
+ if (priv->flags & REMOTE_IS_READY)
+ return 0;
+ usleep_range(100, 200);
+ }
+
+ return -ETIMEDOUT;
+}
+
+/*
+ * Start function for rproc_ops
+ *
+ * There is a handshake in the start procedure: when the DSP firmware
+ * starts, it sends a doorbell message to this driver; the doorbell
+ * handler sets the REMOTE_IS_READY flag, and the start procedure waits
+ * for that flag before the driver kicks any message to the DSP.
+ */
+static int imx_dsp_rproc_start(struct rproc *rproc)
+{
+ struct imx_dsp_rproc *priv = rproc->priv;
+ const struct imx_dsp_rproc_dcfg *dsp_dcfg = priv->dsp_dcfg;
+ const struct imx_rproc_dcfg *dcfg = dsp_dcfg->dcfg;
+ struct device *dev = rproc->dev.parent;
+ int ret;
+
+ switch (dcfg->method) {
+ case IMX_RPROC_MMIO:
+ ret = regmap_update_bits(priv->regmap,
+ dcfg->src_reg,
+ dcfg->src_mask,
+ dcfg->src_start);
+ break;
+ case IMX_RPROC_SCU_API:
+ ret = imx_sc_pm_cpu_start(priv->ipc_handle,
+ IMX_SC_R_DSP,
+ true,
+ rproc->bootaddr);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (ret)
+ dev_err(dev, "Failed to enable remote core!\n");
+ else
+ ret = imx_dsp_rproc_ready(rproc);
+
+ return ret;
+}
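+
+/*
+ * The start/ready handshake at a glance:
+ *   driver:  releases the core (MMIO stall release or SCU start)
+ *   DSP fw:  boots and rings the "rxdb" doorbell
+ *   driver:  imx_dsp_rproc_rxdb_callback() sets REMOTE_IS_READY
+ *   driver:  imx_dsp_rproc_ready() sees the flag and start() returns 0
+ */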
+
+/*
+ * Stop function for rproc_ops
+ * It clears the REMOTE_IS_READY flag.
+ */
+static int imx_dsp_rproc_stop(struct rproc *rproc)
+{
+ struct imx_dsp_rproc *priv = rproc->priv;
+ const struct imx_dsp_rproc_dcfg *dsp_dcfg = priv->dsp_dcfg;
+ const struct imx_rproc_dcfg *dcfg = dsp_dcfg->dcfg;
+ struct device *dev = rproc->dev.parent;
+ int ret = 0;
+
+ if (rproc->state == RPROC_CRASHED) {
+ priv->flags &= ~REMOTE_IS_READY;
+ return 0;
+ }
+
+ switch (dcfg->method) {
+ case IMX_RPROC_MMIO:
+ ret = regmap_update_bits(priv->regmap, dcfg->src_reg, dcfg->src_mask,
+ dcfg->src_stop);
+ break;
+ case IMX_RPROC_SCU_API:
+ ret = imx_sc_pm_cpu_start(priv->ipc_handle,
+ IMX_SC_R_DSP,
+ false,
+ rproc->bootaddr);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (ret)
+ dev_err(dev, "Failed to stop remote core\n");
+ else
+ priv->flags &= ~REMOTE_IS_READY;
+
+ return ret;
+}
+
+/**
+ * imx_dsp_rproc_sys_to_da() - internal memory translation helper
+ * @priv: private data pointer
+ * @sys: system address (DDR address)
+ * @len: length of the memory buffer
+ * @da: device address to translate
+ *
+ * Convert a system address (DDR address) to a device (DSP) address,
+ * as memory may be remapped for the device.
+ */
+static int imx_dsp_rproc_sys_to_da(struct imx_dsp_rproc *priv, u64 sys,
+ size_t len, u64 *da)
+{
+ const struct imx_dsp_rproc_dcfg *dsp_dcfg = priv->dsp_dcfg;
+ const struct imx_rproc_dcfg *dcfg = dsp_dcfg->dcfg;
+ int i;
+
+ /* Parse address translation table */
+ for (i = 0; i < dcfg->att_size; i++) {
+ const struct imx_rproc_att *att = &dcfg->att[i];
+
+ if (sys >= att->sa && sys + len <= att->sa + att->size) {
+ unsigned int offset = sys - att->sa;
+
+ *da = att->da + offset;
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
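+
+/*
+ * Worked example with the i.MX8QM table above: sys = 0x556e9000 falls
+ * inside { da 0x596e8000, sa 0x556e8000, size 0x8000 }, so the offset
+ * is 0x1000 and the translated device address is 0x596e9000.
+ */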
+
+/* Main virtqueue message work function
+ *
+ * This function is executed upon scheduling of the i.MX DSP remoteproc
+ * driver's workqueue. The workqueue is scheduled by the mailbox rx
+ * handler.
+ *
+ * This work function processes both the Tx and Rx virtqueue indices on
+ * every invocation. The rproc_vq_interrupt function can detect if there
+ * are new unprocessed messages or not (returns IRQ_NONE vs IRQ_HANDLED),
+ * but there is no need to check for these return values. The index 0
+ * triggering will process all pending Rx buffers, and the index 1 triggering
+ * will process all newly available Tx buffers and will wakeup any potentially
+ * blocked senders.
+ *
+ * NOTE:
+ * The current logic is based on an inherent design assumption of supporting
+ * only 2 vrings, but this can be changed if needed.
+ */
+static void imx_dsp_rproc_vq_work(struct work_struct *work)
+{
+ struct imx_dsp_rproc *priv = container_of(work, struct imx_dsp_rproc,
+ rproc_work);
+ struct rproc *rproc = priv->rproc;
+
+ mutex_lock(&rproc->lock);
+
+ if (rproc->state != RPROC_RUNNING)
+ goto unlock_mutex;
+
+ rproc_vq_interrupt(priv->rproc, 0);
+ rproc_vq_interrupt(priv->rproc, 1);
+
+unlock_mutex:
+ mutex_unlock(&rproc->lock);
+}
+
+/**
+ * imx_dsp_rproc_rx_tx_callback() - inbound mailbox message handler
+ * @cl: mailbox client pointer used for requesting the mailbox channel
+ * @data: mailbox payload
+ *
+ * This handler is invoked by the mailbox driver whenever a mailbox
+ * message is received. The SUSPEND and RESUME acknowledgements are
+ * handled here; all other messages are passed on to the remoteproc core.
+ */
+static void imx_dsp_rproc_rx_tx_callback(struct mbox_client *cl, void *data)
+{
+ struct rproc *rproc = dev_get_drvdata(cl->dev);
+ struct imx_dsp_rproc *priv = rproc->priv;
+ struct device *dev = rproc->dev.parent;
+ u32 message = (u32)(*(u32 *)data);
+
+ dev_dbg(dev, "mbox msg: 0x%x\n", message);
+
+ switch (message) {
+ case RP_MBOX_SUSPEND_ACK:
+ complete(&priv->pm_comp);
+ break;
+ case RP_MBOX_RESUME_ACK:
+ complete(&priv->pm_comp);
+ break;
+ default:
+ schedule_work(&priv->rproc_work);
+ break;
+ }
+}
+
+/**
+ * imx_dsp_rproc_rxdb_callback() - inbound mailbox message handler
+ * @cl: mailbox client pointer used for requesting the mailbox channel
+ * @data: mailbox payload
+ *
+ * For the doorbell channel no message payload is specified; just set
+ * the REMOTE_IS_READY flag.
+ */
+static void imx_dsp_rproc_rxdb_callback(struct mbox_client *cl, void *data)
+{
+ struct rproc *rproc = dev_get_drvdata(cl->dev);
+ struct imx_dsp_rproc *priv = rproc->priv;
+
+ /* Remote is ready after firmware is loaded and running */
+ priv->flags |= REMOTE_IS_READY;
+}
+
+/**
+ * imx_dsp_rproc_mbox_init() - request mailbox channels
+ * @priv: private data pointer
+ *
+ * Request three mailbox channels (tx, rx, rxdb).
+ */
+static int imx_dsp_rproc_mbox_init(struct imx_dsp_rproc *priv)
+{
+ struct device *dev = priv->rproc->dev.parent;
+ struct mbox_client *cl;
+ int ret;
+
+ if (!of_get_property(dev->of_node, "mbox-names", NULL))
+ return 0;
+
+ cl = &priv->cl;
+ cl->dev = dev;
+ cl->tx_block = true;
+ cl->tx_tout = 100;
+ cl->knows_txdone = false;
+ cl->rx_callback = imx_dsp_rproc_rx_tx_callback;
+
+ /* Channel for sending message */
+ priv->tx_ch = mbox_request_channel_byname(cl, "tx");
+ if (IS_ERR(priv->tx_ch)) {
+ ret = PTR_ERR(priv->tx_ch);
+ dev_dbg(cl->dev, "failed to request tx mailbox channel: %d\n",
+ ret);
+ goto err_out;
+ }
+
+ /* Channel for receiving message */
+ priv->rx_ch = mbox_request_channel_byname(cl, "rx");
+ if (IS_ERR(priv->rx_ch)) {
+ ret = PTR_ERR(priv->rx_ch);
+ dev_dbg(cl->dev, "failed to request rx mailbox channel: %d\n",
+ ret);
+ goto err_out;
+ }
+
+ cl = &priv->cl_rxdb;
+ cl->dev = dev;
+ cl->rx_callback = imx_dsp_rproc_rxdb_callback;
+
+ /*
+ * The RX doorbell is used to receive the ready signal from the
+ * remote after the firmware is loaded.
+ */
+ priv->rxdb_ch = mbox_request_channel_byname(cl, "rxdb");
+ if (IS_ERR(priv->rxdb_ch)) {
+ ret = PTR_ERR(priv->rxdb_ch);
+ dev_dbg(cl->dev, "failed to request mbox chan rxdb, ret %d\n",
+ ret);
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ if (!IS_ERR(priv->tx_ch))
+ mbox_free_channel(priv->tx_ch);
+ if (!IS_ERR(priv->rx_ch))
+ mbox_free_channel(priv->rx_ch);
+ if (!IS_ERR(priv->rxdb_ch))
+ mbox_free_channel(priv->rxdb_ch);
+
+ return ret;
+}
+
+static void imx_dsp_rproc_free_mbox(struct imx_dsp_rproc *priv)
+{
+ mbox_free_channel(priv->tx_ch);
+ mbox_free_channel(priv->rx_ch);
+ mbox_free_channel(priv->rxdb_ch);
+}
+
+/**
+ * imx_dsp_rproc_add_carveout() - register carveout regions
+ * @priv: private data pointer
+ *
+ * This function registers the specified memory entries in the rproc
+ * carveouts list. The carveouts provide the memory address mapping for the DSP.
+ */
+static int imx_dsp_rproc_add_carveout(struct imx_dsp_rproc *priv)
+{
+ const struct imx_dsp_rproc_dcfg *dsp_dcfg = priv->dsp_dcfg;
+ const struct imx_rproc_dcfg *dcfg = dsp_dcfg->dcfg;
+ struct rproc *rproc = priv->rproc;
+ struct device *dev = rproc->dev.parent;
+ struct device_node *np = dev->of_node;
+ struct of_phandle_iterator it;
+ struct rproc_mem_entry *mem;
+ struct reserved_mem *rmem;
+ void __iomem *cpu_addr;
+ int a;
+ u64 da;
+
+ /* Remap required addresses */
+ for (a = 0; a < dcfg->att_size; a++) {
+ const struct imx_rproc_att *att = &dcfg->att[a];
+
+ if (!(att->flags & ATT_OWN))
+ continue;
+
+ if (imx_dsp_rproc_sys_to_da(priv, att->sa, att->size, &da))
+ return -EINVAL;
+
+ cpu_addr = devm_ioremap_wc(dev, att->sa, att->size);
+ if (!cpu_addr) {
+ dev_err(dev, "failed to map memory %p\n", &att->sa);
+ return -ENOMEM;
+ }
+
+ /* Register memory region */
+ mem = rproc_mem_entry_init(dev, (void __force *)cpu_addr, (dma_addr_t)att->sa,
+ att->size, da, NULL, NULL, "dsp_mem");
+
+ if (mem)
+ rproc_coredump_add_segment(rproc, da, att->size);
+ else
+ return -ENOMEM;
+
+ rproc_add_carveout(rproc, mem);
+ }
+
+ of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
+ while (of_phandle_iterator_next(&it) == 0) {
+ /*
+ * Ignore the first memory region, which will be used as the vdev
+ * buffer. No extra handling is needed here; rproc_add_virtio_dev()
+ * takes care of it.
+ */
+ if (!strcmp(it.node->name, "vdev0buffer"))
+ continue;
+
+ rmem = of_reserved_mem_lookup(it.node);
+ if (!rmem) {
+ of_node_put(it.node);
+ dev_err(dev, "unable to acquire memory-region\n");
+ return -EINVAL;
+ }
+
+ if (imx_dsp_rproc_sys_to_da(priv, rmem->base, rmem->size, &da)) {
+ of_node_put(it.node);
+ return -EINVAL;
+ }
+
+ cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
+ if (!cpu_addr) {
+ of_node_put(it.node);
+ dev_err(dev, "failed to map memory %p\n", &rmem->base);
+ return -ENOMEM;
+ }
+
+ /* Register memory region */
+ mem = rproc_mem_entry_init(dev, (void __force *)cpu_addr, (dma_addr_t)rmem->base,
+ rmem->size, da, NULL, NULL, it.node->name);
+
+ if (mem) {
+ rproc_coredump_add_segment(rproc, da, rmem->size);
+ } else {
+ of_node_put(it.node);
+ return -ENOMEM;
+ }
+
+ rproc_add_carveout(rproc, mem);
+ }
+
+ return 0;
+}
+
+/* Prepare function for rproc_ops */
+static int imx_dsp_rproc_prepare(struct rproc *rproc)
+{
+ struct imx_dsp_rproc *priv = rproc->priv;
+ struct device *dev = rproc->dev.parent;
+ struct rproc_mem_entry *carveout;
+ int ret;
+
+ ret = imx_dsp_rproc_add_carveout(priv);
+ if (ret) {
+ dev_err(dev, "failed on imx_dsp_rproc_add_carveout\n");
+ return ret;
+ }
+
+ pm_runtime_get_sync(dev);
+
+ /*
+ * Clear the buffers only after the pm_runtime resume above, since
+ * the internal OCRAM is not accessible while power and clocks are
+ * disabled.
+ */
+ list_for_each_entry(carveout, &rproc->carveouts, node) {
+ if (carveout->va)
+ memset(carveout->va, 0, carveout->len);
+ }
+
+ return 0;
+}
+
+/* Unprepare function for rproc_ops */
+static int imx_dsp_rproc_unprepare(struct rproc *rproc)
+{
+ pm_runtime_put_sync(rproc->dev.parent);
+
+ return 0;
+}
+
+/* Kick function for rproc_ops */
+static void imx_dsp_rproc_kick(struct rproc *rproc, int vqid)
+{
+ struct imx_dsp_rproc *priv = rproc->priv;
+ struct device *dev = rproc->dev.parent;
+ int err;
+ __u32 mmsg;
+
+ if (!priv->tx_ch) {
+ dev_err(dev, "No initialized mbox tx channel\n");
+ return;
+ }
+
+ /*
+ * Send the index of the triggered virtqueue as the MU payload, so
+ * the remote processor knows which virtqueue is used.
+ */
+ mmsg = vqid;
+
+ err = mbox_send_message(priv->tx_ch, (void *)&mmsg);
+ if (err < 0)
+ dev_err(dev, "%s: failed (%d, err:%d)\n", __func__, vqid, err);
+}
+
+/*
+ * Custom memory copy implementation for i.MX DSP Cores
+ *
+ * The IRAM is part of the HiFi DSP.
+ * According to the hw specs, only 32-bit writes are allowed.
+ */
+static int imx_dsp_rproc_memcpy(void *dst, const void *src, size_t size)
+{
+ void __iomem *dest = (void __iomem *)dst;
+ const u8 *src_byte = src;
+ const u32 *source = src;
+ u32 affected_mask;
+ int i, q, r;
+ u32 tmp;
+
+ /* destination must be 32bit aligned */
+ if (!IS_ALIGNED((uintptr_t)dest, 4))
+ return -EINVAL;
+
+ q = size / 4;
+ r = size % 4;
+
+ /* copy data in units of 32 bits at a time */
+ for (i = 0; i < q; i++)
+ writel(source[i], dest + i * 4);
+
+ if (r) {
+ affected_mask = GENMASK(8 * r - 1, 0);
+
+ /*
+ * First read the 32-bit word at dest, then update the affected
+ * bytes and write the word back. The unaffected bytes must keep
+ * their previous value.
+ */
+ tmp = readl(dest + q * 4);
+ tmp &= ~affected_mask;
+
+ /* avoid reading after end of source */
+ for (i = 0; i < r; i++)
+ tmp |= (src_byte[q * 4 + i] << (8 * i));
+
+ writel(tmp, dest + q * 4);
+ }
+
+ return 0;
+}
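+
+/*
+ * For example, a 7-byte copy performs one full 32-bit write (q = 1) and
+ * then read-modify-writes the final word with r = 3: affected_mask then
+ * covers bits 0..23, so the top byte of that word keeps its old value.
+ */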
+
+/*
+ * Custom memset implementation for i.MX DSP Cores
+ *
+ * The IRAM is part of the HiFi DSP.
+ * According to the hw specs, only 32-bit writes are allowed.
+ */
+static int imx_dsp_rproc_memset(void *addr, u8 value, size_t size)
+{
+ void __iomem *tmp_dst = (void __iomem *)addr;
+ u32 tmp_val = value;
+ u32 affected_mask;
+ int q, r;
+ u32 tmp;
+
+ /* destination must be 32bit aligned */
+ if (!IS_ALIGNED((uintptr_t)addr, 4))
+ return -EINVAL;
+
+ tmp_val |= tmp_val << 8;
+ tmp_val |= tmp_val << 16;
+
+ q = size / 4;
+ r = size % 4;
+
+ /* advance by whole 32-bit words; only aligned 32-bit writes are allowed */
+ while (q--) {
+ writel(tmp_val, tmp_dst);
+ tmp_dst += 4;
+ }
+
+ if (r) {
+ affected_mask = GENMASK(8 * r - 1, 0);
+
+ /*
+ * First read the 32-bit word at addr, then update the affected
+ * bytes and write the word back. The unaffected bytes must keep
+ * their previous value.
+ */
+ tmp = readl(tmp_dst);
+ tmp &= ~affected_mask;
+
+ tmp |= (tmp_val & affected_mask);
+ writel(tmp, tmp_dst);
+ }
+
+ return 0;
+}
+
+/*
+ * imx_dsp_rproc_elf_load_segments() - load firmware segments to memory
+ * @rproc: remote processor which will be booted using these fw segments
+ * @fw: the ELF firmware image
+ *
+ * This function loads the firmware segments to memory, where the remote
+ * processor expects them.
+ *
+ * Return: 0 on success and an appropriate error code otherwise
+ */
+static int imx_dsp_rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
+{
+ struct device *dev = &rproc->dev;
+ const void *ehdr, *phdr;
+ int i, ret = 0;
+ u16 phnum;
+ const u8 *elf_data = fw->data;
+ u8 class = fw_elf_get_class(fw);
+ u32 elf_phdr_get_size = elf_size_of_phdr(class);
+
+ ehdr = elf_data;
+ phnum = elf_hdr_get_e_phnum(class, ehdr);
+ phdr = elf_data + elf_hdr_get_e_phoff(class, ehdr);
+
+ /* go through the available ELF segments */
+ for (i = 0; i < phnum; i++, phdr += elf_phdr_get_size) {
+ u64 da = elf_phdr_get_p_paddr(class, phdr);
+ u64 memsz = elf_phdr_get_p_memsz(class, phdr);
+ u64 filesz = elf_phdr_get_p_filesz(class, phdr);
+ u64 offset = elf_phdr_get_p_offset(class, phdr);
+ u32 type = elf_phdr_get_p_type(class, phdr);
+ void *ptr;
+
+ if (type != PT_LOAD || !memsz)
+ continue;
+
+ dev_dbg(dev, "phdr: type %d da 0x%llx memsz 0x%llx filesz 0x%llx\n",
+ type, da, memsz, filesz);
+
+ if (filesz > memsz) {
+ dev_err(dev, "bad phdr filesz 0x%llx memsz 0x%llx\n",
+ filesz, memsz);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (offset + filesz > fw->size) {
+ dev_err(dev, "truncated fw: need 0x%llx avail 0x%zx\n",
+ offset + filesz, fw->size);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!rproc_u64_fit_in_size_t(memsz)) {
+ dev_err(dev, "size (%llx) does not fit in size_t type\n",
+ memsz);
+ ret = -EOVERFLOW;
+ break;
+ }
+
+ /* grab the kernel address for this device address */
+ ptr = rproc_da_to_va(rproc, da, memsz, NULL);
+ if (!ptr) {
+ dev_err(dev, "bad phdr da 0x%llx mem 0x%llx\n", da,
+ memsz);
+ ret = -EINVAL;
+ break;
+ }
+
+ /* put the segment where the remote processor expects it */
+ if (filesz) {
+ ret = imx_dsp_rproc_memcpy(ptr, elf_data + offset, filesz);
+ if (ret) {
+ dev_err(dev, "memory copy failed for da 0x%llx memsz 0x%llx\n",
+ da, memsz);
+ break;
+ }
+ }
+
+ /* zero out remaining memory for this segment */
+ if (memsz > filesz) {
+ ret = imx_dsp_rproc_memset(ptr + filesz, 0, memsz - filesz);
+ if (ret) {
+ dev_err(dev, "memset failed for da 0x%llx memsz 0x%llx\n",
+ da, memsz);
+ break;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int imx_dsp_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
+{
+ if (rproc_elf_load_rsc_table(rproc, fw))
+ dev_warn(&rproc->dev, "no resource table found for this firmware\n");
+
+ return 0;
+}
+
+static const struct rproc_ops imx_dsp_rproc_ops = {
+ .prepare = imx_dsp_rproc_prepare,
+ .unprepare = imx_dsp_rproc_unprepare,
+ .start = imx_dsp_rproc_start,
+ .stop = imx_dsp_rproc_stop,
+ .kick = imx_dsp_rproc_kick,
+ .load = imx_dsp_rproc_elf_load_segments,
+ .parse_fw = imx_dsp_rproc_parse_fw,
+ .sanity_check = rproc_elf_sanity_check,
+ .get_boot_addr = rproc_elf_get_boot_addr,
+};
+
+/**
+ * imx_dsp_attach_pm_domains() - attach the power domains
+ * @priv: private data pointer
+ *
+ * On i.MX8QM and i.MX8QXP multiple power domains are required, so
+ * the device needs to be linked to each of them.
+ */
+static int imx_dsp_attach_pm_domains(struct imx_dsp_rproc *priv)
+{
+ struct device *dev = priv->rproc->dev.parent;
+ int ret, i;
+
+ priv->num_domains = of_count_phandle_with_args(dev->of_node,
+ "power-domains",
+ "#power-domain-cells");
+
+ /* If only one domain, then no need to link the device */
+ if (priv->num_domains <= 1)
+ return 0;
+
+ priv->pd_dev = devm_kmalloc_array(dev, priv->num_domains,
+ sizeof(*priv->pd_dev),
+ GFP_KERNEL);
+ if (!priv->pd_dev)
+ return -ENOMEM;
+
+ priv->pd_dev_link = devm_kmalloc_array(dev, priv->num_domains,
+ sizeof(*priv->pd_dev_link),
+ GFP_KERNEL);
+ if (!priv->pd_dev_link)
+ return -ENOMEM;
+
+ for (i = 0; i < priv->num_domains; i++) {
+ priv->pd_dev[i] = dev_pm_domain_attach_by_id(dev, i);
+ if (IS_ERR(priv->pd_dev[i])) {
+ ret = PTR_ERR(priv->pd_dev[i]);
+ goto detach_pm;
+ }
+
+ /*
+ * device_link_add() validates priv->pd_dev[i] itself; if the link
+ * cannot be created it returns NULL, which is handled below.
+ */
+ priv->pd_dev_link[i] = device_link_add(dev,
+ priv->pd_dev[i],
+ DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME);
+ if (!priv->pd_dev_link[i]) {
+ dev_pm_domain_detach(priv->pd_dev[i], false);
+ ret = -EINVAL;
+ goto detach_pm;
+ }
+ }
+
+ return 0;
+
+detach_pm:
+ while (--i >= 0) {
+ device_link_del(priv->pd_dev_link[i]);
+ dev_pm_domain_detach(priv->pd_dev[i], false);
+ }
+
+ return ret;
+}
+
+static int imx_dsp_detach_pm_domains(struct imx_dsp_rproc *priv)
+{
+ int i;
+
+ if (priv->num_domains <= 1)
+ return 0;
+
+ for (i = 0; i < priv->num_domains; i++) {
+ device_link_del(priv->pd_dev_link[i]);
+ dev_pm_domain_detach(priv->pd_dev[i], false);
+ }
+
+ return 0;
+}
+
+/**
+ * imx_dsp_rproc_detect_mode() - detect DSP control mode
+ * @priv: private data pointer
+ *
+ * Different platforms have different control methods for the DSP,
+ * depending on how the DSP is integrated into the platform.
+ *
+ * On i.MX8QXP and i.MX8QM, the DSP is started and stopped by the
+ * System Control Unit.
+ * On i.MX8MP and i.MX8ULP, the DSP is started and stopped by the
+ * system integration module.
+ */
+static int imx_dsp_rproc_detect_mode(struct imx_dsp_rproc *priv)
+{
+ const struct imx_dsp_rproc_dcfg *dsp_dcfg = priv->dsp_dcfg;
+ struct device *dev = priv->rproc->dev.parent;
+ struct regmap *regmap;
+ int ret = 0;
+
+ switch (dsp_dcfg->dcfg->method) {
+ case IMX_RPROC_SCU_API:
+ ret = imx_scu_get_handle(&priv->ipc_handle);
+ if (ret)
+ return ret;
+ break;
+ case IMX_RPROC_MMIO:
+ regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "fsl,dsp-ctrl");
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "failed to find syscon\n");
+ return PTR_ERR(regmap);
+ }
+
+ priv->regmap = regmap;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static const char *imx_dsp_clks_names[DSP_RPROC_CLK_MAX] = {
+ /* DSP clocks */
+ "core", "ocram", "debug", "ipg", "mu",
+};
+
+static int imx_dsp_rproc_clk_get(struct imx_dsp_rproc *priv)
+{
+ struct device *dev = priv->rproc->dev.parent;
+ struct clk_bulk_data *clks = priv->clks;
+ int i;
+
+ for (i = 0; i < DSP_RPROC_CLK_MAX; i++)
+ clks[i].id = imx_dsp_clks_names[i];
+
+ return devm_clk_bulk_get_optional(dev, DSP_RPROC_CLK_MAX, clks);
+}
+
+static int imx_dsp_rproc_probe(struct platform_device *pdev)
+{
+ const struct imx_dsp_rproc_dcfg *dsp_dcfg;
+ struct device *dev = &pdev->dev;
+ struct imx_dsp_rproc *priv;
+ struct rproc *rproc;
+ const char *fw_name;
+ int ret;
+
+ dsp_dcfg = of_device_get_match_data(dev);
+ if (!dsp_dcfg)
+ return -ENODEV;
+
+ ret = rproc_of_parse_firmware(dev, 0, &fw_name);
+ if (ret) {
+ dev_err(dev, "failed to parse firmware-name property, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ rproc = rproc_alloc(dev, "imx-dsp-rproc", &imx_dsp_rproc_ops, fw_name,
+ sizeof(*priv));
+ if (!rproc)
+ return -ENOMEM;
+
+ priv = rproc->priv;
+ priv->rproc = rproc;
+ priv->dsp_dcfg = dsp_dcfg;
+
+ dev_set_drvdata(dev, rproc);
+
+ INIT_WORK(&priv->rproc_work, imx_dsp_rproc_vq_work);
+
+ ret = imx_dsp_rproc_detect_mode(priv);
+ if (ret) {
+ dev_err(dev, "failed on imx_dsp_rproc_detect_mode\n");
+ goto err_put_rproc;
+ }
+
+	/* Some platforms require multiple power domains for the DSP */
+ ret = imx_dsp_attach_pm_domains(priv);
+ if (ret) {
+ dev_err(dev, "failed on imx_dsp_attach_pm_domains\n");
+ goto err_put_rproc;
+ }
+ /* Get clocks */
+ ret = imx_dsp_rproc_clk_get(priv);
+ if (ret) {
+ dev_err(dev, "failed on imx_dsp_rproc_clk_get\n");
+ goto err_detach_domains;
+ }
+
+ init_completion(&priv->pm_comp);
+ rproc->auto_boot = false;
+ ret = rproc_add(rproc);
+ if (ret) {
+ dev_err(dev, "rproc_add failed\n");
+ goto err_detach_domains;
+ }
+
+ pm_runtime_enable(dev);
+
+ return 0;
+
+err_detach_domains:
+ imx_dsp_detach_pm_domains(priv);
+err_put_rproc:
+ rproc_free(rproc);
+
+ return ret;
+}
+
+static int imx_dsp_rproc_remove(struct platform_device *pdev)
+{
+ struct rproc *rproc = platform_get_drvdata(pdev);
+ struct imx_dsp_rproc *priv = rproc->priv;
+
+ pm_runtime_disable(&pdev->dev);
+ rproc_del(rproc);
+ imx_dsp_detach_pm_domains(priv);
+ rproc_free(rproc);
+
+ return 0;
+}
+
+/* pm runtime functions */
+static int imx_dsp_runtime_resume(struct device *dev)
+{
+ struct rproc *rproc = dev_get_drvdata(dev);
+ struct imx_dsp_rproc *priv = rproc->priv;
+ const struct imx_dsp_rproc_dcfg *dsp_dcfg = priv->dsp_dcfg;
+ int ret;
+
+	/*
+	 * The mailbox has its own power domain. If the mailbox were set up
+	 * in probe(), its power domain would stay enabled forever and no
+	 * power could be saved, so set up the mailbox here at runtime
+	 * resume instead.
+	 */
+ ret = imx_dsp_rproc_mbox_init(priv);
+ if (ret) {
+ dev_err(dev, "failed on imx_dsp_rproc_mbox_init\n");
+ return ret;
+ }
+
+ ret = clk_bulk_prepare_enable(DSP_RPROC_CLK_MAX, priv->clks);
+ if (ret) {
+ dev_err(dev, "failed on clk_bulk_prepare_enable\n");
+ return ret;
+ }
+
+ /* Reset DSP if needed */
+ if (dsp_dcfg->reset)
+ dsp_dcfg->reset(priv);
+
+ return 0;
+}
+
+static int imx_dsp_runtime_suspend(struct device *dev)
+{
+ struct rproc *rproc = dev_get_drvdata(dev);
+ struct imx_dsp_rproc *priv = rproc->priv;
+
+ clk_bulk_disable_unprepare(DSP_RPROC_CLK_MAX, priv->clks);
+
+ imx_dsp_rproc_free_mbox(priv);
+
+ return 0;
+}
+
+static void imx_dsp_load_firmware(const struct firmware *fw, void *context)
+{
+ struct rproc *rproc = context;
+ int ret;
+
+	/*
+	 * Same flow as the start procedure:
+	 * load the ELF segments into memory first.
+	 */
+ ret = rproc_load_segments(rproc, fw);
+ if (ret)
+ goto out;
+
+ /* Start the remote processor */
+ ret = rproc->ops->start(rproc);
+ if (ret)
+ goto out;
+
+ rproc->ops->kick(rproc, 0);
+
+out:
+ release_firmware(fw);
+}
+
+static __maybe_unused int imx_dsp_suspend(struct device *dev)
+{
+ struct rproc *rproc = dev_get_drvdata(dev);
+ struct imx_dsp_rproc *priv = rproc->priv;
+ __u32 mmsg = RP_MBOX_SUSPEND_SYSTEM;
+ int ret;
+
+ if (rproc->state != RPROC_RUNNING)
+ goto out;
+
+ reinit_completion(&priv->pm_comp);
+
+ /* Tell DSP that suspend is happening */
+ ret = mbox_send_message(priv->tx_ch, (void *)&mmsg);
+ if (ret < 0) {
+ dev_err(dev, "PM mbox_send_message failed: %d\n", ret);
+ return ret;
+ }
+
+	/*
+	 * The DSP needs to save its context at suspend.
+	 * Wait here for the DSP's response; only then can the power be
+	 * disabled.
+	 */
+ if (!wait_for_completion_timeout(&priv->pm_comp, msecs_to_jiffies(100)))
+ return -EBUSY;
+
+out:
+	/*
+	 * The DSP is powered off during suspend, so force runtime PM into
+	 * the suspended state; the power and clocks can then be re-enabled
+	 * at the resume stage.
+	 */
+ return pm_runtime_force_suspend(dev);
+}
+
+static __maybe_unused int imx_dsp_resume(struct device *dev)
+{
+ struct rproc *rproc = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ return ret;
+
+ if (rproc->state != RPROC_RUNNING)
+ return 0;
+
+	/*
+	 * The DSP loses power at suspend, so its memory is reset and the
+	 * image segments are lost. Reload the firmware and restart the
+	 * DSP if it was in the running state.
+	 */
+ ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
+ rproc->firmware, dev, GFP_KERNEL,
+ rproc, imx_dsp_load_firmware);
+ if (ret < 0) {
+ dev_err(dev, "load firmware failed: %d\n", ret);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ pm_runtime_force_suspend(dev);
+
+ return ret;
+}
+
+static const struct dev_pm_ops imx_dsp_rproc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(imx_dsp_suspend, imx_dsp_resume)
+ SET_RUNTIME_PM_OPS(imx_dsp_runtime_suspend,
+ imx_dsp_runtime_resume, NULL)
+};
+
+static const struct of_device_id imx_dsp_rproc_of_match[] = {
+ { .compatible = "fsl,imx8qxp-hifi4", .data = &imx_dsp_rproc_cfg_imx8qxp },
+ { .compatible = "fsl,imx8qm-hifi4", .data = &imx_dsp_rproc_cfg_imx8qm },
+ { .compatible = "fsl,imx8mp-hifi4", .data = &imx_dsp_rproc_cfg_imx8mp },
+ { .compatible = "fsl,imx8ulp-hifi4", .data = &imx_dsp_rproc_cfg_imx8ulp },
+ {},
+};
+MODULE_DEVICE_TABLE(of, imx_dsp_rproc_of_match);
+
+static struct platform_driver imx_dsp_rproc_driver = {
+ .probe = imx_dsp_rproc_probe,
+ .remove = imx_dsp_rproc_remove,
+ .driver = {
+ .name = "imx-dsp-rproc",
+ .of_match_table = imx_dsp_rproc_of_match,
+ .pm = &imx_dsp_rproc_pm_ops,
+ },
+};
+module_platform_driver(imx_dsp_rproc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("i.MX HiFi Core Remote Processor Control Driver");
+MODULE_AUTHOR("Shengjiu Wang <shengjiu.wang@nxp.com>");
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
new file mode 100644
index 000000000..8a2a71126
--- /dev/null
+++ b/drivers/remoteproc/imx_rproc.c
@@ -0,0 +1,877 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_client.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/remoteproc.h>
+#include <linux/workqueue.h>
+
+#include "imx_rproc.h"
+#include "remoteproc_internal.h"
+
+#define IMX7D_SRC_SCR 0x0C
+#define IMX7D_ENABLE_M4 BIT(3)
+#define IMX7D_SW_M4P_RST BIT(2)
+#define IMX7D_SW_M4C_RST BIT(1)
+#define IMX7D_SW_M4C_NON_SCLR_RST BIT(0)
+
+#define IMX7D_M4_RST_MASK (IMX7D_ENABLE_M4 | IMX7D_SW_M4P_RST \
+ | IMX7D_SW_M4C_RST \
+ | IMX7D_SW_M4C_NON_SCLR_RST)
+
+#define IMX7D_M4_START (IMX7D_ENABLE_M4 | IMX7D_SW_M4P_RST \
+ | IMX7D_SW_M4C_RST)
+#define IMX7D_M4_STOP (IMX7D_ENABLE_M4 | IMX7D_SW_M4C_RST | \
+ IMX7D_SW_M4C_NON_SCLR_RST)
+
+/* Address: 0x020D8000 */
+#define IMX6SX_SRC_SCR 0x00
+#define IMX6SX_ENABLE_M4 BIT(22)
+#define IMX6SX_SW_M4P_RST BIT(12)
+#define IMX6SX_SW_M4C_NON_SCLR_RST BIT(4)
+#define IMX6SX_SW_M4C_RST BIT(3)
+
+#define IMX6SX_M4_START (IMX6SX_ENABLE_M4 | IMX6SX_SW_M4P_RST \
+ | IMX6SX_SW_M4C_RST)
+#define IMX6SX_M4_STOP (IMX6SX_ENABLE_M4 | IMX6SX_SW_M4C_RST | \
+ IMX6SX_SW_M4C_NON_SCLR_RST)
+#define IMX6SX_M4_RST_MASK (IMX6SX_ENABLE_M4 | IMX6SX_SW_M4P_RST \
+ | IMX6SX_SW_M4C_NON_SCLR_RST \
+ | IMX6SX_SW_M4C_RST)
+
+#define IMX_RPROC_MEM_MAX 32
+
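+/*
+ * SiP (Silicon Provider) SMC function used to start, stop and query the
+ * remote core through the secure firmware on platforms using the
+ * IMX_RPROC_SMC method.
+ */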
+#define IMX_SIP_RPROC 0xC2000005
+#define IMX_SIP_RPROC_START 0x00
+#define IMX_SIP_RPROC_STARTED 0x01
+#define IMX_SIP_RPROC_STOP 0x02
+
+/**
+ * struct imx_rproc_mem - slim internal memory structure
+ * @cpu_addr: MPU virtual address of the memory region
+ * @sys_addr: Bus address used to access the memory region
+ * @size: Size of the memory region
+ */
+struct imx_rproc_mem {
+ void __iomem *cpu_addr;
+ phys_addr_t sys_addr;
+ size_t size;
+};
+
+/* att flags */
+/* M4 own area. Can be mapped at probe */
+#define ATT_OWN BIT(1)
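+/* I/O memory: must be mapped with ioremap() instead of ioremap_wc() */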
+#define ATT_IOMEM BIT(2)
+
+struct imx_rproc {
+ struct device *dev;
+ struct regmap *regmap;
+ struct rproc *rproc;
+ const struct imx_rproc_dcfg *dcfg;
+ struct imx_rproc_mem mem[IMX_RPROC_MEM_MAX];
+ struct clk *clk;
+ struct mbox_client cl;
+ struct mbox_chan *tx_ch;
+ struct mbox_chan *rx_ch;
+ struct work_struct rproc_work;
+ struct workqueue_struct *workqueue;
+ void __iomem *rsc_table;
+};
+
+static const struct imx_rproc_att imx_rproc_att_imx93[] = {
+ /* dev addr , sys addr , size , flags */
+ /* TCM CODE NON-SECURE */
+ { 0x0FFC0000, 0x201C0000, 0x00020000, ATT_OWN | ATT_IOMEM },
+ { 0x0FFE0000, 0x201E0000, 0x00020000, ATT_OWN | ATT_IOMEM },
+
+ /* TCM CODE SECURE */
+ { 0x1FFC0000, 0x201C0000, 0x00020000, ATT_OWN | ATT_IOMEM },
+ { 0x1FFE0000, 0x201E0000, 0x00020000, ATT_OWN | ATT_IOMEM },
+
+	/* TCM SYS NON-SECURE */
+ { 0x20000000, 0x20200000, 0x00020000, ATT_OWN | ATT_IOMEM },
+ { 0x20020000, 0x20220000, 0x00020000, ATT_OWN | ATT_IOMEM },
+
+	/* TCM SYS SECURE */
+ { 0x30000000, 0x20200000, 0x00020000, ATT_OWN | ATT_IOMEM },
+ { 0x30020000, 0x20220000, 0x00020000, ATT_OWN | ATT_IOMEM },
+
+ /* DDR */
+ { 0x80000000, 0x80000000, 0x10000000, 0 },
+ { 0x90000000, 0x80000000, 0x10000000, 0 },
+
+ { 0xC0000000, 0xC0000000, 0x10000000, 0 },
+ { 0xD0000000, 0xC0000000, 0x10000000, 0 },
+};
+
+static const struct imx_rproc_att imx_rproc_att_imx8mn[] = {
+ /* dev addr , sys addr , size , flags */
+ /* ITCM */
+ { 0x00000000, 0x007E0000, 0x00020000, ATT_OWN | ATT_IOMEM },
+ /* OCRAM_S */
+ { 0x00180000, 0x00180000, 0x00009000, 0 },
+ /* OCRAM */
+ { 0x00900000, 0x00900000, 0x00020000, 0 },
+ /* OCRAM */
+ { 0x00920000, 0x00920000, 0x00020000, 0 },
+ /* OCRAM */
+ { 0x00940000, 0x00940000, 0x00050000, 0 },
+ /* QSPI Code - alias */
+ { 0x08000000, 0x08000000, 0x08000000, 0 },
+ /* DDR (Code) - alias */
+ { 0x10000000, 0x40000000, 0x0FFE0000, 0 },
+ /* DTCM */
+ { 0x20000000, 0x00800000, 0x00020000, ATT_OWN | ATT_IOMEM },
+ /* OCRAM_S - alias */
+ { 0x20180000, 0x00180000, 0x00008000, ATT_OWN },
+ /* OCRAM */
+ { 0x20200000, 0x00900000, 0x00020000, ATT_OWN },
+ /* OCRAM */
+ { 0x20220000, 0x00920000, 0x00020000, ATT_OWN },
+ /* OCRAM */
+ { 0x20240000, 0x00940000, 0x00040000, ATT_OWN },
+ /* DDR (Data) */
+ { 0x40000000, 0x40000000, 0x80000000, 0 },
+};
+
+static const struct imx_rproc_att imx_rproc_att_imx8mq[] = {
+ /* dev addr , sys addr , size , flags */
+ /* TCML - alias */
+ { 0x00000000, 0x007e0000, 0x00020000, ATT_IOMEM},
+ /* OCRAM_S */
+ { 0x00180000, 0x00180000, 0x00008000, 0 },
+ /* OCRAM */
+ { 0x00900000, 0x00900000, 0x00020000, 0 },
+ /* OCRAM */
+ { 0x00920000, 0x00920000, 0x00020000, 0 },
+ /* QSPI Code - alias */
+ { 0x08000000, 0x08000000, 0x08000000, 0 },
+ /* DDR (Code) - alias */
+ { 0x10000000, 0x80000000, 0x0FFE0000, 0 },
+ /* TCML */
+ { 0x1FFE0000, 0x007E0000, 0x00020000, ATT_OWN | ATT_IOMEM},
+ /* TCMU */
+ { 0x20000000, 0x00800000, 0x00020000, ATT_OWN | ATT_IOMEM},
+ /* OCRAM_S */
+ { 0x20180000, 0x00180000, 0x00008000, ATT_OWN },
+ /* OCRAM */
+ { 0x20200000, 0x00900000, 0x00020000, ATT_OWN },
+ /* OCRAM */
+ { 0x20220000, 0x00920000, 0x00020000, ATT_OWN },
+ /* DDR (Data) */
+ { 0x40000000, 0x40000000, 0x80000000, 0 },
+};
+
+static const struct imx_rproc_att imx_rproc_att_imx8ulp[] = {
+ {0x1FFC0000, 0x1FFC0000, 0xC0000, ATT_OWN},
+ {0x21000000, 0x21000000, 0x10000, ATT_OWN},
+ {0x80000000, 0x80000000, 0x60000000, 0}
+};
+
+static const struct imx_rproc_att imx_rproc_att_imx7ulp[] = {
+ {0x1FFD0000, 0x1FFD0000, 0x30000, ATT_OWN},
+ {0x20000000, 0x20000000, 0x10000, ATT_OWN},
+ {0x2F000000, 0x2F000000, 0x20000, ATT_OWN},
+ {0x2F020000, 0x2F020000, 0x20000, ATT_OWN},
+ {0x60000000, 0x60000000, 0x40000000, 0}
+};
+
+static const struct imx_rproc_att imx_rproc_att_imx7d[] = {
+ /* dev addr , sys addr , size , flags */
+ /* OCRAM_S (M4 Boot code) - alias */
+ { 0x00000000, 0x00180000, 0x00008000, 0 },
+ /* OCRAM_S (Code) */
+ { 0x00180000, 0x00180000, 0x00008000, ATT_OWN },
+ /* OCRAM (Code) - alias */
+ { 0x00900000, 0x00900000, 0x00020000, 0 },
+ /* OCRAM_EPDC (Code) - alias */
+ { 0x00920000, 0x00920000, 0x00020000, 0 },
+ /* OCRAM_PXP (Code) - alias */
+ { 0x00940000, 0x00940000, 0x00008000, 0 },
+ /* TCML (Code) */
+ { 0x1FFF8000, 0x007F8000, 0x00008000, ATT_OWN | ATT_IOMEM },
+ /* DDR (Code) - alias, first part of DDR (Data) */
+ { 0x10000000, 0x80000000, 0x0FFF0000, 0 },
+
+ /* TCMU (Data) */
+ { 0x20000000, 0x00800000, 0x00008000, ATT_OWN | ATT_IOMEM },
+ /* OCRAM (Data) */
+ { 0x20200000, 0x00900000, 0x00020000, 0 },
+ /* OCRAM_EPDC (Data) */
+ { 0x20220000, 0x00920000, 0x00020000, 0 },
+ /* OCRAM_PXP (Data) */
+ { 0x20240000, 0x00940000, 0x00008000, 0 },
+ /* DDR (Data) */
+ { 0x80000000, 0x80000000, 0x60000000, 0 },
+};
+
+static const struct imx_rproc_att imx_rproc_att_imx6sx[] = {
+ /* dev addr , sys addr , size , flags */
+ /* TCML (M4 Boot Code) - alias */
+ { 0x00000000, 0x007F8000, 0x00008000, ATT_IOMEM },
+ /* OCRAM_S (Code) */
+ { 0x00180000, 0x008F8000, 0x00004000, 0 },
+ /* OCRAM_S (Code) - alias */
+ { 0x00180000, 0x008FC000, 0x00004000, 0 },
+ /* TCML (Code) */
+ { 0x1FFF8000, 0x007F8000, 0x00008000, ATT_OWN | ATT_IOMEM },
+ /* DDR (Code) - alias, first part of DDR (Data) */
+ { 0x10000000, 0x80000000, 0x0FFF8000, 0 },
+
+ /* TCMU (Data) */
+ { 0x20000000, 0x00800000, 0x00008000, ATT_OWN | ATT_IOMEM },
+ /* OCRAM_S (Data) - alias? */
+ { 0x208F8000, 0x008F8000, 0x00004000, 0 },
+ /* DDR (Data) */
+ { 0x80000000, 0x80000000, 0x60000000, 0 },
+};
+
+static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mn = {
+ .att = imx_rproc_att_imx8mn,
+ .att_size = ARRAY_SIZE(imx_rproc_att_imx8mn),
+ .method = IMX_RPROC_SMC,
+};
+
+static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mq = {
+ .src_reg = IMX7D_SRC_SCR,
+ .src_mask = IMX7D_M4_RST_MASK,
+ .src_start = IMX7D_M4_START,
+ .src_stop = IMX7D_M4_STOP,
+ .att = imx_rproc_att_imx8mq,
+ .att_size = ARRAY_SIZE(imx_rproc_att_imx8mq),
+ .method = IMX_RPROC_MMIO,
+};
+
+static const struct imx_rproc_dcfg imx_rproc_cfg_imx8ulp = {
+ .att = imx_rproc_att_imx8ulp,
+ .att_size = ARRAY_SIZE(imx_rproc_att_imx8ulp),
+ .method = IMX_RPROC_NONE,
+};
+
+static const struct imx_rproc_dcfg imx_rproc_cfg_imx7ulp = {
+ .att = imx_rproc_att_imx7ulp,
+ .att_size = ARRAY_SIZE(imx_rproc_att_imx7ulp),
+ .method = IMX_RPROC_NONE,
+};
+
+static const struct imx_rproc_dcfg imx_rproc_cfg_imx7d = {
+ .src_reg = IMX7D_SRC_SCR,
+ .src_mask = IMX7D_M4_RST_MASK,
+ .src_start = IMX7D_M4_START,
+ .src_stop = IMX7D_M4_STOP,
+ .att = imx_rproc_att_imx7d,
+ .att_size = ARRAY_SIZE(imx_rproc_att_imx7d),
+ .method = IMX_RPROC_MMIO,
+};
+
+static const struct imx_rproc_dcfg imx_rproc_cfg_imx6sx = {
+ .src_reg = IMX6SX_SRC_SCR,
+ .src_mask = IMX6SX_M4_RST_MASK,
+ .src_start = IMX6SX_M4_START,
+ .src_stop = IMX6SX_M4_STOP,
+ .att = imx_rproc_att_imx6sx,
+ .att_size = ARRAY_SIZE(imx_rproc_att_imx6sx),
+ .method = IMX_RPROC_MMIO,
+};
+
+static const struct imx_rproc_dcfg imx_rproc_cfg_imx93 = {
+ .att = imx_rproc_att_imx93,
+ .att_size = ARRAY_SIZE(imx_rproc_att_imx93),
+ .method = IMX_RPROC_SMC,
+};
+
+static int imx_rproc_start(struct rproc *rproc)
+{
+ struct imx_rproc *priv = rproc->priv;
+ const struct imx_rproc_dcfg *dcfg = priv->dcfg;
+ struct device *dev = priv->dev;
+ struct arm_smccc_res res;
+ int ret;
+
+ switch (dcfg->method) {
+ case IMX_RPROC_MMIO:
+ ret = regmap_update_bits(priv->regmap, dcfg->src_reg, dcfg->src_mask,
+ dcfg->src_start);
+ break;
+ case IMX_RPROC_SMC:
+ arm_smccc_smc(IMX_SIP_RPROC, IMX_SIP_RPROC_START, 0, 0, 0, 0, 0, 0, &res);
+ ret = res.a0;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (ret)
+ dev_err(dev, "Failed to enable remote core!\n");
+
+ return ret;
+}
+
+static int imx_rproc_stop(struct rproc *rproc)
+{
+ struct imx_rproc *priv = rproc->priv;
+ const struct imx_rproc_dcfg *dcfg = priv->dcfg;
+ struct device *dev = priv->dev;
+ struct arm_smccc_res res;
+ int ret;
+
+ switch (dcfg->method) {
+ case IMX_RPROC_MMIO:
+ ret = regmap_update_bits(priv->regmap, dcfg->src_reg, dcfg->src_mask,
+ dcfg->src_stop);
+ break;
+ case IMX_RPROC_SMC:
+ arm_smccc_smc(IMX_SIP_RPROC, IMX_SIP_RPROC_STOP, 0, 0, 0, 0, 0, 0, &res);
+ ret = res.a0;
+ if (res.a1)
+ dev_info(dev, "Not in wfi, force stopped\n");
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (ret)
+ dev_err(dev, "Failed to stop remote core\n");
+
+ return ret;
+}
+
+static int imx_rproc_da_to_sys(struct imx_rproc *priv, u64 da,
+ size_t len, u64 *sys, bool *is_iomem)
+{
+ const struct imx_rproc_dcfg *dcfg = priv->dcfg;
+ int i;
+
+ /* parse address translation table */
+ for (i = 0; i < dcfg->att_size; i++) {
+ const struct imx_rproc_att *att = &dcfg->att[i];
+
+ if (da >= att->da && da + len < att->da + att->size) {
+ unsigned int offset = da - att->da;
+
+ *sys = att->sa + offset;
+ if (is_iomem)
+ *is_iomem = att->flags & ATT_IOMEM;
+ return 0;
+ }
+ }
+
+ dev_warn(priv->dev, "Translation failed: da = 0x%llx len = 0x%zx\n",
+ da, len);
+ return -ENOENT;
+}
+
+static void *imx_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
+{
+ struct imx_rproc *priv = rproc->priv;
+ void *va = NULL;
+ u64 sys;
+ int i;
+
+ if (len == 0)
+ return NULL;
+
+	/*
+	 * On the device side there are many aliases, so the device (M4)
+	 * address must be converted to a system bus address first.
+	 */
+ if (imx_rproc_da_to_sys(priv, da, len, &sys, is_iomem))
+ return NULL;
+
+ for (i = 0; i < IMX_RPROC_MEM_MAX; i++) {
+ if (sys >= priv->mem[i].sys_addr && sys + len <
+ priv->mem[i].sys_addr + priv->mem[i].size) {
+ unsigned int offset = sys - priv->mem[i].sys_addr;
+ /* __force to make sparse happy with type conversion */
+ va = (__force void *)(priv->mem[i].cpu_addr + offset);
+ break;
+ }
+ }
+
+ dev_dbg(&rproc->dev, "da = 0x%llx len = 0x%zx va = 0x%p\n",
+ da, len, va);
+
+ return va;
+}
+
+static int imx_rproc_mem_alloc(struct rproc *rproc,
+ struct rproc_mem_entry *mem)
+{
+ struct device *dev = rproc->dev.parent;
+ void *va;
+
+	dev_dbg(dev, "map memory: %pa+%zx\n", &mem->dma, mem->len);
+ va = ioremap_wc(mem->dma, mem->len);
+ if (IS_ERR_OR_NULL(va)) {
+		dev_err(dev, "Unable to map memory region: %pa+%zx\n",
+			&mem->dma, mem->len);
+ return -ENOMEM;
+ }
+
+ /* Update memory entry va */
+ mem->va = va;
+
+ return 0;
+}
+
+static int imx_rproc_mem_release(struct rproc *rproc,
+ struct rproc_mem_entry *mem)
+{
+ dev_dbg(rproc->dev.parent, "unmap memory: %pa\n", &mem->dma);
+ iounmap(mem->va);
+
+ return 0;
+}
+
+static int imx_rproc_prepare(struct rproc *rproc)
+{
+ struct imx_rproc *priv = rproc->priv;
+ struct device_node *np = priv->dev->of_node;
+ struct of_phandle_iterator it;
+ struct rproc_mem_entry *mem;
+ struct reserved_mem *rmem;
+ u32 da;
+
+ /* Register associated reserved memory regions */
+ of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
+ while (of_phandle_iterator_next(&it) == 0) {
+		/*
+		 * Ignore the first memory region, which will be used as the
+		 * vdev buffer. No extra handling is needed;
+		 * rproc_add_virtio_dev() will take care of it.
+		 */
+ if (!strcmp(it.node->name, "vdev0buffer"))
+ continue;
+
+ if (!strcmp(it.node->name, "rsc-table"))
+ continue;
+
+ rmem = of_reserved_mem_lookup(it.node);
+ if (!rmem) {
+ of_node_put(it.node);
+ dev_err(priv->dev, "unable to acquire memory-region\n");
+ return -EINVAL;
+ }
+
+		/* No need to translate pa to da; i.MX uses the same mapping */
+ da = rmem->base;
+
+ /* Register memory region */
+ mem = rproc_mem_entry_init(priv->dev, NULL, (dma_addr_t)rmem->base, rmem->size, da,
+ imx_rproc_mem_alloc, imx_rproc_mem_release,
+ it.node->name);
+
+ if (mem) {
+ rproc_coredump_add_segment(rproc, da, rmem->size);
+ } else {
+ of_node_put(it.node);
+ return -ENOMEM;
+ }
+
+ rproc_add_carveout(rproc, mem);
+ }
+
+ return 0;
+}
+
+static int imx_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
+{
+ int ret;
+
+ ret = rproc_elf_load_rsc_table(rproc, fw);
+ if (ret)
+ dev_info(&rproc->dev, "No resource table in elf\n");
+
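+	/* The resource table is optional, so a missing one is not an error */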
+ return 0;
+}
+
+static void imx_rproc_kick(struct rproc *rproc, int vqid)
+{
+ struct imx_rproc *priv = rproc->priv;
+ int err;
+ __u32 mmsg;
+
+ if (!priv->tx_ch) {
+ dev_err(priv->dev, "No initialized mbox tx channel\n");
+ return;
+ }
+
+	/*
+	 * Send the index of the triggered virtqueue as the MU payload so
+	 * the remote processor knows which virtqueue is being used.
+	 */
+ mmsg = vqid << 16;
+
+ err = mbox_send_message(priv->tx_ch, (void *)&mmsg);
+ if (err < 0)
+ dev_err(priv->dev, "%s: failed (%d, err:%d)\n",
+ __func__, vqid, err);
+}
+
+static int imx_rproc_attach(struct rproc *rproc)
+{
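+	/* Nothing to do: the core was already started by another entity */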
+ return 0;
+}
+
+static struct resource_table *imx_rproc_get_loaded_rsc_table(struct rproc *rproc, size_t *table_sz)
+{
+ struct imx_rproc *priv = rproc->priv;
+
+ /* The resource table has already been mapped in imx_rproc_addr_init */
+ if (!priv->rsc_table)
+ return NULL;
+
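+	/* Assume the pre-loaded resource table fits within the first 1K */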
+ *table_sz = SZ_1K;
+ return (struct resource_table *)priv->rsc_table;
+}
+
+static const struct rproc_ops imx_rproc_ops = {
+ .prepare = imx_rproc_prepare,
+ .attach = imx_rproc_attach,
+ .start = imx_rproc_start,
+ .stop = imx_rproc_stop,
+ .kick = imx_rproc_kick,
+ .da_to_va = imx_rproc_da_to_va,
+ .load = rproc_elf_load_segments,
+ .parse_fw = imx_rproc_parse_fw,
+ .find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
+ .get_loaded_rsc_table = imx_rproc_get_loaded_rsc_table,
+ .sanity_check = rproc_elf_sanity_check,
+ .get_boot_addr = rproc_elf_get_boot_addr,
+};
+
+static int imx_rproc_addr_init(struct imx_rproc *priv,
+ struct platform_device *pdev)
+{
+ const struct imx_rproc_dcfg *dcfg = priv->dcfg;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ int a, b = 0, err, nph;
+
+ /* remap required addresses */
+ for (a = 0; a < dcfg->att_size; a++) {
+ const struct imx_rproc_att *att = &dcfg->att[a];
+
+ if (!(att->flags & ATT_OWN))
+ continue;
+
+ if (b >= IMX_RPROC_MEM_MAX)
+ break;
+
+ if (att->flags & ATT_IOMEM)
+ priv->mem[b].cpu_addr = devm_ioremap(&pdev->dev,
+ att->sa, att->size);
+ else
+ priv->mem[b].cpu_addr = devm_ioremap_wc(&pdev->dev,
+ att->sa, att->size);
+ if (!priv->mem[b].cpu_addr) {
+ dev_err(dev, "failed to remap %#x bytes from %#x\n", att->size, att->sa);
+ return -ENOMEM;
+ }
+ priv->mem[b].sys_addr = att->sa;
+ priv->mem[b].size = att->size;
+ b++;
+ }
+
+ /* memory-region is optional property */
+ nph = of_count_phandle_with_args(np, "memory-region", NULL);
+ if (nph <= 0)
+ return 0;
+
+ /* remap optional addresses */
+ for (a = 0; a < nph; a++) {
+ struct device_node *node;
+ struct resource res;
+
+ node = of_parse_phandle(np, "memory-region", a);
+		/* Do not map the vdevbuffer and vdevring regions */
+ if (!strncmp(node->name, "vdev", strlen("vdev"))) {
+ of_node_put(node);
+ continue;
+ }
+ err = of_address_to_resource(node, 0, &res);
+ of_node_put(node);
+ if (err) {
+ dev_err(dev, "unable to resolve memory region\n");
+ return err;
+ }
+
+ if (b >= IMX_RPROC_MEM_MAX)
+ break;
+
+		/* Don't use devm_ioremap_resource(): the region might be shared */
+ priv->mem[b].cpu_addr = devm_ioremap_wc(&pdev->dev, res.start, resource_size(&res));
+ if (!priv->mem[b].cpu_addr) {
+ dev_err(dev, "failed to remap %pr\n", &res);
+ return -ENOMEM;
+ }
+ priv->mem[b].sys_addr = res.start;
+ priv->mem[b].size = resource_size(&res);
+ if (!strcmp(node->name, "rsc-table"))
+ priv->rsc_table = priv->mem[b].cpu_addr;
+ b++;
+ }
+
+ return 0;
+}
+
+static void imx_rproc_vq_work(struct work_struct *work)
+{
+ struct imx_rproc *priv = container_of(work, struct imx_rproc,
+ rproc_work);
+
+ rproc_vq_interrupt(priv->rproc, 0);
+ rproc_vq_interrupt(priv->rproc, 1);
+}
+
+static void imx_rproc_rx_callback(struct mbox_client *cl, void *msg)
+{
+ struct rproc *rproc = dev_get_drvdata(cl->dev);
+ struct imx_rproc *priv = rproc->priv;
+
+ queue_work(priv->workqueue, &priv->rproc_work);
+}
+
+static int imx_rproc_xtr_mbox_init(struct rproc *rproc)
+{
+ struct imx_rproc *priv = rproc->priv;
+ struct device *dev = priv->dev;
+ struct mbox_client *cl;
+
+ if (!of_get_property(dev->of_node, "mbox-names", NULL))
+ return 0;
+
+ cl = &priv->cl;
+ cl->dev = dev;
+ cl->tx_block = true;
+ cl->tx_tout = 100;
+ cl->knows_txdone = false;
+ cl->rx_callback = imx_rproc_rx_callback;
+
+ priv->tx_ch = mbox_request_channel_byname(cl, "tx");
+ if (IS_ERR(priv->tx_ch))
+ return dev_err_probe(cl->dev, PTR_ERR(priv->tx_ch),
+ "failed to request tx mailbox channel\n");
+
+ priv->rx_ch = mbox_request_channel_byname(cl, "rx");
+ if (IS_ERR(priv->rx_ch)) {
+ mbox_free_channel(priv->tx_ch);
+ return dev_err_probe(cl->dev, PTR_ERR(priv->rx_ch),
+ "failed to request rx mailbox channel\n");
+ }
+
+ return 0;
+}
+
+static void imx_rproc_free_mbox(struct rproc *rproc)
+{
+ struct imx_rproc *priv = rproc->priv;
+
+ mbox_free_channel(priv->tx_ch);
+ mbox_free_channel(priv->rx_ch);
+}
+
+static int imx_rproc_detect_mode(struct imx_rproc *priv)
+{
+ struct regmap_config config = { .name = "imx-rproc" };
+ const struct imx_rproc_dcfg *dcfg = priv->dcfg;
+ struct device *dev = priv->dev;
+ struct regmap *regmap;
+ struct arm_smccc_res res;
+ int ret;
+ u32 val;
+
+ switch (dcfg->method) {
+ case IMX_RPROC_NONE:
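+		/* The remote core is not under Linux control; just attach to it */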
+ priv->rproc->state = RPROC_DETACHED;
+ return 0;
+ case IMX_RPROC_SMC:
+ arm_smccc_smc(IMX_SIP_RPROC, IMX_SIP_RPROC_STARTED, 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0)
+ priv->rproc->state = RPROC_DETACHED;
+ return 0;
+ default:
+ break;
+ }
+
+ regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "failed to find syscon\n");
+ return PTR_ERR(regmap);
+ }
+
+ priv->regmap = regmap;
+ regmap_attach_dev(dev, regmap, &config);
+
+ ret = regmap_read(regmap, dcfg->src_reg, &val);
+ if (ret) {
+ dev_err(dev, "Failed to read src\n");
+ return ret;
+ }
+
+ if ((val & dcfg->src_mask) != dcfg->src_stop)
+ priv->rproc->state = RPROC_DETACHED;
+
+ return 0;
+}
+
+static int imx_rproc_clk_enable(struct imx_rproc *priv)
+{
+ const struct imx_rproc_dcfg *dcfg = priv->dcfg;
+ struct device *dev = priv->dev;
+ int ret;
+
+ /* Remote core is not under control of Linux */
+ if (dcfg->method == IMX_RPROC_NONE)
+ return 0;
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "Failed to get clock\n");
+ return PTR_ERR(priv->clk);
+ }
+
+	/*
+	 * Clock for the M4 block, including its memory. It must be
+	 * enabled before .start so the firmware can be transferred.
+	 */
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int imx_rproc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct imx_rproc *priv;
+ struct rproc *rproc;
+ const struct imx_rproc_dcfg *dcfg;
+ int ret;
+
+	/* set some name other than "imx" */
+ rproc = rproc_alloc(dev, "imx-rproc", &imx_rproc_ops,
+ NULL, sizeof(*priv));
+ if (!rproc)
+ return -ENOMEM;
+
+ dcfg = of_device_get_match_data(dev);
+ if (!dcfg) {
+ ret = -EINVAL;
+ goto err_put_rproc;
+ }
+
+ priv = rproc->priv;
+ priv->rproc = rproc;
+ priv->dcfg = dcfg;
+ priv->dev = dev;
+
+ dev_set_drvdata(dev, rproc);
+ priv->workqueue = create_workqueue(dev_name(dev));
+ if (!priv->workqueue) {
+ dev_err(dev, "cannot create workqueue\n");
+ ret = -ENOMEM;
+ goto err_put_rproc;
+ }
+
+ ret = imx_rproc_xtr_mbox_init(rproc);
+ if (ret)
+ goto err_put_wkq;
+
+ ret = imx_rproc_addr_init(priv, pdev);
+ if (ret) {
+ dev_err(dev, "failed on imx_rproc_addr_init\n");
+ goto err_put_mbox;
+ }
+
+ ret = imx_rproc_detect_mode(priv);
+ if (ret)
+ goto err_put_mbox;
+
+ ret = imx_rproc_clk_enable(priv);
+ if (ret)
+ goto err_put_mbox;
+
+ INIT_WORK(&priv->rproc_work, imx_rproc_vq_work);
+
+ if (rproc->state != RPROC_DETACHED)
+ rproc->auto_boot = of_property_read_bool(np, "fsl,auto-boot");
+
+ ret = rproc_add(rproc);
+ if (ret) {
+ dev_err(dev, "rproc_add failed\n");
+ goto err_put_clk;
+ }
+
+ return 0;
+
+err_put_clk:
+ clk_disable_unprepare(priv->clk);
+err_put_mbox:
+ imx_rproc_free_mbox(rproc);
+err_put_wkq:
+ destroy_workqueue(priv->workqueue);
+err_put_rproc:
+ rproc_free(rproc);
+
+ return ret;
+}
+
+static int imx_rproc_remove(struct platform_device *pdev)
+{
+ struct rproc *rproc = platform_get_drvdata(pdev);
+ struct imx_rproc *priv = rproc->priv;
+
+ clk_disable_unprepare(priv->clk);
+ rproc_del(rproc);
+ imx_rproc_free_mbox(rproc);
+ destroy_workqueue(priv->workqueue);
+ rproc_free(rproc);
+
+ return 0;
+}
+
+static const struct of_device_id imx_rproc_of_match[] = {
+ { .compatible = "fsl,imx7ulp-cm4", .data = &imx_rproc_cfg_imx7ulp },
+ { .compatible = "fsl,imx7d-cm4", .data = &imx_rproc_cfg_imx7d },
+ { .compatible = "fsl,imx6sx-cm4", .data = &imx_rproc_cfg_imx6sx },
+ { .compatible = "fsl,imx8mq-cm4", .data = &imx_rproc_cfg_imx8mq },
+ { .compatible = "fsl,imx8mm-cm4", .data = &imx_rproc_cfg_imx8mq },
+ { .compatible = "fsl,imx8mn-cm7", .data = &imx_rproc_cfg_imx8mn },
+ { .compatible = "fsl,imx8mp-cm7", .data = &imx_rproc_cfg_imx8mn },
+ { .compatible = "fsl,imx8ulp-cm33", .data = &imx_rproc_cfg_imx8ulp },
+ { .compatible = "fsl,imx93-cm33", .data = &imx_rproc_cfg_imx93 },
+ {},
+};
+MODULE_DEVICE_TABLE(of, imx_rproc_of_match);
+
+static struct platform_driver imx_rproc_driver = {
+ .probe = imx_rproc_probe,
+ .remove = imx_rproc_remove,
+ .driver = {
+ .name = "imx-rproc",
+ .of_match_table = imx_rproc_of_match,
+ },
+};
+
+module_platform_driver(imx_rproc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("i.MX remote processor control driver");
+MODULE_AUTHOR("Oleksij Rempel <o.rempel@pengutronix.de>");
diff --git a/drivers/remoteproc/imx_rproc.h b/drivers/remoteproc/imx_rproc.h
new file mode 100644
index 000000000..1c7e2127c
--- /dev/null
+++ b/drivers/remoteproc/imx_rproc.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
+ * Copyright 2021 NXP
+ */
+
+#ifndef _IMX_RPROC_H
+#define _IMX_RPROC_H
+
+/* address translation table */
+struct imx_rproc_att {
+ u32 da; /* device address (From Cortex M4 view)*/
+ u32 sa; /* system bus address */
+ u32 size; /* size of reg range */
+ int flags;
+};
+
+/* Remote core start/stop method */
+enum imx_rproc_method {
+ IMX_RPROC_NONE,
+ /* Through syscon regmap */
+ IMX_RPROC_MMIO,
+ /* Through ARM SMCCC */
+ IMX_RPROC_SMC,
+ /* Through System Control Unit API */
+ IMX_RPROC_SCU_API,
+};
+
+struct imx_rproc_dcfg {
+ u32 src_reg;
+ u32 src_mask;
+ u32 src_start;
+ u32 src_stop;
+ const struct imx_rproc_att *att;
+ size_t att_size;
+ enum imx_rproc_method method;
+};
+
+#endif /* _IMX_RPROC_H */
diff --git a/drivers/remoteproc/ingenic_rproc.c b/drivers/remoteproc/ingenic_rproc.c
new file mode 100644
index 000000000..9902cce28
--- /dev/null
+++ b/drivers/remoteproc/ingenic_rproc.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Ingenic JZ47xx remoteproc driver
+ * Copyright 2019, Paul Cercueil <paul@crapouillou.net>
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+
+#include "remoteproc_internal.h"
+
+#define REG_AUX_CTRL 0x0
+#define REG_AUX_MSG_ACK 0x10
+#define REG_AUX_MSG 0x14
+#define REG_CORE_MSG_ACK 0x18
+#define REG_CORE_MSG 0x1C
+
+#define AUX_CTRL_SLEEP BIT(31)
+#define AUX_CTRL_MSG_IRQ_EN BIT(3)
+#define AUX_CTRL_NMI_RESETS BIT(2)
+#define AUX_CTRL_NMI BIT(1)
+#define AUX_CTRL_SW_RESET BIT(0)
+
+static bool auto_boot;
+module_param(auto_boot, bool, 0400);
+MODULE_PARM_DESC(auto_boot,
+ "Auto-boot the remote processor [default=false]");
+
+struct vpu_mem_map {
+ const char *name;
+ unsigned int da;
+};
+
+struct vpu_mem_info {
+ const struct vpu_mem_map *map;
+ unsigned long len;
+ void __iomem *base;
+};
+
+static const struct vpu_mem_map vpu_mem_map[] = {
+ { "tcsm0", 0x132b0000 },
+ { "tcsm1", 0xf4000000 },
+ { "sram", 0x132f0000 },
+};
+
+/**
+ * struct vpu - Ingenic VPU remoteproc private structure
+ * @irq: interrupt number
+ * @clks: pointers to the VPU and AUX clocks
+ * @aux_base: raw pointer to the AUX interface registers
+ * @mem_info: array of struct vpu_mem_info, which contain the mapping info of
+ * each of the external memories
+ * @dev: private pointer to the device
+ */
+struct vpu {
+ int irq;
+ struct clk_bulk_data clks[2];
+ void __iomem *aux_base;
+ struct vpu_mem_info mem_info[ARRAY_SIZE(vpu_mem_map)];
+ struct device *dev;
+};
+
+static int ingenic_rproc_prepare(struct rproc *rproc)
+{
+ struct vpu *vpu = rproc->priv;
+ int ret;
+
+ /* The clocks must be enabled for the firmware to be loaded in TCSM */
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(vpu->clks), vpu->clks);
+ if (ret)
+ dev_err(vpu->dev, "Unable to start clocks: %d\n", ret);
+
+ return ret;
+}
+
+static int ingenic_rproc_unprepare(struct rproc *rproc)
+{
+ struct vpu *vpu = rproc->priv;
+
+ clk_bulk_disable_unprepare(ARRAY_SIZE(vpu->clks), vpu->clks);
+
+ return 0;
+}
+
+static int ingenic_rproc_start(struct rproc *rproc)
+{
+ struct vpu *vpu = rproc->priv;
+ u32 ctrl;
+
+ enable_irq(vpu->irq);
+
+ /* Reset the AUX and enable message IRQ */
+ ctrl = AUX_CTRL_NMI_RESETS | AUX_CTRL_NMI | AUX_CTRL_MSG_IRQ_EN;
+ writel(ctrl, vpu->aux_base + REG_AUX_CTRL);
+
+ return 0;
+}
+
+static int ingenic_rproc_stop(struct rproc *rproc)
+{
+ struct vpu *vpu = rproc->priv;
+
+ disable_irq(vpu->irq);
+
+ /* Keep AUX in reset mode */
+ writel(AUX_CTRL_SW_RESET, vpu->aux_base + REG_AUX_CTRL);
+
+ return 0;
+}
+
+static void ingenic_rproc_kick(struct rproc *rproc, int vqid)
+{
+ struct vpu *vpu = rproc->priv;
+
+ writel(vqid, vpu->aux_base + REG_CORE_MSG);
+}
+
+static void *ingenic_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
+{
+ struct vpu *vpu = rproc->priv;
+ void __iomem *va = NULL;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(vpu_mem_map); i++) {
+ const struct vpu_mem_info *info = &vpu->mem_info[i];
+ const struct vpu_mem_map *map = info->map;
+
+ if (da >= map->da && (da + len) < (map->da + info->len)) {
+ va = info->base + (da - map->da);
+ break;
+ }
+ }
+
+ return (__force void *)va;
+}
+
+static const struct rproc_ops ingenic_rproc_ops = {
+ .prepare = ingenic_rproc_prepare,
+ .unprepare = ingenic_rproc_unprepare,
+ .start = ingenic_rproc_start,
+ .stop = ingenic_rproc_stop,
+ .kick = ingenic_rproc_kick,
+ .da_to_va = ingenic_rproc_da_to_va,
+};
+
+static irqreturn_t vpu_interrupt(int irq, void *data)
+{
+ struct rproc *rproc = data;
+ struct vpu *vpu = rproc->priv;
+ u32 vring;
+
+ vring = readl(vpu->aux_base + REG_AUX_MSG);
+
+ /* Ack the interrupt */
+ writel(0, vpu->aux_base + REG_AUX_MSG_ACK);
+
+ return rproc_vq_interrupt(rproc, vring);
+}
+
+static int ingenic_rproc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *mem;
+ struct rproc *rproc;
+ struct vpu *vpu;
+ unsigned int i;
+ int ret;
+
+ rproc = devm_rproc_alloc(dev, "ingenic-vpu",
+ &ingenic_rproc_ops, NULL, sizeof(*vpu));
+ if (!rproc)
+ return -ENOMEM;
+
+ rproc->auto_boot = auto_boot;
+
+ vpu = rproc->priv;
+ vpu->dev = &pdev->dev;
+ platform_set_drvdata(pdev, vpu);
+
+ mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "aux");
+ vpu->aux_base = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(vpu->aux_base)) {
+ dev_err(dev, "Failed to ioremap\n");
+ return PTR_ERR(vpu->aux_base);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(vpu_mem_map); i++) {
+ mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ vpu_mem_map[i].name);
+
+ vpu->mem_info[i].base = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(vpu->mem_info[i].base)) {
+ ret = PTR_ERR(vpu->mem_info[i].base);
+ dev_err(dev, "Failed to ioremap\n");
+ return ret;
+ }
+
+ vpu->mem_info[i].len = resource_size(mem);
+ vpu->mem_info[i].map = &vpu_mem_map[i];
+ }
+
+ vpu->clks[0].id = "vpu";
+ vpu->clks[1].id = "aux";
+
+ ret = devm_clk_bulk_get(dev, ARRAY_SIZE(vpu->clks), vpu->clks);
+ if (ret) {
+ dev_err(dev, "Failed to get clocks\n");
+ return ret;
+ }
+
+ vpu->irq = platform_get_irq(pdev, 0);
+ if (vpu->irq < 0)
+ return vpu->irq;
+
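+	/* IRQF_NO_AUTOEN: the IRQ is enabled in .start and disabled in .stop */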
+ ret = devm_request_irq(dev, vpu->irq, vpu_interrupt, IRQF_NO_AUTOEN,
+ "VPU", rproc);
+ if (ret < 0) {
+ dev_err(dev, "Failed to request IRQ\n");
+ return ret;
+ }
+
+ ret = devm_rproc_add(dev, rproc);
+ if (ret) {
+ dev_err(dev, "Failed to register remote processor\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id ingenic_rproc_of_matches[] = {
+ { .compatible = "ingenic,jz4770-vpu-rproc", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ingenic_rproc_of_matches);
+
+static struct platform_driver ingenic_rproc_driver = {
+ .probe = ingenic_rproc_probe,
+ .driver = {
+ .name = "ingenic-vpu",
+ .of_match_table = ingenic_rproc_of_matches,
+ },
+};
+module_platform_driver(ingenic_rproc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
+MODULE_DESCRIPTION("Ingenic JZ47xx Remote Processor control driver");
diff --git a/drivers/remoteproc/keystone_remoteproc.c b/drivers/remoteproc/keystone_remoteproc.c
new file mode 100644
index 000000000..95b397419
--- /dev/null
+++ b/drivers/remoteproc/keystone_remoteproc.c
@@ -0,0 +1,515 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI Keystone DSP remoteproc driver
+ *
+ * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/workqueue.h>
+#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/remoteproc.h>
+#include <linux/reset.h>
+
+#include "remoteproc_internal.h"
+
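+/* The DSP-internal view of its RAMs lies in the low 16 MiB of the address map */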
+#define KEYSTONE_RPROC_LOCAL_ADDRESS_MASK (SZ_16M - 1)
+
+/**
+ * struct keystone_rproc_mem - internal memory structure
+ * @cpu_addr: MPU virtual address of the memory region
+ * @bus_addr: Bus address used to access the memory region
+ * @dev_addr: Device address of the memory region from DSP view
+ * @size: Size of the memory region
+ */
+struct keystone_rproc_mem {
+ void __iomem *cpu_addr;
+ phys_addr_t bus_addr;
+ u32 dev_addr;
+ size_t size;
+};
+
+/**
+ * struct keystone_rproc - keystone remote processor driver structure
+ * @dev: cached device pointer
+ * @rproc: remoteproc device handle
+ * @mem: internal memory regions data
+ * @num_mems: number of internal memory regions
+ * @dev_ctrl: device control regmap handle
+ * @reset: reset control handle
+ * @boot_offset: boot register offset in @dev_ctrl regmap
+ * @irq_ring: irq entry for vring
+ * @irq_fault: irq entry for exception
+ * @kick_gpio: gpio used for virtio kicks
+ * @workqueue: workqueue for processing virtio interrupts
+ */
+struct keystone_rproc {
+ struct device *dev;
+ struct rproc *rproc;
+ struct keystone_rproc_mem *mem;
+ int num_mems;
+ struct regmap *dev_ctrl;
+ struct reset_control *reset;
+ struct gpio_desc *kick_gpio;
+ u32 boot_offset;
+ int irq_ring;
+ int irq_fault;
+ struct work_struct workqueue;
+};
+
+/* Put the DSP processor into reset */
+static void keystone_rproc_dsp_reset(struct keystone_rproc *ksproc)
+{
+ reset_control_assert(ksproc->reset);
+}
+
+/* Configure the boot address and boot the DSP processor */
+static int keystone_rproc_dsp_boot(struct keystone_rproc *ksproc, u32 boot_addr)
+{
+ int ret;
+
+ if (boot_addr & (SZ_1K - 1)) {
+ dev_err(ksproc->dev, "invalid boot address 0x%x, must be aligned on a 1KB boundary\n",
+ boot_addr);
+ return -EINVAL;
+ }
+
+ ret = regmap_write(ksproc->dev_ctrl, ksproc->boot_offset, boot_addr);
+ if (ret) {
+ dev_err(ksproc->dev, "regmap_write of boot address failed, status = %d\n",
+ ret);
+ return ret;
+ }
+
+ reset_control_deassert(ksproc->reset);
+
+ return 0;
+}
+
+/*
+ * Process the remoteproc exceptions
+ *
+ * Exception reporting on Keystone DSP remote processors is very simple
+ * compared to the equivalent processors in the OMAP family: exceptions are
+ * signalled through a dedicated software-triggered interrupt source in the
+ * IPC interrupt generation register.
+ *
+ * This function just invokes rproc_report_crash() to report the exception
+ * to the remoteproc driver core and trigger a recovery.
+ */
+static irqreturn_t keystone_rproc_exception_interrupt(int irq, void *dev_id)
+{
+ struct keystone_rproc *ksproc = dev_id;
+
+ rproc_report_crash(ksproc->rproc, RPROC_FATAL_ERROR);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Main virtqueue message workqueue function
+ *
+ * This function is executed upon scheduling of the keystone remoteproc
+ * driver's workqueue. The workqueue is scheduled by the vring ISR handler.
+ *
+ * There is no payload message indicating the virtqueue index, as is the
+ * case with mailbox-based implementations in the OMAP family. As such, this
+ * handler processes both the Tx and Rx virtqueue indices on every invocation.
+ * The rproc_vq_interrupt() function can detect whether there are new
+ * unprocessed messages (it returns IRQ_NONE vs IRQ_HANDLED), but there is
+ * no need to check these return values. Triggering index 0 processes all
+ * pending Rx buffers, and triggering index 1 processes all newly available
+ * Tx buffers and wakes up any potentially blocked senders.
+ *
+ * NOTE:
+ * 1. A payload could be added by using some of the source bits in the
+ * IPC interrupt generation registers, but this would need additional
+ *    changes to the overall IPC stack, and currently there is no benefit
+ *    in adopting that approach.
+ * 2. The current logic is based on an inherent design assumption of supporting
+ * only 2 vrings, but this can be changed if needed.
+ */
+static void handle_event(struct work_struct *work)
+{
+ struct keystone_rproc *ksproc =
+ container_of(work, struct keystone_rproc, workqueue);
+
+ rproc_vq_interrupt(ksproc->rproc, 0);
+ rproc_vq_interrupt(ksproc->rproc, 1);
+}
+
+/*
+ * Interrupt handler for processing vring kicks from remote processor
+ */
+static irqreturn_t keystone_rproc_vring_interrupt(int irq, void *dev_id)
+{
+ struct keystone_rproc *ksproc = dev_id;
+
+ schedule_work(&ksproc->workqueue);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Power up the DSP remote processor.
+ *
+ * This function will be invoked only after the firmware for this rproc
+ * was loaded, parsed successfully, and all of its resource requirements
+ * were met.
+ */
+static int keystone_rproc_start(struct rproc *rproc)
+{
+ struct keystone_rproc *ksproc = rproc->priv;
+ int ret;
+
+ INIT_WORK(&ksproc->workqueue, handle_event);
+
+ ret = request_irq(ksproc->irq_ring, keystone_rproc_vring_interrupt, 0,
+ dev_name(ksproc->dev), ksproc);
+ if (ret) {
+ dev_err(ksproc->dev, "failed to enable vring interrupt, ret = %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = request_irq(ksproc->irq_fault, keystone_rproc_exception_interrupt,
+ 0, dev_name(ksproc->dev), ksproc);
+ if (ret) {
+ dev_err(ksproc->dev, "failed to enable exception interrupt, ret = %d\n",
+ ret);
+ goto free_vring_irq;
+ }
+
+ ret = keystone_rproc_dsp_boot(ksproc, rproc->bootaddr);
+ if (ret)
+ goto free_exc_irq;
+
+ return 0;
+
+free_exc_irq:
+ free_irq(ksproc->irq_fault, ksproc);
+free_vring_irq:
+ free_irq(ksproc->irq_ring, ksproc);
+ flush_work(&ksproc->workqueue);
+out:
+ return ret;
+}
+
+/*
+ * Stop the DSP remote processor.
+ *
+ * This function puts the DSP processor into reset, and finishes processing
+ * of any pending messages.
+ */
+static int keystone_rproc_stop(struct rproc *rproc)
+{
+ struct keystone_rproc *ksproc = rproc->priv;
+
+ keystone_rproc_dsp_reset(ksproc);
+ free_irq(ksproc->irq_fault, ksproc);
+ free_irq(ksproc->irq_ring, ksproc);
+ flush_work(&ksproc->workqueue);
+
+ return 0;
+}
+
+/*
+ * Kick the remote processor to notify about pending unprocessed messages.
+ * The vqid argument is not used and is inconsequential, as the kick is
+ * performed through a simulated GPIO (a bit in an IPC interrupt-triggering
+ * register); the remote processor is expected to process both its Tx and Rx
+ * virtqueues.
+ */
+static void keystone_rproc_kick(struct rproc *rproc, int vqid)
+{
+ struct keystone_rproc *ksproc = rproc->priv;
+
+ if (!ksproc->kick_gpio)
+ return;
+
+ gpiod_set_value(ksproc->kick_gpio, 1);
+}
+
+/*
+ * Custom function to translate a DSP device address (internal RAMs only) to a
+ * kernel virtual address. The DSPs can access their RAMs either at an
+ * internal address visible only from the DSP, or at the SoC-level bus
+ * address. Both address ranges need to be checked during translation. The
+ * translated addresses can be used either by the remoteproc core for loading
+ * (when using the kernel remoteproc loader), or by any rpmsg bus drivers.
+ */
+static void *keystone_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
+{
+ struct keystone_rproc *ksproc = rproc->priv;
+ void __iomem *va = NULL;
+ phys_addr_t bus_addr;
+ u32 dev_addr, offset;
+ size_t size;
+ int i;
+
+ if (len == 0)
+ return NULL;
+
+ for (i = 0; i < ksproc->num_mems; i++) {
+ bus_addr = ksproc->mem[i].bus_addr;
+ dev_addr = ksproc->mem[i].dev_addr;
+ size = ksproc->mem[i].size;
+
+ if (da < KEYSTONE_RPROC_LOCAL_ADDRESS_MASK) {
+ /* handle DSP-view addresses */
+ if ((da >= dev_addr) &&
+ ((da + len) <= (dev_addr + size))) {
+ offset = da - dev_addr;
+ va = ksproc->mem[i].cpu_addr + offset;
+ break;
+ }
+ } else {
+ /* handle SoC-view addresses */
+ if ((da >= bus_addr) &&
+ (da + len) <= (bus_addr + size)) {
+ offset = da - bus_addr;
+ va = ksproc->mem[i].cpu_addr + offset;
+ break;
+ }
+ }
+ }
+
+ return (__force void *)va;
+}
+
+static const struct rproc_ops keystone_rproc_ops = {
+ .start = keystone_rproc_start,
+ .stop = keystone_rproc_stop,
+ .kick = keystone_rproc_kick,
+ .da_to_va = keystone_rproc_da_to_va,
+};
+
+static int keystone_rproc_of_get_memories(struct platform_device *pdev,
+ struct keystone_rproc *ksproc)
+{
+ static const char * const mem_names[] = {"l2sram", "l1pram", "l1dram"};
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int num_mems = 0;
+ int i;
+
+ num_mems = ARRAY_SIZE(mem_names);
+ ksproc->mem = devm_kcalloc(ksproc->dev, num_mems,
+ sizeof(*ksproc->mem), GFP_KERNEL);
+ if (!ksproc->mem)
+ return -ENOMEM;
+
+ for (i = 0; i < num_mems; i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ mem_names[i]);
+ ksproc->mem[i].cpu_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ksproc->mem[i].cpu_addr)) {
+ dev_err(dev, "failed to parse and map %s memory\n",
+ mem_names[i]);
+ return PTR_ERR(ksproc->mem[i].cpu_addr);
+ }
+ ksproc->mem[i].bus_addr = res->start;
+ ksproc->mem[i].dev_addr =
+ res->start & KEYSTONE_RPROC_LOCAL_ADDRESS_MASK;
+ ksproc->mem[i].size = resource_size(res);
+
+ /* zero out memories to start in a pristine state */
+ memset((__force void *)ksproc->mem[i].cpu_addr, 0,
+ ksproc->mem[i].size);
+ }
+ ksproc->num_mems = num_mems;
+
+ return 0;
+}
+
+static int keystone_rproc_of_get_dev_syscon(struct platform_device *pdev,
+ struct keystone_rproc *ksproc)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ if (!of_property_read_bool(np, "ti,syscon-dev")) {
+ dev_err(dev, "ti,syscon-dev property is absent\n");
+ return -EINVAL;
+ }
+
+ ksproc->dev_ctrl =
+ syscon_regmap_lookup_by_phandle(np, "ti,syscon-dev");
+ if (IS_ERR(ksproc->dev_ctrl)) {
+ ret = PTR_ERR(ksproc->dev_ctrl);
+ return ret;
+ }
+
+ if (of_property_read_u32_index(np, "ti,syscon-dev", 1,
+ &ksproc->boot_offset)) {
+ dev_err(dev, "couldn't read the boot register offset\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int keystone_rproc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct keystone_rproc *ksproc;
+ struct rproc *rproc;
+ int dsp_id;
+ char *fw_name = NULL;
+ char *template = "keystone-dsp%d-fw";
+ int name_len = 0;
+ int ret = 0;
+
+ if (!np) {
+ dev_err(dev, "only DT-based devices are supported\n");
+ return -ENODEV;
+ }
+
+ dsp_id = of_alias_get_id(np, "rproc");
+ if (dsp_id < 0) {
+ dev_warn(dev, "device does not have an alias id\n");
+ return dsp_id;
+ }
+
+ /* construct a custom default fw name - subject to change in future */
+ name_len = strlen(template); /* assuming a single digit alias */
+ fw_name = devm_kzalloc(dev, name_len, GFP_KERNEL);
+ if (!fw_name)
+ return -ENOMEM;
+ snprintf(fw_name, name_len, template, dsp_id);
+
+ rproc = rproc_alloc(dev, dev_name(dev), &keystone_rproc_ops, fw_name,
+ sizeof(*ksproc));
+ if (!rproc)
+ return -ENOMEM;
+
+ rproc->has_iommu = false;
+ ksproc = rproc->priv;
+ ksproc->rproc = rproc;
+ ksproc->dev = dev;
+
+ ret = keystone_rproc_of_get_dev_syscon(pdev, ksproc);
+ if (ret)
+ goto free_rproc;
+
+ ksproc->reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(ksproc->reset)) {
+ ret = PTR_ERR(ksproc->reset);
+ goto free_rproc;
+ }
+
+ /* enable clock for accessing DSP internal memories */
+ pm_runtime_enable(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable clock, status = %d\n", ret);
+ goto disable_rpm;
+ }
+
+ ret = keystone_rproc_of_get_memories(pdev, ksproc);
+ if (ret)
+ goto disable_clk;
+
+ ksproc->irq_ring = platform_get_irq_byname(pdev, "vring");
+ if (ksproc->irq_ring < 0) {
+ ret = ksproc->irq_ring;
+ goto disable_clk;
+ }
+
+ ksproc->irq_fault = platform_get_irq_byname(pdev, "exception");
+ if (ksproc->irq_fault < 0) {
+ ret = ksproc->irq_fault;
+ goto disable_clk;
+ }
+
+ ksproc->kick_gpio = gpiod_get(dev, "kick", GPIOD_ASIS);
+ ret = PTR_ERR_OR_ZERO(ksproc->kick_gpio);
+ if (ret) {
+ dev_err(dev, "failed to get gpio for virtio kicks, status = %d\n",
+ ret);
+ goto disable_clk;
+ }
+
+ if (of_reserved_mem_device_init(dev))
+ dev_warn(dev, "device does not have specific CMA pool\n");
+
+ /* ensure the DSP is in reset before loading firmware */
+ ret = reset_control_status(ksproc->reset);
+ if (ret < 0) {
+ dev_err(dev, "failed to get reset status, status = %d\n", ret);
+ goto release_mem;
+ } else if (ret == 0) {
+ WARN(1, "device is not in reset\n");
+ keystone_rproc_dsp_reset(ksproc);
+ }
+
+ ret = rproc_add(rproc);
+ if (ret) {
+ dev_err(dev, "failed to add register device with remoteproc core, status = %d\n",
+ ret);
+ goto release_mem;
+ }
+
+ platform_set_drvdata(pdev, ksproc);
+
+ return 0;
+
+release_mem:
+ of_reserved_mem_device_release(dev);
+ gpiod_put(ksproc->kick_gpio);
+disable_clk:
+ pm_runtime_put_sync(dev);
+disable_rpm:
+ pm_runtime_disable(dev);
+free_rproc:
+ rproc_free(rproc);
+ return ret;
+}
+
+static int keystone_rproc_remove(struct platform_device *pdev)
+{
+ struct keystone_rproc *ksproc = platform_get_drvdata(pdev);
+
+ rproc_del(ksproc->rproc);
+ gpiod_put(ksproc->kick_gpio);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ rproc_free(ksproc->rproc);
+ of_reserved_mem_device_release(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id keystone_rproc_of_match[] = {
+ { .compatible = "ti,k2hk-dsp", },
+ { .compatible = "ti,k2l-dsp", },
+ { .compatible = "ti,k2e-dsp", },
+ { .compatible = "ti,k2g-dsp", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, keystone_rproc_of_match);
+
+static struct platform_driver keystone_rproc_driver = {
+ .probe = keystone_rproc_probe,
+ .remove = keystone_rproc_remove,
+ .driver = {
+ .name = "keystone-rproc",
+ .of_match_table = keystone_rproc_of_match,
+ },
+};
+
+module_platform_driver(keystone_rproc_driver);
+
+MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI Keystone DSP Remoteproc driver");
diff --git a/drivers/remoteproc/meson_mx_ao_arc.c b/drivers/remoteproc/meson_mx_ao_arc.c
new file mode 100644
index 000000000..462cddab6
--- /dev/null
+++ b/drivers/remoteproc/meson_mx_ao_arc.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2020 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/remoteproc.h>
+#include <linux/reset.h>
+#include <linux/sizes.h>
+
+#include "remoteproc_internal.h"
+
+#define AO_REMAP_REG0 0x0
+#define AO_REMAP_REG0_REMAP_AHB_SRAM_BITS_17_14_FOR_ARM_CPU GENMASK(3, 0)
+
+#define AO_REMAP_REG1 0x4
+#define AO_REMAP_REG1_MOVE_AHB_SRAM_TO_0X0_INSTEAD_OF_DDR BIT(4)
+#define AO_REMAP_REG1_REMAP_AHB_SRAM_BITS_17_14_FOR_MEDIA_CPU GENMASK(3, 0)
+
+#define AO_CPU_CNTL 0x0
+#define AO_CPU_CNTL_AHB_SRAM_BITS_31_20 GENMASK(28, 16)
+#define AO_CPU_CNTL_HALT BIT(9)
+#define AO_CPU_CNTL_UNKNONWN BIT(8)
+#define AO_CPU_CNTL_RUN BIT(0)
+
+#define AO_CPU_STAT 0x4
+
+#define AO_SECURE_REG0 0x0
+#define AO_SECURE_REG0_AHB_SRAM_BITS_19_12 GENMASK(15, 8)
+
+/* Only bits [31:20] and [17:14] are usable; all other bits must be zero */
+#define MESON_AO_RPROC_SRAM_USABLE_BITS 0xfff3c000ULL
+
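+/* Subtracted from the SRAM address before it is programmed into AO_CPU_CNTL */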
+#define MESON_AO_RPROC_MEMORY_OFFSET 0x10000000
+
+struct meson_mx_ao_arc_rproc_priv {
+ void __iomem *remap_base;
+ void __iomem *cpu_base;
+ unsigned long sram_va;
+ phys_addr_t sram_pa;
+ size_t sram_size;
+ struct gen_pool *sram_pool;
+ struct reset_control *arc_reset;
+ struct clk *arc_pclk;
+ struct regmap *secbus2_regmap;
+};
+
+static int meson_mx_ao_arc_rproc_start(struct rproc *rproc)
+{
+ struct meson_mx_ao_arc_rproc_priv *priv = rproc->priv;
+ phys_addr_t translated_sram_addr;
+ u32 tmp;
+ int ret;
+
+ ret = clk_prepare_enable(priv->arc_pclk);
+ if (ret)
+ return ret;
+
+ tmp = FIELD_PREP(AO_REMAP_REG0_REMAP_AHB_SRAM_BITS_17_14_FOR_ARM_CPU,
+ priv->sram_pa >> 14);
+ writel(tmp, priv->remap_base + AO_REMAP_REG0);
+
+ /*
+ * The SRAM content as seen by the ARC core always starts at 0x0
+ * regardless of the value given here (this was discovered by trial and
+ * error). For SoCs older than Meson6 we probably have to set
+ * AO_REMAP_REG1_MOVE_AHB_SRAM_TO_0X0_INSTEAD_OF_DDR to achieve the
+ * same. (At least) For Meson8 and newer that bit must not be set.
+ */
+ writel(0x0, priv->remap_base + AO_REMAP_REG1);
+
+ regmap_update_bits(priv->secbus2_regmap, AO_SECURE_REG0,
+ AO_SECURE_REG0_AHB_SRAM_BITS_19_12,
+ FIELD_PREP(AO_SECURE_REG0_AHB_SRAM_BITS_19_12,
+ priv->sram_pa >> 12));
+
+ ret = reset_control_reset(priv->arc_reset);
+ if (ret) {
+ clk_disable_unprepare(priv->arc_pclk);
+ return ret;
+ }
+
+ usleep_range(10, 100);
+
+ /*
+ * Convert from 0xd9000000 to 0xc9000000 as the vendor driver does.
+ * This only seems to be relevant for the AO_CPU_CNTL register. It is
+ * unknown why this is needed.
+ */
+ translated_sram_addr = priv->sram_pa - MESON_AO_RPROC_MEMORY_OFFSET;
+
+ tmp = FIELD_PREP(AO_CPU_CNTL_AHB_SRAM_BITS_31_20,
+ translated_sram_addr >> 20);
+ tmp |= AO_CPU_CNTL_UNKNONWN | AO_CPU_CNTL_RUN;
+ writel(tmp, priv->cpu_base + AO_CPU_CNTL);
+
+ usleep_range(20, 200);
+
+ return 0;
+}
+
+static int meson_mx_ao_arc_rproc_stop(struct rproc *rproc)
+{
+ struct meson_mx_ao_arc_rproc_priv *priv = rproc->priv;
+
+ writel(AO_CPU_CNTL_HALT, priv->cpu_base + AO_CPU_CNTL);
+
+ clk_disable_unprepare(priv->arc_pclk);
+
+ return 0;
+}
+
+static void *meson_mx_ao_arc_rproc_da_to_va(struct rproc *rproc, u64 da,
+ size_t len, bool *is_iomem)
+{
+ struct meson_mx_ao_arc_rproc_priv *priv = rproc->priv;
+
+ /* The memory from the ARC core's perspective always starts at 0x0. */
+ if ((da + len) > priv->sram_size)
+ return NULL;
+
+ return (void *)priv->sram_va + da;
+}
+
+static struct rproc_ops meson_mx_ao_arc_rproc_ops = {
+ .start = meson_mx_ao_arc_rproc_start,
+ .stop = meson_mx_ao_arc_rproc_stop,
+ .da_to_va = meson_mx_ao_arc_rproc_da_to_va,
+ .get_boot_addr = rproc_elf_get_boot_addr,
+ .load = rproc_elf_load_segments,
+ .sanity_check = rproc_elf_sanity_check,
+};
+
+static int meson_mx_ao_arc_rproc_probe(struct platform_device *pdev)
+{
+ struct meson_mx_ao_arc_rproc_priv *priv;
+ struct device *dev = &pdev->dev;
+ const char *fw_name = NULL;
+ struct rproc *rproc;
+ int ret;
+
+ device_property_read_string(dev, "firmware-name", &fw_name);
+
+ rproc = devm_rproc_alloc(dev, "meson-mx-ao-arc",
+ &meson_mx_ao_arc_rproc_ops, fw_name,
+ sizeof(*priv));
+ if (!rproc)
+ return -ENOMEM;
+
+ rproc->has_iommu = false;
+ priv = rproc->priv;
+
+ priv->sram_pool = of_gen_pool_get(dev->of_node, "sram", 0);
+ if (!priv->sram_pool) {
+ dev_err(dev, "Could not get SRAM pool\n");
+ return -ENODEV;
+ }
+
+ priv->sram_size = gen_pool_avail(priv->sram_pool);
+
+ priv->sram_va = gen_pool_alloc(priv->sram_pool, priv->sram_size);
+ if (!priv->sram_va) {
+ dev_err(dev, "Could not alloc memory in SRAM pool\n");
+ return -ENOMEM;
+ }
+
+ priv->sram_pa = gen_pool_virt_to_phys(priv->sram_pool, priv->sram_va);
+ if (priv->sram_pa & ~MESON_AO_RPROC_SRAM_USABLE_BITS) {
+ dev_err(dev, "SRAM address contains unusable bits\n");
+ ret = -EINVAL;
+ goto err_free_genpool;
+ }
+
+ priv->secbus2_regmap = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "amlogic,secbus2");
+ if (IS_ERR(priv->secbus2_regmap)) {
+ dev_err(dev, "Failed to find SECBUS2 regmap\n");
+ ret = PTR_ERR(priv->secbus2_regmap);
+ goto err_free_genpool;
+ }
+
+ priv->remap_base = devm_platform_ioremap_resource_byname(pdev, "remap");
+ if (IS_ERR(priv->remap_base)) {
+ ret = PTR_ERR(priv->remap_base);
+ goto err_free_genpool;
+ }
+
+ priv->cpu_base = devm_platform_ioremap_resource_byname(pdev, "cpu");
+ if (IS_ERR(priv->cpu_base)) {
+ ret = PTR_ERR(priv->cpu_base);
+ goto err_free_genpool;
+ }
+
+ priv->arc_reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(priv->arc_reset)) {
+ dev_err(dev, "Failed to get ARC reset\n");
+ ret = PTR_ERR(priv->arc_reset);
+ goto err_free_genpool;
+ }
+
+ priv->arc_pclk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->arc_pclk)) {
+ dev_err(dev, "Failed to get the ARC PCLK\n");
+ ret = PTR_ERR(priv->arc_pclk);
+ goto err_free_genpool;
+ }
+
+ platform_set_drvdata(pdev, rproc);
+
+ ret = rproc_add(rproc);
+ if (ret)
+ goto err_free_genpool;
+
+ return 0;
+
+err_free_genpool:
+ gen_pool_free(priv->sram_pool, priv->sram_va, priv->sram_size);
+ return ret;
+}
+
+static int meson_mx_ao_arc_rproc_remove(struct platform_device *pdev)
+{
+ struct rproc *rproc = platform_get_drvdata(pdev);
+ struct meson_mx_ao_arc_rproc_priv *priv = rproc->priv;
+
+ rproc_del(rproc);
+ gen_pool_free(priv->sram_pool, priv->sram_va, priv->sram_size);
+
+ return 0;
+}
+
+static const struct of_device_id meson_mx_ao_arc_rproc_match[] = {
+ { .compatible = "amlogic,meson8-ao-arc" },
+ { .compatible = "amlogic,meson8b-ao-arc" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, meson_mx_ao_arc_rproc_match);
+
+static struct platform_driver meson_mx_ao_arc_rproc_driver = {
+ .probe = meson_mx_ao_arc_rproc_probe,
+ .remove = meson_mx_ao_arc_rproc_remove,
+ .driver = {
+ .name = "meson-mx-ao-arc-rproc",
+ .of_match_table = meson_mx_ao_arc_rproc_match,
+ },
+};
+module_platform_driver(meson_mx_ao_arc_rproc_driver);
+
+MODULE_DESCRIPTION("Amlogic Meson6/8/8b/8m2 AO ARC remote processor driver");
+MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/remoteproc/mtk_common.h b/drivers/remoteproc/mtk_common.h
new file mode 100644
index 000000000..ea6fa1100
--- /dev/null
+++ b/drivers/remoteproc/mtk_common.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ */
+
+#ifndef __RPROC_MTK_COMMON_H
+#define __RPROC_MTK_COMMON_H
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/remoteproc/mtk_scp.h>
+
+#define MT8183_SW_RSTN 0x0
+#define MT8183_SW_RSTN_BIT BIT(0)
+#define MT8183_SCP_TO_HOST 0x1C
+#define MT8183_SCP_IPC_INT_BIT BIT(0)
+#define MT8183_SCP_WDT_INT_BIT BIT(8)
+#define MT8183_HOST_TO_SCP 0x28
+#define MT8183_HOST_IPC_INT_BIT BIT(0)
+#define MT8183_WDT_CFG 0x84
+#define MT8183_SCP_CLK_SW_SEL 0x4000
+#define MT8183_SCP_CLK_DIV_SEL 0x4024
+#define MT8183_SCP_SRAM_PDN 0x402C
+#define MT8183_SCP_L1_SRAM_PD 0x4080
+#define MT8183_SCP_TCM_TAIL_SRAM_PD 0x4094
+
+#define MT8183_SCP_CACHE_SEL(x) (0x14000 + (x) * 0x3000)
+#define MT8183_SCP_CACHE_CON MT8183_SCP_CACHE_SEL(0)
+#define MT8183_SCP_DCACHE_CON MT8183_SCP_CACHE_SEL(1)
+#define MT8183_SCP_CACHESIZE_8KB BIT(8)
+#define MT8183_SCP_CACHE_CON_WAYEN BIT(10)
+
+#define MT8186_SCP_L1_SRAM_PD_P1 0x40B0
+#define MT8186_SCP_L1_SRAM_PD_P2	0x40B4
+
+#define MT8192_L2TCM_SRAM_PD_0 0x10C0
+#define MT8192_L2TCM_SRAM_PD_1 0x10C4
+#define MT8192_L2TCM_SRAM_PD_2 0x10C8
+#define MT8192_L1TCM_SRAM_PDN 0x102C
+#define MT8192_CPU0_SRAM_PD 0x1080
+
+#define MT8192_SCP2APMCU_IPC_SET 0x4080
+#define MT8192_SCP2APMCU_IPC_CLR 0x4084
+#define MT8192_SCP_IPC_INT_BIT BIT(0)
+#define MT8192_SCP2SPM_IPC_CLR 0x4094
+#define MT8192_GIPC_IN_SET 0x4098
+#define MT8192_HOST_IPC_INT_BIT BIT(0)
+
+#define MT8192_CORE0_SW_RSTN_CLR 0x10000
+#define MT8192_CORE0_SW_RSTN_SET 0x10004
+#define MT8192_CORE0_MEM_ATT_PREDEF 0x10008
+#define MT8192_CORE0_WDT_IRQ 0x10030
+#define MT8192_CORE0_WDT_CFG 0x10034
+
+#define MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS GENMASK(7, 4)
+
+#define SCP_FW_VER_LEN 32
+#define SCP_SHARE_BUFFER_SIZE 288
+
+struct scp_run {
+ u32 signaled;
+ s8 fw_ver[SCP_FW_VER_LEN];
+ u32 dec_capability;
+ u32 enc_capability;
+ wait_queue_head_t wq;
+};
+
+struct scp_ipi_desc {
+ /* For protecting handler. */
+ struct mutex lock;
+ scp_ipi_handler_t handler;
+ void *priv;
+};
+
+struct mtk_scp;
+
+struct mtk_scp_of_data {
+ int (*scp_clk_get)(struct mtk_scp *scp);
+ int (*scp_before_load)(struct mtk_scp *scp);
+ void (*scp_irq_handler)(struct mtk_scp *scp);
+ void (*scp_reset_assert)(struct mtk_scp *scp);
+ void (*scp_reset_deassert)(struct mtk_scp *scp);
+ void (*scp_stop)(struct mtk_scp *scp);
+ void *(*scp_da_to_va)(struct mtk_scp *scp, u64 da, size_t len);
+
+ u32 host_to_scp_reg;
+ u32 host_to_scp_int_bit;
+
+ size_t ipi_buf_offset;
+};
+
+struct mtk_scp {
+ struct device *dev;
+ struct rproc *rproc;
+ struct clk *clk;
+ void __iomem *reg_base;
+ void __iomem *sram_base;
+ size_t sram_size;
+ phys_addr_t sram_phys;
+ void __iomem *l1tcm_base;
+ size_t l1tcm_size;
+ phys_addr_t l1tcm_phys;
+
+ const struct mtk_scp_of_data *data;
+
+ struct mtk_share_obj __iomem *recv_buf;
+ struct mtk_share_obj __iomem *send_buf;
+ struct scp_run run;
+ /* To prevent multiple ipi_send run concurrently. */
+ struct mutex send_lock;
+ struct scp_ipi_desc ipi_desc[SCP_IPI_MAX];
+ bool ipi_id_ack[SCP_IPI_MAX];
+ wait_queue_head_t ack_wq;
+
+ void *cpu_addr;
+ dma_addr_t dma_addr;
+ size_t dram_size;
+
+ struct rproc_subdev *rpmsg_subdev;
+};
+
+/**
+ * struct mtk_share_obj - SRAM buffer shared with AP and SCP
+ *
+ * @id: IPI id
+ * @len: share buffer length
+ * @share_buf: share buffer data
+ */
+struct mtk_share_obj {
+ u32 id;
+ u32 len;
+ u8 share_buf[SCP_SHARE_BUFFER_SIZE];
+};
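+
+/*
+ * scp_ipi_init() in mtk_scp.c places two of these objects back to back in
+ * SCP SRAM at the IPI buffer offset: the receive buffer (SCP to host)
+ * first, immediately followed by the send buffer (host to SCP).
+ */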
+
+void scp_memcpy_aligned(void __iomem *dst, const void *src, unsigned int len);
+void scp_ipi_lock(struct mtk_scp *scp, u32 id);
+void scp_ipi_unlock(struct mtk_scp *scp, u32 id);
+
+#endif
diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c
new file mode 100644
index 000000000..d421a2cca
--- /dev/null
+++ b/drivers/remoteproc/mtk_scp.c
@@ -0,0 +1,1015 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2019 MediaTek Inc.
+
+#include <asm/barrier.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/remoteproc/mtk_scp.h>
+#include <linux/rpmsg/mtk_rpmsg.h>
+
+#include "mtk_common.h"
+#include "remoteproc_internal.h"
+
+#define MAX_CODE_SIZE 0x500000
+#define SECTION_NAME_IPI_BUFFER ".ipi_buffer"
+
+/**
+ * scp_get() - get a reference to SCP.
+ *
+ * @pdev: the platform device of the module requesting the SCP platform
+ *        device in order to use the SCP API.
+ *
+ * Return: NULL on failure, otherwise a reference to the SCP.
+ **/
+struct mtk_scp *scp_get(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *scp_node;
+ struct platform_device *scp_pdev;
+
+ scp_node = of_parse_phandle(dev->of_node, "mediatek,scp", 0);
+ if (!scp_node) {
+ dev_err(dev, "can't get SCP node\n");
+ return NULL;
+ }
+
+ scp_pdev = of_find_device_by_node(scp_node);
+ of_node_put(scp_node);
+
+ if (WARN_ON(!scp_pdev)) {
+		dev_err(dev, "SCP platform device lookup failed\n");
+ return NULL;
+ }
+
+ return platform_get_drvdata(scp_pdev);
+}
+EXPORT_SYMBOL_GPL(scp_get);
+
+/**
+ * scp_put() - release the reference to the SCP obtained from scp_get()
+ *
+ * @scp: mtk_scp structure from scp_get().
+ **/
+void scp_put(struct mtk_scp *scp)
+{
+ put_device(scp->dev);
+}
+EXPORT_SYMBOL_GPL(scp_put);
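+
+/*
+ * Example usage (editorial sketch, not part of the driver; client_pdev and
+ * the error code are illustrative): a client pairs scp_get() with scp_put()
+ * once it is done with the SCP API:
+ *
+ *	struct mtk_scp *scp = scp_get(client_pdev);
+ *
+ *	if (!scp)
+ *		return -ENODEV;
+ *	...
+ *	scp_put(scp);
+ */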
+
+static void scp_wdt_handler(struct mtk_scp *scp, u32 scp_to_host)
+{
+ dev_err(scp->dev, "SCP watchdog timeout! 0x%x", scp_to_host);
+ rproc_report_crash(scp->rproc, RPROC_WATCHDOG);
+}
+
+static void scp_init_ipi_handler(void *data, unsigned int len, void *priv)
+{
+ struct mtk_scp *scp = (struct mtk_scp *)priv;
+ struct scp_run *run = (struct scp_run *)data;
+
+ scp->run.signaled = run->signaled;
+ strscpy(scp->run.fw_ver, run->fw_ver, SCP_FW_VER_LEN);
+ scp->run.dec_capability = run->dec_capability;
+ scp->run.enc_capability = run->enc_capability;
+ wake_up_interruptible(&scp->run.wq);
+}
+
+static void scp_ipi_handler(struct mtk_scp *scp)
+{
+ struct mtk_share_obj __iomem *rcv_obj = scp->recv_buf;
+ struct scp_ipi_desc *ipi_desc = scp->ipi_desc;
+ u8 tmp_data[SCP_SHARE_BUFFER_SIZE];
+ scp_ipi_handler_t handler;
+ u32 id = readl(&rcv_obj->id);
+ u32 len = readl(&rcv_obj->len);
+
+ if (len > SCP_SHARE_BUFFER_SIZE) {
+ dev_err(scp->dev, "ipi message too long (len %d, max %d)", len,
+ SCP_SHARE_BUFFER_SIZE);
+ return;
+ }
+ if (id >= SCP_IPI_MAX) {
+ dev_err(scp->dev, "No such ipi id = %d\n", id);
+ return;
+ }
+
+ scp_ipi_lock(scp, id);
+ handler = ipi_desc[id].handler;
+ if (!handler) {
+		dev_err(scp->dev, "No handler for ipi id = %d\n", id);
+ scp_ipi_unlock(scp, id);
+ return;
+ }
+
+ memcpy_fromio(tmp_data, &rcv_obj->share_buf, len);
+ handler(tmp_data, len, ipi_desc[id].priv);
+ scp_ipi_unlock(scp, id);
+
+ scp->ipi_id_ack[id] = true;
+ wake_up(&scp->ack_wq);
+}
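+
+/*
+ * The ack bookkeeping at the end of scp_ipi_handler() pairs with the
+ * optional ack wait in scp_ipi_send() (mtk_scp_ipi.c), which sleeps on
+ * ack_wq until ipi_id_ack[id] becomes true.
+ */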
+
+static int scp_elf_read_ipi_buf_addr(struct mtk_scp *scp,
+ const struct firmware *fw,
+ size_t *offset);
+
+static int scp_ipi_init(struct mtk_scp *scp, const struct firmware *fw)
+{
+ int ret;
+ size_t offset;
+
+ /* read the ipi buf addr from FW itself first */
+ ret = scp_elf_read_ipi_buf_addr(scp, fw, &offset);
+ if (ret) {
+ /* use default ipi buf addr if the FW doesn't have it */
+ offset = scp->data->ipi_buf_offset;
+ if (!offset)
+ return ret;
+ }
+ dev_info(scp->dev, "IPI buf addr %#010zx\n", offset);
+
+ scp->recv_buf = (struct mtk_share_obj __iomem *)
+ (scp->sram_base + offset);
+ scp->send_buf = (struct mtk_share_obj __iomem *)
+ (scp->sram_base + offset + sizeof(*scp->recv_buf));
+ memset_io(scp->recv_buf, 0, sizeof(*scp->recv_buf));
+ memset_io(scp->send_buf, 0, sizeof(*scp->send_buf));
+
+ return 0;
+}
+
+static void mt8183_scp_reset_assert(struct mtk_scp *scp)
+{
+ u32 val;
+
+ val = readl(scp->reg_base + MT8183_SW_RSTN);
+ val &= ~MT8183_SW_RSTN_BIT;
+ writel(val, scp->reg_base + MT8183_SW_RSTN);
+}
+
+static void mt8183_scp_reset_deassert(struct mtk_scp *scp)
+{
+ u32 val;
+
+ val = readl(scp->reg_base + MT8183_SW_RSTN);
+ val |= MT8183_SW_RSTN_BIT;
+ writel(val, scp->reg_base + MT8183_SW_RSTN);
+}
+
+static void mt8192_scp_reset_assert(struct mtk_scp *scp)
+{
+ writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_SET);
+}
+
+static void mt8192_scp_reset_deassert(struct mtk_scp *scp)
+{
+ writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_CLR);
+}
+
+static void mt8183_scp_irq_handler(struct mtk_scp *scp)
+{
+ u32 scp_to_host;
+
+ scp_to_host = readl(scp->reg_base + MT8183_SCP_TO_HOST);
+ if (scp_to_host & MT8183_SCP_IPC_INT_BIT)
+ scp_ipi_handler(scp);
+ else
+ scp_wdt_handler(scp, scp_to_host);
+
+ /* SCP won't send another interrupt until we set SCP_TO_HOST to 0. */
+ writel(MT8183_SCP_IPC_INT_BIT | MT8183_SCP_WDT_INT_BIT,
+ scp->reg_base + MT8183_SCP_TO_HOST);
+}
+
+static void mt8192_scp_irq_handler(struct mtk_scp *scp)
+{
+ u32 scp_to_host;
+
+ scp_to_host = readl(scp->reg_base + MT8192_SCP2APMCU_IPC_SET);
+
+ if (scp_to_host & MT8192_SCP_IPC_INT_BIT) {
+ scp_ipi_handler(scp);
+
+ /*
+ * SCP won't send another interrupt until we clear
+ * MT8192_SCP2APMCU_IPC.
+ */
+ writel(MT8192_SCP_IPC_INT_BIT,
+ scp->reg_base + MT8192_SCP2APMCU_IPC_CLR);
+ } else {
+ scp_wdt_handler(scp, scp_to_host);
+ writel(1, scp->reg_base + MT8192_CORE0_WDT_IRQ);
+ }
+}
+
+static irqreturn_t scp_irq_handler(int irq, void *priv)
+{
+ struct mtk_scp *scp = priv;
+ int ret;
+
+ ret = clk_prepare_enable(scp->clk);
+ if (ret) {
+ dev_err(scp->dev, "failed to enable clocks\n");
+ return IRQ_NONE;
+ }
+
+ scp->data->scp_irq_handler(scp);
+
+ clk_disable_unprepare(scp->clk);
+
+ return IRQ_HANDLED;
+}
+
+static int scp_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
+{
+ struct device *dev = &rproc->dev;
+ struct elf32_hdr *ehdr;
+ struct elf32_phdr *phdr;
+ int i, ret = 0;
+ const u8 *elf_data = fw->data;
+
+ ehdr = (struct elf32_hdr *)elf_data;
+ phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);
+
+ /* go through the available ELF segments */
+ for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
+ u32 da = phdr->p_paddr;
+ u32 memsz = phdr->p_memsz;
+ u32 filesz = phdr->p_filesz;
+ u32 offset = phdr->p_offset;
+ void __iomem *ptr;
+
+ dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n",
+ phdr->p_type, da, memsz, filesz);
+
+ if (phdr->p_type != PT_LOAD)
+ continue;
+ if (!filesz)
+ continue;
+
+ if (filesz > memsz) {
+ dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n",
+ filesz, memsz);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (offset + filesz > fw->size) {
+ dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n",
+ offset + filesz, fw->size);
+ ret = -EINVAL;
+ break;
+ }
+
+ /* grab the kernel address for this device address */
+ ptr = (void __iomem *)rproc_da_to_va(rproc, da, memsz, NULL);
+ if (!ptr) {
+ dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz);
+ ret = -EINVAL;
+ break;
+ }
+
+ /* put the segment where the remote processor expects it */
+ scp_memcpy_aligned(ptr, elf_data + phdr->p_offset, filesz);
+ }
+
+ return ret;
+}
+
+static int scp_elf_read_ipi_buf_addr(struct mtk_scp *scp,
+ const struct firmware *fw,
+ size_t *offset)
+{
+ struct elf32_hdr *ehdr;
+ struct elf32_shdr *shdr, *shdr_strtab;
+ int i;
+ const u8 *elf_data = fw->data;
+ const char *strtab;
+
+ ehdr = (struct elf32_hdr *)elf_data;
+ shdr = (struct elf32_shdr *)(elf_data + ehdr->e_shoff);
+ shdr_strtab = shdr + ehdr->e_shstrndx;
+ strtab = (const char *)(elf_data + shdr_strtab->sh_offset);
+
+ for (i = 0; i < ehdr->e_shnum; i++, shdr++) {
+ if (strcmp(strtab + shdr->sh_name,
+ SECTION_NAME_IPI_BUFFER) == 0) {
+ *offset = shdr->sh_addr;
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+static int mt8183_scp_clk_get(struct mtk_scp *scp)
+{
+ struct device *dev = scp->dev;
+ int ret = 0;
+
+ scp->clk = devm_clk_get(dev, "main");
+ if (IS_ERR(scp->clk)) {
+ dev_err(dev, "Failed to get clock\n");
+ ret = PTR_ERR(scp->clk);
+ }
+
+ return ret;
+}
+
+static int mt8192_scp_clk_get(struct mtk_scp *scp)
+{
+ return mt8183_scp_clk_get(scp);
+}
+
+static int mt8195_scp_clk_get(struct mtk_scp *scp)
+{
+ scp->clk = NULL;
+
+ return 0;
+}
+
+static int mt8183_scp_before_load(struct mtk_scp *scp)
+{
+ /* Clear SCP to host interrupt */
+ writel(MT8183_SCP_IPC_INT_BIT, scp->reg_base + MT8183_SCP_TO_HOST);
+
+ /* Reset clocks before loading FW */
+ writel(0x0, scp->reg_base + MT8183_SCP_CLK_SW_SEL);
+ writel(0x0, scp->reg_base + MT8183_SCP_CLK_DIV_SEL);
+
+ /* Initialize TCM before loading FW. */
+ writel(0x0, scp->reg_base + MT8183_SCP_L1_SRAM_PD);
+ writel(0x0, scp->reg_base + MT8183_SCP_TCM_TAIL_SRAM_PD);
+
+ /* Turn on the power of SCP's SRAM before using it. */
+ writel(0x0, scp->reg_base + MT8183_SCP_SRAM_PDN);
+
+ /*
+ * Set I-cache and D-cache size before loading SCP FW.
+ * SCP SRAM logical address may change when cache size setting differs.
+ */
+ writel(MT8183_SCP_CACHE_CON_WAYEN | MT8183_SCP_CACHESIZE_8KB,
+ scp->reg_base + MT8183_SCP_CACHE_CON);
+ writel(MT8183_SCP_CACHESIZE_8KB, scp->reg_base + MT8183_SCP_DCACHE_CON);
+
+ return 0;
+}
+
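+/*
+ * Power the SRAM banks on by clearing one more PDN bit per register write
+ * (from bit 31 down to 0), so the banks come up one at a time rather than
+ * all at once, presumably to limit in-rush current. Bits in @reserved_mask
+ * are always written as zero. scp_sram_power_off() walks the same sequence
+ * in reverse, setting one more bit per write.
+ */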
+static void scp_sram_power_on(void __iomem *addr, u32 reserved_mask)
+{
+ int i;
+
+ for (i = 31; i >= 0; i--)
+ writel(GENMASK(i, 0) & ~reserved_mask, addr);
+ writel(0, addr);
+}
+
+static void scp_sram_power_off(void __iomem *addr, u32 reserved_mask)
+{
+ int i;
+
+ writel(0, addr);
+ for (i = 0; i < 32; i++)
+ writel(GENMASK(i, 0) & ~reserved_mask, addr);
+}
+
+static int mt8186_scp_before_load(struct mtk_scp *scp)
+{
+ /* Clear SCP to host interrupt */
+ writel(MT8183_SCP_IPC_INT_BIT, scp->reg_base + MT8183_SCP_TO_HOST);
+
+ /* Reset clocks before loading FW */
+ writel(0x0, scp->reg_base + MT8183_SCP_CLK_SW_SEL);
+ writel(0x0, scp->reg_base + MT8183_SCP_CLK_DIV_SEL);
+
+	/*
+	 * Turn on the power of SCP's SRAM before using it.
+	 * Enable one block at a time.
+	 */
+ scp_sram_power_on(scp->reg_base + MT8183_SCP_SRAM_PDN, 0);
+
+ /* Initialize TCM before loading FW. */
+ writel(0x0, scp->reg_base + MT8183_SCP_L1_SRAM_PD);
+ writel(0x0, scp->reg_base + MT8183_SCP_TCM_TAIL_SRAM_PD);
+ writel(0x0, scp->reg_base + MT8186_SCP_L1_SRAM_PD_P1);
+	writel(0x0, scp->reg_base + MT8186_SCP_L1_SRAM_PD_P2);
+
+ /*
+ * Set I-cache and D-cache size before loading SCP FW.
+ * SCP SRAM logical address may change when cache size setting differs.
+ */
+ writel(MT8183_SCP_CACHE_CON_WAYEN | MT8183_SCP_CACHESIZE_8KB,
+ scp->reg_base + MT8183_SCP_CACHE_CON);
+ writel(MT8183_SCP_CACHESIZE_8KB, scp->reg_base + MT8183_SCP_DCACHE_CON);
+
+ return 0;
+}
+
+static int mt8192_scp_before_load(struct mtk_scp *scp)
+{
+ /* clear SPM interrupt, SCP2SPM_IPC_CLR */
+ writel(0xff, scp->reg_base + MT8192_SCP2SPM_IPC_CLR);
+
+ writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_SET);
+
+ /* enable SRAM clock */
+ scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
+ scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
+ scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
+ scp_sram_power_on(scp->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
+ scp_sram_power_on(scp->reg_base + MT8192_CPU0_SRAM_PD, 0);
+
+ /* enable MPU for all memory regions */
+ writel(0xff, scp->reg_base + MT8192_CORE0_MEM_ATT_PREDEF);
+
+ return 0;
+}
+
+static int mt8195_scp_before_load(struct mtk_scp *scp)
+{
+ /* clear SPM interrupt, SCP2SPM_IPC_CLR */
+ writel(0xff, scp->reg_base + MT8192_SCP2SPM_IPC_CLR);
+
+ writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_SET);
+
+ /* enable SRAM clock */
+ scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
+ scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
+ scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
+ scp_sram_power_on(scp->reg_base + MT8192_L1TCM_SRAM_PDN,
+ MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS);
+ scp_sram_power_on(scp->reg_base + MT8192_CPU0_SRAM_PD, 0);
+
+ /* enable MPU for all memory regions */
+ writel(0xff, scp->reg_base + MT8192_CORE0_MEM_ATT_PREDEF);
+
+ return 0;
+}
+
+static int scp_load(struct rproc *rproc, const struct firmware *fw)
+{
+ struct mtk_scp *scp = rproc->priv;
+ struct device *dev = scp->dev;
+ int ret;
+
+ ret = clk_prepare_enable(scp->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clocks\n");
+ return ret;
+ }
+
+ /* Hold SCP in reset while loading FW. */
+ scp->data->scp_reset_assert(scp);
+
+ ret = scp->data->scp_before_load(scp);
+ if (ret < 0)
+ goto leave;
+
+ ret = scp_elf_load_segments(rproc, fw);
+leave:
+ clk_disable_unprepare(scp->clk);
+
+ return ret;
+}
+
+static int scp_parse_fw(struct rproc *rproc, const struct firmware *fw)
+{
+ struct mtk_scp *scp = rproc->priv;
+ struct device *dev = scp->dev;
+ int ret;
+
+ ret = clk_prepare_enable(scp->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clocks\n");
+ return ret;
+ }
+
+ ret = scp_ipi_init(scp, fw);
+ clk_disable_unprepare(scp->clk);
+ return ret;
+}
+
+static int scp_start(struct rproc *rproc)
+{
+ struct mtk_scp *scp = (struct mtk_scp *)rproc->priv;
+ struct device *dev = scp->dev;
+ struct scp_run *run = &scp->run;
+ int ret;
+
+ ret = clk_prepare_enable(scp->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clocks\n");
+ return ret;
+ }
+
+ run->signaled = false;
+
+ scp->data->scp_reset_deassert(scp);
+
+ ret = wait_event_interruptible_timeout(
+ run->wq,
+ run->signaled,
+ msecs_to_jiffies(2000));
+
+ if (ret == 0) {
+		dev_err(dev, "timed out waiting for SCP initialization\n");
+ ret = -ETIME;
+ goto stop;
+ }
+ if (ret == -ERESTARTSYS) {
+		dev_err(dev, "wait for SCP initialization interrupted by a signal\n");
+ goto stop;
+ }
+
+ clk_disable_unprepare(scp->clk);
+ dev_info(dev, "SCP is ready. FW version %s\n", run->fw_ver);
+
+ return 0;
+
+stop:
+ scp->data->scp_reset_assert(scp);
+ clk_disable_unprepare(scp->clk);
+ return ret;
+}
+
+static void *mt8183_scp_da_to_va(struct mtk_scp *scp, u64 da, size_t len)
+{
+ int offset;
+
+ if (da < scp->sram_size) {
+ offset = da;
+ if (offset >= 0 && (offset + len) <= scp->sram_size)
+ return (void __force *)scp->sram_base + offset;
+ } else if (scp->dram_size) {
+ offset = da - scp->dma_addr;
+ if (offset >= 0 && (offset + len) <= scp->dram_size)
+ return scp->cpu_addr + offset;
+ }
+
+ return NULL;
+}
+
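+/*
+ * Unlike mt8183_scp_da_to_va() above, which treats device addresses below
+ * the SRAM size as SRAM-relative, this variant matches device addresses
+ * against the absolute physical ranges of SRAM, L1TCM and DRAM.
+ */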
+static void *mt8192_scp_da_to_va(struct mtk_scp *scp, u64 da, size_t len)
+{
+ int offset;
+
+ if (da >= scp->sram_phys &&
+ (da + len) <= scp->sram_phys + scp->sram_size) {
+ offset = da - scp->sram_phys;
+ return (void __force *)scp->sram_base + offset;
+ }
+
+ /* optional memory region */
+ if (scp->l1tcm_size &&
+ da >= scp->l1tcm_phys &&
+ (da + len) <= scp->l1tcm_phys + scp->l1tcm_size) {
+ offset = da - scp->l1tcm_phys;
+ return (void __force *)scp->l1tcm_base + offset;
+ }
+
+ /* optional memory region */
+ if (scp->dram_size &&
+ da >= scp->dma_addr &&
+ (da + len) <= scp->dma_addr + scp->dram_size) {
+ offset = da - scp->dma_addr;
+ return scp->cpu_addr + offset;
+ }
+
+ return NULL;
+}
+
+static void *scp_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
+{
+ struct mtk_scp *scp = (struct mtk_scp *)rproc->priv;
+
+ return scp->data->scp_da_to_va(scp, da, len);
+}
+
+static void mt8183_scp_stop(struct mtk_scp *scp)
+{
+ /* Disable SCP watchdog */
+ writel(0, scp->reg_base + MT8183_WDT_CFG);
+}
+
+static void mt8192_scp_stop(struct mtk_scp *scp)
+{
+ /* Disable SRAM clock */
+ scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
+ scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
+ scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
+ scp_sram_power_off(scp->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
+ scp_sram_power_off(scp->reg_base + MT8192_CPU0_SRAM_PD, 0);
+
+ /* Disable SCP watchdog */
+ writel(0, scp->reg_base + MT8192_CORE0_WDT_CFG);
+}
+
+static void mt8195_scp_stop(struct mtk_scp *scp)
+{
+ /* Disable SRAM clock */
+ scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
+ scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
+ scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
+ scp_sram_power_off(scp->reg_base + MT8192_L1TCM_SRAM_PDN,
+ MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS);
+ scp_sram_power_off(scp->reg_base + MT8192_CPU0_SRAM_PD, 0);
+
+ /* Disable SCP watchdog */
+ writel(0, scp->reg_base + MT8192_CORE0_WDT_CFG);
+}
+
+static int scp_stop(struct rproc *rproc)
+{
+ struct mtk_scp *scp = (struct mtk_scp *)rproc->priv;
+ int ret;
+
+ ret = clk_prepare_enable(scp->clk);
+ if (ret) {
+ dev_err(scp->dev, "failed to enable clocks\n");
+ return ret;
+ }
+
+ scp->data->scp_reset_assert(scp);
+ scp->data->scp_stop(scp);
+ clk_disable_unprepare(scp->clk);
+
+ return 0;
+}
+
+static const struct rproc_ops scp_ops = {
+ .start = scp_start,
+ .stop = scp_stop,
+ .load = scp_load,
+ .da_to_va = scp_da_to_va,
+ .parse_fw = scp_parse_fw,
+};
+
+/**
+ * scp_get_device() - get device struct of SCP
+ *
+ * @scp: mtk_scp structure
+ **/
+struct device *scp_get_device(struct mtk_scp *scp)
+{
+ return scp->dev;
+}
+EXPORT_SYMBOL_GPL(scp_get_device);
+
+/**
+ * scp_get_rproc() - get rproc struct of SCP
+ *
+ * @scp: mtk_scp structure
+ **/
+struct rproc *scp_get_rproc(struct mtk_scp *scp)
+{
+ return scp->rproc;
+}
+EXPORT_SYMBOL_GPL(scp_get_rproc);
+
+/**
+ * scp_get_vdec_hw_capa() - get video decoder hardware capability
+ *
+ * @scp: mtk_scp structure
+ *
+ * Return: video decoder hardware capability
+ **/
+unsigned int scp_get_vdec_hw_capa(struct mtk_scp *scp)
+{
+ return scp->run.dec_capability;
+}
+EXPORT_SYMBOL_GPL(scp_get_vdec_hw_capa);
+
+/**
+ * scp_get_venc_hw_capa() - get video encoder hardware capability
+ *
+ * @scp: mtk_scp structure
+ *
+ * Return: video encoder hardware capability
+ **/
+unsigned int scp_get_venc_hw_capa(struct mtk_scp *scp)
+{
+ return scp->run.enc_capability;
+}
+EXPORT_SYMBOL_GPL(scp_get_venc_hw_capa);
+
+/**
+ * scp_mapping_dm_addr() - map SRAM/DRAM to a kernel virtual address
+ *
+ * @scp: mtk_scp structure
+ * @mem_addr: memory address as seen by SCP
+ *
+ * Map the SCP's SRAM address, DMEM (Data Extended Memory) address,
+ * or working buffer address to a kernel virtual address.
+ *
+ * Return: ERR_PTR(-EINVAL) if the mapping failed,
+ * otherwise the mapped kernel virtual address
+ **/
+void *scp_mapping_dm_addr(struct mtk_scp *scp, u32 mem_addr)
+{
+ void *ptr;
+
+ ptr = scp_da_to_va(scp->rproc, mem_addr, 0, NULL);
+ if (!ptr)
+ return ERR_PTR(-EINVAL);
+
+ return ptr;
+}
+EXPORT_SYMBOL_GPL(scp_mapping_dm_addr);
+
+static int scp_map_memory_region(struct mtk_scp *scp)
+{
+ int ret;
+
+ ret = of_reserved_mem_device_init(scp->dev);
+
+ /* reserved memory is optional. */
+ if (ret == -ENODEV) {
+ dev_info(scp->dev, "skipping reserved memory initialization.");
+ return 0;
+ }
+
+ if (ret) {
+ dev_err(scp->dev, "failed to assign memory-region: %d\n", ret);
+ return -ENOMEM;
+ }
+
+ /* Reserved SCP code size */
+ scp->dram_size = MAX_CODE_SIZE;
+ scp->cpu_addr = dma_alloc_coherent(scp->dev, scp->dram_size,
+ &scp->dma_addr, GFP_KERNEL);
+ if (!scp->cpu_addr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void scp_unmap_memory_region(struct mtk_scp *scp)
+{
+ if (scp->dram_size == 0)
+ return;
+
+ dma_free_coherent(scp->dev, scp->dram_size, scp->cpu_addr,
+ scp->dma_addr);
+ of_reserved_mem_device_release(scp->dev);
+}
+
+static int scp_register_ipi(struct platform_device *pdev, u32 id,
+ ipi_handler_t handler, void *priv)
+{
+ struct mtk_scp *scp = platform_get_drvdata(pdev);
+
+ return scp_ipi_register(scp, id, handler, priv);
+}
+
+static void scp_unregister_ipi(struct platform_device *pdev, u32 id)
+{
+ struct mtk_scp *scp = platform_get_drvdata(pdev);
+
+ scp_ipi_unregister(scp, id);
+}
+
+static int scp_send_ipi(struct platform_device *pdev, u32 id, void *buf,
+ unsigned int len, unsigned int wait)
+{
+ struct mtk_scp *scp = platform_get_drvdata(pdev);
+
+ return scp_ipi_send(scp, id, buf, len, wait);
+}
+
+static struct mtk_rpmsg_info mtk_scp_rpmsg_info = {
+ .send_ipi = scp_send_ipi,
+ .register_ipi = scp_register_ipi,
+ .unregister_ipi = scp_unregister_ipi,
+ .ns_ipi_id = SCP_IPI_NS_SERVICE,
+};
+
+static void scp_add_rpmsg_subdev(struct mtk_scp *scp)
+{
+ scp->rpmsg_subdev =
+ mtk_rpmsg_create_rproc_subdev(to_platform_device(scp->dev),
+ &mtk_scp_rpmsg_info);
+ if (scp->rpmsg_subdev)
+ rproc_add_subdev(scp->rproc, scp->rpmsg_subdev);
+}
+
+static void scp_remove_rpmsg_subdev(struct mtk_scp *scp)
+{
+ if (scp->rpmsg_subdev) {
+ rproc_remove_subdev(scp->rproc, scp->rpmsg_subdev);
+ mtk_rpmsg_destroy_rproc_subdev(scp->rpmsg_subdev);
+ scp->rpmsg_subdev = NULL;
+ }
+}
+
+static int scp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct mtk_scp *scp;
+ struct rproc *rproc;
+ struct resource *res;
+ const char *fw_name = "scp.img";
+ int ret, i;
+
+ ret = rproc_of_parse_firmware(dev, 0, &fw_name);
+ if (ret < 0 && ret != -EINVAL)
+ return ret;
+
+ rproc = devm_rproc_alloc(dev, np->name, &scp_ops, fw_name, sizeof(*scp));
+ if (!rproc)
+ return dev_err_probe(dev, -ENOMEM, "unable to allocate remoteproc\n");
+
+ scp = (struct mtk_scp *)rproc->priv;
+ scp->rproc = rproc;
+ scp->dev = dev;
+ scp->data = of_device_get_match_data(dev);
+ platform_set_drvdata(pdev, scp);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
+ scp->sram_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(scp->sram_base))
+ return dev_err_probe(dev, PTR_ERR(scp->sram_base),
+ "Failed to parse and map sram memory\n");
+
+ scp->sram_size = resource_size(res);
+ scp->sram_phys = res->start;
+
+ /* l1tcm is an optional memory region */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "l1tcm");
+ scp->l1tcm_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(scp->l1tcm_base)) {
+ ret = PTR_ERR(scp->l1tcm_base);
+ if (ret != -EINVAL) {
+ return dev_err_probe(dev, ret, "Failed to map l1tcm memory\n");
+ }
+ } else {
+ scp->l1tcm_size = resource_size(res);
+ scp->l1tcm_phys = res->start;
+ }
+
+ scp->reg_base = devm_platform_ioremap_resource_byname(pdev, "cfg");
+ if (IS_ERR(scp->reg_base))
+ return dev_err_probe(dev, PTR_ERR(scp->reg_base),
+ "Failed to parse and map cfg memory\n");
+
+ ret = scp->data->scp_clk_get(scp);
+ if (ret)
+ return ret;
+
+ ret = scp_map_memory_region(scp);
+ if (ret)
+ return ret;
+
+ mutex_init(&scp->send_lock);
+ for (i = 0; i < SCP_IPI_MAX; i++)
+ mutex_init(&scp->ipi_desc[i].lock);
+
+ /* register SCP initialization IPI */
+ ret = scp_ipi_register(scp, SCP_IPI_INIT, scp_init_ipi_handler, scp);
+ if (ret) {
+ dev_err(dev, "Failed to register IPI_SCP_INIT\n");
+ goto release_dev_mem;
+ }
+
+ init_waitqueue_head(&scp->run.wq);
+ init_waitqueue_head(&scp->ack_wq);
+
+ scp_add_rpmsg_subdev(scp);
+
+ ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0), NULL,
+ scp_irq_handler, IRQF_ONESHOT,
+ pdev->name, scp);
+
+ if (ret) {
+ dev_err(dev, "failed to request irq\n");
+ goto remove_subdev;
+ }
+
+ ret = rproc_add(rproc);
+ if (ret)
+ goto remove_subdev;
+
+ return 0;
+
+remove_subdev:
+ scp_remove_rpmsg_subdev(scp);
+ scp_ipi_unregister(scp, SCP_IPI_INIT);
+release_dev_mem:
+ scp_unmap_memory_region(scp);
+ for (i = 0; i < SCP_IPI_MAX; i++)
+ mutex_destroy(&scp->ipi_desc[i].lock);
+ mutex_destroy(&scp->send_lock);
+
+ return ret;
+}
+
+static int scp_remove(struct platform_device *pdev)
+{
+ struct mtk_scp *scp = platform_get_drvdata(pdev);
+ int i;
+
+ rproc_del(scp->rproc);
+ scp_remove_rpmsg_subdev(scp);
+ scp_ipi_unregister(scp, SCP_IPI_INIT);
+ scp_unmap_memory_region(scp);
+ for (i = 0; i < SCP_IPI_MAX; i++)
+ mutex_destroy(&scp->ipi_desc[i].lock);
+ mutex_destroy(&scp->send_lock);
+
+ return 0;
+}
+
+static const struct mtk_scp_of_data mt8183_of_data = {
+ .scp_clk_get = mt8183_scp_clk_get,
+ .scp_before_load = mt8183_scp_before_load,
+ .scp_irq_handler = mt8183_scp_irq_handler,
+ .scp_reset_assert = mt8183_scp_reset_assert,
+ .scp_reset_deassert = mt8183_scp_reset_deassert,
+ .scp_stop = mt8183_scp_stop,
+ .scp_da_to_va = mt8183_scp_da_to_va,
+ .host_to_scp_reg = MT8183_HOST_TO_SCP,
+ .host_to_scp_int_bit = MT8183_HOST_IPC_INT_BIT,
+ .ipi_buf_offset = 0x7bdb0,
+};
+
+static const struct mtk_scp_of_data mt8186_of_data = {
+ .scp_clk_get = mt8195_scp_clk_get,
+ .scp_before_load = mt8186_scp_before_load,
+ .scp_irq_handler = mt8183_scp_irq_handler,
+ .scp_reset_assert = mt8183_scp_reset_assert,
+ .scp_reset_deassert = mt8183_scp_reset_deassert,
+ .scp_stop = mt8183_scp_stop,
+ .scp_da_to_va = mt8183_scp_da_to_va,
+ .host_to_scp_reg = MT8183_HOST_TO_SCP,
+ .host_to_scp_int_bit = MT8183_HOST_IPC_INT_BIT,
+ .ipi_buf_offset = 0x3bdb0,
+};
+
+static const struct mtk_scp_of_data mt8188_of_data = {
+ .scp_clk_get = mt8195_scp_clk_get,
+ .scp_before_load = mt8192_scp_before_load,
+ .scp_irq_handler = mt8192_scp_irq_handler,
+ .scp_reset_assert = mt8192_scp_reset_assert,
+ .scp_reset_deassert = mt8192_scp_reset_deassert,
+ .scp_stop = mt8192_scp_stop,
+ .scp_da_to_va = mt8192_scp_da_to_va,
+ .host_to_scp_reg = MT8192_GIPC_IN_SET,
+ .host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
+};
+
+static const struct mtk_scp_of_data mt8192_of_data = {
+ .scp_clk_get = mt8192_scp_clk_get,
+ .scp_before_load = mt8192_scp_before_load,
+ .scp_irq_handler = mt8192_scp_irq_handler,
+ .scp_reset_assert = mt8192_scp_reset_assert,
+ .scp_reset_deassert = mt8192_scp_reset_deassert,
+ .scp_stop = mt8192_scp_stop,
+ .scp_da_to_va = mt8192_scp_da_to_va,
+ .host_to_scp_reg = MT8192_GIPC_IN_SET,
+ .host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
+};
+
+static const struct mtk_scp_of_data mt8195_of_data = {
+ .scp_clk_get = mt8195_scp_clk_get,
+ .scp_before_load = mt8195_scp_before_load,
+ .scp_irq_handler = mt8192_scp_irq_handler,
+ .scp_reset_assert = mt8192_scp_reset_assert,
+ .scp_reset_deassert = mt8192_scp_reset_deassert,
+ .scp_stop = mt8195_scp_stop,
+ .scp_da_to_va = mt8192_scp_da_to_va,
+ .host_to_scp_reg = MT8192_GIPC_IN_SET,
+ .host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
+};
+
+static const struct of_device_id mtk_scp_of_match[] = {
+ { .compatible = "mediatek,mt8183-scp", .data = &mt8183_of_data },
+ { .compatible = "mediatek,mt8186-scp", .data = &mt8186_of_data },
+ { .compatible = "mediatek,mt8188-scp", .data = &mt8188_of_data },
+ { .compatible = "mediatek,mt8192-scp", .data = &mt8192_of_data },
+ { .compatible = "mediatek,mt8195-scp", .data = &mt8195_of_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtk_scp_of_match);
+
+static struct platform_driver mtk_scp_driver = {
+ .probe = scp_probe,
+ .remove = scp_remove,
+ .driver = {
+ .name = "mtk-scp",
+ .of_match_table = mtk_scp_of_match,
+ },
+};
+
+module_platform_driver(mtk_scp_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MediaTek SCP control driver");
diff --git a/drivers/remoteproc/mtk_scp_ipi.c b/drivers/remoteproc/mtk_scp_ipi.c
new file mode 100644
index 000000000..4c0d121c2
--- /dev/null
+++ b/drivers/remoteproc/mtk_scp_ipi.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2019 MediaTek Inc.
+
+#include <asm/barrier.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc/mtk_scp.h>
+
+#include "mtk_common.h"
+
+/**
+ * scp_ipi_register() - register an ipi function
+ *
+ * @scp: mtk_scp structure
+ * @id: IPI ID
+ * @handler: IPI handler
+ * @priv: private data for IPI handler
+ *
+ * Register an ipi function to receive ipi interrupt from SCP.
+ *
+ * Return: 0 if the ipi handler is registered successfully, a negative
+ * error code otherwise.
+ */
+int scp_ipi_register(struct mtk_scp *scp,
+ u32 id,
+ scp_ipi_handler_t handler,
+ void *priv)
+{
+ if (!scp)
+ return -EPROBE_DEFER;
+
+ if (WARN_ON(id >= SCP_IPI_MAX) || WARN_ON(handler == NULL))
+ return -EINVAL;
+
+ scp_ipi_lock(scp, id);
+ scp->ipi_desc[id].handler = handler;
+ scp->ipi_desc[id].priv = priv;
+ scp_ipi_unlock(scp, id);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(scp_ipi_register);
+
+/**
+ * scp_ipi_unregister() - unregister an ipi function
+ *
+ * @scp: mtk_scp structure
+ * @id: IPI ID
+ *
+ * Unregister an ipi function so that it no longer receives ipi
+ * interrupts from SCP.
+ */
+void scp_ipi_unregister(struct mtk_scp *scp, u32 id)
+{
+ if (!scp)
+ return;
+
+ if (WARN_ON(id >= SCP_IPI_MAX))
+ return;
+
+ scp_ipi_lock(scp, id);
+ scp->ipi_desc[id].handler = NULL;
+ scp->ipi_desc[id].priv = NULL;
+ scp_ipi_unlock(scp, id);
+}
+EXPORT_SYMBOL_GPL(scp_ipi_unregister);
+
+/*
+ * scp_memcpy_aligned() - Copy src to dst, where dst is in SCP SRAM region.
+ *
+ * @dst: Pointer to the destination buffer, should be in SCP SRAM region.
+ * @src: Pointer to the source buffer.
+ * @len: Length of the source buffer to be copied.
+ *
+ * Since AP accesses to SCP SRAM don't support byte writes, this always
+ * writes a full word at a time, which may cause some extra bytes to be
+ * written at the beginning and end of dst.
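+ *
+ * Worked example: copying len = 9 bytes to a dst at byte offset 2 within a
+ * word results in one read-modify-write of the head word (2 bytes), one
+ * aligned 32-bit word copy (4 bytes) and one read-modify-write of the tail
+ * word (3 bytes).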
+ */
+void scp_memcpy_aligned(void __iomem *dst, const void *src, unsigned int len)
+{
+ void __iomem *ptr;
+ u32 val;
+ unsigned int i = 0, remain;
+
+ if (!IS_ALIGNED((unsigned long)dst, 4)) {
+ ptr = (void __iomem *)ALIGN_DOWN((unsigned long)dst, 4);
+ i = 4 - (dst - ptr);
+ val = readl_relaxed(ptr);
+ memcpy((u8 *)&val + (4 - i), src, i);
+ writel_relaxed(val, ptr);
+ }
+
+ __iowrite32_copy(dst + i, src + i, (len - i) / 4);
+ remain = (len - i) % 4;
+
+ if (remain > 0) {
+ val = readl_relaxed(dst + len - remain);
+ memcpy(&val, src + len - remain, remain);
+ writel_relaxed(val, dst + len - remain);
+ }
+}
+EXPORT_SYMBOL_GPL(scp_memcpy_aligned);
+
+/**
+ * scp_ipi_lock() - Lock before operations of an IPI ID
+ *
+ * @scp: mtk_scp structure
+ * @id: IPI ID
+ *
+ * Note: This should not be used by drivers other than mtk_scp.
+ */
+void scp_ipi_lock(struct mtk_scp *scp, u32 id)
+{
+ if (WARN_ON(id >= SCP_IPI_MAX))
+ return;
+ mutex_lock(&scp->ipi_desc[id].lock);
+}
+EXPORT_SYMBOL_GPL(scp_ipi_lock);
+
+/**
+ * scp_ipi_unlock() - Unlock after operations of an IPI ID
+ *
+ * @scp: mtk_scp structure
+ * @id: IPI ID
+ *
+ * Note: This should not be used by drivers other than mtk_scp.
+ */
+void scp_ipi_unlock(struct mtk_scp *scp, u32 id)
+{
+ if (WARN_ON(id >= SCP_IPI_MAX))
+ return;
+ mutex_unlock(&scp->ipi_desc[id].lock);
+}
+EXPORT_SYMBOL_GPL(scp_ipi_unlock);
+
+/**
+ * scp_ipi_send() - send data from AP to scp.
+ *
+ * @scp: mtk_scp structure
+ * @id: IPI ID
+ * @buf: the data buffer
+ * @len: the data buffer length
+ * @wait: number of msecs to wait for ack. 0 to skip waiting.
+ *
+ * This function is thread-safe. When this function returns,
+ * SCP has received the data and started processing it.
+ * When the processing completes, the IPI handler registered
+ * by scp_ipi_register() will be called in interrupt context.
+ *
+ * Return: 0 if the data is sent successfully, a negative error code on error.
+ **/
+int scp_ipi_send(struct mtk_scp *scp, u32 id, void *buf, unsigned int len,
+ unsigned int wait)
+{
+ struct mtk_share_obj __iomem *send_obj = scp->send_buf;
+ unsigned long timeout;
+ int ret;
+
+ if (WARN_ON(id <= SCP_IPI_INIT) || WARN_ON(id >= SCP_IPI_MAX) ||
+ WARN_ON(id == SCP_IPI_NS_SERVICE) ||
+ WARN_ON(len > sizeof(send_obj->share_buf)) || WARN_ON(!buf))
+ return -EINVAL;
+
+ ret = clk_prepare_enable(scp->clk);
+ if (ret) {
+ dev_err(scp->dev, "failed to enable clock\n");
+ return ret;
+ }
+
+ mutex_lock(&scp->send_lock);
+
+ /* Wait until SCP receives the last command */
+ timeout = jiffies + msecs_to_jiffies(2000);
+ do {
+ if (time_after(jiffies, timeout)) {
+ dev_err(scp->dev, "%s: IPI timeout!\n", __func__);
+ ret = -ETIMEDOUT;
+ goto unlock_mutex;
+ }
+ } while (readl(scp->reg_base + scp->data->host_to_scp_reg));
+
+ scp_memcpy_aligned(send_obj->share_buf, buf, len);
+
+ writel(len, &send_obj->len);
+ writel(id, &send_obj->id);
+
+ scp->ipi_id_ack[id] = false;
+ /* send the command to SCP */
+ writel(scp->data->host_to_scp_int_bit,
+ scp->reg_base + scp->data->host_to_scp_reg);
+
+ if (wait) {
+ /* wait for SCP's ACK */
+ timeout = msecs_to_jiffies(wait);
+ ret = wait_event_timeout(scp->ack_wq,
+ scp->ipi_id_ack[id],
+ timeout);
+ scp->ipi_id_ack[id] = false;
+		if (WARN(!ret, "scp ipi %d ack timeout!", id))
+ ret = -EIO;
+ else
+ ret = 0;
+ }
+
+unlock_mutex:
+ mutex_unlock(&scp->send_lock);
+ clk_disable_unprepare(scp->clk);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(scp_ipi_send);
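+
+/*
+ * Example usage (editorial sketch; MY_IPI_ID, my_handler and req are
+ * hypothetical): a client registers a handler, then sends a request and
+ * waits up to 100 ms for the SCP's ack:
+ *
+ *	ret = scp_ipi_register(scp, MY_IPI_ID, my_handler, priv);
+ *	if (ret)
+ *		return ret;
+ *
+ *	ret = scp_ipi_send(scp, MY_IPI_ID, &req, sizeof(req), 100);
+ */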
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MediaTek scp IPI interface");
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
new file mode 100644
index 000000000..430fab026
--- /dev/null
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -0,0 +1,1396 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * OMAP Remote Processor driver
+ *
+ * Copyright (C) 2011-2020 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Ohad Ben-Cohen <ohad@wizery.com>
+ * Brian Swetland <swetland@google.com>
+ * Fernando Guzman Lugo <fernando.lugo@ti.com>
+ * Mark Grosen <mgrosen@ti.com>
+ * Suman Anna <s-anna@ti.com>
+ * Hari Kanigeri <h-kanigeri2@ti.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/clk/ti.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/remoteproc.h>
+#include <linux/mailbox_client.h>
+#include <linux/omap-iommu.h>
+#include <linux/omap-mailbox.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/reset.h>
+#include <clocksource/timer-ti-dm.h>
+
+#include <linux/platform_data/dmtimer-omap.h>
+
+#include "omap_remoteproc.h"
+#include "remoteproc_internal.h"
+
+/* default auto-suspend delay (ms) */
+#define DEFAULT_AUTOSUSPEND_DELAY 10000
+
+/**
+ * struct omap_rproc_boot_data - boot data structure for the DSP omap rprocs
+ * @syscon: regmap handle for the system control configuration module
+ * @boot_reg: boot register offset within the @syscon regmap
+ * @boot_reg_shift: bit-field shift required for the boot address value in
+ * @boot_reg
+ */
+struct omap_rproc_boot_data {
+ struct regmap *syscon;
+ unsigned int boot_reg;
+ unsigned int boot_reg_shift;
+};
+
+/**
+ * struct omap_rproc_mem - internal memory structure
+ * @cpu_addr: MPU virtual address of the memory region
+ * @bus_addr: bus address used to access the memory region
+ * @dev_addr: device address of the memory region from DSP view
+ * @size: size of the memory region
+ */
+struct omap_rproc_mem {
+ void __iomem *cpu_addr;
+ phys_addr_t bus_addr;
+ u32 dev_addr;
+ size_t size;
+};
+
+/**
+ * struct omap_rproc_timer - data structure for a timer used by an omap rproc
+ * @odt: timer pointer
+ * @timer_ops: OMAP dmtimer ops for @odt timer
+ * @irq: timer irq
+ */
+struct omap_rproc_timer {
+ struct omap_dm_timer *odt;
+ const struct omap_dm_timer_ops *timer_ops;
+ int irq;
+};
+
+/**
+ * struct omap_rproc - omap remote processor state
+ * @mbox: mailbox channel handle
+ * @client: mailbox client to request the mailbox channel
+ * @boot_data: boot data structure for setting processor boot address
+ * @mem: internal memory regions data
+ * @num_mems: number of internal memory regions
+ * @num_timers: number of rproc timer(s)
+ * @num_wd_timers: number of rproc watchdog timers
+ * @timers: timer(s) info used by rproc
+ * @autosuspend_delay: auto-suspend delay value to be used for runtime pm
+ * @need_resume: if true a resume is needed in the system resume callback
+ * @rproc: rproc handle
+ * @reset: reset handle
+ * @pm_comp: completion primitive to sync for suspend response
+ * @fck: functional clock for the remoteproc
+ * @suspend_acked: state machine flag to store the suspend request ack
+ */
+struct omap_rproc {
+ struct mbox_chan *mbox;
+ struct mbox_client client;
+ struct omap_rproc_boot_data *boot_data;
+ struct omap_rproc_mem *mem;
+ int num_mems;
+ int num_timers;
+ int num_wd_timers;
+ struct omap_rproc_timer *timers;
+ int autosuspend_delay;
+ bool need_resume;
+ struct rproc *rproc;
+ struct reset_control *reset;
+ struct completion pm_comp;
+ struct clk *fck;
+ bool suspend_acked;
+};
+
+/**
+ * struct omap_rproc_mem_data - memory definitions for an omap remote processor
+ * @name: name for this memory entry
+ * @dev_addr: device address for the memory entry
+ */
+struct omap_rproc_mem_data {
+ const char *name;
+ const u32 dev_addr;
+};
+
+/**
+ * struct omap_rproc_dev_data - device data for the omap remote processor
+ * @device_name: device name of the remote processor
+ * @mems: memory definitions for this remote processor
+ */
+struct omap_rproc_dev_data {
+ const char *device_name;
+ const struct omap_rproc_mem_data *mems;
+};
+
+/**
+ * omap_rproc_request_timer() - request a timer for a remoteproc
+ * @dev: device requesting the timer
+ * @np: device node pointer to the desired timer
+ * @timer: handle to a struct omap_rproc_timer to return the timer handle
+ *
+ * This helper function is used primarily to request a timer associated with
+ * a remoteproc. The returned handle is stored in the .odt field of the
+ * @timer structure passed in, and is used to invoke other timer specific
+ * ops (like starting a timer either during device initialization or during
+ * a resume operation, or for stopping/freeing a timer).
+ *
+ * Return: 0 on success, otherwise an appropriate failure
+ */
+static int omap_rproc_request_timer(struct device *dev, struct device_node *np,
+ struct omap_rproc_timer *timer)
+{
+ int ret;
+
+ timer->odt = timer->timer_ops->request_by_node(np);
+ if (!timer->odt) {
+ dev_err(dev, "request for timer node %p failed\n", np);
+ return -EBUSY;
+ }
+
+ ret = timer->timer_ops->set_source(timer->odt, OMAP_TIMER_SRC_SYS_CLK);
+ if (ret) {
+ dev_err(dev, "error setting OMAP_TIMER_SRC_SYS_CLK as source for timer node %p\n",
+ np);
+ timer->timer_ops->free(timer->odt);
+ return ret;
+ }
+
+ /* clean counter, remoteproc code will set the value */
+ timer->timer_ops->set_load(timer->odt, 0);
+
+ return 0;
+}
+
+/**
+ * omap_rproc_start_timer() - start a timer for a remoteproc
+ * @timer: handle to a OMAP rproc timer
+ *
+ * This helper function is used to start a timer associated with a remoteproc,
+ * obtained using the request_timer ops. The helper function needs to be
+ * invoked by the driver to start the timer (during device initialization)
+ * or to just resume the timer.
+ *
+ * Return: 0 on success, otherwise a failure as returned by DMTimer ops
+ */
+static inline int omap_rproc_start_timer(struct omap_rproc_timer *timer)
+{
+ return timer->timer_ops->start(timer->odt);
+}
+
+/**
+ * omap_rproc_stop_timer() - stop a timer for a remoteproc
+ * @timer: handle to a OMAP rproc timer
+ *
+ * This helper function is used to disable a timer associated with a
+ * remoteproc, and needs to be called either during a device shutdown
+ * or suspend operation. The separate helper function allows the driver
+ * to just stop a timer without having to release the timer during a
+ * suspend operation.
+ *
+ * Return: 0 on success, otherwise a failure as returned by DMTimer ops
+ */
+static inline int omap_rproc_stop_timer(struct omap_rproc_timer *timer)
+{
+ return timer->timer_ops->stop(timer->odt);
+}
+
+/**
+ * omap_rproc_release_timer() - release a timer for a remoteproc
+ * @timer: handle to a OMAP rproc timer
+ *
+ * This helper function is used primarily to release a timer associated
+ * with a remoteproc. The dmtimer will be available for other clients to
+ * use once released.
+ *
+ * Return: 0 on success, otherwise a failure as returned by DMTimer ops
+ */
+static inline int omap_rproc_release_timer(struct omap_rproc_timer *timer)
+{
+ return timer->timer_ops->free(timer->odt);
+}
+
+/**
+ * omap_rproc_get_timer_irq() - get the irq for a timer
+ * @timer: handle to a OMAP rproc timer
+ *
+ * This function is used to get the irq associated with a watchdog timer. The
+ * function is called by the OMAP remoteproc driver to register an interrupt
+ * handler to handle watchdog events on the remote processor.
+ *
+ * Return: irq id on success, otherwise a failure as returned by DMTimer ops
+ */
+static inline int omap_rproc_get_timer_irq(struct omap_rproc_timer *timer)
+{
+ return timer->timer_ops->get_irq(timer->odt);
+}
+
+/**
+ * omap_rproc_ack_timer_irq() - acknowledge a timer irq
+ * @timer: handle to a OMAP rproc timer
+ *
+ * This function is used to clear the irq associated with a watchdog timer.
+ * The function is called by the OMAP remoteproc upon a watchdog event on the
+ * remote processor to clear the interrupt status of the watchdog timer.
+ */
+static inline void omap_rproc_ack_timer_irq(struct omap_rproc_timer *timer)
+{
+ timer->timer_ops->write_status(timer->odt, OMAP_TIMER_INT_OVERFLOW);
+}
+
+/**
+ * omap_rproc_watchdog_isr() - Watchdog ISR handler for remoteproc device
+ * @irq: IRQ number associated with a watchdog timer
+ * @data: IRQ handler data
+ *
+ * This ISR routine executes the required necessary low-level code to
+ * acknowledge a watchdog timer interrupt. There can be multiple watchdog
+ * timers associated with a rproc (like IPUs which have 2 watchdog timers,
+ * one per Cortex M3/M4 core), so a lookup has to be performed to identify
+ * the timer to acknowledge its interrupt.
+ *
+ * The function also invokes rproc_report_crash to report the watchdog event
+ * to the remoteproc driver core, to trigger a recovery.
+ *
+ * Return: IRQ_HANDLED on success, otherwise IRQ_NONE
+ */
+static irqreturn_t omap_rproc_watchdog_isr(int irq, void *data)
+{
+ struct rproc *rproc = data;
+ struct omap_rproc *oproc = rproc->priv;
+ struct device *dev = rproc->dev.parent;
+ struct omap_rproc_timer *timers = oproc->timers;
+ struct omap_rproc_timer *wd_timer = NULL;
+ int num_timers = oproc->num_timers + oproc->num_wd_timers;
+ int i;
+
+ for (i = oproc->num_timers; i < num_timers; i++) {
+ if (timers[i].irq > 0 && irq == timers[i].irq) {
+ wd_timer = &timers[i];
+ break;
+ }
+ }
+
+ if (!wd_timer) {
+ dev_err(dev, "invalid timer\n");
+ return IRQ_NONE;
+ }
+
+ omap_rproc_ack_timer_irq(wd_timer);
+
+ rproc_report_crash(rproc, RPROC_WATCHDOG);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * omap_rproc_enable_timers() - enable the timers for a remoteproc
+ * @rproc: handle of a remote processor
+ * @configure: boolean flag used to acquire and configure the timer handle
+ *
+ * This function is used primarily to enable the timers associated with
+ * a remoteproc. The configure flag is provided to allow the driver
+ * to either acquire and start a timer (during device initialization) or
+ * to just start a timer (during a resume operation).
+ *
+ * Return: 0 on success, otherwise an appropriate failure
+ */
+static int omap_rproc_enable_timers(struct rproc *rproc, bool configure)
+{
+ int i;
+ int ret = 0;
+ struct platform_device *tpdev;
+ struct dmtimer_platform_data *tpdata;
+ const struct omap_dm_timer_ops *timer_ops;
+ struct omap_rproc *oproc = rproc->priv;
+ struct omap_rproc_timer *timers = oproc->timers;
+ struct device *dev = rproc->dev.parent;
+ struct device_node *np = NULL;
+ int num_timers = oproc->num_timers + oproc->num_wd_timers;
+
+ if (!num_timers)
+ return 0;
+
+ if (!configure)
+ goto start_timers;
+
+ for (i = 0; i < num_timers; i++) {
+ if (i < oproc->num_timers)
+ np = of_parse_phandle(dev->of_node, "ti,timers", i);
+ else
+ np = of_parse_phandle(dev->of_node,
+ "ti,watchdog-timers",
+ (i - oproc->num_timers));
+ if (!np) {
+ ret = -ENXIO;
+ dev_err(dev, "device node lookup for timer at index %d failed: %d\n",
+ i < oproc->num_timers ? i :
+ i - oproc->num_timers, ret);
+ goto free_timers;
+ }
+
+ tpdev = of_find_device_by_node(np);
+ if (!tpdev) {
+ ret = -ENODEV;
+ dev_err(dev, "could not get timer platform device\n");
+ goto put_node;
+ }
+
+ tpdata = dev_get_platdata(&tpdev->dev);
+ put_device(&tpdev->dev);
+ if (!tpdata) {
+ ret = -EINVAL;
+ dev_err(dev, "dmtimer pdata structure NULL\n");
+ goto put_node;
+ }
+
+ timer_ops = tpdata->timer_ops;
+ if (!timer_ops || !timer_ops->request_by_node ||
+ !timer_ops->set_source || !timer_ops->set_load ||
+ !timer_ops->free || !timer_ops->start ||
+ !timer_ops->stop || !timer_ops->get_irq ||
+ !timer_ops->write_status) {
+ ret = -EINVAL;
+ dev_err(dev, "device does not have required timer ops\n");
+ goto put_node;
+ }
+
+ timers[i].irq = -1;
+ timers[i].timer_ops = timer_ops;
+ ret = omap_rproc_request_timer(dev, np, &timers[i]);
+ if (ret) {
+ dev_err(dev, "request for timer %p failed: %d\n", np,
+ ret);
+ goto put_node;
+ }
+ of_node_put(np);
+
+ if (i >= oproc->num_timers) {
+ timers[i].irq = omap_rproc_get_timer_irq(&timers[i]);
+ if (timers[i].irq < 0) {
+ dev_err(dev, "get_irq for timer %p failed: %d\n",
+ np, timers[i].irq);
+ ret = -EBUSY;
+ goto free_timers;
+ }
+
+ ret = request_irq(timers[i].irq,
+ omap_rproc_watchdog_isr, IRQF_SHARED,
+ "rproc-wdt", rproc);
+ if (ret) {
+ dev_err(dev, "error requesting irq for timer %p\n",
+ np);
+ omap_rproc_release_timer(&timers[i]);
+ timers[i].odt = NULL;
+ timers[i].timer_ops = NULL;
+ timers[i].irq = -1;
+ goto free_timers;
+ }
+ }
+ }
+
+start_timers:
+ for (i = 0; i < num_timers; i++) {
+ ret = omap_rproc_start_timer(&timers[i]);
+ if (ret) {
+			dev_err(dev, "start timer %p failed: %d\n", np,
+ ret);
+ break;
+ }
+ }
+ if (ret) {
+ while (i >= 0) {
+ omap_rproc_stop_timer(&timers[i]);
+ i--;
+ }
+ goto put_node;
+ }
+ return 0;
+
+put_node:
+ if (configure)
+ of_node_put(np);
+free_timers:
+ while (i--) {
+ if (i >= oproc->num_timers)
+ free_irq(timers[i].irq, rproc);
+ omap_rproc_release_timer(&timers[i]);
+ timers[i].odt = NULL;
+ timers[i].timer_ops = NULL;
+ timers[i].irq = -1;
+ }
+
+ return ret;
+}
+
+/**
+ * omap_rproc_disable_timers() - disable the timers for a remoteproc
+ * @rproc: handle of a remote processor
+ * @configure: boolean flag used to release the timer handle
+ *
+ * This function is used primarily to disable the timers associated with
+ * a remoteproc. The configure flag is provided to allow the driver
+ * to either stop and release a timer (during device shutdown) or to just
+ * stop a timer (during a suspend operation).
+ *
+ * Return: 0 on success or no timers
+ */
+static int omap_rproc_disable_timers(struct rproc *rproc, bool configure)
+{
+ int i;
+ struct omap_rproc *oproc = rproc->priv;
+ struct omap_rproc_timer *timers = oproc->timers;
+ int num_timers = oproc->num_timers + oproc->num_wd_timers;
+
+ if (!num_timers)
+ return 0;
+
+ for (i = 0; i < num_timers; i++) {
+ omap_rproc_stop_timer(&timers[i]);
+ if (configure) {
+ if (i >= oproc->num_timers)
+ free_irq(timers[i].irq, rproc);
+ omap_rproc_release_timer(&timers[i]);
+ timers[i].odt = NULL;
+ timers[i].timer_ops = NULL;
+ timers[i].irq = -1;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * omap_rproc_mbox_callback() - inbound mailbox message handler
+ * @client: mailbox client pointer used for requesting the mailbox channel
+ * @data: mailbox payload
+ *
+ * This handler is invoked by omap's mailbox driver whenever a mailbox
+ * message is received. Usually, the mailbox payload simply contains
+ * the index of the virtqueue that is kicked by the remote processor,
+ * and we let remoteproc core handle it.
+ *
+ * In addition to virtqueue indices, we also have some out-of-band values
+ * that indicate different events. Those values are deliberately very
+ * big so they don't coincide with virtqueue indices.
+ */
+static void omap_rproc_mbox_callback(struct mbox_client *client, void *data)
+{
+ struct omap_rproc *oproc = container_of(client, struct omap_rproc,
+ client);
+ struct device *dev = oproc->rproc->dev.parent;
+ const char *name = oproc->rproc->name;
+ u32 msg = (u32)data;
+
+ dev_dbg(dev, "mbox msg: 0x%x\n", msg);
+
+ switch (msg) {
+ case RP_MBOX_CRASH:
+ /*
+ * remoteproc detected an exception, notify the rproc core.
+ * The remoteproc core will handle the recovery.
+ */
+ dev_err(dev, "omap rproc %s crashed\n", name);
+ rproc_report_crash(oproc->rproc, RPROC_FATAL_ERROR);
+ break;
+ case RP_MBOX_ECHO_REPLY:
+ dev_info(dev, "received echo reply from %s\n", name);
+ break;
+ case RP_MBOX_SUSPEND_ACK:
+ case RP_MBOX_SUSPEND_CANCEL:
+ oproc->suspend_acked = msg == RP_MBOX_SUSPEND_ACK;
+ complete(&oproc->pm_comp);
+ break;
+ default:
+ if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
+ return;
+ if (msg > oproc->rproc->max_notifyid) {
+ dev_dbg(dev, "dropping unknown message 0x%x", msg);
+ return;
+ }
+ /* msg contains the index of the triggered vring */
+ if (rproc_vq_interrupt(oproc->rproc, msg) == IRQ_NONE)
+ dev_dbg(dev, "no message was found in vqid %d\n", msg);
+ }
+}
+
+/* kick a virtqueue */
+static void omap_rproc_kick(struct rproc *rproc, int vqid)
+{
+ struct omap_rproc *oproc = rproc->priv;
+ struct device *dev = rproc->dev.parent;
+ int ret;
+
+ /* wake up the rproc before kicking it */
+ ret = pm_runtime_get_sync(dev);
+ if (WARN_ON(ret < 0)) {
+ dev_err(dev, "pm_runtime_get_sync() failed during kick, ret = %d\n",
+ ret);
+ pm_runtime_put_noidle(dev);
+ return;
+ }
+
+ /* send the index of the triggered virtqueue in the mailbox payload */
+ ret = mbox_send_message(oproc->mbox, (void *)vqid);
+ if (ret < 0)
+ dev_err(dev, "failed to send mailbox message, status = %d\n",
+ ret);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+}
+
+/**
+ * omap_rproc_write_dsp_boot_addr() - set boot address for DSP remote processor
+ * @rproc: handle of a remote processor
+ *
+ * Set boot address for a supported DSP remote processor.
+ *
+ * Return: 0 on success, or -EINVAL if boot address is not aligned properly
+ */
+static int omap_rproc_write_dsp_boot_addr(struct rproc *rproc)
+{
+ struct device *dev = rproc->dev.parent;
+ struct omap_rproc *oproc = rproc->priv;
+ struct omap_rproc_boot_data *bdata = oproc->boot_data;
+ u32 offset = bdata->boot_reg;
+ u32 value;
+ u32 mask;
+
+ if (rproc->bootaddr & (SZ_1K - 1)) {
+ dev_err(dev, "invalid boot address 0x%llx, must be aligned on a 1KB boundary\n",
+ rproc->bootaddr);
+ return -EINVAL;
+ }
+
+ value = rproc->bootaddr >> bdata->boot_reg_shift;
+ mask = ~(SZ_1K - 1) >> bdata->boot_reg_shift;
+
+ return regmap_update_bits(bdata->syscon, offset, mask, value);
+}
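+
+/*
+ * For example, with a (hypothetical) boot_reg_shift of 10 and the 1 KB
+ * aligned bootaddr 0x8a000000, the value written into the bit-field is
+ * 0x8a000000 >> 10 = 0x228000, under the mask ~(SZ_1K - 1) >> 10 = 0x3fffff.
+ */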
+
+/*
+ * Power up the remote processor.
+ *
+ * This function will be invoked only after the firmware for this rproc
+ * was loaded, parsed successfully, and all of its resource requirements
+ * were met.
+ */
+static int omap_rproc_start(struct rproc *rproc)
+{
+ struct omap_rproc *oproc = rproc->priv;
+ struct device *dev = rproc->dev.parent;
+ int ret;
+ struct mbox_client *client = &oproc->client;
+
+ if (oproc->boot_data) {
+ ret = omap_rproc_write_dsp_boot_addr(rproc);
+ if (ret)
+ return ret;
+ }
+
+ client->dev = dev;
+ client->tx_done = NULL;
+ client->rx_callback = omap_rproc_mbox_callback;
+ client->tx_block = false;
+ client->knows_txdone = false;
+
+ oproc->mbox = mbox_request_channel(client, 0);
+ if (IS_ERR(oproc->mbox)) {
+ ret = -EBUSY;
+ dev_err(dev, "mbox_request_channel failed: %ld\n",
+ PTR_ERR(oproc->mbox));
+ return ret;
+ }
+
+ /*
+	 * Ping the remote processor; this is done only for sanity's sake and
+	 * has no functional effect whatsoever.
+ *
+ * Note that the reply will _not_ arrive immediately: this message
+ * will wait in the mailbox fifo until the remote processor is booted.
+ */
+ ret = mbox_send_message(oproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
+ if (ret < 0) {
+ dev_err(dev, "mbox_send_message failed: %d\n", ret);
+ goto put_mbox;
+ }
+
+ ret = omap_rproc_enable_timers(rproc, true);
+ if (ret) {
+ dev_err(dev, "omap_rproc_enable_timers failed: %d\n", ret);
+ goto put_mbox;
+ }
+
+ ret = reset_control_deassert(oproc->reset);
+ if (ret) {
+ dev_err(dev, "reset control deassert failed: %d\n", ret);
+ goto disable_timers;
+ }
+
+ /*
+	 * the remote processor is up, so update the runtime pm status and
+	 * enable auto-suspend. The device usage count is incremented
+	 * manually so that the final put_autosuspend balances it
+ */
+ pm_runtime_set_active(dev);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_get_noresume(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return 0;
+
+disable_timers:
+ omap_rproc_disable_timers(rproc, true);
+put_mbox:
+ mbox_free_channel(oproc->mbox);
+ return ret;
+}
+
+/* power off the remote processor */
+static int omap_rproc_stop(struct rproc *rproc)
+{
+ struct device *dev = rproc->dev.parent;
+ struct omap_rproc *oproc = rproc->priv;
+ int ret;
+
+ /*
+	 * cancel any possibly scheduled runtime suspend by incrementing
+	 * the device usage count and resuming the device. The remoteproc
+	 * also needs to be woken up if suspended, so that the remoteproc OS
+	 * does not continue to remember any context it has saved, avoiding
+	 * potential issues with misidentifying a subsequent device reboot
+	 * as a power-restore boot
+ */
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ return ret;
+ }
+
+ ret = reset_control_assert(oproc->reset);
+ if (ret)
+ goto out;
+
+ ret = omap_rproc_disable_timers(rproc, true);
+ if (ret)
+ goto enable_device;
+
+ mbox_free_channel(oproc->mbox);
+
+ /*
+ * update the runtime pm states and status now that the remoteproc
+ * has stopped
+ */
+ pm_runtime_disable(dev);
+ pm_runtime_dont_use_autosuspend(dev);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_set_suspended(dev);
+
+ return 0;
+
+enable_device:
+ reset_control_deassert(oproc->reset);
+out:
+ /* schedule the next auto-suspend */
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ return ret;
+}
+
+/**
+ * omap_rproc_da_to_va() - internal memory translation helper
+ * @rproc: remote processor to apply the address translation for
+ * @da: device address to translate
+ * @len: length of the memory buffer
+ *
+ * Custom function implementing the rproc .da_to_va ops to provide address
+ * translation (device address to kernel virtual address) for internal RAMs
+ * present in a DSP or IPU device. The translated addresses can be used
+ * either by the remoteproc core for loading, or by any rpmsg bus drivers.
+ *
+ * Return: translated virtual address in kernel memory space on success,
+ * or NULL on failure.
+ */
+static void *omap_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
+{
+ struct omap_rproc *oproc = rproc->priv;
+ int i;
+ u32 offset;
+
+	if (!len)
+ return NULL;
+
+ if (!oproc->num_mems)
+ return NULL;
+
+ for (i = 0; i < oproc->num_mems; i++) {
+ if (da >= oproc->mem[i].dev_addr && da + len <=
+ oproc->mem[i].dev_addr + oproc->mem[i].size) {
+ offset = da - oproc->mem[i].dev_addr;
+ /* __force to make sparse happy with type conversion */
+ return (__force void *)(oproc->mem[i].cpu_addr +
+ offset);
+ }
+ }
+
+ return NULL;
+}
+
+static const struct rproc_ops omap_rproc_ops = {
+ .start = omap_rproc_start,
+ .stop = omap_rproc_stop,
+ .kick = omap_rproc_kick,
+ .da_to_va = omap_rproc_da_to_va,
+};
+
+#ifdef CONFIG_PM
+static bool _is_rproc_in_standby(struct omap_rproc *oproc)
+{
+ return ti_clk_is_in_standby(oproc->fck);
+}
+
+/* 1 sec is long enough to let the remoteproc side suspend the device */
+#define DEF_SUSPEND_TIMEOUT 1000
+static int _omap_rproc_suspend(struct rproc *rproc, bool auto_suspend)
+{
+ struct device *dev = rproc->dev.parent;
+ struct omap_rproc *oproc = rproc->priv;
+ unsigned long to = msecs_to_jiffies(DEF_SUSPEND_TIMEOUT);
+ unsigned long ta = jiffies + to;
+ u32 suspend_msg = auto_suspend ?
+ RP_MBOX_SUSPEND_AUTO : RP_MBOX_SUSPEND_SYSTEM;
+ int ret;
+
+ reinit_completion(&oproc->pm_comp);
+ oproc->suspend_acked = false;
+ ret = mbox_send_message(oproc->mbox, (void *)suspend_msg);
+ if (ret < 0) {
+ dev_err(dev, "PM mbox_send_message failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&oproc->pm_comp, to);
+ if (!oproc->suspend_acked)
+ return -EBUSY;
+
+ /*
+ * The remoteproc side is returning the ACK message before saving the
+ * context, because the context saving is performed within a SYS/BIOS
+	 * function, and it cannot have any inter-dependencies with the IPC
+	 * layer. Also, as SYS/BIOS needs to properly preserve the processor
+ * register set, sending this ACK or signalling the completion of the
+ * context save through a shared memory variable can never be the
+ * absolute last thing to be executed on the remoteproc side, and the
+ * MPU cannot use the ACK message as a sync point to put the remoteproc
+ * into reset. The only way to ensure that the remote processor has
+ * completed saving the context is to check that the module has reached
+ * STANDBY state (after saving the context, the SYS/BIOS executes the
+ * appropriate target-specific WFI instruction causing the module to
+ * enter STANDBY).
+ */
+ while (!_is_rproc_in_standby(oproc)) {
+ if (time_after(jiffies, ta))
+ return -ETIME;
+ schedule();
+ }
+
+ ret = reset_control_assert(oproc->reset);
+ if (ret) {
+ dev_err(dev, "reset assert during suspend failed %d\n", ret);
+ return ret;
+ }
+
+ ret = omap_rproc_disable_timers(rproc, false);
+ if (ret) {
+ dev_err(dev, "disabling timers during suspend failed %d\n",
+ ret);
+ goto enable_device;
+ }
+
+ /*
+ * IOMMUs would have to be disabled specifically for runtime suspend.
+ * They are handled automatically through System PM callbacks for
+ * regular system suspend
+ */
+ if (auto_suspend) {
+ ret = omap_iommu_domain_deactivate(rproc->domain);
+ if (ret) {
+ dev_err(dev, "iommu domain deactivate failed %d\n",
+ ret);
+ goto enable_timers;
+ }
+ }
+
+ return 0;
+
+enable_timers:
+	/* ignore errors while re-enabling; we are already on an error path */
+ omap_rproc_enable_timers(rproc, false);
+enable_device:
+ reset_control_deassert(oproc->reset);
+ return ret;
+}
+
+static int _omap_rproc_resume(struct rproc *rproc, bool auto_suspend)
+{
+ struct device *dev = rproc->dev.parent;
+ struct omap_rproc *oproc = rproc->priv;
+ int ret;
+
+ /*
+ * IOMMUs would have to be enabled specifically for runtime resume.
+ * They would have been already enabled automatically through System
+ * PM callbacks for regular system resume
+ */
+ if (auto_suspend) {
+ ret = omap_iommu_domain_activate(rproc->domain);
+ if (ret) {
+ dev_err(dev, "omap_iommu activate failed %d\n", ret);
+ goto out;
+ }
+ }
+
+ /* boot address could be lost after suspend, so restore it */
+ if (oproc->boot_data) {
+ ret = omap_rproc_write_dsp_boot_addr(rproc);
+ if (ret) {
+ dev_err(dev, "boot address restore failed %d\n", ret);
+ goto suspend_iommu;
+ }
+ }
+
+ ret = omap_rproc_enable_timers(rproc, false);
+ if (ret) {
+ dev_err(dev, "enabling timers during resume failed %d\n", ret);
+ goto suspend_iommu;
+ }
+
+ ret = reset_control_deassert(oproc->reset);
+ if (ret) {
+ dev_err(dev, "reset deassert during resume failed %d\n", ret);
+ goto disable_timers;
+ }
+
+ return 0;
+
+disable_timers:
+ omap_rproc_disable_timers(rproc, false);
+suspend_iommu:
+ if (auto_suspend)
+ omap_iommu_domain_deactivate(rproc->domain);
+out:
+ return ret;
+}
+
+static int __maybe_unused omap_rproc_suspend(struct device *dev)
+{
+ struct rproc *rproc = dev_get_drvdata(dev);
+ struct omap_rproc *oproc = rproc->priv;
+ int ret = 0;
+
+ mutex_lock(&rproc->lock);
+ if (rproc->state == RPROC_OFFLINE)
+ goto out;
+
+ if (rproc->state == RPROC_SUSPENDED)
+ goto out;
+
+ if (rproc->state != RPROC_RUNNING) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = _omap_rproc_suspend(rproc, false);
+ if (ret) {
+ dev_err(dev, "suspend failed %d\n", ret);
+ goto out;
+ }
+
+ /*
+	 * the remoteproc is running at the time of system suspend, so
+	 * remember this in order to wake it up during system resume
+ */
+ oproc->need_resume = true;
+ rproc->state = RPROC_SUSPENDED;
+
+out:
+ mutex_unlock(&rproc->lock);
+ return ret;
+}
+
+static int __maybe_unused omap_rproc_resume(struct device *dev)
+{
+ struct rproc *rproc = dev_get_drvdata(dev);
+ struct omap_rproc *oproc = rproc->priv;
+ int ret = 0;
+
+ mutex_lock(&rproc->lock);
+ if (rproc->state == RPROC_OFFLINE)
+ goto out;
+
+ if (rproc->state != RPROC_SUSPENDED) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /*
+ * remoteproc was auto-suspended at the time of system suspend,
+	 * so there is no need to wake up the processor (leave it in the
+	 * suspended state; it will be woken up during a subsequent
+	 * runtime_resume)
+ */
+ if (!oproc->need_resume)
+ goto out;
+
+ ret = _omap_rproc_resume(rproc, false);
+ if (ret) {
+ dev_err(dev, "resume failed %d\n", ret);
+ goto out;
+ }
+
+ oproc->need_resume = false;
+ rproc->state = RPROC_RUNNING;
+
+ pm_runtime_mark_last_busy(dev);
+out:
+ mutex_unlock(&rproc->lock);
+ return ret;
+}
+
+static int omap_rproc_runtime_suspend(struct device *dev)
+{
+ struct rproc *rproc = dev_get_drvdata(dev);
+ struct omap_rproc *oproc = rproc->priv;
+ int ret;
+
+ mutex_lock(&rproc->lock);
+ if (rproc->state == RPROC_CRASHED) {
+ dev_dbg(dev, "rproc cannot be runtime suspended when crashed!\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (WARN_ON(rproc->state != RPROC_RUNNING)) {
+ dev_err(dev, "rproc cannot be runtime suspended when not running!\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /*
+ * do not even attempt suspend if the remote processor is not
+ * idled for runtime auto-suspend
+ */
+ if (!_is_rproc_in_standby(oproc)) {
+ ret = -EBUSY;
+ goto abort;
+ }
+
+ ret = _omap_rproc_suspend(rproc, true);
+ if (ret)
+ goto abort;
+
+ rproc->state = RPROC_SUSPENDED;
+ mutex_unlock(&rproc->lock);
+ return 0;
+
+abort:
+ pm_runtime_mark_last_busy(dev);
+out:
+ mutex_unlock(&rproc->lock);
+ return ret;
+}
+
+static int omap_rproc_runtime_resume(struct device *dev)
+{
+ struct rproc *rproc = dev_get_drvdata(dev);
+ int ret;
+
+ mutex_lock(&rproc->lock);
+ if (WARN_ON(rproc->state != RPROC_SUSPENDED)) {
+ dev_err(dev, "rproc cannot be runtime resumed if not suspended! state=%d\n",
+ rproc->state);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = _omap_rproc_resume(rproc, true);
+ if (ret) {
+ dev_err(dev, "runtime resume failed %d\n", ret);
+ goto out;
+ }
+
+ rproc->state = RPROC_RUNNING;
+out:
+ mutex_unlock(&rproc->lock);
+ return ret;
+}
+#endif /* CONFIG_PM */
+
+static const struct omap_rproc_mem_data ipu_mems[] = {
+ { .name = "l2ram", .dev_addr = 0x20000000 },
+ { },
+};
+
+static const struct omap_rproc_mem_data dra7_dsp_mems[] = {
+ { .name = "l2ram", .dev_addr = 0x800000 },
+ { .name = "l1pram", .dev_addr = 0xe00000 },
+ { .name = "l1dram", .dev_addr = 0xf00000 },
+ { },
+};
+
+static const struct omap_rproc_dev_data omap4_dsp_dev_data = {
+ .device_name = "dsp",
+};
+
+static const struct omap_rproc_dev_data omap4_ipu_dev_data = {
+ .device_name = "ipu",
+ .mems = ipu_mems,
+};
+
+static const struct omap_rproc_dev_data omap5_dsp_dev_data = {
+ .device_name = "dsp",
+};
+
+static const struct omap_rproc_dev_data omap5_ipu_dev_data = {
+ .device_name = "ipu",
+ .mems = ipu_mems,
+};
+
+static const struct omap_rproc_dev_data dra7_dsp_dev_data = {
+ .device_name = "dsp",
+ .mems = dra7_dsp_mems,
+};
+
+static const struct omap_rproc_dev_data dra7_ipu_dev_data = {
+ .device_name = "ipu",
+ .mems = ipu_mems,
+};
+
+static const struct of_device_id omap_rproc_of_match[] = {
+ {
+ .compatible = "ti,omap4-dsp",
+ .data = &omap4_dsp_dev_data,
+ },
+ {
+ .compatible = "ti,omap4-ipu",
+ .data = &omap4_ipu_dev_data,
+ },
+ {
+ .compatible = "ti,omap5-dsp",
+ .data = &omap5_dsp_dev_data,
+ },
+ {
+ .compatible = "ti,omap5-ipu",
+ .data = &omap5_ipu_dev_data,
+ },
+ {
+ .compatible = "ti,dra7-dsp",
+ .data = &dra7_dsp_dev_data,
+ },
+ {
+ .compatible = "ti,dra7-ipu",
+ .data = &dra7_ipu_dev_data,
+ },
+ {
+ /* end */
+ },
+};
+MODULE_DEVICE_TABLE(of, omap_rproc_of_match);
+
+static const char *omap_rproc_get_firmware(struct platform_device *pdev)
+{
+ const char *fw_name;
+ int ret;
+
+ ret = of_property_read_string(pdev->dev.of_node, "firmware-name",
+ &fw_name);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return fw_name;
+}
+
+static int omap_rproc_get_boot_data(struct platform_device *pdev,
+ struct rproc *rproc)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct omap_rproc *oproc = rproc->priv;
+ const struct omap_rproc_dev_data *data;
+ int ret;
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data)
+ return -ENODEV;
+
+ if (!of_property_read_bool(np, "ti,bootreg"))
+ return 0;
+
+ oproc->boot_data = devm_kzalloc(&pdev->dev, sizeof(*oproc->boot_data),
+ GFP_KERNEL);
+ if (!oproc->boot_data)
+ return -ENOMEM;
+
+ oproc->boot_data->syscon =
+ syscon_regmap_lookup_by_phandle(np, "ti,bootreg");
+ if (IS_ERR(oproc->boot_data->syscon)) {
+ ret = PTR_ERR(oproc->boot_data->syscon);
+ return ret;
+ }
+
+ if (of_property_read_u32_index(np, "ti,bootreg", 1,
+ &oproc->boot_data->boot_reg)) {
+ dev_err(&pdev->dev, "couldn't get the boot register\n");
+ return -EINVAL;
+ }
+
+ of_property_read_u32_index(np, "ti,bootreg", 2,
+ &oproc->boot_data->boot_reg_shift);
+
+ return 0;
+}
+
+static int omap_rproc_of_get_internal_memories(struct platform_device *pdev,
+ struct rproc *rproc)
+{
+ struct omap_rproc *oproc = rproc->priv;
+ struct device *dev = &pdev->dev;
+ const struct omap_rproc_dev_data *data;
+ struct resource *res;
+ int num_mems;
+ int i;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -ENODEV;
+
+ if (!data->mems)
+ return 0;
+
+ num_mems = of_property_count_elems_of_size(dev->of_node, "reg",
+ sizeof(u32)) / 2;
+
+ oproc->mem = devm_kcalloc(dev, num_mems, sizeof(*oproc->mem),
+ GFP_KERNEL);
+ if (!oproc->mem)
+ return -ENOMEM;
+
+ for (i = 0; data->mems[i].name; i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ data->mems[i].name);
+ if (!res) {
+ dev_err(dev, "no memory defined for %s\n",
+ data->mems[i].name);
+ return -ENOMEM;
+ }
+ oproc->mem[i].cpu_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(oproc->mem[i].cpu_addr)) {
+ dev_err(dev, "failed to parse and map %s memory\n",
+ data->mems[i].name);
+ return PTR_ERR(oproc->mem[i].cpu_addr);
+ }
+ oproc->mem[i].bus_addr = res->start;
+ oproc->mem[i].dev_addr = data->mems[i].dev_addr;
+ oproc->mem[i].size = resource_size(res);
+
+ dev_dbg(dev, "memory %8s: bus addr %pa size 0x%x va %pK da 0x%x\n",
+ data->mems[i].name, &oproc->mem[i].bus_addr,
+ oproc->mem[i].size, oproc->mem[i].cpu_addr,
+ oproc->mem[i].dev_addr);
+ }
+ oproc->num_mems = num_mems;
+
+ return 0;
+}
+
+#ifdef CONFIG_OMAP_REMOTEPROC_WATCHDOG
+static int omap_rproc_count_wdog_timers(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ ret = of_count_phandle_with_args(np, "ti,watchdog-timers", NULL);
+ if (ret <= 0) {
+ dev_dbg(dev, "device does not have watchdog timers, status = %d\n",
+ ret);
+ ret = 0;
+ }
+
+ return ret;
+}
+#else
+static int omap_rproc_count_wdog_timers(struct device *dev)
+{
+ return 0;
+}
+#endif
+
+static int omap_rproc_of_get_timers(struct platform_device *pdev,
+ struct rproc *rproc)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct omap_rproc *oproc = rproc->priv;
+ struct device *dev = &pdev->dev;
+ int num_timers;
+
+ /*
+ * Timer nodes are directly used in client nodes as phandles, so
+	 * retrieve the count using the appropriate size
+ */
+ oproc->num_timers = of_count_phandle_with_args(np, "ti,timers", NULL);
+ if (oproc->num_timers <= 0) {
+ dev_dbg(dev, "device does not have timers, status = %d\n",
+ oproc->num_timers);
+ oproc->num_timers = 0;
+ }
+
+ oproc->num_wd_timers = omap_rproc_count_wdog_timers(dev);
+
+ num_timers = oproc->num_timers + oproc->num_wd_timers;
+ if (num_timers) {
+ oproc->timers = devm_kcalloc(dev, num_timers,
+ sizeof(*oproc->timers),
+ GFP_KERNEL);
+ if (!oproc->timers)
+ return -ENOMEM;
+
+ dev_dbg(dev, "device has %d tick timers and %d watchdog timers\n",
+ oproc->num_timers, oproc->num_wd_timers);
+ }
+
+ return 0;
+}
+
+static int omap_rproc_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct omap_rproc *oproc;
+ struct rproc *rproc;
+ const char *firmware;
+ int ret;
+ struct reset_control *reset;
+
+ if (!np) {
+ dev_err(&pdev->dev, "only DT-based devices are supported\n");
+ return -ENODEV;
+ }
+
+ reset = devm_reset_control_array_get_exclusive(&pdev->dev);
+ if (IS_ERR(reset))
+ return PTR_ERR(reset);
+
+ firmware = omap_rproc_get_firmware(pdev);
+ if (IS_ERR(firmware))
+ return PTR_ERR(firmware);
+
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "dma_set_coherent_mask: %d\n", ret);
+ return ret;
+ }
+
+ rproc = rproc_alloc(&pdev->dev, dev_name(&pdev->dev), &omap_rproc_ops,
+ firmware, sizeof(*oproc));
+ if (!rproc)
+ return -ENOMEM;
+
+ oproc = rproc->priv;
+ oproc->rproc = rproc;
+ oproc->reset = reset;
+ /* All existing OMAP IPU and DSP processors have an MMU */
+ rproc->has_iommu = true;
+
+ ret = omap_rproc_of_get_internal_memories(pdev, rproc);
+ if (ret)
+ goto free_rproc;
+
+ ret = omap_rproc_get_boot_data(pdev, rproc);
+ if (ret)
+ goto free_rproc;
+
+ ret = omap_rproc_of_get_timers(pdev, rproc);
+ if (ret)
+ goto free_rproc;
+
+ init_completion(&oproc->pm_comp);
+ oproc->autosuspend_delay = DEFAULT_AUTOSUSPEND_DELAY;
+
+ of_property_read_u32(pdev->dev.of_node, "ti,autosuspend-delay-ms",
+ &oproc->autosuspend_delay);
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, oproc->autosuspend_delay);
+
+	oproc->fck = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(oproc->fck)) {
+ ret = PTR_ERR(oproc->fck);
+ goto free_rproc;
+ }
+
+ ret = of_reserved_mem_device_init(&pdev->dev);
+ if (ret) {
+ dev_warn(&pdev->dev, "device does not have specific CMA pool.\n");
+ dev_warn(&pdev->dev, "Typically this should be provided,\n");
+ dev_warn(&pdev->dev, "only omit if you know what you are doing.\n");
+ }
+
+ platform_set_drvdata(pdev, rproc);
+
+ ret = rproc_add(rproc);
+ if (ret)
+ goto release_mem;
+
+ return 0;
+
+release_mem:
+ of_reserved_mem_device_release(&pdev->dev);
+free_rproc:
+ rproc_free(rproc);
+ return ret;
+}
+
+static int omap_rproc_remove(struct platform_device *pdev)
+{
+ struct rproc *rproc = platform_get_drvdata(pdev);
+
+ rproc_del(rproc);
+ rproc_free(rproc);
+ of_reserved_mem_device_release(&pdev->dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops omap_rproc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(omap_rproc_suspend, omap_rproc_resume)
+ SET_RUNTIME_PM_OPS(omap_rproc_runtime_suspend,
+ omap_rproc_runtime_resume, NULL)
+};
+
+static struct platform_driver omap_rproc_driver = {
+ .probe = omap_rproc_probe,
+ .remove = omap_rproc_remove,
+ .driver = {
+ .name = "omap-rproc",
+ .pm = &omap_rproc_pm_ops,
+ .of_match_table = omap_rproc_of_match,
+ },
+};
+
+module_platform_driver(omap_rproc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("OMAP Remote Processor control driver");
diff --git a/drivers/remoteproc/omap_remoteproc.h b/drivers/remoteproc/omap_remoteproc.h
new file mode 100644
index 000000000..828e13256
--- /dev/null
+++ b/drivers/remoteproc/omap_remoteproc.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Remote processor messaging
+ *
+ * Copyright (C) 2011-2020 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ * All rights reserved.
+ */
+
+#ifndef _OMAP_RPMSG_H
+#define _OMAP_RPMSG_H
+
+/*
+ * enum - Predefined Mailbox Messages
+ *
+ * @RP_MBOX_READY: informs the M3s that we're up and running. this is
+ * part of the init sequence, and the M3 expects to see it immediately
+ * after it is booted.
+ *
+ * @RP_MBOX_PENDING_MSG: informs the receiver that there is an inbound
+ * message waiting in its own receive-side vring. please note that currently
+ * this message is optional: alternatively, one can explicitly send the index
+ * of the triggered virtqueue itself. the preferred approach will be decided
+ * as we progress and experiment with those two different approaches.
+ *
+ * @RP_MBOX_CRASH: this message is sent if the BIOS crashes
+ *
+ * @RP_MBOX_ECHO_REQUEST: a mailbox-level "ping" message.
+ *
+ * @RP_MBOX_ECHO_REPLY: a mailbox-level reply to a "ping"
+ *
+ * @RP_MBOX_ABORT_REQUEST: a "please crash" request, used for testing the
+ * recovery mechanism (to some extent).
+ *
+ * @RP_MBOX_SUSPEND_AUTO: auto suspend request for the remote processor
+ *
+ * @RP_MBOX_SUSPEND_SYSTEM: system suspend request for the remote processor
+ *
+ * @RP_MBOX_SUSPEND_ACK: successful response from remote processor for a
+ * suspend request
+ *
+ * @RP_MBOX_SUSPEND_CANCEL: a response from the remote processor cancelling
+ * a suspend request
+ *
+ * New message definitions, if any, should be introduced here.
+ *
+ * @RP_MBOX_END_MSG: indicates the end of known/defined messages from the
+ * remote core. This should be the last definition.
+ *
+ */
+enum omap_rp_mbox_messages {
+ RP_MBOX_READY = 0xFFFFFF00,
+ RP_MBOX_PENDING_MSG = 0xFFFFFF01,
+ RP_MBOX_CRASH = 0xFFFFFF02,
+ RP_MBOX_ECHO_REQUEST = 0xFFFFFF03,
+ RP_MBOX_ECHO_REPLY = 0xFFFFFF04,
+ RP_MBOX_ABORT_REQUEST = 0xFFFFFF05,
+ RP_MBOX_SUSPEND_AUTO = 0xFFFFFF10,
+ RP_MBOX_SUSPEND_SYSTEM = 0xFFFFFF11,
+ RP_MBOX_SUSPEND_ACK = 0xFFFFFF12,
+ RP_MBOX_SUSPEND_CANCEL = 0xFFFFFF13,
+ RP_MBOX_END_MSG = 0xFFFFFF14,
+};
+
+#endif /* _OMAP_RPMSG_H */
diff --git a/drivers/remoteproc/pru_rproc.c b/drivers/remoteproc/pru_rproc.c
new file mode 100644
index 000000000..128bf9912
--- /dev/null
+++ b/drivers/remoteproc/pru_rproc.c
@@ -0,0 +1,920 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PRU-ICSS remoteproc driver for various TI SoCs
+ *
+ * Copyright (C) 2014-2020 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ * Author(s):
+ * Suman Anna <s-anna@ti.com>
+ * Andrew F. Davis <afd@ti.com>
+ * Grzegorz Jaszczyk <grzegorz.jaszczyk@linaro.org> for Texas Instruments
+ */
+
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/pruss_driver.h>
+#include <linux/remoteproc.h>
+
+#include "remoteproc_internal.h"
+#include "remoteproc_elf_helpers.h"
+#include "pru_rproc.h"
+
+/* PRU_ICSS_PRU_CTRL registers */
+#define PRU_CTRL_CTRL 0x0000
+#define PRU_CTRL_STS 0x0004
+#define PRU_CTRL_WAKEUP_EN 0x0008
+#define PRU_CTRL_CYCLE 0x000C
+#define PRU_CTRL_STALL 0x0010
+#define PRU_CTRL_CTBIR0 0x0020
+#define PRU_CTRL_CTBIR1 0x0024
+#define PRU_CTRL_CTPPR0 0x0028
+#define PRU_CTRL_CTPPR1 0x002C
+
+/* CTRL register bit-fields */
+#define CTRL_CTRL_SOFT_RST_N BIT(0)
+#define CTRL_CTRL_EN BIT(1)
+#define CTRL_CTRL_SLEEPING BIT(2)
+#define CTRL_CTRL_CTR_EN BIT(3)
+#define CTRL_CTRL_SINGLE_STEP BIT(8)
+#define CTRL_CTRL_RUNSTATE BIT(15)
+
+/* PRU_ICSS_PRU_DEBUG registers */
+#define PRU_DEBUG_GPREG(x) (0x0000 + (x) * 4)
+#define PRU_DEBUG_CT_REG(x) (0x0080 + (x) * 4)
+
+/* PRU/RTU/Tx_PRU Core IRAM address masks */
+#define PRU_IRAM_ADDR_MASK 0x3ffff
+#define PRU0_IRAM_ADDR_MASK 0x34000
+#define PRU1_IRAM_ADDR_MASK 0x38000
+#define RTU0_IRAM_ADDR_MASK 0x4000
+#define RTU1_IRAM_ADDR_MASK 0x6000
+#define TX_PRU0_IRAM_ADDR_MASK 0xa000
+#define TX_PRU1_IRAM_ADDR_MASK 0xc000
+
+/* PRU device addresses for various type of PRU RAMs */
+#define PRU_IRAM_DA 0 /* Instruction RAM */
+#define PRU_PDRAM_DA 0 /* Primary Data RAM */
+#define PRU_SDRAM_DA 0x2000 /* Secondary Data RAM */
+#define PRU_SHRDRAM_DA 0x10000 /* Shared Data RAM */
+
+#define MAX_PRU_SYS_EVENTS 160
+
+/**
+ * enum pru_iomem - PRU core memory/register range identifiers
+ *
+ * @PRU_IOMEM_IRAM: PRU Instruction RAM range
+ * @PRU_IOMEM_CTRL: PRU Control register range
+ * @PRU_IOMEM_DEBUG: PRU Debug register range
+ * @PRU_IOMEM_MAX: just keep this one at the end
+ */
+enum pru_iomem {
+ PRU_IOMEM_IRAM = 0,
+ PRU_IOMEM_CTRL,
+ PRU_IOMEM_DEBUG,
+ PRU_IOMEM_MAX,
+};
+
+/**
+ * enum pru_type - PRU core type identifier
+ *
+ * @PRU_TYPE_PRU: Programmable Real-time Unit
+ * @PRU_TYPE_RTU: Auxiliary Programmable Real-Time Unit
+ * @PRU_TYPE_TX_PRU: Transmit Programmable Real-Time Unit
+ * @PRU_TYPE_MAX: just keep this one at the end
+ */
+enum pru_type {
+ PRU_TYPE_PRU = 0,
+ PRU_TYPE_RTU,
+ PRU_TYPE_TX_PRU,
+ PRU_TYPE_MAX,
+};
+
+/**
+ * struct pru_private_data - device data for a PRU core
+ * @type: type of the PRU core (PRU, RTU, Tx_PRU)
+ * @is_k3: flag used to identify the need for special load handling
+ */
+struct pru_private_data {
+ enum pru_type type;
+ unsigned int is_k3 : 1;
+};
+
+/**
+ * struct pru_rproc - PRU remoteproc structure
+ * @id: id of the PRU core within the PRUSS
+ * @dev: PRU core device pointer
+ * @pruss: back-reference to parent PRUSS structure
+ * @rproc: remoteproc pointer for this PRU core
+ * @data: PRU core specific data
+ * @mem_regions: data for each of the PRU memory regions
+ * @fw_name: name of firmware image used during loading
+ * @mapped_irq: virtual interrupt numbers of the created fw-specific mappings
+ * @pru_interrupt_map: pointer to interrupt mapping description (firmware)
+ * @pru_interrupt_map_sz: pru_interrupt_map size
+ * @dbg_single_step: debug state variable to set PRU into single step mode
+ * @dbg_continuous: debug state variable to restore PRU execution mode
+ * @evt_count: number of mapped events
+ */
+struct pru_rproc {
+ int id;
+ struct device *dev;
+ struct pruss *pruss;
+ struct rproc *rproc;
+ const struct pru_private_data *data;
+ struct pruss_mem_region mem_regions[PRU_IOMEM_MAX];
+ const char *fw_name;
+ unsigned int *mapped_irq;
+ struct pru_irq_rsc *pru_interrupt_map;
+ size_t pru_interrupt_map_sz;
+ u32 dbg_single_step;
+ u32 dbg_continuous;
+ u8 evt_count;
+};
+
+static inline u32 pru_control_read_reg(struct pru_rproc *pru, unsigned int reg)
+{
+ return readl_relaxed(pru->mem_regions[PRU_IOMEM_CTRL].va + reg);
+}
+
+static inline
+void pru_control_write_reg(struct pru_rproc *pru, unsigned int reg, u32 val)
+{
+ writel_relaxed(val, pru->mem_regions[PRU_IOMEM_CTRL].va + reg);
+}
+
+static inline u32 pru_debug_read_reg(struct pru_rproc *pru, unsigned int reg)
+{
+ return readl_relaxed(pru->mem_regions[PRU_IOMEM_DEBUG].va + reg);
+}
+
+static int regs_show(struct seq_file *s, void *data)
+{
+ struct rproc *rproc = s->private;
+ struct pru_rproc *pru = rproc->priv;
+ int i, nregs = 32;
+ u32 pru_sts;
+ int pru_is_running;
+
+ seq_puts(s, "============== Control Registers ==============\n");
+ seq_printf(s, "CTRL := 0x%08x\n",
+ pru_control_read_reg(pru, PRU_CTRL_CTRL));
+ pru_sts = pru_control_read_reg(pru, PRU_CTRL_STS);
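+	/* the STS register exposes the PC as a word address; <<2 gives bytes */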
+ seq_printf(s, "STS (PC) := 0x%08x (0x%08x)\n", pru_sts, pru_sts << 2);
+ seq_printf(s, "WAKEUP_EN := 0x%08x\n",
+ pru_control_read_reg(pru, PRU_CTRL_WAKEUP_EN));
+ seq_printf(s, "CYCLE := 0x%08x\n",
+ pru_control_read_reg(pru, PRU_CTRL_CYCLE));
+ seq_printf(s, "STALL := 0x%08x\n",
+ pru_control_read_reg(pru, PRU_CTRL_STALL));
+ seq_printf(s, "CTBIR0 := 0x%08x\n",
+ pru_control_read_reg(pru, PRU_CTRL_CTBIR0));
+ seq_printf(s, "CTBIR1 := 0x%08x\n",
+ pru_control_read_reg(pru, PRU_CTRL_CTBIR1));
+ seq_printf(s, "CTPPR0 := 0x%08x\n",
+ pru_control_read_reg(pru, PRU_CTRL_CTPPR0));
+ seq_printf(s, "CTPPR1 := 0x%08x\n",
+ pru_control_read_reg(pru, PRU_CTRL_CTPPR1));
+
+ seq_puts(s, "=============== Debug Registers ===============\n");
+ pru_is_running = pru_control_read_reg(pru, PRU_CTRL_CTRL) &
+ CTRL_CTRL_RUNSTATE;
+ if (pru_is_running) {
+ seq_puts(s, "PRU is executing, cannot print/access debug registers.\n");
+ return 0;
+ }
+
+ for (i = 0; i < nregs; i++) {
+ seq_printf(s, "GPREG%-2d := 0x%08x\tCT_REG%-2d := 0x%08x\n",
+ i, pru_debug_read_reg(pru, PRU_DEBUG_GPREG(i)),
+ i, pru_debug_read_reg(pru, PRU_DEBUG_CT_REG(i)));
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(regs);
+
+/*
+ * Control PRU single-step mode
+ *
+ * This is a debug helper function used for controlling the single-step
+ * mode of the PRU. The PRU Debug registers are not accessible when the
+ * PRU is in RUNNING state.
+ *
+ * Writing a non-zero value sets the PRU into single-step mode irrespective
+ * of its previous state. The PRU mode is saved only on the first transition
+ * into single-step mode. Writing a zero value restores the PRU to its
+ * original mode.
+ */
+static int pru_rproc_debug_ss_set(void *data, u64 val)
+{
+ struct rproc *rproc = data;
+ struct pru_rproc *pru = rproc->priv;
+ u32 reg_val;
+
+ val = val ? 1 : 0;
+ if (!val && !pru->dbg_single_step)
+ return 0;
+
+ reg_val = pru_control_read_reg(pru, PRU_CTRL_CTRL);
+
+ if (val && !pru->dbg_single_step)
+ pru->dbg_continuous = reg_val;
+
+ if (val)
+ reg_val |= CTRL_CTRL_SINGLE_STEP | CTRL_CTRL_EN;
+ else
+ reg_val = pru->dbg_continuous;
+
+ pru->dbg_single_step = val;
+ pru_control_write_reg(pru, PRU_CTRL_CTRL, reg_val);
+
+ return 0;
+}
+
+static int pru_rproc_debug_ss_get(void *data, u64 *val)
+{
+ struct rproc *rproc = data;
+ struct pru_rproc *pru = rproc->priv;
+
+ *val = pru->dbg_single_step;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(pru_rproc_debug_ss_fops, pru_rproc_debug_ss_get,
+ pru_rproc_debug_ss_set, "%llu\n");
+
+/*
+ * Create PRU-specific debugfs entries
+ *
+ * The entries are created only if the parent remoteproc debugfs directory
+ * exists, and will be cleaned up by the remoteproc core.
+ */
+static void pru_rproc_create_debug_entries(struct rproc *rproc)
+{
+ if (!rproc->dbg_dir)
+ return;
+
+ debugfs_create_file("regs", 0400, rproc->dbg_dir,
+ rproc, &regs_fops);
+ debugfs_create_file("single_step", 0600, rproc->dbg_dir,
+ rproc, &pru_rproc_debug_ss_fops);
+}
+
+static void pru_dispose_irq_mapping(struct pru_rproc *pru)
+{
+ if (!pru->mapped_irq)
+ return;
+
+ while (pru->evt_count) {
+ pru->evt_count--;
+ if (pru->mapped_irq[pru->evt_count] > 0)
+ irq_dispose_mapping(pru->mapped_irq[pru->evt_count]);
+ }
+
+ kfree(pru->mapped_irq);
+ pru->mapped_irq = NULL;
+}
+
+/*
+ * Parse the custom PRU interrupt map resource and configure the INTC
+ * appropriately.
+ */
+static int pru_handle_intrmap(struct rproc *rproc)
+{
+ struct device *dev = rproc->dev.parent;
+ struct pru_rproc *pru = rproc->priv;
+ struct pru_irq_rsc *rsc = pru->pru_interrupt_map;
+ struct irq_fwspec fwspec;
+ struct device_node *parent, *irq_parent;
+ int i, ret = 0;
+
+ /* not having pru_interrupt_map is not an error */
+ if (!rsc)
+ return 0;
+
+ /* currently supporting only type 0 */
+ if (rsc->type != 0) {
+ dev_err(dev, "unsupported rsc type: %d\n", rsc->type);
+ return -EINVAL;
+ }
+
+ if (rsc->num_evts > MAX_PRU_SYS_EVENTS)
+ return -EINVAL;
+
+ if (sizeof(*rsc) + rsc->num_evts * sizeof(struct pruss_int_map) !=
+ pru->pru_interrupt_map_sz)
+ return -EINVAL;
+
+ pru->evt_count = rsc->num_evts;
+ pru->mapped_irq = kcalloc(pru->evt_count, sizeof(unsigned int),
+ GFP_KERNEL);
+ if (!pru->mapped_irq) {
+ pru->evt_count = 0;
+ return -ENOMEM;
+ }
+
+ /*
+ * parse and fill in system event to interrupt channel and
+ * channel-to-host mapping. The interrupt controller to be used
+ * for these mappings for a given PRU remoteproc is always its
+ * corresponding sibling PRUSS INTC node.
+ */
+ parent = of_get_parent(dev_of_node(pru->dev));
+ if (!parent) {
+ kfree(pru->mapped_irq);
+ pru->mapped_irq = NULL;
+ pru->evt_count = 0;
+ return -ENODEV;
+ }
+
+ irq_parent = of_get_child_by_name(parent, "interrupt-controller");
+ of_node_put(parent);
+ if (!irq_parent) {
+ kfree(pru->mapped_irq);
+ pru->mapped_irq = NULL;
+ pru->evt_count = 0;
+ return -ENODEV;
+ }
+
+ fwspec.fwnode = of_node_to_fwnode(irq_parent);
+ fwspec.param_count = 3;
+ for (i = 0; i < pru->evt_count; i++) {
+ fwspec.param[0] = rsc->pru_intc_map[i].event;
+ fwspec.param[1] = rsc->pru_intc_map[i].chnl;
+ fwspec.param[2] = rsc->pru_intc_map[i].host;
+
+ dev_dbg(dev, "mapping%d: event %d, chnl %d, host %d\n",
+ i, fwspec.param[0], fwspec.param[1], fwspec.param[2]);
+
+ pru->mapped_irq[i] = irq_create_fwspec_mapping(&fwspec);
+ if (!pru->mapped_irq[i]) {
+ dev_err(dev, "failed to get virq for fw mapping %d: event %d chnl %d host %d\n",
+ i, fwspec.param[0], fwspec.param[1],
+ fwspec.param[2]);
+ ret = -EINVAL;
+ goto map_fail;
+ }
+ }
+ of_node_put(irq_parent);
+
+ return ret;
+
+map_fail:
+ pru_dispose_irq_mapping(pru);
+ of_node_put(irq_parent);
+
+ return ret;
+}
+
+static int pru_rproc_start(struct rproc *rproc)
+{
+ struct device *dev = &rproc->dev;
+ struct pru_rproc *pru = rproc->priv;
+ const char *names[PRU_TYPE_MAX] = { "PRU", "RTU", "Tx_PRU" };
+ u32 val;
+ int ret;
+
+ dev_dbg(dev, "starting %s%d: entry-point = 0x%llx\n",
+ names[pru->data->type], pru->id, (rproc->bootaddr >> 2));
+
+ ret = pru_handle_intrmap(rproc);
+ /*
+ * reset references to pru interrupt map - they will stop being valid
+ * after rproc_start returns
+ */
+ pru->pru_interrupt_map = NULL;
+ pru->pru_interrupt_map_sz = 0;
+ if (ret)
+ return ret;
+
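+	/*
+	 * rproc->bootaddr is a byte address while the PC reset-vector field
+	 * in CTRL (bits 31:16) takes a word address, hence the shift by 2
+	 */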
+ val = CTRL_CTRL_EN | ((rproc->bootaddr >> 2) << 16);
+ pru_control_write_reg(pru, PRU_CTRL_CTRL, val);
+
+ return 0;
+}
+
+static int pru_rproc_stop(struct rproc *rproc)
+{
+ struct device *dev = &rproc->dev;
+ struct pru_rproc *pru = rproc->priv;
+ const char *names[PRU_TYPE_MAX] = { "PRU", "RTU", "Tx_PRU" };
+ u32 val;
+
+ dev_dbg(dev, "stopping %s%d\n", names[pru->data->type], pru->id);
+
+ val = pru_control_read_reg(pru, PRU_CTRL_CTRL);
+ val &= ~CTRL_CTRL_EN;
+ pru_control_write_reg(pru, PRU_CTRL_CTRL, val);
+
+	/* dispose the irq mapping - a new firmware can provide a new mapping */
+ pru_dispose_irq_mapping(pru);
+
+ return 0;
+}
+
+/*
+ * Convert PRU device address (data spaces only) to kernel virtual address.
+ *
+ * Each PRU has access to all data memories within the PRUSS, accessible at
+ * different ranges. So, look through both its primary and secondary Data
+ * RAMs as well as any shared Data RAM to convert a PRU device address to
+ * kernel virtual address. Data RAM0 is primary Data RAM for PRU0 and Data
+ * RAM1 is primary Data RAM for PRU1.
+ */
+static void *pru_d_da_to_va(struct pru_rproc *pru, u32 da, size_t len)
+{
+ struct pruss_mem_region dram0, dram1, shrd_ram;
+ struct pruss *pruss = pru->pruss;
+ u32 offset;
+ void *va = NULL;
+
+ if (len == 0)
+ return NULL;
+
+ dram0 = pruss->mem_regions[PRUSS_MEM_DRAM0];
+ dram1 = pruss->mem_regions[PRUSS_MEM_DRAM1];
+ /* PRU1 has its local RAM addresses reversed */
+ if (pru->id == 1)
+ swap(dram0, dram1);
+ shrd_ram = pruss->mem_regions[PRUSS_MEM_SHRD_RAM2];
+
+ if (da >= PRU_PDRAM_DA && da + len <= PRU_PDRAM_DA + dram0.size) {
+ offset = da - PRU_PDRAM_DA;
+ va = (__force void *)(dram0.va + offset);
+ } else if (da >= PRU_SDRAM_DA &&
+ da + len <= PRU_SDRAM_DA + dram1.size) {
+ offset = da - PRU_SDRAM_DA;
+ va = (__force void *)(dram1.va + offset);
+ } else if (da >= PRU_SHRDRAM_DA &&
+ da + len <= PRU_SHRDRAM_DA + shrd_ram.size) {
+ offset = da - PRU_SHRDRAM_DA;
+ va = (__force void *)(shrd_ram.va + offset);
+ }
+
+ return va;
+}
+
+/*
+ * Convert PRU device address (instruction space) to kernel virtual address.
+ *
+ * A PRU does not have a unified address space. Each PRU has its very own
+ * private Instruction RAM, whose device address is identical to that of
+ * its primary Data RAM.
+ */
+static void *pru_i_da_to_va(struct pru_rproc *pru, u32 da, size_t len)
+{
+ u32 offset;
+ void *va = NULL;
+
+ if (len == 0)
+ return NULL;
+
+ /*
+ * GNU binutils do not support multiple address spaces. The GNU
+ * linker's default linker script places IRAM at an arbitrary high
+ * offset, in order to differentiate it from DRAM. Hence we need to
+ * strip the artificial offset in the IRAM addresses coming from the
+ * ELF file.
+ *
+ * The TI proprietary linker would never set those higher IRAM address
+ * bits anyway. PRU architecture limits the program counter to 16-bit
+ * word-address range. This in turn corresponds to 18-bit IRAM
+ * byte-address range for ELF.
+ *
+ * Two more bits are added just in case to make the final 20-bit mask.
+	 * The idea is to have a safeguard in case TI decides to add banking
+	 * in future SoCs.
+ */
+ da &= 0xfffff;
+
+ if (da >= PRU_IRAM_DA &&
+ da + len <= PRU_IRAM_DA + pru->mem_regions[PRU_IOMEM_IRAM].size) {
+ offset = da - PRU_IRAM_DA;
+ va = (__force void *)(pru->mem_regions[PRU_IOMEM_IRAM].va +
+ offset);
+ }
+
+ return va;
+}
+
+/*
+ * Provide address translations only for the PRU Data RAMs through the
+ * remoteproc core for any PRU client drivers. Access to the PRU
+ * Instruction RAM is restricted to the PRU loader code alone.
+ */
+static void *pru_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
+{
+ struct pru_rproc *pru = rproc->priv;
+
+ return pru_d_da_to_va(pru, da, len);
+}
+
+/* PRU-specific address translator used by PRU loader. */
+static void *pru_da_to_va(struct rproc *rproc, u64 da, size_t len, bool is_iram)
+{
+ struct pru_rproc *pru = rproc->priv;
+ void *va;
+
+ if (is_iram)
+ va = pru_i_da_to_va(pru, da, len);
+ else
+ va = pru_d_da_to_va(pru, da, len);
+
+ return va;
+}
+
+static struct rproc_ops pru_rproc_ops = {
+ .start = pru_rproc_start,
+ .stop = pru_rproc_stop,
+ .da_to_va = pru_rproc_da_to_va,
+};
+
+/*
+ * Custom memory copy implementation for ICSSG PRU/RTU/Tx_PRU Cores
+ *
+ * The ICSSG PRU/RTU/Tx_PRU cores have a memory copying issue with IRAM
+ * memories that is not seen on previous-generation SoCs. The data is reflected
+ * properly in the IRAM memories only for integer (4-byte) copies. Any unaligned
+ * copies result in all the other pre-existing bytes zeroed out within that
+ * 4-byte boundary, thereby resulting in wrong text/code in the IRAMs. Also, the
+ * IRAM memory port interface does not allow any 8-byte copies (as commonly used
+ * by ARM64 memcpy implementation) and throws an exception. The DRAM memory
+ * ports do not show this behavior.
+ */
+static int pru_rproc_memcpy(void *dest, const void *src, size_t count)
+{
+ const u32 *s = src;
+ u32 *d = dest;
+ size_t size = count / 4;
+ u32 *tmp_src = NULL;
+
+ /*
+ * TODO: relax limitation of 4-byte aligned dest addresses and copy
+ * sizes
+ */
+ if ((long)dest % 4 || count % 4)
+ return -EINVAL;
+
+ /* src offsets in ELF firmware image can be non-aligned */
+ if ((long)src % 4) {
+ tmp_src = kmemdup(src, count, GFP_KERNEL);
+ if (!tmp_src)
+ return -ENOMEM;
+ s = tmp_src;
+ }
+
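+	/* copy whole 32-bit words only; the IRAM port corrupts sub-word writes */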
+ while (size--)
+ *d++ = *s++;
+
+ kfree(tmp_src);
+
+ return 0;
+}
+
+static int
+pru_rproc_load_elf_segments(struct rproc *rproc, const struct firmware *fw)
+{
+ struct pru_rproc *pru = rproc->priv;
+ struct device *dev = &rproc->dev;
+ struct elf32_hdr *ehdr;
+ struct elf32_phdr *phdr;
+ int i, ret = 0;
+ const u8 *elf_data = fw->data;
+
+ ehdr = (struct elf32_hdr *)elf_data;
+ phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);
+
+ /* go through the available ELF segments */
+ for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
+ u32 da = phdr->p_paddr;
+ u32 memsz = phdr->p_memsz;
+ u32 filesz = phdr->p_filesz;
+ u32 offset = phdr->p_offset;
+ bool is_iram;
+ void *ptr;
+
+ if (phdr->p_type != PT_LOAD || !filesz)
+ continue;
+
+ dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n",
+ phdr->p_type, da, memsz, filesz);
+
+ if (filesz > memsz) {
+ dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n",
+ filesz, memsz);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (offset + filesz > fw->size) {
+ dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n",
+ offset + filesz, fw->size);
+ ret = -EINVAL;
+ break;
+ }
+
+ /* grab the kernel address for this device address */
+ is_iram = phdr->p_flags & PF_X;
+ ptr = pru_da_to_va(rproc, da, memsz, is_iram);
+ if (!ptr) {
+ dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (pru->data->is_k3) {
+ ret = pru_rproc_memcpy(ptr, elf_data + phdr->p_offset,
+ filesz);
+ if (ret) {
+ dev_err(dev, "PRU memory copy failed for da 0x%x memsz 0x%x\n",
+ da, memsz);
+ break;
+ }
+ } else {
+ memcpy(ptr, elf_data + phdr->p_offset, filesz);
+ }
+
+		/* skip the memzero logic performed by the remoteproc ELF loader */
+ }
+
+ return ret;
+}
+
+static const void *
+pru_rproc_find_interrupt_map(struct device *dev, const struct firmware *fw)
+{
+ struct elf32_shdr *shdr, *name_table_shdr;
+ const char *name_table;
+ const u8 *elf_data = fw->data;
+ struct elf32_hdr *ehdr = (struct elf32_hdr *)elf_data;
+ u16 shnum = ehdr->e_shnum;
+ u16 shstrndx = ehdr->e_shstrndx;
+ int i;
+
+ /* first, get the section header */
+ shdr = (struct elf32_shdr *)(elf_data + ehdr->e_shoff);
+ /* compute name table section header entry in shdr array */
+ name_table_shdr = shdr + shstrndx;
+ /* finally, compute the name table section address in elf */
+ name_table = elf_data + name_table_shdr->sh_offset;
+
+ for (i = 0; i < shnum; i++, shdr++) {
+ u32 size = shdr->sh_size;
+ u32 offset = shdr->sh_offset;
+ u32 name = shdr->sh_name;
+
+ if (strcmp(name_table + name, ".pru_irq_map"))
+ continue;
+
+ /* make sure we have the entire irq map */
+ if (offset + size > fw->size || offset + size < size) {
+ dev_err(dev, ".pru_irq_map section truncated\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* make sure irq map has at least the header */
+ if (sizeof(struct pru_irq_rsc) > size) {
+ dev_err(dev, "header-less .pru_irq_map section\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return shdr;
+ }
+
+ dev_dbg(dev, "no .pru_irq_map section found for this fw\n");
+
+ return NULL;
+}
+
+/*
+ * Use a custom parse_fw callback function for dealing with PRU firmware
+ * specific sections.
+ *
+ * The firmware blob can contain two optional ELF sections: .resource_table
+ * and .pru_irq_map. The latter contains the PRUSS interrupt mapping
+ * description, which needs to be set up before powering on the PRU core. To
+ * avoid RAM wastage, this ELF section is not mapped to any ELF segment (by the
+ * firmware linker) and therefore is not loaded to PRU memory.
+ */
+static int pru_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
+{
+ struct device *dev = &rproc->dev;
+ struct pru_rproc *pru = rproc->priv;
+ const u8 *elf_data = fw->data;
+ const void *shdr;
+ u8 class = fw_elf_get_class(fw);
+ u64 sh_offset;
+ int ret;
+
+ /* load optional rsc table */
+ ret = rproc_elf_load_rsc_table(rproc, fw);
+ if (ret == -EINVAL)
+ dev_dbg(&rproc->dev, "no resource table found for this fw\n");
+ else if (ret)
+ return ret;
+
+	/* find the optional .pru_irq_map section; not having it is not an error */
+ shdr = pru_rproc_find_interrupt_map(dev, fw);
+ if (IS_ERR(shdr))
+ return PTR_ERR(shdr);
+
+ if (!shdr)
+ return 0;
+
+	/* preserve pointer to PRU interrupt map together with its size */
+ sh_offset = elf_shdr_get_sh_offset(class, shdr);
+ pru->pru_interrupt_map = (struct pru_irq_rsc *)(elf_data + sh_offset);
+ pru->pru_interrupt_map_sz = elf_shdr_get_sh_size(class, shdr);
+
+ return 0;
+}
+
+/*
+ * Compute PRU id based on the IRAM addresses. The PRU IRAMs are
+ * always at a particular offset within the PRUSS address space.
+ */
+static int pru_rproc_set_id(struct pru_rproc *pru)
+{
+ int ret = 0;
+
+ switch (pru->mem_regions[PRU_IOMEM_IRAM].pa & PRU_IRAM_ADDR_MASK) {
+ case TX_PRU0_IRAM_ADDR_MASK:
+ fallthrough;
+ case RTU0_IRAM_ADDR_MASK:
+ fallthrough;
+ case PRU0_IRAM_ADDR_MASK:
+ pru->id = 0;
+ break;
+ case TX_PRU1_IRAM_ADDR_MASK:
+ fallthrough;
+ case RTU1_IRAM_ADDR_MASK:
+ fallthrough;
+ case PRU1_IRAM_ADDR_MASK:
+ pru->id = 1;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int pru_rproc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct platform_device *ppdev = to_platform_device(dev->parent);
+ struct pru_rproc *pru;
+ const char *fw_name;
+ struct rproc *rproc = NULL;
+ struct resource *res;
+ int i, ret;
+ const struct pru_private_data *data;
+ const char *mem_names[PRU_IOMEM_MAX] = { "iram", "control", "debug" };
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data)
+ return -ENODEV;
+
+ ret = of_property_read_string(np, "firmware-name", &fw_name);
+ if (ret) {
+ dev_err(dev, "unable to retrieve firmware-name %d\n", ret);
+ return ret;
+ }
+
+ rproc = devm_rproc_alloc(dev, pdev->name, &pru_rproc_ops, fw_name,
+ sizeof(*pru));
+ if (!rproc) {
+ dev_err(dev, "rproc_alloc failed\n");
+ return -ENOMEM;
+ }
+ /* use a custom load function to deal with PRU-specific quirks */
+ rproc->ops->load = pru_rproc_load_elf_segments;
+
+ /* use a custom parse function to deal with PRU-specific resources */
+ rproc->ops->parse_fw = pru_rproc_parse_fw;
+
+ /* error recovery is not supported for PRUs */
+ rproc->recovery_disabled = true;
+
+ /*
+ * rproc_add will auto-boot the processor normally, but this is not
+	 * desired with the PRU client-driven boot-flow methodology. A PRU
+ * application/client driver will boot the corresponding PRU
+ * remote-processor as part of its state machine either through the
+ * remoteproc sysfs interface or through the equivalent kernel API.
+ */
+ rproc->auto_boot = false;
+
+ pru = rproc->priv;
+ pru->dev = dev;
+ pru->data = data;
+ pru->pruss = platform_get_drvdata(ppdev);
+ pru->rproc = rproc;
+ pru->fw_name = fw_name;
+
+ for (i = 0; i < ARRAY_SIZE(mem_names); i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ mem_names[i]);
+ pru->mem_regions[i].va = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pru->mem_regions[i].va)) {
+ dev_err(dev, "failed to parse and map memory resource %d %s\n",
+ i, mem_names[i]);
+ ret = PTR_ERR(pru->mem_regions[i].va);
+ return ret;
+ }
+ pru->mem_regions[i].pa = res->start;
+ pru->mem_regions[i].size = resource_size(res);
+
+ dev_dbg(dev, "memory %8s: pa %pa size 0x%zx va %pK\n",
+ mem_names[i], &pru->mem_regions[i].pa,
+ pru->mem_regions[i].size, pru->mem_regions[i].va);
+ }
+
+ ret = pru_rproc_set_id(pru);
+ if (ret < 0)
+ return ret;
+
+ platform_set_drvdata(pdev, rproc);
+
+ ret = devm_rproc_add(dev, pru->rproc);
+ if (ret) {
+ dev_err(dev, "rproc_add failed: %d\n", ret);
+ return ret;
+ }
+
+ pru_rproc_create_debug_entries(rproc);
+
+ dev_dbg(dev, "PRU rproc node %pOF probed successfully\n", np);
+
+ return 0;
+}
+
+static int pru_rproc_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rproc *rproc = platform_get_drvdata(pdev);
+
+ dev_dbg(dev, "%s: removing rproc %s\n", __func__, rproc->name);
+
+ return 0;
+}
+
+static const struct pru_private_data pru_data = {
+ .type = PRU_TYPE_PRU,
+};
+
+static const struct pru_private_data k3_pru_data = {
+ .type = PRU_TYPE_PRU,
+ .is_k3 = 1,
+};
+
+static const struct pru_private_data k3_rtu_data = {
+ .type = PRU_TYPE_RTU,
+ .is_k3 = 1,
+};
+
+static const struct pru_private_data k3_tx_pru_data = {
+ .type = PRU_TYPE_TX_PRU,
+ .is_k3 = 1,
+};
+
+static const struct of_device_id pru_rproc_match[] = {
+ { .compatible = "ti,am3356-pru", .data = &pru_data },
+ { .compatible = "ti,am4376-pru", .data = &pru_data },
+ { .compatible = "ti,am5728-pru", .data = &pru_data },
+ { .compatible = "ti,am642-pru", .data = &k3_pru_data },
+ { .compatible = "ti,am642-rtu", .data = &k3_rtu_data },
+ { .compatible = "ti,am642-tx-pru", .data = &k3_tx_pru_data },
+ { .compatible = "ti,k2g-pru", .data = &pru_data },
+ { .compatible = "ti,am654-pru", .data = &k3_pru_data },
+ { .compatible = "ti,am654-rtu", .data = &k3_rtu_data },
+ { .compatible = "ti,am654-tx-pru", .data = &k3_tx_pru_data },
+ { .compatible = "ti,j721e-pru", .data = &k3_pru_data },
+ { .compatible = "ti,j721e-rtu", .data = &k3_rtu_data },
+ { .compatible = "ti,j721e-tx-pru", .data = &k3_tx_pru_data },
+ { .compatible = "ti,am625-pru", .data = &k3_pru_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, pru_rproc_match);
+
+static struct platform_driver pru_rproc_driver = {
+ .driver = {
+ .name = "pru-rproc",
+ .of_match_table = pru_rproc_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = pru_rproc_probe,
+ .remove = pru_rproc_remove,
+};
+module_platform_driver(pru_rproc_driver);
+
+MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_AUTHOR("Grzegorz Jaszczyk <grzegorz.jaszczyk@linaro.org>");
+MODULE_DESCRIPTION("PRU-ICSS Remote Processor Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/remoteproc/pru_rproc.h b/drivers/remoteproc/pru_rproc.h
new file mode 100644
index 000000000..8ee9c3171
--- /dev/null
+++ b/drivers/remoteproc/pru_rproc.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * PRUSS Remote Processor specific types
+ *
+ * Copyright (C) 2014-2020 Texas Instruments Incorporated - https://www.ti.com/
+ * Suman Anna <s-anna@ti.com>
+ */
+
+#ifndef _PRU_RPROC_H_
+#define _PRU_RPROC_H_
+
+/**
+ * struct pruss_int_map - PRU system events _to_ channel and host mapping
+ * @event: number of the system event
+ * @chnl: channel number assigned to a given @event
+ * @host: host number assigned to a given @chnl
+ *
+ * PRU system events are mapped to channels, and these channels are mapped
+ * to host interrupts. Events can be mapped to channels in a one-to-one or
+ * many-to-one ratio (multiple events per channel), and channels can be
+ * mapped to host interrupts in a one-to-one or many-to-one ratio (multiple
+ * channels per interrupt).
+ */
+struct pruss_int_map {
+ u8 event;
+ u8 chnl;
+ u8 host;
+};
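+
+/*
+ * For illustration only (the event numbers are hypothetical): routing PRU
+ * system events 16 and 17 through channel 2 to host interrupt 2 would be
+ * described by the entries { .event = 16, .chnl = 2, .host = 2 } and
+ * { .event = 17, .chnl = 2, .host = 2 } in the @pru_intc_map table below.
+ */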
+
+/**
+ * struct pru_irq_rsc - PRU firmware section header for IRQ data
+ * @type: resource type
+ * @num_evts: number of described events
+ * @pru_intc_map: PRU interrupt routing description
+ *
+ * The PRU firmware blob can contain optional .pru_irq_map ELF section, which
+ * provides the PRUSS interrupt mapping description. The pru_irq_rsc struct
+ * describes resource entry format.
+ */
+struct pru_irq_rsc {
+ u8 type;
+ u8 num_evts;
+ struct pruss_int_map pru_intc_map[];
+} __packed;
+
+#endif /* _PRU_RPROC_H_ */
diff --git a/drivers/remoteproc/qcom_common.c b/drivers/remoteproc/qcom_common.c
new file mode 100644
index 000000000..020349f89
--- /dev/null
+++ b/drivers/remoteproc/qcom_common.c
@@ -0,0 +1,509 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Qualcomm Peripheral Image Loader helpers
+ *
+ * Copyright (C) 2016 Linaro Ltd
+ * Copyright (C) 2015 Sony Mobile Communications Inc
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/firmware.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/remoteproc.h>
+#include <linux/remoteproc/qcom_rproc.h>
+#include <linux/rpmsg/qcom_glink.h>
+#include <linux/rpmsg/qcom_smd.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/mdt_loader.h>
+#include <linux/soc/qcom/smem.h>
+
+#include "remoteproc_internal.h"
+#include "qcom_common.h"
+
+#define to_glink_subdev(d) container_of(d, struct qcom_rproc_glink, subdev)
+#define to_smd_subdev(d) container_of(d, struct qcom_rproc_subdev, subdev)
+#define to_ssr_subdev(d) container_of(d, struct qcom_rproc_ssr, subdev)
+
+#define MAX_NUM_OF_SS 10
+#define MAX_REGION_NAME_LENGTH 16
+#define SBL_MINIDUMP_SMEM_ID 602
+#define MD_REGION_VALID ('V' << 24 | 'A' << 16 | 'L' << 8 | 'I' << 0)
+#define MD_SS_ENCR_DONE ('D' << 24 | 'O' << 16 | 'N' << 8 | 'E' << 0)
+#define MD_SS_ENABLED ('E' << 24 | 'N' << 16 | 'B' << 8 | 'L' << 0)
+
+/**
+ * struct minidump_region - Minidump region
+ * @name: Name of the region to be dumped
+ * @seq_num: Used to differentiate regions with the same name
+ * @valid: This entry is to be dumped (if set to MD_REGION_VALID)
+ * @address: Physical address of the region to be dumped
+ * @size: Size of the region
+ */
+struct minidump_region {
+ char name[MAX_REGION_NAME_LENGTH];
+ __le32 seq_num;
+ __le32 valid;
+ __le64 address;
+ __le64 size;
+};
+
+/**
+ * struct minidump_subsystem - Subsystem's SMEM Table of content
+ * @status: Subsystem ToC init status
+ * @enabled: If set to MD_SS_ENABLED, this subsystem's regions are copied
+ * during coredump
+ * @encryption_status: Encryption status for this subsystem
+ * @encryption_required: Decides whether to encrypt the subsystem regions
+ * @region_count: Number of regions added in this subsystem ToC
+ * @regions_baseptr: Base pointer to the regions of the subsystem
+ */
+struct minidump_subsystem {
+ __le32 status;
+ __le32 enabled;
+ __le32 encryption_status;
+ __le32 encryption_required;
+ __le32 region_count;
+ __le64 regions_baseptr;
+};
+
+/**
+ * struct minidump_global_toc - Global Table of Content
+ * @status: Global minidump init status
+ * @md_revision: Minidump revision
+ * @enabled: Minidump enable status
+ * @subsystems: Array of subsystem ToCs
+ */
+struct minidump_global_toc {
+ __le32 status;
+ __le32 md_revision;
+ __le32 enabled;
+ struct minidump_subsystem subsystems[MAX_NUM_OF_SS];
+};
+
+struct qcom_ssr_subsystem {
+ const char *name;
+ struct srcu_notifier_head notifier_list;
+ struct list_head list;
+};
+
+static LIST_HEAD(qcom_ssr_subsystem_list);
+static DEFINE_MUTEX(qcom_ssr_subsys_lock);
+
+static void qcom_minidump_cleanup(struct rproc *rproc)
+{
+ struct rproc_dump_segment *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &rproc->dump_segments, node) {
+ list_del(&entry->node);
+ kfree(entry->priv);
+ kfree(entry);
+ }
+}
+
+static int qcom_add_minidump_segments(struct rproc *rproc, struct minidump_subsystem *subsystem)
+{
+ struct minidump_region __iomem *ptr;
+ struct minidump_region region;
+ int seg_cnt, i;
+ dma_addr_t da;
+ size_t size;
+ char *name;
+
+ if (WARN_ON(!list_empty(&rproc->dump_segments))) {
+ dev_err(&rproc->dev, "dump segment list already populated\n");
+ return -EUCLEAN;
+ }
+
+ seg_cnt = le32_to_cpu(subsystem->region_count);
+ ptr = ioremap((unsigned long)le64_to_cpu(subsystem->regions_baseptr),
+ seg_cnt * sizeof(struct minidump_region));
+ if (!ptr)
+ return -EFAULT;
+
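+	/* the region table lives in shared memory; copy each entry out first */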
+ for (i = 0; i < seg_cnt; i++) {
+ memcpy_fromio(&region, ptr + i, sizeof(region));
+ if (region.valid == MD_REGION_VALID) {
+ name = kstrdup(region.name, GFP_KERNEL);
+ if (!name) {
+ iounmap(ptr);
+ return -ENOMEM;
+ }
+ da = le64_to_cpu(region.address);
+ size = le32_to_cpu(region.size);
+ rproc_coredump_add_custom_segment(rproc, da, size, NULL, name);
+ }
+ }
+
+ iounmap(ptr);
+ return 0;
+}
+
+void qcom_minidump(struct rproc *rproc, unsigned int minidump_id)
+{
+ int ret;
+ struct minidump_subsystem *subsystem;
+ struct minidump_global_toc *toc;
+
+	/* Get the global minidump ToC */
+ toc = qcom_smem_get(QCOM_SMEM_HOST_ANY, SBL_MINIDUMP_SMEM_ID, NULL);
+
+ /* check if global table pointer exists and init is set */
+ if (IS_ERR(toc) || !toc->status) {
+ dev_err(&rproc->dev, "Minidump TOC not found in SMEM\n");
+ return;
+ }
+
+ /* Get subsystem table of contents using the minidump id */
+ subsystem = &toc->subsystems[minidump_id];
+
+	/*
+	 * Collect a minidump only if the subsystem ToC is valid, the segment
+	 * table is initialized in memory, and the encryption status is set.
+	 */
+ if (subsystem->regions_baseptr == 0 ||
+ le32_to_cpu(subsystem->status) != 1 ||
+ le32_to_cpu(subsystem->enabled) != MD_SS_ENABLED ||
+ le32_to_cpu(subsystem->encryption_status) != MD_SS_ENCR_DONE) {
+ dev_err(&rproc->dev, "Minidump not ready, skipping\n");
+ return;
+ }
+
+ ret = qcom_add_minidump_segments(rproc, subsystem);
+ if (ret) {
+ dev_err(&rproc->dev, "Failed with error: %d while adding minidump entries\n", ret);
+ goto clean_minidump;
+ }
+ rproc_coredump_using_sections(rproc);
+clean_minidump:
+ qcom_minidump_cleanup(rproc);
+}
+EXPORT_SYMBOL_GPL(qcom_minidump);
+
+static int glink_subdev_start(struct rproc_subdev *subdev)
+{
+ struct qcom_rproc_glink *glink = to_glink_subdev(subdev);
+
+ glink->edge = qcom_glink_smem_register(glink->dev, glink->node);
+
+ return PTR_ERR_OR_ZERO(glink->edge);
+}
+
+static void glink_subdev_stop(struct rproc_subdev *subdev, bool crashed)
+{
+ struct qcom_rproc_glink *glink = to_glink_subdev(subdev);
+
+ qcom_glink_smem_unregister(glink->edge);
+ glink->edge = NULL;
+}
+
+static void glink_subdev_unprepare(struct rproc_subdev *subdev)
+{
+ struct qcom_rproc_glink *glink = to_glink_subdev(subdev);
+
+ qcom_glink_ssr_notify(glink->ssr_name);
+}
+
+/**
+ * qcom_add_glink_subdev() - try to add a GLINK subdevice to rproc
+ * @rproc: rproc handle to parent the subdevice
+ * @glink: reference to a GLINK subdev context
+ * @ssr_name: identifier of the associated remoteproc for ssr notifications
+ */
+void qcom_add_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glink,
+ const char *ssr_name)
+{
+ struct device *dev = &rproc->dev;
+
+ glink->node = of_get_child_by_name(dev->parent->of_node, "glink-edge");
+ if (!glink->node)
+ return;
+
+ glink->ssr_name = kstrdup_const(ssr_name, GFP_KERNEL);
+ if (!glink->ssr_name)
+ return;
+
+ glink->dev = dev;
+ glink->subdev.start = glink_subdev_start;
+ glink->subdev.stop = glink_subdev_stop;
+ glink->subdev.unprepare = glink_subdev_unprepare;
+
+ rproc_add_subdev(rproc, &glink->subdev);
+}
+EXPORT_SYMBOL_GPL(qcom_add_glink_subdev);
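+
+/*
+ * The GLINK subdevice is only instantiated when the remoteproc's device tree
+ * node carries a "glink-edge" child. A hypothetical fragment (node name and
+ * property values are illustrative only):
+ *
+ *	remoteproc@4080000 {
+ *		...
+ *		glink-edge {
+ *			interrupts = <GIC_SPI 156 IRQ_TYPE_EDGE_RISING>;
+ *			mboxes = <&apss_shared 8>;
+ *			...
+ *		};
+ *	};
+ */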
+
+/**
+ * qcom_remove_glink_subdev() - remove a GLINK subdevice from rproc
+ * @rproc: rproc handle
+ * @glink: reference to a GLINK subdev context
+ */
+void qcom_remove_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glink)
+{
+ if (!glink->node)
+ return;
+
+ rproc_remove_subdev(rproc, &glink->subdev);
+ kfree_const(glink->ssr_name);
+ of_node_put(glink->node);
+}
+EXPORT_SYMBOL_GPL(qcom_remove_glink_subdev);
+
+/**
+ * qcom_register_dump_segments() - register segments for coredump
+ * @rproc: remoteproc handle
+ * @fw: firmware header
+ *
+ * Register all segments of the ELF in the remoteproc coredump segment list
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int qcom_register_dump_segments(struct rproc *rproc,
+ const struct firmware *fw)
+{
+ const struct elf32_phdr *phdrs;
+ const struct elf32_phdr *phdr;
+ const struct elf32_hdr *ehdr;
+ int ret;
+ int i;
+
+ ehdr = (struct elf32_hdr *)fw->data;
+ phdrs = (struct elf32_phdr *)(ehdr + 1);
+
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ phdr = &phdrs[i];
+
+ if (phdr->p_type != PT_LOAD)
+ continue;
+
+ if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
+ continue;
+
+ if (!phdr->p_memsz)
+ continue;
+
+ ret = rproc_coredump_add_segment(rproc, phdr->p_paddr,
+ phdr->p_memsz);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_register_dump_segments);
+
+static int smd_subdev_start(struct rproc_subdev *subdev)
+{
+ struct qcom_rproc_subdev *smd = to_smd_subdev(subdev);
+
+ smd->edge = qcom_smd_register_edge(smd->dev, smd->node);
+
+ return PTR_ERR_OR_ZERO(smd->edge);
+}
+
+static void smd_subdev_stop(struct rproc_subdev *subdev, bool crashed)
+{
+ struct qcom_rproc_subdev *smd = to_smd_subdev(subdev);
+
+ qcom_smd_unregister_edge(smd->edge);
+ smd->edge = NULL;
+}
+
+/**
+ * qcom_add_smd_subdev() - try to add a SMD subdevice to rproc
+ * @rproc: rproc handle to parent the subdevice
+ * @smd: reference to a Qualcomm subdev context
+ */
+void qcom_add_smd_subdev(struct rproc *rproc, struct qcom_rproc_subdev *smd)
+{
+ struct device *dev = &rproc->dev;
+
+ smd->node = of_get_child_by_name(dev->parent->of_node, "smd-edge");
+ if (!smd->node)
+ return;
+
+ smd->dev = dev;
+ smd->subdev.start = smd_subdev_start;
+ smd->subdev.stop = smd_subdev_stop;
+
+ rproc_add_subdev(rproc, &smd->subdev);
+}
+EXPORT_SYMBOL_GPL(qcom_add_smd_subdev);
+
+/**
+ * qcom_remove_smd_subdev() - remove the smd subdevice from rproc
+ * @rproc: rproc handle
+ * @smd: the SMD subdevice to remove
+ */
+void qcom_remove_smd_subdev(struct rproc *rproc, struct qcom_rproc_subdev *smd)
+{
+ if (!smd->node)
+ return;
+
+ rproc_remove_subdev(rproc, &smd->subdev);
+ of_node_put(smd->node);
+}
+EXPORT_SYMBOL_GPL(qcom_remove_smd_subdev);
+
+static struct qcom_ssr_subsystem *qcom_ssr_get_subsys(const char *name)
+{
+ struct qcom_ssr_subsystem *info;
+
+ mutex_lock(&qcom_ssr_subsys_lock);
+ /* Match in the global qcom_ssr_subsystem_list with name */
+ list_for_each_entry(info, &qcom_ssr_subsystem_list, list)
+ if (!strcmp(info->name, name))
+ goto out;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ info = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+ info->name = kstrdup_const(name, GFP_KERNEL);
+ srcu_init_notifier_head(&info->notifier_list);
+
+ /* Add to global notification list */
+ list_add_tail(&info->list, &qcom_ssr_subsystem_list);
+
+out:
+ mutex_unlock(&qcom_ssr_subsys_lock);
+ return info;
+}
+
+/**
+ * qcom_register_ssr_notifier() - register SSR notification handler
+ * @name: Subsystem's SSR name
+ * @nb: notifier_block to be invoked upon subsystem's state change
+ *
+ * This registers the @nb notifier block as part of the notifier chain for a
+ * remoteproc associated with @name. The notifier block's callback
+ * will be invoked when the remote processor's SSR events occur
+ * (pre/post startup and pre/post shutdown).
+ *
+ * Return: a subsystem cookie on success, ERR_PTR on failure.
+ */
+void *qcom_register_ssr_notifier(const char *name, struct notifier_block *nb)
+{
+ struct qcom_ssr_subsystem *info;
+
+ info = qcom_ssr_get_subsys(name);
+ if (IS_ERR(info))
+ return info;
+
+ srcu_notifier_chain_register(&info->notifier_list, nb);
+
+ return &info->notifier_list;
+}
+EXPORT_SYMBOL_GPL(qcom_register_ssr_notifier);
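+
+/*
+ * Minimal usage sketch (the callback and subsystem name are hypothetical):
+ *
+ *	static int my_ssr_cb(struct notifier_block *nb, unsigned long event,
+ *			     void *data)
+ *	{
+ *		struct qcom_ssr_notify_data *nd = data;
+ *
+ *		if (event == QCOM_SSR_AFTER_SHUTDOWN && nd->crashed)
+ *			pr_info("%s crashed\n", nd->name);
+ *
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	cookie = qcom_register_ssr_notifier("mpss", &my_nb);
+ *	...
+ *	qcom_unregister_ssr_notifier(cookie, &my_nb);
+ */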
+
+/**
+ * qcom_unregister_ssr_notifier() - unregister SSR notification handler
+ * @notify: subsystem cookie returned from qcom_register_ssr_notifier
+ * @nb: notifier_block to unregister
+ *
+ * This function will unregister the notifier from the particular notifier
+ * chain.
+ *
+ * Return: 0 on success, -ENOENT otherwise.
+ */
+int qcom_unregister_ssr_notifier(void *notify, struct notifier_block *nb)
+{
+ return srcu_notifier_chain_unregister(notify, nb);
+}
+EXPORT_SYMBOL_GPL(qcom_unregister_ssr_notifier);
+
+static int ssr_notify_prepare(struct rproc_subdev *subdev)
+{
+ struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev);
+ struct qcom_ssr_notify_data data = {
+ .name = ssr->info->name,
+ .crashed = false,
+ };
+
+ srcu_notifier_call_chain(&ssr->info->notifier_list,
+ QCOM_SSR_BEFORE_POWERUP, &data);
+ return 0;
+}
+
+static int ssr_notify_start(struct rproc_subdev *subdev)
+{
+ struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev);
+ struct qcom_ssr_notify_data data = {
+ .name = ssr->info->name,
+ .crashed = false,
+ };
+
+ srcu_notifier_call_chain(&ssr->info->notifier_list,
+ QCOM_SSR_AFTER_POWERUP, &data);
+ return 0;
+}
+
+static void ssr_notify_stop(struct rproc_subdev *subdev, bool crashed)
+{
+ struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev);
+ struct qcom_ssr_notify_data data = {
+ .name = ssr->info->name,
+ .crashed = crashed,
+ };
+
+ srcu_notifier_call_chain(&ssr->info->notifier_list,
+ QCOM_SSR_BEFORE_SHUTDOWN, &data);
+}
+
+static void ssr_notify_unprepare(struct rproc_subdev *subdev)
+{
+ struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev);
+ struct qcom_ssr_notify_data data = {
+ .name = ssr->info->name,
+ .crashed = false,
+ };
+
+ srcu_notifier_call_chain(&ssr->info->notifier_list,
+ QCOM_SSR_AFTER_SHUTDOWN, &data);
+}
+
+/**
+ * qcom_add_ssr_subdev() - register subdevice as restart notification source
+ * @rproc: rproc handle
+ * @ssr: SSR subdevice handle
+ * @ssr_name: identifier to use for notifications originating from @rproc
+ *
+ * Once @ssr is registered with @rproc, SSR events will be sent to all
+ * registered listeners of the remoteproc when its SSR events occur
+ * (pre/post startup and pre/post shutdown).
+ */
+void qcom_add_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr,
+ const char *ssr_name)
+{
+ struct qcom_ssr_subsystem *info;
+
+ info = qcom_ssr_get_subsys(ssr_name);
+ if (IS_ERR(info)) {
+ dev_err(&rproc->dev, "Failed to add ssr subdevice\n");
+ return;
+ }
+
+ ssr->info = info;
+ ssr->subdev.prepare = ssr_notify_prepare;
+ ssr->subdev.start = ssr_notify_start;
+ ssr->subdev.stop = ssr_notify_stop;
+ ssr->subdev.unprepare = ssr_notify_unprepare;
+
+ rproc_add_subdev(rproc, &ssr->subdev);
+}
+EXPORT_SYMBOL_GPL(qcom_add_ssr_subdev);
+
+/**
+ * qcom_remove_ssr_subdev() - remove subdevice as restart notification source
+ * @rproc: rproc handle
+ * @ssr: SSR subdevice handle
+ */
+void qcom_remove_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr)
+{
+ rproc_remove_subdev(rproc, &ssr->subdev);
+ ssr->info = NULL;
+}
+EXPORT_SYMBOL_GPL(qcom_remove_ssr_subdev);
+
+MODULE_DESCRIPTION("Qualcomm Remoteproc helper driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/remoteproc/qcom_common.h b/drivers/remoteproc/qcom_common.h
new file mode 100644
index 000000000..c35adf730
--- /dev/null
+++ b/drivers/remoteproc/qcom_common.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __RPROC_QCOM_COMMON_H__
+#define __RPROC_QCOM_COMMON_H__
+
+#include <linux/remoteproc.h>
+#include "remoteproc_internal.h"
+#include <linux/soc/qcom/qmi.h>
+
+struct qcom_sysmon;
+
+struct qcom_rproc_glink {
+ struct rproc_subdev subdev;
+
+ const char *ssr_name;
+
+ struct device *dev;
+ struct device_node *node;
+ struct qcom_glink *edge;
+};
+
+struct qcom_rproc_subdev {
+ struct rproc_subdev subdev;
+
+ struct device *dev;
+ struct device_node *node;
+ struct qcom_smd_edge *edge;
+};
+
+struct qcom_ssr_subsystem;
+
+struct qcom_rproc_ssr {
+ struct rproc_subdev subdev;
+ struct qcom_ssr_subsystem *info;
+};
+
+void qcom_minidump(struct rproc *rproc, unsigned int minidump_id);
+
+void qcom_add_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glink,
+ const char *ssr_name);
+void qcom_remove_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glink);
+
+int qcom_register_dump_segments(struct rproc *rproc, const struct firmware *fw);
+
+void qcom_add_smd_subdev(struct rproc *rproc, struct qcom_rproc_subdev *smd);
+void qcom_remove_smd_subdev(struct rproc *rproc, struct qcom_rproc_subdev *smd);
+
+void qcom_add_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr,
+ const char *ssr_name);
+void qcom_remove_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr);
+
+#if IS_ENABLED(CONFIG_QCOM_SYSMON)
+struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
+ const char *name,
+ int ssctl_instance);
+void qcom_remove_sysmon_subdev(struct qcom_sysmon *sysmon);
+bool qcom_sysmon_shutdown_acked(struct qcom_sysmon *sysmon);
+#else
+static inline struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
+ const char *name,
+ int ssctl_instance)
+{
+ return NULL;
+}
+
+static inline void qcom_remove_sysmon_subdev(struct qcom_sysmon *sysmon)
+{
+}
+
+static inline bool qcom_sysmon_shutdown_acked(struct qcom_sysmon *sysmon)
+{
+ return false;
+}
+#endif
+
+#endif
diff --git a/drivers/remoteproc/qcom_pil_info.c b/drivers/remoteproc/qcom_pil_info.c
new file mode 100644
index 000000000..aca21560e
--- /dev/null
+++ b/drivers/remoteproc/qcom_pil_info.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020 Linaro Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_address.h>
+#include "qcom_pil_info.h"
+
+/*
+ * The PIL relocation information region is used to communicate memory regions
+ * occupied by co-processor firmware for post mortem crash analysis.
+ *
+ * It consists of an array of entries, each with an 8-byte textual identifier
+ * of the region followed by a 64-bit base address and a 32-bit size, both
+ * little endian.
+ */
+#define PIL_RELOC_NAME_LEN 8
+#define PIL_RELOC_ENTRY_SIZE (PIL_RELOC_NAME_LEN + sizeof(__le64) + sizeof(__le32))
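+
+/*
+ * Sketch of a single entry, matching the description above (the region is
+ * accessed with MMIO accessors below rather than through a struct):
+ *
+ *	char   name[PIL_RELOC_NAME_LEN];  // textual identifier
+ *	__le64 base;                      // base address of the image
+ *	__le32 size;                      // size of the image
+ */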
+
+struct pil_reloc {
+ void __iomem *base;
+ size_t num_entries;
+};
+
+static struct pil_reloc _reloc __read_mostly;
+static DEFINE_MUTEX(pil_reloc_lock);
+
+static int qcom_pil_info_init(void)
+{
+ struct device_node *np;
+ struct resource imem;
+ void __iomem *base;
+ int ret;
+
+ /* Already initialized? */
+ if (_reloc.base)
+ return 0;
+
+ np = of_find_compatible_node(NULL, NULL, "qcom,pil-reloc-info");
+ if (!np)
+ return -ENOENT;
+
+ ret = of_address_to_resource(np, 0, &imem);
+ of_node_put(np);
+ if (ret < 0)
+ return ret;
+
+ base = ioremap(imem.start, resource_size(&imem));
+ if (!base) {
+ pr_err("failed to map PIL relocation info region\n");
+ return -ENOMEM;
+ }
+
+ memset_io(base, 0, resource_size(&imem));
+
+ _reloc.base = base;
+ _reloc.num_entries = (u32)resource_size(&imem) / PIL_RELOC_ENTRY_SIZE;
+
+ return 0;
+}
+
+/**
+ * qcom_pil_info_store() - store PIL information of image in IMEM
+ * @image: name of the image
+ * @base: base address of the loaded image
+ * @size: size of the loaded image
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+int qcom_pil_info_store(const char *image, phys_addr_t base, size_t size)
+{
+ char buf[PIL_RELOC_NAME_LEN];
+ void __iomem *entry;
+ int ret;
+ int i;
+
+ mutex_lock(&pil_reloc_lock);
+ ret = qcom_pil_info_init();
+ if (ret < 0) {
+ mutex_unlock(&pil_reloc_lock);
+ return ret;
+ }
+
+ for (i = 0; i < _reloc.num_entries; i++) {
+ entry = _reloc.base + i * PIL_RELOC_ENTRY_SIZE;
+
+ memcpy_fromio(buf, entry, PIL_RELOC_NAME_LEN);
+
+ /*
+ * An empty record means we didn't find it, given that the
+ * records are packed.
+ */
+ if (!buf[0])
+ goto found_unused;
+
+ if (!strncmp(buf, image, PIL_RELOC_NAME_LEN))
+ goto found_existing;
+ }
+
+ pr_warn("insufficient PIL info slots\n");
+ mutex_unlock(&pil_reloc_lock);
+ return -ENOMEM;
+
+found_unused:
+ memcpy_toio(entry, image, strnlen(image, PIL_RELOC_NAME_LEN));
+found_existing:
+ /* Use two writel() as base is only aligned to 4 bytes on odd entries */
+ writel(base, entry + PIL_RELOC_NAME_LEN);
+ writel((u64)base >> 32, entry + PIL_RELOC_NAME_LEN + 4);
+ writel(size, entry + PIL_RELOC_NAME_LEN + sizeof(__le64));
+ mutex_unlock(&pil_reloc_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_pil_info_store);
+
+static void __exit pil_reloc_exit(void)
+{
+ mutex_lock(&pil_reloc_lock);
+ iounmap(_reloc.base);
+ _reloc.base = NULL;
+ mutex_unlock(&pil_reloc_lock);
+}
+module_exit(pil_reloc_exit);
+
+MODULE_DESCRIPTION("Qualcomm PIL relocation info");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/remoteproc/qcom_pil_info.h b/drivers/remoteproc/qcom_pil_info.h
new file mode 100644
index 000000000..0dce61429
--- /dev/null
+++ b/drivers/remoteproc/qcom_pil_info.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __QCOM_PIL_INFO_H__
+#define __QCOM_PIL_INFO_H__
+
+#include <linux/types.h>
+
+int qcom_pil_info_store(const char *image, phys_addr_t base, size_t size);
+
+#endif
diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c
new file mode 100644
index 000000000..497acfb33
--- /dev/null
+++ b/drivers/remoteproc/qcom_q6v5.c
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm Peripheral Image Loader for Q6V5
+ *
+ * Copyright (C) 2016-2018 Linaro Ltd.
+ * Copyright (C) 2014 Sony Mobile Communications AB
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ */
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/interconnect.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/soc/qcom/qcom_aoss.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/soc/qcom/smem_state.h>
+#include <linux/remoteproc.h>
+#include "qcom_common.h"
+#include "qcom_q6v5.h"
+
+#define Q6V5_LOAD_STATE_MSG_LEN 64
+#define Q6V5_PANIC_DELAY_MS 200
+
+static int q6v5_load_state_toggle(struct qcom_q6v5 *q6v5, bool enable)
+{
+ char buf[Q6V5_LOAD_STATE_MSG_LEN];
+ int ret;
+
+ if (!q6v5->qmp)
+ return 0;
+
+ ret = snprintf(buf, sizeof(buf),
+ "{class: image, res: load_state, name: %s, val: %s}",
+ q6v5->load_state, enable ? "on" : "off");
+
+ WARN_ON(ret >= Q6V5_LOAD_STATE_MSG_LEN);
+
+ ret = qmp_send(q6v5->qmp, buf, sizeof(buf));
+ if (ret)
+ dev_err(q6v5->dev, "failed to toggle load state\n");
+
+ return ret;
+}
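+
+/*
+ * For example, with load_state "wpss" and enable true, the message written to
+ * the AOSS QMP mailbox above reads:
+ *
+ *	{class: image, res: load_state, name: wpss, val: on}
+ */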
+
+/**
+ * qcom_q6v5_prepare() - reinitialize the qcom_q6v5 context before start
+ * @q6v5: reference to qcom_q6v5 context to be reinitialized
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+int qcom_q6v5_prepare(struct qcom_q6v5 *q6v5)
+{
+ int ret;
+
+ ret = icc_set_bw(q6v5->path, 0, UINT_MAX);
+ if (ret < 0) {
+ dev_err(q6v5->dev, "failed to set bandwidth request\n");
+ return ret;
+ }
+
+ ret = q6v5_load_state_toggle(q6v5, true);
+ if (ret) {
+ icc_set_bw(q6v5->path, 0, 0);
+ return ret;
+ }
+
+ reinit_completion(&q6v5->start_done);
+ reinit_completion(&q6v5->stop_done);
+
+ q6v5->running = true;
+ q6v5->handover_issued = false;
+
+ enable_irq(q6v5->handover_irq);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_q6v5_prepare);
+
+/**
+ * qcom_q6v5_unprepare() - unprepare the qcom_q6v5 context after stop
+ * @q6v5: reference to qcom_q6v5 context to be unprepared
+ *
+ * Return: 0 on success, 1 if handover hasn't yet been called
+ */
+int qcom_q6v5_unprepare(struct qcom_q6v5 *q6v5)
+{
+ disable_irq(q6v5->handover_irq);
+ q6v5_load_state_toggle(q6v5, false);
+
+ /* Disable interconnect vote, in case handover never happened */
+ icc_set_bw(q6v5->path, 0, 0);
+
+ return !q6v5->handover_issued;
+}
+EXPORT_SYMBOL_GPL(qcom_q6v5_unprepare);
+
+static irqreturn_t q6v5_wdog_interrupt(int irq, void *data)
+{
+ struct qcom_q6v5 *q6v5 = data;
+ size_t len;
+ char *msg;
+
+ /* Sometimes the stop triggers a watchdog rather than a stop-ack */
+ if (!q6v5->running) {
+ complete(&q6v5->stop_done);
+ return IRQ_HANDLED;
+ }
+
+ msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, q6v5->crash_reason, &len);
+ if (!IS_ERR(msg) && len > 0 && msg[0])
+ dev_err(q6v5->dev, "watchdog received: %s\n", msg);
+ else
+ dev_err(q6v5->dev, "watchdog without message\n");
+
+ q6v5->running = false;
+ rproc_report_crash(q6v5->rproc, RPROC_WATCHDOG);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t q6v5_fatal_interrupt(int irq, void *data)
+{
+ struct qcom_q6v5 *q6v5 = data;
+ size_t len;
+ char *msg;
+
+ if (!q6v5->running)
+ return IRQ_HANDLED;
+
+ msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, q6v5->crash_reason, &len);
+ if (!IS_ERR(msg) && len > 0 && msg[0])
+ dev_err(q6v5->dev, "fatal error received: %s\n", msg);
+ else
+ dev_err(q6v5->dev, "fatal error without message\n");
+
+ q6v5->running = false;
+ rproc_report_crash(q6v5->rproc, RPROC_FATAL_ERROR);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t q6v5_ready_interrupt(int irq, void *data)
+{
+ struct qcom_q6v5 *q6v5 = data;
+
+ complete(&q6v5->start_done);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * qcom_q6v5_wait_for_start() - wait for remote processor start signal
+ * @q6v5: reference to qcom_q6v5 context
+ * @timeout: timeout to wait for the event, in jiffies
+ *
+ * qcom_q6v5_unprepare() should not be called when this function fails.
+ *
+ * Return: 0 on success, -ETIMEDOUT on timeout
+ */
+int qcom_q6v5_wait_for_start(struct qcom_q6v5 *q6v5, int timeout)
+{
+ int ret;
+
+ ret = wait_for_completion_timeout(&q6v5->start_done, timeout);
+ if (!ret)
+ disable_irq(q6v5->handover_irq);
+
+ return !ret ? -ETIMEDOUT : 0;
+}
+EXPORT_SYMBOL_GPL(qcom_q6v5_wait_for_start);
+
+static irqreturn_t q6v5_handover_interrupt(int irq, void *data)
+{
+ struct qcom_q6v5 *q6v5 = data;
+
+ if (q6v5->handover)
+ q6v5->handover(q6v5);
+
+ icc_set_bw(q6v5->path, 0, 0);
+
+ q6v5->handover_issued = true;
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t q6v5_stop_interrupt(int irq, void *data)
+{
+ struct qcom_q6v5 *q6v5 = data;
+
+ complete(&q6v5->stop_done);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * qcom_q6v5_request_stop() - request the remote processor to stop
+ * @q6v5: reference to qcom_q6v5 context
+ * @sysmon: reference to the remote's sysmon instance, or NULL
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5, struct qcom_sysmon *sysmon)
+{
+ int ret;
+
+ q6v5->running = false;
+
+ /* Don't perform SMP2P dance if sysmon already shut down the remote */
+ if (qcom_sysmon_shutdown_acked(sysmon))
+ return 0;
+
+ qcom_smem_state_update_bits(q6v5->state,
+ BIT(q6v5->stop_bit), BIT(q6v5->stop_bit));
+
+ ret = wait_for_completion_timeout(&q6v5->stop_done, 5 * HZ);
+
+ qcom_smem_state_update_bits(q6v5->state, BIT(q6v5->stop_bit), 0);
+
+ return ret == 0 ? -ETIMEDOUT : 0;
+}
+EXPORT_SYMBOL_GPL(qcom_q6v5_request_stop);
+
+/**
+ * qcom_q6v5_panic() - panic handler to invoke a stop on the remote
+ * @q6v5: reference to qcom_q6v5 context
+ *
+ * Set the stop bit and sleep in order to allow the remote processor to flush
+ * its caches etc. for post mortem debugging.
+ *
+ * Return: 200ms
+ */
+unsigned long qcom_q6v5_panic(struct qcom_q6v5 *q6v5)
+{
+ qcom_smem_state_update_bits(q6v5->state,
+ BIT(q6v5->stop_bit), BIT(q6v5->stop_bit));
+
+ return Q6V5_PANIC_DELAY_MS;
+}
+EXPORT_SYMBOL_GPL(qcom_q6v5_panic);
+
+/**
+ * qcom_q6v5_init() - initializer of the q6v5 common struct
+ * @q6v5: handle to be initialized
+ * @pdev: platform_device reference for acquiring resources
+ * @rproc: associated remoteproc instance
+ * @crash_reason: SMEM id for crash reason string, or 0 if none
+ * @load_state: load state resource string
+ * @handover: function to be called when proxy resources should be released
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
+ struct rproc *rproc, int crash_reason, const char *load_state,
+ void (*handover)(struct qcom_q6v5 *q6v5))
+{
+ int ret;
+
+ q6v5->rproc = rproc;
+ q6v5->dev = &pdev->dev;
+ q6v5->crash_reason = crash_reason;
+ q6v5->handover = handover;
+
+ init_completion(&q6v5->start_done);
+ init_completion(&q6v5->stop_done);
+
+ q6v5->wdog_irq = platform_get_irq_byname(pdev, "wdog");
+ if (q6v5->wdog_irq < 0)
+ return q6v5->wdog_irq;
+
+ ret = devm_request_threaded_irq(&pdev->dev, q6v5->wdog_irq,
+ NULL, q6v5_wdog_interrupt,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "q6v5 wdog", q6v5);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to acquire wdog IRQ\n");
+ return ret;
+ }
+
+ q6v5->fatal_irq = platform_get_irq_byname(pdev, "fatal");
+ if (q6v5->fatal_irq < 0)
+ return q6v5->fatal_irq;
+
+ ret = devm_request_threaded_irq(&pdev->dev, q6v5->fatal_irq,
+ NULL, q6v5_fatal_interrupt,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "q6v5 fatal", q6v5);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to acquire fatal IRQ\n");
+ return ret;
+ }
+
+ q6v5->ready_irq = platform_get_irq_byname(pdev, "ready");
+ if (q6v5->ready_irq < 0)
+ return q6v5->ready_irq;
+
+ ret = devm_request_threaded_irq(&pdev->dev, q6v5->ready_irq,
+ NULL, q6v5_ready_interrupt,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "q6v5 ready", q6v5);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to acquire ready IRQ\n");
+ return ret;
+ }
+
+ q6v5->handover_irq = platform_get_irq_byname(pdev, "handover");
+ if (q6v5->handover_irq < 0)
+ return q6v5->handover_irq;
+
+ ret = devm_request_threaded_irq(&pdev->dev, q6v5->handover_irq,
+ NULL, q6v5_handover_interrupt,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "q6v5 handover", q6v5);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to acquire handover IRQ\n");
+ return ret;
+ }
+ disable_irq(q6v5->handover_irq);
+
+ q6v5->stop_irq = platform_get_irq_byname(pdev, "stop-ack");
+ if (q6v5->stop_irq < 0)
+ return q6v5->stop_irq;
+
+ ret = devm_request_threaded_irq(&pdev->dev, q6v5->stop_irq,
+ NULL, q6v5_stop_interrupt,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "q6v5 stop", q6v5);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to acquire stop-ack IRQ\n");
+ return ret;
+ }
+
+ q6v5->state = devm_qcom_smem_state_get(&pdev->dev, "stop", &q6v5->stop_bit);
+ if (IS_ERR(q6v5->state)) {
+ dev_err(&pdev->dev, "failed to acquire stop state\n");
+ return PTR_ERR(q6v5->state);
+ }
+
+ q6v5->load_state = devm_kstrdup_const(&pdev->dev, load_state, GFP_KERNEL);
+ q6v5->qmp = qmp_get(&pdev->dev);
+ if (IS_ERR(q6v5->qmp)) {
+ if (PTR_ERR(q6v5->qmp) != -ENODEV)
+ return dev_err_probe(&pdev->dev, PTR_ERR(q6v5->qmp),
+ "failed to acquire load state\n");
+ q6v5->qmp = NULL;
+ } else if (!q6v5->load_state) {
+ if (!load_state)
+ dev_err(&pdev->dev, "load state resource string empty\n");
+
+ qmp_put(q6v5->qmp);
+ return load_state ? -ENOMEM : -EINVAL;
+ }
+
+ q6v5->path = devm_of_icc_get(&pdev->dev, NULL);
+ if (IS_ERR(q6v5->path))
+ return dev_err_probe(&pdev->dev, PTR_ERR(q6v5->path),
+ "failed to acquire interconnect path\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_q6v5_init);
+
+/**
+ * qcom_q6v5_deinit() - deinitialize the q6v5 common struct
+ * @q6v5: reference to qcom_q6v5 context to be deinitialized
+ */
+void qcom_q6v5_deinit(struct qcom_q6v5 *q6v5)
+{
+ qmp_put(q6v5->qmp);
+}
+EXPORT_SYMBOL_GPL(qcom_q6v5_deinit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm Peripheral Image Loader for Q6V5");
diff --git a/drivers/remoteproc/qcom_q6v5.h b/drivers/remoteproc/qcom_q6v5.h
new file mode 100644
index 000000000..5a859c418
--- /dev/null
+++ b/drivers/remoteproc/qcom_q6v5.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __QCOM_Q6V5_H__
+#define __QCOM_Q6V5_H__
+
+#include <linux/kernel.h>
+#include <linux/completion.h>
+#include <linux/soc/qcom/qcom_aoss.h>
+
+struct icc_path;
+struct rproc;
+struct qcom_smem_state;
+struct qcom_sysmon;
+
+struct qcom_q6v5 {
+ struct device *dev;
+ struct rproc *rproc;
+
+ struct qcom_smem_state *state;
+ struct qmp *qmp;
+
+ struct icc_path *path;
+
+ unsigned stop_bit;
+
+ int wdog_irq;
+ int fatal_irq;
+ int ready_irq;
+ int handover_irq;
+ int stop_irq;
+
+ bool handover_issued;
+
+ struct completion start_done;
+ struct completion stop_done;
+
+ int crash_reason;
+
+ bool running;
+
+ const char *load_state;
+ void (*handover)(struct qcom_q6v5 *q6v5);
+};
+
+int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
+ struct rproc *rproc, int crash_reason, const char *load_state,
+ void (*handover)(struct qcom_q6v5 *q6v5));
+void qcom_q6v5_deinit(struct qcom_q6v5 *q6v5);
+
+int qcom_q6v5_prepare(struct qcom_q6v5 *q6v5);
+int qcom_q6v5_unprepare(struct qcom_q6v5 *q6v5);
+int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5, struct qcom_sysmon *sysmon);
+int qcom_q6v5_wait_for_start(struct qcom_q6v5 *q6v5, int timeout);
+unsigned long qcom_q6v5_panic(struct qcom_q6v5 *q6v5);
+
+#endif
diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c
new file mode 100644
index 000000000..4c9a1b99c
--- /dev/null
+++ b/drivers/remoteproc/qcom_q6v5_adsp.c
@@ -0,0 +1,754 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm Technology Inc. ADSP Peripheral Image Loader for SDM845.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/remoteproc.h>
+#include <linux/reset.h>
+#include <linux/soc/qcom/mdt_loader.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/soc/qcom/smem_state.h>
+
+#include "qcom_common.h"
+#include "qcom_pil_info.h"
+#include "qcom_q6v5.h"
+#include "remoteproc_internal.h"
+
+/* timeout values */
+#define ACK_TIMEOUT 1000
+#define ACK_TIMEOUT_US 1000000
+#define BOOT_FSM_TIMEOUT 10000
+/* mask values */
+#define EVB_MASK GENMASK(27, 4)
+/* QDSP6SS register offsets */
+#define RST_EVB_REG 0x10
+#define CORE_START_REG 0x400
+#define BOOT_CMD_REG 0x404
+#define BOOT_STATUS_REG 0x408
+#define RET_CFG_REG 0x1C
+/* TCSR register offsets */
+#define LPASS_MASTER_IDLE_REG 0x8
+#define LPASS_HALTACK_REG 0x4
+#define LPASS_PWR_ON_REG 0x10
+#define LPASS_HALTREQ_REG 0x0
+
+#define QDSP6SS_XO_CBCR 0x38
+#define QDSP6SS_CORE_CBCR 0x20
+#define QDSP6SS_SLEEP_CBCR 0x3c
+
+#define QCOM_Q6V5_RPROC_PROXY_PD_MAX 3
+
+struct adsp_pil_data {
+ int crash_reason_smem;
+ const char *firmware_name;
+
+ const char *ssr_name;
+ const char *sysmon_name;
+ int ssctl_id;
+ bool is_wpss;
+ bool auto_boot;
+
+ const char **clk_ids;
+ int num_clks;
+ const char **proxy_pd_names;
+ const char *load_state;
+};
+
+struct qcom_adsp {
+ struct device *dev;
+ struct rproc *rproc;
+
+ struct qcom_q6v5 q6v5;
+
+ struct clk *xo;
+
+ int num_clks;
+ struct clk_bulk_data *clks;
+
+ void __iomem *qdsp6ss_base;
+
+ struct reset_control *pdc_sync_reset;
+ struct reset_control *restart;
+
+ struct regmap *halt_map;
+ unsigned int halt_lpass;
+
+ int crash_reason_smem;
+ const char *info_name;
+
+ struct completion start_done;
+ struct completion stop_done;
+
+ phys_addr_t mem_phys;
+ phys_addr_t mem_reloc;
+ void *mem_region;
+ size_t mem_size;
+
+ struct device *proxy_pds[QCOM_Q6V5_RPROC_PROXY_PD_MAX];
+ size_t proxy_pd_count;
+
+ struct qcom_rproc_glink glink_subdev;
+ struct qcom_rproc_ssr ssr_subdev;
+ struct qcom_sysmon *sysmon;
+
+ int (*shutdown)(struct qcom_adsp *adsp);
+};
+
+static int qcom_rproc_pds_attach(struct device *dev, struct qcom_adsp *adsp,
+ const char **pd_names)
+{
+ struct device **devs = adsp->proxy_pds;
+ size_t num_pds = 0;
+ int ret;
+ int i;
+
+ if (!pd_names)
+ return 0;
+
+ /* Handle single power domain */
+ if (dev->pm_domain) {
+ devs[0] = dev;
+ pm_runtime_enable(dev);
+ return 1;
+ }
+
+ while (pd_names[num_pds])
+ num_pds++;
+
+ if (num_pds > ARRAY_SIZE(adsp->proxy_pds))
+ return -E2BIG;
+
+ for (i = 0; i < num_pds; i++) {
+ devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
+ if (IS_ERR_OR_NULL(devs[i])) {
+ ret = PTR_ERR(devs[i]) ? : -ENODATA;
+ goto unroll_attach;
+ }
+ }
+
+ return num_pds;
+
+unroll_attach:
+ for (i--; i >= 0; i--)
+ dev_pm_domain_detach(devs[i], false);
+
+ return ret;
+}
+
+static void qcom_rproc_pds_detach(struct qcom_adsp *adsp, struct device **pds,
+ size_t pd_count)
+{
+ struct device *dev = adsp->dev;
+ int i;
+
+ /* Handle single power domain */
+ if (dev->pm_domain && pd_count) {
+ pm_runtime_disable(dev);
+ return;
+ }
+
+ for (i = 0; i < pd_count; i++)
+ dev_pm_domain_detach(pds[i], false);
+}
+
+static int qcom_rproc_pds_enable(struct qcom_adsp *adsp, struct device **pds,
+ size_t pd_count)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < pd_count; i++) {
+ dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
+ ret = pm_runtime_resume_and_get(pds[i]);
+ if (ret < 0) {
+ dev_pm_genpd_set_performance_state(pds[i], 0);
+ goto unroll_pd_votes;
+ }
+ }
+
+ return 0;
+
+unroll_pd_votes:
+ for (i--; i >= 0; i--) {
+ dev_pm_genpd_set_performance_state(pds[i], 0);
+ pm_runtime_put(pds[i]);
+ }
+
+ return ret;
+}
+
+static void qcom_rproc_pds_disable(struct qcom_adsp *adsp, struct device **pds,
+ size_t pd_count)
+{
+ int i;
+
+ for (i = 0; i < pd_count; i++) {
+ dev_pm_genpd_set_performance_state(pds[i], 0);
+ pm_runtime_put(pds[i]);
+ }
+}
+
+static int qcom_wpss_shutdown(struct qcom_adsp *adsp)
+{
+ unsigned int val;
+
+ regmap_write(adsp->halt_map, adsp->halt_lpass + LPASS_HALTREQ_REG, 1);
+
+ /* Wait for halt ACK from QDSP6 */
+ regmap_read_poll_timeout(adsp->halt_map,
+ adsp->halt_lpass + LPASS_HALTACK_REG, val,
+ val, 1000, ACK_TIMEOUT_US);
+
+ /* Assert the WPSS PDC Reset */
+ reset_control_assert(adsp->pdc_sync_reset);
+
+ /* Place the WPSS processor into reset */
+ reset_control_assert(adsp->restart);
+
+ /* wait after asserting subsystem restart from AOSS */
+ usleep_range(200, 205);
+
+ /* Remove the WPSS reset */
+ reset_control_deassert(adsp->restart);
+
+ /* De-assert the WPSS PDC Reset */
+ reset_control_deassert(adsp->pdc_sync_reset);
+
+ usleep_range(100, 105);
+
+ clk_bulk_disable_unprepare(adsp->num_clks, adsp->clks);
+
+ regmap_write(adsp->halt_map, adsp->halt_lpass + LPASS_HALTREQ_REG, 0);
+
+ /* Wait for halt ACK from QDSP6 */
+ regmap_read_poll_timeout(adsp->halt_map,
+ adsp->halt_lpass + LPASS_HALTACK_REG, val,
+ !val, 1000, ACK_TIMEOUT_US);
+
+ return 0;
+}
+
+static int qcom_adsp_shutdown(struct qcom_adsp *adsp)
+{
+ unsigned long timeout;
+ unsigned int val;
+ int ret;
+
+ /* Reset the retention logic */
+ val = readl(adsp->qdsp6ss_base + RET_CFG_REG);
+ val |= 0x1;
+ writel(val, adsp->qdsp6ss_base + RET_CFG_REG);
+
+ clk_bulk_disable_unprepare(adsp->num_clks, adsp->clks);
+
+ /* QDSP6 master port needs to be explicitly halted */
+ ret = regmap_read(adsp->halt_map,
+ adsp->halt_lpass + LPASS_PWR_ON_REG, &val);
+ if (ret || !val)
+ goto reset;
+
+ ret = regmap_read(adsp->halt_map,
+ adsp->halt_lpass + LPASS_MASTER_IDLE_REG,
+ &val);
+ if (ret || val)
+ goto reset;
+
+ regmap_write(adsp->halt_map,
+ adsp->halt_lpass + LPASS_HALTREQ_REG, 1);
+
+ /* Wait for halt ACK from QDSP6 */
+ timeout = jiffies + msecs_to_jiffies(ACK_TIMEOUT);
+ for (;;) {
+ ret = regmap_read(adsp->halt_map,
+ adsp->halt_lpass + LPASS_HALTACK_REG, &val);
+ if (ret || val || time_after(jiffies, timeout))
+ break;
+
+ usleep_range(1000, 1100);
+ }
+
+ ret = regmap_read(adsp->halt_map,
+ adsp->halt_lpass + LPASS_MASTER_IDLE_REG, &val);
+ if (ret || !val)
+		dev_err(adsp->dev, "port failed to halt\n");
+
+reset:
+ /* Assert the LPASS PDC Reset */
+ reset_control_assert(adsp->pdc_sync_reset);
+ /* Place the LPASS processor into reset */
+ reset_control_assert(adsp->restart);
+ /* wait after asserting subsystem restart from AOSS */
+ usleep_range(200, 300);
+
+ /* Clear the halt request for the AXIM and AHBM for Q6 */
+ regmap_write(adsp->halt_map, adsp->halt_lpass + LPASS_HALTREQ_REG, 0);
+
+ /* De-assert the LPASS PDC Reset */
+ reset_control_deassert(adsp->pdc_sync_reset);
+ /* Remove the LPASS reset */
+ reset_control_deassert(adsp->restart);
+ /* wait after de-asserting subsystem restart from AOSS */
+ usleep_range(200, 300);
+
+ return 0;
+}
+
+static int adsp_load(struct rproc *rproc, const struct firmware *fw)
+{
+ struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
+ int ret;
+
+ ret = qcom_mdt_load_no_init(adsp->dev, fw, rproc->firmware, 0,
+ adsp->mem_region, adsp->mem_phys,
+ adsp->mem_size, &adsp->mem_reloc);
+ if (ret)
+ return ret;
+
+ qcom_pil_info_store(adsp->info_name, adsp->mem_phys, adsp->mem_size);
+
+ return 0;
+}
+
+static int adsp_start(struct rproc *rproc)
+{
+ struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
+ int ret;
+ unsigned int val;
+
+ ret = qcom_q6v5_prepare(&adsp->q6v5);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(adsp->xo);
+ if (ret)
+ goto disable_irqs;
+
+ ret = qcom_rproc_pds_enable(adsp, adsp->proxy_pds,
+ adsp->proxy_pd_count);
+ if (ret < 0)
+ goto disable_xo_clk;
+
+ ret = clk_bulk_prepare_enable(adsp->num_clks, adsp->clks);
+ if (ret) {
+ dev_err(adsp->dev, "adsp clk_enable failed\n");
+ goto disable_power_domain;
+ }
+
+ /* Enable the XO clock */
+ writel(1, adsp->qdsp6ss_base + QDSP6SS_XO_CBCR);
+
+ /* Enable the QDSP6SS sleep clock */
+ writel(1, adsp->qdsp6ss_base + QDSP6SS_SLEEP_CBCR);
+
+ /* Enable the QDSP6 core clock */
+ writel(1, adsp->qdsp6ss_base + QDSP6SS_CORE_CBCR);
+
+ /* Program boot address */
+ writel(adsp->mem_phys >> 4, adsp->qdsp6ss_base + RST_EVB_REG);
+
+	/* De-assert QDSP6 stop core. QDSP6 will execute once out of reset */
+ writel(0x1, adsp->qdsp6ss_base + CORE_START_REG);
+
+ /* Trigger boot FSM to start QDSP6 */
+ writel(0x1, adsp->qdsp6ss_base + BOOT_CMD_REG);
+
+ /* Wait for core to come out of reset */
+ ret = readl_poll_timeout(adsp->qdsp6ss_base + BOOT_STATUS_REG,
+ val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
+ if (ret) {
+		dev_err(adsp->dev, "failed to boot up adsp\n");
+ goto disable_adsp_clks;
+ }
+
+	ret = qcom_q6v5_wait_for_start(&adsp->q6v5, msecs_to_jiffies(5000));
+ if (ret == -ETIMEDOUT) {
+ dev_err(adsp->dev, "start timed out\n");
+ goto disable_adsp_clks;
+ }
+
+ return 0;
+
+disable_adsp_clks:
+ clk_bulk_disable_unprepare(adsp->num_clks, adsp->clks);
+disable_power_domain:
+ qcom_rproc_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
+disable_xo_clk:
+ clk_disable_unprepare(adsp->xo);
+disable_irqs:
+ qcom_q6v5_unprepare(&adsp->q6v5);
+
+ return ret;
+}
+
+static void qcom_adsp_pil_handover(struct qcom_q6v5 *q6v5)
+{
+ struct qcom_adsp *adsp = container_of(q6v5, struct qcom_adsp, q6v5);
+
+ clk_disable_unprepare(adsp->xo);
+ qcom_rproc_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
+}
+
+static int adsp_stop(struct rproc *rproc)
+{
+ struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
+ int handover;
+ int ret;
+
+ ret = qcom_q6v5_request_stop(&adsp->q6v5, adsp->sysmon);
+ if (ret == -ETIMEDOUT)
+ dev_err(adsp->dev, "timed out on wait\n");
+
+ ret = adsp->shutdown(adsp);
+ if (ret)
+ dev_err(adsp->dev, "failed to shutdown: %d\n", ret);
+
+ handover = qcom_q6v5_unprepare(&adsp->q6v5);
+ if (handover)
+ qcom_adsp_pil_handover(&adsp->q6v5);
+
+ return ret;
+}
+
+static void *adsp_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
+{
+ struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
+ int offset;
+
+ offset = da - adsp->mem_reloc;
+ if (offset < 0 || offset + len > adsp->mem_size)
+ return NULL;
+
+ return adsp->mem_region + offset;
+}
+
+static unsigned long adsp_panic(struct rproc *rproc)
+{
+ struct qcom_adsp *adsp = rproc->priv;
+
+ return qcom_q6v5_panic(&adsp->q6v5);
+}
+
+static const struct rproc_ops adsp_ops = {
+ .start = adsp_start,
+ .stop = adsp_stop,
+ .da_to_va = adsp_da_to_va,
+ .parse_fw = qcom_register_dump_segments,
+ .load = adsp_load,
+ .panic = adsp_panic,
+};
+
+static int adsp_init_clock(struct qcom_adsp *adsp, const char **clk_ids)
+{
+ int num_clks = 0;
+ int i, ret;
+
+ adsp->xo = devm_clk_get(adsp->dev, "xo");
+ if (IS_ERR(adsp->xo)) {
+ ret = PTR_ERR(adsp->xo);
+ if (ret != -EPROBE_DEFER)
+			dev_err(adsp->dev, "failed to get xo clock\n");
+ return ret;
+ }
+
+ for (i = 0; clk_ids[i]; i++)
+ num_clks++;
+
+ adsp->num_clks = num_clks;
+ adsp->clks = devm_kcalloc(adsp->dev, adsp->num_clks,
+ sizeof(*adsp->clks), GFP_KERNEL);
+ if (!adsp->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < adsp->num_clks; i++)
+ adsp->clks[i].id = clk_ids[i];
+
+ return devm_clk_bulk_get(adsp->dev, adsp->num_clks, adsp->clks);
+}
+
+static int adsp_init_reset(struct qcom_adsp *adsp)
+{
+ adsp->pdc_sync_reset = devm_reset_control_get_optional_exclusive(adsp->dev,
+ "pdc_sync");
+ if (IS_ERR(adsp->pdc_sync_reset)) {
+ dev_err(adsp->dev, "failed to acquire pdc_sync reset\n");
+ return PTR_ERR(adsp->pdc_sync_reset);
+ }
+
+ adsp->restart = devm_reset_control_get_optional_exclusive(adsp->dev, "restart");
+
+ /* Fall back to the old "cc_lpass" if "restart" is absent */
+ if (!adsp->restart)
+ adsp->restart = devm_reset_control_get_exclusive(adsp->dev, "cc_lpass");
+
+ if (IS_ERR(adsp->restart)) {
+ dev_err(adsp->dev, "failed to acquire restart\n");
+ return PTR_ERR(adsp->restart);
+ }
+
+ return 0;
+}
+
+static int adsp_init_mmio(struct qcom_adsp *adsp,
+ struct platform_device *pdev)
+{
+ struct device_node *syscon;
+ int ret;
+
+ adsp->qdsp6ss_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(adsp->qdsp6ss_base)) {
+ dev_err(adsp->dev, "failed to map QDSP6SS registers\n");
+ return PTR_ERR(adsp->qdsp6ss_base);
+ }
+
+ syscon = of_parse_phandle(pdev->dev.of_node, "qcom,halt-regs", 0);
+ if (!syscon) {
+ dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
+ return -EINVAL;
+ }
+
+ adsp->halt_map = syscon_node_to_regmap(syscon);
+ of_node_put(syscon);
+ if (IS_ERR(adsp->halt_map))
+ return PTR_ERR(adsp->halt_map);
+
+ ret = of_property_read_u32_index(pdev->dev.of_node, "qcom,halt-regs",
+ 1, &adsp->halt_lpass);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no offset in syscon\n");
+ return ret;
+ }
+
+ return 0;
+}
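+
+/*
+ * The "qcom,halt-regs" property parsed above is a <phandle, offset> pair
+ * pointing at the TCSR syscon; a hypothetical device tree fragment (values
+ * illustrative only):
+ *
+ *	qcom,halt-regs = <&tcsr_mutex_regs 0x22000>;
+ */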
+
+static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
+{
+ struct device_node *node;
+ struct resource r;
+ int ret;
+
+ node = of_parse_phandle(adsp->dev->of_node, "memory-region", 0);
+ if (!node) {
+ dev_err(adsp->dev, "no memory-region specified\n");
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(node, 0, &r);
+ of_node_put(node);
+ if (ret)
+ return ret;
+
+ adsp->mem_phys = adsp->mem_reloc = r.start;
+ adsp->mem_size = resource_size(&r);
+ adsp->mem_region = devm_ioremap_wc(adsp->dev,
+ adsp->mem_phys, adsp->mem_size);
+ if (!adsp->mem_region) {
+ dev_err(adsp->dev, "unable to map memory region: %pa+%zx\n",
+ &r.start, adsp->mem_size);
+ return -EBUSY;
+ }
+
+ return 0;
+}
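+
+/*
+ * The "memory-region" phandle resolved above must reference a reserved-memory
+ * carveout large enough for the firmware; a hypothetical fragment (label
+ * illustrative only):
+ *
+ *	memory-region = <&adsp_mem>;
+ */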
+
+static int adsp_probe(struct platform_device *pdev)
+{
+ const struct adsp_pil_data *desc;
+ const char *firmware_name;
+ struct qcom_adsp *adsp;
+ struct rproc *rproc;
+ int ret;
+
+ desc = of_device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ firmware_name = desc->firmware_name;
+ ret = of_property_read_string(pdev->dev.of_node, "firmware-name",
+ &firmware_name);
+ if (ret < 0 && ret != -EINVAL) {
+ dev_err(&pdev->dev, "unable to read firmware-name\n");
+ return ret;
+ }
+
+ rproc = rproc_alloc(&pdev->dev, pdev->name, &adsp_ops,
+ firmware_name, sizeof(*adsp));
+ if (!rproc) {
+ dev_err(&pdev->dev, "unable to allocate remoteproc\n");
+ return -ENOMEM;
+ }
+
+ rproc->auto_boot = desc->auto_boot;
+ rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
+
+ adsp = (struct qcom_adsp *)rproc->priv;
+ adsp->dev = &pdev->dev;
+ adsp->rproc = rproc;
+ adsp->info_name = desc->sysmon_name;
+ platform_set_drvdata(pdev, adsp);
+
+ if (desc->is_wpss)
+ adsp->shutdown = qcom_wpss_shutdown;
+ else
+ adsp->shutdown = qcom_adsp_shutdown;
+
+ ret = adsp_alloc_memory_region(adsp);
+ if (ret)
+ goto free_rproc;
+
+ ret = adsp_init_clock(adsp, desc->clk_ids);
+ if (ret)
+ goto free_rproc;
+
+ ret = qcom_rproc_pds_attach(adsp->dev, adsp,
+ desc->proxy_pd_names);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to attach proxy power domains\n");
+ goto free_rproc;
+ }
+ adsp->proxy_pd_count = ret;
+
+ ret = adsp_init_reset(adsp);
+ if (ret)
+ goto disable_pm;
+
+ ret = adsp_init_mmio(adsp, pdev);
+ if (ret)
+ goto disable_pm;
+
+ ret = qcom_q6v5_init(&adsp->q6v5, pdev, rproc, desc->crash_reason_smem,
+ desc->load_state, qcom_adsp_pil_handover);
+ if (ret)
+ goto disable_pm;
+
+ qcom_add_glink_subdev(rproc, &adsp->glink_subdev, desc->ssr_name);
+ qcom_add_ssr_subdev(rproc, &adsp->ssr_subdev, desc->ssr_name);
+ adsp->sysmon = qcom_add_sysmon_subdev(rproc,
+ desc->sysmon_name,
+ desc->ssctl_id);
+ if (IS_ERR(adsp->sysmon)) {
+ ret = PTR_ERR(adsp->sysmon);
+ goto disable_pm;
+ }
+
+ ret = rproc_add(rproc);
+ if (ret)
+ goto disable_pm;
+
+ return 0;
+
+disable_pm:
+ qcom_rproc_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
+
+free_rproc:
+ rproc_free(rproc);
+
+ return ret;
+}
+
+static int adsp_remove(struct platform_device *pdev)
+{
+ struct qcom_adsp *adsp = platform_get_drvdata(pdev);
+
+ rproc_del(adsp->rproc);
+
+ qcom_q6v5_deinit(&adsp->q6v5);
+ qcom_remove_glink_subdev(adsp->rproc, &adsp->glink_subdev);
+ qcom_remove_sysmon_subdev(adsp->sysmon);
+ qcom_remove_ssr_subdev(adsp->rproc, &adsp->ssr_subdev);
+ qcom_rproc_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
+ rproc_free(adsp->rproc);
+
+ return 0;
+}
+
+static const struct adsp_pil_data adsp_resource_init = {
+ .crash_reason_smem = 423,
+ .firmware_name = "adsp.mdt",
+ .ssr_name = "lpass",
+ .sysmon_name = "adsp",
+ .ssctl_id = 0x14,
+ .is_wpss = false,
+ .auto_boot = true,
+ .clk_ids = (const char*[]) {
+ "sway_cbcr", "lpass_ahbs_aon_cbcr", "lpass_ahbm_aon_cbcr",
+ "qdsp6ss_xo", "qdsp6ss_sleep", "qdsp6ss_core", NULL
+ },
+	.num_clks = 6,
+ .proxy_pd_names = (const char*[]) {
+ "cx", NULL
+ },
+};
+
+static const struct adsp_pil_data cdsp_resource_init = {
+ .crash_reason_smem = 601,
+ .firmware_name = "cdsp.mdt",
+ .ssr_name = "cdsp",
+ .sysmon_name = "cdsp",
+ .ssctl_id = 0x17,
+ .is_wpss = false,
+ .auto_boot = true,
+ .clk_ids = (const char*[]) {
+ "sway", "tbu", "bimc", "ahb_aon", "q6ss_slave", "q6ss_master",
+ "q6_axim", NULL
+ },
+ .num_clks = 7,
+ .proxy_pd_names = (const char*[]) {
+ "cx", NULL
+ },
+};
+
+static const struct adsp_pil_data wpss_resource_init = {
+ .crash_reason_smem = 626,
+ .firmware_name = "wpss.mdt",
+ .ssr_name = "wpss",
+ .sysmon_name = "wpss",
+ .ssctl_id = 0x19,
+ .is_wpss = true,
+ .auto_boot = false,
+ .load_state = "wpss",
+ .clk_ids = (const char*[]) {
+ "ahb_bdg", "ahb", "rscp", NULL
+ },
+ .num_clks = 3,
+ .proxy_pd_names = (const char*[]) {
+ "cx", "mx", NULL
+ },
+};
+
+static const struct of_device_id adsp_of_match[] = {
+ { .compatible = "qcom,qcs404-cdsp-pil", .data = &cdsp_resource_init },
+ { .compatible = "qcom,sc7280-wpss-pil", .data = &wpss_resource_init },
+ { .compatible = "qcom,sdm845-adsp-pil", .data = &adsp_resource_init },
+ { },
+};
+MODULE_DEVICE_TABLE(of, adsp_of_match);
+
+static struct platform_driver adsp_pil_driver = {
+ .probe = adsp_probe,
+ .remove = adsp_remove,
+ .driver = {
+ .name = "qcom_q6v5_adsp",
+ .of_match_table = adsp_of_match,
+ },
+};
+
+module_platform_driver(adsp_pil_driver);
+MODULE_DESCRIPTION("QTI SDM845 ADSP Peripheral Image Loader");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
new file mode 100644
index 000000000..7dbab5fcb
--- /dev/null
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -0,0 +1,2386 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Qualcomm self-authenticating modem subsystem remoteproc driver
+ *
+ * Copyright (C) 2016 Linaro Ltd.
+ * Copyright (C) 2014 Sony Mobile Communications AB
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/devcoredump.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/remoteproc.h>
+#include <linux/reset.h>
+#include <linux/soc/qcom/mdt_loader.h>
+#include <linux/iopoll.h>
+#include <linux/slab.h>
+
+#include "remoteproc_internal.h"
+#include "qcom_common.h"
+#include "qcom_pil_info.h"
+#include "qcom_q6v5.h"
+
+#include <linux/qcom_scm.h>
+
+#define MPSS_CRASH_REASON_SMEM 421
+
+#define MBA_LOG_SIZE SZ_4K
+
+/* RMB Status Register Values */
+#define RMB_PBL_SUCCESS 0x1
+
+#define RMB_MBA_XPU_UNLOCKED 0x1
+#define RMB_MBA_XPU_UNLOCKED_SCRIBBLED 0x2
+#define RMB_MBA_META_DATA_AUTH_SUCCESS 0x3
+#define RMB_MBA_AUTH_COMPLETE 0x4
+
+/* PBL/MBA interface registers */
+#define RMB_MBA_IMAGE_REG 0x00
+#define RMB_PBL_STATUS_REG 0x04
+#define RMB_MBA_COMMAND_REG 0x08
+#define RMB_MBA_STATUS_REG 0x0C
+#define RMB_PMI_META_DATA_REG 0x10
+#define RMB_PMI_CODE_START_REG 0x14
+#define RMB_PMI_CODE_LENGTH_REG 0x18
+#define RMB_MBA_MSS_STATUS 0x40
+#define RMB_MBA_ALT_RESET 0x44
+
+#define RMB_CMD_META_DATA_READY 0x1
+#define RMB_CMD_LOAD_READY 0x2
+
+/* QDSP6SS Register Offsets */
+#define QDSP6SS_RESET_REG 0x014
+#define QDSP6SS_GFMUX_CTL_REG 0x020
+#define QDSP6SS_PWR_CTL_REG 0x030
+#define QDSP6SS_MEM_PWR_CTL 0x0B0
+#define QDSP6V6SS_MEM_PWR_CTL 0x034
+#define QDSP6SS_STRAP_ACC 0x110
+
+/* AXI Halt Register Offsets */
+#define AXI_HALTREQ_REG 0x0
+#define AXI_HALTACK_REG 0x4
+#define AXI_IDLE_REG 0x8
+#define AXI_GATING_VALID_OVERRIDE BIT(0)
+
+#define HALT_ACK_TIMEOUT_US 100000
+
+/* QACCEPT Register Offsets */
+#define QACCEPT_ACCEPT_REG 0x0
+#define QACCEPT_ACTIVE_REG 0x4
+#define QACCEPT_DENY_REG 0x8
+#define QACCEPT_REQ_REG 0xC
+
+#define QACCEPT_TIMEOUT_US 50
+
+/* QDSP6SS_RESET */
+#define Q6SS_STOP_CORE BIT(0)
+#define Q6SS_CORE_ARES BIT(1)
+#define Q6SS_BUS_ARES_ENABLE BIT(2)
+
+/* QDSP6SS CBCR */
+#define Q6SS_CBCR_CLKEN BIT(0)
+#define Q6SS_CBCR_CLKOFF BIT(31)
+#define Q6SS_CBCR_TIMEOUT_US 200
+
+/* QDSP6SS_GFMUX_CTL */
+#define Q6SS_CLK_ENABLE BIT(1)
+
+/* QDSP6SS_PWR_CTL */
+#define Q6SS_L2DATA_SLP_NRET_N_0 BIT(0)
+#define Q6SS_L2DATA_SLP_NRET_N_1 BIT(1)
+#define Q6SS_L2DATA_SLP_NRET_N_2 BIT(2)
+#define Q6SS_L2TAG_SLP_NRET_N BIT(16)
+#define Q6SS_ETB_SLP_NRET_N BIT(17)
+#define Q6SS_L2DATA_STBY_N BIT(18)
+#define Q6SS_SLP_RET_N BIT(19)
+#define Q6SS_CLAMP_IO BIT(20)
+#define QDSS_BHS_ON BIT(21)
+#define QDSS_LDO_BYP BIT(22)
+
+/* QDSP6v56 parameters */
+#define QDSP6v56_LDO_BYP BIT(25)
+#define QDSP6v56_BHS_ON BIT(24)
+#define QDSP6v56_CLAMP_WL BIT(21)
+#define QDSP6v56_CLAMP_QMC_MEM BIT(22)
+#define QDSP6SS_XO_CBCR 0x0038
+#define QDSP6SS_ACC_OVERRIDE_VAL 0x20
+
+/* QDSP6v65 parameters */
+#define QDSP6SS_CORE_CBCR 0x20
+#define QDSP6SS_SLEEP 0x3C
+#define QDSP6SS_BOOT_CORE_START 0x400
+#define QDSP6SS_BOOT_CMD 0x404
+#define BOOT_FSM_TIMEOUT 10000
+
+struct reg_info {
+ struct regulator *reg;
+ int uV;
+ int uA;
+};
+
+struct qcom_mss_reg_res {
+ const char *supply;
+ int uV;
+ int uA;
+};
+
+struct rproc_hexagon_res {
+ const char *hexagon_mba_image;
+ struct qcom_mss_reg_res *proxy_supply;
+ struct qcom_mss_reg_res *fallback_proxy_supply;
+ struct qcom_mss_reg_res *active_supply;
+ char **proxy_clk_names;
+ char **reset_clk_names;
+ char **active_clk_names;
+ char **proxy_pd_names;
+ int version;
+ bool need_mem_protection;
+ bool has_alt_reset;
+ bool has_mba_logs;
+ bool has_spare_reg;
+ bool has_qaccept_regs;
+ bool has_ext_cntl_regs;
+ bool has_vq6;
+};
+
+struct q6v5 {
+ struct device *dev;
+ struct rproc *rproc;
+
+ void __iomem *reg_base;
+ void __iomem *rmb_base;
+
+ struct regmap *halt_map;
+ struct regmap *conn_map;
+
+ u32 halt_q6;
+ u32 halt_modem;
+ u32 halt_nc;
+ u32 halt_vq6;
+ u32 conn_box;
+
+ u32 qaccept_mdm;
+ u32 qaccept_cx;
+ u32 qaccept_axi;
+
+ u32 axim1_clk_off;
+ u32 crypto_clk_off;
+ u32 force_clk_on;
+ u32 rscc_disable;
+
+ struct reset_control *mss_restart;
+ struct reset_control *pdc_reset;
+
+ struct qcom_q6v5 q6v5;
+
+ struct clk *active_clks[8];
+ struct clk *reset_clks[4];
+ struct clk *proxy_clks[4];
+ struct device *proxy_pds[3];
+ int active_clk_count;
+ int reset_clk_count;
+ int proxy_clk_count;
+ int proxy_pd_count;
+
+ struct reg_info active_regs[1];
+ struct reg_info proxy_regs[1];
+ struct reg_info fallback_proxy_regs[2];
+ int active_reg_count;
+ int proxy_reg_count;
+ int fallback_proxy_reg_count;
+
+ bool dump_mba_loaded;
+ size_t current_dump_size;
+ size_t total_dump_size;
+
+ phys_addr_t mba_phys;
+ size_t mba_size;
+ size_t dp_size;
+
+ phys_addr_t mdata_phys;
+ size_t mdata_size;
+
+ phys_addr_t mpss_phys;
+ phys_addr_t mpss_reloc;
+ size_t mpss_size;
+
+ struct qcom_rproc_glink glink_subdev;
+ struct qcom_rproc_subdev smd_subdev;
+ struct qcom_rproc_ssr ssr_subdev;
+ struct qcom_sysmon *sysmon;
+ struct platform_device *bam_dmux;
+ bool need_mem_protection;
+ bool has_alt_reset;
+ bool has_mba_logs;
+ bool has_spare_reg;
+ bool has_qaccept_regs;
+ bool has_ext_cntl_regs;
+ bool has_vq6;
+ int mpss_perm;
+ int mba_perm;
+ const char *hexagon_mdt_image;
+ int version;
+};
+
+enum {
+ MSS_MSM8916,
+ MSS_MSM8974,
+ MSS_MSM8996,
+ MSS_MSM8998,
+ MSS_SC7180,
+ MSS_SC7280,
+ MSS_SDM845,
+};
+
+static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
+ const struct qcom_mss_reg_res *reg_res)
+{
+ int rc;
+ int i;
+
+ if (!reg_res)
+ return 0;
+
+ for (i = 0; reg_res[i].supply; i++) {
+ regs[i].reg = devm_regulator_get(dev, reg_res[i].supply);
+ if (IS_ERR(regs[i].reg)) {
+ rc = PTR_ERR(regs[i].reg);
+ if (rc != -EPROBE_DEFER)
+				dev_err(dev, "Failed to get %s regulator\n",
+					reg_res[i].supply);
+ return rc;
+ }
+
+ regs[i].uV = reg_res[i].uV;
+ regs[i].uA = reg_res[i].uA;
+ }
+
+ return i;
+}
+
+static int q6v5_regulator_enable(struct q6v5 *qproc,
+ struct reg_info *regs, int count)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ if (regs[i].uV > 0) {
+ ret = regulator_set_voltage(regs[i].reg,
+ regs[i].uV, INT_MAX);
+ if (ret) {
+ dev_err(qproc->dev,
+ "Failed to request voltage for %d.\n",
+ i);
+ goto err;
+ }
+ }
+
+ if (regs[i].uA > 0) {
+ ret = regulator_set_load(regs[i].reg,
+ regs[i].uA);
+ if (ret < 0) {
+ dev_err(qproc->dev,
+ "Failed to set regulator mode\n");
+ goto err;
+ }
+ }
+
+ ret = regulator_enable(regs[i].reg);
+ if (ret) {
+ dev_err(qproc->dev, "Regulator enable failed\n");
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ for (; i >= 0; i--) {
+ if (regs[i].uV > 0)
+ regulator_set_voltage(regs[i].reg, 0, INT_MAX);
+
+ if (regs[i].uA > 0)
+ regulator_set_load(regs[i].reg, 0);
+
+ regulator_disable(regs[i].reg);
+ }
+
+ return ret;
+}
+
+static void q6v5_regulator_disable(struct q6v5 *qproc,
+ struct reg_info *regs, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ if (regs[i].uV > 0)
+ regulator_set_voltage(regs[i].reg, 0, INT_MAX);
+
+ if (regs[i].uA > 0)
+ regulator_set_load(regs[i].reg, 0);
+
+ regulator_disable(regs[i].reg);
+ }
+}
+
+static int q6v5_clk_enable(struct device *dev,
+ struct clk **clks, int count)
+{
+ int rc;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ rc = clk_prepare_enable(clks[i]);
+ if (rc) {
+ dev_err(dev, "Clock enable failed\n");
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ for (i--; i >= 0; i--)
+ clk_disable_unprepare(clks[i]);
+
+ return rc;
+}
+
+static void q6v5_clk_disable(struct device *dev,
+ struct clk **clks, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ clk_disable_unprepare(clks[i]);
+}
+
+static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds,
+ size_t pd_count)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < pd_count; i++) {
+ dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
+ ret = pm_runtime_get_sync(pds[i]);
+ if (ret < 0) {
+ pm_runtime_put_noidle(pds[i]);
+ dev_pm_genpd_set_performance_state(pds[i], 0);
+ goto unroll_pd_votes;
+ }
+ }
+
+ return 0;
+
+unroll_pd_votes:
+ for (i--; i >= 0; i--) {
+ dev_pm_genpd_set_performance_state(pds[i], 0);
+ pm_runtime_put(pds[i]);
+ }
+
+ return ret;
+}
+
+static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds,
+ size_t pd_count)
+{
+ int i;
+
+ for (i = 0; i < pd_count; i++) {
+ dev_pm_genpd_set_performance_state(pds[i], 0);
+ pm_runtime_put(pds[i]);
+ }
+}
+
+static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
+ bool local, bool remote, phys_addr_t addr,
+ size_t size)
+{
+ struct qcom_scm_vmperm next[2];
+ int perms = 0;
+
+ if (!qproc->need_mem_protection)
+ return 0;
+
+ if (local == !!(*current_perm & BIT(QCOM_SCM_VMID_HLOS)) &&
+ remote == !!(*current_perm & BIT(QCOM_SCM_VMID_MSS_MSA)))
+ return 0;
+
+ if (local) {
+ next[perms].vmid = QCOM_SCM_VMID_HLOS;
+ next[perms].perm = QCOM_SCM_PERM_RWX;
+ perms++;
+ }
+
+ if (remote) {
+ next[perms].vmid = QCOM_SCM_VMID_MSS_MSA;
+ next[perms].perm = QCOM_SCM_PERM_RW;
+ perms++;
+ }
+
+ return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
+ current_perm, next, perms);
+}
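+
+/*
+ * Usage sketch: handing the MPSS region over to the modem before boot would
+ * look like this (local = false drops HLOS access, remote = true grants the
+ * MSS MSA VM read/write access):
+ *
+ *	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true,
+ *				      qproc->mpss_phys, qproc->mpss_size);
+ */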
+
+static void q6v5_debug_policy_load(struct q6v5 *qproc, void *mba_region)
+{
+ const struct firmware *dp_fw;
+
+ if (request_firmware_direct(&dp_fw, "msadp", qproc->dev))
+ return;
+
+ if (SZ_1M + dp_fw->size <= qproc->mba_size) {
+ memcpy(mba_region + SZ_1M, dp_fw->data, dp_fw->size);
+ qproc->dp_size = dp_fw->size;
+ }
+
+ release_firmware(dp_fw);
+}
+
+static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
+{
+ struct q6v5 *qproc = rproc->priv;
+ void *mba_region;
+
+ /* MBA is restricted to a maximum size of 1M */
+ if (fw->size > qproc->mba_size || fw->size > SZ_1M) {
+ dev_err(qproc->dev, "MBA firmware load failed\n");
+ return -EINVAL;
+ }
+
+ mba_region = memremap(qproc->mba_phys, qproc->mba_size, MEMREMAP_WC);
+ if (!mba_region) {
+ dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
+ &qproc->mba_phys, qproc->mba_size);
+ return -EBUSY;
+ }
+
+ memcpy(mba_region, fw->data, fw->size);
+ q6v5_debug_policy_load(qproc, mba_region);
+ memunmap(mba_region);
+
+ return 0;
+}
+
+static int q6v5_reset_assert(struct q6v5 *qproc)
+{
+ int ret;
+
+ if (qproc->has_alt_reset) {
+ reset_control_assert(qproc->pdc_reset);
+ ret = reset_control_reset(qproc->mss_restart);
+ reset_control_deassert(qproc->pdc_reset);
+ } else if (qproc->has_spare_reg) {
+		/*
+		 * When the AXI pipeline is being reset while the Q6 modem is
+		 * partly operational there is a possibility of the AXI valid
+		 * signal glitching, leading to spurious transactions and Q6
+		 * hangs. As a workaround, the AXI_GATING_VALID_OVERRIDE bit is
+		 * asserted before triggering the Q6 MSS reset and withdrawn
+		 * after the MSS assert, followed by an MSS deassert, all while
+		 * holding the PDC reset.
+		 */
+ reset_control_assert(qproc->pdc_reset);
+ regmap_update_bits(qproc->conn_map, qproc->conn_box,
+ AXI_GATING_VALID_OVERRIDE, 1);
+ reset_control_assert(qproc->mss_restart);
+ reset_control_deassert(qproc->pdc_reset);
+ regmap_update_bits(qproc->conn_map, qproc->conn_box,
+ AXI_GATING_VALID_OVERRIDE, 0);
+ ret = reset_control_deassert(qproc->mss_restart);
+ } else if (qproc->has_ext_cntl_regs) {
+ regmap_write(qproc->conn_map, qproc->rscc_disable, 0);
+ reset_control_assert(qproc->pdc_reset);
+ reset_control_assert(qproc->mss_restart);
+ reset_control_deassert(qproc->pdc_reset);
+ ret = reset_control_deassert(qproc->mss_restart);
+ } else {
+ ret = reset_control_assert(qproc->mss_restart);
+ }
+
+ return ret;
+}
+
+static int q6v5_reset_deassert(struct q6v5 *qproc)
+{
+ int ret;
+
+ if (qproc->has_alt_reset) {
+ reset_control_assert(qproc->pdc_reset);
+ writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET);
+ ret = reset_control_reset(qproc->mss_restart);
+ writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
+ reset_control_deassert(qproc->pdc_reset);
+ } else if (qproc->has_spare_reg || qproc->has_ext_cntl_regs) {
+ ret = reset_control_reset(qproc->mss_restart);
+ } else {
+ ret = reset_control_deassert(qproc->mss_restart);
+ }
+
+ return ret;
+}
+
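+/* Poll RMB_PBL_STATUS_REG until the PBL reports a status or @ms expires */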
+static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
+{
+ unsigned long timeout;
+ s32 val;
+
+ timeout = jiffies + msecs_to_jiffies(ms);
+ for (;;) {
+ val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
+ if (val)
+ break;
+
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+
+ msleep(1);
+ }
+
+ return val;
+}
+
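+/*
+ * Poll RMB_MBA_STATUS_REG until the MBA reports @status (any non-zero
+ * status if @status is 0), an error (negative value) or @ms expires.
+ */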
+static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
+{
+ unsigned long timeout;
+ s32 val;
+
+ timeout = jiffies + msecs_to_jiffies(ms);
+ for (;;) {
+ val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
+ if (val < 0)
+ break;
+
+ if (!status && val)
+ break;
+ else if (status && val == status)
+ break;
+
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+
+ msleep(1);
+ }
+
+ return val;
+}
+
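+/*
+ * Reclaim the MBA region for the application processor and expose its
+ * first MBA_LOG_SIZE bytes through the device coredump facility.
+ */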
+static void q6v5_dump_mba_logs(struct q6v5 *qproc)
+{
+ struct rproc *rproc = qproc->rproc;
+ void *data;
+ void *mba_region;
+
+ if (!qproc->has_mba_logs)
+ return;
+
+ if (q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false, qproc->mba_phys,
+ qproc->mba_size))
+ return;
+
+ mba_region = memremap(qproc->mba_phys, qproc->mba_size, MEMREMAP_WC);
+ if (!mba_region)
+ return;
+
+ data = vmalloc(MBA_LOG_SIZE);
+ if (data) {
+ memcpy(data, mba_region, MBA_LOG_SIZE);
+ dev_coredumpv(&rproc->dev, data, MBA_LOG_SIZE, GFP_KERNEL);
+ }
+ memunmap(mba_region);
+}
+
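+/*
+ * Bring the Q6 core out of reset. The exact sequence is version specific:
+ * newer parts use a boot FSM while older ones open-code the power-up
+ * sequence. All paths end by waiting for the PBL to report its status.
+ */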
+static int q6v5proc_reset(struct q6v5 *qproc)
+{
+ u32 val;
+ int ret;
+ int i;
+
+ if (qproc->version == MSS_SDM845) {
+ val = readl(qproc->reg_base + QDSP6SS_SLEEP);
+ val |= Q6SS_CBCR_CLKEN;
+ writel(val, qproc->reg_base + QDSP6SS_SLEEP);
+
+ ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
+ val, !(val & Q6SS_CBCR_CLKOFF), 1,
+ Q6SS_CBCR_TIMEOUT_US);
+ if (ret) {
+ dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ /* De-assert QDSP6 stop core */
+ writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
+ /* Trigger boot FSM */
+ writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);
+
+ ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
+ val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
+ if (ret) {
+ dev_err(qproc->dev, "Boot FSM failed to complete.\n");
+ /* Reset the modem so that boot FSM is in reset state */
+ q6v5_reset_deassert(qproc);
+ return ret;
+ }
+
+ goto pbl_wait;
+ } else if (qproc->version == MSS_SC7180 || qproc->version == MSS_SC7280) {
+ val = readl(qproc->reg_base + QDSP6SS_SLEEP);
+ val |= Q6SS_CBCR_CLKEN;
+ writel(val, qproc->reg_base + QDSP6SS_SLEEP);
+
+ ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
+ val, !(val & Q6SS_CBCR_CLKOFF), 1,
+ Q6SS_CBCR_TIMEOUT_US);
+ if (ret) {
+ dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Turn on the XO clock needed for PLL setup */
+ val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
+ val |= Q6SS_CBCR_CLKEN;
+ writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);
+
+ ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
+ val, !(val & Q6SS_CBCR_CLKOFF), 1,
+ Q6SS_CBCR_TIMEOUT_US);
+ if (ret) {
+ dev_err(qproc->dev, "QDSP6SS XO clock timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Configure Q6 core CBCR to auto-enable after reset sequence */
+ val = readl(qproc->reg_base + QDSP6SS_CORE_CBCR);
+ val |= Q6SS_CBCR_CLKEN;
+ writel(val, qproc->reg_base + QDSP6SS_CORE_CBCR);
+
+ /* De-assert the Q6 stop core signal */
+ writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
+
+ /* Wait for 10 us for any staggering logic to settle */
+ usleep_range(10, 20);
+
+ /* Trigger the boot FSM to start the Q6 out-of-reset sequence */
+ writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);
+
+ /* Poll the MSS_STATUS for FSM completion */
+ ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
+ val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
+ if (ret) {
+ dev_err(qproc->dev, "Boot FSM failed to complete.\n");
+ /* Reset the modem so that boot FSM is in reset state */
+ q6v5_reset_deassert(qproc);
+ return ret;
+ }
+ goto pbl_wait;
+ } else if (qproc->version == MSS_MSM8996 ||
+ qproc->version == MSS_MSM8998) {
+ int mem_pwr_ctl;
+
+ /* Override the ACC value if required */
+ writel(QDSP6SS_ACC_OVERRIDE_VAL,
+ qproc->reg_base + QDSP6SS_STRAP_ACC);
+
+ /* Assert resets, stop core */
+ val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
+ val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
+ writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
+
+		/* BHS requires the XO CBCR clock to be enabled */
+ val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
+ val |= Q6SS_CBCR_CLKEN;
+ writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);
+
+		/* Wait for the CLKOFF bit to go low, indicating the clock is enabled */
+ ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
+ val, !(val & Q6SS_CBCR_CLKOFF), 1,
+ Q6SS_CBCR_TIMEOUT_US);
+ if (ret) {
+ dev_err(qproc->dev,
+ "xo cbcr enabling timed out (rc:%d)\n", ret);
+ return ret;
+ }
+ /* Enable power block headswitch and wait for it to stabilize */
+ val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= QDSP6v56_BHS_ON;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ udelay(1);
+
+ /* Put LDO in bypass mode */
+ val |= QDSP6v56_LDO_BYP;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+
+ /* Deassert QDSP6 compiler memory clamp */
+ val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val &= ~QDSP6v56_CLAMP_QMC_MEM;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+
+ /* Deassert memory peripheral sleep and L2 memory standby */
+ val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+
+ /* Turn on L1, L2, ETB and JU memories 1 at a time */
+ if (qproc->version == MSS_MSM8996) {
+ mem_pwr_ctl = QDSP6SS_MEM_PWR_CTL;
+ i = 19;
+ } else {
+ /* MSS_MSM8998 */
+ mem_pwr_ctl = QDSP6V6SS_MEM_PWR_CTL;
+ i = 28;
+ }
+ val = readl(qproc->reg_base + mem_pwr_ctl);
+ for (; i >= 0; i--) {
+ val |= BIT(i);
+ writel(val, qproc->reg_base + mem_pwr_ctl);
+ /*
+ * Read back value to ensure the write is done then
+ * wait for 1us for both memory peripheral and data
+ * array to turn on.
+ */
+ val |= readl(qproc->reg_base + mem_pwr_ctl);
+ udelay(1);
+ }
+ /* Remove word line clamp */
+ val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val &= ~QDSP6v56_CLAMP_WL;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ } else {
+ /* Assert resets, stop core */
+ val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
+ val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
+ writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
+
+ /* Enable power block headswitch and wait for it to stabilize */
+ val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= QDSS_BHS_ON | QDSS_LDO_BYP;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ udelay(1);
+ /*
+ * Turn on memories. L2 banks should be done individually
+ * to minimize inrush current.
+ */
+ val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
+ Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= Q6SS_L2DATA_SLP_NRET_N_2;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= Q6SS_L2DATA_SLP_NRET_N_1;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= Q6SS_L2DATA_SLP_NRET_N_0;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ }
+ /* Remove IO clamp */
+ val &= ~Q6SS_CLAMP_IO;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+
+ /* Bring core out of reset */
+ val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
+ val &= ~Q6SS_CORE_ARES;
+ writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
+
+ /* Turn on core clock */
+ val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
+ val |= Q6SS_CLK_ENABLE;
+ writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
+
+ /* Start core execution */
+ val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
+ val &= ~Q6SS_STOP_CORE;
+ writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
+
+pbl_wait:
+ /* Wait for PBL status */
+ ret = q6v5_rmb_pbl_wait(qproc, 1000);
+ if (ret == -ETIMEDOUT) {
+ dev_err(qproc->dev, "PBL boot timed out\n");
+ } else if (ret != RMB_PBL_SUCCESS) {
+ dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
+ ret = -EINVAL;
+ } else {
+ ret = 0;
+ }
+
+ return ret;
+}
+
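+/*
+ * Perform the Q-channel handshake required before using the bus: raise
+ * QACCEPT_REQ and wait for the accept acknowledgement. On targets with
+ * external control registers the AXIM1 clock is forced on first.
+ */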
+static int q6v5proc_enable_qchannel(struct q6v5 *qproc, struct regmap *map, u32 offset)
+{
+ unsigned int val;
+ int ret;
+
+ if (!qproc->has_qaccept_regs)
+ return 0;
+
+ if (qproc->has_ext_cntl_regs) {
+ regmap_write(qproc->conn_map, qproc->rscc_disable, 0);
+ regmap_write(qproc->conn_map, qproc->force_clk_on, 1);
+
+ ret = regmap_read_poll_timeout(qproc->halt_map, qproc->axim1_clk_off, val,
+ !val, 1, Q6SS_CBCR_TIMEOUT_US);
+ if (ret) {
+ dev_err(qproc->dev, "failed to enable axim1 clock\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ regmap_write(map, offset + QACCEPT_REQ_REG, 1);
+
+ /* Wait for accept */
+ ret = regmap_read_poll_timeout(map, offset + QACCEPT_ACCEPT_REG, val, val, 5,
+ QACCEPT_TIMEOUT_US);
+ if (ret) {
+ dev_err(qproc->dev, "qchannel enable failed\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
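+/*
+ * Take the Q-channel down: wait for outstanding transactions to drain,
+ * drop QACCEPT_REQ and, if the takedown is denied, re-raise the request
+ * and retry a bounded number of times.
+ */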
+static void q6v5proc_disable_qchannel(struct q6v5 *qproc, struct regmap *map, u32 offset)
+{
+ int ret;
+ unsigned int val, retry;
+ unsigned int nretry = 10;
+ bool takedown_complete = false;
+
+ if (!qproc->has_qaccept_regs)
+ return;
+
+ while (!takedown_complete && nretry) {
+ nretry--;
+
+ /* Wait for active transactions to complete */
+ regmap_read_poll_timeout(map, offset + QACCEPT_ACTIVE_REG, val, !val, 5,
+ QACCEPT_TIMEOUT_US);
+
+ /* Request Q-channel transaction takedown */
+ regmap_write(map, offset + QACCEPT_REQ_REG, 0);
+
+ /*
+ * If the request is denied, reset the Q-channel takedown request,
+ * wait for active transactions to complete and retry takedown.
+ */
+ retry = 10;
+ while (retry) {
+ usleep_range(5, 10);
+ retry--;
+ ret = regmap_read(map, offset + QACCEPT_DENY_REG, &val);
+ if (!ret && val) {
+ regmap_write(map, offset + QACCEPT_REQ_REG, 1);
+ break;
+ }
+
+ ret = regmap_read(map, offset + QACCEPT_ACCEPT_REG, &val);
+ if (!ret && !val) {
+ takedown_complete = true;
+ break;
+ }
+ }
+
+ if (!retry)
+ break;
+ }
+
+ /* Rely on mss_restart to clear out pending transactions on takedown failure */
+ if (!takedown_complete)
+ dev_err(qproc->dev, "qchannel takedown failed\n");
+}
+
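+/*
+ * Halt an AXI master port: raise the halt request, wait for the
+ * acknowledgement and verify that the port reports idle.
+ */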
+static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
+ struct regmap *halt_map,
+ u32 offset)
+{
+ unsigned int val;
+ int ret;
+
+ /* Check if we're already idle */
+ ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
+ if (!ret && val)
+ return;
+
+ /* Assert halt request */
+ regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);
+
+ /* Wait for halt */
+ regmap_read_poll_timeout(halt_map, offset + AXI_HALTACK_REG, val,
+ val, 1000, HALT_ACK_TIMEOUT_US);
+
+ ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
+ if (ret || !val)
+ dev_err(qproc->dev, "port failed halt\n");
+
+ /* Clear halt request (port will remain halted until reset) */
+ regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
+}
+
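+/*
+ * Stage the firmware metadata (ELF headers and hash segment) in a buffer
+ * the modem can access and ask the MBA to authenticate it, reclaiming the
+ * buffer once authentication has completed.
+ */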
+static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw,
+ const char *fw_name)
+{
+ unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
+ dma_addr_t phys;
+ void *metadata;
+ int mdata_perm;
+ int xferop_ret;
+ size_t size;
+ void *ptr;
+ int ret;
+
+ metadata = qcom_mdt_read_metadata(fw, &size, fw_name, qproc->dev);
+ if (IS_ERR(metadata))
+ return PTR_ERR(metadata);
+
+ if (qproc->mdata_phys) {
+ if (size > qproc->mdata_size) {
+ ret = -EINVAL;
+ dev_err(qproc->dev, "metadata size outside memory range\n");
+ goto free_metadata;
+ }
+
+ phys = qproc->mdata_phys;
+ ptr = memremap(qproc->mdata_phys, size, MEMREMAP_WC);
+ if (!ptr) {
+ ret = -EBUSY;
+ dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
+ &qproc->mdata_phys, size);
+ goto free_metadata;
+ }
+ } else {
+ ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
+ if (!ptr) {
+ ret = -ENOMEM;
+ dev_err(qproc->dev, "failed to allocate mdt buffer\n");
+ goto free_metadata;
+ }
+ }
+
+ memcpy(ptr, metadata, size);
+
+ if (qproc->mdata_phys)
+ memunmap(ptr);
+
+ /* Hypervisor mapping to access metadata by modem */
+ mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
+ ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, true,
+ phys, size);
+ if (ret) {
+ dev_err(qproc->dev,
+ "assigning Q6 access to metadata failed: %d\n", ret);
+ ret = -EAGAIN;
+ goto free_dma_attrs;
+ }
+
+ writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
+ writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
+
+ ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
+ if (ret == -ETIMEDOUT)
+ dev_err(qproc->dev, "MPSS header authentication timed out\n");
+ else if (ret < 0)
+ dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);
+
+ /* Metadata authentication done, remove modem access */
+ xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, false,
+ phys, size);
+ if (xferop_ret)
+ dev_warn(qproc->dev,
+			"mdt buffer not reclaimed, system may become unstable\n");
+
+free_dma_attrs:
+ if (!qproc->mdata_phys)
+ dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs);
+free_metadata:
+ kfree(metadata);
+
+ return ret < 0 ? ret : 0;
+}
+
+static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
+{
+ if (phdr->p_type != PT_LOAD)
+ return false;
+
+ if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
+ return false;
+
+ if (!phdr->p_memsz)
+ return false;
+
+ return true;
+}
+
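+/*
+ * Power the modem subsystem up and boot the MBA firmware: enable power
+ * domains, regulators, clocks and resets, grant the Q6 access to the MBA
+ * (and MPSS) regions and wait for the MBA to report readiness.
+ */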
+static int q6v5_mba_load(struct q6v5 *qproc)
+{
+ int ret;
+ int xfermemop_ret;
+ bool mba_load_err = false;
+
+ ret = qcom_q6v5_prepare(&qproc->q6v5);
+ if (ret)
+ return ret;
+
+ ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
+ if (ret < 0) {
+ dev_err(qproc->dev, "failed to enable proxy power domains\n");
+ goto disable_irqs;
+ }
+
+ ret = q6v5_regulator_enable(qproc, qproc->fallback_proxy_regs,
+ qproc->fallback_proxy_reg_count);
+ if (ret) {
+ dev_err(qproc->dev, "failed to enable fallback proxy supplies\n");
+ goto disable_proxy_pds;
+ }
+
+ ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
+ qproc->proxy_reg_count);
+ if (ret) {
+ dev_err(qproc->dev, "failed to enable proxy supplies\n");
+ goto disable_fallback_proxy_reg;
+ }
+
+ ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
+ qproc->proxy_clk_count);
+ if (ret) {
+ dev_err(qproc->dev, "failed to enable proxy clocks\n");
+ goto disable_proxy_reg;
+ }
+
+ ret = q6v5_regulator_enable(qproc, qproc->active_regs,
+ qproc->active_reg_count);
+ if (ret) {
+ dev_err(qproc->dev, "failed to enable supplies\n");
+ goto disable_proxy_clk;
+ }
+
+ ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
+ qproc->reset_clk_count);
+ if (ret) {
+ dev_err(qproc->dev, "failed to enable reset clocks\n");
+ goto disable_vdd;
+ }
+
+ ret = q6v5_reset_deassert(qproc);
+ if (ret) {
+ dev_err(qproc->dev, "failed to deassert mss restart\n");
+ goto disable_reset_clks;
+ }
+
+ ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
+ qproc->active_clk_count);
+ if (ret) {
+ dev_err(qproc->dev, "failed to enable clocks\n");
+ goto assert_reset;
+ }
+
+ ret = q6v5proc_enable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi);
+ if (ret) {
+ dev_err(qproc->dev, "failed to enable axi bridge\n");
+ goto disable_active_clks;
+ }
+
+	/*
+	 * Some versions of the MBA firmware wipe the MPSS region on boot, so
+	 * grant the Q6 access to this region as well.
+	 */
+ ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true,
+ qproc->mpss_phys, qproc->mpss_size);
+ if (ret) {
+ dev_err(qproc->dev, "assigning Q6 access to mpss memory failed: %d\n", ret);
+ goto disable_active_clks;
+ }
+
+ /* Assign MBA image access in DDR to q6 */
+ ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false, true,
+ qproc->mba_phys, qproc->mba_size);
+ if (ret) {
+ dev_err(qproc->dev,
+ "assigning Q6 access to mba memory failed: %d\n", ret);
+ goto disable_active_clks;
+ }
+
+ writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);
+ if (qproc->dp_size) {
+ writel(qproc->mba_phys + SZ_1M, qproc->rmb_base + RMB_PMI_CODE_START_REG);
+ writel(qproc->dp_size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
+ }
+
+ ret = q6v5proc_reset(qproc);
+ if (ret)
+ goto reclaim_mba;
+
+ if (qproc->has_mba_logs)
+ qcom_pil_info_store("mba", qproc->mba_phys, MBA_LOG_SIZE);
+
+ ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
+ if (ret == -ETIMEDOUT) {
+ dev_err(qproc->dev, "MBA boot timed out\n");
+ goto halt_axi_ports;
+ } else if (ret != RMB_MBA_XPU_UNLOCKED &&
+ ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
+ dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
+ ret = -EINVAL;
+ goto halt_axi_ports;
+ }
+
+ qproc->dump_mba_loaded = true;
+ return 0;
+
+halt_axi_ports:
+ q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
+ if (qproc->has_vq6)
+ q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_vq6);
+ q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
+ q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
+ q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_mdm);
+ q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_cx);
+ q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi);
+ mba_load_err = true;
+reclaim_mba:
+ xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
+ false, qproc->mba_phys,
+ qproc->mba_size);
+ if (xfermemop_ret) {
+ dev_err(qproc->dev,
+ "Failed to reclaim mba buffer, system may become unstable\n");
+ } else if (mba_load_err) {
+ q6v5_dump_mba_logs(qproc);
+ }
+
+disable_active_clks:
+ q6v5_clk_disable(qproc->dev, qproc->active_clks,
+ qproc->active_clk_count);
+assert_reset:
+ q6v5_reset_assert(qproc);
+disable_reset_clks:
+ q6v5_clk_disable(qproc->dev, qproc->reset_clks,
+ qproc->reset_clk_count);
+disable_vdd:
+ q6v5_regulator_disable(qproc, qproc->active_regs,
+ qproc->active_reg_count);
+disable_proxy_clk:
+ q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
+ qproc->proxy_clk_count);
+disable_proxy_reg:
+ q6v5_regulator_disable(qproc, qproc->proxy_regs,
+ qproc->proxy_reg_count);
+disable_fallback_proxy_reg:
+ q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
+ qproc->fallback_proxy_reg_count);
+disable_proxy_pds:
+ q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
+disable_irqs:
+ qcom_q6v5_unprepare(&qproc->q6v5);
+
+ return ret;
+}
+
+static void q6v5_mba_reclaim(struct q6v5 *qproc)
+{
+ int ret;
+ u32 val;
+
+ qproc->dump_mba_loaded = false;
+ qproc->dp_size = 0;
+
+ q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
+ if (qproc->has_vq6)
+ q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_vq6);
+ q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
+ q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
+ if (qproc->version == MSS_MSM8996) {
+		/*
+		 * Assert the I/O and memory clamps to avoid a high MX current
+		 * draw during LPASS/MSS restart.
+		 */
+ val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
+ QDSP6v56_CLAMP_QMC_MEM;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ }
+
+ if (qproc->has_ext_cntl_regs) {
+ regmap_write(qproc->conn_map, qproc->rscc_disable, 1);
+
+ ret = regmap_read_poll_timeout(qproc->halt_map, qproc->axim1_clk_off, val,
+ !val, 1, Q6SS_CBCR_TIMEOUT_US);
+ if (ret)
+ dev_err(qproc->dev, "failed to enable axim1 clock\n");
+
+ ret = regmap_read_poll_timeout(qproc->halt_map, qproc->crypto_clk_off, val,
+ !val, 1, Q6SS_CBCR_TIMEOUT_US);
+ if (ret)
+ dev_err(qproc->dev, "failed to enable crypto clock\n");
+ }
+
+ q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_mdm);
+ q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_cx);
+ q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi);
+
+ q6v5_reset_assert(qproc);
+
+ q6v5_clk_disable(qproc->dev, qproc->reset_clks,
+ qproc->reset_clk_count);
+ q6v5_clk_disable(qproc->dev, qproc->active_clks,
+ qproc->active_clk_count);
+ q6v5_regulator_disable(qproc, qproc->active_regs,
+ qproc->active_reg_count);
+
+	/*
+	 * In case of a failure or coredump scenario where reclaiming the MBA
+	 * memory could not happen earlier, reclaim it here.
+	 */
+ ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false,
+ qproc->mba_phys,
+ qproc->mba_size);
+ WARN_ON(ret);
+
+ ret = qcom_q6v5_unprepare(&qproc->q6v5);
+ if (ret) {
+ q6v5_pds_disable(qproc, qproc->proxy_pds,
+ qproc->proxy_pd_count);
+ q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
+ qproc->proxy_clk_count);
+ q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
+ qproc->fallback_proxy_reg_count);
+ q6v5_regulator_disable(qproc, qproc->proxy_regs,
+ qproc->proxy_reg_count);
+ }
+}
+
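+/*
+ * Re-fetch and boot the MBA image; used in the coredump path when the MBA
+ * is no longer loaded but access to the MPSS region must be restored.
+ */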
+static int q6v5_reload_mba(struct rproc *rproc)
+{
+ struct q6v5 *qproc = rproc->priv;
+ const struct firmware *fw;
+ int ret;
+
+ ret = request_firmware(&fw, rproc->firmware, qproc->dev);
+ if (ret < 0)
+ return ret;
+
+ q6v5_load(rproc, fw);
+ ret = q6v5_mba_load(qproc);
+ release_firmware(fw);
+
+ return ret;
+}
+
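+/*
+ * Load the main modem firmware: authenticate the ELF headers through the
+ * MBA, copy each loadable segment into the MPSS region (fetching split
+ * ".bNN" files for segments not carried in the loaded image) and hand the
+ * region over to the modem for final authentication.
+ */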
+static int q6v5_mpss_load(struct q6v5 *qproc)
+{
+ const struct elf32_phdr *phdrs;
+ const struct elf32_phdr *phdr;
+ const struct firmware *seg_fw;
+ const struct firmware *fw;
+ struct elf32_hdr *ehdr;
+ phys_addr_t mpss_reloc;
+ phys_addr_t boot_addr;
+ phys_addr_t min_addr = PHYS_ADDR_MAX;
+ phys_addr_t max_addr = 0;
+ u32 code_length;
+ bool relocate = false;
+ char *fw_name;
+ size_t fw_name_len;
+ ssize_t offset;
+ size_t size = 0;
+ void *ptr;
+ int ret;
+ int i;
+
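+	/* ".bNN" split names are derived by rewriting the last three chars */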
+ fw_name_len = strlen(qproc->hexagon_mdt_image);
+ if (fw_name_len <= 4)
+ return -EINVAL;
+
+ fw_name = kstrdup(qproc->hexagon_mdt_image, GFP_KERNEL);
+ if (!fw_name)
+ return -ENOMEM;
+
+ ret = request_firmware(&fw, fw_name, qproc->dev);
+ if (ret < 0) {
+ dev_err(qproc->dev, "unable to load %s\n", fw_name);
+ goto out;
+ }
+
+ /* Initialize the RMB validator */
+ writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
+
+ ret = q6v5_mpss_init_image(qproc, fw, qproc->hexagon_mdt_image);
+ if (ret)
+ goto release_firmware;
+
+ ehdr = (struct elf32_hdr *)fw->data;
+ phdrs = (struct elf32_phdr *)(ehdr + 1);
+
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ phdr = &phdrs[i];
+
+ if (!q6v5_phdr_valid(phdr))
+ continue;
+
+ if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
+ relocate = true;
+
+ if (phdr->p_paddr < min_addr)
+ min_addr = phdr->p_paddr;
+
+ if (phdr->p_paddr + phdr->p_memsz > max_addr)
+ max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
+ }
+
+ /*
+ * In case of a modem subsystem restart on secure devices, the modem
+ * memory can be reclaimed only after MBA is loaded.
+ */
+ q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, false,
+ qproc->mpss_phys, qproc->mpss_size);
+
+ /* Share ownership between Linux and MSS, during segment loading */
+ ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, true,
+ qproc->mpss_phys, qproc->mpss_size);
+ if (ret) {
+ dev_err(qproc->dev,
+ "assigning Q6 access to mpss memory failed: %d\n", ret);
+ ret = -EAGAIN;
+ goto release_firmware;
+ }
+
+ mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
+ qproc->mpss_reloc = mpss_reloc;
+ /* Load firmware segments */
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ phdr = &phdrs[i];
+
+ if (!q6v5_phdr_valid(phdr))
+ continue;
+
+ offset = phdr->p_paddr - mpss_reloc;
+ if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) {
+ dev_err(qproc->dev, "segment outside memory range\n");
+ ret = -EINVAL;
+ goto release_firmware;
+ }
+
+ if (phdr->p_filesz > phdr->p_memsz) {
+ dev_err(qproc->dev,
+ "refusing to load segment %d with p_filesz > p_memsz\n",
+ i);
+ ret = -EINVAL;
+ goto release_firmware;
+ }
+
+ ptr = memremap(qproc->mpss_phys + offset, phdr->p_memsz, MEMREMAP_WC);
+		if (!ptr) {
+			dev_err(qproc->dev,
+				"unable to map memory region: %pa+%zx-%x\n",
+				&qproc->mpss_phys, offset, phdr->p_memsz);
+			ret = -EBUSY;
+			goto release_firmware;
+		}
+
+ if (phdr->p_filesz && phdr->p_offset < fw->size) {
+ /* Firmware is large enough to be non-split */
+ if (phdr->p_offset + phdr->p_filesz > fw->size) {
+ dev_err(qproc->dev,
+ "failed to load segment %d from truncated file %s\n",
+ i, fw_name);
+ ret = -EINVAL;
+ memunmap(ptr);
+ goto release_firmware;
+ }
+
+ memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz);
+ } else if (phdr->p_filesz) {
+ /* Replace "xxx.xxx" with "xxx.bxx" */
+ sprintf(fw_name + fw_name_len - 3, "b%02d", i);
+ ret = request_firmware_into_buf(&seg_fw, fw_name, qproc->dev,
+ ptr, phdr->p_filesz);
+ if (ret) {
+ dev_err(qproc->dev, "failed to load %s\n", fw_name);
+ memunmap(ptr);
+ goto release_firmware;
+ }
+
+ if (seg_fw->size != phdr->p_filesz) {
+ dev_err(qproc->dev,
+ "failed to load segment %d from truncated file %s\n",
+ i, fw_name);
+ ret = -EINVAL;
+ release_firmware(seg_fw);
+ memunmap(ptr);
+ goto release_firmware;
+ }
+
+ release_firmware(seg_fw);
+ }
+
+ if (phdr->p_memsz > phdr->p_filesz) {
+ memset(ptr + phdr->p_filesz, 0,
+ phdr->p_memsz - phdr->p_filesz);
+ }
+ memunmap(ptr);
+ size += phdr->p_memsz;
+
+ code_length = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
+ if (!code_length) {
+ boot_addr = relocate ? qproc->mpss_phys : min_addr;
+ writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
+ writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
+ }
+ writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
+
+ ret = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
+ if (ret < 0) {
+ dev_err(qproc->dev, "MPSS authentication failed: %d\n",
+ ret);
+ goto release_firmware;
+ }
+ }
+
+ /* Transfer ownership of modem ddr region to q6 */
+ ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true,
+ qproc->mpss_phys, qproc->mpss_size);
+ if (ret) {
+ dev_err(qproc->dev,
+ "assigning Q6 access to mpss memory failed: %d\n", ret);
+ ret = -EAGAIN;
+ goto release_firmware;
+ }
+
+ ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
+ if (ret == -ETIMEDOUT)
+ dev_err(qproc->dev, "MPSS authentication timed out\n");
+ else if (ret < 0)
+ dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);
+
+ qcom_pil_info_store("modem", qproc->mpss_phys, qproc->mpss_size);
+
+release_firmware:
+ release_firmware(fw);
+out:
+ kfree(fw_name);
+
+ return ret < 0 ? ret : 0;
+}
+
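+/*
+ * Coredump helper: reload the MBA if needed to regain access to the MPSS
+ * region, copy one segment (filling with 0xff if it cannot be mapped) and
+ * reclaim the MBA once the last segment has been copied.
+ */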
+static void qcom_q6v5_dump_segment(struct rproc *rproc,
+ struct rproc_dump_segment *segment,
+ void *dest, size_t cp_offset, size_t size)
+{
+ int ret = 0;
+ struct q6v5 *qproc = rproc->priv;
+ int offset = segment->da - qproc->mpss_reloc;
+ void *ptr = NULL;
+
+ /* Unlock mba before copying segments */
+ if (!qproc->dump_mba_loaded) {
+ ret = q6v5_reload_mba(rproc);
+ if (!ret) {
+ /* Reset ownership back to Linux to copy segments */
+ ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
+ true, false,
+ qproc->mpss_phys,
+ qproc->mpss_size);
+ }
+ }
+
+ if (!ret)
+ ptr = memremap(qproc->mpss_phys + offset + cp_offset, size, MEMREMAP_WC);
+
+ if (ptr) {
+ memcpy(dest, ptr, size);
+ memunmap(ptr);
+ } else {
+ memset(dest, 0xff, size);
+ }
+
+ qproc->current_dump_size += size;
+
+ /* Reclaim mba after copying segments */
+ if (qproc->current_dump_size == qproc->total_dump_size) {
+ if (qproc->dump_mba_loaded) {
+ /* Try to reset ownership back to Q6 */
+ q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
+ false, true,
+ qproc->mpss_phys,
+ qproc->mpss_size);
+ q6v5_mba_reclaim(qproc);
+ }
+ }
+}
+
+static int q6v5_start(struct rproc *rproc)
+{
+ struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
+ int xfermemop_ret;
+ int ret;
+
+ ret = q6v5_mba_load(qproc);
+ if (ret)
+ return ret;
+
+ dev_info(qproc->dev, "MBA booted with%s debug policy, loading mpss\n",
+ qproc->dp_size ? "" : "out");
+
+ ret = q6v5_mpss_load(qproc);
+ if (ret)
+ goto reclaim_mpss;
+
+ ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000));
+ if (ret == -ETIMEDOUT) {
+ dev_err(qproc->dev, "start timed out\n");
+ goto reclaim_mpss;
+ }
+
+ xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
+ false, qproc->mba_phys,
+ qproc->mba_size);
+ if (xfermemop_ret)
+ dev_err(qproc->dev,
+			"Failed to reclaim mba buffer, system may become unstable\n");
+
+	/* Reset the coredump size accounting */
+ qproc->current_dump_size = 0;
+
+ return 0;
+
+reclaim_mpss:
+ q6v5_mba_reclaim(qproc);
+ q6v5_dump_mba_logs(qproc);
+
+ return ret;
+}
+
+static int q6v5_stop(struct rproc *rproc)
+{
+ struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
+ int ret;
+
+ ret = qcom_q6v5_request_stop(&qproc->q6v5, qproc->sysmon);
+ if (ret == -ETIMEDOUT)
+ dev_err(qproc->dev, "timed out on wait\n");
+
+ q6v5_mba_reclaim(qproc);
+
+ return 0;
+}
+
+static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
+ const struct firmware *mba_fw)
+{
+ const struct firmware *fw;
+ const struct elf32_phdr *phdrs;
+ const struct elf32_phdr *phdr;
+ const struct elf32_hdr *ehdr;
+ struct q6v5 *qproc = rproc->priv;
+ unsigned long i;
+ int ret;
+
+ ret = request_firmware(&fw, qproc->hexagon_mdt_image, qproc->dev);
+ if (ret < 0) {
+ dev_err(qproc->dev, "unable to load %s\n",
+ qproc->hexagon_mdt_image);
+ return ret;
+ }
+
+ rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
+
+ ehdr = (struct elf32_hdr *)fw->data;
+ phdrs = (struct elf32_phdr *)(ehdr + 1);
+ qproc->total_dump_size = 0;
+
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ phdr = &phdrs[i];
+
+ if (!q6v5_phdr_valid(phdr))
+ continue;
+
+ ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
+ phdr->p_memsz,
+ qcom_q6v5_dump_segment,
+ NULL);
+ if (ret)
+ break;
+
+ qproc->total_dump_size += phdr->p_memsz;
+ }
+
+ release_firmware(fw);
+ return ret;
+}
+
+static unsigned long q6v5_panic(struct rproc *rproc)
+{
+ struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
+
+ return qcom_q6v5_panic(&qproc->q6v5);
+}
+
+static const struct rproc_ops q6v5_ops = {
+ .start = q6v5_start,
+ .stop = q6v5_stop,
+ .parse_fw = qcom_q6v5_register_dump_segments,
+ .load = q6v5_load,
+ .panic = q6v5_panic,
+};
+
+static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
+{
+ struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5);
+
+ q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
+ qproc->proxy_clk_count);
+ q6v5_regulator_disable(qproc, qproc->proxy_regs,
+ qproc->proxy_reg_count);
+ q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
+ qproc->fallback_proxy_reg_count);
+ q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
+}
+
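+/*
+ * Map the qdsp6 and rmb register spaces and parse the halt, qaccept,
+ * external control and spare syscon offsets from the device tree.
+ */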
+static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
+{
+ struct of_phandle_args args;
+ int halt_cell_cnt = 3;
+ int ret;
+
+ qproc->reg_base = devm_platform_ioremap_resource_byname(pdev, "qdsp6");
+ if (IS_ERR(qproc->reg_base))
+ return PTR_ERR(qproc->reg_base);
+
+ qproc->rmb_base = devm_platform_ioremap_resource_byname(pdev, "rmb");
+ if (IS_ERR(qproc->rmb_base))
+ return PTR_ERR(qproc->rmb_base);
+
+ if (qproc->has_vq6)
+ halt_cell_cnt++;
+
+ ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
+ "qcom,halt-regs", halt_cell_cnt, 0, &args);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
+ return -EINVAL;
+ }
+
+ qproc->halt_map = syscon_node_to_regmap(args.np);
+ of_node_put(args.np);
+ if (IS_ERR(qproc->halt_map))
+ return PTR_ERR(qproc->halt_map);
+
+ qproc->halt_q6 = args.args[0];
+ qproc->halt_modem = args.args[1];
+ qproc->halt_nc = args.args[2];
+
+ if (qproc->has_vq6)
+ qproc->halt_vq6 = args.args[3];
+
+ if (qproc->has_qaccept_regs) {
+ ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
+ "qcom,qaccept-regs",
+ 3, 0, &args);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to parse qaccept-regs\n");
+ return -EINVAL;
+ }
+
+ qproc->qaccept_mdm = args.args[0];
+ qproc->qaccept_cx = args.args[1];
+ qproc->qaccept_axi = args.args[2];
+ }
+
+ if (qproc->has_ext_cntl_regs) {
+ ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
+ "qcom,ext-regs",
+ 2, 0, &args);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to parse ext-regs index 0\n");
+ return -EINVAL;
+ }
+
+ qproc->conn_map = syscon_node_to_regmap(args.np);
+ of_node_put(args.np);
+ if (IS_ERR(qproc->conn_map))
+ return PTR_ERR(qproc->conn_map);
+
+ qproc->force_clk_on = args.args[0];
+ qproc->rscc_disable = args.args[1];
+
+ ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
+ "qcom,ext-regs",
+ 2, 1, &args);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to parse ext-regs index 1\n");
+ return -EINVAL;
+ }
+
+ qproc->axim1_clk_off = args.args[0];
+ qproc->crypto_clk_off = args.args[1];
+ }
+
+ if (qproc->has_spare_reg) {
+ ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
+ "qcom,spare-regs",
+ 1, 0, &args);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to parse spare-regs\n");
+ return -EINVAL;
+ }
+
+ qproc->conn_map = syscon_node_to_regmap(args.np);
+ of_node_put(args.np);
+ if (IS_ERR(qproc->conn_map))
+ return PTR_ERR(qproc->conn_map);
+
+ qproc->conn_box = args.args[0];
+ }
+
+ return 0;
+}
+
+static int q6v5_init_clocks(struct device *dev, struct clk **clks,
+ char **clk_names)
+{
+ int i;
+
+ if (!clk_names)
+ return 0;
+
+ for (i = 0; clk_names[i]; i++) {
+ clks[i] = devm_clk_get(dev, clk_names[i]);
+ if (IS_ERR(clks[i])) {
+ int rc = PTR_ERR(clks[i]);
+
+ if (rc != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get %s clock\n",
+ clk_names[i]);
+ return rc;
+ }
+ }
+
+ return i;
+}
+
+static int q6v5_pds_attach(struct device *dev, struct device **devs,
+ char **pd_names)
+{
+ size_t num_pds = 0;
+ int ret;
+ int i;
+
+ if (!pd_names)
+ return 0;
+
+ while (pd_names[num_pds])
+ num_pds++;
+
+ for (i = 0; i < num_pds; i++) {
+ devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
+ if (IS_ERR_OR_NULL(devs[i])) {
+ ret = PTR_ERR(devs[i]) ? : -ENODATA;
+ goto unroll_attach;
+ }
+ }
+
+ return num_pds;
+
+unroll_attach:
+ for (i--; i >= 0; i--)
+ dev_pm_domain_detach(devs[i], false);
+
+ return ret;
+}
+
+static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds,
+ size_t pd_count)
+{
+ int i;
+
+ for (i = 0; i < pd_count; i++)
+ dev_pm_domain_detach(pds[i], false);
+}
+
+static int q6v5_init_reset(struct q6v5 *qproc)
+{
+ qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
+ "mss_restart");
+ if (IS_ERR(qproc->mss_restart)) {
+ dev_err(qproc->dev, "failed to acquire mss restart\n");
+ return PTR_ERR(qproc->mss_restart);
+ }
+
+ if (qproc->has_alt_reset || qproc->has_spare_reg || qproc->has_ext_cntl_regs) {
+ qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev,
+ "pdc_reset");
+ if (IS_ERR(qproc->pdc_reset)) {
+ dev_err(qproc->dev, "failed to acquire pdc reset\n");
+ return PTR_ERR(qproc->pdc_reset);
+ }
+ }
+
+ return 0;
+}
+
+static int q6v5_alloc_memory_region(struct q6v5 *qproc)
+{
+ struct device_node *child;
+ struct reserved_mem *rmem;
+ struct device_node *node;
+ struct resource r;
+ int ret;
+
+	/*
+	 * In the absence of mba/mpss child nodes, extract the mba and mpss
+	 * reserved memory regions from the device's memory-region property.
+	 */
+ child = of_get_child_by_name(qproc->dev->of_node, "mba");
+ if (!child) {
+ node = of_parse_phandle(qproc->dev->of_node,
+ "memory-region", 0);
+ } else {
+ node = of_parse_phandle(child, "memory-region", 0);
+ of_node_put(child);
+ }
+
+ ret = of_address_to_resource(node, 0, &r);
+ of_node_put(node);
+ if (ret) {
+ dev_err(qproc->dev, "unable to resolve mba region\n");
+ return ret;
+ }
+
+ qproc->mba_phys = r.start;
+ qproc->mba_size = resource_size(&r);
+
+ if (!child) {
+ node = of_parse_phandle(qproc->dev->of_node,
+ "memory-region", 1);
+ } else {
+ child = of_get_child_by_name(qproc->dev->of_node, "mpss");
+ node = of_parse_phandle(child, "memory-region", 0);
+ of_node_put(child);
+ }
+
+ ret = of_address_to_resource(node, 0, &r);
+ of_node_put(node);
+ if (ret) {
+ dev_err(qproc->dev, "unable to resolve mpss region\n");
+ return ret;
+ }
+
+ qproc->mpss_phys = qproc->mpss_reloc = r.start;
+ qproc->mpss_size = resource_size(&r);
+
+ if (!child) {
+ node = of_parse_phandle(qproc->dev->of_node, "memory-region", 2);
+ } else {
+ child = of_get_child_by_name(qproc->dev->of_node, "metadata");
+ node = of_parse_phandle(child, "memory-region", 0);
+ of_node_put(child);
+ }
+
+ if (!node)
+ return 0;
+
+ rmem = of_reserved_mem_lookup(node);
+ if (!rmem) {
+ dev_err(qproc->dev, "unable to resolve metadata region\n");
+ return -EINVAL;
+ }
+
+ qproc->mdata_phys = rmem->base;
+ qproc->mdata_size = rmem->size;
+
+ return 0;
+}
+
+static int q6v5_probe(struct platform_device *pdev)
+{
+ const struct rproc_hexagon_res *desc;
+ struct device_node *node;
+ struct q6v5 *qproc;
+ struct rproc *rproc;
+ const char *mba_image;
+ int ret;
+
+ desc = of_device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ if (desc->need_mem_protection && !qcom_scm_is_available())
+ return -EPROBE_DEFER;
+
+ mba_image = desc->hexagon_mba_image;
+ ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
+ 0, &mba_image);
+ if (ret < 0 && ret != -EINVAL) {
+ dev_err(&pdev->dev, "unable to read mba firmware-name\n");
+ return ret;
+ }
+
+ rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
+ mba_image, sizeof(*qproc));
+ if (!rproc) {
+ dev_err(&pdev->dev, "failed to allocate rproc\n");
+ return -ENOMEM;
+ }
+
+ rproc->auto_boot = false;
+ rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
+
+ qproc = (struct q6v5 *)rproc->priv;
+ qproc->dev = &pdev->dev;
+ qproc->rproc = rproc;
+ qproc->hexagon_mdt_image = "modem.mdt";
+ ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
+ 1, &qproc->hexagon_mdt_image);
+ if (ret < 0 && ret != -EINVAL) {
+ dev_err(&pdev->dev, "unable to read mpss firmware-name\n");
+ goto free_rproc;
+ }
+
+ platform_set_drvdata(pdev, qproc);
+
+ qproc->has_qaccept_regs = desc->has_qaccept_regs;
+ qproc->has_ext_cntl_regs = desc->has_ext_cntl_regs;
+ qproc->has_vq6 = desc->has_vq6;
+ qproc->has_spare_reg = desc->has_spare_reg;
+ ret = q6v5_init_mem(qproc, pdev);
+ if (ret)
+ goto free_rproc;
+
+ ret = q6v5_alloc_memory_region(qproc);
+ if (ret)
+ goto free_rproc;
+
+ ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks,
+ desc->proxy_clk_names);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to get proxy clocks.\n");
+ goto free_rproc;
+ }
+ qproc->proxy_clk_count = ret;
+
+ ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks,
+ desc->reset_clk_names);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to get reset clocks.\n");
+ goto free_rproc;
+ }
+ qproc->reset_clk_count = ret;
+
+ ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks,
+ desc->active_clk_names);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to get active clocks.\n");
+ goto free_rproc;
+ }
+ qproc->active_clk_count = ret;
+
+ ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs,
+ desc->proxy_supply);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to get proxy regulators.\n");
+ goto free_rproc;
+ }
+ qproc->proxy_reg_count = ret;
+
+ ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs,
+ desc->active_supply);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to get active regulators.\n");
+ goto free_rproc;
+ }
+ qproc->active_reg_count = ret;
+
+ ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds,
+ desc->proxy_pd_names);
+ /* Fallback to regulators for old device trees */
+ if (ret == -ENODATA && desc->fallback_proxy_supply) {
+ ret = q6v5_regulator_init(&pdev->dev,
+ qproc->fallback_proxy_regs,
+ desc->fallback_proxy_supply);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to get fallback proxy regulators.\n");
+ goto free_rproc;
+ }
+ qproc->fallback_proxy_reg_count = ret;
+ } else if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to init power domains\n");
+ goto free_rproc;
+ } else {
+ qproc->proxy_pd_count = ret;
+ }
+
+ qproc->has_alt_reset = desc->has_alt_reset;
+ ret = q6v5_init_reset(qproc);
+ if (ret)
+ goto detach_proxy_pds;
+
+ qproc->version = desc->version;
+ qproc->need_mem_protection = desc->need_mem_protection;
+ qproc->has_mba_logs = desc->has_mba_logs;
+
+ ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM, "modem",
+ qcom_msa_handover);
+ if (ret)
+ goto detach_proxy_pds;
+
+ qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
+ qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
+ qcom_add_glink_subdev(rproc, &qproc->glink_subdev, "mpss");
+ qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
+ qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
+ qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
+ if (IS_ERR(qproc->sysmon)) {
+ ret = PTR_ERR(qproc->sysmon);
+ goto remove_subdevs;
+ }
+
+ ret = rproc_add(rproc);
+ if (ret)
+ goto remove_sysmon_subdev;
+
+ node = of_get_compatible_child(pdev->dev.of_node, "qcom,bam-dmux");
+ qproc->bam_dmux = of_platform_device_create(node, NULL, &pdev->dev);
+ of_node_put(node);
+
+ return 0;
+
+remove_sysmon_subdev:
+ qcom_remove_sysmon_subdev(qproc->sysmon);
+remove_subdevs:
+ qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
+ qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
+ qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
+detach_proxy_pds:
+ q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
+free_rproc:
+ rproc_free(rproc);
+
+ return ret;
+}
+
+static int q6v5_remove(struct platform_device *pdev)
+{
+ struct q6v5 *qproc = platform_get_drvdata(pdev);
+ struct rproc *rproc = qproc->rproc;
+
+ if (qproc->bam_dmux)
+ of_platform_device_destroy(&qproc->bam_dmux->dev, NULL);
+ rproc_del(rproc);
+
+ qcom_q6v5_deinit(&qproc->q6v5);
+ qcom_remove_sysmon_subdev(qproc->sysmon);
+ qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
+ qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
+ qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
+
+ q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
+
+ rproc_free(rproc);
+
+ return 0;
+}
+
+static const struct rproc_hexagon_res sc7180_mss = {
+ .hexagon_mba_image = "mba.mbn",
+ .proxy_clk_names = (char*[]){
+ "xo",
+ NULL
+ },
+ .reset_clk_names = (char*[]){
+ "iface",
+ "bus",
+ "snoc_axi",
+ NULL
+ },
+ .active_clk_names = (char*[]){
+ "mnoc_axi",
+ "nav",
+ NULL
+ },
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mx",
+ "mss",
+ NULL
+ },
+ .need_mem_protection = true,
+ .has_alt_reset = false,
+ .has_mba_logs = true,
+ .has_spare_reg = true,
+ .has_qaccept_regs = false,
+ .has_ext_cntl_regs = false,
+ .has_vq6 = false,
+ .version = MSS_SC7180,
+};
+
+static const struct rproc_hexagon_res sc7280_mss = {
+ .hexagon_mba_image = "mba.mbn",
+ .proxy_clk_names = (char*[]){
+ "xo",
+ "pka",
+ NULL
+ },
+ .active_clk_names = (char*[]){
+ "iface",
+ "offline",
+ "snoc_axi",
+ NULL
+ },
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mss",
+ NULL
+ },
+ .need_mem_protection = true,
+ .has_alt_reset = false,
+ .has_mba_logs = true,
+ .has_spare_reg = false,
+ .has_qaccept_regs = true,
+ .has_ext_cntl_regs = true,
+ .has_vq6 = true,
+ .version = MSS_SC7280,
+};
+
+static const struct rproc_hexagon_res sdm845_mss = {
+ .hexagon_mba_image = "mba.mbn",
+ .proxy_clk_names = (char*[]){
+ "xo",
+ "prng",
+ NULL
+ },
+ .reset_clk_names = (char*[]){
+ "iface",
+ "snoc_axi",
+ NULL
+ },
+ .active_clk_names = (char*[]){
+ "bus",
+ "mem",
+ "gpll0_mss",
+ "mnoc_axi",
+ NULL
+ },
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mx",
+ "mss",
+ NULL
+ },
+ .need_mem_protection = true,
+ .has_alt_reset = true,
+ .has_mba_logs = false,
+ .has_spare_reg = false,
+ .has_qaccept_regs = false,
+ .has_ext_cntl_regs = false,
+ .has_vq6 = false,
+ .version = MSS_SDM845,
+};
+
+static const struct rproc_hexagon_res msm8998_mss = {
+ .hexagon_mba_image = "mba.mbn",
+ .proxy_clk_names = (char*[]){
+ "xo",
+ "qdss",
+ "mem",
+ NULL
+ },
+ .active_clk_names = (char*[]){
+ "iface",
+ "bus",
+ "gpll0_mss",
+ "mnoc_axi",
+ "snoc_axi",
+ NULL
+ },
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mx",
+ NULL
+ },
+ .need_mem_protection = true,
+ .has_alt_reset = false,
+ .has_mba_logs = false,
+ .has_spare_reg = false,
+ .has_qaccept_regs = false,
+ .has_ext_cntl_regs = false,
+ .has_vq6 = false,
+ .version = MSS_MSM8998,
+};
+
+static const struct rproc_hexagon_res msm8996_mss = {
+ .hexagon_mba_image = "mba.mbn",
+ .proxy_supply = (struct qcom_mss_reg_res[]) {
+ {
+ .supply = "pll",
+ .uA = 100000,
+ },
+ {}
+ },
+ .proxy_clk_names = (char*[]){
+ "xo",
+ "pnoc",
+ "qdss",
+ NULL
+ },
+ .active_clk_names = (char*[]){
+ "iface",
+ "bus",
+ "mem",
+ "gpll0_mss",
+ "snoc_axi",
+ "mnoc_axi",
+ NULL
+ },
+ .proxy_pd_names = (char*[]){
+ "mx",
+ "cx",
+ NULL
+ },
+ .need_mem_protection = true,
+ .has_alt_reset = false,
+ .has_mba_logs = false,
+ .has_spare_reg = false,
+ .has_qaccept_regs = false,
+ .has_ext_cntl_regs = false,
+ .has_vq6 = false,
+ .version = MSS_MSM8996,
+};
+
+static const struct rproc_hexagon_res msm8916_mss = {
+ .hexagon_mba_image = "mba.mbn",
+ .proxy_supply = (struct qcom_mss_reg_res[]) {
+ {
+ .supply = "pll",
+ .uA = 100000,
+ },
+ {}
+ },
+ .fallback_proxy_supply = (struct qcom_mss_reg_res[]) {
+ {
+ .supply = "mx",
+ .uV = 1050000,
+ },
+ {
+ .supply = "cx",
+ .uA = 100000,
+ },
+ {}
+ },
+ .proxy_clk_names = (char*[]){
+ "xo",
+ NULL
+ },
+ .active_clk_names = (char*[]){
+ "iface",
+ "bus",
+ "mem",
+ NULL
+ },
+ .proxy_pd_names = (char*[]){
+ "mx",
+ "cx",
+ NULL
+ },
+ .need_mem_protection = false,
+ .has_alt_reset = false,
+ .has_mba_logs = false,
+ .has_spare_reg = false,
+ .has_qaccept_regs = false,
+ .has_ext_cntl_regs = false,
+ .has_vq6 = false,
+ .version = MSS_MSM8916,
+};
+
+static const struct rproc_hexagon_res msm8974_mss = {
+ .hexagon_mba_image = "mba.b00",
+ .proxy_supply = (struct qcom_mss_reg_res[]) {
+ {
+ .supply = "pll",
+ .uA = 100000,
+ },
+ {}
+ },
+ .fallback_proxy_supply = (struct qcom_mss_reg_res[]) {
+ {
+ .supply = "mx",
+ .uV = 1050000,
+ },
+ {
+ .supply = "cx",
+ .uA = 100000,
+ },
+ {}
+ },
+ .active_supply = (struct qcom_mss_reg_res[]) {
+ {
+ .supply = "mss",
+ .uV = 1050000,
+ .uA = 100000,
+ },
+ {}
+ },
+ .proxy_clk_names = (char*[]){
+ "xo",
+ NULL
+ },
+ .active_clk_names = (char*[]){
+ "iface",
+ "bus",
+ "mem",
+ NULL
+ },
+ .proxy_pd_names = (char*[]){
+ "mx",
+ "cx",
+ NULL
+ },
+ .need_mem_protection = false,
+ .has_alt_reset = false,
+ .has_mba_logs = false,
+ .has_spare_reg = false,
+ .has_qaccept_regs = false,
+ .has_ext_cntl_regs = false,
+ .has_vq6 = false,
+ .version = MSS_MSM8974,
+};
+
+static const struct of_device_id q6v5_of_match[] = {
+ { .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
+ { .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
+ { .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
+ { .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
+ { .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss},
+ { .compatible = "qcom,sc7180-mss-pil", .data = &sc7180_mss},
+ { .compatible = "qcom,sc7280-mss-pil", .data = &sc7280_mss},
+ { .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
+ { },
+};
+MODULE_DEVICE_TABLE(of, q6v5_of_match);
+
+static struct platform_driver q6v5_driver = {
+ .probe = q6v5_probe,
+ .remove = q6v5_remove,
+ .driver = {
+ .name = "qcom-q6v5-mss",
+ .of_match_table = q6v5_of_match,
+ },
+};
+module_platform_driver(q6v5_driver);
+
+MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
new file mode 100644
index 000000000..dc6f07ca8
--- /dev/null
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -0,0 +1,1008 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Qualcomm ADSP/SLPI Peripheral Image Loader for MSM8974 and MSM8996
+ *
+ * Copyright (C) 2016 Linaro Ltd
+ * Copyright (C) 2014 Sony Mobile Communications AB
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/qcom_scm.h>
+#include <linux/regulator/consumer.h>
+#include <linux/remoteproc.h>
+#include <linux/soc/qcom/mdt_loader.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/soc/qcom/smem_state.h>
+
+#include "qcom_common.h"
+#include "qcom_pil_info.h"
+#include "qcom_q6v5.h"
+#include "remoteproc_internal.h"
+
+#define ADSP_DECRYPT_SHUTDOWN_DELAY_MS 100
+
+struct adsp_data {
+ int crash_reason_smem;
+ const char *firmware_name;
+ int pas_id;
+ unsigned int minidump_id;
+ bool has_aggre2_clk;
+ bool auto_boot;
+ bool decrypt_shutdown;
+
+ char **proxy_pd_names;
+
+ const char *load_state;
+ const char *ssr_name;
+ const char *sysmon_name;
+ int ssctl_id;
+};
+
+struct qcom_adsp {
+ struct device *dev;
+ struct rproc *rproc;
+
+ struct qcom_q6v5 q6v5;
+
+ struct clk *xo;
+ struct clk *aggre2_clk;
+
+ struct regulator *cx_supply;
+ struct regulator *px_supply;
+
+ struct device *proxy_pds[3];
+
+ int proxy_pd_count;
+
+ int pas_id;
+ unsigned int minidump_id;
+ int crash_reason_smem;
+ bool has_aggre2_clk;
+ bool decrypt_shutdown;
+ const char *info_name;
+
+ struct completion start_done;
+ struct completion stop_done;
+
+ phys_addr_t mem_phys;
+ phys_addr_t mem_reloc;
+ void *mem_region;
+ size_t mem_size;
+
+ struct qcom_rproc_glink glink_subdev;
+ struct qcom_rproc_subdev smd_subdev;
+ struct qcom_rproc_ssr ssr_subdev;
+ struct qcom_sysmon *sysmon;
+
+ struct qcom_scm_pas_metadata pas_metadata;
+};
+
+static void adsp_minidump(struct rproc *rproc)
+{
+ struct qcom_adsp *adsp = rproc->priv;
+
+ if (rproc->dump_conf == RPROC_COREDUMP_DISABLED)
+ return;
+
+ qcom_minidump(rproc, adsp->minidump_id);
+}
+
+static int adsp_pds_enable(struct qcom_adsp *adsp, struct device **pds,
+ size_t pd_count)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < pd_count; i++) {
+ dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
+ ret = pm_runtime_get_sync(pds[i]);
+ if (ret < 0) {
+ pm_runtime_put_noidle(pds[i]);
+ dev_pm_genpd_set_performance_state(pds[i], 0);
+ goto unroll_pd_votes;
+ }
+ }
+
+ return 0;
+
+unroll_pd_votes:
+ for (i--; i >= 0; i--) {
+ dev_pm_genpd_set_performance_state(pds[i], 0);
+ pm_runtime_put(pds[i]);
+ }
+
+ return ret;
+}
+
+static void adsp_pds_disable(struct qcom_adsp *adsp, struct device **pds,
+ size_t pd_count)
+{
+ int i;
+
+ for (i = 0; i < pd_count; i++) {
+ dev_pm_genpd_set_performance_state(pds[i], 0);
+ pm_runtime_put(pds[i]);
+ }
+}
+
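+/*
+ * On targets that need it, PAS shutdown can transiently fail while the
+ * firmware region is still being decrypted; retry while the secure call
+ * returns -EINVAL.
+ */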
+static int adsp_shutdown_poll_decrypt(struct qcom_adsp *adsp)
+{
+ unsigned int retry_num = 50;
+ int ret;
+
+ do {
+ msleep(ADSP_DECRYPT_SHUTDOWN_DELAY_MS);
+ ret = qcom_scm_pas_shutdown(adsp->pas_id);
+ } while (ret == -EINVAL && --retry_num);
+
+ return ret;
+}
+
+static int adsp_unprepare(struct rproc *rproc)
+{
+ struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
+
+	/*
+	 * adsp_load() passed pas_metadata to the SCM driver to store the
+	 * metadata context. It may already have been released if
+	 * auth_and_reset() succeeded, but in other cases clean it up here.
+	 */
+ qcom_scm_pas_metadata_release(&adsp->pas_metadata);
+
+ return 0;
+}
+
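+/*
+ * rproc .load callback: initialize the PAS metadata context with the
+ * secure world and load the firmware segments into the carveout.
+ */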
+static int adsp_load(struct rproc *rproc, const struct firmware *fw)
+{
+ struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
+ int ret;
+
+ ret = qcom_mdt_pas_init(adsp->dev, fw, rproc->firmware, adsp->pas_id,
+ adsp->mem_phys, &adsp->pas_metadata);
+ if (ret)
+ return ret;
+
+ ret = qcom_mdt_load_no_init(adsp->dev, fw, rproc->firmware, adsp->pas_id,
+ adsp->mem_region, adsp->mem_phys, adsp->mem_size,
+ &adsp->mem_reloc);
+ if (ret)
+ return ret;
+
+ qcom_pil_info_store(adsp->info_name, adsp->mem_phys, adsp->mem_size);
+
+ return 0;
+}
+
+static int adsp_start(struct rproc *rproc)
+{
+ struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
+ int ret;
+
+ ret = qcom_q6v5_prepare(&adsp->q6v5);
+ if (ret)
+ return ret;
+
+ ret = adsp_pds_enable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
+ if (ret < 0)
+ goto disable_irqs;
+
+ ret = clk_prepare_enable(adsp->xo);
+ if (ret)
+ goto disable_proxy_pds;
+
+ ret = clk_prepare_enable(adsp->aggre2_clk);
+ if (ret)
+ goto disable_xo_clk;
+
+ if (adsp->cx_supply) {
+ ret = regulator_enable(adsp->cx_supply);
+ if (ret)
+ goto disable_aggre2_clk;
+ }
+
+ if (adsp->px_supply) {
+ ret = regulator_enable(adsp->px_supply);
+ if (ret)
+ goto disable_cx_supply;
+ }
+
+ ret = qcom_scm_pas_auth_and_reset(adsp->pas_id);
+ if (ret) {
+ dev_err(adsp->dev,
+ "failed to authenticate image and release reset\n");
+ goto disable_px_supply;
+ }
+
+ ret = qcom_q6v5_wait_for_start(&adsp->q6v5, msecs_to_jiffies(5000));
+ if (ret == -ETIMEDOUT) {
+ dev_err(adsp->dev, "start timed out\n");
+ qcom_scm_pas_shutdown(adsp->pas_id);
+ goto disable_px_supply;
+ }
+
+ qcom_scm_pas_metadata_release(&adsp->pas_metadata);
+
+ return 0;
+
+disable_px_supply:
+ if (adsp->px_supply)
+ regulator_disable(adsp->px_supply);
+disable_cx_supply:
+ if (adsp->cx_supply)
+ regulator_disable(adsp->cx_supply);
+disable_aggre2_clk:
+ clk_disable_unprepare(adsp->aggre2_clk);
+disable_xo_clk:
+ clk_disable_unprepare(adsp->xo);
+disable_proxy_pds:
+ adsp_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
+disable_irqs:
+ qcom_q6v5_unprepare(&adsp->q6v5);
+
+ return ret;
+}
+
+static void qcom_pas_handover(struct qcom_q6v5 *q6v5)
+{
+ struct qcom_adsp *adsp = container_of(q6v5, struct qcom_adsp, q6v5);
+
+ if (adsp->px_supply)
+ regulator_disable(adsp->px_supply);
+ if (adsp->cx_supply)
+ regulator_disable(adsp->cx_supply);
+ clk_disable_unprepare(adsp->aggre2_clk);
+ clk_disable_unprepare(adsp->xo);
+ adsp_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
+}
+
+static int adsp_stop(struct rproc *rproc)
+{
+ struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
+ int handover;
+ int ret;
+
+ ret = qcom_q6v5_request_stop(&adsp->q6v5, adsp->sysmon);
+ if (ret == -ETIMEDOUT)
+ dev_err(adsp->dev, "timed out on wait\n");
+
+ ret = qcom_scm_pas_shutdown(adsp->pas_id);
+ if (ret && adsp->decrypt_shutdown)
+ ret = adsp_shutdown_poll_decrypt(adsp);
+
+ if (ret)
+ dev_err(adsp->dev, "failed to shutdown: %d\n", ret);
+
+ handover = qcom_q6v5_unprepare(&adsp->q6v5);
+ if (handover)
+ qcom_pas_handover(&adsp->q6v5);
+
+ return ret;
+}
+
+static void *adsp_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
+{
+ struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
+ int offset;
+
+ offset = da - adsp->mem_reloc;
+ if (offset < 0 || offset + len > adsp->mem_size)
+ return NULL;
+
+ if (is_iomem)
+ *is_iomem = true;
+
+ return adsp->mem_region + offset;
+}
+
+static unsigned long adsp_panic(struct rproc *rproc)
+{
+ struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
+
+ return qcom_q6v5_panic(&adsp->q6v5);
+}
+
+static const struct rproc_ops adsp_ops = {
+ .unprepare = adsp_unprepare,
+ .start = adsp_start,
+ .stop = adsp_stop,
+ .da_to_va = adsp_da_to_va,
+ .parse_fw = qcom_register_dump_segments,
+ .load = adsp_load,
+ .panic = adsp_panic,
+};
+
+static const struct rproc_ops adsp_minidump_ops = {
+ .unprepare = adsp_unprepare,
+ .start = adsp_start,
+ .stop = adsp_stop,
+ .da_to_va = adsp_da_to_va,
+ .load = adsp_load,
+ .panic = adsp_panic,
+ .coredump = adsp_minidump,
+};
+
+static int adsp_init_clock(struct qcom_adsp *adsp)
+{
+ int ret;
+
+ adsp->xo = devm_clk_get(adsp->dev, "xo");
+ if (IS_ERR(adsp->xo)) {
+ ret = PTR_ERR(adsp->xo);
+ if (ret != -EPROBE_DEFER)
+			dev_err(adsp->dev, "failed to get xo clock\n");
+ return ret;
+ }
+
+ if (adsp->has_aggre2_clk) {
+ adsp->aggre2_clk = devm_clk_get(adsp->dev, "aggre2");
+ if (IS_ERR(adsp->aggre2_clk)) {
+ ret = PTR_ERR(adsp->aggre2_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(adsp->dev,
+					"failed to get aggre2 clock\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int adsp_init_regulator(struct qcom_adsp *adsp)
+{
+ adsp->cx_supply = devm_regulator_get_optional(adsp->dev, "cx");
+ if (IS_ERR(adsp->cx_supply)) {
+ if (PTR_ERR(adsp->cx_supply) == -ENODEV)
+ adsp->cx_supply = NULL;
+ else
+ return PTR_ERR(adsp->cx_supply);
+ }
+
+ if (adsp->cx_supply)
+ regulator_set_load(adsp->cx_supply, 100000);
+
+ adsp->px_supply = devm_regulator_get_optional(adsp->dev, "px");
+ if (IS_ERR(adsp->px_supply)) {
+ if (PTR_ERR(adsp->px_supply) == -ENODEV)
+ adsp->px_supply = NULL;
+ else
+ return PTR_ERR(adsp->px_supply);
+ }
+
+ return 0;
+}
+
+static int adsp_pds_attach(struct device *dev, struct device **devs,
+ char **pd_names)
+{
+ size_t num_pds = 0;
+ int ret;
+ int i;
+
+ if (!pd_names)
+ return 0;
+
+ /* Handle single power domain */
+ if (dev->pm_domain) {
+ devs[0] = dev;
+ pm_runtime_enable(dev);
+ return 1;
+ }
+
+ while (pd_names[num_pds])
+ num_pds++;
+
+ for (i = 0; i < num_pds; i++) {
+ devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
+ if (IS_ERR_OR_NULL(devs[i])) {
+ ret = PTR_ERR(devs[i]) ? : -ENODATA;
+ goto unroll_attach;
+ }
+ }
+
+ return num_pds;
+
+unroll_attach:
+ for (i--; i >= 0; i--)
+ dev_pm_domain_detach(devs[i], false);
+
+ return ret;
+}
+
+static void adsp_pds_detach(struct qcom_adsp *adsp, struct device **pds,
+ size_t pd_count)
+{
+ struct device *dev = adsp->dev;
+ int i;
+
+ /* Handle single power domain */
+ if (dev->pm_domain && pd_count) {
+ pm_runtime_disable(dev);
+ return;
+ }
+
+ for (i = 0; i < pd_count; i++)
+ dev_pm_domain_detach(pds[i], false);
+}
+
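+/*
+ * Resolve the carveout backing the remote processor. An illustrative
+ * (hypothetical, not board-specific) devicetree wiring for the phandle
+ * lookup below; addresses and sizes are examples only:
+ *
+ *	adsp_mem: memory@8ea00000 {
+ *		reg = <0x0 0x8ea00000 0x0 0x1a00000>;
+ *		no-map;
+ *	};
+ *
+ *	remoteproc-adsp {
+ *		memory-region = <&adsp_mem>;
+ *	};
+ */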
+static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
+{
+ struct device_node *node;
+ struct resource r;
+ int ret;
+
+ node = of_parse_phandle(adsp->dev->of_node, "memory-region", 0);
+ if (!node) {
+ dev_err(adsp->dev, "no memory-region specified\n");
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(node, 0, &r);
+ of_node_put(node);
+ if (ret)
+ return ret;
+
+ adsp->mem_phys = adsp->mem_reloc = r.start;
+ adsp->mem_size = resource_size(&r);
+ adsp->mem_region = devm_ioremap_wc(adsp->dev, adsp->mem_phys, adsp->mem_size);
+ if (!adsp->mem_region) {
+ dev_err(adsp->dev, "unable to map memory region: %pa+%zx\n",
+ &r.start, adsp->mem_size);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int adsp_probe(struct platform_device *pdev)
+{
+ const struct adsp_data *desc;
+ struct qcom_adsp *adsp;
+ struct rproc *rproc;
+ const char *fw_name;
+ const struct rproc_ops *ops = &adsp_ops;
+ int ret;
+
+ desc = of_device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ if (!qcom_scm_is_available())
+ return -EPROBE_DEFER;
+
+ fw_name = desc->firmware_name;
+ ret = of_property_read_string(pdev->dev.of_node, "firmware-name",
+ &fw_name);
+ if (ret < 0 && ret != -EINVAL)
+ return ret;
+
+ if (desc->minidump_id)
+ ops = &adsp_minidump_ops;
+
+ rproc = rproc_alloc(&pdev->dev, pdev->name, ops, fw_name, sizeof(*adsp));
+ if (!rproc) {
+ dev_err(&pdev->dev, "unable to allocate remoteproc\n");
+ return -ENOMEM;
+ }
+
+ rproc->auto_boot = desc->auto_boot;
+ rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
+
+ adsp = (struct qcom_adsp *)rproc->priv;
+ adsp->dev = &pdev->dev;
+ adsp->rproc = rproc;
+ adsp->minidump_id = desc->minidump_id;
+ adsp->pas_id = desc->pas_id;
+ adsp->has_aggre2_clk = desc->has_aggre2_clk;
+ adsp->info_name = desc->sysmon_name;
+ adsp->decrypt_shutdown = desc->decrypt_shutdown;
+ platform_set_drvdata(pdev, adsp);
+
+ ret = device_init_wakeup(adsp->dev, true);
+ if (ret)
+ goto free_rproc;
+
+ ret = adsp_alloc_memory_region(adsp);
+ if (ret)
+ goto free_rproc;
+
+ ret = adsp_init_clock(adsp);
+ if (ret)
+ goto free_rproc;
+
+ ret = adsp_init_regulator(adsp);
+ if (ret)
+ goto free_rproc;
+
+ ret = adsp_pds_attach(&pdev->dev, adsp->proxy_pds,
+ desc->proxy_pd_names);
+ if (ret < 0)
+ goto free_rproc;
+ adsp->proxy_pd_count = ret;
+
+ ret = qcom_q6v5_init(&adsp->q6v5, pdev, rproc, desc->crash_reason_smem, desc->load_state,
+ qcom_pas_handover);
+ if (ret)
+ goto detach_proxy_pds;
+
+ qcom_add_glink_subdev(rproc, &adsp->glink_subdev, desc->ssr_name);
+ qcom_add_smd_subdev(rproc, &adsp->smd_subdev);
+ qcom_add_ssr_subdev(rproc, &adsp->ssr_subdev, desc->ssr_name);
+ adsp->sysmon = qcom_add_sysmon_subdev(rproc,
+ desc->sysmon_name,
+ desc->ssctl_id);
+ if (IS_ERR(adsp->sysmon)) {
+ ret = PTR_ERR(adsp->sysmon);
+ goto detach_proxy_pds;
+ }
+
+ ret = rproc_add(rproc);
+ if (ret)
+ goto detach_proxy_pds;
+
+ return 0;
+
+detach_proxy_pds:
+ adsp_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
+free_rproc:
+ device_init_wakeup(adsp->dev, false);
+ rproc_free(rproc);
+
+ return ret;
+}
+
+static int adsp_remove(struct platform_device *pdev)
+{
+ struct qcom_adsp *adsp = platform_get_drvdata(pdev);
+
+ rproc_del(adsp->rproc);
+
+ qcom_q6v5_deinit(&adsp->q6v5);
+ qcom_remove_glink_subdev(adsp->rproc, &adsp->glink_subdev);
+ qcom_remove_sysmon_subdev(adsp->sysmon);
+ qcom_remove_smd_subdev(adsp->rproc, &adsp->smd_subdev);
+ qcom_remove_ssr_subdev(adsp->rproc, &adsp->ssr_subdev);
+ adsp_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
+ device_init_wakeup(adsp->dev, false);
+ rproc_free(adsp->rproc);
+
+ return 0;
+}
+
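+/*
+ * Per-SoC resource descriptions. Each entry is matched through
+ * adsp_of_match at the bottom of the file and describes which SMEM item
+ * carries the crash reason, which firmware to load, the PAS peripheral id
+ * used for secure authentication, and the proxy power domains to vote on
+ * while the remote boots.
+ */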
+static const struct adsp_data adsp_resource_init = {
+ .crash_reason_smem = 423,
+ .firmware_name = "adsp.mdt",
+ .pas_id = 1,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .ssr_name = "lpass",
+ .sysmon_name = "adsp",
+ .ssctl_id = 0x14,
+};
+
+static const struct adsp_data sdm845_adsp_resource_init = {
+ .crash_reason_smem = 423,
+ .firmware_name = "adsp.mdt",
+ .pas_id = 1,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .load_state = "adsp",
+ .ssr_name = "lpass",
+ .sysmon_name = "adsp",
+ .ssctl_id = 0x14,
+};
+
+static const struct adsp_data sm6350_adsp_resource = {
+ .crash_reason_smem = 423,
+ .firmware_name = "adsp.mdt",
+ .pas_id = 1,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "lcx",
+ "lmx",
+ NULL
+ },
+ .load_state = "adsp",
+ .ssr_name = "lpass",
+ .sysmon_name = "adsp",
+ .ssctl_id = 0x14,
+};
+
+static const struct adsp_data sm8150_adsp_resource = {
+ .crash_reason_smem = 423,
+ .firmware_name = "adsp.mdt",
+ .pas_id = 1,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ NULL
+ },
+ .load_state = "adsp",
+ .ssr_name = "lpass",
+ .sysmon_name = "adsp",
+ .ssctl_id = 0x14,
+};
+
+static const struct adsp_data sm8250_adsp_resource = {
+ .crash_reason_smem = 423,
+ .firmware_name = "adsp.mdt",
+ .pas_id = 1,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "lcx",
+ "lmx",
+ NULL
+ },
+ .load_state = "adsp",
+ .ssr_name = "lpass",
+ .sysmon_name = "adsp",
+ .ssctl_id = 0x14,
+};
+
+static const struct adsp_data sm8350_adsp_resource = {
+ .crash_reason_smem = 423,
+ .firmware_name = "adsp.mdt",
+ .pas_id = 1,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "lcx",
+ "lmx",
+ NULL
+ },
+ .load_state = "adsp",
+ .ssr_name = "lpass",
+ .sysmon_name = "adsp",
+ .ssctl_id = 0x14,
+};
+
+static const struct adsp_data msm8996_adsp_resource = {
+ .crash_reason_smem = 423,
+ .firmware_name = "adsp.mdt",
+ .pas_id = 1,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ NULL
+ },
+ .ssr_name = "lpass",
+ .sysmon_name = "adsp",
+ .ssctl_id = 0x14,
+};
+
+static const struct adsp_data cdsp_resource_init = {
+ .crash_reason_smem = 601,
+ .firmware_name = "cdsp.mdt",
+ .pas_id = 18,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .ssr_name = "cdsp",
+ .sysmon_name = "cdsp",
+ .ssctl_id = 0x17,
+};
+
+static const struct adsp_data sdm845_cdsp_resource_init = {
+ .crash_reason_smem = 601,
+ .firmware_name = "cdsp.mdt",
+ .pas_id = 18,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .load_state = "cdsp",
+ .ssr_name = "cdsp",
+ .sysmon_name = "cdsp",
+ .ssctl_id = 0x17,
+};
+
+static const struct adsp_data sm6350_cdsp_resource = {
+ .crash_reason_smem = 601,
+ .firmware_name = "cdsp.mdt",
+ .pas_id = 18,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mx",
+ NULL
+ },
+ .load_state = "cdsp",
+ .ssr_name = "cdsp",
+ .sysmon_name = "cdsp",
+ .ssctl_id = 0x17,
+};
+
+static const struct adsp_data sm8150_cdsp_resource = {
+ .crash_reason_smem = 601,
+ .firmware_name = "cdsp.mdt",
+ .pas_id = 18,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ NULL
+ },
+ .load_state = "cdsp",
+ .ssr_name = "cdsp",
+ .sysmon_name = "cdsp",
+ .ssctl_id = 0x17,
+};
+
+static const struct adsp_data sm8250_cdsp_resource = {
+ .crash_reason_smem = 601,
+ .firmware_name = "cdsp.mdt",
+ .pas_id = 18,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ NULL
+ },
+ .load_state = "cdsp",
+ .ssr_name = "cdsp",
+ .sysmon_name = "cdsp",
+ .ssctl_id = 0x17,
+};
+
+static const struct adsp_data sc8280xp_nsp0_resource = {
+ .crash_reason_smem = 601,
+ .firmware_name = "cdsp.mdt",
+ .pas_id = 18,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "nsp",
+ NULL
+ },
+ .ssr_name = "cdsp0",
+ .sysmon_name = "cdsp",
+ .ssctl_id = 0x17,
+};
+
+static const struct adsp_data sc8280xp_nsp1_resource = {
+ .crash_reason_smem = 633,
+ .firmware_name = "cdsp.mdt",
+ .pas_id = 30,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "nsp",
+ NULL
+ },
+ .ssr_name = "cdsp1",
+ .sysmon_name = "cdsp1",
+ .ssctl_id = 0x20,
+};
+
+static const struct adsp_data sm8350_cdsp_resource = {
+ .crash_reason_smem = 601,
+ .firmware_name = "cdsp.mdt",
+ .pas_id = 18,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mxc",
+ NULL
+ },
+ .load_state = "cdsp",
+ .ssr_name = "cdsp",
+ .sysmon_name = "cdsp",
+ .ssctl_id = 0x17,
+};
+
+static const struct adsp_data mpss_resource_init = {
+ .crash_reason_smem = 421,
+ .firmware_name = "modem.mdt",
+ .pas_id = 4,
+ .minidump_id = 3,
+ .has_aggre2_clk = false,
+ .auto_boot = false,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mss",
+ NULL
+ },
+ .load_state = "modem",
+ .ssr_name = "mpss",
+ .sysmon_name = "modem",
+ .ssctl_id = 0x12,
+};
+
+static const struct adsp_data sc8180x_mpss_resource = {
+ .crash_reason_smem = 421,
+ .firmware_name = "modem.mdt",
+ .pas_id = 4,
+ .has_aggre2_clk = false,
+ .auto_boot = false,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ NULL
+ },
+ .load_state = "modem",
+ .ssr_name = "mpss",
+ .sysmon_name = "modem",
+ .ssctl_id = 0x12,
+};
+
+static const struct adsp_data slpi_resource_init = {
+ .crash_reason_smem = 424,
+ .firmware_name = "slpi.mdt",
+ .pas_id = 12,
+ .has_aggre2_clk = true,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "ssc_cx",
+ NULL
+ },
+ .ssr_name = "dsps",
+ .sysmon_name = "slpi",
+ .ssctl_id = 0x16,
+};
+
+static const struct adsp_data sm8150_slpi_resource = {
+ .crash_reason_smem = 424,
+ .firmware_name = "slpi.mdt",
+ .pas_id = 12,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "lcx",
+ "lmx",
+ NULL
+ },
+ .load_state = "slpi",
+ .ssr_name = "dsps",
+ .sysmon_name = "slpi",
+ .ssctl_id = 0x16,
+};
+
+static const struct adsp_data sm8250_slpi_resource = {
+ .crash_reason_smem = 424,
+ .firmware_name = "slpi.mdt",
+ .pas_id = 12,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "lcx",
+ "lmx",
+ NULL
+ },
+ .load_state = "slpi",
+ .ssr_name = "dsps",
+ .sysmon_name = "slpi",
+ .ssctl_id = 0x16,
+};
+
+static const struct adsp_data sm8350_slpi_resource = {
+ .crash_reason_smem = 424,
+ .firmware_name = "slpi.mdt",
+ .pas_id = 12,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "lcx",
+ "lmx",
+ NULL
+ },
+ .load_state = "slpi",
+ .ssr_name = "dsps",
+ .sysmon_name = "slpi",
+ .ssctl_id = 0x16,
+};
+
+static const struct adsp_data wcss_resource_init = {
+ .crash_reason_smem = 421,
+ .firmware_name = "wcnss.mdt",
+ .pas_id = 6,
+ .auto_boot = true,
+ .ssr_name = "mpss",
+ .sysmon_name = "wcnss",
+ .ssctl_id = 0x12,
+};
+
+static const struct adsp_data sdx55_mpss_resource = {
+ .crash_reason_smem = 421,
+ .firmware_name = "modem.mdt",
+ .pas_id = 4,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mss",
+ NULL
+ },
+ .ssr_name = "mpss",
+ .sysmon_name = "modem",
+ .ssctl_id = 0x22,
+};
+
+static const struct adsp_data sm8450_mpss_resource = {
+ .crash_reason_smem = 421,
+ .firmware_name = "modem.mdt",
+ .pas_id = 4,
+ .minidump_id = 3,
+ .has_aggre2_clk = false,
+ .auto_boot = false,
+ .decrypt_shutdown = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mss",
+ NULL
+ },
+ .load_state = "modem",
+ .ssr_name = "mpss",
+ .sysmon_name = "modem",
+ .ssctl_id = 0x12,
+};
+
+static const struct of_device_id adsp_of_match[] = {
+ { .compatible = "qcom,msm8226-adsp-pil", .data = &adsp_resource_init},
+ { .compatible = "qcom,msm8974-adsp-pil", .data = &adsp_resource_init},
+ { .compatible = "qcom,msm8996-adsp-pil", .data = &msm8996_adsp_resource},
+ { .compatible = "qcom,msm8996-slpi-pil", .data = &slpi_resource_init},
+ { .compatible = "qcom,msm8998-adsp-pas", .data = &msm8996_adsp_resource},
+ { .compatible = "qcom,msm8998-slpi-pas", .data = &slpi_resource_init},
+ { .compatible = "qcom,qcs404-adsp-pas", .data = &adsp_resource_init },
+ { .compatible = "qcom,qcs404-cdsp-pas", .data = &cdsp_resource_init },
+ { .compatible = "qcom,qcs404-wcss-pas", .data = &wcss_resource_init },
+ { .compatible = "qcom,sc7180-mpss-pas", .data = &mpss_resource_init},
+ { .compatible = "qcom,sc7280-mpss-pas", .data = &mpss_resource_init},
+ { .compatible = "qcom,sc8180x-adsp-pas", .data = &sm8150_adsp_resource},
+ { .compatible = "qcom,sc8180x-cdsp-pas", .data = &sm8150_cdsp_resource},
+ { .compatible = "qcom,sc8180x-mpss-pas", .data = &sc8180x_mpss_resource},
+ { .compatible = "qcom,sc8280xp-adsp-pas", .data = &sm8250_adsp_resource},
+ { .compatible = "qcom,sc8280xp-nsp0-pas", .data = &sc8280xp_nsp0_resource},
+ { .compatible = "qcom,sc8280xp-nsp1-pas", .data = &sc8280xp_nsp1_resource},
+ { .compatible = "qcom,sdm660-adsp-pas", .data = &adsp_resource_init},
+ { .compatible = "qcom,sdm845-adsp-pas", .data = &sdm845_adsp_resource_init},
+ { .compatible = "qcom,sdm845-cdsp-pas", .data = &sdm845_cdsp_resource_init},
+ { .compatible = "qcom,sdx55-mpss-pas", .data = &sdx55_mpss_resource},
+ { .compatible = "qcom,sm6350-adsp-pas", .data = &sm6350_adsp_resource},
+ { .compatible = "qcom,sm6350-cdsp-pas", .data = &sm6350_cdsp_resource},
+ { .compatible = "qcom,sm6350-mpss-pas", .data = &mpss_resource_init},
+ { .compatible = "qcom,sm8150-adsp-pas", .data = &sm8150_adsp_resource},
+ { .compatible = "qcom,sm8150-cdsp-pas", .data = &sm8150_cdsp_resource},
+ { .compatible = "qcom,sm8150-mpss-pas", .data = &mpss_resource_init},
+ { .compatible = "qcom,sm8150-slpi-pas", .data = &sm8150_slpi_resource},
+ { .compatible = "qcom,sm8250-adsp-pas", .data = &sm8250_adsp_resource},
+ { .compatible = "qcom,sm8250-cdsp-pas", .data = &sm8250_cdsp_resource},
+ { .compatible = "qcom,sm8250-slpi-pas", .data = &sm8250_slpi_resource},
+ { .compatible = "qcom,sm8350-adsp-pas", .data = &sm8350_adsp_resource},
+ { .compatible = "qcom,sm8350-cdsp-pas", .data = &sm8350_cdsp_resource},
+ { .compatible = "qcom,sm8350-slpi-pas", .data = &sm8350_slpi_resource},
+ { .compatible = "qcom,sm8350-mpss-pas", .data = &mpss_resource_init},
+ { .compatible = "qcom,sm8450-adsp-pas", .data = &sm8350_adsp_resource},
+ { .compatible = "qcom,sm8450-cdsp-pas", .data = &sm8350_cdsp_resource},
+ { .compatible = "qcom,sm8450-slpi-pas", .data = &sm8350_slpi_resource},
+ { .compatible = "qcom,sm8450-mpss-pas", .data = &sm8450_mpss_resource},
+ { },
+};
+MODULE_DEVICE_TABLE(of, adsp_of_match);
+
+static struct platform_driver adsp_driver = {
+ .probe = adsp_probe,
+ .remove = adsp_remove,
+ .driver = {
+ .name = "qcom_q6v5_pas",
+ .of_match_table = adsp_of_match,
+ },
+};
+
+module_platform_driver(adsp_driver);
+MODULE_DESCRIPTION("Qualcomm Hexagon v5 Peripheral Authentication Service driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/remoteproc/qcom_q6v5_wcss.c b/drivers/remoteproc/qcom_q6v5_wcss.c
new file mode 100644
index 000000000..ba24d745b
--- /dev/null
+++ b/drivers/remoteproc/qcom_q6v5_wcss.c
@@ -0,0 +1,1129 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016-2018 Linaro Ltd.
+ * Copyright (C) 2014 Sony Mobile Communications AB
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/soc/qcom/mdt_loader.h>
+#include "qcom_common.h"
+#include "qcom_pil_info.h"
+#include "qcom_q6v5.h"
+
+#define WCSS_CRASH_REASON 421
+
+/* Q6SS Register Offsets */
+#define Q6SS_RESET_REG 0x014
+#define Q6SS_GFMUX_CTL_REG 0x020
+#define Q6SS_PWR_CTL_REG 0x030
+#define Q6SS_MEM_PWR_CTL 0x0B0
+#define Q6SS_STRAP_ACC 0x110
+#define Q6SS_CGC_OVERRIDE 0x034
+#define Q6SS_BCR_REG 0x6000
+
+/* AXI Halt Register Offsets */
+#define AXI_HALTREQ_REG 0x0
+#define AXI_HALTACK_REG 0x4
+#define AXI_IDLE_REG 0x8
+
+#define HALT_ACK_TIMEOUT_MS 100
+
+/* Q6SS_RESET */
+#define Q6SS_STOP_CORE BIT(0)
+#define Q6SS_CORE_ARES BIT(1)
+#define Q6SS_BUS_ARES_ENABLE BIT(2)
+
+/* Q6SS_BRC_RESET */
+#define Q6SS_BRC_BLK_ARES BIT(0)
+
+/* Q6SS_GFMUX_CTL */
+#define Q6SS_CLK_ENABLE BIT(1)
+#define Q6SS_SWITCH_CLK_SRC BIT(8)
+
+/* Q6SS_PWR_CTL */
+#define Q6SS_L2DATA_STBY_N BIT(18)
+#define Q6SS_SLP_RET_N BIT(19)
+#define Q6SS_CLAMP_IO BIT(20)
+#define QDSS_BHS_ON BIT(21)
+#define QDSS_Q6_MEMORIES GENMASK(15, 0)
+
+/* Q6SS parameters */
+#define Q6SS_LDO_BYP BIT(25)
+#define Q6SS_BHS_ON BIT(24)
+#define Q6SS_CLAMP_WL BIT(21)
+#define Q6SS_CLAMP_QMC_MEM BIT(22)
+#define HALT_CHECK_MAX_LOOPS 200
+#define Q6SS_XO_CBCR GENMASK(5, 3)
+#define Q6SS_SLEEP_CBCR GENMASK(5, 2)
+
+/* Q6SS config/status registers */
+#define TCSR_GLOBAL_CFG0 0x0
+#define TCSR_GLOBAL_CFG1 0x4
+#define SSCAON_CONFIG 0x8
+#define SSCAON_STATUS 0xc
+#define Q6SS_BHS_STATUS 0x78
+#define Q6SS_RST_EVB 0x10
+
+#define BHS_EN_REST_ACK BIT(0)
+#define SSCAON_ENABLE BIT(13)
+#define SSCAON_BUS_EN BIT(15)
+#define SSCAON_BUS_MUX_MASK GENMASK(18, 16)
+
+#define MEM_BANKS 19
+#define TCSR_WCSS_CLK_MASK 0x1F
+#define TCSR_WCSS_CLK_ENABLE 0x14
+
+#define MAX_HALT_REG 3
+enum {
+ WCSS_IPQ8074,
+ WCSS_QCS404,
+};
+
+struct wcss_data {
+ const char *firmware_name;
+ unsigned int crash_reason_smem;
+ u32 version;
+ bool aon_reset_required;
+ bool wcss_q6_reset_required;
+ const char *ssr_name;
+ const char *sysmon_name;
+ int ssctl_id;
+ const struct rproc_ops *ops;
+ bool requires_force_stop;
+};
+
+struct q6v5_wcss {
+ struct device *dev;
+
+ void __iomem *reg_base;
+ void __iomem *rmb_base;
+
+ struct regmap *halt_map;
+ u32 halt_q6;
+ u32 halt_wcss;
+ u32 halt_nc;
+
+ struct clk *xo;
+ struct clk *ahbfabric_cbcr_clk;
+ struct clk *gcc_abhs_cbcr;
+ struct clk *gcc_axim_cbcr;
+ struct clk *lcc_csr_cbcr;
+ struct clk *ahbs_cbcr;
+ struct clk *tcm_slave_cbcr;
+ struct clk *qdsp6ss_abhm_cbcr;
+ struct clk *qdsp6ss_sleep_cbcr;
+ struct clk *qdsp6ss_axim_cbcr;
+ struct clk *qdsp6ss_xo_cbcr;
+ struct clk *qdsp6ss_core_gfmux;
+ struct clk *lcc_bcr_sleep;
+ struct regulator *cx_supply;
+ struct qcom_sysmon *sysmon;
+
+ struct reset_control *wcss_aon_reset;
+ struct reset_control *wcss_reset;
+ struct reset_control *wcss_q6_reset;
+ struct reset_control *wcss_q6_bcr_reset;
+
+ struct qcom_q6v5 q6v5;
+
+ phys_addr_t mem_phys;
+ phys_addr_t mem_reloc;
+ void *mem_region;
+ size_t mem_size;
+
+ unsigned int crash_reason_smem;
+ u32 version;
+ bool requires_force_stop;
+
+ struct qcom_rproc_glink glink_subdev;
+ struct qcom_rproc_ssr ssr_subdev;
+};
+
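+/*
+ * Power up and release the Q6 core, roughly mirroring the downstream
+ * bring-up sequence: park the core with resets asserted, enable the XO
+ * branch clock, turn on the head switch, bypass the LDO and release the
+ * memory clamps, power the memory banks one bit at a time to limit
+ * in-rush current, then release the core reset and let it run.
+ */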
+static int q6v5_wcss_reset(struct q6v5_wcss *wcss)
+{
+ int ret;
+ u32 val;
+ int i;
+
+ /* Assert resets, stop core */
+ val = readl(wcss->reg_base + Q6SS_RESET_REG);
+ val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
+ writel(val, wcss->reg_base + Q6SS_RESET_REG);
+
+ /* BHS requires the XO CBCR clock to be enabled */
+ val = readl(wcss->reg_base + Q6SS_XO_CBCR);
+ val |= 0x1;
+ writel(val, wcss->reg_base + Q6SS_XO_CBCR);
+
+ /* Wait for the CLKOFF bit to go low, indicating the clock is enabled */
+ ret = readl_poll_timeout(wcss->reg_base + Q6SS_XO_CBCR,
+ val, !(val & BIT(31)), 1,
+ HALT_CHECK_MAX_LOOPS);
+ if (ret) {
+ dev_err(wcss->dev,
+ "xo cbcr enabling timed out (rc:%d)\n", ret);
+ return ret;
+ }
+ /* Enable power block headswitch and wait for it to stabilize */
+ val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG);
+ val |= Q6SS_BHS_ON;
+ writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
+ udelay(1);
+
+ /* Put LDO in bypass mode */
+ val |= Q6SS_LDO_BYP;
+ writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ /* Deassert Q6 compiler memory clamp */
+ val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG);
+ val &= ~Q6SS_CLAMP_QMC_MEM;
+ writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ /* Deassert memory peripheral sleep and L2 memory standby */
+ val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
+ writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ /* Turn on L1, L2, ETB and JU memories one at a time */
+ val = readl(wcss->reg_base + Q6SS_MEM_PWR_CTL);
+ for (i = MEM_BANKS; i >= 0; i--) {
+ val |= BIT(i);
+ writel(val, wcss->reg_base + Q6SS_MEM_PWR_CTL);
+ /*
+ * Read back value to ensure the write is done then
+ * wait for 1us for both memory peripheral and data
+ * array to turn on.
+ */
+ val |= readl(wcss->reg_base + Q6SS_MEM_PWR_CTL);
+ udelay(1);
+ }
+ /* Remove word line clamp */
+ val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG);
+ val &= ~Q6SS_CLAMP_WL;
+ writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ /* Remove IO clamp */
+ val &= ~Q6SS_CLAMP_IO;
+ writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ /* Bring core out of reset */
+ val = readl(wcss->reg_base + Q6SS_RESET_REG);
+ val &= ~Q6SS_CORE_ARES;
+ writel(val, wcss->reg_base + Q6SS_RESET_REG);
+
+ /* Turn on core clock */
+ val = readl(wcss->reg_base + Q6SS_GFMUX_CTL_REG);
+ val |= Q6SS_CLK_ENABLE;
+ writel(val, wcss->reg_base + Q6SS_GFMUX_CTL_REG);
+
+ /* Start core execution */
+ val = readl(wcss->reg_base + Q6SS_RESET_REG);
+ val &= ~Q6SS_STOP_CORE;
+ writel(val, wcss->reg_base + Q6SS_RESET_REG);
+
+ return 0;
+}
+
+static int q6v5_wcss_start(struct rproc *rproc)
+{
+ struct q6v5_wcss *wcss = rproc->priv;
+ int ret;
+
+ qcom_q6v5_prepare(&wcss->q6v5);
+
+ /* Release Q6 and WCSS reset */
+ ret = reset_control_deassert(wcss->wcss_reset);
+ if (ret) {
+ dev_err(wcss->dev, "wcss_reset failed\n");
+ return ret;
+ }
+
+ ret = reset_control_deassert(wcss->wcss_q6_reset);
+ if (ret) {
+ dev_err(wcss->dev, "wcss_q6_reset failed\n");
+ goto wcss_reset;
+ }
+
+ /* Lithium configuration - clock gating and bus arbitration */
+ ret = regmap_update_bits(wcss->halt_map,
+ wcss->halt_nc + TCSR_GLOBAL_CFG0,
+ TCSR_WCSS_CLK_MASK,
+ TCSR_WCSS_CLK_ENABLE);
+ if (ret)
+ goto wcss_q6_reset;
+
+ ret = regmap_update_bits(wcss->halt_map,
+ wcss->halt_nc + TCSR_GLOBAL_CFG1,
+ 1, 0);
+ if (ret)
+ goto wcss_q6_reset;
+
+ /* Write bootaddr to EVB so that Q6WCSS will jump there after reset */
+ writel(rproc->bootaddr >> 4, wcss->reg_base + Q6SS_RST_EVB);
+
+ ret = q6v5_wcss_reset(wcss);
+ if (ret)
+ goto wcss_q6_reset;
+
+ ret = qcom_q6v5_wait_for_start(&wcss->q6v5, 5 * HZ);
+ if (ret == -ETIMEDOUT)
+ dev_err(wcss->dev, "start timed out\n");
+
+ return ret;
+
+wcss_q6_reset:
+ reset_control_assert(wcss->wcss_q6_reset);
+
+wcss_reset:
+ reset_control_assert(wcss->wcss_reset);
+
+ return ret;
+}
+
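+/*
+ * QCS404 variant bring-up: the Q6SSTOP clock branches must come up in a
+ * specific order before the PWR_CTL programming below, and the error path
+ * unwinds them in reverse. The magic PWR_CTL values follow the downstream
+ * programming sequence and are not individually documented here.
+ */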
+static int q6v5_wcss_qcs404_power_on(struct q6v5_wcss *wcss)
+{
+ unsigned long val;
+ int ret, idx;
+
+ /* Toggle the restart */
+ reset_control_assert(wcss->wcss_reset);
+ usleep_range(200, 300);
+ reset_control_deassert(wcss->wcss_reset);
+ usleep_range(200, 300);
+
+ /* Enable GCC_WDSP_Q6SS_AHBS_CBCR clock */
+ ret = clk_prepare_enable(wcss->gcc_abhs_cbcr);
+ if (ret)
+ return ret;
+
+ /* Remove reset to the WCNSS QDSP6SS */
+ reset_control_deassert(wcss->wcss_q6_bcr_reset);
+
+ /* Enable Q6SSTOP_AHBFABRIC_CBCR clock */
+ ret = clk_prepare_enable(wcss->ahbfabric_cbcr_clk);
+ if (ret)
+ goto disable_gcc_abhs_cbcr_clk;
+
+ /* Enable the LCC CSR CBC, Q6SSTOP_Q6SSTOP_LCC_CSR_CBCR clock */
+ ret = clk_prepare_enable(wcss->lcc_csr_cbcr);
+ if (ret)
+ goto disable_ahbfabric_cbcr_clk;
+
+ /* Enable the Q6AHBS CBC, Q6SSTOP_Q6SS_AHBS_CBCR clock */
+ ret = clk_prepare_enable(wcss->ahbs_cbcr);
+ if (ret)
+ goto disable_csr_cbcr_clk;
+
+ /* Enable the TCM slave CBC, Q6SSTOP_Q6SS_TCM_SLAVE_CBCR clock */
+ ret = clk_prepare_enable(wcss->tcm_slave_cbcr);
+ if (ret)
+ goto disable_ahbs_cbcr_clk;
+
+ /* Enable the Q6SS AHB master CBC, Q6SSTOP_Q6SS_AHBM_CBCR clock */
+ ret = clk_prepare_enable(wcss->qdsp6ss_abhm_cbcr);
+ if (ret)
+ goto disable_tcm_slave_cbcr_clk;
+
+ /* Enable the Q6SS AXI master CBC, Q6SSTOP_Q6SS_AXIM_CBCR clock */
+ ret = clk_prepare_enable(wcss->qdsp6ss_axim_cbcr);
+ if (ret)
+ goto disable_abhm_cbcr_clk;
+
+ /* Enable the Q6SS XO CBC */
+ val = readl(wcss->reg_base + Q6SS_XO_CBCR);
+ val |= BIT(0);
+ writel(val, wcss->reg_base + Q6SS_XO_CBCR);
+ /* Wait for the CLKOFF bit to go low, indicating the clock is enabled */
+ ret = readl_poll_timeout(wcss->reg_base + Q6SS_XO_CBCR,
+ val, !(val & BIT(31)), 1,
+ HALT_CHECK_MAX_LOOPS);
+ if (ret) {
+ dev_err(wcss->dev,
+ "xo cbcr enabling timed out (rc:%d)\n", ret);
+ goto disable_xo_cbcr_clk;
+ }
+
+ writel(0, wcss->reg_base + Q6SS_CGC_OVERRIDE);
+
+ /* Enable the QDSP6 sleep clock */
+ val = readl(wcss->reg_base + Q6SS_SLEEP_CBCR);
+ val |= BIT(0);
+ writel(val, wcss->reg_base + Q6SS_SLEEP_CBCR);
+
+ /* Enable the Q6 AXI clock, GCC_WDSP_Q6SS_AXIM_CBCR */
+ ret = clk_prepare_enable(wcss->gcc_axim_cbcr);
+ if (ret)
+ goto disable_sleep_cbcr_clk;
+
+ /* Assert resets, stop core */
+ val = readl(wcss->reg_base + Q6SS_RESET_REG);
+ val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
+ writel(val, wcss->reg_base + Q6SS_RESET_REG);
+
+ /* Program the QDSP6SS PWR_CTL register */
+ writel(0x01700000, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ writel(0x03700000, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ writel(0x03300000, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ writel(0x033C0000, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ /*
+ * Enable memories by turning on the QDSP6 memory foot/head switch, one
+ * bank at a time to avoid in-rush current
+ */
+ for (idx = 28; idx >= 0; idx--) {
+ writel((readl(wcss->reg_base + Q6SS_MEM_PWR_CTL) |
+ (1 << idx)), wcss->reg_base + Q6SS_MEM_PWR_CTL);
+ }
+
+ writel(0x031C0000, wcss->reg_base + Q6SS_PWR_CTL_REG);
+ writel(0x030C0000, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ val = readl(wcss->reg_base + Q6SS_RESET_REG);
+ val &= ~Q6SS_CORE_ARES;
+ writel(val, wcss->reg_base + Q6SS_RESET_REG);
+
+ /* Enable the Q6 core clock at the GFM, Q6SSTOP_QDSP6SS_GFMUX_CTL */
+ val = readl(wcss->reg_base + Q6SS_GFMUX_CTL_REG);
+ val |= Q6SS_CLK_ENABLE | Q6SS_SWITCH_CLK_SRC;
+ writel(val, wcss->reg_base + Q6SS_GFMUX_CTL_REG);
+
+ /* Enable sleep clock branch needed for BCR circuit */
+ ret = clk_prepare_enable(wcss->lcc_bcr_sleep);
+ if (ret)
+ goto disable_core_gfmux_clk;
+
+ return 0;
+
+disable_core_gfmux_clk:
+ val = readl(wcss->reg_base + Q6SS_GFMUX_CTL_REG);
+ val &= ~(Q6SS_CLK_ENABLE | Q6SS_SWITCH_CLK_SRC);
+ writel(val, wcss->reg_base + Q6SS_GFMUX_CTL_REG);
+ clk_disable_unprepare(wcss->gcc_axim_cbcr);
+disable_sleep_cbcr_clk:
+ val = readl(wcss->reg_base + Q6SS_SLEEP_CBCR);
+ val &= ~Q6SS_CLK_ENABLE;
+ writel(val, wcss->reg_base + Q6SS_SLEEP_CBCR);
+disable_xo_cbcr_clk:
+ val = readl(wcss->reg_base + Q6SS_XO_CBCR);
+ val &= ~Q6SS_CLK_ENABLE;
+ writel(val, wcss->reg_base + Q6SS_XO_CBCR);
+ clk_disable_unprepare(wcss->qdsp6ss_axim_cbcr);
+disable_abhm_cbcr_clk:
+ clk_disable_unprepare(wcss->qdsp6ss_abhm_cbcr);
+disable_tcm_slave_cbcr_clk:
+ clk_disable_unprepare(wcss->tcm_slave_cbcr);
+disable_ahbs_cbcr_clk:
+ clk_disable_unprepare(wcss->ahbs_cbcr);
+disable_csr_cbcr_clk:
+ clk_disable_unprepare(wcss->lcc_csr_cbcr);
+disable_ahbfabric_cbcr_clk:
+ clk_disable_unprepare(wcss->ahbfabric_cbcr_clk);
+disable_gcc_abhs_cbcr_clk:
+ clk_disable_unprepare(wcss->gcc_abhs_cbcr);
+
+ return ret;
+}
+
+static inline int q6v5_wcss_qcs404_reset(struct q6v5_wcss *wcss)
+{
+ unsigned long val;
+
+ writel(0x80800000, wcss->reg_base + Q6SS_STRAP_ACC);
+
+ /* Start core execution */
+ val = readl(wcss->reg_base + Q6SS_RESET_REG);
+ val &= ~Q6SS_STOP_CORE;
+ writel(val, wcss->reg_base + Q6SS_RESET_REG);
+
+ return 0;
+}
+
+static int q6v5_qcs404_wcss_start(struct rproc *rproc)
+{
+ struct q6v5_wcss *wcss = rproc->priv;
+ int ret;
+
+ ret = clk_prepare_enable(wcss->xo);
+ if (ret)
+ return ret;
+
+ ret = regulator_enable(wcss->cx_supply);
+ if (ret)
+ goto disable_xo_clk;
+
+ qcom_q6v5_prepare(&wcss->q6v5);
+
+ ret = q6v5_wcss_qcs404_power_on(wcss);
+ if (ret) {
+ dev_err(wcss->dev, "wcss clk_enable failed\n");
+ goto disable_cx_supply;
+ }
+
+ writel(rproc->bootaddr >> 4, wcss->reg_base + Q6SS_RST_EVB);
+
+ q6v5_wcss_qcs404_reset(wcss);
+
+ ret = qcom_q6v5_wait_for_start(&wcss->q6v5, 5 * HZ);
+ if (ret == -ETIMEDOUT) {
+ dev_err(wcss->dev, "start timed out\n");
+ goto disable_cx_supply;
+ }
+
+ return 0;
+
+disable_cx_supply:
+ regulator_disable(wcss->cx_supply);
+disable_xo_clk:
+ clk_disable_unprepare(wcss->xo);
+
+ return ret;
+}
+
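+/*
+ * Halt a bus port through the TCSR halt registers: request the halt, poll
+ * for the acknowledgment within HALT_ACK_TIMEOUT_MS, sanity-check the
+ * idle status, then clear the request (the port stays halted until the
+ * block is reset).
+ */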
+static void q6v5_wcss_halt_axi_port(struct q6v5_wcss *wcss,
+ struct regmap *halt_map,
+ u32 offset)
+{
+ unsigned long timeout;
+ unsigned int val;
+ int ret;
+
+ /* Check if we're already idle */
+ ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
+ if (!ret && val)
+ return;
+
+ /* Assert halt request */
+ regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);
+
+ /* Wait for halt */
+ timeout = jiffies + msecs_to_jiffies(HALT_ACK_TIMEOUT_MS);
+ for (;;) {
+ ret = regmap_read(halt_map, offset + AXI_HALTACK_REG, &val);
+ if (ret || val || time_after(jiffies, timeout))
+ break;
+
+ msleep(1);
+ }
+
+ ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
+ if (ret || !val)
+ dev_err(wcss->dev, "port failed halt\n");
+
+ /* Clear halt request (port will remain halted until reset) */
+ regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
+}
+
+static int q6v5_qcs404_wcss_shutdown(struct q6v5_wcss *wcss)
+{
+ unsigned long val;
+ int ret;
+
+ q6v5_wcss_halt_axi_port(wcss, wcss->halt_map, wcss->halt_wcss);
+
+ /* Assert clamps to avoid MX current in-rush */
+ val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG);
+ val |= (Q6SS_CLAMP_IO | Q6SS_CLAMP_WL | Q6SS_CLAMP_QMC_MEM);
+ writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ /* Disable memories by turning off memory foot/headswitch */
+ writel((readl(wcss->reg_base + Q6SS_MEM_PWR_CTL) &
+ ~QDSS_Q6_MEMORIES),
+ wcss->reg_base + Q6SS_MEM_PWR_CTL);
+
+ /* Clear the BHS_ON bit */
+ val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG);
+ val &= ~Q6SS_BHS_ON;
+ writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ clk_disable_unprepare(wcss->ahbfabric_cbcr_clk);
+ clk_disable_unprepare(wcss->lcc_csr_cbcr);
+ clk_disable_unprepare(wcss->tcm_slave_cbcr);
+ clk_disable_unprepare(wcss->qdsp6ss_abhm_cbcr);
+ clk_disable_unprepare(wcss->qdsp6ss_axim_cbcr);
+
+ val = readl(wcss->reg_base + Q6SS_SLEEP_CBCR);
+ val &= ~BIT(0);
+ writel(val, wcss->reg_base + Q6SS_SLEEP_CBCR);
+
+ val = readl(wcss->reg_base + Q6SS_XO_CBCR);
+ val &= ~BIT(0);
+ writel(val, wcss->reg_base + Q6SS_XO_CBCR);
+
+ clk_disable_unprepare(wcss->ahbs_cbcr);
+ clk_disable_unprepare(wcss->lcc_bcr_sleep);
+
+ val = readl(wcss->reg_base + Q6SS_GFMUX_CTL_REG);
+ val &= ~(Q6SS_CLK_ENABLE | Q6SS_SWITCH_CLK_SRC);
+ writel(val, wcss->reg_base + Q6SS_GFMUX_CTL_REG);
+
+ clk_disable_unprepare(wcss->gcc_abhs_cbcr);
+
+ ret = reset_control_assert(wcss->wcss_reset);
+ if (ret) {
+ dev_err(wcss->dev, "wcss_reset failed\n");
+ return ret;
+ }
+ usleep_range(200, 300);
+
+ ret = reset_control_deassert(wcss->wcss_reset);
+ if (ret) {
+ dev_err(wcss->dev, "wcss_reset failed\n");
+ return ret;
+ }
+ usleep_range(200, 300);
+
+ clk_disable_unprepare(wcss->gcc_axim_cbcr);
+
+ return 0;
+}
+
+static int q6v5_wcss_powerdown(struct q6v5_wcss *wcss)
+{
+ int ret;
+ u32 val;
+
+ /* 1 - Assert WCSS/Q6 HALTREQ */
+ q6v5_wcss_halt_axi_port(wcss, wcss->halt_map, wcss->halt_wcss);
+
+ /* 2 - Enable WCSSAON_CONFIG */
+ val = readl(wcss->rmb_base + SSCAON_CONFIG);
+ val |= SSCAON_ENABLE;
+ writel(val, wcss->rmb_base + SSCAON_CONFIG);
+
+ /* 3 - Set SSCAON_CONFIG */
+ val |= SSCAON_BUS_EN;
+ val &= ~SSCAON_BUS_MUX_MASK;
+ writel(val, wcss->rmb_base + SSCAON_CONFIG);
+
+ /* 4 - Set SSCAON_CONFIG bit 1 */
+ val |= BIT(1);
+ writel(val, wcss->rmb_base + SSCAON_CONFIG);
+
+ /* 5 - wait for SSCAON_STATUS */
+ ret = readl_poll_timeout(wcss->rmb_base + SSCAON_STATUS,
+ val, (val & 0xffff) == 0x400, 1000,
+ HALT_CHECK_MAX_LOOPS);
+ if (ret) {
+ dev_err(wcss->dev,
+ "can't get SSCAON_STATUS rc:%d)\n", ret);
+ return ret;
+ }
+
+ /* 6 - Assert WCSS_AON reset */
+ reset_control_assert(wcss->wcss_aon_reset);
+
+ /* 7 - Clear WCSSAON_CONFIG bit 13 (SSCAON_ENABLE) */
+ val = readl(wcss->rmb_base + SSCAON_CONFIG);
+ val &= ~SSCAON_ENABLE;
+ writel(val, wcss->rmb_base + SSCAON_CONFIG);
+
+ /* 8 - Assert WCSS reset */
+ reset_control_assert(wcss->wcss_reset);
+
+ return 0;
+}
+
+static int q6v5_q6_powerdown(struct q6v5_wcss *wcss)
+{
+ int ret;
+ u32 val;
+ int i;
+
+ /* 1 - Halt Q6 bus interface */
+ q6v5_wcss_halt_axi_port(wcss, wcss->halt_map, wcss->halt_q6);
+
+ /* 2 - Disable Q6 Core clock */
+ val = readl(wcss->reg_base + Q6SS_GFMUX_CTL_REG);
+ val &= ~Q6SS_CLK_ENABLE;
+ writel(val, wcss->reg_base + Q6SS_GFMUX_CTL_REG);
+
+ /* 3 - Clamp I/O */
+ val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG);
+ val |= Q6SS_CLAMP_IO;
+ writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ /* 4 - Turn on QDSS BHS */
+ val |= QDSS_BHS_ON;
+ writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ /* 5 - Clear L2 data standby */
+ val &= ~Q6SS_L2DATA_STBY_N;
+ writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ /* 6 - Clear Sleep RTN */
+ val &= ~Q6SS_SLP_RET_N;
+ writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ /* 7 - turn off Q6 memory foot/head switch one bank at a time */
+ for (i = 0; i < 20; i++) {
+ val = readl(wcss->reg_base + Q6SS_MEM_PWR_CTL);
+ val &= ~BIT(i);
+ writel(val, wcss->reg_base + Q6SS_MEM_PWR_CTL);
+ mdelay(1);
+ }
+
+ /* 8 - Assert QMC memory RTN */
+ val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG);
+ val |= Q6SS_CLAMP_QMC_MEM;
+ writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ /* 9 - Turn off BHS */
+ val &= ~Q6SS_BHS_ON;
+ writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
+ udelay(1);
+
+ /* 10 - Wait till BHS Reset is done */
+ ret = readl_poll_timeout(wcss->reg_base + Q6SS_BHS_STATUS,
+ val, !(val & BHS_EN_REST_ACK), 1000,
+ HALT_CHECK_MAX_LOOPS);
+ if (ret) {
+ dev_err(wcss->dev, "BHS_STATUS not OFF (rc:%d)\n", ret);
+ return ret;
+ }
+
+ /* 11 - Assert WCSS reset */
+ reset_control_assert(wcss->wcss_reset);
+
+ /* 12 - Assert Q6 reset */
+ reset_control_assert(wcss->wcss_q6_reset);
+
+ return 0;
+}
+
+static int q6v5_wcss_stop(struct rproc *rproc)
+{
+ struct q6v5_wcss *wcss = rproc->priv;
+ int ret;
+
+ /* WCSS powerdown */
+ if (wcss->requires_force_stop) {
+ ret = qcom_q6v5_request_stop(&wcss->q6v5, NULL);
+ if (ret == -ETIMEDOUT) {
+ dev_err(wcss->dev, "timed out on wait\n");
+ return ret;
+ }
+ }
+
+ if (wcss->version == WCSS_QCS404) {
+ ret = q6v5_qcs404_wcss_shutdown(wcss);
+ if (ret)
+ return ret;
+ } else {
+ ret = q6v5_wcss_powerdown(wcss);
+ if (ret)
+ return ret;
+
+ /* Q6 Power down */
+ ret = q6v5_q6_powerdown(wcss);
+ if (ret)
+ return ret;
+ }
+
+ qcom_q6v5_unprepare(&wcss->q6v5);
+
+ return 0;
+}
+
+static void *q6v5_wcss_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
+{
+ struct q6v5_wcss *wcss = rproc->priv;
+ int offset;
+
+ offset = da - wcss->mem_reloc;
+ if (offset < 0 || offset + len > wcss->mem_size)
+ return NULL;
+
+ return wcss->mem_region + offset;
+}
+
+static int q6v5_wcss_load(struct rproc *rproc, const struct firmware *fw)
+{
+ struct q6v5_wcss *wcss = rproc->priv;
+ int ret;
+
+ ret = qcom_mdt_load_no_init(wcss->dev, fw, rproc->firmware,
+ 0, wcss->mem_region, wcss->mem_phys,
+ wcss->mem_size, &wcss->mem_reloc);
+ if (ret)
+ return ret;
+
+ qcom_pil_info_store("wcnss", wcss->mem_phys, wcss->mem_size);
+
+ return ret;
+}
+
+static const struct rproc_ops q6v5_wcss_ipq8074_ops = {
+ .start = q6v5_wcss_start,
+ .stop = q6v5_wcss_stop,
+ .da_to_va = q6v5_wcss_da_to_va,
+ .load = q6v5_wcss_load,
+ .get_boot_addr = rproc_elf_get_boot_addr,
+};
+
+static const struct rproc_ops q6v5_wcss_qcs404_ops = {
+ .start = q6v5_qcs404_wcss_start,
+ .stop = q6v5_wcss_stop,
+ .da_to_va = q6v5_wcss_da_to_va,
+ .load = q6v5_wcss_load,
+ .get_boot_addr = rproc_elf_get_boot_addr,
+ .parse_fw = qcom_register_dump_segments,
+};
+
+static int q6v5_wcss_init_reset(struct q6v5_wcss *wcss,
+ const struct wcss_data *desc)
+{
+ struct device *dev = wcss->dev;
+
+ if (desc->aon_reset_required) {
+ wcss->wcss_aon_reset = devm_reset_control_get_exclusive(dev, "wcss_aon_reset");
+ if (IS_ERR(wcss->wcss_aon_reset)) {
+ dev_err(wcss->dev, "fail to acquire wcss_aon_reset\n");
+ return PTR_ERR(wcss->wcss_aon_reset);
+ }
+ }
+
+ wcss->wcss_reset = devm_reset_control_get_exclusive(dev, "wcss_reset");
+ if (IS_ERR(wcss->wcss_reset)) {
+ dev_err(wcss->dev, "unable to acquire wcss_reset\n");
+ return PTR_ERR(wcss->wcss_reset);
+ }
+
+ if (desc->wcss_q6_reset_required) {
+ wcss->wcss_q6_reset = devm_reset_control_get_exclusive(dev, "wcss_q6_reset");
+ if (IS_ERR(wcss->wcss_q6_reset)) {
+ dev_err(wcss->dev, "unable to acquire wcss_q6_reset\n");
+ return PTR_ERR(wcss->wcss_q6_reset);
+ }
+ }
+
+ wcss->wcss_q6_bcr_reset = devm_reset_control_get_exclusive(dev, "wcss_q6_bcr_reset");
+ if (IS_ERR(wcss->wcss_q6_bcr_reset)) {
+ dev_err(wcss->dev, "unable to acquire wcss_q6_bcr_reset\n");
+ return PTR_ERR(wcss->wcss_q6_bcr_reset);
+ }
+
+ return 0;
+}
+
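+/*
+ * Map the QDSP6 register space and resolve the halt registers. The
+ * "qcom,halt-regs" property names a syscon followed by halt block
+ * offsets, e.g. (values hypothetical, not taken from any board):
+ *
+ *	qcom,halt-regs = <&tcsr_regs 0xa000 0xd000 0x1e000>;
+ */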
+static int q6v5_wcss_init_mmio(struct q6v5_wcss *wcss,
+ struct platform_device *pdev)
+{
+ unsigned int halt_reg[MAX_HALT_REG] = {0};
+ struct device_node *syscon;
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
+ if (!res)
+ return -EINVAL;
+
+ wcss->reg_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!wcss->reg_base)
+ return -ENOMEM;
+
+ if (wcss->version == WCSS_IPQ8074) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
+ wcss->rmb_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(wcss->rmb_base))
+ return PTR_ERR(wcss->rmb_base);
+ }
+
+ syscon = of_parse_phandle(pdev->dev.of_node,
+ "qcom,halt-regs", 0);
+ if (!syscon) {
+ dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
+ return -EINVAL;
+ }
+
+ wcss->halt_map = syscon_node_to_regmap(syscon);
+ of_node_put(syscon);
+ if (IS_ERR(wcss->halt_map))
+ return PTR_ERR(wcss->halt_map);
+
+ ret = of_property_read_variable_u32_array(pdev->dev.of_node,
+ "qcom,halt-regs",
+ halt_reg, 0,
+ MAX_HALT_REG);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
+ return -EINVAL;
+ }
+
+ wcss->halt_q6 = halt_reg[0];
+ wcss->halt_wcss = halt_reg[1];
+ wcss->halt_nc = halt_reg[2];
+
+ return 0;
+}
+
+static int q6v5_alloc_memory_region(struct q6v5_wcss *wcss)
+{
+ struct reserved_mem *rmem = NULL;
+ struct device_node *node;
+ struct device *dev = wcss->dev;
+
+ node = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (node)
+ rmem = of_reserved_mem_lookup(node);
+ of_node_put(node);
+
+ if (!rmem) {
+ dev_err(dev, "unable to acquire memory-region\n");
+ return -EINVAL;
+ }
+
+ wcss->mem_phys = rmem->base;
+ wcss->mem_reloc = rmem->base;
+ wcss->mem_size = rmem->size;
+ wcss->mem_region = devm_ioremap_wc(dev, wcss->mem_phys, wcss->mem_size);
+ if (!wcss->mem_region) {
+ dev_err(dev, "unable to map memory region: %pa+%pa\n",
+ &rmem->base, &rmem->size);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int q6v5_wcss_init_clock(struct q6v5_wcss *wcss)
+{
+ int ret;
+
+ wcss->xo = devm_clk_get(wcss->dev, "xo");
+ if (IS_ERR(wcss->xo)) {
+ ret = PTR_ERR(wcss->xo);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wcss->dev, "failed to get xo clock");
+ return ret;
+ }
+
+ wcss->gcc_abhs_cbcr = devm_clk_get(wcss->dev, "gcc_abhs_cbcr");
+ if (IS_ERR(wcss->gcc_abhs_cbcr)) {
+ ret = PTR_ERR(wcss->gcc_abhs_cbcr);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wcss->dev, "failed to get gcc abhs clock");
+ return ret;
+ }
+
+ wcss->gcc_axim_cbcr = devm_clk_get(wcss->dev, "gcc_axim_cbcr");
+ if (IS_ERR(wcss->gcc_axim_cbcr)) {
+ ret = PTR_ERR(wcss->gcc_axim_cbcr);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wcss->dev, "failed to get gcc axim clock\n");
+ return ret;
+ }
+
+ wcss->ahbfabric_cbcr_clk = devm_clk_get(wcss->dev,
+ "lcc_ahbfabric_cbc");
+ if (IS_ERR(wcss->ahbfabric_cbcr_clk)) {
+ ret = PTR_ERR(wcss->ahbfabric_cbcr_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wcss->dev, "failed to get ahbfabric clock\n");
+ return ret;
+ }
+
+ wcss->lcc_csr_cbcr = devm_clk_get(wcss->dev, "tcsr_lcc_cbc");
+ if (IS_ERR(wcss->lcc_csr_cbcr)) {
+ ret = PTR_ERR(wcss->lcc_csr_cbcr);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wcss->dev, "failed to get csr cbcr clk\n");
+ return ret;
+ }
+
+ wcss->ahbs_cbcr = devm_clk_get(wcss->dev,
+ "lcc_abhs_cbc");
+ if (IS_ERR(wcss->ahbs_cbcr)) {
+ ret = PTR_ERR(wcss->ahbs_cbcr);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wcss->dev, "failed to get ahbs_cbcr clk\n");
+ return ret;
+ }
+
+ wcss->tcm_slave_cbcr = devm_clk_get(wcss->dev,
+ "lcc_tcm_slave_cbc");
+ if (IS_ERR(wcss->tcm_slave_cbcr)) {
+ ret = PTR_ERR(wcss->tcm_slave_cbcr);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wcss->dev, "failed to get tcm cbcr clk\n");
+ return ret;
+ }
+
+ wcss->qdsp6ss_abhm_cbcr = devm_clk_get(wcss->dev, "lcc_abhm_cbc");
+ if (IS_ERR(wcss->qdsp6ss_abhm_cbcr)) {
+ ret = PTR_ERR(wcss->qdsp6ss_abhm_cbcr);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wcss->dev, "failed to get abhm cbcr clk\n");
+ return ret;
+ }
+
+ wcss->qdsp6ss_axim_cbcr = devm_clk_get(wcss->dev, "lcc_axim_cbc");
+ if (IS_ERR(wcss->qdsp6ss_axim_cbcr)) {
+ ret = PTR_ERR(wcss->qdsp6ss_axim_cbcr);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wcss->dev, "failed to get axim cbcr clk\n");
+ return ret;
+ }
+
+ wcss->lcc_bcr_sleep = devm_clk_get(wcss->dev, "lcc_bcr_sleep");
+ if (IS_ERR(wcss->lcc_bcr_sleep)) {
+ ret = PTR_ERR(wcss->lcc_bcr_sleep);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wcss->dev, "failed to get bcr cbcr clk\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int q6v5_wcss_init_regulator(struct q6v5_wcss *wcss)
+{
+ wcss->cx_supply = devm_regulator_get(wcss->dev, "cx");
+ if (IS_ERR(wcss->cx_supply))
+ return PTR_ERR(wcss->cx_supply);
+
+ regulator_set_load(wcss->cx_supply, 100000);
+
+ return 0;
+}
+
+static int q6v5_wcss_probe(struct platform_device *pdev)
+{
+ const struct wcss_data *desc;
+ struct q6v5_wcss *wcss;
+ struct rproc *rproc;
+ int ret;
+
+ desc = device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ rproc = rproc_alloc(&pdev->dev, pdev->name, desc->ops,
+ desc->firmware_name, sizeof(*wcss));
+ if (!rproc) {
+ dev_err(&pdev->dev, "failed to allocate rproc\n");
+ return -ENOMEM;
+ }
+
+ wcss = rproc->priv;
+ wcss->dev = &pdev->dev;
+ wcss->version = desc->version;
+ wcss->requires_force_stop = desc->requires_force_stop;
+
+ ret = q6v5_wcss_init_mmio(wcss, pdev);
+ if (ret)
+ goto free_rproc;
+
+ ret = q6v5_alloc_memory_region(wcss);
+ if (ret)
+ goto free_rproc;
+
+ if (wcss->version == WCSS_QCS404) {
+ ret = q6v5_wcss_init_clock(wcss);
+ if (ret)
+ goto free_rproc;
+
+ ret = q6v5_wcss_init_regulator(wcss);
+ if (ret)
+ goto free_rproc;
+ }
+
+ ret = q6v5_wcss_init_reset(wcss, desc);
+ if (ret)
+ goto free_rproc;
+
+ ret = qcom_q6v5_init(&wcss->q6v5, pdev, rproc, desc->crash_reason_smem, NULL, NULL);
+ if (ret)
+ goto free_rproc;
+
+ qcom_add_glink_subdev(rproc, &wcss->glink_subdev, "q6wcss");
+ qcom_add_ssr_subdev(rproc, &wcss->ssr_subdev, "q6wcss");
+
+ if (desc->ssctl_id)
+ wcss->sysmon = qcom_add_sysmon_subdev(rproc,
+ desc->sysmon_name,
+ desc->ssctl_id);
+
+ ret = rproc_add(rproc);
+ if (ret)
+ goto free_rproc;
+
+ platform_set_drvdata(pdev, rproc);
+
+ return 0;
+
+free_rproc:
+ rproc_free(rproc);
+
+ return ret;
+}
+
+static int q6v5_wcss_remove(struct platform_device *pdev)
+{
+ struct rproc *rproc = platform_get_drvdata(pdev);
+ struct q6v5_wcss *wcss = rproc->priv;
+
+ qcom_q6v5_deinit(&wcss->q6v5);
+ rproc_del(rproc);
+ rproc_free(rproc);
+
+ return 0;
+}
+
+static const struct wcss_data wcss_ipq8074_res_init = {
+ .firmware_name = "IPQ8074/q6_fw.mdt",
+ .crash_reason_smem = WCSS_CRASH_REASON,
+ .aon_reset_required = true,
+ .wcss_q6_reset_required = true,
+ .ops = &q6v5_wcss_ipq8074_ops,
+ .requires_force_stop = true,
+};
+
+static const struct wcss_data wcss_qcs404_res_init = {
+ .crash_reason_smem = WCSS_CRASH_REASON,
+ .firmware_name = "wcnss.mdt",
+ .version = WCSS_QCS404,
+ .aon_reset_required = false,
+ .wcss_q6_reset_required = false,
+ .ssr_name = "mpss",
+ .sysmon_name = "wcnss",
+ .ssctl_id = 0x12,
+ .ops = &q6v5_wcss_qcs404_ops,
+ .requires_force_stop = false,
+};
+
+static const struct of_device_id q6v5_wcss_of_match[] = {
+ { .compatible = "qcom,ipq8074-wcss-pil", .data = &wcss_ipq8074_res_init },
+ { .compatible = "qcom,qcs404-wcss-pil", .data = &wcss_qcs404_res_init },
+ { },
+};
+MODULE_DEVICE_TABLE(of, q6v5_wcss_of_match);
+
+static struct platform_driver q6v5_wcss_driver = {
+ .probe = q6v5_wcss_probe,
+ .remove = q6v5_wcss_remove,
+ .driver = {
+ .name = "qcom-q6v5-wcss-pil",
+ .of_match_table = q6v5_wcss_of_match,
+ },
+};
+module_platform_driver(q6v5_wcss_driver);
+
+MODULE_DESCRIPTION("Hexagon WCSS Peripheral Image Loader");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/remoteproc/qcom_sysmon.c b/drivers/remoteproc/qcom_sysmon.c
new file mode 100644
index 000000000..15af52f84
--- /dev/null
+++ b/drivers/remoteproc/qcom_sysmon.c
@@ -0,0 +1,811 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017, Linaro Ltd.
+ */
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc/qcom_rproc.h>
+#include <linux/rpmsg.h>
+
+#include "qcom_common.h"
+
+static BLOCKING_NOTIFIER_HEAD(sysmon_notifiers);
+
+struct qcom_sysmon {
+ struct rproc_subdev subdev;
+ struct rproc *rproc;
+
+ int state;
+ struct mutex state_lock;
+
+ struct list_head node;
+
+ const char *name;
+
+ int shutdown_irq;
+ int ssctl_version;
+ int ssctl_instance;
+
+ struct notifier_block nb;
+
+ struct device *dev;
+
+ struct rpmsg_endpoint *ept;
+ struct completion comp;
+ struct completion ind_comp;
+ struct completion shutdown_comp;
+ struct completion ssctl_comp;
+ struct mutex lock;
+
+ bool ssr_ack;
+ bool shutdown_acked;
+
+ struct qmi_handle qmi;
+ struct sockaddr_qrtr ssctl;
+};
+
+enum {
+ SSCTL_SSR_EVENT_BEFORE_POWERUP,
+ SSCTL_SSR_EVENT_AFTER_POWERUP,
+ SSCTL_SSR_EVENT_BEFORE_SHUTDOWN,
+ SSCTL_SSR_EVENT_AFTER_SHUTDOWN,
+};
+
+static const char * const sysmon_state_string[] = {
+ [SSCTL_SSR_EVENT_BEFORE_POWERUP] = "before_powerup",
+ [SSCTL_SSR_EVENT_AFTER_POWERUP] = "after_powerup",
+ [SSCTL_SSR_EVENT_BEFORE_SHUTDOWN] = "before_shutdown",
+ [SSCTL_SSR_EVENT_AFTER_SHUTDOWN] = "after_shutdown",
+};
+
+struct sysmon_event {
+ const char *subsys_name;
+ u32 ssr_event;
+};
+
+static DEFINE_MUTEX(sysmon_lock);
+static LIST_HEAD(sysmon_list);
+
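+/*
+ * The legacy sysmon transport is a plain-text rpmsg protocol: requests
+ * look like "ssr:<subsystem>:<event>" or "ssr:shutdown", and the remote
+ * acknowledges with "ssr:ack". For example, notifying a remote that the
+ * modem is about to go down sends the string "ssr:modem:before_shutdown".
+ */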
+/**
+ * sysmon_send_event() - send notification of other remote's SSR event
+ * @sysmon: sysmon context
+ * @event: sysmon event context
+ */
+static void sysmon_send_event(struct qcom_sysmon *sysmon,
+ const struct sysmon_event *event)
+{
+ char req[50];
+ int len;
+ int ret;
+
+ len = snprintf(req, sizeof(req), "ssr:%s:%s", event->subsys_name,
+ sysmon_state_string[event->ssr_event]);
+ if (len >= sizeof(req))
+ return;
+
+ mutex_lock(&sysmon->lock);
+ reinit_completion(&sysmon->comp);
+ sysmon->ssr_ack = false;
+
+ ret = rpmsg_send(sysmon->ept, req, len);
+ if (ret < 0) {
+ dev_err(sysmon->dev, "failed to send sysmon event\n");
+ goto out_unlock;
+ }
+
+ ret = wait_for_completion_timeout(&sysmon->comp,
+ msecs_to_jiffies(5000));
+ if (!ret) {
+ dev_err(sysmon->dev, "timeout waiting for sysmon ack\n");
+ goto out_unlock;
+ }
+
+ if (!sysmon->ssr_ack)
+ dev_err(sysmon->dev, "unexpected response to sysmon event\n");
+
+out_unlock:
+ mutex_unlock(&sysmon->lock);
+}
+
+/**
+ * sysmon_request_shutdown() - request graceful shutdown of remote
+ * @sysmon: sysmon context
+ *
+ * Return: boolean indicator of the remote processor acking the request
+ */
+static bool sysmon_request_shutdown(struct qcom_sysmon *sysmon)
+{
+ char *req = "ssr:shutdown";
+ bool acked = false;
+ int ret;
+
+ mutex_lock(&sysmon->lock);
+ reinit_completion(&sysmon->comp);
+ sysmon->ssr_ack = false;
+
+ ret = rpmsg_send(sysmon->ept, req, strlen(req) + 1);
+ if (ret < 0) {
+ dev_err(sysmon->dev, "send sysmon shutdown request failed\n");
+ goto out_unlock;
+ }
+
+ ret = wait_for_completion_timeout(&sysmon->comp,
+ msecs_to_jiffies(5000));
+ if (!ret) {
+ dev_err(sysmon->dev, "timeout waiting for sysmon ack\n");
+ goto out_unlock;
+ }
+
+ if (!sysmon->ssr_ack)
+ dev_err(sysmon->dev,
+ "unexpected response to sysmon shutdown request\n");
+ else
+ acked = true;
+
+out_unlock:
+ mutex_unlock(&sysmon->lock);
+
+ return acked;
+}
+
+static int sysmon_callback(struct rpmsg_device *rpdev, void *data, int count,
+ void *priv, u32 addr)
+{
+ struct qcom_sysmon *sysmon = priv;
+ const char *ssr_ack = "ssr:ack";
+ const int ssr_ack_len = strlen(ssr_ack) + 1;
+
+ if (!sysmon)
+ return -EINVAL;
+
+ if (count >= ssr_ack_len && !memcmp(data, ssr_ack, ssr_ack_len))
+ sysmon->ssr_ack = true;
+
+ complete(&sysmon->comp);
+
+ return 0;
+}
+
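+/*
+ * SSCTL QMI service: message ids and element tables. Each qmi_elem_info
+ * array below describes how a C struct is (de)serialized into the QMI
+ * TLV wire format; e.g. ssctl_subsys_event_req_ei encodes the subsystem
+ * name as a length-prefixed byte array (tlv_type 0x01) followed by the
+ * event enum (tlv_type 0x02).
+ */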
+#define SSCTL_SHUTDOWN_REQ 0x21
+#define SSCTL_SHUTDOWN_READY_IND 0x21
+#define SSCTL_SUBSYS_EVENT_REQ 0x23
+
+#define SSCTL_MAX_MSG_LEN 7
+
+#define SSCTL_SUBSYS_NAME_LENGTH 15
+
+enum {
+ SSCTL_SSR_EVENT_FORCED,
+ SSCTL_SSR_EVENT_GRACEFUL,
+};
+
+struct ssctl_shutdown_resp {
+ struct qmi_response_type_v01 resp;
+};
+
+static struct qmi_elem_info ssctl_shutdown_resp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct ssctl_shutdown_resp, resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+struct ssctl_subsys_event_req {
+ u8 subsys_name_len;
+ char subsys_name[SSCTL_SUBSYS_NAME_LENGTH];
+ u32 event;
+ u8 evt_driven_valid;
+ u32 evt_driven;
+};
+
+static struct qmi_elem_info ssctl_subsys_event_req_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct ssctl_subsys_event_req,
+ subsys_name_len),
+ .ei_array = NULL,
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = SSCTL_SUBSYS_NAME_LENGTH,
+ .elem_size = sizeof(char),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct ssctl_subsys_event_req,
+ subsys_name),
+ .ei_array = NULL,
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct ssctl_subsys_event_req,
+ event),
+ .ei_array = NULL,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct ssctl_subsys_event_req,
+ evt_driven_valid),
+ .ei_array = NULL,
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct ssctl_subsys_event_req,
+ evt_driven),
+ .ei_array = NULL,
+ },
+ {}
+};
+
+struct ssctl_subsys_event_resp {
+ struct qmi_response_type_v01 resp;
+};
+
+static struct qmi_elem_info ssctl_subsys_event_resp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct ssctl_subsys_event_resp,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+static struct qmi_elem_info ssctl_shutdown_ind_ei[] = {
+ {}
+};
+
+static void sysmon_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, const void *data)
+{
+ struct qcom_sysmon *sysmon = container_of(qmi, struct qcom_sysmon, qmi);
+
+ complete(&sysmon->ind_comp);
+}
+
+static const struct qmi_msg_handler qmi_indication_handler[] = {
+ {
+ .type = QMI_INDICATION,
+ .msg_id = SSCTL_SHUTDOWN_READY_IND,
+ .ei = ssctl_shutdown_ind_ei,
+ .decoded_size = 0,
+ .fn = sysmon_ind_cb
+ },
+ {}
+};
+
+static bool ssctl_request_shutdown_wait(struct qcom_sysmon *sysmon)
+{
+ int ret;
+
+ ret = wait_for_completion_timeout(&sysmon->shutdown_comp, 10 * HZ);
+ if (ret)
+ return true;
+
+ ret = try_wait_for_completion(&sysmon->ind_comp);
+ if (ret)
+ return true;
+
+ dev_err(sysmon->dev, "timeout waiting for shutdown ack\n");
+ return false;
+}
+
+/**
+ * ssctl_request_shutdown() - request shutdown via SSCTL QMI service
+ * @sysmon: sysmon context
+ *
+ * Return: boolean indicator of the remote processor acking the request
+ */
+static bool ssctl_request_shutdown(struct qcom_sysmon *sysmon)
+{
+ struct ssctl_shutdown_resp resp;
+ struct qmi_txn txn;
+ bool acked = false;
+ int ret;
+
+ reinit_completion(&sysmon->ind_comp);
+ reinit_completion(&sysmon->shutdown_comp);
+ ret = qmi_txn_init(&sysmon->qmi, &txn, ssctl_shutdown_resp_ei, &resp);
+ if (ret < 0) {
+ dev_err(sysmon->dev, "failed to allocate QMI txn\n");
+ return false;
+ }
+
+ ret = qmi_send_request(&sysmon->qmi, &sysmon->ssctl, &txn,
+ SSCTL_SHUTDOWN_REQ, 0, NULL, NULL);
+ if (ret < 0) {
+ dev_err(sysmon->dev, "failed to send shutdown request\n");
+ qmi_txn_cancel(&txn);
+ return false;
+ }
+
+ ret = qmi_txn_wait(&txn, 5 * HZ);
+ if (ret < 0) {
+ dev_err(sysmon->dev, "timeout waiting for shutdown response\n");
+ } else if (resp.resp.result) {
+ dev_err(sysmon->dev, "shutdown request rejected\n");
+ } else {
+ dev_dbg(sysmon->dev, "shutdown request completed\n");
+ acked = true;
+ }
+
+ if (sysmon->shutdown_irq > 0)
+ return ssctl_request_shutdown_wait(sysmon);
+
+ return acked;
+}
+
+/**
+ * ssctl_send_event() - send notification of other remote's SSR event
+ * @sysmon: sysmon context
+ * @event: sysmon event context
+ */
+static void ssctl_send_event(struct qcom_sysmon *sysmon,
+ const struct sysmon_event *event)
+{
+ struct ssctl_subsys_event_resp resp;
+ struct ssctl_subsys_event_req req;
+ struct qmi_txn txn;
+ int ret;
+
+ memset(&resp, 0, sizeof(resp));
+ ret = qmi_txn_init(&sysmon->qmi, &txn, ssctl_subsys_event_resp_ei, &resp);
+ if (ret < 0) {
+ dev_err(sysmon->dev, "failed to allocate QMI txn\n");
+ return;
+ }
+
+ memset(&req, 0, sizeof(req));
+ strlcpy(req.subsys_name, event->subsys_name, sizeof(req.subsys_name));
+ req.subsys_name_len = strlen(req.subsys_name);
+ req.event = event->ssr_event;
+ req.evt_driven_valid = true;
+ req.evt_driven = SSCTL_SSR_EVENT_FORCED;
+
+ ret = qmi_send_request(&sysmon->qmi, &sysmon->ssctl, &txn,
+ SSCTL_SUBSYS_EVENT_REQ, 40,
+ ssctl_subsys_event_req_ei, &req);
+ if (ret < 0) {
+ dev_err(sysmon->dev, "failed to send subsystem event\n");
+ qmi_txn_cancel(&txn);
+ return;
+ }
+
+ ret = qmi_txn_wait(&txn, 5 * HZ);
+ if (ret < 0)
+ dev_err(sysmon->dev, "timeout waiting for subsystem event response\n");
+ else if (resp.resp.result)
+ dev_err(sysmon->dev, "subsystem event rejected\n");
+ else
+ dev_dbg(sysmon->dev, "subsystem event accepted\n");
+}
+
+/**
+ * ssctl_new_server() - QMI callback indicating a new service
+ * @qmi: QMI handle
+ * @svc: service information
+ *
+ * Return: 0 if we're interested in this service, -EINVAL otherwise.
+ */
+static int ssctl_new_server(struct qmi_handle *qmi, struct qmi_service *svc)
+{
+ struct qcom_sysmon *sysmon = container_of(qmi, struct qcom_sysmon, qmi);
+
+ switch (svc->version) {
+ case 1:
+ if (svc->instance != 0)
+ return -EINVAL;
+ if (strcmp(sysmon->name, "modem"))
+ return -EINVAL;
+ break;
+ case 2:
+ if (svc->instance != sysmon->ssctl_instance)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ sysmon->ssctl_version = svc->version;
+
+ sysmon->ssctl.sq_family = AF_QIPCRTR;
+ sysmon->ssctl.sq_node = svc->node;
+ sysmon->ssctl.sq_port = svc->port;
+
+ svc->priv = sysmon;
+
+ complete(&sysmon->ssctl_comp);
+
+ return 0;
+}
+
+/**
+ * ssctl_del_server() - QMI callback indicating that @svc is removed
+ * @qmi: QMI handle
+ * @svc: service information
+ */
+static void ssctl_del_server(struct qmi_handle *qmi, struct qmi_service *svc)
+{
+ struct qcom_sysmon *sysmon = svc->priv;
+
+ sysmon->ssctl_version = 0;
+}
+
+static const struct qmi_ops ssctl_ops = {
+ .new_server = ssctl_new_server,
+ .del_server = ssctl_del_server,
+};
+
+static int sysmon_prepare(struct rproc_subdev *subdev)
+{
+ struct qcom_sysmon *sysmon = container_of(subdev, struct qcom_sysmon,
+ subdev);
+ struct sysmon_event event = {
+ .subsys_name = sysmon->name,
+ .ssr_event = SSCTL_SSR_EVENT_BEFORE_POWERUP
+ };
+
+ mutex_lock(&sysmon->state_lock);
+ sysmon->state = SSCTL_SSR_EVENT_BEFORE_POWERUP;
+ blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)&event);
+ mutex_unlock(&sysmon->state_lock);
+
+ return 0;
+}
+
+/**
+ * sysmon_start() - start callback for the sysmon remoteproc subdevice
+ * @subdev: instance of the sysmon subdevice
+ *
+ * Inform all the listeners of sysmon notifications that the rproc associated
+ * with @subdev has booted up. The rproc that booted up also needs to know
+ * which rprocs are already up and running, so send start notifications
+ * on behalf of all the online rprocs.
+ */
+static int sysmon_start(struct rproc_subdev *subdev)
+{
+ struct qcom_sysmon *sysmon = container_of(subdev, struct qcom_sysmon,
+ subdev);
+ struct qcom_sysmon *target;
+ struct sysmon_event event = {
+ .subsys_name = sysmon->name,
+ .ssr_event = SSCTL_SSR_EVENT_AFTER_POWERUP
+ };
+
+ reinit_completion(&sysmon->ssctl_comp);
+ mutex_lock(&sysmon->state_lock);
+ sysmon->state = SSCTL_SSR_EVENT_AFTER_POWERUP;
+ blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)&event);
+ mutex_unlock(&sysmon->state_lock);
+
+ mutex_lock(&sysmon_lock);
+ list_for_each_entry(target, &sysmon_list, node) {
+ mutex_lock(&target->state_lock);
+ if (target == sysmon || target->state != SSCTL_SSR_EVENT_AFTER_POWERUP) {
+ mutex_unlock(&target->state_lock);
+ continue;
+ }
+
+ event.subsys_name = target->name;
+ event.ssr_event = target->state;
+
+ if (sysmon->ssctl_version == 2)
+ ssctl_send_event(sysmon, &event);
+ else if (sysmon->ept)
+ sysmon_send_event(sysmon, &event);
+ mutex_unlock(&target->state_lock);
+ }
+ mutex_unlock(&sysmon_lock);
+
+ return 0;
+}
+
+static void sysmon_stop(struct rproc_subdev *subdev, bool crashed)
+{
+ struct qcom_sysmon *sysmon = container_of(subdev, struct qcom_sysmon, subdev);
+ struct sysmon_event event = {
+ .subsys_name = sysmon->name,
+ .ssr_event = SSCTL_SSR_EVENT_BEFORE_SHUTDOWN
+ };
+
+ sysmon->shutdown_acked = false;
+
+ mutex_lock(&sysmon->state_lock);
+ sysmon->state = SSCTL_SSR_EVENT_BEFORE_SHUTDOWN;
+ blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)&event);
+ mutex_unlock(&sysmon->state_lock);
+
+ /* Don't request graceful shutdown if we've crashed */
+ if (crashed)
+ return;
+
+ if (sysmon->ssctl_instance) {
+ if (!wait_for_completion_timeout(&sysmon->ssctl_comp, HZ / 2))
+ dev_err(sysmon->dev, "timeout waiting for ssctl service\n");
+ }
+
+ if (sysmon->ssctl_version)
+ sysmon->shutdown_acked = ssctl_request_shutdown(sysmon);
+ else if (sysmon->ept)
+ sysmon->shutdown_acked = sysmon_request_shutdown(sysmon);
+}
+
+static void sysmon_unprepare(struct rproc_subdev *subdev)
+{
+ struct qcom_sysmon *sysmon = container_of(subdev, struct qcom_sysmon,
+ subdev);
+ struct sysmon_event event = {
+ .subsys_name = sysmon->name,
+ .ssr_event = SSCTL_SSR_EVENT_AFTER_SHUTDOWN
+ };
+
+ mutex_lock(&sysmon->state_lock);
+ sysmon->state = SSCTL_SSR_EVENT_AFTER_SHUTDOWN;
+ blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)&event);
+ mutex_unlock(&sysmon->state_lock);
+}
+
+/**
+ * sysmon_notify() - notify sysmon target of another's SSR
+ * @nb: notifier_block associated with sysmon instance
+ * @event: unused
+ * @data: SSR identifier of the remote that is going down
+ */
+static int sysmon_notify(struct notifier_block *nb, unsigned long event,
+ void *data)
+{
+ struct qcom_sysmon *sysmon = container_of(nb, struct qcom_sysmon, nb);
+ struct sysmon_event *sysmon_event = data;
+
+ /* Skip non-running rprocs and the originating instance */
+ if (sysmon->state != SSCTL_SSR_EVENT_AFTER_POWERUP ||
+ !strcmp(sysmon_event->subsys_name, sysmon->name)) {
+ dev_dbg(sysmon->dev, "not notifying %s\n", sysmon->name);
+ return NOTIFY_DONE;
+ }
+
+ /* Only SSCTL version 2 supports SSR events */
+ if (sysmon->ssctl_version == 2)
+ ssctl_send_event(sysmon, sysmon_event);
+ else if (sysmon->ept)
+ sysmon_send_event(sysmon, sysmon_event);
+
+ return NOTIFY_DONE;
+}
+
+static irqreturn_t sysmon_shutdown_interrupt(int irq, void *data)
+{
+ struct qcom_sysmon *sysmon = data;
+
+ complete(&sysmon->shutdown_comp);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * qcom_add_sysmon_subdev() - create a sysmon subdev for the given remoteproc
+ * @rproc: rproc context to associate the subdev with
+ * @name: name of this subdev, to use in SSR
+ * @ssctl_instance: instance id of the ssctl QMI service
+ *
+ * Return: A new qcom_sysmon object, or NULL on failure
+ */
+struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
+ const char *name,
+ int ssctl_instance)
+{
+ struct qcom_sysmon *sysmon;
+ int ret;
+
+ sysmon = kzalloc(sizeof(*sysmon), GFP_KERNEL);
+ if (!sysmon)
+ return ERR_PTR(-ENOMEM);
+
+ sysmon->dev = rproc->dev.parent;
+ sysmon->rproc = rproc;
+
+ sysmon->name = name;
+ sysmon->ssctl_instance = ssctl_instance;
+
+ init_completion(&sysmon->comp);
+ init_completion(&sysmon->ind_comp);
+ init_completion(&sysmon->shutdown_comp);
+ init_completion(&sysmon->ssctl_comp);
+ mutex_init(&sysmon->lock);
+ mutex_init(&sysmon->state_lock);
+
+ sysmon->shutdown_irq = of_irq_get_byname(sysmon->dev->of_node,
+ "shutdown-ack");
+ if (sysmon->shutdown_irq < 0) {
+ if (sysmon->shutdown_irq != -ENODATA) {
+ dev_err(sysmon->dev,
+ "failed to retrieve shutdown-ack IRQ\n");
+ ret = sysmon->shutdown_irq;
+ kfree(sysmon);
+ return ERR_PTR(ret);
+ }
+ } else {
+ ret = devm_request_threaded_irq(sysmon->dev,
+ sysmon->shutdown_irq,
+ NULL, sysmon_shutdown_interrupt,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "q6v5 shutdown-ack", sysmon);
+ if (ret) {
+ dev_err(sysmon->dev,
+ "failed to acquire shutdown-ack IRQ\n");
+ kfree(sysmon);
+ return ERR_PTR(ret);
+ }
+ }
+
+ ret = qmi_handle_init(&sysmon->qmi, SSCTL_MAX_MSG_LEN, &ssctl_ops,
+ qmi_indication_handler);
+ if (ret < 0) {
+ dev_err(sysmon->dev, "failed to initialize qmi handle\n");
+ kfree(sysmon);
+ return ERR_PTR(ret);
+ }
+
+ qmi_add_lookup(&sysmon->qmi, 43, 0, 0);
+
+ sysmon->subdev.prepare = sysmon_prepare;
+ sysmon->subdev.start = sysmon_start;
+ sysmon->subdev.stop = sysmon_stop;
+ sysmon->subdev.unprepare = sysmon_unprepare;
+
+ rproc_add_subdev(rproc, &sysmon->subdev);
+
+ sysmon->nb.notifier_call = sysmon_notify;
+ blocking_notifier_chain_register(&sysmon_notifiers, &sysmon->nb);
+
+ mutex_lock(&sysmon_lock);
+ list_add(&sysmon->node, &sysmon_list);
+ mutex_unlock(&sysmon_lock);
+
+ return sysmon;
+}
+EXPORT_SYMBOL_GPL(qcom_add_sysmon_subdev);
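+
+/*
+ * Illustrative sketch (not part of this file): a Qualcomm remoteproc driver
+ * typically hooks sysmon up from its probe path. The subdev name and SSCTL
+ * instance id below are firmware/SoC-specific placeholders:
+ *
+ *	adsp->sysmon = qcom_add_sysmon_subdev(rproc, "adsp", 0x14);
+ *	if (IS_ERR(adsp->sysmon))
+ *		return PTR_ERR(adsp->sysmon);
+ */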
+
+/**
+ * qcom_remove_sysmon_subdev() - release a qcom_sysmon
+ * @sysmon: sysmon context, as retrieved by qcom_add_sysmon_subdev()
+ */
+void qcom_remove_sysmon_subdev(struct qcom_sysmon *sysmon)
+{
+ if (!sysmon)
+ return;
+
+ mutex_lock(&sysmon_lock);
+ list_del(&sysmon->node);
+ mutex_unlock(&sysmon_lock);
+
+ blocking_notifier_chain_unregister(&sysmon_notifiers, &sysmon->nb);
+
+ rproc_remove_subdev(sysmon->rproc, &sysmon->subdev);
+
+ qmi_handle_release(&sysmon->qmi);
+
+ kfree(sysmon);
+}
+EXPORT_SYMBOL_GPL(qcom_remove_sysmon_subdev);
+
+/**
+ * qcom_sysmon_shutdown_acked() - query the success of the last shutdown
+ * @sysmon: sysmon context
+ *
+ * When sysmon is used to request a graceful shutdown of the remote processor
+ * this can be used by the remoteproc driver to query the success, in order to
+ * know if it should fall back to other means of requesting a shutdown.
+ *
+ * Return: boolean indicator of the success of the last shutdown request
+ */
+bool qcom_sysmon_shutdown_acked(struct qcom_sysmon *sysmon)
+{
+ return sysmon && sysmon->shutdown_acked;
+}
+EXPORT_SYMBOL_GPL(qcom_sysmon_shutdown_acked);
+
+/**
+ * sysmon_probe() - probe sys_mon channel
+ * @rpdev: rpmsg device handle
+ *
+ * Find the sysmon context associated with the ancestor remoteproc and
+ * associate this rpmsg device with that sysmon context.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int sysmon_probe(struct rpmsg_device *rpdev)
+{
+ struct qcom_sysmon *sysmon;
+ struct rproc *rproc;
+
+ rproc = rproc_get_by_child(&rpdev->dev);
+ if (!rproc) {
+ dev_err(&rpdev->dev, "sysmon device not child of rproc\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&sysmon_lock);
+ list_for_each_entry(sysmon, &sysmon_list, node) {
+ if (sysmon->rproc == rproc)
+ goto found;
+ }
+ mutex_unlock(&sysmon_lock);
+
+ dev_err(&rpdev->dev, "no sysmon associated with parent rproc\n");
+
+ return -EINVAL;
+
+found:
+ mutex_unlock(&sysmon_lock);
+
+ rpdev->ept->priv = sysmon;
+ sysmon->ept = rpdev->ept;
+
+ return 0;
+}
+
+/**
+ * sysmon_remove() - sys_mon channel remove handler
+ * @rpdev: rpmsg device handle
+ *
+ * Disassociate the rpmsg device with the sysmon instance.
+ */
+static void sysmon_remove(struct rpmsg_device *rpdev)
+{
+ struct qcom_sysmon *sysmon = rpdev->ept->priv;
+
+ sysmon->ept = NULL;
+}
+
+static const struct rpmsg_device_id sysmon_match[] = {
+ { "sys_mon" },
+ {}
+};
+
+static struct rpmsg_driver sysmon_driver = {
+ .probe = sysmon_probe,
+ .remove = sysmon_remove,
+ .callback = sysmon_callback,
+ .id_table = sysmon_match,
+ .drv = {
+ .name = "qcom_sysmon",
+ },
+};
+
+module_rpmsg_driver(sysmon_driver);
+
+MODULE_DESCRIPTION("Qualcomm sysmon driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
new file mode 100644
index 000000000..68f37296b
--- /dev/null
+++ b/drivers/remoteproc/qcom_wcnss.c
@@ -0,0 +1,694 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Qualcomm Wireless Connectivity Subsystem Peripheral Image Loader
+ *
+ * Copyright (C) 2016 Linaro Ltd
+ * Copyright (C) 2014 Sony Mobile Communications AB
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/qcom_scm.h>
+#include <linux/regulator/consumer.h>
+#include <linux/remoteproc.h>
+#include <linux/soc/qcom/mdt_loader.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/soc/qcom/smem_state.h>
+
+#include "qcom_common.h"
+#include "remoteproc_internal.h"
+#include "qcom_pil_info.h"
+#include "qcom_wcnss.h"
+
+#define WCNSS_CRASH_REASON_SMEM 422
+#define WCNSS_FIRMWARE_NAME "wcnss.mdt"
+#define WCNSS_PAS_ID 6
+#define WCNSS_SSCTL_ID 0x13
+
+#define WCNSS_SPARE_NVBIN_DLND BIT(25)
+
+#define WCNSS_PMU_IRIS_XO_CFG BIT(3)
+#define WCNSS_PMU_IRIS_XO_EN BIT(4)
+#define WCNSS_PMU_GC_BUS_MUX_SEL_TOP BIT(5)
+#define WCNSS_PMU_IRIS_XO_CFG_STS BIT(6) /* 1: in progress, 0: done */
+
+#define WCNSS_PMU_IRIS_RESET BIT(7)
+#define WCNSS_PMU_IRIS_RESET_STS BIT(8) /* 1: in progress, 0: done */
+#define WCNSS_PMU_IRIS_XO_READ BIT(9)
+#define WCNSS_PMU_IRIS_XO_READ_STS BIT(10)
+
+#define WCNSS_PMU_XO_MODE_MASK GENMASK(2, 1)
+#define WCNSS_PMU_XO_MODE_19p2 0
+#define WCNSS_PMU_XO_MODE_48 3
+
+#define WCNSS_MAX_PDS 2
+
+struct wcnss_data {
+ size_t pmu_offset;
+ size_t spare_offset;
+
+ const char *pd_names[WCNSS_MAX_PDS];
+ const struct wcnss_vreg_info *vregs;
+ size_t num_vregs, num_pd_vregs;
+};
+
+struct qcom_wcnss {
+ struct device *dev;
+ struct rproc *rproc;
+
+ void __iomem *pmu_cfg;
+ void __iomem *spare_out;
+
+ bool use_48mhz_xo;
+
+ int wdog_irq;
+ int fatal_irq;
+ int ready_irq;
+ int handover_irq;
+ int stop_ack_irq;
+
+ struct qcom_smem_state *state;
+ unsigned stop_bit;
+
+ struct mutex iris_lock;
+ struct qcom_iris *iris;
+
+ struct device *pds[WCNSS_MAX_PDS];
+ size_t num_pds;
+ struct regulator_bulk_data *vregs;
+ size_t num_vregs;
+
+ struct completion start_done;
+ struct completion stop_done;
+
+ phys_addr_t mem_phys;
+ phys_addr_t mem_reloc;
+ void *mem_region;
+ size_t mem_size;
+
+ struct qcom_rproc_subdev smd_subdev;
+ struct qcom_sysmon *sysmon;
+};
+
+static const struct wcnss_data riva_data = {
+ .pmu_offset = 0x28,
+ .spare_offset = 0xb4,
+
+ .vregs = (struct wcnss_vreg_info[]) {
+ { "vddmx", 1050000, 1150000, 0 },
+ { "vddcx", 1050000, 1150000, 0 },
+ { "vddpx", 1800000, 1800000, 0 },
+ },
+ .num_vregs = 3,
+};
+
+static const struct wcnss_data pronto_v1_data = {
+ .pmu_offset = 0x1004,
+ .spare_offset = 0x1088,
+
+ .pd_names = { "mx", "cx" },
+ .vregs = (struct wcnss_vreg_info[]) {
+ { "vddmx", 950000, 1150000, 0 },
+ { "vddcx", .super_turbo = true},
+ { "vddpx", 1800000, 1800000, 0 },
+ },
+ .num_pd_vregs = 2,
+ .num_vregs = 1,
+};
+
+static const struct wcnss_data pronto_v2_data = {
+ .pmu_offset = 0x1004,
+ .spare_offset = 0x1088,
+
+ .pd_names = { "mx", "cx" },
+ .vregs = (struct wcnss_vreg_info[]) {
+ { "vddmx", 1287500, 1287500, 0 },
+ { "vddcx", .super_turbo = true },
+ { "vddpx", 1800000, 1800000, 0 },
+ },
+ .num_pd_vregs = 2,
+ .num_vregs = 1,
+};
+
+static int wcnss_load(struct rproc *rproc, const struct firmware *fw)
+{
+ struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
+ int ret;
+
+ ret = qcom_mdt_load(wcnss->dev, fw, rproc->firmware, WCNSS_PAS_ID,
+ wcnss->mem_region, wcnss->mem_phys,
+ wcnss->mem_size, &wcnss->mem_reloc);
+ if (ret)
+ return ret;
+
+ qcom_pil_info_store("wcnss", wcnss->mem_phys, wcnss->mem_size);
+
+ return 0;
+}
+
+static void wcnss_indicate_nv_download(struct qcom_wcnss *wcnss)
+{
+ u32 val;
+
+ /* Indicate NV download capability */
+ val = readl(wcnss->spare_out);
+ val |= WCNSS_SPARE_NVBIN_DLND;
+ writel(val, wcnss->spare_out);
+}
+
+static void wcnss_configure_iris(struct qcom_wcnss *wcnss)
+{
+ u32 val;
+
+ /* Clear PMU cfg register */
+ writel(0, wcnss->pmu_cfg);
+
+ val = WCNSS_PMU_GC_BUS_MUX_SEL_TOP | WCNSS_PMU_IRIS_XO_EN;
+ writel(val, wcnss->pmu_cfg);
+
+ /* Clear XO_MODE */
+ val &= ~WCNSS_PMU_XO_MODE_MASK;
+ if (wcnss->use_48mhz_xo)
+ val |= WCNSS_PMU_XO_MODE_48 << 1;
+ else
+ val |= WCNSS_PMU_XO_MODE_19p2 << 1;
+ writel(val, wcnss->pmu_cfg);
+
+ /* Reset IRIS */
+ val |= WCNSS_PMU_IRIS_RESET;
+ writel(val, wcnss->pmu_cfg);
+
+ /* Wait for PMU.iris_reg_reset_sts */
+ while (readl(wcnss->pmu_cfg) & WCNSS_PMU_IRIS_RESET_STS)
+ cpu_relax();
+
+ /* Clear IRIS reset */
+ val &= ~WCNSS_PMU_IRIS_RESET;
+ writel(val, wcnss->pmu_cfg);
+
+ /* Start IRIS XO configuration */
+ val |= WCNSS_PMU_IRIS_XO_CFG;
+ writel(val, wcnss->pmu_cfg);
+
+ /* Wait for XO configuration to finish */
+ while (readl(wcnss->pmu_cfg) & WCNSS_PMU_IRIS_XO_CFG_STS)
+ cpu_relax();
+
+ /* Stop IRIS XO configuration */
+ val &= ~WCNSS_PMU_GC_BUS_MUX_SEL_TOP;
+ val &= ~WCNSS_PMU_IRIS_XO_CFG;
+ writel(val, wcnss->pmu_cfg);
+
+ /* Add some delay for XO to settle */
+ msleep(20);
+}
+
+static int wcnss_start(struct rproc *rproc)
+{
+ struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
+ int ret, i;
+
+ mutex_lock(&wcnss->iris_lock);
+ if (!wcnss->iris) {
+ dev_err(wcnss->dev, "no iris registered\n");
+ ret = -EINVAL;
+ goto release_iris_lock;
+ }
+
+ for (i = 0; i < wcnss->num_pds; i++) {
+ dev_pm_genpd_set_performance_state(wcnss->pds[i], INT_MAX);
+ ret = pm_runtime_get_sync(wcnss->pds[i]);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wcnss->pds[i]);
+ goto disable_pds;
+ }
+ }
+
+ ret = regulator_bulk_enable(wcnss->num_vregs, wcnss->vregs);
+ if (ret)
+ goto disable_pds;
+
+ ret = qcom_iris_enable(wcnss->iris);
+ if (ret)
+ goto disable_regulators;
+
+ wcnss_indicate_nv_download(wcnss);
+ wcnss_configure_iris(wcnss);
+
+ ret = qcom_scm_pas_auth_and_reset(WCNSS_PAS_ID);
+ if (ret) {
+ dev_err(wcnss->dev,
+ "failed to authenticate image and release reset\n");
+ goto disable_iris;
+ }
+
+ ret = wait_for_completion_timeout(&wcnss->start_done,
+ msecs_to_jiffies(5000));
+ if (wcnss->ready_irq > 0 && ret == 0) {
+ /* We have a ready_irq, but it didn't fire in time. */
+ dev_err(wcnss->dev, "start timed out\n");
+ qcom_scm_pas_shutdown(WCNSS_PAS_ID);
+ ret = -ETIMEDOUT;
+ goto disable_iris;
+ }
+
+ ret = 0;
+
+disable_iris:
+ qcom_iris_disable(wcnss->iris);
+disable_regulators:
+ regulator_bulk_disable(wcnss->num_vregs, wcnss->vregs);
+disable_pds:
+ for (i--; i >= 0; i--) {
+ pm_runtime_put(wcnss->pds[i]);
+ dev_pm_genpd_set_performance_state(wcnss->pds[i], 0);
+ }
+release_iris_lock:
+ mutex_unlock(&wcnss->iris_lock);
+
+ return ret;
+}
+
+static int wcnss_stop(struct rproc *rproc)
+{
+ struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
+ int ret;
+
+ if (wcnss->state) {
+ qcom_smem_state_update_bits(wcnss->state,
+ BIT(wcnss->stop_bit),
+ BIT(wcnss->stop_bit));
+
+ ret = wait_for_completion_timeout(&wcnss->stop_done,
+ msecs_to_jiffies(5000));
+ if (ret == 0)
+ dev_err(wcnss->dev, "timed out on wait\n");
+
+ qcom_smem_state_update_bits(wcnss->state,
+ BIT(wcnss->stop_bit),
+ 0);
+ }
+
+ ret = qcom_scm_pas_shutdown(WCNSS_PAS_ID);
+ if (ret)
+ dev_err(wcnss->dev, "failed to shutdown: %d\n", ret);
+
+ return ret;
+}
+
+static void *wcnss_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
+{
+ struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
+ int offset;
+
+ offset = da - wcnss->mem_reloc;
+ if (offset < 0 || offset + len > wcnss->mem_size)
+ return NULL;
+
+ return wcnss->mem_region + offset;
+}
+
+static const struct rproc_ops wcnss_ops = {
+ .start = wcnss_start,
+ .stop = wcnss_stop,
+ .da_to_va = wcnss_da_to_va,
+ .parse_fw = qcom_register_dump_segments,
+ .load = wcnss_load,
+};
+
+static irqreturn_t wcnss_wdog_interrupt(int irq, void *dev)
+{
+ struct qcom_wcnss *wcnss = dev;
+
+ rproc_report_crash(wcnss->rproc, RPROC_WATCHDOG);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wcnss_fatal_interrupt(int irq, void *dev)
+{
+ struct qcom_wcnss *wcnss = dev;
+ size_t len;
+ char *msg;
+
+ msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, WCNSS_CRASH_REASON_SMEM, &len);
+ if (!IS_ERR(msg) && len > 0 && msg[0])
+ dev_err(wcnss->dev, "fatal error received: %s\n", msg);
+
+ rproc_report_crash(wcnss->rproc, RPROC_FATAL_ERROR);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wcnss_ready_interrupt(int irq, void *dev)
+{
+ struct qcom_wcnss *wcnss = dev;
+
+ complete(&wcnss->start_done);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wcnss_handover_interrupt(int irq, void *dev)
+{
+ /*
+ * XXX: At this point we're supposed to release the resources that we
+ * have been holding on behalf of the WCNSS. Unfortunately this
+ * interrupt comes way before the other side seems to be done.
+ *
+ * So we're currently relying on the ready interrupt firing later than
+ * this one, and we just disable the resources at the end of wcnss_start().
+ */
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wcnss_stop_ack_interrupt(int irq, void *dev)
+{
+ struct qcom_wcnss *wcnss = dev;
+
+ complete(&wcnss->stop_done);
+
+ return IRQ_HANDLED;
+}
+
+static int wcnss_init_pds(struct qcom_wcnss *wcnss,
+ const char * const pd_names[WCNSS_MAX_PDS])
+{
+ int i, ret;
+
+ for (i = 0; i < WCNSS_MAX_PDS; i++) {
+ if (!pd_names[i])
+ break;
+
+ wcnss->pds[i] = dev_pm_domain_attach_by_name(wcnss->dev, pd_names[i]);
+ if (IS_ERR_OR_NULL(wcnss->pds[i])) {
+ ret = PTR_ERR(wcnss->pds[i]) ? : -ENODATA;
+ for (i--; i >= 0; i--)
+ dev_pm_domain_detach(wcnss->pds[i], false);
+ return ret;
+ }
+ }
+ wcnss->num_pds = i;
+
+ return 0;
+}
+
+static void wcnss_release_pds(struct qcom_wcnss *wcnss)
+{
+ int i;
+
+ for (i = 0; i < wcnss->num_pds; i++)
+ dev_pm_domain_detach(wcnss->pds[i], false);
+}
+
+static int wcnss_init_regulators(struct qcom_wcnss *wcnss,
+ const struct wcnss_vreg_info *info,
+ int num_vregs, int num_pd_vregs)
+{
+ struct regulator_bulk_data *bulk;
+ int ret;
+ int i;
+
+ /*
+ * If attaching the power domains succeeded, we can skip requesting
+ * the regulators for the power domains. For old device trees we need to
+ * reserve extra space to manage them through the regulator interface.
+ */
+ if (wcnss->num_pds)
+ info += num_pd_vregs;
+ else
+ num_vregs += num_pd_vregs;
+
+ bulk = devm_kcalloc(wcnss->dev,
+ num_vregs, sizeof(struct regulator_bulk_data),
+ GFP_KERNEL);
+ if (!bulk)
+ return -ENOMEM;
+
+ for (i = 0; i < num_vregs; i++)
+ bulk[i].supply = info[i].name;
+
+ ret = devm_regulator_bulk_get(wcnss->dev, num_vregs, bulk);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_vregs; i++) {
+ if (info[i].max_voltage)
+ regulator_set_voltage(bulk[i].consumer,
+ info[i].min_voltage,
+ info[i].max_voltage);
+
+ if (info[i].load_uA)
+ regulator_set_load(bulk[i].consumer, info[i].load_uA);
+ }
+
+ wcnss->vregs = bulk;
+ wcnss->num_vregs = num_vregs;
+
+ return 0;
+}
+
+static int wcnss_request_irq(struct qcom_wcnss *wcnss,
+ struct platform_device *pdev,
+ const char *name,
+ bool optional,
+ irq_handler_t thread_fn)
+{
+ int ret;
+ int irq_number;
+
+ ret = platform_get_irq_byname(pdev, name);
+ if (ret < 0 && optional) {
+ dev_dbg(&pdev->dev, "no %s IRQ defined, ignoring\n", name);
+ return 0;
+ } else if (ret < 0) {
+ dev_err(&pdev->dev, "no %s IRQ defined\n", name);
+ return ret;
+ }
+
+ irq_number = ret;
+
+ ret = devm_request_threaded_irq(&pdev->dev, ret,
+ NULL, thread_fn,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "wcnss", wcnss);
+ if (ret) {
+ dev_err(&pdev->dev, "request %s IRQ failed\n", name);
+ return ret;
+ }
+
+ /* Return the IRQ number if the IRQ was successfully acquired */
+ return irq_number;
+}
+
+static int wcnss_alloc_memory_region(struct qcom_wcnss *wcnss)
+{
+ struct device_node *node;
+ struct resource r;
+ int ret;
+
+ node = of_parse_phandle(wcnss->dev->of_node, "memory-region", 0);
+ if (!node) {
+ dev_err(wcnss->dev, "no memory-region specified\n");
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(node, 0, &r);
+ of_node_put(node);
+ if (ret)
+ return ret;
+
+ wcnss->mem_phys = wcnss->mem_reloc = r.start;
+ wcnss->mem_size = resource_size(&r);
+ wcnss->mem_region = devm_ioremap_wc(wcnss->dev, wcnss->mem_phys, wcnss->mem_size);
+ if (!wcnss->mem_region) {
+ dev_err(wcnss->dev, "unable to map memory region: %pa+%zx\n",
+ &r.start, wcnss->mem_size);
+ return -EBUSY;
+ }
+
+ return 0;
+}
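+
+/*
+ * The "memory-region" property parsed above is expected to reference a
+ * reserved-memory node. A minimal device tree sketch (the address and size
+ * are illustrative placeholders):
+ *
+ *	reserved-memory {
+ *		wcnss_mem: wcnss@8b600000 {
+ *			reg = <0x8b600000 0x600000>;
+ *			no-map;
+ *		};
+ *	};
+ *
+ *	memory-region = <&wcnss_mem>;
+ */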
+
+static int wcnss_probe(struct platform_device *pdev)
+{
+ const char *fw_name = WCNSS_FIRMWARE_NAME;
+ const struct wcnss_data *data;
+ struct qcom_wcnss *wcnss;
+ struct resource *res;
+ struct rproc *rproc;
+ void __iomem *mmio;
+ int ret;
+
+ data = of_device_get_match_data(&pdev->dev);
+
+ if (!qcom_scm_is_available())
+ return -EPROBE_DEFER;
+
+ if (!qcom_scm_pas_supported(WCNSS_PAS_ID)) {
+ dev_err(&pdev->dev, "PAS is not available for WCNSS\n");
+ return -ENXIO;
+ }
+
+ ret = of_property_read_string(pdev->dev.of_node, "firmware-name",
+ &fw_name);
+ if (ret < 0 && ret != -EINVAL)
+ return ret;
+
+ rproc = rproc_alloc(&pdev->dev, pdev->name, &wcnss_ops,
+ fw_name, sizeof(*wcnss));
+ if (!rproc) {
+ dev_err(&pdev->dev, "unable to allocate remoteproc\n");
+ return -ENOMEM;
+ }
+ rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
+
+ wcnss = (struct qcom_wcnss *)rproc->priv;
+ wcnss->dev = &pdev->dev;
+ wcnss->rproc = rproc;
+ platform_set_drvdata(pdev, wcnss);
+
+ init_completion(&wcnss->start_done);
+ init_completion(&wcnss->stop_done);
+
+ mutex_init(&wcnss->iris_lock);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pmu");
+ mmio = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mmio)) {
+ ret = PTR_ERR(mmio);
+ goto free_rproc;
+ }
+
+ ret = wcnss_alloc_memory_region(wcnss);
+ if (ret)
+ goto free_rproc;
+
+ wcnss->pmu_cfg = mmio + data->pmu_offset;
+ wcnss->spare_out = mmio + data->spare_offset;
+
+ /*
+ * We might need to fall back to regulators instead of power domains
+ * for old device trees. Don't report an error in that case.
+ */
+ ret = wcnss_init_pds(wcnss, data->pd_names);
+ if (ret && (ret != -ENODATA || !data->num_pd_vregs))
+ goto free_rproc;
+
+ ret = wcnss_init_regulators(wcnss, data->vregs, data->num_vregs,
+ data->num_pd_vregs);
+ if (ret)
+ goto detach_pds;
+
+ ret = wcnss_request_irq(wcnss, pdev, "wdog", false, wcnss_wdog_interrupt);
+ if (ret < 0)
+ goto detach_pds;
+ wcnss->wdog_irq = ret;
+
+ ret = wcnss_request_irq(wcnss, pdev, "fatal", false, wcnss_fatal_interrupt);
+ if (ret < 0)
+ goto detach_pds;
+ wcnss->fatal_irq = ret;
+
+ ret = wcnss_request_irq(wcnss, pdev, "ready", true, wcnss_ready_interrupt);
+ if (ret < 0)
+ goto detach_pds;
+ wcnss->ready_irq = ret;
+
+ ret = wcnss_request_irq(wcnss, pdev, "handover", true, wcnss_handover_interrupt);
+ if (ret < 0)
+ goto detach_pds;
+ wcnss->handover_irq = ret;
+
+ ret = wcnss_request_irq(wcnss, pdev, "stop-ack", true, wcnss_stop_ack_interrupt);
+ if (ret < 0)
+ goto detach_pds;
+ wcnss->stop_ack_irq = ret;
+
+ if (wcnss->stop_ack_irq) {
+ wcnss->state = devm_qcom_smem_state_get(&pdev->dev, "stop",
+ &wcnss->stop_bit);
+ if (IS_ERR(wcnss->state)) {
+ ret = PTR_ERR(wcnss->state);
+ goto detach_pds;
+ }
+ }
+
+ qcom_add_smd_subdev(rproc, &wcnss->smd_subdev);
+ wcnss->sysmon = qcom_add_sysmon_subdev(rproc, "wcnss", WCNSS_SSCTL_ID);
+ if (IS_ERR(wcnss->sysmon)) {
+ ret = PTR_ERR(wcnss->sysmon);
+ goto detach_pds;
+ }
+
+ wcnss->iris = qcom_iris_probe(&pdev->dev, &wcnss->use_48mhz_xo);
+ if (IS_ERR(wcnss->iris)) {
+ ret = PTR_ERR(wcnss->iris);
+ goto detach_pds;
+ }
+
+ ret = rproc_add(rproc);
+ if (ret)
+ goto remove_iris;
+
+ return 0;
+
+remove_iris:
+ qcom_iris_remove(wcnss->iris);
+detach_pds:
+ wcnss_release_pds(wcnss);
+free_rproc:
+ rproc_free(rproc);
+
+ return ret;
+}
+
+static int wcnss_remove(struct platform_device *pdev)
+{
+ struct qcom_wcnss *wcnss = platform_get_drvdata(pdev);
+
+ qcom_iris_remove(wcnss->iris);
+
+ rproc_del(wcnss->rproc);
+
+ qcom_remove_sysmon_subdev(wcnss->sysmon);
+ qcom_remove_smd_subdev(wcnss->rproc, &wcnss->smd_subdev);
+ wcnss_release_pds(wcnss);
+ rproc_free(wcnss->rproc);
+
+ return 0;
+}
+
+static const struct of_device_id wcnss_of_match[] = {
+ { .compatible = "qcom,riva-pil", &riva_data },
+ { .compatible = "qcom,pronto-v1-pil", &pronto_v1_data },
+ { .compatible = "qcom,pronto-v2-pil", &pronto_v2_data },
+ { },
+};
+MODULE_DEVICE_TABLE(of, wcnss_of_match);
+
+static struct platform_driver wcnss_driver = {
+ .probe = wcnss_probe,
+ .remove = wcnss_remove,
+ .driver = {
+ .name = "qcom-wcnss-pil",
+ .of_match_table = wcnss_of_match,
+ },
+};
+
+module_platform_driver(wcnss_driver);
+
+MODULE_DESCRIPTION("Qualcomm Peripheral Image Loader for Wireless Subsystem");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/remoteproc/qcom_wcnss.h b/drivers/remoteproc/qcom_wcnss.h
new file mode 100644
index 000000000..6d01ee6af
--- /dev/null
+++ b/drivers/remoteproc/qcom_wcnss.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __QCOM_WNCSS_H__
+#define __QCOM_WNCSS_H__
+
+struct qcom_iris;
+struct qcom_wcnss;
+
+extern struct platform_driver qcom_iris_driver;
+
+struct wcnss_vreg_info {
+ const char * const name;
+ int min_voltage;
+ int max_voltage;
+
+ int load_uA;
+
+ bool super_turbo;
+};
+
+struct qcom_iris *qcom_iris_probe(struct device *parent, bool *use_48mhz_xo);
+void qcom_iris_remove(struct qcom_iris *iris);
+int qcom_iris_enable(struct qcom_iris *iris);
+void qcom_iris_disable(struct qcom_iris *iris);
+
+#endif
diff --git a/drivers/remoteproc/qcom_wcnss_iris.c b/drivers/remoteproc/qcom_wcnss_iris.c
new file mode 100644
index 000000000..09720dddd
--- /dev/null
+++ b/drivers/remoteproc/qcom_wcnss_iris.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Qualcomm Wireless Connectivity Subsystem Iris driver
+ *
+ * Copyright (C) 2016 Linaro Ltd
+ * Copyright (C) 2014 Sony Mobile Communications AB
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#include "qcom_wcnss.h"
+
+struct qcom_iris {
+ struct device dev;
+
+ struct clk *xo_clk;
+
+ struct regulator_bulk_data *vregs;
+ size_t num_vregs;
+};
+
+struct iris_data {
+ const struct wcnss_vreg_info *vregs;
+ size_t num_vregs;
+
+ bool use_48mhz_xo;
+};
+
+static const struct iris_data wcn3620_data = {
+ .vregs = (struct wcnss_vreg_info[]) {
+ { "vddxo", 1800000, 1800000, 10000 },
+ { "vddrfa", 1300000, 1300000, 100000 },
+ { "vddpa", 3300000, 3300000, 515000 },
+ { "vdddig", 1800000, 1800000, 10000 },
+ },
+ .num_vregs = 4,
+ .use_48mhz_xo = false,
+};
+
+static const struct iris_data wcn3660_data = {
+ .vregs = (struct wcnss_vreg_info[]) {
+ { "vddxo", 1800000, 1800000, 10000 },
+ { "vddrfa", 1300000, 1300000, 100000 },
+ { "vddpa", 2900000, 3000000, 515000 },
+ { "vdddig", 1200000, 1225000, 10000 },
+ },
+ .num_vregs = 4,
+ .use_48mhz_xo = true,
+};
+
+static const struct iris_data wcn3680_data = {
+ .vregs = (struct wcnss_vreg_info[]) {
+ { "vddxo", 1800000, 1800000, 10000 },
+ { "vddrfa", 1300000, 1300000, 100000 },
+ { "vddpa", 3300000, 3300000, 515000 },
+ { "vdddig", 1800000, 1800000, 10000 },
+ },
+ .num_vregs = 4,
+ .use_48mhz_xo = true,
+};
+
+int qcom_iris_enable(struct qcom_iris *iris)
+{
+ int ret;
+
+ ret = regulator_bulk_enable(iris->num_vregs, iris->vregs);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(iris->xo_clk);
+ if (ret) {
+ dev_err(&iris->dev, "failed to enable xo clk\n");
+ goto disable_regulators;
+ }
+
+ return 0;
+
+disable_regulators:
+ regulator_bulk_disable(iris->num_vregs, iris->vregs);
+
+ return ret;
+}
+
+void qcom_iris_disable(struct qcom_iris *iris)
+{
+ clk_disable_unprepare(iris->xo_clk);
+ regulator_bulk_disable(iris->num_vregs, iris->vregs);
+}
+
+static const struct of_device_id iris_of_match[] = {
+ { .compatible = "qcom,wcn3620", .data = &wcn3620_data },
+ { .compatible = "qcom,wcn3660", .data = &wcn3660_data },
+ { .compatible = "qcom,wcn3660b", .data = &wcn3680_data },
+ { .compatible = "qcom,wcn3680", .data = &wcn3680_data },
+ {}
+};
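+
+/*
+ * qcom_iris_probe() below expects a child node named "iris" under the WCNSS
+ * node, matched against the table above. A minimal device tree sketch (the
+ * clock and supply providers are illustrative placeholders):
+ *
+ *	iris {
+ *		compatible = "qcom,wcn3620";
+ *		clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>;
+ *		clock-names = "xo";
+ *		vddxo-supply = <&pm8916_l7>;
+ *		vddrfa-supply = <&pm8916_s3>;
+ *		vddpa-supply = <&pm8916_l9>;
+ *		vdddig-supply = <&pm8916_l5>;
+ *	};
+ */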
+
+static void qcom_iris_release(struct device *dev)
+{
+ struct qcom_iris *iris = container_of(dev, struct qcom_iris, dev);
+
+ of_node_put(iris->dev.of_node);
+ kfree(iris);
+}
+
+struct qcom_iris *qcom_iris_probe(struct device *parent, bool *use_48mhz_xo)
+{
+ const struct of_device_id *match;
+ const struct iris_data *data;
+ struct device_node *of_node;
+ struct qcom_iris *iris;
+ int ret;
+ int i;
+
+ of_node = of_get_child_by_name(parent->of_node, "iris");
+ if (!of_node) {
+ dev_err(parent, "No child node \"iris\" found\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ iris = kzalloc(sizeof(*iris), GFP_KERNEL);
+ if (!iris) {
+ of_node_put(of_node);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ device_initialize(&iris->dev);
+ iris->dev.parent = parent;
+ iris->dev.release = qcom_iris_release;
+ iris->dev.of_node = of_node;
+
+ dev_set_name(&iris->dev, "%s.iris", dev_name(parent));
+
+ ret = device_add(&iris->dev);
+ if (ret) {
+ put_device(&iris->dev);
+ return ERR_PTR(ret);
+ }
+
+ match = of_match_device(iris_of_match, &iris->dev);
+ if (!match) {
+ dev_err(&iris->dev, "no matching compatible for iris\n");
+ ret = -EINVAL;
+ goto err_device_del;
+ }
+
+ data = match->data;
+
+ iris->xo_clk = devm_clk_get(&iris->dev, "xo");
+ if (IS_ERR(iris->xo_clk)) {
+ ret = PTR_ERR(iris->xo_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&iris->dev, "failed to acquire xo clk\n");
+ goto err_device_del;
+ }
+
+ iris->num_vregs = data->num_vregs;
+ iris->vregs = devm_kcalloc(&iris->dev,
+ iris->num_vregs,
+ sizeof(struct regulator_bulk_data),
+ GFP_KERNEL);
+ if (!iris->vregs) {
+ ret = -ENOMEM;
+ goto err_device_del;
+ }
+
+ for (i = 0; i < iris->num_vregs; i++)
+ iris->vregs[i].supply = data->vregs[i].name;
+
+ ret = devm_regulator_bulk_get(&iris->dev, iris->num_vregs, iris->vregs);
+ if (ret) {
+ dev_err(&iris->dev, "failed to get regulators\n");
+ goto err_device_del;
+ }
+
+ for (i = 0; i < iris->num_vregs; i++) {
+ if (data->vregs[i].max_voltage)
+ regulator_set_voltage(iris->vregs[i].consumer,
+ data->vregs[i].min_voltage,
+ data->vregs[i].max_voltage);
+
+ if (data->vregs[i].load_uA)
+ regulator_set_load(iris->vregs[i].consumer,
+ data->vregs[i].load_uA);
+ }
+
+ *use_48mhz_xo = data->use_48mhz_xo;
+
+ return iris;
+
+err_device_del:
+ device_del(&iris->dev);
+
+ return ERR_PTR(ret);
+}
+
+void qcom_iris_remove(struct qcom_iris *iris)
+{
+ device_del(&iris->dev);
+}
diff --git a/drivers/remoteproc/rcar_rproc.c b/drivers/remoteproc/rcar_rproc.c
new file mode 100644
index 000000000..1ff2a73ad
--- /dev/null
+++ b/drivers/remoteproc/rcar_rproc.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) IoT.bzh 2021
+ */
+
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/pm_runtime.h>
+#include <linux/remoteproc.h>
+#include <linux/reset.h>
+#include <linux/soc/renesas/rcar-rst.h>
+
+#include "remoteproc_internal.h"
+
+struct rcar_rproc {
+ struct reset_control *rst;
+};
+
+static int rcar_rproc_mem_alloc(struct rproc *rproc,
+ struct rproc_mem_entry *mem)
+{
+ struct device *dev = &rproc->dev;
+ void *va;
+
+ dev_dbg(dev, "map memory: %pa+%zx\n", &mem->dma, mem->len);
+ va = ioremap_wc(mem->dma, mem->len);
+ if (!va) {
+ dev_err(dev, "Unable to map memory region: %pa+%zx\n",
+ &mem->dma, mem->len);
+ return -ENOMEM;
+ }
+
+ /* Update memory entry va */
+ mem->va = va;
+
+ return 0;
+}
+
+static int rcar_rproc_mem_release(struct rproc *rproc,
+ struct rproc_mem_entry *mem)
+{
+ dev_dbg(&rproc->dev, "unmap memory: %pa\n", &mem->dma);
+ iounmap(mem->va);
+
+ return 0;
+}
+
+static int rcar_rproc_prepare(struct rproc *rproc)
+{
+ struct device *dev = rproc->dev.parent;
+ struct device_node *np = dev->of_node;
+ struct of_phandle_iterator it;
+ struct rproc_mem_entry *mem;
+ struct reserved_mem *rmem;
+ u32 da;
+
+ /* Register associated reserved memory regions */
+ of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
+ while (of_phandle_iterator_next(&it) == 0) {
+
+ rmem = of_reserved_mem_lookup(it.node);
+ if (!rmem) {
+ of_node_put(it.node);
+ dev_err(&rproc->dev,
+ "unable to acquire memory-region\n");
+ return -EINVAL;
+ }
+
+ if (rmem->base > U32_MAX) {
+ of_node_put(it.node);
+ return -EINVAL;
+ }
+
+ /* No need to translate pa to da, R-Car uses the same map */
+ da = rmem->base;
+ mem = rproc_mem_entry_init(dev, NULL,
+ rmem->base,
+ rmem->size, da,
+ rcar_rproc_mem_alloc,
+ rcar_rproc_mem_release,
+ it.node->name);
+
+ if (!mem) {
+ of_node_put(it.node);
+ return -ENOMEM;
+ }
+
+ rproc_add_carveout(rproc, mem);
+ }
+
+ return 0;
+}
+
+static int rcar_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
+{
+ int ret;
+
+ ret = rproc_elf_load_rsc_table(rproc, fw);
+ if (ret)
+ dev_info(&rproc->dev, "No resource table in elf\n");
+
+ return 0;
+}
+
+static int rcar_rproc_start(struct rproc *rproc)
+{
+ struct rcar_rproc *priv = rproc->priv;
+ int err;
+
+ if (!rproc->bootaddr)
+ return -EINVAL;
+
+ err = rcar_rst_set_rproc_boot_addr(rproc->bootaddr);
+ if (err) {
+ dev_err(&rproc->dev, "failed to set rproc boot addr\n");
+ return err;
+ }
+
+ err = reset_control_deassert(priv->rst);
+ if (err)
+ dev_err(&rproc->dev, "failed to deassert reset\n");
+
+ return err;
+}
+
+static int rcar_rproc_stop(struct rproc *rproc)
+{
+ struct rcar_rproc *priv = rproc->priv;
+ int err;
+
+ err = reset_control_assert(priv->rst);
+ if (err)
+ dev_err(&rproc->dev, "failed to assert reset\n");
+
+ return err;
+}
+
+static struct rproc_ops rcar_rproc_ops = {
+ .prepare = rcar_rproc_prepare,
+ .start = rcar_rproc_start,
+ .stop = rcar_rproc_stop,
+ .load = rproc_elf_load_segments,
+ .parse_fw = rcar_rproc_parse_fw,
+ .find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
+ .sanity_check = rproc_elf_sanity_check,
+ .get_boot_addr = rproc_elf_get_boot_addr,
+
+};
+
+static int rcar_rproc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct rcar_rproc *priv;
+ struct rproc *rproc;
+ int ret;
+
+ rproc = devm_rproc_alloc(dev, np->name, &rcar_rproc_ops,
+ NULL, sizeof(*priv));
+ if (!rproc)
+ return -ENOMEM;
+
+ priv = rproc->priv;
+
+ priv->rst = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(priv->rst)) {
+ ret = PTR_ERR(priv->rst);
+ dev_err_probe(dev, ret, "fail to acquire rproc reset\n");
+ return ret;
+ }
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret) {
+ dev_err(dev, "failed to power up\n");
+ goto pm_disable;
+ }
+
+ dev_set_drvdata(dev, rproc);
+
+ /* Manually start the rproc */
+ rproc->auto_boot = false;
+
+ ret = devm_rproc_add(dev, rproc);
+ if (ret) {
+ dev_err(dev, "rproc_add failed\n");
+ goto pm_disable;
+ }
+
+ return 0;
+
+pm_disable:
+ pm_runtime_disable(dev);
+
+ return ret;
+}
+
+static int rcar_rproc_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ pm_runtime_disable(dev);
+
+ return 0;
+}
+
+static const struct of_device_id rcar_rproc_of_match[] = {
+ { .compatible = "renesas,rcar-cr7" },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, rcar_rproc_of_match);
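+
+/*
+ * Minimal device tree sketch for this driver (illustrative only; the power
+ * domain, reset specifier and memory region are SoC/board-specific
+ * placeholders):
+ *
+ *	cr7: cr7 {
+ *		compatible = "renesas,rcar-cr7";
+ *		power-domains = <&sysc R8A7795_PD_CR7>;
+ *		resets = <&cpg 222>;
+ *		memory-region = <&cr7_ram>;
+ *	};
+ */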
+
+static struct platform_driver rcar_rproc_driver = {
+ .probe = rcar_rproc_probe,
+ .remove = rcar_rproc_remove,
+ .driver = {
+ .name = "rcar-rproc",
+ .of_match_table = rcar_rproc_of_match,
+ },
+};
+
+module_platform_driver(rcar_rproc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Renesas R-Car Gen3 remote processor control driver");
+MODULE_AUTHOR("Julien Massot <julien.massot@iot.bzh>");
diff --git a/drivers/remoteproc/remoteproc_cdev.c b/drivers/remoteproc/remoteproc_cdev.c
new file mode 100644
index 000000000..687f205fd
--- /dev/null
+++ b/drivers/remoteproc/remoteproc_cdev.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Character device interface driver for Remoteproc framework.
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/cdev.h>
+#include <linux/compat.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/remoteproc.h>
+#include <linux/uaccess.h>
+#include <uapi/linux/remoteproc_cdev.h>
+
+#include "remoteproc_internal.h"
+
+#define NUM_RPROC_DEVICES 64
+static dev_t rproc_major;
+
+static ssize_t rproc_cdev_write(struct file *filp, const char __user *buf, size_t len, loff_t *pos)
+{
+ struct rproc *rproc = container_of(filp->f_inode->i_cdev, struct rproc, cdev);
+ int ret = 0;
+ char cmd[10];
+
+ if (!len || len > sizeof(cmd))
+ return -EINVAL;
+
+ ret = copy_from_user(cmd, buf, len);
+ if (ret)
+ return -EFAULT;
+
+ if (!strncmp(cmd, "start", len)) {
+ ret = rproc_boot(rproc);
+ } else if (!strncmp(cmd, "stop", len)) {
+ ret = rproc_shutdown(rproc);
+ } else if (!strncmp(cmd, "detach", len)) {
+ ret = rproc_detach(rproc);
+ } else {
+ dev_err(&rproc->dev, "Unrecognized option\n");
+ ret = -EINVAL;
+ }
+
+ return ret ? ret : len;
+}
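+
+/*
+ * The commands above give a simple shell interface to the character device
+ * (sketch; a trailing newline is not stripped, hence "echo -n"):
+ *
+ *	echo -n start > /dev/remoteproc0
+ *	echo -n stop > /dev/remoteproc0
+ */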
+
+static long rproc_device_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+ struct rproc *rproc = container_of(filp->f_inode->i_cdev, struct rproc, cdev);
+ void __user *argp = (void __user *)arg;
+ s32 param;
+
+ switch (ioctl) {
+ case RPROC_SET_SHUTDOWN_ON_RELEASE:
+ if (copy_from_user(&param, argp, sizeof(s32)))
+ return -EFAULT;
+
+ rproc->cdev_put_on_release = !!param;
+ break;
+ case RPROC_GET_SHUTDOWN_ON_RELEASE:
+ param = (s32)rproc->cdev_put_on_release;
+ if (copy_to_user(argp, &param, sizeof(s32)))
+ return -EFAULT;
+
+ break;
+ default:
+ dev_err(&rproc->dev, "Unsupported ioctl\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
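+
+/*
+ * Userspace sketch for the ioctls above (error handling omitted):
+ *
+ *	#include <sys/ioctl.h>
+ *	#include <linux/remoteproc_cdev.h>
+ *
+ *	__s32 param = 1;
+ *	int fd = open("/dev/remoteproc0", O_RDWR);
+ *
+ *	ioctl(fd, RPROC_SET_SHUTDOWN_ON_RELEASE, &param);
+ */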
+
+static int rproc_cdev_release(struct inode *inode, struct file *filp)
+{
+ struct rproc *rproc = container_of(inode->i_cdev, struct rproc, cdev);
+ int ret = 0;
+
+ if (!rproc->cdev_put_on_release)
+ return 0;
+
+ if (rproc->state == RPROC_RUNNING)
+ rproc_shutdown(rproc);
+ else if (rproc->state == RPROC_ATTACHED)
+ ret = rproc_detach(rproc);
+
+ return ret;
+}
+
+static const struct file_operations rproc_fops = {
+ .write = rproc_cdev_write,
+ .unlocked_ioctl = rproc_device_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .release = rproc_cdev_release,
+};
+
+int rproc_char_device_add(struct rproc *rproc)
+{
+ int ret;
+
+ cdev_init(&rproc->cdev, &rproc_fops);
+ rproc->cdev.owner = THIS_MODULE;
+
+ rproc->dev.devt = MKDEV(MAJOR(rproc_major), rproc->index);
+ cdev_set_parent(&rproc->cdev, &rproc->dev.kobj);
+ ret = cdev_add(&rproc->cdev, rproc->dev.devt, 1);
+ if (ret < 0)
+ dev_err(&rproc->dev, "Failed to add char dev for %s\n", rproc->name);
+
+ return ret;
+}
+
+void rproc_char_device_remove(struct rproc *rproc)
+{
+ cdev_del(&rproc->cdev);
+}
+
+void __init rproc_init_cdev(void)
+{
+ int ret;
+
+ ret = alloc_chrdev_region(&rproc_major, 0, NUM_RPROC_DEVICES, "remoteproc");
+ if (ret < 0)
+ pr_err("Failed to alloc rproc_cdev region, err %d\n", ret);
+}
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
new file mode 100644
index 000000000..c3f194d93
--- /dev/null
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -0,0 +1,2769 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Remote Processor Framework
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Ohad Ben-Cohen <ohad@wizery.com>
+ * Brian Swetland <swetland@google.com>
+ * Mark Grosen <mgrosen@ti.com>
+ * Fernando Guzman Lugo <fernando.lugo@ti.com>
+ * Suman Anna <s-anna@ti.com>
+ * Robert Tivy <rtivy@ti.com>
+ * Armando Uribe De Leon <x0095078@ti.com>
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/panic_notifier.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/string.h>
+#include <linux/debugfs.h>
+#include <linux/rculist.h>
+#include <linux/remoteproc.h>
+#include <linux/iommu.h>
+#include <linux/idr.h>
+#include <linux/elf.h>
+#include <linux/crc32.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_ring.h>
+#include <asm/byteorder.h>
+#include <linux/platform_device.h>
+
+#include "remoteproc_internal.h"
+
+#define HIGH_BITS_MASK 0xFFFFFFFF00000000ULL
+
+static DEFINE_MUTEX(rproc_list_mutex);
+static LIST_HEAD(rproc_list);
+static struct notifier_block rproc_panic_nb;
+
+typedef int (*rproc_handle_resource_t)(struct rproc *rproc,
+ void *, int offset, int avail);
+
+static int rproc_alloc_carveout(struct rproc *rproc,
+ struct rproc_mem_entry *mem);
+static int rproc_release_carveout(struct rproc *rproc,
+ struct rproc_mem_entry *mem);
+
+/* Unique indices for remoteproc devices */
+static DEFINE_IDA(rproc_dev_index);
+static struct workqueue_struct *rproc_recovery_wq;
+
+static const char * const rproc_crash_names[] = {
+ [RPROC_MMUFAULT] = "mmufault",
+ [RPROC_WATCHDOG] = "watchdog",
+ [RPROC_FATAL_ERROR] = "fatal error",
+};
+
+/* translate rproc_crash_type to string */
+static const char *rproc_crash_to_string(enum rproc_crash_type type)
+{
+ if (type < ARRAY_SIZE(rproc_crash_names))
+ return rproc_crash_names[type];
+ return "unknown";
+}
+
+/*
+ * This is the IOMMU fault handler we register with the IOMMU API
+ * (when relevant; not all remote processors access memory through
+ * an IOMMU).
+ *
+ * IOMMU core will invoke this handler whenever the remote processor
+ * will try to access an unmapped device address.
+ */
+static int rproc_iommu_fault(struct iommu_domain *domain, struct device *dev,
+ unsigned long iova, int flags, void *token)
+{
+ struct rproc *rproc = token;
+
+ dev_err(dev, "iommu fault: da 0x%lx flags 0x%x\n", iova, flags);
+
+ rproc_report_crash(rproc, RPROC_MMUFAULT);
+
+ /*
+ * Let the iommu core know we're not really handling this fault;
+ * we just used it as a recovery trigger.
+ */
+ return -ENOSYS;
+}
+
+static int rproc_enable_iommu(struct rproc *rproc)
+{
+ struct iommu_domain *domain;
+ struct device *dev = rproc->dev.parent;
+ int ret;
+
+ if (!rproc->has_iommu) {
+ dev_dbg(dev, "iommu not present\n");
+ return 0;
+ }
+
+ domain = iommu_domain_alloc(dev->bus);
+ if (!domain) {
+ dev_err(dev, "can't alloc iommu domain\n");
+ return -ENOMEM;
+ }
+
+ iommu_set_fault_handler(domain, rproc_iommu_fault, rproc);
+
+ ret = iommu_attach_device(domain, dev);
+ if (ret) {
+ dev_err(dev, "can't attach iommu device: %d\n", ret);
+ goto free_domain;
+ }
+
+ rproc->domain = domain;
+
+ return 0;
+
+free_domain:
+ iommu_domain_free(domain);
+ return ret;
+}
+
+static void rproc_disable_iommu(struct rproc *rproc)
+{
+ struct iommu_domain *domain = rproc->domain;
+ struct device *dev = rproc->dev.parent;
+
+ if (!domain)
+ return;
+
+ iommu_detach_device(domain, dev);
+ iommu_domain_free(domain);
+}
+
+phys_addr_t rproc_va_to_pa(void *cpu_addr)
+{
+ /*
+ * Return physical address according to virtual address location
+ * - in vmalloc: if region ioremapped or defined as dma_alloc_coherent
+ * - in kernel: if region allocated in generic dma memory pool
+ */
+ if (is_vmalloc_addr(cpu_addr)) {
+ return page_to_phys(vmalloc_to_page(cpu_addr)) +
+ offset_in_page(cpu_addr);
+ }
+
+ WARN_ON(!virt_addr_valid(cpu_addr));
+ return virt_to_phys(cpu_addr);
+}
+EXPORT_SYMBOL(rproc_va_to_pa);
+
+/**
+ * rproc_da_to_va() - lookup the kernel virtual address for a remoteproc address
+ * @rproc: handle of a remote processor
+ * @da: remoteproc device address to translate
+ * @len: length of the memory region @da is pointing to
+ * @is_iomem: optional pointer filled in to indicate if @da is iomapped memory
+ *
+ * Some remote processors will ask us to allocate them physically contiguous
+ * memory regions (which we call "carveouts"), and map them to specific
+ * device addresses (which are hardcoded in the firmware). They may also have
+ * dedicated memory regions internal to the processors, and use them either
+ * exclusively or alongside carveouts.
+ *
+ * They may then ask us to copy objects into specific device addresses (e.g.
+ * code/data sections) or expose to us certain symbols at other device
+ * addresses (e.g. their trace buffer).
+ *
+ * This function is a helper function with which we can go over the allocated
+ * carveouts and translate specific device addresses to kernel virtual addresses
+ * so we can access the referenced memory. This function also allows
+ * performing translations on the internal remoteproc memory regions through
+ * a platform-specific da_to_va op, if present.
+ *
+ * Note: phys_to_virt(iommu_iova_to_phys(rproc->domain, da)) will work too,
+ * but only on kernel direct mapped RAM memory. Instead, we're just using
+ * here the output of the DMA API for the carveouts, which should be more
+ * correct.
+ *
+ * Return: a valid kernel address on success or NULL on failure
+ */
+void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
+{
+ struct rproc_mem_entry *carveout;
+ void *ptr = NULL;
+
+ if (rproc->ops->da_to_va) {
+ ptr = rproc->ops->da_to_va(rproc, da, len, is_iomem);
+ if (ptr)
+ goto out;
+ }
+
+ list_for_each_entry(carveout, &rproc->carveouts, node) {
+ int offset = da - carveout->da;
+
+ /* Verify that carveout is allocated */
+ if (!carveout->va)
+ continue;
+
+ /* try next carveout if da is too small */
+ if (offset < 0)
+ continue;
+
+ /* try next carveout if da is too large */
+ if (offset + len > carveout->len)
+ continue;
+
+ ptr = carveout->va + offset;
+
+ if (is_iomem)
+ *is_iomem = carveout->is_iomem;
+
+ break;
+ }
+
+out:
+ return ptr;
+}
+EXPORT_SYMBOL(rproc_da_to_va);
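+
+/*
+ * Typical use (sketch): resolve a firmware-provided device address before
+ * touching the memory behind it, bailing out if it maps nowhere:
+ *
+ *	void *va = rproc_da_to_va(rproc, da, len, NULL);
+ *	if (!va)
+ *		return -EINVAL;
+ */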
+
+/**
+ * rproc_find_carveout_by_name() - lookup the carveout region by a name
+ * @rproc: handle of a remote processor
+ * @name: carveout name to find (format string)
+ * @...: optional parameters matching @name string
+ *
+ * A platform driver can register pre-allocated carveouts
+ * (physically contiguous memory regions) before rproc firmware loading and
+ * the associated resource table analysis. These regions may be dedicated
+ * memory regions internal to the coprocessor, or specified DDR regions with
+ * specific attributes.
+ *
+ * This function is a helper function with which we can go over the
+ * allocated carveouts and return associated region characteristics like
+ * coprocessor address, length or processor virtual address.
+ *
+ * Return: a valid pointer on carveout entry on success or NULL on failure.
+ */
+__printf(2, 3)
+struct rproc_mem_entry *
+rproc_find_carveout_by_name(struct rproc *rproc, const char *name, ...)
+{
+ va_list args;
+ char _name[32];
+ struct rproc_mem_entry *carveout, *mem = NULL;
+
+ if (!name)
+ return NULL;
+
+ va_start(args, name);
+ vsnprintf(_name, sizeof(_name), name, args);
+ va_end(args);
+
+ list_for_each_entry(carveout, &rproc->carveouts, node) {
+ /* Compare carveout and requested names */
+ if (!strcmp(carveout->name, _name)) {
+ mem = carveout;
+ break;
+ }
+ }
+
+ return mem;
+}
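+
+/*
+ * Since @name is a format string, callers can look up indexed regions, as
+ * rproc_alloc_vring() does below (sketch):
+ *
+ *	mem = rproc_find_carveout_by_name(rproc, "vdev%dvring%d", id, i);
+ */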
+
+/**
+ * rproc_check_carveout_da() - Check specified carveout da configuration
+ * @rproc: handle of a remote processor
+ * @mem: pointer on carveout to check
+ * @da: area device address
+ * @len: associated area size
+ *
+ * This function is a helper function to verify requested device area (couple
+ * da, len) is part of specified carveout.
+ * If da is not set (defined as FW_RSC_ADDR_ANY), only requested length is
+ * checked.
+ *
+ * Return: 0 if carveout matches request else error
+ */
+static int rproc_check_carveout_da(struct rproc *rproc,
+ struct rproc_mem_entry *mem, u32 da, u32 len)
+{
+ struct device *dev = &rproc->dev;
+ int delta;
+
+ /* Check requested resource length */
+ if (len > mem->len) {
+ dev_err(dev, "Registered carveout doesn't fit len request\n");
+ return -EINVAL;
+ }
+
+ if (da != FW_RSC_ADDR_ANY && mem->da == FW_RSC_ADDR_ANY) {
+ /* Address doesn't match registered carveout configuration */
+ return -EINVAL;
+ } else if (da != FW_RSC_ADDR_ANY && mem->da != FW_RSC_ADDR_ANY) {
+ delta = da - mem->da;
+
+ /* Check requested resource belongs to registered carveout */
+ if (delta < 0) {
+ dev_err(dev,
+ "Registered carveout doesn't fit da request\n");
+ return -EINVAL;
+ }
+
+ if (delta + len > mem->len) {
+ dev_err(dev,
+ "Registered carveout doesn't fit len request\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
+{
+ struct rproc *rproc = rvdev->rproc;
+ struct device *dev = &rproc->dev;
+ struct rproc_vring *rvring = &rvdev->vring[i];
+ struct fw_rsc_vdev *rsc;
+ int ret, notifyid;
+ struct rproc_mem_entry *mem;
+ size_t size;
+
+ /* actual size of vring (in bytes) */
+ size = PAGE_ALIGN(vring_size(rvring->num, rvring->align));
+
+ rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;
+
+ /* Search for pre-registered carveout */
+ mem = rproc_find_carveout_by_name(rproc, "vdev%dvring%d", rvdev->index,
+ i);
+ if (mem) {
+ if (rproc_check_carveout_da(rproc, mem, rsc->vring[i].da, size))
+ return -ENOMEM;
+ } else {
+ /* Register carveout in list */
+ mem = rproc_mem_entry_init(dev, NULL, 0,
+ size, rsc->vring[i].da,
+ rproc_alloc_carveout,
+ rproc_release_carveout,
+ "vdev%dvring%d",
+ rvdev->index, i);
+ if (!mem) {
+ dev_err(dev, "Can't allocate memory entry structure\n");
+ return -ENOMEM;
+ }
+
+ rproc_add_carveout(rproc, mem);
+ }
+
+ /*
+ * Assign an rproc-wide unique index for this vring
+ * TODO: assign a notifyid for rvdev updates as well
+ * TODO: support predefined notifyids (via resource table)
+ */
+ ret = idr_alloc(&rproc->notifyids, rvring, 0, 0, GFP_KERNEL);
+ if (ret < 0) {
+ dev_err(dev, "idr_alloc failed: %d\n", ret);
+ return ret;
+ }
+ notifyid = ret;
+
+ /* Potentially bump max_notifyid */
+ if (notifyid > rproc->max_notifyid)
+ rproc->max_notifyid = notifyid;
+
+ rvring->notifyid = notifyid;
+
+ /* Let the rproc know the notifyid of this vring. */
+ rsc->vring[i].notifyid = notifyid;
+ return 0;
+}
+
+int
+rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)
+{
+ struct rproc *rproc = rvdev->rproc;
+ struct device *dev = &rproc->dev;
+ struct fw_rsc_vdev_vring *vring = &rsc->vring[i];
+ struct rproc_vring *rvring = &rvdev->vring[i];
+
+ dev_dbg(dev, "vdev rsc: vring%d: da 0x%x, qsz %d, align %d\n",
+ i, vring->da, vring->num, vring->align);
+
+ /* verify queue size and vring alignment are sane */
+ if (!vring->num || !vring->align) {
+ dev_err(dev, "invalid qsz (%d) or alignment (%d)\n",
+ vring->num, vring->align);
+ return -EINVAL;
+ }
+
+ rvring->num = vring->num;
+ rvring->align = vring->align;
+ rvring->rvdev = rvdev;
+
+ return 0;
+}
+
+void rproc_free_vring(struct rproc_vring *rvring)
+{
+ struct rproc *rproc = rvring->rvdev->rproc;
+ int idx = rvring - rvring->rvdev->vring;
+ struct fw_rsc_vdev *rsc;
+
+ idr_remove(&rproc->notifyids, rvring->notifyid);
+
+ /*
+ * At this point rproc_stop() has been called and the installed resource
+ * table in the remote processor memory may no longer be accessible. As
+ * such and as per rproc_stop(), rproc->table_ptr points to the cached
+ * resource table (rproc->cached_table). The cached resource table is
+ * only available when a remote processor has been booted by the
+ * remoteproc core, otherwise it is NULL.
+ *
+ * Based on the above, reset the virtio device section in the cached
+ * resource table only if there is one to work with.
+ */
+ if (rproc->table_ptr) {
+ rsc = (void *)rproc->table_ptr + rvring->rvdev->rsc_offset;
+ rsc->vring[idx].da = 0;
+ rsc->vring[idx].notifyid = -1;
+ }
+}
+
+void rproc_add_rvdev(struct rproc *rproc, struct rproc_vdev *rvdev)
+{
+ if (rvdev && rproc)
+ list_add_tail(&rvdev->node, &rproc->rvdevs);
+}
+
+void rproc_remove_rvdev(struct rproc_vdev *rvdev)
+{
+ if (rvdev)
+ list_del(&rvdev->node);
+}
+
+/**
+ * rproc_handle_vdev() - handle a vdev fw resource
+ * @rproc: the remote processor
+ * @ptr: the vring resource descriptor
+ * @offset: offset of the resource entry
+ * @avail: size of available data (for sanity checking the image)
+ *
+ * This resource entry requests the host to statically register a virtio
+ * device (vdev), and setup everything needed to support it. It contains
+ * everything needed to make it possible: the virtio device id, virtio
+ * device features, vrings information, virtio config space, etc...
+ *
+ * Before registering the vdev, the vrings are allocated from non-cacheable
+ * physically contiguous memory. Currently we only support two vrings per
+ * remote processor (temporary limitation). We might also want to consider
+ * doing the vring allocation only later when ->find_vqs() is invoked, and
+ * then release them upon ->del_vqs().
+ *
+ * Note: @da is currently not really handled correctly: we dynamically
+ * allocate it using the DMA API, ignoring requested hard coded addresses,
+ * and we don't take care of any required IOMMU programming. This is all
+ * going to be taken care of when the generic iommu-based DMA API will be
+ * merged. Meanwhile, statically-addressed iommu-based firmware images should
+ * use RSC_DEVMEM resource entries to map their required @da to the physical
+ * address of their base CMA region (ouch, hacky!).
+ *
+ * Return: 0 on success, or an appropriate error code otherwise
+ */
+static int rproc_handle_vdev(struct rproc *rproc, void *ptr,
+ int offset, int avail)
+{
+ struct fw_rsc_vdev *rsc = ptr;
+ struct device *dev = &rproc->dev;
+ struct rproc_vdev *rvdev;
+ size_t rsc_size;
+ struct rproc_vdev_data rvdev_data;
+ struct platform_device *pdev;
+
+ /* make sure resource isn't truncated */
+ rsc_size = struct_size(rsc, vring, rsc->num_of_vrings);
+ if (size_add(rsc_size, rsc->config_len) > avail) {
+ dev_err(dev, "vdev rsc is truncated\n");
+ return -EINVAL;
+ }
+
+ /* make sure reserved bytes are zeroes */
+ if (rsc->reserved[0] || rsc->reserved[1]) {
+ dev_err(dev, "vdev rsc has non zero reserved bytes\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "vdev rsc: id %d, dfeatures 0x%x, cfg len %d, %d vrings\n",
+ rsc->id, rsc->dfeatures, rsc->config_len, rsc->num_of_vrings);
+
+ /* we currently support only two vrings per rvdev */
+ if (rsc->num_of_vrings > ARRAY_SIZE(rvdev->vring)) {
+ dev_err(dev, "too many vrings: %d\n", rsc->num_of_vrings);
+ return -EINVAL;
+ }
+
+ rvdev_data.id = rsc->id;
+ rvdev_data.index = rproc->nb_vdev++;
+ rvdev_data.rsc_offset = offset;
+ rvdev_data.rsc = rsc;
+
+ /*
+ * When there is more than one remote processor, the rproc->nb_vdev counter
+ * restarts for each separate instance of "rproc". If rvdev_data.index were
+ * used as the device id we would get duplicates in sysfs, so use
+ * PLATFORM_DEVID_AUTO to auto-select the device id.
+ */
+ pdev = platform_device_register_data(dev, "rproc-virtio", PLATFORM_DEVID_AUTO, &rvdev_data,
+ sizeof(rvdev_data));
+ if (IS_ERR(pdev)) {
+ dev_err(dev, "failed to create rproc-virtio device\n");
+ return PTR_ERR(pdev);
+ }
+
+ return 0;
+}
+
+/**
+ * rproc_handle_trace() - handle a shared trace buffer resource
+ * @rproc: the remote processor
+ * @ptr: the trace resource descriptor
+ * @offset: offset of the resource entry
+ * @avail: size of available data (for sanity checking the image)
+ *
+ * In case the remote processor dumps trace logs into memory,
+ * export it via debugfs.
+ *
+ * Currently, the 'da' member of @rsc should contain the device address
+ * where the remote processor is dumping the traces. Later we could also
+ * support dynamically allocating this address using the generic
+ * DMA API (but currently there isn't a use case for that).
+ *
+ * Return: 0 on success, or an appropriate error code otherwise
+ */
+static int rproc_handle_trace(struct rproc *rproc, void *ptr,
+ int offset, int avail)
+{
+ struct fw_rsc_trace *rsc = ptr;
+ struct rproc_debug_trace *trace;
+ struct device *dev = &rproc->dev;
+ char name[15];
+
+ if (sizeof(*rsc) > avail) {
+ dev_err(dev, "trace rsc is truncated\n");
+ return -EINVAL;
+ }
+
+ /* make sure reserved bytes are zeroes */
+ if (rsc->reserved) {
+ dev_err(dev, "trace rsc has non zero reserved bytes\n");
+ return -EINVAL;
+ }
+
+ trace = kzalloc(sizeof(*trace), GFP_KERNEL);
+ if (!trace)
+ return -ENOMEM;
+
+ /* set the trace buffer dma properties */
+ trace->trace_mem.len = rsc->len;
+ trace->trace_mem.da = rsc->da;
+
+	/* set the pointer to the rproc device */
+ trace->rproc = rproc;
+
+ /* make sure snprintf always null terminates, even if truncating */
+ snprintf(name, sizeof(name), "trace%d", rproc->num_traces);
+
+ /* create the debugfs entry */
+ trace->tfile = rproc_create_trace_file(name, rproc, trace);
+
+ list_add_tail(&trace->node, &rproc->traces);
+
+ rproc->num_traces++;
+
+ dev_dbg(dev, "%s added: da 0x%x, len 0x%x\n",
+ name, rsc->da, rsc->len);
+
+ return 0;
+}
+
+/**
+ * rproc_handle_devmem() - handle devmem resource entry
+ * @rproc: remote processor handle
+ * @ptr: the devmem resource entry
+ * @offset: offset of the resource entry
+ * @avail: size of available data (for sanity checking the image)
+ *
+ * Remote processors commonly need to access certain on-chip peripherals.
+ *
+ * Some of these remote processors access memory via an iommu device,
+ * and might require us to configure their iommu before they can access
+ * the on-chip peripherals they need.
+ *
+ * This resource entry is a request to map such a peripheral device.
+ *
+ * These devmem entries will contain the physical address of the device in
+ * the 'pa' member. If a specific device address is expected, then 'da' will
+ * contain it (currently this is the only use case supported). 'len' will
+ * contain the size of the physical region we need to map.
+ *
+ * Currently we just "trust" those devmem entries to contain valid physical
+ * addresses, but this is going to change: we want the implementations to
+ * tell us ranges of physical addresses the firmware is allowed to request,
+ * and not allow firmwares to request access to physical addresses that
+ * are outside those ranges.
+ *
+ * Return: 0 on success, or an appropriate error code otherwise
+ */
+static int rproc_handle_devmem(struct rproc *rproc, void *ptr,
+ int offset, int avail)
+{
+ struct fw_rsc_devmem *rsc = ptr;
+ struct rproc_mem_entry *mapping;
+ struct device *dev = &rproc->dev;
+ int ret;
+
+ /* no point in handling this resource without a valid iommu domain */
+ if (!rproc->domain)
+ return -EINVAL;
+
+ if (sizeof(*rsc) > avail) {
+ dev_err(dev, "devmem rsc is truncated\n");
+ return -EINVAL;
+ }
+
+ /* make sure reserved bytes are zeroes */
+ if (rsc->reserved) {
+ dev_err(dev, "devmem rsc has non zero reserved bytes\n");
+ return -EINVAL;
+ }
+
+ mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
+ if (!mapping)
+ return -ENOMEM;
+
+ ret = iommu_map(rproc->domain, rsc->da, rsc->pa, rsc->len, rsc->flags);
+ if (ret) {
+ dev_err(dev, "failed to map devmem: %d\n", ret);
+ goto out;
+ }
+
+ /*
+ * We'll need this info later when we'll want to unmap everything
+ * (e.g. on shutdown).
+ *
+ * We can't trust the remote processor not to change the resource
+ * table, so we must maintain this info independently.
+ */
+ mapping->da = rsc->da;
+ mapping->len = rsc->len;
+ list_add_tail(&mapping->node, &rproc->mappings);
+
+ dev_dbg(dev, "mapped devmem pa 0x%x, da 0x%x, len 0x%x\n",
+ rsc->pa, rsc->da, rsc->len);
+
+ return 0;
+
+out:
+ kfree(mapping);
+ return ret;
+}
+
+/**
+ * rproc_alloc_carveout() - allocate the specified carveout
+ * @rproc: rproc handle
+ * @mem: the memory entry to allocate
+ *
+ * This function allocates the specified memory entry @mem using
+ * dma_alloc_coherent() as the default allocator.
+ *
+ * Return: 0 on success, or an appropriate error code otherwise
+ */
+static int rproc_alloc_carveout(struct rproc *rproc,
+ struct rproc_mem_entry *mem)
+{
+ struct rproc_mem_entry *mapping = NULL;
+ struct device *dev = &rproc->dev;
+ dma_addr_t dma;
+ void *va;
+ int ret;
+
+ va = dma_alloc_coherent(dev->parent, mem->len, &dma, GFP_KERNEL);
+ if (!va) {
+ dev_err(dev->parent,
+ "failed to allocate dma memory: len 0x%zx\n",
+ mem->len);
+ return -ENOMEM;
+ }
+
+ dev_dbg(dev, "carveout va %pK, dma %pad, len 0x%zx\n",
+ va, &dma, mem->len);
+
+ if (mem->da != FW_RSC_ADDR_ANY && !rproc->domain) {
+		/*
+		 * Check that the requested da is equal to the dma address,
+		 * and print a warning in case of misalignment. Don't stop
+		 * the rproc_start sequence, as the coprocessor may build its
+		 * own pa-to-da translation on its side.
+		 */
+ if (mem->da != (u32)dma)
+ dev_warn(dev->parent,
+ "Allocated carveout doesn't fit device address request\n");
+ }
+
+ /*
+ * Ok, this is non-standard.
+ *
+ * Sometimes we can't rely on the generic iommu-based DMA API
+ * to dynamically allocate the device address and then set the IOMMU
+ * tables accordingly, because some remote processors might
+ * _require_ us to use hard coded device addresses that their
+ * firmware was compiled with.
+ *
+ * In this case, we must use the IOMMU API directly and map
+ * the memory to the device address as expected by the remote
+ * processor.
+ *
+ * Obviously such remote processor devices should not be configured
+ * to use the iommu-based DMA API: we expect 'dma' to contain the
+ * physical address in this case.
+ */
+ if (mem->da != FW_RSC_ADDR_ANY && rproc->domain) {
+ mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
+ if (!mapping) {
+ ret = -ENOMEM;
+ goto dma_free;
+ }
+
+ ret = iommu_map(rproc->domain, mem->da, dma, mem->len,
+ mem->flags);
+ if (ret) {
+ dev_err(dev, "iommu_map failed: %d\n", ret);
+ goto free_mapping;
+ }
+
+ /*
+ * We'll need this info later when we'll want to unmap
+ * everything (e.g. on shutdown).
+ *
+ * We can't trust the remote processor not to change the
+ * resource table, so we must maintain this info independently.
+ */
+ mapping->da = mem->da;
+ mapping->len = mem->len;
+ list_add_tail(&mapping->node, &rproc->mappings);
+
+ dev_dbg(dev, "carveout mapped 0x%x to %pad\n",
+ mem->da, &dma);
+ }
+
+ if (mem->da == FW_RSC_ADDR_ANY) {
+		/* Update the device address, left undefined by the requester */
+ if ((u64)dma & HIGH_BITS_MASK)
+ dev_warn(dev, "DMA address cast in 32bit to fit resource table format\n");
+
+ mem->da = (u32)dma;
+ }
+
+ mem->dma = dma;
+ mem->va = va;
+
+ return 0;
+
+free_mapping:
+ kfree(mapping);
+dma_free:
+ dma_free_coherent(dev->parent, mem->len, va, dma);
+ return ret;
+}
+
+/**
+ * rproc_release_carveout() - release acquired carveout
+ * @rproc: rproc handle
+ * @mem: the memory entry to release
+ *
+ * This function releases the specified memory entry @mem, allocated by
+ * @rproc via rproc_alloc_carveout().
+ *
+ * Return: 0 on success, or an appropriate error code otherwise
+ */
+static int rproc_release_carveout(struct rproc *rproc,
+ struct rproc_mem_entry *mem)
+{
+ struct device *dev = &rproc->dev;
+
+ /* clean up carveout allocations */
+ dma_free_coherent(dev->parent, mem->len, mem->va, mem->dma);
+ return 0;
+}
+
+/**
+ * rproc_handle_carveout() - handle phys contig memory allocation requests
+ * @rproc: rproc handle
+ * @ptr: the resource entry
+ * @offset: offset of the resource entry
+ * @avail: size of available data (for image validation)
+ *
+ * This function will handle firmware requests for allocation of physically
+ * contiguous memory regions.
+ *
+ * These request entries should come first in the firmware's resource table,
+ * as other firmware entries might request placing other data objects inside
+ * these memory regions (e.g. data/code segments, trace resource entries, ...).
+ *
+ * Allocating memory this way helps utilize the reserved physical memory
+ * (e.g. CMA) more efficiently, and also minimizes the number of TLB entries
+ * needed to map it (in case @rproc is using an IOMMU). Reducing the TLB
+ * pressure is important; it may have a substantial impact on performance.
+ *
+ * Return: 0 on success, or an appropriate error code otherwise
+ */
+static int rproc_handle_carveout(struct rproc *rproc,
+ void *ptr, int offset, int avail)
+{
+ struct fw_rsc_carveout *rsc = ptr;
+ struct rproc_mem_entry *carveout;
+ struct device *dev = &rproc->dev;
+
+ if (sizeof(*rsc) > avail) {
+ dev_err(dev, "carveout rsc is truncated\n");
+ return -EINVAL;
+ }
+
+ /* make sure reserved bytes are zeroes */
+ if (rsc->reserved) {
+ dev_err(dev, "carveout rsc has non zero reserved bytes\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "carveout rsc: name: %s, da 0x%x, pa 0x%x, len 0x%x, flags 0x%x\n",
+ rsc->name, rsc->da, rsc->pa, rsc->len, rsc->flags);
+
+	/*
+	 * Check whether this carveout rsc is already part of a registered
+	 * carveout; search by name, then check the da and length.
+	 */
+ carveout = rproc_find_carveout_by_name(rproc, rsc->name);
+
+ if (carveout) {
+ if (carveout->rsc_offset != FW_RSC_ADDR_ANY) {
+ dev_err(dev,
+ "Carveout already associated to resource table\n");
+ return -ENOMEM;
+ }
+
+ if (rproc_check_carveout_da(rproc, carveout, rsc->da, rsc->len))
+ return -ENOMEM;
+
+ /* Update memory carveout with resource table info */
+ carveout->rsc_offset = offset;
+ carveout->flags = rsc->flags;
+
+ return 0;
+ }
+
+ /* Register carveout in list */
+ carveout = rproc_mem_entry_init(dev, NULL, 0, rsc->len, rsc->da,
+ rproc_alloc_carveout,
+ rproc_release_carveout, rsc->name);
+ if (!carveout) {
+ dev_err(dev, "Can't allocate memory entry structure\n");
+ return -ENOMEM;
+ }
+
+ carveout->flags = rsc->flags;
+ carveout->rsc_offset = offset;
+ rproc_add_carveout(rproc, carveout);
+
+ return 0;
+}
+
+/**
+ * rproc_add_carveout() - register an allocated carveout region
+ * @rproc: rproc handle
+ * @mem: memory entry to register
+ *
+ * This function registers the specified memory entry in the @rproc
+ * carveouts list. The carveout should have been allocated before being
+ * registered.
+ */
+void rproc_add_carveout(struct rproc *rproc, struct rproc_mem_entry *mem)
+{
+ list_add_tail(&mem->node, &rproc->carveouts);
+}
+EXPORT_SYMBOL(rproc_add_carveout);
+
+/**
+ * rproc_mem_entry_init() - allocate and initialize rproc_mem_entry struct
+ * @dev: pointer to the device struct
+ * @va: virtual address
+ * @dma: dma address
+ * @len: memory carveout length
+ * @da: device address
+ * @alloc: memory carveout allocation function
+ * @release: memory carveout release function
+ * @name: carveout name
+ *
+ * This function allocates a rproc_mem_entry struct and fills it with the
+ * parameters provided by the client.
+ *
+ * Return: a valid pointer on success, or NULL on failure
+ */
+__printf(8, 9)
+struct rproc_mem_entry *
+rproc_mem_entry_init(struct device *dev,
+ void *va, dma_addr_t dma, size_t len, u32 da,
+ int (*alloc)(struct rproc *, struct rproc_mem_entry *),
+ int (*release)(struct rproc *, struct rproc_mem_entry *),
+ const char *name, ...)
+{
+ struct rproc_mem_entry *mem;
+ va_list args;
+
+ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+ if (!mem)
+ return mem;
+
+ mem->va = va;
+ mem->dma = dma;
+ mem->da = da;
+ mem->len = len;
+ mem->alloc = alloc;
+ mem->release = release;
+ mem->rsc_offset = FW_RSC_ADDR_ANY;
+ mem->of_resm_idx = -1;
+
+ va_start(args, name);
+ vsnprintf(mem->name, sizeof(mem->name), name, args);
+ va_end(args);
+
+ return mem;
+}
+EXPORT_SYMBOL(rproc_mem_entry_init);
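+
+/*
+ * A minimal usage sketch (illustrative only; the example_* names are
+ * hypothetical, and the pattern is modeled on existing platform drivers):
+ * pre-register a carveout backed by a fixed physical region, providing
+ * alloc/release callbacks that the core invokes while booting the rproc.
+ */
+static int example_mem_alloc(struct rproc *rproc,
+			     struct rproc_mem_entry *mem)
+{
+	/* make the fixed region CPU-accessible so segments can be copied in */
+	mem->va = (__force void *)ioremap_wc(mem->dma, mem->len);
+	if (!mem->va)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static int example_mem_release(struct rproc *rproc,
+			       struct rproc_mem_entry *mem)
+{
+	iounmap((__force void __iomem *)mem->va);
+	return 0;
+}
+
+static int example_add_fixed_carveout(struct rproc *rproc, phys_addr_t pa,
+				      size_t len, u32 da)
+{
+	struct rproc_mem_entry *mem;
+
+	mem = rproc_mem_entry_init(rproc->dev.parent, NULL, (dma_addr_t)pa,
+				   len, da, example_mem_alloc,
+				   example_mem_release, "example-carveout");
+	if (!mem)
+		return -ENOMEM;
+
+	rproc_add_carveout(rproc, mem);
+	return 0;
+}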
+
+/**
+ * rproc_of_resm_mem_entry_init() - allocate and initialize rproc_mem_entry struct
+ * from a reserved memory phandle
+ * @dev: pointer to the device struct
+ * @of_resm_idx: reserved memory phandle index in "memory-region"
+ * @len: memory carveout length
+ * @da: device address
+ * @name: carveout name
+ *
+ * This function allocates a rproc_mem_entry struct and fills it with the
+ * parameters provided by the client.
+ *
+ * Return: a valid pointer on success, or NULL on failure
+ */
+__printf(5, 6)
+struct rproc_mem_entry *
+rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, size_t len,
+ u32 da, const char *name, ...)
+{
+ struct rproc_mem_entry *mem;
+ va_list args;
+
+ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+ if (!mem)
+ return mem;
+
+ mem->da = da;
+ mem->len = len;
+ mem->rsc_offset = FW_RSC_ADDR_ANY;
+ mem->of_resm_idx = of_resm_idx;
+
+ va_start(args, name);
+ vsnprintf(mem->name, sizeof(mem->name), name, args);
+ va_end(args);
+
+ return mem;
+}
+EXPORT_SYMBOL(rproc_of_resm_mem_entry_init);
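+
+/*
+ * A usage sketch (illustrative; the example_* name is hypothetical, the
+ * pattern is borrowed from existing drivers): walk the "memory-region"
+ * phandles of the rproc's DT node and register each reserved region as a
+ * carveout that the core resolves by index at boot time.
+ */
+static int example_parse_memory_regions(struct rproc *rproc)
+{
+	struct device *dev = rproc->dev.parent;
+	struct of_phandle_iterator it;
+	struct rproc_mem_entry *mem;
+	struct reserved_mem *rmem;
+	int index = 0;
+
+	of_phandle_iterator_init(&it, dev->of_node, "memory-region", NULL, 0);
+	while (of_phandle_iterator_next(&it) == 0) {
+		rmem = of_reserved_mem_lookup(it.node);
+		if (!rmem) {
+			of_node_put(it.node);
+			return -EINVAL;
+		}
+
+		/* here the region's base doubles as the device address */
+		mem = rproc_of_resm_mem_entry_init(dev, index, rmem->size,
+						   rmem->base, it.node->name);
+		if (!mem) {
+			of_node_put(it.node);
+			return -ENOMEM;
+		}
+
+		rproc_add_carveout(rproc, mem);
+		index++;
+	}
+
+	return 0;
+}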
+
+/**
+ * rproc_of_parse_firmware() - parse and return the firmware-name
+ * @dev: pointer to the device struct representing an rproc
+ * @index: index to use for the firmware-name retrieval
+ * @fw_name: pointer to a character string, in which the firmware
+ * name is returned on success and unmodified otherwise.
+ *
+ * This is an OF helper function that parses a device's DT node for
+ * the "firmware-name" property and returns the firmware name pointer
+ * in @fw_name on success.
+ *
+ * Return: 0 on success, or an appropriate error code otherwise.
+ */
+int rproc_of_parse_firmware(struct device *dev, int index, const char **fw_name)
+{
+	return of_property_read_string_index(dev->of_node, "firmware-name",
+					     index, fw_name);
+}
+EXPORT_SYMBOL(rproc_of_parse_firmware);
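+
+/*
+ * A usage sketch (illustrative; names hypothetical): look up an optional
+ * "firmware-name" property at probe time, keeping a driver default when
+ * the property is absent. @fw_name is left untouched on failure, which is
+ * what makes the fallback pattern work.
+ */
+static const char *example_get_fw_name(struct device *dev)
+{
+	const char *fw_name = "example-default-fw.elf";
+
+	if (rproc_of_parse_firmware(dev, 0, &fw_name))
+		dev_dbg(dev, "no firmware-name in DT, using %s\n", fw_name);
+
+	return fw_name;
+}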
+
+/*
+ * A lookup table for resource handlers. The indices are defined in
+ * enum fw_resource_type.
+ */
+static rproc_handle_resource_t rproc_loading_handlers[RSC_LAST] = {
+ [RSC_CARVEOUT] = rproc_handle_carveout,
+ [RSC_DEVMEM] = rproc_handle_devmem,
+ [RSC_TRACE] = rproc_handle_trace,
+ [RSC_VDEV] = rproc_handle_vdev,
+};
+
+/* handle firmware resource entries before booting the remote processor */
+static int rproc_handle_resources(struct rproc *rproc,
+ rproc_handle_resource_t handlers[RSC_LAST])
+{
+ struct device *dev = &rproc->dev;
+ rproc_handle_resource_t handler;
+ int ret = 0, i;
+
+ if (!rproc->table_ptr)
+ return 0;
+
+ for (i = 0; i < rproc->table_ptr->num; i++) {
+ int offset = rproc->table_ptr->offset[i];
+ struct fw_rsc_hdr *hdr = (void *)rproc->table_ptr + offset;
+ int avail = rproc->table_sz - offset - sizeof(*hdr);
+ void *rsc = (void *)hdr + sizeof(*hdr);
+
+ /* make sure table isn't truncated */
+ if (avail < 0) {
+ dev_err(dev, "rsc table is truncated\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "rsc: type %d\n", hdr->type);
+
+ if (hdr->type >= RSC_VENDOR_START &&
+ hdr->type <= RSC_VENDOR_END) {
+ ret = rproc_handle_rsc(rproc, hdr->type, rsc,
+ offset + sizeof(*hdr), avail);
+ if (ret == RSC_HANDLED)
+ continue;
+ else if (ret < 0)
+ break;
+
+ dev_warn(dev, "unsupported vendor resource %d\n",
+ hdr->type);
+ continue;
+ }
+
+ if (hdr->type >= RSC_LAST) {
+ dev_warn(dev, "unsupported resource %d\n", hdr->type);
+ continue;
+ }
+
+ handler = handlers[hdr->type];
+ if (!handler)
+ continue;
+
+ ret = handler(rproc, rsc, offset + sizeof(*hdr), avail);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int rproc_prepare_subdevices(struct rproc *rproc)
+{
+ struct rproc_subdev *subdev;
+ int ret;
+
+ list_for_each_entry(subdev, &rproc->subdevs, node) {
+ if (subdev->prepare) {
+ ret = subdev->prepare(subdev);
+ if (ret)
+ goto unroll_preparation;
+ }
+ }
+
+ return 0;
+
+unroll_preparation:
+ list_for_each_entry_continue_reverse(subdev, &rproc->subdevs, node) {
+ if (subdev->unprepare)
+ subdev->unprepare(subdev);
+ }
+
+ return ret;
+}
+
+static int rproc_start_subdevices(struct rproc *rproc)
+{
+ struct rproc_subdev *subdev;
+ int ret;
+
+ list_for_each_entry(subdev, &rproc->subdevs, node) {
+ if (subdev->start) {
+ ret = subdev->start(subdev);
+ if (ret)
+ goto unroll_registration;
+ }
+ }
+
+ return 0;
+
+unroll_registration:
+ list_for_each_entry_continue_reverse(subdev, &rproc->subdevs, node) {
+ if (subdev->stop)
+ subdev->stop(subdev, true);
+ }
+
+ return ret;
+}
+
+static void rproc_stop_subdevices(struct rproc *rproc, bool crashed)
+{
+ struct rproc_subdev *subdev;
+
+ list_for_each_entry_reverse(subdev, &rproc->subdevs, node) {
+ if (subdev->stop)
+ subdev->stop(subdev, crashed);
+ }
+}
+
+static void rproc_unprepare_subdevices(struct rproc *rproc)
+{
+ struct rproc_subdev *subdev;
+
+ list_for_each_entry_reverse(subdev, &rproc->subdevs, node) {
+ if (subdev->unprepare)
+ subdev->unprepare(subdev);
+ }
+}
+
+/**
+ * rproc_alloc_registered_carveouts() - allocate all carveouts registered
+ * in the list
+ * @rproc: the remote processor handle
+ *
+ * This function parses the registered carveout list, performs allocation
+ * if an alloc() op is registered, and updates the resource table
+ * information if rsc_offset is set.
+ *
+ * Return: 0 on success
+ */
+static int rproc_alloc_registered_carveouts(struct rproc *rproc)
+{
+ struct rproc_mem_entry *entry, *tmp;
+ struct fw_rsc_carveout *rsc;
+ struct device *dev = &rproc->dev;
+ u64 pa;
+ int ret;
+
+ list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) {
+ if (entry->alloc) {
+ ret = entry->alloc(rproc, entry);
+ if (ret) {
+ dev_err(dev, "Unable to allocate carveout %s: %d\n",
+ entry->name, ret);
+ return -ENOMEM;
+ }
+ }
+
+ if (entry->rsc_offset != FW_RSC_ADDR_ANY) {
+ /* update resource table */
+ rsc = (void *)rproc->table_ptr + entry->rsc_offset;
+
+ /*
+ * Some remote processors might need to know the pa
+ * even though they are behind an IOMMU. E.g., OMAP4's
+ * remote M3 processor needs this so it can control
+ * on-chip hardware accelerators that are not behind
+			 * the IOMMU, and therefore must know the pa.
+ *
+ * Generally we don't want to expose physical addresses
+ * if we don't have to (remote processors are generally
+ * _not_ trusted), so we might want to do this only for
+			 * remote processors that _must_ have this (e.g. OMAP4's
+ * dual M3 subsystem).
+ *
+ * Non-IOMMU processors might also want to have this info.
+ * In this case, the device address and the physical address
+ * are the same.
+ */
+
+			/* Use va if defined, else dma, to generate the pa */
+ if (entry->va)
+ pa = (u64)rproc_va_to_pa(entry->va);
+ else
+ pa = (u64)entry->dma;
+
+ if (((u64)pa) & HIGH_BITS_MASK)
+ dev_warn(dev,
+ "Physical address cast in 32bit to fit resource table format\n");
+
+ rsc->pa = (u32)pa;
+ rsc->da = entry->da;
+ rsc->len = entry->len;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * rproc_resource_cleanup() - clean up and free all acquired resources
+ * @rproc: rproc handle
+ *
+ * This function will free all resources acquired for @rproc, and it
+ * is called whenever @rproc either shuts down or fails to boot.
+ */
+void rproc_resource_cleanup(struct rproc *rproc)
+{
+ struct rproc_mem_entry *entry, *tmp;
+ struct rproc_debug_trace *trace, *ttmp;
+ struct rproc_vdev *rvdev, *rvtmp;
+ struct device *dev = &rproc->dev;
+
+ /* clean up debugfs trace entries */
+ list_for_each_entry_safe(trace, ttmp, &rproc->traces, node) {
+ rproc_remove_trace_file(trace->tfile);
+ rproc->num_traces--;
+ list_del(&trace->node);
+ kfree(trace);
+ }
+
+ /* clean up iommu mapping entries */
+ list_for_each_entry_safe(entry, tmp, &rproc->mappings, node) {
+ size_t unmapped;
+
+ unmapped = iommu_unmap(rproc->domain, entry->da, entry->len);
+ if (unmapped != entry->len) {
+ /* nothing much to do besides complaining */
+ dev_err(dev, "failed to unmap %zx/%zu\n", entry->len,
+ unmapped);
+ }
+
+ list_del(&entry->node);
+ kfree(entry);
+ }
+
+ /* clean up carveout allocations */
+ list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) {
+ if (entry->release)
+ entry->release(rproc, entry);
+ list_del(&entry->node);
+ kfree(entry);
+ }
+
+ /* clean up remote vdev entries */
+ list_for_each_entry_safe(rvdev, rvtmp, &rproc->rvdevs, node)
+ platform_device_unregister(rvdev->pdev);
+
+ rproc_coredump_cleanup(rproc);
+}
+EXPORT_SYMBOL(rproc_resource_cleanup);
+
+static int rproc_start(struct rproc *rproc, const struct firmware *fw)
+{
+ struct resource_table *loaded_table;
+ struct device *dev = &rproc->dev;
+ int ret;
+
+ /* load the ELF segments to memory */
+ ret = rproc_load_segments(rproc, fw);
+ if (ret) {
+ dev_err(dev, "Failed to load program segments: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * The starting device has been given the rproc->cached_table as the
+ * resource table. The address of the vring along with the other
+ * allocated resources (carveouts etc) is stored in cached_table.
+ * In order to pass this information to the remote device we must copy
+ * this information to device memory. We also update the table_ptr so
+ * that any subsequent changes will be applied to the loaded version.
+ */
+ loaded_table = rproc_find_loaded_rsc_table(rproc, fw);
+ if (loaded_table) {
+ memcpy(loaded_table, rproc->cached_table, rproc->table_sz);
+ rproc->table_ptr = loaded_table;
+ }
+
+ ret = rproc_prepare_subdevices(rproc);
+ if (ret) {
+ dev_err(dev, "failed to prepare subdevices for %s: %d\n",
+ rproc->name, ret);
+ goto reset_table_ptr;
+ }
+
+ /* power up the remote processor */
+ ret = rproc->ops->start(rproc);
+ if (ret) {
+ dev_err(dev, "can't start rproc %s: %d\n", rproc->name, ret);
+ goto unprepare_subdevices;
+ }
+
+ /* Start any subdevices for the remote processor */
+ ret = rproc_start_subdevices(rproc);
+ if (ret) {
+ dev_err(dev, "failed to probe subdevices for %s: %d\n",
+ rproc->name, ret);
+ goto stop_rproc;
+ }
+
+ rproc->state = RPROC_RUNNING;
+
+ dev_info(dev, "remote processor %s is now up\n", rproc->name);
+
+ return 0;
+
+stop_rproc:
+ rproc->ops->stop(rproc);
+unprepare_subdevices:
+ rproc_unprepare_subdevices(rproc);
+reset_table_ptr:
+ rproc->table_ptr = rproc->cached_table;
+
+ return ret;
+}
+
+static int __rproc_attach(struct rproc *rproc)
+{
+ struct device *dev = &rproc->dev;
+ int ret;
+
+ ret = rproc_prepare_subdevices(rproc);
+ if (ret) {
+ dev_err(dev, "failed to prepare subdevices for %s: %d\n",
+ rproc->name, ret);
+ goto out;
+ }
+
+ /* Attach to the remote processor */
+ ret = rproc_attach_device(rproc);
+ if (ret) {
+ dev_err(dev, "can't attach to rproc %s: %d\n",
+ rproc->name, ret);
+ goto unprepare_subdevices;
+ }
+
+ /* Start any subdevices for the remote processor */
+ ret = rproc_start_subdevices(rproc);
+ if (ret) {
+ dev_err(dev, "failed to probe subdevices for %s: %d\n",
+ rproc->name, ret);
+ goto stop_rproc;
+ }
+
+ rproc->state = RPROC_ATTACHED;
+
+ dev_info(dev, "remote processor %s is now attached\n", rproc->name);
+
+ return 0;
+
+stop_rproc:
+ rproc->ops->stop(rproc);
+unprepare_subdevices:
+ rproc_unprepare_subdevices(rproc);
+out:
+ return ret;
+}
+
+/*
+ * take a firmware and boot a remote processor with it.
+ */
+static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
+{
+ struct device *dev = &rproc->dev;
+ const char *name = rproc->firmware;
+ int ret;
+
+ ret = rproc_fw_sanity_check(rproc, fw);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "Booting fw image %s, size %zd\n", name, fw->size);
+
+ /*
+ * if enabling an IOMMU isn't relevant for this rproc, this is
+ * just a nop
+ */
+ ret = rproc_enable_iommu(rproc);
+ if (ret) {
+ dev_err(dev, "can't enable iommu: %d\n", ret);
+ return ret;
+ }
+
+ /* Prepare rproc for firmware loading if needed */
+ ret = rproc_prepare_device(rproc);
+ if (ret) {
+ dev_err(dev, "can't prepare rproc %s: %d\n", rproc->name, ret);
+ goto disable_iommu;
+ }
+
+ rproc->bootaddr = rproc_get_boot_addr(rproc, fw);
+
+ /* Load resource table, core dump segment list etc from the firmware */
+ ret = rproc_parse_fw(rproc, fw);
+ if (ret)
+ goto unprepare_rproc;
+
+ /* reset max_notifyid */
+ rproc->max_notifyid = -1;
+
+ /* reset handled vdev */
+ rproc->nb_vdev = 0;
+
+ /* handle fw resources which are required to boot rproc */
+ ret = rproc_handle_resources(rproc, rproc_loading_handlers);
+ if (ret) {
+ dev_err(dev, "Failed to process resources: %d\n", ret);
+ goto clean_up_resources;
+ }
+
+ /* Allocate carveout resources associated to rproc */
+ ret = rproc_alloc_registered_carveouts(rproc);
+ if (ret) {
+ dev_err(dev, "Failed to allocate associated carveouts: %d\n",
+ ret);
+ goto clean_up_resources;
+ }
+
+ ret = rproc_start(rproc, fw);
+ if (ret)
+ goto clean_up_resources;
+
+ return 0;
+
+clean_up_resources:
+ rproc_resource_cleanup(rproc);
+ kfree(rproc->cached_table);
+ rproc->cached_table = NULL;
+ rproc->table_ptr = NULL;
+unprepare_rproc:
+ /* release HW resources if needed */
+ rproc_unprepare_device(rproc);
+disable_iommu:
+ rproc_disable_iommu(rproc);
+ return ret;
+}
+
+static int rproc_set_rsc_table(struct rproc *rproc)
+{
+ struct resource_table *table_ptr;
+ struct device *dev = &rproc->dev;
+ size_t table_sz;
+ int ret;
+
+ table_ptr = rproc_get_loaded_rsc_table(rproc, &table_sz);
+ if (!table_ptr) {
+ /* Not having a resource table is acceptable */
+ return 0;
+ }
+
+ if (IS_ERR(table_ptr)) {
+ ret = PTR_ERR(table_ptr);
+ dev_err(dev, "can't load resource table: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * If it is possible to detach the remote processor, keep an untouched
+ * copy of the resource table. That way we can start fresh again when
+ * the remote processor is re-attached, that is:
+ *
+ * DETACHED -> ATTACHED -> DETACHED -> ATTACHED
+ *
+ * Free'd in rproc_reset_rsc_table_on_detach() and
+ * rproc_reset_rsc_table_on_stop().
+ */
+ if (rproc->ops->detach) {
+ rproc->clean_table = kmemdup(table_ptr, table_sz, GFP_KERNEL);
+ if (!rproc->clean_table)
+ return -ENOMEM;
+ } else {
+ rproc->clean_table = NULL;
+ }
+
+ rproc->cached_table = NULL;
+ rproc->table_ptr = table_ptr;
+ rproc->table_sz = table_sz;
+
+ return 0;
+}
+
+static int rproc_reset_rsc_table_on_detach(struct rproc *rproc)
+{
+ struct resource_table *table_ptr;
+
+ /* A resource table was never retrieved, nothing to do here */
+ if (!rproc->table_ptr)
+ return 0;
+
+ /*
+ * If we made it to this point a clean_table _must_ have been
+ * allocated in rproc_set_rsc_table(). If one isn't present
+ * something went really wrong and we must complain.
+ */
+ if (WARN_ON(!rproc->clean_table))
+ return -EINVAL;
+
+ /* Remember where the external entity installed the resource table */
+ table_ptr = rproc->table_ptr;
+
+ /*
+ * If we made it here the remote processor was started by another
+ * entity and a cache table doesn't exist. As such make a copy of
+ * the resource table currently used by the remote processor and
+ * use that for the rest of the shutdown process. The memory
+ * allocated here is free'd in rproc_detach().
+ */
+ rproc->cached_table = kmemdup(rproc->table_ptr,
+ rproc->table_sz, GFP_KERNEL);
+ if (!rproc->cached_table)
+ return -ENOMEM;
+
+ /*
+ * Use a copy of the resource table for the remainder of the
+ * shutdown process.
+ */
+ rproc->table_ptr = rproc->cached_table;
+
+ /*
+ * Reset the memory area where the firmware loaded the resource table
+ * to its original value. That way when we re-attach the remote
+ * processor the resource table is clean and ready to be used again.
+ */
+ memcpy(table_ptr, rproc->clean_table, rproc->table_sz);
+
+ /*
+ * The clean resource table is no longer needed. Allocated in
+ * rproc_set_rsc_table().
+ */
+ kfree(rproc->clean_table);
+
+ return 0;
+}
+
+static int rproc_reset_rsc_table_on_stop(struct rproc *rproc)
+{
+ /* A resource table was never retrieved, nothing to do here */
+ if (!rproc->table_ptr)
+ return 0;
+
+ /*
+ * If a cache table exists the remote processor was started by
+ * the remoteproc core. That cache table should be used for
+ * the rest of the shutdown process.
+ */
+ if (rproc->cached_table)
+ goto out;
+
+ /*
+ * If we made it here the remote processor was started by another
+ * entity and a cache table doesn't exist. As such make a copy of
+ * the resource table currently used by the remote processor and
+ * use that for the rest of the shutdown process. The memory
+ * allocated here is free'd in rproc_shutdown().
+ */
+ rproc->cached_table = kmemdup(rproc->table_ptr,
+ rproc->table_sz, GFP_KERNEL);
+ if (!rproc->cached_table)
+ return -ENOMEM;
+
+ /*
+ * Since the remote processor is being switched off the clean table
+ * won't be needed. Allocated in rproc_set_rsc_table().
+ */
+ kfree(rproc->clean_table);
+
+out:
+ /*
+ * Use a copy of the resource table for the remainder of the
+ * shutdown process.
+ */
+ rproc->table_ptr = rproc->cached_table;
+ return 0;
+}
+
+/*
+ * Attach to remote processor - similar to rproc_fw_boot() but without
+ * the steps that deal with the firmware image.
+ */
+static int rproc_attach(struct rproc *rproc)
+{
+ struct device *dev = &rproc->dev;
+ int ret;
+
+ /*
+ * if enabling an IOMMU isn't relevant for this rproc, this is
+ * just a nop
+ */
+ ret = rproc_enable_iommu(rproc);
+ if (ret) {
+ dev_err(dev, "can't enable iommu: %d\n", ret);
+ return ret;
+ }
+
+	/* Do anything that is needed to attach to the remote processor */
+ ret = rproc_prepare_device(rproc);
+ if (ret) {
+ dev_err(dev, "can't prepare rproc %s: %d\n", rproc->name, ret);
+ goto disable_iommu;
+ }
+
+ ret = rproc_set_rsc_table(rproc);
+ if (ret) {
+ dev_err(dev, "can't load resource table: %d\n", ret);
+ goto unprepare_device;
+ }
+
+ /* reset max_notifyid */
+ rproc->max_notifyid = -1;
+
+ /* reset handled vdev */
+ rproc->nb_vdev = 0;
+
+ /*
+ * Handle firmware resources required to attach to a remote processor.
+ * Because we are attaching rather than booting the remote processor,
+ * we expect the platform driver to properly set rproc->table_ptr.
+ */
+ ret = rproc_handle_resources(rproc, rproc_loading_handlers);
+ if (ret) {
+ dev_err(dev, "Failed to process resources: %d\n", ret);
+ goto unprepare_device;
+ }
+
+ /* Allocate carveout resources associated to rproc */
+ ret = rproc_alloc_registered_carveouts(rproc);
+ if (ret) {
+ dev_err(dev, "Failed to allocate associated carveouts: %d\n",
+ ret);
+ goto clean_up_resources;
+ }
+
+ ret = __rproc_attach(rproc);
+ if (ret)
+ goto clean_up_resources;
+
+ return 0;
+
+clean_up_resources:
+ rproc_resource_cleanup(rproc);
+unprepare_device:
+ /* release HW resources if needed */
+ rproc_unprepare_device(rproc);
+disable_iommu:
+ rproc_disable_iommu(rproc);
+ return ret;
+}
+
+/*
+ * take a firmware and boot it up.
+ *
+ * Note: this function is called asynchronously upon registration of the
+ * remote processor (so we must wait until it completes before we try
+ * to unregister the device; one other option is just to use a kref here,
+ * which might be cleaner).
+ */
+static void rproc_auto_boot_callback(const struct firmware *fw, void *context)
+{
+ struct rproc *rproc = context;
+
+ rproc_boot(rproc);
+
+ release_firmware(fw);
+}
+
+static int rproc_trigger_auto_boot(struct rproc *rproc)
+{
+ int ret;
+
+ /*
+ * Since the remote processor is in a detached state, it has already
+ * been booted by another entity. As such there is no point in waiting
+ * for a firmware image to be loaded, we can simply initiate the process
+ * of attaching to it immediately.
+ */
+ if (rproc->state == RPROC_DETACHED)
+ return rproc_boot(rproc);
+
+ /*
+ * We're initiating an asynchronous firmware loading, so we can
+ * be built-in kernel code, without hanging the boot process.
+ */
+ ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
+ rproc->firmware, &rproc->dev, GFP_KERNEL,
+ rproc, rproc_auto_boot_callback);
+ if (ret < 0)
+ dev_err(&rproc->dev, "request_firmware_nowait err: %d\n", ret);
+
+ return ret;
+}
+
+static int rproc_stop(struct rproc *rproc, bool crashed)
+{
+ struct device *dev = &rproc->dev;
+ int ret;
+
+ /* No need to continue if a stop() operation has not been provided */
+ if (!rproc->ops->stop)
+ return -EINVAL;
+
+ /* Stop any subdevices for the remote processor */
+ rproc_stop_subdevices(rproc, crashed);
+
+ /* the installed resource table is no longer accessible */
+ ret = rproc_reset_rsc_table_on_stop(rproc);
+ if (ret) {
+ dev_err(dev, "can't reset resource table: %d\n", ret);
+ return ret;
+ }
+
+ /* power off the remote processor */
+ ret = rproc->ops->stop(rproc);
+ if (ret) {
+ dev_err(dev, "can't stop rproc: %d\n", ret);
+ return ret;
+ }
+
+ rproc_unprepare_subdevices(rproc);
+
+ rproc->state = RPROC_OFFLINE;
+
+ dev_info(dev, "stopped remote processor %s\n", rproc->name);
+
+ return 0;
+}
+
+/*
+ * __rproc_detach(): Does the opposite of __rproc_attach()
+ */
+static int __rproc_detach(struct rproc *rproc)
+{
+ struct device *dev = &rproc->dev;
+ int ret;
+
+ /* No need to continue if a detach() operation has not been provided */
+ if (!rproc->ops->detach)
+ return -EINVAL;
+
+ /* Stop any subdevices for the remote processor */
+ rproc_stop_subdevices(rproc, false);
+
+ /* the installed resource table is no longer accessible */
+ ret = rproc_reset_rsc_table_on_detach(rproc);
+ if (ret) {
+ dev_err(dev, "can't reset resource table: %d\n", ret);
+ return ret;
+ }
+
+ /* Tell the remote processor the core isn't available anymore */
+ ret = rproc->ops->detach(rproc);
+ if (ret) {
+ dev_err(dev, "can't detach from rproc: %d\n", ret);
+ return ret;
+ }
+
+ rproc_unprepare_subdevices(rproc);
+
+ rproc->state = RPROC_DETACHED;
+
+ dev_info(dev, "detached remote processor %s\n", rproc->name);
+
+ return 0;
+}
+
+static int rproc_attach_recovery(struct rproc *rproc)
+{
+ int ret;
+
+ ret = __rproc_detach(rproc);
+ if (ret)
+ return ret;
+
+ return __rproc_attach(rproc);
+}
+
+static int rproc_boot_recovery(struct rproc *rproc)
+{
+ const struct firmware *firmware_p;
+ struct device *dev = &rproc->dev;
+ int ret;
+
+ ret = rproc_stop(rproc, true);
+ if (ret)
+ return ret;
+
+ /* generate coredump */
+ rproc->ops->coredump(rproc);
+
+ /* load firmware */
+ ret = request_firmware(&firmware_p, rproc->firmware, dev);
+ if (ret < 0) {
+ dev_err(dev, "request_firmware failed: %d\n", ret);
+ return ret;
+ }
+
+ /* boot the remote processor up again */
+ ret = rproc_start(rproc, firmware_p);
+
+ release_firmware(firmware_p);
+
+ return ret;
+}
+
+/**
+ * rproc_trigger_recovery() - recover a remoteproc
+ * @rproc: the remote processor
+ *
+ * The recovery is done by resetting all the virtio devices; that way all
+ * the rpmsg drivers will be reset along with the remote processor, making
+ * the remoteproc functional again.
+ *
+ * This function can sleep, so it cannot be called from atomic context.
+ *
+ * Return: 0 on success or a negative value upon failure
+ */
+int rproc_trigger_recovery(struct rproc *rproc)
+{
+ struct device *dev = &rproc->dev;
+ int ret;
+
+ ret = mutex_lock_interruptible(&rproc->lock);
+ if (ret)
+ return ret;
+
+ /* State could have changed before we got the mutex */
+ if (rproc->state != RPROC_CRASHED)
+ goto unlock_mutex;
+
+ dev_err(dev, "recovering %s\n", rproc->name);
+
+ if (rproc_has_feature(rproc, RPROC_FEAT_ATTACH_ON_RECOVERY))
+ ret = rproc_attach_recovery(rproc);
+ else
+ ret = rproc_boot_recovery(rproc);
+
+unlock_mutex:
+ mutex_unlock(&rproc->lock);
+ return ret;
+}
+
+/**
+ * rproc_crash_handler_work() - handle a crash
+ * @work: work treating the crash
+ *
+ * This function needs to handle everything related to a crash, like CPU
+ * registers and stack dumps, information to help debug the fatal error, etc.
+ */
+static void rproc_crash_handler_work(struct work_struct *work)
+{
+ struct rproc *rproc = container_of(work, struct rproc, crash_handler);
+ struct device *dev = &rproc->dev;
+
+ dev_dbg(dev, "enter %s\n", __func__);
+
+ mutex_lock(&rproc->lock);
+
+ if (rproc->state == RPROC_CRASHED) {
+ /* handle only the first crash detected */
+ mutex_unlock(&rproc->lock);
+ return;
+ }
+
+ if (rproc->state == RPROC_OFFLINE) {
+ /* Don't recover if the remote processor was stopped */
+ mutex_unlock(&rproc->lock);
+ goto out;
+ }
+
+ rproc->state = RPROC_CRASHED;
+ dev_err(dev, "handling crash #%u in %s\n", ++rproc->crash_cnt,
+ rproc->name);
+
+ mutex_unlock(&rproc->lock);
+
+ if (!rproc->recovery_disabled)
+ rproc_trigger_recovery(rproc);
+
+out:
+ pm_relax(rproc->dev.parent);
+}
+
+/**
+ * rproc_boot() - boot a remote processor
+ * @rproc: handle of a remote processor
+ *
+ * Boot a remote processor (i.e. load its firmware, power it on, ...).
+ *
+ * If the remote processor is already powered on, this function immediately
+ * returns (successfully).
+ *
+ * Return: 0 on success, and an appropriate error value otherwise
+ */
+int rproc_boot(struct rproc *rproc)
+{
+ const struct firmware *firmware_p;
+ struct device *dev;
+ int ret;
+
+ if (!rproc) {
+ pr_err("invalid rproc handle\n");
+ return -EINVAL;
+ }
+
+ dev = &rproc->dev;
+
+ ret = mutex_lock_interruptible(&rproc->lock);
+ if (ret) {
+ dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
+ return ret;
+ }
+
+ if (rproc->state == RPROC_DELETED) {
+ ret = -ENODEV;
+ dev_err(dev, "can't boot deleted rproc %s\n", rproc->name);
+ goto unlock_mutex;
+ }
+
+ /* skip the boot or attach process if rproc is already powered up */
+ if (atomic_inc_return(&rproc->power) > 1) {
+ ret = 0;
+ goto unlock_mutex;
+ }
+
+ if (rproc->state == RPROC_DETACHED) {
+ dev_info(dev, "attaching to %s\n", rproc->name);
+
+ ret = rproc_attach(rproc);
+ } else {
+ dev_info(dev, "powering up %s\n", rproc->name);
+
+ /* load firmware */
+ ret = request_firmware(&firmware_p, rproc->firmware, dev);
+ if (ret < 0) {
+ dev_err(dev, "request_firmware failed: %d\n", ret);
+ goto downref_rproc;
+ }
+
+ ret = rproc_fw_boot(rproc, firmware_p);
+
+ release_firmware(firmware_p);
+ }
+
+downref_rproc:
+ if (ret)
+ atomic_dec(&rproc->power);
+unlock_mutex:
+ mutex_unlock(&rproc->lock);
+ return ret;
+}
+EXPORT_SYMBOL(rproc_boot);
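+
+/*
+ * A usage sketch (illustrative; the example_* name is hypothetical): a
+ * client that needs the remote processor running for the duration of an
+ * operation brackets it with rproc_boot()/rproc_shutdown(). The power
+ * refcount makes this safe with multiple concurrent users.
+ */
+static int example_run_with_rproc(struct rproc *rproc)
+{
+	int ret;
+
+	ret = rproc_boot(rproc);	/* only the first user really boots */
+	if (ret)
+		return ret;
+
+	/* ... communicate with the remote processor, e.g. over rpmsg ... */
+
+	return rproc_shutdown(rproc);	/* drops this user's power ref */
+}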
+
+/**
+ * rproc_shutdown() - power off the remote processor
+ * @rproc: the remote processor
+ *
+ * Power off a remote processor (previously booted with rproc_boot()).
+ *
+ * In case @rproc is still being used by an additional user(s), then
+ * this function will just decrement the power refcount and exit,
+ * without really powering off the device.
+ *
+ * Every call to rproc_boot() must (eventually) be accompanied by a call
+ * to rproc_shutdown(). Calling rproc_shutdown() redundantly is a bug.
+ *
+ * Notes:
+ * - we're not decrementing the rproc's refcount, only the power refcount,
+ *   which means that the @rproc handle stays valid even after rproc_shutdown()
+ * returns, and users can still use it with a subsequent rproc_boot(), if
+ * needed.
+ *
+ * Return: 0 on success, and an appropriate error value otherwise
+ */
+int rproc_shutdown(struct rproc *rproc)
+{
+ struct device *dev = &rproc->dev;
+ int ret = 0;
+
+ ret = mutex_lock_interruptible(&rproc->lock);
+ if (ret) {
+ dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
+ return ret;
+ }
+
+ if (rproc->state != RPROC_RUNNING &&
+ rproc->state != RPROC_ATTACHED) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* if the remote proc is still needed, bail out */
+ if (!atomic_dec_and_test(&rproc->power))
+ goto out;
+
+ ret = rproc_stop(rproc, false);
+ if (ret) {
+ atomic_inc(&rproc->power);
+ goto out;
+ }
+
+ /* clean up all acquired resources */
+ rproc_resource_cleanup(rproc);
+
+ /* release HW resources if needed */
+ rproc_unprepare_device(rproc);
+
+ rproc_disable_iommu(rproc);
+
+ /* Free the copy of the resource table */
+ kfree(rproc->cached_table);
+ rproc->cached_table = NULL;
+ rproc->table_ptr = NULL;
+out:
+ mutex_unlock(&rproc->lock);
+ return ret;
+}
+EXPORT_SYMBOL(rproc_shutdown);
+
+/**
+ * rproc_detach() - Detach the remote processor from the
+ * remoteproc core
+ *
+ * @rproc: the remote processor
+ *
+ * Detach a remote processor (previously attached to with rproc_attach()).
+ *
+ * In case @rproc is still being used by an additional user(s), then
+ * this function will just decrement the power refcount and exit,
+ * without disconnecting the device.
+ *
+ * Function rproc_detach() calls __rproc_detach() in order to let a remote
+ * processor know that services provided by the application processor are
+ * no longer available. From there it should be possible to remove the
+ * platform driver and even power cycle the application processor (if the HW
+ * supports it) without needing to switch off the remote processor.
+ *
+ * Return: 0 on success, and an appropriate error value otherwise
+ */
+int rproc_detach(struct rproc *rproc)
+{
+ struct device *dev = &rproc->dev;
+ int ret;
+
+ ret = mutex_lock_interruptible(&rproc->lock);
+ if (ret) {
+ dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
+ return ret;
+ }
+
+ if (rproc->state != RPROC_ATTACHED) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* if the remote proc is still needed, bail out */
+ if (!atomic_dec_and_test(&rproc->power)) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = __rproc_detach(rproc);
+ if (ret) {
+ atomic_inc(&rproc->power);
+ goto out;
+ }
+
+ /* clean up all acquired resources */
+ rproc_resource_cleanup(rproc);
+
+ /* release HW resources if needed */
+ rproc_unprepare_device(rproc);
+
+ rproc_disable_iommu(rproc);
+
+ /* Free the copy of the resource table */
+ kfree(rproc->cached_table);
+ rproc->cached_table = NULL;
+ rproc->table_ptr = NULL;
+out:
+ mutex_unlock(&rproc->lock);
+ return ret;
+}
+EXPORT_SYMBOL(rproc_detach);
+
+/**
+ * rproc_get_by_phandle() - find a remote processor by phandle
+ * @phandle: phandle to the rproc
+ *
+ * Finds an rproc handle using the remote processor's phandle, and then
+ * returns a handle to the rproc.
+ *
+ * This function increments the remote processor's refcount, so always
+ * use rproc_put() to decrement it back once rproc isn't needed anymore.
+ *
+ * Return: rproc handle on success, and NULL on failure
+ */
+#ifdef CONFIG_OF
+struct rproc *rproc_get_by_phandle(phandle phandle)
+{
+ struct rproc *rproc = NULL, *r;
+ struct device_node *np;
+
+ np = of_find_node_by_phandle(phandle);
+ if (!np)
+ return NULL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(r, &rproc_list, node) {
+ if (r->dev.parent && r->dev.parent->of_node == np) {
+ /* prevent underlying implementation from being removed */
+ if (!try_module_get(r->dev.parent->driver->owner)) {
+ dev_err(&r->dev, "can't get owner\n");
+ break;
+ }
+
+ rproc = r;
+ get_device(&rproc->dev);
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ of_node_put(np);
+
+ return rproc;
+}
+#else
+struct rproc *rproc_get_by_phandle(phandle phandle)
+{
+ return NULL;
+}
+#endif
+EXPORT_SYMBOL(rproc_get_by_phandle);
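+
+/*
+ * A usage sketch (illustrative; the "example,rproc" binding and the
+ * example_* name are hypothetical): resolve a client's phandle property
+ * into an rproc handle. The caller owns a reference and must drop it
+ * with rproc_put() when done.
+ */
+static struct rproc *example_lookup_rproc(struct device *dev)
+{
+	struct rproc *rproc;
+	u32 ph;
+
+	if (of_property_read_u32(dev->of_node, "example,rproc", &ph))
+		return ERR_PTR(-ENODEV);
+
+	rproc = rproc_get_by_phandle(ph);
+	if (!rproc)
+		return ERR_PTR(-EPROBE_DEFER);	/* may not be registered yet */
+
+	return rproc;
+}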
+
+/**
+ * rproc_set_firmware() - assign a new firmware
+ * @rproc: rproc handle to which the new firmware is being assigned
+ * @fw_name: new firmware name to be assigned
+ *
+ * This function allows remoteproc drivers or clients to configure a custom
+ * firmware name that is different from the default name used during remoteproc
+ * registration. The function does not trigger a remote processor boot,
+ * only sets the firmware name used for a subsequent boot. This function
+ * should also be called only when the remote processor is offline.
+ *
+ * This allows either userspace to configure a different name through
+ * sysfs, or a kernel-level remoteproc or remoteproc client driver to set
+ * a specific firmware when it is controlling the boot and shutdown of the
+ * remote processor.
+ *
+ * Return: 0 on success or a negative value upon failure
+ */
+int rproc_set_firmware(struct rproc *rproc, const char *fw_name)
+{
+ struct device *dev;
+ int ret, len;
+ char *p;
+
+ if (!rproc || !fw_name)
+ return -EINVAL;
+
+ dev = rproc->dev.parent;
+
+ ret = mutex_lock_interruptible(&rproc->lock);
+ if (ret) {
+ dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
+ return -EINVAL;
+ }
+
+ if (rproc->state != RPROC_OFFLINE) {
+ dev_err(dev, "can't change firmware while running\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ len = strcspn(fw_name, "\n");
+ if (!len) {
+ dev_err(dev, "can't provide empty string for firmware name\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ p = kstrndup(fw_name, len, GFP_KERNEL);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ kfree_const(rproc->firmware);
+ rproc->firmware = p;
+
+out:
+ mutex_unlock(&rproc->lock);
+ return ret;
+}
+EXPORT_SYMBOL(rproc_set_firmware);
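+
+/*
+ * A usage sketch (illustrative; the firmware file name and the example_*
+ * name are hypothetical): a client driver selecting a workload-specific
+ * image for a currently offline processor before booting it.
+ */
+static int example_boot_custom_fw(struct rproc *rproc)
+{
+	int ret;
+
+	ret = rproc_set_firmware(rproc, "example-dsp-filter.elf");
+	if (ret)
+		return ret;
+
+	return rproc_boot(rproc);
+}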
+
+static int rproc_validate(struct rproc *rproc)
+{
+ switch (rproc->state) {
+ case RPROC_OFFLINE:
+ /*
+ * An offline processor without a start()
+ * function makes no sense.
+ */
+ if (!rproc->ops->start)
+ return -EINVAL;
+ break;
+ case RPROC_DETACHED:
+ /*
+ * A remote processor in a detached state without an
+		 * attach() function makes no sense.
+ */
+ if (!rproc->ops->attach)
+ return -EINVAL;
+ /*
+ * When attaching to a remote processor the device memory
+ * is already available and as such there is no need to have a
+ * cached table.
+ */
+ if (rproc->cached_table)
+ return -EINVAL;
+ break;
+ default:
+ /*
+ * When adding a remote processor, the state of the device
+ * can be offline or detached, nothing else.
+ */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * rproc_add() - register a remote processor
+ * @rproc: the remote processor handle to register
+ *
+ * Registers @rproc with the remoteproc framework, after it has been
+ * allocated with rproc_alloc().
+ *
+ * This is called by the platform-specific rproc implementation, whenever
+ * a new remote processor device is probed.
+ *
+ * Note: this function initiates an asynchronous firmware loading
+ * context, which will look for virtio devices supported by the rproc's
+ * firmware.
+ *
+ * If found, those virtio devices will be created and added, so as a result
+ * of registering this remote processor, additional virtio drivers might be
+ * probed.
+ *
+ * Return: 0 on success and an appropriate error code otherwise
+ */
+int rproc_add(struct rproc *rproc)
+{
+ struct device *dev = &rproc->dev;
+ int ret;
+
+ ret = rproc_validate(rproc);
+ if (ret < 0)
+ return ret;
+
+ /* add char device for this remoteproc */
+ ret = rproc_char_device_add(rproc);
+ if (ret < 0)
+ return ret;
+
+ ret = device_add(dev);
+ if (ret < 0) {
+ put_device(dev);
+ goto rproc_remove_cdev;
+ }
+
+ dev_info(dev, "%s is available\n", rproc->name);
+
+ /* create debugfs entries */
+ rproc_create_debug_dir(rproc);
+
+ /* if rproc is marked always-on, request it to boot */
+ if (rproc->auto_boot) {
+ ret = rproc_trigger_auto_boot(rproc);
+ if (ret < 0)
+ goto rproc_remove_dev;
+ }
+
+ /* expose to rproc_get_by_phandle users */
+ mutex_lock(&rproc_list_mutex);
+ list_add_rcu(&rproc->node, &rproc_list);
+ mutex_unlock(&rproc_list_mutex);
+
+ return 0;
+
+rproc_remove_dev:
+ rproc_delete_debug_dir(rproc);
+ device_del(dev);
+rproc_remove_cdev:
+ rproc_char_device_remove(rproc);
+ return ret;
+}
+EXPORT_SYMBOL(rproc_add);
+
+static void devm_rproc_remove(void *rproc)
+{
+ rproc_del(rproc);
+}
+
+/**
+ * devm_rproc_add() - resource managed rproc_add()
+ * @dev: the underlying device
+ * @rproc: the remote processor handle to register
+ *
+ * This function performs like rproc_add() but the registered rproc device will
+ * automatically be removed on driver detach.
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+int devm_rproc_add(struct device *dev, struct rproc *rproc)
+{
+ int err;
+
+ err = rproc_add(rproc);
+ if (err)
+ return err;
+
+ return devm_add_action_or_reset(dev, devm_rproc_remove, rproc);
+}
+EXPORT_SYMBOL(devm_rproc_add);
+
+/**
+ * rproc_type_release() - release a remote processor instance
+ * @dev: the rproc's device
+ *
+ * This function should _never_ be called directly.
+ *
+ * It will be called by the driver core when no one holds a valid pointer
+ * to @dev anymore.
+ */
+static void rproc_type_release(struct device *dev)
+{
+ struct rproc *rproc = container_of(dev, struct rproc, dev);
+
+ dev_info(&rproc->dev, "releasing %s\n", rproc->name);
+
+ idr_destroy(&rproc->notifyids);
+
+ if (rproc->index >= 0)
+ ida_free(&rproc_dev_index, rproc->index);
+
+ kfree_const(rproc->firmware);
+ kfree_const(rproc->name);
+ kfree(rproc->ops);
+ kfree(rproc);
+}
+
+static const struct device_type rproc_type = {
+ .name = "remoteproc",
+ .release = rproc_type_release,
+};
+
+static int rproc_alloc_firmware(struct rproc *rproc,
+ const char *name, const char *firmware)
+{
+ const char *p;
+
+ /*
+ * Allocate a firmware name if the caller gave us one to work
+ * with. Otherwise construct a new one using a default pattern.
+ */
+ if (firmware)
+ p = kstrdup_const(firmware, GFP_KERNEL);
+ else
+ p = kasprintf(GFP_KERNEL, "rproc-%s-fw", name);
+
+ if (!p)
+ return -ENOMEM;
+
+ rproc->firmware = p;
+
+ return 0;
+}
+
+static int rproc_alloc_ops(struct rproc *rproc, const struct rproc_ops *ops)
+{
+ rproc->ops = kmemdup(ops, sizeof(*ops), GFP_KERNEL);
+ if (!rproc->ops)
+ return -ENOMEM;
+
+ /* Default to rproc_coredump if no coredump function is specified */
+ if (!rproc->ops->coredump)
+ rproc->ops->coredump = rproc_coredump;
+
+ if (rproc->ops->load)
+ return 0;
+
+ /* Default to ELF loader if no load function is specified */
+ rproc->ops->load = rproc_elf_load_segments;
+ rproc->ops->parse_fw = rproc_elf_load_rsc_table;
+ rproc->ops->find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table;
+ rproc->ops->sanity_check = rproc_elf_sanity_check;
+ rproc->ops->get_boot_addr = rproc_elf_get_boot_addr;
+
+ return 0;
+}
+
+/**
+ * rproc_alloc() - allocate a remote processor handle
+ * @dev: the underlying device
+ * @name: name of this remote processor
+ * @ops: platform-specific handlers (mainly start/stop)
+ * @firmware: name of firmware file to load, can be NULL
+ * @len: length of private data needed by the rproc driver (in bytes)
+ *
+ * Allocates a new remote processor handle, but does not register
+ * it yet. If @firmware is NULL, a default name is used.
+ *
+ * This function should be used by rproc implementations during initialization
+ * of the remote processor.
+ *
+ * After creating an rproc handle using this function, and when ready,
+ * implementations should then call rproc_add() to complete
+ * the registration of the remote processor.
+ *
+ * Note: _never_ directly deallocate @rproc, even if it was not registered
+ * yet. Instead, when you need to unroll rproc_alloc(), use rproc_free().
+ *
+ * Return: new rproc pointer on success, and NULL on failure
+ */
+struct rproc *rproc_alloc(struct device *dev, const char *name,
+ const struct rproc_ops *ops,
+ const char *firmware, int len)
+{
+ struct rproc *rproc;
+
+ if (!dev || !name || !ops)
+ return NULL;
+
+ rproc = kzalloc(sizeof(struct rproc) + len, GFP_KERNEL);
+ if (!rproc)
+ return NULL;
+
+ rproc->priv = &rproc[1];
+ rproc->auto_boot = true;
+ rproc->elf_class = ELFCLASSNONE;
+ rproc->elf_machine = EM_NONE;
+
+ device_initialize(&rproc->dev);
+ rproc->dev.parent = dev;
+ rproc->dev.type = &rproc_type;
+ rproc->dev.class = &rproc_class;
+ rproc->dev.driver_data = rproc;
+ idr_init(&rproc->notifyids);
+
+ rproc->name = kstrdup_const(name, GFP_KERNEL);
+ if (!rproc->name)
+ goto put_device;
+
+ if (rproc_alloc_firmware(rproc, name, firmware))
+ goto put_device;
+
+ if (rproc_alloc_ops(rproc, ops))
+ goto put_device;
+
+ /* Assign a unique device index and name */
+ rproc->index = ida_alloc(&rproc_dev_index, GFP_KERNEL);
+ if (rproc->index < 0) {
+ dev_err(dev, "ida_alloc failed: %d\n", rproc->index);
+ goto put_device;
+ }
+
+ dev_set_name(&rproc->dev, "remoteproc%d", rproc->index);
+
+ atomic_set(&rproc->power, 0);
+
+ mutex_init(&rproc->lock);
+
+ INIT_LIST_HEAD(&rproc->carveouts);
+ INIT_LIST_HEAD(&rproc->mappings);
+ INIT_LIST_HEAD(&rproc->traces);
+ INIT_LIST_HEAD(&rproc->rvdevs);
+ INIT_LIST_HEAD(&rproc->subdevs);
+ INIT_LIST_HEAD(&rproc->dump_segments);
+
+ INIT_WORK(&rproc->crash_handler, rproc_crash_handler_work);
+
+ rproc->state = RPROC_OFFLINE;
+
+ return rproc;
+
+put_device:
+ put_device(&rproc->dev);
+ return NULL;
+}
+EXPORT_SYMBOL(rproc_alloc);
+
+/**
+ * rproc_free() - unroll rproc_alloc()
+ * @rproc: the remote processor handle
+ *
+ * This function decrements the rproc dev refcount.
+ *
+ * If no one holds any reference to rproc anymore, then its refcount would
+ * now drop to zero, and it would be freed.
+ */
+void rproc_free(struct rproc *rproc)
+{
+ put_device(&rproc->dev);
+}
+EXPORT_SYMBOL(rproc_free);
+
+/**
+ * rproc_put() - release rproc reference
+ * @rproc: the remote processor handle
+ *
+ * This function decrements the rproc dev refcount.
+ *
+ * If no one holds any reference to rproc anymore, then its refcount would
+ * now drop to zero, and it would be freed.
+ */
+void rproc_put(struct rproc *rproc)
+{
+ module_put(rproc->dev.parent->driver->owner);
+ put_device(&rproc->dev);
+}
+EXPORT_SYMBOL(rproc_put);
+
+/**
+ * rproc_del() - unregister a remote processor
+ * @rproc: rproc handle to unregister
+ *
+ * This function should be called when the platform specific rproc
+ * implementation decides to remove the rproc device. It should
+ * _only_ be called if a previous invocation of rproc_add()
+ * has completed successfully.
+ *
+ * After rproc_del() returns, @rproc isn't freed yet, because
+ * of the outstanding reference created by rproc_alloc(). To decrement that
+ * one last refcount, one still needs to call rproc_free().
+ *
+ * Return: 0 on success and -EINVAL if @rproc isn't valid
+ */
+int rproc_del(struct rproc *rproc)
+{
+ if (!rproc)
+ return -EINVAL;
+
+ /* TODO: make sure this works with rproc->power > 1 */
+ rproc_shutdown(rproc);
+
+ mutex_lock(&rproc->lock);
+ rproc->state = RPROC_DELETED;
+ mutex_unlock(&rproc->lock);
+
+ rproc_delete_debug_dir(rproc);
+
+ /* the rproc is downref'ed as soon as it's removed from the klist */
+ mutex_lock(&rproc_list_mutex);
+ list_del_rcu(&rproc->node);
+ mutex_unlock(&rproc_list_mutex);
+
+ /* Ensure that no readers of rproc_list are still active */
+ synchronize_rcu();
+
+ device_del(&rproc->dev);
+ rproc_char_device_remove(rproc);
+
+ return 0;
+}
+EXPORT_SYMBOL(rproc_del);
+
+static void devm_rproc_free(struct device *dev, void *res)
+{
+ rproc_free(*(struct rproc **)res);
+}
+
+/**
+ * devm_rproc_alloc() - resource managed rproc_alloc()
+ * @dev: the underlying device
+ * @name: name of this remote processor
+ * @ops: platform-specific handlers (mainly start/stop)
+ * @firmware: name of firmware file to load, can be NULL
+ * @len: length of private data needed by the rproc driver (in bytes)
+ *
+ * This function performs like rproc_alloc() but the acquired rproc device will
+ * automatically be released on driver detach.
+ *
+ * Return: new rproc instance, or NULL on failure
+ */
+struct rproc *devm_rproc_alloc(struct device *dev, const char *name,
+ const struct rproc_ops *ops,
+ const char *firmware, int len)
+{
+ struct rproc **ptr, *rproc;
+
+ ptr = devres_alloc(devm_rproc_free, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ rproc = rproc_alloc(dev, name, ops, firmware, len);
+ if (rproc) {
+ *ptr = rproc;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return rproc;
+}
+EXPORT_SYMBOL(devm_rproc_alloc);
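+
+/*
+ * A probe sketch (illustrative; all example_* names, the firmware file
+ * and the trivial start/stop stubs are hypothetical): the minimal shape
+ * of a platform driver built on the resource-managed helpers. start/stop
+ * are mandatory here because the processor begins offline.
+ */
+struct example_rproc_priv {
+	void __iomem *base;
+};
+
+static int example_rproc_start(struct rproc *rproc)
+{
+	/* release the processor from reset; hardware-specific in practice */
+	return 0;
+}
+
+static int example_rproc_stop(struct rproc *rproc)
+{
+	/* put the processor back into reset; hardware-specific in practice */
+	return 0;
+}
+
+static const struct rproc_ops example_rproc_ops = {
+	.start = example_rproc_start,
+	.stop = example_rproc_stop,
+};
+
+static int example_rproc_probe(struct platform_device *pdev)
+{
+	struct rproc *rproc;
+
+	rproc = devm_rproc_alloc(&pdev->dev, "example-dsp",
+				 &example_rproc_ops, "example-dsp-fw.elf",
+				 sizeof(struct example_rproc_priv));
+	if (!rproc)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, rproc);
+
+	/* deleted (and later freed) automatically on driver detach */
+	return devm_rproc_add(&pdev->dev, rproc);
+}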
+
+/**
+ * rproc_add_subdev() - add a subdevice to a remoteproc
+ * @rproc: rproc handle to add the subdevice to
+ * @subdev: subdev handle to register
+ *
+ * Caller is responsible for populating optional subdevice function pointers.
+ */
+void rproc_add_subdev(struct rproc *rproc, struct rproc_subdev *subdev)
+{
+ list_add_tail(&subdev->node, &rproc->subdevs);
+}
+EXPORT_SYMBOL(rproc_add_subdev);
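+
+/*
+ * A usage sketch (illustrative; the example_* names are hypothetical):
+ * tie an auxiliary block to the remote processor's lifecycle. Only the
+ * callbacks a subdevice cares about need to be populated.
+ */
+struct example_subdev {
+	struct rproc_subdev subdev;
+};
+
+static int example_subdev_start(struct rproc_subdev *subdev)
+{
+	/* runs after rproc->ops->start(), before the state goes RUNNING */
+	return 0;
+}
+
+static void example_subdev_stop(struct rproc_subdev *subdev, bool crashed)
+{
+	/* @crashed distinguishes a clean stop from post-crash teardown */
+}
+
+static void example_register_subdev(struct rproc *rproc,
+				    struct example_subdev *esd)
+{
+	esd->subdev.start = example_subdev_start;
+	esd->subdev.stop = example_subdev_stop;
+	rproc_add_subdev(rproc, &esd->subdev);
+}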
+
+/**
+ * rproc_remove_subdev() - remove a subdevice from a remoteproc
+ * @rproc: rproc handle to remove the subdevice from
+ * @subdev: subdev handle, previously registered with rproc_add_subdev()
+ */
+void rproc_remove_subdev(struct rproc *rproc, struct rproc_subdev *subdev)
+{
+ list_del(&subdev->node);
+}
+EXPORT_SYMBOL(rproc_remove_subdev);
+
+/**
+ * rproc_get_by_child() - acquire rproc handle of @dev's ancestor
+ * @dev: child device to find ancestor of
+ *
+ * Return: the ancestor rproc instance, or NULL if not found
+ */
+struct rproc *rproc_get_by_child(struct device *dev)
+{
+ for (dev = dev->parent; dev; dev = dev->parent) {
+ if (dev->type == &rproc_type)
+ return dev->driver_data;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(rproc_get_by_child);
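+
+/*
+ * A usage sketch (illustrative; the example_* name is hypothetical): a
+ * device hanging off the rproc (e.g. a vdev child) can recover its owning
+ * rproc by walking up the device hierarchy.
+ */
+static struct rproc *example_owner_of(struct device *child)
+{
+	struct rproc *rproc = rproc_get_by_child(child);
+
+	if (!rproc)
+		dev_warn(child, "no remoteproc ancestor found\n");
+
+	return rproc;
+}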
+
+/**
+ * rproc_report_crash() - rproc crash reporter function
+ * @rproc: remote processor
+ * @type: crash type
+ *
+ * This function must be called every time a crash is detected by the low-level
+ * drivers implementing a specific remoteproc. This should not be called from a
+ * non-remoteproc driver.
+ *
+ * This function can be called from atomic/interrupt context.
+ */
+void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type)
+{
+ if (!rproc) {
+ pr_err("NULL rproc pointer\n");
+ return;
+ }
+
+ /* Prevent suspend while the remoteproc is being recovered */
+ pm_stay_awake(rproc->dev.parent);
+
+ dev_err(&rproc->dev, "crash detected in %s: type %s\n",
+ rproc->name, rproc_crash_to_string(type));
+
+ queue_work(rproc_recovery_wq, &rproc->crash_handler);
+}
+EXPORT_SYMBOL(rproc_report_crash);
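+
+/*
+ * A usage sketch (illustrative; the example_* name is hypothetical): a
+ * platform driver's watchdog interrupt handler reporting a crash. This is
+ * safe from atomic context; recovery is deferred to rproc_recovery_wq.
+ */
+static irqreturn_t example_wdg_irq_handler(int irq, void *data)
+{
+	struct rproc *rproc = data;
+
+	rproc_report_crash(rproc, RPROC_WATCHDOG);
+
+	return IRQ_HANDLED;
+}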
+
+static int rproc_panic_handler(struct notifier_block *nb, unsigned long event,
+ void *ptr)
+{
+ unsigned int longest = 0;
+ struct rproc *rproc;
+ unsigned int d;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(rproc, &rproc_list, node) {
+ if (!rproc->ops->panic)
+ continue;
+
+ if (rproc->state != RPROC_RUNNING &&
+ rproc->state != RPROC_ATTACHED)
+ continue;
+
+ d = rproc->ops->panic(rproc);
+ longest = max(longest, d);
+ }
+ rcu_read_unlock();
+
+ /*
+ * Delay for the longest requested duration before returning. This can
+ * be used by the remoteproc drivers to give the remote processor time
+ * to perform any requested operations (such as flush caches), when
+ * it's not possible to signal the Linux side due to the panic.
+ */
+ mdelay(longest);
+
+ return NOTIFY_DONE;
+}
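+
+/*
+ * Example (illustrative sketch): a driver-side panic callback matching the
+ * contract used above. It signals the remote processor through some
+ * hypothetical mechanism and returns the delay, in milliseconds, that the
+ * core should wait on its behalf.
+ *
+ *	static unsigned long my_rproc_panic(struct rproc *rproc)
+ *	{
+ *		my_signal_remote_of_panic(rproc->priv);
+ *		return 200;
+ *	}
+ */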
+
+static void __init rproc_init_panic(void)
+{
+ rproc_panic_nb.notifier_call = rproc_panic_handler;
+ atomic_notifier_chain_register(&panic_notifier_list, &rproc_panic_nb);
+}
+
+static void __exit rproc_exit_panic(void)
+{
+ atomic_notifier_chain_unregister(&panic_notifier_list, &rproc_panic_nb);
+}
+
+static int __init remoteproc_init(void)
+{
+ rproc_recovery_wq = alloc_workqueue("rproc_recovery_wq",
+ WQ_UNBOUND | WQ_FREEZABLE, 0);
+ if (!rproc_recovery_wq) {
+ pr_err("remoteproc: creation of rproc_recovery_wq failed\n");
+ return -ENOMEM;
+ }
+
+ rproc_init_sysfs();
+ rproc_init_debugfs();
+ rproc_init_cdev();
+ rproc_init_panic();
+
+ return 0;
+}
+subsys_initcall(remoteproc_init);
+
+static void __exit remoteproc_exit(void)
+{
+ ida_destroy(&rproc_dev_index);
+
+ if (!rproc_recovery_wq)
+ return;
+
+ rproc_exit_panic();
+ rproc_exit_debugfs();
+ rproc_exit_sysfs();
+ destroy_workqueue(rproc_recovery_wq);
+}
+module_exit(remoteproc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Generic Remote Processor Framework");
diff --git a/drivers/remoteproc/remoteproc_coredump.c b/drivers/remoteproc/remoteproc_coredump.c
new file mode 100644
index 000000000..4b093420d
--- /dev/null
+++ b/drivers/remoteproc/remoteproc_coredump.c
@@ -0,0 +1,469 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Coredump functionality for Remoteproc framework.
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/completion.h>
+#include <linux/devcoredump.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/remoteproc.h>
+#include "remoteproc_internal.h"
+#include "remoteproc_elf_helpers.h"
+
+struct rproc_coredump_state {
+ struct rproc *rproc;
+ void *header;
+ struct completion dump_done;
+};
+
+/**
+ * rproc_coredump_cleanup() - clean up dump_segments list
+ * @rproc: the remote processor handle
+ */
+void rproc_coredump_cleanup(struct rproc *rproc)
+{
+ struct rproc_dump_segment *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &rproc->dump_segments, node) {
+ list_del(&entry->node);
+ kfree(entry);
+ }
+}
+
+/**
+ * rproc_coredump_add_segment() - add segment of device memory to coredump
+ * @rproc: handle of a remote processor
+ * @da: device address
+ * @size: size of segment
+ *
+ * Add device memory to the list of segments to be included in a coredump for
+ * the remoteproc.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size)
+{
+ struct rproc_dump_segment *segment;
+
+ segment = kzalloc(sizeof(*segment), GFP_KERNEL);
+ if (!segment)
+ return -ENOMEM;
+
+ segment->da = da;
+ segment->size = size;
+
+ list_add_tail(&segment->node, &rproc->dump_segments);
+
+ return 0;
+}
+EXPORT_SYMBOL(rproc_coredump_add_segment);
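+
+/*
+ * Example (illustrative): a driver registering a segment from its parse_fw
+ * or start path. The device address and size are hypothetical.
+ *
+ *	ret = rproc_coredump_add_segment(rproc, 0x20000000, SZ_1M);
+ *	if (ret)
+ *		return ret;
+ */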
+
+/**
+ * rproc_coredump_add_custom_segment() - add custom coredump segment
+ * @rproc: handle of a remote processor
+ * @da: device address
+ * @size: size of segment
+ * @dumpfn: custom dump function called for each segment during coredump
+ * @priv: private data
+ *
+ * Add device memory to the list of segments to be included in the coredump
+ * and associate the segment with the given custom dump function and private
+ * data.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+int rproc_coredump_add_custom_segment(struct rproc *rproc,
+ dma_addr_t da, size_t size,
+ void (*dumpfn)(struct rproc *rproc,
+ struct rproc_dump_segment *segment,
+ void *dest, size_t offset,
+ size_t size),
+ void *priv)
+{
+ struct rproc_dump_segment *segment;
+
+ segment = kzalloc(sizeof(*segment), GFP_KERNEL);
+ if (!segment)
+ return -ENOMEM;
+
+ segment->da = da;
+ segment->size = size;
+ segment->priv = priv;
+ segment->dump = dumpfn;
+
+ list_add_tail(&segment->node, &rproc->dump_segments);
+
+ return 0;
+}
+EXPORT_SYMBOL(rproc_coredump_add_custom_segment);
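+
+/*
+ * Example (illustrative sketch): a custom dump function that fills the
+ * destination from a driver-private snapshot instead of live device memory.
+ * The snapshot structure is hypothetical.
+ *
+ *	static void my_dumpfn(struct rproc *rproc,
+ *			      struct rproc_dump_segment *segment,
+ *			      void *dest, size_t offset, size_t size)
+ *	{
+ *		struct my_snapshot *snap = segment->priv;
+ *
+ *		memcpy(dest, snap->data + offset, size);
+ *	}
+ *
+ *	rproc_coredump_add_custom_segment(rproc, da, size, my_dumpfn, snap);
+ */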
+
+/**
+ * rproc_coredump_set_elf_info() - set coredump elf information
+ * @rproc: handle of a remote processor
+ * @class: elf class for coredump elf file
+ * @machine: elf machine for coredump elf file
+ *
+ * Set elf information which will be used for coredump elf file.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+int rproc_coredump_set_elf_info(struct rproc *rproc, u8 class, u16 machine)
+{
+ if (class != ELFCLASS64 && class != ELFCLASS32)
+ return -EINVAL;
+
+ rproc->elf_class = class;
+ rproc->elf_machine = machine;
+
+ return 0;
+}
+EXPORT_SYMBOL(rproc_coredump_set_elf_info);
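+
+/*
+ * Example (illustrative): a driver for a 32-bit ARM core would typically
+ * make this call once at probe time:
+ *
+ *	rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_ARM);
+ */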
+
+static void rproc_coredump_free(void *data)
+{
+ struct rproc_coredump_state *dump_state = data;
+
+ vfree(dump_state->header);
+ complete(&dump_state->dump_done);
+}
+
+static void *rproc_coredump_find_segment(loff_t user_offset,
+ struct list_head *segments,
+ size_t *data_left)
+{
+ struct rproc_dump_segment *segment;
+
+ list_for_each_entry(segment, segments, node) {
+ if (user_offset < segment->size) {
+ *data_left = segment->size - user_offset;
+ return segment;
+ }
+ user_offset -= segment->size;
+ }
+
+ *data_left = 0;
+ return NULL;
+}
+
+static void rproc_copy_segment(struct rproc *rproc, void *dest,
+ struct rproc_dump_segment *segment,
+ size_t offset, size_t size)
+{
+ bool is_iomem = false;
+ void *ptr;
+
+ if (segment->dump) {
+ segment->dump(rproc, segment, dest, offset, size);
+ } else {
+ ptr = rproc_da_to_va(rproc, segment->da + offset, size, &is_iomem);
+ if (!ptr) {
+ dev_err(&rproc->dev,
+ "invalid copy request for segment %pad with offset %zu and size %zu)\n",
+ &segment->da, offset, size);
+ memset(dest, 0xff, size);
+ } else {
+ if (is_iomem)
+ memcpy_fromio(dest, (void const __iomem *)ptr, size);
+ else
+ memcpy(dest, ptr, size);
+ }
+ }
+}
+
+static ssize_t rproc_coredump_read(char *buffer, loff_t offset, size_t count,
+ void *data, size_t header_sz)
+{
+ size_t seg_data, bytes_left = count;
+ ssize_t copy_sz;
+ struct rproc_dump_segment *seg;
+ struct rproc_coredump_state *dump_state = data;
+ struct rproc *rproc = dump_state->rproc;
+ void *elfcore = dump_state->header;
+
+ /* Copy the vmalloc'ed header first. */
+ if (offset < header_sz) {
+ copy_sz = memory_read_from_buffer(buffer, count, &offset,
+ elfcore, header_sz);
+
+ return copy_sz;
+ }
+
+ /*
+ * Find out the segment memory chunk to be copied based on offset.
+ * Keep copying data until count bytes are read.
+ */
+ while (bytes_left) {
+ seg = rproc_coredump_find_segment(offset - header_sz,
+ &rproc->dump_segments,
+ &seg_data);
+ /* EOF check */
+ if (!seg) {
+ dev_info(&rproc->dev, "Ramdump done, %lld bytes read",
+ offset);
+ break;
+ }
+
+ copy_sz = min_t(size_t, bytes_left, seg_data);
+
+ rproc_copy_segment(rproc, buffer, seg, seg->size - seg_data,
+ copy_sz);
+
+ offset += copy_sz;
+ buffer += copy_sz;
+ bytes_left -= copy_sz;
+ }
+
+ return count - bytes_left;
+}
+
+/**
+ * rproc_coredump() - perform coredump
+ * @rproc: rproc handle
+ *
+ * This function will generate an ELF header for the registered segments
+ * and create a devcoredump device associated with rproc. Based on the
+ * coredump configuration this function will directly copy the segments
+ * from device memory to userspace or copy segments from device memory to
+ * a separate buffer, which can then be read by userspace.
+ * The first approach avoids using extra vmalloc memory, but it stalls the
+ * recovery flow until the dump is read by userspace.
+ */
+void rproc_coredump(struct rproc *rproc)
+{
+ struct rproc_dump_segment *segment;
+ void *phdr;
+ void *ehdr;
+ size_t data_size;
+ size_t offset;
+ void *data;
+ u8 class = rproc->elf_class;
+ int phnum = 0;
+ struct rproc_coredump_state dump_state;
+ enum rproc_dump_mechanism dump_conf = rproc->dump_conf;
+
+ if (list_empty(&rproc->dump_segments) ||
+ dump_conf == RPROC_COREDUMP_DISABLED)
+ return;
+
+ if (class == ELFCLASSNONE) {
+ dev_err(&rproc->dev, "Elf class is not set\n");
+ return;
+ }
+
+ data_size = elf_size_of_hdr(class);
+ list_for_each_entry(segment, &rproc->dump_segments, node) {
+ /*
+		 * For the default configuration the buffer includes headers &
+		 * segments. For an inline dump the buffer includes only the
+		 * headers, as segments are read directly from device memory.
+ */
+ data_size += elf_size_of_phdr(class);
+ if (dump_conf == RPROC_COREDUMP_ENABLED)
+ data_size += segment->size;
+
+ phnum++;
+ }
+
+ data = vmalloc(data_size);
+ if (!data)
+ return;
+
+ ehdr = data;
+
+ memset(ehdr, 0, elf_size_of_hdr(class));
+ /* e_ident field is common for both elf32 and elf64 */
+ elf_hdr_init_ident(ehdr, class);
+
+ elf_hdr_set_e_type(class, ehdr, ET_CORE);
+ elf_hdr_set_e_machine(class, ehdr, rproc->elf_machine);
+ elf_hdr_set_e_version(class, ehdr, EV_CURRENT);
+ elf_hdr_set_e_entry(class, ehdr, rproc->bootaddr);
+ elf_hdr_set_e_phoff(class, ehdr, elf_size_of_hdr(class));
+ elf_hdr_set_e_ehsize(class, ehdr, elf_size_of_hdr(class));
+ elf_hdr_set_e_phentsize(class, ehdr, elf_size_of_phdr(class));
+ elf_hdr_set_e_phnum(class, ehdr, phnum);
+
+ phdr = data + elf_hdr_get_e_phoff(class, ehdr);
+ offset = elf_hdr_get_e_phoff(class, ehdr);
+ offset += elf_size_of_phdr(class) * elf_hdr_get_e_phnum(class, ehdr);
+
+ list_for_each_entry(segment, &rproc->dump_segments, node) {
+ memset(phdr, 0, elf_size_of_phdr(class));
+ elf_phdr_set_p_type(class, phdr, PT_LOAD);
+ elf_phdr_set_p_offset(class, phdr, offset);
+ elf_phdr_set_p_vaddr(class, phdr, segment->da);
+ elf_phdr_set_p_paddr(class, phdr, segment->da);
+ elf_phdr_set_p_filesz(class, phdr, segment->size);
+ elf_phdr_set_p_memsz(class, phdr, segment->size);
+ elf_phdr_set_p_flags(class, phdr, PF_R | PF_W | PF_X);
+ elf_phdr_set_p_align(class, phdr, 0);
+
+ if (dump_conf == RPROC_COREDUMP_ENABLED)
+ rproc_copy_segment(rproc, data + offset, segment, 0,
+ segment->size);
+
+ offset += elf_phdr_get_p_filesz(class, phdr);
+ phdr += elf_size_of_phdr(class);
+ }
+ if (dump_conf == RPROC_COREDUMP_ENABLED) {
+ dev_coredumpv(&rproc->dev, data, data_size, GFP_KERNEL);
+ return;
+ }
+
+ /* Initialize the dump state struct to be used by rproc_coredump_read */
+ dump_state.rproc = rproc;
+ dump_state.header = data;
+ init_completion(&dump_state.dump_done);
+
+ dev_coredumpm(&rproc->dev, NULL, &dump_state, data_size, GFP_KERNEL,
+ rproc_coredump_read, rproc_coredump_free);
+
+ /*
+ * Wait until the dump is read and free is called. Data is freed
+	 * by the devcoredump framework automatically after 5 minutes.
+ */
+ wait_for_completion(&dump_state.dump_done);
+}
+
+/**
+ * rproc_coredump_using_sections() - perform coredump using section headers
+ * @rproc: rproc handle
+ *
+ * This function will generate an ELF header for the registered sections of
+ * segments and create a devcoredump device associated with rproc. Based on
+ * the coredump configuration this function will directly copy the segments
+ * from device memory to userspace or copy segments from device memory to
+ * a separate buffer, which can then be read by userspace.
+ * The first approach avoids using extra vmalloc memory, but it stalls the
+ * recovery flow until the dump is read by userspace.
+ */
+void rproc_coredump_using_sections(struct rproc *rproc)
+{
+ struct rproc_dump_segment *segment;
+ void *shdr;
+ void *ehdr;
+ size_t data_size;
+ size_t strtbl_size = 0;
+ size_t strtbl_index = 1;
+ size_t offset;
+ void *data;
+ u8 class = rproc->elf_class;
+ int shnum;
+ struct rproc_coredump_state dump_state;
+ unsigned int dump_conf = rproc->dump_conf;
+ char *str_tbl = "STR_TBL";
+
+ if (list_empty(&rproc->dump_segments) ||
+ dump_conf == RPROC_COREDUMP_DISABLED)
+ return;
+
+ if (class == ELFCLASSNONE) {
+ dev_err(&rproc->dev, "Elf class is not set\n");
+ return;
+ }
+
+ /*
+	 * We allocate two extra section headers. The first one is null.
+	 * The second section header is for the string table, and space is
+	 * also allocated for the string table itself.
+ */
+ data_size = elf_size_of_hdr(class) + 2 * elf_size_of_shdr(class);
+ shnum = 2;
+
+	/* +2: one byte for the leading NUL at index 0, one for the terminator */
+ strtbl_size += strlen(str_tbl) + 2;
+
+ list_for_each_entry(segment, &rproc->dump_segments, node) {
+ data_size += elf_size_of_shdr(class);
+ strtbl_size += strlen(segment->priv) + 1;
+ if (dump_conf == RPROC_COREDUMP_ENABLED)
+ data_size += segment->size;
+ shnum++;
+ }
+
+ data_size += strtbl_size;
+
+ data = vmalloc(data_size);
+ if (!data)
+ return;
+
+ ehdr = data;
+ memset(ehdr, 0, elf_size_of_hdr(class));
+ /* e_ident field is common for both elf32 and elf64 */
+ elf_hdr_init_ident(ehdr, class);
+
+ elf_hdr_set_e_type(class, ehdr, ET_CORE);
+ elf_hdr_set_e_machine(class, ehdr, rproc->elf_machine);
+ elf_hdr_set_e_version(class, ehdr, EV_CURRENT);
+ elf_hdr_set_e_entry(class, ehdr, rproc->bootaddr);
+ elf_hdr_set_e_shoff(class, ehdr, elf_size_of_hdr(class));
+ elf_hdr_set_e_ehsize(class, ehdr, elf_size_of_hdr(class));
+ elf_hdr_set_e_shentsize(class, ehdr, elf_size_of_shdr(class));
+ elf_hdr_set_e_shnum(class, ehdr, shnum);
+ elf_hdr_set_e_shstrndx(class, ehdr, 1);
+
+ /*
+ * The zeroth index of the section header is reserved and is rarely used.
+ * Set the section header as null (SHN_UNDEF) and move to the next one.
+ */
+ shdr = data + elf_hdr_get_e_shoff(class, ehdr);
+ memset(shdr, 0, elf_size_of_shdr(class));
+ shdr += elf_size_of_shdr(class);
+
+ /* Initialize the string table. */
+ offset = elf_hdr_get_e_shoff(class, ehdr) +
+ elf_size_of_shdr(class) * elf_hdr_get_e_shnum(class, ehdr);
+ memset(data + offset, 0, strtbl_size);
+
+ /* Fill in the string table section header. */
+ memset(shdr, 0, elf_size_of_shdr(class));
+ elf_shdr_set_sh_type(class, shdr, SHT_STRTAB);
+ elf_shdr_set_sh_offset(class, shdr, offset);
+ elf_shdr_set_sh_size(class, shdr, strtbl_size);
+ elf_shdr_set_sh_entsize(class, shdr, 0);
+ elf_shdr_set_sh_flags(class, shdr, 0);
+ elf_shdr_set_sh_name(class, shdr, elf_strtbl_add(str_tbl, ehdr, class, &strtbl_index));
+ offset += elf_shdr_get_sh_size(class, shdr);
+ shdr += elf_size_of_shdr(class);
+
+ list_for_each_entry(segment, &rproc->dump_segments, node) {
+ memset(shdr, 0, elf_size_of_shdr(class));
+ elf_shdr_set_sh_type(class, shdr, SHT_PROGBITS);
+ elf_shdr_set_sh_offset(class, shdr, offset);
+ elf_shdr_set_sh_addr(class, shdr, segment->da);
+ elf_shdr_set_sh_size(class, shdr, segment->size);
+ elf_shdr_set_sh_entsize(class, shdr, 0);
+ elf_shdr_set_sh_flags(class, shdr, SHF_WRITE);
+ elf_shdr_set_sh_name(class, shdr,
+ elf_strtbl_add(segment->priv, ehdr, class, &strtbl_index));
+
+ /* No need to copy segments for inline dumps */
+ if (dump_conf == RPROC_COREDUMP_ENABLED)
+ rproc_copy_segment(rproc, data + offset, segment, 0,
+ segment->size);
+ offset += elf_shdr_get_sh_size(class, shdr);
+ shdr += elf_size_of_shdr(class);
+ }
+
+ if (dump_conf == RPROC_COREDUMP_ENABLED) {
+ dev_coredumpv(&rproc->dev, data, data_size, GFP_KERNEL);
+ return;
+ }
+
+ /* Initialize the dump state struct to be used by rproc_coredump_read */
+ dump_state.rproc = rproc;
+ dump_state.header = data;
+ init_completion(&dump_state.dump_done);
+
+ dev_coredumpm(&rproc->dev, NULL, &dump_state, data_size, GFP_KERNEL,
+ rproc_coredump_read, rproc_coredump_free);
+
+	/*
+	 * Wait until the dump is read and free is called. Data is freed
+	 * by the devcoredump framework automatically after 5 minutes.
+	 */
+ wait_for_completion(&dump_state.dump_done);
+}
+EXPORT_SYMBOL(rproc_coredump_using_sections);
diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c
new file mode 100644
index 000000000..b86c1d09c
--- /dev/null
+++ b/drivers/remoteproc/remoteproc_debugfs.c
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Remote Processor Framework
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Ohad Ben-Cohen <ohad@wizery.com>
+ * Mark Grosen <mgrosen@ti.com>
+ * Brian Swetland <swetland@google.com>
+ * Fernando Guzman Lugo <fernando.lugo@ti.com>
+ * Suman Anna <s-anna@ti.com>
+ * Robert Tivy <rtivy@ti.com>
+ * Armando Uribe De Leon <x0095078@ti.com>
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/remoteproc.h>
+#include <linux/device.h>
+#include <linux/uaccess.h>
+
+#include "remoteproc_internal.h"
+
+/* remoteproc debugfs parent dir */
+static struct dentry *rproc_dbg;
+
+/*
+ * A coredump-configuration-to-string lookup table, for exposing a
+ * human readable configuration via debugfs. Always keep in sync with
+ * enum rproc_coredump_mechanism
+ */
+static const char * const rproc_coredump_str[] = {
+ [RPROC_COREDUMP_DISABLED] = "disabled",
+ [RPROC_COREDUMP_ENABLED] = "enabled",
+ [RPROC_COREDUMP_INLINE] = "inline",
+};
+
+/* Expose the current coredump configuration via debugfs */
+static ssize_t rproc_coredump_read(struct file *filp, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct rproc *rproc = filp->private_data;
+ char buf[20];
+ int len;
+
+ len = scnprintf(buf, sizeof(buf), "%s\n",
+ rproc_coredump_str[rproc->dump_conf]);
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, len);
+}
+
+/*
+ * By writing to the 'coredump' debugfs entry, we control the behavior of the
+ * coredump mechanism dynamically. The default value of this entry is "disabled".
+ *
+ * The 'coredump' debugfs entry supports these commands:
+ *
+ * disabled: By default coredump collection is disabled. Recovery will
+ * proceed without collecting any dump.
+ *
+ * enabled: When the remoteproc crashes the entire coredump will be copied
+ * to a separate buffer and exposed to userspace.
+ *
+ * inline: The coredump will not be copied to a separate buffer and the
+ * recovery process will have to wait until data is read by
+ *		userspace. But this avoids using extra memory.
+ */
+static ssize_t rproc_coredump_write(struct file *filp,
+ const char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct rproc *rproc = filp->private_data;
+ int ret, err = 0;
+ char buf[20];
+
+ if (count < 1 || count > sizeof(buf))
+ return -EINVAL;
+
+ ret = copy_from_user(buf, user_buf, count);
+ if (ret)
+ return -EFAULT;
+
+ /* remove end of line */
+ if (buf[count - 1] == '\n')
+ buf[count - 1] = '\0';
+
+ if (rproc->state == RPROC_CRASHED) {
+ dev_err(&rproc->dev, "can't change coredump configuration\n");
+ err = -EBUSY;
+ goto out;
+ }
+
+ if (!strncmp(buf, "disabled", count)) {
+ rproc->dump_conf = RPROC_COREDUMP_DISABLED;
+ } else if (!strncmp(buf, "enabled", count)) {
+ rproc->dump_conf = RPROC_COREDUMP_ENABLED;
+ } else if (!strncmp(buf, "inline", count)) {
+ rproc->dump_conf = RPROC_COREDUMP_INLINE;
+ } else {
+ dev_err(&rproc->dev, "Invalid coredump configuration\n");
+ err = -EINVAL;
+ }
+out:
+ return err ? err : count;
+}
+
+static const struct file_operations rproc_coredump_fops = {
+ .read = rproc_coredump_read,
+ .write = rproc_coredump_write,
+ .open = simple_open,
+ .llseek = generic_file_llseek,
+};
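+
+/*
+ * Usage sketch from userspace, assuming debugfs is mounted at
+ * /sys/kernel/debug and a first remoteproc instance exists:
+ *
+ *	cat /sys/kernel/debug/remoteproc/remoteproc0/coredump
+ *	echo inline > /sys/kernel/debug/remoteproc/remoteproc0/coredump
+ */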
+
+/*
+ * Some remote processors may support dumping trace logs into a shared
+ * memory buffer. We expose this trace buffer using debugfs, so users
+ * can easily tell what's going on remotely.
+ *
+ * We will most probably improve the rproc tracing facilities later on,
+ * but this kind of lightweight and simple mechanism is always good to have,
+ * as it provides very early tracing with little to no dependencies at all.
+ */
+static ssize_t rproc_trace_read(struct file *filp, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct rproc_debug_trace *data = filp->private_data;
+ struct rproc_mem_entry *trace = &data->trace_mem;
+ void *va;
+ char buf[100];
+ int len;
+
+ va = rproc_da_to_va(data->rproc, trace->da, trace->len, NULL);
+
+ if (!va) {
+ len = scnprintf(buf, sizeof(buf), "Trace %s not available\n",
+ trace->name);
+ va = buf;
+ } else {
+ len = strnlen(va, trace->len);
+ }
+
+ return simple_read_from_buffer(userbuf, count, ppos, va, len);
+}
+
+static const struct file_operations trace_rproc_ops = {
+ .read = rproc_trace_read,
+ .open = simple_open,
+ .llseek = generic_file_llseek,
+};
+
+/* expose the name of the remote processor via debugfs */
+static ssize_t rproc_name_read(struct file *filp, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct rproc *rproc = filp->private_data;
+ /* need room for the name, a newline and a terminating null */
+ char buf[100];
+ int i;
+
+ i = scnprintf(buf, sizeof(buf), "%.98s\n", rproc->name);
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, i);
+}
+
+static const struct file_operations rproc_name_ops = {
+ .read = rproc_name_read,
+ .open = simple_open,
+ .llseek = generic_file_llseek,
+};
+
+/* expose recovery flag via debugfs */
+static ssize_t rproc_recovery_read(struct file *filp, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct rproc *rproc = filp->private_data;
+ char *buf = rproc->recovery_disabled ? "disabled\n" : "enabled\n";
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
+}
+
+/*
+ * By writing to the 'recovery' debugfs entry, we control the behavior of the
+ * recovery mechanism dynamically. The default value of this entry is "enabled".
+ *
+ * The 'recovery' debugfs entry supports these commands:
+ *
+ * enabled: When enabled, the remote processor will be automatically
+ * recovered whenever it crashes. Moreover, if the remote
+ * processor crashes while recovery is disabled, it will
+ * be automatically recovered too as soon as recovery is enabled.
+ *
+ * disabled: When disabled, a remote processor will remain in a crashed
+ * state if it crashes. This is useful for debugging purposes;
+ * without it, debugging a crash is substantially harder.
+ *
+ * recover:	This command will trigger an immediate recovery if the
+ * remote processor is in a crashed state, without changing
+ * or checking the recovery state (enabled/disabled).
+ * This is useful during debugging sessions, when one expects
+ * additional crashes to happen after enabling recovery. In this
+ * case, enabling recovery will make it hard to debug subsequent
+ * crashes, so it's recommended to keep recovery disabled, and
+ * instead use the "recover" command as needed.
+ */
+static ssize_t
+rproc_recovery_write(struct file *filp, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct rproc *rproc = filp->private_data;
+ char buf[10];
+ int ret;
+
+ if (count < 1 || count > sizeof(buf))
+ return -EINVAL;
+
+ ret = copy_from_user(buf, user_buf, count);
+ if (ret)
+ return -EFAULT;
+
+ /* remove end of line */
+ if (buf[count - 1] == '\n')
+ buf[count - 1] = '\0';
+
+ if (!strncmp(buf, "enabled", count)) {
+ /* change the flag and begin the recovery process if needed */
+ rproc->recovery_disabled = false;
+ rproc_trigger_recovery(rproc);
+ } else if (!strncmp(buf, "disabled", count)) {
+ rproc->recovery_disabled = true;
+ } else if (!strncmp(buf, "recover", count)) {
+ /* begin the recovery process without changing the flag */
+ rproc_trigger_recovery(rproc);
+ } else {
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static const struct file_operations rproc_recovery_ops = {
+ .read = rproc_recovery_read,
+ .write = rproc_recovery_write,
+ .open = simple_open,
+ .llseek = generic_file_llseek,
+};
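+
+/*
+ * Usage sketch from userspace, with the same path assumptions as for the
+ * coredump entry above:
+ *
+ *	echo disabled > /sys/kernel/debug/remoteproc/remoteproc0/recovery
+ *	echo recover > /sys/kernel/debug/remoteproc/remoteproc0/recovery
+ */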
+
+/* expose the crash trigger via debugfs */
+static ssize_t
+rproc_crash_write(struct file *filp, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct rproc *rproc = filp->private_data;
+ unsigned int type;
+ int ret;
+
+ ret = kstrtouint_from_user(user_buf, count, 0, &type);
+ if (ret < 0)
+ return ret;
+
+ rproc_report_crash(rproc, type);
+
+ return count;
+}
+
+static const struct file_operations rproc_crash_ops = {
+ .write = rproc_crash_write,
+ .open = simple_open,
+ .llseek = generic_file_llseek,
+};
+
+/* Expose resource table content via debugfs */
+static int rproc_rsc_table_show(struct seq_file *seq, void *p)
+{
+ static const char * const types[] = {"carveout", "devmem", "trace", "vdev"};
+ struct rproc *rproc = seq->private;
+ struct resource_table *table = rproc->table_ptr;
+ struct fw_rsc_carveout *c;
+ struct fw_rsc_devmem *d;
+ struct fw_rsc_trace *t;
+ struct fw_rsc_vdev *v;
+ int i, j;
+
+ if (!table) {
+ seq_puts(seq, "No resource table found\n");
+ return 0;
+ }
+
+ for (i = 0; i < table->num; i++) {
+ int offset = table->offset[i];
+ struct fw_rsc_hdr *hdr = (void *)table + offset;
+ void *rsc = (void *)hdr + sizeof(*hdr);
+
+ switch (hdr->type) {
+ case RSC_CARVEOUT:
+ c = rsc;
+ seq_printf(seq, "Entry %d is of type %s\n", i, types[hdr->type]);
+ seq_printf(seq, " Device Address 0x%x\n", c->da);
+ seq_printf(seq, " Physical Address 0x%x\n", c->pa);
+ seq_printf(seq, " Length 0x%x Bytes\n", c->len);
+ seq_printf(seq, " Flags 0x%x\n", c->flags);
+ seq_printf(seq, " Reserved (should be zero) [%d]\n", c->reserved);
+ seq_printf(seq, " Name %s\n\n", c->name);
+ break;
+ case RSC_DEVMEM:
+ d = rsc;
+ seq_printf(seq, "Entry %d is of type %s\n", i, types[hdr->type]);
+ seq_printf(seq, " Device Address 0x%x\n", d->da);
+ seq_printf(seq, " Physical Address 0x%x\n", d->pa);
+ seq_printf(seq, " Length 0x%x Bytes\n", d->len);
+ seq_printf(seq, " Flags 0x%x\n", d->flags);
+ seq_printf(seq, " Reserved (should be zero) [%d]\n", d->reserved);
+ seq_printf(seq, " Name %s\n\n", d->name);
+ break;
+ case RSC_TRACE:
+ t = rsc;
+ seq_printf(seq, "Entry %d is of type %s\n", i, types[hdr->type]);
+ seq_printf(seq, " Device Address 0x%x\n", t->da);
+ seq_printf(seq, " Length 0x%x Bytes\n", t->len);
+ seq_printf(seq, " Reserved (should be zero) [%d]\n", t->reserved);
+ seq_printf(seq, " Name %s\n\n", t->name);
+ break;
+ case RSC_VDEV:
+ v = rsc;
+ seq_printf(seq, "Entry %d is of type %s\n", i, types[hdr->type]);
+
+ seq_printf(seq, " ID %d\n", v->id);
+ seq_printf(seq, " Notify ID %d\n", v->notifyid);
+ seq_printf(seq, " Device features 0x%x\n", v->dfeatures);
+ seq_printf(seq, " Guest features 0x%x\n", v->gfeatures);
+ seq_printf(seq, " Config length 0x%x\n", v->config_len);
+ seq_printf(seq, " Status 0x%x\n", v->status);
+ seq_printf(seq, " Number of vrings %d\n", v->num_of_vrings);
+ seq_printf(seq, " Reserved (should be zero) [%d][%d]\n\n",
+ v->reserved[0], v->reserved[1]);
+
+ for (j = 0; j < v->num_of_vrings; j++) {
+ seq_printf(seq, " Vring %d\n", j);
+ seq_printf(seq, " Device Address 0x%x\n", v->vring[j].da);
+ seq_printf(seq, " Alignment %d\n", v->vring[j].align);
+ seq_printf(seq, " Number of buffers %d\n", v->vring[j].num);
+ seq_printf(seq, " Notify ID %d\n", v->vring[j].notifyid);
+ seq_printf(seq, " Physical Address 0x%x\n\n",
+ v->vring[j].pa);
+ }
+ break;
+ default:
+ seq_printf(seq, "Unknown resource type found: %d [hdr: %pK]\n",
+ hdr->type, hdr);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(rproc_rsc_table);
+
+/* Expose carveout content via debugfs */
+static int rproc_carveouts_show(struct seq_file *seq, void *p)
+{
+ struct rproc *rproc = seq->private;
+ struct rproc_mem_entry *carveout;
+
+ list_for_each_entry(carveout, &rproc->carveouts, node) {
+ seq_puts(seq, "Carveout memory entry:\n");
+ seq_printf(seq, "\tName: %s\n", carveout->name);
+ seq_printf(seq, "\tVirtual address: %pK\n", carveout->va);
+ seq_printf(seq, "\tDMA address: %pad\n", &carveout->dma);
+ seq_printf(seq, "\tDevice address: 0x%x\n", carveout->da);
+ seq_printf(seq, "\tLength: 0x%zx Bytes\n\n", carveout->len);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(rproc_carveouts);
+
+void rproc_remove_trace_file(struct dentry *tfile)
+{
+ debugfs_remove(tfile);
+}
+
+struct dentry *rproc_create_trace_file(const char *name, struct rproc *rproc,
+ struct rproc_debug_trace *trace)
+{
+ return debugfs_create_file(name, 0400, rproc->dbg_dir, trace,
+ &trace_rproc_ops);
+}
+
+void rproc_delete_debug_dir(struct rproc *rproc)
+{
+ debugfs_remove_recursive(rproc->dbg_dir);
+}
+
+void rproc_create_debug_dir(struct rproc *rproc)
+{
+ struct device *dev = &rproc->dev;
+
+ if (!rproc_dbg)
+ return;
+
+ rproc->dbg_dir = debugfs_create_dir(dev_name(dev), rproc_dbg);
+
+ debugfs_create_file("name", 0400, rproc->dbg_dir,
+ rproc, &rproc_name_ops);
+ debugfs_create_file("recovery", 0600, rproc->dbg_dir,
+ rproc, &rproc_recovery_ops);
+ debugfs_create_file("crash", 0200, rproc->dbg_dir,
+ rproc, &rproc_crash_ops);
+ debugfs_create_file("resource_table", 0400, rproc->dbg_dir,
+ rproc, &rproc_rsc_table_fops);
+ debugfs_create_file("carveout_memories", 0400, rproc->dbg_dir,
+ rproc, &rproc_carveouts_fops);
+ debugfs_create_file("coredump", 0600, rproc->dbg_dir,
+ rproc, &rproc_coredump_fops);
+}
+
+void __init rproc_init_debugfs(void)
+{
+ if (debugfs_initialized())
+ rproc_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
+}
+
+void __exit rproc_exit_debugfs(void)
+{
+ debugfs_remove(rproc_dbg);
+}
diff --git a/drivers/remoteproc/remoteproc_elf_helpers.h b/drivers/remoteproc/remoteproc_elf_helpers.h
new file mode 100644
index 000000000..e6de53a50
--- /dev/null
+++ b/drivers/remoteproc/remoteproc_elf_helpers.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Remote processor elf helpers defines
+ *
+ * Copyright (C) 2020 Kalray, Inc.
+ */
+
+#ifndef REMOTEPROC_ELF_LOADER_H
+#define REMOTEPROC_ELF_LOADER_H
+
+#include <linux/elf.h>
+#include <linux/types.h>
+
+/**
+ * fw_elf_get_class - Get elf class
+ * @fw: the ELF firmware image
+ *
+ * Note that we use elf32_hdr to access the class, since the start of the
+ * struct is the same for both elf classes.
+ *
+ * Return: elf class of the firmware
+ */
+static inline u8 fw_elf_get_class(const struct firmware *fw)
+{
+ struct elf32_hdr *ehdr = (struct elf32_hdr *)fw->data;
+
+ return ehdr->e_ident[EI_CLASS];
+}
+
+static inline void elf_hdr_init_ident(struct elf32_hdr *hdr, u8 class)
+{
+ memcpy(hdr->e_ident, ELFMAG, SELFMAG);
+ hdr->e_ident[EI_CLASS] = class;
+ hdr->e_ident[EI_DATA] = ELFDATA2LSB;
+ hdr->e_ident[EI_VERSION] = EV_CURRENT;
+ hdr->e_ident[EI_OSABI] = ELFOSABI_NONE;
+}
+
+/* Generate getter and setter for a specific elf struct/field */
+#define ELF_GEN_FIELD_GET_SET(__s, __field, __type) \
+static inline __type elf_##__s##_get_##__field(u8 class, const void *arg) \
+{ \
+ if (class == ELFCLASS32) \
+ return (__type) ((const struct elf32_##__s *) arg)->__field; \
+ else \
+ return (__type) ((const struct elf64_##__s *) arg)->__field; \
+} \
+static inline void elf_##__s##_set_##__field(u8 class, void *arg, \
+ __type value) \
+{ \
+ if (class == ELFCLASS32) \
+ ((struct elf32_##__s *) arg)->__field = (__type) value; \
+ else \
+ ((struct elf64_##__s *) arg)->__field = (__type) value; \
+}
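+
+/*
+ * For instance, ELF_GEN_FIELD_GET_SET(hdr, e_entry, u64) below generates
+ * roughly:
+ *
+ *	static inline u64 elf_hdr_get_e_entry(u8 class, const void *arg);
+ *	static inline void elf_hdr_set_e_entry(u8 class, void *arg, u64 value);
+ *
+ * so callers can access a field without caring about the ELF class:
+ *
+ *	u64 entry = elf_hdr_get_e_entry(ELFCLASS32, fw->data);
+ */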
+
+ELF_GEN_FIELD_GET_SET(hdr, e_entry, u64)
+ELF_GEN_FIELD_GET_SET(hdr, e_phnum, u16)
+ELF_GEN_FIELD_GET_SET(hdr, e_shnum, u16)
+ELF_GEN_FIELD_GET_SET(hdr, e_phoff, u64)
+ELF_GEN_FIELD_GET_SET(hdr, e_shoff, u64)
+ELF_GEN_FIELD_GET_SET(hdr, e_shstrndx, u16)
+ELF_GEN_FIELD_GET_SET(hdr, e_machine, u16)
+ELF_GEN_FIELD_GET_SET(hdr, e_type, u16)
+ELF_GEN_FIELD_GET_SET(hdr, e_version, u32)
+ELF_GEN_FIELD_GET_SET(hdr, e_ehsize, u32)
+ELF_GEN_FIELD_GET_SET(hdr, e_phentsize, u16)
+ELF_GEN_FIELD_GET_SET(hdr, e_shentsize, u16)
+
+ELF_GEN_FIELD_GET_SET(phdr, p_paddr, u64)
+ELF_GEN_FIELD_GET_SET(phdr, p_vaddr, u64)
+ELF_GEN_FIELD_GET_SET(phdr, p_filesz, u64)
+ELF_GEN_FIELD_GET_SET(phdr, p_memsz, u64)
+ELF_GEN_FIELD_GET_SET(phdr, p_type, u32)
+ELF_GEN_FIELD_GET_SET(phdr, p_offset, u64)
+ELF_GEN_FIELD_GET_SET(phdr, p_flags, u32)
+ELF_GEN_FIELD_GET_SET(phdr, p_align, u64)
+
+ELF_GEN_FIELD_GET_SET(shdr, sh_type, u32)
+ELF_GEN_FIELD_GET_SET(shdr, sh_flags, u32)
+ELF_GEN_FIELD_GET_SET(shdr, sh_entsize, u16)
+ELF_GEN_FIELD_GET_SET(shdr, sh_size, u64)
+ELF_GEN_FIELD_GET_SET(shdr, sh_offset, u64)
+ELF_GEN_FIELD_GET_SET(shdr, sh_name, u32)
+ELF_GEN_FIELD_GET_SET(shdr, sh_addr, u64)
+
+#define ELF_STRUCT_SIZE(__s) \
+static inline unsigned long elf_size_of_##__s(u8 class) \
+{ \
+ if (class == ELFCLASS32)\
+ return sizeof(struct elf32_##__s); \
+ else \
+ return sizeof(struct elf64_##__s); \
+}
+
+ELF_STRUCT_SIZE(shdr)
+ELF_STRUCT_SIZE(phdr)
+ELF_STRUCT_SIZE(hdr)
+
+static inline unsigned int elf_strtbl_add(const char *name, void *ehdr, u8 class, size_t *index)
+{
+ u16 shstrndx = elf_hdr_get_e_shstrndx(class, ehdr);
+ void *shdr;
+ char *strtab;
+ size_t idx, ret;
+
+ shdr = ehdr + elf_size_of_hdr(class) + shstrndx * elf_size_of_shdr(class);
+ strtab = ehdr + elf_shdr_get_sh_offset(class, shdr);
+ idx = index ? *index : 0;
+ if (!strtab || !name)
+ return 0;
+
+ ret = idx;
+ strcpy((strtab + idx), name);
+ idx += strlen(name) + 1;
+ if (index)
+ *index = idx;
+
+ return ret;
+}
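+
+/*
+ * Example (illustrative): starting from an empty table with idx = 1 (offset
+ * 0 holds the leading NUL), two successive calls
+ *
+ *	elf_strtbl_add("STR_TBL", ehdr, class, &idx);
+ *	elf_strtbl_add("seg0", ehdr, class, &idx);
+ *
+ * lay the table out as "\0STR_TBL\0seg0\0" and return offsets 1 and 9,
+ * which callers store in the sh_name field of the matching section header.
+ */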
+
+#endif /* REMOTEPROC_ELF_LOADER_H */
diff --git a/drivers/remoteproc/remoteproc_elf_loader.c b/drivers/remoteproc/remoteproc_elf_loader.c
new file mode 100644
index 000000000..5a412d7b6
--- /dev/null
+++ b/drivers/remoteproc/remoteproc_elf_loader.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Remote Processor Framework Elf loader
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Ohad Ben-Cohen <ohad@wizery.com>
+ * Brian Swetland <swetland@google.com>
+ * Mark Grosen <mgrosen@ti.com>
+ * Fernando Guzman Lugo <fernando.lugo@ti.com>
+ * Suman Anna <s-anna@ti.com>
+ * Robert Tivy <rtivy@ti.com>
+ * Armando Uribe De Leon <x0095078@ti.com>
+ * Sjur Brændeland <sjur.brandeland@stericsson.com>
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <linux/remoteproc.h>
+#include <linux/elf.h>
+
+#include "remoteproc_internal.h"
+#include "remoteproc_elf_helpers.h"
+
+/**
+ * rproc_elf_sanity_check() - Sanity Check for ELF32/ELF64 firmware image
+ * @rproc: the remote processor handle
+ * @fw: the ELF firmware image
+ *
+ * Make sure this fw image is sane (ie a correct ELF32/ELF64 file).
+ *
+ * Return: 0 on success and -EINVAL upon any failure
+ */
+int rproc_elf_sanity_check(struct rproc *rproc, const struct firmware *fw)
+{
+ const char *name = rproc->firmware;
+ struct device *dev = &rproc->dev;
+ /*
+	 * Elf files begin with the same header structure. Thus, to simplify
+	 * header parsing, we can use the elf32_hdr one for both elf64 and
+	 * elf32.
+ */
+ struct elf32_hdr *ehdr;
+ u32 elf_shdr_get_size;
+ u64 phoff, shoff;
+ char class;
+ u16 phnum;
+
+ if (!fw) {
+ dev_err(dev, "failed to load %s\n", name);
+ return -EINVAL;
+ }
+
+ if (fw->size < sizeof(struct elf32_hdr)) {
+ dev_err(dev, "Image is too small\n");
+ return -EINVAL;
+ }
+
+ ehdr = (struct elf32_hdr *)fw->data;
+
+ if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
+ dev_err(dev, "Image is corrupted (bad magic)\n");
+ return -EINVAL;
+ }
+
+ class = ehdr->e_ident[EI_CLASS];
+ if (class != ELFCLASS32 && class != ELFCLASS64) {
+ dev_err(dev, "Unsupported class: %d\n", class);
+ return -EINVAL;
+ }
+
+ if (class == ELFCLASS64 && fw->size < sizeof(struct elf64_hdr)) {
+ dev_err(dev, "elf64 header is too small\n");
+ return -EINVAL;
+ }
+
+ /* We assume the firmware has the same endianness as the host */
+# ifdef __LITTLE_ENDIAN
+ if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) {
+# else /* BIG ENDIAN */
+ if (ehdr->e_ident[EI_DATA] != ELFDATA2MSB) {
+# endif
+ dev_err(dev, "Unsupported firmware endianness\n");
+ return -EINVAL;
+ }
+
+ phoff = elf_hdr_get_e_phoff(class, fw->data);
+ shoff = elf_hdr_get_e_shoff(class, fw->data);
+ phnum = elf_hdr_get_e_phnum(class, fw->data);
+ elf_shdr_get_size = elf_size_of_shdr(class);
+
+ if (fw->size < shoff + elf_shdr_get_size) {
+ dev_err(dev, "Image is too small\n");
+ return -EINVAL;
+ }
+
+ if (phnum == 0) {
+ dev_err(dev, "No loadable segments\n");
+ return -EINVAL;
+ }
+
+ if (phoff > fw->size) {
+ dev_err(dev, "Firmware size is too small\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "Firmware is an elf%d file\n",
+ class == ELFCLASS32 ? 32 : 64);
+
+ return 0;
+}
+EXPORT_SYMBOL(rproc_elf_sanity_check);
+
+/**
+ * rproc_elf_get_boot_addr() - Get rproc's boot address.
+ * @rproc: the remote processor handle
+ * @fw: the ELF firmware image
+ *
+ * Note that the boot address is not a configurable property of all remote
+ * processors. Some will always boot at a specific hard-coded address.
+ *
+ * Return: entry point address of the ELF image
+ */
+u64 rproc_elf_get_boot_addr(struct rproc *rproc, const struct firmware *fw)
+{
+ return elf_hdr_get_e_entry(fw_elf_get_class(fw), fw->data);
+}
+EXPORT_SYMBOL(rproc_elf_get_boot_addr);
+
+/**
+ * rproc_elf_load_segments() - load firmware segments to memory
+ * @rproc: remote processor which will be booted using these fw segments
+ * @fw: the ELF firmware image
+ *
+ * This function loads the firmware segments to memory, where the remote
+ * processor expects them.
+ *
+ * Some remote processors will expect their code and data to be placed
+ * in specific device addresses, and can't have them dynamically assigned.
+ *
+ * We currently support only those kind of remote processors, and expect
+ * the program header's paddr member to contain those addresses. We then go
+ * through the physically contiguous "carveout" memory regions which we
+ * allocated (and mapped) earlier on behalf of the remote processor,
+ * and "translate" device address to kernel addresses, so we can copy the
+ * segments where they are expected.
+ *
+ * Currently we only support remote processors that require carveout
+ * allocations and get them mapped onto their iommus. Some processors
+ * might be different: they might not have iommus, and would prefer to
+ * directly allocate memory for every segment/resource. This is not yet
+ * supported, though.
+ *
+ * Return: 0 on success and an appropriate error code otherwise
+ */
+int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
+{
+ struct device *dev = &rproc->dev;
+ const void *ehdr, *phdr;
+ int i, ret = 0;
+ u16 phnum;
+ const u8 *elf_data = fw->data;
+ u8 class = fw_elf_get_class(fw);
+ u32 elf_phdr_get_size = elf_size_of_phdr(class);
+
+ ehdr = elf_data;
+ phnum = elf_hdr_get_e_phnum(class, ehdr);
+ phdr = elf_data + elf_hdr_get_e_phoff(class, ehdr);
+
+ /* go through the available ELF segments */
+ for (i = 0; i < phnum; i++, phdr += elf_phdr_get_size) {
+ u64 da = elf_phdr_get_p_paddr(class, phdr);
+ u64 memsz = elf_phdr_get_p_memsz(class, phdr);
+ u64 filesz = elf_phdr_get_p_filesz(class, phdr);
+ u64 offset = elf_phdr_get_p_offset(class, phdr);
+ u32 type = elf_phdr_get_p_type(class, phdr);
+ bool is_iomem = false;
+ void *ptr;
+
+ if (type != PT_LOAD || !memsz)
+ continue;
+
+ dev_dbg(dev, "phdr: type %d da 0x%llx memsz 0x%llx filesz 0x%llx\n",
+ type, da, memsz, filesz);
+
+ if (filesz > memsz) {
+ dev_err(dev, "bad phdr filesz 0x%llx memsz 0x%llx\n",
+ filesz, memsz);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (offset + filesz > fw->size) {
+ dev_err(dev, "truncated fw: need 0x%llx avail 0x%zx\n",
+ offset + filesz, fw->size);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!rproc_u64_fit_in_size_t(memsz)) {
+ dev_err(dev, "size (%llx) does not fit in size_t type\n",
+ memsz);
+ ret = -EOVERFLOW;
+ break;
+ }
+
+ /* grab the kernel address for this device address */
+ ptr = rproc_da_to_va(rproc, da, memsz, &is_iomem);
+ if (!ptr) {
+ dev_err(dev, "bad phdr da 0x%llx mem 0x%llx\n", da,
+ memsz);
+ ret = -EINVAL;
+ break;
+ }
+
+ /* put the segment where the remote processor expects it */
+ if (filesz) {
+ if (is_iomem)
+ memcpy_toio((void __iomem *)ptr, elf_data + offset, filesz);
+ else
+ memcpy(ptr, elf_data + offset, filesz);
+ }
+
+ /*
+ * Zero out remaining memory for this segment.
+ *
+ * This isn't strictly required since dma_alloc_coherent already
+		 * did this for us. Albeit harmless, we may consider removing
+ * this.
+ */
+ if (memsz > filesz) {
+ if (is_iomem)
+ memset_io((void __iomem *)(ptr + filesz), 0, memsz - filesz);
+ else
+ memset(ptr + filesz, 0, memsz - filesz);
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(rproc_elf_load_segments);
+
+static const void *
+find_table(struct device *dev, const struct firmware *fw)
+{
+ const void *shdr, *name_table_shdr;
+ int i;
+ const char *name_table;
+ struct resource_table *table = NULL;
+ const u8 *elf_data = (void *)fw->data;
+ u8 class = fw_elf_get_class(fw);
+ size_t fw_size = fw->size;
+ const void *ehdr = elf_data;
+ u16 shnum = elf_hdr_get_e_shnum(class, ehdr);
+ u32 elf_shdr_get_size = elf_size_of_shdr(class);
+ u16 shstrndx = elf_hdr_get_e_shstrndx(class, ehdr);
+
+ /* look for the resource table and handle it */
+ /* First, get the section header according to the elf class */
+ shdr = elf_data + elf_hdr_get_e_shoff(class, ehdr);
+ /* Compute name table section header entry in shdr array */
+ name_table_shdr = shdr + (shstrndx * elf_shdr_get_size);
+ /* Finally, compute the name table section address in elf */
+ name_table = elf_data + elf_shdr_get_sh_offset(class, name_table_shdr);
+
+ for (i = 0; i < shnum; i++, shdr += elf_shdr_get_size) {
+ u64 size = elf_shdr_get_sh_size(class, shdr);
+ u64 offset = elf_shdr_get_sh_offset(class, shdr);
+ u32 name = elf_shdr_get_sh_name(class, shdr);
+
+ if (strcmp(name_table + name, ".resource_table"))
+ continue;
+
+ table = (struct resource_table *)(elf_data + offset);
+
+ /* make sure we have the entire table */
+ if (offset + size > fw_size || offset + size < size) {
+ dev_err(dev, "resource table truncated\n");
+ return NULL;
+ }
+
+ /* make sure table has at least the header */
+ if (sizeof(struct resource_table) > size) {
+ dev_err(dev, "header-less resource table\n");
+ return NULL;
+ }
+
+ /* we don't support any version beyond the first */
+ if (table->ver != 1) {
+ dev_err(dev, "unsupported fw ver: %d\n", table->ver);
+ return NULL;
+ }
+
+ /* make sure reserved bytes are zeroes */
+ if (table->reserved[0] || table->reserved[1]) {
+ dev_err(dev, "non zero reserved bytes\n");
+ return NULL;
+ }
+
+ /* make sure the offsets array isn't truncated */
+ if (struct_size(table, offset, table->num) > size) {
+ dev_err(dev, "resource table incomplete\n");
+ return NULL;
+ }
+
+ return shdr;
+ }
+
+ return NULL;
+}
+
+/**
+ * rproc_elf_load_rsc_table() - load the resource table
+ * @rproc: the rproc handle
+ * @fw: the ELF firmware image
+ *
+ * This function finds the resource table inside the remote processor's
+ * firmware, loads it into @cached_table and updates @table_ptr.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int rproc_elf_load_rsc_table(struct rproc *rproc, const struct firmware *fw)
+{
+ const void *shdr;
+ struct device *dev = &rproc->dev;
+ struct resource_table *table = NULL;
+ const u8 *elf_data = fw->data;
+ size_t tablesz;
+ u8 class = fw_elf_get_class(fw);
+ u64 sh_offset;
+
+ shdr = find_table(dev, fw);
+ if (!shdr)
+ return -EINVAL;
+
+ sh_offset = elf_shdr_get_sh_offset(class, shdr);
+ table = (struct resource_table *)(elf_data + sh_offset);
+ tablesz = elf_shdr_get_sh_size(class, shdr);
+
+ /*
+ * Create a copy of the resource table. When a virtio device starts
+ * and calls vring_new_virtqueue() the address of the allocated vring
+ * will be stored in the cached_table. Before the device is started,
+ * cached_table will be copied into device memory.
+ */
+ rproc->cached_table = kmemdup(table, tablesz, GFP_KERNEL);
+ if (!rproc->cached_table)
+ return -ENOMEM;
+
+ rproc->table_ptr = rproc->cached_table;
+ rproc->table_sz = tablesz;
+
+ return 0;
+}
+EXPORT_SYMBOL(rproc_elf_load_rsc_table);
+
+/**
+ * rproc_elf_find_loaded_rsc_table() - find the loaded resource table
+ * @rproc: the rproc handle
+ * @fw: the ELF firmware image
+ *
+ * This function finds the location of the loaded resource table. Don't
+ * call this function if the table wasn't loaded yet - it's a bug if you do.
+ *
+ * Return: pointer to the resource table if it is found or NULL otherwise.
+ * If the table wasn't loaded yet the result is unspecified.
+ */
+struct resource_table *rproc_elf_find_loaded_rsc_table(struct rproc *rproc,
+ const struct firmware *fw)
+{
+ const void *shdr;
+ u64 sh_addr, sh_size;
+ u8 class = fw_elf_get_class(fw);
+ struct device *dev = &rproc->dev;
+
+ shdr = find_table(&rproc->dev, fw);
+ if (!shdr)
+ return NULL;
+
+ sh_addr = elf_shdr_get_sh_addr(class, shdr);
+ sh_size = elf_shdr_get_sh_size(class, shdr);
+
+ if (!rproc_u64_fit_in_size_t(sh_size)) {
+ dev_err(dev, "size (%llx) does not fit in size_t type\n",
+ sh_size);
+ return NULL;
+ }
+
+ return rproc_da_to_va(rproc, sh_addr, sh_size, NULL);
+}
+EXPORT_SYMBOL(rproc_elf_find_loaded_rsc_table);
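+
+/*
+ * Example (illustrative sketch): a typical driver wires these generic ELF
+ * helpers directly into its rproc_ops; start/stop are hypothetical
+ * platform-specific callbacks.
+ *
+ *	static const struct rproc_ops my_rproc_ops = {
+ *		.start = my_rproc_start,
+ *		.stop = my_rproc_stop,
+ *		.load = rproc_elf_load_segments,
+ *		.parse_fw = rproc_elf_load_rsc_table,
+ *		.find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
+ *		.sanity_check = rproc_elf_sanity_check,
+ *		.get_boot_addr = rproc_elf_get_boot_addr,
+ *	};
+ */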
diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h
new file mode 100644
index 000000000..d4dbb8d1d
--- /dev/null
+++ b/drivers/remoteproc/remoteproc_internal.h
@@ -0,0 +1,228 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Remote processor framework
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Ohad Ben-Cohen <ohad@wizery.com>
+ * Brian Swetland <swetland@google.com>
+ */
+
+#ifndef REMOTEPROC_INTERNAL_H
+#define REMOTEPROC_INTERNAL_H
+
+#include <linux/irqreturn.h>
+#include <linux/firmware.h>
+
+struct rproc;
+
+struct rproc_debug_trace {
+ struct rproc *rproc;
+ struct dentry *tfile;
+ struct list_head node;
+ struct rproc_mem_entry trace_mem;
+};
+
+/**
+ * struct rproc_vdev_data - remoteproc virtio device data
+ * @rsc_offset: offset of the vdev's resource entry
+ * @id: virtio device id (as in virtio_ids.h)
+ * @index: vdev position versus other vdev declared in resource table
+ * @rsc: pointer to the vdev resource entry. Valid only during vdev init as
+ * the resource can be cached by rproc.
+ */
+struct rproc_vdev_data {
+ u32 rsc_offset;
+ unsigned int id;
+ u32 index;
+ struct fw_rsc_vdev *rsc;
+};
+
+static inline bool rproc_has_feature(struct rproc *rproc, unsigned int feature)
+{
+ return test_bit(feature, rproc->features);
+}
+
+static inline int rproc_set_feature(struct rproc *rproc, unsigned int feature)
+{
+ if (feature >= RPROC_MAX_FEATURES)
+ return -EINVAL;
+
+ set_bit(feature, rproc->features);
+
+ return 0;
+}
+
+/* from remoteproc_core.c */
+void rproc_release(struct kref *kref);
+int rproc_of_parse_firmware(struct device *dev, int index,
+ const char **fw_name);
+
+/* from remoteproc_virtio.c */
+irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int vq_id);
+
+/* from remoteproc_debugfs.c */
+void rproc_remove_trace_file(struct dentry *tfile);
+struct dentry *rproc_create_trace_file(const char *name, struct rproc *rproc,
+ struct rproc_debug_trace *trace);
+void rproc_delete_debug_dir(struct rproc *rproc);
+void rproc_create_debug_dir(struct rproc *rproc);
+void rproc_init_debugfs(void);
+void rproc_exit_debugfs(void);
+
+/* from remoteproc_sysfs.c */
+extern struct class rproc_class;
+int rproc_init_sysfs(void);
+void rproc_exit_sysfs(void);
+
+/* from remoteproc_coredump.c */
+void rproc_coredump_cleanup(struct rproc *rproc);
+void rproc_coredump(struct rproc *rproc);
+
+#ifdef CONFIG_REMOTEPROC_CDEV
+void rproc_init_cdev(void);
+void rproc_exit_cdev(void);
+int rproc_char_device_add(struct rproc *rproc);
+void rproc_char_device_remove(struct rproc *rproc);
+#else
+static inline void rproc_init_cdev(void)
+{
+}
+
+static inline void rproc_exit_cdev(void)
+{
+}
+
+/*
+ * The character device interface is an optional feature; if it is not
+ * enabled, the function should not return an error.
+ */
+static inline int rproc_char_device_add(struct rproc *rproc)
+{
+ return 0;
+}
+
+static inline void rproc_char_device_remove(struct rproc *rproc)
+{
+}
+#endif
+
+void rproc_free_vring(struct rproc_vring *rvring);
+int rproc_alloc_vring(struct rproc_vdev *rvdev, int i);
+int rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i);
+
+phys_addr_t rproc_va_to_pa(void *cpu_addr);
+int rproc_trigger_recovery(struct rproc *rproc);
+
+int rproc_elf_sanity_check(struct rproc *rproc, const struct firmware *fw);
+u64 rproc_elf_get_boot_addr(struct rproc *rproc, const struct firmware *fw);
+int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw);
+int rproc_elf_load_rsc_table(struct rproc *rproc, const struct firmware *fw);
+struct resource_table *rproc_elf_find_loaded_rsc_table(struct rproc *rproc,
+ const struct firmware *fw);
+struct rproc_mem_entry *
+rproc_find_carveout_by_name(struct rproc *rproc, const char *name, ...);
+void rproc_add_rvdev(struct rproc *rproc, struct rproc_vdev *rvdev);
+void rproc_remove_rvdev(struct rproc_vdev *rvdev);
+
+static inline int rproc_prepare_device(struct rproc *rproc)
+{
+ if (rproc->ops->prepare)
+ return rproc->ops->prepare(rproc);
+
+ return 0;
+}
+
+static inline int rproc_unprepare_device(struct rproc *rproc)
+{
+ if (rproc->ops->unprepare)
+ return rproc->ops->unprepare(rproc);
+
+ return 0;
+}
+
+static inline int rproc_attach_device(struct rproc *rproc)
+{
+ if (rproc->ops->attach)
+ return rproc->ops->attach(rproc);
+
+ return 0;
+}
+
+static inline
+int rproc_fw_sanity_check(struct rproc *rproc, const struct firmware *fw)
+{
+ if (rproc->ops->sanity_check)
+ return rproc->ops->sanity_check(rproc, fw);
+
+ return 0;
+}
+
+static inline
+u64 rproc_get_boot_addr(struct rproc *rproc, const struct firmware *fw)
+{
+ if (rproc->ops->get_boot_addr)
+ return rproc->ops->get_boot_addr(rproc, fw);
+
+ return 0;
+}
+
+static inline
+int rproc_load_segments(struct rproc *rproc, const struct firmware *fw)
+{
+ if (rproc->ops->load)
+ return rproc->ops->load(rproc, fw);
+
+ return -EINVAL;
+}
+
+static inline int rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
+{
+ if (rproc->ops->parse_fw)
+ return rproc->ops->parse_fw(rproc, fw);
+
+ return 0;
+}
+
+static inline
+int rproc_handle_rsc(struct rproc *rproc, u32 rsc_type, void *rsc, int offset,
+ int avail)
+{
+ if (rproc->ops->handle_rsc)
+ return rproc->ops->handle_rsc(rproc, rsc_type, rsc, offset,
+ avail);
+
+ return RSC_IGNORED;
+}
+
+static inline
+struct resource_table *rproc_find_loaded_rsc_table(struct rproc *rproc,
+ const struct firmware *fw)
+{
+ if (rproc->ops->find_loaded_rsc_table)
+ return rproc->ops->find_loaded_rsc_table(rproc, fw);
+
+ return NULL;
+}
+
+static inline
+struct resource_table *rproc_get_loaded_rsc_table(struct rproc *rproc,
+ size_t *size)
+{
+ if (rproc->ops->get_loaded_rsc_table)
+ return rproc->ops->get_loaded_rsc_table(rproc, size);
+
+ return NULL;
+}
+
+static inline
+bool rproc_u64_fit_in_size_t(u64 val)
+{
+ if (sizeof(size_t) == sizeof(u64))
+ return true;
+
+ return (val <= (size_t) -1);
+}
+
+#endif /* REMOTEPROC_INTERNAL_H */
diff --git a/drivers/remoteproc/remoteproc_sysfs.c b/drivers/remoteproc/remoteproc_sysfs.c
new file mode 100644
index 000000000..8c7ea8922
--- /dev/null
+++ b/drivers/remoteproc/remoteproc_sysfs.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Remote Processor Framework
+ */
+
+#include <linux/remoteproc.h>
+#include <linux/slab.h>
+
+#include "remoteproc_internal.h"
+
+#define to_rproc(d) container_of(d, struct rproc, dev)
+
+static ssize_t recovery_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct rproc *rproc = to_rproc(dev);
+
+ return sysfs_emit(buf, "%s", rproc->recovery_disabled ? "disabled\n" : "enabled\n");
+}
+
+/*
+ * By writing to the 'recovery' sysfs entry, we control the behavior of the
+ * recovery mechanism dynamically. The default value of this entry is "enabled".
+ *
+ * The 'recovery' sysfs entry supports these commands:
+ *
+ * enabled: When enabled, the remote processor will be automatically
+ * recovered whenever it crashes. Moreover, if the remote
+ * processor crashes while recovery is disabled, it will
+ * be automatically recovered too as soon as recovery is enabled.
+ *
+ * disabled: When disabled, a remote processor will remain in a crashed
+ * state if it crashes. This is useful for debugging purposes;
+ * without it, debugging a crash is substantially harder.
+ *
+ * recover:	This command will trigger an immediate recovery if the
+ * remote processor is in a crashed state, without changing
+ * or checking the recovery state (enabled/disabled).
+ * This is useful during debugging sessions, when one expects
+ * additional crashes to happen after enabling recovery. In this
+ * case, enabling recovery will make it hard to debug subsequent
+ * crashes, so it's recommended to keep recovery disabled, and
+ * instead use the "recover" command as needed.
+ */
+static ssize_t recovery_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rproc *rproc = to_rproc(dev);
+
+ if (sysfs_streq(buf, "enabled")) {
+ /* change the flag and begin the recovery process if needed */
+ rproc->recovery_disabled = false;
+ rproc_trigger_recovery(rproc);
+ } else if (sysfs_streq(buf, "disabled")) {
+ rproc->recovery_disabled = true;
+ } else if (sysfs_streq(buf, "recover")) {
+ /* begin the recovery process without changing the flag */
+ rproc_trigger_recovery(rproc);
+ } else {
+ return -EINVAL;
+ }
+
+ return count;
+}
+static DEVICE_ATTR_RW(recovery);
+
+/*
+ * A coredump-configuration-to-string lookup table, for exposing a
+ * human readable configuration via sysfs. Always keep in sync with
+ * enum rproc_coredump_mechanism
+ */
+static const char * const rproc_coredump_str[] = {
+ [RPROC_COREDUMP_DISABLED] = "disabled",
+ [RPROC_COREDUMP_ENABLED] = "enabled",
+ [RPROC_COREDUMP_INLINE] = "inline",
+};
+
+/* Expose the current coredump configuration via debugfs */
+static ssize_t coredump_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct rproc *rproc = to_rproc(dev);
+
+ return sysfs_emit(buf, "%s\n", rproc_coredump_str[rproc->dump_conf]);
+}
+
+/*
+ * By writing to the 'coredump' sysfs entry, we control the behavior of the
+ * coredump mechanism dynamically. The default value of this entry is "disabled".
+ *
+ * The 'coredump' sysfs entry supports these commands:
+ *
+ * disabled:	By default coredump collection is disabled. Recovery will
+ *		proceed without collecting any dump.
+ *
+ * enabled:	When the remoteproc crashes the entire coredump will be
+ *		copied to a separate buffer and exposed to userspace.
+ *
+ * inline:	The coredump will not be copied to a separate buffer and the
+ *		recovery process will have to wait until data is read by
+ *		userspace. But this avoids using extra memory.
+ */
+static ssize_t coredump_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rproc *rproc = to_rproc(dev);
+
+ if (rproc->state == RPROC_CRASHED) {
+ dev_err(&rproc->dev, "can't change coredump configuration\n");
+ return -EBUSY;
+ }
+
+ if (sysfs_streq(buf, "disabled")) {
+ rproc->dump_conf = RPROC_COREDUMP_DISABLED;
+ } else if (sysfs_streq(buf, "enabled")) {
+ rproc->dump_conf = RPROC_COREDUMP_ENABLED;
+ } else if (sysfs_streq(buf, "inline")) {
+ rproc->dump_conf = RPROC_COREDUMP_INLINE;
+ } else {
+ dev_err(&rproc->dev, "Invalid coredump configuration\n");
+ return -EINVAL;
+ }
+
+ return count;
+}
+static DEVICE_ATTR_RW(coredump);
+
+/* Expose the loaded / running firmware name via sysfs */
+static ssize_t firmware_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct rproc *rproc = to_rproc(dev);
+ const char *firmware = rproc->firmware;
+
+ /*
+ * If the remote processor has been started by an external
+	 * entity we have no idea what image it is running. As such,
+	 * simply display a generic string rather than rproc->firmware.
+ */
+ if (rproc->state == RPROC_ATTACHED)
+ firmware = "unknown";
+
+ return sprintf(buf, "%s\n", firmware);
+}
+
+/* Change firmware name via sysfs */
+static ssize_t firmware_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rproc *rproc = to_rproc(dev);
+ int err;
+
+ err = rproc_set_firmware(rproc, buf);
+
+ return err ? err : count;
+}
+static DEVICE_ATTR_RW(firmware);
+
+/*
+ * A state-to-string lookup table, for exposing a human readable state
+ * via sysfs. Always keep in sync with enum rproc_state
+ */
+static const char * const rproc_state_string[] = {
+ [RPROC_OFFLINE] = "offline",
+ [RPROC_SUSPENDED] = "suspended",
+ [RPROC_RUNNING] = "running",
+ [RPROC_CRASHED] = "crashed",
+ [RPROC_DELETED] = "deleted",
+ [RPROC_ATTACHED] = "attached",
+ [RPROC_DETACHED] = "detached",
+ [RPROC_LAST] = "invalid",
+};
+
+/* Expose the state of the remote processor via sysfs */
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct rproc *rproc = to_rproc(dev);
+ unsigned int state;
+
+ state = rproc->state > RPROC_LAST ? RPROC_LAST : rproc->state;
+ return sprintf(buf, "%s\n", rproc_state_string[state]);
+}
+
+/* Change remote processor state via sysfs */
+static ssize_t state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rproc *rproc = to_rproc(dev);
+ int ret = 0;
+
+ if (sysfs_streq(buf, "start")) {
+ ret = rproc_boot(rproc);
+ if (ret)
+ dev_err(&rproc->dev, "Boot failed: %d\n", ret);
+ } else if (sysfs_streq(buf, "stop")) {
+ ret = rproc_shutdown(rproc);
+ } else if (sysfs_streq(buf, "detach")) {
+ ret = rproc_detach(rproc);
+ } else {
+ dev_err(&rproc->dev, "Unrecognised option: %s\n", buf);
+ ret = -EINVAL;
+ }
+ return ret ? ret : count;
+}
+static DEVICE_ATTR_RW(state);
+
+/* Expose the name of the remote processor via sysfs */
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct rproc *rproc = to_rproc(dev);
+
+ return sprintf(buf, "%s\n", rproc->name);
+}
+static DEVICE_ATTR_RO(name);
+
+static umode_t rproc_is_visible(struct kobject *kobj, struct attribute *attr,
+ int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct rproc *rproc = to_rproc(dev);
+ umode_t mode = attr->mode;
+
+ if (rproc->sysfs_read_only && (attr == &dev_attr_recovery.attr ||
+ attr == &dev_attr_firmware.attr ||
+ attr == &dev_attr_state.attr ||
+ attr == &dev_attr_coredump.attr))
+ mode = 0444;
+
+ return mode;
+}
+
+static struct attribute *rproc_attrs[] = {
+ &dev_attr_coredump.attr,
+ &dev_attr_recovery.attr,
+ &dev_attr_firmware.attr,
+ &dev_attr_state.attr,
+ &dev_attr_name.attr,
+ NULL
+};
+
+static const struct attribute_group rproc_devgroup = {
+ .attrs = rproc_attrs,
+ .is_visible = rproc_is_visible,
+};
+
+static const struct attribute_group *rproc_devgroups[] = {
+ &rproc_devgroup,
+ NULL
+};
+
+struct class rproc_class = {
+ .name = "remoteproc",
+ .dev_groups = rproc_devgroups,
+};
+
+int __init rproc_init_sysfs(void)
+{
+ /* create remoteproc device class for sysfs */
+ int err = class_register(&rproc_class);
+
+ if (err)
+ pr_err("remoteproc: unable to register class\n");
+ return err;
+}
+
+void __exit rproc_exit_sysfs(void)
+{
+ class_unregister(&rproc_class);
+}
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
new file mode 100644
index 000000000..0e95525c1
--- /dev/null
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -0,0 +1,603 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Remote processor messaging transport (OMAP platform-specific bits)
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Ohad Ben-Cohen <ohad@wizery.com>
+ * Brian Swetland <swetland@google.com>
+ */
+
+#include <linux/dma-direct.h>
+#include <linux/dma-map-ops.h>
+#include <linux/dma-mapping.h>
+#include <linux/export.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_ring.h>
+#include <linux/err.h>
+#include <linux/kref.h>
+#include <linux/slab.h>
+
+#include "remoteproc_internal.h"
+
+static int copy_dma_range_map(struct device *to, struct device *from)
+{
+ const struct bus_dma_region *map = from->dma_range_map, *new_map, *r;
+ int num_ranges = 0;
+
+ if (!map)
+ return 0;
+
+ for (r = map; r->size; r++)
+ num_ranges++;
+
+ new_map = kmemdup(map, array_size(num_ranges + 1, sizeof(*map)),
+ GFP_KERNEL);
+ if (!new_map)
+ return -ENOMEM;
+ to->dma_range_map = new_map;
+ return 0;
+}
+
+static struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev)
+{
+ struct platform_device *pdev;
+
+ pdev = container_of(vdev->dev.parent, struct platform_device, dev);
+
+ return platform_get_drvdata(pdev);
+}
+
+static struct rproc *vdev_to_rproc(struct virtio_device *vdev)
+{
+ struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
+
+ return rvdev->rproc;
+}
+
+/* kick the remote processor, and let it know which virtqueue to poke at */
+static bool rproc_virtio_notify(struct virtqueue *vq)
+{
+ struct rproc_vring *rvring = vq->priv;
+ struct rproc *rproc = rvring->rvdev->rproc;
+ int notifyid = rvring->notifyid;
+
+ dev_dbg(&rproc->dev, "kicking vq index: %d\n", notifyid);
+
+ rproc->ops->kick(rproc, notifyid);
+ return true;
+}
+
+/**
+ * rproc_vq_interrupt() - tell remoteproc that a virtqueue is interrupted
+ * @rproc: handle to the remote processor
+ * @notifyid: index of the signalled virtqueue (unique per this @rproc)
+ *
+ * This function should be called by the platform-specific rproc driver,
+ * when the remote processor signals that a specific virtqueue has pending
+ * messages available.
+ *
+ * Return: IRQ_NONE if no message was found in the @notifyid virtqueue,
+ * and otherwise returns IRQ_HANDLED.
+ */
+irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int notifyid)
+{
+ struct rproc_vring *rvring;
+
+ dev_dbg(&rproc->dev, "vq index %d is interrupted\n", notifyid);
+
+ rvring = idr_find(&rproc->notifyids, notifyid);
+ if (!rvring || !rvring->vq)
+ return IRQ_NONE;
+
+ return vring_interrupt(0, rvring->vq);
+}
+EXPORT_SYMBOL(rproc_vq_interrupt);
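+
+/*
+ * Illustrative caller sketch (hypothetical driver code, not part of
+ * this file): a platform rproc driver forwarding a vring doorbell
+ * interrupt to the core. A fixed notifyid of 0 is assumed for this
+ * interrupt line.
+ */
+#if 0 /* example only, never compiled here */
+static irqreturn_t example_rproc_doorbell(int irq, void *data)
+{
+	struct rproc *rproc = data;
+
+	/* IRQ_HANDLED if a message was pending, IRQ_NONE otherwise */
+	return rproc_vq_interrupt(rproc, 0);
+}
+#endif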
+
+static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
+ unsigned int id,
+ void (*callback)(struct virtqueue *vq),
+ const char *name, bool ctx)
+{
+ struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
+ struct rproc *rproc = vdev_to_rproc(vdev);
+ struct device *dev = &rproc->dev;
+ struct rproc_mem_entry *mem;
+ struct rproc_vring *rvring;
+ struct fw_rsc_vdev *rsc;
+ struct virtqueue *vq;
+ void *addr;
+ int num, size;
+
+ /* we're temporarily limited to two virtqueues per rvdev */
+ if (id >= ARRAY_SIZE(rvdev->vring))
+ return ERR_PTR(-EINVAL);
+
+ if (!name)
+ return NULL;
+
+ /* Search allocated memory region by name */
+ mem = rproc_find_carveout_by_name(rproc, "vdev%dvring%d", rvdev->index,
+ id);
+ if (!mem || !mem->va)
+ return ERR_PTR(-ENOMEM);
+
+ rvring = &rvdev->vring[id];
+ addr = mem->va;
+ num = rvring->num;
+
+ /* zero vring */
+ size = vring_size(num, rvring->align);
+ memset(addr, 0, size);
+
+ dev_dbg(dev, "vring%d: va %pK qsz %d notifyid %d\n",
+ id, addr, num, rvring->notifyid);
+
+ /*
+ * Create the new vq, and tell virtio we're not interested in
+ * the 'weak' smp barriers, since we're talking with a real device.
+ */
+ vq = vring_new_virtqueue(id, num, rvring->align, vdev, false, ctx,
+ addr, rproc_virtio_notify, callback, name);
+ if (!vq) {
+ dev_err(dev, "vring_new_virtqueue %s failed\n", name);
+ rproc_free_vring(rvring);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ vq->num_max = num;
+
+ rvring->vq = vq;
+ vq->priv = rvring;
+
+ /* Update vring in resource table */
+ rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;
+ rsc->vring[id].da = mem->da;
+
+ return vq;
+}
+
+static void __rproc_virtio_del_vqs(struct virtio_device *vdev)
+{
+ struct virtqueue *vq, *n;
+ struct rproc_vring *rvring;
+
+ list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
+ rvring = vq->priv;
+ rvring->vq = NULL;
+ vring_del_virtqueue(vq);
+ }
+}
+
+static void rproc_virtio_del_vqs(struct virtio_device *vdev)
+{
+ __rproc_virtio_del_vqs(vdev);
+}
+
+static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
+ struct virtqueue *vqs[],
+ vq_callback_t *callbacks[],
+ const char * const names[],
+ const bool * ctx,
+ struct irq_affinity *desc)
+{
+ int i, ret, queue_idx = 0;
+
+ for (i = 0; i < nvqs; ++i) {
+ if (!names[i]) {
+ vqs[i] = NULL;
+ continue;
+ }
+
+ vqs[i] = rp_find_vq(vdev, queue_idx++, callbacks[i], names[i],
+ ctx ? ctx[i] : false);
+ if (IS_ERR(vqs[i])) {
+ ret = PTR_ERR(vqs[i]);
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ __rproc_virtio_del_vqs(vdev);
+ return ret;
+}
+
+static u8 rproc_virtio_get_status(struct virtio_device *vdev)
+{
+ struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
+ struct fw_rsc_vdev *rsc;
+
+ rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
+
+ return rsc->status;
+}
+
+static void rproc_virtio_set_status(struct virtio_device *vdev, u8 status)
+{
+ struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
+ struct fw_rsc_vdev *rsc;
+
+ rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
+
+ rsc->status = status;
+ dev_dbg(&vdev->dev, "status: %d\n", status);
+}
+
+static void rproc_virtio_reset(struct virtio_device *vdev)
+{
+ struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
+ struct fw_rsc_vdev *rsc;
+
+ rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
+
+ rsc->status = 0;
+ dev_dbg(&vdev->dev, "reset !\n");
+}
+
+/* provide the vdev features as retrieved from the firmware */
+static u64 rproc_virtio_get_features(struct virtio_device *vdev)
+{
+ struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
+ struct fw_rsc_vdev *rsc;
+
+ rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
+
+ return rsc->dfeatures;
+}
+
+static void rproc_transport_features(struct virtio_device *vdev)
+{
+ /*
+ * Packed ring isn't enabled on remoteproc for now,
+ * because remoteproc uses vring_new_virtqueue() which
+ * creates virtio rings on preallocated memory.
+ */
+ __virtio_clear_bit(vdev, VIRTIO_F_RING_PACKED);
+}
+
+static int rproc_virtio_finalize_features(struct virtio_device *vdev)
+{
+ struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
+ struct fw_rsc_vdev *rsc;
+
+ rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
+
+ /* Give virtio_ring a chance to accept features */
+ vring_transport_features(vdev);
+
+ /* Give virtio_rproc a chance to accept features. */
+ rproc_transport_features(vdev);
+
+ /* Make sure we don't have any features > 32 bits! */
+ BUG_ON((u32)vdev->features != vdev->features);
+
+ /*
+ * Remember the finalized features of our vdev, and provide it
+ * to the remote processor once it is powered on.
+ */
+ rsc->gfeatures = vdev->features;
+
+ return 0;
+}
+
+static void rproc_virtio_get(struct virtio_device *vdev, unsigned int offset,
+ void *buf, unsigned int len)
+{
+ struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
+ struct fw_rsc_vdev *rsc;
+ void *cfg;
+
+ rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
+ cfg = &rsc->vring[rsc->num_of_vrings];
+
+ if (offset + len > rsc->config_len || offset + len < len) {
+ dev_err(&vdev->dev, "rproc_virtio_get: access out of bounds\n");
+ return;
+ }
+
+ memcpy(buf, cfg + offset, len);
+}
+
+static void rproc_virtio_set(struct virtio_device *vdev, unsigned int offset,
+ const void *buf, unsigned int len)
+{
+ struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
+ struct fw_rsc_vdev *rsc;
+ void *cfg;
+
+ rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
+ cfg = &rsc->vring[rsc->num_of_vrings];
+
+ if (offset + len > rsc->config_len || offset + len < len) {
+ dev_err(&vdev->dev, "rproc_virtio_set: access out of bounds\n");
+ return;
+ }
+
+ memcpy(cfg + offset, buf, len);
+}
+
+static const struct virtio_config_ops rproc_virtio_config_ops = {
+ .get_features = rproc_virtio_get_features,
+ .finalize_features = rproc_virtio_finalize_features,
+ .find_vqs = rproc_virtio_find_vqs,
+ .del_vqs = rproc_virtio_del_vqs,
+ .reset = rproc_virtio_reset,
+ .set_status = rproc_virtio_set_status,
+ .get_status = rproc_virtio_get_status,
+ .get = rproc_virtio_get,
+ .set = rproc_virtio_set,
+};
+
+/*
+ * This function is called whenever the vdev is released, and is responsible
+ * for decrementing the remote processor's refcount, which was taken when the
+ * vdev was added.
+ *
+ * Never call this function directly; it will be called by the driver
+ * core when needed.
+ */
+static void rproc_virtio_dev_release(struct device *dev)
+{
+ struct virtio_device *vdev = dev_to_virtio(dev);
+ struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
+
+ kfree(vdev);
+
+ put_device(&rvdev->pdev->dev);
+}
+
+/**
+ * rproc_add_virtio_dev() - register an rproc-induced virtio device
+ * @rvdev: the remote vdev
+ * @id: the device type identification (used to match it with a driver).
+ *
+ * This function registers a virtio device. This vdev's parent is
+ * the rvdev's platform device.
+ *
+ * Return: 0 on success or an appropriate error value otherwise
+ */
+static int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
+{
+ struct rproc *rproc = rvdev->rproc;
+ struct device *dev = &rvdev->pdev->dev;
+ struct virtio_device *vdev;
+ struct rproc_mem_entry *mem;
+ int ret;
+
+ if (rproc->ops->kick == NULL) {
+ ret = -EINVAL;
+ dev_err(dev, ".kick method not defined for %s\n", rproc->name);
+ goto out;
+ }
+
+ /* Try to find dedicated vdev buffer carveout */
+ mem = rproc_find_carveout_by_name(rproc, "vdev%dbuffer", rvdev->index);
+ if (mem) {
+ phys_addr_t pa;
+
+ if (mem->of_resm_idx != -1) {
+ struct device_node *np = rproc->dev.parent->of_node;
+
+ /* Associate reserved memory to vdev device */
+ ret = of_reserved_mem_device_init_by_idx(dev, np,
+ mem->of_resm_idx);
+ if (ret) {
+ dev_err(dev, "Can't associate reserved memory\n");
+ goto out;
+ }
+ } else {
+ if (mem->va) {
+ dev_warn(dev, "vdev %d buffer already mapped\n",
+ rvdev->index);
+ pa = rproc_va_to_pa(mem->va);
+ } else {
+ /* Use the dma address, as the carveout is not memory-mapped yet */
+ pa = (phys_addr_t)mem->dma;
+ }
+
+ /* Associate vdev buffer memory pool to vdev subdev */
+ ret = dma_declare_coherent_memory(dev, pa,
+ mem->da,
+ mem->len);
+ if (ret < 0) {
+ dev_err(dev, "Failed to associate buffer\n");
+ goto out;
+ }
+ }
+ } else {
+ struct device_node *np = rproc->dev.parent->of_node;
+
+ /*
+ * If we don't have dedicated buffer, just attempt to re-assign
+ * the reserved memory from our parent. A default memory-region
+ * at index 0 from the parent's memory-regions is assigned for
+ * the rvdev dev to allocate from. Failure is non-critical and
+ * the allocations will fall back to global pools, so don't
+ * check return value either.
+ */
+ of_reserved_mem_device_init_by_idx(dev, np, 0);
+ }
+
+ /* Allocate virtio device */
+ vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
+ if (!vdev) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ vdev->id.device = id;
+ vdev->config = &rproc_virtio_config_ops;
+ vdev->dev.parent = dev;
+ vdev->dev.release = rproc_virtio_dev_release;
+
+ /* Reference the vdev and vring allocations */
+ get_device(dev);
+
+ ret = register_virtio_device(vdev);
+ if (ret) {
+ put_device(&vdev->dev);
+ dev_err(dev, "failed to register vdev: %d\n", ret);
+ goto out;
+ }
+
+ dev_info(dev, "registered %s (type %d)\n", dev_name(&vdev->dev), id);
+
+out:
+ return ret;
+}
+
+/**
+ * rproc_remove_virtio_dev() - remove an rproc-induced virtio device
+ * @dev: the virtio device
+ * @data: must be null
+ *
+ * This function unregisters an existing virtio device.
+ *
+ * Return: 0
+ */
+static int rproc_remove_virtio_dev(struct device *dev, void *data)
+{
+ struct virtio_device *vdev = dev_to_virtio(dev);
+
+ unregister_virtio_device(vdev);
+ return 0;
+}
+
+static int rproc_vdev_do_start(struct rproc_subdev *subdev)
+{
+ struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);
+
+ return rproc_add_virtio_dev(rvdev, rvdev->id);
+}
+
+static void rproc_vdev_do_stop(struct rproc_subdev *subdev, bool crashed)
+{
+ struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);
+ struct device *dev = &rvdev->pdev->dev;
+ int ret;
+
+ ret = device_for_each_child(dev, NULL, rproc_remove_virtio_dev);
+ if (ret)
+ dev_warn(dev, "can't remove vdev child device: %d\n", ret);
+}
+
+static int rproc_virtio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rproc_vdev_data *rvdev_data = dev->platform_data;
+ struct rproc_vdev *rvdev;
+ struct rproc *rproc = container_of(dev->parent, struct rproc, dev);
+ struct fw_rsc_vdev *rsc;
+ int i, ret;
+
+ if (!rvdev_data)
+ return -EINVAL;
+
+ rvdev = devm_kzalloc(dev, sizeof(*rvdev), GFP_KERNEL);
+ if (!rvdev)
+ return -ENOMEM;
+
+ rvdev->id = rvdev_data->id;
+ rvdev->rproc = rproc;
+ rvdev->index = rvdev_data->index;
+
+ ret = copy_dma_range_map(dev, rproc->dev.parent);
+ if (ret)
+ return ret;
+
+ /* Make device dma capable by inheriting from parent's capabilities */
+ set_dma_ops(dev, get_dma_ops(rproc->dev.parent));
+
+ ret = dma_coerce_mask_and_coherent(dev, dma_get_mask(rproc->dev.parent));
+ if (ret) {
+ dev_warn(dev, "Failed to set DMA mask %llx. Trying to continue... (%pe)\n",
+ dma_get_mask(rproc->dev.parent), ERR_PTR(ret));
+ }
+
+ platform_set_drvdata(pdev, rvdev);
+ rvdev->pdev = pdev;
+
+ rsc = rvdev_data->rsc;
+
+ /* parse the vrings */
+ for (i = 0; i < rsc->num_of_vrings; i++) {
+ ret = rproc_parse_vring(rvdev, rsc, i);
+ if (ret)
+ return ret;
+ }
+
+ /* remember the resource offset */
+ rvdev->rsc_offset = rvdev_data->rsc_offset;
+
+ /* allocate the vring resources */
+ for (i = 0; i < rsc->num_of_vrings; i++) {
+ ret = rproc_alloc_vring(rvdev, i);
+ if (ret)
+ goto unwind_vring_allocations;
+ }
+
+ rproc_add_rvdev(rproc, rvdev);
+
+ rvdev->subdev.start = rproc_vdev_do_start;
+ rvdev->subdev.stop = rproc_vdev_do_stop;
+
+ rproc_add_subdev(rproc, &rvdev->subdev);
+
+ /*
+ * We're indirectly making a non-temporary copy of the rproc pointer
+ * here, because the platform device or the vdev device will indirectly
+ * access the wrapping rproc.
+ *
+ * Therefore we must increment the rproc refcount here, and decrement
+ * it _only_ on platform remove.
+ */
+ get_device(&rproc->dev);
+
+ return 0;
+
+unwind_vring_allocations:
+ for (i--; i >= 0; i--)
+ rproc_free_vring(&rvdev->vring[i]);
+
+ return ret;
+}
+
+static int rproc_virtio_remove(struct platform_device *pdev)
+{
+ struct rproc_vdev *rvdev = dev_get_drvdata(&pdev->dev);
+ struct rproc *rproc = rvdev->rproc;
+ struct rproc_vring *rvring;
+ int id;
+
+ for (id = 0; id < ARRAY_SIZE(rvdev->vring); id++) {
+ rvring = &rvdev->vring[id];
+ rproc_free_vring(rvring);
+ }
+
+ rproc_remove_subdev(rproc, &rvdev->subdev);
+ rproc_remove_rvdev(rvdev);
+
+ of_reserved_mem_device_release(&pdev->dev);
+ dma_release_coherent_memory(&pdev->dev);
+
+ put_device(&rproc->dev);
+
+ return 0;
+}
+
+/* Platform driver */
+static struct platform_driver rproc_virtio_driver = {
+ .probe = rproc_virtio_probe,
+ .remove = rproc_virtio_remove,
+ .driver = {
+ .name = "rproc-virtio",
+ },
+};
+builtin_platform_driver(rproc_virtio_driver);
diff --git a/drivers/remoteproc/st_remoteproc.c b/drivers/remoteproc/st_remoteproc.c
new file mode 100644
index 000000000..e6bd3c7a9
--- /dev/null
+++ b/drivers/remoteproc/st_remoteproc.c
@@ -0,0 +1,481 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ST's Remote Processor Control Driver
+ *
+ * Copyright (C) 2015 STMicroelectronics - All Rights Reserved
+ *
+ * Author: Ludovic Barre <ludovic.barre@st.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_client.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/remoteproc.h>
+#include <linux/reset.h>
+
+#include "remoteproc_internal.h"
+
+#define ST_RPROC_VQ0 0
+#define ST_RPROC_VQ1 1
+#define ST_RPROC_MAX_VRING 2
+
+#define MBOX_RX 0
+#define MBOX_TX 1
+#define MBOX_MAX 2
+
+struct st_rproc_config {
+ bool sw_reset;
+ bool pwr_reset;
+ unsigned long bootaddr_mask;
+};
+
+struct st_rproc {
+ struct st_rproc_config *config;
+ struct reset_control *sw_reset;
+ struct reset_control *pwr_reset;
+ struct clk *clk;
+ u32 clk_rate;
+ struct regmap *boot_base;
+ u32 boot_offset;
+ struct mbox_chan *mbox_chan[ST_RPROC_MAX_VRING * MBOX_MAX];
+ struct mbox_client mbox_client_vq0;
+ struct mbox_client mbox_client_vq1;
+};
+
+static void st_rproc_mbox_callback(struct device *dev, u32 msg)
+{
+ struct rproc *rproc = dev_get_drvdata(dev);
+
+ if (rproc_vq_interrupt(rproc, msg) == IRQ_NONE)
+ dev_dbg(dev, "no message was found in vqid %d\n", msg);
+}
+
+static
+void st_rproc_mbox_callback_vq0(struct mbox_client *mbox_client, void *data)
+{
+ st_rproc_mbox_callback(mbox_client->dev, 0);
+}
+
+static
+void st_rproc_mbox_callback_vq1(struct mbox_client *mbox_client, void *data)
+{
+ st_rproc_mbox_callback(mbox_client->dev, 1);
+}
+
+static void st_rproc_kick(struct rproc *rproc, int vqid)
+{
+ struct st_rproc *ddata = rproc->priv;
+ struct device *dev = rproc->dev.parent;
+ int ret;
+
+ /* send the index of the triggered virtqueue in the mailbox payload */
+ if (WARN_ON(vqid >= ST_RPROC_MAX_VRING))
+ return;
+
+ ret = mbox_send_message(ddata->mbox_chan[vqid * MBOX_MAX + MBOX_TX],
+ (void *)&vqid);
+ if (ret < 0)
+ dev_err(dev, "failed to send message via mbox: %d\n", ret);
+}
+
+static int st_rproc_mem_alloc(struct rproc *rproc,
+ struct rproc_mem_entry *mem)
+{
+ struct device *dev = rproc->dev.parent;
+ void *va;
+
+ va = ioremap_wc(mem->dma, mem->len);
+ if (!va) {
+ dev_err(dev, "Unable to map memory region: %pa+%zx\n",
+ &mem->dma, mem->len);
+ return -ENOMEM;
+ }
+
+ /* Update memory entry va */
+ mem->va = va;
+
+ return 0;
+}
+
+static int st_rproc_mem_release(struct rproc *rproc,
+ struct rproc_mem_entry *mem)
+{
+ iounmap(mem->va);
+
+ return 0;
+}
+
+static int st_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
+{
+ struct device *dev = rproc->dev.parent;
+ struct device_node *np = dev->of_node;
+ struct rproc_mem_entry *mem;
+ struct reserved_mem *rmem;
+ struct of_phandle_iterator it;
+ int index = 0;
+
+ of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
+ while (of_phandle_iterator_next(&it) == 0) {
+ rmem = of_reserved_mem_lookup(it.node);
+ if (!rmem) {
+ of_node_put(it.node);
+ dev_err(dev, "unable to acquire memory-region\n");
+ return -EINVAL;
+ }
+
+ /* No need to map vdev buffer */
+ if (strcmp(it.node->name, "vdev0buffer")) {
+ /* Register memory region */
+ mem = rproc_mem_entry_init(dev, NULL,
+ (dma_addr_t)rmem->base,
+ rmem->size, rmem->base,
+ st_rproc_mem_alloc,
+ st_rproc_mem_release,
+ it.node->name);
+ } else {
+ /* Register reserved memory for vdev buffer allocation */
+ mem = rproc_of_resm_mem_entry_init(dev, index,
+ rmem->size,
+ rmem->base,
+ it.node->name);
+ }
+
+ if (!mem) {
+ of_node_put(it.node);
+ return -ENOMEM;
+ }
+
+ rproc_add_carveout(rproc, mem);
+ index++;
+ }
+
+ return rproc_elf_load_rsc_table(rproc, fw);
+}
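+
+/*
+ * Illustrative device-tree shape consumed by the parser above (labels,
+ * addresses and sizes are hypothetical):
+ *
+ *	reserved-memory {
+ *		rproc_rsvd: rproc@40000000 {
+ *			reg = <0x40000000 0x100000>;
+ *			no-map;
+ *		};
+ *		vdev0buffer: vdev0buffer@40100000 {
+ *			compatible = "shared-dma-pool";
+ *			reg = <0x40100000 0x10000>;
+ *			no-map;
+ *		};
+ *	};
+ *
+ *	&remote_processor {
+ *		memory-region = <&rproc_rsvd>, <&vdev0buffer>;
+ *	};
+ */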
+
+static int st_rproc_start(struct rproc *rproc)
+{
+ struct st_rproc *ddata = rproc->priv;
+ int err;
+
+ regmap_update_bits(ddata->boot_base, ddata->boot_offset,
+ ddata->config->bootaddr_mask, rproc->bootaddr);
+
+ err = clk_enable(ddata->clk);
+ if (err) {
+ dev_err(&rproc->dev, "Failed to enable clock\n");
+ return err;
+ }
+
+ if (ddata->config->sw_reset) {
+ err = reset_control_deassert(ddata->sw_reset);
+ if (err) {
+ dev_err(&rproc->dev, "Failed to deassert S/W Reset\n");
+ goto sw_reset_fail;
+ }
+ }
+
+ if (ddata->config->pwr_reset) {
+ err = reset_control_deassert(ddata->pwr_reset);
+ if (err) {
+ dev_err(&rproc->dev, "Failed to deassert Power Reset\n");
+ goto pwr_reset_fail;
+ }
+ }
+
+ dev_info(&rproc->dev, "Started from 0x%llx\n", rproc->bootaddr);
+
+ return 0;
+
+
+pwr_reset_fail:
+ if (ddata->config->pwr_reset)
+ reset_control_assert(ddata->sw_reset);
+sw_reset_fail:
+ clk_disable(ddata->clk);
+
+ return err;
+}
+
+static int st_rproc_stop(struct rproc *rproc)
+{
+ struct st_rproc *ddata = rproc->priv;
+ int sw_err = 0, pwr_err = 0;
+
+ if (ddata->config->sw_reset) {
+ sw_err = reset_control_assert(ddata->sw_reset);
+ if (sw_err)
+ dev_err(&rproc->dev, "Failed to assert S/W Reset\n");
+ }
+
+ if (ddata->config->pwr_reset) {
+ pwr_err = reset_control_assert(ddata->pwr_reset);
+ if (pwr_err)
+ dev_err(&rproc->dev, "Failed to assert Power Reset\n");
+ }
+
+ clk_disable(ddata->clk);
+
+ return sw_err ?: pwr_err;
+}
+
+static const struct rproc_ops st_rproc_ops = {
+ .kick = st_rproc_kick,
+ .start = st_rproc_start,
+ .stop = st_rproc_stop,
+ .parse_fw = st_rproc_parse_fw,
+ .load = rproc_elf_load_segments,
+ .find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
+ .sanity_check = rproc_elf_sanity_check,
+ .get_boot_addr = rproc_elf_get_boot_addr,
+};
+
+/*
+ * Fetch state of the processor: 0 is off, 1 is on.
+ */
+static int st_rproc_state(struct platform_device *pdev)
+{
+ struct rproc *rproc = platform_get_drvdata(pdev);
+ struct st_rproc *ddata = rproc->priv;
+ int reset_sw = 0, reset_pwr = 0;
+
+ if (ddata->config->sw_reset)
+ reset_sw = reset_control_status(ddata->sw_reset);
+
+ if (ddata->config->pwr_reset)
+ reset_pwr = reset_control_status(ddata->pwr_reset);
+
+ if (reset_sw < 0 || reset_pwr < 0)
+ return -EINVAL;
+
+ return !reset_sw && !reset_pwr;
+}
+
+static const struct st_rproc_config st40_rproc_cfg = {
+ .sw_reset = true,
+ .pwr_reset = true,
+ .bootaddr_mask = GENMASK(28, 1),
+};
+
+static const struct st_rproc_config st231_rproc_cfg = {
+ .sw_reset = true,
+ .pwr_reset = false,
+ .bootaddr_mask = GENMASK(31, 6),
+};
+
+static const struct of_device_id st_rproc_match[] = {
+ { .compatible = "st,st40-rproc", .data = &st40_rproc_cfg },
+ { .compatible = "st,st231-rproc", .data = &st231_rproc_cfg },
+ {},
+};
+MODULE_DEVICE_TABLE(of, st_rproc_match);
+
+static int st_rproc_parse_dt(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rproc *rproc = platform_get_drvdata(pdev);
+ struct st_rproc *ddata = rproc->priv;
+ struct device_node *np = dev->of_node;
+ int err;
+
+ if (ddata->config->sw_reset) {
+ ddata->sw_reset = devm_reset_control_get_exclusive(dev,
+ "sw_reset");
+ if (IS_ERR(ddata->sw_reset)) {
+ dev_err(dev, "Failed to get S/W Reset\n");
+ return PTR_ERR(ddata->sw_reset);
+ }
+ }
+
+ if (ddata->config->pwr_reset) {
+ ddata->pwr_reset = devm_reset_control_get_exclusive(dev,
+ "pwr_reset");
+ if (IS_ERR(ddata->pwr_reset)) {
+ dev_err(dev, "Failed to get Power Reset\n");
+ return PTR_ERR(ddata->pwr_reset);
+ }
+ }
+
+ ddata->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ddata->clk)) {
+ dev_err(dev, "Failed to get clock\n");
+ return PTR_ERR(ddata->clk);
+ }
+
+ err = of_property_read_u32(np, "clock-frequency", &ddata->clk_rate);
+ if (err) {
+ dev_err(dev, "failed to get clock frequency\n");
+ return err;
+ }
+
+ ddata->boot_base = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
+ if (IS_ERR(ddata->boot_base)) {
+ dev_err(dev, "Boot base not found\n");
+ return PTR_ERR(ddata->boot_base);
+ }
+
+ err = of_property_read_u32_index(np, "st,syscfg", 1,
+ &ddata->boot_offset);
+ if (err) {
+ dev_err(dev, "Boot offset not found\n");
+ return -EINVAL;
+ }
+
+ err = clk_prepare(ddata->clk);
+ if (err)
+ dev_err(dev, "failed to get clock\n");
+
+ return err;
+}
+
+static int st_rproc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *match;
+ struct st_rproc *ddata;
+ struct device_node *np = dev->of_node;
+ struct rproc *rproc;
+ struct mbox_chan *chan;
+ int enabled;
+ int ret, i;
+
+ match = of_match_device(st_rproc_match, dev);
+ if (!match || !match->data) {
+ dev_err(dev, "No device match found\n");
+ return -ENODEV;
+ }
+
+ rproc = rproc_alloc(dev, np->name, &st_rproc_ops, NULL, sizeof(*ddata));
+ if (!rproc)
+ return -ENOMEM;
+
+ rproc->has_iommu = false;
+ ddata = rproc->priv;
+ ddata->config = (struct st_rproc_config *)match->data;
+
+ platform_set_drvdata(pdev, rproc);
+
+ ret = st_rproc_parse_dt(pdev);
+ if (ret)
+ goto free_rproc;
+
+ enabled = st_rproc_state(pdev);
+ if (enabled < 0) {
+ ret = enabled;
+ goto free_clk;
+ }
+
+ if (enabled) {
+ atomic_inc(&rproc->power);
+ rproc->state = RPROC_RUNNING;
+ } else {
+ clk_set_rate(ddata->clk, ddata->clk_rate);
+ }
+
+ if (of_get_property(np, "mbox-names", NULL)) {
+ ddata->mbox_client_vq0.dev = dev;
+ ddata->mbox_client_vq0.tx_done = NULL;
+ ddata->mbox_client_vq0.tx_block = false;
+ ddata->mbox_client_vq0.knows_txdone = false;
+ ddata->mbox_client_vq0.rx_callback = st_rproc_mbox_callback_vq0;
+
+ ddata->mbox_client_vq1.dev = dev;
+ ddata->mbox_client_vq1.tx_done = NULL;
+ ddata->mbox_client_vq1.tx_block = false;
+ ddata->mbox_client_vq1.knows_txdone = false;
+ ddata->mbox_client_vq1.rx_callback = st_rproc_mbox_callback_vq1;
+
+ /*
+ * The mailboxes are optional: a co-processor can also be
+ * controlled with no IPC mechanism, so this driver remains
+ * usable without mbox and rpmsg.
+ */
+ chan = mbox_request_channel_byname(&ddata->mbox_client_vq0, "vq0_rx");
+ if (IS_ERR(chan)) {
+ dev_err(&rproc->dev, "failed to request mbox chan 0\n");
+ ret = PTR_ERR(chan);
+ goto free_clk;
+ }
+ ddata->mbox_chan[ST_RPROC_VQ0 * MBOX_MAX + MBOX_RX] = chan;
+
+ chan = mbox_request_channel_byname(&ddata->mbox_client_vq0, "vq0_tx");
+ if (IS_ERR(chan)) {
+ dev_err(&rproc->dev, "failed to request mbox chan 0\n");
+ ret = PTR_ERR(chan);
+ goto free_mbox;
+ }
+ ddata->mbox_chan[ST_RPROC_VQ0 * MBOX_MAX + MBOX_TX] = chan;
+
+ chan = mbox_request_channel_byname(&ddata->mbox_client_vq1, "vq1_rx");
+ if (IS_ERR(chan)) {
+ dev_err(&rproc->dev, "failed to request mbox chan 1\n");
+ ret = PTR_ERR(chan);
+ goto free_mbox;
+ }
+ ddata->mbox_chan[ST_RPROC_VQ1 * MBOX_MAX + MBOX_RX] = chan;
+
+ chan = mbox_request_channel_byname(&ddata->mbox_client_vq1, "vq1_tx");
+ if (IS_ERR(chan)) {
+ dev_err(&rproc->dev, "failed to request mbox chan 1\n");
+ ret = PTR_ERR(chan);
+ goto free_mbox;
+ }
+ ddata->mbox_chan[ST_RPROC_VQ1 * MBOX_MAX + MBOX_TX] = chan;
+ }
+
+ ret = rproc_add(rproc);
+ if (ret)
+ goto free_mbox;
+
+ return 0;
+
+free_mbox:
+ for (i = 0; i < ST_RPROC_MAX_VRING * MBOX_MAX; i++)
+ mbox_free_channel(ddata->mbox_chan[i]);
+free_clk:
+ clk_unprepare(ddata->clk);
+free_rproc:
+ rproc_free(rproc);
+ return ret;
+}
+
+static int st_rproc_remove(struct platform_device *pdev)
+{
+ struct rproc *rproc = platform_get_drvdata(pdev);
+ struct st_rproc *ddata = rproc->priv;
+ int i;
+
+ rproc_del(rproc);
+
+ clk_disable_unprepare(ddata->clk);
+
+ for (i = 0; i < ST_RPROC_MAX_VRING * MBOX_MAX; i++)
+ mbox_free_channel(ddata->mbox_chan[i]);
+
+ rproc_free(rproc);
+
+ return 0;
+}
+
+static struct platform_driver st_rproc_driver = {
+ .probe = st_rproc_probe,
+ .remove = st_rproc_remove,
+ .driver = {
+ .name = "st-rproc",
+ .of_match_table = of_match_ptr(st_rproc_match),
+ },
+};
+module_platform_driver(st_rproc_driver);
+
+MODULE_DESCRIPTION("ST Remote Processor Control Driver");
+MODULE_AUTHOR("Ludovic Barre <ludovic.barre@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/remoteproc/st_slim_rproc.c b/drivers/remoteproc/st_slim_rproc.c
new file mode 100644
index 000000000..4ed946789
--- /dev/null
+++ b/drivers/remoteproc/st_slim_rproc.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SLIM core rproc driver
+ *
+ * Copyright (C) 2016 STMicroelectronics
+ *
+ * Author: Peter Griffin <peter.griffin@linaro.org>
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/remoteproc/st_slim_rproc.h>
+#include "remoteproc_internal.h"
+
+/* SLIM core registers */
+#define SLIM_ID_OFST 0x0
+#define SLIM_VER_OFST 0x4
+
+#define SLIM_EN_OFST 0x8
+#define SLIM_EN_RUN BIT(0)
+
+#define SLIM_CLK_GATE_OFST 0xC
+#define SLIM_CLK_GATE_DIS BIT(0)
+#define SLIM_CLK_GATE_RESET BIT(2)
+
+#define SLIM_SLIM_PC_OFST 0x20
+
+/* DMEM registers */
+#define SLIM_REV_ID_OFST 0x0
+#define SLIM_REV_ID_MIN_MASK GENMASK(15, 8)
+#define SLIM_REV_ID_MIN(id) (((id) & SLIM_REV_ID_MIN_MASK) >> 8)
+#define SLIM_REV_ID_MAJ_MASK GENMASK(23, 16)
+#define SLIM_REV_ID_MAJ(id) (((id) & SLIM_REV_ID_MAJ_MASK) >> 16)
+
+
+/* peripherals registers */
+#define SLIM_STBUS_SYNC_OFST 0xF88
+#define SLIM_STBUS_SYNC_DIS BIT(0)
+
+#define SLIM_INT_SET_OFST 0xFD4
+#define SLIM_INT_CLR_OFST 0xFD8
+#define SLIM_INT_MASK_OFST 0xFDC
+
+#define SLIM_CMD_CLR_OFST 0xFC8
+#define SLIM_CMD_MASK_OFST 0xFCC
+
+static const char *mem_names[ST_SLIM_MEM_MAX] = {
+ [ST_SLIM_DMEM] = "dmem",
+ [ST_SLIM_IMEM] = "imem",
+};
+
+static int slim_clk_get(struct st_slim_rproc *slim_rproc, struct device *dev)
+{
+ int clk, err;
+
+ for (clk = 0; clk < ST_SLIM_MAX_CLK; clk++) {
+ slim_rproc->clks[clk] = of_clk_get(dev->of_node, clk);
+ if (IS_ERR(slim_rproc->clks[clk])) {
+ err = PTR_ERR(slim_rproc->clks[clk]);
+ if (err == -EPROBE_DEFER)
+ goto err_put_clks;
+ slim_rproc->clks[clk] = NULL;
+ break;
+ }
+ }
+
+ return 0;
+
+err_put_clks:
+ while (--clk >= 0)
+ clk_put(slim_rproc->clks[clk]);
+
+ return err;
+}
+
+static void slim_clk_disable(struct st_slim_rproc *slim_rproc)
+{
+ int clk;
+
+ for (clk = 0; clk < ST_SLIM_MAX_CLK && slim_rproc->clks[clk]; clk++)
+ clk_disable_unprepare(slim_rproc->clks[clk]);
+}
+
+static int slim_clk_enable(struct st_slim_rproc *slim_rproc)
+{
+ int clk, ret;
+
+ for (clk = 0; clk < ST_SLIM_MAX_CLK && slim_rproc->clks[clk]; clk++) {
+ ret = clk_prepare_enable(slim_rproc->clks[clk]);
+ if (ret)
+ goto err_disable_clks;
+ }
+
+ return 0;
+
+err_disable_clks:
+ while (--clk >= 0)
+ clk_disable_unprepare(slim_rproc->clks[clk]);
+
+ return ret;
+}
+
+/*
+ * Remoteproc slim specific device handlers
+ */
+static int slim_rproc_start(struct rproc *rproc)
+{
+ struct device *dev = &rproc->dev;
+ struct st_slim_rproc *slim_rproc = rproc->priv;
+ unsigned long hw_id, hw_ver, fw_rev;
+ u32 val;
+
+ /* disable CPU pipeline clock & reset CPU pipeline */
+ val = SLIM_CLK_GATE_DIS | SLIM_CLK_GATE_RESET;
+ writel(val, slim_rproc->slimcore + SLIM_CLK_GATE_OFST);
+
+ /* disable SLIM core STBus sync */
+ writel(SLIM_STBUS_SYNC_DIS, slim_rproc->peri + SLIM_STBUS_SYNC_OFST);
+
+ /* enable cpu pipeline clock */
+ writel(!SLIM_CLK_GATE_DIS,
+ slim_rproc->slimcore + SLIM_CLK_GATE_OFST);
+
+ /* clear int & cmd mailbox */
+ writel(~0U, slim_rproc->peri + SLIM_INT_CLR_OFST);
+ writel(~0U, slim_rproc->peri + SLIM_CMD_CLR_OFST);
+
+ /* enable all channels cmd & int */
+ writel(~0U, slim_rproc->peri + SLIM_INT_MASK_OFST);
+ writel(~0U, slim_rproc->peri + SLIM_CMD_MASK_OFST);
+
+ /* enable cpu */
+ writel(SLIM_EN_RUN, slim_rproc->slimcore + SLIM_EN_OFST);
+
+ hw_id = readl_relaxed(slim_rproc->slimcore + SLIM_ID_OFST);
+ hw_ver = readl_relaxed(slim_rproc->slimcore + SLIM_VER_OFST);
+
+ fw_rev = readl(slim_rproc->mem[ST_SLIM_DMEM].cpu_addr +
+ SLIM_REV_ID_OFST);
+
+ dev_info(dev, "fw rev:%ld.%ld on SLIM %ld.%ld\n",
+ SLIM_REV_ID_MAJ(fw_rev), SLIM_REV_ID_MIN(fw_rev),
+ hw_id, hw_ver);
+
+ return 0;
+}
+
+static int slim_rproc_stop(struct rproc *rproc)
+{
+ struct st_slim_rproc *slim_rproc = rproc->priv;
+ u32 val;
+
+ /* mask all (cmd & int) channels */
+ writel(0UL, slim_rproc->peri + SLIM_INT_MASK_OFST);
+ writel(0UL, slim_rproc->peri + SLIM_CMD_MASK_OFST);
+
+ /* disable cpu pipeline clock */
+ writel(SLIM_CLK_GATE_DIS, slim_rproc->slimcore + SLIM_CLK_GATE_OFST);
+
+ writel(!SLIM_EN_RUN, slim_rproc->slimcore + SLIM_EN_OFST);
+
+ val = readl(slim_rproc->slimcore + SLIM_EN_OFST);
+ if (val & SLIM_EN_RUN)
+ dev_warn(&rproc->dev, "Failed to disable SLIM");
+
+ dev_dbg(&rproc->dev, "slim stopped\n");
+
+ return 0;
+}
+
+static void *slim_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
+{
+ struct st_slim_rproc *slim_rproc = rproc->priv;
+ void *va = NULL;
+ int i;
+
+ for (i = 0; i < ST_SLIM_MEM_MAX; i++) {
+ if (da != slim_rproc->mem[i].bus_addr)
+ continue;
+
+ if (len <= slim_rproc->mem[i].size) {
+ /* __force to make sparse happy with type conversion */
+ va = (__force void *)slim_rproc->mem[i].cpu_addr;
+ break;
+ }
+ }
+
+ dev_dbg(&rproc->dev, "da = 0x%llx len = 0x%zx va = 0x%pK\n",
+ da, len, va);
+
+ return va;
+}
+
+static const struct rproc_ops slim_rproc_ops = {
+ .start = slim_rproc_start,
+ .stop = slim_rproc_stop,
+ .da_to_va = slim_rproc_da_to_va,
+ .get_boot_addr = rproc_elf_get_boot_addr,
+ .load = rproc_elf_load_segments,
+ .sanity_check = rproc_elf_sanity_check,
+};
+
+/**
+ * st_slim_rproc_alloc() - allocate and initialise slim rproc
+ * @pdev: Pointer to the platform_device struct
+ * @fw_name: Name of firmware for rproc to use
+ *
+ * Function for allocating and initialising a slim rproc for use by
+ * device drivers whose IP is based around the SLIM core. It
+ * obtains and enables any clocks required by the SLIM core and also
+ * ioremaps the various IO.
+ *
+ * Return: st_slim_rproc pointer or ERR_PTR() on error.
+ */
+struct st_slim_rproc *st_slim_rproc_alloc(struct platform_device *pdev,
+ char *fw_name)
+{
+ struct device *dev = &pdev->dev;
+ struct st_slim_rproc *slim_rproc;
+ struct device_node *np = dev->of_node;
+ struct rproc *rproc;
+ struct resource *res;
+ int err, i;
+
+ if (!fw_name)
+ return ERR_PTR(-EINVAL);
+
+ if (!of_device_is_compatible(np, "st,slim-rproc"))
+ return ERR_PTR(-EINVAL);
+
+ rproc = rproc_alloc(dev, np->name, &slim_rproc_ops,
+ fw_name, sizeof(*slim_rproc));
+ if (!rproc)
+ return ERR_PTR(-ENOMEM);
+
+ rproc->has_iommu = false;
+
+ slim_rproc = rproc->priv;
+ slim_rproc->rproc = rproc;
+
+ /* get imem and dmem */
+ for (i = 0; i < ARRAY_SIZE(mem_names); i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ mem_names[i]);
+
+ slim_rproc->mem[i].cpu_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(slim_rproc->mem[i].cpu_addr)) {
+ dev_err(&pdev->dev, "devm_ioremap_resource failed\n");
+ err = PTR_ERR(slim_rproc->mem[i].cpu_addr);
+ goto err;
+ }
+ slim_rproc->mem[i].bus_addr = res->start;
+ slim_rproc->mem[i].size = resource_size(res);
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "slimcore");
+ slim_rproc->slimcore = devm_ioremap_resource(dev, res);
+ if (IS_ERR(slim_rproc->slimcore)) {
+ dev_err(&pdev->dev, "failed to ioremap slimcore IO\n");
+ err = PTR_ERR(slim_rproc->slimcore);
+ goto err;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "peripherals");
+ slim_rproc->peri = devm_ioremap_resource(dev, res);
+ if (IS_ERR(slim_rproc->peri)) {
+ dev_err(&pdev->dev, "failed to ioremap peripherals IO\n");
+ err = PTR_ERR(slim_rproc->peri);
+ goto err;
+ }
+
+ err = slim_clk_get(slim_rproc, dev);
+ if (err)
+ goto err;
+
+ err = slim_clk_enable(slim_rproc);
+ if (err) {
+ dev_err(dev, "Failed to enable clocks\n");
+ goto err_clk_put;
+ }
+
+ /* Register as a remoteproc device */
+ err = rproc_add(rproc);
+ if (err) {
+ dev_err(dev, "registration of slim remoteproc failed\n");
+ goto err_clk_dis;
+ }
+
+ return slim_rproc;
+
+err_clk_dis:
+ slim_clk_disable(slim_rproc);
+err_clk_put:
+ for (i = 0; i < ST_SLIM_MAX_CLK && slim_rproc->clks[i]; i++)
+ clk_put(slim_rproc->clks[i]);
+err:
+ rproc_free(rproc);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(st_slim_rproc_alloc);
+
+/**
+ * st_slim_rproc_put() - put slim rproc resources
+ * @slim_rproc: Pointer to the st_slim_rproc struct
+ *
+ * Function for calling respective _put() functions on slim_rproc resources.
+ */
+void st_slim_rproc_put(struct st_slim_rproc *slim_rproc)
+{
+ int clk;
+
+ if (!slim_rproc)
+ return;
+
+ slim_clk_disable(slim_rproc);
+
+ for (clk = 0; clk < ST_SLIM_MAX_CLK && slim_rproc->clks[clk]; clk++)
+ clk_put(slim_rproc->clks[clk]);
+
+ rproc_del(slim_rproc->rproc);
+ rproc_free(slim_rproc->rproc);
+}
+EXPORT_SYMBOL(st_slim_rproc_put);
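+
+/*
+ * Illustrative client sketch (hypothetical "my_ip" driver, not part of
+ * this file): a driver whose IP embeds a SLIM core sets the rproc up
+ * from its probe() and releases it from remove(). The firmware name is
+ * an assumption.
+ */
+#if 0 /* example only, never compiled here */
+static int my_ip_probe(struct platform_device *pdev)
+{
+	static char fw_name[] = "my_ip_slim_fw.elf";
+	struct st_slim_rproc *slim;
+
+	slim = st_slim_rproc_alloc(pdev, fw_name);
+	if (IS_ERR(slim))
+		return PTR_ERR(slim);
+
+	platform_set_drvdata(pdev, slim);
+	return 0;
+}
+
+static int my_ip_remove(struct platform_device *pdev)
+{
+	st_slim_rproc_put(platform_get_drvdata(pdev));
+	return 0;
+}
+#endif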
+
+MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
+MODULE_DESCRIPTION("STMicroelectronics SLIM core rproc driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
new file mode 100644
index 000000000..8746cbb1f
--- /dev/null
+++ b/drivers/remoteproc/stm32_rproc.c
@@ -0,0 +1,932 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
+ * Authors: Ludovic Barre <ludovic.barre@st.com> for STMicroelectronics.
+ * Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mailbox_client.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/pm_wakeirq.h>
+#include <linux/regmap.h>
+#include <linux/remoteproc.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include "remoteproc_internal.h"
+
+#define HOLD_BOOT 0
+#define RELEASE_BOOT 1
+
+#define MBOX_NB_VQ 2
+#define MBOX_NB_MBX 4
+
+#define STM32_SMC_RCC 0x82001000
+#define STM32_SMC_REG_WRITE 0x1
+
+#define STM32_MBX_VQ0 "vq0"
+#define STM32_MBX_VQ0_ID 0
+#define STM32_MBX_VQ1 "vq1"
+#define STM32_MBX_VQ1_ID 1
+#define STM32_MBX_SHUTDOWN "shutdown"
+#define STM32_MBX_DETACH "detach"
+
+#define RSC_TBL_SIZE 1024
+
+#define M4_STATE_OFF 0
+#define M4_STATE_INI 1
+#define M4_STATE_CRUN 2
+#define M4_STATE_CSTOP 3
+#define M4_STATE_STANDBY 4
+#define M4_STATE_CRASH 5
+
+struct stm32_syscon {
+ struct regmap *map;
+ u32 reg;
+ u32 mask;
+};
+
+struct stm32_rproc_mem {
+ char name[20];
+ void __iomem *cpu_addr;
+ phys_addr_t bus_addr;
+ u32 dev_addr;
+ size_t size;
+};
+
+struct stm32_rproc_mem_ranges {
+ u32 dev_addr;
+ u32 bus_addr;
+ u32 size;
+};
+
+struct stm32_mbox {
+ const unsigned char name[10];
+ struct mbox_chan *chan;
+ struct mbox_client client;
+ struct work_struct vq_work;
+ int vq_id;
+};
+
+struct stm32_rproc {
+ struct reset_control *rst;
+ struct stm32_syscon hold_boot;
+ struct stm32_syscon pdds;
+ struct stm32_syscon m4_state;
+ struct stm32_syscon rsctbl;
+ int wdg_irq;
+ u32 nb_rmems;
+ struct stm32_rproc_mem *rmems;
+ struct stm32_mbox mb[MBOX_NB_MBX];
+ struct workqueue_struct *workqueue;
+ bool secured_soc;
+ void __iomem *rsc_va;
+};
+
+static int stm32_rproc_pa_to_da(struct rproc *rproc, phys_addr_t pa, u64 *da)
+{
+ unsigned int i;
+ struct stm32_rproc *ddata = rproc->priv;
+ struct stm32_rproc_mem *p_mem;
+
+ for (i = 0; i < ddata->nb_rmems; i++) {
+ p_mem = &ddata->rmems[i];
+
+ if (pa < p_mem->bus_addr ||
+ pa >= p_mem->bus_addr + p_mem->size)
+ continue;
+ *da = pa - p_mem->bus_addr + p_mem->dev_addr;
+ dev_dbg(rproc->dev.parent, "pa %pa to da %llx\n", &pa, *da);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int stm32_rproc_mem_alloc(struct rproc *rproc,
+ struct rproc_mem_entry *mem)
+{
+ struct device *dev = rproc->dev.parent;
+ void *va;
+
+ dev_dbg(dev, "map memory: %pa+%x\n", &mem->dma, mem->len);
+ va = ioremap_wc(mem->dma, mem->len);
+ if (IS_ERR_OR_NULL(va)) {
+ dev_err(dev, "Unable to map memory region: %pa+%x\n",
+ &mem->dma, mem->len);
+ return -ENOMEM;
+ }
+
+ /* Update memory entry va */
+ mem->va = va;
+
+ return 0;
+}
+
+static int stm32_rproc_mem_release(struct rproc *rproc,
+ struct rproc_mem_entry *mem)
+{
+ dev_dbg(rproc->dev.parent, "unmap memory: %pa\n", &mem->dma);
+ iounmap(mem->va);
+
+ return 0;
+}
+
+static int stm32_rproc_of_memory_translations(struct platform_device *pdev,
+ struct stm32_rproc *ddata)
+{
+ struct device *parent, *dev = &pdev->dev;
+ struct device_node *np;
+ struct stm32_rproc_mem *p_mems;
+ struct stm32_rproc_mem_ranges *mem_range;
+ int cnt, array_size, i, ret = 0;
+
+ parent = dev->parent;
+ np = parent->of_node;
+
+ cnt = of_property_count_elems_of_size(np, "dma-ranges",
+ sizeof(*mem_range));
+ if (cnt <= 0) {
+ dev_err(dev, "%s: dma-ranges property not defined\n", __func__);
+ return -EINVAL;
+ }
+
+ p_mems = devm_kcalloc(dev, cnt, sizeof(*p_mems), GFP_KERNEL);
+ if (!p_mems)
+ return -ENOMEM;
+ mem_range = kcalloc(cnt, sizeof(*mem_range), GFP_KERNEL);
+ if (!mem_range)
+ return -ENOMEM;
+
+ array_size = cnt * sizeof(struct stm32_rproc_mem_ranges) / sizeof(u32);
+
+ ret = of_property_read_u32_array(np, "dma-ranges",
+ (u32 *)mem_range, array_size);
+ if (ret) {
+ dev_err(dev, "error while get dma-ranges property: %x\n", ret);
+ goto free_mem;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ p_mems[i].bus_addr = mem_range[i].bus_addr;
+ p_mems[i].dev_addr = mem_range[i].dev_addr;
+ p_mems[i].size = mem_range[i].size;
+
+ dev_dbg(dev, "memory range[%i]: da %#x, pa %pa, size %#zx:\n",
+ i, p_mems[i].dev_addr, &p_mems[i].bus_addr,
+ p_mems[i].size);
+ }
+
+ ddata->rmems = p_mems;
+ ddata->nb_rmems = cnt;
+
+free_mem:
+ kfree(mem_range);
+ return ret;
+}
+
+static int stm32_rproc_mbox_idx(struct rproc *rproc, const unsigned char *name)
+{
+ struct stm32_rproc *ddata = rproc->priv;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ddata->mb); i++) {
+ if (!strncmp(ddata->mb[i].name, name, strlen(name)))
+ return i;
+ }
+ dev_err(&rproc->dev, "mailbox %s not found\n", name);
+
+ return -EINVAL;
+}
+
+static int stm32_rproc_prepare(struct rproc *rproc)
+{
+ struct device *dev = rproc->dev.parent;
+ struct device_node *np = dev->of_node;
+ struct of_phandle_iterator it;
+ struct rproc_mem_entry *mem;
+ struct reserved_mem *rmem;
+ u64 da;
+ int index = 0;
+
+ /* Register associated reserved memory regions */
+ of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
+ while (of_phandle_iterator_next(&it) == 0) {
+ rmem = of_reserved_mem_lookup(it.node);
+ if (!rmem) {
+ of_node_put(it.node);
+ dev_err(dev, "unable to acquire memory-region\n");
+ return -EINVAL;
+ }
+
+ if (stm32_rproc_pa_to_da(rproc, rmem->base, &da) < 0) {
+ of_node_put(it.node);
+ dev_err(dev, "memory region not valid %pa\n",
+ &rmem->base);
+ return -EINVAL;
+ }
+
+ /* No need to map vdev buffer */
+ if (strcmp(it.node->name, "vdev0buffer")) {
+ /* Register memory region */
+ mem = rproc_mem_entry_init(dev, NULL,
+ (dma_addr_t)rmem->base,
+ rmem->size, da,
+ stm32_rproc_mem_alloc,
+ stm32_rproc_mem_release,
+ it.node->name);
+
+ if (mem)
+ rproc_coredump_add_segment(rproc, da,
+ rmem->size);
+ } else {
+ /* Register reserved memory for vdev buffer alloc */
+ mem = rproc_of_resm_mem_entry_init(dev, index,
+ rmem->size,
+ rmem->base,
+ it.node->name);
+ }
+
+ if (!mem) {
+ of_node_put(it.node);
+ return -ENOMEM;
+ }
+
+ rproc_add_carveout(rproc, mem);
+ index++;
+ }
+
+ return 0;
+}
+
+static int stm32_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
+{
+ if (rproc_elf_load_rsc_table(rproc, fw))
+ dev_warn(&rproc->dev, "no resource table found for this firmware\n");
+
+ return 0;
+}
+
+static irqreturn_t stm32_rproc_wdg(int irq, void *data)
+{
+ struct platform_device *pdev = data;
+ struct rproc *rproc = platform_get_drvdata(pdev);
+
+ rproc_report_crash(rproc, RPROC_WATCHDOG);
+
+ return IRQ_HANDLED;
+}
+
+static void stm32_rproc_mb_vq_work(struct work_struct *work)
+{
+ struct stm32_mbox *mb = container_of(work, struct stm32_mbox, vq_work);
+ struct rproc *rproc = dev_get_drvdata(mb->client.dev);
+
+ mutex_lock(&rproc->lock);
+
+ if (rproc->state != RPROC_RUNNING)
+ goto unlock_mutex;
+
+ if (rproc_vq_interrupt(rproc, mb->vq_id) == IRQ_NONE)
+ dev_dbg(&rproc->dev, "no message found in vq%d\n", mb->vq_id);
+
+unlock_mutex:
+ mutex_unlock(&rproc->lock);
+}
+
+static void stm32_rproc_mb_callback(struct mbox_client *cl, void *data)
+{
+ struct rproc *rproc = dev_get_drvdata(cl->dev);
+ struct stm32_mbox *mb = container_of(cl, struct stm32_mbox, client);
+ struct stm32_rproc *ddata = rproc->priv;
+
+ queue_work(ddata->workqueue, &mb->vq_work);
+}
+
+static void stm32_rproc_free_mbox(struct rproc *rproc)
+{
+ struct stm32_rproc *ddata = rproc->priv;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ddata->mb); i++) {
+ if (ddata->mb[i].chan)
+ mbox_free_channel(ddata->mb[i].chan);
+ ddata->mb[i].chan = NULL;
+ }
+}
+
+static const struct stm32_mbox stm32_rproc_mbox[MBOX_NB_MBX] = {
+ {
+ .name = STM32_MBX_VQ0,
+ .vq_id = STM32_MBX_VQ0_ID,
+ .client = {
+ .rx_callback = stm32_rproc_mb_callback,
+ .tx_block = false,
+ },
+ },
+ {
+ .name = STM32_MBX_VQ1,
+ .vq_id = STM32_MBX_VQ1_ID,
+ .client = {
+ .rx_callback = stm32_rproc_mb_callback,
+ .tx_block = false,
+ },
+ },
+ {
+ .name = STM32_MBX_SHUTDOWN,
+ .vq_id = -1,
+ .client = {
+ .tx_block = true,
+ .tx_done = NULL,
+ .tx_tout = 500, /* 500 ms time out */
+ },
+ },
+ {
+ .name = STM32_MBX_DETACH,
+ .vq_id = -1,
+ .client = {
+ .tx_block = true,
+ .tx_done = NULL,
+ .tx_tout = 200, /* 200 ms time out to detach should be fair enough */
+ },
+ }
+};
+
+static int stm32_rproc_request_mbox(struct rproc *rproc)
+{
+ struct stm32_rproc *ddata = rproc->priv;
+ struct device *dev = &rproc->dev;
+ unsigned int i;
+ int j;
+ const unsigned char *name;
+ struct mbox_client *cl;
+
+ /* Initialise mailbox structure table */
+ memcpy(ddata->mb, stm32_rproc_mbox, sizeof(stm32_rproc_mbox));
+
+ for (i = 0; i < MBOX_NB_MBX; i++) {
+ name = ddata->mb[i].name;
+
+ cl = &ddata->mb[i].client;
+ cl->dev = dev->parent;
+
+ ddata->mb[i].chan = mbox_request_channel_byname(cl, name);
+ if (IS_ERR(ddata->mb[i].chan)) {
+ if (PTR_ERR(ddata->mb[i].chan) == -EPROBE_DEFER) {
+ dev_err_probe(dev->parent,
+ PTR_ERR(ddata->mb[i].chan),
+ "failed to request mailbox %s\n",
+ name);
+ goto err_probe;
+ }
+ dev_warn(dev, "cannot get %s mbox\n", name);
+ ddata->mb[i].chan = NULL;
+ }
+ if (ddata->mb[i].vq_id >= 0) {
+ INIT_WORK(&ddata->mb[i].vq_work,
+ stm32_rproc_mb_vq_work);
+ }
+ }
+
+ return 0;
+
+err_probe:
+ for (j = i - 1; j >= 0; j--)
+ if (ddata->mb[j].chan)
+ mbox_free_channel(ddata->mb[j].chan);
+ return -EPROBE_DEFER;
+}
+
+static int stm32_rproc_set_hold_boot(struct rproc *rproc, bool hold)
+{
+ struct stm32_rproc *ddata = rproc->priv;
+ struct stm32_syscon hold_boot = ddata->hold_boot;
+ struct arm_smccc_res smc_res;
+ int val, err;
+
+ val = hold ? HOLD_BOOT : RELEASE_BOOT;
+
+ if (IS_ENABLED(CONFIG_HAVE_ARM_SMCCC) && ddata->secured_soc) {
+ arm_smccc_smc(STM32_SMC_RCC, STM32_SMC_REG_WRITE,
+ hold_boot.reg, val, 0, 0, 0, 0, &smc_res);
+ err = smc_res.a0;
+ } else {
+ err = regmap_update_bits(hold_boot.map, hold_boot.reg,
+ hold_boot.mask, val);
+ }
+
+ if (err)
+ dev_err(&rproc->dev, "failed to set hold boot\n");
+
+ return err;
+}
+
+static void stm32_rproc_add_coredump_trace(struct rproc *rproc)
+{
+ struct rproc_debug_trace *trace;
+ struct rproc_dump_segment *segment;
+ bool already_added;
+
+ list_for_each_entry(trace, &rproc->traces, node) {
+ already_added = false;
+
+ list_for_each_entry(segment, &rproc->dump_segments, node) {
+ if (segment->da == trace->trace_mem.da) {
+ already_added = true;
+ break;
+ }
+ }
+
+ if (!already_added)
+ rproc_coredump_add_segment(rproc, trace->trace_mem.da,
+ trace->trace_mem.len);
+ }
+}
+
+static int stm32_rproc_start(struct rproc *rproc)
+{
+ struct stm32_rproc *ddata = rproc->priv;
+ int err;
+
+ stm32_rproc_add_coredump_trace(rproc);
+
+ /* clear remote proc Deep Sleep */
+ if (ddata->pdds.map) {
+ err = regmap_update_bits(ddata->pdds.map, ddata->pdds.reg,
+ ddata->pdds.mask, 0);
+ if (err) {
+ dev_err(&rproc->dev, "failed to clear pdds\n");
+ return err;
+ }
+ }
+
+ err = stm32_rproc_set_hold_boot(rproc, false);
+ if (err)
+ return err;
+
+ return stm32_rproc_set_hold_boot(rproc, true);
+}
+
+static int stm32_rproc_attach(struct rproc *rproc)
+{
+ stm32_rproc_add_coredump_trace(rproc);
+
+ return stm32_rproc_set_hold_boot(rproc, true);
+}
+
+static int stm32_rproc_detach(struct rproc *rproc)
+{
+ struct stm32_rproc *ddata = rproc->priv;
+ int err, idx;
+
+ /* Inform the remote processor of the detach */
+ idx = stm32_rproc_mbox_idx(rproc, STM32_MBX_DETACH);
+ if (idx >= 0 && ddata->mb[idx].chan) {
+ err = mbox_send_message(ddata->mb[idx].chan, "stop");
+ if (err < 0)
+ dev_warn(&rproc->dev, "warning: remote FW detach without ack\n");
+ }
+
+ /* Allow remote processor to auto-reboot */
+ return stm32_rproc_set_hold_boot(rproc, false);
+}
+
+static int stm32_rproc_stop(struct rproc *rproc)
+{
+ struct stm32_rproc *ddata = rproc->priv;
+ int err, idx;
+
+ /* request shutdown of the remote processor */
+ if (rproc->state != RPROC_OFFLINE && rproc->state != RPROC_CRASHED) {
+ idx = stm32_rproc_mbox_idx(rproc, STM32_MBX_SHUTDOWN);
+ if (idx >= 0 && ddata->mb[idx].chan) {
+ err = mbox_send_message(ddata->mb[idx].chan, "detach");
+ if (err < 0)
+ dev_warn(&rproc->dev, "warning: remote FW shutdown without ack\n");
+ }
+ }
+
+ err = stm32_rproc_set_hold_boot(rproc, true);
+ if (err)
+ return err;
+
+ err = reset_control_assert(ddata->rst);
+ if (err) {
+ dev_err(&rproc->dev, "failed to assert the reset\n");
+ return err;
+ }
+
+ /* to allow platform Standby power mode, set remote proc Deep Sleep */
+ if (ddata->pdds.map) {
+ err = regmap_update_bits(ddata->pdds.map, ddata->pdds.reg,
+ ddata->pdds.mask, 1);
+ if (err) {
+ dev_err(&rproc->dev, "failed to set pdds\n");
+ return err;
+ }
+ }
+
+ /* update coprocessor state to OFF if available */
+ if (ddata->m4_state.map) {
+ err = regmap_update_bits(ddata->m4_state.map,
+ ddata->m4_state.reg,
+ ddata->m4_state.mask,
+ M4_STATE_OFF);
+ if (err) {
+ dev_err(&rproc->dev, "failed to set copro state\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void stm32_rproc_kick(struct rproc *rproc, int vqid)
+{
+ struct stm32_rproc *ddata = rproc->priv;
+ unsigned int i;
+ int err;
+
+ if (WARN_ON(vqid >= MBOX_NB_VQ))
+ return;
+
+ for (i = 0; i < MBOX_NB_MBX; i++) {
+ if (vqid != ddata->mb[i].vq_id)
+ continue;
+ if (!ddata->mb[i].chan)
+ return;
+ err = mbox_send_message(ddata->mb[i].chan, "kick");
+ if (err < 0)
+ dev_err(&rproc->dev, "%s: failed (%s, err:%d)\n",
+ __func__, ddata->mb[i].name, err);
+ return;
+ }
+}
+
+static int stm32_rproc_da_to_pa(struct rproc *rproc,
+ u64 da, phys_addr_t *pa)
+{
+ struct stm32_rproc *ddata = rproc->priv;
+ struct device *dev = rproc->dev.parent;
+ struct stm32_rproc_mem *p_mem;
+ unsigned int i;
+
+ for (i = 0; i < ddata->nb_rmems; i++) {
+ p_mem = &ddata->rmems[i];
+
+ if (da < p_mem->dev_addr ||
+ da >= p_mem->dev_addr + p_mem->size)
+ continue;
+
+ *pa = da - p_mem->dev_addr + p_mem->bus_addr;
+ dev_dbg(dev, "da %llx to pa %pap\n", da, pa);
+
+ return 0;
+ }
+
+ dev_err(dev, "can't translate da %llx\n", da);
+
+ return -EINVAL;
+}
+
+static struct resource_table *
+stm32_rproc_get_loaded_rsc_table(struct rproc *rproc, size_t *table_sz)
+{
+ struct stm32_rproc *ddata = rproc->priv;
+ struct device *dev = rproc->dev.parent;
+ phys_addr_t rsc_pa;
+ u32 rsc_da;
+ int err;
+
+ /* The resource table has already been mapped, nothing to do */
+ if (ddata->rsc_va)
+ goto done;
+
+ err = regmap_read(ddata->rsctbl.map, ddata->rsctbl.reg, &rsc_da);
+ if (err) {
+ dev_err(dev, "failed to read rsc tbl addr\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!rsc_da)
+ /* no rsc table */
+ return ERR_PTR(-ENOENT);
+
+ err = stm32_rproc_da_to_pa(rproc, rsc_da, &rsc_pa);
+ if (err)
+ return ERR_PTR(err);
+
+ ddata->rsc_va = devm_ioremap_wc(dev, rsc_pa, RSC_TBL_SIZE);
+ if (IS_ERR_OR_NULL(ddata->rsc_va)) {
+ dev_err(dev, "Unable to map memory region: %pa+%zx\n",
+ &rsc_pa, RSC_TBL_SIZE);
+ ddata->rsc_va = NULL;
+ return ERR_PTR(-ENOMEM);
+ }
+
+done:
+ /*
+ * Assuming the resource table fits in 1 kB is fair.
+ * Note that, to support detach, this 1 kB memory area has to be
+ * reserved in the coprocessor firmware for the resource table. On
+ * detach, the remoteproc core re-initializes the entire area by
+ * overwriting it with the initial values stored in rproc->clean_table.
+ */
+ *table_sz = RSC_TBL_SIZE;
+ return (struct resource_table *)ddata->rsc_va;
+}
+
+static const struct rproc_ops st_rproc_ops = {
+ .prepare = stm32_rproc_prepare,
+ .start = stm32_rproc_start,
+ .stop = stm32_rproc_stop,
+ .attach = stm32_rproc_attach,
+ .detach = stm32_rproc_detach,
+ .kick = stm32_rproc_kick,
+ .load = rproc_elf_load_segments,
+ .parse_fw = stm32_rproc_parse_fw,
+ .find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
+ .get_loaded_rsc_table = stm32_rproc_get_loaded_rsc_table,
+ .sanity_check = rproc_elf_sanity_check,
+ .get_boot_addr = rproc_elf_get_boot_addr,
+};
+
+static const struct of_device_id stm32_rproc_match[] = {
+ { .compatible = "st,stm32mp1-m4" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_rproc_match);
+
+static int stm32_rproc_get_syscon(struct device_node *np, const char *prop,
+ struct stm32_syscon *syscon)
+{
+ int err = 0;
+
+ syscon->map = syscon_regmap_lookup_by_phandle(np, prop);
+ if (IS_ERR(syscon->map)) {
+ err = PTR_ERR(syscon->map);
+ syscon->map = NULL;
+ goto out;
+ }
+
+ err = of_property_read_u32_index(np, prop, 1, &syscon->reg);
+ if (err)
+ goto out;
+
+ err = of_property_read_u32_index(np, prop, 2, &syscon->mask);
+
+out:
+ return err;
+}
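+
+/*
+ * Illustrative property shape parsed above (phandle target, register
+ * offset and mask values are hypothetical):
+ *
+ *	st,syscfg-holdboot = <&rcc 0x10C 0x1>;
+ *	                      map   reg  mask
+ */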
+
+static int stm32_rproc_parse_dt(struct platform_device *pdev,
+ struct stm32_rproc *ddata, bool *auto_boot)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct stm32_syscon tz;
+ unsigned int tzen;
+ int err, irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq == -EPROBE_DEFER)
+ return dev_err_probe(dev, irq, "failed to get interrupt\n");
+
+ if (irq > 0) {
+ err = devm_request_irq(dev, irq, stm32_rproc_wdg, 0,
+ dev_name(dev), pdev);
+ if (err)
+ return dev_err_probe(dev, err,
+ "failed to request wdg irq\n");
+
+ ddata->wdg_irq = irq;
+
+ if (of_property_read_bool(np, "wakeup-source")) {
+ device_init_wakeup(dev, true);
+ dev_pm_set_wake_irq(dev, irq);
+ }
+
+ dev_info(dev, "wdg irq registered\n");
+ }
+
+ ddata->rst = devm_reset_control_get_by_index(dev, 0);
+ if (IS_ERR(ddata->rst))
+ return dev_err_probe(dev, PTR_ERR(ddata->rst),
+ "failed to get mcu_reset\n");
+
+ /*
+ * If the platform is secured, the hold-boot bit must be written via
+ * SMC call but can still be read normally.
+ * If the platform is not secured, the bit can be read and written
+ * normally.
+ */
+ err = stm32_rproc_get_syscon(np, "st,syscfg-tz", &tz);
+ if (err) {
+ dev_err(dev, "failed to get tz syscfg\n");
+ return err;
+ }
+
+ err = regmap_read(tz.map, tz.reg, &tzen);
+ if (err) {
+ dev_err(dev, "failed to read tzen\n");
+ return err;
+ }
+ ddata->secured_soc = tzen & tz.mask;
+
+ err = stm32_rproc_get_syscon(np, "st,syscfg-holdboot",
+ &ddata->hold_boot);
+ if (err) {
+ dev_err(dev, "failed to get hold boot\n");
+ return err;
+ }
+
+ err = stm32_rproc_get_syscon(np, "st,syscfg-pdds", &ddata->pdds);
+ if (err)
+ dev_info(dev, "failed to get pdds\n");
+
+ *auto_boot = of_property_read_bool(np, "st,auto-boot");
+
+ /*
+ * See if we can check the M4 status, i.e. whether it was started
+ * by the boot loader or not.
+ */
+ err = stm32_rproc_get_syscon(np, "st,syscfg-m4-state",
+ &ddata->m4_state);
+ if (err) {
+ /* remember this for stm32_rproc_get_m4_status() */
+ ddata->m4_state.map = NULL;
+ /* no coprocessor state syscon (optional) */
+ dev_warn(dev, "m4 state not supported\n");
+
+ /* no need to go further */
+ return 0;
+ }
+
+ /* See if we can get the resource table */
+ err = stm32_rproc_get_syscon(np, "st,syscfg-rsc-tbl",
+ &ddata->rsctbl);
+ if (err) {
+ /* no rsc table syscon (optional) */
+ dev_warn(dev, "rsc tbl syscon not supported\n");
+ }
+
+ return 0;
+}
+
+static int stm32_rproc_get_m4_status(struct stm32_rproc *ddata,
+ unsigned int *state)
+{
+ /* See stm32_rproc_parse_dt() */
+ if (!ddata->m4_state.map) {
+ /*
+ * We couldn't get the coprocessor's state, assume
+ * it is not running.
+ */
+ *state = M4_STATE_OFF;
+ return 0;
+ }
+
+ return regmap_read(ddata->m4_state.map, ddata->m4_state.reg, state);
+}
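+
+/*
+ * A short sketch of how the state returned above is consumed in probe below
+ * (the state names are from this driver):
+ *
+ *	M4_STATE_OFF  -> normal remoteproc boot/load path
+ *	M4_STATE_CRUN -> coprocessor already started by the boot loader, so
+ *	                 mark the rproc RPROC_DETACHED and attach to it
+ */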
+
+static int stm32_rproc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct stm32_rproc *ddata;
+ struct device_node *np = dev->of_node;
+ struct rproc *rproc;
+ unsigned int state;
+ int ret;
+
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ rproc = rproc_alloc(dev, np->name, &st_rproc_ops, NULL, sizeof(*ddata));
+ if (!rproc)
+ return -ENOMEM;
+
+ ddata = rproc->priv;
+
+ rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
+
+ ret = stm32_rproc_parse_dt(pdev, ddata, &rproc->auto_boot);
+ if (ret)
+ goto free_rproc;
+
+ ret = stm32_rproc_of_memory_translations(pdev, ddata);
+ if (ret)
+ goto free_rproc;
+
+ ret = stm32_rproc_get_m4_status(ddata, &state);
+ if (ret)
+ goto free_rproc;
+
+ if (state == M4_STATE_CRUN)
+ rproc->state = RPROC_DETACHED;
+
+ rproc->has_iommu = false;
+ ddata->workqueue = create_workqueue(dev_name(dev));
+ if (!ddata->workqueue) {
+ dev_err(dev, "cannot create workqueue\n");
+ ret = -ENOMEM;
+ goto free_resources;
+ }
+
+ platform_set_drvdata(pdev, rproc);
+
+ ret = stm32_rproc_request_mbox(rproc);
+ if (ret)
+ goto free_wkq;
+
+ ret = rproc_add(rproc);
+ if (ret)
+ goto free_mb;
+
+ return 0;
+
+free_mb:
+ stm32_rproc_free_mbox(rproc);
+free_wkq:
+ destroy_workqueue(ddata->workqueue);
+free_resources:
+ rproc_resource_cleanup(rproc);
+free_rproc:
+ if (device_may_wakeup(dev)) {
+ dev_pm_clear_wake_irq(dev);
+ device_init_wakeup(dev, false);
+ }
+ rproc_free(rproc);
+ return ret;
+}
+
+static int stm32_rproc_remove(struct platform_device *pdev)
+{
+ struct rproc *rproc = platform_get_drvdata(pdev);
+ struct stm32_rproc *ddata = rproc->priv;
+ struct device *dev = &pdev->dev;
+
+ if (atomic_read(&rproc->power) > 0)
+ rproc_shutdown(rproc);
+
+ rproc_del(rproc);
+ stm32_rproc_free_mbox(rproc);
+ destroy_workqueue(ddata->workqueue);
+
+ if (device_may_wakeup(dev)) {
+ dev_pm_clear_wake_irq(dev);
+ device_init_wakeup(dev, false);
+ }
+ rproc_free(rproc);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_rproc_suspend(struct device *dev)
+{
+ struct rproc *rproc = dev_get_drvdata(dev);
+ struct stm32_rproc *ddata = rproc->priv;
+
+ if (device_may_wakeup(dev))
+ return enable_irq_wake(ddata->wdg_irq);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_rproc_resume(struct device *dev)
+{
+ struct rproc *rproc = dev_get_drvdata(dev);
+ struct stm32_rproc *ddata = rproc->priv;
+
+ if (device_may_wakeup(dev))
+ return disable_irq_wake(ddata->wdg_irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(stm32_rproc_pm_ops,
+ stm32_rproc_suspend, stm32_rproc_resume);
+
+static struct platform_driver stm32_rproc_driver = {
+ .probe = stm32_rproc_probe,
+ .remove = stm32_rproc_remove,
+ .driver = {
+ .name = "stm32-rproc",
+ .pm = &stm32_rproc_pm_ops,
+ .of_match_table = of_match_ptr(stm32_rproc_match),
+ },
+};
+module_platform_driver(stm32_rproc_driver);
+
+MODULE_DESCRIPTION("STM32 Remote Processor Control Driver");
+MODULE_AUTHOR("Ludovic Barre <ludovic.barre@st.com>");
+MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/remoteproc/ti_k3_dsp_remoteproc.c b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
new file mode 100644
index 000000000..eb9c64f7b
--- /dev/null
+++ b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
@@ -0,0 +1,908 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI K3 DSP Remote Processor(s) driver
+ *
+ * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
+ * Suman Anna <s-anna@ti.com>
+ */
+
+#include <linux/io.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/omap-mailbox.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include "omap_remoteproc.h"
+#include "remoteproc_internal.h"
+#include "ti_sci_proc.h"
+
+#define KEYSTONE_RPROC_LOCAL_ADDRESS_MASK (SZ_16M - 1)
+
+/**
+ * struct k3_dsp_mem - internal memory structure
+ * @cpu_addr: MPU virtual address of the memory region
+ * @bus_addr: Bus address used to access the memory region
+ * @dev_addr: Device address of the memory region from DSP view
+ * @size: Size of the memory region
+ */
+struct k3_dsp_mem {
+ void __iomem *cpu_addr;
+ phys_addr_t bus_addr;
+ u32 dev_addr;
+ size_t size;
+};
+
+/**
+ * struct k3_dsp_mem_data - memory definitions for a DSP
+ * @name: name for this memory entry
+ * @dev_addr: device address for the memory entry
+ */
+struct k3_dsp_mem_data {
+ const char *name;
+ const u32 dev_addr;
+};
+
+/**
+ * struct k3_dsp_dev_data - device data structure for a DSP
+ * @mems: pointer to memory definitions for a DSP
+ * @num_mems: number of memory regions in @mems
+ * @boot_align_addr: boot vector address alignment granularity
+ * @uses_lreset: flag to denote the need for local reset management
+ */
+struct k3_dsp_dev_data {
+ const struct k3_dsp_mem_data *mems;
+ u32 num_mems;
+ u32 boot_align_addr;
+ bool uses_lreset;
+};
+
+/**
+ * struct k3_dsp_rproc - k3 DSP remote processor driver structure
+ * @dev: cached device pointer
+ * @rproc: remoteproc device handle
+ * @mem: internal memory regions data
+ * @num_mems: number of internal memory regions
+ * @rmem: reserved memory regions data
+ * @num_rmems: number of reserved memory regions
+ * @reset: reset control handle
+ * @data: pointer to DSP-specific device data
+ * @tsp: TI-SCI processor control handle
+ * @ti_sci: TI-SCI handle
+ * @ti_sci_id: TI-SCI device identifier
+ * @mbox: mailbox channel handle
+ * @client: mailbox client to request the mailbox channel
+ */
+struct k3_dsp_rproc {
+ struct device *dev;
+ struct rproc *rproc;
+ struct k3_dsp_mem *mem;
+ int num_mems;
+ struct k3_dsp_mem *rmem;
+ int num_rmems;
+ struct reset_control *reset;
+ const struct k3_dsp_dev_data *data;
+ struct ti_sci_proc *tsp;
+ const struct ti_sci_handle *ti_sci;
+ u32 ti_sci_id;
+ struct mbox_chan *mbox;
+ struct mbox_client client;
+};
+
+/**
+ * k3_dsp_rproc_mbox_callback() - inbound mailbox message handler
+ * @client: mailbox client pointer used for requesting the mailbox channel
+ * @data: mailbox payload
+ *
+ * This handler is invoked by the OMAP mailbox driver whenever a mailbox
+ * message is received. Usually, the mailbox payload simply contains
+ * the index of the virtqueue that is kicked by the remote processor,
+ * and we let remoteproc core handle it.
+ *
+ * In addition to virtqueue indices, we also have some out-of-band values
+ * that indicate different events. Those values are deliberately very
+ * large so they don't coincide with virtqueue indices.
+ */
+static void k3_dsp_rproc_mbox_callback(struct mbox_client *client, void *data)
+{
+ struct k3_dsp_rproc *kproc = container_of(client, struct k3_dsp_rproc,
+ client);
+ struct device *dev = kproc->rproc->dev.parent;
+ const char *name = kproc->rproc->name;
+ u32 msg = omap_mbox_message(data);
+
+ dev_dbg(dev, "mbox msg: 0x%x\n", msg);
+
+ switch (msg) {
+ case RP_MBOX_CRASH:
+ /*
+ * remoteproc detected an exception, but error recovery is not
+ * supported. So, just log this for now
+ */
+ dev_err(dev, "K3 DSP rproc %s crashed\n", name);
+ break;
+ case RP_MBOX_ECHO_REPLY:
+ dev_info(dev, "received echo reply from %s\n", name);
+ break;
+ default:
+ /* silently handle all other valid messages */
+ if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
+ return;
+ if (msg > kproc->rproc->max_notifyid) {
+ dev_dbg(dev, "dropping unknown message 0x%x", msg);
+ return;
+ }
+ /* msg contains the index of the triggered vring */
+ if (rproc_vq_interrupt(kproc->rproc, msg) == IRQ_NONE)
+ dev_dbg(dev, "no message was found in vqid %d\n", msg);
+ }
+}
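+
+/*
+ * A rough sketch of the message space handled by the callback above (the
+ * RP_MBOX_* values come from omap_remoteproc.h; boundaries illustrative):
+ *
+ *	0 .. rproc->max_notifyid         virtqueue index -> rproc_vq_interrupt()
+ *	RP_MBOX_READY .. RP_MBOX_END_MSG out-of-band events (crash, echo, ...)
+ *	anything else                    dropped with a debug message
+ */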
+
+/*
+ * Kick the remote processor to notify about pending unprocessed messages.
+ * The index of the triggered virtqueue is passed in the mailbox payload,
+ * and the remote processor is expected to process the corresponding
+ * virtqueue.
+ */
+static void k3_dsp_rproc_kick(struct rproc *rproc, int vqid)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+ struct device *dev = rproc->dev.parent;
+ mbox_msg_t msg = (mbox_msg_t)vqid;
+ int ret;
+
+ /* send the index of the triggered virtqueue in the mailbox payload */
+ ret = mbox_send_message(kproc->mbox, (void *)msg);
+ if (ret < 0)
+ dev_err(dev, "failed to send mailbox message, status = %d\n",
+ ret);
+}
+
+/* Put the DSP processor into reset */
+static int k3_dsp_rproc_reset(struct k3_dsp_rproc *kproc)
+{
+ struct device *dev = kproc->dev;
+ int ret;
+
+ ret = reset_control_assert(kproc->reset);
+ if (ret) {
+ dev_err(dev, "local-reset assert failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ if (kproc->data->uses_lreset)
+ return ret;
+
+ ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id);
+ if (ret) {
+ dev_err(dev, "module-reset assert failed, ret = %d\n", ret);
+ if (reset_control_deassert(kproc->reset))
+ dev_warn(dev, "local-reset deassert back failed\n");
+ }
+
+ return ret;
+}
+
+/* Release the DSP processor from reset */
+static int k3_dsp_rproc_release(struct k3_dsp_rproc *kproc)
+{
+ struct device *dev = kproc->dev;
+ int ret;
+
+ if (kproc->data->uses_lreset)
+ goto lreset;
+
+ ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
+ kproc->ti_sci_id);
+ if (ret) {
+ dev_err(dev, "module-reset deassert failed, ret = %d\n", ret);
+ return ret;
+ }
+
+lreset:
+ ret = reset_control_deassert(kproc->reset);
+ if (ret) {
+ dev_err(dev, "local-reset deassert failed, ret = %d\n", ret);
+ if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id))
+ dev_warn(dev, "module-reset assert back failed\n");
+ }
+
+ return ret;
+}
+
+static int k3_dsp_rproc_request_mbox(struct rproc *rproc)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+ struct mbox_client *client = &kproc->client;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ client->dev = dev;
+ client->tx_done = NULL;
+ client->rx_callback = k3_dsp_rproc_mbox_callback;
+ client->tx_block = false;
+ client->knows_txdone = false;
+
+ kproc->mbox = mbox_request_channel(client, 0);
+ if (IS_ERR(kproc->mbox)) {
+ ret = -EBUSY;
+ dev_err(dev, "mbox_request_channel failed: %ld\n",
+ PTR_ERR(kproc->mbox));
+ return ret;
+ }
+
+ /*
+ * Ping the remote processor; this is only a sanity check for now
+ * and has no functional effect whatsoever.
+ *
+ * Note that the reply will _not_ arrive immediately: this message
+ * will wait in the mailbox fifo until the remote processor is booted.
+ */
+ ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
+ if (ret < 0) {
+ dev_err(dev, "mbox_send_message failed: %d\n", ret);
+ mbox_free_channel(kproc->mbox);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * The C66x DSP cores have a local reset that affects only the CPU, and a
+ * generic module reset that powers on the device and allows the DSP internal
+ * memories to be accessed while the local reset is asserted. This function is
+ * used to release the global reset on C66x DSPs to allow loading into the DSP
+ * internal RAMs. The .prepare() ops is invoked by remoteproc core before any
+ * firmware loading, and is followed by the .start() ops after loading to
+ * actually let the C66x DSP cores run. This callback is invoked only in
+ * remoteproc mode.
+ */
+static int k3_dsp_rproc_prepare(struct rproc *rproc)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
+ kproc->ti_sci_id);
+ if (ret)
+ dev_err(dev, "module-reset deassert failed, cannot enable internal RAM loading, ret = %d\n",
+ ret);
+
+ return ret;
+}
+
+/*
+ * This function implements the .unprepare() ops and performs the complementary
+ * operations to that of the .prepare() ops. The function is used to assert the
+ * global reset on applicable C66x cores. This completes the second portion of
+ * powering down the C66x DSP cores. The cores themselves are only halted in the
+ * .stop() callback through the local reset, and the .unprepare() ops is invoked
+ * by the remoteproc core after the remoteproc is stopped to balance the global
+ * reset. This callback is invoked only in remoteproc mode.
+ */
+static int k3_dsp_rproc_unprepare(struct rproc *rproc)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id);
+ if (ret)
+ dev_err(dev, "module-reset assert failed, ret = %d\n", ret);
+
+ return ret;
+}
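+
+/*
+ * A hedged summary of the reset choreography implemented by the callbacks
+ * above for lreset-capable (C66x) cores in remoteproc mode:
+ *
+ *	.prepare()   -> module reset released (RAMs accessible, CPU held)
+ *	load         -> remoteproc core copies segments into internal RAMs
+ *	.start()     -> local reset released (CPU runs)
+ *	.stop()      -> local reset asserted (CPU halted)
+ *	.unprepare() -> module reset asserted (completes power-down)
+ */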
+
+/*
+ * Power up the DSP remote processor.
+ *
+ * This function will be invoked only after the firmware for this rproc
+ * was loaded, parsed successfully, and all of its resource requirements
+ * were met. This callback is invoked only in remoteproc mode.
+ */
+static int k3_dsp_rproc_start(struct rproc *rproc)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ u32 boot_addr;
+ int ret;
+
+ ret = k3_dsp_rproc_request_mbox(rproc);
+ if (ret)
+ return ret;
+
+ boot_addr = rproc->bootaddr;
+ if (boot_addr & (kproc->data->boot_align_addr - 1)) {
+ dev_err(dev, "invalid boot address 0x%x, must be aligned on a 0x%x boundary\n",
+ boot_addr, kproc->data->boot_align_addr);
+ ret = -EINVAL;
+ goto put_mbox;
+ }
+
+ dev_err(dev, "booting DSP core using boot addr = 0x%x\n", boot_addr);
+ ret = ti_sci_proc_set_config(kproc->tsp, boot_addr, 0, 0);
+ if (ret)
+ goto put_mbox;
+
+ ret = k3_dsp_rproc_release(kproc);
+ if (ret)
+ goto put_mbox;
+
+ return 0;
+
+put_mbox:
+ mbox_free_channel(kproc->mbox);
+ return ret;
+}
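+
+/*
+ * A worked example of the alignment check above, with hypothetical values:
+ * for boot_align_addr = SZ_1K, a bootaddr of 0x9d000400 passes the mask
+ * test (1 kB aligned), while 0x9d000404 fails it and -EINVAL is returned
+ * before any TI-SCI configuration is attempted.
+ */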
+
+/*
+ * Stop the DSP remote processor.
+ *
+ * This function puts the DSP processor into reset, and finishes processing
+ * of any pending messages. This callback is invoked only in remoteproc mode.
+ */
+static int k3_dsp_rproc_stop(struct rproc *rproc)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+
+ mbox_free_channel(kproc->mbox);
+
+ k3_dsp_rproc_reset(kproc);
+
+ return 0;
+}
+
+/*
+ * Attach to a running DSP remote processor (IPC-only mode)
+ *
+ * This rproc attach callback only needs to request the mailbox; the remote
+ * processor is already booted, so there is no need to issue any TI-SCI
+ * commands to boot the DSP core. This callback is invoked only in IPC-only
+ * mode.
+ */
+static int k3_dsp_rproc_attach(struct rproc *rproc)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ ret = k3_dsp_rproc_request_mbox(rproc);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "DSP initialized in IPC-only mode\n");
+ return 0;
+}
+
+/*
+ * Detach from a running DSP remote processor (IPC-only mode)
+ *
+ * This rproc detach callback performs the opposite operation to the attach
+ * callback and only needs to release the mailbox; the DSP core is not stopped
+ * and will continue running its booted firmware. This callback is invoked only
+ * in IPC-only mode.
+ */
+static int k3_dsp_rproc_detach(struct rproc *rproc)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+
+ mbox_free_channel(kproc->mbox);
+ dev_info(dev, "DSP deinitialized in IPC-only mode\n");
+ return 0;
+}
+
+/*
+ * This function implements the .get_loaded_rsc_table() callback and is used
+ * to provide the resource table for a booted DSP in IPC-only mode. The K3 DSP
+ * firmwares follow a design-by-contract approach and are expected to have the
+ * resource table at the base of the DDR region reserved for firmware usage.
+ * This provides flexibility for the remote processor to be booted by different
+ * bootloaders that may or may not have the ability to publish the resource table
+ * address and size through a DT property. This callback is invoked only in
+ * IPC-only mode.
+ */
+static struct resource_table *k3_dsp_get_loaded_rsc_table(struct rproc *rproc,
+ size_t *rsc_table_sz)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+
+ if (!kproc->rmem[0].cpu_addr) {
+ dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /*
+ * NOTE: The resource table size is currently hard-coded to a maximum
+ * of 256 bytes. The most common resource table usage for K3 firmwares
+ * is to only have the vdev resource entry and an optional trace entry.
+ * The exact size could be computed based on resource table address, but
+ * the hard-coded value suffices to support the IPC-only mode.
+ */
+ *rsc_table_sz = 256;
+ return (struct resource_table *)kproc->rmem[0].cpu_addr;
+}
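+
+/*
+ * An illustrative layout of the contract described above (the addresses and
+ * sizes are hypothetical examples, not requirements of the driver):
+ *
+ *	memory-region[0]: vring/DMA pool
+ *	memory-region[1]: firmware carveout; the resource_table sits at
+ *	                  offset 0x0 and occupies at most the first 256 bytes
+ */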
+
+/*
+ * Custom function to translate a DSP device address (internal RAMs only) to a
+ * kernel virtual address. The DSPs can access their RAMs at either an internal
+ * address visible only from a DSP, or at the SoC-level bus address. Both of
+ * these address views need to be checked during translation. The translated
+ * addresses
+ * can be used either by the remoteproc core for loading (when using kernel
+ * remoteproc loader), or by any rpmsg bus drivers.
+ */
+static void *k3_dsp_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+ void __iomem *va = NULL;
+ phys_addr_t bus_addr;
+ u32 dev_addr, offset;
+ size_t size;
+ int i;
+
+ if (len == 0)
+ return NULL;
+
+ for (i = 0; i < kproc->num_mems; i++) {
+ bus_addr = kproc->mem[i].bus_addr;
+ dev_addr = kproc->mem[i].dev_addr;
+ size = kproc->mem[i].size;
+
+ if (da < KEYSTONE_RPROC_LOCAL_ADDRESS_MASK) {
+ /* handle DSP-view addresses */
+ if (da >= dev_addr &&
+ ((da + len) <= (dev_addr + size))) {
+ offset = da - dev_addr;
+ va = kproc->mem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+ } else {
+ /* handle SoC-view addresses */
+ if (da >= bus_addr &&
+ (da + len) <= (bus_addr + size)) {
+ offset = da - bus_addr;
+ va = kproc->mem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+ }
+ }
+
+ /* handle static DDR reserved memory regions */
+ for (i = 0; i < kproc->num_rmems; i++) {
+ dev_addr = kproc->rmem[i].dev_addr;
+ size = kproc->rmem[i].size;
+
+ if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
+ offset = da - dev_addr;
+ va = kproc->rmem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+ }
+
+ return NULL;
+}
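+
+/*
+ * A worked example for the translation above, with hypothetical values:
+ * given an "l2sram" entry with dev_addr = 0x800000, a DSP-view da of
+ * 0x800100 falls below the 16M local-address mask and maps to
+ * cpu_addr + 0x100; the same RAM accessed through its SoC-view address is
+ * translated via the bus_addr branch instead.
+ */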
+
+static const struct rproc_ops k3_dsp_rproc_ops = {
+ .start = k3_dsp_rproc_start,
+ .stop = k3_dsp_rproc_stop,
+ .kick = k3_dsp_rproc_kick,
+ .da_to_va = k3_dsp_rproc_da_to_va,
+};
+
+static int k3_dsp_rproc_of_get_memories(struct platform_device *pdev,
+ struct k3_dsp_rproc *kproc)
+{
+ const struct k3_dsp_dev_data *data = kproc->data;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int num_mems = 0;
+ int i;
+
+ num_mems = kproc->data->num_mems;
+ kproc->mem = devm_kcalloc(kproc->dev, num_mems,
+ sizeof(*kproc->mem), GFP_KERNEL);
+ if (!kproc->mem)
+ return -ENOMEM;
+
+ for (i = 0; i < num_mems; i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ data->mems[i].name);
+ if (!res) {
+ dev_err(dev, "found no memory resource for %s\n",
+ data->mems[i].name);
+ return -EINVAL;
+ }
+ if (!devm_request_mem_region(dev, res->start,
+ resource_size(res),
+ dev_name(dev))) {
+ dev_err(dev, "could not request %s region for resource\n",
+ data->mems[i].name);
+ return -EBUSY;
+ }
+
+ kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
+ resource_size(res));
+ if (!kproc->mem[i].cpu_addr) {
+ dev_err(dev, "failed to map %s memory\n",
+ data->mems[i].name);
+ return -ENOMEM;
+ }
+ kproc->mem[i].bus_addr = res->start;
+ kproc->mem[i].dev_addr = data->mems[i].dev_addr;
+ kproc->mem[i].size = resource_size(res);
+
+ dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+ data->mems[i].name, &kproc->mem[i].bus_addr,
+ kproc->mem[i].size, kproc->mem[i].cpu_addr,
+ kproc->mem[i].dev_addr);
+ }
+ kproc->num_mems = num_mems;
+
+ return 0;
+}
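+
+/*
+ * The memory names parsed above correspond to "reg-names" entries in the
+ * device tree. A hedged sketch for a C66x node (the matching reg values
+ * are SoC-specific and omitted here):
+ *
+ *	reg-names = "l2sram", "l1pram", "l1dram";
+ */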
+
+static int k3_dsp_reserved_mem_init(struct k3_dsp_rproc *kproc)
+{
+ struct device *dev = kproc->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *rmem_np;
+ struct reserved_mem *rmem;
+ int num_rmems;
+ int ret, i;
+
+ num_rmems = of_property_count_elems_of_size(np, "memory-region",
+ sizeof(phandle));
+ if (num_rmems <= 0) {
+ dev_err(dev, "device does not reserved memory regions, ret = %d\n",
+ num_rmems);
+ return -EINVAL;
+ }
+ if (num_rmems < 2) {
+ dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
+ num_rmems);
+ return -EINVAL;
+ }
+
+ /* use reserved memory region 0 for vring DMA allocations */
+ ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
+ if (ret) {
+ dev_err(dev, "device cannot initialize DMA pool, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ num_rmems--;
+ kproc->rmem = kcalloc(num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
+ if (!kproc->rmem) {
+ ret = -ENOMEM;
+ goto release_rmem;
+ }
+
+ /* use remaining reserved memory regions for static carveouts */
+ for (i = 0; i < num_rmems; i++) {
+ rmem_np = of_parse_phandle(np, "memory-region", i + 1);
+ if (!rmem_np) {
+ ret = -EINVAL;
+ goto unmap_rmem;
+ }
+
+ rmem = of_reserved_mem_lookup(rmem_np);
+ if (!rmem) {
+ of_node_put(rmem_np);
+ ret = -EINVAL;
+ goto unmap_rmem;
+ }
+ of_node_put(rmem_np);
+
+ kproc->rmem[i].bus_addr = rmem->base;
+ /* 64-bit address regions currently not supported */
+ kproc->rmem[i].dev_addr = (u32)rmem->base;
+ kproc->rmem[i].size = rmem->size;
+ kproc->rmem[i].cpu_addr = ioremap_wc(rmem->base, rmem->size);
+ if (!kproc->rmem[i].cpu_addr) {
+ dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
+ i + 1, &rmem->base, &rmem->size);
+ ret = -ENOMEM;
+ goto unmap_rmem;
+ }
+
+ dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+ i + 1, &kproc->rmem[i].bus_addr,
+ kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
+ kproc->rmem[i].dev_addr);
+ }
+ kproc->num_rmems = num_rmems;
+
+ return 0;
+
+unmap_rmem:
+ for (i--; i >= 0; i--)
+ iounmap(kproc->rmem[i].cpu_addr);
+ kfree(kproc->rmem);
+release_rmem:
+ of_reserved_mem_device_release(kproc->dev);
+ return ret;
+}
+
+static void k3_dsp_reserved_mem_exit(struct k3_dsp_rproc *kproc)
+{
+ int i;
+
+ for (i = 0; i < kproc->num_rmems; i++)
+ iounmap(kproc->rmem[i].cpu_addr);
+ kfree(kproc->rmem);
+
+ of_reserved_mem_device_release(kproc->dev);
+}
+
+static
+struct ti_sci_proc *k3_dsp_rproc_of_get_tsp(struct device *dev,
+ const struct ti_sci_handle *sci)
+{
+ struct ti_sci_proc *tsp;
+ u32 temp[2];
+ int ret;
+
+ ret = of_property_read_u32_array(dev->of_node, "ti,sci-proc-ids",
+ temp, 2);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ tsp = kzalloc(sizeof(*tsp), GFP_KERNEL);
+ if (!tsp)
+ return ERR_PTR(-ENOMEM);
+
+ tsp->dev = dev;
+ tsp->sci = sci;
+ tsp->ops = &sci->ops.proc_ops;
+ tsp->proc_id = temp[0];
+ tsp->host_id = temp[1];
+
+ return tsp;
+}
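+
+/*
+ * "ti,sci-proc-ids" carries two cells, the TI-SCI processor id followed by
+ * the host id. A hedged example (the values are placeholders):
+ *
+ *	ti,sci-proc-ids = <0x03 0xff>;
+ */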
+
+static int k3_dsp_rproc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const struct k3_dsp_dev_data *data;
+ struct k3_dsp_rproc *kproc;
+ struct rproc *rproc;
+ const char *fw_name;
+ bool p_state = false;
+ int ret = 0;
+ int ret1;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -ENODEV;
+
+ ret = rproc_of_parse_firmware(dev, 0, &fw_name);
+ if (ret) {
+ dev_err(dev, "failed to parse firmware-name property, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ rproc = rproc_alloc(dev, dev_name(dev), &k3_dsp_rproc_ops, fw_name,
+ sizeof(*kproc));
+ if (!rproc)
+ return -ENOMEM;
+
+ rproc->has_iommu = false;
+ rproc->recovery_disabled = true;
+ if (data->uses_lreset) {
+ rproc->ops->prepare = k3_dsp_rproc_prepare;
+ rproc->ops->unprepare = k3_dsp_rproc_unprepare;
+ }
+ kproc = rproc->priv;
+ kproc->rproc = rproc;
+ kproc->dev = dev;
+ kproc->data = data;
+
+ kproc->ti_sci = ti_sci_get_by_phandle(np, "ti,sci");
+ if (IS_ERR(kproc->ti_sci)) {
+ ret = PTR_ERR(kproc->ti_sci);
+ if (ret != -EPROBE_DEFER) {
+ dev_err(dev, "failed to get ti-sci handle, ret = %d\n",
+ ret);
+ }
+ kproc->ti_sci = NULL;
+ goto free_rproc;
+ }
+
+ ret = of_property_read_u32(np, "ti,sci-dev-id", &kproc->ti_sci_id);
+ if (ret) {
+ dev_err(dev, "missing 'ti,sci-dev-id' property\n");
+ goto put_sci;
+ }
+
+ kproc->reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(kproc->reset)) {
+ ret = PTR_ERR(kproc->reset);
+ dev_err(dev, "failed to get reset, status = %d\n", ret);
+ goto put_sci;
+ }
+
+ kproc->tsp = k3_dsp_rproc_of_get_tsp(dev, kproc->ti_sci);
+ if (IS_ERR(kproc->tsp)) {
+ ret = PTR_ERR(kproc->tsp);
+ dev_err(dev, "failed to construct ti-sci proc control, ret = %d\n",
+ ret);
+ goto put_sci;
+ }
+
+ ret = ti_sci_proc_request(kproc->tsp);
+ if (ret < 0) {
+ dev_err(dev, "ti_sci_proc_request failed, ret = %d\n", ret);
+ goto free_tsp;
+ }
+
+ ret = k3_dsp_rproc_of_get_memories(pdev, kproc);
+ if (ret)
+ goto release_tsp;
+
+ ret = k3_dsp_reserved_mem_init(kproc);
+ if (ret) {
+ dev_err(dev, "reserved memory init failed, ret = %d\n", ret);
+ goto release_tsp;
+ }
+
+ ret = kproc->ti_sci->ops.dev_ops.is_on(kproc->ti_sci, kproc->ti_sci_id,
+ NULL, &p_state);
+ if (ret) {
+ dev_err(dev, "failed to get initial state, mode cannot be determined, ret = %d\n",
+ ret);
+ goto release_mem;
+ }
+
+ /* configure devices for either remoteproc or IPC-only mode */
+ if (p_state) {
+ dev_info(dev, "configured DSP for IPC-only mode\n");
+ rproc->state = RPROC_DETACHED;
+ /* override rproc ops with only required IPC-only mode ops */
+ rproc->ops->prepare = NULL;
+ rproc->ops->unprepare = NULL;
+ rproc->ops->start = NULL;
+ rproc->ops->stop = NULL;
+ rproc->ops->attach = k3_dsp_rproc_attach;
+ rproc->ops->detach = k3_dsp_rproc_detach;
+ rproc->ops->get_loaded_rsc_table = k3_dsp_get_loaded_rsc_table;
+ } else {
+ dev_info(dev, "configured DSP for remoteproc mode\n");
+ /*
+ * make sure the DSP local reset is asserted so that the DSP
+ * doesn't execute bogus code in .prepare() when the module
+ * reset is released.
+ */
+ if (data->uses_lreset) {
+ ret = reset_control_status(kproc->reset);
+ if (ret < 0) {
+ dev_err(dev, "failed to get reset status, status = %d\n",
+ ret);
+ goto release_mem;
+ } else if (ret == 0) {
+ dev_warn(dev, "local reset is deasserted for device\n");
+ k3_dsp_rproc_reset(kproc);
+ }
+ }
+ }
+
+ ret = rproc_add(rproc);
+ if (ret) {
+ dev_err(dev, "failed to add register device with remoteproc core, status = %d\n",
+ ret);
+ goto release_mem;
+ }
+
+ platform_set_drvdata(pdev, kproc);
+
+ return 0;
+
+release_mem:
+ k3_dsp_reserved_mem_exit(kproc);
+release_tsp:
+ ret1 = ti_sci_proc_release(kproc->tsp);
+ if (ret1)
+ dev_err(dev, "failed to release proc, ret = %d\n", ret1);
+free_tsp:
+ kfree(kproc->tsp);
+put_sci:
+ ret1 = ti_sci_put_handle(kproc->ti_sci);
+ if (ret1)
+ dev_err(dev, "failed to put ti_sci handle, ret = %d\n", ret1);
+free_rproc:
+ rproc_free(rproc);
+ return ret;
+}
+
+static int k3_dsp_rproc_remove(struct platform_device *pdev)
+{
+ struct k3_dsp_rproc *kproc = platform_get_drvdata(pdev);
+ struct rproc *rproc = kproc->rproc;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ if (rproc->state == RPROC_ATTACHED) {
+ ret = rproc_detach(rproc);
+ if (ret) {
+ dev_err(dev, "failed to detach proc, ret = %d\n", ret);
+ return ret;
+ }
+ }
+
+ rproc_del(kproc->rproc);
+
+ ret = ti_sci_proc_release(kproc->tsp);
+ if (ret)
+ dev_err(dev, "failed to release proc, ret = %d\n", ret);
+
+ kfree(kproc->tsp);
+
+ ret = ti_sci_put_handle(kproc->ti_sci);
+ if (ret)
+ dev_err(dev, "failed to put ti_sci handle, ret = %d\n", ret);
+
+ k3_dsp_reserved_mem_exit(kproc);
+ rproc_free(kproc->rproc);
+
+ return 0;
+}
+
+static const struct k3_dsp_mem_data c66_mems[] = {
+ { .name = "l2sram", .dev_addr = 0x800000 },
+ { .name = "l1pram", .dev_addr = 0xe00000 },
+ { .name = "l1dram", .dev_addr = 0xf00000 },
+};
+
+/* C71x cores only have an L1P cache; there are no L1P SRAMs */
+static const struct k3_dsp_mem_data c71_mems[] = {
+ { .name = "l2sram", .dev_addr = 0x800000 },
+ { .name = "l1dram", .dev_addr = 0xe00000 },
+};
+
+static const struct k3_dsp_dev_data c66_data = {
+ .mems = c66_mems,
+ .num_mems = ARRAY_SIZE(c66_mems),
+ .boot_align_addr = SZ_1K,
+ .uses_lreset = true,
+};
+
+static const struct k3_dsp_dev_data c71_data = {
+ .mems = c71_mems,
+ .num_mems = ARRAY_SIZE(c71_mems),
+ .boot_align_addr = SZ_2M,
+ .uses_lreset = false,
+};
+
+static const struct of_device_id k3_dsp_of_match[] = {
+ { .compatible = "ti,j721e-c66-dsp", .data = &c66_data, },
+ { .compatible = "ti,j721e-c71-dsp", .data = &c71_data, },
+ { .compatible = "ti,j721s2-c71-dsp", .data = &c71_data, },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, k3_dsp_of_match);
+
+static struct platform_driver k3_dsp_rproc_driver = {
+ .probe = k3_dsp_rproc_probe,
+ .remove = k3_dsp_rproc_remove,
+ .driver = {
+ .name = "k3-dsp-rproc",
+ .of_match_table = k3_dsp_of_match,
+ },
+};
+
+module_platform_driver(k3_dsp_rproc_driver);
+
+MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI K3 DSP Remoteproc driver");
diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
new file mode 100644
index 000000000..0481926c6
--- /dev/null
+++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
@@ -0,0 +1,1799 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI K3 R5F (MCU) Remote Processor driver
+ *
+ * Copyright (C) 2017-2022 Texas Instruments Incorporated - https://www.ti.com/
+ * Suman Anna <s-anna@ti.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/omap-mailbox.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/remoteproc.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include "omap_remoteproc.h"
+#include "remoteproc_internal.h"
+#include "ti_sci_proc.h"
+
+/* This address can either be for ATCM or BTCM with the other at address 0x0 */
+#define K3_R5_TCM_DEV_ADDR 0x41010000
+
+/* R5 TI-SCI Processor Configuration Flags */
+#define PROC_BOOT_CFG_FLAG_R5_DBG_EN 0x00000001
+#define PROC_BOOT_CFG_FLAG_R5_DBG_NIDEN 0x00000002
+#define PROC_BOOT_CFG_FLAG_R5_LOCKSTEP 0x00000100
+#define PROC_BOOT_CFG_FLAG_R5_TEINIT 0x00000200
+#define PROC_BOOT_CFG_FLAG_R5_NMFI_EN 0x00000400
+#define PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE 0x00000800
+#define PROC_BOOT_CFG_FLAG_R5_BTCM_EN 0x00001000
+#define PROC_BOOT_CFG_FLAG_R5_ATCM_EN 0x00002000
+/* Available from J7200 SoCs onwards */
+#define PROC_BOOT_CFG_FLAG_R5_MEM_INIT_DIS 0x00004000
+/* Applicable to only AM64x SoCs */
+#define PROC_BOOT_CFG_FLAG_R5_SINGLE_CORE 0x00008000
+
+/* R5 TI-SCI Processor Control Flags */
+#define PROC_BOOT_CTRL_FLAG_R5_CORE_HALT 0x00000001
+
+/* R5 TI-SCI Processor Status Flags */
+#define PROC_BOOT_STATUS_FLAG_R5_WFE 0x00000001
+#define PROC_BOOT_STATUS_FLAG_R5_WFI 0x00000002
+#define PROC_BOOT_STATUS_FLAG_R5_CLK_GATED 0x00000004
+#define PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED 0x00000100
+/* Applicable to only AM64x SoCs */
+#define PROC_BOOT_STATUS_FLAG_R5_SINGLECORE_ONLY 0x00000200
+
+/**
+ * struct k3_r5_mem - internal memory structure
+ * @cpu_addr: MPU virtual address of the memory region
+ * @bus_addr: Bus address used to access the memory region
+ * @dev_addr: Device address from remoteproc view
+ * @size: Size of the memory region
+ */
+struct k3_r5_mem {
+ void __iomem *cpu_addr;
+ phys_addr_t bus_addr;
+ u32 dev_addr;
+ size_t size;
+};
+
+/*
+ * Not all cluster mode values are applicable on all SoCs. The following
+ * are the modes supported on various SoCs:
+ * Split mode : AM65x, J721E, J7200 and AM64x SoCs
+ * LockStep mode : AM65x, J721E and J7200 SoCs
+ * Single-CPU mode : AM64x SoCs only
+ */
+enum cluster_mode {
+ CLUSTER_MODE_SPLIT = 0,
+ CLUSTER_MODE_LOCKSTEP,
+ CLUSTER_MODE_SINGLECPU,
+};
+
+/**
+ * struct k3_r5_soc_data - match data to handle SoC variations
+ * @tcm_is_double: flag to denote the larger unified TCMs in certain modes
+ * @tcm_ecc_autoinit: flag to denote the auto-initialization of TCMs for ECC
+ * @single_cpu_mode: flag to denote if SoC/IP supports Single-CPU mode
+ */
+struct k3_r5_soc_data {
+ bool tcm_is_double;
+ bool tcm_ecc_autoinit;
+ bool single_cpu_mode;
+};
+
+/**
+ * struct k3_r5_cluster - K3 R5F Cluster structure
+ * @dev: cached device pointer
+ * @mode: Mode to configure the Cluster - Split or LockStep
+ * @cores: list of R5 cores within the cluster
+ * @soc_data: SoC-specific feature data for an R5FSS
+ */
+struct k3_r5_cluster {
+ struct device *dev;
+ enum cluster_mode mode;
+ struct list_head cores;
+ const struct k3_r5_soc_data *soc_data;
+};
+
+/**
+ * struct k3_r5_core - K3 R5 core structure
+ * @elem: linked list item
+ * @dev: cached device pointer
+ * @rproc: rproc handle representing this core
+ * @mem: internal memory regions data
+ * @sram: on-chip SRAM memory regions data
+ * @num_mems: number of internal memory regions
+ * @num_sram: number of on-chip SRAM memory regions
+ * @reset: reset control handle
+ * @tsp: TI-SCI processor control handle
+ * @ti_sci: TI-SCI handle
+ * @ti_sci_id: TI-SCI device identifier
+ * @atcm_enable: flag to control ATCM enablement
+ * @btcm_enable: flag to control BTCM enablement
+ * @loczrama: flag to dictate which TCM is at device address 0x0
+ */
+struct k3_r5_core {
+ struct list_head elem;
+ struct device *dev;
+ struct rproc *rproc;
+ struct k3_r5_mem *mem;
+ struct k3_r5_mem *sram;
+ int num_mems;
+ int num_sram;
+ struct reset_control *reset;
+ struct ti_sci_proc *tsp;
+ const struct ti_sci_handle *ti_sci;
+ u32 ti_sci_id;
+ u32 atcm_enable;
+ u32 btcm_enable;
+ u32 loczrama;
+};
+
+/**
+ * struct k3_r5_rproc - K3 remote processor state
+ * @dev: cached device pointer
+ * @cluster: cached pointer to parent cluster structure
+ * @mbox: mailbox channel handle
+ * @client: mailbox client to request the mailbox channel
+ * @rproc: rproc handle
+ * @core: cached pointer to r5 core structure being used
+ * @rmem: reserved memory regions data
+ * @num_rmems: number of reserved memory regions
+ */
+struct k3_r5_rproc {
+ struct device *dev;
+ struct k3_r5_cluster *cluster;
+ struct mbox_chan *mbox;
+ struct mbox_client client;
+ struct rproc *rproc;
+ struct k3_r5_core *core;
+ struct k3_r5_mem *rmem;
+ int num_rmems;
+};
+
+/**
+ * k3_r5_rproc_mbox_callback() - inbound mailbox message handler
+ * @client: mailbox client pointer used for requesting the mailbox channel
+ * @data: mailbox payload
+ *
+ * This handler is invoked by the OMAP mailbox driver whenever a mailbox
+ * message is received. Usually, the mailbox payload simply contains
+ * the index of the virtqueue that is kicked by the remote processor,
+ * and we let remoteproc core handle it.
+ *
+ * In addition to virtqueue indices, we also have some out-of-band values
+ * that indicate different events. Those values are deliberately very
+ * large so they don't coincide with virtqueue indices.
+ */
+static void k3_r5_rproc_mbox_callback(struct mbox_client *client, void *data)
+{
+ struct k3_r5_rproc *kproc = container_of(client, struct k3_r5_rproc,
+ client);
+ struct device *dev = kproc->rproc->dev.parent;
+ const char *name = kproc->rproc->name;
+ u32 msg = omap_mbox_message(data);
+
+ dev_dbg(dev, "mbox msg: 0x%x\n", msg);
+
+ switch (msg) {
+ case RP_MBOX_CRASH:
+ /*
+ * remoteproc detected an exception, but error recovery is not
+ * supported. So, just log this for now
+ */
+ dev_err(dev, "K3 R5F rproc %s crashed\n", name);
+ break;
+ case RP_MBOX_ECHO_REPLY:
+ dev_info(dev, "received echo reply from %s\n", name);
+ break;
+ default:
+ /* silently handle all other valid messages */
+ if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
+ return;
+ if (msg > kproc->rproc->max_notifyid) {
+ dev_dbg(dev, "dropping unknown message 0x%x", msg);
+ return;
+ }
+ /* msg contains the index of the triggered vring */
+ if (rproc_vq_interrupt(kproc->rproc, msg) == IRQ_NONE)
+ dev_dbg(dev, "no message was found in vqid %d\n", msg);
+ }
+}
+
+/* kick a virtqueue */
+static void k3_r5_rproc_kick(struct rproc *rproc, int vqid)
+{
+ struct k3_r5_rproc *kproc = rproc->priv;
+ struct device *dev = rproc->dev.parent;
+ mbox_msg_t msg = (mbox_msg_t)vqid;
+ int ret;
+
+ /* send the index of the triggered virtqueue in the mailbox payload */
+ ret = mbox_send_message(kproc->mbox, (void *)msg);
+ if (ret < 0)
+ dev_err(dev, "failed to send mailbox message, status = %d\n",
+ ret);
+}
+
+static int k3_r5_split_reset(struct k3_r5_core *core)
+{
+ int ret;
+
+ ret = reset_control_assert(core->reset);
+ if (ret) {
+ dev_err(core->dev, "local-reset assert failed, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
+ core->ti_sci_id);
+ if (ret) {
+ dev_err(core->dev, "module-reset assert failed, ret = %d\n",
+ ret);
+ if (reset_control_deassert(core->reset))
+ dev_warn(core->dev, "local-reset deassert back failed\n");
+ }
+
+ return ret;
+}
+
+static int k3_r5_split_release(struct k3_r5_core *core)
+{
+ int ret;
+
+ ret = core->ti_sci->ops.dev_ops.get_device(core->ti_sci,
+ core->ti_sci_id);
+ if (ret) {
+ dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = reset_control_deassert(core->reset);
+ if (ret) {
+ dev_err(core->dev, "local-reset deassert failed, ret = %d\n",
+ ret);
+ if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
+ core->ti_sci_id))
+ dev_warn(core->dev, "module-reset assert back failed\n");
+ }
+
+ return ret;
+}
+
+static int k3_r5_lockstep_reset(struct k3_r5_cluster *cluster)
+{
+ struct k3_r5_core *core;
+ int ret;
+
+ /* assert local reset on all applicable cores */
+ list_for_each_entry(core, &cluster->cores, elem) {
+ ret = reset_control_assert(core->reset);
+ if (ret) {
+ dev_err(core->dev, "local-reset assert failed, ret = %d\n",
+ ret);
+ core = list_prev_entry(core, elem);
+ goto unroll_local_reset;
+ }
+ }
+
+ /* disable PSC modules on all applicable cores */
+ list_for_each_entry(core, &cluster->cores, elem) {
+ ret = core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
+ core->ti_sci_id);
+ if (ret) {
+ dev_err(core->dev, "module-reset assert failed, ret = %d\n",
+ ret);
+ goto unroll_module_reset;
+ }
+ }
+
+ return 0;
+
+unroll_module_reset:
+ list_for_each_entry_continue_reverse(core, &cluster->cores, elem) {
+ if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
+ core->ti_sci_id))
+ dev_warn(core->dev, "module-reset assert back failed\n");
+ }
+ core = list_last_entry(&cluster->cores, struct k3_r5_core, elem);
+unroll_local_reset:
+ list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
+ if (reset_control_deassert(core->reset))
+ dev_warn(core->dev, "local-reset deassert back failed\n");
+ }
+
+ return ret;
+}
+
+static int k3_r5_lockstep_release(struct k3_r5_cluster *cluster)
+{
+ struct k3_r5_core *core;
+ int ret;
+
+ /* enable PSC modules on all applicable cores */
+ list_for_each_entry_reverse(core, &cluster->cores, elem) {
+ ret = core->ti_sci->ops.dev_ops.get_device(core->ti_sci,
+ core->ti_sci_id);
+ if (ret) {
+ dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
+ ret);
+ core = list_next_entry(core, elem);
+ goto unroll_module_reset;
+ }
+ }
+
+ /* deassert local reset on all applicable cores */
+ list_for_each_entry_reverse(core, &cluster->cores, elem) {
+ ret = reset_control_deassert(core->reset);
+ if (ret) {
+ dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
+ ret);
+ goto unroll_local_reset;
+ }
+ }
+
+ return 0;
+
+unroll_local_reset:
+ list_for_each_entry_continue(core, &cluster->cores, elem) {
+ if (reset_control_assert(core->reset))
+ dev_warn(core->dev, "local-reset assert back failed\n");
+ }
+ core = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
+unroll_module_reset:
+ list_for_each_entry_from(core, &cluster->cores, elem) {
+ if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
+ core->ti_sci_id))
+ dev_warn(core->dev, "module-reset assert back failed\n");
+ }
+
+ return ret;
+}
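+
+/*
+ * A sketch of the ordering implemented by the two lockstep helpers above,
+ * assuming Core0 is first in the cluster list: reset walks the list forward
+ * (Core0 then Core1) for both the local-reset asserts and the PSC disables,
+ * while release walks it in reverse (Core1 then Core0), matching the
+ * power-up ordering used later by .start().
+ */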
+
+static inline int k3_r5_core_halt(struct k3_r5_core *core)
+{
+ return ti_sci_proc_set_control(core->tsp,
+ PROC_BOOT_CTRL_FLAG_R5_CORE_HALT, 0);
+}
+
+static inline int k3_r5_core_run(struct k3_r5_core *core)
+{
+ return ti_sci_proc_set_control(core->tsp,
+ 0, PROC_BOOT_CTRL_FLAG_R5_CORE_HALT);
+}
+
+static int k3_r5_rproc_request_mbox(struct rproc *rproc)
+{
+ struct k3_r5_rproc *kproc = rproc->priv;
+ struct mbox_client *client = &kproc->client;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ client->dev = dev;
+ client->tx_done = NULL;
+ client->rx_callback = k3_r5_rproc_mbox_callback;
+ client->tx_block = false;
+ client->knows_txdone = false;
+
+ kproc->mbox = mbox_request_channel(client, 0);
+ if (IS_ERR(kproc->mbox)) {
+ ret = -EBUSY;
+ dev_err(dev, "mbox_request_channel failed: %ld\n",
+ PTR_ERR(kproc->mbox));
+ return ret;
+ }
+
+ /*
+ * Ping the remote processor; this is only a sanity check for now
+ * and has no functional effect whatsoever.
+ *
+ * Note that the reply will _not_ arrive immediately: this message
+ * will wait in the mailbox fifo until the remote processor is booted.
+ */
+ ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
+ if (ret < 0) {
+ dev_err(dev, "mbox_send_message failed: %d\n", ret);
+ mbox_free_channel(kproc->mbox);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * The R5F cores have controls for both a reset and a halt/run. The code
+ * execution from DDR requires the initial boot-strapping code to be run
+ * from the internal TCMs. This function is used to release the resets on
+ * applicable cores to allow loading into the TCMs. The .prepare() ops is
+ * invoked by remoteproc core before any firmware loading, and is followed
+ * by the .start() ops after loading to actually let the R5 cores run.
+ *
+ * The Single-CPU mode on applicable SoCs (eg: AM64x) only uses Core0 to
+ * execute code, but combines the TCMs from both cores. The resets for both
+ * cores need to be released to make this possible, as the TCMs are in general
+ * private to each core. Only Core0 needs to be unhalted for running the
+ * cluster in this mode. The function uses the same reset logic as LockStep
+ * mode for this (though the behavior is agnostic of the reset release order).
+ * This callback is invoked only in remoteproc mode.
+ */
+static int k3_r5_rproc_prepare(struct rproc *rproc)
+{
+ struct k3_r5_rproc *kproc = rproc->priv;
+ struct k3_r5_cluster *cluster = kproc->cluster;
+ struct k3_r5_core *core = kproc->core;
+ struct device *dev = kproc->dev;
+ u32 ctrl = 0, cfg = 0, stat = 0;
+ u64 boot_vec = 0;
+ bool mem_init_dis;
+ int ret;
+
+ ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl, &stat);
+ if (ret < 0)
+ return ret;
+ mem_init_dis = !!(cfg & PROC_BOOT_CFG_FLAG_R5_MEM_INIT_DIS);
+
+ /* Re-use LockStep-mode reset logic for Single-CPU mode */
+ ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
+ cluster->mode == CLUSTER_MODE_SINGLECPU) ?
+ k3_r5_lockstep_release(cluster) : k3_r5_split_release(core);
+ if (ret) {
+ dev_err(dev, "unable to enable cores for TCM loading, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ /*
+ * Newer IP revisions like on J7200 SoCs support h/w auto-initialization
+ * of TCMs, so there is no need to perform the s/w memzero. This bit is
+ * configurable through System Firmware; the default value does perform
+ * auto-init, but account for it in case it is disabled.
+ */
+ if (cluster->soc_data->tcm_ecc_autoinit && !mem_init_dis) {
+ dev_dbg(dev, "leveraging h/w init for TCM memories\n");
+ return 0;
+ }
+
+ /*
+ * Zero out both TCMs unconditionally (access from v8 Arm core is not
+ * affected by ATCM & BTCM enable configuration values) so that ECC
+ * can be effective on all TCM addresses.
+ */
+ dev_dbg(dev, "zeroing out ATCM memory\n");
+ memset(core->mem[0].cpu_addr, 0x00, core->mem[0].size);
+
+ dev_dbg(dev, "zeroing out BTCM memory\n");
+ memset(core->mem[1].cpu_addr, 0x00, core->mem[1].size);
+
+ return 0;
+}
+
+/*
+ * This function implements the .unprepare() ops and performs the complementary
+ * operations to that of the .prepare() ops. The function is used to assert the
+ * resets on all applicable cores for the rproc device (depending on LockStep
+ * or Split mode). This completes the second portion of powering down the R5F
+ * cores. The cores themselves are only halted in the .stop() ops, and the
+ * .unprepare() ops is invoked by the remoteproc core after the remoteproc is
+ * stopped.
+ *
+ * The Single-CPU mode on applicable SoCs (eg: AM64x) combines the TCMs from
+ * both cores. The access is made possible only with releasing the resets for
+ * both cores, but with only Core0 unhalted. This function re-uses the same
+ * reset assert logic as LockStep mode for this mode (though the behavior is
+ * agnostic of the reset assert order). This callback is invoked only in
+ * remoteproc mode.
+ */
+static int k3_r5_rproc_unprepare(struct rproc *rproc)
+{
+ struct k3_r5_rproc *kproc = rproc->priv;
+ struct k3_r5_cluster *cluster = kproc->cluster;
+ struct k3_r5_core *core = kproc->core;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ /* Re-use LockStep-mode reset logic for Single-CPU mode */
+ ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
+ cluster->mode == CLUSTER_MODE_SINGLECPU) ?
+ k3_r5_lockstep_reset(cluster) : k3_r5_split_reset(core);
+ if (ret)
+ dev_err(dev, "unable to disable cores, ret = %d\n", ret);
+
+ return ret;
+}
+
+/*
+ * The R5F start sequence includes two different operations
+ * 1. Configure the boot vector for R5F core(s)
+ * 2. Unhalt/Run the R5F core(s)
+ *
+ * The sequence is different between LockStep and Split modes. The LockStep
+ * mode requires the boot vector to be configured only for Core0, and then
+ * both cores are unhalted to start execution - Core1 needs to be unhalted
+ * first, followed by Core0. The Split mode requires Core0 to always be
+ * maintained in a higher power state than Core1 (implying Core1 can only be
+ * started after Core0 is started).
+ *
+ * The Single-CPU mode on applicable SoCs (eg: AM64x) only uses Core0 to execute
+ * code, so only Core0 needs to be unhalted. The function uses the same logic
+ * flow as Split-mode for this. This callback is invoked only in remoteproc
+ * mode.
+ */
+static int k3_r5_rproc_start(struct rproc *rproc)
+{
+ struct k3_r5_rproc *kproc = rproc->priv;
+ struct k3_r5_cluster *cluster = kproc->cluster;
+ struct device *dev = kproc->dev;
+ struct k3_r5_core *core;
+ u32 boot_addr;
+ int ret;
+
+ ret = k3_r5_rproc_request_mbox(rproc);
+ if (ret)
+ return ret;
+
+ boot_addr = rproc->bootaddr;
+ /* TODO: add boot_addr sanity checking */
+ dev_dbg(dev, "booting R5F core using boot addr = 0x%x\n", boot_addr);
+
+ /* boot vector need not be programmed for Core1 in LockStep mode */
+ core = kproc->core;
+ ret = ti_sci_proc_set_config(core->tsp, boot_addr, 0, 0);
+ if (ret)
+ goto put_mbox;
+
+ /* unhalt/run all applicable cores */
+ if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
+ list_for_each_entry_reverse(core, &cluster->cores, elem) {
+ ret = k3_r5_core_run(core);
+ if (ret)
+ goto unroll_core_run;
+ }
+ } else {
+ ret = k3_r5_core_run(core);
+ if (ret)
+ goto put_mbox;
+ }
+
+ return 0;
+
+unroll_core_run:
+ list_for_each_entry_continue(core, &cluster->cores, elem) {
+ if (k3_r5_core_halt(core))
+ dev_warn(core->dev, "core halt back failed\n");
+ }
+put_mbox:
+ mbox_free_channel(kproc->mbox);
+ return ret;
+}
+
+/*
+ * The R5F stop function includes the following operations
+ * 1. Halt R5F core(s)
+ *
+ * The sequence is different between LockStep and Split modes, and the order
+ * in which the operations are performed on the cores is also in general the
+ * reverse of that in the start function. The LockStep mode requires each
+ * operation to be performed first on Core0, followed by Core1. The Split mode
+ * requires Core0 to always be maintained in a higher power state than Core1
+ * (implying Core1 needs to be stopped before Core0).
+ *
+ * The Single-CPU mode on applicable SoCs (eg: AM64x) only uses Core0 to execute
+ * code, so only Core0 needs to be halted. The function uses the same logic
+ * flow as Split-mode for this.
+ *
+ * Note that the R5F halt operation in general does not take effect while the
+ * R5F core is running, but is needed to make sure the core won't run after
+ * the reset is deasserted the subsequent time. The reset could be asserted
+ * here, but it is preferably done in the .unprepare() ops - this
+ * maintains the symmetric behavior between the .start(), .stop(), .prepare()
+ * and .unprepare() ops, and also balances them well between sysfs 'state'
+ * flow and device bind/unbind or module removal. This callback is invoked
+ * only in remoteproc mode.
+ */
+static int k3_r5_rproc_stop(struct rproc *rproc)
+{
+ struct k3_r5_rproc *kproc = rproc->priv;
+ struct k3_r5_cluster *cluster = kproc->cluster;
+ struct k3_r5_core *core = kproc->core;
+ int ret;
+
+ /* halt all applicable cores */
+ if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
+ list_for_each_entry(core, &cluster->cores, elem) {
+ ret = k3_r5_core_halt(core);
+ if (ret) {
+ core = list_prev_entry(core, elem);
+ goto unroll_core_halt;
+ }
+ }
+ } else {
+ ret = k3_r5_core_halt(core);
+ if (ret)
+ goto out;
+ }
+
+ mbox_free_channel(kproc->mbox);
+
+ return 0;
+
+unroll_core_halt:
+ list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
+ if (k3_r5_core_run(core))
+ dev_warn(core->dev, "core run back failed\n");
+ }
+out:
+ return ret;
+}
+
+/*
+ * Attach to a running R5F remote processor (IPC-only mode)
+ *
+ * The R5F attach callback only needs to request the mailbox, the remote
+ * processor is already booted, so there is no need to issue any TI-SCI
+ * commands to boot the R5F cores in IPC-only mode. This callback is invoked
+ * only in IPC-only mode.
+ */
+static int k3_r5_rproc_attach(struct rproc *rproc)
+{
+ struct k3_r5_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ ret = k3_r5_rproc_request_mbox(rproc);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "R5F core initialized in IPC-only mode\n");
+ return 0;
+}
+
+/*
+ * Detach from a running R5F remote processor (IPC-only mode)
+ *
+ * The R5F detach callback performs the opposite operation to attach callback
+ * and only needs to release the mailbox, the R5F cores are not stopped and
+ * will be left in booted state in IPC-only mode. This callback is invoked
+ * only in IPC-only mode.
+ */
+static int k3_r5_rproc_detach(struct rproc *rproc)
+{
+ struct k3_r5_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+
+ mbox_free_channel(kproc->mbox);
+ dev_info(dev, "R5F core deinitialized in IPC-only mode\n");
+ return 0;
+}
+
+/*
+ * This function implements the .get_loaded_rsc_table() callback and is used
+ * to provide the resource table for the booted R5F in IPC-only mode. The K3 R5F
+ * firmwares follow a design-by-contract approach and are expected to have the
+ * resource table at the base of the DDR region reserved for firmware usage.
+ * This provides flexibility for the remote processor to be booted by different
+ * bootloaders that may or may not have the ability to publish the resource table
+ * address and size through a DT property. This callback is invoked only in
+ * IPC-only mode.
+ */
+static struct resource_table *k3_r5_get_loaded_rsc_table(struct rproc *rproc,
+ size_t *rsc_table_sz)
+{
+ struct k3_r5_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+
+ if (!kproc->rmem[0].cpu_addr) {
+ dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /*
+ * NOTE: The resource table size is currently hard-coded to a maximum
+ * of 256 bytes. The most common resource table usage for K3 firmwares
+ * is to only have the vdev resource entry and an optional trace entry.
+ * The exact size could be computed based on resource table address, but
+ * the hard-coded value suffices to support the IPC-only mode.
+ */
+ *rsc_table_sz = 256;
+ return (struct resource_table *)kproc->rmem[0].cpu_addr;
+}
+
+/*
+ * Internal Memory translation helper
+ *
+ * Custom function implementing the rproc .da_to_va ops to provide address
+ * translation (device address to kernel virtual address) for internal RAMs
+ * present in an R5F core. The translated addresses can be used
+ * either by the remoteproc core for loading, or by any rpmsg bus drivers.
+ */
+static void *k3_r5_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
+{
+ struct k3_r5_rproc *kproc = rproc->priv;
+ struct k3_r5_core *core = kproc->core;
+ void __iomem *va = NULL;
+ phys_addr_t bus_addr;
+ u32 dev_addr, offset;
+ size_t size;
+ int i;
+
+ if (len == 0)
+ return NULL;
+
+ /* handle both R5 and SoC views of ATCM and BTCM */
+ for (i = 0; i < core->num_mems; i++) {
+ bus_addr = core->mem[i].bus_addr;
+ dev_addr = core->mem[i].dev_addr;
+ size = core->mem[i].size;
+
+ /* handle R5-view addresses of TCMs */
+ if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
+ offset = da - dev_addr;
+ va = core->mem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+
+ /* handle SoC-view addresses of TCMs */
+ if (da >= bus_addr && ((da + len) <= (bus_addr + size))) {
+ offset = da - bus_addr;
+ va = core->mem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+ }
+
+ /* handle any SRAM regions using SoC-view addresses */
+ for (i = 0; i < core->num_sram; i++) {
+ dev_addr = core->sram[i].dev_addr;
+ size = core->sram[i].size;
+
+ if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
+ offset = da - dev_addr;
+ va = core->sram[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+ }
+
+ /* handle static DDR reserved memory regions */
+ for (i = 0; i < kproc->num_rmems; i++) {
+ dev_addr = kproc->rmem[i].dev_addr;
+ size = kproc->rmem[i].size;
+
+ if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
+ offset = da - dev_addr;
+ va = kproc->rmem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+ }
+
+ return NULL;
+}
+
+static const struct rproc_ops k3_r5_rproc_ops = {
+ .prepare = k3_r5_rproc_prepare,
+ .unprepare = k3_r5_rproc_unprepare,
+ .start = k3_r5_rproc_start,
+ .stop = k3_r5_rproc_stop,
+ .kick = k3_r5_rproc_kick,
+ .da_to_va = k3_r5_rproc_da_to_va,
+};
+
+/*
+ * Internal R5F Core configuration
+ *
+ * Each R5FSS has a cluster-level setting for configuring the processor
+ * subsystem either in a safety/fault-tolerant LockStep mode or a performance
+ * oriented Split mode on most SoCs. A few SoCs support a non-safety mode,
+ * called Single-CPU mode, as an alternative to LockStep mode that exercises
+ * only a single R5F core. Each R5F core has a number of settings to
+ * enable/disable each of the TCMs and to control which TCM appears at the R5F core's
+ * address 0x0. These settings need to be configured before the resets for the
+ * corresponding core are released. These settings are all protected and managed
+ * by the System Processor.
+ *
+ * This function is used to pre-configure these settings for each R5F core, and
+ * the configuration is all done through various ti_sci_proc functions that
+ * communicate with the System Processor. The function also ensures that both
+ * the cores are halted before the .prepare() step.
+ *
+ * The function is called from k3_r5_cluster_rproc_init() and is invoked either
+ * once (in LockStep mode or Single-CPU modes) or twice (in Split mode). Support
+ * for LockStep-mode is dictated by an eFUSE register bit, and the config
+ * settings retrieved from DT are adjusted accordingly as per the permitted
+ * cluster mode. Another eFUSE register bit dictates if the R5F cluster only
+ * supports a Single-CPU mode. All cluster level settings like Cluster mode and
+ * TEINIT (exception handling state dictating ARM or Thumb mode) can only be set
+ * and retrieved using Core0.
+ *
+ * The function behavior is different based on the cluster mode. The R5F cores
+ * are configured independently as per their individual settings in Split mode.
+ * They are identically configured in LockStep mode using the primary Core0
+ * settings. However, some individual settings cannot be set in LockStep mode.
+ * This is overcome by switching to Split-mode initially and then programming
+ * both the cores with the same settings, before reconfiguring again for
+ * LockStep mode.
+ */
+static int k3_r5_rproc_configure(struct k3_r5_rproc *kproc)
+{
+ struct k3_r5_cluster *cluster = kproc->cluster;
+ struct device *dev = kproc->dev;
+ struct k3_r5_core *core0, *core, *temp;
+ u32 ctrl = 0, cfg = 0, stat = 0;
+ u32 set_cfg = 0, clr_cfg = 0;
+ u64 boot_vec = 0;
+ bool lockstep_en;
+ bool single_cpu;
+ int ret;
+
+ core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
+ if (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
+ cluster->mode == CLUSTER_MODE_SINGLECPU) {
+ core = core0;
+ } else {
+ core = kproc->core;
+ }
+
+ ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl,
+ &stat);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(dev, "boot_vector = 0x%llx, cfg = 0x%x ctrl = 0x%x stat = 0x%x\n",
+ boot_vec, cfg, ctrl, stat);
+
+ /* check if only Single-CPU mode is supported on applicable SoCs */
+ if (cluster->soc_data->single_cpu_mode) {
+ single_cpu =
+ !!(stat & PROC_BOOT_STATUS_FLAG_R5_SINGLECORE_ONLY);
+ if (single_cpu && cluster->mode == CLUSTER_MODE_SPLIT) {
+ dev_err(cluster->dev, "split-mode not permitted, force configuring for single-cpu mode\n");
+ cluster->mode = CLUSTER_MODE_SINGLECPU;
+ }
+ goto config;
+ }
+
+ /* check conventional LockStep vs Split mode configuration */
+ lockstep_en = !!(stat & PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED);
+ if (!lockstep_en && cluster->mode == CLUSTER_MODE_LOCKSTEP) {
+ dev_err(cluster->dev, "lockstep mode not permitted, force configuring for split-mode\n");
+ cluster->mode = CLUSTER_MODE_SPLIT;
+ }
+
+config:
+ /* always enable ARM mode and set boot vector to 0 */
+ boot_vec = 0x0;
+ if (core == core0) {
+ clr_cfg = PROC_BOOT_CFG_FLAG_R5_TEINIT;
+ if (cluster->soc_data->single_cpu_mode) {
+ /*
+ * Single-CPU configuration bit can only be configured
+ * on Core0 and system firmware will NACK any requests
+ * with the bit configured, so program it only on
+ * permitted cores
+ */
+ if (cluster->mode == CLUSTER_MODE_SINGLECPU)
+ set_cfg = PROC_BOOT_CFG_FLAG_R5_SINGLE_CORE;
+ } else {
+ /*
+ * LockStep configuration bit is Read-only on Split-mode
+ * _only_ devices and system firmware will NACK any
+ * requests with the bit configured, so program it only
+ * on permitted devices
+ */
+ if (lockstep_en)
+ clr_cfg |= PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
+ }
+ }
+
+ if (core->atcm_enable)
+ set_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;
+ else
+ clr_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;
+
+ if (core->btcm_enable)
+ set_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;
+ else
+ clr_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;
+
+ if (core->loczrama)
+ set_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;
+ else
+ clr_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;
+
+ if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
+ /*
+ * work around system firmware limitations to make sure both
+ * cores are programmed symmetrically in LockStep. LockStep
+ * and TEINIT config is only allowed with Core0.
+ */
+ list_for_each_entry(temp, &cluster->cores, elem) {
+ ret = k3_r5_core_halt(temp);
+ if (ret)
+ goto out;
+
+ if (temp != core) {
+ clr_cfg &= ~PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
+ clr_cfg &= ~PROC_BOOT_CFG_FLAG_R5_TEINIT;
+ }
+ ret = ti_sci_proc_set_config(temp->tsp, boot_vec,
+ set_cfg, clr_cfg);
+ if (ret)
+ goto out;
+ }
+
+ set_cfg = PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
+ clr_cfg = 0;
+ ret = ti_sci_proc_set_config(core->tsp, boot_vec,
+ set_cfg, clr_cfg);
+ } else {
+ ret = k3_r5_core_halt(core);
+ if (ret)
+ goto out;
+
+ ret = ti_sci_proc_set_config(core->tsp, boot_vec,
+ set_cfg, clr_cfg);
+ }
+
+out:
+ return ret;
+}
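+
+/*
+ * Illustrative example (hypothetical DT settings, PROC_BOOT_CFG_FLAG_R5_
+ * prefixes omitted): for Core0 of a LockStep-capable cluster probed in Split
+ * mode with atcm_enable = 0, btcm_enable = 1 and loczrama = 1, the function
+ * above computes set_cfg = BTCM_EN | TCM_RSTBASE and clr_cfg = TEINIT |
+ * LOCKSTEP | ATCM_EN, which ti_sci_proc_set_config() forwards to the System
+ * Processor.
+ */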
+
+static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc)
+{
+ struct device *dev = kproc->dev;
+ struct device_node *np = dev_of_node(dev);
+ struct device_node *rmem_np;
+ struct reserved_mem *rmem;
+ int num_rmems;
+ int ret, i;
+
+ num_rmems = of_property_count_elems_of_size(np, "memory-region",
+ sizeof(phandle));
+ if (num_rmems <= 0) {
+ dev_err(dev, "device does not have reserved memory regions, ret = %d\n",
+ num_rmems);
+ return -EINVAL;
+ }
+ if (num_rmems < 2) {
+ dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
+ num_rmems);
+ return -EINVAL;
+ }
+
+ /* use reserved memory region 0 for vring DMA allocations */
+ ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
+ if (ret) {
+ dev_err(dev, "device cannot initialize DMA pool, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ num_rmems--;
+ kproc->rmem = kcalloc(num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
+ if (!kproc->rmem) {
+ ret = -ENOMEM;
+ goto release_rmem;
+ }
+
+ /* use remaining reserved memory regions for static carveouts */
+ for (i = 0; i < num_rmems; i++) {
+ rmem_np = of_parse_phandle(np, "memory-region", i + 1);
+ if (!rmem_np) {
+ ret = -EINVAL;
+ goto unmap_rmem;
+ }
+
+ rmem = of_reserved_mem_lookup(rmem_np);
+ if (!rmem) {
+ of_node_put(rmem_np);
+ ret = -EINVAL;
+ goto unmap_rmem;
+ }
+ of_node_put(rmem_np);
+
+ kproc->rmem[i].bus_addr = rmem->base;
+ /*
+		 * R5Fs do not have an MMU, but have a Region Address Translator
+		 * (RAT) module that provides a fixed-entry translation from
+		 * 32-bit processor addresses to 64-bit bus addresses. The RAT
+		 * is programmable only by the R5F cores, and is not currently
+		 * supported by this driver, so 64-bit address regions cannot
+		 * be used. The absence of MMUs implies that the R5F device
+		 * addresses/supported memory regions are restricted to 32-bit
+		 * bus addresses, and the two are identical.
+ */
+ kproc->rmem[i].dev_addr = (u32)rmem->base;
+ kproc->rmem[i].size = rmem->size;
+ kproc->rmem[i].cpu_addr = ioremap_wc(rmem->base, rmem->size);
+ if (!kproc->rmem[i].cpu_addr) {
+ dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
+ i + 1, &rmem->base, &rmem->size);
+ ret = -ENOMEM;
+ goto unmap_rmem;
+ }
+
+ dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+ i + 1, &kproc->rmem[i].bus_addr,
+ kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
+ kproc->rmem[i].dev_addr);
+ }
+ kproc->num_rmems = num_rmems;
+
+ return 0;
+
+unmap_rmem:
+ for (i--; i >= 0; i--)
+ iounmap(kproc->rmem[i].cpu_addr);
+ kfree(kproc->rmem);
+release_rmem:
+ of_reserved_mem_device_release(dev);
+ return ret;
+}
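+
+/*
+ * Illustrative DT usage for the above (hypothetical node names and
+ * addresses): the first "memory-region" entry backs the vring DMA pool,
+ * any remaining entries become static carveouts.
+ *
+ *	reserved-memory {
+ *		r5f_dma_memory: r5f-dma-memory@9b800000 {
+ *			compatible = "shared-dma-pool";
+ *			reg = <0x00 0x9b800000 0x00 0x100000>;
+ *			no-map;
+ *		};
+ *
+ *		r5f_memory: r5f-memory@9b900000 {
+ *			reg = <0x00 0x9b900000 0x00 0xf00000>;
+ *			no-map;
+ *		};
+ *	};
+ *
+ *	&r5f_core0 {
+ *		memory-region = <&r5f_dma_memory>, <&r5f_memory>;
+ *	};
+ */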
+
+static void k3_r5_reserved_mem_exit(struct k3_r5_rproc *kproc)
+{
+ int i;
+
+ for (i = 0; i < kproc->num_rmems; i++)
+ iounmap(kproc->rmem[i].cpu_addr);
+ kfree(kproc->rmem);
+
+ of_reserved_mem_device_release(kproc->dev);
+}
+
+/*
+ * Each R5F core within a typical R5FSS instance has a total of 64 KB of TCMs,
+ * split equally into two 32 KB banks between ATCM and BTCM. The TCMs from both
+ * cores are usable in Split-mode, but only the Core0 TCMs can be used in
+ * LockStep-mode. The newer revisions of the R5FSS IP maximize these TCMs by
+ * leveraging the Core1 TCMs as well in certain modes where they would have
+ * otherwise been unusable (e.g. LockStep-mode on J7200 SoCs, Single-CPU mode on
+ * AM64x SoCs). This is done by making a Core1 TCM visible immediately after the
+ * corresponding Core0 TCM. The SoC memory map uses the larger 64 KB sizes for
+ * the Core0 TCMs, and the dts representation reflects this increased size on
+ * supported SoCs. The Core0 TCM sizes therefore have to be adjusted to only
+ * half the original size in Split mode.
+ */
+static void k3_r5_adjust_tcm_sizes(struct k3_r5_rproc *kproc)
+{
+ struct k3_r5_cluster *cluster = kproc->cluster;
+ struct k3_r5_core *core = kproc->core;
+ struct device *cdev = core->dev;
+ struct k3_r5_core *core0;
+
+ if (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
+ cluster->mode == CLUSTER_MODE_SINGLECPU ||
+ !cluster->soc_data->tcm_is_double)
+ return;
+
+ core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
+ if (core == core0) {
+ WARN_ON(core->mem[0].size != SZ_64K);
+ WARN_ON(core->mem[1].size != SZ_64K);
+
+ core->mem[0].size /= 2;
+ core->mem[1].size /= 2;
+
+ dev_dbg(cdev, "adjusted TCM sizes, ATCM = 0x%zx BTCM = 0x%zx\n",
+ core->mem[0].size, core->mem[1].size);
+ }
+}
+
+/*
+ * This function checks and configures an R5F core for IPC-only or remoteproc
+ * mode. The driver is configured to be in IPC-only mode for an R5F core when
+ * the core has been loaded and started by a bootloader. The IPC-only mode is
+ * detected by querying the System Firmware for reset, power-on and halt status
+ * and ensuring that the core is running. Any steps left incomplete by the
+ * bootloader are validated and treated as an error.
+ *
+ * In IPC-only mode, the driver state flags for ATCM, BTCM and LOCZRAMA settings
+ * and cluster mode parsed originally from kernel DT are updated to reflect the
+ * actual values configured by bootloader. The driver internal device memory
+ * addresses for TCMs are also updated.
+ */
+static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
+{
+ struct k3_r5_cluster *cluster = kproc->cluster;
+ struct k3_r5_core *core = kproc->core;
+ struct device *cdev = core->dev;
+ bool r_state = false, c_state = false;
+ u32 ctrl = 0, cfg = 0, stat = 0, halted = 0;
+ u64 boot_vec = 0;
+ u32 atcm_enable, btcm_enable, loczrama;
+ struct k3_r5_core *core0;
+ enum cluster_mode mode;
+ int ret;
+
+ core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
+
+ ret = core->ti_sci->ops.dev_ops.is_on(core->ti_sci, core->ti_sci_id,
+ &r_state, &c_state);
+ if (ret) {
+ dev_err(cdev, "failed to get initial state, mode cannot be determined, ret = %d\n",
+ ret);
+ return ret;
+ }
+ if (r_state != c_state) {
+ dev_warn(cdev, "R5F core may have been powered on by a different host, programmed state (%d) != actual state (%d)\n",
+ r_state, c_state);
+ }
+
+ ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl,
+ &stat);
+ if (ret < 0) {
+ dev_err(cdev, "failed to get initial processor status, ret = %d\n",
+ ret);
+ return ret;
+ }
+ atcm_enable = cfg & PROC_BOOT_CFG_FLAG_R5_ATCM_EN ? 1 : 0;
+ btcm_enable = cfg & PROC_BOOT_CFG_FLAG_R5_BTCM_EN ? 1 : 0;
+ loczrama = cfg & PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE ? 1 : 0;
+ if (cluster->soc_data->single_cpu_mode) {
+ mode = cfg & PROC_BOOT_CFG_FLAG_R5_SINGLE_CORE ?
+ CLUSTER_MODE_SINGLECPU : CLUSTER_MODE_SPLIT;
+ } else {
+ mode = cfg & PROC_BOOT_CFG_FLAG_R5_LOCKSTEP ?
+ CLUSTER_MODE_LOCKSTEP : CLUSTER_MODE_SPLIT;
+ }
+ halted = ctrl & PROC_BOOT_CTRL_FLAG_R5_CORE_HALT;
+
+	/*
+	 * IPC-only mode detection requires both local and module resets to
+	 * be deasserted and the R5F core to be unhalted. Local reset status
+	 * is irrelevant if the module reset is asserted (the POR value has
+	 * local reset deasserted), and such a state is deemed remoteproc mode.
+	 *
+	 * Query the local reset status last so that 'ret' still holds it
+	 * when the checks below are evaluated.
+	 */
+	ret = reset_control_status(core->reset);
+	if (ret < 0) {
+		dev_err(cdev, "failed to get initial local reset status, ret = %d\n",
+			ret);
+		return ret;
+	}
+
+	if (c_state && !ret && !halted) {
+ dev_info(cdev, "configured R5F for IPC-only mode\n");
+ kproc->rproc->state = RPROC_DETACHED;
+ ret = 1;
+ /* override rproc ops with only required IPC-only mode ops */
+ kproc->rproc->ops->prepare = NULL;
+ kproc->rproc->ops->unprepare = NULL;
+ kproc->rproc->ops->start = NULL;
+ kproc->rproc->ops->stop = NULL;
+ kproc->rproc->ops->attach = k3_r5_rproc_attach;
+ kproc->rproc->ops->detach = k3_r5_rproc_detach;
+ kproc->rproc->ops->get_loaded_rsc_table =
+ k3_r5_get_loaded_rsc_table;
+ } else if (!c_state) {
+ dev_info(cdev, "configured R5F for remoteproc mode\n");
+ ret = 0;
+ } else {
+ dev_err(cdev, "mismatched mode: local_reset = %s, module_reset = %s, core_state = %s\n",
+ !ret ? "deasserted" : "asserted",
+ c_state ? "deasserted" : "asserted",
+ halted ? "halted" : "unhalted");
+ ret = -EINVAL;
+ }
+
+ /* fixup TCMs, cluster & core flags to actual values in IPC-only mode */
+ if (ret > 0) {
+ if (core == core0)
+ cluster->mode = mode;
+ core->atcm_enable = atcm_enable;
+ core->btcm_enable = btcm_enable;
+ core->loczrama = loczrama;
+ core->mem[0].dev_addr = loczrama ? 0 : K3_R5_TCM_DEV_ADDR;
+ core->mem[1].dev_addr = loczrama ? K3_R5_TCM_DEV_ADDR : 0;
+ }
+
+ return ret;
+}
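+
+/*
+ * Summary of the mode decision in k3_r5_rproc_configure_mode() above:
+ *
+ *   module reset   local reset   core halt   resulting mode
+ *   deasserted     deasserted    unhalted    IPC-only (attach/detach ops)
+ *   asserted       (any)         (any)       remoteproc (full boot ops)
+ *   deasserted     asserted or halted        error (incomplete boot sequence)
+ */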
+
+static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
+{
+ struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ struct k3_r5_rproc *kproc;
+ struct k3_r5_core *core, *core1;
+ struct device *cdev;
+ const char *fw_name;
+ struct rproc *rproc;
+ int ret, ret1;
+
+ core1 = list_last_entry(&cluster->cores, struct k3_r5_core, elem);
+ list_for_each_entry(core, &cluster->cores, elem) {
+ cdev = core->dev;
+ ret = rproc_of_parse_firmware(cdev, 0, &fw_name);
+ if (ret) {
+ dev_err(dev, "failed to parse firmware-name property, ret = %d\n",
+ ret);
+ goto out;
+ }
+
+ rproc = rproc_alloc(cdev, dev_name(cdev), &k3_r5_rproc_ops,
+ fw_name, sizeof(*kproc));
+ if (!rproc) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* K3 R5s have a Region Address Translator (RAT) but no MMU */
+ rproc->has_iommu = false;
+ /* error recovery is not supported at present */
+ rproc->recovery_disabled = true;
+
+ kproc = rproc->priv;
+ kproc->cluster = cluster;
+ kproc->core = core;
+ kproc->dev = cdev;
+ kproc->rproc = rproc;
+ core->rproc = rproc;
+
+ ret = k3_r5_rproc_configure_mode(kproc);
+ if (ret < 0)
+ goto err_config;
+ if (ret)
+ goto init_rmem;
+
+ ret = k3_r5_rproc_configure(kproc);
+ if (ret) {
+ dev_err(dev, "initial configure failed, ret = %d\n",
+ ret);
+ goto err_config;
+ }
+
+init_rmem:
+ k3_r5_adjust_tcm_sizes(kproc);
+
+ ret = k3_r5_reserved_mem_init(kproc);
+ if (ret) {
+ dev_err(dev, "reserved memory init failed, ret = %d\n",
+ ret);
+ goto err_config;
+ }
+
+ ret = rproc_add(rproc);
+ if (ret) {
+ dev_err(dev, "rproc_add failed, ret = %d\n", ret);
+ goto err_add;
+ }
+
+ /* create only one rproc in lockstep mode or single-cpu mode */
+ if (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
+ cluster->mode == CLUSTER_MODE_SINGLECPU)
+ break;
+ }
+
+ return 0;
+
+err_split:
+ if (rproc->state == RPROC_ATTACHED) {
+ ret1 = rproc_detach(rproc);
+ if (ret1) {
+ dev_err(kproc->dev, "failed to detach rproc, ret = %d\n",
+ ret1);
+ return ret1;
+ }
+ }
+
+ rproc_del(rproc);
+err_add:
+ k3_r5_reserved_mem_exit(kproc);
+err_config:
+ rproc_free(rproc);
+ core->rproc = NULL;
+out:
+ /* undo core0 upon any failures on core1 in split-mode */
+ if (cluster->mode == CLUSTER_MODE_SPLIT && core == core1) {
+ core = list_prev_entry(core, elem);
+ rproc = core->rproc;
+ kproc = rproc->priv;
+ goto err_split;
+ }
+ return ret;
+}
+
+static void k3_r5_cluster_rproc_exit(void *data)
+{
+ struct k3_r5_cluster *cluster = platform_get_drvdata(data);
+ struct k3_r5_rproc *kproc;
+ struct k3_r5_core *core;
+ struct rproc *rproc;
+ int ret;
+
+ /*
+	 * LockStep and Single-CPU modes have only one rproc, associated
+	 * with the first core, whereas Split mode has one rproc per core
+	 * and requires that core1 be powered down first
+ */
+ core = (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
+ cluster->mode == CLUSTER_MODE_SINGLECPU) ?
+ list_first_entry(&cluster->cores, struct k3_r5_core, elem) :
+ list_last_entry(&cluster->cores, struct k3_r5_core, elem);
+
+ list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
+ rproc = core->rproc;
+ kproc = rproc->priv;
+
+ if (rproc->state == RPROC_ATTACHED) {
+ ret = rproc_detach(rproc);
+ if (ret) {
+ dev_err(kproc->dev, "failed to detach rproc, ret = %d\n", ret);
+ return;
+ }
+ }
+
+ rproc_del(rproc);
+
+ k3_r5_reserved_mem_exit(kproc);
+
+ rproc_free(rproc);
+ core->rproc = NULL;
+ }
+}
+
+static int k3_r5_core_of_get_internal_memories(struct platform_device *pdev,
+ struct k3_r5_core *core)
+{
+ static const char * const mem_names[] = {"atcm", "btcm"};
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int num_mems;
+ int i;
+
+ num_mems = ARRAY_SIZE(mem_names);
+ core->mem = devm_kcalloc(dev, num_mems, sizeof(*core->mem), GFP_KERNEL);
+ if (!core->mem)
+ return -ENOMEM;
+
+ for (i = 0; i < num_mems; i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ mem_names[i]);
+ if (!res) {
+ dev_err(dev, "found no memory resource for %s\n",
+ mem_names[i]);
+ return -EINVAL;
+ }
+ if (!devm_request_mem_region(dev, res->start,
+ resource_size(res),
+ dev_name(dev))) {
+ dev_err(dev, "could not request %s region for resource\n",
+ mem_names[i]);
+ return -EBUSY;
+ }
+
+ /*
+ * TCMs are designed in general to support RAM-like backing
+ * memories. So, map these as Normal Non-Cached memories. This
+ * also avoids/fixes any potential alignment faults due to
+ * unaligned data accesses when using memcpy() or memset()
+ * functions (normally seen with device type memory).
+ */
+ core->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
+ resource_size(res));
+ if (!core->mem[i].cpu_addr) {
+ dev_err(dev, "failed to map %s memory\n", mem_names[i]);
+ return -ENOMEM;
+ }
+ core->mem[i].bus_addr = res->start;
+
+ /*
+ * TODO:
+		 * The R5F cores can place ATCM & BTCM anywhere in their
+		 * address space based on the corresponding Region Registers
+		 * in the System Control coprocessor. For now, place ATCM and
+		 * BTCM at addresses 0 and 0x41010000 (same as the bus address
+		 * on AM65x SoCs) based on the loczrama setting.
+ */
+ if (!strcmp(mem_names[i], "atcm")) {
+ core->mem[i].dev_addr = core->loczrama ?
+ 0 : K3_R5_TCM_DEV_ADDR;
+ } else {
+ core->mem[i].dev_addr = core->loczrama ?
+ K3_R5_TCM_DEV_ADDR : 0;
+ }
+ core->mem[i].size = resource_size(res);
+
+ dev_dbg(dev, "memory %5s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+ mem_names[i], &core->mem[i].bus_addr,
+ core->mem[i].size, core->mem[i].cpu_addr,
+ core->mem[i].dev_addr);
+ }
+ core->num_mems = num_mems;
+
+ return 0;
+}
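+
+/*
+ * Illustrative DT fragment consumed by the above (AM65x-like TCM layout,
+ * addresses are examples only):
+ *
+ *	reg = <0x41000000 0x8000>, <0x41010000 0x8000>;
+ *	reg-names = "atcm", "btcm";
+ */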
+
+static int k3_r5_core_of_get_sram_memories(struct platform_device *pdev,
+ struct k3_r5_core *core)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct device_node *sram_np;
+ struct resource res;
+ int num_sram;
+ int i, ret;
+
+ num_sram = of_property_count_elems_of_size(np, "sram", sizeof(phandle));
+ if (num_sram <= 0) {
+ dev_dbg(dev, "device does not use reserved on-chip memories, num_sram = %d\n",
+ num_sram);
+ return 0;
+ }
+
+ core->sram = devm_kcalloc(dev, num_sram, sizeof(*core->sram), GFP_KERNEL);
+ if (!core->sram)
+ return -ENOMEM;
+
+ for (i = 0; i < num_sram; i++) {
+ sram_np = of_parse_phandle(np, "sram", i);
+ if (!sram_np)
+ return -EINVAL;
+
+ if (!of_device_is_available(sram_np)) {
+ of_node_put(sram_np);
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(sram_np, 0, &res);
+ of_node_put(sram_np);
+ if (ret)
+ return -EINVAL;
+
+ core->sram[i].bus_addr = res.start;
+ core->sram[i].dev_addr = res.start;
+ core->sram[i].size = resource_size(&res);
+ core->sram[i].cpu_addr = devm_ioremap_wc(dev, res.start,
+ resource_size(&res));
+ if (!core->sram[i].cpu_addr) {
+ dev_err(dev, "failed to parse and map sram%d memory at %pad\n",
+ i, &res.start);
+ return -ENOMEM;
+ }
+
+ dev_dbg(dev, "memory sram%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+ i, &core->sram[i].bus_addr,
+ core->sram[i].size, core->sram[i].cpu_addr,
+ core->sram[i].dev_addr);
+ }
+ core->num_sram = num_sram;
+
+ return 0;
+}
+
+static
+struct ti_sci_proc *k3_r5_core_of_get_tsp(struct device *dev,
+ const struct ti_sci_handle *sci)
+{
+ struct ti_sci_proc *tsp;
+ u32 temp[2];
+ int ret;
+
+ ret = of_property_read_u32_array(dev_of_node(dev), "ti,sci-proc-ids",
+ temp, 2);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ tsp = devm_kzalloc(dev, sizeof(*tsp), GFP_KERNEL);
+ if (!tsp)
+ return ERR_PTR(-ENOMEM);
+
+ tsp->dev = dev;
+ tsp->sci = sci;
+ tsp->ops = &sci->ops.proc_ops;
+ tsp->proc_id = temp[0];
+ tsp->host_id = temp[1];
+
+ return tsp;
+}
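+
+/*
+ * Illustrative "ti,sci-proc-ids" usage for the above (hypothetical values):
+ * the first cell is the TI-SCI processor id and the second is the host id
+ * that processor control can be handed over to.
+ *
+ *	ti,sci-proc-ids = <0x01 0xff>;
+ */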
+
+static int k3_r5_core_of_init(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev_of_node(dev);
+ struct k3_r5_core *core;
+ int ret;
+
+ if (!devres_open_group(dev, k3_r5_core_of_init, GFP_KERNEL))
+ return -ENOMEM;
+
+ core = devm_kzalloc(dev, sizeof(*core), GFP_KERNEL);
+ if (!core) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ core->dev = dev;
+ /*
+ * Use SoC Power-on-Reset values as default if no DT properties are
+ * used to dictate the TCM configurations
+ */
+ core->atcm_enable = 0;
+ core->btcm_enable = 1;
+ core->loczrama = 1;
+
+ ret = of_property_read_u32(np, "ti,atcm-enable", &core->atcm_enable);
+ if (ret < 0 && ret != -EINVAL) {
+ dev_err(dev, "invalid format for ti,atcm-enable, ret = %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = of_property_read_u32(np, "ti,btcm-enable", &core->btcm_enable);
+ if (ret < 0 && ret != -EINVAL) {
+ dev_err(dev, "invalid format for ti,btcm-enable, ret = %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = of_property_read_u32(np, "ti,loczrama", &core->loczrama);
+ if (ret < 0 && ret != -EINVAL) {
+ dev_err(dev, "invalid format for ti,loczrama, ret = %d\n", ret);
+ goto err;
+ }
+
+ core->ti_sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
+ if (IS_ERR(core->ti_sci)) {
+ ret = PTR_ERR(core->ti_sci);
+ if (ret != -EPROBE_DEFER) {
+ dev_err(dev, "failed to get ti-sci handle, ret = %d\n",
+ ret);
+ }
+ core->ti_sci = NULL;
+ goto err;
+ }
+
+ ret = of_property_read_u32(np, "ti,sci-dev-id", &core->ti_sci_id);
+ if (ret) {
+ dev_err(dev, "missing 'ti,sci-dev-id' property\n");
+ goto err;
+ }
+
+ core->reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR_OR_NULL(core->reset)) {
+ ret = PTR_ERR_OR_ZERO(core->reset);
+ if (!ret)
+ ret = -ENODEV;
+ if (ret != -EPROBE_DEFER) {
+ dev_err(dev, "failed to get reset handle, ret = %d\n",
+ ret);
+ }
+ goto err;
+ }
+
+ core->tsp = k3_r5_core_of_get_tsp(dev, core->ti_sci);
+ if (IS_ERR(core->tsp)) {
+ ret = PTR_ERR(core->tsp);
+ dev_err(dev, "failed to construct ti-sci proc control, ret = %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = k3_r5_core_of_get_internal_memories(pdev, core);
+ if (ret) {
+ dev_err(dev, "failed to get internal memories, ret = %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = k3_r5_core_of_get_sram_memories(pdev, core);
+ if (ret) {
+ dev_err(dev, "failed to get sram memories, ret = %d\n", ret);
+ goto err;
+ }
+
+ ret = ti_sci_proc_request(core->tsp);
+ if (ret < 0) {
+ dev_err(dev, "ti_sci_proc_request failed, ret = %d\n", ret);
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, core);
+ devres_close_group(dev, k3_r5_core_of_init);
+
+ return 0;
+
+err:
+ devres_release_group(dev, k3_r5_core_of_init);
+ return ret;
+}
+
+/*
+ * Free the resources explicitly since the driver model is not being used
+ * for the child R5F devices.
+ */
+static void k3_r5_core_of_exit(struct platform_device *pdev)
+{
+ struct k3_r5_core *core = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = ti_sci_proc_release(core->tsp);
+ if (ret)
+ dev_err(dev, "failed to release proc, ret = %d\n", ret);
+
+ platform_set_drvdata(pdev, NULL);
+ devres_release_group(dev, k3_r5_core_of_init);
+}
+
+static void k3_r5_cluster_of_exit(void *data)
+{
+ struct k3_r5_cluster *cluster = platform_get_drvdata(data);
+ struct platform_device *cpdev;
+ struct k3_r5_core *core, *temp;
+
+ list_for_each_entry_safe_reverse(core, temp, &cluster->cores, elem) {
+ list_del(&core->elem);
+ cpdev = to_platform_device(core->dev);
+ k3_r5_core_of_exit(cpdev);
+ }
+}
+
+static int k3_r5_cluster_of_init(struct platform_device *pdev)
+{
+ struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev_of_node(dev);
+ struct platform_device *cpdev;
+ struct device_node *child;
+ struct k3_r5_core *core;
+ int ret;
+
+ for_each_available_child_of_node(np, child) {
+ cpdev = of_find_device_by_node(child);
+ if (!cpdev) {
+ ret = -ENODEV;
+ dev_err(dev, "could not get R5 core platform device\n");
+ of_node_put(child);
+ goto fail;
+ }
+
+ ret = k3_r5_core_of_init(cpdev);
+ if (ret) {
+ dev_err(dev, "k3_r5_core_of_init failed, ret = %d\n",
+ ret);
+ put_device(&cpdev->dev);
+ of_node_put(child);
+ goto fail;
+ }
+
+ core = platform_get_drvdata(cpdev);
+ put_device(&cpdev->dev);
+ list_add_tail(&core->elem, &cluster->cores);
+ }
+
+ return 0;
+
+fail:
+ k3_r5_cluster_of_exit(pdev);
+ return ret;
+}
+
+static int k3_r5_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev_of_node(dev);
+ struct k3_r5_cluster *cluster;
+ const struct k3_r5_soc_data *data;
+ int ret;
+ int num_cores;
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data) {
+ dev_err(dev, "SoC-specific data is not defined\n");
+ return -ENODEV;
+ }
+
+ cluster = devm_kzalloc(dev, sizeof(*cluster), GFP_KERNEL);
+ if (!cluster)
+ return -ENOMEM;
+
+ cluster->dev = dev;
+ /*
+	 * default to the most common eFUSE configurations - Split-mode on
+	 * AM64x and LockStep-mode on all others
+ */
+ cluster->mode = data->single_cpu_mode ?
+ CLUSTER_MODE_SPLIT : CLUSTER_MODE_LOCKSTEP;
+ cluster->soc_data = data;
+ INIT_LIST_HEAD(&cluster->cores);
+
+ ret = of_property_read_u32(np, "ti,cluster-mode", &cluster->mode);
+ if (ret < 0 && ret != -EINVAL) {
+ dev_err(dev, "invalid format for ti,cluster-mode, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ num_cores = of_get_available_child_count(np);
+ if (num_cores != 2) {
+ dev_err(dev, "MCU cluster requires both R5F cores to be enabled, num_cores = %d\n",
+ num_cores);
+ return -ENODEV;
+ }
+
+ platform_set_drvdata(pdev, cluster);
+
+ ret = devm_of_platform_populate(dev);
+ if (ret) {
+ dev_err(dev, "devm_of_platform_populate failed, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = k3_r5_cluster_of_init(pdev);
+ if (ret) {
+ dev_err(dev, "k3_r5_cluster_of_init failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(dev, k3_r5_cluster_of_exit, pdev);
+ if (ret)
+ return ret;
+
+ ret = k3_r5_cluster_rproc_init(pdev);
+ if (ret) {
+ dev_err(dev, "k3_r5_cluster_rproc_init failed, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(dev, k3_r5_cluster_rproc_exit, pdev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct k3_r5_soc_data am65_j721e_soc_data = {
+ .tcm_is_double = false,
+ .tcm_ecc_autoinit = false,
+ .single_cpu_mode = false,
+};
+
+static const struct k3_r5_soc_data j7200_j721s2_soc_data = {
+ .tcm_is_double = true,
+ .tcm_ecc_autoinit = true,
+ .single_cpu_mode = false,
+};
+
+static const struct k3_r5_soc_data am64_soc_data = {
+ .tcm_is_double = true,
+ .tcm_ecc_autoinit = true,
+ .single_cpu_mode = true,
+};
+
+static const struct of_device_id k3_r5_of_match[] = {
+ { .compatible = "ti,am654-r5fss", .data = &am65_j721e_soc_data, },
+ { .compatible = "ti,j721e-r5fss", .data = &am65_j721e_soc_data, },
+ { .compatible = "ti,j7200-r5fss", .data = &j7200_j721s2_soc_data, },
+ { .compatible = "ti,am64-r5fss", .data = &am64_soc_data, },
+ { .compatible = "ti,j721s2-r5fss", .data = &j7200_j721s2_soc_data, },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, k3_r5_of_match);
+
+static struct platform_driver k3_r5_rproc_driver = {
+ .probe = k3_r5_probe,
+ .driver = {
+ .name = "k3_r5_rproc",
+ .of_match_table = k3_r5_of_match,
+ },
+};
+
+module_platform_driver(k3_r5_rproc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI K3 R5F remote processor driver");
+MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");
diff --git a/drivers/remoteproc/ti_sci_proc.h b/drivers/remoteproc/ti_sci_proc.h
new file mode 100644
index 000000000..778558abc
--- /dev/null
+++ b/drivers/remoteproc/ti_sci_proc.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Texas Instruments TI-SCI Processor Controller Helper Functions
+ *
+ * Copyright (C) 2018-2020 Texas Instruments Incorporated - https://www.ti.com/
+ * Suman Anna <s-anna@ti.com>
+ */
+
+#ifndef REMOTEPROC_TI_SCI_PROC_H
+#define REMOTEPROC_TI_SCI_PROC_H
+
+#include <linux/soc/ti/ti_sci_protocol.h>
+
+/**
+ * struct ti_sci_proc - structure representing a processor control client
+ * @sci: cached TI-SCI protocol handle
+ * @ops: cached TI-SCI proc ops
+ * @dev: cached client device pointer
+ * @proc_id: processor id for the consumer remoteproc device
+ * @host_id: host id to pass the control over for this consumer remoteproc
+ * device
+ */
+struct ti_sci_proc {
+ const struct ti_sci_handle *sci;
+ const struct ti_sci_proc_ops *ops;
+ struct device *dev;
+ u8 proc_id;
+ u8 host_id;
+};
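+
+/*
+ * Typical (illustrative) call flow for these helpers, as exercised by the
+ * remoteproc drivers in this directory: ti_sci_proc_request() to take
+ * ownership of the processor, ti_sci_proc_set_config() to program the boot
+ * vector and config flags, ti_sci_proc_set_control() to assert/release halt,
+ * and finally ti_sci_proc_release() or ti_sci_proc_handover() to give up or
+ * transfer control of the processor.
+ */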
+
+static inline int ti_sci_proc_request(struct ti_sci_proc *tsp)
+{
+ int ret;
+
+ ret = tsp->ops->request(tsp->sci, tsp->proc_id);
+ if (ret)
+ dev_err(tsp->dev, "ti-sci processor request failed: %d\n",
+ ret);
+ return ret;
+}
+
+static inline int ti_sci_proc_release(struct ti_sci_proc *tsp)
+{
+ int ret;
+
+ ret = tsp->ops->release(tsp->sci, tsp->proc_id);
+ if (ret)
+ dev_err(tsp->dev, "ti-sci processor release failed: %d\n",
+ ret);
+ return ret;
+}
+
+static inline int ti_sci_proc_handover(struct ti_sci_proc *tsp)
+{
+ int ret;
+
+ ret = tsp->ops->handover(tsp->sci, tsp->proc_id, tsp->host_id);
+ if (ret)
+ dev_err(tsp->dev, "ti-sci processor handover of %d to %d failed: %d\n",
+ tsp->proc_id, tsp->host_id, ret);
+ return ret;
+}
+
+static inline int ti_sci_proc_set_config(struct ti_sci_proc *tsp,
+ u64 boot_vector,
+ u32 cfg_set, u32 cfg_clr)
+{
+ int ret;
+
+ ret = tsp->ops->set_config(tsp->sci, tsp->proc_id, boot_vector,
+ cfg_set, cfg_clr);
+ if (ret)
+ dev_err(tsp->dev, "ti-sci processor set_config failed: %d\n",
+ ret);
+ return ret;
+}
+
+static inline int ti_sci_proc_set_control(struct ti_sci_proc *tsp,
+ u32 ctrl_set, u32 ctrl_clr)
+{
+ int ret;
+
+ ret = tsp->ops->set_control(tsp->sci, tsp->proc_id, ctrl_set, ctrl_clr);
+ if (ret)
+ dev_err(tsp->dev, "ti-sci processor set_control failed: %d\n",
+ ret);
+ return ret;
+}
+
+static inline int ti_sci_proc_get_status(struct ti_sci_proc *tsp,
+ u64 *boot_vector, u32 *cfg_flags,
+ u32 *ctrl_flags, u32 *status_flags)
+{
+ int ret;
+
+ ret = tsp->ops->get_status(tsp->sci, tsp->proc_id, boot_vector,
+ cfg_flags, ctrl_flags, status_flags);
+ if (ret)
+ dev_err(tsp->dev, "ti-sci processor get_status failed: %d\n",
+ ret);
+ return ret;
+}
+
+#endif /* REMOTEPROC_TI_SCI_PROC_H */
diff --git a/drivers/remoteproc/wkup_m3_rproc.c b/drivers/remoteproc/wkup_m3_rproc.c
new file mode 100644
index 000000000..a0c204cb0
--- /dev/null
+++ b/drivers/remoteproc/wkup_m3_rproc.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI AMx3 Wakeup M3 Remote Processor driver
+ *
+ * Copyright (C) 2014-2015 Texas Instruments, Inc.
+ *
+ * Dave Gerlach <d-gerlach@ti.com>
+ * Suman Anna <s-anna@ti.com>
+ */
+
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/remoteproc.h>
+#include <linux/reset.h>
+
+#include <linux/platform_data/wkup_m3.h>
+
+#include "remoteproc_internal.h"
+
+#define WKUPM3_MEM_MAX 2
+
+/**
+ * struct wkup_m3_mem - WkupM3 internal memory structure
+ * @cpu_addr: MPU virtual address of the memory region
+ * @bus_addr: Bus address used to access the memory region
+ * @dev_addr: Device address from Wakeup M3 view
+ * @size: Size of the memory region
+ */
+struct wkup_m3_mem {
+ void __iomem *cpu_addr;
+ phys_addr_t bus_addr;
+ u32 dev_addr;
+ size_t size;
+};
+
+/**
+ * struct wkup_m3_rproc - WkupM3 remote processor state
+ * @rproc: rproc handle
+ * @pdev: pointer to platform device
+ * @mem: WkupM3 memory information
+ * @rsts: reset control
+ */
+struct wkup_m3_rproc {
+ struct rproc *rproc;
+ struct platform_device *pdev;
+ struct wkup_m3_mem mem[WKUPM3_MEM_MAX];
+ struct reset_control *rsts;
+};
+
+static int wkup_m3_rproc_start(struct rproc *rproc)
+{
+ struct wkup_m3_rproc *wkupm3 = rproc->priv;
+ struct platform_device *pdev = wkupm3->pdev;
+ struct device *dev = &pdev->dev;
+ struct wkup_m3_platform_data *pdata = dev_get_platdata(dev);
+ int error = 0;
+
+ error = reset_control_deassert(wkupm3->rsts);
+
+ if (!wkupm3->rsts && pdata->deassert_reset(pdev, pdata->reset_name)) {
+ dev_err(dev, "Unable to reset wkup_m3!\n");
+ error = -ENODEV;
+ }
+
+ return error;
+}
+
+static int wkup_m3_rproc_stop(struct rproc *rproc)
+{
+ struct wkup_m3_rproc *wkupm3 = rproc->priv;
+ struct platform_device *pdev = wkupm3->pdev;
+ struct device *dev = &pdev->dev;
+ struct wkup_m3_platform_data *pdata = dev_get_platdata(dev);
+ int error = 0;
+
+ error = reset_control_assert(wkupm3->rsts);
+
+ if (!wkupm3->rsts && pdata->assert_reset(pdev, pdata->reset_name)) {
+ dev_err(dev, "Unable to assert reset of wkup_m3!\n");
+ error = -ENODEV;
+ }
+
+ return error;
+}
+
+static void *wkup_m3_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
+{
+ struct wkup_m3_rproc *wkupm3 = rproc->priv;
+ void *va = NULL;
+ int i;
+ u32 offset;
+
+ if (len == 0)
+ return NULL;
+
+ for (i = 0; i < WKUPM3_MEM_MAX; i++) {
+ if (da >= wkupm3->mem[i].dev_addr && da + len <=
+ wkupm3->mem[i].dev_addr + wkupm3->mem[i].size) {
+ offset = da - wkupm3->mem[i].dev_addr;
+ /* __force to make sparse happy with type conversion */
+ va = (__force void *)(wkupm3->mem[i].cpu_addr + offset);
+ break;
+ }
+ }
+
+ return va;
+}
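+
+/*
+ * Illustrative example (AM335x-like, hypothetical addresses): with umem at
+ * bus address 0x44d00000 and dmem at 0x44d80000, the probe below computes
+ * l4_offset = 0x44d00000, yielding dev_addr 0x0 for umem and 0x80000 for
+ * dmem. A firmware da of 0x80004 then translates to dmem cpu_addr + 0x4.
+ */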
+
+static const struct rproc_ops wkup_m3_rproc_ops = {
+ .start = wkup_m3_rproc_start,
+ .stop = wkup_m3_rproc_stop,
+ .da_to_va = wkup_m3_rproc_da_to_va,
+};
+
+static const struct of_device_id wkup_m3_rproc_of_match[] = {
+ { .compatible = "ti,am3352-wkup-m3", },
+ { .compatible = "ti,am4372-wkup-m3", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, wkup_m3_rproc_of_match);
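+
+/*
+ * Illustrative DT node for this driver (AM335x-like, values are examples
+ * only):
+ *
+ *	wkup_m3: wkup_m3@44d00000 {
+ *		compatible = "ti,am3352-wkup-m3";
+ *		reg = <0x44d00000 0x4000>, <0x44d80000 0x2000>;
+ *		reg-names = "umem", "dmem";
+ *		ti,pm-firmware = "am335x-pm-firmware.elf";
+ *	};
+ */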
+
+static int wkup_m3_rproc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct wkup_m3_platform_data *pdata = dev->platform_data;
+ /* umem always needs to be processed first */
+ const char *mem_names[WKUPM3_MEM_MAX] = { "umem", "dmem" };
+ struct wkup_m3_rproc *wkupm3;
+ const char *fw_name;
+ struct rproc *rproc;
+ struct resource *res;
+ const __be32 *addrp;
+ u32 l4_offset = 0;
+ u64 size;
+ int ret;
+ int i;
+
+ ret = of_property_read_string(dev->of_node, "ti,pm-firmware",
+ &fw_name);
+ if (ret) {
+ dev_err(dev, "No firmware filename given\n");
+ return -ENODEV;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "pm_runtime_get_sync() failed\n");
+ goto err;
+ }
+
+ rproc = rproc_alloc(dev, "wkup_m3", &wkup_m3_rproc_ops,
+ fw_name, sizeof(*wkupm3));
+ if (!rproc) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ rproc->auto_boot = false;
+ rproc->sysfs_read_only = true;
+
+ wkupm3 = rproc->priv;
+ wkupm3->rproc = rproc;
+ wkupm3->pdev = pdev;
+
+	wkupm3->rsts = devm_reset_control_get_optional_shared(dev, "rstctrl");
+	if (IS_ERR(wkupm3->rsts)) {
+		ret = PTR_ERR(wkupm3->rsts);
+		goto err_put_rproc;
+	}
+ if (!wkupm3->rsts) {
+ if (!(pdata && pdata->deassert_reset && pdata->assert_reset &&
+ pdata->reset_name)) {
+ dev_err(dev, "Platform data missing!\n");
+ ret = -ENODEV;
+ goto err_put_rproc;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mem_names); i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ mem_names[i]);
+ wkupm3->mem[i].cpu_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(wkupm3->mem[i].cpu_addr)) {
+ dev_err(&pdev->dev, "devm_ioremap_resource failed for resource %d\n",
+ i);
+ ret = PTR_ERR(wkupm3->mem[i].cpu_addr);
+ goto err_put_rproc;
+ }
+ wkupm3->mem[i].bus_addr = res->start;
+ wkupm3->mem[i].size = resource_size(res);
+ addrp = of_get_address(dev->of_node, i, &size, NULL);
+ /*
+		 * The wkupm3 has umem at address 0 in its view, so the device
+		 * address for each memory region is computed as an offset
+		 * relative to the umem bus address; umem therefore needs to
+		 * be processed first.
+ */
+ if (!strcmp(mem_names[i], "umem"))
+ l4_offset = be32_to_cpu(*addrp);
+ wkupm3->mem[i].dev_addr = be32_to_cpu(*addrp) - l4_offset;
+ }
+
+ dev_set_drvdata(dev, rproc);
+
+ ret = rproc_add(rproc);
+ if (ret) {
+ dev_err(dev, "rproc_add failed\n");
+ goto err_put_rproc;
+ }
+
+ return 0;
+
+err_put_rproc:
+ rproc_free(rproc);
+err:
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ return ret;
+}
+
+static int wkup_m3_rproc_remove(struct platform_device *pdev)
+{
+ struct rproc *rproc = platform_get_drvdata(pdev);
+
+ rproc_del(rproc);
+ rproc_free(rproc);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int wkup_m3_rpm_suspend(struct device *dev)
+{
+ return -EBUSY;
+}
+
+static int wkup_m3_rpm_resume(struct device *dev)
+{
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops wkup_m3_rproc_pm_ops = {
+ SET_RUNTIME_PM_OPS(wkup_m3_rpm_suspend, wkup_m3_rpm_resume, NULL)
+};
+
+static struct platform_driver wkup_m3_rproc_driver = {
+ .probe = wkup_m3_rproc_probe,
+ .remove = wkup_m3_rproc_remove,
+ .driver = {
+ .name = "wkup_m3_rproc",
+ .of_match_table = wkup_m3_rproc_of_match,
+ .pm = &wkup_m3_rproc_pm_ops,
+ },
+};
+
+module_platform_driver(wkup_m3_rproc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI Wakeup M3 remote processor control driver");
+MODULE_AUTHOR("Dave Gerlach <d-gerlach@ti.com>");