author:    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
committer: Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
commit:    5d1646d90e1f2cceb9f0828f4b28318cd0ec7744
tree:      a94efe259b9009378be6d90eb30d2b019d95c194 /drivers/remoteproc
parent:    Initial commit.
Adding upstream version 5.10.209.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/remoteproc')
41 files changed, 20089 insertions, 0 deletions
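The Kconfig hunk below adds the core REMOTEPROC option (bool) plus one entry per SoC driver (mostly tristate). As a minimal, illustrative sketch that is not part of the patch itself, a build for an i.MX board could switch the relevant options on with the kernel's own scripts/config helper:

    # enable the framework and its character-device interface (both bool),
    # and build the i.MX driver as a module (tristate)
    scripts/config -e REMOTEPROC -e REMOTEPROC_CDEV -m IMX_REMOTEPROC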
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
new file mode 100644
index 000000000..d99548fb5
--- /dev/null
+++ b/drivers/remoteproc/Kconfig
@@ -0,0 +1,293 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "Remoteproc drivers"
+
+config REMOTEPROC
+	bool "Support for Remote Processor subsystem"
+	depends on HAS_DMA
+	select CRC32
+	select FW_LOADER
+	select VIRTIO
+	select WANT_DEV_COREDUMP
+	help
+	  Support for remote processors (such as DSP coprocessors). These
+	  are mainly used on embedded systems.
+
+if REMOTEPROC
+
+config REMOTEPROC_CDEV
+	bool "Remoteproc character device interface"
+	help
+	  Say y here to have a character device interface for the remoteproc
+	  framework. Userspace can boot/shutdown remote processors through
+	  this interface.
+
+	  It's safe to say N if you don't want to use this interface.
+
+config IMX_REMOTEPROC
+	tristate "IMX6/7 remoteproc support"
+	depends on ARCH_MXC
+	help
+	  Say y here to support iMX's remote processors (Cortex M4
+	  on iMX7D) via the remote processor framework.
+
+	  It's safe to say N here.
+
+config INGENIC_VPU_RPROC
+	tristate "Ingenic JZ47xx VPU remoteproc support"
+	depends on MIPS || COMPILE_TEST
+	help
+	  Say y or m here to support the VPU in the JZ47xx SoCs from Ingenic.
+
+	  This can be either built-in or a loadable module.
+	  If unsure say N.
+
+config MTK_SCP
+	tristate "Mediatek SCP support"
+	depends on ARCH_MEDIATEK || COMPILE_TEST
+	select RPMSG_MTK_SCP
+	help
+	  Say y here to support Mediatek's System Companion Processor (SCP) via
+	  the remote processor framework.
+
+	  It's safe to say N here.
+
+config OMAP_REMOTEPROC
+	tristate "OMAP remoteproc support"
+	depends on ARCH_OMAP4 || SOC_OMAP5 || SOC_DRA7XX
+	depends on OMAP_IOMMU
+	select MAILBOX
+	select OMAP2PLUS_MBOX
+	help
+	  Say y here to support OMAP's remote processors (dual M3
+	  and DSP on OMAP4) via the remote processor framework.
+
+	  Currently only supported on OMAP4.
+
+	  Usually you want to say Y here, in order to enable multimedia
+	  use-cases to run on your platform (multimedia codecs are
+	  offloaded to remote DSP processors using this framework).
+
+	  It's safe to say N here if you're not interested in multimedia
+	  offloading or just want a bare minimum kernel.
+
+config OMAP_REMOTEPROC_WATCHDOG
+	bool "OMAP remoteproc watchdog timer"
+	depends on OMAP_REMOTEPROC
+	default n
+	help
+	  Say Y here to enable watchdog timer for remote processors.
+
+	  This option controls the watchdog functionality for the remote
+	  processors in OMAP. Dedicated OMAP DMTimers are used by the remote
+	  processors and trigger the timer interrupt upon a watchdog
+	  detection.
+
+config WKUP_M3_RPROC
+	tristate "AMx3xx Wakeup M3 remoteproc support"
+	depends on SOC_AM33XX || SOC_AM43XX
+	help
+	  Say y here to support Wakeup M3 remote processor on TI AM33xx
+	  and AM43xx family of SoCs.
+
+	  Required for Suspend-to-RAM on AM33xx and AM43xx SoCs. Also needed
+	  for deep CPUIdle states on AM33xx SoCs. Allows for loading of the
+	  firmware onto these remote processors.
+	  If unsure say N.
+
+config DA8XX_REMOTEPROC
+	tristate "DA8xx/OMAP-L13x remoteproc support"
+	depends on ARCH_DAVINCI_DA8XX
+	depends on DMA_CMA
+	help
+	  Say y here to support DA8xx/OMAP-L13x remote processors via the
+	  remote processor framework.
+
+	  You want to say y here in order to enable AMP
+	  use-cases to run on your platform (multimedia codecs are
+	  offloaded to remote DSP processors using this framework).
+
+	  This module controls the name of the firmware file that gets
+	  loaded on the DSP. This file must reside in the /lib/firmware
+	  directory. It can be specified via the module parameter
+	  da8xx_fw_name=<filename>, and if not specified will default to
+	  "rproc-dsp-fw".
+
+	  It's safe to say n here if you're not interested in multimedia
+	  offloading.
+
+config KEYSTONE_REMOTEPROC
+	tristate "Keystone Remoteproc support"
+	depends on ARCH_KEYSTONE
+	help
+	  Say Y here to support Keystone remote processors (DSP)
+	  via the remote processor framework.
+
+	  It's safe to say N here if you're not interested in the Keystone
+	  DSPs or just want to use a bare minimum kernel.
+
+config QCOM_PIL_INFO
+	tristate
+
+config QCOM_RPROC_COMMON
+	tristate
+
+config QCOM_Q6V5_COMMON
+	tristate
+	depends on ARCH_QCOM
+	depends on QCOM_SMEM
+
+config QCOM_Q6V5_ADSP
+	tristate "Qualcomm Technology Inc ADSP Peripheral Image Loader"
+	depends on OF && ARCH_QCOM
+	depends on QCOM_SMEM
+	depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
+	depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
+	depends on QCOM_SYSMON || QCOM_SYSMON=n
+	select MFD_SYSCON
+	select QCOM_PIL_INFO
+	select QCOM_MDT_LOADER
+	select QCOM_Q6V5_COMMON
+	select QCOM_RPROC_COMMON
+	help
+	  Say y here to support the Peripheral Image Loader
+	  for the Qualcomm Technology Inc. ADSP remote processors.
+
+config QCOM_Q6V5_MSS
+	tristate "Qualcomm Hexagon V5 self-authenticating modem subsystem support"
+	depends on OF && ARCH_QCOM
+	depends on QCOM_SMEM
+	depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
+	depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
+	depends on QCOM_SYSMON || QCOM_SYSMON=n
+	select MFD_SYSCON
+	select QCOM_MDT_LOADER
+	select QCOM_PIL_INFO
+	select QCOM_Q6V5_COMMON
+	select QCOM_RPROC_COMMON
+	select QCOM_SCM
+	help
+	  Say y here to support the Qualcomm self-authenticating modem
+	  subsystem based on Hexagon V5.
+
+config QCOM_Q6V5_PAS
+	tristate "Qualcomm Hexagon v5 Peripheral Authentication Service support"
+	depends on OF && ARCH_QCOM
+	depends on QCOM_SMEM
+	depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
+	depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
+	depends on QCOM_SYSMON || QCOM_SYSMON=n
+	select MFD_SYSCON
+	select QCOM_PIL_INFO
+	select QCOM_MDT_LOADER
+	select QCOM_Q6V5_COMMON
+	select QCOM_RPROC_COMMON
+	select QCOM_SCM
+	help
+	  Say y here to support the TrustZone based Peripheral Image Loader
+	  for the Qualcomm Hexagon v5 based remote processors. This is commonly
+	  used to control subsystems such as ADSP, Compute and Sensor.
+
+config QCOM_Q6V5_WCSS
+	tristate "Qualcomm Hexagon based WCSS Peripheral Image Loader"
+	depends on OF && ARCH_QCOM
+	depends on QCOM_SMEM
+	depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
+	depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
+	depends on QCOM_SYSMON || QCOM_SYSMON=n
+	select MFD_SYSCON
+	select QCOM_MDT_LOADER
+	select QCOM_PIL_INFO
+	select QCOM_Q6V5_COMMON
+	select QCOM_RPROC_COMMON
+	select QCOM_SCM
+	help
+	  Say y here to support the Qualcomm Peripheral Image Loader for the
+	  Hexagon V5 based WCSS remote processors.
+
+config QCOM_SYSMON
+	tristate "Qualcomm sysmon driver"
+	depends on RPMSG
+	depends on ARCH_QCOM
+	depends on NET
+	select QCOM_QMI_HELPERS
+	help
+	  The sysmon driver implements a sysmon QMI client and a handler for
+	  the sys_mon SMD and GLINK channel, which are used for graceful
+	  shutdown, retrieving failure information and propagating information
+	  about other subsystems being shut down.
+
+	  Say y here if your system runs firmware on any other subsystems, e.g.
+	  modem or DSP.
+
+config QCOM_WCNSS_PIL
+	tristate "Qualcomm WCNSS Peripheral Image Loader"
+	depends on OF && ARCH_QCOM
+	depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
+	depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
+	depends on QCOM_SMEM
+	depends on QCOM_SYSMON || QCOM_SYSMON=n
+	select QCOM_MDT_LOADER
+	select QCOM_PIL_INFO
+	select QCOM_RPROC_COMMON
+	select QCOM_SCM
+	help
+	  Say y here to support the Peripheral Image Loader for the Qualcomm
+	  Wireless Connectivity Subsystem.
+
+config ST_REMOTEPROC
+	tristate "ST remoteproc support"
+	depends on ARCH_STI
+	select MAILBOX
+	select STI_MBOX
+	help
+	  Say y here to support ST's adjunct processors via the remote
+	  processor framework.
+	  This can be either built-in or a loadable module.
+
+config ST_SLIM_REMOTEPROC
+	tristate
+
+config STM32_RPROC
+	tristate "STM32 remoteproc support"
+	depends on ARCH_STM32
+	depends on REMOTEPROC
+	select MAILBOX
+	help
+	  Say y here to support STM32 MCU processors via the
+	  remote processor framework.
+
+	  You want to say y here in order to enable AMP
+	  use-cases to run on your platform (dedicated firmware could be
+	  offloaded to remote MCU processors using this framework).
+
+	  This can be either built-in or a loadable module.
+
+config TI_K3_DSP_REMOTEPROC
+	tristate "TI K3 DSP remoteproc support"
+	depends on ARCH_K3
+	select MAILBOX
+	select OMAP2PLUS_MBOX
+	help
+	  Say m here to support TI's C66x and C71x DSP remote processor
+	  subsystems on various TI K3 family of SoCs through the remote
+	  processor framework.
+
+	  It's safe to say N here if you're not interested in utilizing
+	  the DSP slave processors.
+
+config TI_K3_R5_REMOTEPROC
+	tristate "TI K3 R5 remoteproc support"
+	depends on ARCH_K3
+	select MAILBOX
+	select OMAP2PLUS_MBOX
+	help
+	  Say m here to support TI's R5F remote processor subsystems
+	  on various TI K3 family of SoCs through the remote processor
+	  framework.
+
+	  It's safe to say N here if you're not interested in utilizing
+	  a slave processor.
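Illustrative usage note, not part of the patch: the DA8XX_REMOTEPROC help above describes the da8xx_fw_name module parameter, which the driver later in this diff registers via module_param(). On a target it might be used as:

    # hypothetical firmware file name, looked up under /lib/firmware;
    # without the parameter the driver defaults to "rproc-dsp-fw"
    modprobe da8xx_remoteproc da8xx_fw_name=my-dsp-fw.elf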
+
+endif # REMOTEPROC
+
+endmenu
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
new file mode 100644
index 000000000..da2ace4ec
--- /dev/null
+++ b/drivers/remoteproc/Makefile
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Generic framework for controlling remote processors
+#
+
+obj-$(CONFIG_REMOTEPROC)		+= remoteproc.o
+remoteproc-y				:= remoteproc_core.o
+remoteproc-y				+= remoteproc_coredump.o
+remoteproc-y				+= remoteproc_debugfs.o
+remoteproc-y				+= remoteproc_sysfs.o
+remoteproc-y				+= remoteproc_virtio.o
+remoteproc-y				+= remoteproc_elf_loader.o
+obj-$(CONFIG_REMOTEPROC_CDEV)		+= remoteproc_cdev.o
+obj-$(CONFIG_IMX_REMOTEPROC)		+= imx_rproc.o
+obj-$(CONFIG_INGENIC_VPU_RPROC)		+= ingenic_rproc.o
+obj-$(CONFIG_MTK_SCP)			+= mtk_scp.o mtk_scp_ipi.o
+obj-$(CONFIG_OMAP_REMOTEPROC)		+= omap_remoteproc.o
+obj-$(CONFIG_WKUP_M3_RPROC)		+= wkup_m3_rproc.o
+obj-$(CONFIG_DA8XX_REMOTEPROC)		+= da8xx_remoteproc.o
+obj-$(CONFIG_KEYSTONE_REMOTEPROC)	+= keystone_remoteproc.o
+obj-$(CONFIG_QCOM_PIL_INFO)		+= qcom_pil_info.o
+obj-$(CONFIG_QCOM_RPROC_COMMON)		+= qcom_common.o
+obj-$(CONFIG_QCOM_Q6V5_COMMON)		+= qcom_q6v5.o
+obj-$(CONFIG_QCOM_Q6V5_ADSP)		+= qcom_q6v5_adsp.o
+obj-$(CONFIG_QCOM_Q6V5_MSS)		+= qcom_q6v5_mss.o
+obj-$(CONFIG_QCOM_Q6V5_PAS)		+= qcom_q6v5_pas.o
+obj-$(CONFIG_QCOM_Q6V5_WCSS)		+= qcom_q6v5_wcss.o
+obj-$(CONFIG_QCOM_SYSMON)		+= qcom_sysmon.o
+obj-$(CONFIG_QCOM_WCNSS_PIL)		+= qcom_wcnss_pil.o
+qcom_wcnss_pil-y			+= qcom_wcnss.o
+qcom_wcnss_pil-y			+= qcom_wcnss_iris.o
+obj-$(CONFIG_ST_REMOTEPROC)		+= st_remoteproc.o
+obj-$(CONFIG_ST_SLIM_REMOTEPROC)	+= st_slim_rproc.o
+obj-$(CONFIG_STM32_RPROC)		+= stm32_rproc.o
+obj-$(CONFIG_TI_K3_DSP_REMOTEPROC)	+= ti_k3_dsp_remoteproc.o
+obj-$(CONFIG_TI_K3_R5_REMOTEPROC)	+= ti_k3_r5_remoteproc.o
diff --git a/drivers/remoteproc/da8xx_remoteproc.c b/drivers/remoteproc/da8xx_remoteproc.c
new file mode 100644
index 000000000..98e0be947
--- /dev/null
+++ b/drivers/remoteproc/da8xx_remoteproc.c
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Remote processor machine-specific module for DA8XX
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ */ + +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/reset.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_reserved_mem.h> +#include <linux/platform_device.h> +#include <linux/remoteproc.h> + +#include "remoteproc_internal.h" + +static char *da8xx_fw_name; +module_param(da8xx_fw_name, charp, 0444); +MODULE_PARM_DESC(da8xx_fw_name, + "Name of DSP firmware file in /lib/firmware (if not specified defaults to 'rproc-dsp-fw')"); + +/* + * OMAP-L138 Technical References: + * http://www.ti.com/product/omap-l138 + */ +#define SYSCFG_CHIPSIG0 BIT(0) +#define SYSCFG_CHIPSIG1 BIT(1) +#define SYSCFG_CHIPSIG2 BIT(2) +#define SYSCFG_CHIPSIG3 BIT(3) +#define SYSCFG_CHIPSIG4 BIT(4) + +#define DA8XX_RPROC_LOCAL_ADDRESS_MASK (SZ_16M - 1) + +/** + * struct da8xx_rproc_mem - internal memory structure + * @cpu_addr: MPU virtual address of the memory region + * @bus_addr: Bus address used to access the memory region + * @dev_addr: Device address of the memory region from DSP view + * @size: Size of the memory region + */ +struct da8xx_rproc_mem { + void __iomem *cpu_addr; + phys_addr_t bus_addr; + u32 dev_addr; + size_t size; +}; + +/** + * struct da8xx_rproc - da8xx remote processor instance state + * @rproc: rproc handle + * @mem: internal memory regions data + * @num_mems: number of internal memory regions + * @dsp_clk: placeholder for platform's DSP clk + * @ack_fxn: chip-specific ack function for ack'ing irq + * @irq_data: ack_fxn function parameter + * @chipsig: virt ptr to DSP interrupt registers (CHIPSIG & CHIPSIG_CLR) + * @bootreg: virt ptr to DSP boot address register (HOST1CFG) + * @irq: irq # used by this instance + */ +struct da8xx_rproc { + struct rproc *rproc; + struct da8xx_rproc_mem *mem; + int num_mems; + struct clk *dsp_clk; + struct reset_control *dsp_reset; + void (*ack_fxn)(struct irq_data *data); + struct irq_data *irq_data; + void __iomem *chipsig; + void __iomem *bootreg; + int irq; +}; + +/** + * handle_event() - inbound virtqueue message workqueue function + * + * This function is registered as a kernel thread and is scheduled by the + * kernel handler. + */ +static irqreturn_t handle_event(int irq, void *p) +{ + struct rproc *rproc = (struct rproc *)p; + + /* Process incoming buffers on all our vrings */ + rproc_vq_interrupt(rproc, 0); + rproc_vq_interrupt(rproc, 1); + + return IRQ_HANDLED; +} + +/** + * da8xx_rproc_callback() - inbound virtqueue message handler + * + * This handler is invoked directly by the kernel whenever the remote + * core (DSP) has modified the state of a virtqueue. There is no + * "payload" message indicating the virtqueue index as is the case with + * mailbox-based implementations on OMAP4. As such, this handler "polls" + * each known virtqueue index for every invocation. + */ +static irqreturn_t da8xx_rproc_callback(int irq, void *p) +{ + struct rproc *rproc = (struct rproc *)p; + struct da8xx_rproc *drproc = (struct da8xx_rproc *)rproc->priv; + u32 chipsig; + + chipsig = readl(drproc->chipsig); + if (chipsig & SYSCFG_CHIPSIG0) { + /* Clear interrupt level source */ + writel(SYSCFG_CHIPSIG0, drproc->chipsig + 4); + + /* + * ACK intr to AINTC. 
+ * + * It has already been ack'ed by the kernel before calling + * this function, but since the ARM<->DSP interrupts in the + * CHIPSIG register are "level" instead of "pulse" variety, + * we need to ack it after taking down the level else we'll + * be called again immediately after returning. + */ + drproc->ack_fxn(drproc->irq_data); + + return IRQ_WAKE_THREAD; + } + + return IRQ_HANDLED; +} + +static int da8xx_rproc_start(struct rproc *rproc) +{ + struct device *dev = rproc->dev.parent; + struct da8xx_rproc *drproc = (struct da8xx_rproc *)rproc->priv; + struct clk *dsp_clk = drproc->dsp_clk; + struct reset_control *dsp_reset = drproc->dsp_reset; + int ret; + + /* hw requires the start (boot) address be on 1KB boundary */ + if (rproc->bootaddr & 0x3ff) { + dev_err(dev, "invalid boot address: must be aligned to 1KB\n"); + + return -EINVAL; + } + + writel(rproc->bootaddr, drproc->bootreg); + + ret = clk_prepare_enable(dsp_clk); + if (ret) { + dev_err(dev, "clk_prepare_enable() failed: %d\n", ret); + return ret; + } + + ret = reset_control_deassert(dsp_reset); + if (ret) { + dev_err(dev, "reset_control_deassert() failed: %d\n", ret); + clk_disable_unprepare(dsp_clk); + return ret; + } + + return 0; +} + +static int da8xx_rproc_stop(struct rproc *rproc) +{ + struct da8xx_rproc *drproc = rproc->priv; + struct device *dev = rproc->dev.parent; + int ret; + + ret = reset_control_assert(drproc->dsp_reset); + if (ret) { + dev_err(dev, "reset_control_assert() failed: %d\n", ret); + return ret; + } + + clk_disable_unprepare(drproc->dsp_clk); + + return 0; +} + +/* kick a virtqueue */ +static void da8xx_rproc_kick(struct rproc *rproc, int vqid) +{ + struct da8xx_rproc *drproc = (struct da8xx_rproc *)rproc->priv; + + /* Interrupt remote proc */ + writel(SYSCFG_CHIPSIG2, drproc->chipsig); +} + +static const struct rproc_ops da8xx_rproc_ops = { + .start = da8xx_rproc_start, + .stop = da8xx_rproc_stop, + .kick = da8xx_rproc_kick, +}; + +static int da8xx_rproc_get_internal_memories(struct platform_device *pdev, + struct da8xx_rproc *drproc) +{ + static const char * const mem_names[] = {"l2sram", "l1pram", "l1dram"}; + int num_mems = ARRAY_SIZE(mem_names); + struct device *dev = &pdev->dev; + struct resource *res; + int i; + + drproc->mem = devm_kcalloc(dev, num_mems, sizeof(*drproc->mem), + GFP_KERNEL); + if (!drproc->mem) + return -ENOMEM; + + for (i = 0; i < num_mems; i++) { + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + mem_names[i]); + drproc->mem[i].cpu_addr = devm_ioremap_resource(dev, res); + if (IS_ERR(drproc->mem[i].cpu_addr)) { + dev_err(dev, "failed to parse and map %s memory\n", + mem_names[i]); + return PTR_ERR(drproc->mem[i].cpu_addr); + } + drproc->mem[i].bus_addr = res->start; + drproc->mem[i].dev_addr = + res->start & DA8XX_RPROC_LOCAL_ADDRESS_MASK; + drproc->mem[i].size = resource_size(res); + + dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %p da 0x%x\n", + mem_names[i], &drproc->mem[i].bus_addr, + drproc->mem[i].size, drproc->mem[i].cpu_addr, + drproc->mem[i].dev_addr); + } + drproc->num_mems = num_mems; + + return 0; +} + +static int da8xx_rproc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct da8xx_rproc *drproc; + struct rproc *rproc; + struct irq_data *irq_data; + struct resource *bootreg_res; + struct resource *chipsig_res; + struct clk *dsp_clk; + struct reset_control *dsp_reset; + void __iomem *chipsig; + void __iomem *bootreg; + int irq; + int ret; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + 
irq_data = irq_get_irq_data(irq); + if (!irq_data) { + dev_err(dev, "irq_get_irq_data(%d): NULL\n", irq); + return -EINVAL; + } + + bootreg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "host1cfg"); + bootreg = devm_ioremap_resource(dev, bootreg_res); + if (IS_ERR(bootreg)) + return PTR_ERR(bootreg); + + chipsig_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "chipsig"); + chipsig = devm_ioremap_resource(dev, chipsig_res); + if (IS_ERR(chipsig)) + return PTR_ERR(chipsig); + + dsp_clk = devm_clk_get(dev, NULL); + if (IS_ERR(dsp_clk)) { + dev_err(dev, "clk_get error: %ld\n", PTR_ERR(dsp_clk)); + + return PTR_ERR(dsp_clk); + } + + dsp_reset = devm_reset_control_get_exclusive(dev, NULL); + if (IS_ERR(dsp_reset)) { + if (PTR_ERR(dsp_reset) != -EPROBE_DEFER) + dev_err(dev, "unable to get reset control: %ld\n", + PTR_ERR(dsp_reset)); + + return PTR_ERR(dsp_reset); + } + + if (dev->of_node) { + ret = of_reserved_mem_device_init(dev); + if (ret) { + dev_err(dev, "device does not have specific CMA pool: %d\n", + ret); + return ret; + } + } + + rproc = rproc_alloc(dev, "dsp", &da8xx_rproc_ops, da8xx_fw_name, + sizeof(*drproc)); + if (!rproc) { + ret = -ENOMEM; + goto free_mem; + } + + /* error recovery is not supported at present */ + rproc->recovery_disabled = true; + + drproc = rproc->priv; + drproc->rproc = rproc; + drproc->dsp_clk = dsp_clk; + drproc->dsp_reset = dsp_reset; + rproc->has_iommu = false; + + ret = da8xx_rproc_get_internal_memories(pdev, drproc); + if (ret) + goto free_rproc; + + platform_set_drvdata(pdev, rproc); + + /* everything the ISR needs is now setup, so hook it up */ + ret = devm_request_threaded_irq(dev, irq, da8xx_rproc_callback, + handle_event, 0, "da8xx-remoteproc", + rproc); + if (ret) { + dev_err(dev, "devm_request_threaded_irq error: %d\n", ret); + goto free_rproc; + } + + /* + * rproc_add() can end up enabling the DSP's clk with the DSP + * *not* in reset, but da8xx_rproc_start() needs the DSP to be + * held in reset at the time it is called. + */ + ret = reset_control_assert(dsp_reset); + if (ret) + goto free_rproc; + + drproc->chipsig = chipsig; + drproc->bootreg = bootreg; + drproc->ack_fxn = irq_data->chip->irq_ack; + drproc->irq_data = irq_data; + drproc->irq = irq; + + ret = rproc_add(rproc); + if (ret) { + dev_err(dev, "rproc_add failed: %d\n", ret); + goto free_rproc; + } + + return 0; + +free_rproc: + rproc_free(rproc); +free_mem: + if (dev->of_node) + of_reserved_mem_device_release(dev); + return ret; +} + +static int da8xx_rproc_remove(struct platform_device *pdev) +{ + struct rproc *rproc = platform_get_drvdata(pdev); + struct da8xx_rproc *drproc = (struct da8xx_rproc *)rproc->priv; + struct device *dev = &pdev->dev; + + /* + * The devm subsystem might end up releasing things before + * freeing the irq, thus allowing an interrupt to sneak in while + * the device is being removed. This should prevent that. 
+ */ + disable_irq(drproc->irq); + + rproc_del(rproc); + rproc_free(rproc); + if (dev->of_node) + of_reserved_mem_device_release(dev); + + return 0; +} + +static const struct of_device_id davinci_rproc_of_match[] __maybe_unused = { + { .compatible = "ti,da850-dsp", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, davinci_rproc_of_match); + +static struct platform_driver da8xx_rproc_driver = { + .probe = da8xx_rproc_probe, + .remove = da8xx_rproc_remove, + .driver = { + .name = "davinci-rproc", + .of_match_table = of_match_ptr(davinci_rproc_of_match), + }, +}; + +module_platform_driver(da8xx_rproc_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("DA8XX Remote Processor control driver"); diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c new file mode 100644 index 000000000..8957ed271 --- /dev/null +++ b/drivers/remoteproc/imx_rproc.c @@ -0,0 +1,422 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017 Pengutronix, Oleksij Rempel <kernel@pengutronix.de> + */ + +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/remoteproc.h> + +#define IMX7D_SRC_SCR 0x0C +#define IMX7D_ENABLE_M4 BIT(3) +#define IMX7D_SW_M4P_RST BIT(2) +#define IMX7D_SW_M4C_RST BIT(1) +#define IMX7D_SW_M4C_NON_SCLR_RST BIT(0) + +#define IMX7D_M4_RST_MASK (IMX7D_ENABLE_M4 | IMX7D_SW_M4P_RST \ + | IMX7D_SW_M4C_RST \ + | IMX7D_SW_M4C_NON_SCLR_RST) + +#define IMX7D_M4_START (IMX7D_ENABLE_M4 | IMX7D_SW_M4P_RST \ + | IMX7D_SW_M4C_RST) +#define IMX7D_M4_STOP IMX7D_SW_M4C_NON_SCLR_RST + +/* Address: 0x020D8000 */ +#define IMX6SX_SRC_SCR 0x00 +#define IMX6SX_ENABLE_M4 BIT(22) +#define IMX6SX_SW_M4P_RST BIT(12) +#define IMX6SX_SW_M4C_NON_SCLR_RST BIT(4) +#define IMX6SX_SW_M4C_RST BIT(3) + +#define IMX6SX_M4_START (IMX6SX_ENABLE_M4 | IMX6SX_SW_M4P_RST \ + | IMX6SX_SW_M4C_RST) +#define IMX6SX_M4_STOP IMX6SX_SW_M4C_NON_SCLR_RST +#define IMX6SX_M4_RST_MASK (IMX6SX_ENABLE_M4 | IMX6SX_SW_M4P_RST \ + | IMX6SX_SW_M4C_NON_SCLR_RST \ + | IMX6SX_SW_M4C_RST) + +#define IMX7D_RPROC_MEM_MAX 8 + +/** + * struct imx_rproc_mem - slim internal memory structure + * @cpu_addr: MPU virtual address of the memory region + * @sys_addr: Bus address used to access the memory region + * @size: Size of the memory region + */ +struct imx_rproc_mem { + void __iomem *cpu_addr; + phys_addr_t sys_addr; + size_t size; +}; + +/* att flags */ +/* M4 own area. 
Can be mapped at probe */ +#define ATT_OWN BIT(1) + +/* address translation table */ +struct imx_rproc_att { + u32 da; /* device address (From Cortex M4 view)*/ + u32 sa; /* system bus address */ + u32 size; /* size of reg range */ + int flags; +}; + +struct imx_rproc_dcfg { + u32 src_reg; + u32 src_mask; + u32 src_start; + u32 src_stop; + const struct imx_rproc_att *att; + size_t att_size; +}; + +struct imx_rproc { + struct device *dev; + struct regmap *regmap; + struct rproc *rproc; + const struct imx_rproc_dcfg *dcfg; + struct imx_rproc_mem mem[IMX7D_RPROC_MEM_MAX]; + struct clk *clk; +}; + +static const struct imx_rproc_att imx_rproc_att_imx7d[] = { + /* dev addr , sys addr , size , flags */ + /* OCRAM_S (M4 Boot code) - alias */ + { 0x00000000, 0x00180000, 0x00008000, 0 }, + /* OCRAM_S (Code) */ + { 0x00180000, 0x00180000, 0x00008000, ATT_OWN }, + /* OCRAM (Code) - alias */ + { 0x00900000, 0x00900000, 0x00020000, 0 }, + /* OCRAM_EPDC (Code) - alias */ + { 0x00920000, 0x00920000, 0x00020000, 0 }, + /* OCRAM_PXP (Code) - alias */ + { 0x00940000, 0x00940000, 0x00008000, 0 }, + /* TCML (Code) */ + { 0x1FFF8000, 0x007F8000, 0x00008000, ATT_OWN }, + /* DDR (Code) - alias, first part of DDR (Data) */ + { 0x10000000, 0x80000000, 0x0FFF0000, 0 }, + + /* TCMU (Data) */ + { 0x20000000, 0x00800000, 0x00008000, ATT_OWN }, + /* OCRAM (Data) */ + { 0x20200000, 0x00900000, 0x00020000, 0 }, + /* OCRAM_EPDC (Data) */ + { 0x20220000, 0x00920000, 0x00020000, 0 }, + /* OCRAM_PXP (Data) */ + { 0x20240000, 0x00940000, 0x00008000, 0 }, + /* DDR (Data) */ + { 0x80000000, 0x80000000, 0x60000000, 0 }, +}; + +static const struct imx_rproc_att imx_rproc_att_imx6sx[] = { + /* dev addr , sys addr , size , flags */ + /* TCML (M4 Boot Code) - alias */ + { 0x00000000, 0x007F8000, 0x00008000, 0 }, + /* OCRAM_S (Code) */ + { 0x00180000, 0x008F8000, 0x00004000, 0 }, + /* OCRAM_S (Code) - alias */ + { 0x00180000, 0x008FC000, 0x00004000, 0 }, + /* TCML (Code) */ + { 0x1FFF8000, 0x007F8000, 0x00008000, ATT_OWN }, + /* DDR (Code) - alias, first part of DDR (Data) */ + { 0x10000000, 0x80000000, 0x0FFF8000, 0 }, + + /* TCMU (Data) */ + { 0x20000000, 0x00800000, 0x00008000, ATT_OWN }, + /* OCRAM_S (Data) - alias? 
*/ + { 0x208F8000, 0x008F8000, 0x00004000, 0 }, + /* DDR (Data) */ + { 0x80000000, 0x80000000, 0x60000000, 0 }, +}; + +static const struct imx_rproc_dcfg imx_rproc_cfg_imx7d = { + .src_reg = IMX7D_SRC_SCR, + .src_mask = IMX7D_M4_RST_MASK, + .src_start = IMX7D_M4_START, + .src_stop = IMX7D_M4_STOP, + .att = imx_rproc_att_imx7d, + .att_size = ARRAY_SIZE(imx_rproc_att_imx7d), +}; + +static const struct imx_rproc_dcfg imx_rproc_cfg_imx6sx = { + .src_reg = IMX6SX_SRC_SCR, + .src_mask = IMX6SX_M4_RST_MASK, + .src_start = IMX6SX_M4_START, + .src_stop = IMX6SX_M4_STOP, + .att = imx_rproc_att_imx6sx, + .att_size = ARRAY_SIZE(imx_rproc_att_imx6sx), +}; + +static int imx_rproc_start(struct rproc *rproc) +{ + struct imx_rproc *priv = rproc->priv; + const struct imx_rproc_dcfg *dcfg = priv->dcfg; + struct device *dev = priv->dev; + int ret; + + ret = regmap_update_bits(priv->regmap, dcfg->src_reg, + dcfg->src_mask, dcfg->src_start); + if (ret) + dev_err(dev, "Failed to enable M4!\n"); + + return ret; +} + +static int imx_rproc_stop(struct rproc *rproc) +{ + struct imx_rproc *priv = rproc->priv; + const struct imx_rproc_dcfg *dcfg = priv->dcfg; + struct device *dev = priv->dev; + int ret; + + ret = regmap_update_bits(priv->regmap, dcfg->src_reg, + dcfg->src_mask, dcfg->src_stop); + if (ret) + dev_err(dev, "Failed to stop M4!\n"); + + return ret; +} + +static int imx_rproc_da_to_sys(struct imx_rproc *priv, u64 da, + size_t len, u64 *sys) +{ + const struct imx_rproc_dcfg *dcfg = priv->dcfg; + int i; + + /* parse address translation table */ + for (i = 0; i < dcfg->att_size; i++) { + const struct imx_rproc_att *att = &dcfg->att[i]; + + if (da >= att->da && da + len < att->da + att->size) { + unsigned int offset = da - att->da; + + *sys = att->sa + offset; + return 0; + } + } + + dev_warn(priv->dev, "Translation failed: da = 0x%llx len = 0x%zx\n", + da, len); + return -ENOENT; +} + +static void *imx_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len) +{ + struct imx_rproc *priv = rproc->priv; + void *va = NULL; + u64 sys; + int i; + + if (len == 0) + return NULL; + + /* + * On device side we have many aliases, so we need to convert device + * address (M4) to system bus address first. 
+ */ + if (imx_rproc_da_to_sys(priv, da, len, &sys)) + return NULL; + + for (i = 0; i < IMX7D_RPROC_MEM_MAX; i++) { + if (sys >= priv->mem[i].sys_addr && sys + len < + priv->mem[i].sys_addr + priv->mem[i].size) { + unsigned int offset = sys - priv->mem[i].sys_addr; + /* __force to make sparse happy with type conversion */ + va = (__force void *)(priv->mem[i].cpu_addr + offset); + break; + } + } + + dev_dbg(&rproc->dev, "da = 0x%llx len = 0x%zx va = 0x%p\n", + da, len, va); + + return va; +} + +static const struct rproc_ops imx_rproc_ops = { + .start = imx_rproc_start, + .stop = imx_rproc_stop, + .da_to_va = imx_rproc_da_to_va, +}; + +static int imx_rproc_addr_init(struct imx_rproc *priv, + struct platform_device *pdev) +{ + const struct imx_rproc_dcfg *dcfg = priv->dcfg; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + int a, b = 0, err, nph; + + /* remap required addresses */ + for (a = 0; a < dcfg->att_size; a++) { + const struct imx_rproc_att *att = &dcfg->att[a]; + + if (!(att->flags & ATT_OWN)) + continue; + + if (b >= IMX7D_RPROC_MEM_MAX) + break; + + priv->mem[b].cpu_addr = devm_ioremap(&pdev->dev, + att->sa, att->size); + if (!priv->mem[b].cpu_addr) { + dev_err(dev, "devm_ioremap_resource failed\n"); + return -ENOMEM; + } + priv->mem[b].sys_addr = att->sa; + priv->mem[b].size = att->size; + b++; + } + + /* memory-region is optional property */ + nph = of_count_phandle_with_args(np, "memory-region", NULL); + if (nph <= 0) + return 0; + + /* remap optional addresses */ + for (a = 0; a < nph; a++) { + struct device_node *node; + struct resource res; + + node = of_parse_phandle(np, "memory-region", a); + err = of_address_to_resource(node, 0, &res); + if (err) { + dev_err(dev, "unable to resolve memory region\n"); + return err; + } + + if (b >= IMX7D_RPROC_MEM_MAX) + break; + + priv->mem[b].cpu_addr = devm_ioremap_resource(&pdev->dev, &res); + if (IS_ERR(priv->mem[b].cpu_addr)) { + dev_err(dev, "devm_ioremap_resource failed\n"); + err = PTR_ERR(priv->mem[b].cpu_addr); + return err; + } + priv->mem[b].sys_addr = res.start; + priv->mem[b].size = resource_size(&res); + b++; + } + + return 0; +} + +static int imx_rproc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct imx_rproc *priv; + struct rproc *rproc; + struct regmap_config config = { .name = "imx-rproc" }; + const struct imx_rproc_dcfg *dcfg; + struct regmap *regmap; + int ret; + + regmap = syscon_regmap_lookup_by_phandle(np, "syscon"); + if (IS_ERR(regmap)) { + dev_err(dev, "failed to find syscon\n"); + return PTR_ERR(regmap); + } + regmap_attach_dev(dev, regmap, &config); + + /* set some other name then imx */ + rproc = rproc_alloc(dev, "imx-rproc", &imx_rproc_ops, + NULL, sizeof(*priv)); + if (!rproc) + return -ENOMEM; + + dcfg = of_device_get_match_data(dev); + if (!dcfg) { + ret = -EINVAL; + goto err_put_rproc; + } + + priv = rproc->priv; + priv->rproc = rproc; + priv->regmap = regmap; + priv->dcfg = dcfg; + priv->dev = dev; + + dev_set_drvdata(dev, rproc); + + ret = imx_rproc_addr_init(priv, pdev); + if (ret) { + dev_err(dev, "failed on imx_rproc_addr_init\n"); + goto err_put_rproc; + } + + priv->clk = devm_clk_get(dev, NULL); + if (IS_ERR(priv->clk)) { + dev_err(dev, "Failed to get clock\n"); + ret = PTR_ERR(priv->clk); + goto err_put_rproc; + } + + /* + * clk for M4 block including memory. Should be + * enabled before .start for FW transfer. 
+ */ + ret = clk_prepare_enable(priv->clk); + if (ret) { + dev_err(&rproc->dev, "Failed to enable clock\n"); + goto err_put_rproc; + } + + ret = rproc_add(rproc); + if (ret) { + dev_err(dev, "rproc_add failed\n"); + goto err_put_clk; + } + + return 0; + +err_put_clk: + clk_disable_unprepare(priv->clk); +err_put_rproc: + rproc_free(rproc); + + return ret; +} + +static int imx_rproc_remove(struct platform_device *pdev) +{ + struct rproc *rproc = platform_get_drvdata(pdev); + struct imx_rproc *priv = rproc->priv; + + clk_disable_unprepare(priv->clk); + rproc_del(rproc); + rproc_free(rproc); + + return 0; +} + +static const struct of_device_id imx_rproc_of_match[] = { + { .compatible = "fsl,imx7d-cm4", .data = &imx_rproc_cfg_imx7d }, + { .compatible = "fsl,imx6sx-cm4", .data = &imx_rproc_cfg_imx6sx }, + {}, +}; +MODULE_DEVICE_TABLE(of, imx_rproc_of_match); + +static struct platform_driver imx_rproc_driver = { + .probe = imx_rproc_probe, + .remove = imx_rproc_remove, + .driver = { + .name = "imx-rproc", + .of_match_table = imx_rproc_of_match, + }, +}; + +module_platform_driver(imx_rproc_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("IMX6SX/7D remote processor control driver"); +MODULE_AUTHOR("Oleksij Rempel <o.rempel@pengutronix.de>"); diff --git a/drivers/remoteproc/ingenic_rproc.c b/drivers/remoteproc/ingenic_rproc.c new file mode 100644 index 000000000..1c2b21a5d --- /dev/null +++ b/drivers/remoteproc/ingenic_rproc.c @@ -0,0 +1,248 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Ingenic JZ47xx remoteproc driver + * Copyright 2019, Paul Cercueil <paul@crapouillou.net> + */ + +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/remoteproc.h> + +#include "remoteproc_internal.h" + +#define REG_AUX_CTRL 0x0 +#define REG_AUX_MSG_ACK 0x10 +#define REG_AUX_MSG 0x14 +#define REG_CORE_MSG_ACK 0x18 +#define REG_CORE_MSG 0x1C + +#define AUX_CTRL_SLEEP BIT(31) +#define AUX_CTRL_MSG_IRQ_EN BIT(3) +#define AUX_CTRL_NMI_RESETS BIT(2) +#define AUX_CTRL_NMI BIT(1) +#define AUX_CTRL_SW_RESET BIT(0) + +struct vpu_mem_map { + const char *name; + unsigned int da; +}; + +struct vpu_mem_info { + const struct vpu_mem_map *map; + unsigned long len; + void __iomem *base; +}; + +static const struct vpu_mem_map vpu_mem_map[] = { + { "tcsm0", 0x132b0000 }, + { "tcsm1", 0xf4000000 }, + { "sram", 0x132f0000 }, +}; + +/** + * struct vpu - Ingenic VPU remoteproc private structure + * @irq: interrupt number + * @clks: pointers to the VPU and AUX clocks + * @aux_base: raw pointer to the AUX interface registers + * @mem_info: array of struct vpu_mem_info, which contain the mapping info of + * each of the external memories + * @dev: private pointer to the device + */ +struct vpu { + int irq; + struct clk_bulk_data clks[2]; + void __iomem *aux_base; + struct vpu_mem_info mem_info[ARRAY_SIZE(vpu_mem_map)]; + struct device *dev; +}; + +static int ingenic_rproc_prepare(struct rproc *rproc) +{ + struct vpu *vpu = rproc->priv; + int ret; + + /* The clocks must be enabled for the firmware to be loaded in TCSM */ + ret = clk_bulk_prepare_enable(ARRAY_SIZE(vpu->clks), vpu->clks); + if (ret) + dev_err(vpu->dev, "Unable to start clocks: %d\n", ret); + + return ret; +} + +static int ingenic_rproc_unprepare(struct rproc *rproc) +{ + struct vpu *vpu = rproc->priv; + + clk_bulk_disable_unprepare(ARRAY_SIZE(vpu->clks), vpu->clks); + + return 0; +} + +static int 
ingenic_rproc_start(struct rproc *rproc) +{ + struct vpu *vpu = rproc->priv; + u32 ctrl; + + enable_irq(vpu->irq); + + /* Reset the AUX and enable message IRQ */ + ctrl = AUX_CTRL_NMI_RESETS | AUX_CTRL_NMI | AUX_CTRL_MSG_IRQ_EN; + writel(ctrl, vpu->aux_base + REG_AUX_CTRL); + + return 0; +} + +static int ingenic_rproc_stop(struct rproc *rproc) +{ + struct vpu *vpu = rproc->priv; + + disable_irq(vpu->irq); + + /* Keep AUX in reset mode */ + writel(AUX_CTRL_SW_RESET, vpu->aux_base + REG_AUX_CTRL); + + return 0; +} + +static void ingenic_rproc_kick(struct rproc *rproc, int vqid) +{ + struct vpu *vpu = rproc->priv; + + writel(vqid, vpu->aux_base + REG_CORE_MSG); +} + +static void *ingenic_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len) +{ + struct vpu *vpu = rproc->priv; + void __iomem *va = NULL; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(vpu_mem_map); i++) { + const struct vpu_mem_info *info = &vpu->mem_info[i]; + const struct vpu_mem_map *map = info->map; + + if (da >= map->da && (da + len) < (map->da + info->len)) { + va = info->base + (da - map->da); + break; + } + } + + return (__force void *)va; +} + +static struct rproc_ops ingenic_rproc_ops = { + .prepare = ingenic_rproc_prepare, + .unprepare = ingenic_rproc_unprepare, + .start = ingenic_rproc_start, + .stop = ingenic_rproc_stop, + .kick = ingenic_rproc_kick, + .da_to_va = ingenic_rproc_da_to_va, +}; + +static irqreturn_t vpu_interrupt(int irq, void *data) +{ + struct rproc *rproc = data; + struct vpu *vpu = rproc->priv; + u32 vring; + + vring = readl(vpu->aux_base + REG_AUX_MSG); + + /* Ack the interrupt */ + writel(0, vpu->aux_base + REG_AUX_MSG_ACK); + + return rproc_vq_interrupt(rproc, vring); +} + +static int ingenic_rproc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *mem; + struct rproc *rproc; + struct vpu *vpu; + unsigned int i; + int ret; + + rproc = devm_rproc_alloc(dev, "ingenic-vpu", + &ingenic_rproc_ops, NULL, sizeof(*vpu)); + if (!rproc) + return -ENOMEM; + + vpu = rproc->priv; + vpu->dev = &pdev->dev; + platform_set_drvdata(pdev, vpu); + + mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "aux"); + vpu->aux_base = devm_ioremap_resource(dev, mem); + if (IS_ERR(vpu->aux_base)) { + dev_err(dev, "Failed to ioremap\n"); + return PTR_ERR(vpu->aux_base); + } + + for (i = 0; i < ARRAY_SIZE(vpu_mem_map); i++) { + mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, + vpu_mem_map[i].name); + + vpu->mem_info[i].base = devm_ioremap_resource(dev, mem); + if (IS_ERR(vpu->mem_info[i].base)) { + ret = PTR_ERR(vpu->mem_info[i].base); + dev_err(dev, "Failed to ioremap\n"); + return ret; + } + + vpu->mem_info[i].len = resource_size(mem); + vpu->mem_info[i].map = &vpu_mem_map[i]; + } + + vpu->clks[0].id = "vpu"; + vpu->clks[1].id = "aux"; + + ret = devm_clk_bulk_get(dev, ARRAY_SIZE(vpu->clks), vpu->clks); + if (ret) { + dev_err(dev, "Failed to get clocks\n"); + return ret; + } + + vpu->irq = platform_get_irq(pdev, 0); + if (vpu->irq < 0) + return vpu->irq; + + ret = devm_request_irq(dev, vpu->irq, vpu_interrupt, 0, "VPU", rproc); + if (ret < 0) { + dev_err(dev, "Failed to request IRQ\n"); + return ret; + } + + disable_irq(vpu->irq); + + ret = devm_rproc_add(dev, rproc); + if (ret) { + dev_err(dev, "Failed to register remote processor\n"); + return ret; + } + + return 0; +} + +static const struct of_device_id ingenic_rproc_of_matches[] = { + { .compatible = "ingenic,jz4770-vpu-rproc", }, + {} +}; +MODULE_DEVICE_TABLE(of, ingenic_rproc_of_matches); + +static struct 
platform_driver ingenic_rproc_driver = { + .probe = ingenic_rproc_probe, + .driver = { + .name = "ingenic-vpu", + .of_match_table = ingenic_rproc_of_matches, + }, +}; +module_platform_driver(ingenic_rproc_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>"); +MODULE_DESCRIPTION("Ingenic JZ47xx Remote Processor control driver"); diff --git a/drivers/remoteproc/keystone_remoteproc.c b/drivers/remoteproc/keystone_remoteproc.c new file mode 100644 index 000000000..cd266163a --- /dev/null +++ b/drivers/remoteproc/keystone_remoteproc.c @@ -0,0 +1,514 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * TI Keystone DSP remoteproc driver + * + * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com/ + */ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/io.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/workqueue.h> +#include <linux/of_address.h> +#include <linux/of_reserved_mem.h> +#include <linux/of_gpio.h> +#include <linux/regmap.h> +#include <linux/mfd/syscon.h> +#include <linux/remoteproc.h> +#include <linux/reset.h> + +#include "remoteproc_internal.h" + +#define KEYSTONE_RPROC_LOCAL_ADDRESS_MASK (SZ_16M - 1) + +/** + * struct keystone_rproc_mem - internal memory structure + * @cpu_addr: MPU virtual address of the memory region + * @bus_addr: Bus address used to access the memory region + * @dev_addr: Device address of the memory region from DSP view + * @size: Size of the memory region + */ +struct keystone_rproc_mem { + void __iomem *cpu_addr; + phys_addr_t bus_addr; + u32 dev_addr; + size_t size; +}; + +/** + * struct keystone_rproc - keystone remote processor driver structure + * @dev: cached device pointer + * @rproc: remoteproc device handle + * @mem: internal memory regions data + * @num_mems: number of internal memory regions + * @dev_ctrl: device control regmap handle + * @reset: reset control handle + * @boot_offset: boot register offset in @dev_ctrl regmap + * @irq_ring: irq entry for vring + * @irq_fault: irq entry for exception + * @kick_gpio: gpio used for virtio kicks + * @workqueue: workqueue for processing virtio interrupts + */ +struct keystone_rproc { + struct device *dev; + struct rproc *rproc; + struct keystone_rproc_mem *mem; + int num_mems; + struct regmap *dev_ctrl; + struct reset_control *reset; + u32 boot_offset; + int irq_ring; + int irq_fault; + int kick_gpio; + struct work_struct workqueue; +}; + +/* Put the DSP processor into reset */ +static void keystone_rproc_dsp_reset(struct keystone_rproc *ksproc) +{ + reset_control_assert(ksproc->reset); +} + +/* Configure the boot address and boot the DSP processor */ +static int keystone_rproc_dsp_boot(struct keystone_rproc *ksproc, u32 boot_addr) +{ + int ret; + + if (boot_addr & (SZ_1K - 1)) { + dev_err(ksproc->dev, "invalid boot address 0x%x, must be aligned on a 1KB boundary\n", + boot_addr); + return -EINVAL; + } + + ret = regmap_write(ksproc->dev_ctrl, ksproc->boot_offset, boot_addr); + if (ret) { + dev_err(ksproc->dev, "regmap_write of boot address failed, status = %d\n", + ret); + return ret; + } + + reset_control_deassert(ksproc->reset); + + return 0; +} + +/* + * Process the remoteproc exceptions + * + * The exception reporting on Keystone DSP remote processors is very simple + * compared to the equivalent processors on the OMAP family, it is notified + * through a software-designed specific interrupt source in the IPC interrupt + * generation register. 
+ * + * This function just invokes the rproc_report_crash to report the exception + * to the remoteproc driver core, to trigger a recovery. + */ +static irqreturn_t keystone_rproc_exception_interrupt(int irq, void *dev_id) +{ + struct keystone_rproc *ksproc = dev_id; + + rproc_report_crash(ksproc->rproc, RPROC_FATAL_ERROR); + + return IRQ_HANDLED; +} + +/* + * Main virtqueue message workqueue function + * + * This function is executed upon scheduling of the keystone remoteproc + * driver's workqueue. The workqueue is scheduled by the vring ISR handler. + * + * There is no payload message indicating the virtqueue index as is the + * case with mailbox-based implementations on OMAP family. As such, this + * handler processes both the Tx and Rx virtqueue indices on every invocation. + * The rproc_vq_interrupt function can detect if there are new unprocessed + * messages or not (returns IRQ_NONE vs IRQ_HANDLED), but there is no need + * to check for these return values. The index 0 triggering will process all + * pending Rx buffers, and the index 1 triggering will process all newly + * available Tx buffers and will wakeup any potentially blocked senders. + * + * NOTE: + * 1. A payload could be added by using some of the source bits in the + * IPC interrupt generation registers, but this would need additional + * changes to the overall IPC stack, and currently there are no benefits + * of adapting that approach. + * 2. The current logic is based on an inherent design assumption of supporting + * only 2 vrings, but this can be changed if needed. + */ +static void handle_event(struct work_struct *work) +{ + struct keystone_rproc *ksproc = + container_of(work, struct keystone_rproc, workqueue); + + rproc_vq_interrupt(ksproc->rproc, 0); + rproc_vq_interrupt(ksproc->rproc, 1); +} + +/* + * Interrupt handler for processing vring kicks from remote processor + */ +static irqreturn_t keystone_rproc_vring_interrupt(int irq, void *dev_id) +{ + struct keystone_rproc *ksproc = dev_id; + + schedule_work(&ksproc->workqueue); + + return IRQ_HANDLED; +} + +/* + * Power up the DSP remote processor. + * + * This function will be invoked only after the firmware for this rproc + * was loaded, parsed successfully, and all of its resource requirements + * were met. + */ +static int keystone_rproc_start(struct rproc *rproc) +{ + struct keystone_rproc *ksproc = rproc->priv; + int ret; + + INIT_WORK(&ksproc->workqueue, handle_event); + + ret = request_irq(ksproc->irq_ring, keystone_rproc_vring_interrupt, 0, + dev_name(ksproc->dev), ksproc); + if (ret) { + dev_err(ksproc->dev, "failed to enable vring interrupt, ret = %d\n", + ret); + goto out; + } + + ret = request_irq(ksproc->irq_fault, keystone_rproc_exception_interrupt, + 0, dev_name(ksproc->dev), ksproc); + if (ret) { + dev_err(ksproc->dev, "failed to enable exception interrupt, ret = %d\n", + ret); + goto free_vring_irq; + } + + ret = keystone_rproc_dsp_boot(ksproc, rproc->bootaddr); + if (ret) + goto free_exc_irq; + + return 0; + +free_exc_irq: + free_irq(ksproc->irq_fault, ksproc); +free_vring_irq: + free_irq(ksproc->irq_ring, ksproc); + flush_work(&ksproc->workqueue); +out: + return ret; +} + +/* + * Stop the DSP remote processor. + * + * This function puts the DSP processor into reset, and finishes processing + * of any pending messages. 
+ */ +static int keystone_rproc_stop(struct rproc *rproc) +{ + struct keystone_rproc *ksproc = rproc->priv; + + keystone_rproc_dsp_reset(ksproc); + free_irq(ksproc->irq_fault, ksproc); + free_irq(ksproc->irq_ring, ksproc); + flush_work(&ksproc->workqueue); + + return 0; +} + +/* + * Kick the remote processor to notify about pending unprocessed messages. + * The vqid usage is not used and is inconsequential, as the kick is performed + * through a simulated GPIO (a bit in an IPC interrupt-triggering register), + * the remote processor is expected to process both its Tx and Rx virtqueues. + */ +static void keystone_rproc_kick(struct rproc *rproc, int vqid) +{ + struct keystone_rproc *ksproc = rproc->priv; + + if (WARN_ON(ksproc->kick_gpio < 0)) + return; + + gpio_set_value(ksproc->kick_gpio, 1); +} + +/* + * Custom function to translate a DSP device address (internal RAMs only) to a + * kernel virtual address. The DSPs can access their RAMs at either an internal + * address visible only from a DSP, or at the SoC-level bus address. Both these + * addresses need to be looked through for translation. The translated addresses + * can be used either by the remoteproc core for loading (when using kernel + * remoteproc loader), or by any rpmsg bus drivers. + */ +static void *keystone_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len) +{ + struct keystone_rproc *ksproc = rproc->priv; + void __iomem *va = NULL; + phys_addr_t bus_addr; + u32 dev_addr, offset; + size_t size; + int i; + + if (len == 0) + return NULL; + + for (i = 0; i < ksproc->num_mems; i++) { + bus_addr = ksproc->mem[i].bus_addr; + dev_addr = ksproc->mem[i].dev_addr; + size = ksproc->mem[i].size; + + if (da < KEYSTONE_RPROC_LOCAL_ADDRESS_MASK) { + /* handle DSP-view addresses */ + if ((da >= dev_addr) && + ((da + len) <= (dev_addr + size))) { + offset = da - dev_addr; + va = ksproc->mem[i].cpu_addr + offset; + break; + } + } else { + /* handle SoC-view addresses */ + if ((da >= bus_addr) && + (da + len) <= (bus_addr + size)) { + offset = da - bus_addr; + va = ksproc->mem[i].cpu_addr + offset; + break; + } + } + } + + return (__force void *)va; +} + +static const struct rproc_ops keystone_rproc_ops = { + .start = keystone_rproc_start, + .stop = keystone_rproc_stop, + .kick = keystone_rproc_kick, + .da_to_va = keystone_rproc_da_to_va, +}; + +static int keystone_rproc_of_get_memories(struct platform_device *pdev, + struct keystone_rproc *ksproc) +{ + static const char * const mem_names[] = {"l2sram", "l1pram", "l1dram"}; + struct device *dev = &pdev->dev; + struct resource *res; + int num_mems = 0; + int i; + + num_mems = ARRAY_SIZE(mem_names); + ksproc->mem = devm_kcalloc(ksproc->dev, num_mems, + sizeof(*ksproc->mem), GFP_KERNEL); + if (!ksproc->mem) + return -ENOMEM; + + for (i = 0; i < num_mems; i++) { + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + mem_names[i]); + ksproc->mem[i].cpu_addr = devm_ioremap_resource(dev, res); + if (IS_ERR(ksproc->mem[i].cpu_addr)) { + dev_err(dev, "failed to parse and map %s memory\n", + mem_names[i]); + return PTR_ERR(ksproc->mem[i].cpu_addr); + } + ksproc->mem[i].bus_addr = res->start; + ksproc->mem[i].dev_addr = + res->start & KEYSTONE_RPROC_LOCAL_ADDRESS_MASK; + ksproc->mem[i].size = resource_size(res); + + /* zero out memories to start in a pristine state */ + memset((__force void *)ksproc->mem[i].cpu_addr, 0, + ksproc->mem[i].size); + } + ksproc->num_mems = num_mems; + + return 0; +} + +static int keystone_rproc_of_get_dev_syscon(struct platform_device *pdev, + struct 
keystone_rproc *ksproc) +{ + struct device_node *np = pdev->dev.of_node; + struct device *dev = &pdev->dev; + int ret; + + if (!of_property_read_bool(np, "ti,syscon-dev")) { + dev_err(dev, "ti,syscon-dev property is absent\n"); + return -EINVAL; + } + + ksproc->dev_ctrl = + syscon_regmap_lookup_by_phandle(np, "ti,syscon-dev"); + if (IS_ERR(ksproc->dev_ctrl)) { + ret = PTR_ERR(ksproc->dev_ctrl); + return ret; + } + + if (of_property_read_u32_index(np, "ti,syscon-dev", 1, + &ksproc->boot_offset)) { + dev_err(dev, "couldn't read the boot register offset\n"); + return -EINVAL; + } + + return 0; +} + +static int keystone_rproc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct keystone_rproc *ksproc; + struct rproc *rproc; + int dsp_id; + char *fw_name = NULL; + char *template = "keystone-dsp%d-fw"; + int name_len = 0; + int ret = 0; + + if (!np) { + dev_err(dev, "only DT-based devices are supported\n"); + return -ENODEV; + } + + dsp_id = of_alias_get_id(np, "rproc"); + if (dsp_id < 0) { + dev_warn(dev, "device does not have an alias id\n"); + return dsp_id; + } + + /* construct a custom default fw name - subject to change in future */ + name_len = strlen(template); /* assuming a single digit alias */ + fw_name = devm_kzalloc(dev, name_len, GFP_KERNEL); + if (!fw_name) + return -ENOMEM; + snprintf(fw_name, name_len, template, dsp_id); + + rproc = rproc_alloc(dev, dev_name(dev), &keystone_rproc_ops, fw_name, + sizeof(*ksproc)); + if (!rproc) + return -ENOMEM; + + rproc->has_iommu = false; + ksproc = rproc->priv; + ksproc->rproc = rproc; + ksproc->dev = dev; + + ret = keystone_rproc_of_get_dev_syscon(pdev, ksproc); + if (ret) + goto free_rproc; + + ksproc->reset = devm_reset_control_get_exclusive(dev, NULL); + if (IS_ERR(ksproc->reset)) { + ret = PTR_ERR(ksproc->reset); + goto free_rproc; + } + + /* enable clock for accessing DSP internal memories */ + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "failed to enable clock, status = %d\n", ret); + pm_runtime_put_noidle(dev); + goto disable_rpm; + } + + ret = keystone_rproc_of_get_memories(pdev, ksproc); + if (ret) + goto disable_clk; + + ksproc->irq_ring = platform_get_irq_byname(pdev, "vring"); + if (ksproc->irq_ring < 0) { + ret = ksproc->irq_ring; + goto disable_clk; + } + + ksproc->irq_fault = platform_get_irq_byname(pdev, "exception"); + if (ksproc->irq_fault < 0) { + ret = ksproc->irq_fault; + goto disable_clk; + } + + ksproc->kick_gpio = of_get_named_gpio_flags(np, "kick-gpios", 0, NULL); + if (ksproc->kick_gpio < 0) { + ret = ksproc->kick_gpio; + dev_err(dev, "failed to get gpio for virtio kicks, status = %d\n", + ret); + goto disable_clk; + } + + if (of_reserved_mem_device_init(dev)) + dev_warn(dev, "device does not have specific CMA pool\n"); + + /* ensure the DSP is in reset before loading firmware */ + ret = reset_control_status(ksproc->reset); + if (ret < 0) { + dev_err(dev, "failed to get reset status, status = %d\n", ret); + goto release_mem; + } else if (ret == 0) { + WARN(1, "device is not in reset\n"); + keystone_rproc_dsp_reset(ksproc); + } + + ret = rproc_add(rproc); + if (ret) { + dev_err(dev, "failed to add register device with remoteproc core, status = %d\n", + ret); + goto release_mem; + } + + platform_set_drvdata(pdev, ksproc); + + return 0; + +release_mem: + of_reserved_mem_device_release(dev); +disable_clk: + pm_runtime_put_sync(dev); +disable_rpm: + pm_runtime_disable(dev); +free_rproc: + 
rproc_free(rproc); + return ret; +} + +static int keystone_rproc_remove(struct platform_device *pdev) +{ + struct keystone_rproc *ksproc = platform_get_drvdata(pdev); + + rproc_del(ksproc->rproc); + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + rproc_free(ksproc->rproc); + of_reserved_mem_device_release(&pdev->dev); + + return 0; +} + +static const struct of_device_id keystone_rproc_of_match[] = { + { .compatible = "ti,k2hk-dsp", }, + { .compatible = "ti,k2l-dsp", }, + { .compatible = "ti,k2e-dsp", }, + { .compatible = "ti,k2g-dsp", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, keystone_rproc_of_match); + +static struct platform_driver keystone_rproc_driver = { + .probe = keystone_rproc_probe, + .remove = keystone_rproc_remove, + .driver = { + .name = "keystone-rproc", + .of_match_table = keystone_rproc_of_match, + }, +}; + +module_platform_driver(keystone_rproc_driver); + +MODULE_AUTHOR("Suman Anna <s-anna@ti.com>"); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("TI Keystone DSP Remoteproc driver"); diff --git a/drivers/remoteproc/mtk_common.h b/drivers/remoteproc/mtk_common.h new file mode 100644 index 000000000..583880570 --- /dev/null +++ b/drivers/remoteproc/mtk_common.h @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2019 MediaTek Inc. + */ + +#ifndef __RPROC_MTK_COMMON_H +#define __RPROC_MTK_COMMON_H + +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/remoteproc.h> +#include <linux/remoteproc/mtk_scp.h> + +#define MT8183_SW_RSTN 0x0 +#define MT8183_SW_RSTN_BIT BIT(0) +#define MT8183_SCP_TO_HOST 0x1C +#define MT8183_SCP_IPC_INT_BIT BIT(0) +#define MT8183_SCP_WDT_INT_BIT BIT(8) +#define MT8183_HOST_TO_SCP 0x28 +#define MT8183_HOST_IPC_INT_BIT BIT(0) +#define MT8183_WDT_CFG 0x84 +#define MT8183_SCP_CLK_SW_SEL 0x4000 +#define MT8183_SCP_CLK_DIV_SEL 0x4024 +#define MT8183_SCP_SRAM_PDN 0x402C +#define MT8183_SCP_L1_SRAM_PD 0x4080 +#define MT8183_SCP_TCM_TAIL_SRAM_PD 0x4094 + +#define MT8183_SCP_CACHE_SEL(x) (0x14000 + (x) * 0x3000) +#define MT8183_SCP_CACHE_CON MT8183_SCP_CACHE_SEL(0) +#define MT8183_SCP_DCACHE_CON MT8183_SCP_CACHE_SEL(1) +#define MT8183_SCP_CACHESIZE_8KB BIT(8) +#define MT8183_SCP_CACHE_CON_WAYEN BIT(10) + +#define MT8192_L2TCM_SRAM_PD_0 0x10C0 +#define MT8192_L2TCM_SRAM_PD_1 0x10C4 +#define MT8192_L2TCM_SRAM_PD_2 0x10C8 +#define MT8192_L1TCM_SRAM_PDN 0x102C +#define MT8192_CPU0_SRAM_PD 0x1080 + +#define MT8192_SCP2APMCU_IPC_SET 0x4080 +#define MT8192_SCP2APMCU_IPC_CLR 0x4084 +#define MT8192_SCP_IPC_INT_BIT BIT(0) +#define MT8192_SCP2SPM_IPC_CLR 0x4094 +#define MT8192_GIPC_IN_SET 0x4098 +#define MT8192_HOST_IPC_INT_BIT BIT(0) + +#define MT8192_CORE0_SW_RSTN_CLR 0x10000 +#define MT8192_CORE0_SW_RSTN_SET 0x10004 +#define MT8192_CORE0_WDT_IRQ 0x10030 +#define MT8192_CORE0_WDT_CFG 0x10034 + +#define SCP_FW_VER_LEN 32 +#define SCP_SHARE_BUFFER_SIZE 288 + +struct scp_run { + u32 signaled; + s8 fw_ver[SCP_FW_VER_LEN]; + u32 dec_capability; + u32 enc_capability; + wait_queue_head_t wq; +}; + +struct scp_ipi_desc { + /* For protecting handler. 
*/ + struct mutex lock; + scp_ipi_handler_t handler; + void *priv; +}; + +struct mtk_scp; + +struct mtk_scp_of_data { + int (*scp_before_load)(struct mtk_scp *scp); + void (*scp_irq_handler)(struct mtk_scp *scp); + void (*scp_reset_assert)(struct mtk_scp *scp); + void (*scp_reset_deassert)(struct mtk_scp *scp); + void (*scp_stop)(struct mtk_scp *scp); + + u32 host_to_scp_reg; + u32 host_to_scp_int_bit; +}; + +struct mtk_scp { + struct device *dev; + struct rproc *rproc; + struct clk *clk; + void __iomem *reg_base; + void __iomem *sram_base; + size_t sram_size; + + const struct mtk_scp_of_data *data; + + struct mtk_share_obj __iomem *recv_buf; + struct mtk_share_obj __iomem *send_buf; + struct scp_run run; + /* To prevent multiple ipi_send run concurrently. */ + struct mutex send_lock; + struct scp_ipi_desc ipi_desc[SCP_IPI_MAX]; + bool ipi_id_ack[SCP_IPI_MAX]; + wait_queue_head_t ack_wq; + + void __iomem *cpu_addr; + dma_addr_t dma_addr; + size_t dram_size; + + struct rproc_subdev *rpmsg_subdev; +}; + +/** + * struct mtk_share_obj - SRAM buffer shared with AP and SCP + * + * @id: IPI id + * @len: share buffer length + * @share_buf: share buffer data + */ +struct mtk_share_obj { + u32 id; + u32 len; + u8 share_buf[SCP_SHARE_BUFFER_SIZE]; +}; + +void scp_memcpy_aligned(void __iomem *dst, const void *src, unsigned int len); +void scp_ipi_lock(struct mtk_scp *scp, u32 id); +void scp_ipi_unlock(struct mtk_scp *scp, u32 id); + +#endif diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c new file mode 100644 index 000000000..63c501a42 --- /dev/null +++ b/drivers/remoteproc/mtk_scp.c @@ -0,0 +1,797 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (c) 2019 MediaTek Inc. + +#include <asm/barrier.h> +#include <linux/clk.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/of_reserved_mem.h> +#include <linux/platform_device.h> +#include <linux/remoteproc.h> +#include <linux/remoteproc/mtk_scp.h> +#include <linux/rpmsg/mtk_rpmsg.h> + +#include "mtk_common.h" +#include "remoteproc_internal.h" + +#define MAX_CODE_SIZE 0x500000 +#define SCP_FW_END 0x7C000 + +/** + * scp_get() - get a reference to SCP. + * + * @pdev: the platform device of the module requesting SCP platform + * device for using SCP API. + * + * Return: Return NULL if failed. otherwise reference to SCP. + **/ +struct mtk_scp *scp_get(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *scp_node; + struct platform_device *scp_pdev; + + scp_node = of_parse_phandle(dev->of_node, "mediatek,scp", 0); + if (!scp_node) { + dev_err(dev, "can't get SCP node\n"); + return NULL; + } + + scp_pdev = of_find_device_by_node(scp_node); + of_node_put(scp_node); + + if (WARN_ON(!scp_pdev)) { + dev_err(dev, "SCP pdev failed\n"); + return NULL; + } + + return platform_get_drvdata(scp_pdev); +} +EXPORT_SYMBOL_GPL(scp_get); + +/** + * scp_put() - "free" the SCP + * + * @scp: mtk_scp structure from scp_get(). + **/ +void scp_put(struct mtk_scp *scp) +{ + put_device(scp->dev); +} +EXPORT_SYMBOL_GPL(scp_put); + +static void scp_wdt_handler(struct mtk_scp *scp, u32 scp_to_host) +{ + dev_err(scp->dev, "SCP watchdog timeout! 
0x%x", scp_to_host); + rproc_report_crash(scp->rproc, RPROC_WATCHDOG); +} + +static void scp_init_ipi_handler(void *data, unsigned int len, void *priv) +{ + struct mtk_scp *scp = (struct mtk_scp *)priv; + struct scp_run *run = (struct scp_run *)data; + + scp->run.signaled = run->signaled; + strscpy(scp->run.fw_ver, run->fw_ver, SCP_FW_VER_LEN); + scp->run.dec_capability = run->dec_capability; + scp->run.enc_capability = run->enc_capability; + wake_up_interruptible(&scp->run.wq); +} + +static void scp_ipi_handler(struct mtk_scp *scp) +{ + struct mtk_share_obj __iomem *rcv_obj = scp->recv_buf; + struct scp_ipi_desc *ipi_desc = scp->ipi_desc; + u8 tmp_data[SCP_SHARE_BUFFER_SIZE]; + scp_ipi_handler_t handler; + u32 id = readl(&rcv_obj->id); + u32 len = readl(&rcv_obj->len); + + if (len > SCP_SHARE_BUFFER_SIZE) { + dev_err(scp->dev, "ipi message too long (len %d, max %d)", len, + SCP_SHARE_BUFFER_SIZE); + return; + } + if (id >= SCP_IPI_MAX) { + dev_err(scp->dev, "No such ipi id = %d\n", id); + return; + } + + scp_ipi_lock(scp, id); + handler = ipi_desc[id].handler; + if (!handler) { + dev_err(scp->dev, "No such ipi id = %d\n", id); + scp_ipi_unlock(scp, id); + return; + } + + memcpy_fromio(tmp_data, &rcv_obj->share_buf, len); + handler(tmp_data, len, ipi_desc[id].priv); + scp_ipi_unlock(scp, id); + + scp->ipi_id_ack[id] = true; + wake_up(&scp->ack_wq); +} + +static int scp_ipi_init(struct mtk_scp *scp) +{ + size_t send_offset = SCP_FW_END - sizeof(struct mtk_share_obj); + size_t recv_offset = send_offset - sizeof(struct mtk_share_obj); + + /* shared buffer initialization */ + scp->recv_buf = + (struct mtk_share_obj __iomem *)(scp->sram_base + recv_offset); + scp->send_buf = + (struct mtk_share_obj __iomem *)(scp->sram_base + send_offset); + memset_io(scp->recv_buf, 0, sizeof(*scp->recv_buf)); + memset_io(scp->send_buf, 0, sizeof(*scp->send_buf)); + + return 0; +} + +static void mt8183_scp_reset_assert(struct mtk_scp *scp) +{ + u32 val; + + val = readl(scp->reg_base + MT8183_SW_RSTN); + val &= ~MT8183_SW_RSTN_BIT; + writel(val, scp->reg_base + MT8183_SW_RSTN); +} + +static void mt8183_scp_reset_deassert(struct mtk_scp *scp) +{ + u32 val; + + val = readl(scp->reg_base + MT8183_SW_RSTN); + val |= MT8183_SW_RSTN_BIT; + writel(val, scp->reg_base + MT8183_SW_RSTN); +} + +static void mt8192_scp_reset_assert(struct mtk_scp *scp) +{ + writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_SET); +} + +static void mt8192_scp_reset_deassert(struct mtk_scp *scp) +{ + writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_CLR); +} + +static void mt8183_scp_irq_handler(struct mtk_scp *scp) +{ + u32 scp_to_host; + + scp_to_host = readl(scp->reg_base + MT8183_SCP_TO_HOST); + if (scp_to_host & MT8183_SCP_IPC_INT_BIT) + scp_ipi_handler(scp); + else + scp_wdt_handler(scp, scp_to_host); + + /* SCP won't send another interrupt until we set SCP_TO_HOST to 0. */ + writel(MT8183_SCP_IPC_INT_BIT | MT8183_SCP_WDT_INT_BIT, + scp->reg_base + MT8183_SCP_TO_HOST); +} + +static void mt8192_scp_irq_handler(struct mtk_scp *scp) +{ + u32 scp_to_host; + + scp_to_host = readl(scp->reg_base + MT8192_SCP2APMCU_IPC_SET); + + if (scp_to_host & MT8192_SCP_IPC_INT_BIT) { + scp_ipi_handler(scp); + + /* + * SCP won't send another interrupt until we clear + * MT8192_SCP2APMCU_IPC. 
+ */ + writel(MT8192_SCP_IPC_INT_BIT, + scp->reg_base + MT8192_SCP2APMCU_IPC_CLR); + } else { + scp_wdt_handler(scp, scp_to_host); + writel(1, scp->reg_base + MT8192_CORE0_WDT_IRQ); + } +} + +static irqreturn_t scp_irq_handler(int irq, void *priv) +{ + struct mtk_scp *scp = priv; + int ret; + + ret = clk_prepare_enable(scp->clk); + if (ret) { + dev_err(scp->dev, "failed to enable clocks\n"); + return IRQ_NONE; + } + + scp->data->scp_irq_handler(scp); + + clk_disable_unprepare(scp->clk); + + return IRQ_HANDLED; +} + +static int scp_elf_load_segments(struct rproc *rproc, const struct firmware *fw) +{ + struct device *dev = &rproc->dev; + struct elf32_hdr *ehdr; + struct elf32_phdr *phdr; + int i, ret = 0; + const u8 *elf_data = fw->data; + + ehdr = (struct elf32_hdr *)elf_data; + phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff); + + /* go through the available ELF segments */ + for (i = 0; i < ehdr->e_phnum; i++, phdr++) { + u32 da = phdr->p_paddr; + u32 memsz = phdr->p_memsz; + u32 filesz = phdr->p_filesz; + u32 offset = phdr->p_offset; + void __iomem *ptr; + + if (phdr->p_type != PT_LOAD) + continue; + + dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n", + phdr->p_type, da, memsz, filesz); + + if (filesz > memsz) { + dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n", + filesz, memsz); + ret = -EINVAL; + break; + } + + if (offset + filesz > fw->size) { + dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n", + offset + filesz, fw->size); + ret = -EINVAL; + break; + } + + /* grab the kernel address for this device address */ + ptr = (void __iomem *)rproc_da_to_va(rproc, da, memsz); + if (!ptr) { + dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz); + ret = -EINVAL; + break; + } + + /* put the segment where the remote processor expects it */ + if (phdr->p_filesz) + scp_memcpy_aligned(ptr, elf_data + phdr->p_offset, + filesz); + } + + return ret; +} + +static int mt8183_scp_before_load(struct mtk_scp *scp) +{ + /* Clear SCP to host interrupt */ + writel(MT8183_SCP_IPC_INT_BIT, scp->reg_base + MT8183_SCP_TO_HOST); + + /* Reset clocks before loading FW */ + writel(0x0, scp->reg_base + MT8183_SCP_CLK_SW_SEL); + writel(0x0, scp->reg_base + MT8183_SCP_CLK_DIV_SEL); + + /* Initialize TCM before loading FW. */ + writel(0x0, scp->reg_base + MT8183_SCP_L1_SRAM_PD); + writel(0x0, scp->reg_base + MT8183_SCP_TCM_TAIL_SRAM_PD); + + /* Turn on the power of SCP's SRAM before using it. */ + writel(0x0, scp->reg_base + MT8183_SCP_SRAM_PDN); + + /* + * Set I-cache and D-cache size before loading SCP FW. + * SCP SRAM logical address may change when cache size setting differs. 
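+ * Both caches are configured to 8KB here, with the way-enable
+ * bit set only on the instruction cache; the firmware being
+ * loaded is presumably linked against the SRAM layout that
+ * results from exactly this setting.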
+ */ + writel(MT8183_SCP_CACHE_CON_WAYEN | MT8183_SCP_CACHESIZE_8KB, + scp->reg_base + MT8183_SCP_CACHE_CON); + writel(MT8183_SCP_CACHESIZE_8KB, scp->reg_base + MT8183_SCP_DCACHE_CON); + + return 0; +} + +static void mt8192_power_on_sram(void *addr) +{ + int i; + + for (i = 31; i >= 0; i--) + writel(GENMASK(i, 0), addr); + writel(0, addr); +} + +static void mt8192_power_off_sram(void *addr) +{ + int i; + + writel(0, addr); + for (i = 0; i < 32; i++) + writel(GENMASK(i, 0), addr); +} + +static int mt8192_scp_before_load(struct mtk_scp *scp) +{ + /* clear SPM interrupt, SCP2SPM_IPC_CLR */ + writel(0xff, scp->reg_base + MT8192_SCP2SPM_IPC_CLR); + + writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_SET); + + /* enable SRAM clock */ + mt8192_power_on_sram(scp->reg_base + MT8192_L2TCM_SRAM_PD_0); + mt8192_power_on_sram(scp->reg_base + MT8192_L2TCM_SRAM_PD_1); + mt8192_power_on_sram(scp->reg_base + MT8192_L2TCM_SRAM_PD_2); + mt8192_power_on_sram(scp->reg_base + MT8192_L1TCM_SRAM_PDN); + mt8192_power_on_sram(scp->reg_base + MT8192_CPU0_SRAM_PD); + + return 0; +} + +static int scp_load(struct rproc *rproc, const struct firmware *fw) +{ + struct mtk_scp *scp = rproc->priv; + struct device *dev = scp->dev; + int ret; + + ret = clk_prepare_enable(scp->clk); + if (ret) { + dev_err(dev, "failed to enable clocks\n"); + return ret; + } + + /* Hold SCP in reset while loading FW. */ + scp->data->scp_reset_assert(scp); + + ret = scp->data->scp_before_load(scp); + if (ret < 0) + goto leave; + + ret = scp_elf_load_segments(rproc, fw); +leave: + clk_disable_unprepare(scp->clk); + + return ret; +} + +static int scp_start(struct rproc *rproc) +{ + struct mtk_scp *scp = (struct mtk_scp *)rproc->priv; + struct device *dev = scp->dev; + struct scp_run *run = &scp->run; + int ret; + + ret = clk_prepare_enable(scp->clk); + if (ret) { + dev_err(dev, "failed to enable clocks\n"); + return ret; + } + + run->signaled = false; + + scp->data->scp_reset_deassert(scp); + + ret = wait_event_interruptible_timeout( + run->wq, + run->signaled, + msecs_to_jiffies(2000)); + + if (ret == 0) { + dev_err(dev, "wait SCP initialization timeout!\n"); + ret = -ETIME; + goto stop; + } + if (ret == -ERESTARTSYS) { + dev_err(dev, "wait SCP interrupted by a signal!\n"); + goto stop; + } + + clk_disable_unprepare(scp->clk); + dev_info(dev, "SCP is ready. 
FW version %s\n", run->fw_ver); + + return 0; + +stop: + scp->data->scp_reset_assert(scp); + clk_disable_unprepare(scp->clk); + return ret; +} + +static void *scp_da_to_va(struct rproc *rproc, u64 da, size_t len) +{ + struct mtk_scp *scp = (struct mtk_scp *)rproc->priv; + int offset; + + if (da < scp->sram_size) { + offset = da; + if (offset >= 0 && (offset + len) < scp->sram_size) + return (void __force *)scp->sram_base + offset; + } else if (scp->dram_size) { + offset = da - scp->dma_addr; + if (offset >= 0 && (offset + len) < scp->dram_size) + return (void __force *)scp->cpu_addr + offset; + } + + return NULL; +} + +static void mt8183_scp_stop(struct mtk_scp *scp) +{ + /* Disable SCP watchdog */ + writel(0, scp->reg_base + MT8183_WDT_CFG); +} + +static void mt8192_scp_stop(struct mtk_scp *scp) +{ + /* Disable SRAM clock */ + mt8192_power_off_sram(scp->reg_base + MT8192_L2TCM_SRAM_PD_0); + mt8192_power_off_sram(scp->reg_base + MT8192_L2TCM_SRAM_PD_1); + mt8192_power_off_sram(scp->reg_base + MT8192_L2TCM_SRAM_PD_2); + mt8192_power_off_sram(scp->reg_base + MT8192_L1TCM_SRAM_PDN); + mt8192_power_off_sram(scp->reg_base + MT8192_CPU0_SRAM_PD); + + /* Disable SCP watchdog */ + writel(0, scp->reg_base + MT8192_CORE0_WDT_CFG); +} + +static int scp_stop(struct rproc *rproc) +{ + struct mtk_scp *scp = (struct mtk_scp *)rproc->priv; + int ret; + + ret = clk_prepare_enable(scp->clk); + if (ret) { + dev_err(scp->dev, "failed to enable clocks\n"); + return ret; + } + + scp->data->scp_reset_assert(scp); + scp->data->scp_stop(scp); + clk_disable_unprepare(scp->clk); + + return 0; +} + +static const struct rproc_ops scp_ops = { + .start = scp_start, + .stop = scp_stop, + .load = scp_load, + .da_to_va = scp_da_to_va, +}; + +/** + * scp_get_device() - get device struct of SCP + * + * @scp: mtk_scp structure + **/ +struct device *scp_get_device(struct mtk_scp *scp) +{ + return scp->dev; +} +EXPORT_SYMBOL_GPL(scp_get_device); + +/** + * scp_get_rproc() - get rproc struct of SCP + * + * @scp: mtk_scp structure + **/ +struct rproc *scp_get_rproc(struct mtk_scp *scp) +{ + return scp->rproc; +} +EXPORT_SYMBOL_GPL(scp_get_rproc); + +/** + * scp_get_vdec_hw_capa() - get video decoder hardware capability + * + * @scp: mtk_scp structure + * + * Return: video decoder hardware capability + **/ +unsigned int scp_get_vdec_hw_capa(struct mtk_scp *scp) +{ + return scp->run.dec_capability; +} +EXPORT_SYMBOL_GPL(scp_get_vdec_hw_capa); + +/** + * scp_get_venc_hw_capa() - get video encoder hardware capability + * + * @scp: mtk_scp structure + * + * Return: video encoder hardware capability + **/ +unsigned int scp_get_venc_hw_capa(struct mtk_scp *scp) +{ + return scp->run.enc_capability; +} +EXPORT_SYMBOL_GPL(scp_get_venc_hw_capa); + +/** + * scp_mapping_dm_addr() - Mapping SRAM/DRAM to kernel virtual address + * + * @scp: mtk_scp structure + * @mem_addr: SCP views memory address + * + * Mapping the SCP's SRAM address / + * DMEM (Data Extended Memory) memory address / + * Working buffer memory address to + * kernel virtual address. + * + * Return: Return ERR_PTR(-EINVAL) if mapping failed, + * otherwise the mapped kernel virtual address + **/ +void *scp_mapping_dm_addr(struct mtk_scp *scp, u32 mem_addr) +{ + void *ptr; + + ptr = scp_da_to_va(scp->rproc, mem_addr, 0); + if (!ptr) + return ERR_PTR(-EINVAL); + + return ptr; +} +EXPORT_SYMBOL_GPL(scp_mapping_dm_addr); + +static int scp_map_memory_region(struct mtk_scp *scp) +{ + int ret; + + ret = of_reserved_mem_device_init(scp->dev); + + /* reserved memory is optional. 
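+ * of_reserved_mem_device_init() returns -ENODEV when the SCP
+ * node has no "memory-region" phandle; that case is deliberately
+ * not treated as an error.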
*/ + if (ret == -ENODEV) { + dev_info(scp->dev, "skipping reserved memory initialization."); + return 0; + } + + if (ret) { + dev_err(scp->dev, "failed to assign memory-region: %d\n", ret); + return -ENOMEM; + } + + /* Reserved SCP code size */ + scp->dram_size = MAX_CODE_SIZE; + scp->cpu_addr = dma_alloc_coherent(scp->dev, scp->dram_size, + &scp->dma_addr, GFP_KERNEL); + if (!scp->cpu_addr) + return -ENOMEM; + + return 0; +} + +static void scp_unmap_memory_region(struct mtk_scp *scp) +{ + if (scp->dram_size == 0) + return; + + dma_free_coherent(scp->dev, scp->dram_size, scp->cpu_addr, + scp->dma_addr); + of_reserved_mem_device_release(scp->dev); +} + +static int scp_register_ipi(struct platform_device *pdev, u32 id, + ipi_handler_t handler, void *priv) +{ + struct mtk_scp *scp = platform_get_drvdata(pdev); + + return scp_ipi_register(scp, id, handler, priv); +} + +static void scp_unregister_ipi(struct platform_device *pdev, u32 id) +{ + struct mtk_scp *scp = platform_get_drvdata(pdev); + + scp_ipi_unregister(scp, id); +} + +static int scp_send_ipi(struct platform_device *pdev, u32 id, void *buf, + unsigned int len, unsigned int wait) +{ + struct mtk_scp *scp = platform_get_drvdata(pdev); + + return scp_ipi_send(scp, id, buf, len, wait); +} + +static struct mtk_rpmsg_info mtk_scp_rpmsg_info = { + .send_ipi = scp_send_ipi, + .register_ipi = scp_register_ipi, + .unregister_ipi = scp_unregister_ipi, + .ns_ipi_id = SCP_IPI_NS_SERVICE, +}; + +static void scp_add_rpmsg_subdev(struct mtk_scp *scp) +{ + scp->rpmsg_subdev = + mtk_rpmsg_create_rproc_subdev(to_platform_device(scp->dev), + &mtk_scp_rpmsg_info); + if (scp->rpmsg_subdev) + rproc_add_subdev(scp->rproc, scp->rpmsg_subdev); +} + +static void scp_remove_rpmsg_subdev(struct mtk_scp *scp) +{ + if (scp->rpmsg_subdev) { + rproc_remove_subdev(scp->rproc, scp->rpmsg_subdev); + mtk_rpmsg_destroy_rproc_subdev(scp->rpmsg_subdev); + scp->rpmsg_subdev = NULL; + } +} + +static int scp_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct mtk_scp *scp; + struct rproc *rproc; + struct resource *res; + char *fw_name = "scp.img"; + int ret, i; + + rproc = rproc_alloc(dev, + np->name, + &scp_ops, + fw_name, + sizeof(*scp)); + if (!rproc) { + dev_err(dev, "unable to allocate remoteproc\n"); + return -ENOMEM; + } + + scp = (struct mtk_scp *)rproc->priv; + scp->rproc = rproc; + scp->dev = dev; + scp->data = of_device_get_match_data(dev); + platform_set_drvdata(pdev, scp); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram"); + scp->sram_base = devm_ioremap_resource(dev, res); + if (IS_ERR((__force void *)scp->sram_base)) { + dev_err(dev, "Failed to parse and map sram memory\n"); + ret = PTR_ERR((__force void *)scp->sram_base); + goto free_rproc; + } + scp->sram_size = resource_size(res); + + mutex_init(&scp->send_lock); + for (i = 0; i < SCP_IPI_MAX; i++) + mutex_init(&scp->ipi_desc[i].lock); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg"); + scp->reg_base = devm_ioremap_resource(dev, res); + if (IS_ERR((__force void *)scp->reg_base)) { + dev_err(dev, "Failed to parse and map cfg memory\n"); + ret = PTR_ERR((__force void *)scp->reg_base); + goto destroy_mutex; + } + + ret = scp_map_memory_region(scp); + if (ret) + goto destroy_mutex; + + scp->clk = devm_clk_get(dev, "main"); + if (IS_ERR(scp->clk)) { + dev_err(dev, "Failed to get clock\n"); + ret = PTR_ERR(scp->clk); + goto release_dev_mem; + } + + ret = clk_prepare_enable(scp->clk); + if (ret) { + 
dev_err(dev, "failed to enable clocks\n"); + goto release_dev_mem; + } + + ret = scp_ipi_init(scp); + clk_disable_unprepare(scp->clk); + if (ret) { + dev_err(dev, "Failed to init ipi\n"); + goto release_dev_mem; + } + + /* register SCP initialization IPI */ + ret = scp_ipi_register(scp, SCP_IPI_INIT, scp_init_ipi_handler, scp); + if (ret) { + dev_err(dev, "Failed to register IPI_SCP_INIT\n"); + goto release_dev_mem; + } + + init_waitqueue_head(&scp->run.wq); + init_waitqueue_head(&scp->ack_wq); + + scp_add_rpmsg_subdev(scp); + + ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0), NULL, + scp_irq_handler, IRQF_ONESHOT, + pdev->name, scp); + + if (ret) { + dev_err(dev, "failed to request irq\n"); + goto remove_subdev; + } + + ret = rproc_add(rproc); + if (ret) + goto remove_subdev; + + return 0; + +remove_subdev: + scp_remove_rpmsg_subdev(scp); + scp_ipi_unregister(scp, SCP_IPI_INIT); +release_dev_mem: + scp_unmap_memory_region(scp); +destroy_mutex: + for (i = 0; i < SCP_IPI_MAX; i++) + mutex_destroy(&scp->ipi_desc[i].lock); + mutex_destroy(&scp->send_lock); +free_rproc: + rproc_free(rproc); + + return ret; +} + +static int scp_remove(struct platform_device *pdev) +{ + struct mtk_scp *scp = platform_get_drvdata(pdev); + int i; + + rproc_del(scp->rproc); + scp_remove_rpmsg_subdev(scp); + scp_ipi_unregister(scp, SCP_IPI_INIT); + scp_unmap_memory_region(scp); + for (i = 0; i < SCP_IPI_MAX; i++) + mutex_destroy(&scp->ipi_desc[i].lock); + mutex_destroy(&scp->send_lock); + rproc_free(scp->rproc); + + return 0; +} + +static const struct mtk_scp_of_data mt8183_of_data = { + .scp_before_load = mt8183_scp_before_load, + .scp_irq_handler = mt8183_scp_irq_handler, + .scp_reset_assert = mt8183_scp_reset_assert, + .scp_reset_deassert = mt8183_scp_reset_deassert, + .scp_stop = mt8183_scp_stop, + .host_to_scp_reg = MT8183_HOST_TO_SCP, + .host_to_scp_int_bit = MT8183_HOST_IPC_INT_BIT, +}; + +static const struct mtk_scp_of_data mt8192_of_data = { + .scp_before_load = mt8192_scp_before_load, + .scp_irq_handler = mt8192_scp_irq_handler, + .scp_reset_assert = mt8192_scp_reset_assert, + .scp_reset_deassert = mt8192_scp_reset_deassert, + .scp_stop = mt8192_scp_stop, + .host_to_scp_reg = MT8192_GIPC_IN_SET, + .host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT, +}; + +static const struct of_device_id mtk_scp_of_match[] = { + { .compatible = "mediatek,mt8183-scp", .data = &mt8183_of_data }, + { .compatible = "mediatek,mt8192-scp", .data = &mt8192_of_data }, + {}, +}; +MODULE_DEVICE_TABLE(of, mtk_scp_of_match); + +static struct platform_driver mtk_scp_driver = { + .probe = scp_probe, + .remove = scp_remove, + .driver = { + .name = "mtk-scp", + .of_match_table = mtk_scp_of_match, + }, +}; + +module_platform_driver(mtk_scp_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("MediaTek SCP control driver"); diff --git a/drivers/remoteproc/mtk_scp_ipi.c b/drivers/remoteproc/mtk_scp_ipi.c new file mode 100644 index 000000000..968128b78 --- /dev/null +++ b/drivers/remoteproc/mtk_scp_ipi.c @@ -0,0 +1,217 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (c) 2019 MediaTek Inc. 
+ +#include <asm/barrier.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/remoteproc/mtk_scp.h> + +#include "mtk_common.h" + +/** + * scp_ipi_register() - register an ipi function + * + * @scp: mtk_scp structure + * @id: IPI ID + * @handler: IPI handler + * @priv: private data for IPI handler + * + * Register an ipi function to receive ipi interrupts from SCP. + * + * Returns 0 if the ipi is registered successfully, -error on error. + */ +int scp_ipi_register(struct mtk_scp *scp, + u32 id, + scp_ipi_handler_t handler, + void *priv) +{ + if (!scp) + return -EPROBE_DEFER; + + if (WARN_ON(id >= SCP_IPI_MAX) || WARN_ON(handler == NULL)) + return -EINVAL; + + scp_ipi_lock(scp, id); + scp->ipi_desc[id].handler = handler; + scp->ipi_desc[id].priv = priv; + scp_ipi_unlock(scp, id); + + return 0; +} +EXPORT_SYMBOL_GPL(scp_ipi_register); + +/** + * scp_ipi_unregister() - unregister an ipi function + * + * @scp: mtk_scp structure + * @id: IPI ID + * + * Unregister an ipi function so that it no longer receives ipi interrupts + * from SCP. + */ +void scp_ipi_unregister(struct mtk_scp *scp, u32 id) +{ + if (!scp) + return; + + if (WARN_ON(id >= SCP_IPI_MAX)) + return; + + scp_ipi_lock(scp, id); + scp->ipi_desc[id].handler = NULL; + scp->ipi_desc[id].priv = NULL; + scp_ipi_unlock(scp, id); +} +EXPORT_SYMBOL_GPL(scp_ipi_unregister); + +/* + * scp_memcpy_aligned() - Copy src to dst, where dst is in SCP SRAM region. + * + * @dst: Pointer to the destination buffer, should be in SCP SRAM region. + * @src: Pointer to the source buffer. + * @len: Length of the source buffer to be copied. + * + * Since AP access to SCP SRAM does not support byte writes, this always + * writes a full word at a time, and may cause some extra bytes to be + * written at the beginning and end of dst. + */ +void scp_memcpy_aligned(void __iomem *dst, const void *src, unsigned int len) +{ + void __iomem *ptr; + u32 val; + unsigned int i = 0, remain; + + if (!IS_ALIGNED((unsigned long)dst, 4)) { + ptr = (void __iomem *)ALIGN_DOWN((unsigned long)dst, 4); + i = 4 - (dst - ptr); + val = readl_relaxed(ptr); + memcpy((u8 *)&val + (4 - i), src, i); + writel_relaxed(val, ptr); + } + + __iowrite32_copy(dst + i, src + i, (len - i) / 4); + remain = (len - i) % 4; + + if (remain > 0) { + val = readl_relaxed(dst + len - remain); + memcpy(&val, src + len - remain, remain); + writel_relaxed(val, dst + len - remain); + } +} +EXPORT_SYMBOL_GPL(scp_memcpy_aligned); + +/** + * scp_ipi_lock() - Lock before operations of an IPI ID + * + * @scp: mtk_scp structure + * @id: IPI ID + * + * Note: This should not be used by drivers other than mtk_scp. + */ +void scp_ipi_lock(struct mtk_scp *scp, u32 id) +{ + if (WARN_ON(id >= SCP_IPI_MAX)) + return; + mutex_lock(&scp->ipi_desc[id].lock); +} +EXPORT_SYMBOL_GPL(scp_ipi_lock); + +/** + * scp_ipi_unlock() - Unlock after operations of an IPI ID + * + * @scp: mtk_scp structure + * @id: IPI ID + * + * Note: This should not be used by drivers other than mtk_scp. + */ +void scp_ipi_unlock(struct mtk_scp *scp, u32 id) +{ + if (WARN_ON(id >= SCP_IPI_MAX)) + return; + mutex_unlock(&scp->ipi_desc[id].lock); +} +EXPORT_SYMBOL_GPL(scp_ipi_unlock); + +/** + * scp_ipi_send() - send data from AP to scp. + * + * @scp: mtk_scp structure + * @id: IPI ID + * @buf: the data buffer + * @len: the data buffer length + * @wait: number of msecs to wait for ack. 0 to skip waiting. + * + * This function is thread-safe. 
When this function returns, + * SCP has received the data and starts the processing. + * When the processing completes, IPI handler registered + * by scp_ipi_register will be called in interrupt context. + * + * Returns 0 if sending data successfully, -error on error. + **/ +int scp_ipi_send(struct mtk_scp *scp, u32 id, void *buf, unsigned int len, + unsigned int wait) +{ + struct mtk_share_obj __iomem *send_obj = scp->send_buf; + unsigned long timeout; + int ret; + + if (WARN_ON(id <= SCP_IPI_INIT) || WARN_ON(id >= SCP_IPI_MAX) || + WARN_ON(id == SCP_IPI_NS_SERVICE) || + WARN_ON(len > sizeof(send_obj->share_buf)) || WARN_ON(!buf)) + return -EINVAL; + + ret = clk_prepare_enable(scp->clk); + if (ret) { + dev_err(scp->dev, "failed to enable clock\n"); + return ret; + } + + mutex_lock(&scp->send_lock); + + /* Wait until SCP receives the last command */ + timeout = jiffies + msecs_to_jiffies(2000); + do { + if (time_after(jiffies, timeout)) { + dev_err(scp->dev, "%s: IPI timeout!\n", __func__); + ret = -ETIMEDOUT; + goto unlock_mutex; + } + } while (readl(scp->reg_base + scp->data->host_to_scp_reg)); + + scp_memcpy_aligned(send_obj->share_buf, buf, len); + + writel(len, &send_obj->len); + writel(id, &send_obj->id); + + scp->ipi_id_ack[id] = false; + /* send the command to SCP */ + writel(scp->data->host_to_scp_int_bit, + scp->reg_base + scp->data->host_to_scp_reg); + + if (wait) { + /* wait for SCP's ACK */ + timeout = msecs_to_jiffies(wait); + ret = wait_event_timeout(scp->ack_wq, + scp->ipi_id_ack[id], + timeout); + scp->ipi_id_ack[id] = false; + if (WARN(!ret, "scp ipi %d ack time out !", id)) + ret = -EIO; + else + ret = 0; + } + +unlock_mutex: + mutex_unlock(&scp->send_lock); + clk_disable_unprepare(scp->clk); + + return ret; +} +EXPORT_SYMBOL_GPL(scp_ipi_send); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("MediaTek scp IPI interface"); diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c new file mode 100644 index 000000000..d94b7391b --- /dev/null +++ b/drivers/remoteproc/omap_remoteproc.c @@ -0,0 +1,1398 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * OMAP Remote Processor driver + * + * Copyright (C) 2011-2020 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Google, Inc. 
+ * + * Ohad Ben-Cohen <ohad@wizery.com> + * Brian Swetland <swetland@google.com> + * Fernando Guzman Lugo <fernando.lugo@ti.com> + * Mark Grosen <mgrosen@ti.com> + * Suman Anna <s-anna@ti.com> + * Hari Kanigeri <h-kanigeri2@ti.com> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/clk.h> +#include <linux/clk/ti.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/of_device.h> +#include <linux/of_reserved_mem.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/remoteproc.h> +#include <linux/mailbox_client.h> +#include <linux/omap-iommu.h> +#include <linux/omap-mailbox.h> +#include <linux/regmap.h> +#include <linux/mfd/syscon.h> +#include <linux/reset.h> +#include <clocksource/timer-ti-dm.h> + +#include <linux/platform_data/dmtimer-omap.h> + +#include "omap_remoteproc.h" +#include "remoteproc_internal.h" + +/* default auto-suspend delay (ms) */ +#define DEFAULT_AUTOSUSPEND_DELAY 10000 + +/** + * struct omap_rproc_boot_data - boot data structure for the DSP omap rprocs + * @syscon: regmap handle for the system control configuration module + * @boot_reg: boot register offset within the @syscon regmap + * @boot_reg_shift: bit-field shift required for the boot address value in + * @boot_reg + */ +struct omap_rproc_boot_data { + struct regmap *syscon; + unsigned int boot_reg; + unsigned int boot_reg_shift; +}; + +/** + * struct omap_rproc_mem - internal memory structure + * @cpu_addr: MPU virtual address of the memory region + * @bus_addr: bus address used to access the memory region + * @dev_addr: device address of the memory region from DSP view + * @size: size of the memory region + */ +struct omap_rproc_mem { + void __iomem *cpu_addr; + phys_addr_t bus_addr; + u32 dev_addr; + size_t size; +}; + +/** + * struct omap_rproc_timer - data structure for a timer used by a omap rproc + * @odt: timer pointer + * @timer_ops: OMAP dmtimer ops for @odt timer + * @irq: timer irq + */ +struct omap_rproc_timer { + struct omap_dm_timer *odt; + const struct omap_dm_timer_ops *timer_ops; + int irq; +}; + +/** + * struct omap_rproc - omap remote processor state + * @mbox: mailbox channel handle + * @client: mailbox client to request the mailbox channel + * @boot_data: boot data structure for setting processor boot address + * @mem: internal memory regions data + * @num_mems: number of internal memory regions + * @num_timers: number of rproc timer(s) + * @num_wd_timers: number of rproc watchdog timers + * @timers: timer(s) info used by rproc + * @autosuspend_delay: auto-suspend delay value to be used for runtime pm + * @need_resume: if true a resume is needed in the system resume callback + * @rproc: rproc handle + * @reset: reset handle + * @pm_comp: completion primitive to sync for suspend response + * @fck: functional clock for the remoteproc + * @suspend_acked: state machine flag to store the suspend request ack + */ +struct omap_rproc { + struct mbox_chan *mbox; + struct mbox_client client; + struct omap_rproc_boot_data *boot_data; + struct omap_rproc_mem *mem; + int num_mems; + int num_timers; + int num_wd_timers; + struct omap_rproc_timer *timers; + int autosuspend_delay; + bool need_resume; + struct rproc *rproc; + struct reset_control *reset; + struct completion pm_comp; + struct clk *fck; + bool suspend_acked; +}; + +/** + * struct omap_rproc_mem_data - memory definitions for an omap remote processor + * @name: name for this memory entry + * @dev_addr: device 
address for the memory entry + */ +struct omap_rproc_mem_data { + const char *name; + const u32 dev_addr; +}; + +/** + * struct omap_rproc_dev_data - device data for the omap remote processor + * @device_name: device name of the remote processor + * @mems: memory definitions for this remote processor + */ +struct omap_rproc_dev_data { + const char *device_name; + const struct omap_rproc_mem_data *mems; +}; + +/** + * omap_rproc_request_timer() - request a timer for a remoteproc + * @dev: device requesting the timer + * @np: device node pointer to the desired timer + * @timer: handle to a struct omap_rproc_timer to return the timer handle + * + * This helper function is used primarily to request a timer associated with + * a remoteproc. The returned handle is stored in the .odt field of the + * @timer structure passed in, and is used to invoke other timer specific + * ops (like starting a timer either during device initialization or during + * a resume operation, or for stopping/freeing a timer). + * + * Return: 0 on success, otherwise an appropriate failure + */ +static int omap_rproc_request_timer(struct device *dev, struct device_node *np, + struct omap_rproc_timer *timer) +{ + int ret; + + timer->odt = timer->timer_ops->request_by_node(np); + if (!timer->odt) { + dev_err(dev, "request for timer node %p failed\n", np); + return -EBUSY; + } + + ret = timer->timer_ops->set_source(timer->odt, OMAP_TIMER_SRC_SYS_CLK); + if (ret) { + dev_err(dev, "error setting OMAP_TIMER_SRC_SYS_CLK as source for timer node %p\n", + np); + timer->timer_ops->free(timer->odt); + return ret; + } + + /* clean counter, remoteproc code will set the value */ + timer->timer_ops->set_load(timer->odt, 0); + + return 0; +} + +/** + * omap_rproc_start_timer() - start a timer for a remoteproc + * @timer: handle to a OMAP rproc timer + * + * This helper function is used to start a timer associated with a remoteproc, + * obtained using the request_timer ops. The helper function needs to be + * invoked by the driver to start the timer (during device initialization) + * or to just resume the timer. + * + * Return: 0 on success, otherwise a failure as returned by DMTimer ops + */ +static inline int omap_rproc_start_timer(struct omap_rproc_timer *timer) +{ + return timer->timer_ops->start(timer->odt); +} + +/** + * omap_rproc_stop_timer() - stop a timer for a remoteproc + * @timer: handle to a OMAP rproc timer + * + * This helper function is used to disable a timer associated with a + * remoteproc, and needs to be called either during a device shutdown + * or suspend operation. The separate helper function allows the driver + * to just stop a timer without having to release the timer during a + * suspend operation. + * + * Return: 0 on success, otherwise a failure as returned by DMTimer ops + */ +static inline int omap_rproc_stop_timer(struct omap_rproc_timer *timer) +{ + return timer->timer_ops->stop(timer->odt); +} + +/** + * omap_rproc_release_timer() - release a timer for a remoteproc + * @timer: handle to a OMAP rproc timer + * + * This helper function is used primarily to release a timer associated + * with a remoteproc. The dmtimer will be available for other clients to + * use once released. 
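+ * Together with the request/start/stop helpers above, this completes
+ * the timer lifecycle used by the enable/disable paths below:
+ * request+start at boot, stop on suspend, start on resume, and
+ * stop+release on shutdown.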
+ * + * Return: 0 on success, otherwise a failure as returned by DMTimer ops + */ +static inline int omap_rproc_release_timer(struct omap_rproc_timer *timer) +{ + return timer->timer_ops->free(timer->odt); +} + +/** + * omap_rproc_get_timer_irq() - get the irq for a timer + * @timer: handle to a OMAP rproc timer + * + * This function is used to get the irq associated with a watchdog timer. The + * function is called by the OMAP remoteproc driver to register an interrupt + * handler to handle watchdog events on the remote processor. + * + * Return: irq id on success, otherwise a failure as returned by DMTimer ops + */ +static inline int omap_rproc_get_timer_irq(struct omap_rproc_timer *timer) +{ + return timer->timer_ops->get_irq(timer->odt); +} + +/** + * omap_rproc_ack_timer_irq() - acknowledge a timer irq + * @timer: handle to a OMAP rproc timer + * + * This function is used to clear the irq associated with a watchdog timer. + * The function is called by the OMAP remoteproc upon a watchdog event on the + * remote processor to clear the interrupt status of the watchdog timer. + */ +static inline void omap_rproc_ack_timer_irq(struct omap_rproc_timer *timer) +{ + timer->timer_ops->write_status(timer->odt, OMAP_TIMER_INT_OVERFLOW); +} + +/** + * omap_rproc_watchdog_isr() - Watchdog ISR handler for remoteproc device + * @irq: IRQ number associated with a watchdog timer + * @data: IRQ handler data + * + * This ISR routine executes the necessary low-level code to + * acknowledge a watchdog timer interrupt. There can be multiple watchdog + * timers associated with a rproc (like IPUs which have 2 watchdog timers, + * one per Cortex M3/M4 core), so a lookup has to be performed to identify + * the timer to acknowledge its interrupt. + * + * The function also invokes rproc_report_crash to report the watchdog event + * to the remoteproc driver core, to trigger a recovery. + * + * Return: IRQ_HANDLED on success, otherwise IRQ_NONE + */ +static irqreturn_t omap_rproc_watchdog_isr(int irq, void *data) +{ + struct rproc *rproc = data; + struct omap_rproc *oproc = rproc->priv; + struct device *dev = rproc->dev.parent; + struct omap_rproc_timer *timers = oproc->timers; + struct omap_rproc_timer *wd_timer = NULL; + int num_timers = oproc->num_timers + oproc->num_wd_timers; + int i; + + for (i = oproc->num_timers; i < num_timers; i++) { + if (timers[i].irq > 0 && irq == timers[i].irq) { + wd_timer = &timers[i]; + break; + } + } + + if (!wd_timer) { + dev_err(dev, "invalid timer\n"); + return IRQ_NONE; + } + + omap_rproc_ack_timer_irq(wd_timer); + + rproc_report_crash(rproc, RPROC_WATCHDOG); + + return IRQ_HANDLED; +} + +/** + * omap_rproc_enable_timers() - enable the timers for a remoteproc + * @rproc: handle of a remote processor + * @configure: boolean flag used to acquire and configure the timer handle + * + * This function is used primarily to enable the timers associated with + * a remoteproc. The configure flag is provided to allow the driver + * to either acquire and start a timer (during device initialization) or + * to just start a timer (during a resume operation). 
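+ * Timer handles come from the "ti,timers" and "ti,watchdog-timers"
+ * phandle lists; entries beyond num_timers are watchdog timers, which
+ * additionally get their interrupt requested and routed to
+ * omap_rproc_watchdog_isr().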
+ * + * Return: 0 on success, otherwise an appropriate failure + */ +static int omap_rproc_enable_timers(struct rproc *rproc, bool configure) +{ + int i; + int ret = 0; + struct platform_device *tpdev; + struct dmtimer_platform_data *tpdata; + const struct omap_dm_timer_ops *timer_ops; + struct omap_rproc *oproc = rproc->priv; + struct omap_rproc_timer *timers = oproc->timers; + struct device *dev = rproc->dev.parent; + struct device_node *np = NULL; + int num_timers = oproc->num_timers + oproc->num_wd_timers; + + if (!num_timers) + return 0; + + if (!configure) + goto start_timers; + + for (i = 0; i < num_timers; i++) { + if (i < oproc->num_timers) + np = of_parse_phandle(dev->of_node, "ti,timers", i); + else + np = of_parse_phandle(dev->of_node, + "ti,watchdog-timers", + (i - oproc->num_timers)); + if (!np) { + ret = -ENXIO; + dev_err(dev, "device node lookup for timer at index %d failed: %d\n", + i < oproc->num_timers ? i : + i - oproc->num_timers, ret); + goto free_timers; + } + + tpdev = of_find_device_by_node(np); + if (!tpdev) { + ret = -ENODEV; + dev_err(dev, "could not get timer platform device\n"); + goto put_node; + } + + tpdata = dev_get_platdata(&tpdev->dev); + put_device(&tpdev->dev); + if (!tpdata) { + ret = -EINVAL; + dev_err(dev, "dmtimer pdata structure NULL\n"); + goto put_node; + } + + timer_ops = tpdata->timer_ops; + if (!timer_ops || !timer_ops->request_by_node || + !timer_ops->set_source || !timer_ops->set_load || + !timer_ops->free || !timer_ops->start || + !timer_ops->stop || !timer_ops->get_irq || + !timer_ops->write_status) { + ret = -EINVAL; + dev_err(dev, "device does not have required timer ops\n"); + goto put_node; + } + + timers[i].irq = -1; + timers[i].timer_ops = timer_ops; + ret = omap_rproc_request_timer(dev, np, &timers[i]); + if (ret) { + dev_err(dev, "request for timer %p failed: %d\n", np, + ret); + goto put_node; + } + of_node_put(np); + + if (i >= oproc->num_timers) { + timers[i].irq = omap_rproc_get_timer_irq(&timers[i]); + if (timers[i].irq < 0) { + dev_err(dev, "get_irq for timer %p failed: %d\n", + np, timers[i].irq); + ret = -EBUSY; + goto free_timers; + } + + ret = request_irq(timers[i].irq, + omap_rproc_watchdog_isr, IRQF_SHARED, + "rproc-wdt", rproc); + if (ret) { + dev_err(dev, "error requesting irq for timer %p\n", + np); + omap_rproc_release_timer(&timers[i]); + timers[i].odt = NULL; + timers[i].timer_ops = NULL; + timers[i].irq = -1; + goto free_timers; + } + } + } + +start_timers: + for (i = 0; i < num_timers; i++) { + ret = omap_rproc_start_timer(&timers[i]); + if (ret) { + dev_err(dev, "start timer %p failed: %d\n", np, + ret); + break; + } + } + if (ret) { + while (i >= 0) { + omap_rproc_stop_timer(&timers[i]); + i--; + } + goto put_node; + } + return 0; + +put_node: + if (configure) + of_node_put(np); +free_timers: + while (i--) { + if (i >= oproc->num_timers) + free_irq(timers[i].irq, rproc); + omap_rproc_release_timer(&timers[i]); + timers[i].odt = NULL; + timers[i].timer_ops = NULL; + timers[i].irq = -1; + } + + return ret; +} + +/** + * omap_rproc_disable_timers() - disable the timers for a remoteproc + * @rproc: handle of a remote processor + * @configure: boolean flag used to release the timer handle + * + * This function is used primarily to disable the timers associated with + * a remoteproc. The configure flag is provided to allow the driver + * to either stop and release a timer (during device shutdown) or to just + * stop a timer (during a suspend operation). 
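+ * The configure=false variant is what the PM paths use, so that a
+ * subsequent resume can restart the very same timer handles without
+ * re-parsing the device tree.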
+ * + * Return: 0 on success or no timers + */ +static int omap_rproc_disable_timers(struct rproc *rproc, bool configure) +{ + int i; + struct omap_rproc *oproc = rproc->priv; + struct omap_rproc_timer *timers = oproc->timers; + int num_timers = oproc->num_timers + oproc->num_wd_timers; + + if (!num_timers) + return 0; + + for (i = 0; i < num_timers; i++) { + omap_rproc_stop_timer(&timers[i]); + if (configure) { + if (i >= oproc->num_timers) + free_irq(timers[i].irq, rproc); + omap_rproc_release_timer(&timers[i]); + timers[i].odt = NULL; + timers[i].timer_ops = NULL; + timers[i].irq = -1; + } + } + + return 0; +} + +/** + * omap_rproc_mbox_callback() - inbound mailbox message handler + * @client: mailbox client pointer used for requesting the mailbox channel + * @data: mailbox payload + * + * This handler is invoked by omap's mailbox driver whenever a mailbox + * message is received. Usually, the mailbox payload simply contains + * the index of the virtqueue that is kicked by the remote processor, + * and we let the remoteproc core handle it. + * + * In addition to virtqueue indices, we also have some out-of-band values + * that indicate different events. Those values are deliberately very + * big so they don't coincide with virtqueue indices. + */ +static void omap_rproc_mbox_callback(struct mbox_client *client, void *data) +{ + struct omap_rproc *oproc = container_of(client, struct omap_rproc, + client); + struct device *dev = oproc->rproc->dev.parent; + const char *name = oproc->rproc->name; + u32 msg = (u32)data; + + dev_dbg(dev, "mbox msg: 0x%x\n", msg); + + switch (msg) { + case RP_MBOX_CRASH: + /* + * remoteproc detected an exception, notify the rproc core. + * The remoteproc core will handle the recovery. + */ + dev_err(dev, "omap rproc %s crashed\n", name); + rproc_report_crash(oproc->rproc, RPROC_FATAL_ERROR); + break; + case RP_MBOX_ECHO_REPLY: + dev_info(dev, "received echo reply from %s\n", name); + break; + case RP_MBOX_SUSPEND_ACK: + case RP_MBOX_SUSPEND_CANCEL: + oproc->suspend_acked = msg == RP_MBOX_SUSPEND_ACK; + complete(&oproc->pm_comp); + break; + default: + if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG) + return; + if (msg > oproc->rproc->max_notifyid) { + dev_dbg(dev, "dropping unknown message 0x%x", msg); + return; + } + /* msg contains the index of the triggered vring */ + if (rproc_vq_interrupt(oproc->rproc, msg) == IRQ_NONE) + dev_dbg(dev, "no message was found in vqid %d\n", msg); + } +} + +/* kick a virtqueue */ +static void omap_rproc_kick(struct rproc *rproc, int vqid) +{ + struct omap_rproc *oproc = rproc->priv; + struct device *dev = rproc->dev.parent; + int ret; + + /* wake up the rproc before kicking it */ + ret = pm_runtime_get_sync(dev); + if (WARN_ON(ret < 0)) { + dev_err(dev, "pm_runtime_get_sync() failed during kick, ret = %d\n", + ret); + pm_runtime_put_noidle(dev); + return; + } + + /* send the index of the triggered virtqueue in the mailbox payload */ + ret = mbox_send_message(oproc->mbox, (void *)vqid); + if (ret < 0) + dev_err(dev, "failed to send mailbox message, status = %d\n", + ret); + + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); +} + +/** + * omap_rproc_write_dsp_boot_addr() - set boot address for DSP remote processor + * @rproc: handle of a remote processor + * + * Set boot address for a supported DSP remote processor. 
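+ * Worked example (with a hypothetical boot_reg_shift of 10, matching
+ * the 1KB alignment requirement): a bootaddr of 0x95000000 yields
+ * value = 0x95000000 >> 10 = 0x254000 and mask = 0xfffffc00 >> 10 =
+ * 0x3fffff, so regmap_update_bits() rewrites only the 22-bit boot
+ * address field within the control-module register.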
+ * + * Return: 0 on success, or -EINVAL if boot address is not aligned properly + */ +static int omap_rproc_write_dsp_boot_addr(struct rproc *rproc) +{ + struct device *dev = rproc->dev.parent; + struct omap_rproc *oproc = rproc->priv; + struct omap_rproc_boot_data *bdata = oproc->boot_data; + u32 offset = bdata->boot_reg; + u32 value; + u32 mask; + + if (rproc->bootaddr & (SZ_1K - 1)) { + dev_err(dev, "invalid boot address 0x%llx, must be aligned on a 1KB boundary\n", + rproc->bootaddr); + return -EINVAL; + } + + value = rproc->bootaddr >> bdata->boot_reg_shift; + mask = ~(SZ_1K - 1) >> bdata->boot_reg_shift; + + return regmap_update_bits(bdata->syscon, offset, mask, value); +} + +/* + * Power up the remote processor. + * + * This function will be invoked only after the firmware for this rproc + * was loaded, parsed successfully, and all of its resource requirements + * were met. + */ +static int omap_rproc_start(struct rproc *rproc) +{ + struct omap_rproc *oproc = rproc->priv; + struct device *dev = rproc->dev.parent; + int ret; + struct mbox_client *client = &oproc->client; + + if (oproc->boot_data) { + ret = omap_rproc_write_dsp_boot_addr(rproc); + if (ret) + return ret; + } + + client->dev = dev; + client->tx_done = NULL; + client->rx_callback = omap_rproc_mbox_callback; + client->tx_block = false; + client->knows_txdone = false; + + oproc->mbox = mbox_request_channel(client, 0); + if (IS_ERR(oproc->mbox)) { + ret = -EBUSY; + dev_err(dev, "mbox_request_channel failed: %ld\n", + PTR_ERR(oproc->mbox)); + return ret; + } + + /* + * Ping the remote processor. this is only for sanity-sake; + * there is no functional effect whatsoever. + * + * Note that the reply will _not_ arrive immediately: this message + * will wait in the mailbox fifo until the remote processor is booted. + */ + ret = mbox_send_message(oproc->mbox, (void *)RP_MBOX_ECHO_REQUEST); + if (ret < 0) { + dev_err(dev, "mbox_send_message failed: %d\n", ret); + goto put_mbox; + } + + ret = omap_rproc_enable_timers(rproc, true); + if (ret) { + dev_err(dev, "omap_rproc_enable_timers failed: %d\n", ret); + goto put_mbox; + } + + ret = reset_control_deassert(oproc->reset); + if (ret) { + dev_err(dev, "reset control deassert failed: %d\n", ret); + goto disable_timers; + } + + /* + * remote processor is up, so update the runtime pm status and + * enable the auto-suspend. The device usage count is incremented + * manually for balancing it for auto-suspend + */ + pm_runtime_set_active(dev); + pm_runtime_use_autosuspend(dev); + pm_runtime_get_noresume(dev); + pm_runtime_enable(dev); + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + + return 0; + +disable_timers: + omap_rproc_disable_timers(rproc, true); +put_mbox: + mbox_free_channel(oproc->mbox); + return ret; +} + +/* power off the remote processor */ +static int omap_rproc_stop(struct rproc *rproc) +{ + struct device *dev = rproc->dev.parent; + struct omap_rproc *oproc = rproc->priv; + int ret; + + /* + * cancel any possible scheduled runtime suspend by incrementing + * the device usage count, and resuming the device. 
The remoteproc + * also needs to be woken up if suspended, so that the remoteproc + * OS does not continue to remember any context that it has saved, and + * to avoid potential issues in misidentifying a subsequent device + * reboot as a power restore boot + */ + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + pm_runtime_put_noidle(dev); + return ret; + } + + ret = reset_control_assert(oproc->reset); + if (ret) + goto out; + + ret = omap_rproc_disable_timers(rproc, true); + if (ret) + goto enable_device; + + mbox_free_channel(oproc->mbox); + + /* + * update the runtime pm states and status now that the remoteproc + * has stopped + */ + pm_runtime_disable(dev); + pm_runtime_dont_use_autosuspend(dev); + pm_runtime_put_noidle(dev); + pm_runtime_set_suspended(dev); + + return 0; + +enable_device: + reset_control_deassert(oproc->reset); +out: + /* schedule the next auto-suspend */ + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + return ret; +} + +/** + * omap_rproc_da_to_va() - internal memory translation helper + * @rproc: remote processor to apply the address translation for + * @da: device address to translate + * @len: length of the memory buffer + * + * Custom function implementing the rproc .da_to_va ops to provide address + * translation (device address to kernel virtual address) for internal RAMs + * present in a DSP or IPU device. The translated addresses can be used + * either by the remoteproc core for loading, or by any rpmsg bus drivers. + * + * Return: translated virtual address in kernel memory space on success, + * or NULL on failure. + */ +static void *omap_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len) +{ + struct omap_rproc *oproc = rproc->priv; + int i; + u32 offset; + + if (len <= 0) + return NULL; + + if (!oproc->num_mems) + return NULL; + + for (i = 0; i < oproc->num_mems; i++) { + if (da >= oproc->mem[i].dev_addr && da + len <= + oproc->mem[i].dev_addr + oproc->mem[i].size) { + offset = da - oproc->mem[i].dev_addr; + /* __force to make sparse happy with type conversion */ + return (__force void *)(oproc->mem[i].cpu_addr + + offset); + } + } + + return NULL; +} + +static const struct rproc_ops omap_rproc_ops = { + .start = omap_rproc_start, + .stop = omap_rproc_stop, + .kick = omap_rproc_kick, + .da_to_va = omap_rproc_da_to_va, +}; + +#ifdef CONFIG_PM +static bool _is_rproc_in_standby(struct omap_rproc *oproc) +{ + return ti_clk_is_in_standby(oproc->fck); +} + +/* 1 sec is long enough to let the remoteproc side suspend the device */ +#define DEF_SUSPEND_TIMEOUT 1000 +static int _omap_rproc_suspend(struct rproc *rproc, bool auto_suspend) +{ + struct device *dev = rproc->dev.parent; + struct omap_rproc *oproc = rproc->priv; + unsigned long to = msecs_to_jiffies(DEF_SUSPEND_TIMEOUT); + unsigned long ta = jiffies + to; + u32 suspend_msg = auto_suspend ? + RP_MBOX_SUSPEND_AUTO : RP_MBOX_SUSPEND_SYSTEM; + int ret; + + reinit_completion(&oproc->pm_comp); + oproc->suspend_acked = false; + ret = mbox_send_message(oproc->mbox, (void *)suspend_msg); + if (ret < 0) { + dev_err(dev, "PM mbox_send_message failed: %d\n", ret); + return ret; + } + + ret = wait_for_completion_timeout(&oproc->pm_comp, to); + if (!oproc->suspend_acked) + return -EBUSY; + + /* + * The remoteproc side is returning the ACK message before saving the + * context, because the context saving is performed within a SYS/BIOS + * function, and it cannot have any inter-dependencies against the IPC + * layer. 
Also, as the SYS/BIOS needs to preserve properly the processor + * register set, sending this ACK or signalling the completion of the + * context save through a shared memory variable can never be the + * absolute last thing to be executed on the remoteproc side, and the + * MPU cannot use the ACK message as a sync point to put the remoteproc + * into reset. The only way to ensure that the remote processor has + * completed saving the context is to check that the module has reached + * STANDBY state (after saving the context, the SYS/BIOS executes the + * appropriate target-specific WFI instruction causing the module to + * enter STANDBY). + */ + while (!_is_rproc_in_standby(oproc)) { + if (time_after(jiffies, ta)) + return -ETIME; + schedule(); + } + + ret = reset_control_assert(oproc->reset); + if (ret) { + dev_err(dev, "reset assert during suspend failed %d\n", ret); + return ret; + } + + ret = omap_rproc_disable_timers(rproc, false); + if (ret) { + dev_err(dev, "disabling timers during suspend failed %d\n", + ret); + goto enable_device; + } + + /* + * IOMMUs would have to be disabled specifically for runtime suspend. + * They are handled automatically through System PM callbacks for + * regular system suspend + */ + if (auto_suspend) { + ret = omap_iommu_domain_deactivate(rproc->domain); + if (ret) { + dev_err(dev, "iommu domain deactivate failed %d\n", + ret); + goto enable_timers; + } + } + + return 0; + +enable_timers: + /* ignore errors on re-enabling code */ + omap_rproc_enable_timers(rproc, false); +enable_device: + reset_control_deassert(oproc->reset); + return ret; +} + +static int _omap_rproc_resume(struct rproc *rproc, bool auto_suspend) +{ + struct device *dev = rproc->dev.parent; + struct omap_rproc *oproc = rproc->priv; + int ret; + + /* + * IOMMUs would have to be enabled specifically for runtime resume. 
+ * They would have been already enabled automatically through System + * PM callbacks for regular system resume + */ + if (auto_suspend) { + ret = omap_iommu_domain_activate(rproc->domain); + if (ret) { + dev_err(dev, "omap_iommu activate failed %d\n", ret); + goto out; + } + } + + /* boot address could be lost after suspend, so restore it */ + if (oproc->boot_data) { + ret = omap_rproc_write_dsp_boot_addr(rproc); + if (ret) { + dev_err(dev, "boot address restore failed %d\n", ret); + goto suspend_iommu; + } + } + + ret = omap_rproc_enable_timers(rproc, false); + if (ret) { + dev_err(dev, "enabling timers during resume failed %d\n", ret); + goto suspend_iommu; + } + + ret = reset_control_deassert(oproc->reset); + if (ret) { + dev_err(dev, "reset deassert during resume failed %d\n", ret); + goto disable_timers; + } + + return 0; + +disable_timers: + omap_rproc_disable_timers(rproc, false); +suspend_iommu: + if (auto_suspend) + omap_iommu_domain_deactivate(rproc->domain); +out: + return ret; +} + +static int __maybe_unused omap_rproc_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct rproc *rproc = platform_get_drvdata(pdev); + struct omap_rproc *oproc = rproc->priv; + int ret = 0; + + mutex_lock(&rproc->lock); + if (rproc->state == RPROC_OFFLINE) + goto out; + + if (rproc->state == RPROC_SUSPENDED) + goto out; + + if (rproc->state != RPROC_RUNNING) { + ret = -EBUSY; + goto out; + } + + ret = _omap_rproc_suspend(rproc, false); + if (ret) { + dev_err(dev, "suspend failed %d\n", ret); + goto out; + } + + /* + * remoteproc is running at the time of system suspend, so remember + * it so as to wake it up during system resume + */ + oproc->need_resume = true; + rproc->state = RPROC_SUSPENDED; + +out: + mutex_unlock(&rproc->lock); + return ret; +} + +static int __maybe_unused omap_rproc_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct rproc *rproc = platform_get_drvdata(pdev); + struct omap_rproc *oproc = rproc->priv; + int ret = 0; + + mutex_lock(&rproc->lock); + if (rproc->state == RPROC_OFFLINE) + goto out; + + if (rproc->state != RPROC_SUSPENDED) { + ret = -EBUSY; + goto out; + } + + /* + * remoteproc was auto-suspended at the time of system suspend, + * so no need to wake-up the processor (leave it in suspended + * state, will be woken up during a subsequent runtime_resume) + */ + if (!oproc->need_resume) + goto out; + + ret = _omap_rproc_resume(rproc, false); + if (ret) { + dev_err(dev, "resume failed %d\n", ret); + goto out; + } + + oproc->need_resume = false; + rproc->state = RPROC_RUNNING; + + pm_runtime_mark_last_busy(dev); +out: + mutex_unlock(&rproc->lock); + return ret; +} + +static int omap_rproc_runtime_suspend(struct device *dev) +{ + struct rproc *rproc = dev_get_drvdata(dev); + struct omap_rproc *oproc = rproc->priv; + int ret; + + mutex_lock(&rproc->lock); + if (rproc->state == RPROC_CRASHED) { + dev_dbg(dev, "rproc cannot be runtime suspended when crashed!\n"); + ret = -EBUSY; + goto out; + } + + if (WARN_ON(rproc->state != RPROC_RUNNING)) { + dev_err(dev, "rproc cannot be runtime suspended when not running!\n"); + ret = -EBUSY; + goto out; + } + + /* + * do not even attempt suspend if the remote processor is not + * idled for runtime auto-suspend + */ + if (!_is_rproc_in_standby(oproc)) { + ret = -EBUSY; + goto abort; + } + + ret = _omap_rproc_suspend(rproc, true); + if (ret) + goto abort; + + rproc->state = RPROC_SUSPENDED; + mutex_unlock(&rproc->lock); + return 0; + +abort: + 
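/*
+	 * The processor was still active (or the suspend handshake
+	 * failed), so refresh the last-busy timestamp and let the
+	 * runtime-PM core retry the autosuspend later.
+	 */
+ 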
pm_runtime_mark_last_busy(dev); +out: + mutex_unlock(&rproc->lock); + return ret; +} + +static int omap_rproc_runtime_resume(struct device *dev) +{ + struct rproc *rproc = dev_get_drvdata(dev); + int ret; + + mutex_lock(&rproc->lock); + if (WARN_ON(rproc->state != RPROC_SUSPENDED)) { + dev_err(dev, "rproc cannot be runtime resumed if not suspended! state=%d\n", + rproc->state); + ret = -EBUSY; + goto out; + } + + ret = _omap_rproc_resume(rproc, true); + if (ret) { + dev_err(dev, "runtime resume failed %d\n", ret); + goto out; + } + + rproc->state = RPROC_RUNNING; +out: + mutex_unlock(&rproc->lock); + return ret; +} +#endif /* CONFIG_PM */ + +static const struct omap_rproc_mem_data ipu_mems[] = { + { .name = "l2ram", .dev_addr = 0x20000000 }, + { }, +}; + +static const struct omap_rproc_mem_data dra7_dsp_mems[] = { + { .name = "l2ram", .dev_addr = 0x800000 }, + { .name = "l1pram", .dev_addr = 0xe00000 }, + { .name = "l1dram", .dev_addr = 0xf00000 }, + { }, +}; + +static const struct omap_rproc_dev_data omap4_dsp_dev_data = { + .device_name = "dsp", +}; + +static const struct omap_rproc_dev_data omap4_ipu_dev_data = { + .device_name = "ipu", + .mems = ipu_mems, +}; + +static const struct omap_rproc_dev_data omap5_dsp_dev_data = { + .device_name = "dsp", +}; + +static const struct omap_rproc_dev_data omap5_ipu_dev_data = { + .device_name = "ipu", + .mems = ipu_mems, +}; + +static const struct omap_rproc_dev_data dra7_dsp_dev_data = { + .device_name = "dsp", + .mems = dra7_dsp_mems, +}; + +static const struct omap_rproc_dev_data dra7_ipu_dev_data = { + .device_name = "ipu", + .mems = ipu_mems, +}; + +static const struct of_device_id omap_rproc_of_match[] = { + { + .compatible = "ti,omap4-dsp", + .data = &omap4_dsp_dev_data, + }, + { + .compatible = "ti,omap4-ipu", + .data = &omap4_ipu_dev_data, + }, + { + .compatible = "ti,omap5-dsp", + .data = &omap5_dsp_dev_data, + }, + { + .compatible = "ti,omap5-ipu", + .data = &omap5_ipu_dev_data, + }, + { + .compatible = "ti,dra7-dsp", + .data = &dra7_dsp_dev_data, + }, + { + .compatible = "ti,dra7-ipu", + .data = &dra7_ipu_dev_data, + }, + { + /* end */ + }, +}; +MODULE_DEVICE_TABLE(of, omap_rproc_of_match); + +static const char *omap_rproc_get_firmware(struct platform_device *pdev) +{ + const char *fw_name; + int ret; + + ret = of_property_read_string(pdev->dev.of_node, "firmware-name", + &fw_name); + if (ret) + return ERR_PTR(ret); + + return fw_name; +} + +static int omap_rproc_get_boot_data(struct platform_device *pdev, + struct rproc *rproc) +{ + struct device_node *np = pdev->dev.of_node; + struct omap_rproc *oproc = rproc->priv; + const struct omap_rproc_dev_data *data; + int ret; + + data = of_device_get_match_data(&pdev->dev); + if (!data) + return -ENODEV; + + if (!of_property_read_bool(np, "ti,bootreg")) + return 0; + + oproc->boot_data = devm_kzalloc(&pdev->dev, sizeof(*oproc->boot_data), + GFP_KERNEL); + if (!oproc->boot_data) + return -ENOMEM; + + oproc->boot_data->syscon = + syscon_regmap_lookup_by_phandle(np, "ti,bootreg"); + if (IS_ERR(oproc->boot_data->syscon)) { + ret = PTR_ERR(oproc->boot_data->syscon); + return ret; + } + + if (of_property_read_u32_index(np, "ti,bootreg", 1, + &oproc->boot_data->boot_reg)) { + dev_err(&pdev->dev, "couldn't get the boot register\n"); + return -EINVAL; + } + + of_property_read_u32_index(np, "ti,bootreg", 2, + &oproc->boot_data->boot_reg_shift); + + return 0; +} + +static int omap_rproc_of_get_internal_memories(struct platform_device *pdev, + struct rproc *rproc) +{ + struct omap_rproc *oproc = 
rproc->priv; + struct device *dev = &pdev->dev; + const struct omap_rproc_dev_data *data; + struct resource *res; + int num_mems; + int i; + + data = of_device_get_match_data(dev); + if (!data) + return -ENODEV; + + if (!data->mems) + return 0; + + num_mems = of_property_count_elems_of_size(dev->of_node, "reg", + sizeof(u32)) / 2; + + oproc->mem = devm_kcalloc(dev, num_mems, sizeof(*oproc->mem), + GFP_KERNEL); + if (!oproc->mem) + return -ENOMEM; + + for (i = 0; data->mems[i].name; i++) { + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + data->mems[i].name); + if (!res) { + dev_err(dev, "no memory defined for %s\n", + data->mems[i].name); + return -ENOMEM; + } + oproc->mem[i].cpu_addr = devm_ioremap_resource(dev, res); + if (IS_ERR(oproc->mem[i].cpu_addr)) { + dev_err(dev, "failed to parse and map %s memory\n", + data->mems[i].name); + return PTR_ERR(oproc->mem[i].cpu_addr); + } + oproc->mem[i].bus_addr = res->start; + oproc->mem[i].dev_addr = data->mems[i].dev_addr; + oproc->mem[i].size = resource_size(res); + + dev_dbg(dev, "memory %8s: bus addr %pa size 0x%x va %pK da 0x%x\n", + data->mems[i].name, &oproc->mem[i].bus_addr, + oproc->mem[i].size, oproc->mem[i].cpu_addr, + oproc->mem[i].dev_addr); + } + oproc->num_mems = num_mems; + + return 0; +} + +#ifdef CONFIG_OMAP_REMOTEPROC_WATCHDOG +static int omap_rproc_count_wdog_timers(struct device *dev) +{ + struct device_node *np = dev->of_node; + int ret; + + ret = of_count_phandle_with_args(np, "ti,watchdog-timers", NULL); + if (ret <= 0) { + dev_dbg(dev, "device does not have watchdog timers, status = %d\n", + ret); + ret = 0; + } + + return ret; +} +#else +static int omap_rproc_count_wdog_timers(struct device *dev) +{ + return 0; +} +#endif + +static int omap_rproc_of_get_timers(struct platform_device *pdev, + struct rproc *rproc) +{ + struct device_node *np = pdev->dev.of_node; + struct omap_rproc *oproc = rproc->priv; + struct device *dev = &pdev->dev; + int num_timers; + + /* + * Timer nodes are directly used in client nodes as phandles, so + * retrieve the count using appropriate size + */ + oproc->num_timers = of_count_phandle_with_args(np, "ti,timers", NULL); + if (oproc->num_timers <= 0) { + dev_dbg(dev, "device does not have timers, status = %d\n", + oproc->num_timers); + oproc->num_timers = 0; + } + + oproc->num_wd_timers = omap_rproc_count_wdog_timers(dev); + + num_timers = oproc->num_timers + oproc->num_wd_timers; + if (num_timers) { + oproc->timers = devm_kcalloc(dev, num_timers, + sizeof(*oproc->timers), + GFP_KERNEL); + if (!oproc->timers) + return -ENOMEM; + + dev_dbg(dev, "device has %d tick timers and %d watchdog timers\n", + oproc->num_timers, oproc->num_wd_timers); + } + + return 0; +} + +static int omap_rproc_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct omap_rproc *oproc; + struct rproc *rproc; + const char *firmware; + int ret; + struct reset_control *reset; + + if (!np) { + dev_err(&pdev->dev, "only DT-based devices are supported\n"); + return -ENODEV; + } + + reset = devm_reset_control_array_get_exclusive(&pdev->dev); + if (IS_ERR(reset)) + return PTR_ERR(reset); + + firmware = omap_rproc_get_firmware(pdev); + if (IS_ERR(firmware)) + return PTR_ERR(firmware); + + ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) { + dev_err(&pdev->dev, "dma_set_coherent_mask: %d\n", ret); + return ret; + } + + rproc = rproc_alloc(&pdev->dev, dev_name(&pdev->dev), &omap_rproc_ops, + firmware, sizeof(*oproc)); + if (!rproc) + return -ENOMEM; + + oproc = 
rproc->priv; + oproc->rproc = rproc; + oproc->reset = reset; + /* All existing OMAP IPU and DSP processors have an MMU */ + rproc->has_iommu = true; + + ret = omap_rproc_of_get_internal_memories(pdev, rproc); + if (ret) + goto free_rproc; + + ret = omap_rproc_get_boot_data(pdev, rproc); + if (ret) + goto free_rproc; + + ret = omap_rproc_of_get_timers(pdev, rproc); + if (ret) + goto free_rproc; + + init_completion(&oproc->pm_comp); + oproc->autosuspend_delay = DEFAULT_AUTOSUSPEND_DELAY; + + of_property_read_u32(pdev->dev.of_node, "ti,autosuspend-delay-ms", + &oproc->autosuspend_delay); + + pm_runtime_set_autosuspend_delay(&pdev->dev, oproc->autosuspend_delay); + + oproc->fck = devm_clk_get(&pdev->dev, 0); + if (IS_ERR(oproc->fck)) { + ret = PTR_ERR(oproc->fck); + goto free_rproc; + } + + ret = of_reserved_mem_device_init(&pdev->dev); + if (ret) { + dev_warn(&pdev->dev, "device does not have specific CMA pool.\n"); + dev_warn(&pdev->dev, "Typically this should be provided,\n"); + dev_warn(&pdev->dev, "only omit if you know what you are doing.\n"); + } + + platform_set_drvdata(pdev, rproc); + + ret = rproc_add(rproc); + if (ret) + goto release_mem; + + return 0; + +release_mem: + of_reserved_mem_device_release(&pdev->dev); +free_rproc: + rproc_free(rproc); + return ret; +} + +static int omap_rproc_remove(struct platform_device *pdev) +{ + struct rproc *rproc = platform_get_drvdata(pdev); + + rproc_del(rproc); + rproc_free(rproc); + of_reserved_mem_device_release(&pdev->dev); + + return 0; +} + +static const struct dev_pm_ops omap_rproc_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(omap_rproc_suspend, omap_rproc_resume) + SET_RUNTIME_PM_OPS(omap_rproc_runtime_suspend, + omap_rproc_runtime_resume, NULL) +}; + +static struct platform_driver omap_rproc_driver = { + .probe = omap_rproc_probe, + .remove = omap_rproc_remove, + .driver = { + .name = "omap-rproc", + .pm = &omap_rproc_pm_ops, + .of_match_table = omap_rproc_of_match, + }, +}; + +module_platform_driver(omap_rproc_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("OMAP Remote Processor control driver"); diff --git a/drivers/remoteproc/omap_remoteproc.h b/drivers/remoteproc/omap_remoteproc.h new file mode 100644 index 000000000..828e13256 --- /dev/null +++ b/drivers/remoteproc/omap_remoteproc.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ +/* + * Remote processor messaging + * + * Copyright (C) 2011-2020 Texas Instruments, Inc. + * Copyright (C) 2011 Google, Inc. + * All rights reserved. + */ + +#ifndef _OMAP_RPMSG_H +#define _OMAP_RPMSG_H + +/* + * enum - Predefined Mailbox Messages + * + * @RP_MBOX_READY: informs the M3's that we're up and running. this is + * part of the init sequence sent that the M3 expects to see immediately + * after it is booted. + * + * @RP_MBOX_PENDING_MSG: informs the receiver that there is an inbound + * message waiting in its own receive-side vring. please note that currently + * this message is optional: alternatively, one can explicitly send the index + * of the triggered virtqueue itself. the preferred approach will be decided + * as we progress and experiment with those two different approaches. + * + * @RP_MBOX_CRASH: this message is sent if BIOS crashes + * + * @RP_MBOX_ECHO_REQUEST: a mailbox-level "ping" message. + * + * @RP_MBOX_ECHO_REPLY: a mailbox-level reply to a "ping" + * + * @RP_MBOX_ABORT_REQUEST: a "please crash" request, used for testing the + * recovery mechanism (to some extent). 
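+ *
+ * On the Linux side these values typically arrive through the mailbox
+ * receive callback; a hypothetical handler (variable names illustrative,
+ * not part of this header) might do:
+ *
+ *	switch (msg) {
+ *	case RP_MBOX_CRASH:
+ *		rproc_report_crash(rproc, RPROC_FATAL_ERROR);
+ *		break;
+ *	case RP_MBOX_ECHO_REPLY:
+ *		dev_info(dev, "received echo reply\n");
+ *		break;
+ *	}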
+ * + * @RP_MBOX_SUSPEND_AUTO: auto suspend request for the remote processor + * + * @RP_MBOX_SUSPEND_SYSTEM: system suspend request for the remote processor + * + * @RP_MBOX_SUSPEND_ACK: successful response from remote processor for a + * suspend request + * + * @RP_MBOX_SUSPEND_CANCEL: a cancel suspend response from a remote processor + * on a suspend request + * + * Introduce new message definitions if any here. + * + * @RP_MBOX_END_MSG: Indicates end of known/defined messages from remote core + * This should be the last definition. + * + */ +enum omap_rp_mbox_messages { + RP_MBOX_READY = 0xFFFFFF00, + RP_MBOX_PENDING_MSG = 0xFFFFFF01, + RP_MBOX_CRASH = 0xFFFFFF02, + RP_MBOX_ECHO_REQUEST = 0xFFFFFF03, + RP_MBOX_ECHO_REPLY = 0xFFFFFF04, + RP_MBOX_ABORT_REQUEST = 0xFFFFFF05, + RP_MBOX_SUSPEND_AUTO = 0xFFFFFF10, + RP_MBOX_SUSPEND_SYSTEM = 0xFFFFFF11, + RP_MBOX_SUSPEND_ACK = 0xFFFFFF12, + RP_MBOX_SUSPEND_CANCEL = 0xFFFFFF13, + RP_MBOX_END_MSG = 0xFFFFFF14, +}; + +#endif /* _OMAP_RPMSG_H */ diff --git a/drivers/remoteproc/qcom_common.c b/drivers/remoteproc/qcom_common.c new file mode 100644 index 000000000..085fd73fa --- /dev/null +++ b/drivers/remoteproc/qcom_common.c @@ -0,0 +1,363 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Qualcomm Peripheral Image Loader helpers + * + * Copyright (C) 2016 Linaro Ltd + * Copyright (C) 2015 Sony Mobile Communications Inc + * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. + */ + +#include <linux/firmware.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/notifier.h> +#include <linux/remoteproc.h> +#include <linux/remoteproc/qcom_rproc.h> +#include <linux/rpmsg/qcom_glink.h> +#include <linux/rpmsg/qcom_smd.h> +#include <linux/slab.h> +#include <linux/soc/qcom/mdt_loader.h> + +#include "remoteproc_internal.h" +#include "qcom_common.h" + +#define to_glink_subdev(d) container_of(d, struct qcom_rproc_glink, subdev) +#define to_smd_subdev(d) container_of(d, struct qcom_rproc_subdev, subdev) +#define to_ssr_subdev(d) container_of(d, struct qcom_rproc_ssr, subdev) + +struct qcom_ssr_subsystem { + const char *name; + struct srcu_notifier_head notifier_list; + struct list_head list; +}; + +static LIST_HEAD(qcom_ssr_subsystem_list); +static DEFINE_MUTEX(qcom_ssr_subsys_lock); + +static int glink_subdev_start(struct rproc_subdev *subdev) +{ + struct qcom_rproc_glink *glink = to_glink_subdev(subdev); + + glink->edge = qcom_glink_smem_register(glink->dev, glink->node); + + return PTR_ERR_OR_ZERO(glink->edge); +} + +static void glink_subdev_stop(struct rproc_subdev *subdev, bool crashed) +{ + struct qcom_rproc_glink *glink = to_glink_subdev(subdev); + + qcom_glink_smem_unregister(glink->edge); + glink->edge = NULL; +} + +static void glink_subdev_unprepare(struct rproc_subdev *subdev) +{ + struct qcom_rproc_glink *glink = to_glink_subdev(subdev); + + qcom_glink_ssr_notify(glink->ssr_name); +} + +/** + * qcom_add_glink_subdev() - try to add a GLINK subdevice to rproc + * @rproc: rproc handle to parent the subdevice + * @glink: reference to a GLINK subdev context + * @ssr_name: identifier of the associated remoteproc for ssr notifications + */ +void qcom_add_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glink, + const char *ssr_name) +{ + struct device *dev = &rproc->dev; + + glink->node = of_get_child_by_name(dev->parent->of_node, "glink-edge"); + if (!glink->node) + return; + + glink->ssr_name = kstrdup_const(ssr_name, GFP_KERNEL); + if (!glink->ssr_name) + return; + + glink->dev = dev; + 
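+	/*
+	 * The start/stop ops bracket the lifetime of the GLINK edge, while
+	 * unprepare gives GLINK SSR clients a chance to clean up after the
+	 * edge has gone down.
+	 */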
glink->subdev.start = glink_subdev_start; + glink->subdev.stop = glink_subdev_stop; + glink->subdev.unprepare = glink_subdev_unprepare; + + rproc_add_subdev(rproc, &glink->subdev); +} +EXPORT_SYMBOL_GPL(qcom_add_glink_subdev); + +/** + * qcom_remove_glink_subdev() - remove a GLINK subdevice from rproc + * @rproc: rproc handle + * @glink: reference to a GLINK subdev context + */ +void qcom_remove_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glink) +{ + if (!glink->node) + return; + + rproc_remove_subdev(rproc, &glink->subdev); + kfree_const(glink->ssr_name); + of_node_put(glink->node); +} +EXPORT_SYMBOL_GPL(qcom_remove_glink_subdev); + +/** + * qcom_register_dump_segments() - register segments for coredump + * @rproc: remoteproc handle + * @fw: firmware header + * + * Register all segments of the ELF in the remoteproc coredump segment list + * + * Return: 0 on success, negative errno on failure. + */ +int qcom_register_dump_segments(struct rproc *rproc, + const struct firmware *fw) +{ + const struct elf32_phdr *phdrs; + const struct elf32_phdr *phdr; + const struct elf32_hdr *ehdr; + int ret; + int i; + + ehdr = (struct elf32_hdr *)fw->data; + phdrs = (struct elf32_phdr *)(ehdr + 1); + + for (i = 0; i < ehdr->e_phnum; i++) { + phdr = &phdrs[i]; + + if (phdr->p_type != PT_LOAD) + continue; + + if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH) + continue; + + if (!phdr->p_memsz) + continue; + + ret = rproc_coredump_add_segment(rproc, phdr->p_paddr, + phdr->p_memsz); + if (ret) + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(qcom_register_dump_segments); + +static int smd_subdev_start(struct rproc_subdev *subdev) +{ + struct qcom_rproc_subdev *smd = to_smd_subdev(subdev); + + smd->edge = qcom_smd_register_edge(smd->dev, smd->node); + + return PTR_ERR_OR_ZERO(smd->edge); +} + +static void smd_subdev_stop(struct rproc_subdev *subdev, bool crashed) +{ + struct qcom_rproc_subdev *smd = to_smd_subdev(subdev); + + qcom_smd_unregister_edge(smd->edge); + smd->edge = NULL; +} + +/** + * qcom_add_smd_subdev() - try to add a SMD subdevice to rproc + * @rproc: rproc handle to parent the subdevice + * @smd: reference to a Qualcomm subdev context + */ +void qcom_add_smd_subdev(struct rproc *rproc, struct qcom_rproc_subdev *smd) +{ + struct device *dev = &rproc->dev; + + smd->node = of_get_child_by_name(dev->parent->of_node, "smd-edge"); + if (!smd->node) + return; + + smd->dev = dev; + smd->subdev.start = smd_subdev_start; + smd->subdev.stop = smd_subdev_stop; + + rproc_add_subdev(rproc, &smd->subdev); +} +EXPORT_SYMBOL_GPL(qcom_add_smd_subdev); + +/** + * qcom_remove_smd_subdev() - remove the smd subdevice from rproc + * @rproc: rproc handle + * @smd: the SMD subdevice to remove + */ +void qcom_remove_smd_subdev(struct rproc *rproc, struct qcom_rproc_subdev *smd) +{ + if (!smd->node) + return; + + rproc_remove_subdev(rproc, &smd->subdev); + of_node_put(smd->node); +} +EXPORT_SYMBOL_GPL(qcom_remove_smd_subdev); + +static struct qcom_ssr_subsystem *qcom_ssr_get_subsys(const char *name) +{ + struct qcom_ssr_subsystem *info; + + mutex_lock(&qcom_ssr_subsys_lock); + /* Match in the global qcom_ssr_subsystem_list with name */ + list_for_each_entry(info, &qcom_ssr_subsystem_list, list) + if (!strcmp(info->name, name)) + goto out; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) { + info = ERR_PTR(-ENOMEM); + goto out; + } + info->name = kstrdup_const(name, GFP_KERNEL); + srcu_init_notifier_head(&info->notifier_list); + + /* Add to global notification list */ + 
list_add_tail(&info->list, &qcom_ssr_subsystem_list);
+
+out:
+	mutex_unlock(&qcom_ssr_subsys_lock);
+	return info;
+}
+
+/**
+ * qcom_register_ssr_notifier() - register SSR notification handler
+ * @name:	Subsystem's SSR name
+ * @nb:		notifier_block to be invoked upon subsystem's state change
+ *
+ * This registers the @nb notifier block as part of the notifier chain for a
+ * remoteproc associated with @name. The notifier block's callback
+ * will be invoked when the remote processor's SSR events occur
+ * (pre/post startup and pre/post shutdown).
+ *
+ * Return: a subsystem cookie on success, ERR_PTR on failure.
+ */
+void *qcom_register_ssr_notifier(const char *name, struct notifier_block *nb)
+{
+	struct qcom_ssr_subsystem *info;
+
+	info = qcom_ssr_get_subsys(name);
+	if (IS_ERR(info))
+		return info;
+
+	srcu_notifier_chain_register(&info->notifier_list, nb);
+
+	return &info->notifier_list;
+}
+EXPORT_SYMBOL_GPL(qcom_register_ssr_notifier);
+
+/**
+ * qcom_unregister_ssr_notifier() - unregister SSR notification handler
+ * @notify:	subsystem cookie returned from qcom_register_ssr_notifier
+ * @nb:		notifier_block to unregister
+ *
+ * This function will unregister the notifier from the particular notifier
+ * chain.
+ *
+ * Return: 0 on success, -ENOENT otherwise.
+ */
+int qcom_unregister_ssr_notifier(void *notify, struct notifier_block *nb)
+{
+	return srcu_notifier_chain_unregister(notify, nb);
+}
+EXPORT_SYMBOL_GPL(qcom_unregister_ssr_notifier);
+
+static int ssr_notify_prepare(struct rproc_subdev *subdev)
+{
+	struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev);
+	struct qcom_ssr_notify_data data = {
+		.name = ssr->info->name,
+		.crashed = false,
+	};
+
+	srcu_notifier_call_chain(&ssr->info->notifier_list,
+				 QCOM_SSR_BEFORE_POWERUP, &data);
+	return 0;
+}
+
+static int ssr_notify_start(struct rproc_subdev *subdev)
+{
+	struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev);
+	struct qcom_ssr_notify_data data = {
+		.name = ssr->info->name,
+		.crashed = false,
+	};
+
+	srcu_notifier_call_chain(&ssr->info->notifier_list,
+				 QCOM_SSR_AFTER_POWERUP, &data);
+	return 0;
+}
+
+static void ssr_notify_stop(struct rproc_subdev *subdev, bool crashed)
+{
+	struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev);
+	struct qcom_ssr_notify_data data = {
+		.name = ssr->info->name,
+		.crashed = crashed,
+	};
+
+	srcu_notifier_call_chain(&ssr->info->notifier_list,
+				 QCOM_SSR_BEFORE_SHUTDOWN, &data);
+}
+
+static void ssr_notify_unprepare(struct rproc_subdev *subdev)
+{
+	struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev);
+	struct qcom_ssr_notify_data data = {
+		.name = ssr->info->name,
+		.crashed = false,
+	};
+
+	srcu_notifier_call_chain(&ssr->info->notifier_list,
+				 QCOM_SSR_AFTER_SHUTDOWN, &data);
+}
+
+/**
+ * qcom_add_ssr_subdev() - register subdevice as restart notification source
+ * @rproc:	rproc handle
+ * @ssr:	SSR subdevice handle
+ * @ssr_name:	identifier to use for notifications originating from @rproc
+ *
+ * Once the @ssr subdevice is registered with the @rproc, SSR events will be
+ * sent to all registered listeners for the remoteproc when its SSR events
+ * occur (pre/post startup and pre/post shutdown).
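+ *
+ * Listeners subscribe to these events with qcom_register_ssr_notifier();
+ * a minimal sketch of such a listener (callback, notifier block and cookie
+ * names are illustrative, not part of this API):
+ *
+ *	static int my_ssr_cb(struct notifier_block *nb, unsigned long event,
+ *			     void *data)
+ *	{
+ *		struct qcom_ssr_notify_data *notify_data = data;
+ *
+ *		pr_info("%s: SSR event %lu, crashed = %d\n",
+ *			notify_data->name, event, notify_data->crashed);
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	cookie = qcom_register_ssr_notifier("lpass", &my_ssr_nb);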
+ */ +void qcom_add_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr, + const char *ssr_name) +{ + struct qcom_ssr_subsystem *info; + + info = qcom_ssr_get_subsys(ssr_name); + if (IS_ERR(info)) { + dev_err(&rproc->dev, "Failed to add ssr subdevice\n"); + return; + } + + ssr->info = info; + ssr->subdev.prepare = ssr_notify_prepare; + ssr->subdev.start = ssr_notify_start; + ssr->subdev.stop = ssr_notify_stop; + ssr->subdev.unprepare = ssr_notify_unprepare; + + rproc_add_subdev(rproc, &ssr->subdev); +} +EXPORT_SYMBOL_GPL(qcom_add_ssr_subdev); + +/** + * qcom_remove_ssr_subdev() - remove subdevice as restart notification source + * @rproc: rproc handle + * @ssr: SSR subdevice handle + */ +void qcom_remove_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr) +{ + rproc_remove_subdev(rproc, &ssr->subdev); + ssr->info = NULL; +} +EXPORT_SYMBOL_GPL(qcom_remove_ssr_subdev); + +MODULE_DESCRIPTION("Qualcomm Remoteproc helper driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/remoteproc/qcom_common.h b/drivers/remoteproc/qcom_common.h new file mode 100644 index 000000000..dfc641c3a --- /dev/null +++ b/drivers/remoteproc/qcom_common.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __RPROC_QCOM_COMMON_H__ +#define __RPROC_QCOM_COMMON_H__ + +#include <linux/remoteproc.h> +#include "remoteproc_internal.h" +#include <linux/soc/qcom/qmi.h> + +struct qcom_sysmon; + +struct qcom_rproc_glink { + struct rproc_subdev subdev; + + const char *ssr_name; + + struct device *dev; + struct device_node *node; + struct qcom_glink *edge; +}; + +struct qcom_rproc_subdev { + struct rproc_subdev subdev; + + struct device *dev; + struct device_node *node; + struct qcom_smd_edge *edge; +}; + +struct qcom_ssr_subsystem; + +struct qcom_rproc_ssr { + struct rproc_subdev subdev; + struct qcom_ssr_subsystem *info; +}; + +void qcom_add_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glink, + const char *ssr_name); +void qcom_remove_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glink); + +int qcom_register_dump_segments(struct rproc *rproc, const struct firmware *fw); + +void qcom_add_smd_subdev(struct rproc *rproc, struct qcom_rproc_subdev *smd); +void qcom_remove_smd_subdev(struct rproc *rproc, struct qcom_rproc_subdev *smd); + +void qcom_add_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr, + const char *ssr_name); +void qcom_remove_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr); + +#if IS_ENABLED(CONFIG_QCOM_SYSMON) +struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc, + const char *name, + int ssctl_instance); +void qcom_remove_sysmon_subdev(struct qcom_sysmon *sysmon); +#else +static inline struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc, + const char *name, + int ssctl_instance) +{ + return NULL; +} + +static inline void qcom_remove_sysmon_subdev(struct qcom_sysmon *sysmon) +{ +} +#endif + +#endif diff --git a/drivers/remoteproc/qcom_pil_info.c b/drivers/remoteproc/qcom_pil_info.c new file mode 100644 index 000000000..aca21560e --- /dev/null +++ b/drivers/remoteproc/qcom_pil_info.c @@ -0,0 +1,129 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2020 Linaro Ltd. + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of_address.h> +#include "qcom_pil_info.h" + +/* + * The PIL relocation information region is used to communicate memory regions + * occupied by co-processor firmware for post mortem crash analysis. 
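+ * Post-mortem tools read this table back out of IMEM to locate each image
+ * after a crash.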
+ * + * It consists of an array of entries with an 8 byte textual identifier of the + * region followed by a 64 bit base address and 32 bit size, both little + * endian. + */ +#define PIL_RELOC_NAME_LEN 8 +#define PIL_RELOC_ENTRY_SIZE (PIL_RELOC_NAME_LEN + sizeof(__le64) + sizeof(__le32)) + +struct pil_reloc { + void __iomem *base; + size_t num_entries; +}; + +static struct pil_reloc _reloc __read_mostly; +static DEFINE_MUTEX(pil_reloc_lock); + +static int qcom_pil_info_init(void) +{ + struct device_node *np; + struct resource imem; + void __iomem *base; + int ret; + + /* Already initialized? */ + if (_reloc.base) + return 0; + + np = of_find_compatible_node(NULL, NULL, "qcom,pil-reloc-info"); + if (!np) + return -ENOENT; + + ret = of_address_to_resource(np, 0, &imem); + of_node_put(np); + if (ret < 0) + return ret; + + base = ioremap(imem.start, resource_size(&imem)); + if (!base) { + pr_err("failed to map PIL relocation info region\n"); + return -ENOMEM; + } + + memset_io(base, 0, resource_size(&imem)); + + _reloc.base = base; + _reloc.num_entries = (u32)resource_size(&imem) / PIL_RELOC_ENTRY_SIZE; + + return 0; +} + +/** + * qcom_pil_info_store() - store PIL information of image in IMEM + * @image: name of the image + * @base: base address of the loaded image + * @size: size of the loaded image + * + * Return: 0 on success, negative errno on failure + */ +int qcom_pil_info_store(const char *image, phys_addr_t base, size_t size) +{ + char buf[PIL_RELOC_NAME_LEN]; + void __iomem *entry; + int ret; + int i; + + mutex_lock(&pil_reloc_lock); + ret = qcom_pil_info_init(); + if (ret < 0) { + mutex_unlock(&pil_reloc_lock); + return ret; + } + + for (i = 0; i < _reloc.num_entries; i++) { + entry = _reloc.base + i * PIL_RELOC_ENTRY_SIZE; + + memcpy_fromio(buf, entry, PIL_RELOC_NAME_LEN); + + /* + * An empty record means we didn't find it, given that the + * records are packed. 
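+		 * A name match instead means the image has been stored
+		 * before; its base and size fields are simply rewritten
+		 * below.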
+ */ + if (!buf[0]) + goto found_unused; + + if (!strncmp(buf, image, PIL_RELOC_NAME_LEN)) + goto found_existing; + } + + pr_warn("insufficient PIL info slots\n"); + mutex_unlock(&pil_reloc_lock); + return -ENOMEM; + +found_unused: + memcpy_toio(entry, image, strnlen(image, PIL_RELOC_NAME_LEN)); +found_existing: + /* Use two writel() as base is only aligned to 4 bytes on odd entries */ + writel(base, entry + PIL_RELOC_NAME_LEN); + writel((u64)base >> 32, entry + PIL_RELOC_NAME_LEN + 4); + writel(size, entry + PIL_RELOC_NAME_LEN + sizeof(__le64)); + mutex_unlock(&pil_reloc_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(qcom_pil_info_store); + +static void __exit pil_reloc_exit(void) +{ + mutex_lock(&pil_reloc_lock); + iounmap(_reloc.base); + _reloc.base = NULL; + mutex_unlock(&pil_reloc_lock); +} +module_exit(pil_reloc_exit); + +MODULE_DESCRIPTION("Qualcomm PIL relocation info"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/remoteproc/qcom_pil_info.h b/drivers/remoteproc/qcom_pil_info.h new file mode 100644 index 000000000..0dce61429 --- /dev/null +++ b/drivers/remoteproc/qcom_pil_info.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __QCOM_PIL_INFO_H__ +#define __QCOM_PIL_INFO_H__ + +#include <linux/types.h> + +int qcom_pil_info_store(const char *image, phys_addr_t base, size_t size); + +#endif diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c new file mode 100644 index 000000000..fd6fd3626 --- /dev/null +++ b/drivers/remoteproc/qcom_q6v5.c @@ -0,0 +1,288 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Qualcomm Peripheral Image Loader for Q6V5 + * + * Copyright (C) 2016-2018 Linaro Ltd. + * Copyright (C) 2014 Sony Mobile Communications AB + * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. 
+ */ +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/soc/qcom/smem.h> +#include <linux/soc/qcom/smem_state.h> +#include <linux/remoteproc.h> +#include "qcom_q6v5.h" + +#define Q6V5_PANIC_DELAY_MS 200 + +/** + * qcom_q6v5_prepare() - reinitialize the qcom_q6v5 context before start + * @q6v5: reference to qcom_q6v5 context to be reinitialized + * + * Return: 0 on success, negative errno on failure + */ +int qcom_q6v5_prepare(struct qcom_q6v5 *q6v5) +{ + reinit_completion(&q6v5->start_done); + reinit_completion(&q6v5->stop_done); + + q6v5->running = true; + q6v5->handover_issued = false; + + enable_irq(q6v5->handover_irq); + + return 0; +} +EXPORT_SYMBOL_GPL(qcom_q6v5_prepare); + +/** + * qcom_q6v5_unprepare() - unprepare the qcom_q6v5 context after stop + * @q6v5: reference to qcom_q6v5 context to be unprepared + * + * Return: 0 on success, 1 if handover hasn't yet been called + */ +int qcom_q6v5_unprepare(struct qcom_q6v5 *q6v5) +{ + disable_irq(q6v5->handover_irq); + + return !q6v5->handover_issued; +} +EXPORT_SYMBOL_GPL(qcom_q6v5_unprepare); + +static irqreturn_t q6v5_wdog_interrupt(int irq, void *data) +{ + struct qcom_q6v5 *q6v5 = data; + size_t len; + char *msg; + + /* Sometimes the stop triggers a watchdog rather than a stop-ack */ + if (!q6v5->running) { + complete(&q6v5->stop_done); + return IRQ_HANDLED; + } + + msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, q6v5->crash_reason, &len); + if (!IS_ERR(msg) && len > 0 && msg[0]) + dev_err(q6v5->dev, "watchdog received: %s\n", msg); + else + dev_err(q6v5->dev, "watchdog without message\n"); + + rproc_report_crash(q6v5->rproc, RPROC_WATCHDOG); + + return IRQ_HANDLED; +} + +static irqreturn_t q6v5_fatal_interrupt(int irq, void *data) +{ + struct qcom_q6v5 *q6v5 = data; + size_t len; + char *msg; + + msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, q6v5->crash_reason, &len); + if (!IS_ERR(msg) && len > 0 && msg[0]) + dev_err(q6v5->dev, "fatal error received: %s\n", msg); + else + dev_err(q6v5->dev, "fatal error without message\n"); + + q6v5->running = false; + rproc_report_crash(q6v5->rproc, RPROC_FATAL_ERROR); + + return IRQ_HANDLED; +} + +static irqreturn_t q6v5_ready_interrupt(int irq, void *data) +{ + struct qcom_q6v5 *q6v5 = data; + + complete(&q6v5->start_done); + + return IRQ_HANDLED; +} + +/** + * qcom_q6v5_wait_for_start() - wait for remote processor start signal + * @q6v5: reference to qcom_q6v5 context + * @timeout: timeout to wait for the event, in jiffies + * + * qcom_q6v5_unprepare() should not be called when this function fails. + * + * Return: 0 on success, -ETIMEDOUT on timeout + */ +int qcom_q6v5_wait_for_start(struct qcom_q6v5 *q6v5, int timeout) +{ + int ret; + + ret = wait_for_completion_timeout(&q6v5->start_done, timeout); + if (!ret) + disable_irq(q6v5->handover_irq); + + return !ret ? 
-ETIMEDOUT : 0; +} +EXPORT_SYMBOL_GPL(qcom_q6v5_wait_for_start); + +static irqreturn_t q6v5_handover_interrupt(int irq, void *data) +{ + struct qcom_q6v5 *q6v5 = data; + + if (q6v5->handover) + q6v5->handover(q6v5); + + q6v5->handover_issued = true; + + return IRQ_HANDLED; +} + +static irqreturn_t q6v5_stop_interrupt(int irq, void *data) +{ + struct qcom_q6v5 *q6v5 = data; + + complete(&q6v5->stop_done); + + return IRQ_HANDLED; +} + +/** + * qcom_q6v5_request_stop() - request the remote processor to stop + * @q6v5: reference to qcom_q6v5 context + * + * Return: 0 on success, negative errno on failure + */ +int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5) +{ + int ret; + + q6v5->running = false; + + qcom_smem_state_update_bits(q6v5->state, + BIT(q6v5->stop_bit), BIT(q6v5->stop_bit)); + + ret = wait_for_completion_timeout(&q6v5->stop_done, 5 * HZ); + + qcom_smem_state_update_bits(q6v5->state, BIT(q6v5->stop_bit), 0); + + return ret == 0 ? -ETIMEDOUT : 0; +} +EXPORT_SYMBOL_GPL(qcom_q6v5_request_stop); + +/** + * qcom_q6v5_panic() - panic handler to invoke a stop on the remote + * @q6v5: reference to qcom_q6v5 context + * + * Set the stop bit and sleep in order to allow the remote processor to flush + * its caches etc for post mortem debugging. + * + * Return: 200ms + */ +unsigned long qcom_q6v5_panic(struct qcom_q6v5 *q6v5) +{ + qcom_smem_state_update_bits(q6v5->state, + BIT(q6v5->stop_bit), BIT(q6v5->stop_bit)); + + return Q6V5_PANIC_DELAY_MS; +} +EXPORT_SYMBOL_GPL(qcom_q6v5_panic); + +/** + * qcom_q6v5_init() - initializer of the q6v5 common struct + * @q6v5: handle to be initialized + * @pdev: platform_device reference for acquiring resources + * @rproc: associated remoteproc instance + * @crash_reason: SMEM id for crash reason string, or 0 if none + * @handover: function to be called when proxy resources should be released + * + * Return: 0 on success, negative errno on failure + */ +int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev, + struct rproc *rproc, int crash_reason, + void (*handover)(struct qcom_q6v5 *q6v5)) +{ + int ret; + + q6v5->rproc = rproc; + q6v5->dev = &pdev->dev; + q6v5->crash_reason = crash_reason; + q6v5->handover = handover; + + init_completion(&q6v5->start_done); + init_completion(&q6v5->stop_done); + + q6v5->wdog_irq = platform_get_irq_byname(pdev, "wdog"); + if (q6v5->wdog_irq < 0) + return q6v5->wdog_irq; + + ret = devm_request_threaded_irq(&pdev->dev, q6v5->wdog_irq, + NULL, q6v5_wdog_interrupt, + IRQF_TRIGGER_RISING | IRQF_ONESHOT, + "q6v5 wdog", q6v5); + if (ret) { + dev_err(&pdev->dev, "failed to acquire wdog IRQ\n"); + return ret; + } + + q6v5->fatal_irq = platform_get_irq_byname(pdev, "fatal"); + if (q6v5->fatal_irq < 0) + return q6v5->fatal_irq; + + ret = devm_request_threaded_irq(&pdev->dev, q6v5->fatal_irq, + NULL, q6v5_fatal_interrupt, + IRQF_TRIGGER_RISING | IRQF_ONESHOT, + "q6v5 fatal", q6v5); + if (ret) { + dev_err(&pdev->dev, "failed to acquire fatal IRQ\n"); + return ret; + } + + q6v5->ready_irq = platform_get_irq_byname(pdev, "ready"); + if (q6v5->ready_irq < 0) + return q6v5->ready_irq; + + ret = devm_request_threaded_irq(&pdev->dev, q6v5->ready_irq, + NULL, q6v5_ready_interrupt, + IRQF_TRIGGER_RISING | IRQF_ONESHOT, + "q6v5 ready", q6v5); + if (ret) { + dev_err(&pdev->dev, "failed to acquire ready IRQ\n"); + return ret; + } + + q6v5->handover_irq = platform_get_irq_byname(pdev, "handover"); + if (q6v5->handover_irq < 0) + return q6v5->handover_irq; + + ret = devm_request_threaded_irq(&pdev->dev, q6v5->handover_irq, + NULL, 
q6v5_handover_interrupt, + IRQF_TRIGGER_RISING | IRQF_ONESHOT, + "q6v5 handover", q6v5); + if (ret) { + dev_err(&pdev->dev, "failed to acquire handover IRQ\n"); + return ret; + } + disable_irq(q6v5->handover_irq); + + q6v5->stop_irq = platform_get_irq_byname(pdev, "stop-ack"); + if (q6v5->stop_irq < 0) + return q6v5->stop_irq; + + ret = devm_request_threaded_irq(&pdev->dev, q6v5->stop_irq, + NULL, q6v5_stop_interrupt, + IRQF_TRIGGER_RISING | IRQF_ONESHOT, + "q6v5 stop", q6v5); + if (ret) { + dev_err(&pdev->dev, "failed to acquire stop-ack IRQ\n"); + return ret; + } + + q6v5->state = qcom_smem_state_get(&pdev->dev, "stop", &q6v5->stop_bit); + if (IS_ERR(q6v5->state)) { + dev_err(&pdev->dev, "failed to acquire stop state\n"); + return PTR_ERR(q6v5->state); + } + + return 0; +} +EXPORT_SYMBOL_GPL(qcom_q6v5_init); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Qualcomm Peripheral Image Loader for Q6V5"); diff --git a/drivers/remoteproc/qcom_q6v5.h b/drivers/remoteproc/qcom_q6v5.h new file mode 100644 index 000000000..c4ed887c1 --- /dev/null +++ b/drivers/remoteproc/qcom_q6v5.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __QCOM_Q6V5_H__ +#define __QCOM_Q6V5_H__ + +#include <linux/kernel.h> +#include <linux/completion.h> + +struct rproc; +struct qcom_smem_state; + +struct qcom_q6v5 { + struct device *dev; + struct rproc *rproc; + + struct qcom_smem_state *state; + unsigned stop_bit; + + int wdog_irq; + int fatal_irq; + int ready_irq; + int handover_irq; + int stop_irq; + + bool handover_issued; + + struct completion start_done; + struct completion stop_done; + + int crash_reason; + + bool running; + + void (*handover)(struct qcom_q6v5 *q6v5); +}; + +int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev, + struct rproc *rproc, int crash_reason, + void (*handover)(struct qcom_q6v5 *q6v5)); + +int qcom_q6v5_prepare(struct qcom_q6v5 *q6v5); +int qcom_q6v5_unprepare(struct qcom_q6v5 *q6v5); +int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5); +int qcom_q6v5_wait_for_start(struct qcom_q6v5 *q6v5, int timeout); +unsigned long qcom_q6v5_panic(struct qcom_q6v5 *q6v5); + +#endif diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c new file mode 100644 index 000000000..c39138d39 --- /dev/null +++ b/drivers/remoteproc/qcom_q6v5_adsp.c @@ -0,0 +1,557 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Qualcomm Technology Inc. ADSP Peripheral Image Loader for SDM845. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/firmware.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pm_domain.h> +#include <linux/pm_runtime.h> +#include <linux/regmap.h> +#include <linux/remoteproc.h> +#include <linux/reset.h> +#include <linux/soc/qcom/mdt_loader.h> +#include <linux/soc/qcom/smem.h> +#include <linux/soc/qcom/smem_state.h> + +#include "qcom_common.h" +#include "qcom_pil_info.h" +#include "qcom_q6v5.h" +#include "remoteproc_internal.h" + +/* time out value */ +#define ACK_TIMEOUT 1000 +#define BOOT_FSM_TIMEOUT 10000 +/* mask values */ +#define EVB_MASK GENMASK(27, 4) +/*QDSP6SS register offsets*/ +#define RST_EVB_REG 0x10 +#define CORE_START_REG 0x400 +#define BOOT_CMD_REG 0x404 +#define BOOT_STATUS_REG 0x408 +#define RET_CFG_REG 0x1C +/*TCSR register offsets*/ +#define LPASS_MASTER_IDLE_REG 0x8 +#define LPASS_HALTACK_REG 0x4 +#define LPASS_PWR_ON_REG 0x10 +#define LPASS_HALTREQ_REG 0x0 + +#define QDSP6SS_XO_CBCR 0x38 +#define QDSP6SS_CORE_CBCR 0x20 +#define QDSP6SS_SLEEP_CBCR 0x3c + +struct adsp_pil_data { + int crash_reason_smem; + const char *firmware_name; + + const char *ssr_name; + const char *sysmon_name; + int ssctl_id; + + const char **clk_ids; + int num_clks; +}; + +struct qcom_adsp { + struct device *dev; + struct rproc *rproc; + + struct qcom_q6v5 q6v5; + + struct clk *xo; + + int num_clks; + struct clk_bulk_data *clks; + + void __iomem *qdsp6ss_base; + + struct reset_control *pdc_sync_reset; + struct reset_control *restart; + + struct regmap *halt_map; + unsigned int halt_lpass; + + int crash_reason_smem; + const char *info_name; + + struct completion start_done; + struct completion stop_done; + + phys_addr_t mem_phys; + phys_addr_t mem_reloc; + void *mem_region; + size_t mem_size; + + struct qcom_rproc_glink glink_subdev; + struct qcom_rproc_ssr ssr_subdev; + struct qcom_sysmon *sysmon; +}; + +static int qcom_adsp_shutdown(struct qcom_adsp *adsp) +{ + unsigned long timeout; + unsigned int val; + int ret; + + /* Reset the retention logic */ + val = readl(adsp->qdsp6ss_base + RET_CFG_REG); + val |= 0x1; + writel(val, adsp->qdsp6ss_base + RET_CFG_REG); + + clk_bulk_disable_unprepare(adsp->num_clks, adsp->clks); + + /* QDSP6 master port needs to be explicitly halted */ + ret = regmap_read(adsp->halt_map, + adsp->halt_lpass + LPASS_PWR_ON_REG, &val); + if (ret || !val) + goto reset; + + ret = regmap_read(adsp->halt_map, + adsp->halt_lpass + LPASS_MASTER_IDLE_REG, + &val); + if (ret || val) + goto reset; + + regmap_write(adsp->halt_map, + adsp->halt_lpass + LPASS_HALTREQ_REG, 1); + + /* Wait for halt ACK from QDSP6 */ + timeout = jiffies + msecs_to_jiffies(ACK_TIMEOUT); + for (;;) { + ret = regmap_read(adsp->halt_map, + adsp->halt_lpass + LPASS_HALTACK_REG, &val); + if (ret || val || time_after(jiffies, timeout)) + break; + + usleep_range(1000, 1100); + } + + ret = regmap_read(adsp->halt_map, + adsp->halt_lpass + LPASS_MASTER_IDLE_REG, &val); + if (ret || !val) + dev_err(adsp->dev, "port failed halt\n"); + +reset: + /* Assert the LPASS PDC Reset */ + reset_control_assert(adsp->pdc_sync_reset); + /* Place the LPASS processor into reset */ + reset_control_assert(adsp->restart); + /* wait after asserting subsystem restart from AOSS */ + usleep_range(200, 300); + + /* Clear the halt request for the AXIM and 
AHBM for Q6 */ + regmap_write(adsp->halt_map, adsp->halt_lpass + LPASS_HALTREQ_REG, 0); + + /* De-assert the LPASS PDC Reset */ + reset_control_deassert(adsp->pdc_sync_reset); + /* Remove the LPASS reset */ + reset_control_deassert(adsp->restart); + /* wait after de-asserting subsystem restart from AOSS */ + usleep_range(200, 300); + + return 0; +} + +static int adsp_load(struct rproc *rproc, const struct firmware *fw) +{ + struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv; + int ret; + + ret = qcom_mdt_load_no_init(adsp->dev, fw, rproc->firmware, 0, + adsp->mem_region, adsp->mem_phys, + adsp->mem_size, &adsp->mem_reloc); + if (ret) + return ret; + + qcom_pil_info_store(adsp->info_name, adsp->mem_phys, adsp->mem_size); + + return 0; +} + +static int adsp_start(struct rproc *rproc) +{ + struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv; + int ret; + unsigned int val; + + qcom_q6v5_prepare(&adsp->q6v5); + + ret = clk_prepare_enable(adsp->xo); + if (ret) + goto disable_irqs; + + dev_pm_genpd_set_performance_state(adsp->dev, INT_MAX); + ret = pm_runtime_get_sync(adsp->dev); + if (ret) { + pm_runtime_put_noidle(adsp->dev); + goto disable_xo_clk; + } + + ret = clk_bulk_prepare_enable(adsp->num_clks, adsp->clks); + if (ret) { + dev_err(adsp->dev, "adsp clk_enable failed\n"); + goto disable_power_domain; + } + + /* Enable the XO clock */ + writel(1, adsp->qdsp6ss_base + QDSP6SS_XO_CBCR); + + /* Enable the QDSP6SS sleep clock */ + writel(1, adsp->qdsp6ss_base + QDSP6SS_SLEEP_CBCR); + + /* Enable the QDSP6 core clock */ + writel(1, adsp->qdsp6ss_base + QDSP6SS_CORE_CBCR); + + /* Program boot address */ + writel(adsp->mem_phys >> 4, adsp->qdsp6ss_base + RST_EVB_REG); + + /* De-assert QDSP6 stop core. QDSP6 will execute after out of reset */ + writel(0x1, adsp->qdsp6ss_base + CORE_START_REG); + + /* Trigger boot FSM to start QDSP6 */ + writel(0x1, adsp->qdsp6ss_base + BOOT_CMD_REG); + + /* Wait for core to come out of reset */ + ret = readl_poll_timeout(adsp->qdsp6ss_base + BOOT_STATUS_REG, + val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT); + if (ret) { + dev_err(adsp->dev, "failed to bootup adsp\n"); + goto disable_adsp_clks; + } + + ret = qcom_q6v5_wait_for_start(&adsp->q6v5, msecs_to_jiffies(5 * HZ)); + if (ret == -ETIMEDOUT) { + dev_err(adsp->dev, "start timed out\n"); + goto disable_adsp_clks; + } + + return 0; + +disable_adsp_clks: + clk_bulk_disable_unprepare(adsp->num_clks, adsp->clks); +disable_power_domain: + dev_pm_genpd_set_performance_state(adsp->dev, 0); + pm_runtime_put(adsp->dev); +disable_xo_clk: + clk_disable_unprepare(adsp->xo); +disable_irqs: + qcom_q6v5_unprepare(&adsp->q6v5); + + return ret; +} + +static void qcom_adsp_pil_handover(struct qcom_q6v5 *q6v5) +{ + struct qcom_adsp *adsp = container_of(q6v5, struct qcom_adsp, q6v5); + + clk_disable_unprepare(adsp->xo); + dev_pm_genpd_set_performance_state(adsp->dev, 0); + pm_runtime_put(adsp->dev); +} + +static int adsp_stop(struct rproc *rproc) +{ + struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv; + int handover; + int ret; + + ret = qcom_q6v5_request_stop(&adsp->q6v5); + if (ret == -ETIMEDOUT) + dev_err(adsp->dev, "timed out on wait\n"); + + ret = qcom_adsp_shutdown(adsp); + if (ret) + dev_err(adsp->dev, "failed to shutdown: %d\n", ret); + + handover = qcom_q6v5_unprepare(&adsp->q6v5); + if (handover) + qcom_adsp_pil_handover(&adsp->q6v5); + + return ret; +} + +static void *adsp_da_to_va(struct rproc *rproc, u64 da, size_t len) +{ + struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv; + int offset; + + 
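+	/*
+	 * Device addresses are offsets from the relocated firmware base;
+	 * anything outside [mem_reloc, mem_reloc + mem_size) cannot be
+	 * mapped back to a kernel virtual address.
+	 */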
offset = da - adsp->mem_reloc; + if (offset < 0 || offset + len > adsp->mem_size) + return NULL; + + return adsp->mem_region + offset; +} + +static unsigned long adsp_panic(struct rproc *rproc) +{ + struct qcom_adsp *adsp = rproc->priv; + + return qcom_q6v5_panic(&adsp->q6v5); +} + +static const struct rproc_ops adsp_ops = { + .start = adsp_start, + .stop = adsp_stop, + .da_to_va = adsp_da_to_va, + .parse_fw = qcom_register_dump_segments, + .load = adsp_load, + .panic = adsp_panic, +}; + +static int adsp_init_clock(struct qcom_adsp *adsp, const char **clk_ids) +{ + int num_clks = 0; + int i, ret; + + adsp->xo = devm_clk_get(adsp->dev, "xo"); + if (IS_ERR(adsp->xo)) { + ret = PTR_ERR(adsp->xo); + if (ret != -EPROBE_DEFER) + dev_err(adsp->dev, "failed to get xo clock"); + return ret; + } + + for (i = 0; clk_ids[i]; i++) + num_clks++; + + adsp->num_clks = num_clks; + adsp->clks = devm_kcalloc(adsp->dev, adsp->num_clks, + sizeof(*adsp->clks), GFP_KERNEL); + if (!adsp->clks) + return -ENOMEM; + + for (i = 0; i < adsp->num_clks; i++) + adsp->clks[i].id = clk_ids[i]; + + return devm_clk_bulk_get(adsp->dev, adsp->num_clks, adsp->clks); +} + +static int adsp_init_reset(struct qcom_adsp *adsp) +{ + adsp->pdc_sync_reset = devm_reset_control_get_optional_exclusive(adsp->dev, + "pdc_sync"); + if (IS_ERR(adsp->pdc_sync_reset)) { + dev_err(adsp->dev, "failed to acquire pdc_sync reset\n"); + return PTR_ERR(adsp->pdc_sync_reset); + } + + adsp->restart = devm_reset_control_get_optional_exclusive(adsp->dev, "restart"); + + /* Fall back to the old "cc_lpass" if "restart" is absent */ + if (!adsp->restart) + adsp->restart = devm_reset_control_get_exclusive(adsp->dev, "cc_lpass"); + + if (IS_ERR(adsp->restart)) { + dev_err(adsp->dev, "failed to acquire restart\n"); + return PTR_ERR(adsp->restart); + } + + return 0; +} + +static int adsp_init_mmio(struct qcom_adsp *adsp, + struct platform_device *pdev) +{ + struct device_node *syscon; + int ret; + + adsp->qdsp6ss_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(adsp->qdsp6ss_base)) { + dev_err(adsp->dev, "failed to map QDSP6SS registers\n"); + return PTR_ERR(adsp->qdsp6ss_base); + } + + syscon = of_parse_phandle(pdev->dev.of_node, "qcom,halt-regs", 0); + if (!syscon) { + dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n"); + return -EINVAL; + } + + adsp->halt_map = syscon_node_to_regmap(syscon); + of_node_put(syscon); + if (IS_ERR(adsp->halt_map)) + return PTR_ERR(adsp->halt_map); + + ret = of_property_read_u32_index(pdev->dev.of_node, "qcom,halt-regs", + 1, &adsp->halt_lpass); + if (ret < 0) { + dev_err(&pdev->dev, "no offset in syscon\n"); + return ret; + } + + return 0; +} + +static int adsp_alloc_memory_region(struct qcom_adsp *adsp) +{ + struct device_node *node; + struct resource r; + int ret; + + node = of_parse_phandle(adsp->dev->of_node, "memory-region", 0); + if (!node) { + dev_err(adsp->dev, "no memory-region specified\n"); + return -EINVAL; + } + + ret = of_address_to_resource(node, 0, &r); + of_node_put(node); + if (ret) + return ret; + + adsp->mem_phys = adsp->mem_reloc = r.start; + adsp->mem_size = resource_size(&r); + adsp->mem_region = devm_ioremap_wc(adsp->dev, + adsp->mem_phys, adsp->mem_size); + if (!adsp->mem_region) { + dev_err(adsp->dev, "unable to map memory region: %pa+%zx\n", + &r.start, adsp->mem_size); + return -EBUSY; + } + + return 0; +} + +static int adsp_probe(struct platform_device *pdev) +{ + const struct adsp_pil_data *desc; + struct qcom_adsp *adsp; + struct rproc *rproc; + int ret; + + desc = 
of_device_get_match_data(&pdev->dev); + if (!desc) + return -EINVAL; + + rproc = rproc_alloc(&pdev->dev, pdev->name, &adsp_ops, + desc->firmware_name, sizeof(*adsp)); + if (!rproc) { + dev_err(&pdev->dev, "unable to allocate remoteproc\n"); + return -ENOMEM; + } + rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE); + + adsp = (struct qcom_adsp *)rproc->priv; + adsp->dev = &pdev->dev; + adsp->rproc = rproc; + adsp->info_name = desc->sysmon_name; + platform_set_drvdata(pdev, adsp); + + ret = adsp_alloc_memory_region(adsp); + if (ret) + goto free_rproc; + + ret = adsp_init_clock(adsp, desc->clk_ids); + if (ret) + goto free_rproc; + + pm_runtime_enable(adsp->dev); + + ret = adsp_init_reset(adsp); + if (ret) + goto disable_pm; + + ret = adsp_init_mmio(adsp, pdev); + if (ret) + goto disable_pm; + + ret = qcom_q6v5_init(&adsp->q6v5, pdev, rproc, desc->crash_reason_smem, + qcom_adsp_pil_handover); + if (ret) + goto disable_pm; + + qcom_add_glink_subdev(rproc, &adsp->glink_subdev, desc->ssr_name); + qcom_add_ssr_subdev(rproc, &adsp->ssr_subdev, desc->ssr_name); + adsp->sysmon = qcom_add_sysmon_subdev(rproc, + desc->sysmon_name, + desc->ssctl_id); + if (IS_ERR(adsp->sysmon)) { + ret = PTR_ERR(adsp->sysmon); + goto disable_pm; + } + + ret = rproc_add(rproc); + if (ret) + goto disable_pm; + + return 0; + +disable_pm: + pm_runtime_disable(adsp->dev); +free_rproc: + rproc_free(rproc); + + return ret; +} + +static int adsp_remove(struct platform_device *pdev) +{ + struct qcom_adsp *adsp = platform_get_drvdata(pdev); + + rproc_del(adsp->rproc); + + qcom_remove_glink_subdev(adsp->rproc, &adsp->glink_subdev); + qcom_remove_sysmon_subdev(adsp->sysmon); + qcom_remove_ssr_subdev(adsp->rproc, &adsp->ssr_subdev); + pm_runtime_disable(adsp->dev); + rproc_free(adsp->rproc); + + return 0; +} + +static const struct adsp_pil_data adsp_resource_init = { + .crash_reason_smem = 423, + .firmware_name = "adsp.mdt", + .ssr_name = "lpass", + .sysmon_name = "adsp", + .ssctl_id = 0x14, + .clk_ids = (const char*[]) { + "sway_cbcr", "lpass_ahbs_aon_cbcr", "lpass_ahbm_aon_cbcr", + "qdsp6ss_xo", "qdsp6ss_sleep", "qdsp6ss_core", NULL + }, + .num_clks = 7, +}; + +static const struct adsp_pil_data cdsp_resource_init = { + .crash_reason_smem = 601, + .firmware_name = "cdsp.mdt", + .ssr_name = "cdsp", + .sysmon_name = "cdsp", + .ssctl_id = 0x17, + .clk_ids = (const char*[]) { + "sway", "tbu", "bimc", "ahb_aon", "q6ss_slave", "q6ss_master", + "q6_axim", NULL + }, + .num_clks = 7, +}; + +static const struct of_device_id adsp_of_match[] = { + { .compatible = "qcom,qcs404-cdsp-pil", .data = &cdsp_resource_init }, + { .compatible = "qcom,sdm845-adsp-pil", .data = &adsp_resource_init }, + { }, +}; +MODULE_DEVICE_TABLE(of, adsp_of_match); + +static struct platform_driver adsp_pil_driver = { + .probe = adsp_probe, + .remove = adsp_remove, + .driver = { + .name = "qcom_q6v5_adsp", + .of_match_table = adsp_of_match, + }, +}; + +module_platform_driver(adsp_pil_driver); +MODULE_DESCRIPTION("QTI SDM845 ADSP Peripheral Image Loader"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c new file mode 100644 index 000000000..3d975ecd9 --- /dev/null +++ b/drivers/remoteproc/qcom_q6v5_mss.c @@ -0,0 +1,2092 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Qualcomm self-authenticating modem subsystem remoteproc driver + * + * Copyright (C) 2016 Linaro Ltd. + * Copyright (C) 2014 Sony Mobile Communications AB + * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. 
+ */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/devcoredump.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/of_reserved_mem.h> +#include <linux/platform_device.h> +#include <linux/pm_domain.h> +#include <linux/pm_runtime.h> +#include <linux/regmap.h> +#include <linux/regulator/consumer.h> +#include <linux/remoteproc.h> +#include <linux/reset.h> +#include <linux/soc/qcom/mdt_loader.h> +#include <linux/iopoll.h> +#include <linux/slab.h> + +#include "remoteproc_internal.h" +#include "qcom_common.h" +#include "qcom_pil_info.h" +#include "qcom_q6v5.h" + +#include <linux/qcom_scm.h> + +#define MPSS_CRASH_REASON_SMEM 421 + +#define MBA_LOG_SIZE SZ_4K + +/* RMB Status Register Values */ +#define RMB_PBL_SUCCESS 0x1 + +#define RMB_MBA_XPU_UNLOCKED 0x1 +#define RMB_MBA_XPU_UNLOCKED_SCRIBBLED 0x2 +#define RMB_MBA_META_DATA_AUTH_SUCCESS 0x3 +#define RMB_MBA_AUTH_COMPLETE 0x4 + +/* PBL/MBA interface registers */ +#define RMB_MBA_IMAGE_REG 0x00 +#define RMB_PBL_STATUS_REG 0x04 +#define RMB_MBA_COMMAND_REG 0x08 +#define RMB_MBA_STATUS_REG 0x0C +#define RMB_PMI_META_DATA_REG 0x10 +#define RMB_PMI_CODE_START_REG 0x14 +#define RMB_PMI_CODE_LENGTH_REG 0x18 +#define RMB_MBA_MSS_STATUS 0x40 +#define RMB_MBA_ALT_RESET 0x44 + +#define RMB_CMD_META_DATA_READY 0x1 +#define RMB_CMD_LOAD_READY 0x2 + +/* QDSP6SS Register Offsets */ +#define QDSP6SS_RESET_REG 0x014 +#define QDSP6SS_GFMUX_CTL_REG 0x020 +#define QDSP6SS_PWR_CTL_REG 0x030 +#define QDSP6SS_MEM_PWR_CTL 0x0B0 +#define QDSP6V6SS_MEM_PWR_CTL 0x034 +#define QDSP6SS_STRAP_ACC 0x110 + +/* AXI Halt Register Offsets */ +#define AXI_HALTREQ_REG 0x0 +#define AXI_HALTACK_REG 0x4 +#define AXI_IDLE_REG 0x8 +#define AXI_GATING_VALID_OVERRIDE BIT(0) + +#define HALT_ACK_TIMEOUT_US 100000 + +/* QDSP6SS_RESET */ +#define Q6SS_STOP_CORE BIT(0) +#define Q6SS_CORE_ARES BIT(1) +#define Q6SS_BUS_ARES_ENABLE BIT(2) + +/* QDSP6SS CBCR */ +#define Q6SS_CBCR_CLKEN BIT(0) +#define Q6SS_CBCR_CLKOFF BIT(31) +#define Q6SS_CBCR_TIMEOUT_US 200 + +/* QDSP6SS_GFMUX_CTL */ +#define Q6SS_CLK_ENABLE BIT(1) + +/* QDSP6SS_PWR_CTL */ +#define Q6SS_L2DATA_SLP_NRET_N_0 BIT(0) +#define Q6SS_L2DATA_SLP_NRET_N_1 BIT(1) +#define Q6SS_L2DATA_SLP_NRET_N_2 BIT(2) +#define Q6SS_L2TAG_SLP_NRET_N BIT(16) +#define Q6SS_ETB_SLP_NRET_N BIT(17) +#define Q6SS_L2DATA_STBY_N BIT(18) +#define Q6SS_SLP_RET_N BIT(19) +#define Q6SS_CLAMP_IO BIT(20) +#define QDSS_BHS_ON BIT(21) +#define QDSS_LDO_BYP BIT(22) + +/* QDSP6v56 parameters */ +#define QDSP6v56_LDO_BYP BIT(25) +#define QDSP6v56_BHS_ON BIT(24) +#define QDSP6v56_CLAMP_WL BIT(21) +#define QDSP6v56_CLAMP_QMC_MEM BIT(22) +#define QDSP6SS_XO_CBCR 0x0038 +#define QDSP6SS_ACC_OVERRIDE_VAL 0x20 + +/* QDSP6v65 parameters */ +#define QDSP6SS_CORE_CBCR 0x20 +#define QDSP6SS_SLEEP 0x3C +#define QDSP6SS_BOOT_CORE_START 0x400 +#define QDSP6SS_BOOT_CMD 0x404 +#define BOOT_FSM_TIMEOUT 10000 + +struct reg_info { + struct regulator *reg; + int uV; + int uA; +}; + +struct qcom_mss_reg_res { + const char *supply; + int uV; + int uA; +}; + +struct rproc_hexagon_res { + const char *hexagon_mba_image; + struct qcom_mss_reg_res *proxy_supply; + struct qcom_mss_reg_res *active_supply; + char **proxy_clk_names; + char **reset_clk_names; + char **active_clk_names; + char **active_pd_names; + char **proxy_pd_names; + int version; + bool need_mem_protection; + bool has_alt_reset; + bool 
has_mba_logs;
+	bool has_spare_reg;
+};
+
+struct q6v5 {
+	struct device *dev;
+	struct rproc *rproc;
+
+	void __iomem *reg_base;
+	void __iomem *rmb_base;
+
+	struct regmap *halt_map;
+	struct regmap *conn_map;
+
+	u32 halt_q6;
+	u32 halt_modem;
+	u32 halt_nc;
+	u32 conn_box;
+
+	struct reset_control *mss_restart;
+	struct reset_control *pdc_reset;
+
+	struct qcom_q6v5 q6v5;
+
+	struct clk *active_clks[8];
+	struct clk *reset_clks[4];
+	struct clk *proxy_clks[4];
+	struct device *active_pds[1];
+	struct device *proxy_pds[3];
+	int active_clk_count;
+	int reset_clk_count;
+	int proxy_clk_count;
+	int active_pd_count;
+	int proxy_pd_count;
+
+	struct reg_info active_regs[1];
+	struct reg_info proxy_regs[3];
+	int active_reg_count;
+	int proxy_reg_count;
+
+	bool dump_mba_loaded;
+	size_t current_dump_size;
+	size_t total_dump_size;
+
+	phys_addr_t mba_phys;
+	void *mba_region;
+	size_t mba_size;
+	size_t dp_size;
+
+	phys_addr_t mdata_phys;
+	size_t mdata_size;
+
+	phys_addr_t mpss_phys;
+	phys_addr_t mpss_reloc;
+	size_t mpss_size;
+
+	struct qcom_rproc_glink glink_subdev;
+	struct qcom_rproc_subdev smd_subdev;
+	struct qcom_rproc_ssr ssr_subdev;
+	struct qcom_sysmon *sysmon;
+	bool need_mem_protection;
+	bool has_alt_reset;
+	bool has_mba_logs;
+	bool has_spare_reg;
+	int mpss_perm;
+	int mba_perm;
+	const char *hexagon_mdt_image;
+	int version;
+};
+
+enum {
+	MSS_MSM8916,
+	MSS_MSM8974,
+	MSS_MSM8996,
+	MSS_MSM8998,
+	MSS_SC7180,
+	MSS_SDM845,
+};
+
+static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
+			       const struct qcom_mss_reg_res *reg_res)
+{
+	int rc;
+	int i;
+
+	if (!reg_res)
+		return 0;
+
+	for (i = 0; reg_res[i].supply; i++) {
+		regs[i].reg = devm_regulator_get(dev, reg_res[i].supply);
+		if (IS_ERR(regs[i].reg)) {
+			rc = PTR_ERR(regs[i].reg);
+			if (rc != -EPROBE_DEFER)
+				dev_err(dev, "Failed to get %s regulator\n",
+					reg_res[i].supply);
+			return rc;
+		}
+
+		regs[i].uV = reg_res[i].uV;
+		regs[i].uA = reg_res[i].uA;
+	}
+
+	return i;
+}
+
+static int q6v5_regulator_enable(struct q6v5 *qproc,
+				 struct reg_info *regs, int count)
+{
+	int ret;
+	int i;
+
+	for (i = 0; i < count; i++) {
+		if (regs[i].uV > 0) {
+			ret = regulator_set_voltage(regs[i].reg,
+						    regs[i].uV, INT_MAX);
+			if (ret) {
+				dev_err(qproc->dev,
+					"Failed to request voltage for %d.\n",
+					i);
+				goto err;
+			}
+		}
+
+		if (regs[i].uA > 0) {
+			ret = regulator_set_load(regs[i].reg,
+						 regs[i].uA);
+			if (ret < 0) {
+				dev_err(qproc->dev,
+					"Failed to set regulator mode\n");
+				goto err;
+			}
+		}
+
+		ret = regulator_enable(regs[i].reg);
+		if (ret) {
+			dev_err(qproc->dev, "Regulator enable failed\n");
+			goto err;
+		}
+	}
+
+	return 0;
+err:
+	for (; i >= 0; i--) {
+		if (regs[i].uV > 0)
+			regulator_set_voltage(regs[i].reg, 0, INT_MAX);
+
+		if (regs[i].uA > 0)
+			regulator_set_load(regs[i].reg, 0);
+
+		regulator_disable(regs[i].reg);
+	}
+
+	return ret;
+}
+
+static void q6v5_regulator_disable(struct q6v5 *qproc,
+				   struct reg_info *regs, int count)
+{
+	int i;
+
+	for (i = 0; i < count; i++) {
+		if (regs[i].uV > 0)
+			regulator_set_voltage(regs[i].reg, 0, INT_MAX);
+
+		if (regs[i].uA > 0)
+			regulator_set_load(regs[i].reg, 0);
+
+		regulator_disable(regs[i].reg);
+	}
+}
+
+static int q6v5_clk_enable(struct device *dev,
+			   struct clk **clks, int count)
+{
+	int rc;
+	int i;
+
+	for (i = 0; i < count; i++) {
+		rc = clk_prepare_enable(clks[i]);
+		if (rc) {
+			dev_err(dev, "Clock enable failed\n");
+			goto err;
+		}
+	}
+
+	return 0;
+err:
+	for (i--; i >= 0; i--)
+		clk_disable_unprepare(clks[i]);
+
+	return rc;
+}
+
+static 
void q6v5_clk_disable(struct device *dev, + struct clk **clks, int count) +{ + int i; + + for (i = 0; i < count; i++) + clk_disable_unprepare(clks[i]); +} + +static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds, + size_t pd_count) +{ + int ret; + int i; + + for (i = 0; i < pd_count; i++) { + dev_pm_genpd_set_performance_state(pds[i], INT_MAX); + ret = pm_runtime_get_sync(pds[i]); + if (ret < 0) { + pm_runtime_put_noidle(pds[i]); + dev_pm_genpd_set_performance_state(pds[i], 0); + goto unroll_pd_votes; + } + } + + return 0; + +unroll_pd_votes: + for (i--; i >= 0; i--) { + dev_pm_genpd_set_performance_state(pds[i], 0); + pm_runtime_put(pds[i]); + } + + return ret; +} + +static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds, + size_t pd_count) +{ + int i; + + for (i = 0; i < pd_count; i++) { + dev_pm_genpd_set_performance_state(pds[i], 0); + pm_runtime_put(pds[i]); + } +} + +static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm, + bool local, bool remote, phys_addr_t addr, + size_t size) +{ + struct qcom_scm_vmperm next[2]; + int perms = 0; + + if (!qproc->need_mem_protection) + return 0; + + if (local == !!(*current_perm & BIT(QCOM_SCM_VMID_HLOS)) && + remote == !!(*current_perm & BIT(QCOM_SCM_VMID_MSS_MSA))) + return 0; + + if (local) { + next[perms].vmid = QCOM_SCM_VMID_HLOS; + next[perms].perm = QCOM_SCM_PERM_RWX; + perms++; + } + + if (remote) { + next[perms].vmid = QCOM_SCM_VMID_MSS_MSA; + next[perms].perm = QCOM_SCM_PERM_RW; + perms++; + } + + return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K), + current_perm, next, perms); +} + +static void q6v5_debug_policy_load(struct q6v5 *qproc) +{ + const struct firmware *dp_fw; + + if (request_firmware_direct(&dp_fw, "msadp", qproc->dev)) + return; + + if (SZ_1M + dp_fw->size <= qproc->mba_size) { + memcpy(qproc->mba_region + SZ_1M, dp_fw->data, dp_fw->size); + qproc->dp_size = dp_fw->size; + } + + release_firmware(dp_fw); +} + +static int q6v5_load(struct rproc *rproc, const struct firmware *fw) +{ + struct q6v5 *qproc = rproc->priv; + + /* MBA is restricted to a maximum size of 1M */ + if (fw->size > qproc->mba_size || fw->size > SZ_1M) { + dev_err(qproc->dev, "MBA firmware load failed\n"); + return -EINVAL; + } + + memcpy(qproc->mba_region, fw->data, fw->size); + q6v5_debug_policy_load(qproc); + + return 0; +} + +static int q6v5_reset_assert(struct q6v5 *qproc) +{ + int ret; + + if (qproc->has_alt_reset) { + reset_control_assert(qproc->pdc_reset); + ret = reset_control_reset(qproc->mss_restart); + reset_control_deassert(qproc->pdc_reset); + } else if (qproc->has_spare_reg) { + /* + * When the AXI pipeline is being reset with the Q6 modem partly + * operational there is possibility of AXI valid signal to + * glitch, leading to spurious transactions and Q6 hangs. A work + * around is employed by asserting the AXI_GATING_VALID_OVERRIDE + * BIT before triggering Q6 MSS reset. AXI_GATING_VALID_OVERRIDE + * is withdrawn post MSS assert followed by a MSS deassert, + * while holding the PDC reset. 
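+		 * The sequence below therefore asserts the PDC reset, raises
+		 * the override, asserts the MSS restart, releases the PDC
+		 * reset, clears the override and only then deasserts the MSS
+		 * restart.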
+ */ + reset_control_assert(qproc->pdc_reset); + regmap_update_bits(qproc->conn_map, qproc->conn_box, + AXI_GATING_VALID_OVERRIDE, 1); + reset_control_assert(qproc->mss_restart); + reset_control_deassert(qproc->pdc_reset); + regmap_update_bits(qproc->conn_map, qproc->conn_box, + AXI_GATING_VALID_OVERRIDE, 0); + ret = reset_control_deassert(qproc->mss_restart); + } else { + ret = reset_control_assert(qproc->mss_restart); + } + + return ret; +} + +static int q6v5_reset_deassert(struct q6v5 *qproc) +{ + int ret; + + if (qproc->has_alt_reset) { + reset_control_assert(qproc->pdc_reset); + writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET); + ret = reset_control_reset(qproc->mss_restart); + writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET); + reset_control_deassert(qproc->pdc_reset); + } else if (qproc->has_spare_reg) { + ret = reset_control_reset(qproc->mss_restart); + } else { + ret = reset_control_deassert(qproc->mss_restart); + } + + return ret; +} + +static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms) +{ + unsigned long timeout; + s32 val; + + timeout = jiffies + msecs_to_jiffies(ms); + for (;;) { + val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG); + if (val) + break; + + if (time_after(jiffies, timeout)) + return -ETIMEDOUT; + + msleep(1); + } + + return val; +} + +static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms) +{ + + unsigned long timeout; + s32 val; + + timeout = jiffies + msecs_to_jiffies(ms); + for (;;) { + val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG); + if (val < 0) + break; + + if (!status && val) + break; + else if (status && val == status) + break; + + if (time_after(jiffies, timeout)) + return -ETIMEDOUT; + + msleep(1); + } + + return val; +} + +static void q6v5_dump_mba_logs(struct q6v5 *qproc) +{ + struct rproc *rproc = qproc->rproc; + void *data; + + if (!qproc->has_mba_logs) + return; + + if (q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false, qproc->mba_phys, + qproc->mba_size)) + return; + + data = vmalloc(MBA_LOG_SIZE); + if (!data) + return; + + memcpy(data, qproc->mba_region, MBA_LOG_SIZE); + dev_coredumpv(&rproc->dev, data, MBA_LOG_SIZE, GFP_KERNEL); +} + +static int q6v5proc_reset(struct q6v5 *qproc) +{ + u32 val; + int ret; + int i; + + if (qproc->version == MSS_SDM845) { + val = readl(qproc->reg_base + QDSP6SS_SLEEP); + val |= Q6SS_CBCR_CLKEN; + writel(val, qproc->reg_base + QDSP6SS_SLEEP); + + ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP, + val, !(val & Q6SS_CBCR_CLKOFF), 1, + Q6SS_CBCR_TIMEOUT_US); + if (ret) { + dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n"); + return -ETIMEDOUT; + } + + /* De-assert QDSP6 stop core */ + writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START); + /* Trigger boot FSM */ + writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD); + + ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS, + val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT); + if (ret) { + dev_err(qproc->dev, "Boot FSM failed to complete.\n"); + /* Reset the modem so that boot FSM is in reset state */ + q6v5_reset_deassert(qproc); + return ret; + } + + goto pbl_wait; + } else if (qproc->version == MSS_SC7180) { + val = readl(qproc->reg_base + QDSP6SS_SLEEP); + val |= Q6SS_CBCR_CLKEN; + writel(val, qproc->reg_base + QDSP6SS_SLEEP); + + ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP, + val, !(val & Q6SS_CBCR_CLKOFF), 1, + Q6SS_CBCR_TIMEOUT_US); + if (ret) { + dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n"); + return -ETIMEDOUT; + } + + /* Turn on the XO clock needed for PLL setup */ + val = 
readl(qproc->reg_base + QDSP6SS_XO_CBCR); + val |= Q6SS_CBCR_CLKEN; + writel(val, qproc->reg_base + QDSP6SS_XO_CBCR); + + ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR, + val, !(val & Q6SS_CBCR_CLKOFF), 1, + Q6SS_CBCR_TIMEOUT_US); + if (ret) { + dev_err(qproc->dev, "QDSP6SS XO clock timed out\n"); + return -ETIMEDOUT; + } + + /* Configure Q6 core CBCR to auto-enable after reset sequence */ + val = readl(qproc->reg_base + QDSP6SS_CORE_CBCR); + val |= Q6SS_CBCR_CLKEN; + writel(val, qproc->reg_base + QDSP6SS_CORE_CBCR); + + /* De-assert the Q6 stop core signal */ + writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START); + + /* Wait for 10 us for any staggering logic to settle */ + usleep_range(10, 20); + + /* Trigger the boot FSM to start the Q6 out-of-reset sequence */ + writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD); + + /* Poll the MSS_STATUS for FSM completion */ + ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS, + val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT); + if (ret) { + dev_err(qproc->dev, "Boot FSM failed to complete.\n"); + /* Reset the modem so that boot FSM is in reset state */ + q6v5_reset_deassert(qproc); + return ret; + } + goto pbl_wait; + } else if (qproc->version == MSS_MSM8996 || + qproc->version == MSS_MSM8998) { + int mem_pwr_ctl; + + /* Override the ACC value if required */ + writel(QDSP6SS_ACC_OVERRIDE_VAL, + qproc->reg_base + QDSP6SS_STRAP_ACC); + + /* Assert resets, stop core */ + val = readl(qproc->reg_base + QDSP6SS_RESET_REG); + val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE; + writel(val, qproc->reg_base + QDSP6SS_RESET_REG); + + /* BHS require xo cbcr to be enabled */ + val = readl(qproc->reg_base + QDSP6SS_XO_CBCR); + val |= Q6SS_CBCR_CLKEN; + writel(val, qproc->reg_base + QDSP6SS_XO_CBCR); + + /* Read CLKOFF bit to go low indicating CLK is enabled */ + ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR, + val, !(val & Q6SS_CBCR_CLKOFF), 1, + Q6SS_CBCR_TIMEOUT_US); + if (ret) { + dev_err(qproc->dev, + "xo cbcr enabling timed out (rc:%d)\n", ret); + return ret; + } + /* Enable power block headswitch and wait for it to stabilize */ + val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); + val |= QDSP6v56_BHS_ON; + writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); + val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); + udelay(1); + + /* Put LDO in bypass mode */ + val |= QDSP6v56_LDO_BYP; + writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); + + /* Deassert QDSP6 compiler memory clamp */ + val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); + val &= ~QDSP6v56_CLAMP_QMC_MEM; + writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); + + /* Deassert memory peripheral sleep and L2 memory standby */ + val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N; + writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); + + /* Turn on L1, L2, ETB and JU memories 1 at a time */ + if (qproc->version == MSS_MSM8996) { + mem_pwr_ctl = QDSP6SS_MEM_PWR_CTL; + i = 19; + } else { + /* MSS_MSM8998 */ + mem_pwr_ctl = QDSP6V6SS_MEM_PWR_CTL; + i = 28; + } + val = readl(qproc->reg_base + mem_pwr_ctl); + for (; i >= 0; i--) { + val |= BIT(i); + writel(val, qproc->reg_base + mem_pwr_ctl); + /* + * Read back value to ensure the write is done then + * wait for 1us for both memory peripheral and data + * array to turn on. 
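+ * The banks are switched on one at a time here for the same reason + * the else-branch below enables the L2 banks individually: to + * minimize in-rush current.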
+ */ + val |= readl(qproc->reg_base + mem_pwr_ctl); + udelay(1); + } + /* Remove word line clamp */ + val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); + val &= ~QDSP6v56_CLAMP_WL; + writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); + } else { + /* Assert resets, stop core */ + val = readl(qproc->reg_base + QDSP6SS_RESET_REG); + val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE; + writel(val, qproc->reg_base + QDSP6SS_RESET_REG); + + /* Enable power block headswitch and wait for it to stabilize */ + val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); + val |= QDSS_BHS_ON | QDSS_LDO_BYP; + writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); + val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); + udelay(1); + /* + * Turn on memories. L2 banks should be done individually + * to minimize inrush current. + */ + val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); + val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N | + Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N; + writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); + val |= Q6SS_L2DATA_SLP_NRET_N_2; + writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); + val |= Q6SS_L2DATA_SLP_NRET_N_1; + writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); + val |= Q6SS_L2DATA_SLP_NRET_N_0; + writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); + } + /* Remove IO clamp */ + val &= ~Q6SS_CLAMP_IO; + writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); + + /* Bring core out of reset */ + val = readl(qproc->reg_base + QDSP6SS_RESET_REG); + val &= ~Q6SS_CORE_ARES; + writel(val, qproc->reg_base + QDSP6SS_RESET_REG); + + /* Turn on core clock */ + val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG); + val |= Q6SS_CLK_ENABLE; + writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG); + + /* Start core execution */ + val = readl(qproc->reg_base + QDSP6SS_RESET_REG); + val &= ~Q6SS_STOP_CORE; + writel(val, qproc->reg_base + QDSP6SS_RESET_REG); + +pbl_wait: + /* Wait for PBL status */ + ret = q6v5_rmb_pbl_wait(qproc, 1000); + if (ret == -ETIMEDOUT) { + dev_err(qproc->dev, "PBL boot timed out\n"); + } else if (ret != RMB_PBL_SUCCESS) { + dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret); + ret = -EINVAL; + } else { + ret = 0; + } + + return ret; +} + +static void q6v5proc_halt_axi_port(struct q6v5 *qproc, + struct regmap *halt_map, + u32 offset) +{ + unsigned int val; + int ret; + + /* Check if we're already idle */ + ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val); + if (!ret && val) + return; + + /* Assert halt request */ + regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1); + + /* Wait for halt */ + regmap_read_poll_timeout(halt_map, offset + AXI_HALTACK_REG, val, + val, 1000, HALT_ACK_TIMEOUT_US); + + ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val); + if (ret || !val) + dev_err(qproc->dev, "port failed halt\n"); + + /* Clear halt request (port will remain halted until reset) */ + regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0); +} + +static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw) +{ + unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS; + dma_addr_t phys; + void *metadata; + int mdata_perm; + int xferop_ret; + size_t size; + void *ptr; + int ret; + + metadata = qcom_mdt_read_metadata(fw, &size); + if (IS_ERR(metadata)) + return PTR_ERR(metadata); + + if (qproc->mdata_phys) { + if (size > qproc->mdata_size) { + ret = -EINVAL; + dev_err(qproc->dev, "metadata size outside memory range\n"); + goto free_metadata; + } + + phys = qproc->mdata_phys; + ptr = memremap(qproc->mdata_phys, size, 
MEMREMAP_WC); + if (!ptr) { + ret = -EBUSY; + dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n", + &qproc->mdata_phys, size); + goto free_metadata; + } + } else { + ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs); + if (!ptr) { + ret = -ENOMEM; + dev_err(qproc->dev, "failed to allocate mdt buffer\n"); + goto free_metadata; + } + } + + memcpy(ptr, metadata, size); + + if (qproc->mdata_phys) + memunmap(ptr); + + /* Hypervisor mapping so that the modem can access the metadata */ + mdata_perm = BIT(QCOM_SCM_VMID_HLOS); + ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, true, + phys, size); + if (ret) { + dev_err(qproc->dev, + "assigning Q6 access to metadata failed: %d\n", ret); + ret = -EAGAIN; + goto free_dma_attrs; + } + + writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG); + writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG); + + ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000); + if (ret == -ETIMEDOUT) + dev_err(qproc->dev, "MPSS header authentication timed out\n"); + else if (ret < 0) + dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret); + + /* Metadata authentication done, remove modem access */ + xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, false, + phys, size); + if (xferop_ret) + dev_warn(qproc->dev, + "mdt buffer not reclaimed, system may become unstable\n"); + +free_dma_attrs: + if (!qproc->mdata_phys) + dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs); +free_metadata: + kfree(metadata); + + return ret < 0 ? ret : 0; +} + +static bool q6v5_phdr_valid(const struct elf32_phdr *phdr) +{ + if (phdr->p_type != PT_LOAD) + return false; + + if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH) + return false; + + if (!phdr->p_memsz) + return false; + + return true; +} + +static int q6v5_mba_load(struct q6v5 *qproc) +{ + int ret; + int xfermemop_ret; + bool mba_load_err = false; + + qcom_q6v5_prepare(&qproc->q6v5); + + ret = q6v5_pds_enable(qproc, qproc->active_pds, qproc->active_pd_count); + if (ret < 0) { + dev_err(qproc->dev, "failed to enable active power domains\n"); + goto disable_irqs; + } + + ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count); + if (ret < 0) { + dev_err(qproc->dev, "failed to enable proxy power domains\n"); + goto disable_active_pds; + } + + ret = q6v5_regulator_enable(qproc, qproc->proxy_regs, + qproc->proxy_reg_count); + if (ret) { + dev_err(qproc->dev, "failed to enable proxy supplies\n"); + goto disable_proxy_pds; + } + + ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks, + qproc->proxy_clk_count); + if (ret) { + dev_err(qproc->dev, "failed to enable proxy clocks\n"); + goto disable_proxy_reg; + } + + ret = q6v5_regulator_enable(qproc, qproc->active_regs, + qproc->active_reg_count); + if (ret) { + dev_err(qproc->dev, "failed to enable supplies\n"); + goto disable_proxy_clk; + } + + ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks, + qproc->reset_clk_count); + if (ret) { + dev_err(qproc->dev, "failed to enable reset clocks\n"); + goto disable_vdd; + } + + ret = q6v5_reset_deassert(qproc); + if (ret) { + dev_err(qproc->dev, "failed to deassert mss restart\n"); + goto disable_reset_clks; + } + + ret = q6v5_clk_enable(qproc->dev, qproc->active_clks, + qproc->active_clk_count); + if (ret) { + dev_err(qproc->dev, "failed to enable clocks\n"); + goto assert_reset; + } + + /* + * Some versions of the MBA firmware will, upon boot, also wipe the MPSS + * region, so provide the Q6 access to this region.
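+ * Ownership of the region is reworked again in q6v5_mpss_load() + * before the firmware segments are copied in.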
+ */ + ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true, + qproc->mpss_phys, qproc->mpss_size); + if (ret) { + dev_err(qproc->dev, "assigning Q6 access to mpss memory failed: %d\n", ret); + goto disable_active_clks; + } + + /* Assign MBA image access in DDR to q6 */ + ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false, true, + qproc->mba_phys, qproc->mba_size); + if (ret) { + dev_err(qproc->dev, + "assigning Q6 access to mba memory failed: %d\n", ret); + goto disable_active_clks; + } + + writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG); + if (qproc->dp_size) { + writel(qproc->mba_phys + SZ_1M, qproc->rmb_base + RMB_PMI_CODE_START_REG); + writel(qproc->dp_size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); + } + + ret = q6v5proc_reset(qproc); + if (ret) + goto reclaim_mba; + + ret = q6v5_rmb_mba_wait(qproc, 0, 5000); + if (ret == -ETIMEDOUT) { + dev_err(qproc->dev, "MBA boot timed out\n"); + goto halt_axi_ports; + } else if (ret != RMB_MBA_XPU_UNLOCKED && + ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) { + dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret); + ret = -EINVAL; + goto halt_axi_ports; + } + + qproc->dump_mba_loaded = true; + return 0; + +halt_axi_ports: + q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6); + q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem); + q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc); + mba_load_err = true; +reclaim_mba: + xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, + false, qproc->mba_phys, + qproc->mba_size); + if (xfermemop_ret) { + dev_err(qproc->dev, + "Failed to reclaim mba buffer, system may become unstable\n"); + } else if (mba_load_err) { + q6v5_dump_mba_logs(qproc); + } + +disable_active_clks: + q6v5_clk_disable(qproc->dev, qproc->active_clks, + qproc->active_clk_count); +assert_reset: + q6v5_reset_assert(qproc); +disable_reset_clks: + q6v5_clk_disable(qproc->dev, qproc->reset_clks, + qproc->reset_clk_count); +disable_vdd: + q6v5_regulator_disable(qproc, qproc->active_regs, + qproc->active_reg_count); +disable_proxy_clk: + q6v5_clk_disable(qproc->dev, qproc->proxy_clks, + qproc->proxy_clk_count); +disable_proxy_reg: + q6v5_regulator_disable(qproc, qproc->proxy_regs, + qproc->proxy_reg_count); +disable_proxy_pds: + q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count); +disable_active_pds: + q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count); +disable_irqs: + qcom_q6v5_unprepare(&qproc->q6v5); + + return ret; +} + +static void q6v5_mba_reclaim(struct q6v5 *qproc) +{ + int ret; + u32 val; + + qproc->dump_mba_loaded = false; + qproc->dp_size = 0; + + q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6); + q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem); + q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc); + if (qproc->version == MSS_MSM8996) { + /* + * To avoid high MX current during LPASS/MSS restart. 
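+ * The IO, word line and QMC memory clamps are therefore asserted + * before pulling mss_restart below.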
+ */ + val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); + val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL | + QDSP6v56_CLAMP_QMC_MEM; + writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); + } + + q6v5_reset_assert(qproc); + + q6v5_clk_disable(qproc->dev, qproc->reset_clks, + qproc->reset_clk_count); + q6v5_clk_disable(qproc->dev, qproc->active_clks, + qproc->active_clk_count); + q6v5_regulator_disable(qproc, qproc->active_regs, + qproc->active_reg_count); + q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count); + + /* In case of failure or coredump scenario where reclaiming MBA memory + * could not happen reclaim it here. + */ + ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false, + qproc->mba_phys, + qproc->mba_size); + WARN_ON(ret); + + ret = qcom_q6v5_unprepare(&qproc->q6v5); + if (ret) { + q6v5_pds_disable(qproc, qproc->proxy_pds, + qproc->proxy_pd_count); + q6v5_clk_disable(qproc->dev, qproc->proxy_clks, + qproc->proxy_clk_count); + q6v5_regulator_disable(qproc, qproc->proxy_regs, + qproc->proxy_reg_count); + } +} + +static int q6v5_reload_mba(struct rproc *rproc) +{ + struct q6v5 *qproc = rproc->priv; + const struct firmware *fw; + int ret; + + ret = request_firmware(&fw, rproc->firmware, qproc->dev); + if (ret < 0) + return ret; + + q6v5_load(rproc, fw); + ret = q6v5_mba_load(qproc); + release_firmware(fw); + + return ret; +} + +static int q6v5_mpss_load(struct q6v5 *qproc) +{ + const struct elf32_phdr *phdrs; + const struct elf32_phdr *phdr; + const struct firmware *seg_fw; + const struct firmware *fw; + struct elf32_hdr *ehdr; + phys_addr_t mpss_reloc; + phys_addr_t boot_addr; + phys_addr_t min_addr = PHYS_ADDR_MAX; + phys_addr_t max_addr = 0; + u32 code_length; + bool relocate = false; + char *fw_name; + size_t fw_name_len; + ssize_t offset; + size_t size = 0; + void *ptr; + int ret; + int i; + + fw_name_len = strlen(qproc->hexagon_mdt_image); + if (fw_name_len <= 4) + return -EINVAL; + + fw_name = kstrdup(qproc->hexagon_mdt_image, GFP_KERNEL); + if (!fw_name) + return -ENOMEM; + + ret = request_firmware(&fw, fw_name, qproc->dev); + if (ret < 0) { + dev_err(qproc->dev, "unable to load %s\n", fw_name); + goto out; + } + + /* Initialize the RMB validator */ + writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); + + ret = q6v5_mpss_init_image(qproc, fw); + if (ret) + goto release_firmware; + + ehdr = (struct elf32_hdr *)fw->data; + phdrs = (struct elf32_phdr *)(ehdr + 1); + + for (i = 0; i < ehdr->e_phnum; i++) { + phdr = &phdrs[i]; + + if (!q6v5_phdr_valid(phdr)) + continue; + + if (phdr->p_flags & QCOM_MDT_RELOCATABLE) + relocate = true; + + if (phdr->p_paddr < min_addr) + min_addr = phdr->p_paddr; + + if (phdr->p_paddr + phdr->p_memsz > max_addr) + max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K); + } + + /* + * In case of a modem subsystem restart on secure devices, the modem + * memory can be reclaimed only after MBA is loaded. + */ + q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, false, + qproc->mpss_phys, qproc->mpss_size); + + /* Share ownership between Linux and MSS, during segment loading */ + ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, true, + qproc->mpss_phys, qproc->mpss_size); + if (ret) { + dev_err(qproc->dev, + "assigning Q6 access to mpss memory failed: %d\n", ret); + ret = -EAGAIN; + goto release_firmware; + } + + mpss_reloc = relocate ? 
min_addr : qproc->mpss_phys; + qproc->mpss_reloc = mpss_reloc; + /* Load firmware segments */ + for (i = 0; i < ehdr->e_phnum; i++) { + phdr = &phdrs[i]; + + if (!q6v5_phdr_valid(phdr)) + continue; + + offset = phdr->p_paddr - mpss_reloc; + if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) { + dev_err(qproc->dev, "segment outside memory range\n"); + ret = -EINVAL; + goto release_firmware; + } + + if (phdr->p_filesz > phdr->p_memsz) { + dev_err(qproc->dev, + "refusing to load segment %d with p_filesz > p_memsz\n", + i); + ret = -EINVAL; + goto release_firmware; + } + + ptr = memremap(qproc->mpss_phys + offset, phdr->p_memsz, MEMREMAP_WC); + if (!ptr) { + dev_err(qproc->dev, + "unable to map memory region: %pa+%zx-%x\n", + &qproc->mpss_phys, offset, phdr->p_memsz); + goto release_firmware; + } + + if (phdr->p_filesz && phdr->p_offset < fw->size) { + /* Firmware is large enough to be non-split */ + if (phdr->p_offset + phdr->p_filesz > fw->size) { + dev_err(qproc->dev, + "failed to load segment %d from truncated file %s\n", + i, fw_name); + ret = -EINVAL; + memunmap(ptr); + goto release_firmware; + } + + memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz); + } else if (phdr->p_filesz) { + /* Replace "xxx.xxx" with "xxx.bxx" */ + sprintf(fw_name + fw_name_len - 3, "b%02d", i); + ret = request_firmware_into_buf(&seg_fw, fw_name, qproc->dev, + ptr, phdr->p_filesz); + if (ret) { + dev_err(qproc->dev, "failed to load %s\n", fw_name); + memunmap(ptr); + goto release_firmware; + } + + if (seg_fw->size != phdr->p_filesz) { + dev_err(qproc->dev, + "failed to load segment %d from truncated file %s\n", + i, fw_name); + ret = -EINVAL; + release_firmware(seg_fw); + memunmap(ptr); + goto release_firmware; + } + + release_firmware(seg_fw); + } + + if (phdr->p_memsz > phdr->p_filesz) { + memset(ptr + phdr->p_filesz, 0, + phdr->p_memsz - phdr->p_filesz); + } + memunmap(ptr); + size += phdr->p_memsz; + + code_length = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); + if (!code_length) { + boot_addr = relocate ? qproc->mpss_phys : min_addr; + writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG); + writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG); + } + writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); + + ret = readl(qproc->rmb_base + RMB_MBA_STATUS_REG); + if (ret < 0) { + dev_err(qproc->dev, "MPSS authentication failed: %d\n", + ret); + goto release_firmware; + } + } + + /* Transfer ownership of modem ddr region to q6 */ + ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true, + qproc->mpss_phys, qproc->mpss_size); + if (ret) { + dev_err(qproc->dev, + "assigning Q6 access to mpss memory failed: %d\n", ret); + ret = -EAGAIN; + goto release_firmware; + } + + ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000); + if (ret == -ETIMEDOUT) + dev_err(qproc->dev, "MPSS authentication timed out\n"); + else if (ret < 0) + dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret); + + qcom_pil_info_store("modem", qproc->mpss_phys, qproc->mpss_size); + +release_firmware: + release_firmware(fw); +out: + kfree(fw_name); + + return ret < 0 ? 
ret : 0; +} + +static void qcom_q6v5_dump_segment(struct rproc *rproc, + struct rproc_dump_segment *segment, + void *dest, size_t cp_offset, size_t size) +{ + int ret = 0; + struct q6v5 *qproc = rproc->priv; + int offset = segment->da - qproc->mpss_reloc; + void *ptr = NULL; + + /* Unlock mba before copying segments */ + if (!qproc->dump_mba_loaded) { + ret = q6v5_reload_mba(rproc); + if (!ret) { + /* Reset ownership back to Linux to copy segments */ + ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, + true, false, + qproc->mpss_phys, + qproc->mpss_size); + } + } + + if (!ret) + ptr = memremap(qproc->mpss_phys + offset + cp_offset, size, MEMREMAP_WC); + + if (ptr) { + memcpy(dest, ptr, size); + memunmap(ptr); + } else { + memset(dest, 0xff, size); + } + + qproc->current_dump_size += size; + + /* Reclaim mba after copying segments */ + if (qproc->current_dump_size == qproc->total_dump_size) { + if (qproc->dump_mba_loaded) { + /* Try to reset ownership back to Q6 */ + q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, + false, true, + qproc->mpss_phys, + qproc->mpss_size); + q6v5_mba_reclaim(qproc); + } + } +} + +static int q6v5_start(struct rproc *rproc) +{ + struct q6v5 *qproc = (struct q6v5 *)rproc->priv; + int xfermemop_ret; + int ret; + + ret = q6v5_mba_load(qproc); + if (ret) + return ret; + + dev_info(qproc->dev, "MBA booted with%s debug policy, loading mpss\n", + qproc->dp_size ? "" : "out"); + + ret = q6v5_mpss_load(qproc); + if (ret) + goto reclaim_mpss; + + ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000)); + if (ret == -ETIMEDOUT) { + dev_err(qproc->dev, "start timed out\n"); + goto reclaim_mpss; + } + + xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, + false, qproc->mba_phys, + qproc->mba_size); + if (xfermemop_ret) + dev_err(qproc->dev, + "Failed to reclaim mba buffer system may become unstable\n"); + + /* Reset Dump Segment Mask */ + qproc->current_dump_size = 0; + + return 0; + +reclaim_mpss: + q6v5_mba_reclaim(qproc); + q6v5_dump_mba_logs(qproc); + + return ret; +} + +static int q6v5_stop(struct rproc *rproc) +{ + struct q6v5 *qproc = (struct q6v5 *)rproc->priv; + int ret; + + ret = qcom_q6v5_request_stop(&qproc->q6v5); + if (ret == -ETIMEDOUT) + dev_err(qproc->dev, "timed out on wait\n"); + + q6v5_mba_reclaim(qproc); + + return 0; +} + +static int qcom_q6v5_register_dump_segments(struct rproc *rproc, + const struct firmware *mba_fw) +{ + const struct firmware *fw; + const struct elf32_phdr *phdrs; + const struct elf32_phdr *phdr; + const struct elf32_hdr *ehdr; + struct q6v5 *qproc = rproc->priv; + unsigned long i; + int ret; + + ret = request_firmware(&fw, qproc->hexagon_mdt_image, qproc->dev); + if (ret < 0) { + dev_err(qproc->dev, "unable to load %s\n", + qproc->hexagon_mdt_image); + return ret; + } + + rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE); + + ehdr = (struct elf32_hdr *)fw->data; + phdrs = (struct elf32_phdr *)(ehdr + 1); + qproc->total_dump_size = 0; + + for (i = 0; i < ehdr->e_phnum; i++) { + phdr = &phdrs[i]; + + if (!q6v5_phdr_valid(phdr)) + continue; + + ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr, + phdr->p_memsz, + qcom_q6v5_dump_segment, + NULL); + if (ret) + break; + + qproc->total_dump_size += phdr->p_memsz; + } + + release_firmware(fw); + return ret; +} + +static const struct rproc_ops q6v5_ops = { + .start = q6v5_start, + .stop = q6v5_stop, + .parse_fw = qcom_q6v5_register_dump_segments, + .load = q6v5_load, +}; + +static void qcom_msa_handover(struct qcom_q6v5 *q6v5) +{ + 
struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5); + + q6v5_clk_disable(qproc->dev, qproc->proxy_clks, + qproc->proxy_clk_count); + q6v5_regulator_disable(qproc, qproc->proxy_regs, + qproc->proxy_reg_count); + q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count); +} + +static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev) +{ + struct of_phandle_args args; + struct resource *res; + int ret; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6"); + qproc->reg_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(qproc->reg_base)) + return PTR_ERR(qproc->reg_base); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb"); + qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(qproc->rmb_base)) + return PTR_ERR(qproc->rmb_base); + + ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node, + "qcom,halt-regs", 3, 0, &args); + if (ret < 0) { + dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n"); + return -EINVAL; + } + + qproc->halt_map = syscon_node_to_regmap(args.np); + of_node_put(args.np); + if (IS_ERR(qproc->halt_map)) + return PTR_ERR(qproc->halt_map); + + qproc->halt_q6 = args.args[0]; + qproc->halt_modem = args.args[1]; + qproc->halt_nc = args.args[2]; + + if (qproc->has_spare_reg) { + ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node, + "qcom,spare-regs", + 1, 0, &args); + if (ret < 0) { + dev_err(&pdev->dev, "failed to parse spare-regs\n"); + return -EINVAL; + } + + qproc->conn_map = syscon_node_to_regmap(args.np); + of_node_put(args.np); + if (IS_ERR(qproc->conn_map)) + return PTR_ERR(qproc->conn_map); + + qproc->conn_box = args.args[0]; + } + + return 0; +} + +static int q6v5_init_clocks(struct device *dev, struct clk **clks, + char **clk_names) +{ + int i; + + if (!clk_names) + return 0; + + for (i = 0; clk_names[i]; i++) { + clks[i] = devm_clk_get(dev, clk_names[i]); + if (IS_ERR(clks[i])) { + int rc = PTR_ERR(clks[i]); + + if (rc != -EPROBE_DEFER) + dev_err(dev, "Failed to get %s clock\n", + clk_names[i]); + return rc; + } + } + + return i; +} + +static int q6v5_pds_attach(struct device *dev, struct device **devs, + char **pd_names) +{ + size_t num_pds = 0; + int ret; + int i; + + if (!pd_names) + return 0; + + while (pd_names[num_pds]) + num_pds++; + + for (i = 0; i < num_pds; i++) { + devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]); + if (IS_ERR_OR_NULL(devs[i])) { + ret = PTR_ERR(devs[i]) ? 
: -ENODATA; + goto unroll_attach; + } + } + + return num_pds; + +unroll_attach: + for (i--; i >= 0; i--) + dev_pm_domain_detach(devs[i], false); + + return ret; +} + +static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds, + size_t pd_count) +{ + int i; + + for (i = 0; i < pd_count; i++) + dev_pm_domain_detach(pds[i], false); +} + +static int q6v5_init_reset(struct q6v5 *qproc) +{ + qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev, + "mss_restart"); + if (IS_ERR(qproc->mss_restart)) { + dev_err(qproc->dev, "failed to acquire mss restart\n"); + return PTR_ERR(qproc->mss_restart); + } + + if (qproc->has_alt_reset || qproc->has_spare_reg) { + qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev, + "pdc_reset"); + if (IS_ERR(qproc->pdc_reset)) { + dev_err(qproc->dev, "failed to acquire pdc reset\n"); + return PTR_ERR(qproc->pdc_reset); + } + } + + return 0; +} + +static int q6v5_alloc_memory_region(struct q6v5 *qproc) +{ + struct device_node *child; + struct reserved_mem *rmem; + struct device_node *node; + struct resource r; + int ret; + + /* + * In the absence of mba/mpss sub-child, extract the mba and mpss + * reserved memory regions from device's memory-region property. + */ + child = of_get_child_by_name(qproc->dev->of_node, "mba"); + if (!child) { + node = of_parse_phandle(qproc->dev->of_node, + "memory-region", 0); + } else { + node = of_parse_phandle(child, "memory-region", 0); + of_node_put(child); + } + + ret = of_address_to_resource(node, 0, &r); + of_node_put(node); + if (ret) { + dev_err(qproc->dev, "unable to resolve mba region\n"); + return ret; + } + + qproc->mba_phys = r.start; + qproc->mba_size = resource_size(&r); + qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size); + if (!qproc->mba_region) { + dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n", + &r.start, qproc->mba_size); + return -EBUSY; + } + + if (!child) { + node = of_parse_phandle(qproc->dev->of_node, + "memory-region", 1); + } else { + child = of_get_child_by_name(qproc->dev->of_node, "mpss"); + node = of_parse_phandle(child, "memory-region", 0); + of_node_put(child); + } + + ret = of_address_to_resource(node, 0, &r); + of_node_put(node); + if (ret) { + dev_err(qproc->dev, "unable to resolve mpss region\n"); + return ret; + } + + qproc->mpss_phys = qproc->mpss_reloc = r.start; + qproc->mpss_size = resource_size(&r); + + if (!child) { + node = of_parse_phandle(qproc->dev->of_node, "memory-region", 2); + } else { + child = of_get_child_by_name(qproc->dev->of_node, "metadata"); + node = of_parse_phandle(child, "memory-region", 0); + of_node_put(child); + } + + if (!node) + return 0; + + rmem = of_reserved_mem_lookup(node); + if (!rmem) { + dev_err(qproc->dev, "unable to resolve metadata region\n"); + return -EINVAL; + } + + qproc->mdata_phys = rmem->base; + qproc->mdata_size = rmem->size; + + return 0; +} + +static int q6v5_probe(struct platform_device *pdev) +{ + const struct rproc_hexagon_res *desc; + struct q6v5 *qproc; + struct rproc *rproc; + const char *mba_image; + int ret; + + desc = of_device_get_match_data(&pdev->dev); + if (!desc) + return -EINVAL; + + if (desc->need_mem_protection && !qcom_scm_is_available()) + return -EPROBE_DEFER; + + mba_image = desc->hexagon_mba_image; + ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name", + 0, &mba_image); + if (ret < 0 && ret != -EINVAL) + return ret; + + rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops, + mba_image, sizeof(*qproc)); + if (!rproc) { + 
dev_err(&pdev->dev, "failed to allocate rproc\n"); + return -ENOMEM; + } + + rproc->auto_boot = false; + rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE); + + qproc = (struct q6v5 *)rproc->priv; + qproc->dev = &pdev->dev; + qproc->rproc = rproc; + qproc->hexagon_mdt_image = "modem.mdt"; + ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name", + 1, &qproc->hexagon_mdt_image); + if (ret < 0 && ret != -EINVAL) + goto free_rproc; + + platform_set_drvdata(pdev, qproc); + + qproc->has_spare_reg = desc->has_spare_reg; + ret = q6v5_init_mem(qproc, pdev); + if (ret) + goto free_rproc; + + ret = q6v5_alloc_memory_region(qproc); + if (ret) + goto free_rproc; + + ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks, + desc->proxy_clk_names); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to get proxy clocks.\n"); + goto free_rproc; + } + qproc->proxy_clk_count = ret; + + ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks, + desc->reset_clk_names); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to get reset clocks.\n"); + goto free_rproc; + } + qproc->reset_clk_count = ret; + + ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks, + desc->active_clk_names); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to get active clocks.\n"); + goto free_rproc; + } + qproc->active_clk_count = ret; + + ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs, + desc->proxy_supply); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to get proxy regulators.\n"); + goto free_rproc; + } + qproc->proxy_reg_count = ret; + + ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs, + desc->active_supply); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to get active regulators.\n"); + goto free_rproc; + } + qproc->active_reg_count = ret; + + ret = q6v5_pds_attach(&pdev->dev, qproc->active_pds, + desc->active_pd_names); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to attach active power domains\n"); + goto free_rproc; + } + qproc->active_pd_count = ret; + + ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds, + desc->proxy_pd_names); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to init power domains\n"); + goto detach_active_pds; + } + qproc->proxy_pd_count = ret; + + qproc->has_alt_reset = desc->has_alt_reset; + ret = q6v5_init_reset(qproc); + if (ret) + goto detach_proxy_pds; + + qproc->version = desc->version; + qproc->need_mem_protection = desc->need_mem_protection; + qproc->has_mba_logs = desc->has_mba_logs; + + ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM, + qcom_msa_handover); + if (ret) + goto detach_proxy_pds; + + qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS); + qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS); + qcom_add_glink_subdev(rproc, &qproc->glink_subdev, "mpss"); + qcom_add_smd_subdev(rproc, &qproc->smd_subdev); + qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss"); + qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12); + if (IS_ERR(qproc->sysmon)) { + ret = PTR_ERR(qproc->sysmon); + goto remove_subdevs; + } + + ret = rproc_add(rproc); + if (ret) + goto remove_sysmon_subdev; + + return 0; + +remove_sysmon_subdev: + qcom_remove_sysmon_subdev(qproc->sysmon); +remove_subdevs: + qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev); + qcom_remove_smd_subdev(rproc, &qproc->smd_subdev); + qcom_remove_glink_subdev(rproc, &qproc->glink_subdev); +detach_proxy_pds: + q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count); +detach_active_pds: + q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count); +free_rproc: + rproc_free(rproc); + + return ret; 
+} + +static int q6v5_remove(struct platform_device *pdev) +{ + struct q6v5 *qproc = platform_get_drvdata(pdev); + struct rproc *rproc = qproc->rproc; + + rproc_del(rproc); + + qcom_remove_sysmon_subdev(qproc->sysmon); + qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev); + qcom_remove_smd_subdev(rproc, &qproc->smd_subdev); + qcom_remove_glink_subdev(rproc, &qproc->glink_subdev); + + q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count); + q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count); + + rproc_free(rproc); + + return 0; +} + +static const struct rproc_hexagon_res sc7180_mss = { + .hexagon_mba_image = "mba.mbn", + .proxy_clk_names = (char*[]){ + "xo", + NULL + }, + .reset_clk_names = (char*[]){ + "iface", + "bus", + "snoc_axi", + NULL + }, + .active_clk_names = (char*[]){ + "mnoc_axi", + "nav", + NULL + }, + .active_pd_names = (char*[]){ + "load_state", + NULL + }, + .proxy_pd_names = (char*[]){ + "cx", + "mx", + "mss", + NULL + }, + .need_mem_protection = true, + .has_alt_reset = false, + .has_mba_logs = true, + .has_spare_reg = true, + .version = MSS_SC7180, +}; + +static const struct rproc_hexagon_res sdm845_mss = { + .hexagon_mba_image = "mba.mbn", + .proxy_clk_names = (char*[]){ + "xo", + "prng", + NULL + }, + .reset_clk_names = (char*[]){ + "iface", + "snoc_axi", + NULL + }, + .active_clk_names = (char*[]){ + "bus", + "mem", + "gpll0_mss", + "mnoc_axi", + NULL + }, + .active_pd_names = (char*[]){ + "load_state", + NULL + }, + .proxy_pd_names = (char*[]){ + "cx", + "mx", + "mss", + NULL + }, + .need_mem_protection = true, + .has_alt_reset = true, + .has_mba_logs = false, + .has_spare_reg = false, + .version = MSS_SDM845, +}; + +static const struct rproc_hexagon_res msm8998_mss = { + .hexagon_mba_image = "mba.mbn", + .proxy_clk_names = (char*[]){ + "xo", + "qdss", + "mem", + NULL + }, + .active_clk_names = (char*[]){ + "iface", + "bus", + "gpll0_mss", + "mnoc_axi", + "snoc_axi", + NULL + }, + .proxy_pd_names = (char*[]){ + "cx", + "mx", + NULL + }, + .need_mem_protection = true, + .has_alt_reset = false, + .has_mba_logs = false, + .has_spare_reg = false, + .version = MSS_MSM8998, +}; + +static const struct rproc_hexagon_res msm8996_mss = { + .hexagon_mba_image = "mba.mbn", + .proxy_supply = (struct qcom_mss_reg_res[]) { + { + .supply = "pll", + .uA = 100000, + }, + {} + }, + .proxy_clk_names = (char*[]){ + "xo", + "pnoc", + "qdss", + NULL + }, + .active_clk_names = (char*[]){ + "iface", + "bus", + "mem", + "gpll0_mss", + "snoc_axi", + "mnoc_axi", + NULL + }, + .need_mem_protection = true, + .has_alt_reset = false, + .has_mba_logs = false, + .has_spare_reg = false, + .version = MSS_MSM8996, +}; + +static const struct rproc_hexagon_res msm8916_mss = { + .hexagon_mba_image = "mba.mbn", + .proxy_supply = (struct qcom_mss_reg_res[]) { + { + .supply = "mx", + .uV = 1050000, + }, + { + .supply = "cx", + .uA = 100000, + }, + { + .supply = "pll", + .uA = 100000, + }, + {} + }, + .proxy_clk_names = (char*[]){ + "xo", + NULL + }, + .active_clk_names = (char*[]){ + "iface", + "bus", + "mem", + NULL + }, + .need_mem_protection = false, + .has_alt_reset = false, + .has_mba_logs = false, + .has_spare_reg = false, + .version = MSS_MSM8916, +}; + +static const struct rproc_hexagon_res msm8974_mss = { + .hexagon_mba_image = "mba.b00", + .proxy_supply = (struct qcom_mss_reg_res[]) { + { + .supply = "mx", + .uV = 1050000, + }, + { + .supply = "cx", + .uA = 100000, + }, + { + .supply = "pll", + .uA = 100000, + }, + {} + }, + .active_supply = (struct qcom_mss_reg_res[]) { + { + 
.supply = "mss", + .uV = 1050000, + .uA = 100000, + }, + {} + }, + .proxy_clk_names = (char*[]){ + "xo", + NULL + }, + .active_clk_names = (char*[]){ + "iface", + "bus", + "mem", + NULL + }, + .need_mem_protection = false, + .has_alt_reset = false, + .has_mba_logs = false, + .has_spare_reg = false, + .version = MSS_MSM8974, +}; + +static const struct of_device_id q6v5_of_match[] = { + { .compatible = "qcom,q6v5-pil", .data = &msm8916_mss}, + { .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss}, + { .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss}, + { .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss}, + { .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss}, + { .compatible = "qcom,sc7180-mss-pil", .data = &sc7180_mss}, + { .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss}, + { }, +}; +MODULE_DEVICE_TABLE(of, q6v5_of_match); + +static struct platform_driver q6v5_driver = { + .probe = q6v5_probe, + .remove = q6v5_remove, + .driver = { + .name = "qcom-q6v5-mss", + .of_match_table = q6v5_of_match, + }, +}; +module_platform_driver(q6v5_driver); + +MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c new file mode 100644 index 000000000..1a0d6eb94 --- /dev/null +++ b/drivers/remoteproc/qcom_q6v5_pas.c @@ -0,0 +1,743 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Qualcomm ADSP/SLPI Peripheral Image Loader for MSM8974 and MSM8996 + * + * Copyright (C) 2016 Linaro Ltd + * Copyright (C) 2014 Sony Mobile Communications AB + * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. + */ + +#include <linux/clk.h> +#include <linux/firmware.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pm_domain.h> +#include <linux/pm_runtime.h> +#include <linux/qcom_scm.h> +#include <linux/regulator/consumer.h> +#include <linux/remoteproc.h> +#include <linux/soc/qcom/mdt_loader.h> +#include <linux/soc/qcom/smem.h> +#include <linux/soc/qcom/smem_state.h> + +#include "qcom_common.h" +#include "qcom_pil_info.h" +#include "qcom_q6v5.h" +#include "remoteproc_internal.h" + +struct adsp_data { + int crash_reason_smem; + const char *firmware_name; + int pas_id; + bool has_aggre2_clk; + bool auto_boot; + + char **active_pd_names; + char **proxy_pd_names; + + const char *ssr_name; + const char *sysmon_name; + int ssctl_id; +}; + +struct qcom_adsp { + struct device *dev; + struct rproc *rproc; + + struct qcom_q6v5 q6v5; + + struct clk *xo; + struct clk *aggre2_clk; + + struct regulator *cx_supply; + struct regulator *px_supply; + + struct device *active_pds[1]; + struct device *proxy_pds[3]; + + int active_pd_count; + int proxy_pd_count; + + int pas_id; + int crash_reason_smem; + bool has_aggre2_clk; + const char *info_name; + + struct completion start_done; + struct completion stop_done; + + phys_addr_t mem_phys; + phys_addr_t mem_reloc; + void *mem_region; + size_t mem_size; + + struct qcom_rproc_glink glink_subdev; + struct qcom_rproc_subdev smd_subdev; + struct qcom_rproc_ssr ssr_subdev; + struct qcom_sysmon *sysmon; +}; + +static int adsp_pds_enable(struct qcom_adsp *adsp, struct device **pds, + size_t pd_count) +{ + int ret; + int i; + + for (i = 0; i < pd_count; i++) { + dev_pm_genpd_set_performance_state(pds[i], INT_MAX); + ret = pm_runtime_get_sync(pds[i]); + if (ret < 0) { 
+ pm_runtime_put_noidle(pds[i]); + dev_pm_genpd_set_performance_state(pds[i], 0); + goto unroll_pd_votes; + } + } + + return 0; + +unroll_pd_votes: + for (i--; i >= 0; i--) { + dev_pm_genpd_set_performance_state(pds[i], 0); + pm_runtime_put(pds[i]); + } + + return ret; +}; + +static void adsp_pds_disable(struct qcom_adsp *adsp, struct device **pds, + size_t pd_count) +{ + int i; + + for (i = 0; i < pd_count; i++) { + dev_pm_genpd_set_performance_state(pds[i], 0); + pm_runtime_put(pds[i]); + } +} + +static int adsp_load(struct rproc *rproc, const struct firmware *fw) +{ + struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv; + int ret; + + ret = qcom_mdt_load(adsp->dev, fw, rproc->firmware, adsp->pas_id, + adsp->mem_region, adsp->mem_phys, adsp->mem_size, + &adsp->mem_reloc); + if (ret) + return ret; + + qcom_pil_info_store(adsp->info_name, adsp->mem_phys, adsp->mem_size); + + return 0; +} + +static int adsp_start(struct rproc *rproc) +{ + struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv; + int ret; + + qcom_q6v5_prepare(&adsp->q6v5); + + ret = adsp_pds_enable(adsp, adsp->active_pds, adsp->active_pd_count); + if (ret < 0) + goto disable_irqs; + + ret = adsp_pds_enable(adsp, adsp->proxy_pds, adsp->proxy_pd_count); + if (ret < 0) + goto disable_active_pds; + + ret = clk_prepare_enable(adsp->xo); + if (ret) + goto disable_proxy_pds; + + ret = clk_prepare_enable(adsp->aggre2_clk); + if (ret) + goto disable_xo_clk; + + ret = regulator_enable(adsp->cx_supply); + if (ret) + goto disable_aggre2_clk; + + ret = regulator_enable(adsp->px_supply); + if (ret) + goto disable_cx_supply; + + ret = qcom_scm_pas_auth_and_reset(adsp->pas_id); + if (ret) { + dev_err(adsp->dev, + "failed to authenticate image and release reset\n"); + goto disable_px_supply; + } + + ret = qcom_q6v5_wait_for_start(&adsp->q6v5, msecs_to_jiffies(5000)); + if (ret == -ETIMEDOUT) { + dev_err(adsp->dev, "start timed out\n"); + qcom_scm_pas_shutdown(adsp->pas_id); + goto disable_px_supply; + } + + return 0; + +disable_px_supply: + regulator_disable(adsp->px_supply); +disable_cx_supply: + regulator_disable(adsp->cx_supply); +disable_aggre2_clk: + clk_disable_unprepare(adsp->aggre2_clk); +disable_xo_clk: + clk_disable_unprepare(adsp->xo); +disable_proxy_pds: + adsp_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count); +disable_active_pds: + adsp_pds_disable(adsp, adsp->active_pds, adsp->active_pd_count); +disable_irqs: + qcom_q6v5_unprepare(&adsp->q6v5); + + return ret; +} + +static void qcom_pas_handover(struct qcom_q6v5 *q6v5) +{ + struct qcom_adsp *adsp = container_of(q6v5, struct qcom_adsp, q6v5); + + regulator_disable(adsp->px_supply); + regulator_disable(adsp->cx_supply); + clk_disable_unprepare(adsp->aggre2_clk); + clk_disable_unprepare(adsp->xo); + adsp_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count); +} + +static int adsp_stop(struct rproc *rproc) +{ + struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv; + int handover; + int ret; + + ret = qcom_q6v5_request_stop(&adsp->q6v5); + if (ret == -ETIMEDOUT) + dev_err(adsp->dev, "timed out on wait\n"); + + ret = qcom_scm_pas_shutdown(adsp->pas_id); + if (ret) + dev_err(adsp->dev, "failed to shutdown: %d\n", ret); + + adsp_pds_disable(adsp, adsp->active_pds, adsp->active_pd_count); + handover = qcom_q6v5_unprepare(&adsp->q6v5); + if (handover) + qcom_pas_handover(&adsp->q6v5); + + return ret; +} + +static void *adsp_da_to_va(struct rproc *rproc, u64 da, size_t len) +{ + struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv; + int offset; + + offset = da 
- adsp->mem_reloc; + if (offset < 0 || offset + len > adsp->mem_size) + return NULL; + + return adsp->mem_region + offset; +} + +static unsigned long adsp_panic(struct rproc *rproc) +{ + struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv; + + return qcom_q6v5_panic(&adsp->q6v5); +} + +static const struct rproc_ops adsp_ops = { + .start = adsp_start, + .stop = adsp_stop, + .da_to_va = adsp_da_to_va, + .parse_fw = qcom_register_dump_segments, + .load = adsp_load, + .panic = adsp_panic, +}; + +static int adsp_init_clock(struct qcom_adsp *adsp) +{ + int ret; + + adsp->xo = devm_clk_get(adsp->dev, "xo"); + if (IS_ERR(adsp->xo)) { + ret = PTR_ERR(adsp->xo); + if (ret != -EPROBE_DEFER) + dev_err(adsp->dev, "failed to get xo clock"); + return ret; + } + + if (adsp->has_aggre2_clk) { + adsp->aggre2_clk = devm_clk_get(adsp->dev, "aggre2"); + if (IS_ERR(adsp->aggre2_clk)) { + ret = PTR_ERR(adsp->aggre2_clk); + if (ret != -EPROBE_DEFER) + dev_err(adsp->dev, + "failed to get aggre2 clock"); + return ret; + } + } + + return 0; +} + +static int adsp_init_regulator(struct qcom_adsp *adsp) +{ + adsp->cx_supply = devm_regulator_get(adsp->dev, "cx"); + if (IS_ERR(adsp->cx_supply)) + return PTR_ERR(adsp->cx_supply); + + regulator_set_load(adsp->cx_supply, 100000); + + adsp->px_supply = devm_regulator_get(adsp->dev, "px"); + return PTR_ERR_OR_ZERO(adsp->px_supply); +} + +static int adsp_pds_attach(struct device *dev, struct device **devs, + char **pd_names) +{ + size_t num_pds = 0; + int ret; + int i; + + if (!pd_names) + return 0; + + /* Handle single power domain */ + if (dev->pm_domain) { + devs[0] = dev; + pm_runtime_enable(dev); + return 1; + } + + while (pd_names[num_pds]) + num_pds++; + + for (i = 0; i < num_pds; i++) { + devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]); + if (IS_ERR_OR_NULL(devs[i])) { + ret = PTR_ERR(devs[i]) ? 
: -ENODATA; + goto unroll_attach; + } + } + + return num_pds; + +unroll_attach: + for (i--; i >= 0; i--) + dev_pm_domain_detach(devs[i], false); + + return ret; +}; + +static void adsp_pds_detach(struct qcom_adsp *adsp, struct device **pds, + size_t pd_count) +{ + struct device *dev = adsp->dev; + int i; + + /* Handle single power domain */ + if (dev->pm_domain && pd_count) { + pm_runtime_disable(dev); + return; + } + + for (i = 0; i < pd_count; i++) + dev_pm_domain_detach(pds[i], false); +} + +static int adsp_alloc_memory_region(struct qcom_adsp *adsp) +{ + struct device_node *node; + struct resource r; + int ret; + + node = of_parse_phandle(adsp->dev->of_node, "memory-region", 0); + if (!node) { + dev_err(adsp->dev, "no memory-region specified\n"); + return -EINVAL; + } + + ret = of_address_to_resource(node, 0, &r); + of_node_put(node); + if (ret) + return ret; + + adsp->mem_phys = adsp->mem_reloc = r.start; + adsp->mem_size = resource_size(&r); + adsp->mem_region = devm_ioremap_wc(adsp->dev, adsp->mem_phys, adsp->mem_size); + if (!adsp->mem_region) { + dev_err(adsp->dev, "unable to map memory region: %pa+%zx\n", + &r.start, adsp->mem_size); + return -EBUSY; + } + + return 0; +} + +static int adsp_probe(struct platform_device *pdev) +{ + const struct adsp_data *desc; + struct qcom_adsp *adsp; + struct rproc *rproc; + const char *fw_name; + int ret; + + desc = of_device_get_match_data(&pdev->dev); + if (!desc) + return -EINVAL; + + if (!qcom_scm_is_available()) + return -EPROBE_DEFER; + + fw_name = desc->firmware_name; + ret = of_property_read_string(pdev->dev.of_node, "firmware-name", + &fw_name); + if (ret < 0 && ret != -EINVAL) + return ret; + + rproc = rproc_alloc(&pdev->dev, pdev->name, &adsp_ops, + fw_name, sizeof(*adsp)); + if (!rproc) { + dev_err(&pdev->dev, "unable to allocate remoteproc\n"); + return -ENOMEM; + } + + rproc->auto_boot = desc->auto_boot; + rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE); + + adsp = (struct qcom_adsp *)rproc->priv; + adsp->dev = &pdev->dev; + adsp->rproc = rproc; + adsp->pas_id = desc->pas_id; + adsp->has_aggre2_clk = desc->has_aggre2_clk; + adsp->info_name = desc->sysmon_name; + platform_set_drvdata(pdev, adsp); + + device_wakeup_enable(adsp->dev); + + ret = adsp_alloc_memory_region(adsp); + if (ret) + goto free_rproc; + + ret = adsp_init_clock(adsp); + if (ret) + goto free_rproc; + + ret = adsp_init_regulator(adsp); + if (ret) + goto free_rproc; + + ret = adsp_pds_attach(&pdev->dev, adsp->active_pds, + desc->active_pd_names); + if (ret < 0) + goto free_rproc; + adsp->active_pd_count = ret; + + ret = adsp_pds_attach(&pdev->dev, adsp->proxy_pds, + desc->proxy_pd_names); + if (ret < 0) + goto detach_active_pds; + adsp->proxy_pd_count = ret; + + ret = qcom_q6v5_init(&adsp->q6v5, pdev, rproc, desc->crash_reason_smem, + qcom_pas_handover); + if (ret) + goto detach_proxy_pds; + + qcom_add_glink_subdev(rproc, &adsp->glink_subdev, desc->ssr_name); + qcom_add_smd_subdev(rproc, &adsp->smd_subdev); + qcom_add_ssr_subdev(rproc, &adsp->ssr_subdev, desc->ssr_name); + adsp->sysmon = qcom_add_sysmon_subdev(rproc, + desc->sysmon_name, + desc->ssctl_id); + if (IS_ERR(adsp->sysmon)) { + ret = PTR_ERR(adsp->sysmon); + goto detach_proxy_pds; + } + + ret = rproc_add(rproc); + if (ret) + goto detach_proxy_pds; + + return 0; + +detach_proxy_pds: + adsp_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count); +detach_active_pds: + adsp_pds_detach(adsp, adsp->active_pds, adsp->active_pd_count); +free_rproc: + device_init_wakeup(adsp->dev, false); + 
rproc_free(rproc); + + return ret; +} + +static int adsp_remove(struct platform_device *pdev) +{ + struct qcom_adsp *adsp = platform_get_drvdata(pdev); + + rproc_del(adsp->rproc); + + qcom_remove_glink_subdev(adsp->rproc, &adsp->glink_subdev); + qcom_remove_sysmon_subdev(adsp->sysmon); + qcom_remove_smd_subdev(adsp->rproc, &adsp->smd_subdev); + qcom_remove_ssr_subdev(adsp->rproc, &adsp->ssr_subdev); + adsp_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count); + device_init_wakeup(adsp->dev, false); + rproc_free(adsp->rproc); + + return 0; +} + +static const struct adsp_data adsp_resource_init = { + .crash_reason_smem = 423, + .firmware_name = "adsp.mdt", + .pas_id = 1, + .has_aggre2_clk = false, + .auto_boot = true, + .ssr_name = "lpass", + .sysmon_name = "adsp", + .ssctl_id = 0x14, +}; + +static const struct adsp_data sm8150_adsp_resource = { + .crash_reason_smem = 423, + .firmware_name = "adsp.mdt", + .pas_id = 1, + .has_aggre2_clk = false, + .auto_boot = true, + .active_pd_names = (char*[]){ + "load_state", + NULL + }, + .proxy_pd_names = (char*[]){ + "cx", + NULL + }, + .ssr_name = "lpass", + .sysmon_name = "adsp", + .ssctl_id = 0x14, +}; + +static const struct adsp_data sm8250_adsp_resource = { + .crash_reason_smem = 423, + .firmware_name = "adsp.mdt", + .pas_id = 1, + .has_aggre2_clk = false, + .auto_boot = true, + .active_pd_names = (char*[]){ + "load_state", + NULL + }, + .proxy_pd_names = (char*[]){ + "lcx", + "lmx", + NULL + }, + .ssr_name = "lpass", + .sysmon_name = "adsp", + .ssctl_id = 0x14, +}; + +static const struct adsp_data msm8998_adsp_resource = { + .crash_reason_smem = 423, + .firmware_name = "adsp.mdt", + .pas_id = 1, + .has_aggre2_clk = false, + .auto_boot = true, + .proxy_pd_names = (char*[]){ + "cx", + NULL + }, + .ssr_name = "lpass", + .sysmon_name = "adsp", + .ssctl_id = 0x14, +}; + +static const struct adsp_data cdsp_resource_init = { + .crash_reason_smem = 601, + .firmware_name = "cdsp.mdt", + .pas_id = 18, + .has_aggre2_clk = false, + .auto_boot = true, + .ssr_name = "cdsp", + .sysmon_name = "cdsp", + .ssctl_id = 0x17, +}; + +static const struct adsp_data sm8150_cdsp_resource = { + .crash_reason_smem = 601, + .firmware_name = "cdsp.mdt", + .pas_id = 18, + .has_aggre2_clk = false, + .auto_boot = true, + .active_pd_names = (char*[]){ + "load_state", + NULL + }, + .proxy_pd_names = (char*[]){ + "cx", + NULL + }, + .ssr_name = "cdsp", + .sysmon_name = "cdsp", + .ssctl_id = 0x17, +}; + +static const struct adsp_data sm8250_cdsp_resource = { + .crash_reason_smem = 601, + .firmware_name = "cdsp.mdt", + .pas_id = 18, + .has_aggre2_clk = false, + .auto_boot = true, + .active_pd_names = (char*[]){ + "load_state", + NULL + }, + .proxy_pd_names = (char*[]){ + "cx", + NULL + }, + .ssr_name = "cdsp", + .sysmon_name = "cdsp", + .ssctl_id = 0x17, +}; + +static const struct adsp_data mpss_resource_init = { + .crash_reason_smem = 421, + .firmware_name = "modem.mdt", + .pas_id = 4, + .has_aggre2_clk = false, + .auto_boot = false, + .active_pd_names = (char*[]){ + "load_state", + NULL + }, + .proxy_pd_names = (char*[]){ + "cx", + "mss", + NULL + }, + .ssr_name = "mpss", + .sysmon_name = "modem", + .ssctl_id = 0x12, +}; + +static const struct adsp_data slpi_resource_init = { + .crash_reason_smem = 424, + .firmware_name = "slpi.mdt", + .pas_id = 12, + .has_aggre2_clk = true, + .auto_boot = true, + .ssr_name = "dsps", + .sysmon_name = "slpi", + .ssctl_id = 0x16, +}; + +static const struct adsp_data sm8150_slpi_resource = { + .crash_reason_smem = 424, + .firmware_name = "slpi.mdt", 
+ .pas_id = 12, + .has_aggre2_clk = false, + .auto_boot = true, + .active_pd_names = (char*[]){ + "load_state", + NULL + }, + .proxy_pd_names = (char*[]){ + "lcx", + "lmx", + NULL + }, + .ssr_name = "dsps", + .sysmon_name = "slpi", + .ssctl_id = 0x16, +}; + +static const struct adsp_data sm8250_slpi_resource = { + .crash_reason_smem = 424, + .firmware_name = "slpi.mdt", + .pas_id = 12, + .has_aggre2_clk = false, + .auto_boot = true, + .active_pd_names = (char*[]){ + "load_state", + NULL + }, + .proxy_pd_names = (char*[]){ + "lcx", + "lmx", + NULL + }, + .ssr_name = "dsps", + .sysmon_name = "slpi", + .ssctl_id = 0x16, +}; + +static const struct adsp_data msm8998_slpi_resource = { + .crash_reason_smem = 424, + .firmware_name = "slpi.mdt", + .pas_id = 12, + .has_aggre2_clk = true, + .auto_boot = true, + .proxy_pd_names = (char*[]){ + "ssc_cx", + NULL + }, + .ssr_name = "dsps", + .sysmon_name = "slpi", + .ssctl_id = 0x16, +}; + +static const struct adsp_data wcss_resource_init = { + .crash_reason_smem = 421, + .firmware_name = "wcnss.mdt", + .pas_id = 6, + .auto_boot = true, + .ssr_name = "mpss", + .sysmon_name = "wcnss", + .ssctl_id = 0x12, +}; + +static const struct of_device_id adsp_of_match[] = { + { .compatible = "qcom,msm8974-adsp-pil", .data = &adsp_resource_init}, + { .compatible = "qcom,msm8996-adsp-pil", .data = &adsp_resource_init}, + { .compatible = "qcom,msm8996-slpi-pil", .data = &slpi_resource_init}, + { .compatible = "qcom,msm8998-adsp-pas", .data = &msm8998_adsp_resource}, + { .compatible = "qcom,msm8998-slpi-pas", .data = &msm8998_slpi_resource}, + { .compatible = "qcom,qcs404-adsp-pas", .data = &adsp_resource_init }, + { .compatible = "qcom,qcs404-cdsp-pas", .data = &cdsp_resource_init }, + { .compatible = "qcom,qcs404-wcss-pas", .data = &wcss_resource_init }, + { .compatible = "qcom,sc7180-mpss-pas", .data = &mpss_resource_init}, + { .compatible = "qcom,sdm845-adsp-pas", .data = &adsp_resource_init}, + { .compatible = "qcom,sdm845-cdsp-pas", .data = &cdsp_resource_init}, + { .compatible = "qcom,sm8150-adsp-pas", .data = &sm8150_adsp_resource}, + { .compatible = "qcom,sm8150-cdsp-pas", .data = &sm8150_cdsp_resource}, + { .compatible = "qcom,sm8150-mpss-pas", .data = &mpss_resource_init}, + { .compatible = "qcom,sm8150-slpi-pas", .data = &sm8150_slpi_resource}, + { .compatible = "qcom,sm8250-adsp-pas", .data = &sm8250_adsp_resource}, + { .compatible = "qcom,sm8250-cdsp-pas", .data = &sm8250_cdsp_resource}, + { .compatible = "qcom,sm8250-slpi-pas", .data = &sm8250_slpi_resource}, + { }, +}; +MODULE_DEVICE_TABLE(of, adsp_of_match); + +static struct platform_driver adsp_driver = { + .probe = adsp_probe, + .remove = adsp_remove, + .driver = { + .name = "qcom_q6v5_pas", + .of_match_table = adsp_of_match, + }, +}; + +module_platform_driver(adsp_driver); +MODULE_DESCRIPTION("Qualcomm Hexagon v5 Peripheral Authentication Service driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/remoteproc/qcom_q6v5_wcss.c b/drivers/remoteproc/qcom_q6v5_wcss.c new file mode 100644 index 000000000..8846ef0b0 --- /dev/null +++ b/drivers/remoteproc/qcom_q6v5_wcss.c @@ -0,0 +1,615 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2016-2018 Linaro Ltd. + * Copyright (C) 2014 Sony Mobile Communications AB + * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. 
+ */ +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of_reserved_mem.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/reset.h> +#include <linux/soc/qcom/mdt_loader.h> +#include "qcom_common.h" +#include "qcom_pil_info.h" +#include "qcom_q6v5.h" + +#define WCSS_CRASH_REASON 421 + +/* Q6SS Register Offsets */ +#define Q6SS_RESET_REG 0x014 +#define Q6SS_GFMUX_CTL_REG 0x020 +#define Q6SS_PWR_CTL_REG 0x030 +#define Q6SS_MEM_PWR_CTL 0x0B0 + +/* AXI Halt Register Offsets */ +#define AXI_HALTREQ_REG 0x0 +#define AXI_HALTACK_REG 0x4 +#define AXI_IDLE_REG 0x8 + +#define HALT_ACK_TIMEOUT_MS 100 + +/* Q6SS_RESET */ +#define Q6SS_STOP_CORE BIT(0) +#define Q6SS_CORE_ARES BIT(1) +#define Q6SS_BUS_ARES_ENABLE BIT(2) + +/* Q6SS_GFMUX_CTL */ +#define Q6SS_CLK_ENABLE BIT(1) + +/* Q6SS_PWR_CTL */ +#define Q6SS_L2DATA_STBY_N BIT(18) +#define Q6SS_SLP_RET_N BIT(19) +#define Q6SS_CLAMP_IO BIT(20) +#define QDSS_BHS_ON BIT(21) + +/* Q6SS parameters */ +#define Q6SS_LDO_BYP BIT(25) +#define Q6SS_BHS_ON BIT(24) +#define Q6SS_CLAMP_WL BIT(21) +#define Q6SS_CLAMP_QMC_MEM BIT(22) +#define HALT_CHECK_MAX_LOOPS 200 +#define Q6SS_XO_CBCR GENMASK(5, 3) + +/* Q6SS config/status registers */ +#define TCSR_GLOBAL_CFG0 0x0 +#define TCSR_GLOBAL_CFG1 0x4 +#define SSCAON_CONFIG 0x8 +#define SSCAON_STATUS 0xc +#define Q6SS_BHS_STATUS 0x78 +#define Q6SS_RST_EVB 0x10 + +#define BHS_EN_REST_ACK BIT(0) +#define SSCAON_ENABLE BIT(13) +#define SSCAON_BUS_EN BIT(15) +#define SSCAON_BUS_MUX_MASK GENMASK(18, 16) + +#define MEM_BANKS 19 +#define TCSR_WCSS_CLK_MASK 0x1F +#define TCSR_WCSS_CLK_ENABLE 0x14 + +struct q6v5_wcss { + struct device *dev; + + void __iomem *reg_base; + void __iomem *rmb_base; + + struct regmap *halt_map; + u32 halt_q6; + u32 halt_wcss; + u32 halt_nc; + + struct reset_control *wcss_aon_reset; + struct reset_control *wcss_reset; + struct reset_control *wcss_q6_reset; + + struct qcom_q6v5 q6v5; + + phys_addr_t mem_phys; + phys_addr_t mem_reloc; + void *mem_region; + size_t mem_size; + + struct qcom_rproc_glink glink_subdev; + struct qcom_rproc_ssr ssr_subdev; +}; + +static int q6v5_wcss_reset(struct q6v5_wcss *wcss) +{ + int ret; + u32 val; + int i; + + /* Assert resets, stop core */ + val = readl(wcss->reg_base + Q6SS_RESET_REG); + val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE; + writel(val, wcss->reg_base + Q6SS_RESET_REG); + + /* BHS require xo cbcr to be enabled */ + val = readl(wcss->reg_base + Q6SS_XO_CBCR); + val |= 0x1; + writel(val, wcss->reg_base + Q6SS_XO_CBCR); + + /* Read CLKOFF bit to go low indicating CLK is enabled */ + ret = readl_poll_timeout(wcss->reg_base + Q6SS_XO_CBCR, + val, !(val & BIT(31)), 1, + HALT_CHECK_MAX_LOOPS); + if (ret) { + dev_err(wcss->dev, + "xo cbcr enabling timed out (rc:%d)\n", ret); + return ret; + } + /* Enable power block headswitch and wait for it to stabilize */ + val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG); + val |= Q6SS_BHS_ON; + writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG); + udelay(1); + + /* Put LDO in bypass mode */ + val |= Q6SS_LDO_BYP; + writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG); + + /* Deassert Q6 compiler memory clamp */ + val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG); + val &= ~Q6SS_CLAMP_QMC_MEM; + writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG); + + /* Deassert memory peripheral sleep and L2 memory standby */ + val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N; + writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG); + + /* Turn on L1, 
L2, ETB and JU memories 1 at a time */ + val = readl(wcss->reg_base + Q6SS_MEM_PWR_CTL); + for (i = MEM_BANKS; i >= 0; i--) { + val |= BIT(i); + writel(val, wcss->reg_base + Q6SS_MEM_PWR_CTL); + /* + * Read back value to ensure the write is done then + * wait for 1us for both memory peripheral and data + * array to turn on. + */ + val |= readl(wcss->reg_base + Q6SS_MEM_PWR_CTL); + udelay(1); + } + /* Remove word line clamp */ + val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG); + val &= ~Q6SS_CLAMP_WL; + writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG); + + /* Remove IO clamp */ + val &= ~Q6SS_CLAMP_IO; + writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG); + + /* Bring core out of reset */ + val = readl(wcss->reg_base + Q6SS_RESET_REG); + val &= ~Q6SS_CORE_ARES; + writel(val, wcss->reg_base + Q6SS_RESET_REG); + + /* Turn on core clock */ + val = readl(wcss->reg_base + Q6SS_GFMUX_CTL_REG); + val |= Q6SS_CLK_ENABLE; + writel(val, wcss->reg_base + Q6SS_GFMUX_CTL_REG); + + /* Start core execution */ + val = readl(wcss->reg_base + Q6SS_RESET_REG); + val &= ~Q6SS_STOP_CORE; + writel(val, wcss->reg_base + Q6SS_RESET_REG); + + return 0; +} + +static int q6v5_wcss_start(struct rproc *rproc) +{ + struct q6v5_wcss *wcss = rproc->priv; + int ret; + + qcom_q6v5_prepare(&wcss->q6v5); + + /* Release Q6 and WCSS reset */ + ret = reset_control_deassert(wcss->wcss_reset); + if (ret) { + dev_err(wcss->dev, "wcss_reset failed\n"); + return ret; + } + + ret = reset_control_deassert(wcss->wcss_q6_reset); + if (ret) { + dev_err(wcss->dev, "wcss_q6_reset failed\n"); + goto wcss_reset; + } + + /* Lithium configuration - clock gating and bus arbitration */ + ret = regmap_update_bits(wcss->halt_map, + wcss->halt_nc + TCSR_GLOBAL_CFG0, + TCSR_WCSS_CLK_MASK, + TCSR_WCSS_CLK_ENABLE); + if (ret) + goto wcss_q6_reset; + + ret = regmap_update_bits(wcss->halt_map, + wcss->halt_nc + TCSR_GLOBAL_CFG1, + 1, 0); + if (ret) + goto wcss_q6_reset; + + /* Write bootaddr to EVB so that Q6WCSS will jump there after reset */ + writel(rproc->bootaddr >> 4, wcss->reg_base + Q6SS_RST_EVB); + + ret = q6v5_wcss_reset(wcss); + if (ret) + goto wcss_q6_reset; + + ret = qcom_q6v5_wait_for_start(&wcss->q6v5, 5 * HZ); + if (ret == -ETIMEDOUT) + dev_err(wcss->dev, "start timed out\n"); + + return ret; + +wcss_q6_reset: + reset_control_assert(wcss->wcss_q6_reset); + +wcss_reset: + reset_control_assert(wcss->wcss_reset); + + return ret; +} + +static void q6v5_wcss_halt_axi_port(struct q6v5_wcss *wcss, + struct regmap *halt_map, + u32 offset) +{ + unsigned long timeout; + unsigned int val; + int ret; + + /* Check if we're already idle */ + ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val); + if (!ret && val) + return; + + /* Assert halt request */ + regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1); + + /* Wait for halt */ + timeout = jiffies + msecs_to_jiffies(HALT_ACK_TIMEOUT_MS); + for (;;) { + ret = regmap_read(halt_map, offset + AXI_HALTACK_REG, &val); + if (ret || val || time_after(jiffies, timeout)) + break; + + msleep(1); + } + + ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val); + if (ret || !val) + dev_err(wcss->dev, "port failed halt\n"); + + /* Clear halt request (port will remain halted until reset) */ + regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0); +} + +static int q6v5_wcss_powerdown(struct q6v5_wcss *wcss) +{ + int ret; + u32 val; + + /* 1 - Assert WCSS/Q6 HALTREQ */ + q6v5_wcss_halt_axi_port(wcss, wcss->halt_map, wcss->halt_wcss); + + /* 2 - Enable WCSSAON_CONFIG */ + val = readl(wcss->rmb_base + SSCAON_CONFIG); + val 
|= SSCAON_ENABLE;
+	writel(val, wcss->rmb_base + SSCAON_CONFIG);
+
+	/* 3 - Set SSCAON_CONFIG */
+	val |= SSCAON_BUS_EN;
+	val &= ~SSCAON_BUS_MUX_MASK;
+	writel(val, wcss->rmb_base + SSCAON_CONFIG);
+
+	/* 4 - SSCAON_CONFIG 1 */
+	val |= BIT(1);
+	writel(val, wcss->rmb_base + SSCAON_CONFIG);
+
+	/* 5 - wait for SSCAON_STATUS */
+	ret = readl_poll_timeout(wcss->rmb_base + SSCAON_STATUS,
+				 val, (val & 0xffff) == 0x400, 1000,
+				 HALT_CHECK_MAX_LOOPS);
+	if (ret) {
+		dev_err(wcss->dev,
+			"can't get SSCAON_STATUS (rc:%d)\n", ret);
+		return ret;
+	}
+
+	/* 6 - De-assert WCSS_AON reset */
+	reset_control_assert(wcss->wcss_aon_reset);
+
+	/* 7 - Disable WCSSAON_CONFIG 13 */
+	val = readl(wcss->rmb_base + SSCAON_CONFIG);
+	val &= ~SSCAON_ENABLE;
+	writel(val, wcss->rmb_base + SSCAON_CONFIG);
+
+	/* 8 - De-assert WCSS/Q6 HALTREQ */
+	reset_control_assert(wcss->wcss_reset);
+
+	return 0;
+}
+
+static int q6v5_q6_powerdown(struct q6v5_wcss *wcss)
+{
+	int ret;
+	u32 val;
+	int i;
+
+	/* 1 - Halt Q6 bus interface */
+	q6v5_wcss_halt_axi_port(wcss, wcss->halt_map, wcss->halt_q6);
+
+	/* 2 - Disable Q6 Core clock */
+	val = readl(wcss->reg_base + Q6SS_GFMUX_CTL_REG);
+	val &= ~Q6SS_CLK_ENABLE;
+	writel(val, wcss->reg_base + Q6SS_GFMUX_CTL_REG);
+
+	/* 3 - Clamp I/O */
+	val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG);
+	val |= Q6SS_CLAMP_IO;
+	writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+	/* 4 - Clamp WL */
+	val |= QDSS_BHS_ON;
+	writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+	/* 5 - Clear Erase standby */
+	val &= ~Q6SS_L2DATA_STBY_N;
+	writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+	/* 6 - Clear Sleep RTN */
+	val &= ~Q6SS_SLP_RET_N;
+	writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+	/* 7 - turn off Q6 memory foot/head switch one bank at a time */
+	for (i = 0; i < 20; i++) {
+		val = readl(wcss->reg_base + Q6SS_MEM_PWR_CTL);
+		val &= ~BIT(i);
+		writel(val, wcss->reg_base + Q6SS_MEM_PWR_CTL);
+		mdelay(1);
+	}
+
+	/* 8 - Assert QMC memory RTN */
+	val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG);
+	val |= Q6SS_CLAMP_QMC_MEM;
+	writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+	/* 9 - Turn off BHS */
+	val &= ~Q6SS_BHS_ON;
+	writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
+	udelay(1);
+
+	/* 10 - Wait till BHS Reset is done */
+	ret = readl_poll_timeout(wcss->reg_base + Q6SS_BHS_STATUS,
+				 val, !(val & BHS_EN_REST_ACK), 1000,
+				 HALT_CHECK_MAX_LOOPS);
+	if (ret) {
+		dev_err(wcss->dev, "BHS_STATUS not OFF (rc:%d)\n", ret);
+		return ret;
+	}
+
+	/* 11 - Assert WCSS reset */
+	reset_control_assert(wcss->wcss_reset);
+
+	/* 12 - Assert Q6 reset */
+	reset_control_assert(wcss->wcss_q6_reset);
+
+	return 0;
+}
+
+static int q6v5_wcss_stop(struct rproc *rproc)
+{
+	struct q6v5_wcss *wcss = rproc->priv;
+	int ret;
+
+	/* WCSS powerdown */
+	ret = qcom_q6v5_request_stop(&wcss->q6v5);
+	if (ret == -ETIMEDOUT) {
+		dev_err(wcss->dev, "timed out on wait\n");
+		return ret;
+	}
+
+	ret = q6v5_wcss_powerdown(wcss);
+	if (ret)
+		return ret;
+
+	/* Q6 Power down */
+	ret = q6v5_q6_powerdown(wcss);
+	if (ret)
+		return ret;
+
+	qcom_q6v5_unprepare(&wcss->q6v5);
+
+	return 0;
+}
+
+static void *q6v5_wcss_da_to_va(struct rproc *rproc, u64 da, size_t len)
+{
+	struct q6v5_wcss *wcss = rproc->priv;
+	int offset;
+
+	offset = da - wcss->mem_reloc;
+	if (offset < 0 || offset + len > wcss->mem_size)
+		return NULL;
+
+	return wcss->mem_region + offset;
+}
+
+static int q6v5_wcss_load(struct rproc *rproc, const struct firmware *fw)
+{
+	struct q6v5_wcss *wcss = rproc->priv;
+	int ret;
+
+	ret =
qcom_mdt_load_no_init(wcss->dev, fw, rproc->firmware, + 0, wcss->mem_region, wcss->mem_phys, + wcss->mem_size, &wcss->mem_reloc); + if (ret) + return ret; + + qcom_pil_info_store("wcnss", wcss->mem_phys, wcss->mem_size); + + return ret; +} + +static const struct rproc_ops q6v5_wcss_ops = { + .start = q6v5_wcss_start, + .stop = q6v5_wcss_stop, + .da_to_va = q6v5_wcss_da_to_va, + .load = q6v5_wcss_load, + .get_boot_addr = rproc_elf_get_boot_addr, +}; + +static int q6v5_wcss_init_reset(struct q6v5_wcss *wcss) +{ + struct device *dev = wcss->dev; + + wcss->wcss_aon_reset = devm_reset_control_get(dev, "wcss_aon_reset"); + if (IS_ERR(wcss->wcss_aon_reset)) { + dev_err(wcss->dev, "unable to acquire wcss_aon_reset\n"); + return PTR_ERR(wcss->wcss_aon_reset); + } + + wcss->wcss_reset = devm_reset_control_get(dev, "wcss_reset"); + if (IS_ERR(wcss->wcss_reset)) { + dev_err(wcss->dev, "unable to acquire wcss_reset\n"); + return PTR_ERR(wcss->wcss_reset); + } + + wcss->wcss_q6_reset = devm_reset_control_get(dev, "wcss_q6_reset"); + if (IS_ERR(wcss->wcss_q6_reset)) { + dev_err(wcss->dev, "unable to acquire wcss_q6_reset\n"); + return PTR_ERR(wcss->wcss_q6_reset); + } + + return 0; +} + +static int q6v5_wcss_init_mmio(struct q6v5_wcss *wcss, + struct platform_device *pdev) +{ + struct of_phandle_args args; + struct resource *res; + int ret; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6"); + wcss->reg_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(wcss->reg_base)) + return PTR_ERR(wcss->reg_base); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb"); + wcss->rmb_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(wcss->rmb_base)) + return PTR_ERR(wcss->rmb_base); + + ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node, + "qcom,halt-regs", 3, 0, &args); + if (ret < 0) { + dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n"); + return -EINVAL; + } + + wcss->halt_map = syscon_node_to_regmap(args.np); + of_node_put(args.np); + if (IS_ERR(wcss->halt_map)) + return PTR_ERR(wcss->halt_map); + + wcss->halt_q6 = args.args[0]; + wcss->halt_wcss = args.args[1]; + wcss->halt_nc = args.args[2]; + + return 0; +} + +static int q6v5_alloc_memory_region(struct q6v5_wcss *wcss) +{ + struct reserved_mem *rmem = NULL; + struct device_node *node; + struct device *dev = wcss->dev; + + node = of_parse_phandle(dev->of_node, "memory-region", 0); + if (node) + rmem = of_reserved_mem_lookup(node); + of_node_put(node); + + if (!rmem) { + dev_err(dev, "unable to acquire memory-region\n"); + return -EINVAL; + } + + wcss->mem_phys = rmem->base; + wcss->mem_reloc = rmem->base; + wcss->mem_size = rmem->size; + wcss->mem_region = devm_ioremap_wc(dev, wcss->mem_phys, wcss->mem_size); + if (!wcss->mem_region) { + dev_err(dev, "unable to map memory region: %pa+%pa\n", + &rmem->base, &rmem->size); + return -EBUSY; + } + + return 0; +} + +static int q6v5_wcss_probe(struct platform_device *pdev) +{ + struct q6v5_wcss *wcss; + struct rproc *rproc; + int ret; + + rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_wcss_ops, + "IPQ8074/q6_fw.mdt", sizeof(*wcss)); + if (!rproc) { + dev_err(&pdev->dev, "failed to allocate rproc\n"); + return -ENOMEM; + } + + wcss = rproc->priv; + wcss->dev = &pdev->dev; + + ret = q6v5_wcss_init_mmio(wcss, pdev); + if (ret) + goto free_rproc; + + ret = q6v5_alloc_memory_region(wcss); + if (ret) + goto free_rproc; + + ret = q6v5_wcss_init_reset(wcss); + if (ret) + goto free_rproc; + + ret = qcom_q6v5_init(&wcss->q6v5, pdev, rproc, WCSS_CRASH_REASON, 
NULL); + if (ret) + goto free_rproc; + + qcom_add_glink_subdev(rproc, &wcss->glink_subdev, "q6wcss"); + qcom_add_ssr_subdev(rproc, &wcss->ssr_subdev, "q6wcss"); + + ret = rproc_add(rproc); + if (ret) + goto free_rproc; + + platform_set_drvdata(pdev, rproc); + + return 0; + +free_rproc: + rproc_free(rproc); + + return ret; +} + +static int q6v5_wcss_remove(struct platform_device *pdev) +{ + struct rproc *rproc = platform_get_drvdata(pdev); + + rproc_del(rproc); + rproc_free(rproc); + + return 0; +} + +static const struct of_device_id q6v5_wcss_of_match[] = { + { .compatible = "qcom,ipq8074-wcss-pil" }, + { }, +}; +MODULE_DEVICE_TABLE(of, q6v5_wcss_of_match); + +static struct platform_driver q6v5_wcss_driver = { + .probe = q6v5_wcss_probe, + .remove = q6v5_wcss_remove, + .driver = { + .name = "qcom-q6v5-wcss-pil", + .of_match_table = q6v5_wcss_of_match, + }, +}; +module_platform_driver(q6v5_wcss_driver); + +MODULE_DESCRIPTION("Hexagon WCSS Peripheral Image Loader"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/remoteproc/qcom_sysmon.c b/drivers/remoteproc/qcom_sysmon.c new file mode 100644 index 000000000..c348ea35e --- /dev/null +++ b/drivers/remoteproc/qcom_sysmon.c @@ -0,0 +1,768 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2017, Linaro Ltd. + */ +#include <linux/firmware.h> +#include <linux/module.h> +#include <linux/notifier.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/remoteproc/qcom_rproc.h> +#include <linux/rpmsg.h> + +#include "qcom_common.h" + +static BLOCKING_NOTIFIER_HEAD(sysmon_notifiers); + +struct qcom_sysmon { + struct rproc_subdev subdev; + struct rproc *rproc; + + int state; + struct mutex state_lock; + + struct list_head node; + + const char *name; + + int shutdown_irq; + int ssctl_version; + int ssctl_instance; + + struct notifier_block nb; + + struct device *dev; + + struct rpmsg_endpoint *ept; + struct completion comp; + struct completion ind_comp; + struct completion shutdown_comp; + struct completion ssctl_comp; + struct mutex lock; + + bool ssr_ack; + + struct qmi_handle qmi; + struct sockaddr_qrtr ssctl; +}; + +enum { + SSCTL_SSR_EVENT_BEFORE_POWERUP, + SSCTL_SSR_EVENT_AFTER_POWERUP, + SSCTL_SSR_EVENT_BEFORE_SHUTDOWN, + SSCTL_SSR_EVENT_AFTER_SHUTDOWN, +}; + +static const char * const sysmon_state_string[] = { + [SSCTL_SSR_EVENT_BEFORE_POWERUP] = "before_powerup", + [SSCTL_SSR_EVENT_AFTER_POWERUP] = "after_powerup", + [SSCTL_SSR_EVENT_BEFORE_SHUTDOWN] = "before_shutdown", + [SSCTL_SSR_EVENT_AFTER_SHUTDOWN] = "after_shutdown", +}; + +struct sysmon_event { + const char *subsys_name; + u32 ssr_event; +}; + +static DEFINE_MUTEX(sysmon_lock); +static LIST_HEAD(sysmon_list); + +/** + * sysmon_send_event() - send notification of other remote's SSR event + * @sysmon: sysmon context + * @event: sysmon event context + */ +static void sysmon_send_event(struct qcom_sysmon *sysmon, + const struct sysmon_event *event) +{ + char req[50]; + int len; + int ret; + + len = snprintf(req, sizeof(req), "ssr:%s:%s", event->subsys_name, + sysmon_state_string[event->ssr_event]); + if (len >= sizeof(req)) + return; + + mutex_lock(&sysmon->lock); + reinit_completion(&sysmon->comp); + sysmon->ssr_ack = false; + + ret = rpmsg_send(sysmon->ept, req, len); + if (ret < 0) { + dev_err(sysmon->dev, "failed to send sysmon event\n"); + goto out_unlock; + } + + ret = wait_for_completion_timeout(&sysmon->comp, + msecs_to_jiffies(5000)); + 
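/* wait_for_completion_timeout() returns the remaining jiffies on
+	 * success and 0 on timeout, so a zero return below means the remote
+	 * never acked the event within the five second window.
+	 */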
if (!ret) { + dev_err(sysmon->dev, "timeout waiting for sysmon ack\n"); + goto out_unlock; + } + + if (!sysmon->ssr_ack) + dev_err(sysmon->dev, "unexpected response to sysmon event\n"); + +out_unlock: + mutex_unlock(&sysmon->lock); +} + +/** + * sysmon_request_shutdown() - request graceful shutdown of remote + * @sysmon: sysmon context + */ +static void sysmon_request_shutdown(struct qcom_sysmon *sysmon) +{ + char *req = "ssr:shutdown"; + int ret; + + mutex_lock(&sysmon->lock); + reinit_completion(&sysmon->comp); + sysmon->ssr_ack = false; + + ret = rpmsg_send(sysmon->ept, req, strlen(req) + 1); + if (ret < 0) { + dev_err(sysmon->dev, "send sysmon shutdown request failed\n"); + goto out_unlock; + } + + ret = wait_for_completion_timeout(&sysmon->comp, + msecs_to_jiffies(5000)); + if (!ret) { + dev_err(sysmon->dev, "timeout waiting for sysmon ack\n"); + goto out_unlock; + } + + if (!sysmon->ssr_ack) + dev_err(sysmon->dev, + "unexpected response to sysmon shutdown request\n"); + +out_unlock: + mutex_unlock(&sysmon->lock); +} + +static int sysmon_callback(struct rpmsg_device *rpdev, void *data, int count, + void *priv, u32 addr) +{ + struct qcom_sysmon *sysmon = priv; + const char *ssr_ack = "ssr:ack"; + const int ssr_ack_len = strlen(ssr_ack) + 1; + + if (!sysmon) + return -EINVAL; + + if (count >= ssr_ack_len && !memcmp(data, ssr_ack, ssr_ack_len)) + sysmon->ssr_ack = true; + + complete(&sysmon->comp); + + return 0; +} + +#define SSCTL_SHUTDOWN_REQ 0x21 +#define SSCTL_SHUTDOWN_READY_IND 0x21 +#define SSCTL_SUBSYS_EVENT_REQ 0x23 + +#define SSCTL_MAX_MSG_LEN 7 + +#define SSCTL_SUBSYS_NAME_LENGTH 15 + +enum { + SSCTL_SSR_EVENT_FORCED, + SSCTL_SSR_EVENT_GRACEFUL, +}; + +struct ssctl_shutdown_resp { + struct qmi_response_type_v01 resp; +}; + +static struct qmi_elem_info ssctl_shutdown_resp_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct ssctl_shutdown_resp, resp), + .ei_array = qmi_response_type_v01_ei, + }, + {} +}; + +struct ssctl_subsys_event_req { + u8 subsys_name_len; + char subsys_name[SSCTL_SUBSYS_NAME_LENGTH]; + u32 event; + u8 evt_driven_valid; + u32 evt_driven; +}; + +static struct qmi_elem_info ssctl_subsys_event_req_ei[] = { + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct ssctl_subsys_event_req, + subsys_name_len), + .ei_array = NULL, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = SSCTL_SUBSYS_NAME_LENGTH, + .elem_size = sizeof(char), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct ssctl_subsys_event_req, + subsys_name), + .ei_array = NULL, + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct ssctl_subsys_event_req, + event), + .ei_array = NULL, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct ssctl_subsys_event_req, + evt_driven_valid), + .ei_array = NULL, + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct ssctl_subsys_event_req, + evt_driven), + .ei_array = NULL, + }, + {} +}; + +struct ssctl_subsys_event_resp { + struct qmi_response_type_v01 resp; +}; + 
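
For readers new to QMI, the qmi_elem_info tables above drive the generic TLV encoder in drivers/soc/qcom/qmi_encdec.c. As a rough, unofficial sketch of what ssctl_subsys_event_req_ei describes on the wire, the stand-alone program below hand-packs the same three TLVs. It assumes the usual QMI framing (a 1-byte type, a 2-byte little-endian length, then the value) and a little-endian host; the byte layout shown is an approximation for illustration, not the kernel's actual encoder output.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical packer mirroring ssctl_subsys_event_req_ei (illustration). */
static size_t put_tlv(uint8_t *out, uint8_t type, const void *val, uint16_t len)
{
	out[0] = type;
	out[1] = len & 0xff;	/* assumed 2-byte length, little endian */
	out[2] = len >> 8;
	memcpy(&out[3], val, len);
	return 3 + len;
}

int main(void)
{
	const char name[] = "modem";
	uint8_t name_val[1 + 15];	/* QMI_DATA_LEN byte + up to SSCTL_SUBSYS_NAME_LENGTH chars */
	uint32_t event = 2;		/* SSCTL_SSR_EVENT_BEFORE_SHUTDOWN */
	uint32_t evt_driven = 0;	/* SSCTL_SSR_EVENT_FORCED */
	uint8_t buf[64];
	size_t off = 0, i;

	/* TLV 0x01: 1-byte length prefix, then the VAR_LEN_ARRAY name */
	name_val[0] = (uint8_t)strlen(name);
	memcpy(&name_val[1], name, strlen(name));
	off += put_tlv(buf + off, 0x01, name_val, 1 + strlen(name));

	/* TLV 0x02: the mandatory 4-byte event enum */
	off += put_tlv(buf + off, 0x02, &event, sizeof(event));

	/* TLV 0x10: optional evt_driven enum, only sent when _valid is set */
	off += put_tlv(buf + off, 0x10, &evt_driven, sizeof(evt_driven));

	for (i = 0; i < off; i++)
		printf("%02x ", buf[i]);	/* 01 06 00 05 6d 6f 64 65 6d ... */
	printf("\n");
	return 0;
}

The response side works the same way in reverse: ssctl_subsys_event_resp_ei (next) tells the decoder to expect a single struct TLV carrying the standard qmi_response_type_v01 result.
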
+static struct qmi_elem_info ssctl_subsys_event_resp_ei[] = {
+	{
+		.data_type = QMI_STRUCT,
+		.elem_len = 1,
+		.elem_size = sizeof(struct qmi_response_type_v01),
+		.array_type = NO_ARRAY,
+		.tlv_type = 0x02,
+		.offset = offsetof(struct ssctl_subsys_event_resp,
+				   resp),
+		.ei_array = qmi_response_type_v01_ei,
+	},
+	{}
+};
+
+static struct qmi_elem_info ssctl_shutdown_ind_ei[] = {
+	{}
+};
+
+static void sysmon_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
+			  struct qmi_txn *txn, const void *data)
+{
+	struct qcom_sysmon *sysmon = container_of(qmi, struct qcom_sysmon, qmi);
+
+	complete(&sysmon->ind_comp);
+}
+
+static struct qmi_msg_handler qmi_indication_handler[] = {
+	{
+		.type = QMI_INDICATION,
+		.msg_id = SSCTL_SHUTDOWN_READY_IND,
+		.ei = ssctl_shutdown_ind_ei,
+		.decoded_size = 0,
+		.fn = sysmon_ind_cb
+	},
+	{}
+};
+
+/**
+ * ssctl_request_shutdown() - request shutdown via SSCTL QMI service
+ * @sysmon: sysmon context
+ */
+static void ssctl_request_shutdown(struct qcom_sysmon *sysmon)
+{
+	struct ssctl_shutdown_resp resp;
+	struct qmi_txn txn;
+	int ret;
+
+	reinit_completion(&sysmon->ind_comp);
+	reinit_completion(&sysmon->shutdown_comp);
+	ret = qmi_txn_init(&sysmon->qmi, &txn, ssctl_shutdown_resp_ei, &resp);
+	if (ret < 0) {
+		dev_err(sysmon->dev, "failed to allocate QMI txn\n");
+		return;
+	}
+
+	ret = qmi_send_request(&sysmon->qmi, &sysmon->ssctl, &txn,
+			       SSCTL_SHUTDOWN_REQ, 0, NULL, NULL);
+	if (ret < 0) {
+		dev_err(sysmon->dev, "failed to send shutdown request\n");
+		qmi_txn_cancel(&txn);
+		return;
+	}
+
+	ret = qmi_txn_wait(&txn, 5 * HZ);
+	if (ret < 0)
+		dev_err(sysmon->dev, "failed receiving QMI response\n");
+	else if (resp.resp.result)
+		dev_err(sysmon->dev, "shutdown request failed\n");
+	else
+		dev_dbg(sysmon->dev, "shutdown request completed\n");
+
+	if (sysmon->shutdown_irq > 0) {
+		ret = wait_for_completion_timeout(&sysmon->shutdown_comp,
+						  10 * HZ);
+		if (!ret) {
+			ret = try_wait_for_completion(&sysmon->ind_comp);
+			if (!ret)
+				dev_err(sysmon->dev,
+					"timeout waiting for shutdown ack\n");
+		}
+	}
+}
+
+/**
+ * ssctl_send_event() - send notification of other remote's SSR event
+ * @sysmon: sysmon context
+ * @event: sysmon event context
+ */
+static void ssctl_send_event(struct qcom_sysmon *sysmon,
+			     const struct sysmon_event *event)
+{
+	struct ssctl_subsys_event_resp resp;
+	struct ssctl_subsys_event_req req;
+	struct qmi_txn txn;
+	int ret;
+
+	memset(&resp, 0, sizeof(resp));
+	ret = qmi_txn_init(&sysmon->qmi, &txn, ssctl_subsys_event_resp_ei, &resp);
+	if (ret < 0) {
+		dev_err(sysmon->dev, "failed to allocate QMI txn\n");
+		return;
+	}
+
+	memset(&req, 0, sizeof(req));
+	strlcpy(req.subsys_name, event->subsys_name, sizeof(req.subsys_name));
+	req.subsys_name_len = strlen(req.subsys_name);
+	req.event = event->ssr_event;
+	req.evt_driven_valid = true;
+	req.evt_driven = SSCTL_SSR_EVENT_FORCED;
+
+	ret = qmi_send_request(&sysmon->qmi, &sysmon->ssctl, &txn,
+			       SSCTL_SUBSYS_EVENT_REQ, 40,
+			       ssctl_subsys_event_req_ei, &req);
+	if (ret < 0) {
+		dev_err(sysmon->dev, "failed to send subsystem event request\n");
+		qmi_txn_cancel(&txn);
+		return;
+	}
+
+	ret = qmi_txn_wait(&txn, 5 * HZ);
+	if (ret < 0)
+		dev_err(sysmon->dev, "failed receiving QMI response\n");
+	else if (resp.resp.result)
+		dev_err(sysmon->dev, "ssr event send failed\n");
+	else
+		dev_dbg(sysmon->dev, "ssr event send completed\n");
+}
+
+/**
+ * ssctl_new_server() - QMI callback indicating a new service
+ * @qmi: QMI handle
+ * @svc: service information
+ *
+ * Return: 0 if we're interested in this service, -EINVAL otherwise.
+ */
+static int ssctl_new_server(struct qmi_handle *qmi, struct qmi_service *svc)
+{
+	struct qcom_sysmon *sysmon = container_of(qmi, struct qcom_sysmon, qmi);
+
+	switch (svc->version) {
+	case 1:
+		if (svc->instance != 0)
+			return -EINVAL;
+		if (strcmp(sysmon->name, "modem"))
+			return -EINVAL;
+		break;
+	case 2:
+		if (svc->instance != sysmon->ssctl_instance)
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	sysmon->ssctl_version = svc->version;
+
+	sysmon->ssctl.sq_family = AF_QIPCRTR;
+	sysmon->ssctl.sq_node = svc->node;
+	sysmon->ssctl.sq_port = svc->port;
+
+	svc->priv = sysmon;
+
+	complete(&sysmon->ssctl_comp);
+
+	return 0;
+}
+
+/**
+ * ssctl_del_server() - QMI callback indicating that @svc is removed
+ * @qmi: QMI handle
+ * @svc: service information
+ */
+static void ssctl_del_server(struct qmi_handle *qmi, struct qmi_service *svc)
+{
+	struct qcom_sysmon *sysmon = svc->priv;
+
+	sysmon->ssctl_version = 0;
+}
+
+static const struct qmi_ops ssctl_ops = {
+	.new_server = ssctl_new_server,
+	.del_server = ssctl_del_server,
+};
+
+static int sysmon_prepare(struct rproc_subdev *subdev)
+{
+	struct qcom_sysmon *sysmon = container_of(subdev, struct qcom_sysmon,
+						  subdev);
+	struct sysmon_event event = {
+		.subsys_name = sysmon->name,
+		.ssr_event = SSCTL_SSR_EVENT_BEFORE_POWERUP
+	};
+
+	mutex_lock(&sysmon->state_lock);
+	sysmon->state = SSCTL_SSR_EVENT_BEFORE_POWERUP;
+	blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)&event);
+	mutex_unlock(&sysmon->state_lock);
+
+	return 0;
+}
+
+/**
+ * sysmon_start() - start callback for the sysmon remoteproc subdevice
+ * @subdev: instance of the sysmon subdevice
+ *
+ * Inform all the listeners of sysmon notifications that the rproc associated
+ * with @subdev has booted up. The rproc that booted up also needs to know
+ * which rprocs are already up and running, so send start notifications
+ * on behalf of all the online rprocs.
+ */ +static int sysmon_start(struct rproc_subdev *subdev) +{ + struct qcom_sysmon *sysmon = container_of(subdev, struct qcom_sysmon, + subdev); + struct qcom_sysmon *target; + struct sysmon_event event = { + .subsys_name = sysmon->name, + .ssr_event = SSCTL_SSR_EVENT_AFTER_POWERUP + }; + + reinit_completion(&sysmon->ssctl_comp); + mutex_lock(&sysmon->state_lock); + sysmon->state = SSCTL_SSR_EVENT_AFTER_POWERUP; + blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)&event); + mutex_unlock(&sysmon->state_lock); + + mutex_lock(&sysmon_lock); + list_for_each_entry(target, &sysmon_list, node) { + if (target == sysmon) + continue; + + mutex_lock(&target->state_lock); + event.subsys_name = target->name; + event.ssr_event = target->state; + + if (sysmon->ssctl_version == 2) + ssctl_send_event(sysmon, &event); + else if (sysmon->ept) + sysmon_send_event(sysmon, &event); + mutex_unlock(&target->state_lock); + } + mutex_unlock(&sysmon_lock); + + return 0; +} + +static void sysmon_stop(struct rproc_subdev *subdev, bool crashed) +{ + struct qcom_sysmon *sysmon = container_of(subdev, struct qcom_sysmon, subdev); + struct sysmon_event event = { + .subsys_name = sysmon->name, + .ssr_event = SSCTL_SSR_EVENT_BEFORE_SHUTDOWN + }; + + mutex_lock(&sysmon->state_lock); + sysmon->state = SSCTL_SSR_EVENT_BEFORE_SHUTDOWN; + blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)&event); + mutex_unlock(&sysmon->state_lock); + + /* Don't request graceful shutdown if we've crashed */ + if (crashed) + return; + + if (sysmon->ssctl_instance) { + if (!wait_for_completion_timeout(&sysmon->ssctl_comp, HZ / 2)) + dev_err(sysmon->dev, "timeout waiting for ssctl service\n"); + } + + if (sysmon->ssctl_version) + ssctl_request_shutdown(sysmon); + else if (sysmon->ept) + sysmon_request_shutdown(sysmon); +} + +static void sysmon_unprepare(struct rproc_subdev *subdev) +{ + struct qcom_sysmon *sysmon = container_of(subdev, struct qcom_sysmon, + subdev); + struct sysmon_event event = { + .subsys_name = sysmon->name, + .ssr_event = SSCTL_SSR_EVENT_AFTER_SHUTDOWN + }; + + mutex_lock(&sysmon->state_lock); + sysmon->state = SSCTL_SSR_EVENT_AFTER_SHUTDOWN; + blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)&event); + mutex_unlock(&sysmon->state_lock); +} + +/** + * sysmon_notify() - notify sysmon target of another's SSR + * @nb: notifier_block associated with sysmon instance + * @event: unused + * @data: SSR identifier of the remote that is going down + */ +static int sysmon_notify(struct notifier_block *nb, unsigned long event, + void *data) +{ + struct qcom_sysmon *sysmon = container_of(nb, struct qcom_sysmon, nb); + struct sysmon_event *sysmon_event = data; + + /* Skip non-running rprocs and the originating instance */ + if (sysmon->state != SSCTL_SSR_EVENT_AFTER_POWERUP || + !strcmp(sysmon_event->subsys_name, sysmon->name)) { + dev_dbg(sysmon->dev, "not notifying %s\n", sysmon->name); + return NOTIFY_DONE; + } + + /* Only SSCTL version 2 supports SSR events */ + if (sysmon->ssctl_version == 2) + ssctl_send_event(sysmon, sysmon_event); + else if (sysmon->ept) + sysmon_send_event(sysmon, sysmon_event); + + return NOTIFY_DONE; +} + +static irqreturn_t sysmon_shutdown_interrupt(int irq, void *data) +{ + struct qcom_sysmon *sysmon = data; + + complete(&sysmon->shutdown_comp); + + return IRQ_HANDLED; +} + +/** + * qcom_add_sysmon_subdev() - create a sysmon subdev for the given remoteproc + * @rproc: rproc context to associate the subdev with + * @name: name of this subdev, to use in SSR + * @ssctl_instance: instance 
id of the ssctl QMI service
+ *
+ * Return: A new qcom_sysmon object, or an ERR_PTR() encoded error on failure
+ */
+struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
+					   const char *name,
+					   int ssctl_instance)
+{
+	struct qcom_sysmon *sysmon;
+	int ret;
+
+	sysmon = kzalloc(sizeof(*sysmon), GFP_KERNEL);
+	if (!sysmon)
+		return ERR_PTR(-ENOMEM);
+
+	sysmon->dev = rproc->dev.parent;
+	sysmon->rproc = rproc;
+
+	sysmon->name = name;
+	sysmon->ssctl_instance = ssctl_instance;
+
+	init_completion(&sysmon->comp);
+	init_completion(&sysmon->ind_comp);
+	init_completion(&sysmon->shutdown_comp);
+	init_completion(&sysmon->ssctl_comp);
+	mutex_init(&sysmon->lock);
+	mutex_init(&sysmon->state_lock);
+
+	sysmon->shutdown_irq = of_irq_get_byname(sysmon->dev->of_node,
+						 "shutdown-ack");
+	if (sysmon->shutdown_irq < 0) {
+		if (sysmon->shutdown_irq != -ENODATA) {
+			dev_err(sysmon->dev,
+				"failed to retrieve shutdown-ack IRQ\n");
+			ret = sysmon->shutdown_irq;
+			kfree(sysmon);
+			return ERR_PTR(ret);
+		}
+	} else {
+		ret = devm_request_threaded_irq(sysmon->dev,
+						sysmon->shutdown_irq,
+						NULL, sysmon_shutdown_interrupt,
+						IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+						"q6v5 shutdown-ack", sysmon);
+		if (ret) {
+			dev_err(sysmon->dev,
+				"failed to acquire shutdown-ack IRQ\n");
+			kfree(sysmon);
+			return ERR_PTR(ret);
+		}
+	}
+
+	ret = qmi_handle_init(&sysmon->qmi, SSCTL_MAX_MSG_LEN, &ssctl_ops,
+			      qmi_indication_handler);
+	if (ret < 0) {
+		dev_err(sysmon->dev, "failed to initialize qmi handle\n");
+		kfree(sysmon);
+		return ERR_PTR(ret);
+	}
+
+	qmi_add_lookup(&sysmon->qmi, 43, 0, 0);
+
+	sysmon->subdev.prepare = sysmon_prepare;
+	sysmon->subdev.start = sysmon_start;
+	sysmon->subdev.stop = sysmon_stop;
+	sysmon->subdev.unprepare = sysmon_unprepare;
+
+	rproc_add_subdev(rproc, &sysmon->subdev);
+
+	sysmon->nb.notifier_call = sysmon_notify;
+	blocking_notifier_chain_register(&sysmon_notifiers, &sysmon->nb);
+
+	mutex_lock(&sysmon_lock);
+	list_add(&sysmon->node, &sysmon_list);
+	mutex_unlock(&sysmon_lock);
+
+	return sysmon;
+}
+EXPORT_SYMBOL_GPL(qcom_add_sysmon_subdev);
+
+/**
+ * qcom_remove_sysmon_subdev() - release a qcom_sysmon
+ * @sysmon: sysmon context, as retrieved by qcom_add_sysmon_subdev()
+ */
+void qcom_remove_sysmon_subdev(struct qcom_sysmon *sysmon)
+{
+	if (!sysmon)
+		return;
+
+	mutex_lock(&sysmon_lock);
+	list_del(&sysmon->node);
+	mutex_unlock(&sysmon_lock);
+
+	blocking_notifier_chain_unregister(&sysmon_notifiers, &sysmon->nb);
+
+	rproc_remove_subdev(sysmon->rproc, &sysmon->subdev);
+
+	qmi_handle_release(&sysmon->qmi);
+
+	kfree(sysmon);
+}
+EXPORT_SYMBOL_GPL(qcom_remove_sysmon_subdev);
+
+/**
+ * sysmon_probe() - probe sys_mon channel
+ * @rpdev: rpmsg device handle
+ *
+ * Find the sysmon context associated with the ancestor remoteproc and
+ * associate this rpmsg device with said sysmon context.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */ +static int sysmon_probe(struct rpmsg_device *rpdev) +{ + struct qcom_sysmon *sysmon; + struct rproc *rproc; + + rproc = rproc_get_by_child(&rpdev->dev); + if (!rproc) { + dev_err(&rpdev->dev, "sysmon device not child of rproc\n"); + return -EINVAL; + } + + mutex_lock(&sysmon_lock); + list_for_each_entry(sysmon, &sysmon_list, node) { + if (sysmon->rproc == rproc) + goto found; + } + mutex_unlock(&sysmon_lock); + + dev_err(&rpdev->dev, "no sysmon associated with parent rproc\n"); + + return -EINVAL; + +found: + mutex_unlock(&sysmon_lock); + + rpdev->ept->priv = sysmon; + sysmon->ept = rpdev->ept; + + return 0; +} + +/** + * sysmon_remove() - sys_mon channel remove handler + * @rpdev: rpmsg device handle + * + * Disassociate the rpmsg device with the sysmon instance. + */ +static void sysmon_remove(struct rpmsg_device *rpdev) +{ + struct qcom_sysmon *sysmon = rpdev->ept->priv; + + sysmon->ept = NULL; +} + +static const struct rpmsg_device_id sysmon_match[] = { + { "sys_mon" }, + {} +}; + +static struct rpmsg_driver sysmon_driver = { + .probe = sysmon_probe, + .remove = sysmon_remove, + .callback = sysmon_callback, + .id_table = sysmon_match, + .drv = { + .name = "qcom_sysmon", + }, +}; + +module_rpmsg_driver(sysmon_driver); + +MODULE_DESCRIPTION("Qualcomm sysmon driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c new file mode 100644 index 000000000..572f7b8ba --- /dev/null +++ b/drivers/remoteproc/qcom_wcnss.c @@ -0,0 +1,638 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Qualcomm Wireless Connectivity Subsystem Peripheral Image Loader + * + * Copyright (C) 2016 Linaro Ltd + * Copyright (C) 2014 Sony Mobile Communications AB + * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. 
+ */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/firmware.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/io.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/qcom_scm.h> +#include <linux/regulator/consumer.h> +#include <linux/remoteproc.h> +#include <linux/soc/qcom/mdt_loader.h> +#include <linux/soc/qcom/smem.h> +#include <linux/soc/qcom/smem_state.h> +#include <linux/rpmsg/qcom_smd.h> + +#include "qcom_common.h" +#include "remoteproc_internal.h" +#include "qcom_pil_info.h" +#include "qcom_wcnss.h" + +#define WCNSS_CRASH_REASON_SMEM 422 +#define WCNSS_FIRMWARE_NAME "wcnss.mdt" +#define WCNSS_PAS_ID 6 +#define WCNSS_SSCTL_ID 0x13 + +#define WCNSS_SPARE_NVBIN_DLND BIT(25) + +#define WCNSS_PMU_IRIS_XO_CFG BIT(3) +#define WCNSS_PMU_IRIS_XO_EN BIT(4) +#define WCNSS_PMU_GC_BUS_MUX_SEL_TOP BIT(5) +#define WCNSS_PMU_IRIS_XO_CFG_STS BIT(6) /* 1: in progress, 0: done */ + +#define WCNSS_PMU_IRIS_RESET BIT(7) +#define WCNSS_PMU_IRIS_RESET_STS BIT(8) /* 1: in progress, 0: done */ +#define WCNSS_PMU_IRIS_XO_READ BIT(9) +#define WCNSS_PMU_IRIS_XO_READ_STS BIT(10) + +#define WCNSS_PMU_XO_MODE_MASK GENMASK(2, 1) +#define WCNSS_PMU_XO_MODE_19p2 0 +#define WCNSS_PMU_XO_MODE_48 3 + +struct wcnss_data { + size_t pmu_offset; + size_t spare_offset; + + const struct wcnss_vreg_info *vregs; + size_t num_vregs; +}; + +struct qcom_wcnss { + struct device *dev; + struct rproc *rproc; + + void __iomem *pmu_cfg; + void __iomem *spare_out; + + bool use_48mhz_xo; + + int wdog_irq; + int fatal_irq; + int ready_irq; + int handover_irq; + int stop_ack_irq; + + struct qcom_smem_state *state; + unsigned stop_bit; + + struct mutex iris_lock; + struct qcom_iris *iris; + + struct regulator_bulk_data *vregs; + size_t num_vregs; + + struct completion start_done; + struct completion stop_done; + + phys_addr_t mem_phys; + phys_addr_t mem_reloc; + void *mem_region; + size_t mem_size; + + struct qcom_rproc_subdev smd_subdev; + struct qcom_sysmon *sysmon; +}; + +static const struct wcnss_data riva_data = { + .pmu_offset = 0x28, + .spare_offset = 0xb4, + + .vregs = (struct wcnss_vreg_info[]) { + { "vddmx", 1050000, 1150000, 0 }, + { "vddcx", 1050000, 1150000, 0 }, + { "vddpx", 1800000, 1800000, 0 }, + }, + .num_vregs = 3, +}; + +static const struct wcnss_data pronto_v1_data = { + .pmu_offset = 0x1004, + .spare_offset = 0x1088, + + .vregs = (struct wcnss_vreg_info[]) { + { "vddmx", 950000, 1150000, 0 }, + { "vddcx", .super_turbo = true}, + { "vddpx", 1800000, 1800000, 0 }, + }, + .num_vregs = 3, +}; + +static const struct wcnss_data pronto_v2_data = { + .pmu_offset = 0x1004, + .spare_offset = 0x1088, + + .vregs = (struct wcnss_vreg_info[]) { + { "vddmx", 1287500, 1287500, 0 }, + { "vddcx", .super_turbo = true }, + { "vddpx", 1800000, 1800000, 0 }, + }, + .num_vregs = 3, +}; + +void qcom_wcnss_assign_iris(struct qcom_wcnss *wcnss, + struct qcom_iris *iris, + bool use_48mhz_xo) +{ + mutex_lock(&wcnss->iris_lock); + + wcnss->iris = iris; + wcnss->use_48mhz_xo = use_48mhz_xo; + + mutex_unlock(&wcnss->iris_lock); +} + +static int wcnss_load(struct rproc *rproc, const struct firmware *fw) +{ + struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv; + int ret; + + ret = qcom_mdt_load(wcnss->dev, fw, rproc->firmware, WCNSS_PAS_ID, + wcnss->mem_region, wcnss->mem_phys, + wcnss->mem_size, &wcnss->mem_reloc); + if (ret) + return ret; + + qcom_pil_info_store("wcnss", wcnss->mem_phys, 
wcnss->mem_size); + + return 0; +} + +static void wcnss_indicate_nv_download(struct qcom_wcnss *wcnss) +{ + u32 val; + + /* Indicate NV download capability */ + val = readl(wcnss->spare_out); + val |= WCNSS_SPARE_NVBIN_DLND; + writel(val, wcnss->spare_out); +} + +static void wcnss_configure_iris(struct qcom_wcnss *wcnss) +{ + u32 val; + + /* Clear PMU cfg register */ + writel(0, wcnss->pmu_cfg); + + val = WCNSS_PMU_GC_BUS_MUX_SEL_TOP | WCNSS_PMU_IRIS_XO_EN; + writel(val, wcnss->pmu_cfg); + + /* Clear XO_MODE */ + val &= ~WCNSS_PMU_XO_MODE_MASK; + if (wcnss->use_48mhz_xo) + val |= WCNSS_PMU_XO_MODE_48 << 1; + else + val |= WCNSS_PMU_XO_MODE_19p2 << 1; + writel(val, wcnss->pmu_cfg); + + /* Reset IRIS */ + val |= WCNSS_PMU_IRIS_RESET; + writel(val, wcnss->pmu_cfg); + + /* Wait for PMU.iris_reg_reset_sts */ + while (readl(wcnss->pmu_cfg) & WCNSS_PMU_IRIS_RESET_STS) + cpu_relax(); + + /* Clear IRIS reset */ + val &= ~WCNSS_PMU_IRIS_RESET; + writel(val, wcnss->pmu_cfg); + + /* Start IRIS XO configuration */ + val |= WCNSS_PMU_IRIS_XO_CFG; + writel(val, wcnss->pmu_cfg); + + /* Wait for XO configuration to finish */ + while (readl(wcnss->pmu_cfg) & WCNSS_PMU_IRIS_XO_CFG_STS) + cpu_relax(); + + /* Stop IRIS XO configuration */ + val &= ~WCNSS_PMU_GC_BUS_MUX_SEL_TOP; + val &= ~WCNSS_PMU_IRIS_XO_CFG; + writel(val, wcnss->pmu_cfg); + + /* Add some delay for XO to settle */ + msleep(20); +} + +static int wcnss_start(struct rproc *rproc) +{ + struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv; + int ret; + + mutex_lock(&wcnss->iris_lock); + if (!wcnss->iris) { + dev_err(wcnss->dev, "no iris registered\n"); + ret = -EINVAL; + goto release_iris_lock; + } + + ret = regulator_bulk_enable(wcnss->num_vregs, wcnss->vregs); + if (ret) + goto release_iris_lock; + + ret = qcom_iris_enable(wcnss->iris); + if (ret) + goto disable_regulators; + + wcnss_indicate_nv_download(wcnss); + wcnss_configure_iris(wcnss); + + ret = qcom_scm_pas_auth_and_reset(WCNSS_PAS_ID); + if (ret) { + dev_err(wcnss->dev, + "failed to authenticate image and release reset\n"); + goto disable_iris; + } + + ret = wait_for_completion_timeout(&wcnss->start_done, + msecs_to_jiffies(5000)); + if (wcnss->ready_irq > 0 && ret == 0) { + /* We have a ready_irq, but it didn't fire in time. 
*/
+		dev_err(wcnss->dev, "start timed out\n");
+		qcom_scm_pas_shutdown(WCNSS_PAS_ID);
+		ret = -ETIMEDOUT;
+		goto disable_iris;
+	}
+
+	ret = 0;
+
+disable_iris:
+	qcom_iris_disable(wcnss->iris);
+disable_regulators:
+	regulator_bulk_disable(wcnss->num_vregs, wcnss->vregs);
+release_iris_lock:
+	mutex_unlock(&wcnss->iris_lock);
+
+	return ret;
+}
+
+static int wcnss_stop(struct rproc *rproc)
+{
+	struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
+	int ret;
+
+	if (wcnss->state) {
+		qcom_smem_state_update_bits(wcnss->state,
+					    BIT(wcnss->stop_bit),
+					    BIT(wcnss->stop_bit));
+
+		ret = wait_for_completion_timeout(&wcnss->stop_done,
+						  msecs_to_jiffies(5000));
+		if (ret == 0)
+			dev_err(wcnss->dev, "timed out on wait\n");
+
+		qcom_smem_state_update_bits(wcnss->state,
+					    BIT(wcnss->stop_bit),
+					    0);
+	}
+
+	ret = qcom_scm_pas_shutdown(WCNSS_PAS_ID);
+	if (ret)
+		dev_err(wcnss->dev, "failed to shutdown: %d\n", ret);
+
+	return ret;
+}
+
+static void *wcnss_da_to_va(struct rproc *rproc, u64 da, size_t len)
+{
+	struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
+	int offset;
+
+	offset = da - wcnss->mem_reloc;
+	if (offset < 0 || offset + len > wcnss->mem_size)
+		return NULL;
+
+	return wcnss->mem_region + offset;
+}
+
+static const struct rproc_ops wcnss_ops = {
+	.start = wcnss_start,
+	.stop = wcnss_stop,
+	.da_to_va = wcnss_da_to_va,
+	.parse_fw = qcom_register_dump_segments,
+	.load = wcnss_load,
+};
+
+static irqreturn_t wcnss_wdog_interrupt(int irq, void *dev)
+{
+	struct qcom_wcnss *wcnss = dev;
+
+	rproc_report_crash(wcnss->rproc, RPROC_WATCHDOG);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t wcnss_fatal_interrupt(int irq, void *dev)
+{
+	struct qcom_wcnss *wcnss = dev;
+	size_t len;
+	char *msg;
+
+	msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, WCNSS_CRASH_REASON_SMEM, &len);
+	if (!IS_ERR(msg) && len > 0 && msg[0])
+		dev_err(wcnss->dev, "fatal error received: %s\n", msg);
+
+	rproc_report_crash(wcnss->rproc, RPROC_FATAL_ERROR);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t wcnss_ready_interrupt(int irq, void *dev)
+{
+	struct qcom_wcnss *wcnss = dev;
+
+	complete(&wcnss->start_done);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t wcnss_handover_interrupt(int irq, void *dev)
+{
+	/*
+	 * XXX: At this point we're supposed to release the resources that we
+	 * have been holding on behalf of the WCNSS. Unfortunately this
+	 * interrupt comes way before the other side seems to be done.
+	 *
+	 * So we're currently relying on the ready interrupt firing later than
+	 * this and we just disable the resources at the end of wcnss_start().
+ */ + + return IRQ_HANDLED; +} + +static irqreturn_t wcnss_stop_ack_interrupt(int irq, void *dev) +{ + struct qcom_wcnss *wcnss = dev; + + complete(&wcnss->stop_done); + + return IRQ_HANDLED; +} + +static int wcnss_init_regulators(struct qcom_wcnss *wcnss, + const struct wcnss_vreg_info *info, + int num_vregs) +{ + struct regulator_bulk_data *bulk; + int ret; + int i; + + bulk = devm_kcalloc(wcnss->dev, + num_vregs, sizeof(struct regulator_bulk_data), + GFP_KERNEL); + if (!bulk) + return -ENOMEM; + + for (i = 0; i < num_vregs; i++) + bulk[i].supply = info[i].name; + + ret = devm_regulator_bulk_get(wcnss->dev, num_vregs, bulk); + if (ret) + return ret; + + for (i = 0; i < num_vregs; i++) { + if (info[i].max_voltage) + regulator_set_voltage(bulk[i].consumer, + info[i].min_voltage, + info[i].max_voltage); + + if (info[i].load_uA) + regulator_set_load(bulk[i].consumer, info[i].load_uA); + } + + wcnss->vregs = bulk; + wcnss->num_vregs = num_vregs; + + return 0; +} + +static int wcnss_request_irq(struct qcom_wcnss *wcnss, + struct platform_device *pdev, + const char *name, + bool optional, + irq_handler_t thread_fn) +{ + int ret; + int irq_number; + + ret = platform_get_irq_byname(pdev, name); + if (ret < 0 && optional) { + dev_dbg(&pdev->dev, "no %s IRQ defined, ignoring\n", name); + return 0; + } else if (ret < 0) { + dev_err(&pdev->dev, "no %s IRQ defined\n", name); + return ret; + } + + irq_number = ret; + + ret = devm_request_threaded_irq(&pdev->dev, ret, + NULL, thread_fn, + IRQF_TRIGGER_RISING | IRQF_ONESHOT, + "wcnss", wcnss); + if (ret) { + dev_err(&pdev->dev, "request %s IRQ failed\n", name); + return ret; + } + + /* Return the IRQ number if the IRQ was successfully acquired */ + return irq_number; +} + +static int wcnss_alloc_memory_region(struct qcom_wcnss *wcnss) +{ + struct device_node *node; + struct resource r; + int ret; + + node = of_parse_phandle(wcnss->dev->of_node, "memory-region", 0); + if (!node) { + dev_err(wcnss->dev, "no memory-region specified\n"); + return -EINVAL; + } + + ret = of_address_to_resource(node, 0, &r); + of_node_put(node); + if (ret) + return ret; + + wcnss->mem_phys = wcnss->mem_reloc = r.start; + wcnss->mem_size = resource_size(&r); + wcnss->mem_region = devm_ioremap_wc(wcnss->dev, wcnss->mem_phys, wcnss->mem_size); + if (!wcnss->mem_region) { + dev_err(wcnss->dev, "unable to map memory region: %pa+%zx\n", + &r.start, wcnss->mem_size); + return -EBUSY; + } + + return 0; +} + +static int wcnss_probe(struct platform_device *pdev) +{ + const struct wcnss_data *data; + struct qcom_wcnss *wcnss; + struct resource *res; + struct rproc *rproc; + void __iomem *mmio; + int ret; + + data = of_device_get_match_data(&pdev->dev); + + if (!qcom_scm_is_available()) + return -EPROBE_DEFER; + + if (!qcom_scm_pas_supported(WCNSS_PAS_ID)) { + dev_err(&pdev->dev, "PAS is not available for WCNSS\n"); + return -ENXIO; + } + + rproc = rproc_alloc(&pdev->dev, pdev->name, &wcnss_ops, + WCNSS_FIRMWARE_NAME, sizeof(*wcnss)); + if (!rproc) { + dev_err(&pdev->dev, "unable to allocate remoteproc\n"); + return -ENOMEM; + } + rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE); + + wcnss = (struct qcom_wcnss *)rproc->priv; + wcnss->dev = &pdev->dev; + wcnss->rproc = rproc; + platform_set_drvdata(pdev, wcnss); + + init_completion(&wcnss->start_done); + init_completion(&wcnss->stop_done); + + mutex_init(&wcnss->iris_lock); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pmu"); + mmio = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(mmio)) { + ret = 
PTR_ERR(mmio); + goto free_rproc; + }; + + ret = wcnss_alloc_memory_region(wcnss); + if (ret) + goto free_rproc; + + wcnss->pmu_cfg = mmio + data->pmu_offset; + wcnss->spare_out = mmio + data->spare_offset; + + ret = wcnss_init_regulators(wcnss, data->vregs, data->num_vregs); + if (ret) + goto free_rproc; + + ret = wcnss_request_irq(wcnss, pdev, "wdog", false, wcnss_wdog_interrupt); + if (ret < 0) + goto free_rproc; + wcnss->wdog_irq = ret; + + ret = wcnss_request_irq(wcnss, pdev, "fatal", false, wcnss_fatal_interrupt); + if (ret < 0) + goto free_rproc; + wcnss->fatal_irq = ret; + + ret = wcnss_request_irq(wcnss, pdev, "ready", true, wcnss_ready_interrupt); + if (ret < 0) + goto free_rproc; + wcnss->ready_irq = ret; + + ret = wcnss_request_irq(wcnss, pdev, "handover", true, wcnss_handover_interrupt); + if (ret < 0) + goto free_rproc; + wcnss->handover_irq = ret; + + ret = wcnss_request_irq(wcnss, pdev, "stop-ack", true, wcnss_stop_ack_interrupt); + if (ret < 0) + goto free_rproc; + wcnss->stop_ack_irq = ret; + + if (wcnss->stop_ack_irq) { + wcnss->state = qcom_smem_state_get(&pdev->dev, "stop", + &wcnss->stop_bit); + if (IS_ERR(wcnss->state)) { + ret = PTR_ERR(wcnss->state); + goto free_rproc; + } + } + + qcom_add_smd_subdev(rproc, &wcnss->smd_subdev); + wcnss->sysmon = qcom_add_sysmon_subdev(rproc, "wcnss", WCNSS_SSCTL_ID); + if (IS_ERR(wcnss->sysmon)) { + ret = PTR_ERR(wcnss->sysmon); + goto free_rproc; + } + + ret = rproc_add(rproc); + if (ret) + goto free_rproc; + + return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); + +free_rproc: + rproc_free(rproc); + + return ret; +} + +static int wcnss_remove(struct platform_device *pdev) +{ + struct qcom_wcnss *wcnss = platform_get_drvdata(pdev); + + of_platform_depopulate(&pdev->dev); + + qcom_smem_state_put(wcnss->state); + rproc_del(wcnss->rproc); + + qcom_remove_sysmon_subdev(wcnss->sysmon); + qcom_remove_smd_subdev(wcnss->rproc, &wcnss->smd_subdev); + rproc_free(wcnss->rproc); + + return 0; +} + +static const struct of_device_id wcnss_of_match[] = { + { .compatible = "qcom,riva-pil", &riva_data }, + { .compatible = "qcom,pronto-v1-pil", &pronto_v1_data }, + { .compatible = "qcom,pronto-v2-pil", &pronto_v2_data }, + { }, +}; +MODULE_DEVICE_TABLE(of, wcnss_of_match); + +static struct platform_driver wcnss_driver = { + .probe = wcnss_probe, + .remove = wcnss_remove, + .driver = { + .name = "qcom-wcnss-pil", + .of_match_table = wcnss_of_match, + }, +}; + +static int __init wcnss_init(void) +{ + int ret; + + ret = platform_driver_register(&wcnss_driver); + if (ret) + return ret; + + ret = platform_driver_register(&qcom_iris_driver); + if (ret) + platform_driver_unregister(&wcnss_driver); + + return ret; +} +module_init(wcnss_init); + +static void __exit wcnss_exit(void) +{ + platform_driver_unregister(&qcom_iris_driver); + platform_driver_unregister(&wcnss_driver); +} +module_exit(wcnss_exit); + +MODULE_DESCRIPTION("Qualcomm Peripheral Image Loader for Wireless Subsystem"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/remoteproc/qcom_wcnss.h b/drivers/remoteproc/qcom_wcnss.h new file mode 100644 index 000000000..62c8682d0 --- /dev/null +++ b/drivers/remoteproc/qcom_wcnss.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __QCOM_WNCSS_H__ +#define __QCOM_WNCSS_H__ + +struct qcom_iris; +struct qcom_wcnss; + +extern struct platform_driver qcom_iris_driver; + +struct wcnss_vreg_info { + const char * const name; + int min_voltage; + int max_voltage; + + int load_uA; + + bool super_turbo; +}; + +int 
qcom_iris_enable(struct qcom_iris *iris); +void qcom_iris_disable(struct qcom_iris *iris); + +void qcom_wcnss_assign_iris(struct qcom_wcnss *wcnss, struct qcom_iris *iris, bool use_48mhz_xo); + +#endif diff --git a/drivers/remoteproc/qcom_wcnss_iris.c b/drivers/remoteproc/qcom_wcnss_iris.c new file mode 100644 index 000000000..0e0ae1e76 --- /dev/null +++ b/drivers/remoteproc/qcom_wcnss_iris.c @@ -0,0 +1,175 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Qualcomm Wireless Connectivity Subsystem Iris driver + * + * Copyright (C) 2016 Linaro Ltd + * Copyright (C) 2014 Sony Mobile Communications AB + * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. + */ + +#include <linux/clk.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/regulator/consumer.h> + +#include "qcom_wcnss.h" + +struct qcom_iris { + struct device *dev; + + struct clk *xo_clk; + + struct regulator_bulk_data *vregs; + size_t num_vregs; +}; + +struct iris_data { + const struct wcnss_vreg_info *vregs; + size_t num_vregs; + + bool use_48mhz_xo; +}; + +static const struct iris_data wcn3620_data = { + .vregs = (struct wcnss_vreg_info[]) { + { "vddxo", 1800000, 1800000, 10000 }, + { "vddrfa", 1300000, 1300000, 100000 }, + { "vddpa", 3300000, 3300000, 515000 }, + { "vdddig", 1800000, 1800000, 10000 }, + }, + .num_vregs = 4, + .use_48mhz_xo = false, +}; + +static const struct iris_data wcn3660_data = { + .vregs = (struct wcnss_vreg_info[]) { + { "vddxo", 1800000, 1800000, 10000 }, + { "vddrfa", 1300000, 1300000, 100000 }, + { "vddpa", 2900000, 3000000, 515000 }, + { "vdddig", 1200000, 1225000, 10000 }, + }, + .num_vregs = 4, + .use_48mhz_xo = true, +}; + +static const struct iris_data wcn3680_data = { + .vregs = (struct wcnss_vreg_info[]) { + { "vddxo", 1800000, 1800000, 10000 }, + { "vddrfa", 1300000, 1300000, 100000 }, + { "vddpa", 3300000, 3300000, 515000 }, + { "vdddig", 1800000, 1800000, 10000 }, + }, + .num_vregs = 4, + .use_48mhz_xo = true, +}; + +int qcom_iris_enable(struct qcom_iris *iris) +{ + int ret; + + ret = regulator_bulk_enable(iris->num_vregs, iris->vregs); + if (ret) + return ret; + + ret = clk_prepare_enable(iris->xo_clk); + if (ret) { + dev_err(iris->dev, "failed to enable xo clk\n"); + goto disable_regulators; + } + + return 0; + +disable_regulators: + regulator_bulk_disable(iris->num_vregs, iris->vregs); + + return ret; +} + +void qcom_iris_disable(struct qcom_iris *iris) +{ + clk_disable_unprepare(iris->xo_clk); + regulator_bulk_disable(iris->num_vregs, iris->vregs); +} + +static int qcom_iris_probe(struct platform_device *pdev) +{ + const struct iris_data *data; + struct qcom_wcnss *wcnss; + struct qcom_iris *iris; + int ret; + int i; + + iris = devm_kzalloc(&pdev->dev, sizeof(struct qcom_iris), GFP_KERNEL); + if (!iris) + return -ENOMEM; + + data = of_device_get_match_data(&pdev->dev); + wcnss = dev_get_drvdata(pdev->dev.parent); + + iris->xo_clk = devm_clk_get(&pdev->dev, "xo"); + if (IS_ERR(iris->xo_clk)) { + if (PTR_ERR(iris->xo_clk) != -EPROBE_DEFER) + dev_err(&pdev->dev, "failed to acquire xo clk\n"); + return PTR_ERR(iris->xo_clk); + } + + iris->num_vregs = data->num_vregs; + iris->vregs = devm_kcalloc(&pdev->dev, + iris->num_vregs, + sizeof(struct regulator_bulk_data), + GFP_KERNEL); + if (!iris->vregs) + return -ENOMEM; + + for (i = 0; i < iris->num_vregs; i++) + iris->vregs[i].supply = data->vregs[i].name; + + ret = devm_regulator_bulk_get(&pdev->dev, iris->num_vregs, iris->vregs); + if 
(ret) { + dev_err(&pdev->dev, "failed to get regulators\n"); + return ret; + } + + for (i = 0; i < iris->num_vregs; i++) { + if (data->vregs[i].max_voltage) + regulator_set_voltage(iris->vregs[i].consumer, + data->vregs[i].min_voltage, + data->vregs[i].max_voltage); + + if (data->vregs[i].load_uA) + regulator_set_load(iris->vregs[i].consumer, + data->vregs[i].load_uA); + } + + qcom_wcnss_assign_iris(wcnss, iris, data->use_48mhz_xo); + + return 0; +} + +static int qcom_iris_remove(struct platform_device *pdev) +{ + struct qcom_wcnss *wcnss = dev_get_drvdata(pdev->dev.parent); + + qcom_wcnss_assign_iris(wcnss, NULL, false); + + return 0; +} + +static const struct of_device_id iris_of_match[] = { + { .compatible = "qcom,wcn3620", .data = &wcn3620_data }, + { .compatible = "qcom,wcn3660", .data = &wcn3660_data }, + { .compatible = "qcom,wcn3680", .data = &wcn3680_data }, + {} +}; +MODULE_DEVICE_TABLE(of, iris_of_match); + +struct platform_driver qcom_iris_driver = { + .probe = qcom_iris_probe, + .remove = qcom_iris_remove, + .driver = { + .name = "qcom-iris", + .of_match_table = iris_of_match, + }, +}; diff --git a/drivers/remoteproc/remoteproc_cdev.c b/drivers/remoteproc/remoteproc_cdev.c new file mode 100644 index 000000000..ff92ed25d --- /dev/null +++ b/drivers/remoteproc/remoteproc_cdev.c @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Character device interface driver for Remoteproc framework. + * + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + */ + +#include <linux/cdev.h> +#include <linux/compat.h> +#include <linux/fs.h> +#include <linux/module.h> +#include <linux/remoteproc.h> +#include <linux/uaccess.h> +#include <uapi/linux/remoteproc_cdev.h> + +#include "remoteproc_internal.h" + +#define NUM_RPROC_DEVICES 64 +static dev_t rproc_major; + +static ssize_t rproc_cdev_write(struct file *filp, const char __user *buf, size_t len, loff_t *pos) +{ + struct rproc *rproc = container_of(filp->f_inode->i_cdev, struct rproc, cdev); + int ret = 0; + char cmd[10]; + + if (!len || len > sizeof(cmd)) + return -EINVAL; + + ret = copy_from_user(cmd, buf, len); + if (ret) + return -EFAULT; + + if (!strncmp(cmd, "start", len)) { + if (rproc->state == RPROC_RUNNING) + return -EBUSY; + + ret = rproc_boot(rproc); + } else if (!strncmp(cmd, "stop", len)) { + if (rproc->state != RPROC_RUNNING) + return -EINVAL; + + rproc_shutdown(rproc); + } else { + dev_err(&rproc->dev, "Unrecognized option\n"); + ret = -EINVAL; + } + + return ret ? 
ret : len;
+}
+
+static long rproc_device_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+	struct rproc *rproc = container_of(filp->f_inode->i_cdev, struct rproc, cdev);
+	void __user *argp = (void __user *)arg;
+	s32 param;
+
+	switch (ioctl) {
+	case RPROC_SET_SHUTDOWN_ON_RELEASE:
+		if (copy_from_user(&param, argp, sizeof(s32)))
+			return -EFAULT;
+
+		rproc->cdev_put_on_release = !!param;
+		break;
+	case RPROC_GET_SHUTDOWN_ON_RELEASE:
+		param = (s32)rproc->cdev_put_on_release;
+		if (copy_to_user(argp, &param, sizeof(s32)))
+			return -EFAULT;
+
+		break;
+	default:
+		dev_err(&rproc->dev, "Unsupported ioctl\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int rproc_cdev_release(struct inode *inode, struct file *filp)
+{
+	struct rproc *rproc = container_of(inode->i_cdev, struct rproc, cdev);
+
+	if (rproc->cdev_put_on_release && rproc->state == RPROC_RUNNING)
+		rproc_shutdown(rproc);
+
+	return 0;
+}
+
+static const struct file_operations rproc_fops = {
+	.write = rproc_cdev_write,
+	.unlocked_ioctl = rproc_device_ioctl,
+	.compat_ioctl = compat_ptr_ioctl,
+	.release = rproc_cdev_release,
+};
+
+int rproc_char_device_add(struct rproc *rproc)
+{
+	int ret;
+
+	cdev_init(&rproc->cdev, &rproc_fops);
+	rproc->cdev.owner = THIS_MODULE;
+
+	rproc->dev.devt = MKDEV(MAJOR(rproc_major), rproc->index);
+	cdev_set_parent(&rproc->cdev, &rproc->dev.kobj);
+	ret = cdev_add(&rproc->cdev, rproc->dev.devt, 1);
+	if (ret < 0)
+		dev_err(&rproc->dev, "Failed to add char dev for %s\n", rproc->name);
+
+	return ret;
+}
+
+void rproc_char_device_remove(struct rproc *rproc)
+{
+	cdev_del(&rproc->cdev);
+}
+
+void __init rproc_init_cdev(void)
+{
+	int ret;
+
+	ret = alloc_chrdev_region(&rproc_major, 0, NUM_RPROC_DEVICES, "remoteproc");
+	if (ret < 0)
+		pr_err("Failed to alloc rproc_cdev region, err %d\n", ret);
+}
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
new file mode 100644
index 000000000..cc55ff012
--- /dev/null
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -0,0 +1,2489 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Remote Processor Framework
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ * + * Ohad Ben-Cohen <ohad@wizery.com> + * Brian Swetland <swetland@google.com> + * Mark Grosen <mgrosen@ti.com> + * Fernando Guzman Lugo <fernando.lugo@ti.com> + * Suman Anna <s-anna@ti.com> + * Robert Tivy <rtivy@ti.com> + * Armando Uribe De Leon <x0095078@ti.com> + */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/slab.h> +#include <linux/mutex.h> +#include <linux/dma-map-ops.h> +#include <linux/dma-mapping.h> +#include <linux/dma-direct.h> /* XXX: pokes into bus_dma_range */ +#include <linux/firmware.h> +#include <linux/string.h> +#include <linux/debugfs.h> +#include <linux/rculist.h> +#include <linux/remoteproc.h> +#include <linux/iommu.h> +#include <linux/idr.h> +#include <linux/elf.h> +#include <linux/crc32.h> +#include <linux/of_reserved_mem.h> +#include <linux/virtio_ids.h> +#include <linux/virtio_ring.h> +#include <asm/byteorder.h> +#include <linux/platform_device.h> + +#include "remoteproc_internal.h" + +#define HIGH_BITS_MASK 0xFFFFFFFF00000000ULL + +static DEFINE_MUTEX(rproc_list_mutex); +static LIST_HEAD(rproc_list); +static struct notifier_block rproc_panic_nb; + +typedef int (*rproc_handle_resource_t)(struct rproc *rproc, + void *, int offset, int avail); + +static int rproc_alloc_carveout(struct rproc *rproc, + struct rproc_mem_entry *mem); +static int rproc_release_carveout(struct rproc *rproc, + struct rproc_mem_entry *mem); + +/* Unique indices for remoteproc devices */ +static DEFINE_IDA(rproc_dev_index); + +static const char * const rproc_crash_names[] = { + [RPROC_MMUFAULT] = "mmufault", + [RPROC_WATCHDOG] = "watchdog", + [RPROC_FATAL_ERROR] = "fatal error", +}; + +/* translate rproc_crash_type to string */ +static const char *rproc_crash_to_string(enum rproc_crash_type type) +{ + if (type < ARRAY_SIZE(rproc_crash_names)) + return rproc_crash_names[type]; + return "unknown"; +} + +/* + * This is the IOMMU fault handler we register with the IOMMU API + * (when relevant; not all remote processors access memory through + * an IOMMU). + * + * IOMMU core will invoke this handler whenever the remote processor + * will try to access an unmapped device address. + */ +static int rproc_iommu_fault(struct iommu_domain *domain, struct device *dev, + unsigned long iova, int flags, void *token) +{ + struct rproc *rproc = token; + + dev_err(dev, "iommu fault: da 0x%lx flags 0x%x\n", iova, flags); + + rproc_report_crash(rproc, RPROC_MMUFAULT); + + /* + * Let the iommu core know we're not really handling this fault; + * we just used it as a recovery trigger. 
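+	 * The actual recovery runs later from rproc_crash_handler_work(),
+	 * which rproc_report_crash() schedules.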
+ */ + return -ENOSYS; +} + +static int rproc_enable_iommu(struct rproc *rproc) +{ + struct iommu_domain *domain; + struct device *dev = rproc->dev.parent; + int ret; + + if (!rproc->has_iommu) { + dev_dbg(dev, "iommu not present\n"); + return 0; + } + + domain = iommu_domain_alloc(dev->bus); + if (!domain) { + dev_err(dev, "can't alloc iommu domain\n"); + return -ENOMEM; + } + + iommu_set_fault_handler(domain, rproc_iommu_fault, rproc); + + ret = iommu_attach_device(domain, dev); + if (ret) { + dev_err(dev, "can't attach iommu device: %d\n", ret); + goto free_domain; + } + + rproc->domain = domain; + + return 0; + +free_domain: + iommu_domain_free(domain); + return ret; +} + +static void rproc_disable_iommu(struct rproc *rproc) +{ + struct iommu_domain *domain = rproc->domain; + struct device *dev = rproc->dev.parent; + + if (!domain) + return; + + iommu_detach_device(domain, dev); + iommu_domain_free(domain); +} + +phys_addr_t rproc_va_to_pa(void *cpu_addr) +{ + /* + * Return physical address according to virtual address location + * - in vmalloc: if region ioremapped or defined as dma_alloc_coherent + * - in kernel: if region allocated in generic dma memory pool + */ + if (is_vmalloc_addr(cpu_addr)) { + return page_to_phys(vmalloc_to_page(cpu_addr)) + + offset_in_page(cpu_addr); + } + + WARN_ON(!virt_addr_valid(cpu_addr)); + return virt_to_phys(cpu_addr); +} +EXPORT_SYMBOL(rproc_va_to_pa); + +/** + * rproc_da_to_va() - lookup the kernel virtual address for a remoteproc address + * @rproc: handle of a remote processor + * @da: remoteproc device address to translate + * @len: length of the memory region @da is pointing to + * + * Some remote processors will ask us to allocate them physically contiguous + * memory regions (which we call "carveouts"), and map them to specific + * device addresses (which are hardcoded in the firmware). They may also have + * dedicated memory regions internal to the processors, and use them either + * exclusively or alongside carveouts. + * + * They may then ask us to copy objects into specific device addresses (e.g. + * code/data sections) or expose us certain symbols in other device address + * (e.g. their trace buffer). + * + * This function is a helper function with which we can go over the allocated + * carveouts and translate specific device addresses to kernel virtual addresses + * so we can access the referenced memory. This function also allows to perform + * translations on the internal remoteproc memory regions through a platform + * implementation specific da_to_va ops, if present. + * + * The function returns a valid kernel address on success or NULL on failure. + * + * Note: phys_to_virt(iommu_iova_to_phys(rproc->domain, da)) will work too, + * but only on kernel direct mapped RAM memory. Instead, we're just using + * here the output of the DMA API for the carveouts, which should be more + * correct. 
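
/* --- Editor's note: illustrative sketch, not part of this patch ---
 * This is roughly how a firmware loader uses rproc_da_to_va() to place one
 * ELF segment: the da/memsz/filesz values would come from an ELF program
 * header, and the helper name is invented for the example.
 */
static int example_load_segment(struct rproc *rproc, u64 da, size_t memsz,
                                size_t filesz, const u8 *src)
{
        void *ptr = rproc_da_to_va(rproc, da, memsz);

        if (!ptr)
                return -EINVAL; /* da not covered by a carveout or da_to_va op */

        memcpy(ptr, src, filesz);                       /* code/data */
        memset(ptr + filesz, 0, memsz - filesz);        /* zero the .bss tail */
        return 0;
}
/* --- end editor's sketch --- */
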
+ */
+void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
+{
+        struct rproc_mem_entry *carveout;
+        void *ptr = NULL;
+
+        if (rproc->ops->da_to_va) {
+                ptr = rproc->ops->da_to_va(rproc, da, len);
+                if (ptr)
+                        goto out;
+        }
+
+        list_for_each_entry(carveout, &rproc->carveouts, node) {
+                int offset = da - carveout->da;
+
+                /* Verify that carveout is allocated */
+                if (!carveout->va)
+                        continue;
+
+                /* try next carveout if da is too small */
+                if (offset < 0)
+                        continue;
+
+                /* try next carveout if da is too large */
+                if (offset + len > carveout->len)
+                        continue;
+
+                ptr = carveout->va + offset;
+
+                break;
+        }
+
+out:
+        return ptr;
+}
+EXPORT_SYMBOL(rproc_da_to_va);
+
+/**
+ * rproc_find_carveout_by_name() - lookup the carveout region by a name
+ * @rproc: handle of a remote processor
+ * @name: carveout name to find (format string)
+ * @...: optional parameters matching @name string
+ *
+ * Platform drivers have the capability to register pre-allocated carveouts
+ * (physically contiguous memory regions) before rproc firmware loading and
+ * the associated resource table analysis. These regions may be dedicated
+ * memory regions internal to the coprocessor or specified DDR regions with
+ * specific attributes.
+ *
+ * This function is a helper function with which we can go over the
+ * allocated carveouts and return associated region characteristics like
+ * coprocessor address, length or processor virtual address.
+ *
+ * Return: a valid pointer on carveout entry on success or NULL on failure.
+ */
+__printf(2, 3)
+struct rproc_mem_entry *
+rproc_find_carveout_by_name(struct rproc *rproc, const char *name, ...)
+{
+        va_list args;
+        char _name[32];
+        struct rproc_mem_entry *carveout, *mem = NULL;
+
+        if (!name)
+                return NULL;
+
+        va_start(args, name);
+        vsnprintf(_name, sizeof(_name), name, args);
+        va_end(args);
+
+        list_for_each_entry(carveout, &rproc->carveouts, node) {
+                /* Compare carveout and requested names */
+                if (!strcmp(carveout->name, _name)) {
+                        mem = carveout;
+                        break;
+                }
+        }
+
+        return mem;
+}
+
+/**
+ * rproc_check_carveout_da() - check a specified carveout da configuration
+ * @rproc: handle of a remote processor
+ * @mem: pointer to the carveout to check
+ * @da: area device address
+ * @len: associated area size
+ *
+ * This function is a helper function to verify that the requested device
+ * area (couple da, len) is part of the specified carveout.
+ * If da is not set (defined as FW_RSC_ADDR_ANY), only the requested length
+ * is checked.
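
/* --- Editor's note: illustrative numbers, not part of this patch ---
 * For a registered carveout { da = 0x10000000, len = 0x100000 }, a request
 * { da = 0x10080000, len = 0x1000 } passes: delta = 0x80000 is >= 0 and
 * delta + len = 0x81000 <= 0x100000. A request at da 0x0fff0000 fails the
 * delta check, and one with len 0x200000 fails the length check.
 */
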
+ *
+ * Return: 0 if carveout matches request else error
+ */
+static int rproc_check_carveout_da(struct rproc *rproc,
+                                   struct rproc_mem_entry *mem, u32 da, u32 len)
+{
+        struct device *dev = &rproc->dev;
+        int delta;
+
+        /* Check requested resource length */
+        if (len > mem->len) {
+                dev_err(dev, "Registered carveout doesn't fit len request\n");
+                return -EINVAL;
+        }
+
+        if (da != FW_RSC_ADDR_ANY && mem->da == FW_RSC_ADDR_ANY) {
+                /* Address doesn't match registered carveout configuration */
+                return -EINVAL;
+        } else if (da != FW_RSC_ADDR_ANY && mem->da != FW_RSC_ADDR_ANY) {
+                delta = da - mem->da;
+
+                /* Check requested resource belongs to registered carveout */
+                if (delta < 0) {
+                        dev_err(dev,
+                                "Registered carveout doesn't fit da request\n");
+                        return -EINVAL;
+                }
+
+                if (delta + len > mem->len) {
+                        dev_err(dev,
+                                "Registered carveout doesn't fit len request\n");
+                        return -EINVAL;
+                }
+        }
+
+        return 0;
+}
+
+int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
+{
+        struct rproc *rproc = rvdev->rproc;
+        struct device *dev = &rproc->dev;
+        struct rproc_vring *rvring = &rvdev->vring[i];
+        struct fw_rsc_vdev *rsc;
+        int ret, notifyid;
+        struct rproc_mem_entry *mem;
+        size_t size;
+
+        /* actual size of vring (in bytes) */
+        size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));
+
+        rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;
+
+        /* Search for pre-registered carveout */
+        mem = rproc_find_carveout_by_name(rproc, "vdev%dvring%d", rvdev->index,
+                                          i);
+        if (mem) {
+                if (rproc_check_carveout_da(rproc, mem, rsc->vring[i].da, size))
+                        return -ENOMEM;
+        } else {
+                /* Register carveout in list */
+                mem = rproc_mem_entry_init(dev, NULL, 0,
+                                           size, rsc->vring[i].da,
+                                           rproc_alloc_carveout,
+                                           rproc_release_carveout,
+                                           "vdev%dvring%d",
+                                           rvdev->index, i);
+                if (!mem) {
+                        dev_err(dev, "Can't allocate memory entry structure\n");
+                        return -ENOMEM;
+                }
+
+                rproc_add_carveout(rproc, mem);
+        }
+
+        /*
+         * Assign an rproc-wide unique index for this vring
+         * TODO: assign a notifyid for rvdev updates as well
+         * TODO: support predefined notifyids (via resource table)
+         */
+        ret = idr_alloc(&rproc->notifyids, rvring, 0, 0, GFP_KERNEL);
+        if (ret < 0) {
+                dev_err(dev, "idr_alloc failed: %d\n", ret);
+                return ret;
+        }
+        notifyid = ret;
+
+        /* Potentially bump max_notifyid */
+        if (notifyid > rproc->max_notifyid)
+                rproc->max_notifyid = notifyid;
+
+        rvring->notifyid = notifyid;
+
+        /* Let the rproc know the notifyid of this vring. */
+        rsc->vring[i].notifyid = notifyid;
+        return 0;
+}
+
+static int
+rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)
+{
+        struct rproc *rproc = rvdev->rproc;
+        struct device *dev = &rproc->dev;
+        struct fw_rsc_vdev_vring *vring = &rsc->vring[i];
+        struct rproc_vring *rvring = &rvdev->vring[i];
+
+        dev_dbg(dev, "vdev rsc: vring%d: da 0x%x, qsz %d, align %d\n",
+                i, vring->da, vring->num, vring->align);
+
+        /* verify queue size and vring alignment are sane */
+        if (!vring->num || !vring->align) {
+                dev_err(dev, "invalid qsz (%d) or alignment (%d)\n",
+                        vring->num, vring->align);
+                return -EINVAL;
+        }
+
+        rvring->len = vring->num;
+        rvring->align = vring->align;
+        rvring->rvdev = rvdev;
+
+        return 0;
+}
+
+void rproc_free_vring(struct rproc_vring *rvring)
+{
+        struct rproc *rproc = rvring->rvdev->rproc;
+        int idx = rvring - rvring->rvdev->vring;
+        struct fw_rsc_vdev *rsc;
+
+        idr_remove(&rproc->notifyids, rvring->notifyid);
+
+        /*
+         * At this point rproc_stop() has been called and the installed resource
+         * table in the remote processor memory
may no longer be accessible. As + * such and as per rproc_stop(), rproc->table_ptr points to the cached + * resource table (rproc->cached_table). The cached resource table is + * only available when a remote processor has been booted by the + * remoteproc core, otherwise it is NULL. + * + * Based on the above, reset the virtio device section in the cached + * resource table only if there is one to work with. + */ + if (rproc->table_ptr) { + rsc = (void *)rproc->table_ptr + rvring->rvdev->rsc_offset; + rsc->vring[idx].da = 0; + rsc->vring[idx].notifyid = -1; + } +} + +static int rproc_vdev_do_start(struct rproc_subdev *subdev) +{ + struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev); + + return rproc_add_virtio_dev(rvdev, rvdev->id); +} + +static void rproc_vdev_do_stop(struct rproc_subdev *subdev, bool crashed) +{ + struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev); + int ret; + + ret = device_for_each_child(&rvdev->dev, NULL, rproc_remove_virtio_dev); + if (ret) + dev_warn(&rvdev->dev, "can't remove vdev child device: %d\n", ret); +} + +/** + * rproc_rvdev_release() - release the existence of a rvdev + * + * @dev: the subdevice's dev + */ +static void rproc_rvdev_release(struct device *dev) +{ + struct rproc_vdev *rvdev = container_of(dev, struct rproc_vdev, dev); + + of_reserved_mem_device_release(dev); + + kfree(rvdev); +} + +static int copy_dma_range_map(struct device *to, struct device *from) +{ + const struct bus_dma_region *map = from->dma_range_map, *new_map, *r; + int num_ranges = 0; + + if (!map) + return 0; + + for (r = map; r->size; r++) + num_ranges++; + + new_map = kmemdup(map, array_size(num_ranges + 1, sizeof(*map)), + GFP_KERNEL); + if (!new_map) + return -ENOMEM; + to->dma_range_map = new_map; + return 0; +} + +/** + * rproc_handle_vdev() - handle a vdev fw resource + * @rproc: the remote processor + * @rsc: the vring resource descriptor + * @offset: offset of the resource entry + * @avail: size of available data (for sanity checking the image) + * + * This resource entry requests the host to statically register a virtio + * device (vdev), and setup everything needed to support it. It contains + * everything needed to make it possible: the virtio device id, virtio + * device features, vrings information, virtio config space, etc... + * + * Before registering the vdev, the vrings are allocated from non-cacheable + * physically contiguous memory. Currently we only support two vrings per + * remote processor (temporary limitation). We might also want to consider + * doing the vring allocation only later when ->find_vqs() is invoked, and + * then release them upon ->del_vqs(). + * + * Note: @da is currently not really handled correctly: we dynamically + * allocate it using the DMA API, ignoring requested hard coded addresses, + * and we don't take care of any required IOMMU programming. This is all + * going to be taken care of when the generic iommu-based DMA API will be + * merged. Meanwhile, statically-addressed iommu-based firmware images should + * use RSC_DEVMEM resource entries to map their required @da to the physical + * address of their base CMA region (ouch, hacky!). 
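
/* --- Editor's note: illustrative sketch, not part of this patch ---
 * A firmware image might declare the vdev entry this handler consumes
 * roughly as below (an rpmsg vdev with two vrings; the values are made up,
 * the struct layouts come from <linux/remoteproc.h>):
 */
static const struct {
        struct fw_rsc_vdev vdev;
        struct fw_rsc_vdev_vring vring[2];
} __packed example_rpmsg_vdev = {
        .vdev = {
                .id             = VIRTIO_ID_RPMSG,
                .num_of_vrings  = 2,
        },
        .vring = {
                /* da = FW_RSC_ADDR_ANY lets the host pick the address */
                { .da = FW_RSC_ADDR_ANY, .align = 4096, .num = 256 },
                { .da = FW_RSC_ADDR_ANY, .align = 4096, .num = 256 },
        },
};
/* --- end editor's sketch --- */
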
+ * + * Returns 0 on success, or an appropriate error code otherwise + */ +static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc, + int offset, int avail) +{ + struct device *dev = &rproc->dev; + struct rproc_vdev *rvdev; + int i, ret; + char name[16]; + + /* make sure resource isn't truncated */ + if (struct_size(rsc, vring, rsc->num_of_vrings) + rsc->config_len > + avail) { + dev_err(dev, "vdev rsc is truncated\n"); + return -EINVAL; + } + + /* make sure reserved bytes are zeroes */ + if (rsc->reserved[0] || rsc->reserved[1]) { + dev_err(dev, "vdev rsc has non zero reserved bytes\n"); + return -EINVAL; + } + + dev_dbg(dev, "vdev rsc: id %d, dfeatures 0x%x, cfg len %d, %d vrings\n", + rsc->id, rsc->dfeatures, rsc->config_len, rsc->num_of_vrings); + + /* we currently support only two vrings per rvdev */ + if (rsc->num_of_vrings > ARRAY_SIZE(rvdev->vring)) { + dev_err(dev, "too many vrings: %d\n", rsc->num_of_vrings); + return -EINVAL; + } + + rvdev = kzalloc(sizeof(*rvdev), GFP_KERNEL); + if (!rvdev) + return -ENOMEM; + + kref_init(&rvdev->refcount); + + rvdev->id = rsc->id; + rvdev->rproc = rproc; + rvdev->index = rproc->nb_vdev++; + + /* Initialise vdev subdevice */ + snprintf(name, sizeof(name), "vdev%dbuffer", rvdev->index); + rvdev->dev.parent = &rproc->dev; + rvdev->dev.release = rproc_rvdev_release; + dev_set_name(&rvdev->dev, "%s#%s", dev_name(rvdev->dev.parent), name); + dev_set_drvdata(&rvdev->dev, rvdev); + + ret = device_register(&rvdev->dev); + if (ret) { + put_device(&rvdev->dev); + return ret; + } + + ret = copy_dma_range_map(&rvdev->dev, rproc->dev.parent); + if (ret) + goto free_rvdev; + + /* Make device dma capable by inheriting from parent's capabilities */ + set_dma_ops(&rvdev->dev, get_dma_ops(rproc->dev.parent)); + + ret = dma_coerce_mask_and_coherent(&rvdev->dev, + dma_get_mask(rproc->dev.parent)); + if (ret) { + dev_warn(dev, + "Failed to set DMA mask %llx. Trying to continue... %x\n", + dma_get_mask(rproc->dev.parent), ret); + } + + /* parse the vrings */ + for (i = 0; i < rsc->num_of_vrings; i++) { + ret = rproc_parse_vring(rvdev, rsc, i); + if (ret) + goto free_rvdev; + } + + /* remember the resource offset*/ + rvdev->rsc_offset = offset; + + /* allocate the vring resources */ + for (i = 0; i < rsc->num_of_vrings; i++) { + ret = rproc_alloc_vring(rvdev, i); + if (ret) + goto unwind_vring_allocations; + } + + list_add_tail(&rvdev->node, &rproc->rvdevs); + + rvdev->subdev.start = rproc_vdev_do_start; + rvdev->subdev.stop = rproc_vdev_do_stop; + + rproc_add_subdev(rproc, &rvdev->subdev); + + return 0; + +unwind_vring_allocations: + for (i--; i >= 0; i--) + rproc_free_vring(&rvdev->vring[i]); +free_rvdev: + device_unregister(&rvdev->dev); + return ret; +} + +void rproc_vdev_release(struct kref *ref) +{ + struct rproc_vdev *rvdev = container_of(ref, struct rproc_vdev, refcount); + struct rproc_vring *rvring; + struct rproc *rproc = rvdev->rproc; + int id; + + for (id = 0; id < ARRAY_SIZE(rvdev->vring); id++) { + rvring = &rvdev->vring[id]; + rproc_free_vring(rvring); + } + + rproc_remove_subdev(rproc, &rvdev->subdev); + list_del(&rvdev->node); + device_unregister(&rvdev->dev); +} + +/** + * rproc_handle_trace() - handle a shared trace buffer resource + * @rproc: the remote processor + * @rsc: the trace resource descriptor + * @offset: offset of the resource entry + * @avail: size of available data (for sanity checking the image) + * + * In case the remote processor dumps trace logs into memory, + * export it via debugfs. 
+ * + * Currently, the 'da' member of @rsc should contain the device address + * where the remote processor is dumping the traces. Later we could also + * support dynamically allocating this address using the generic + * DMA API (but currently there isn't a use case for that). + * + * Returns 0 on success, or an appropriate error code otherwise + */ +static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc, + int offset, int avail) +{ + struct rproc_debug_trace *trace; + struct device *dev = &rproc->dev; + char name[15]; + + if (sizeof(*rsc) > avail) { + dev_err(dev, "trace rsc is truncated\n"); + return -EINVAL; + } + + /* make sure reserved bytes are zeroes */ + if (rsc->reserved) { + dev_err(dev, "trace rsc has non zero reserved bytes\n"); + return -EINVAL; + } + + trace = kzalloc(sizeof(*trace), GFP_KERNEL); + if (!trace) + return -ENOMEM; + + /* set the trace buffer dma properties */ + trace->trace_mem.len = rsc->len; + trace->trace_mem.da = rsc->da; + + /* set pointer on rproc device */ + trace->rproc = rproc; + + /* make sure snprintf always null terminates, even if truncating */ + snprintf(name, sizeof(name), "trace%d", rproc->num_traces); + + /* create the debugfs entry */ + trace->tfile = rproc_create_trace_file(name, rproc, trace); + if (!trace->tfile) { + kfree(trace); + return -EINVAL; + } + + list_add_tail(&trace->node, &rproc->traces); + + rproc->num_traces++; + + dev_dbg(dev, "%s added: da 0x%x, len 0x%x\n", + name, rsc->da, rsc->len); + + return 0; +} + +/** + * rproc_handle_devmem() - handle devmem resource entry + * @rproc: remote processor handle + * @rsc: the devmem resource entry + * @offset: offset of the resource entry + * @avail: size of available data (for sanity checking the image) + * + * Remote processors commonly need to access certain on-chip peripherals. + * + * Some of these remote processors access memory via an iommu device, + * and might require us to configure their iommu before they can access + * the on-chip peripherals they need. + * + * This resource entry is a request to map such a peripheral device. + * + * These devmem entries will contain the physical address of the device in + * the 'pa' member. If a specific device address is expected, then 'da' will + * contain it (currently this is the only use case supported). 'len' will + * contain the size of the physical region we need to map. + * + * Currently we just "trust" those devmem entries to contain valid physical + * addresses, but this is going to change: we want the implementations to + * tell us ranges of physical addresses the firmware is allowed to request, + * and not allow firmwares to request access to physical addresses that + * are outside those ranges. 
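
/* --- Editor's note: illustrative sketch, not part of this patch ---
 * A firmware-side devmem entry mapping a hypothetical mailbox peripheral
 * one-to-one (da == pa) could look like this; the flags are the IOMMU_*
 * protection bits handed straight to iommu_map() in the handler below:
 */
static const struct fw_rsc_devmem example_mbox_map = {
        .da     = 0x4a0f4000,   /* device address the firmware uses */
        .pa     = 0x4a0f4000,   /* physical address of the peripheral */
        .len    = 0x1000,
        .flags  = IOMMU_READ | IOMMU_WRITE,
        .name   = "mbox",
};
/* --- end editor's sketch --- */
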
+ */
+static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc,
+                               int offset, int avail)
+{
+        struct rproc_mem_entry *mapping;
+        struct device *dev = &rproc->dev;
+        int ret;
+
+        /* no point in handling this resource without a valid iommu domain */
+        if (!rproc->domain)
+                return -EINVAL;
+
+        if (sizeof(*rsc) > avail) {
+                dev_err(dev, "devmem rsc is truncated\n");
+                return -EINVAL;
+        }
+
+        /* make sure reserved bytes are zeroes */
+        if (rsc->reserved) {
+                dev_err(dev, "devmem rsc has non zero reserved bytes\n");
+                return -EINVAL;
+        }
+
+        mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
+        if (!mapping)
+                return -ENOMEM;
+
+        ret = iommu_map(rproc->domain, rsc->da, rsc->pa, rsc->len, rsc->flags);
+        if (ret) {
+                dev_err(dev, "failed to map devmem: %d\n", ret);
+                goto out;
+        }
+
+        /*
+         * We'll need this info later when we'll want to unmap everything
+         * (e.g. on shutdown).
+         *
+         * We can't trust the remote processor not to change the resource
+         * table, so we must maintain this info independently.
+         */
+        mapping->da = rsc->da;
+        mapping->len = rsc->len;
+        list_add_tail(&mapping->node, &rproc->mappings);
+
+        dev_dbg(dev, "mapped devmem pa 0x%x, da 0x%x, len 0x%x\n",
+                rsc->pa, rsc->da, rsc->len);
+
+        return 0;
+
+out:
+        kfree(mapping);
+        return ret;
+}
+
+/**
+ * rproc_alloc_carveout() - allocate the specified carveout
+ * @rproc: rproc handle
+ * @mem: the memory entry to allocate
+ *
+ * This function allocates the specified memory entry @mem using
+ * dma_alloc_coherent() as the default allocator
+ */
+static int rproc_alloc_carveout(struct rproc *rproc,
+                                struct rproc_mem_entry *mem)
+{
+        struct rproc_mem_entry *mapping = NULL;
+        struct device *dev = &rproc->dev;
+        dma_addr_t dma;
+        void *va;
+        int ret;
+
+        va = dma_alloc_coherent(dev->parent, mem->len, &dma, GFP_KERNEL);
+        if (!va) {
+                dev_err(dev->parent,
+                        "failed to allocate dma memory: len 0x%zx\n",
+                        mem->len);
+                return -ENOMEM;
+        }
+
+        dev_dbg(dev, "carveout va %pK, dma %pad, len 0x%zx\n",
+                va, &dma, mem->len);
+
+        if (mem->da != FW_RSC_ADDR_ANY && !rproc->domain) {
+                /*
+                 * Check that the requested da is equal to the dma address
+                 * and print a warning message in case of misalignment.
+                 * Don't stop the rproc_start sequence as the coprocessor may
+                 * build a pa to da translation on its side.
+                 */
+                if (mem->da != (u32)dma)
+                        dev_warn(dev->parent,
+                                 "Allocated carveout doesn't fit device address request\n");
+        }
+
+        /*
+         * Ok, this is non-standard.
+         *
+         * Sometimes we can't rely on the generic iommu-based DMA API
+         * to dynamically allocate the device address and then set the IOMMU
+         * tables accordingly, because some remote processors might
+         * _require_ us to use hard coded device addresses that their
+         * firmware was compiled with.
+         *
+         * In this case, we must use the IOMMU API directly and map
+         * the memory to the device address as expected by the remote
+         * processor.
+         *
+         * Obviously such remote processor devices should not be configured
+         * to use the iommu-based DMA API: we expect 'dma' to contain the
+         * physical address in this case.
+         */
+        if (mem->da != FW_RSC_ADDR_ANY && rproc->domain) {
+                mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
+                if (!mapping) {
+                        ret = -ENOMEM;
+                        goto dma_free;
+                }
+
+                ret = iommu_map(rproc->domain, mem->da, dma, mem->len,
+                                mem->flags);
+                if (ret) {
+                        dev_err(dev, "iommu_map failed: %d\n", ret);
+                        goto free_mapping;
+                }
+
+                /*
+                 * We'll need this info later when we'll want to unmap
+                 * everything (e.g. on shutdown).
+ * + * We can't trust the remote processor not to change the + * resource table, so we must maintain this info independently. + */ + mapping->da = mem->da; + mapping->len = mem->len; + list_add_tail(&mapping->node, &rproc->mappings); + + dev_dbg(dev, "carveout mapped 0x%x to %pad\n", + mem->da, &dma); + } + + if (mem->da == FW_RSC_ADDR_ANY) { + /* Update device address as undefined by requester */ + if ((u64)dma & HIGH_BITS_MASK) + dev_warn(dev, "DMA address cast in 32bit to fit resource table format\n"); + + mem->da = (u32)dma; + } + + mem->dma = dma; + mem->va = va; + + return 0; + +free_mapping: + kfree(mapping); +dma_free: + dma_free_coherent(dev->parent, mem->len, va, dma); + return ret; +} + +/** + * rproc_release_carveout() - release acquired carveout + * @rproc: rproc handle + * @mem: the memory entry to release + * + * This function releases specified memory entry @mem allocated via + * rproc_alloc_carveout() function by @rproc. + */ +static int rproc_release_carveout(struct rproc *rproc, + struct rproc_mem_entry *mem) +{ + struct device *dev = &rproc->dev; + + /* clean up carveout allocations */ + dma_free_coherent(dev->parent, mem->len, mem->va, mem->dma); + return 0; +} + +/** + * rproc_handle_carveout() - handle phys contig memory allocation requests + * @rproc: rproc handle + * @rsc: the resource entry + * @offset: offset of the resource entry + * @avail: size of available data (for image validation) + * + * This function will handle firmware requests for allocation of physically + * contiguous memory regions. + * + * These request entries should come first in the firmware's resource table, + * as other firmware entries might request placing other data objects inside + * these memory regions (e.g. data/code segments, trace resource entries, ...). + * + * Allocating memory this way helps utilizing the reserved physical memory + * (e.g. CMA) more efficiently, and also minimizes the number of TLB entries + * needed to map it (in case @rproc is using an IOMMU). Reducing the TLB + * pressure is important; it may have a substantial impact on performance. 
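
/* --- Editor's note: illustrative sketch, not part of this patch ---
 * Instead of letting this handler allocate from CMA, a platform driver can
 * pre-register a fixed region under the same name before boot (see
 * rproc_mem_entry_init()/rproc_add_carveout() below); the handler then
 * matches it by name. The callbacks and addresses here are invented:
 */
static int example_mem_alloc(struct rproc *rproc, struct rproc_mem_entry *mem)
{
        mem->va = ioremap_wc(mem->dma, mem->len);
        return mem->va ? 0 : -ENOMEM;
}

static int example_mem_release(struct rproc *rproc, struct rproc_mem_entry *mem)
{
        iounmap(mem->va);
        return 0;
}

/* ... then, in the driver's probe(), for a reserved region at 0x10000000:
 *
 *      mem = rproc_mem_entry_init(dev, NULL, 0x10000000, 0x100000,
 *                                 0x10000000, example_mem_alloc,
 *                                 example_mem_release, "vdev0buffer");
 *      if (!mem)
 *              return -ENOMEM;
 *      rproc_add_carveout(rproc, mem);
 */
/* --- end editor's sketch --- */
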
+ */
+static int rproc_handle_carveout(struct rproc *rproc,
+                                 struct fw_rsc_carveout *rsc,
+                                 int offset, int avail)
+{
+        struct rproc_mem_entry *carveout;
+        struct device *dev = &rproc->dev;
+
+        if (sizeof(*rsc) > avail) {
+                dev_err(dev, "carveout rsc is truncated\n");
+                return -EINVAL;
+        }
+
+        /* make sure reserved bytes are zeroes */
+        if (rsc->reserved) {
+                dev_err(dev, "carveout rsc has non zero reserved bytes\n");
+                return -EINVAL;
+        }
+
+        dev_dbg(dev, "carveout rsc: name: %s, da 0x%x, pa 0x%x, len 0x%x, flags 0x%x\n",
+                rsc->name, rsc->da, rsc->pa, rsc->len, rsc->flags);
+
+        /*
+         * Check if the carveout rsc is already part of a registered carveout:
+         * search by name, then check the da and length.
+         */
+        carveout = rproc_find_carveout_by_name(rproc, rsc->name);
+
+        if (carveout) {
+                if (carveout->rsc_offset != FW_RSC_ADDR_ANY) {
+                        dev_err(dev,
+                                "Carveout already associated to resource table\n");
+                        return -ENOMEM;
+                }
+
+                if (rproc_check_carveout_da(rproc, carveout, rsc->da, rsc->len))
+                        return -ENOMEM;
+
+                /* Update memory carveout with resource table info */
+                carveout->rsc_offset = offset;
+                carveout->flags = rsc->flags;
+
+                return 0;
+        }
+
+        /* Register carveout in list */
+        carveout = rproc_mem_entry_init(dev, NULL, 0, rsc->len, rsc->da,
+                                        rproc_alloc_carveout,
+                                        rproc_release_carveout, rsc->name);
+        if (!carveout) {
+                dev_err(dev, "Can't allocate memory entry structure\n");
+                return -ENOMEM;
+        }
+
+        carveout->flags = rsc->flags;
+        carveout->rsc_offset = offset;
+        rproc_add_carveout(rproc, carveout);
+
+        return 0;
+}
+
+/**
+ * rproc_add_carveout() - register an allocated carveout region
+ * @rproc: rproc handle
+ * @mem: memory entry to register
+ *
+ * This function registers the specified memory entry in the @rproc carveouts
+ * list. The carveout should have been allocated before registering.
+ */
+void rproc_add_carveout(struct rproc *rproc, struct rproc_mem_entry *mem)
+{
+        list_add_tail(&mem->node, &rproc->carveouts);
+}
+EXPORT_SYMBOL(rproc_add_carveout);
+
+/**
+ * rproc_mem_entry_init() - allocate and initialize rproc_mem_entry struct
+ * @dev: pointer to the device struct
+ * @va: virtual address
+ * @dma: dma address
+ * @len: memory carveout length
+ * @da: device address
+ * @alloc: memory carveout allocation function
+ * @release: memory carveout release function
+ * @name: carveout name
+ *
+ * This function allocates a rproc_mem_entry struct and fills it with the
+ * parameters provided by the client.
+ */
+__printf(8, 9)
+struct rproc_mem_entry *
+rproc_mem_entry_init(struct device *dev,
+                     void *va, dma_addr_t dma, size_t len, u32 da,
+                     int (*alloc)(struct rproc *, struct rproc_mem_entry *),
+                     int (*release)(struct rproc *, struct rproc_mem_entry *),
+                     const char *name, ...)
+{ + struct rproc_mem_entry *mem; + va_list args; + + mem = kzalloc(sizeof(*mem), GFP_KERNEL); + if (!mem) + return mem; + + mem->va = va; + mem->dma = dma; + mem->da = da; + mem->len = len; + mem->alloc = alloc; + mem->release = release; + mem->rsc_offset = FW_RSC_ADDR_ANY; + mem->of_resm_idx = -1; + + va_start(args, name); + vsnprintf(mem->name, sizeof(mem->name), name, args); + va_end(args); + + return mem; +} +EXPORT_SYMBOL(rproc_mem_entry_init); + +/** + * rproc_of_resm_mem_entry_init() - allocate and initialize rproc_mem_entry struct + * from a reserved memory phandle + * @dev: pointer on device struct + * @of_resm_idx: reserved memory phandle index in "memory-region" + * @len: memory carveout length + * @da: device address + * @name: carveout name + * + * This function allocates a rproc_mem_entry struct and fill it with parameters + * provided by client. + */ +__printf(5, 6) +struct rproc_mem_entry * +rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, size_t len, + u32 da, const char *name, ...) +{ + struct rproc_mem_entry *mem; + va_list args; + + mem = kzalloc(sizeof(*mem), GFP_KERNEL); + if (!mem) + return mem; + + mem->da = da; + mem->len = len; + mem->rsc_offset = FW_RSC_ADDR_ANY; + mem->of_resm_idx = of_resm_idx; + + va_start(args, name); + vsnprintf(mem->name, sizeof(mem->name), name, args); + va_end(args); + + return mem; +} +EXPORT_SYMBOL(rproc_of_resm_mem_entry_init); + +/** + * rproc_of_parse_firmware() - parse and return the firmware-name + * @dev: pointer on device struct representing a rproc + * @index: index to use for the firmware-name retrieval + * @fw_name: pointer to a character string, in which the firmware + * name is returned on success and unmodified otherwise. + * + * This is an OF helper function that parses a device's DT node for + * the "firmware-name" property and returns the firmware name pointer + * in @fw_name on success. + * + * Return: 0 on success, or an appropriate failure. + */ +int rproc_of_parse_firmware(struct device *dev, int index, const char **fw_name) +{ + int ret; + + ret = of_property_read_string_index(dev->of_node, "firmware-name", + index, fw_name); + return ret ? ret : 0; +} +EXPORT_SYMBOL(rproc_of_parse_firmware); + +/* + * A lookup table for resource handlers. The indices are defined in + * enum fw_resource_type. 
+ */ +static rproc_handle_resource_t rproc_loading_handlers[RSC_LAST] = { + [RSC_CARVEOUT] = (rproc_handle_resource_t)rproc_handle_carveout, + [RSC_DEVMEM] = (rproc_handle_resource_t)rproc_handle_devmem, + [RSC_TRACE] = (rproc_handle_resource_t)rproc_handle_trace, + [RSC_VDEV] = (rproc_handle_resource_t)rproc_handle_vdev, +}; + +/* handle firmware resource entries before booting the remote processor */ +static int rproc_handle_resources(struct rproc *rproc, + rproc_handle_resource_t handlers[RSC_LAST]) +{ + struct device *dev = &rproc->dev; + rproc_handle_resource_t handler; + int ret = 0, i; + + if (!rproc->table_ptr) + return 0; + + for (i = 0; i < rproc->table_ptr->num; i++) { + int offset = rproc->table_ptr->offset[i]; + struct fw_rsc_hdr *hdr = (void *)rproc->table_ptr + offset; + int avail = rproc->table_sz - offset - sizeof(*hdr); + void *rsc = (void *)hdr + sizeof(*hdr); + + /* make sure table isn't truncated */ + if (avail < 0) { + dev_err(dev, "rsc table is truncated\n"); + return -EINVAL; + } + + dev_dbg(dev, "rsc: type %d\n", hdr->type); + + if (hdr->type >= RSC_VENDOR_START && + hdr->type <= RSC_VENDOR_END) { + ret = rproc_handle_rsc(rproc, hdr->type, rsc, + offset + sizeof(*hdr), avail); + if (ret == RSC_HANDLED) + continue; + else if (ret < 0) + break; + + dev_warn(dev, "unsupported vendor resource %d\n", + hdr->type); + continue; + } + + if (hdr->type >= RSC_LAST) { + dev_warn(dev, "unsupported resource %d\n", hdr->type); + continue; + } + + handler = handlers[hdr->type]; + if (!handler) + continue; + + ret = handler(rproc, rsc, offset + sizeof(*hdr), avail); + if (ret) + break; + } + + return ret; +} + +static int rproc_prepare_subdevices(struct rproc *rproc) +{ + struct rproc_subdev *subdev; + int ret; + + list_for_each_entry(subdev, &rproc->subdevs, node) { + if (subdev->prepare) { + ret = subdev->prepare(subdev); + if (ret) + goto unroll_preparation; + } + } + + return 0; + +unroll_preparation: + list_for_each_entry_continue_reverse(subdev, &rproc->subdevs, node) { + if (subdev->unprepare) + subdev->unprepare(subdev); + } + + return ret; +} + +static int rproc_start_subdevices(struct rproc *rproc) +{ + struct rproc_subdev *subdev; + int ret; + + list_for_each_entry(subdev, &rproc->subdevs, node) { + if (subdev->start) { + ret = subdev->start(subdev); + if (ret) + goto unroll_registration; + } + } + + return 0; + +unroll_registration: + list_for_each_entry_continue_reverse(subdev, &rproc->subdevs, node) { + if (subdev->stop) + subdev->stop(subdev, true); + } + + return ret; +} + +static void rproc_stop_subdevices(struct rproc *rproc, bool crashed) +{ + struct rproc_subdev *subdev; + + list_for_each_entry_reverse(subdev, &rproc->subdevs, node) { + if (subdev->stop) + subdev->stop(subdev, crashed); + } +} + +static void rproc_unprepare_subdevices(struct rproc *rproc) +{ + struct rproc_subdev *subdev; + + list_for_each_entry_reverse(subdev, &rproc->subdevs, node) { + if (subdev->unprepare) + subdev->unprepare(subdev); + } +} + +/** + * rproc_alloc_registered_carveouts() - allocate all carveouts registered + * in the list + * @rproc: the remote processor handle + * + * This function parses registered carveout list, performs allocation + * if alloc() ops registered and updates resource table information + * if rsc_offset set. 
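
/* --- Editor's note: illustrative sketch, not part of this patch ---
 * The vendor-range dispatch in rproc_handle_resources() above hands types
 * between RSC_VENDOR_START and RSC_VENDOR_END to the platform driver's
 * rproc_ops->handle_rsc(). A driver-side handler might look like this,
 * assuming the RSC_HANDLED/RSC_IGNORED return codes from
 * <linux/remoteproc.h>; the type value and payload struct are invented:
 */
#define EXAMPLE_RSC_TIMESYNC    (RSC_VENDOR_START + 1)

struct example_fw_rsc_timesync {
        u32 clk_hz;
};

static int example_handle_rsc(struct rproc *rproc, u32 rsc_type, void *rsc,
                              int offset, int avail)
{
        struct example_fw_rsc_timesync *ts = rsc;

        if (rsc_type != EXAMPLE_RSC_TIMESYNC)
                return RSC_IGNORED;     /* let the core warn about it */

        if (sizeof(*ts) > avail)
                return -EINVAL;         /* truncated entry */

        dev_dbg(&rproc->dev, "timesync clk %u Hz\n", ts->clk_hz);
        return RSC_HANDLED;
}
/* --- end editor's sketch --- */
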
+ *
+ * Return: 0 on success
+ */
+static int rproc_alloc_registered_carveouts(struct rproc *rproc)
+{
+        struct rproc_mem_entry *entry, *tmp;
+        struct fw_rsc_carveout *rsc;
+        struct device *dev = &rproc->dev;
+        u64 pa;
+        int ret;
+
+        list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) {
+                if (entry->alloc) {
+                        ret = entry->alloc(rproc, entry);
+                        if (ret) {
+                                dev_err(dev, "Unable to allocate carveout %s: %d\n",
+                                        entry->name, ret);
+                                return -ENOMEM;
+                        }
+                }
+
+                if (entry->rsc_offset != FW_RSC_ADDR_ANY) {
+                        /* update resource table */
+                        rsc = (void *)rproc->table_ptr + entry->rsc_offset;
+
+                        /*
+                         * Some remote processors might need to know the pa
+                         * even though they are behind an IOMMU. E.g., OMAP4's
+                         * remote M3 processor needs this so it can control
+                         * on-chip hardware accelerators that are not behind
+                         * the IOMMU, and therefore must know the pa.
+                         *
+                         * Generally we don't want to expose physical addresses
+                         * if we don't have to (remote processors are generally
+                         * _not_ trusted), so we might want to do this only for
+                         * remote processors that _must_ have this (e.g. OMAP4's
+                         * dual M3 subsystem).
+                         *
+                         * Non-IOMMU processors might also want to have this info.
+                         * In this case, the device address and the physical address
+                         * are the same.
+                         */
+
+                        /* Use va if defined else dma to generate pa */
+                        if (entry->va)
+                                pa = (u64)rproc_va_to_pa(entry->va);
+                        else
+                                pa = (u64)entry->dma;
+
+                        if (((u64)pa) & HIGH_BITS_MASK)
+                                dev_warn(dev,
+                                         "Physical address cast in 32bit to fit resource table format\n");
+
+                        rsc->pa = (u32)pa;
+                        rsc->da = entry->da;
+                        rsc->len = entry->len;
+                }
+        }
+
+        return 0;
+}
+
+
+/**
+ * rproc_resource_cleanup() - clean up and free all acquired resources
+ * @rproc: rproc handle
+ *
+ * This function will free all resources acquired for @rproc, and it
+ * is called whenever @rproc either shuts down or fails to boot.
+ */ +void rproc_resource_cleanup(struct rproc *rproc) +{ + struct rproc_mem_entry *entry, *tmp; + struct rproc_debug_trace *trace, *ttmp; + struct rproc_vdev *rvdev, *rvtmp; + struct device *dev = &rproc->dev; + + /* clean up debugfs trace entries */ + list_for_each_entry_safe(trace, ttmp, &rproc->traces, node) { + rproc_remove_trace_file(trace->tfile); + rproc->num_traces--; + list_del(&trace->node); + kfree(trace); + } + + /* clean up iommu mapping entries */ + list_for_each_entry_safe(entry, tmp, &rproc->mappings, node) { + size_t unmapped; + + unmapped = iommu_unmap(rproc->domain, entry->da, entry->len); + if (unmapped != entry->len) { + /* nothing much to do besides complaining */ + dev_err(dev, "failed to unmap %zx/%zu\n", entry->len, + unmapped); + } + + list_del(&entry->node); + kfree(entry); + } + + /* clean up carveout allocations */ + list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) { + if (entry->release) + entry->release(rproc, entry); + list_del(&entry->node); + kfree(entry); + } + + /* clean up remote vdev entries */ + list_for_each_entry_safe(rvdev, rvtmp, &rproc->rvdevs, node) + kref_put(&rvdev->refcount, rproc_vdev_release); + + rproc_coredump_cleanup(rproc); +} +EXPORT_SYMBOL(rproc_resource_cleanup); + +static int rproc_start(struct rproc *rproc, const struct firmware *fw) +{ + struct resource_table *loaded_table; + struct device *dev = &rproc->dev; + int ret; + + /* load the ELF segments to memory */ + ret = rproc_load_segments(rproc, fw); + if (ret) { + dev_err(dev, "Failed to load program segments: %d\n", ret); + return ret; + } + + /* + * The starting device has been given the rproc->cached_table as the + * resource table. The address of the vring along with the other + * allocated resources (carveouts etc) is stored in cached_table. + * In order to pass this information to the remote device we must copy + * this information to device memory. We also update the table_ptr so + * that any subsequent changes will be applied to the loaded version. 
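
/* --- Editor's note: summary of the table_ptr lifecycle, not in the patch ---
 *   boot (rproc_start):  cached_table is memcpy'd into the rproc-visible
 *                        copy; table_ptr = loaded_table while RPROC_RUNNING
 *   stop (rproc_stop):   table_ptr is pointed back at cached_table before
 *                        the remote side is powered off
 *   shutdown:            cached_table is freed and table_ptr set to NULL
 */
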
+ */ + loaded_table = rproc_find_loaded_rsc_table(rproc, fw); + if (loaded_table) { + memcpy(loaded_table, rproc->cached_table, rproc->table_sz); + rproc->table_ptr = loaded_table; + } + + ret = rproc_prepare_subdevices(rproc); + if (ret) { + dev_err(dev, "failed to prepare subdevices for %s: %d\n", + rproc->name, ret); + goto reset_table_ptr; + } + + /* power up the remote processor */ + ret = rproc->ops->start(rproc); + if (ret) { + dev_err(dev, "can't start rproc %s: %d\n", rproc->name, ret); + goto unprepare_subdevices; + } + + /* Start any subdevices for the remote processor */ + ret = rproc_start_subdevices(rproc); + if (ret) { + dev_err(dev, "failed to probe subdevices for %s: %d\n", + rproc->name, ret); + goto stop_rproc; + } + + rproc->state = RPROC_RUNNING; + + dev_info(dev, "remote processor %s is now up\n", rproc->name); + + return 0; + +stop_rproc: + rproc->ops->stop(rproc); +unprepare_subdevices: + rproc_unprepare_subdevices(rproc); +reset_table_ptr: + rproc->table_ptr = rproc->cached_table; + + return ret; +} + +static int rproc_attach(struct rproc *rproc) +{ + struct device *dev = &rproc->dev; + int ret; + + ret = rproc_prepare_subdevices(rproc); + if (ret) { + dev_err(dev, "failed to prepare subdevices for %s: %d\n", + rproc->name, ret); + goto out; + } + + /* Attach to the remote processor */ + ret = rproc_attach_device(rproc); + if (ret) { + dev_err(dev, "can't attach to rproc %s: %d\n", + rproc->name, ret); + goto unprepare_subdevices; + } + + /* Start any subdevices for the remote processor */ + ret = rproc_start_subdevices(rproc); + if (ret) { + dev_err(dev, "failed to probe subdevices for %s: %d\n", + rproc->name, ret); + goto stop_rproc; + } + + rproc->state = RPROC_RUNNING; + + dev_info(dev, "remote processor %s is now attached\n", rproc->name); + + return 0; + +stop_rproc: + rproc->ops->stop(rproc); +unprepare_subdevices: + rproc_unprepare_subdevices(rproc); +out: + return ret; +} + +/* + * take a firmware and boot a remote processor with it. 
+ */ +static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw) +{ + struct device *dev = &rproc->dev; + const char *name = rproc->firmware; + int ret; + + ret = rproc_fw_sanity_check(rproc, fw); + if (ret) + return ret; + + dev_info(dev, "Booting fw image %s, size %zd\n", name, fw->size); + + /* + * if enabling an IOMMU isn't relevant for this rproc, this is + * just a nop + */ + ret = rproc_enable_iommu(rproc); + if (ret) { + dev_err(dev, "can't enable iommu: %d\n", ret); + return ret; + } + + /* Prepare rproc for firmware loading if needed */ + ret = rproc_prepare_device(rproc); + if (ret) { + dev_err(dev, "can't prepare rproc %s: %d\n", rproc->name, ret); + goto disable_iommu; + } + + rproc->bootaddr = rproc_get_boot_addr(rproc, fw); + + /* Load resource table, core dump segment list etc from the firmware */ + ret = rproc_parse_fw(rproc, fw); + if (ret) + goto unprepare_rproc; + + /* reset max_notifyid */ + rproc->max_notifyid = -1; + + /* reset handled vdev */ + rproc->nb_vdev = 0; + + /* handle fw resources which are required to boot rproc */ + ret = rproc_handle_resources(rproc, rproc_loading_handlers); + if (ret) { + dev_err(dev, "Failed to process resources: %d\n", ret); + goto clean_up_resources; + } + + /* Allocate carveout resources associated to rproc */ + ret = rproc_alloc_registered_carveouts(rproc); + if (ret) { + dev_err(dev, "Failed to allocate associated carveouts: %d\n", + ret); + goto clean_up_resources; + } + + ret = rproc_start(rproc, fw); + if (ret) + goto clean_up_resources; + + return 0; + +clean_up_resources: + rproc_resource_cleanup(rproc); + kfree(rproc->cached_table); + rproc->cached_table = NULL; + rproc->table_ptr = NULL; +unprepare_rproc: + /* release HW resources if needed */ + rproc_unprepare_device(rproc); +disable_iommu: + rproc_disable_iommu(rproc); + return ret; +} + +/* + * Attach to remote processor - similar to rproc_fw_boot() but without + * the steps that deal with the firmware image. + */ +static int rproc_actuate(struct rproc *rproc) +{ + struct device *dev = &rproc->dev; + int ret; + + /* + * if enabling an IOMMU isn't relevant for this rproc, this is + * just a nop + */ + ret = rproc_enable_iommu(rproc); + if (ret) { + dev_err(dev, "can't enable iommu: %d\n", ret); + return ret; + } + + /* reset max_notifyid */ + rproc->max_notifyid = -1; + + /* reset handled vdev */ + rproc->nb_vdev = 0; + + /* + * Handle firmware resources required to attach to a remote processor. + * Because we are attaching rather than booting the remote processor, + * we expect the platform driver to properly set rproc->table_ptr. + */ + ret = rproc_handle_resources(rproc, rproc_loading_handlers); + if (ret) { + dev_err(dev, "Failed to process resources: %d\n", ret); + goto disable_iommu; + } + + /* Allocate carveout resources associated to rproc */ + ret = rproc_alloc_registered_carveouts(rproc); + if (ret) { + dev_err(dev, "Failed to allocate associated carveouts: %d\n", + ret); + goto clean_up_resources; + } + + ret = rproc_attach(rproc); + if (ret) + goto clean_up_resources; + + return 0; + +clean_up_resources: + rproc_resource_cleanup(rproc); +disable_iommu: + rproc_disable_iommu(rproc); + return ret; +} + +/* + * take a firmware and boot it up. + * + * Note: this function is called asynchronously upon registration of the + * remote processor (so we must wait until it completes before we try + * to unregister the device. one other option is just to use kref here, + * that might be cleaner). 
+ */
+static void rproc_auto_boot_callback(const struct firmware *fw, void *context)
+{
+        struct rproc *rproc = context;
+
+        rproc_boot(rproc);
+
+        release_firmware(fw);
+}
+
+static int rproc_trigger_auto_boot(struct rproc *rproc)
+{
+        int ret;
+
+        /*
+         * Since the remote processor is in a detached state, it has already
+         * been booted by another entity. As such there is no point in waiting
+         * for a firmware image to be loaded, we can simply initiate the process
+         * of attaching to it immediately.
+         */
+        if (rproc->state == RPROC_DETACHED)
+                return rproc_boot(rproc);
+
+        /*
+         * We're initiating an asynchronous firmware loading, so we can
+         * be built-in kernel code, without hanging the boot process.
+         */
+        ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+                                      rproc->firmware, &rproc->dev, GFP_KERNEL,
+                                      rproc, rproc_auto_boot_callback);
+        if (ret < 0)
+                dev_err(&rproc->dev, "request_firmware_nowait err: %d\n", ret);
+
+        return ret;
+}
+
+static int rproc_stop(struct rproc *rproc, bool crashed)
+{
+        struct device *dev = &rproc->dev;
+        int ret;
+
+        /* Stop any subdevices for the remote processor */
+        rproc_stop_subdevices(rproc, crashed);
+
+        /* the installed resource table is no longer accessible */
+        rproc->table_ptr = rproc->cached_table;
+
+        /* power off the remote processor */
+        ret = rproc->ops->stop(rproc);
+        if (ret) {
+                dev_err(dev, "can't stop rproc: %d\n", ret);
+                return ret;
+        }
+
+        rproc_unprepare_subdevices(rproc);
+
+        rproc->state = RPROC_OFFLINE;
+
+        /*
+         * The remote processor has been stopped and is now offline, which means
+         * that the next time it is brought back online the remoteproc core will
+         * be responsible to load its firmware. As such it is no longer
+         * autonomous.
+         */
+        rproc->autonomous = false;
+
+        dev_info(dev, "stopped remote processor %s\n", rproc->name);
+
+        return 0;
+}
+
+
+/**
+ * rproc_trigger_recovery() - recover a remoteproc
+ * @rproc: the remote processor
+ *
+ * The recovery is done by resetting all the virtio devices; that way all the
+ * rpmsg drivers will be reset along with the remote processor, making the
+ * remoteproc functional again.
+ *
+ * This function can sleep, so it cannot be called from atomic context.
+ */
+int rproc_trigger_recovery(struct rproc *rproc)
+{
+        const struct firmware *firmware_p;
+        struct device *dev = &rproc->dev;
+        int ret;
+
+        ret = mutex_lock_interruptible(&rproc->lock);
+        if (ret)
+                return ret;
+
+        /* State could have changed before we got the mutex */
+        if (rproc->state != RPROC_CRASHED)
+                goto unlock_mutex;
+
+        dev_err(dev, "recovering %s\n", rproc->name);
+
+        ret = rproc_stop(rproc, true);
+        if (ret)
+                goto unlock_mutex;
+
+        /* generate coredump */
+        rproc_coredump(rproc);
+
+        /* load firmware */
+        ret = request_firmware(&firmware_p, rproc->firmware, dev);
+        if (ret < 0) {
+                dev_err(dev, "request_firmware failed: %d\n", ret);
+                goto unlock_mutex;
+        }
+
+        /* boot the remote processor up again */
+        ret = rproc_start(rproc, firmware_p);
+
+        release_firmware(firmware_p);
+
+unlock_mutex:
+        mutex_unlock(&rproc->lock);
+        return ret;
+}
+
+/**
+ * rproc_crash_handler_work() - handle a crash
+ * @work: work treating the crash
+ *
+ * This function needs to handle everything related to a crash, like cpu
+ * registers and stack dump, information to help to debug the fatal error, etc.
+ */ +static void rproc_crash_handler_work(struct work_struct *work) +{ + struct rproc *rproc = container_of(work, struct rproc, crash_handler); + struct device *dev = &rproc->dev; + + dev_dbg(dev, "enter %s\n", __func__); + + mutex_lock(&rproc->lock); + + if (rproc->state == RPROC_CRASHED) { + /* handle only the first crash detected */ + mutex_unlock(&rproc->lock); + return; + } + + if (rproc->state == RPROC_OFFLINE) { + /* Don't recover if the remote processor was stopped */ + mutex_unlock(&rproc->lock); + goto out; + } + + rproc->state = RPROC_CRASHED; + dev_err(dev, "handling crash #%u in %s\n", ++rproc->crash_cnt, + rproc->name); + + mutex_unlock(&rproc->lock); + + if (!rproc->recovery_disabled) + rproc_trigger_recovery(rproc); + +out: + pm_relax(rproc->dev.parent); +} + +/** + * rproc_boot() - boot a remote processor + * @rproc: handle of a remote processor + * + * Boot a remote processor (i.e. load its firmware, power it on, ...). + * + * If the remote processor is already powered on, this function immediately + * returns (successfully). + * + * Returns 0 on success, and an appropriate error value otherwise. + */ +int rproc_boot(struct rproc *rproc) +{ + const struct firmware *firmware_p; + struct device *dev; + int ret; + + if (!rproc) { + pr_err("invalid rproc handle\n"); + return -EINVAL; + } + + dev = &rproc->dev; + + ret = mutex_lock_interruptible(&rproc->lock); + if (ret) { + dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret); + return ret; + } + + if (rproc->state == RPROC_DELETED) { + ret = -ENODEV; + dev_err(dev, "can't boot deleted rproc %s\n", rproc->name); + goto unlock_mutex; + } + + /* skip the boot or attach process if rproc is already powered up */ + if (atomic_inc_return(&rproc->power) > 1) { + ret = 0; + goto unlock_mutex; + } + + if (rproc->state == RPROC_DETACHED) { + dev_info(dev, "attaching to %s\n", rproc->name); + + ret = rproc_actuate(rproc); + } else { + dev_info(dev, "powering up %s\n", rproc->name); + + /* load firmware */ + ret = request_firmware(&firmware_p, rproc->firmware, dev); + if (ret < 0) { + dev_err(dev, "request_firmware failed: %d\n", ret); + goto downref_rproc; + } + + ret = rproc_fw_boot(rproc, firmware_p); + + release_firmware(firmware_p); + } + +downref_rproc: + if (ret) + atomic_dec(&rproc->power); +unlock_mutex: + mutex_unlock(&rproc->lock); + return ret; +} +EXPORT_SYMBOL(rproc_boot); + +/** + * rproc_shutdown() - power off the remote processor + * @rproc: the remote processor + * + * Power off a remote processor (previously booted with rproc_boot()). + * + * In case @rproc is still being used by an additional user(s), then + * this function will just decrement the power refcount and exit, + * without really powering off the device. + * + * Every call to rproc_boot() must (eventually) be accompanied by a call + * to rproc_shutdown(). Calling rproc_shutdown() redundantly is a bug. + * + * Notes: + * - we're not decrementing the rproc's refcount, only the power refcount. + * which means that the @rproc handle stays valid even after rproc_shutdown() + * returns, and users can still use it with a subsequent rproc_boot(), if + * needed. 
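
/* --- Editor's note: illustrative sketch, not part of this patch ---
 * A typical client driver pairs rproc_boot()/rproc_shutdown() and
 * rproc_get_by_phandle()/rproc_put() like this; the "my,rproc" DT property
 * name is invented for the example.
 */
static int example_client_start(struct device *dev)
{
        struct rproc *rproc;
        phandle ph;
        int ret;

        if (of_property_read_u32(dev->of_node, "my,rproc", &ph))
                return -EINVAL;

        rproc = rproc_get_by_phandle(ph);
        if (!rproc)
                return -EPROBE_DEFER;

        ret = rproc_boot(rproc);
        if (ret) {
                rproc_put(rproc);
                return ret;
        }

        /* ... use the remote processor ... */

        rproc_shutdown(rproc);  /* pairs with rproc_boot() */
        rproc_put(rproc);       /* pairs with rproc_get_by_phandle() */
        return 0;
}
/* --- end editor's sketch --- */
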
+ */
+void rproc_shutdown(struct rproc *rproc)
+{
+        struct device *dev = &rproc->dev;
+        int ret;
+
+        ret = mutex_lock_interruptible(&rproc->lock);
+        if (ret) {
+                dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
+                return;
+        }
+
+        /* if the remote proc is still needed, bail out */
+        if (!atomic_dec_and_test(&rproc->power))
+                goto out;
+
+        ret = rproc_stop(rproc, false);
+        if (ret) {
+                atomic_inc(&rproc->power);
+                goto out;
+        }
+
+        /* clean up all acquired resources */
+        rproc_resource_cleanup(rproc);
+
+        /* release HW resources if needed */
+        rproc_unprepare_device(rproc);
+
+        rproc_disable_iommu(rproc);
+
+        /* Free the copy of the resource table */
+        kfree(rproc->cached_table);
+        rproc->cached_table = NULL;
+        rproc->table_ptr = NULL;
+out:
+        mutex_unlock(&rproc->lock);
+}
+EXPORT_SYMBOL(rproc_shutdown);
+
+/**
+ * rproc_get_by_phandle() - find a remote processor by phandle
+ * @phandle: phandle to the rproc
+ *
+ * Finds an rproc handle using the remote processor's phandle, and then
+ * returns a handle to the rproc.
+ *
+ * This function increments the remote processor's refcount, so always
+ * use rproc_put() to decrement it back once rproc isn't needed anymore.
+ *
+ * Returns the rproc handle on success, and NULL on failure.
+ */
+#ifdef CONFIG_OF
+struct rproc *rproc_get_by_phandle(phandle phandle)
+{
+        struct rproc *rproc = NULL, *r;
+        struct device_node *np;
+
+        np = of_find_node_by_phandle(phandle);
+        if (!np)
+                return NULL;
+
+        rcu_read_lock();
+        list_for_each_entry_rcu(r, &rproc_list, node) {
+                if (r->dev.parent && r->dev.parent->of_node == np) {
+                        /* prevent underlying implementation from being removed */
+                        if (!try_module_get(r->dev.parent->driver->owner)) {
+                                dev_err(&r->dev, "can't get owner\n");
+                                break;
+                        }
+
+                        rproc = r;
+                        get_device(&rproc->dev);
+                        break;
+                }
+        }
+        rcu_read_unlock();
+
+        of_node_put(np);
+
+        return rproc;
+}
+#else
+struct rproc *rproc_get_by_phandle(phandle phandle)
+{
+        return NULL;
+}
+#endif
+EXPORT_SYMBOL(rproc_get_by_phandle);
+
+static int rproc_validate(struct rproc *rproc)
+{
+        switch (rproc->state) {
+        case RPROC_OFFLINE:
+                /*
+                 * An offline processor without a start()
+                 * function makes no sense.
+                 */
+                if (!rproc->ops->start)
+                        return -EINVAL;
+                break;
+        case RPROC_DETACHED:
+                /*
+                 * A remote processor in a detached state without an
+                 * attach() function makes no sense.
+                 */
+                if (!rproc->ops->attach)
+                        return -EINVAL;
+                /*
+                 * When attaching to a remote processor the device memory
+                 * is already available and as such there is no need to have a
+                 * cached table.
+                 */
+                if (rproc->cached_table)
+                        return -EINVAL;
+                break;
+        default:
+                /*
+                 * When adding a remote processor, the state of the device
+                 * can be offline or detached, nothing else.
+                 */
+                return -EINVAL;
+        }
+
+        return 0;
+}
+
+/**
+ * rproc_add() - register a remote processor
+ * @rproc: the remote processor handle to register
+ *
+ * Registers @rproc with the remoteproc framework, after it has been
+ * allocated with rproc_alloc().
+ *
+ * This is called by the platform-specific rproc implementation, whenever
+ * a new remote processor device is probed.
+ *
+ * Returns 0 on success and an appropriate error code otherwise.
+ *
+ * Note: this function initiates an asynchronous firmware loading
+ * context, which will look for virtio devices supported by the rproc's
+ * firmware.
+ *
+ * If found, those virtio devices will be created and added, so as a result
+ * of registering this remote processor, additional virtio drivers might be
+ * probed.
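
/* --- Editor's note: illustrative sketch, not part of this patch ---
 * A minimal platform-driver probe() wiring a remote processor into the
 * framework with the managed helpers defined below; my_rproc_ops,
 * struct my_rproc_priv and the firmware name are invented for the example.
 */
static int example_rproc_probe(struct platform_device *pdev)
{
        struct rproc *rproc;
        int ret;

        rproc = devm_rproc_alloc(&pdev->dev, "my-rproc", &my_rproc_ops,
                                 "my-fw.elf", sizeof(struct my_rproc_priv));
        if (!rproc)
                return -ENOMEM;

        platform_set_drvdata(pdev, rproc);

        /* registers the rproc; may auto-boot it if rproc->auto_boot is set */
        ret = devm_rproc_add(&pdev->dev, rproc);
        if (ret)
                return ret;

        return 0;
}
/* --- end editor's sketch --- */
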
+ */ +int rproc_add(struct rproc *rproc) +{ + struct device *dev = &rproc->dev; + int ret; + + ret = device_add(dev); + if (ret < 0) + return ret; + + ret = rproc_validate(rproc); + if (ret < 0) + return ret; + + dev_info(dev, "%s is available\n", rproc->name); + + /* create debugfs entries */ + rproc_create_debug_dir(rproc); + + /* add char device for this remoteproc */ + ret = rproc_char_device_add(rproc); + if (ret < 0) + return ret; + + /* + * Remind ourselves the remote processor has been attached to rather + * than booted by the remoteproc core. This is important because the + * RPROC_DETACHED state will be lost as soon as the remote processor + * has been attached to. Used in firmware_show() and reset in + * rproc_stop(). + */ + if (rproc->state == RPROC_DETACHED) + rproc->autonomous = true; + + /* if rproc is marked always-on, request it to boot */ + if (rproc->auto_boot) { + ret = rproc_trigger_auto_boot(rproc); + if (ret < 0) + return ret; + } + + /* expose to rproc_get_by_phandle users */ + mutex_lock(&rproc_list_mutex); + list_add_rcu(&rproc->node, &rproc_list); + mutex_unlock(&rproc_list_mutex); + + return 0; +} +EXPORT_SYMBOL(rproc_add); + +static void devm_rproc_remove(void *rproc) +{ + rproc_del(rproc); +} + +/** + * devm_rproc_add() - resource managed rproc_add() + * @dev: the underlying device + * @rproc: the remote processor handle to register + * + * This function performs like rproc_add() but the registered rproc device will + * automatically be removed on driver detach. + * + * Returns: 0 on success, negative errno on failure + */ +int devm_rproc_add(struct device *dev, struct rproc *rproc) +{ + int err; + + err = rproc_add(rproc); + if (err) + return err; + + return devm_add_action_or_reset(dev, devm_rproc_remove, rproc); +} +EXPORT_SYMBOL(devm_rproc_add); + +/** + * rproc_type_release() - release a remote processor instance + * @dev: the rproc's device + * + * This function should _never_ be called directly. + * + * It will be called by the driver core when no one holds a valid pointer + * to @dev anymore. + */ +static void rproc_type_release(struct device *dev) +{ + struct rproc *rproc = container_of(dev, struct rproc, dev); + + dev_info(&rproc->dev, "releasing %s\n", rproc->name); + + idr_destroy(&rproc->notifyids); + + if (rproc->index >= 0) + ida_simple_remove(&rproc_dev_index, rproc->index); + + kfree_const(rproc->firmware); + kfree_const(rproc->name); + kfree(rproc->ops); + kfree(rproc); +} + +static const struct device_type rproc_type = { + .name = "remoteproc", + .release = rproc_type_release, +}; + +static int rproc_alloc_firmware(struct rproc *rproc, + const char *name, const char *firmware) +{ + const char *p; + + /* + * Allocate a firmware name if the caller gave us one to work + * with. Otherwise construct a new one using a default pattern. 
+ */ + if (firmware) + p = kstrdup_const(firmware, GFP_KERNEL); + else + p = kasprintf(GFP_KERNEL, "rproc-%s-fw", name); + + if (!p) + return -ENOMEM; + + rproc->firmware = p; + + return 0; +} + +static int rproc_alloc_ops(struct rproc *rproc, const struct rproc_ops *ops) +{ + rproc->ops = kmemdup(ops, sizeof(*ops), GFP_KERNEL); + if (!rproc->ops) + return -ENOMEM; + + if (rproc->ops->load) + return 0; + + /* Default to ELF loader if no load function is specified */ + rproc->ops->load = rproc_elf_load_segments; + rproc->ops->parse_fw = rproc_elf_load_rsc_table; + rproc->ops->find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table; + rproc->ops->sanity_check = rproc_elf_sanity_check; + rproc->ops->get_boot_addr = rproc_elf_get_boot_addr; + + return 0; +} + +/** + * rproc_alloc() - allocate a remote processor handle + * @dev: the underlying device + * @name: name of this remote processor + * @ops: platform-specific handlers (mainly start/stop) + * @firmware: name of firmware file to load, can be NULL + * @len: length of private data needed by the rproc driver (in bytes) + * + * Allocates a new remote processor handle, but does not register + * it yet. if @firmware is NULL, a default name is used. + * + * This function should be used by rproc implementations during initialization + * of the remote processor. + * + * After creating an rproc handle using this function, and when ready, + * implementations should then call rproc_add() to complete + * the registration of the remote processor. + * + * On success the new rproc is returned, and on failure, NULL. + * + * Note: _never_ directly deallocate @rproc, even if it was not registered + * yet. Instead, when you need to unroll rproc_alloc(), use rproc_free(). + */ +struct rproc *rproc_alloc(struct device *dev, const char *name, + const struct rproc_ops *ops, + const char *firmware, int len) +{ + struct rproc *rproc; + + if (!dev || !name || !ops) + return NULL; + + rproc = kzalloc(sizeof(struct rproc) + len, GFP_KERNEL); + if (!rproc) + return NULL; + + rproc->priv = &rproc[1]; + rproc->auto_boot = true; + rproc->elf_class = ELFCLASSNONE; + rproc->elf_machine = EM_NONE; + + device_initialize(&rproc->dev); + rproc->dev.parent = dev; + rproc->dev.type = &rproc_type; + rproc->dev.class = &rproc_class; + rproc->dev.driver_data = rproc; + idr_init(&rproc->notifyids); + + rproc->name = kstrdup_const(name, GFP_KERNEL); + if (!rproc->name) + goto put_device; + + if (rproc_alloc_firmware(rproc, name, firmware)) + goto put_device; + + if (rproc_alloc_ops(rproc, ops)) + goto put_device; + + /* Assign a unique device index and name */ + rproc->index = ida_simple_get(&rproc_dev_index, 0, 0, GFP_KERNEL); + if (rproc->index < 0) { + dev_err(dev, "ida_simple_get failed: %d\n", rproc->index); + goto put_device; + } + + dev_set_name(&rproc->dev, "remoteproc%d", rproc->index); + + atomic_set(&rproc->power, 0); + + mutex_init(&rproc->lock); + + INIT_LIST_HEAD(&rproc->carveouts); + INIT_LIST_HEAD(&rproc->mappings); + INIT_LIST_HEAD(&rproc->traces); + INIT_LIST_HEAD(&rproc->rvdevs); + INIT_LIST_HEAD(&rproc->subdevs); + INIT_LIST_HEAD(&rproc->dump_segments); + + INIT_WORK(&rproc->crash_handler, rproc_crash_handler_work); + + rproc->state = RPROC_OFFLINE; + + return rproc; + +put_device: + put_device(&rproc->dev); + return NULL; +} +EXPORT_SYMBOL(rproc_alloc); + +/** + * rproc_free() - unroll rproc_alloc() + * @rproc: the remote processor handle + * + * This function decrements the rproc dev refcount. 
+ * + * If no one holds any reference to rproc anymore, then its refcount would + * now drop to zero, and it would be freed. + */ +void rproc_free(struct rproc *rproc) +{ + put_device(&rproc->dev); +} +EXPORT_SYMBOL(rproc_free); + +/** + * rproc_put() - release rproc reference + * @rproc: the remote processor handle + * + * This function decrements the rproc dev refcount. + * + * If no one holds any reference to rproc anymore, then its refcount would + * now drop to zero, and it would be freed. + */ +void rproc_put(struct rproc *rproc) +{ + module_put(rproc->dev.parent->driver->owner); + put_device(&rproc->dev); +} +EXPORT_SYMBOL(rproc_put); + +/** + * rproc_del() - unregister a remote processor + * @rproc: rproc handle to unregister + * + * This function should be called when the platform specific rproc + * implementation decides to remove the rproc device. it should + * _only_ be called if a previous invocation of rproc_add() + * has completed successfully. + * + * After rproc_del() returns, @rproc isn't freed yet, because + * of the outstanding reference created by rproc_alloc. To decrement that + * one last refcount, one still needs to call rproc_free(). + * + * Returns 0 on success and -EINVAL if @rproc isn't valid. + */ +int rproc_del(struct rproc *rproc) +{ + if (!rproc) + return -EINVAL; + + /* if rproc is marked always-on, rproc_add() booted it */ + /* TODO: make sure this works with rproc->power > 1 */ + if (rproc->auto_boot) + rproc_shutdown(rproc); + + mutex_lock(&rproc->lock); + rproc->state = RPROC_DELETED; + mutex_unlock(&rproc->lock); + + rproc_delete_debug_dir(rproc); + + /* the rproc is downref'ed as soon as it's removed from the klist */ + mutex_lock(&rproc_list_mutex); + list_del_rcu(&rproc->node); + mutex_unlock(&rproc_list_mutex); + + /* Ensure that no readers of rproc_list are still active */ + synchronize_rcu(); + + device_del(&rproc->dev); + rproc_char_device_remove(rproc); + + return 0; +} +EXPORT_SYMBOL(rproc_del); + +static void devm_rproc_free(struct device *dev, void *res) +{ + rproc_free(*(struct rproc **)res); +} + +/** + * devm_rproc_alloc() - resource managed rproc_alloc() + * @dev: the underlying device + * @name: name of this remote processor + * @ops: platform-specific handlers (mainly start/stop) + * @firmware: name of firmware file to load, can be NULL + * @len: length of private data needed by the rproc driver (in bytes) + * + * This function performs like rproc_alloc() but the acquired rproc device will + * automatically be released on driver detach. + * + * Returns: new rproc instance, or NULL on failure + */ +struct rproc *devm_rproc_alloc(struct device *dev, const char *name, + const struct rproc_ops *ops, + const char *firmware, int len) +{ + struct rproc **ptr, *rproc; + + ptr = devres_alloc(devm_rproc_free, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return NULL; + + rproc = rproc_alloc(dev, name, ops, firmware, len); + if (rproc) { + *ptr = rproc; + devres_add(dev, ptr); + } else { + devres_free(ptr); + } + + return rproc; +} +EXPORT_SYMBOL(devm_rproc_alloc); + +/** + * rproc_add_subdev() - add a subdevice to a remoteproc + * @rproc: rproc handle to add the subdevice to + * @subdev: subdev handle to register + * + * Caller is responsible for populating optional subdevice function pointers. 
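+ *
+ * Illustrative use, assuming a driver-defined wrapper struct embedding
+ * a struct rproc_subdev (hypothetical names):
+ *
+ *	my->subdev.prepare = my_subdev_prepare;
+ *	my->subdev.start = my_subdev_start;
+ *	my->subdev.stop = my_subdev_stop;
+ *	rproc_add_subdev(rproc, &my->subdev);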
+ */ +void rproc_add_subdev(struct rproc *rproc, struct rproc_subdev *subdev) +{ + list_add_tail(&subdev->node, &rproc->subdevs); +} +EXPORT_SYMBOL(rproc_add_subdev); + +/** + * rproc_remove_subdev() - remove a subdevice from a remoteproc + * @rproc: rproc handle to remove the subdevice from + * @subdev: subdev handle, previously registered with rproc_add_subdev() + */ +void rproc_remove_subdev(struct rproc *rproc, struct rproc_subdev *subdev) +{ + list_del(&subdev->node); +} +EXPORT_SYMBOL(rproc_remove_subdev); + +/** + * rproc_get_by_child() - acquire rproc handle of @dev's ancestor + * @dev: child device to find ancestor of + * + * Returns the ancestor rproc instance, or NULL if not found. + */ +struct rproc *rproc_get_by_child(struct device *dev) +{ + for (dev = dev->parent; dev; dev = dev->parent) { + if (dev->type == &rproc_type) + return dev->driver_data; + } + + return NULL; +} +EXPORT_SYMBOL(rproc_get_by_child); + +/** + * rproc_report_crash() - rproc crash reporter function + * @rproc: remote processor + * @type: crash type + * + * This function must be called every time a crash is detected by the low-level + * drivers implementing a specific remoteproc. This should not be called from a + * non-remoteproc driver. + * + * This function can be called from atomic/interrupt context. + */ +void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type) +{ + if (!rproc) { + pr_err("NULL rproc pointer\n"); + return; + } + + /* Prevent suspend while the remoteproc is being recovered */ + pm_stay_awake(rproc->dev.parent); + + dev_err(&rproc->dev, "crash detected in %s: type %s\n", + rproc->name, rproc_crash_to_string(type)); + + /* create a new task to handle the error */ + schedule_work(&rproc->crash_handler); +} +EXPORT_SYMBOL(rproc_report_crash); + +static int rproc_panic_handler(struct notifier_block *nb, unsigned long event, + void *ptr) +{ + unsigned int longest = 0; + struct rproc *rproc; + unsigned int d; + + rcu_read_lock(); + list_for_each_entry_rcu(rproc, &rproc_list, node) { + if (!rproc->ops->panic || rproc->state != RPROC_RUNNING) + continue; + + d = rproc->ops->panic(rproc); + longest = max(longest, d); + } + rcu_read_unlock(); + + /* + * Delay for the longest requested duration before returning. This can + * be used by the remoteproc drivers to give the remote processor time + * to perform any requested operations (such as flush caches), when + * it's not possible to signal the Linux side due to the panic. 
+ */ + mdelay(longest); + + return NOTIFY_DONE; +} + +static void __init rproc_init_panic(void) +{ + rproc_panic_nb.notifier_call = rproc_panic_handler; + atomic_notifier_chain_register(&panic_notifier_list, &rproc_panic_nb); +} + +static void __exit rproc_exit_panic(void) +{ + atomic_notifier_chain_unregister(&panic_notifier_list, &rproc_panic_nb); +} + +static int __init remoteproc_init(void) +{ + rproc_init_sysfs(); + rproc_init_debugfs(); + rproc_init_cdev(); + rproc_init_panic(); + + return 0; +} +subsys_initcall(remoteproc_init); + +static void __exit remoteproc_exit(void) +{ + ida_destroy(&rproc_dev_index); + + rproc_exit_panic(); + rproc_exit_debugfs(); + rproc_exit_sysfs(); +} +module_exit(remoteproc_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Generic Remote Processor Framework"); diff --git a/drivers/remoteproc/remoteproc_coredump.c b/drivers/remoteproc/remoteproc_coredump.c new file mode 100644 index 000000000..34530dc20 --- /dev/null +++ b/drivers/remoteproc/remoteproc_coredump.c @@ -0,0 +1,325 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Coredump functionality for Remoteproc framework. + * + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + */ + +#include <linux/completion.h> +#include <linux/devcoredump.h> +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/remoteproc.h> +#include "remoteproc_internal.h" +#include "remoteproc_elf_helpers.h" + +struct rproc_coredump_state { + struct rproc *rproc; + void *header; + struct completion dump_done; +}; + +/** + * rproc_coredump_cleanup() - clean up dump_segments list + * @rproc: the remote processor handle + */ +void rproc_coredump_cleanup(struct rproc *rproc) +{ + struct rproc_dump_segment *entry, *tmp; + + list_for_each_entry_safe(entry, tmp, &rproc->dump_segments, node) { + list_del(&entry->node); + kfree(entry); + } +} + +/** + * rproc_coredump_add_segment() - add segment of device memory to coredump + * @rproc: handle of a remote processor + * @da: device address + * @size: size of segment + * + * Add device memory to the list of segments to be included in a coredump for + * the remoteproc. + * + * Return: 0 on success, negative errno on error. + */ +int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size) +{ + struct rproc_dump_segment *segment; + + segment = kzalloc(sizeof(*segment), GFP_KERNEL); + if (!segment) + return -ENOMEM; + + segment->da = da; + segment->size = size; + + list_add_tail(&segment->node, &rproc->dump_segments); + + return 0; +} +EXPORT_SYMBOL(rproc_coredump_add_segment); + +/** + * rproc_coredump_add_custom_segment() - add custom coredump segment + * @rproc: handle of a remote processor + * @da: device address + * @size: size of segment + * @dumpfn: custom dump function called for each segment during coredump + * @priv: private data + * + * Add device memory to the list of segments to be included in the coredump + * and associate the segment with the given custom dump function and private + * data. + * + * Return: 0 on success, negative errno on error. 
+ */ +int rproc_coredump_add_custom_segment(struct rproc *rproc, + dma_addr_t da, size_t size, + void (*dumpfn)(struct rproc *rproc, + struct rproc_dump_segment *segment, + void *dest, size_t offset, + size_t size), + void *priv) +{ + struct rproc_dump_segment *segment; + + segment = kzalloc(sizeof(*segment), GFP_KERNEL); + if (!segment) + return -ENOMEM; + + segment->da = da; + segment->size = size; + segment->priv = priv; + segment->dump = dumpfn; + + list_add_tail(&segment->node, &rproc->dump_segments); + + return 0; +} +EXPORT_SYMBOL(rproc_coredump_add_custom_segment); + +/** + * rproc_coredump_set_elf_info() - set coredump elf information + * @rproc: handle of a remote processor + * @class: elf class for coredump elf file + * @machine: elf machine for coredump elf file + * + * Set elf information which will be used for coredump elf file. + * + * Return: 0 on success, negative errno on error. + */ +int rproc_coredump_set_elf_info(struct rproc *rproc, u8 class, u16 machine) +{ + if (class != ELFCLASS64 && class != ELFCLASS32) + return -EINVAL; + + rproc->elf_class = class; + rproc->elf_machine = machine; + + return 0; +} +EXPORT_SYMBOL(rproc_coredump_set_elf_info); + +static void rproc_coredump_free(void *data) +{ + struct rproc_coredump_state *dump_state = data; + + vfree(dump_state->header); + complete(&dump_state->dump_done); +} + +static void *rproc_coredump_find_segment(loff_t user_offset, + struct list_head *segments, + size_t *data_left) +{ + struct rproc_dump_segment *segment; + + list_for_each_entry(segment, segments, node) { + if (user_offset < segment->size) { + *data_left = segment->size - user_offset; + return segment; + } + user_offset -= segment->size; + } + + *data_left = 0; + return NULL; +} + +static void rproc_copy_segment(struct rproc *rproc, void *dest, + struct rproc_dump_segment *segment, + size_t offset, size_t size) +{ + void *ptr; + + if (segment->dump) { + segment->dump(rproc, segment, dest, offset, size); + } else { + ptr = rproc_da_to_va(rproc, segment->da + offset, size); + if (!ptr) { + dev_err(&rproc->dev, + "invalid copy request for segment %pad with offset %zu and size %zu)\n", + &segment->da, offset, size); + memset(dest, 0xff, size); + } else { + memcpy(dest, ptr, size); + } + } +} + +static ssize_t rproc_coredump_read(char *buffer, loff_t offset, size_t count, + void *data, size_t header_sz) +{ + size_t seg_data, bytes_left = count; + ssize_t copy_sz; + struct rproc_dump_segment *seg; + struct rproc_coredump_state *dump_state = data; + struct rproc *rproc = dump_state->rproc; + void *elfcore = dump_state->header; + + /* Copy the vmalloc'ed header first. */ + if (offset < header_sz) { + copy_sz = memory_read_from_buffer(buffer, count, &offset, + elfcore, header_sz); + + return copy_sz; + } + + /* + * Find out the segment memory chunk to be copied based on offset. + * Keep copying data until count bytes are read. 
+ */ + while (bytes_left) { + seg = rproc_coredump_find_segment(offset - header_sz, + &rproc->dump_segments, + &seg_data); + /* EOF check */ + if (!seg) { + dev_info(&rproc->dev, "Ramdump done, %lld bytes read", + offset); + break; + } + + copy_sz = min_t(size_t, bytes_left, seg_data); + + rproc_copy_segment(rproc, buffer, seg, seg->size - seg_data, + copy_sz); + + offset += copy_sz; + buffer += copy_sz; + bytes_left -= copy_sz; + } + + return count - bytes_left; +} + +/** + * rproc_coredump() - perform coredump + * @rproc: rproc handle + * + * This function will generate an ELF header for the registered segments + * and create a devcoredump device associated with rproc. Based on the + * coredump configuration this function will directly copy the segments + * from device memory to userspace or copy segments from device memory to + * a separate buffer, which can then be read by userspace. + * The first approach avoids using extra vmalloc memory. But it will stall + * recovery flow until dump is read by userspace. + */ +void rproc_coredump(struct rproc *rproc) +{ + struct rproc_dump_segment *segment; + void *phdr; + void *ehdr; + size_t data_size; + size_t offset; + void *data; + u8 class = rproc->elf_class; + int phnum = 0; + struct rproc_coredump_state dump_state; + enum rproc_dump_mechanism dump_conf = rproc->dump_conf; + + if (list_empty(&rproc->dump_segments) || + dump_conf == RPROC_COREDUMP_DISABLED) + return; + + if (class == ELFCLASSNONE) { + dev_err(&rproc->dev, "Elf class is not set\n"); + return; + } + + data_size = elf_size_of_hdr(class); + list_for_each_entry(segment, &rproc->dump_segments, node) { + /* + * For default configuration buffer includes headers & segments. + * For inline dump buffer just includes headers as segments are + * directly read from device memory. 
+ */ + data_size += elf_size_of_phdr(class); + if (dump_conf == RPROC_COREDUMP_ENABLED) + data_size += segment->size; + + phnum++; + } + + data = vmalloc(data_size); + if (!data) + return; + + ehdr = data; + + memset(ehdr, 0, elf_size_of_hdr(class)); + /* e_ident field is common for both elf32 and elf64 */ + elf_hdr_init_ident(ehdr, class); + + elf_hdr_set_e_type(class, ehdr, ET_CORE); + elf_hdr_set_e_machine(class, ehdr, rproc->elf_machine); + elf_hdr_set_e_version(class, ehdr, EV_CURRENT); + elf_hdr_set_e_entry(class, ehdr, rproc->bootaddr); + elf_hdr_set_e_phoff(class, ehdr, elf_size_of_hdr(class)); + elf_hdr_set_e_ehsize(class, ehdr, elf_size_of_hdr(class)); + elf_hdr_set_e_phentsize(class, ehdr, elf_size_of_phdr(class)); + elf_hdr_set_e_phnum(class, ehdr, phnum); + + phdr = data + elf_hdr_get_e_phoff(class, ehdr); + offset = elf_hdr_get_e_phoff(class, ehdr); + offset += elf_size_of_phdr(class) * elf_hdr_get_e_phnum(class, ehdr); + + list_for_each_entry(segment, &rproc->dump_segments, node) { + memset(phdr, 0, elf_size_of_phdr(class)); + elf_phdr_set_p_type(class, phdr, PT_LOAD); + elf_phdr_set_p_offset(class, phdr, offset); + elf_phdr_set_p_vaddr(class, phdr, segment->da); + elf_phdr_set_p_paddr(class, phdr, segment->da); + elf_phdr_set_p_filesz(class, phdr, segment->size); + elf_phdr_set_p_memsz(class, phdr, segment->size); + elf_phdr_set_p_flags(class, phdr, PF_R | PF_W | PF_X); + elf_phdr_set_p_align(class, phdr, 0); + + if (dump_conf == RPROC_COREDUMP_ENABLED) + rproc_copy_segment(rproc, data + offset, segment, 0, + segment->size); + + offset += elf_phdr_get_p_filesz(class, phdr); + phdr += elf_size_of_phdr(class); + } + if (dump_conf == RPROC_COREDUMP_ENABLED) { + dev_coredumpv(&rproc->dev, data, data_size, GFP_KERNEL); + return; + } + + /* Initialize the dump state struct to be used by rproc_coredump_read */ + dump_state.rproc = rproc; + dump_state.header = data; + init_completion(&dump_state.dump_done); + + dev_coredumpm(&rproc->dev, NULL, &dump_state, data_size, GFP_KERNEL, + rproc_coredump_read, rproc_coredump_free); + + /* + * Wait until the dump is read and free is called. Data is freed + * by devcoredump framework automatically after 5 minutes. + */ + wait_for_completion(&dump_state.dump_done); +} diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c new file mode 100644 index 000000000..e8bb0ee6b --- /dev/null +++ b/drivers/remoteproc/remoteproc_debugfs.c @@ -0,0 +1,443 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Remote Processor Framework + * + * Copyright (C) 2011 Texas Instruments, Inc. + * Copyright (C) 2011 Google, Inc. + * + * Ohad Ben-Cohen <ohad@wizery.com> + * Mark Grosen <mgrosen@ti.com> + * Brian Swetland <swetland@google.com> + * Fernando Guzman Lugo <fernando.lugo@ti.com> + * Suman Anna <s-anna@ti.com> + * Robert Tivy <rtivy@ti.com> + * Armando Uribe De Leon <x0095078@ti.com> + */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include <linux/kernel.h> +#include <linux/debugfs.h> +#include <linux/remoteproc.h> +#include <linux/device.h> +#include <linux/uaccess.h> + +#include "remoteproc_internal.h" + +/* remoteproc debugfs parent dir */ +static struct dentry *rproc_dbg; + +/* + * A coredump-configuration-to-string lookup table, for exposing a + * human readable configuration via debugfs. 
Always keep in sync with
+ * enum rproc_dump_mechanism
+ */
+static const char * const rproc_coredump_str[] = {
+	[RPROC_COREDUMP_DISABLED]	= "disabled",
+	[RPROC_COREDUMP_ENABLED]	= "enabled",
+	[RPROC_COREDUMP_INLINE]		= "inline",
+};
+
+/* Expose the current coredump configuration via debugfs */
+static ssize_t rproc_coredump_read(struct file *filp, char __user *userbuf,
+				   size_t count, loff_t *ppos)
+{
+	struct rproc *rproc = filp->private_data;
+	char buf[20];
+	int len;
+
+	len = scnprintf(buf, sizeof(buf), "%s\n",
+			rproc_coredump_str[rproc->dump_conf]);
+
+	return simple_read_from_buffer(userbuf, count, ppos, buf, len);
+}
+
+/*
+ * By writing to the 'coredump' debugfs entry, we control the behavior of the
+ * coredump mechanism dynamically. The default value of this entry is
+ * "disabled".
+ *
+ * The 'coredump' debugfs entry supports these commands:
+ *
+ * disabled:	By default coredump collection is disabled. Recovery will
+ *		proceed without collecting any dump.
+ *
+ * enabled:	When the remoteproc crashes the entire coredump will be copied
+ *		to a separate buffer and exposed to userspace.
+ *
+ * inline:	The coredump will not be copied to a separate buffer and the
+ *		recovery process will have to wait until data is read by
+ *		userspace. But this avoids using extra memory.
+ */
+static ssize_t rproc_coredump_write(struct file *filp,
+				    const char __user *user_buf, size_t count,
+				    loff_t *ppos)
+{
+	struct rproc *rproc = filp->private_data;
+	int ret, err = 0;
+	char buf[20];
+
+	if (count < 1 || count > sizeof(buf))
+		return -EINVAL;
+
+	ret = copy_from_user(buf, user_buf, count);
+	if (ret)
+		return -EFAULT;
+
+	/* remove end of line */
+	if (buf[count - 1] == '\n')
+		buf[count - 1] = '\0';
+
+	if (rproc->state == RPROC_CRASHED) {
+		dev_err(&rproc->dev, "can't change coredump configuration\n");
+		err = -EBUSY;
+		goto out;
+	}
+
+	if (!strncmp(buf, "disabled", count)) {
+		rproc->dump_conf = RPROC_COREDUMP_DISABLED;
+	} else if (!strncmp(buf, "enabled", count)) {
+		rproc->dump_conf = RPROC_COREDUMP_ENABLED;
+	} else if (!strncmp(buf, "inline", count)) {
+		rproc->dump_conf = RPROC_COREDUMP_INLINE;
+	} else {
+		dev_err(&rproc->dev, "Invalid coredump configuration\n");
+		err = -EINVAL;
+	}
+out:
+	return err ? err : count;
+}
+
+static const struct file_operations rproc_coredump_fops = {
+	.read = rproc_coredump_read,
+	.write = rproc_coredump_write,
+	.open = simple_open,
+	.llseek = generic_file_llseek,
+};
+
+/*
+ * Some remote processors may support dumping trace logs into a shared
+ * memory buffer. We expose this trace buffer using debugfs, so users
+ * can easily tell what's going on remotely.
+ *
+ * We will most probably improve the rproc tracing facilities later on,
+ * but this kind of lightweight and simple mechanism is always good to have,
+ * as it provides very early tracing with little to no dependencies at all.
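+ *
+ * With debugfs mounted at its usual location, a trace buffer is typically
+ * read with (instance and trace index assumed for illustration):
+ *
+ *	cat /sys/kernel/debug/remoteproc/remoteproc0/trace0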
+ */ +static ssize_t rproc_trace_read(struct file *filp, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct rproc_debug_trace *data = filp->private_data; + struct rproc_mem_entry *trace = &data->trace_mem; + void *va; + char buf[100]; + int len; + + va = rproc_da_to_va(data->rproc, trace->da, trace->len); + + if (!va) { + len = scnprintf(buf, sizeof(buf), "Trace %s not available\n", + trace->name); + va = buf; + } else { + len = strnlen(va, trace->len); + } + + return simple_read_from_buffer(userbuf, count, ppos, va, len); +} + +static const struct file_operations trace_rproc_ops = { + .read = rproc_trace_read, + .open = simple_open, + .llseek = generic_file_llseek, +}; + +/* expose the name of the remote processor via debugfs */ +static ssize_t rproc_name_read(struct file *filp, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct rproc *rproc = filp->private_data; + /* need room for the name, a newline and a terminating null */ + char buf[100]; + int i; + + i = scnprintf(buf, sizeof(buf), "%.98s\n", rproc->name); + + return simple_read_from_buffer(userbuf, count, ppos, buf, i); +} + +static const struct file_operations rproc_name_ops = { + .read = rproc_name_read, + .open = simple_open, + .llseek = generic_file_llseek, +}; + +/* expose recovery flag via debugfs */ +static ssize_t rproc_recovery_read(struct file *filp, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct rproc *rproc = filp->private_data; + char *buf = rproc->recovery_disabled ? "disabled\n" : "enabled\n"; + + return simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf)); +} + +/* + * By writing to the 'recovery' debugfs entry, we control the behavior of the + * recovery mechanism dynamically. The default value of this entry is "enabled". + * + * The 'recovery' debugfs entry supports these commands: + * + * enabled: When enabled, the remote processor will be automatically + * recovered whenever it crashes. Moreover, if the remote + * processor crashes while recovery is disabled, it will + * be automatically recovered too as soon as recovery is enabled. + * + * disabled: When disabled, a remote processor will remain in a crashed + * state if it crashes. This is useful for debugging purposes; + * without it, debugging a crash is substantially harder. + * + * recover: This function will trigger an immediate recovery if the + * remote processor is in a crashed state, without changing + * or checking the recovery state (enabled/disabled). + * This is useful during debugging sessions, when one expects + * additional crashes to happen after enabling recovery. In this + * case, enabling recovery will make it hard to debug subsequent + * crashes, so it's recommended to keep recovery disabled, and + * instead use the "recover" command as needed. 
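+ *
+ * For example, with debugfs mounted at its usual location (instance name
+ * assumed for illustration):
+ *
+ *	echo disabled > /sys/kernel/debug/remoteproc/remoteproc0/recovery
+ *	... debug the crash ...
+ *	echo recover > /sys/kernel/debug/remoteproc/remoteproc0/recovery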
+ */ +static ssize_t +rproc_recovery_write(struct file *filp, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rproc *rproc = filp->private_data; + char buf[10]; + int ret; + + if (count < 1 || count > sizeof(buf)) + return -EINVAL; + + ret = copy_from_user(buf, user_buf, count); + if (ret) + return -EFAULT; + + /* remove end of line */ + if (buf[count - 1] == '\n') + buf[count - 1] = '\0'; + + if (!strncmp(buf, "enabled", count)) { + /* change the flag and begin the recovery process if needed */ + rproc->recovery_disabled = false; + rproc_trigger_recovery(rproc); + } else if (!strncmp(buf, "disabled", count)) { + rproc->recovery_disabled = true; + } else if (!strncmp(buf, "recover", count)) { + /* begin the recovery process without changing the flag */ + rproc_trigger_recovery(rproc); + } else { + return -EINVAL; + } + + return count; +} + +static const struct file_operations rproc_recovery_ops = { + .read = rproc_recovery_read, + .write = rproc_recovery_write, + .open = simple_open, + .llseek = generic_file_llseek, +}; + +/* expose the crash trigger via debugfs */ +static ssize_t +rproc_crash_write(struct file *filp, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rproc *rproc = filp->private_data; + unsigned int type; + int ret; + + ret = kstrtouint_from_user(user_buf, count, 0, &type); + if (ret < 0) + return ret; + + rproc_report_crash(rproc, type); + + return count; +} + +static const struct file_operations rproc_crash_ops = { + .write = rproc_crash_write, + .open = simple_open, + .llseek = generic_file_llseek, +}; + +/* Expose resource table content via debugfs */ +static int rproc_rsc_table_show(struct seq_file *seq, void *p) +{ + static const char * const types[] = {"carveout", "devmem", "trace", "vdev"}; + struct rproc *rproc = seq->private; + struct resource_table *table = rproc->table_ptr; + struct fw_rsc_carveout *c; + struct fw_rsc_devmem *d; + struct fw_rsc_trace *t; + struct fw_rsc_vdev *v; + int i, j; + + if (!table) { + seq_puts(seq, "No resource table found\n"); + return 0; + } + + for (i = 0; i < table->num; i++) { + int offset = table->offset[i]; + struct fw_rsc_hdr *hdr = (void *)table + offset; + void *rsc = (void *)hdr + sizeof(*hdr); + + switch (hdr->type) { + case RSC_CARVEOUT: + c = rsc; + seq_printf(seq, "Entry %d is of type %s\n", i, types[hdr->type]); + seq_printf(seq, " Device Address 0x%x\n", c->da); + seq_printf(seq, " Physical Address 0x%x\n", c->pa); + seq_printf(seq, " Length 0x%x Bytes\n", c->len); + seq_printf(seq, " Flags 0x%x\n", c->flags); + seq_printf(seq, " Reserved (should be zero) [%d]\n", c->reserved); + seq_printf(seq, " Name %s\n\n", c->name); + break; + case RSC_DEVMEM: + d = rsc; + seq_printf(seq, "Entry %d is of type %s\n", i, types[hdr->type]); + seq_printf(seq, " Device Address 0x%x\n", d->da); + seq_printf(seq, " Physical Address 0x%x\n", d->pa); + seq_printf(seq, " Length 0x%x Bytes\n", d->len); + seq_printf(seq, " Flags 0x%x\n", d->flags); + seq_printf(seq, " Reserved (should be zero) [%d]\n", d->reserved); + seq_printf(seq, " Name %s\n\n", d->name); + break; + case RSC_TRACE: + t = rsc; + seq_printf(seq, "Entry %d is of type %s\n", i, types[hdr->type]); + seq_printf(seq, " Device Address 0x%x\n", t->da); + seq_printf(seq, " Length 0x%x Bytes\n", t->len); + seq_printf(seq, " Reserved (should be zero) [%d]\n", t->reserved); + seq_printf(seq, " Name %s\n\n", t->name); + break; + case RSC_VDEV: + v = rsc; + seq_printf(seq, "Entry %d is of type %s\n", i, types[hdr->type]); + + seq_printf(seq, " 
ID %d\n", v->id); + seq_printf(seq, " Notify ID %d\n", v->notifyid); + seq_printf(seq, " Device features 0x%x\n", v->dfeatures); + seq_printf(seq, " Guest features 0x%x\n", v->gfeatures); + seq_printf(seq, " Config length 0x%x\n", v->config_len); + seq_printf(seq, " Status 0x%x\n", v->status); + seq_printf(seq, " Number of vrings %d\n", v->num_of_vrings); + seq_printf(seq, " Reserved (should be zero) [%d][%d]\n\n", + v->reserved[0], v->reserved[1]); + + for (j = 0; j < v->num_of_vrings; j++) { + seq_printf(seq, " Vring %d\n", j); + seq_printf(seq, " Device Address 0x%x\n", v->vring[j].da); + seq_printf(seq, " Alignment %d\n", v->vring[j].align); + seq_printf(seq, " Number of buffers %d\n", v->vring[j].num); + seq_printf(seq, " Notify ID %d\n", v->vring[j].notifyid); + seq_printf(seq, " Physical Address 0x%x\n\n", + v->vring[j].pa); + } + break; + default: + seq_printf(seq, "Unknown resource type found: %d [hdr: %pK]\n", + hdr->type, hdr); + break; + } + } + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(rproc_rsc_table); + +/* Expose carveout content via debugfs */ +static int rproc_carveouts_show(struct seq_file *seq, void *p) +{ + struct rproc *rproc = seq->private; + struct rproc_mem_entry *carveout; + + list_for_each_entry(carveout, &rproc->carveouts, node) { + seq_puts(seq, "Carveout memory entry:\n"); + seq_printf(seq, "\tName: %s\n", carveout->name); + seq_printf(seq, "\tVirtual address: %pK\n", carveout->va); + seq_printf(seq, "\tDMA address: %pad\n", &carveout->dma); + seq_printf(seq, "\tDevice address: 0x%x\n", carveout->da); + seq_printf(seq, "\tLength: 0x%zx Bytes\n\n", carveout->len); + } + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(rproc_carveouts); + +void rproc_remove_trace_file(struct dentry *tfile) +{ + debugfs_remove(tfile); +} + +struct dentry *rproc_create_trace_file(const char *name, struct rproc *rproc, + struct rproc_debug_trace *trace) +{ + struct dentry *tfile; + + tfile = debugfs_create_file(name, 0400, rproc->dbg_dir, trace, + &trace_rproc_ops); + if (!tfile) { + dev_err(&rproc->dev, "failed to create debugfs trace entry\n"); + return NULL; + } + + return tfile; +} + +void rproc_delete_debug_dir(struct rproc *rproc) +{ + debugfs_remove_recursive(rproc->dbg_dir); +} + +void rproc_create_debug_dir(struct rproc *rproc) +{ + struct device *dev = &rproc->dev; + + if (!rproc_dbg) + return; + + rproc->dbg_dir = debugfs_create_dir(dev_name(dev), rproc_dbg); + if (!rproc->dbg_dir) + return; + + debugfs_create_file("name", 0400, rproc->dbg_dir, + rproc, &rproc_name_ops); + debugfs_create_file("recovery", 0600, rproc->dbg_dir, + rproc, &rproc_recovery_ops); + debugfs_create_file("crash", 0200, rproc->dbg_dir, + rproc, &rproc_crash_ops); + debugfs_create_file("resource_table", 0400, rproc->dbg_dir, + rproc, &rproc_rsc_table_fops); + debugfs_create_file("carveout_memories", 0400, rproc->dbg_dir, + rproc, &rproc_carveouts_fops); + debugfs_create_file("coredump", 0600, rproc->dbg_dir, + rproc, &rproc_coredump_fops); +} + +void __init rproc_init_debugfs(void) +{ + if (debugfs_initialized()) { + rproc_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL); + if (!rproc_dbg) + pr_err("can't create debugfs dir\n"); + } +} + +void __exit rproc_exit_debugfs(void) +{ + debugfs_remove(rproc_dbg); +} diff --git a/drivers/remoteproc/remoteproc_elf_helpers.h b/drivers/remoteproc/remoteproc_elf_helpers.h new file mode 100644 index 000000000..4b6be7b6b --- /dev/null +++ b/drivers/remoteproc/remoteproc_elf_helpers.h @@ -0,0 +1,96 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Remote processor elf helpers 
defines + * + * Copyright (C) 2020 Kalray, Inc. + */ + +#ifndef REMOTEPROC_ELF_LOADER_H +#define REMOTEPROC_ELF_LOADER_H + +#include <linux/elf.h> +#include <linux/types.h> + +/** + * fw_elf_get_class - Get elf class + * @fw: the ELF firmware image + * + * Note that we use and elf32_hdr to access the class since the start of the + * struct is the same for both elf class + * + * Return: elf class of the firmware + */ +static inline u8 fw_elf_get_class(const struct firmware *fw) +{ + struct elf32_hdr *ehdr = (struct elf32_hdr *)fw->data; + + return ehdr->e_ident[EI_CLASS]; +} + +static inline void elf_hdr_init_ident(struct elf32_hdr *hdr, u8 class) +{ + memcpy(hdr->e_ident, ELFMAG, SELFMAG); + hdr->e_ident[EI_CLASS] = class; + hdr->e_ident[EI_DATA] = ELFDATA2LSB; + hdr->e_ident[EI_VERSION] = EV_CURRENT; + hdr->e_ident[EI_OSABI] = ELFOSABI_NONE; +} + +/* Generate getter and setter for a specific elf struct/field */ +#define ELF_GEN_FIELD_GET_SET(__s, __field, __type) \ +static inline __type elf_##__s##_get_##__field(u8 class, const void *arg) \ +{ \ + if (class == ELFCLASS32) \ + return (__type) ((const struct elf32_##__s *) arg)->__field; \ + else \ + return (__type) ((const struct elf64_##__s *) arg)->__field; \ +} \ +static inline void elf_##__s##_set_##__field(u8 class, void *arg, \ + __type value) \ +{ \ + if (class == ELFCLASS32) \ + ((struct elf32_##__s *) arg)->__field = (__type) value; \ + else \ + ((struct elf64_##__s *) arg)->__field = (__type) value; \ +} + +ELF_GEN_FIELD_GET_SET(hdr, e_entry, u64) +ELF_GEN_FIELD_GET_SET(hdr, e_phnum, u16) +ELF_GEN_FIELD_GET_SET(hdr, e_shnum, u16) +ELF_GEN_FIELD_GET_SET(hdr, e_phoff, u64) +ELF_GEN_FIELD_GET_SET(hdr, e_shoff, u64) +ELF_GEN_FIELD_GET_SET(hdr, e_shstrndx, u16) +ELF_GEN_FIELD_GET_SET(hdr, e_machine, u16) +ELF_GEN_FIELD_GET_SET(hdr, e_type, u16) +ELF_GEN_FIELD_GET_SET(hdr, e_version, u32) +ELF_GEN_FIELD_GET_SET(hdr, e_ehsize, u32) +ELF_GEN_FIELD_GET_SET(hdr, e_phentsize, u16) + +ELF_GEN_FIELD_GET_SET(phdr, p_paddr, u64) +ELF_GEN_FIELD_GET_SET(phdr, p_vaddr, u64) +ELF_GEN_FIELD_GET_SET(phdr, p_filesz, u64) +ELF_GEN_FIELD_GET_SET(phdr, p_memsz, u64) +ELF_GEN_FIELD_GET_SET(phdr, p_type, u32) +ELF_GEN_FIELD_GET_SET(phdr, p_offset, u64) +ELF_GEN_FIELD_GET_SET(phdr, p_flags, u32) +ELF_GEN_FIELD_GET_SET(phdr, p_align, u64) + +ELF_GEN_FIELD_GET_SET(shdr, sh_size, u64) +ELF_GEN_FIELD_GET_SET(shdr, sh_offset, u64) +ELF_GEN_FIELD_GET_SET(shdr, sh_name, u32) +ELF_GEN_FIELD_GET_SET(shdr, sh_addr, u64) + +#define ELF_STRUCT_SIZE(__s) \ +static inline unsigned long elf_size_of_##__s(u8 class) \ +{ \ + if (class == ELFCLASS32)\ + return sizeof(struct elf32_##__s); \ + else \ + return sizeof(struct elf64_##__s); \ +} + +ELF_STRUCT_SIZE(shdr) +ELF_STRUCT_SIZE(phdr) +ELF_STRUCT_SIZE(hdr) + +#endif /* REMOTEPROC_ELF_LOADER_H */ diff --git a/drivers/remoteproc/remoteproc_elf_loader.c b/drivers/remoteproc/remoteproc_elf_loader.c new file mode 100644 index 000000000..df68d8775 --- /dev/null +++ b/drivers/remoteproc/remoteproc_elf_loader.c @@ -0,0 +1,382 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Remote Processor Framework Elf loader + * + * Copyright (C) 2011 Texas Instruments, Inc. + * Copyright (C) 2011 Google, Inc. 
+ * + * Ohad Ben-Cohen <ohad@wizery.com> + * Brian Swetland <swetland@google.com> + * Mark Grosen <mgrosen@ti.com> + * Fernando Guzman Lugo <fernando.lugo@ti.com> + * Suman Anna <s-anna@ti.com> + * Robert Tivy <rtivy@ti.com> + * Armando Uribe De Leon <x0095078@ti.com> + * Sjur Brændeland <sjur.brandeland@stericsson.com> + */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include <linux/module.h> +#include <linux/firmware.h> +#include <linux/remoteproc.h> +#include <linux/elf.h> + +#include "remoteproc_internal.h" +#include "remoteproc_elf_helpers.h" + +/** + * rproc_elf_sanity_check() - Sanity Check for ELF32/ELF64 firmware image + * @rproc: the remote processor handle + * @fw: the ELF firmware image + * + * Make sure this fw image is sane (ie a correct ELF32/ELF64 file). + */ +int rproc_elf_sanity_check(struct rproc *rproc, const struct firmware *fw) +{ + const char *name = rproc->firmware; + struct device *dev = &rproc->dev; + /* + * Elf files are beginning with the same structure. Thus, to simplify + * header parsing, we can use the elf32_hdr one for both elf64 and + * elf32. + */ + struct elf32_hdr *ehdr; + u32 elf_shdr_get_size; + u64 phoff, shoff; + char class; + u16 phnum; + + if (!fw) { + dev_err(dev, "failed to load %s\n", name); + return -EINVAL; + } + + if (fw->size < sizeof(struct elf32_hdr)) { + dev_err(dev, "Image is too small\n"); + return -EINVAL; + } + + ehdr = (struct elf32_hdr *)fw->data; + + if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) { + dev_err(dev, "Image is corrupted (bad magic)\n"); + return -EINVAL; + } + + class = ehdr->e_ident[EI_CLASS]; + if (class != ELFCLASS32 && class != ELFCLASS64) { + dev_err(dev, "Unsupported class: %d\n", class); + return -EINVAL; + } + + if (class == ELFCLASS64 && fw->size < sizeof(struct elf64_hdr)) { + dev_err(dev, "elf64 header is too small\n"); + return -EINVAL; + } + + /* We assume the firmware has the same endianness as the host */ +# ifdef __LITTLE_ENDIAN + if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) { +# else /* BIG ENDIAN */ + if (ehdr->e_ident[EI_DATA] != ELFDATA2MSB) { +# endif + dev_err(dev, "Unsupported firmware endianness\n"); + return -EINVAL; + } + + phoff = elf_hdr_get_e_phoff(class, fw->data); + shoff = elf_hdr_get_e_shoff(class, fw->data); + phnum = elf_hdr_get_e_phnum(class, fw->data); + elf_shdr_get_size = elf_size_of_shdr(class); + + if (fw->size < shoff + elf_shdr_get_size) { + dev_err(dev, "Image is too small\n"); + return -EINVAL; + } + + if (phnum == 0) { + dev_err(dev, "No loadable segments\n"); + return -EINVAL; + } + + if (phoff > fw->size) { + dev_err(dev, "Firmware size is too small\n"); + return -EINVAL; + } + + dev_dbg(dev, "Firmware is an elf%d file\n", + class == ELFCLASS32 ? 32 : 64); + + return 0; +} +EXPORT_SYMBOL(rproc_elf_sanity_check); + +/** + * rproc_elf_get_boot_addr() - Get rproc's boot address. + * @rproc: the remote processor handle + * @fw: the ELF firmware image + * + * This function returns the entry point address of the ELF + * image. + * + * Note that the boot address is not a configurable property of all remote + * processors. Some will always boot at a specific hard-coded address. 
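+ *
+ * Drivers for such processors would typically install their own
+ * rproc->ops->get_boot_addr instead of relying on this ELF-based helper.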
+ */ +u64 rproc_elf_get_boot_addr(struct rproc *rproc, const struct firmware *fw) +{ + return elf_hdr_get_e_entry(fw_elf_get_class(fw), fw->data); +} +EXPORT_SYMBOL(rproc_elf_get_boot_addr); + +/** + * rproc_elf_load_segments() - load firmware segments to memory + * @rproc: remote processor which will be booted using these fw segments + * @fw: the ELF firmware image + * + * This function loads the firmware segments to memory, where the remote + * processor expects them. + * + * Some remote processors will expect their code and data to be placed + * in specific device addresses, and can't have them dynamically assigned. + * + * We currently support only those kind of remote processors, and expect + * the program header's paddr member to contain those addresses. We then go + * through the physically contiguous "carveout" memory regions which we + * allocated (and mapped) earlier on behalf of the remote processor, + * and "translate" device address to kernel addresses, so we can copy the + * segments where they are expected. + * + * Currently we only support remote processors that required carveout + * allocations and got them mapped onto their iommus. Some processors + * might be different: they might not have iommus, and would prefer to + * directly allocate memory for every segment/resource. This is not yet + * supported, though. + */ +int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw) +{ + struct device *dev = &rproc->dev; + const void *ehdr, *phdr; + int i, ret = 0; + u16 phnum; + const u8 *elf_data = fw->data; + u8 class = fw_elf_get_class(fw); + u32 elf_phdr_get_size = elf_size_of_phdr(class); + + ehdr = elf_data; + phnum = elf_hdr_get_e_phnum(class, ehdr); + phdr = elf_data + elf_hdr_get_e_phoff(class, ehdr); + + /* go through the available ELF segments */ + for (i = 0; i < phnum; i++, phdr += elf_phdr_get_size) { + u64 da = elf_phdr_get_p_paddr(class, phdr); + u64 memsz = elf_phdr_get_p_memsz(class, phdr); + u64 filesz = elf_phdr_get_p_filesz(class, phdr); + u64 offset = elf_phdr_get_p_offset(class, phdr); + u32 type = elf_phdr_get_p_type(class, phdr); + void *ptr; + + if (type != PT_LOAD) + continue; + + dev_dbg(dev, "phdr: type %d da 0x%llx memsz 0x%llx filesz 0x%llx\n", + type, da, memsz, filesz); + + if (filesz > memsz) { + dev_err(dev, "bad phdr filesz 0x%llx memsz 0x%llx\n", + filesz, memsz); + ret = -EINVAL; + break; + } + + if (offset + filesz > fw->size) { + dev_err(dev, "truncated fw: need 0x%llx avail 0x%zx\n", + offset + filesz, fw->size); + ret = -EINVAL; + break; + } + + if (!rproc_u64_fit_in_size_t(memsz)) { + dev_err(dev, "size (%llx) does not fit in size_t type\n", + memsz); + ret = -EOVERFLOW; + break; + } + + /* grab the kernel address for this device address */ + ptr = rproc_da_to_va(rproc, da, memsz); + if (!ptr) { + dev_err(dev, "bad phdr da 0x%llx mem 0x%llx\n", da, + memsz); + ret = -EINVAL; + break; + } + + /* put the segment where the remote processor expects it */ + if (filesz) + memcpy(ptr, elf_data + offset, filesz); + + /* + * Zero out remaining memory for this segment. + * + * This isn't strictly required since dma_alloc_coherent already + * did this for us. albeit harmless, we may consider removing + * this. 
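+ *
+ * (memsz > filesz is the classic .bss case: the segment carries no
+ * bytes in the file, yet the remote processor expects zeroed memory
+ * there.)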
+ */ + if (memsz > filesz) + memset(ptr + filesz, 0, memsz - filesz); + } + + return ret; +} +EXPORT_SYMBOL(rproc_elf_load_segments); + +static const void * +find_table(struct device *dev, const struct firmware *fw) +{ + const void *shdr, *name_table_shdr; + int i; + const char *name_table; + struct resource_table *table = NULL; + const u8 *elf_data = (void *)fw->data; + u8 class = fw_elf_get_class(fw); + size_t fw_size = fw->size; + const void *ehdr = elf_data; + u16 shnum = elf_hdr_get_e_shnum(class, ehdr); + u32 elf_shdr_get_size = elf_size_of_shdr(class); + u16 shstrndx = elf_hdr_get_e_shstrndx(class, ehdr); + + /* look for the resource table and handle it */ + /* First, get the section header according to the elf class */ + shdr = elf_data + elf_hdr_get_e_shoff(class, ehdr); + /* Compute name table section header entry in shdr array */ + name_table_shdr = shdr + (shstrndx * elf_shdr_get_size); + /* Finally, compute the name table section address in elf */ + name_table = elf_data + elf_shdr_get_sh_offset(class, name_table_shdr); + + for (i = 0; i < shnum; i++, shdr += elf_shdr_get_size) { + u64 size = elf_shdr_get_sh_size(class, shdr); + u64 offset = elf_shdr_get_sh_offset(class, shdr); + u32 name = elf_shdr_get_sh_name(class, shdr); + + if (strcmp(name_table + name, ".resource_table")) + continue; + + table = (struct resource_table *)(elf_data + offset); + + /* make sure we have the entire table */ + if (offset + size > fw_size || offset + size < size) { + dev_err(dev, "resource table truncated\n"); + return NULL; + } + + /* make sure table has at least the header */ + if (sizeof(struct resource_table) > size) { + dev_err(dev, "header-less resource table\n"); + return NULL; + } + + /* we don't support any version beyond the first */ + if (table->ver != 1) { + dev_err(dev, "unsupported fw ver: %d\n", table->ver); + return NULL; + } + + /* make sure reserved bytes are zeroes */ + if (table->reserved[0] || table->reserved[1]) { + dev_err(dev, "non zero reserved bytes\n"); + return NULL; + } + + /* make sure the offsets array isn't truncated */ + if (struct_size(table, offset, table->num) > size) { + dev_err(dev, "resource table incomplete\n"); + return NULL; + } + + return shdr; + } + + return NULL; +} + +/** + * rproc_elf_load_rsc_table() - load the resource table + * @rproc: the rproc handle + * @fw: the ELF firmware image + * + * This function finds the resource table inside the remote processor's + * firmware, load it into the @cached_table and update @table_ptr. + * + * Return: 0 on success, negative errno on failure. + */ +int rproc_elf_load_rsc_table(struct rproc *rproc, const struct firmware *fw) +{ + const void *shdr; + struct device *dev = &rproc->dev; + struct resource_table *table = NULL; + const u8 *elf_data = fw->data; + size_t tablesz; + u8 class = fw_elf_get_class(fw); + u64 sh_offset; + + shdr = find_table(dev, fw); + if (!shdr) + return -EINVAL; + + sh_offset = elf_shdr_get_sh_offset(class, shdr); + table = (struct resource_table *)(elf_data + sh_offset); + tablesz = elf_shdr_get_sh_size(class, shdr); + + /* + * Create a copy of the resource table. When a virtio device starts + * and calls vring_new_virtqueue() the address of the allocated vring + * will be stored in the cached_table. Before the device is started, + * cached_table will be copied into device memory. 
+ */ + rproc->cached_table = kmemdup(table, tablesz, GFP_KERNEL); + if (!rproc->cached_table) + return -ENOMEM; + + rproc->table_ptr = rproc->cached_table; + rproc->table_sz = tablesz; + + return 0; +} +EXPORT_SYMBOL(rproc_elf_load_rsc_table); + +/** + * rproc_elf_find_loaded_rsc_table() - find the loaded resource table + * @rproc: the rproc handle + * @fw: the ELF firmware image + * + * This function finds the location of the loaded resource table. Don't + * call this function if the table wasn't loaded yet - it's a bug if you do. + * + * Returns the pointer to the resource table if it is found or NULL otherwise. + * If the table wasn't loaded yet the result is unspecified. + */ +struct resource_table *rproc_elf_find_loaded_rsc_table(struct rproc *rproc, + const struct firmware *fw) +{ + const void *shdr; + u64 sh_addr, sh_size; + u8 class = fw_elf_get_class(fw); + struct device *dev = &rproc->dev; + + shdr = find_table(&rproc->dev, fw); + if (!shdr) + return NULL; + + sh_addr = elf_shdr_get_sh_addr(class, shdr); + sh_size = elf_shdr_get_sh_size(class, shdr); + + if (!rproc_u64_fit_in_size_t(sh_size)) { + dev_err(dev, "size (%llx) does not fit in size_t type\n", + sh_size); + return NULL; + } + + return rproc_da_to_va(rproc, sh_addr, sh_size); +} +EXPORT_SYMBOL(rproc_elf_find_loaded_rsc_table); diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h new file mode 100644 index 000000000..c34002888 --- /dev/null +++ b/drivers/remoteproc/remoteproc_internal.h @@ -0,0 +1,189 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Remote processor framework + * + * Copyright (C) 2011 Texas Instruments, Inc. + * Copyright (C) 2011 Google, Inc. + * + * Ohad Ben-Cohen <ohad@wizery.com> + * Brian Swetland <swetland@google.com> + */ + +#ifndef REMOTEPROC_INTERNAL_H +#define REMOTEPROC_INTERNAL_H + +#include <linux/irqreturn.h> +#include <linux/firmware.h> + +struct rproc; + +struct rproc_debug_trace { + struct rproc *rproc; + struct dentry *tfile; + struct list_head node; + struct rproc_mem_entry trace_mem; +}; + +/* from remoteproc_core.c */ +void rproc_release(struct kref *kref); +irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int vq_id); +void rproc_vdev_release(struct kref *ref); +int rproc_of_parse_firmware(struct device *dev, int index, + const char **fw_name); + +/* from remoteproc_virtio.c */ +int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id); +int rproc_remove_virtio_dev(struct device *dev, void *data); + +/* from remoteproc_debugfs.c */ +void rproc_remove_trace_file(struct dentry *tfile); +struct dentry *rproc_create_trace_file(const char *name, struct rproc *rproc, + struct rproc_debug_trace *trace); +void rproc_delete_debug_dir(struct rproc *rproc); +void rproc_create_debug_dir(struct rproc *rproc); +void rproc_init_debugfs(void); +void rproc_exit_debugfs(void); + +/* from remoteproc_sysfs.c */ +extern struct class rproc_class; +int rproc_init_sysfs(void); +void rproc_exit_sysfs(void); + +/* from remoteproc_coredump.c */ +void rproc_coredump_cleanup(struct rproc *rproc); +void rproc_coredump(struct rproc *rproc); + +#ifdef CONFIG_REMOTEPROC_CDEV +void rproc_init_cdev(void); +void rproc_exit_cdev(void); +int rproc_char_device_add(struct rproc *rproc); +void rproc_char_device_remove(struct rproc *rproc); +#else +static inline void rproc_init_cdev(void) +{ +} + +static inline void rproc_exit_cdev(void) +{ +} + +/* + * The character device interface is an optional feature, if it is not enabled + * the function should not return an 
error. + */ +static inline int rproc_char_device_add(struct rproc *rproc) +{ + return 0; +} + +static inline void rproc_char_device_remove(struct rproc *rproc) +{ +} +#endif + +void rproc_free_vring(struct rproc_vring *rvring); +int rproc_alloc_vring(struct rproc_vdev *rvdev, int i); + +void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len); +phys_addr_t rproc_va_to_pa(void *cpu_addr); +int rproc_trigger_recovery(struct rproc *rproc); + +int rproc_elf_sanity_check(struct rproc *rproc, const struct firmware *fw); +u64 rproc_elf_get_boot_addr(struct rproc *rproc, const struct firmware *fw); +int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw); +int rproc_elf_load_rsc_table(struct rproc *rproc, const struct firmware *fw); +struct resource_table *rproc_elf_find_loaded_rsc_table(struct rproc *rproc, + const struct firmware *fw); +struct rproc_mem_entry * +rproc_find_carveout_by_name(struct rproc *rproc, const char *name, ...); + +static inline int rproc_prepare_device(struct rproc *rproc) +{ + if (rproc->ops->prepare) + return rproc->ops->prepare(rproc); + + return 0; +} + +static inline int rproc_unprepare_device(struct rproc *rproc) +{ + if (rproc->ops->unprepare) + return rproc->ops->unprepare(rproc); + + return 0; +} + +static inline int rproc_attach_device(struct rproc *rproc) +{ + if (rproc->ops->attach) + return rproc->ops->attach(rproc); + + return 0; +} + +static inline +int rproc_fw_sanity_check(struct rproc *rproc, const struct firmware *fw) +{ + if (rproc->ops->sanity_check) + return rproc->ops->sanity_check(rproc, fw); + + return 0; +} + +static inline +u64 rproc_get_boot_addr(struct rproc *rproc, const struct firmware *fw) +{ + if (rproc->ops->get_boot_addr) + return rproc->ops->get_boot_addr(rproc, fw); + + return 0; +} + +static inline +int rproc_load_segments(struct rproc *rproc, const struct firmware *fw) +{ + if (rproc->ops->load) + return rproc->ops->load(rproc, fw); + + return -EINVAL; +} + +static inline int rproc_parse_fw(struct rproc *rproc, const struct firmware *fw) +{ + if (rproc->ops->parse_fw) + return rproc->ops->parse_fw(rproc, fw); + + return 0; +} + +static inline +int rproc_handle_rsc(struct rproc *rproc, u32 rsc_type, void *rsc, int offset, + int avail) +{ + if (rproc->ops->handle_rsc) + return rproc->ops->handle_rsc(rproc, rsc_type, rsc, offset, + avail); + + return RSC_IGNORED; +} + +static inline +struct resource_table *rproc_find_loaded_rsc_table(struct rproc *rproc, + const struct firmware *fw) +{ + if (rproc->ops->find_loaded_rsc_table) + return rproc->ops->find_loaded_rsc_table(rproc, fw); + + return NULL; +} + +static inline +bool rproc_u64_fit_in_size_t(u64 val) +{ + if (sizeof(size_t) == sizeof(u64)) + return true; + + return (val <= (size_t) -1); +} + +#endif /* REMOTEPROC_INTERNAL_H */ diff --git a/drivers/remoteproc/remoteproc_sysfs.c b/drivers/remoteproc/remoteproc_sysfs.c new file mode 100644 index 000000000..d1cf7bf27 --- /dev/null +++ b/drivers/remoteproc/remoteproc_sysfs.c @@ -0,0 +1,293 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Remote Processor Framework + */ + +#include <linux/remoteproc.h> +#include <linux/slab.h> + +#include "remoteproc_internal.h" + +#define to_rproc(d) container_of(d, struct rproc, dev) + +static ssize_t recovery_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct rproc *rproc = to_rproc(dev); + + return sprintf(buf, "%s", rproc->recovery_disabled ? 
"disabled\n" : "enabled\n"); +} + +/* + * By writing to the 'recovery' sysfs entry, we control the behavior of the + * recovery mechanism dynamically. The default value of this entry is "enabled". + * + * The 'recovery' sysfs entry supports these commands: + * + * enabled: When enabled, the remote processor will be automatically + * recovered whenever it crashes. Moreover, if the remote + * processor crashes while recovery is disabled, it will + * be automatically recovered too as soon as recovery is enabled. + * + * disabled: When disabled, a remote processor will remain in a crashed + * state if it crashes. This is useful for debugging purposes; + * without it, debugging a crash is substantially harder. + * + * recover: This function will trigger an immediate recovery if the + * remote processor is in a crashed state, without changing + * or checking the recovery state (enabled/disabled). + * This is useful during debugging sessions, when one expects + * additional crashes to happen after enabling recovery. In this + * case, enabling recovery will make it hard to debug subsequent + * crashes, so it's recommended to keep recovery disabled, and + * instead use the "recover" command as needed. + */ +static ssize_t recovery_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct rproc *rproc = to_rproc(dev); + + if (sysfs_streq(buf, "enabled")) { + /* change the flag and begin the recovery process if needed */ + rproc->recovery_disabled = false; + rproc_trigger_recovery(rproc); + } else if (sysfs_streq(buf, "disabled")) { + rproc->recovery_disabled = true; + } else if (sysfs_streq(buf, "recover")) { + /* begin the recovery process without changing the flag */ + rproc_trigger_recovery(rproc); + } else { + return -EINVAL; + } + + return count; +} +static DEVICE_ATTR_RW(recovery); + +/* + * A coredump-configuration-to-string lookup table, for exposing a + * human readable configuration via sysfs. Always keep in sync with + * enum rproc_coredump_mechanism + */ +static const char * const rproc_coredump_str[] = { + [RPROC_COREDUMP_DISABLED] = "disabled", + [RPROC_COREDUMP_ENABLED] = "enabled", + [RPROC_COREDUMP_INLINE] = "inline", +}; + +/* Expose the current coredump configuration via debugfs */ +static ssize_t coredump_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct rproc *rproc = to_rproc(dev); + + return sprintf(buf, "%s\n", rproc_coredump_str[rproc->dump_conf]); +} + +/* + * By writing to the 'coredump' sysfs entry, we control the behavior of the + * coredump mechanism dynamically. The default value of this entry is "default". + * + * The 'coredump' sysfs entry supports these commands: + * + * disabled: This is the default coredump mechanism. Recovery will proceed + * without collecting any dump. + * + * default: When the remoteproc crashes the entire coredump will be + * copied to a separate buffer and exposed to userspace. + * + * inline: The coredump will not be copied to a separate buffer and the + * recovery process will have to wait until data is read by + * userspace. But this avoid usage of extra memory. 
+ */ +static ssize_t coredump_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct rproc *rproc = to_rproc(dev); + + if (rproc->state == RPROC_CRASHED) { + dev_err(&rproc->dev, "can't change coredump configuration\n"); + return -EBUSY; + } + + if (sysfs_streq(buf, "disabled")) { + rproc->dump_conf = RPROC_COREDUMP_DISABLED; + } else if (sysfs_streq(buf, "enabled")) { + rproc->dump_conf = RPROC_COREDUMP_ENABLED; + } else if (sysfs_streq(buf, "inline")) { + rproc->dump_conf = RPROC_COREDUMP_INLINE; + } else { + dev_err(&rproc->dev, "Invalid coredump configuration\n"); + return -EINVAL; + } + + return count; +} +static DEVICE_ATTR_RW(coredump); + +/* Expose the loaded / running firmware name via sysfs */ +static ssize_t firmware_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct rproc *rproc = to_rproc(dev); + const char *firmware = rproc->firmware; + + /* + * If the remote processor has been started by an external + * entity we have no idea of what image it is running. As such + * simply display a generic string rather then rproc->firmware. + * + * Here we rely on the autonomous flag because a remote processor + * may have been attached to and currently in a running state. + */ + if (rproc->autonomous) + firmware = "unknown"; + + return sprintf(buf, "%s\n", firmware); +} + +/* Change firmware name via sysfs */ +static ssize_t firmware_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct rproc *rproc = to_rproc(dev); + char *p; + int err, len = count; + + err = mutex_lock_interruptible(&rproc->lock); + if (err) { + dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, err); + return -EINVAL; + } + + if (rproc->state != RPROC_OFFLINE) { + dev_err(dev, "can't change firmware while running\n"); + err = -EBUSY; + goto out; + } + + len = strcspn(buf, "\n"); + if (!len) { + dev_err(dev, "can't provide a NULL firmware\n"); + err = -EINVAL; + goto out; + } + + p = kstrndup(buf, len, GFP_KERNEL); + if (!p) { + err = -ENOMEM; + goto out; + } + + kfree(rproc->firmware); + rproc->firmware = p; +out: + mutex_unlock(&rproc->lock); + + return err ? err : count; +} +static DEVICE_ATTR_RW(firmware); + +/* + * A state-to-string lookup table, for exposing a human readable state + * via sysfs. Always keep in sync with enum rproc_state + */ +static const char * const rproc_state_string[] = { + [RPROC_OFFLINE] = "offline", + [RPROC_SUSPENDED] = "suspended", + [RPROC_RUNNING] = "running", + [RPROC_CRASHED] = "crashed", + [RPROC_DELETED] = "deleted", + [RPROC_DETACHED] = "detached", + [RPROC_LAST] = "invalid", +}; + +/* Expose the state of the remote processor via sysfs */ +static ssize_t state_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct rproc *rproc = to_rproc(dev); + unsigned int state; + + state = rproc->state > RPROC_LAST ? 
RPROC_LAST : rproc->state; + return sprintf(buf, "%s\n", rproc_state_string[state]); +} + +/* Change remote processor state via sysfs */ +static ssize_t state_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct rproc *rproc = to_rproc(dev); + int ret = 0; + + if (sysfs_streq(buf, "start")) { + if (rproc->state == RPROC_RUNNING) + return -EBUSY; + + ret = rproc_boot(rproc); + if (ret) + dev_err(&rproc->dev, "Boot failed: %d\n", ret); + } else if (sysfs_streq(buf, "stop")) { + if (rproc->state != RPROC_RUNNING) + return -EINVAL; + + rproc_shutdown(rproc); + } else { + dev_err(&rproc->dev, "Unrecognised option: %s\n", buf); + ret = -EINVAL; + } + return ret ? ret : count; +} +static DEVICE_ATTR_RW(state); + +/* Expose the name of the remote processor via sysfs */ +static ssize_t name_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct rproc *rproc = to_rproc(dev); + + return sprintf(buf, "%s\n", rproc->name); +} +static DEVICE_ATTR_RO(name); + +static struct attribute *rproc_attrs[] = { + &dev_attr_coredump.attr, + &dev_attr_recovery.attr, + &dev_attr_firmware.attr, + &dev_attr_state.attr, + &dev_attr_name.attr, + NULL +}; + +static const struct attribute_group rproc_devgroup = { + .attrs = rproc_attrs +}; + +static const struct attribute_group *rproc_devgroups[] = { + &rproc_devgroup, + NULL +}; + +struct class rproc_class = { + .name = "remoteproc", + .dev_groups = rproc_devgroups, +}; + +int __init rproc_init_sysfs(void) +{ + /* create remoteproc device class for sysfs */ + int err = class_register(&rproc_class); + + if (err) + pr_err("remoteproc: unable to register class\n"); + return err; +} + +void __exit rproc_exit_sysfs(void) +{ + class_unregister(&rproc_class); +} diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c new file mode 100644 index 000000000..0cc617f76 --- /dev/null +++ b/drivers/remoteproc/remoteproc_virtio.c @@ -0,0 +1,442 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Remote processor messaging transport (OMAP platform-specific bits) + * + * Copyright (C) 2011 Texas Instruments, Inc. + * Copyright (C) 2011 Google, Inc. + * + * Ohad Ben-Cohen <ohad@wizery.com> + * Brian Swetland <swetland@google.com> + */ + +#include <linux/dma-map-ops.h> +#include <linux/export.h> +#include <linux/of_reserved_mem.h> +#include <linux/remoteproc.h> +#include <linux/virtio.h> +#include <linux/virtio_config.h> +#include <linux/virtio_ids.h> +#include <linux/virtio_ring.h> +#include <linux/err.h> +#include <linux/kref.h> +#include <linux/slab.h> + +#include "remoteproc_internal.h" + +/* kick the remote processor, and let it know which virtqueue to poke at */ +static bool rproc_virtio_notify(struct virtqueue *vq) +{ + struct rproc_vring *rvring = vq->priv; + struct rproc *rproc = rvring->rvdev->rproc; + int notifyid = rvring->notifyid; + + dev_dbg(&rproc->dev, "kicking vq index: %d\n", notifyid); + + rproc->ops->kick(rproc, notifyid); + return true; +} + +/** + * rproc_vq_interrupt() - tell remoteproc that a virtqueue is interrupted + * @rproc: handle to the remote processor + * @notifyid: index of the signalled virtqueue (unique per this @rproc) + * + * This function should be called by the platform-specific rproc driver, + * when the remote processor signals that a specific virtqueue has pending + * messages available. + * + * Returns IRQ_NONE if no message was found in the @notifyid virtqueue, + * and otherwise returns IRQ_HANDLED. 
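+ *
+ * A minimal sketch of such a driver-side handler (hypothetical mailbox
+ * helper, not upstream code):
+ *
+ *	static irqreturn_t my_mbox_isr(int irq, void *data)
+ *	{
+ *		struct rproc *rproc = data;
+ *
+ *		return rproc_vq_interrupt(rproc, my_read_notifyid());
+ *	}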
+ */ +irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int notifyid) +{ + struct rproc_vring *rvring; + + dev_dbg(&rproc->dev, "vq index %d is interrupted\n", notifyid); + + rvring = idr_find(&rproc->notifyids, notifyid); + if (!rvring || !rvring->vq) + return IRQ_NONE; + + return vring_interrupt(0, rvring->vq); +} +EXPORT_SYMBOL(rproc_vq_interrupt); + +static struct virtqueue *rp_find_vq(struct virtio_device *vdev, + unsigned int id, + void (*callback)(struct virtqueue *vq), + const char *name, bool ctx) +{ + struct rproc_vdev *rvdev = vdev_to_rvdev(vdev); + struct rproc *rproc = vdev_to_rproc(vdev); + struct device *dev = &rproc->dev; + struct rproc_mem_entry *mem; + struct rproc_vring *rvring; + struct fw_rsc_vdev *rsc; + struct virtqueue *vq; + void *addr; + int len, size; + + /* we're temporarily limited to two virtqueues per rvdev */ + if (id >= ARRAY_SIZE(rvdev->vring)) + return ERR_PTR(-EINVAL); + + if (!name) + return NULL; + + /* Search allocated memory region by name */ + mem = rproc_find_carveout_by_name(rproc, "vdev%dvring%d", rvdev->index, + id); + if (!mem || !mem->va) + return ERR_PTR(-ENOMEM); + + rvring = &rvdev->vring[id]; + addr = mem->va; + len = rvring->len; + + /* zero vring */ + size = vring_size(len, rvring->align); + memset(addr, 0, size); + + dev_dbg(dev, "vring%d: va %pK qsz %d notifyid %d\n", + id, addr, len, rvring->notifyid); + + /* + * Create the new vq, and tell virtio we're not interested in + * the 'weak' smp barriers, since we're talking with a real device. + */ + vq = vring_new_virtqueue(id, len, rvring->align, vdev, false, ctx, + addr, rproc_virtio_notify, callback, name); + if (!vq) { + dev_err(dev, "vring_new_virtqueue %s failed\n", name); + rproc_free_vring(rvring); + return ERR_PTR(-ENOMEM); + } + + rvring->vq = vq; + vq->priv = rvring; + + /* Update vring in resource table */ + rsc = (void *)rproc->table_ptr + rvdev->rsc_offset; + rsc->vring[id].da = mem->da; + + return vq; +} + +static void __rproc_virtio_del_vqs(struct virtio_device *vdev) +{ + struct virtqueue *vq, *n; + struct rproc_vring *rvring; + + list_for_each_entry_safe(vq, n, &vdev->vqs, list) { + rvring = vq->priv; + rvring->vq = NULL; + vring_del_virtqueue(vq); + } +} + +static void rproc_virtio_del_vqs(struct virtio_device *vdev) +{ + __rproc_virtio_del_vqs(vdev); +} + +static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char * const names[], + const bool * ctx, + struct irq_affinity *desc) +{ + int i, ret, queue_idx = 0; + + for (i = 0; i < nvqs; ++i) { + if (!names[i]) { + vqs[i] = NULL; + continue; + } + + vqs[i] = rp_find_vq(vdev, queue_idx++, callbacks[i], names[i], + ctx ? 
ctx[i] : false); + if (IS_ERR(vqs[i])) { + ret = PTR_ERR(vqs[i]); + goto error; + } + } + + return 0; + +error: + __rproc_virtio_del_vqs(vdev); + return ret; +} + +static u8 rproc_virtio_get_status(struct virtio_device *vdev) +{ + struct rproc_vdev *rvdev = vdev_to_rvdev(vdev); + struct fw_rsc_vdev *rsc; + + rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset; + + return rsc->status; +} + +static void rproc_virtio_set_status(struct virtio_device *vdev, u8 status) +{ + struct rproc_vdev *rvdev = vdev_to_rvdev(vdev); + struct fw_rsc_vdev *rsc; + + rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset; + + rsc->status = status; + dev_dbg(&vdev->dev, "status: %d\n", status); +} + +static void rproc_virtio_reset(struct virtio_device *vdev) +{ + struct rproc_vdev *rvdev = vdev_to_rvdev(vdev); + struct fw_rsc_vdev *rsc; + + rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset; + + rsc->status = 0; + dev_dbg(&vdev->dev, "reset !\n"); +} + +/* provide the vdev features as retrieved from the firmware */ +static u64 rproc_virtio_get_features(struct virtio_device *vdev) +{ + struct rproc_vdev *rvdev = vdev_to_rvdev(vdev); + struct fw_rsc_vdev *rsc; + + rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset; + + return rsc->dfeatures; +} + +static void rproc_transport_features(struct virtio_device *vdev) +{ + /* + * Packed ring isn't enabled on remoteproc for now, + * because remoteproc uses vring_new_virtqueue() which + * creates virtio rings on preallocated memory. + */ + __virtio_clear_bit(vdev, VIRTIO_F_RING_PACKED); +} + +static int rproc_virtio_finalize_features(struct virtio_device *vdev) +{ + struct rproc_vdev *rvdev = vdev_to_rvdev(vdev); + struct fw_rsc_vdev *rsc; + + rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset; + + /* Give virtio_ring a chance to accept features */ + vring_transport_features(vdev); + + /* Give virtio_rproc a chance to accept features. */ + rproc_transport_features(vdev); + + /* Make sure we don't have any features > 32 bits! */ + BUG_ON((u32)vdev->features != vdev->features); + + /* + * Remember the finalized features of our vdev, and provide it + * to the remote processor once it is powered on. 
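+ *
+ * Note that fw_rsc_vdev::gfeatures is a 32-bit field in the resource
+ * table, which is why the BUG_ON() above rejects feature bits beyond 31.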
+ */
+ rsc->gfeatures = vdev->features;
+
+ return 0;
+}
+
+static void rproc_virtio_get(struct virtio_device *vdev, unsigned int offset,
+ void *buf, unsigned int len)
+{
+ struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
+ struct fw_rsc_vdev *rsc;
+ void *cfg;
+
+ rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
+ cfg = &rsc->vring[rsc->num_of_vrings];
+
+ if (offset + len > rsc->config_len || offset + len < len) {
+ dev_err(&vdev->dev, "rproc_virtio_get: access out of bounds\n");
+ return;
+ }
+
+ memcpy(buf, cfg + offset, len);
+}
+
+static void rproc_virtio_set(struct virtio_device *vdev, unsigned int offset,
+ const void *buf, unsigned int len)
+{
+ struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
+ struct fw_rsc_vdev *rsc;
+ void *cfg;
+
+ rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
+ cfg = &rsc->vring[rsc->num_of_vrings];
+
+ if (offset + len > rsc->config_len || offset + len < len) {
+ dev_err(&vdev->dev, "rproc_virtio_set: access out of bounds\n");
+ return;
+ }
+
+ memcpy(cfg + offset, buf, len);
+}
+
+static const struct virtio_config_ops rproc_virtio_config_ops = {
+ .get_features = rproc_virtio_get_features,
+ .finalize_features = rproc_virtio_finalize_features,
+ .find_vqs = rproc_virtio_find_vqs,
+ .del_vqs = rproc_virtio_del_vqs,
+ .reset = rproc_virtio_reset,
+ .set_status = rproc_virtio_set_status,
+ .get_status = rproc_virtio_get_status,
+ .get = rproc_virtio_get,
+ .set = rproc_virtio_set,
+};
+
+/*
+ * This function is called whenever vdev is released, and is responsible
+ * for decrementing the remote processor's refcount which was taken when
+ * vdev was added.
+ *
+ * Never call this function directly; it will be called by the driver
+ * core when needed.
+ */
+static void rproc_virtio_dev_release(struct device *dev)
+{
+ struct virtio_device *vdev = dev_to_virtio(dev);
+ struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
+ struct rproc *rproc = vdev_to_rproc(vdev);
+
+ kfree(vdev);
+
+ kref_put(&rvdev->refcount, rproc_vdev_release);
+
+ put_device(&rproc->dev);
+}
+
+/**
+ * rproc_add_virtio_dev() - register an rproc-induced virtio device
+ * @rvdev: the remote vdev
+ * @id: the device type identification (used to match it with a driver).
+ *
+ * This function registers a virtio device. This vdev's parent is
+ * the rproc device.
+ *
+ * Returns 0 on success or an appropriate error value otherwise.
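+ *
+ * The rproc reference taken in this function (via get_device()) is only
+ * dropped from the vdev's release handler, rproc_virtio_dev_release(),
+ * once the last reference to the vdev itself is gone.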
+ */
+int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
+{
+ struct rproc *rproc = rvdev->rproc;
+ struct device *dev = &rvdev->dev;
+ struct virtio_device *vdev;
+ struct rproc_mem_entry *mem;
+ int ret;
+
+ if (rproc->ops->kick == NULL) {
+ ret = -EINVAL;
+ dev_err(dev, ".kick method not defined for %s\n", rproc->name);
+ goto out;
+ }
+
+ /* Try to find dedicated vdev buffer carveout */
+ mem = rproc_find_carveout_by_name(rproc, "vdev%dbuffer", rvdev->index);
+ if (mem) {
+ phys_addr_t pa;
+
+ if (mem->of_resm_idx != -1) {
+ struct device_node *np = rproc->dev.parent->of_node;
+
+ /* Associate reserved memory to vdev device */
+ ret = of_reserved_mem_device_init_by_idx(dev, np,
+ mem->of_resm_idx);
+ if (ret) {
+ dev_err(dev, "Can't associate reserved memory\n");
+ goto out;
+ }
+ } else {
+ if (mem->va) {
+ dev_warn(dev, "vdev %d buffer already mapped\n",
+ rvdev->index);
+ pa = rproc_va_to_pa(mem->va);
+ } else {
+ /* Use the dma address, as the carveout is not memory-mapped yet */
+ pa = (phys_addr_t)mem->dma;
+ }
+
+ /* Associate vdev buffer memory pool to vdev subdev */
+ ret = dma_declare_coherent_memory(dev, pa,
+ mem->da,
+ mem->len);
+ if (ret < 0) {
+ dev_err(dev, "Failed to associate buffer\n");
+ goto out;
+ }
+ }
+ } else {
+ struct device_node *np = rproc->dev.parent->of_node;
+
+ /*
+ * If we don't have a dedicated buffer, just attempt to re-assign
+ * the reserved memory from our parent. A default memory-region
+ * at index 0 from the parent's memory-regions is assigned for
+ * the rvdev dev to allocate from. Failure is non-critical and
+ * the allocations will fall back to global pools, so don't
+ * check return value either.
+ */
+ of_reserved_mem_device_init_by_idx(dev, np, 0);
+ }
+
+ /* Allocate virtio device */
+ vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
+ if (!vdev) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ vdev->id.device = id;
+ vdev->config = &rproc_virtio_config_ops;
+ vdev->dev.parent = dev;
+ vdev->dev.release = rproc_virtio_dev_release;
+
+ /*
+ * We're indirectly making a non-temporary copy of the rproc pointer
+ * here, because drivers probed with this vdev will indirectly
+ * access the wrapping rproc.
+ *
+ * Therefore we must increment the rproc refcount here, and decrement
+ * it _only_ when the vdev is released.
+ */
+ get_device(&rproc->dev);
+
+ /* Reference the vdev and vring allocations */
+ kref_get(&rvdev->refcount);
+
+ ret = register_virtio_device(vdev);
+ if (ret) {
+ put_device(&vdev->dev);
+ dev_err(dev, "failed to register vdev: %d\n", ret);
+ goto out;
+ }
+
+ dev_info(dev, "registered %s (type %d)\n", dev_name(&vdev->dev), id);
+
+out:
+ return ret;
+}
+
+/**
+ * rproc_remove_virtio_dev() - remove an rproc-induced virtio device
+ * @dev: the virtio device
+ * @data: must be null
+ *
+ * This function unregisters an existing virtio device.
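+ *
+ * Its (dev, data) prototype matches the device_for_each_child() callback
+ * type, so, for example, all vdevs hanging off an rvdev can be torn down
+ * with:
+ *
+ *   device_for_each_child(&rvdev->dev, NULL, rproc_remove_virtio_dev);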
+ */ +int rproc_remove_virtio_dev(struct device *dev, void *data) +{ + struct virtio_device *vdev = dev_to_virtio(dev); + + unregister_virtio_device(vdev); + return 0; +} diff --git a/drivers/remoteproc/st_remoteproc.c b/drivers/remoteproc/st_remoteproc.c new file mode 100644 index 000000000..e6bd3c7a9 --- /dev/null +++ b/drivers/remoteproc/st_remoteproc.c @@ -0,0 +1,481 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * ST's Remote Processor Control Driver + * + * Copyright (C) 2015 STMicroelectronics - All Rights Reserved + * + * Author: Ludovic Barre <ludovic.barre@st.com> + */ + +#include <linux/clk.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/mailbox_client.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/of_reserved_mem.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/remoteproc.h> +#include <linux/reset.h> + +#include "remoteproc_internal.h" + +#define ST_RPROC_VQ0 0 +#define ST_RPROC_VQ1 1 +#define ST_RPROC_MAX_VRING 2 + +#define MBOX_RX 0 +#define MBOX_TX 1 +#define MBOX_MAX 2 + +struct st_rproc_config { + bool sw_reset; + bool pwr_reset; + unsigned long bootaddr_mask; +}; + +struct st_rproc { + struct st_rproc_config *config; + struct reset_control *sw_reset; + struct reset_control *pwr_reset; + struct clk *clk; + u32 clk_rate; + struct regmap *boot_base; + u32 boot_offset; + struct mbox_chan *mbox_chan[ST_RPROC_MAX_VRING * MBOX_MAX]; + struct mbox_client mbox_client_vq0; + struct mbox_client mbox_client_vq1; +}; + +static void st_rproc_mbox_callback(struct device *dev, u32 msg) +{ + struct rproc *rproc = dev_get_drvdata(dev); + + if (rproc_vq_interrupt(rproc, msg) == IRQ_NONE) + dev_dbg(dev, "no message was found in vqid %d\n", msg); +} + +static +void st_rproc_mbox_callback_vq0(struct mbox_client *mbox_client, void *data) +{ + st_rproc_mbox_callback(mbox_client->dev, 0); +} + +static +void st_rproc_mbox_callback_vq1(struct mbox_client *mbox_client, void *data) +{ + st_rproc_mbox_callback(mbox_client->dev, 1); +} + +static void st_rproc_kick(struct rproc *rproc, int vqid) +{ + struct st_rproc *ddata = rproc->priv; + struct device *dev = rproc->dev.parent; + int ret; + + /* send the index of the triggered virtqueue in the mailbox payload */ + if (WARN_ON(vqid >= ST_RPROC_MAX_VRING)) + return; + + ret = mbox_send_message(ddata->mbox_chan[vqid * MBOX_MAX + MBOX_TX], + (void *)&vqid); + if (ret < 0) + dev_err(dev, "failed to send message via mbox: %d\n", ret); +} + +static int st_rproc_mem_alloc(struct rproc *rproc, + struct rproc_mem_entry *mem) +{ + struct device *dev = rproc->dev.parent; + void *va; + + va = ioremap_wc(mem->dma, mem->len); + if (!va) { + dev_err(dev, "Unable to map memory region: %pa+%zx\n", + &mem->dma, mem->len); + return -ENOMEM; + } + + /* Update memory entry va */ + mem->va = va; + + return 0; +} + +static int st_rproc_mem_release(struct rproc *rproc, + struct rproc_mem_entry *mem) +{ + iounmap(mem->va); + + return 0; +} + +static int st_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw) +{ + struct device *dev = rproc->dev.parent; + struct device_node *np = dev->of_node; + struct rproc_mem_entry *mem; + struct reserved_mem *rmem; + struct of_phandle_iterator it; + int index = 0; + + of_phandle_iterator_init(&it, np, "memory-region", NULL, 0); + while (of_phandle_iterator_next(&it) == 0) { + rmem = 
of_reserved_mem_lookup(it.node); + if (!rmem) { + of_node_put(it.node); + dev_err(dev, "unable to acquire memory-region\n"); + return -EINVAL; + } + + /* No need to map vdev buffer */ + if (strcmp(it.node->name, "vdev0buffer")) { + /* Register memory region */ + mem = rproc_mem_entry_init(dev, NULL, + (dma_addr_t)rmem->base, + rmem->size, rmem->base, + st_rproc_mem_alloc, + st_rproc_mem_release, + it.node->name); + } else { + /* Register reserved memory for vdev buffer allocation */ + mem = rproc_of_resm_mem_entry_init(dev, index, + rmem->size, + rmem->base, + it.node->name); + } + + if (!mem) { + of_node_put(it.node); + return -ENOMEM; + } + + rproc_add_carveout(rproc, mem); + index++; + } + + return rproc_elf_load_rsc_table(rproc, fw); +} + +static int st_rproc_start(struct rproc *rproc) +{ + struct st_rproc *ddata = rproc->priv; + int err; + + regmap_update_bits(ddata->boot_base, ddata->boot_offset, + ddata->config->bootaddr_mask, rproc->bootaddr); + + err = clk_enable(ddata->clk); + if (err) { + dev_err(&rproc->dev, "Failed to enable clock\n"); + return err; + } + + if (ddata->config->sw_reset) { + err = reset_control_deassert(ddata->sw_reset); + if (err) { + dev_err(&rproc->dev, "Failed to deassert S/W Reset\n"); + goto sw_reset_fail; + } + } + + if (ddata->config->pwr_reset) { + err = reset_control_deassert(ddata->pwr_reset); + if (err) { + dev_err(&rproc->dev, "Failed to deassert Power Reset\n"); + goto pwr_reset_fail; + } + } + + dev_info(&rproc->dev, "Started from 0x%llx\n", rproc->bootaddr); + + return 0; + + +pwr_reset_fail: + if (ddata->config->pwr_reset) + reset_control_assert(ddata->sw_reset); +sw_reset_fail: + clk_disable(ddata->clk); + + return err; +} + +static int st_rproc_stop(struct rproc *rproc) +{ + struct st_rproc *ddata = rproc->priv; + int sw_err = 0, pwr_err = 0; + + if (ddata->config->sw_reset) { + sw_err = reset_control_assert(ddata->sw_reset); + if (sw_err) + dev_err(&rproc->dev, "Failed to assert S/W Reset\n"); + } + + if (ddata->config->pwr_reset) { + pwr_err = reset_control_assert(ddata->pwr_reset); + if (pwr_err) + dev_err(&rproc->dev, "Failed to assert Power Reset\n"); + } + + clk_disable(ddata->clk); + + return sw_err ?: pwr_err; +} + +static const struct rproc_ops st_rproc_ops = { + .kick = st_rproc_kick, + .start = st_rproc_start, + .stop = st_rproc_stop, + .parse_fw = st_rproc_parse_fw, + .load = rproc_elf_load_segments, + .find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table, + .sanity_check = rproc_elf_sanity_check, + .get_boot_addr = rproc_elf_get_boot_addr, +}; + +/* + * Fetch state of the processor: 0 is off, 1 is on. 
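+ * -EINVAL is returned if either reset line cannot be queried.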
+ */ +static int st_rproc_state(struct platform_device *pdev) +{ + struct rproc *rproc = platform_get_drvdata(pdev); + struct st_rproc *ddata = rproc->priv; + int reset_sw = 0, reset_pwr = 0; + + if (ddata->config->sw_reset) + reset_sw = reset_control_status(ddata->sw_reset); + + if (ddata->config->pwr_reset) + reset_pwr = reset_control_status(ddata->pwr_reset); + + if (reset_sw < 0 || reset_pwr < 0) + return -EINVAL; + + return !reset_sw && !reset_pwr; +} + +static const struct st_rproc_config st40_rproc_cfg = { + .sw_reset = true, + .pwr_reset = true, + .bootaddr_mask = GENMASK(28, 1), +}; + +static const struct st_rproc_config st231_rproc_cfg = { + .sw_reset = true, + .pwr_reset = false, + .bootaddr_mask = GENMASK(31, 6), +}; + +static const struct of_device_id st_rproc_match[] = { + { .compatible = "st,st40-rproc", .data = &st40_rproc_cfg }, + { .compatible = "st,st231-rproc", .data = &st231_rproc_cfg }, + {}, +}; +MODULE_DEVICE_TABLE(of, st_rproc_match); + +static int st_rproc_parse_dt(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct rproc *rproc = platform_get_drvdata(pdev); + struct st_rproc *ddata = rproc->priv; + struct device_node *np = dev->of_node; + int err; + + if (ddata->config->sw_reset) { + ddata->sw_reset = devm_reset_control_get_exclusive(dev, + "sw_reset"); + if (IS_ERR(ddata->sw_reset)) { + dev_err(dev, "Failed to get S/W Reset\n"); + return PTR_ERR(ddata->sw_reset); + } + } + + if (ddata->config->pwr_reset) { + ddata->pwr_reset = devm_reset_control_get_exclusive(dev, + "pwr_reset"); + if (IS_ERR(ddata->pwr_reset)) { + dev_err(dev, "Failed to get Power Reset\n"); + return PTR_ERR(ddata->pwr_reset); + } + } + + ddata->clk = devm_clk_get(dev, NULL); + if (IS_ERR(ddata->clk)) { + dev_err(dev, "Failed to get clock\n"); + return PTR_ERR(ddata->clk); + } + + err = of_property_read_u32(np, "clock-frequency", &ddata->clk_rate); + if (err) { + dev_err(dev, "failed to get clock frequency\n"); + return err; + } + + ddata->boot_base = syscon_regmap_lookup_by_phandle(np, "st,syscfg"); + if (IS_ERR(ddata->boot_base)) { + dev_err(dev, "Boot base not found\n"); + return PTR_ERR(ddata->boot_base); + } + + err = of_property_read_u32_index(np, "st,syscfg", 1, + &ddata->boot_offset); + if (err) { + dev_err(dev, "Boot offset not found\n"); + return -EINVAL; + } + + err = clk_prepare(ddata->clk); + if (err) + dev_err(dev, "failed to get clock\n"); + + return err; +} + +static int st_rproc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + const struct of_device_id *match; + struct st_rproc *ddata; + struct device_node *np = dev->of_node; + struct rproc *rproc; + struct mbox_chan *chan; + int enabled; + int ret, i; + + match = of_match_device(st_rproc_match, dev); + if (!match || !match->data) { + dev_err(dev, "No device match found\n"); + return -ENODEV; + } + + rproc = rproc_alloc(dev, np->name, &st_rproc_ops, NULL, sizeof(*ddata)); + if (!rproc) + return -ENOMEM; + + rproc->has_iommu = false; + ddata = rproc->priv; + ddata->config = (struct st_rproc_config *)match->data; + + platform_set_drvdata(pdev, rproc); + + ret = st_rproc_parse_dt(pdev); + if (ret) + goto free_rproc; + + enabled = st_rproc_state(pdev); + if (enabled < 0) { + ret = enabled; + goto free_clk; + } + + if (enabled) { + atomic_inc(&rproc->power); + rproc->state = RPROC_RUNNING; + } else { + clk_set_rate(ddata->clk, ddata->clk_rate); + } + + if (of_get_property(np, "mbox-names", NULL)) { + ddata->mbox_client_vq0.dev = dev; + ddata->mbox_client_vq0.tx_done = NULL; + 
ddata->mbox_client_vq0.tx_block = false; + ddata->mbox_client_vq0.knows_txdone = false; + ddata->mbox_client_vq0.rx_callback = st_rproc_mbox_callback_vq0; + + ddata->mbox_client_vq1.dev = dev; + ddata->mbox_client_vq1.tx_done = NULL; + ddata->mbox_client_vq1.tx_block = false; + ddata->mbox_client_vq1.knows_txdone = false; + ddata->mbox_client_vq1.rx_callback = st_rproc_mbox_callback_vq1; + + /* + * To control a co-processor without IPC mechanism. + * This driver can be used without mbox and rpmsg. + */ + chan = mbox_request_channel_byname(&ddata->mbox_client_vq0, "vq0_rx"); + if (IS_ERR(chan)) { + dev_err(&rproc->dev, "failed to request mbox chan 0\n"); + ret = PTR_ERR(chan); + goto free_clk; + } + ddata->mbox_chan[ST_RPROC_VQ0 * MBOX_MAX + MBOX_RX] = chan; + + chan = mbox_request_channel_byname(&ddata->mbox_client_vq0, "vq0_tx"); + if (IS_ERR(chan)) { + dev_err(&rproc->dev, "failed to request mbox chan 0\n"); + ret = PTR_ERR(chan); + goto free_mbox; + } + ddata->mbox_chan[ST_RPROC_VQ0 * MBOX_MAX + MBOX_TX] = chan; + + chan = mbox_request_channel_byname(&ddata->mbox_client_vq1, "vq1_rx"); + if (IS_ERR(chan)) { + dev_err(&rproc->dev, "failed to request mbox chan 1\n"); + ret = PTR_ERR(chan); + goto free_mbox; + } + ddata->mbox_chan[ST_RPROC_VQ1 * MBOX_MAX + MBOX_RX] = chan; + + chan = mbox_request_channel_byname(&ddata->mbox_client_vq1, "vq1_tx"); + if (IS_ERR(chan)) { + dev_err(&rproc->dev, "failed to request mbox chan 1\n"); + ret = PTR_ERR(chan); + goto free_mbox; + } + ddata->mbox_chan[ST_RPROC_VQ1 * MBOX_MAX + MBOX_TX] = chan; + } + + ret = rproc_add(rproc); + if (ret) + goto free_mbox; + + return 0; + +free_mbox: + for (i = 0; i < ST_RPROC_MAX_VRING * MBOX_MAX; i++) + mbox_free_channel(ddata->mbox_chan[i]); +free_clk: + clk_unprepare(ddata->clk); +free_rproc: + rproc_free(rproc); + return ret; +} + +static int st_rproc_remove(struct platform_device *pdev) +{ + struct rproc *rproc = platform_get_drvdata(pdev); + struct st_rproc *ddata = rproc->priv; + int i; + + rproc_del(rproc); + + clk_disable_unprepare(ddata->clk); + + for (i = 0; i < ST_RPROC_MAX_VRING * MBOX_MAX; i++) + mbox_free_channel(ddata->mbox_chan[i]); + + rproc_free(rproc); + + return 0; +} + +static struct platform_driver st_rproc_driver = { + .probe = st_rproc_probe, + .remove = st_rproc_remove, + .driver = { + .name = "st-rproc", + .of_match_table = of_match_ptr(st_rproc_match), + }, +}; +module_platform_driver(st_rproc_driver); + +MODULE_DESCRIPTION("ST Remote Processor Control Driver"); +MODULE_AUTHOR("Ludovic Barre <ludovic.barre@st.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/remoteproc/st_slim_rproc.c b/drivers/remoteproc/st_slim_rproc.c new file mode 100644 index 000000000..09bcb4d8b --- /dev/null +++ b/drivers/remoteproc/st_slim_rproc.c @@ -0,0 +1,335 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * SLIM core rproc driver + * + * Copyright (C) 2016 STMicroelectronics + * + * Author: Peter Griffin <peter.griffin@linaro.org> + */ + +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/remoteproc.h> +#include <linux/remoteproc/st_slim_rproc.h> +#include "remoteproc_internal.h" + +/* SLIM core registers */ +#define SLIM_ID_OFST 0x0 +#define SLIM_VER_OFST 0x4 + +#define SLIM_EN_OFST 0x8 +#define SLIM_EN_RUN BIT(0) + +#define SLIM_CLK_GATE_OFST 0xC +#define SLIM_CLK_GATE_DIS BIT(0) +#define SLIM_CLK_GATE_RESET BIT(2) + +#define SLIM_SLIM_PC_OFST 
0x20 + +/* DMEM registers */ +#define SLIM_REV_ID_OFST 0x0 +#define SLIM_REV_ID_MIN_MASK GENMASK(15, 8) +#define SLIM_REV_ID_MIN(id) ((id & SLIM_REV_ID_MIN_MASK) >> 8) +#define SLIM_REV_ID_MAJ_MASK GENMASK(23, 16) +#define SLIM_REV_ID_MAJ(id) ((id & SLIM_REV_ID_MAJ_MASK) >> 16) + + +/* peripherals registers */ +#define SLIM_STBUS_SYNC_OFST 0xF88 +#define SLIM_STBUS_SYNC_DIS BIT(0) + +#define SLIM_INT_SET_OFST 0xFD4 +#define SLIM_INT_CLR_OFST 0xFD8 +#define SLIM_INT_MASK_OFST 0xFDC + +#define SLIM_CMD_CLR_OFST 0xFC8 +#define SLIM_CMD_MASK_OFST 0xFCC + +static const char *mem_names[ST_SLIM_MEM_MAX] = { + [ST_SLIM_DMEM] = "dmem", + [ST_SLIM_IMEM] = "imem", +}; + +static int slim_clk_get(struct st_slim_rproc *slim_rproc, struct device *dev) +{ + int clk, err; + + for (clk = 0; clk < ST_SLIM_MAX_CLK; clk++) { + slim_rproc->clks[clk] = of_clk_get(dev->of_node, clk); + if (IS_ERR(slim_rproc->clks[clk])) { + err = PTR_ERR(slim_rproc->clks[clk]); + if (err == -EPROBE_DEFER) + goto err_put_clks; + slim_rproc->clks[clk] = NULL; + break; + } + } + + return 0; + +err_put_clks: + while (--clk >= 0) + clk_put(slim_rproc->clks[clk]); + + return err; +} + +static void slim_clk_disable(struct st_slim_rproc *slim_rproc) +{ + int clk; + + for (clk = 0; clk < ST_SLIM_MAX_CLK && slim_rproc->clks[clk]; clk++) + clk_disable_unprepare(slim_rproc->clks[clk]); +} + +static int slim_clk_enable(struct st_slim_rproc *slim_rproc) +{ + int clk, ret; + + for (clk = 0; clk < ST_SLIM_MAX_CLK && slim_rproc->clks[clk]; clk++) { + ret = clk_prepare_enable(slim_rproc->clks[clk]); + if (ret) + goto err_disable_clks; + } + + return 0; + +err_disable_clks: + while (--clk >= 0) + clk_disable_unprepare(slim_rproc->clks[clk]); + + return ret; +} + +/* + * Remoteproc slim specific device handlers + */ +static int slim_rproc_start(struct rproc *rproc) +{ + struct device *dev = &rproc->dev; + struct st_slim_rproc *slim_rproc = rproc->priv; + unsigned long hw_id, hw_ver, fw_rev; + u32 val; + + /* disable CPU pipeline clock & reset CPU pipeline */ + val = SLIM_CLK_GATE_DIS | SLIM_CLK_GATE_RESET; + writel(val, slim_rproc->slimcore + SLIM_CLK_GATE_OFST); + + /* disable SLIM core STBus sync */ + writel(SLIM_STBUS_SYNC_DIS, slim_rproc->peri + SLIM_STBUS_SYNC_OFST); + + /* enable cpu pipeline clock */ + writel(!SLIM_CLK_GATE_DIS, + slim_rproc->slimcore + SLIM_CLK_GATE_OFST); + + /* clear int & cmd mailbox */ + writel(~0U, slim_rproc->peri + SLIM_INT_CLR_OFST); + writel(~0U, slim_rproc->peri + SLIM_CMD_CLR_OFST); + + /* enable all channels cmd & int */ + writel(~0U, slim_rproc->peri + SLIM_INT_MASK_OFST); + writel(~0U, slim_rproc->peri + SLIM_CMD_MASK_OFST); + + /* enable cpu */ + writel(SLIM_EN_RUN, slim_rproc->slimcore + SLIM_EN_OFST); + + hw_id = readl_relaxed(slim_rproc->slimcore + SLIM_ID_OFST); + hw_ver = readl_relaxed(slim_rproc->slimcore + SLIM_VER_OFST); + + fw_rev = readl(slim_rproc->mem[ST_SLIM_DMEM].cpu_addr + + SLIM_REV_ID_OFST); + + dev_info(dev, "fw rev:%ld.%ld on SLIM %ld.%ld\n", + SLIM_REV_ID_MAJ(fw_rev), SLIM_REV_ID_MIN(fw_rev), + hw_id, hw_ver); + + return 0; +} + +static int slim_rproc_stop(struct rproc *rproc) +{ + struct st_slim_rproc *slim_rproc = rproc->priv; + u32 val; + + /* mask all (cmd & int) channels */ + writel(0UL, slim_rproc->peri + SLIM_INT_MASK_OFST); + writel(0UL, slim_rproc->peri + SLIM_CMD_MASK_OFST); + + /* disable cpu pipeline clock */ + writel(SLIM_CLK_GATE_DIS, slim_rproc->slimcore + SLIM_CLK_GATE_OFST); + + writel(!SLIM_EN_RUN, slim_rproc->slimcore + SLIM_EN_OFST); + + val = readl(slim_rproc->slimcore 
+ SLIM_EN_OFST); + if (val & SLIM_EN_RUN) + dev_warn(&rproc->dev, "Failed to disable SLIM"); + + dev_dbg(&rproc->dev, "slim stopped\n"); + + return 0; +} + +static void *slim_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len) +{ + struct st_slim_rproc *slim_rproc = rproc->priv; + void *va = NULL; + int i; + + for (i = 0; i < ST_SLIM_MEM_MAX; i++) { + if (da != slim_rproc->mem[i].bus_addr) + continue; + + if (len <= slim_rproc->mem[i].size) { + /* __force to make sparse happy with type conversion */ + va = (__force void *)slim_rproc->mem[i].cpu_addr; + break; + } + } + + dev_dbg(&rproc->dev, "da = 0x%llx len = 0x%zx va = 0x%pK\n", + da, len, va); + + return va; +} + +static const struct rproc_ops slim_rproc_ops = { + .start = slim_rproc_start, + .stop = slim_rproc_stop, + .da_to_va = slim_rproc_da_to_va, + .get_boot_addr = rproc_elf_get_boot_addr, + .load = rproc_elf_load_segments, + .sanity_check = rproc_elf_sanity_check, +}; + +/** + * st_slim_rproc_alloc() - allocate and initialise slim rproc + * @pdev: Pointer to the platform_device struct + * @fw_name: Name of firmware for rproc to use + * + * Function for allocating and initialising a slim rproc for use by + * device drivers whose IP is based around the SLIM core. It + * obtains and enables any clocks required by the SLIM core and also + * ioremaps the various IO. + * + * Returns st_slim_rproc pointer or PTR_ERR() on error. + */ + +struct st_slim_rproc *st_slim_rproc_alloc(struct platform_device *pdev, + char *fw_name) +{ + struct device *dev = &pdev->dev; + struct st_slim_rproc *slim_rproc; + struct device_node *np = dev->of_node; + struct rproc *rproc; + struct resource *res; + int err, i; + + if (!fw_name) + return ERR_PTR(-EINVAL); + + if (!of_device_is_compatible(np, "st,slim-rproc")) + return ERR_PTR(-EINVAL); + + rproc = rproc_alloc(dev, np->name, &slim_rproc_ops, + fw_name, sizeof(*slim_rproc)); + if (!rproc) + return ERR_PTR(-ENOMEM); + + rproc->has_iommu = false; + + slim_rproc = rproc->priv; + slim_rproc->rproc = rproc; + + /* get imem and dmem */ + for (i = 0; i < ARRAY_SIZE(mem_names); i++) { + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + mem_names[i]); + + slim_rproc->mem[i].cpu_addr = devm_ioremap_resource(dev, res); + if (IS_ERR(slim_rproc->mem[i].cpu_addr)) { + dev_err(&pdev->dev, "devm_ioremap_resource failed\n"); + err = PTR_ERR(slim_rproc->mem[i].cpu_addr); + goto err; + } + slim_rproc->mem[i].bus_addr = res->start; + slim_rproc->mem[i].size = resource_size(res); + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "slimcore"); + slim_rproc->slimcore = devm_ioremap_resource(dev, res); + if (IS_ERR(slim_rproc->slimcore)) { + dev_err(&pdev->dev, "failed to ioremap slimcore IO\n"); + err = PTR_ERR(slim_rproc->slimcore); + goto err; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "peripherals"); + slim_rproc->peri = devm_ioremap_resource(dev, res); + if (IS_ERR(slim_rproc->peri)) { + dev_err(&pdev->dev, "failed to ioremap peripherals IO\n"); + err = PTR_ERR(slim_rproc->peri); + goto err; + } + + err = slim_clk_get(slim_rproc, dev); + if (err) + goto err; + + err = slim_clk_enable(slim_rproc); + if (err) { + dev_err(dev, "Failed to enable clocks\n"); + goto err_clk_put; + } + + /* Register as a remoteproc device */ + err = rproc_add(rproc); + if (err) { + dev_err(dev, "registration of slim remoteproc failed\n"); + goto err_clk_dis; + } + + return slim_rproc; + +err_clk_dis: + slim_clk_disable(slim_rproc); +err_clk_put: + for (i = 0; i < ST_SLIM_MAX_CLK && 
slim_rproc->clks[i]; i++) + clk_put(slim_rproc->clks[i]); +err: + rproc_free(rproc); + return ERR_PTR(err); +} +EXPORT_SYMBOL(st_slim_rproc_alloc); + +/** + * st_slim_rproc_put() - put slim rproc resources + * @slim_rproc: Pointer to the st_slim_rproc struct + * + * Function for calling respective _put() functions on slim_rproc resources. + * + */ +void st_slim_rproc_put(struct st_slim_rproc *slim_rproc) +{ + int clk; + + if (!slim_rproc) + return; + + slim_clk_disable(slim_rproc); + + for (clk = 0; clk < ST_SLIM_MAX_CLK && slim_rproc->clks[clk]; clk++) + clk_put(slim_rproc->clks[clk]); + + rproc_del(slim_rproc->rproc); + rproc_free(slim_rproc->rproc); +} +EXPORT_SYMBOL(st_slim_rproc_put); + +MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>"); +MODULE_DESCRIPTION("STMicroelectronics SLIM core rproc driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c new file mode 100644 index 000000000..df784fec1 --- /dev/null +++ b/drivers/remoteproc/stm32_rproc.c @@ -0,0 +1,918 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) STMicroelectronics 2018 - All Rights Reserved + * Authors: Ludovic Barre <ludovic.barre@st.com> for STMicroelectronics. + * Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics. + */ + +#include <linux/arm-smccc.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/mailbox_client.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/of_reserved_mem.h> +#include <linux/pm_wakeirq.h> +#include <linux/regmap.h> +#include <linux/remoteproc.h> +#include <linux/reset.h> +#include <linux/slab.h> +#include <linux/workqueue.h> + +#include "remoteproc_internal.h" + +#define HOLD_BOOT 0 +#define RELEASE_BOOT 1 + +#define MBOX_NB_VQ 2 +#define MBOX_NB_MBX 3 + +#define STM32_SMC_RCC 0x82001000 +#define STM32_SMC_REG_WRITE 0x1 + +#define STM32_MBX_VQ0 "vq0" +#define STM32_MBX_VQ0_ID 0 +#define STM32_MBX_VQ1 "vq1" +#define STM32_MBX_VQ1_ID 1 +#define STM32_MBX_SHUTDOWN "shutdown" + +#define RSC_TBL_SIZE 1024 + +#define M4_STATE_OFF 0 +#define M4_STATE_INI 1 +#define M4_STATE_CRUN 2 +#define M4_STATE_CSTOP 3 +#define M4_STATE_STANDBY 4 +#define M4_STATE_CRASH 5 + +struct stm32_syscon { + struct regmap *map; + u32 reg; + u32 mask; +}; + +struct stm32_rproc_mem { + char name[20]; + void __iomem *cpu_addr; + phys_addr_t bus_addr; + u32 dev_addr; + size_t size; +}; + +struct stm32_rproc_mem_ranges { + u32 dev_addr; + u32 bus_addr; + u32 size; +}; + +struct stm32_mbox { + const unsigned char name[10]; + struct mbox_chan *chan; + struct mbox_client client; + struct work_struct vq_work; + int vq_id; +}; + +struct stm32_rproc { + struct reset_control *rst; + struct stm32_syscon hold_boot; + struct stm32_syscon pdds; + struct stm32_syscon m4_state; + struct stm32_syscon rsctbl; + int wdg_irq; + u32 nb_rmems; + struct stm32_rproc_mem *rmems; + struct stm32_mbox mb[MBOX_NB_MBX]; + struct workqueue_struct *workqueue; + bool secured_soc; + void __iomem *rsc_va; +}; + +static int stm32_rproc_pa_to_da(struct rproc *rproc, phys_addr_t pa, u64 *da) +{ + unsigned int i; + struct stm32_rproc *ddata = rproc->priv; + struct stm32_rproc_mem *p_mem; + + for (i = 0; i < ddata->nb_rmems; i++) { + p_mem = &ddata->rmems[i]; + + if (pa < p_mem->bus_addr || + pa >= p_mem->bus_addr + p_mem->size) + continue; + *da = pa - p_mem->bus_addr + p_mem->dev_addr; + dev_dbg(rproc->dev.parent, "pa %pa to da 
%llx\n", &pa, *da); + return 0; + } + + return -EINVAL; +} + +static int stm32_rproc_mem_alloc(struct rproc *rproc, + struct rproc_mem_entry *mem) +{ + struct device *dev = rproc->dev.parent; + void *va; + + dev_dbg(dev, "map memory: %pa+%x\n", &mem->dma, mem->len); + va = ioremap_wc(mem->dma, mem->len); + if (IS_ERR_OR_NULL(va)) { + dev_err(dev, "Unable to map memory region: %pa+%x\n", + &mem->dma, mem->len); + return -ENOMEM; + } + + /* Update memory entry va */ + mem->va = va; + + return 0; +} + +static int stm32_rproc_mem_release(struct rproc *rproc, + struct rproc_mem_entry *mem) +{ + dev_dbg(rproc->dev.parent, "unmap memory: %pa\n", &mem->dma); + iounmap(mem->va); + + return 0; +} + +static int stm32_rproc_of_memory_translations(struct platform_device *pdev, + struct stm32_rproc *ddata) +{ + struct device *parent, *dev = &pdev->dev; + struct device_node *np; + struct stm32_rproc_mem *p_mems; + struct stm32_rproc_mem_ranges *mem_range; + int cnt, array_size, i, ret = 0; + + parent = dev->parent; + np = parent->of_node; + + cnt = of_property_count_elems_of_size(np, "dma-ranges", + sizeof(*mem_range)); + if (cnt <= 0) { + dev_err(dev, "%s: dma-ranges property not defined\n", __func__); + return -EINVAL; + } + + p_mems = devm_kcalloc(dev, cnt, sizeof(*p_mems), GFP_KERNEL); + if (!p_mems) + return -ENOMEM; + mem_range = kcalloc(cnt, sizeof(*mem_range), GFP_KERNEL); + if (!mem_range) + return -ENOMEM; + + array_size = cnt * sizeof(struct stm32_rproc_mem_ranges) / sizeof(u32); + + ret = of_property_read_u32_array(np, "dma-ranges", + (u32 *)mem_range, array_size); + if (ret) { + dev_err(dev, "error while get dma-ranges property: %x\n", ret); + goto free_mem; + } + + for (i = 0; i < cnt; i++) { + p_mems[i].bus_addr = mem_range[i].bus_addr; + p_mems[i].dev_addr = mem_range[i].dev_addr; + p_mems[i].size = mem_range[i].size; + + dev_dbg(dev, "memory range[%i]: da %#x, pa %pa, size %#zx:\n", + i, p_mems[i].dev_addr, &p_mems[i].bus_addr, + p_mems[i].size); + } + + ddata->rmems = p_mems; + ddata->nb_rmems = cnt; + +free_mem: + kfree(mem_range); + return ret; +} + +static int stm32_rproc_mbox_idx(struct rproc *rproc, const unsigned char *name) +{ + struct stm32_rproc *ddata = rproc->priv; + int i; + + for (i = 0; i < ARRAY_SIZE(ddata->mb); i++) { + if (!strncmp(ddata->mb[i].name, name, strlen(name))) + return i; + } + dev_err(&rproc->dev, "mailbox %s not found\n", name); + + return -EINVAL; +} + +static int stm32_rproc_elf_load_rsc_table(struct rproc *rproc, + const struct firmware *fw) +{ + if (rproc_elf_load_rsc_table(rproc, fw)) + dev_warn(&rproc->dev, "no resource table found for this firmware\n"); + + return 0; +} + +static int stm32_rproc_parse_memory_regions(struct rproc *rproc) +{ + struct device *dev = rproc->dev.parent; + struct device_node *np = dev->of_node; + struct of_phandle_iterator it; + struct rproc_mem_entry *mem; + struct reserved_mem *rmem; + u64 da; + int index = 0; + + /* Register associated reserved memory regions */ + of_phandle_iterator_init(&it, np, "memory-region", NULL, 0); + while (of_phandle_iterator_next(&it) == 0) { + rmem = of_reserved_mem_lookup(it.node); + if (!rmem) { + of_node_put(it.node); + dev_err(dev, "unable to acquire memory-region\n"); + return -EINVAL; + } + + if (stm32_rproc_pa_to_da(rproc, rmem->base, &da) < 0) { + of_node_put(it.node); + dev_err(dev, "memory region not valid %pa\n", + &rmem->base); + return -EINVAL; + } + + /* No need to map vdev buffer */ + if (strcmp(it.node->name, "vdev0buffer")) { + /* Register memory region */ + mem = 
rproc_mem_entry_init(dev, NULL, + (dma_addr_t)rmem->base, + rmem->size, da, + stm32_rproc_mem_alloc, + stm32_rproc_mem_release, + it.node->name); + + if (mem) + rproc_coredump_add_segment(rproc, da, + rmem->size); + } else { + /* Register reserved memory for vdev buffer alloc */ + mem = rproc_of_resm_mem_entry_init(dev, index, + rmem->size, + rmem->base, + it.node->name); + } + + if (!mem) { + of_node_put(it.node); + return -ENOMEM; + } + + rproc_add_carveout(rproc, mem); + index++; + } + + return 0; +} + +static int stm32_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw) +{ + int ret = stm32_rproc_parse_memory_regions(rproc); + + if (ret) + return ret; + + return stm32_rproc_elf_load_rsc_table(rproc, fw); +} + +static irqreturn_t stm32_rproc_wdg(int irq, void *data) +{ + struct platform_device *pdev = data; + struct rproc *rproc = platform_get_drvdata(pdev); + + rproc_report_crash(rproc, RPROC_WATCHDOG); + + return IRQ_HANDLED; +} + +static void stm32_rproc_mb_vq_work(struct work_struct *work) +{ + struct stm32_mbox *mb = container_of(work, struct stm32_mbox, vq_work); + struct rproc *rproc = dev_get_drvdata(mb->client.dev); + + mutex_lock(&rproc->lock); + + if (rproc->state != RPROC_RUNNING) + goto unlock_mutex; + + if (rproc_vq_interrupt(rproc, mb->vq_id) == IRQ_NONE) + dev_dbg(&rproc->dev, "no message found in vq%d\n", mb->vq_id); + +unlock_mutex: + mutex_unlock(&rproc->lock); +} + +static void stm32_rproc_mb_callback(struct mbox_client *cl, void *data) +{ + struct rproc *rproc = dev_get_drvdata(cl->dev); + struct stm32_mbox *mb = container_of(cl, struct stm32_mbox, client); + struct stm32_rproc *ddata = rproc->priv; + + queue_work(ddata->workqueue, &mb->vq_work); +} + +static void stm32_rproc_free_mbox(struct rproc *rproc) +{ + struct stm32_rproc *ddata = rproc->priv; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(ddata->mb); i++) { + if (ddata->mb[i].chan) + mbox_free_channel(ddata->mb[i].chan); + ddata->mb[i].chan = NULL; + } +} + +static const struct stm32_mbox stm32_rproc_mbox[MBOX_NB_MBX] = { + { + .name = STM32_MBX_VQ0, + .vq_id = STM32_MBX_VQ0_ID, + .client = { + .rx_callback = stm32_rproc_mb_callback, + .tx_block = false, + }, + }, + { + .name = STM32_MBX_VQ1, + .vq_id = STM32_MBX_VQ1_ID, + .client = { + .rx_callback = stm32_rproc_mb_callback, + .tx_block = false, + }, + }, + { + .name = STM32_MBX_SHUTDOWN, + .vq_id = -1, + .client = { + .tx_block = true, + .tx_done = NULL, + .tx_tout = 500, /* 500 ms time out */ + }, + } +}; + +static int stm32_rproc_request_mbox(struct rproc *rproc) +{ + struct stm32_rproc *ddata = rproc->priv; + struct device *dev = &rproc->dev; + unsigned int i; + int j; + const unsigned char *name; + struct mbox_client *cl; + + /* Initialise mailbox structure table */ + memcpy(ddata->mb, stm32_rproc_mbox, sizeof(stm32_rproc_mbox)); + + for (i = 0; i < MBOX_NB_MBX; i++) { + name = ddata->mb[i].name; + + cl = &ddata->mb[i].client; + cl->dev = dev->parent; + + ddata->mb[i].chan = mbox_request_channel_byname(cl, name); + if (IS_ERR(ddata->mb[i].chan)) { + if (PTR_ERR(ddata->mb[i].chan) == -EPROBE_DEFER) + goto err_probe; + dev_warn(dev, "cannot get %s mbox\n", name); + ddata->mb[i].chan = NULL; + } + if (ddata->mb[i].vq_id >= 0) { + INIT_WORK(&ddata->mb[i].vq_work, + stm32_rproc_mb_vq_work); + } + } + + return 0; + +err_probe: + for (j = i - 1; j >= 0; j--) + if (ddata->mb[j].chan) + mbox_free_channel(ddata->mb[j].chan); + return -EPROBE_DEFER; +} + +static int stm32_rproc_set_hold_boot(struct rproc *rproc, bool hold) +{ + struct stm32_rproc 
*ddata = rproc->priv; + struct stm32_syscon hold_boot = ddata->hold_boot; + struct arm_smccc_res smc_res; + int val, err; + + val = hold ? HOLD_BOOT : RELEASE_BOOT; + + if (IS_ENABLED(CONFIG_HAVE_ARM_SMCCC) && ddata->secured_soc) { + arm_smccc_smc(STM32_SMC_RCC, STM32_SMC_REG_WRITE, + hold_boot.reg, val, 0, 0, 0, 0, &smc_res); + err = smc_res.a0; + } else { + err = regmap_update_bits(hold_boot.map, hold_boot.reg, + hold_boot.mask, val); + } + + if (err) + dev_err(&rproc->dev, "failed to set hold boot\n"); + + return err; +} + +static void stm32_rproc_add_coredump_trace(struct rproc *rproc) +{ + struct rproc_debug_trace *trace; + struct rproc_dump_segment *segment; + bool already_added; + + list_for_each_entry(trace, &rproc->traces, node) { + already_added = false; + + list_for_each_entry(segment, &rproc->dump_segments, node) { + if (segment->da == trace->trace_mem.da) { + already_added = true; + break; + } + } + + if (!already_added) + rproc_coredump_add_segment(rproc, trace->trace_mem.da, + trace->trace_mem.len); + } +} + +static int stm32_rproc_start(struct rproc *rproc) +{ + struct stm32_rproc *ddata = rproc->priv; + int err; + + stm32_rproc_add_coredump_trace(rproc); + + /* clear remote proc Deep Sleep */ + if (ddata->pdds.map) { + err = regmap_update_bits(ddata->pdds.map, ddata->pdds.reg, + ddata->pdds.mask, 0); + if (err) { + dev_err(&rproc->dev, "failed to clear pdds\n"); + return err; + } + } + + err = stm32_rproc_set_hold_boot(rproc, false); + if (err) + return err; + + return stm32_rproc_set_hold_boot(rproc, true); +} + +static int stm32_rproc_attach(struct rproc *rproc) +{ + stm32_rproc_add_coredump_trace(rproc); + + return stm32_rproc_set_hold_boot(rproc, true); +} + +static int stm32_rproc_stop(struct rproc *rproc) +{ + struct stm32_rproc *ddata = rproc->priv; + int err, dummy_data, idx; + + /* request shutdown of the remote processor */ + if (rproc->state != RPROC_OFFLINE) { + idx = stm32_rproc_mbox_idx(rproc, STM32_MBX_SHUTDOWN); + if (idx >= 0 && ddata->mb[idx].chan) { + /* a dummy data is sent to allow to block on transmit */ + err = mbox_send_message(ddata->mb[idx].chan, + &dummy_data); + if (err < 0) + dev_warn(&rproc->dev, "warning: remote FW shutdown without ack\n"); + } + } + + err = stm32_rproc_set_hold_boot(rproc, true); + if (err) + return err; + + err = reset_control_assert(ddata->rst); + if (err) { + dev_err(&rproc->dev, "failed to assert the reset\n"); + return err; + } + + /* to allow platform Standby power mode, set remote proc Deep Sleep */ + if (ddata->pdds.map) { + err = regmap_update_bits(ddata->pdds.map, ddata->pdds.reg, + ddata->pdds.mask, 1); + if (err) { + dev_err(&rproc->dev, "failed to set pdds\n"); + return err; + } + } + + /* update coprocessor state to OFF if available */ + if (ddata->m4_state.map) { + err = regmap_update_bits(ddata->m4_state.map, + ddata->m4_state.reg, + ddata->m4_state.mask, + M4_STATE_OFF); + if (err) { + dev_err(&rproc->dev, "failed to set copro state\n"); + return err; + } + } + + return 0; +} + +static void stm32_rproc_kick(struct rproc *rproc, int vqid) +{ + struct stm32_rproc *ddata = rproc->priv; + unsigned int i; + int err; + + if (WARN_ON(vqid >= MBOX_NB_VQ)) + return; + + for (i = 0; i < MBOX_NB_MBX; i++) { + if (vqid != ddata->mb[i].vq_id) + continue; + if (!ddata->mb[i].chan) + return; + err = mbox_send_message(ddata->mb[i].chan, (void *)(long)vqid); + if (err < 0) + dev_err(&rproc->dev, "%s: failed (%s, err:%d)\n", + __func__, ddata->mb[i].name, err); + return; + } +} + +static struct rproc_ops st_rproc_ops = { + 
.start = stm32_rproc_start, + .stop = stm32_rproc_stop, + .attach = stm32_rproc_attach, + .kick = stm32_rproc_kick, + .load = rproc_elf_load_segments, + .parse_fw = stm32_rproc_parse_fw, + .find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table, + .sanity_check = rproc_elf_sanity_check, + .get_boot_addr = rproc_elf_get_boot_addr, +}; + +static const struct of_device_id stm32_rproc_match[] = { + { .compatible = "st,stm32mp1-m4" }, + {}, +}; +MODULE_DEVICE_TABLE(of, stm32_rproc_match); + +static int stm32_rproc_get_syscon(struct device_node *np, const char *prop, + struct stm32_syscon *syscon) +{ + int err = 0; + + syscon->map = syscon_regmap_lookup_by_phandle(np, prop); + if (IS_ERR(syscon->map)) { + err = PTR_ERR(syscon->map); + syscon->map = NULL; + goto out; + } + + err = of_property_read_u32_index(np, prop, 1, &syscon->reg); + if (err) + goto out; + + err = of_property_read_u32_index(np, prop, 2, &syscon->mask); + +out: + return err; +} + +static int stm32_rproc_parse_dt(struct platform_device *pdev, + struct stm32_rproc *ddata, bool *auto_boot) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct stm32_syscon tz; + unsigned int tzen; + int err, irq; + + irq = platform_get_irq(pdev, 0); + if (irq == -EPROBE_DEFER) + return -EPROBE_DEFER; + + if (irq > 0) { + err = devm_request_irq(dev, irq, stm32_rproc_wdg, 0, + dev_name(dev), pdev); + if (err) { + dev_err(dev, "failed to request wdg irq\n"); + return err; + } + + ddata->wdg_irq = irq; + + if (of_property_read_bool(np, "wakeup-source")) { + device_init_wakeup(dev, true); + dev_pm_set_wake_irq(dev, irq); + } + + dev_info(dev, "wdg irq registered\n"); + } + + ddata->rst = devm_reset_control_get_by_index(dev, 0); + if (IS_ERR(ddata->rst)) { + dev_err(dev, "failed to get mcu reset\n"); + return PTR_ERR(ddata->rst); + } + + /* + * if platform is secured the hold boot bit must be written by + * smc call and read normally. + * if not secure the hold boot bit could be read/write normally + */ + err = stm32_rproc_get_syscon(np, "st,syscfg-tz", &tz); + if (err) { + dev_err(dev, "failed to get tz syscfg\n"); + return err; + } + + err = regmap_read(tz.map, tz.reg, &tzen); + if (err) { + dev_err(dev, "failed to read tzen\n"); + return err; + } + ddata->secured_soc = tzen & tz.mask; + + err = stm32_rproc_get_syscon(np, "st,syscfg-holdboot", + &ddata->hold_boot); + if (err) { + dev_err(dev, "failed to get hold boot\n"); + return err; + } + + err = stm32_rproc_get_syscon(np, "st,syscfg-pdds", &ddata->pdds); + if (err) + dev_info(dev, "failed to get pdds\n"); + + *auto_boot = of_property_read_bool(np, "st,auto-boot"); + + /* + * See if we can check the M4 status, i.e if it was started + * from the boot loader or not. + */ + err = stm32_rproc_get_syscon(np, "st,syscfg-m4-state", + &ddata->m4_state); + if (err) { + /* remember this */ + ddata->m4_state.map = NULL; + /* no coprocessor state syscon (optional) */ + dev_warn(dev, "m4 state not supported\n"); + + /* no need to go further */ + return 0; + } + + /* See if we can get the resource table */ + err = stm32_rproc_get_syscon(np, "st,syscfg-rsc-tbl", + &ddata->rsctbl); + if (err) { + /* no rsc table syscon (optional) */ + dev_warn(dev, "rsc tbl syscon not supported\n"); + } + + return 0; +} + +static int stm32_rproc_get_m4_status(struct stm32_rproc *ddata, + unsigned int *state) +{ + /* See stm32_rproc_parse_dt() */ + if (!ddata->m4_state.map) { + /* + * We couldn't get the coprocessor's state, assume + * it is not running. 
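+ * Reporting M4_STATE_OFF in that case simply makes probe follow the
+ * normal boot path instead of attaching to already-running firmware.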
+ */ + *state = M4_STATE_OFF; + return 0; + } + + return regmap_read(ddata->m4_state.map, ddata->m4_state.reg, state); +} + +static int stm32_rproc_da_to_pa(struct platform_device *pdev, + struct stm32_rproc *ddata, + u64 da, phys_addr_t *pa) +{ + struct device *dev = &pdev->dev; + struct stm32_rproc_mem *p_mem; + unsigned int i; + + for (i = 0; i < ddata->nb_rmems; i++) { + p_mem = &ddata->rmems[i]; + + if (da < p_mem->dev_addr || + da >= p_mem->dev_addr + p_mem->size) + continue; + + *pa = da - p_mem->dev_addr + p_mem->bus_addr; + dev_dbg(dev, "da %llx to pa %#x\n", da, *pa); + + return 0; + } + + dev_err(dev, "can't translate da %llx\n", da); + + return -EINVAL; +} + +static int stm32_rproc_get_loaded_rsc_table(struct platform_device *pdev, + struct rproc *rproc, + struct stm32_rproc *ddata) +{ + struct device *dev = &pdev->dev; + phys_addr_t rsc_pa; + u32 rsc_da; + int err; + + err = regmap_read(ddata->rsctbl.map, ddata->rsctbl.reg, &rsc_da); + if (err) { + dev_err(dev, "failed to read rsc tbl addr\n"); + return err; + } + + if (!rsc_da) + /* no rsc table */ + return 0; + + err = stm32_rproc_da_to_pa(pdev, ddata, rsc_da, &rsc_pa); + if (err) + return err; + + ddata->rsc_va = devm_ioremap_wc(dev, rsc_pa, RSC_TBL_SIZE); + if (IS_ERR_OR_NULL(ddata->rsc_va)) { + dev_err(dev, "Unable to map memory region: %pa+%zx\n", + &rsc_pa, RSC_TBL_SIZE); + ddata->rsc_va = NULL; + return -ENOMEM; + } + + /* + * The resource table is already loaded in device memory, no need + * to work with a cached table. + */ + rproc->cached_table = NULL; + /* Assuming the resource table fits in 1kB is fair */ + rproc->table_sz = RSC_TBL_SIZE; + rproc->table_ptr = (struct resource_table *)ddata->rsc_va; + + return 0; +} + +static int stm32_rproc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct stm32_rproc *ddata; + struct device_node *np = dev->of_node; + struct rproc *rproc; + unsigned int state; + int ret; + + ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (ret) + return ret; + + rproc = rproc_alloc(dev, np->name, &st_rproc_ops, NULL, sizeof(*ddata)); + if (!rproc) + return -ENOMEM; + + ddata = rproc->priv; + + rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE); + + ret = stm32_rproc_parse_dt(pdev, ddata, &rproc->auto_boot); + if (ret) + goto free_rproc; + + ret = stm32_rproc_of_memory_translations(pdev, ddata); + if (ret) + goto free_rproc; + + ret = stm32_rproc_get_m4_status(ddata, &state); + if (ret) + goto free_rproc; + + if (state == M4_STATE_CRUN) { + rproc->state = RPROC_DETACHED; + + ret = stm32_rproc_parse_memory_regions(rproc); + if (ret) + goto free_resources; + + ret = stm32_rproc_get_loaded_rsc_table(pdev, rproc, ddata); + if (ret) + goto free_resources; + } + + rproc->has_iommu = false; + ddata->workqueue = create_workqueue(dev_name(dev)); + if (!ddata->workqueue) { + dev_err(dev, "cannot create workqueue\n"); + ret = -ENOMEM; + goto free_resources; + } + + platform_set_drvdata(pdev, rproc); + + ret = stm32_rproc_request_mbox(rproc); + if (ret) + goto free_wkq; + + ret = rproc_add(rproc); + if (ret) + goto free_mb; + + return 0; + +free_mb: + stm32_rproc_free_mbox(rproc); +free_wkq: + destroy_workqueue(ddata->workqueue); +free_resources: + rproc_resource_cleanup(rproc); +free_rproc: + if (device_may_wakeup(dev)) { + dev_pm_clear_wake_irq(dev); + device_init_wakeup(dev, false); + } + rproc_free(rproc); + return ret; +} + +static int stm32_rproc_remove(struct platform_device *pdev) +{ + struct rproc *rproc = platform_get_drvdata(pdev); + struct 
stm32_rproc *ddata = rproc->priv; + struct device *dev = &pdev->dev; + + if (atomic_read(&rproc->power) > 0) + rproc_shutdown(rproc); + + rproc_del(rproc); + stm32_rproc_free_mbox(rproc); + destroy_workqueue(ddata->workqueue); + + if (device_may_wakeup(dev)) { + dev_pm_clear_wake_irq(dev); + device_init_wakeup(dev, false); + } + rproc_free(rproc); + + return 0; +} + +static int __maybe_unused stm32_rproc_suspend(struct device *dev) +{ + struct rproc *rproc = dev_get_drvdata(dev); + struct stm32_rproc *ddata = rproc->priv; + + if (device_may_wakeup(dev)) + return enable_irq_wake(ddata->wdg_irq); + + return 0; +} + +static int __maybe_unused stm32_rproc_resume(struct device *dev) +{ + struct rproc *rproc = dev_get_drvdata(dev); + struct stm32_rproc *ddata = rproc->priv; + + if (device_may_wakeup(dev)) + return disable_irq_wake(ddata->wdg_irq); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(stm32_rproc_pm_ops, + stm32_rproc_suspend, stm32_rproc_resume); + +static struct platform_driver stm32_rproc_driver = { + .probe = stm32_rproc_probe, + .remove = stm32_rproc_remove, + .driver = { + .name = "stm32-rproc", + .pm = &stm32_rproc_pm_ops, + .of_match_table = of_match_ptr(stm32_rproc_match), + }, +}; +module_platform_driver(stm32_rproc_driver); + +MODULE_DESCRIPTION("STM32 Remote Processor Control Driver"); +MODULE_AUTHOR("Ludovic Barre <ludovic.barre@st.com>"); +MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>"); +MODULE_LICENSE("GPL v2"); + diff --git a/drivers/remoteproc/ti_k3_dsp_remoteproc.c b/drivers/remoteproc/ti_k3_dsp_remoteproc.c new file mode 100644 index 000000000..863c0214e --- /dev/null +++ b/drivers/remoteproc/ti_k3_dsp_remoteproc.c @@ -0,0 +1,787 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * TI K3 DSP Remote Processor(s) driver + * + * Copyright (C) 2018-2020 Texas Instruments Incorporated - https://www.ti.com/ + * Suman Anna <s-anna@ti.com> + */ + +#include <linux/io.h> +#include <linux/mailbox_client.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/of_reserved_mem.h> +#include <linux/omap-mailbox.h> +#include <linux/platform_device.h> +#include <linux/remoteproc.h> +#include <linux/reset.h> +#include <linux/slab.h> + +#include "omap_remoteproc.h" +#include "remoteproc_internal.h" +#include "ti_sci_proc.h" + +#define KEYSTONE_RPROC_LOCAL_ADDRESS_MASK (SZ_16M - 1) + +/** + * struct k3_dsp_mem - internal memory structure + * @cpu_addr: MPU virtual address of the memory region + * @bus_addr: Bus address used to access the memory region + * @dev_addr: Device address of the memory region from DSP view + * @size: Size of the memory region + */ +struct k3_dsp_mem { + void __iomem *cpu_addr; + phys_addr_t bus_addr; + u32 dev_addr; + size_t size; +}; + +/** + * struct k3_dsp_mem_data - memory definitions for a DSP + * @name: name for this memory entry + * @dev_addr: device address for the memory entry + */ +struct k3_dsp_mem_data { + const char *name; + const u32 dev_addr; +}; + +/** + * struct k3_dsp_dev_data - device data structure for a DSP + * @mems: pointer to memory definitions for a DSP + * @num_mems: number of memory regions in @mems + * @boot_align_addr: boot vector address alignment granularity + * @uses_lreset: flag to denote the need for local reset management + */ +struct k3_dsp_dev_data { + const struct k3_dsp_mem_data *mems; + u32 num_mems; + u32 boot_align_addr; + bool uses_lreset; +}; + +/** + * struct k3_dsp_rproc - k3 DSP remote processor driver structure + * @dev: cached device pointer + * @rproc: remoteproc device handle + * 
@mem: internal memory regions data
+ * @num_mems: number of internal memory regions
+ * @rmem: reserved memory regions data
+ * @num_rmems: number of reserved memory regions
+ * @reset: reset control handle
+ * @data: pointer to DSP-specific device data
+ * @tsp: TI-SCI processor control handle
+ * @ti_sci: TI-SCI handle
+ * @ti_sci_id: TI-SCI device identifier
+ * @mbox: mailbox channel handle
+ * @client: mailbox client to request the mailbox channel
+ */
+struct k3_dsp_rproc {
+ struct device *dev;
+ struct rproc *rproc;
+ struct k3_dsp_mem *mem;
+ int num_mems;
+ struct k3_dsp_mem *rmem;
+ int num_rmems;
+ struct reset_control *reset;
+ const struct k3_dsp_dev_data *data;
+ struct ti_sci_proc *tsp;
+ const struct ti_sci_handle *ti_sci;
+ u32 ti_sci_id;
+ struct mbox_chan *mbox;
+ struct mbox_client client;
+};
+
+/**
+ * k3_dsp_rproc_mbox_callback() - inbound mailbox message handler
+ * @client: mailbox client pointer used for requesting the mailbox channel
+ * @data: mailbox payload
+ *
+ * This handler is invoked by the OMAP mailbox driver whenever a mailbox
+ * message is received. Usually, the mailbox payload simply contains
+ * the index of the virtqueue that is kicked by the remote processor,
+ * and we let remoteproc core handle it.
+ *
+ * In addition to virtqueue indices, we also have some out-of-band values
+ * that indicate different events. Those values are deliberately very
+ * large so they don't coincide with virtqueue indices.
+ */
+static void k3_dsp_rproc_mbox_callback(struct mbox_client *client, void *data)
+{
+ struct k3_dsp_rproc *kproc = container_of(client, struct k3_dsp_rproc,
+ client);
+ struct device *dev = kproc->rproc->dev.parent;
+ const char *name = kproc->rproc->name;
+ u32 msg = omap_mbox_message(data);
+
+ dev_dbg(dev, "mbox msg: 0x%x\n", msg);
+
+ switch (msg) {
+ case RP_MBOX_CRASH:
+ /*
+ * remoteproc detected an exception, but error recovery is not
+ * supported. So, just log this for now
+ */
+ dev_err(dev, "K3 DSP rproc %s crashed\n", name);
+ break;
+ case RP_MBOX_ECHO_REPLY:
+ dev_info(dev, "received echo reply from %s\n", name);
+ break;
+ default:
+ /* silently handle all other valid messages */
+ if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
+ return;
+ if (msg > kproc->rproc->max_notifyid) {
+ dev_dbg(dev, "dropping unknown message 0x%x", msg);
+ return;
+ }
+ /* msg contains the index of the triggered vring */
+ if (rproc_vq_interrupt(kproc->rproc, msg) == IRQ_NONE)
+ dev_dbg(dev, "no message was found in vqid %d\n", msg);
+ }
+}
+
+/*
+ * Kick the remote processor to notify about pending unprocessed messages.
+ * The vqid argument is not used, and is inconsequential: the kick is performed
+ * through a simulated GPIO (a bit in an IPC interrupt-triggering register),
+ * and the remote processor is expected to process both its Tx and Rx virtqueues.
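+ *
+ * (Contrast this with stm32_rproc_kick() above, which does use vqid to
+ * pick the per-virtqueue mailbox channel to signal.)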
+ */
+static void k3_dsp_rproc_kick(struct rproc *rproc, int vqid)
+{
+	struct k3_dsp_rproc *kproc = rproc->priv;
+	struct device *dev = rproc->dev.parent;
+	mbox_msg_t msg = (mbox_msg_t)vqid;
+	int ret;
+
+	/* send the index of the triggered virtqueue in the mailbox payload */
+	ret = mbox_send_message(kproc->mbox, (void *)msg);
+	if (ret < 0)
+		dev_err(dev, "failed to send mailbox message, status = %d\n",
+			ret);
+}
+
+/* Put the DSP processor into reset */
+static int k3_dsp_rproc_reset(struct k3_dsp_rproc *kproc)
+{
+	struct device *dev = kproc->dev;
+	int ret;
+
+	ret = reset_control_assert(kproc->reset);
+	if (ret) {
+		dev_err(dev, "local-reset assert failed, ret = %d\n", ret);
+		return ret;
+	}
+
+	if (kproc->data->uses_lreset)
+		return ret;
+
+	ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+						    kproc->ti_sci_id);
+	if (ret) {
+		dev_err(dev, "module-reset assert failed, ret = %d\n", ret);
+		if (reset_control_deassert(kproc->reset))
+			dev_warn(dev, "local-reset deassert back failed\n");
+	}
+
+	return ret;
+}
+
+/* Release the DSP processor from reset */
+static int k3_dsp_rproc_release(struct k3_dsp_rproc *kproc)
+{
+	struct device *dev = kproc->dev;
+	int ret;
+
+	if (kproc->data->uses_lreset)
+		goto lreset;
+
+	ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
+						    kproc->ti_sci_id);
+	if (ret) {
+		dev_err(dev, "module-reset deassert failed, ret = %d\n", ret);
+		return ret;
+	}
+
+lreset:
+	ret = reset_control_deassert(kproc->reset);
+	if (ret) {
+		dev_err(dev, "local-reset deassert failed, ret = %d\n", ret);
+		if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+							  kproc->ti_sci_id))
+			dev_warn(dev, "module-reset assert back failed\n");
+	}
+
+	return ret;
+}
+
+/*
+ * The C66x DSP cores have a local reset that affects only the CPU, and a
+ * generic module reset that powers on the device and allows the DSP internal
+ * memories to be accessed while the local reset is asserted. This function is
+ * used to release the global reset on C66x DSPs to allow loading into the DSP
+ * internal RAMs. The .prepare() ops is invoked by the remoteproc core before
+ * any firmware loading, and is followed by the .start() ops after loading to
+ * actually let the C66x DSP cores run.
+ */
+static int k3_dsp_rproc_prepare(struct rproc *rproc)
+{
+	struct k3_dsp_rproc *kproc = rproc->priv;
+	struct device *dev = kproc->dev;
+	int ret;
+
+	ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
+						    kproc->ti_sci_id);
+	if (ret)
+		dev_err(dev, "module-reset deassert failed, cannot enable internal RAM loading, ret = %d\n",
+			ret);
+
+	return ret;
+}
+
+/*
+ * This function implements the .unprepare() ops and performs the complementary
+ * operations to those of the .prepare() ops. The function is used to assert
+ * the global reset on applicable C66x cores. This completes the second portion
+ * of powering down the C66x DSP cores. The cores themselves are only halted in
+ * the .stop() callback through the local reset, and the .unprepare() ops is
+ * invoked by the remoteproc core after the remoteproc is stopped to balance
+ * the global reset.
+ */
+static int k3_dsp_rproc_unprepare(struct rproc *rproc)
+{
+	struct k3_dsp_rproc *kproc = rproc->priv;
+	struct device *dev = kproc->dev;
+	int ret;
+
+	ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+						    kproc->ti_sci_id);
+	if (ret)
+		dev_err(dev, "module-reset assert failed, ret = %d\n", ret);
+
+	return ret;
+}
+
+/*
+ * Power up the DSP remote processor.
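+ * The boot vector programmed here must be aligned to the per-device
+ * boot_align_addr granularity (SZ_1K for C66x, SZ_2M for C71x, per the match
+ * data at the end of this file); unaligned addresses are rejected with
+ * -EINVAL before any reset is released.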
+ *
+ * This function will be invoked only after the firmware for this rproc
+ * was loaded, parsed successfully, and all of its resource requirements
+ * were met.
+ */
+static int k3_dsp_rproc_start(struct rproc *rproc)
+{
+	struct k3_dsp_rproc *kproc = rproc->priv;
+	struct mbox_client *client = &kproc->client;
+	struct device *dev = kproc->dev;
+	u32 boot_addr;
+	int ret;
+
+	client->dev = dev;
+	client->tx_done = NULL;
+	client->rx_callback = k3_dsp_rproc_mbox_callback;
+	client->tx_block = false;
+	client->knows_txdone = false;
+
+	kproc->mbox = mbox_request_channel(client, 0);
+	if (IS_ERR(kproc->mbox)) {
+		ret = -EBUSY;
+		dev_err(dev, "mbox_request_channel failed: %ld\n",
+			PTR_ERR(kproc->mbox));
+		return ret;
+	}
+
+	/*
+	 * Ping the remote processor - this is only a sanity check for now;
+	 * there is no functional effect whatsoever.
+	 *
+	 * Note that the reply will _not_ arrive immediately: this message
+	 * will wait in the mailbox fifo until the remote processor is booted.
+	 */
+	ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
+	if (ret < 0) {
+		dev_err(dev, "mbox_send_message failed: %d\n", ret);
+		goto put_mbox;
+	}
+
+	boot_addr = rproc->bootaddr;
+	if (boot_addr & (kproc->data->boot_align_addr - 1)) {
+		dev_err(dev, "invalid boot address 0x%x, must be aligned on a 0x%x boundary\n",
+			boot_addr, kproc->data->boot_align_addr);
+		ret = -EINVAL;
+		goto put_mbox;
+	}
+
+	dev_dbg(dev, "booting DSP core using boot addr = 0x%x\n", boot_addr);
+	ret = ti_sci_proc_set_config(kproc->tsp, boot_addr, 0, 0);
+	if (ret)
+		goto put_mbox;
+
+	ret = k3_dsp_rproc_release(kproc);
+	if (ret)
+		goto put_mbox;
+
+	return 0;
+
+put_mbox:
+	mbox_free_channel(kproc->mbox);
+	return ret;
+}
+
+/*
+ * Stop the DSP remote processor.
+ *
+ * This function puts the DSP processor into reset, and finishes processing
+ * of any pending messages.
+ */
+static int k3_dsp_rproc_stop(struct rproc *rproc)
+{
+	struct k3_dsp_rproc *kproc = rproc->priv;
+
+	mbox_free_channel(kproc->mbox);
+
+	k3_dsp_rproc_reset(kproc);
+
+	return 0;
+}
+
+/*
+ * Custom function to translate a DSP device address (internal RAMs only) to a
+ * kernel virtual address. The DSPs can access their RAMs at either an internal
+ * address visible only from a DSP, or at the SoC-level bus address. Both of
+ * these address ranges need to be considered for translation. The translated
+ * addresses can be used either by the remoteproc core for loading (when using
+ * the kernel remoteproc loader), or by any rpmsg bus drivers.
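+ *
+ * As a worked example (sizes illustrative, DSP-view addresses from the
+ * c66_mems/c71_mems tables below): for an "l2sram" entry with dev_addr
+ * 0x800000 and size SZ_512K, a request for da 0x800100 falls below
+ * KEYSTONE_RPROC_LOCAL_ADDRESS_MASK, matches the DSP view, and translates to
+ * cpu_addr + 0x100; a da above that mask is instead matched against the
+ * region's SoC-level bus address.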
+ */
+static void *k3_dsp_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
+{
+	struct k3_dsp_rproc *kproc = rproc->priv;
+	void __iomem *va = NULL;
+	phys_addr_t bus_addr;
+	u32 dev_addr, offset;
+	size_t size;
+	int i;
+
+	if (len == 0)
+		return NULL;
+
+	for (i = 0; i < kproc->num_mems; i++) {
+		bus_addr = kproc->mem[i].bus_addr;
+		dev_addr = kproc->mem[i].dev_addr;
+		size = kproc->mem[i].size;
+
+		if (da < KEYSTONE_RPROC_LOCAL_ADDRESS_MASK) {
+			/* handle DSP-view addresses */
+			if (da >= dev_addr &&
+			    ((da + len) <= (dev_addr + size))) {
+				offset = da - dev_addr;
+				va = kproc->mem[i].cpu_addr + offset;
+				return (__force void *)va;
+			}
+		} else {
+			/* handle SoC-view addresses */
+			if (da >= bus_addr &&
+			    (da + len) <= (bus_addr + size)) {
+				offset = da - bus_addr;
+				va = kproc->mem[i].cpu_addr + offset;
+				return (__force void *)va;
+			}
+		}
+	}
+
+	/* handle static DDR reserved memory regions */
+	for (i = 0; i < kproc->num_rmems; i++) {
+		dev_addr = kproc->rmem[i].dev_addr;
+		size = kproc->rmem[i].size;
+
+		if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
+			offset = da - dev_addr;
+			va = kproc->rmem[i].cpu_addr + offset;
+			return (__force void *)va;
+		}
+	}
+
+	return NULL;
+}
+
+static const struct rproc_ops k3_dsp_rproc_ops = {
+	.start = k3_dsp_rproc_start,
+	.stop = k3_dsp_rproc_stop,
+	.kick = k3_dsp_rproc_kick,
+	.da_to_va = k3_dsp_rproc_da_to_va,
+};
+
+static int k3_dsp_rproc_of_get_memories(struct platform_device *pdev,
+					struct k3_dsp_rproc *kproc)
+{
+	const struct k3_dsp_dev_data *data = kproc->data;
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	int num_mems = 0;
+	int i;
+
+	num_mems = kproc->data->num_mems;
+	kproc->mem = devm_kcalloc(kproc->dev, num_mems,
+				  sizeof(*kproc->mem), GFP_KERNEL);
+	if (!kproc->mem)
+		return -ENOMEM;
+
+	for (i = 0; i < num_mems; i++) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						   data->mems[i].name);
+		if (!res) {
+			dev_err(dev, "found no memory resource for %s\n",
+				data->mems[i].name);
+			return -EINVAL;
+		}
+		if (!devm_request_mem_region(dev, res->start,
+					     resource_size(res),
+					     dev_name(dev))) {
+			dev_err(dev, "could not request %s region for resource\n",
+				data->mems[i].name);
+			return -EBUSY;
+		}
+
+		kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
+							 resource_size(res));
+		if (!kproc->mem[i].cpu_addr) {
+			dev_err(dev, "failed to map %s memory\n",
+				data->mems[i].name);
+			return -ENOMEM;
+		}
+		kproc->mem[i].bus_addr = res->start;
+		kproc->mem[i].dev_addr = data->mems[i].dev_addr;
+		kproc->mem[i].size = resource_size(res);
+
+		dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+			data->mems[i].name, &kproc->mem[i].bus_addr,
+			kproc->mem[i].size, kproc->mem[i].cpu_addr,
+			kproc->mem[i].dev_addr);
+	}
+	kproc->num_mems = num_mems;
+
+	return 0;
+}
+
+static int k3_dsp_reserved_mem_init(struct k3_dsp_rproc *kproc)
+{
+	struct device *dev = kproc->dev;
+	struct device_node *np = dev->of_node;
+	struct device_node *rmem_np;
+	struct reserved_mem *rmem;
+	int num_rmems;
+	int ret, i;
+
+	num_rmems = of_property_count_elems_of_size(np, "memory-region",
+						    sizeof(phandle));
+	if (num_rmems <= 0) {
+		dev_err(dev, "device does not have reserved memory regions, ret = %d\n",
+			num_rmems);
+		return -EINVAL;
+	}
+	if (num_rmems < 2) {
+		dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
+			num_rmems);
+		return -EINVAL;
+	}
+
+	/* use reserved memory region 0 for vring DMA allocations */
+	ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
+	if (ret) {
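+		/*
+		 * A failure here typically means the first "memory-region"
+		 * phandle does not point at a valid reserved-memory node
+		 * usable as a DMA pool.
+		 */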
dev_err(dev, "device cannot initialize DMA pool, ret = %d\n", + ret); + return ret; + } + + num_rmems--; + kproc->rmem = kcalloc(num_rmems, sizeof(*kproc->rmem), GFP_KERNEL); + if (!kproc->rmem) { + ret = -ENOMEM; + goto release_rmem; + } + + /* use remaining reserved memory regions for static carveouts */ + for (i = 0; i < num_rmems; i++) { + rmem_np = of_parse_phandle(np, "memory-region", i + 1); + if (!rmem_np) { + ret = -EINVAL; + goto unmap_rmem; + } + + rmem = of_reserved_mem_lookup(rmem_np); + if (!rmem) { + of_node_put(rmem_np); + ret = -EINVAL; + goto unmap_rmem; + } + of_node_put(rmem_np); + + kproc->rmem[i].bus_addr = rmem->base; + /* 64-bit address regions currently not supported */ + kproc->rmem[i].dev_addr = (u32)rmem->base; + kproc->rmem[i].size = rmem->size; + kproc->rmem[i].cpu_addr = ioremap_wc(rmem->base, rmem->size); + if (!kproc->rmem[i].cpu_addr) { + dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n", + i + 1, &rmem->base, &rmem->size); + ret = -ENOMEM; + goto unmap_rmem; + } + + dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n", + i + 1, &kproc->rmem[i].bus_addr, + kproc->rmem[i].size, kproc->rmem[i].cpu_addr, + kproc->rmem[i].dev_addr); + } + kproc->num_rmems = num_rmems; + + return 0; + +unmap_rmem: + for (i--; i >= 0; i--) + iounmap(kproc->rmem[i].cpu_addr); + kfree(kproc->rmem); +release_rmem: + of_reserved_mem_device_release(kproc->dev); + return ret; +} + +static void k3_dsp_reserved_mem_exit(struct k3_dsp_rproc *kproc) +{ + int i; + + for (i = 0; i < kproc->num_rmems; i++) + iounmap(kproc->rmem[i].cpu_addr); + kfree(kproc->rmem); + + of_reserved_mem_device_release(kproc->dev); +} + +static +struct ti_sci_proc *k3_dsp_rproc_of_get_tsp(struct device *dev, + const struct ti_sci_handle *sci) +{ + struct ti_sci_proc *tsp; + u32 temp[2]; + int ret; + + ret = of_property_read_u32_array(dev->of_node, "ti,sci-proc-ids", + temp, 2); + if (ret < 0) + return ERR_PTR(ret); + + tsp = kzalloc(sizeof(*tsp), GFP_KERNEL); + if (!tsp) + return ERR_PTR(-ENOMEM); + + tsp->dev = dev; + tsp->sci = sci; + tsp->ops = &sci->ops.proc_ops; + tsp->proc_id = temp[0]; + tsp->host_id = temp[1]; + + return tsp; +} + +static int k3_dsp_rproc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + const struct k3_dsp_dev_data *data; + struct k3_dsp_rproc *kproc; + struct rproc *rproc; + const char *fw_name; + int ret = 0; + int ret1; + + data = of_device_get_match_data(dev); + if (!data) + return -ENODEV; + + ret = rproc_of_parse_firmware(dev, 0, &fw_name); + if (ret) { + dev_err(dev, "failed to parse firmware-name property, ret = %d\n", + ret); + return ret; + } + + rproc = rproc_alloc(dev, dev_name(dev), &k3_dsp_rproc_ops, fw_name, + sizeof(*kproc)); + if (!rproc) + return -ENOMEM; + + rproc->has_iommu = false; + rproc->recovery_disabled = true; + if (data->uses_lreset) { + rproc->ops->prepare = k3_dsp_rproc_prepare; + rproc->ops->unprepare = k3_dsp_rproc_unprepare; + } + kproc = rproc->priv; + kproc->rproc = rproc; + kproc->dev = dev; + kproc->data = data; + + kproc->ti_sci = ti_sci_get_by_phandle(np, "ti,sci"); + if (IS_ERR(kproc->ti_sci)) { + ret = PTR_ERR(kproc->ti_sci); + if (ret != -EPROBE_DEFER) { + dev_err(dev, "failed to get ti-sci handle, ret = %d\n", + ret); + } + kproc->ti_sci = NULL; + goto free_rproc; + } + + ret = of_property_read_u32(np, "ti,sci-dev-id", &kproc->ti_sci_id); + if (ret) { + dev_err(dev, "missing 'ti,sci-dev-id' property\n"); + goto put_sci; + } + + 
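+	/* this is the local (CPU) reset; the module reset is managed via TI-SCI */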
	kproc->reset = devm_reset_control_get_exclusive(dev, NULL);
+	if (IS_ERR(kproc->reset)) {
+		ret = PTR_ERR(kproc->reset);
+		dev_err(dev, "failed to get reset, status = %d\n", ret);
+		goto put_sci;
+	}
+
+	kproc->tsp = k3_dsp_rproc_of_get_tsp(dev, kproc->ti_sci);
+	if (IS_ERR(kproc->tsp)) {
+		ret = PTR_ERR(kproc->tsp);
+		dev_err(dev, "failed to construct ti-sci proc control, ret = %d\n",
+			ret);
+		goto put_sci;
+	}
+
+	ret = ti_sci_proc_request(kproc->tsp);
+	if (ret < 0) {
+		dev_err(dev, "ti_sci_proc_request failed, ret = %d\n", ret);
+		goto free_tsp;
+	}
+
+	ret = k3_dsp_rproc_of_get_memories(pdev, kproc);
+	if (ret)
+		goto release_tsp;
+
+	ret = k3_dsp_reserved_mem_init(kproc);
+	if (ret) {
+		dev_err(dev, "reserved memory init failed, ret = %d\n", ret);
+		goto release_tsp;
+	}
+
+	/*
+	 * Ensure the DSP local reset is asserted so that the DSP cannot
+	 * execute bogus code in .prepare() when the module reset is released.
+	 */
+	if (data->uses_lreset) {
+		ret = reset_control_status(kproc->reset);
+		if (ret < 0) {
+			dev_err(dev, "failed to get reset status, status = %d\n",
+				ret);
+			goto release_mem;
+		} else if (ret == 0) {
+			dev_warn(dev, "local reset is deasserted for device\n");
+			k3_dsp_rproc_reset(kproc);
+		}
+	}
+
+	ret = rproc_add(rproc);
+	if (ret) {
+		dev_err(dev, "failed to register device with remoteproc core, status = %d\n",
+			ret);
+		goto release_mem;
+	}
+
+	platform_set_drvdata(pdev, kproc);
+
+	return 0;
+
+release_mem:
+	k3_dsp_reserved_mem_exit(kproc);
+release_tsp:
+	ret1 = ti_sci_proc_release(kproc->tsp);
+	if (ret1)
+		dev_err(dev, "failed to release proc, ret = %d\n", ret1);
+free_tsp:
+	kfree(kproc->tsp);
+put_sci:
+	ret1 = ti_sci_put_handle(kproc->ti_sci);
+	if (ret1)
+		dev_err(dev, "failed to put ti_sci handle, ret = %d\n", ret1);
+free_rproc:
+	rproc_free(rproc);
+	return ret;
+}
+
+static int k3_dsp_rproc_remove(struct platform_device *pdev)
+{
+	struct k3_dsp_rproc *kproc = platform_get_drvdata(pdev);
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	rproc_del(kproc->rproc);
+
+	ret = ti_sci_proc_release(kproc->tsp);
+	if (ret)
+		dev_err(dev, "failed to release proc, ret = %d\n", ret);
+
+	kfree(kproc->tsp);
+
+	ret = ti_sci_put_handle(kproc->ti_sci);
+	if (ret)
+		dev_err(dev, "failed to put ti_sci handle, ret = %d\n", ret);
+
+	k3_dsp_reserved_mem_exit(kproc);
+	rproc_free(kproc->rproc);
+
+	return 0;
+}
+
+static const struct k3_dsp_mem_data c66_mems[] = {
+	{ .name = "l2sram", .dev_addr = 0x800000 },
+	{ .name = "l1pram", .dev_addr = 0xe00000 },
+	{ .name = "l1dram", .dev_addr = 0xf00000 },
+};
+
+/* C71x cores only have an L1P cache; there are no L1P SRAMs */
+static const struct k3_dsp_mem_data c71_mems[] = {
+	{ .name = "l2sram", .dev_addr = 0x800000 },
+	{ .name = "l1dram", .dev_addr = 0xe00000 },
+};
+
+static const struct k3_dsp_dev_data c66_data = {
+	.mems = c66_mems,
+	.num_mems = ARRAY_SIZE(c66_mems),
+	.boot_align_addr = SZ_1K,
+	.uses_lreset = true,
+};
+
+static const struct k3_dsp_dev_data c71_data = {
+	.mems = c71_mems,
+	.num_mems = ARRAY_SIZE(c71_mems),
+	.boot_align_addr = SZ_2M,
+	.uses_lreset = false,
+};
+
+static const struct of_device_id k3_dsp_of_match[] = {
+	{ .compatible = "ti,j721e-c66-dsp", .data = &c66_data, },
+	{ .compatible = "ti,j721e-c71-dsp", .data = &c71_data, },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, k3_dsp_of_match);
+
+static struct platform_driver k3_dsp_rproc_driver = {
+	.probe	= k3_dsp_rproc_probe,
+	.remove	= k3_dsp_rproc_remove,
+	.driver	= {
+		.name = "k3-dsp-rproc",
+		.of_match_table =
k3_dsp_of_match, + }, +}; + +module_platform_driver(k3_dsp_rproc_driver); + +MODULE_AUTHOR("Suman Anna <s-anna@ti.com>"); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("TI K3 DSP Remoteproc driver"); diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c new file mode 100644 index 000000000..f92a18c06 --- /dev/null +++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c @@ -0,0 +1,1397 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * TI K3 R5F (MCU) Remote Processor driver + * + * Copyright (C) 2017-2020 Texas Instruments Incorporated - https://www.ti.com/ + * Suman Anna <s-anna@ti.com> + */ + +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/mailbox_client.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/of_reserved_mem.h> +#include <linux/omap-mailbox.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/remoteproc.h> +#include <linux/reset.h> +#include <linux/slab.h> + +#include "omap_remoteproc.h" +#include "remoteproc_internal.h" +#include "ti_sci_proc.h" + +/* This address can either be for ATCM or BTCM with the other at address 0x0 */ +#define K3_R5_TCM_DEV_ADDR 0x41010000 + +/* R5 TI-SCI Processor Configuration Flags */ +#define PROC_BOOT_CFG_FLAG_R5_DBG_EN 0x00000001 +#define PROC_BOOT_CFG_FLAG_R5_DBG_NIDEN 0x00000002 +#define PROC_BOOT_CFG_FLAG_R5_LOCKSTEP 0x00000100 +#define PROC_BOOT_CFG_FLAG_R5_TEINIT 0x00000200 +#define PROC_BOOT_CFG_FLAG_R5_NMFI_EN 0x00000400 +#define PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE 0x00000800 +#define PROC_BOOT_CFG_FLAG_R5_BTCM_EN 0x00001000 +#define PROC_BOOT_CFG_FLAG_R5_ATCM_EN 0x00002000 + +/* R5 TI-SCI Processor Control Flags */ +#define PROC_BOOT_CTRL_FLAG_R5_CORE_HALT 0x00000001 + +/* R5 TI-SCI Processor Status Flags */ +#define PROC_BOOT_STATUS_FLAG_R5_WFE 0x00000001 +#define PROC_BOOT_STATUS_FLAG_R5_WFI 0x00000002 +#define PROC_BOOT_STATUS_FLAG_R5_CLK_GATED 0x00000004 +#define PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED 0x00000100 + +/** + * struct k3_r5_mem - internal memory structure + * @cpu_addr: MPU virtual address of the memory region + * @bus_addr: Bus address used to access the memory region + * @dev_addr: Device address from remoteproc view + * @size: Size of the memory region + */ +struct k3_r5_mem { + void __iomem *cpu_addr; + phys_addr_t bus_addr; + u32 dev_addr; + size_t size; +}; + +enum cluster_mode { + CLUSTER_MODE_SPLIT = 0, + CLUSTER_MODE_LOCKSTEP, +}; + +/** + * struct k3_r5_cluster - K3 R5F Cluster structure + * @dev: cached device pointer + * @mode: Mode to configure the Cluster - Split or LockStep + * @cores: list of R5 cores within the cluster + */ +struct k3_r5_cluster { + struct device *dev; + enum cluster_mode mode; + struct list_head cores; +}; + +/** + * struct k3_r5_core - K3 R5 core structure + * @elem: linked list item + * @dev: cached device pointer + * @rproc: rproc handle representing this core + * @mem: internal memory regions data + * @sram: on-chip SRAM memory regions data + * @num_mems: number of internal memory regions + * @num_sram: number of on-chip SRAM memory regions + * @reset: reset control handle + * @tsp: TI-SCI processor control handle + * @ti_sci: TI-SCI handle + * @ti_sci_id: TI-SCI device identifier + * @atcm_enable: flag to control ATCM enablement + * @btcm_enable: flag to control BTCM enablement + * @loczrama: flag to dictate which TCM is at device address 0x0 + */ +struct k3_r5_core { + 
struct list_head elem; + struct device *dev; + struct rproc *rproc; + struct k3_r5_mem *mem; + struct k3_r5_mem *sram; + int num_mems; + int num_sram; + struct reset_control *reset; + struct ti_sci_proc *tsp; + const struct ti_sci_handle *ti_sci; + u32 ti_sci_id; + u32 atcm_enable; + u32 btcm_enable; + u32 loczrama; +}; + +/** + * struct k3_r5_rproc - K3 remote processor state + * @dev: cached device pointer + * @cluster: cached pointer to parent cluster structure + * @mbox: mailbox channel handle + * @client: mailbox client to request the mailbox channel + * @rproc: rproc handle + * @core: cached pointer to r5 core structure being used + * @rmem: reserved memory regions data + * @num_rmems: number of reserved memory regions + */ +struct k3_r5_rproc { + struct device *dev; + struct k3_r5_cluster *cluster; + struct mbox_chan *mbox; + struct mbox_client client; + struct rproc *rproc; + struct k3_r5_core *core; + struct k3_r5_mem *rmem; + int num_rmems; +}; + +/** + * k3_r5_rproc_mbox_callback() - inbound mailbox message handler + * @client: mailbox client pointer used for requesting the mailbox channel + * @data: mailbox payload + * + * This handler is invoked by the OMAP mailbox driver whenever a mailbox + * message is received. Usually, the mailbox payload simply contains + * the index of the virtqueue that is kicked by the remote processor, + * and we let remoteproc core handle it. + * + * In addition to virtqueue indices, we also have some out-of-band values + * that indicate different events. Those values are deliberately very + * large so they don't coincide with virtqueue indices. + */ +static void k3_r5_rproc_mbox_callback(struct mbox_client *client, void *data) +{ + struct k3_r5_rproc *kproc = container_of(client, struct k3_r5_rproc, + client); + struct device *dev = kproc->rproc->dev.parent; + const char *name = kproc->rproc->name; + u32 msg = omap_mbox_message(data); + + dev_dbg(dev, "mbox msg: 0x%x\n", msg); + + switch (msg) { + case RP_MBOX_CRASH: + /* + * remoteproc detected an exception, but error recovery is not + * supported. 
So, just log this for now + */ + dev_err(dev, "K3 R5F rproc %s crashed\n", name); + break; + case RP_MBOX_ECHO_REPLY: + dev_info(dev, "received echo reply from %s\n", name); + break; + default: + /* silently handle all other valid messages */ + if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG) + return; + if (msg > kproc->rproc->max_notifyid) { + dev_dbg(dev, "dropping unknown message 0x%x", msg); + return; + } + /* msg contains the index of the triggered vring */ + if (rproc_vq_interrupt(kproc->rproc, msg) == IRQ_NONE) + dev_dbg(dev, "no message was found in vqid %d\n", msg); + } +} + +/* kick a virtqueue */ +static void k3_r5_rproc_kick(struct rproc *rproc, int vqid) +{ + struct k3_r5_rproc *kproc = rproc->priv; + struct device *dev = rproc->dev.parent; + mbox_msg_t msg = (mbox_msg_t)vqid; + int ret; + + /* send the index of the triggered virtqueue in the mailbox payload */ + ret = mbox_send_message(kproc->mbox, (void *)msg); + if (ret < 0) + dev_err(dev, "failed to send mailbox message, status = %d\n", + ret); +} + +static int k3_r5_split_reset(struct k3_r5_core *core) +{ + int ret; + + ret = reset_control_assert(core->reset); + if (ret) { + dev_err(core->dev, "local-reset assert failed, ret = %d\n", + ret); + return ret; + } + + ret = core->ti_sci->ops.dev_ops.put_device(core->ti_sci, + core->ti_sci_id); + if (ret) { + dev_err(core->dev, "module-reset assert failed, ret = %d\n", + ret); + if (reset_control_deassert(core->reset)) + dev_warn(core->dev, "local-reset deassert back failed\n"); + } + + return ret; +} + +static int k3_r5_split_release(struct k3_r5_core *core) +{ + int ret; + + ret = core->ti_sci->ops.dev_ops.get_device(core->ti_sci, + core->ti_sci_id); + if (ret) { + dev_err(core->dev, "module-reset deassert failed, ret = %d\n", + ret); + return ret; + } + + ret = reset_control_deassert(core->reset); + if (ret) { + dev_err(core->dev, "local-reset deassert failed, ret = %d\n", + ret); + if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci, + core->ti_sci_id)) + dev_warn(core->dev, "module-reset assert back failed\n"); + } + + return ret; +} + +static int k3_r5_lockstep_reset(struct k3_r5_cluster *cluster) +{ + struct k3_r5_core *core; + int ret; + + /* assert local reset on all applicable cores */ + list_for_each_entry(core, &cluster->cores, elem) { + ret = reset_control_assert(core->reset); + if (ret) { + dev_err(core->dev, "local-reset assert failed, ret = %d\n", + ret); + core = list_prev_entry(core, elem); + goto unroll_local_reset; + } + } + + /* disable PSC modules on all applicable cores */ + list_for_each_entry(core, &cluster->cores, elem) { + ret = core->ti_sci->ops.dev_ops.put_device(core->ti_sci, + core->ti_sci_id); + if (ret) { + dev_err(core->dev, "module-reset assert failed, ret = %d\n", + ret); + goto unroll_module_reset; + } + } + + return 0; + +unroll_module_reset: + list_for_each_entry_continue_reverse(core, &cluster->cores, elem) { + if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci, + core->ti_sci_id)) + dev_warn(core->dev, "module-reset assert back failed\n"); + } + core = list_last_entry(&cluster->cores, struct k3_r5_core, elem); +unroll_local_reset: + list_for_each_entry_from_reverse(core, &cluster->cores, elem) { + if (reset_control_deassert(core->reset)) + dev_warn(core->dev, "local-reset deassert back failed\n"); + } + + return ret; +} + +static int k3_r5_lockstep_release(struct k3_r5_cluster *cluster) +{ + struct k3_r5_core *core; + int ret; + + /* enable PSC modules on all applicable cores */ + list_for_each_entry_reverse(core, 
&cluster->cores, elem) {
+		ret = core->ti_sci->ops.dev_ops.get_device(core->ti_sci,
+							   core->ti_sci_id);
+		if (ret) {
+			dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
+				ret);
+			core = list_next_entry(core, elem);
+			goto unroll_module_reset;
+		}
+	}
+
+	/* deassert local reset on all applicable cores */
+	list_for_each_entry_reverse(core, &cluster->cores, elem) {
+		ret = reset_control_deassert(core->reset);
+		if (ret) {
+			dev_err(core->dev, "local-reset deassert failed, ret = %d\n",
+				ret);
+			goto unroll_local_reset;
+		}
+	}
+
+	return 0;
+
+unroll_local_reset:
+	list_for_each_entry_continue(core, &cluster->cores, elem) {
+		if (reset_control_assert(core->reset))
+			dev_warn(core->dev, "local-reset assert back failed\n");
+	}
+	core = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
+unroll_module_reset:
+	list_for_each_entry_from(core, &cluster->cores, elem) {
+		if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
+							 core->ti_sci_id))
+			dev_warn(core->dev, "module-reset assert back failed\n");
+	}
+
+	return ret;
+}
+
+static inline int k3_r5_core_halt(struct k3_r5_core *core)
+{
+	return ti_sci_proc_set_control(core->tsp,
+				       PROC_BOOT_CTRL_FLAG_R5_CORE_HALT, 0);
+}
+
+static inline int k3_r5_core_run(struct k3_r5_core *core)
+{
+	return ti_sci_proc_set_control(core->tsp,
+				       0, PROC_BOOT_CTRL_FLAG_R5_CORE_HALT);
+}
+
+/*
+ * The R5F cores have controls for both a reset and a halt/run. The code
+ * execution from DDR requires the initial boot-strapping code to be run
+ * from the internal TCMs. This function is used to release the resets on
+ * applicable cores to allow loading into the TCMs. The .prepare() ops is
+ * invoked by the remoteproc core before any firmware loading, and is followed
+ * by the .start() ops after loading to actually let the R5 cores run.
+ */
+static int k3_r5_rproc_prepare(struct rproc *rproc)
+{
+	struct k3_r5_rproc *kproc = rproc->priv;
+	struct k3_r5_cluster *cluster = kproc->cluster;
+	struct k3_r5_core *core = kproc->core;
+	struct device *dev = kproc->dev;
+	int ret;
+
+	ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP) ?
+		k3_r5_lockstep_release(cluster) : k3_r5_split_release(core);
+	if (ret) {
+		dev_err(dev, "unable to enable cores for TCM loading, ret = %d\n",
+			ret);
+		return ret;
+	}
+
+	/*
+	 * Zero out both TCMs unconditionally (access from the v8 Arm core is
+	 * not affected by the ATCM & BTCM enable configuration values) so that
+	 * ECC can be effective on all TCM addresses.
+	 */
+	dev_dbg(dev, "zeroing out ATCM memory\n");
+	memset(core->mem[0].cpu_addr, 0x00, core->mem[0].size);
+
+	dev_dbg(dev, "zeroing out BTCM memory\n");
+	memset(core->mem[1].cpu_addr, 0x00, core->mem[1].size);
+
+	return 0;
+}
+
+/*
+ * This function implements the .unprepare() ops and performs the complementary
+ * operations to those of the .prepare() ops. The function is used to assert
+ * the resets on all applicable cores for the rproc device (depending on
+ * LockStep or Split mode). This completes the second portion of powering down
+ * the R5F cores. The cores themselves are only halted in the .stop() ops, and
+ * the .unprepare() ops is invoked by the remoteproc core after the remoteproc
+ * is stopped.
+ */
+static int k3_r5_rproc_unprepare(struct rproc *rproc)
+{
+	struct k3_r5_rproc *kproc = rproc->priv;
+	struct k3_r5_cluster *cluster = kproc->cluster;
+	struct k3_r5_core *core = kproc->core;
+	struct device *dev = kproc->dev;
+	int ret;
+
+	ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP) ?
+		k3_r5_lockstep_reset(cluster) : k3_r5_split_reset(core);
+	if (ret)
+		dev_err(dev, "unable to disable cores, ret = %d\n", ret);
+
+	return ret;
+}
+
+/*
+ * The R5F start sequence includes two different operations
+ * 1. Configure the boot vector for R5F core(s)
+ * 2. Unhalt/Run the R5F core(s)
+ *
+ * The sequence is different between LockStep and Split modes. LockStep mode
+ * requires the boot vector to be configured only for Core0, after which both
+ * cores are unhalted to start execution - Core1 needs to be unhalted first,
+ * followed by Core0. Split mode requires that Core0 always be maintained in a
+ * higher power state than Core1 (implying Core1 can only be started after
+ * Core0 has been started).
+ */
+static int k3_r5_rproc_start(struct rproc *rproc)
+{
+	struct k3_r5_rproc *kproc = rproc->priv;
+	struct k3_r5_cluster *cluster = kproc->cluster;
+	struct mbox_client *client = &kproc->client;
+	struct device *dev = kproc->dev;
+	struct k3_r5_core *core;
+	u32 boot_addr;
+	int ret;
+
+	client->dev = dev;
+	client->tx_done = NULL;
+	client->rx_callback = k3_r5_rproc_mbox_callback;
+	client->tx_block = false;
+	client->knows_txdone = false;
+
+	kproc->mbox = mbox_request_channel(client, 0);
+	if (IS_ERR(kproc->mbox)) {
+		ret = -EBUSY;
+		dev_err(dev, "mbox_request_channel failed: %ld\n",
+			PTR_ERR(kproc->mbox));
+		return ret;
+	}
+
+	/*
+	 * Ping the remote processor - this is only a sanity check for now;
+	 * there is no functional effect whatsoever.
+	 *
+	 * Note that the reply will _not_ arrive immediately: this message
+	 * will wait in the mailbox fifo until the remote processor is booted.
+	 */
+	ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
+	if (ret < 0) {
+		dev_err(dev, "mbox_send_message failed: %d\n", ret);
+		goto put_mbox;
+	}
+
+	boot_addr = rproc->bootaddr;
+	/* TODO: add boot_addr sanity checking */
+	dev_dbg(dev, "booting R5F core using boot addr = 0x%x\n", boot_addr);
+
+	/* boot vector need not be programmed for Core1 in LockStep mode */
+	core = kproc->core;
+	ret = ti_sci_proc_set_config(core->tsp, boot_addr, 0, 0);
+	if (ret)
+		goto put_mbox;
+
+	/* unhalt/run all applicable cores */
+	if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
+		list_for_each_entry_reverse(core, &cluster->cores, elem) {
+			ret = k3_r5_core_run(core);
+			if (ret)
+				goto unroll_core_run;
+		}
+	} else {
+		ret = k3_r5_core_run(core);
+		if (ret)
+			goto put_mbox;
+	}
+
+	return 0;
+
+unroll_core_run:
+	list_for_each_entry_continue(core, &cluster->cores, elem) {
+		if (k3_r5_core_halt(core))
+			dev_warn(core->dev, "core halt back failed\n");
+	}
+put_mbox:
+	mbox_free_channel(kproc->mbox);
+	return ret;
+}
+
+/*
+ * The R5F stop function includes the following operations
+ * 1. Halt R5F core(s)
+ *
+ * The sequence is different between LockStep and Split modes, and the order
+ * of cores on which the operations are performed is also in general the
+ * reverse of that in the start function. LockStep mode requires each operation
+ * to be performed first on Core0, followed by Core1. Split mode requires that
+ * Core0 always be maintained in a higher power state than Core1 (implying
+ * Core1 needs to be stopped before Core0).
+ *
+ * Note that the R5F halt operation in general is not effective when the R5F
+ * core is running, but is needed to make sure the core won't run after
+ * deasserting the reset the subsequent time.
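+ * The halt itself is requested through the PROC_BOOT_CTRL_FLAG_R5_CORE_HALT
+ * processor control flag, i.e. the k3_r5_core_halt() wrapper above boils down
+ * to ti_sci_proc_set_control(core->tsp, PROC_BOOT_CTRL_FLAG_R5_CORE_HALT, 0).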
+ * Asserting the reset could be done here, but it is preferred to be done in
+ * the .unprepare() ops - this maintains the symmetric behavior between the
+ * .start(), .stop(), .prepare() and .unprepare() ops, and also balances them
+ * well between the sysfs 'state' flow and device bind/unbind or module
+ * removal.
+ */
+static int k3_r5_rproc_stop(struct rproc *rproc)
+{
+	struct k3_r5_rproc *kproc = rproc->priv;
+	struct k3_r5_cluster *cluster = kproc->cluster;
+	struct k3_r5_core *core = kproc->core;
+	int ret;
+
+	/* halt all applicable cores */
+	if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
+		list_for_each_entry(core, &cluster->cores, elem) {
+			ret = k3_r5_core_halt(core);
+			if (ret) {
+				core = list_prev_entry(core, elem);
+				goto unroll_core_halt;
+			}
+		}
+	} else {
+		ret = k3_r5_core_halt(core);
+		if (ret)
+			goto out;
+	}
+
+	mbox_free_channel(kproc->mbox);
+
+	return 0;
+
+unroll_core_halt:
+	list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
+		if (k3_r5_core_run(core))
+			dev_warn(core->dev, "core run back failed\n");
+	}
+out:
+	return ret;
+}
+
+/*
+ * Internal Memory translation helper
+ *
+ * Custom function implementing the rproc .da_to_va ops to provide address
+ * translation (device address to kernel virtual address) for internal RAMs
+ * present in a remote processor device. The translated addresses can be used
+ * either by the remoteproc core for loading, or by any rpmsg bus drivers.
+ */
+static void *k3_r5_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
+{
+	struct k3_r5_rproc *kproc = rproc->priv;
+	struct k3_r5_core *core = kproc->core;
+	void __iomem *va = NULL;
+	phys_addr_t bus_addr;
+	u32 dev_addr, offset;
+	size_t size;
+	int i;
+
+	if (len == 0)
+		return NULL;
+
+	/* handle both R5 and SoC views of ATCM and BTCM */
+	for (i = 0; i < core->num_mems; i++) {
+		bus_addr = core->mem[i].bus_addr;
+		dev_addr = core->mem[i].dev_addr;
+		size = core->mem[i].size;
+
+		/* handle R5-view addresses of TCMs */
+		if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
+			offset = da - dev_addr;
+			va = core->mem[i].cpu_addr + offset;
+			return (__force void *)va;
+		}
+
+		/* handle SoC-view addresses of TCMs */
+		if (da >= bus_addr && ((da + len) <= (bus_addr + size))) {
+			offset = da - bus_addr;
+			va = core->mem[i].cpu_addr + offset;
+			return (__force void *)va;
+		}
+	}
+
+	/* handle any SRAM regions using SoC-view addresses */
+	for (i = 0; i < core->num_sram; i++) {
+		dev_addr = core->sram[i].dev_addr;
+		size = core->sram[i].size;
+
+		if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
+			offset = da - dev_addr;
+			va = core->sram[i].cpu_addr + offset;
+			return (__force void *)va;
+		}
+	}
+
+	/* handle static DDR reserved memory regions */
+	for (i = 0; i < kproc->num_rmems; i++) {
+		dev_addr = kproc->rmem[i].dev_addr;
+		size = kproc->rmem[i].size;
+
+		if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
+			offset = da - dev_addr;
+			va = kproc->rmem[i].cpu_addr + offset;
+			return (__force void *)va;
+		}
+	}
+
+	return NULL;
+}
+
+static const struct rproc_ops k3_r5_rproc_ops = {
+	.prepare = k3_r5_rproc_prepare,
+	.unprepare = k3_r5_rproc_unprepare,
+	.start = k3_r5_rproc_start,
+	.stop = k3_r5_rproc_stop,
+	.kick = k3_r5_rproc_kick,
+	.da_to_va = k3_r5_rproc_da_to_va,
+};
+
+/*
+ * Internal R5F Core configuration
+ *
+ * Each R5FSS has a cluster-level setting for configuring the processor
+ * subsystem either in a safety/fault-tolerant LockStep mode or a performance
+ * oriented Split mode.
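+ * Whether LockStep operation is permitted at all is advertised by the system
+ * firmware through the PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED status
+ * flag, and the mode itself is reflected in the
+ * PROC_BOOT_CFG_FLAG_R5_LOCKSTEP configuration bit.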
+ * Each R5F core has a number of settings to enable/disable each of the TCMs
+ * and to control which TCM appears at the R5F core's address 0x0. These
+ * settings need to be configured before the resets for the corresponding core
+ * are released. These settings are all protected and managed by the System
+ * Processor.
+ *
+ * This function is used to pre-configure these settings for each R5F core, and
+ * the configuration is all done through various ti_sci_proc functions that
+ * communicate with the System Processor. The function also ensures that both
+ * the cores are halted before the .prepare() step.
+ *
+ * The function is called from k3_r5_cluster_rproc_init() and is invoked either
+ * once (in LockStep mode) or twice (in Split mode). Support for LockStep mode
+ * is dictated by an eFUSE register bit, and the config settings retrieved from
+ * DT are adjusted accordingly as per the permitted cluster mode. All cluster
+ * level settings like Cluster mode and TEINIT (exception handling state
+ * dictating ARM or Thumb mode) can only be set and retrieved using Core0.
+ *
+ * The function behavior is different based on the cluster mode. The R5F cores
+ * are configured independently as per their individual settings in Split mode.
+ * They are identically configured in LockStep mode using the primary Core0
+ * settings. However, some individual settings cannot be set in LockStep mode.
+ * This is overcome by switching to Split mode initially and then programming
+ * both the cores with the same settings, before reconfiguring again for
+ * LockStep mode.
+ */
+static int k3_r5_rproc_configure(struct k3_r5_rproc *kproc)
+{
+	struct k3_r5_cluster *cluster = kproc->cluster;
+	struct device *dev = kproc->dev;
+	struct k3_r5_core *core0, *core, *temp;
+	u32 ctrl = 0, cfg = 0, stat = 0;
+	u32 set_cfg = 0, clr_cfg = 0;
+	u64 boot_vec = 0;
+	bool lockstep_en;
+	int ret;
+
+	core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
+	core = (cluster->mode == CLUSTER_MODE_LOCKSTEP) ? core0 : kproc->core;
+
+	ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl,
+				     &stat);
+	if (ret < 0)
+		return ret;
+
+	dev_dbg(dev, "boot_vector = 0x%llx, cfg = 0x%x ctrl = 0x%x stat = 0x%x\n",
+		boot_vec, cfg, ctrl, stat);
+
+	lockstep_en = !!(stat & PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED);
+	if (!lockstep_en && cluster->mode == CLUSTER_MODE_LOCKSTEP) {
+		dev_err(cluster->dev, "lockstep mode not permitted, force configuring for split-mode\n");
+		cluster->mode = CLUSTER_MODE_SPLIT;
+	}
+
+	/* always enable ARM mode and set boot vector to 0 */
+	boot_vec = 0x0;
+	if (core == core0) {
+		clr_cfg = PROC_BOOT_CFG_FLAG_R5_TEINIT;
+		/*
+		 * The LockStep configuration bit is Read-only on Split-mode
+		 * _only_ devices and system firmware will NACK any requests
+		 * with the bit configured, so program it only on permitted
+		 * devices.
+		 */
+		if (lockstep_en)
+			clr_cfg |= PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
+	}
+
+	if (core->atcm_enable)
+		set_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;
+	else
+		clr_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;
+
+	if (core->btcm_enable)
+		set_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;
+	else
+		clr_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;
+
+	if (core->loczrama)
+		set_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;
+	else
+		clr_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;
+
+	if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
+		/*
+		 * Work around system firmware limitations to make sure both
+		 * cores are programmed symmetrically in LockStep. LockStep
+		 * and TEINIT config is only allowed with Core0.
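+		 * As an example of the resulting programming: with the driver
+		 * defaults of atcm_enable = 0, btcm_enable = 1 and
+		 * loczrama = 1 (see k3_r5_core_of_init()), set_cfg carries
+		 * the BTCM_EN and TCM_RSTBASE flags while clr_cfg carries
+		 * ATCM_EN, with TEINIT (and LOCKSTEP, where permitted) also
+		 * cleared via Core0.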
+		 */
+		list_for_each_entry(temp, &cluster->cores, elem) {
+			ret = k3_r5_core_halt(temp);
+			if (ret)
+				goto out;
+
+			if (temp != core) {
+				clr_cfg &= ~PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
+				clr_cfg &= ~PROC_BOOT_CFG_FLAG_R5_TEINIT;
+			}
+			ret = ti_sci_proc_set_config(temp->tsp, boot_vec,
+						     set_cfg, clr_cfg);
+			if (ret)
+				goto out;
+		}
+
+		set_cfg = PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
+		clr_cfg = 0;
+		ret = ti_sci_proc_set_config(core->tsp, boot_vec,
+					     set_cfg, clr_cfg);
+	} else {
+		ret = k3_r5_core_halt(core);
+		if (ret)
+			goto out;
+
+		ret = ti_sci_proc_set_config(core->tsp, boot_vec,
+					     set_cfg, clr_cfg);
+	}
+
+out:
+	return ret;
+}
+
+static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc)
+{
+	struct device *dev = kproc->dev;
+	struct device_node *np = dev_of_node(dev);
+	struct device_node *rmem_np;
+	struct reserved_mem *rmem;
+	int num_rmems;
+	int ret, i;
+
+	num_rmems = of_property_count_elems_of_size(np, "memory-region",
+						    sizeof(phandle));
+	if (num_rmems <= 0) {
+		dev_err(dev, "device does not have reserved memory regions, ret = %d\n",
+			num_rmems);
+		return -EINVAL;
+	}
+	if (num_rmems < 2) {
+		dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
+			num_rmems);
+		return -EINVAL;
+	}
+
+	/* use reserved memory region 0 for vring DMA allocations */
+	ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
+	if (ret) {
+		dev_err(dev, "device cannot initialize DMA pool, ret = %d\n",
+			ret);
+		return ret;
+	}
+
+	num_rmems--;
+	kproc->rmem = kcalloc(num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
+	if (!kproc->rmem) {
+		ret = -ENOMEM;
+		goto release_rmem;
+	}
+
+	/* use remaining reserved memory regions for static carveouts */
+	for (i = 0; i < num_rmems; i++) {
+		rmem_np = of_parse_phandle(np, "memory-region", i + 1);
+		if (!rmem_np) {
+			ret = -EINVAL;
+			goto unmap_rmem;
+		}
+
+		rmem = of_reserved_mem_lookup(rmem_np);
+		if (!rmem) {
+			of_node_put(rmem_np);
+			ret = -EINVAL;
+			goto unmap_rmem;
+		}
+		of_node_put(rmem_np);
+
+		kproc->rmem[i].bus_addr = rmem->base;
+		/*
+		 * R5Fs do not have an MMU, but have a Region Address Translator
+		 * (RAT) module that provides a fixed entry translation between
+		 * the 32-bit processor addresses to 64-bit bus addresses. The
+		 * RAT is programmable only by the R5F cores. RAT support is
+		 * currently not implemented, so 64-bit address regions are
+		 * not supported.
The absence of MMUs implies that the R5F device + * addresses/supported memory regions are restricted to 32-bit + * bus addresses, and are identical + */ + kproc->rmem[i].dev_addr = (u32)rmem->base; + kproc->rmem[i].size = rmem->size; + kproc->rmem[i].cpu_addr = ioremap_wc(rmem->base, rmem->size); + if (!kproc->rmem[i].cpu_addr) { + dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n", + i + 1, &rmem->base, &rmem->size); + ret = -ENOMEM; + goto unmap_rmem; + } + + dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n", + i + 1, &kproc->rmem[i].bus_addr, + kproc->rmem[i].size, kproc->rmem[i].cpu_addr, + kproc->rmem[i].dev_addr); + } + kproc->num_rmems = num_rmems; + + return 0; + +unmap_rmem: + for (i--; i >= 0; i--) + iounmap(kproc->rmem[i].cpu_addr); + kfree(kproc->rmem); +release_rmem: + of_reserved_mem_device_release(dev); + return ret; +} + +static void k3_r5_reserved_mem_exit(struct k3_r5_rproc *kproc) +{ + int i; + + for (i = 0; i < kproc->num_rmems; i++) + iounmap(kproc->rmem[i].cpu_addr); + kfree(kproc->rmem); + + of_reserved_mem_device_release(kproc->dev); +} + +static int k3_r5_cluster_rproc_init(struct platform_device *pdev) +{ + struct k3_r5_cluster *cluster = platform_get_drvdata(pdev); + struct device *dev = &pdev->dev; + struct k3_r5_rproc *kproc; + struct k3_r5_core *core, *core1; + struct device *cdev; + const char *fw_name; + struct rproc *rproc; + int ret; + + core1 = list_last_entry(&cluster->cores, struct k3_r5_core, elem); + list_for_each_entry(core, &cluster->cores, elem) { + cdev = core->dev; + ret = rproc_of_parse_firmware(cdev, 0, &fw_name); + if (ret) { + dev_err(dev, "failed to parse firmware-name property, ret = %d\n", + ret); + goto out; + } + + rproc = rproc_alloc(cdev, dev_name(cdev), &k3_r5_rproc_ops, + fw_name, sizeof(*kproc)); + if (!rproc) { + ret = -ENOMEM; + goto out; + } + + /* K3 R5s have a Region Address Translator (RAT) but no MMU */ + rproc->has_iommu = false; + /* error recovery is not supported at present */ + rproc->recovery_disabled = true; + + kproc = rproc->priv; + kproc->cluster = cluster; + kproc->core = core; + kproc->dev = cdev; + kproc->rproc = rproc; + core->rproc = rproc; + + ret = k3_r5_rproc_configure(kproc); + if (ret) { + dev_err(dev, "initial configure failed, ret = %d\n", + ret); + goto err_config; + } + + ret = k3_r5_reserved_mem_init(kproc); + if (ret) { + dev_err(dev, "reserved memory init failed, ret = %d\n", + ret); + goto err_config; + } + + ret = rproc_add(rproc); + if (ret) { + dev_err(dev, "rproc_add failed, ret = %d\n", ret); + goto err_add; + } + + /* create only one rproc in lockstep mode */ + if (cluster->mode == CLUSTER_MODE_LOCKSTEP) + break; + } + + return 0; + +err_split: + rproc_del(rproc); +err_add: + k3_r5_reserved_mem_exit(kproc); +err_config: + rproc_free(rproc); + core->rproc = NULL; +out: + /* undo core0 upon any failures on core1 in split-mode */ + if (cluster->mode == CLUSTER_MODE_SPLIT && core == core1) { + core = list_prev_entry(core, elem); + rproc = core->rproc; + kproc = rproc->priv; + goto err_split; + } + return ret; +} + +static int k3_r5_cluster_rproc_exit(struct platform_device *pdev) +{ + struct k3_r5_cluster *cluster = platform_get_drvdata(pdev); + struct k3_r5_rproc *kproc; + struct k3_r5_core *core; + struct rproc *rproc; + + /* + * lockstep mode has only one rproc associated with first core, whereas + * split-mode has two rprocs associated with each core, and requires + * that core1 be powered down first + */ + core = (cluster->mode == 
CLUSTER_MODE_LOCKSTEP) ?
+		list_first_entry(&cluster->cores, struct k3_r5_core, elem) :
+		list_last_entry(&cluster->cores, struct k3_r5_core, elem);
+
+	list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
+		rproc = core->rproc;
+		kproc = rproc->priv;
+
+		rproc_del(rproc);
+
+		k3_r5_reserved_mem_exit(kproc);
+
+		rproc_free(rproc);
+		core->rproc = NULL;
+	}
+
+	return 0;
+}
+
+static int k3_r5_core_of_get_internal_memories(struct platform_device *pdev,
+					       struct k3_r5_core *core)
+{
+	static const char * const mem_names[] = {"atcm", "btcm"};
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	int num_mems;
+	int i;
+
+	num_mems = ARRAY_SIZE(mem_names);
+	core->mem = devm_kcalloc(dev, num_mems, sizeof(*core->mem), GFP_KERNEL);
+	if (!core->mem)
+		return -ENOMEM;
+
+	for (i = 0; i < num_mems; i++) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						   mem_names[i]);
+		if (!res) {
+			dev_err(dev, "found no memory resource for %s\n",
+				mem_names[i]);
+			return -EINVAL;
+		}
+		if (!devm_request_mem_region(dev, res->start,
+					     resource_size(res),
+					     dev_name(dev))) {
+			dev_err(dev, "could not request %s region for resource\n",
+				mem_names[i]);
+			return -EBUSY;
+		}
+
+		/*
+		 * TCMs are designed in general to support RAM-like backing
+		 * memories. So, map these as Normal Non-Cached memories. This
+		 * also avoids/fixes any potential alignment faults due to
+		 * unaligned data accesses when using memcpy() or memset()
+		 * functions (normally seen with device type memory).
+		 */
+		core->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
+							resource_size(res));
+		if (!core->mem[i].cpu_addr) {
+			dev_err(dev, "failed to map %s memory\n", mem_names[i]);
+			return -ENOMEM;
+		}
+		core->mem[i].bus_addr = res->start;
+
+		/*
+		 * TODO:
+		 * The R5F cores can place ATCM & BTCM anywhere in their
+		 * address space based on the corresponding Region Registers
+		 * in the System Control coprocessor. For now, place ATCM and
+		 * BTCM at addresses 0 and 0x41010000 (same as the bus address
+		 * on AM65x SoCs) based on the loczrama setting.
+		 */
+		if (!strcmp(mem_names[i], "atcm")) {
+			core->mem[i].dev_addr = core->loczrama ?
+							0 : K3_R5_TCM_DEV_ADDR;
+		} else {
+			core->mem[i].dev_addr = core->loczrama ?
+ K3_R5_TCM_DEV_ADDR : 0; + } + core->mem[i].size = resource_size(res); + + dev_dbg(dev, "memory %5s: bus addr %pa size 0x%zx va %pK da 0x%x\n", + mem_names[i], &core->mem[i].bus_addr, + core->mem[i].size, core->mem[i].cpu_addr, + core->mem[i].dev_addr); + } + core->num_mems = num_mems; + + return 0; +} + +static int k3_r5_core_of_get_sram_memories(struct platform_device *pdev, + struct k3_r5_core *core) +{ + struct device_node *np = pdev->dev.of_node; + struct device *dev = &pdev->dev; + struct device_node *sram_np; + struct resource res; + int num_sram; + int i, ret; + + num_sram = of_property_count_elems_of_size(np, "sram", sizeof(phandle)); + if (num_sram <= 0) { + dev_dbg(dev, "device does not use reserved on-chip memories, num_sram = %d\n", + num_sram); + return 0; + } + + core->sram = devm_kcalloc(dev, num_sram, sizeof(*core->sram), GFP_KERNEL); + if (!core->sram) + return -ENOMEM; + + for (i = 0; i < num_sram; i++) { + sram_np = of_parse_phandle(np, "sram", i); + if (!sram_np) + return -EINVAL; + + if (!of_device_is_available(sram_np)) { + of_node_put(sram_np); + return -EINVAL; + } + + ret = of_address_to_resource(sram_np, 0, &res); + of_node_put(sram_np); + if (ret) + return -EINVAL; + + core->sram[i].bus_addr = res.start; + core->sram[i].dev_addr = res.start; + core->sram[i].size = resource_size(&res); + core->sram[i].cpu_addr = devm_ioremap_wc(dev, res.start, + resource_size(&res)); + if (!core->sram[i].cpu_addr) { + dev_err(dev, "failed to parse and map sram%d memory at %pad\n", + i, &res.start); + return -ENOMEM; + } + + dev_dbg(dev, "memory sram%d: bus addr %pa size 0x%zx va %pK da 0x%x\n", + i, &core->sram[i].bus_addr, + core->sram[i].size, core->sram[i].cpu_addr, + core->sram[i].dev_addr); + } + core->num_sram = num_sram; + + return 0; +} + +static +struct ti_sci_proc *k3_r5_core_of_get_tsp(struct device *dev, + const struct ti_sci_handle *sci) +{ + struct ti_sci_proc *tsp; + u32 temp[2]; + int ret; + + ret = of_property_read_u32_array(dev_of_node(dev), "ti,sci-proc-ids", + temp, 2); + if (ret < 0) + return ERR_PTR(ret); + + tsp = devm_kzalloc(dev, sizeof(*tsp), GFP_KERNEL); + if (!tsp) + return ERR_PTR(-ENOMEM); + + tsp->dev = dev; + tsp->sci = sci; + tsp->ops = &sci->ops.proc_ops; + tsp->proc_id = temp[0]; + tsp->host_id = temp[1]; + + return tsp; +} + +static int k3_r5_core_of_init(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev_of_node(dev); + struct k3_r5_core *core; + int ret; + + if (!devres_open_group(dev, k3_r5_core_of_init, GFP_KERNEL)) + return -ENOMEM; + + core = devm_kzalloc(dev, sizeof(*core), GFP_KERNEL); + if (!core) { + ret = -ENOMEM; + goto err; + } + + core->dev = dev; + /* + * Use SoC Power-on-Reset values as default if no DT properties are + * used to dictate the TCM configurations + */ + core->atcm_enable = 0; + core->btcm_enable = 1; + core->loczrama = 1; + + ret = of_property_read_u32(np, "ti,atcm-enable", &core->atcm_enable); + if (ret < 0 && ret != -EINVAL) { + dev_err(dev, "invalid format for ti,atcm-enable, ret = %d\n", + ret); + goto err; + } + + ret = of_property_read_u32(np, "ti,btcm-enable", &core->btcm_enable); + if (ret < 0 && ret != -EINVAL) { + dev_err(dev, "invalid format for ti,btcm-enable, ret = %d\n", + ret); + goto err; + } + + ret = of_property_read_u32(np, "ti,loczrama", &core->loczrama); + if (ret < 0 && ret != -EINVAL) { + dev_err(dev, "invalid format for ti,loczrama, ret = %d\n", ret); + goto err; + } + + core->ti_sci = devm_ti_sci_get_by_phandle(dev, "ti,sci"); + if 
(IS_ERR(core->ti_sci)) { + ret = PTR_ERR(core->ti_sci); + if (ret != -EPROBE_DEFER) { + dev_err(dev, "failed to get ti-sci handle, ret = %d\n", + ret); + } + core->ti_sci = NULL; + goto err; + } + + ret = of_property_read_u32(np, "ti,sci-dev-id", &core->ti_sci_id); + if (ret) { + dev_err(dev, "missing 'ti,sci-dev-id' property\n"); + goto err; + } + + core->reset = devm_reset_control_get_exclusive(dev, NULL); + if (IS_ERR_OR_NULL(core->reset)) { + ret = PTR_ERR_OR_ZERO(core->reset); + if (!ret) + ret = -ENODEV; + if (ret != -EPROBE_DEFER) { + dev_err(dev, "failed to get reset handle, ret = %d\n", + ret); + } + goto err; + } + + core->tsp = k3_r5_core_of_get_tsp(dev, core->ti_sci); + if (IS_ERR(core->tsp)) { + ret = PTR_ERR(core->tsp); + dev_err(dev, "failed to construct ti-sci proc control, ret = %d\n", + ret); + goto err; + } + + ret = k3_r5_core_of_get_internal_memories(pdev, core); + if (ret) { + dev_err(dev, "failed to get internal memories, ret = %d\n", + ret); + goto err; + } + + ret = k3_r5_core_of_get_sram_memories(pdev, core); + if (ret) { + dev_err(dev, "failed to get sram memories, ret = %d\n", ret); + goto err; + } + + ret = ti_sci_proc_request(core->tsp); + if (ret < 0) { + dev_err(dev, "ti_sci_proc_request failed, ret = %d\n", ret); + goto err; + } + + platform_set_drvdata(pdev, core); + devres_close_group(dev, k3_r5_core_of_init); + + return 0; + +err: + devres_release_group(dev, k3_r5_core_of_init); + return ret; +} + +/* + * free the resources explicitly since driver model is not being used + * for the child R5F devices + */ +static void k3_r5_core_of_exit(struct platform_device *pdev) +{ + struct k3_r5_core *core = platform_get_drvdata(pdev); + struct device *dev = &pdev->dev; + int ret; + + ret = ti_sci_proc_release(core->tsp); + if (ret) + dev_err(dev, "failed to release proc, ret = %d\n", ret); + + platform_set_drvdata(pdev, NULL); + devres_release_group(dev, k3_r5_core_of_init); +} + +static void k3_r5_cluster_of_exit(struct platform_device *pdev) +{ + struct k3_r5_cluster *cluster = platform_get_drvdata(pdev); + struct platform_device *cpdev; + struct k3_r5_core *core, *temp; + + list_for_each_entry_safe_reverse(core, temp, &cluster->cores, elem) { + list_del(&core->elem); + cpdev = to_platform_device(core->dev); + k3_r5_core_of_exit(cpdev); + } +} + +static int k3_r5_cluster_of_init(struct platform_device *pdev) +{ + struct k3_r5_cluster *cluster = platform_get_drvdata(pdev); + struct device *dev = &pdev->dev; + struct device_node *np = dev_of_node(dev); + struct platform_device *cpdev; + struct device_node *child; + struct k3_r5_core *core; + int ret; + + for_each_available_child_of_node(np, child) { + cpdev = of_find_device_by_node(child); + if (!cpdev) { + ret = -ENODEV; + dev_err(dev, "could not get R5 core platform device\n"); + of_node_put(child); + goto fail; + } + + ret = k3_r5_core_of_init(cpdev); + if (ret) { + dev_err(dev, "k3_r5_core_of_init failed, ret = %d\n", + ret); + put_device(&cpdev->dev); + of_node_put(child); + goto fail; + } + + core = platform_get_drvdata(cpdev); + put_device(&cpdev->dev); + list_add_tail(&core->elem, &cluster->cores); + } + + return 0; + +fail: + k3_r5_cluster_of_exit(pdev); + return ret; +} + +static int k3_r5_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev_of_node(dev); + struct k3_r5_cluster *cluster; + int ret; + int num_cores; + + cluster = devm_kzalloc(dev, sizeof(*cluster), GFP_KERNEL); + if (!cluster) + return -ENOMEM; + + cluster->dev = dev; + cluster->mode = 
CLUSTER_MODE_LOCKSTEP; + INIT_LIST_HEAD(&cluster->cores); + + ret = of_property_read_u32(np, "ti,cluster-mode", &cluster->mode); + if (ret < 0 && ret != -EINVAL) { + dev_err(dev, "invalid format for ti,cluster-mode, ret = %d\n", + ret); + return ret; + } + + num_cores = of_get_available_child_count(np); + if (num_cores != 2) { + dev_err(dev, "MCU cluster requires both R5F cores to be enabled, num_cores = %d\n", + num_cores); + return -ENODEV; + } + + platform_set_drvdata(pdev, cluster); + + ret = devm_of_platform_populate(dev); + if (ret) { + dev_err(dev, "devm_of_platform_populate failed, ret = %d\n", + ret); + return ret; + } + + ret = k3_r5_cluster_of_init(pdev); + if (ret) { + dev_err(dev, "k3_r5_cluster_of_init failed, ret = %d\n", ret); + return ret; + } + + ret = devm_add_action_or_reset(dev, + (void(*)(void *))k3_r5_cluster_of_exit, + pdev); + if (ret) + return ret; + + ret = k3_r5_cluster_rproc_init(pdev); + if (ret) { + dev_err(dev, "k3_r5_cluster_rproc_init failed, ret = %d\n", + ret); + return ret; + } + + ret = devm_add_action_or_reset(dev, + (void(*)(void *))k3_r5_cluster_rproc_exit, + pdev); + if (ret) + return ret; + + return 0; +} + +static const struct of_device_id k3_r5_of_match[] = { + { .compatible = "ti,am654-r5fss", }, + { .compatible = "ti,j721e-r5fss", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, k3_r5_of_match); + +static struct platform_driver k3_r5_rproc_driver = { + .probe = k3_r5_probe, + .driver = { + .name = "k3_r5_rproc", + .of_match_table = k3_r5_of_match, + }, +}; + +module_platform_driver(k3_r5_rproc_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("TI K3 R5F remote processor driver"); +MODULE_AUTHOR("Suman Anna <s-anna@ti.com>"); diff --git a/drivers/remoteproc/ti_sci_proc.h b/drivers/remoteproc/ti_sci_proc.h new file mode 100644 index 000000000..778558abc --- /dev/null +++ b/drivers/remoteproc/ti_sci_proc.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Texas Instruments TI-SCI Processor Controller Helper Functions + * + * Copyright (C) 2018-2020 Texas Instruments Incorporated - https://www.ti.com/ + * Suman Anna <s-anna@ti.com> + */ + +#ifndef REMOTEPROC_TI_SCI_PROC_H +#define REMOTEPROC_TI_SCI_PROC_H + +#include <linux/soc/ti/ti_sci_protocol.h> + +/** + * struct ti_sci_proc - structure representing a processor control client + * @sci: cached TI-SCI protocol handle + * @ops: cached TI-SCI proc ops + * @dev: cached client device pointer + * @proc_id: processor id for the consumer remoteproc device + * @host_id: host id to pass the control over for this consumer remoteproc + * device + */ +struct ti_sci_proc { + const struct ti_sci_handle *sci; + const struct ti_sci_proc_ops *ops; + struct device *dev; + u8 proc_id; + u8 host_id; +}; + +static inline int ti_sci_proc_request(struct ti_sci_proc *tsp) +{ + int ret; + + ret = tsp->ops->request(tsp->sci, tsp->proc_id); + if (ret) + dev_err(tsp->dev, "ti-sci processor request failed: %d\n", + ret); + return ret; +} + +static inline int ti_sci_proc_release(struct ti_sci_proc *tsp) +{ + int ret; + + ret = tsp->ops->release(tsp->sci, tsp->proc_id); + if (ret) + dev_err(tsp->dev, "ti-sci processor release failed: %d\n", + ret); + return ret; +} + +static inline int ti_sci_proc_handover(struct ti_sci_proc *tsp) +{ + int ret; + + ret = tsp->ops->handover(tsp->sci, tsp->proc_id, tsp->host_id); + if (ret) + dev_err(tsp->dev, "ti-sci processor handover of %d to %d failed: %d\n", + tsp->proc_id, tsp->host_id, ret); + return ret; +} + +static inline int 
+static inline int ti_sci_proc_set_config(struct ti_sci_proc *tsp,
+					  u64 boot_vector,
+					  u32 cfg_set, u32 cfg_clr)
+{
+	int ret;
+
+	ret = tsp->ops->set_config(tsp->sci, tsp->proc_id, boot_vector,
+				   cfg_set, cfg_clr);
+	if (ret)
+		dev_err(tsp->dev, "ti-sci processor set_config failed: %d\n",
+			ret);
+	return ret;
+}
+
+static inline int ti_sci_proc_set_control(struct ti_sci_proc *tsp,
+					   u32 ctrl_set, u32 ctrl_clr)
+{
+	int ret;
+
+	ret = tsp->ops->set_control(tsp->sci, tsp->proc_id, ctrl_set, ctrl_clr);
+	if (ret)
+		dev_err(tsp->dev, "ti-sci processor set_control failed: %d\n",
+			ret);
+	return ret;
+}
+
+static inline int ti_sci_proc_get_status(struct ti_sci_proc *tsp,
+					 u64 *boot_vector, u32 *cfg_flags,
+					 u32 *ctrl_flags, u32 *status_flags)
+{
+	int ret;
+
+	ret = tsp->ops->get_status(tsp->sci, tsp->proc_id, boot_vector,
+				   cfg_flags, ctrl_flags, status_flags);
+	if (ret)
+		dev_err(tsp->dev, "ti-sci processor get_status failed: %d\n",
+			ret);
+	return ret;
+}
+
+#endif /* REMOTEPROC_TI_SCI_PROC_H */
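The set_config/set_control pair is what a SoC-specific driver uses to point a core at its firmware and let it run. A hedged sketch of a start sequence follows; fw_boot_addr, boot_cfg_flags and the halt-flag name mirror how the K3 R5F driver uses these helpers, but they are assumptions here, not symbols defined by this header:

	/* illustrative start sequence, not part of the patch */
	ret = ti_sci_proc_set_config(tsp, fw_boot_addr, boot_cfg_flags, 0);
	if (ret)
		return ret;

	/* clearing the core-halt control bit releases the core to run */
	ret = ti_sci_proc_set_control(tsp, 0, PROC_BOOT_CTRL_FLAG_R5_CORE_HALT);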
diff --git a/drivers/remoteproc/wkup_m3_rproc.c b/drivers/remoteproc/wkup_m3_rproc.c
new file mode 100644
index 000000000..b9349d684
--- /dev/null
+++ b/drivers/remoteproc/wkup_m3_rproc.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI AMx3 Wakeup M3 Remote Processor driver
+ *
+ * Copyright (C) 2014-2015 Texas Instruments, Inc.
+ *
+ * Dave Gerlach <d-gerlach@ti.com>
+ * Suman Anna <s-anna@ti.com>
+ */
+
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/remoteproc.h>
+
+#include <linux/platform_data/wkup_m3.h>
+
+#include "remoteproc_internal.h"
+
+#define WKUPM3_MEM_MAX	2
+
+/**
+ * struct wkup_m3_mem - WkupM3 internal memory structure
+ * @cpu_addr: MPU virtual address of the memory region
+ * @bus_addr: Bus address used to access the memory region
+ * @dev_addr: Device address from Wakeup M3 view
+ * @size: Size of the memory region
+ */
+struct wkup_m3_mem {
+	void __iomem *cpu_addr;
+	phys_addr_t bus_addr;
+	u32 dev_addr;
+	size_t size;
+};
+
+/**
+ * struct wkup_m3_rproc - WkupM3 remote processor state
+ * @rproc: rproc handle
+ * @pdev: pointer to platform device
+ * @mem: WkupM3 memory information
+ */
+struct wkup_m3_rproc {
+	struct rproc *rproc;
+	struct platform_device *pdev;
+	struct wkup_m3_mem mem[WKUPM3_MEM_MAX];
+};
+
+static int wkup_m3_rproc_start(struct rproc *rproc)
+{
+	struct wkup_m3_rproc *wkupm3 = rproc->priv;
+	struct platform_device *pdev = wkupm3->pdev;
+	struct device *dev = &pdev->dev;
+	struct wkup_m3_platform_data *pdata = dev_get_platdata(dev);
+
+	if (pdata->deassert_reset(pdev, pdata->reset_name)) {
+		dev_err(dev, "Unable to deassert reset of wkup_m3!\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int wkup_m3_rproc_stop(struct rproc *rproc)
+{
+	struct wkup_m3_rproc *wkupm3 = rproc->priv;
+	struct platform_device *pdev = wkupm3->pdev;
+	struct device *dev = &pdev->dev;
+	struct wkup_m3_platform_data *pdata = dev_get_platdata(dev);
+
+	if (pdata->assert_reset(pdev, pdata->reset_name)) {
+		dev_err(dev, "Unable to assert reset of wkup_m3!\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static void *wkup_m3_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
+{
+	struct wkup_m3_rproc *wkupm3 = rproc->priv;
+	void *va = NULL;
+	int i;
+	u32 offset;
+
+	if (len == 0)
+		return NULL;
+
+	for (i = 0; i < WKUPM3_MEM_MAX; i++) {
+		if (da >= wkupm3->mem[i].dev_addr && da + len <=
+		    wkupm3->mem[i].dev_addr + wkupm3->mem[i].size) {
+			offset = da - wkupm3->mem[i].dev_addr;
+			/* __force to make sparse happy with type conversion */
+			va = (__force void *)(wkupm3->mem[i].cpu_addr + offset);
+			break;
+		}
+	}
+
+	return va;
+}
+
+static const struct rproc_ops wkup_m3_rproc_ops = {
+	.start		= wkup_m3_rproc_start,
+	.stop		= wkup_m3_rproc_stop,
+	.da_to_va	= wkup_m3_rproc_da_to_va,
+};
+
+static const struct of_device_id wkup_m3_rproc_of_match[] = {
+	{ .compatible = "ti,am3352-wkup-m3", },
+	{ .compatible = "ti,am4372-wkup-m3", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, wkup_m3_rproc_of_match);
+
+static int wkup_m3_rproc_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct wkup_m3_platform_data *pdata = dev->platform_data;
+	/* umem always needs to be processed first */
+	const char *mem_names[WKUPM3_MEM_MAX] = { "umem", "dmem" };
+	struct wkup_m3_rproc *wkupm3;
+	const char *fw_name;
+	struct rproc *rproc;
+	struct resource *res;
+	const __be32 *addrp;
+	u32 l4_offset = 0;
+	u64 size;
+	int ret;
+	int i;
+
+	if (!(pdata && pdata->deassert_reset && pdata->assert_reset &&
+	      pdata->reset_name)) {
+		dev_err(dev, "Platform data missing!\n");
+		return -ENODEV;
+	}
+
+	ret = of_property_read_string(dev->of_node, "ti,pm-firmware",
+				      &fw_name);
+	if (ret) {
+		dev_err(dev, "No firmware filename given\n");
+		return -ENODEV;
+	}
+
+	pm_runtime_enable(&pdev->dev);
+	ret = pm_runtime_get_sync(&pdev->dev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "pm_runtime_get_sync() failed\n");
+		goto err;
+	}
+
+	rproc = rproc_alloc(dev, "wkup_m3", &wkup_m3_rproc_ops,
+			    fw_name, sizeof(*wkupm3));
+	if (!rproc) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	rproc->auto_boot = false;
+
+	wkupm3 = rproc->priv;
+	wkupm3->rproc = rproc;
+	wkupm3->pdev = pdev;
+
+	for (i = 0; i < ARRAY_SIZE(mem_names); i++) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						   mem_names[i]);
+		wkupm3->mem[i].cpu_addr = devm_ioremap_resource(dev, res);
+		if (IS_ERR(wkupm3->mem[i].cpu_addr)) {
+			dev_err(&pdev->dev, "devm_ioremap_resource failed for resource %d\n",
+				i);
+			ret = PTR_ERR(wkupm3->mem[i].cpu_addr);
+			goto err;
+		}
+		wkupm3->mem[i].bus_addr = res->start;
+		wkupm3->mem[i].size = resource_size(res);
+		addrp = of_get_address(dev->of_node, i, &size, NULL);
+		/*
+		 * The wkupm3 has umem at address 0 in its view, so the device
+		 * addresses for each memory region are computed as relative
+		 * offsets from the bus address of umem, which is why umem
+		 * needs to be processed first.
+		 */
+		if (!strcmp(mem_names[i], "umem"))
+			l4_offset = be32_to_cpu(*addrp);
+		wkupm3->mem[i].dev_addr = be32_to_cpu(*addrp) - l4_offset;
+	}
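	/*
	 * Worked example (editorial aside, not part of the patch; addresses
	 * are illustrative, close to the AM335x layout): with umem at bus
	 * address 0x44d00000 and dmem at 0x44d80000, the loop records
	 * l4_offset = 0x44d00000 while handling "umem", so:
	 *   umem: dev_addr = 0x44d00000 - 0x44d00000 = 0x0
	 *   dmem: dev_addr = 0x44d80000 - 0x44d00000 = 0x80000
	 * which matches the M3's own view, where its code memory starts at 0.
	 */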
+
+	dev_set_drvdata(dev, rproc);
+
+	ret = rproc_add(rproc);
+	if (ret) {
+		dev_err(dev, "rproc_add failed\n");
+		goto err_put_rproc;
+	}
+
+	return 0;
+
+err_put_rproc:
+	rproc_free(rproc);
+err:
+	pm_runtime_put_noidle(dev);
+	pm_runtime_disable(dev);
+	return ret;
+}
+
+static int wkup_m3_rproc_remove(struct platform_device *pdev)
+{
+	struct rproc *rproc = platform_get_drvdata(pdev);
+
+	rproc_del(rproc);
+	rproc_free(rproc);
+	pm_runtime_put_sync(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int wkup_m3_rpm_suspend(struct device *dev)
+{
+	return -EBUSY;
+}
+
+static int wkup_m3_rpm_resume(struct device *dev)
+{
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops wkup_m3_rproc_pm_ops = {
+	SET_RUNTIME_PM_OPS(wkup_m3_rpm_suspend, wkup_m3_rpm_resume, NULL)
+};
+
+static struct platform_driver wkup_m3_rproc_driver = {
+	.probe = wkup_m3_rproc_probe,
+	.remove = wkup_m3_rproc_remove,
+	.driver = {
+		.name = "wkup_m3_rproc",
+		.of_match_table = wkup_m3_rproc_of_match,
+		.pm = &wkup_m3_rproc_pm_ops,
+	},
+};
+
+module_platform_driver(wkup_m3_rproc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI Wakeup M3 remote processor control driver");
+MODULE_AUTHOR("Dave Gerlach <d-gerlach@ti.com>");
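Finally, the da_to_va() window check used above is simple enough to model in isolation. A self-contained userspace sketch with made-up sizes and device addresses, mirroring umem at device address 0 and dmem at 0x80000 (illustrative only, not kernel code):

	#include <stdio.h>
	#include <stdint.h>
	#include <stddef.h>

	struct mem { uintptr_t cpu; uint32_t dev; size_t size; };

	/* return a host pointer if [da, da+len) falls inside one window */
	static void *da_to_va(const struct mem *m, int n, uint64_t da, size_t len)
	{
		for (int i = 0; i < n; i++)
			if (da >= m[i].dev && da + len <= m[i].dev + m[i].size)
				return (void *)(m[i].cpu + (da - m[i].dev));
		return NULL;	/* no window fully contains the request */
	}

	int main(void)
	{
		static char umem[16384], dmem[8192];
		const struct mem m[2] = {
			{ (uintptr_t)umem, 0x0,     sizeof(umem) },	/* umem at da 0 */
			{ (uintptr_t)dmem, 0x80000, sizeof(dmem) },	/* dmem at da 0x80000 */
		};

		printf("%p\n", da_to_va(m, 2, 0x80010, 16)); /* inside dmem */
		printf("%p\n", da_to_va(m, 2, 0x1000,  64)); /* inside umem */
		printf("%p\n", da_to_va(m, 2, 0x7fff0, 64)); /* spans a gap: NULL */
		return 0;
	}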