From 7f3a4257159dea8e7ef66d1a539dc6df708b8ed3 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Wed, 7 Aug 2024 15:17:46 +0200
Subject: Adding upstream version 6.10.3.

Signed-off-by: Daniel Baumann
---
 drivers/remoteproc/imx_rproc.c           |  10 +-
 drivers/remoteproc/mtk_common.h          |  11 +-
 drivers/remoteproc/mtk_scp.c             | 248 ++++++++++++++++++++---
 drivers/remoteproc/mtk_scp_ipi.c         |   7 +-
 drivers/remoteproc/remoteproc_internal.h |   2 +-
 drivers/remoteproc/remoteproc_sysfs.c    |   2 +-
 drivers/remoteproc/stm32_rproc.c         |   2 +-
 drivers/remoteproc/ti_k3_r5_remoteproc.c |  13 +-
 drivers/remoteproc/xlnx_r5_remoteproc.c  | 329 +++++++++++++++----------------
 9 files changed, 405 insertions(+), 219 deletions(-)

diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
index 5a3fb902ac..144c8e9a64 100644
--- a/drivers/remoteproc/imx_rproc.c
+++ b/drivers/remoteproc/imx_rproc.c
@@ -726,31 +726,37 @@ static int imx_rproc_addr_init(struct imx_rproc *priv,
 		struct resource res;
 
 		node = of_parse_phandle(np, "memory-region", a);
+		if (!node)
+			continue;
 		/* Not map vdevbuffer, vdevring region */
 		if (!strncmp(node->name, "vdev", strlen("vdev"))) {
 			of_node_put(node);
 			continue;
 		}
 		err = of_address_to_resource(node, 0, &res);
-		of_node_put(node);
 		if (err) {
 			dev_err(dev, "unable to resolve memory region\n");
+			of_node_put(node);
 			return err;
 		}
 
-		if (b >= IMX_RPROC_MEM_MAX)
+		if (b >= IMX_RPROC_MEM_MAX) {
+			of_node_put(node);
 			break;
+		}
 
 		/* Not use resource version, because we might share region */
 		priv->mem[b].cpu_addr = devm_ioremap_wc(&pdev->dev, res.start, resource_size(&res));
 		if (!priv->mem[b].cpu_addr) {
 			dev_err(dev, "failed to remap %pr\n", &res);
+			of_node_put(node);
 			return -ENOMEM;
 		}
 		priv->mem[b].sys_addr = res.start;
 		priv->mem[b].size = resource_size(&res);
 		if (!strcmp(node->name, "rsc-table"))
 			priv->rsc_table = priv->mem[b].cpu_addr;
+		of_node_put(node);
 		b++;
 	}

diff --git a/drivers/remoteproc/mtk_common.h b/drivers/remoteproc/mtk_common.h
index 6d7736a031..fd5c539ab2 100644
--- a/drivers/remoteproc/mtk_common.h
+++ b/drivers/remoteproc/mtk_common.h
@@ -78,7 +78,6 @@
 #define MT8195_L2TCM_OFFSET	0x850d0
 
 #define SCP_FW_VER_LEN		32
-#define SCP_SHARE_BUFFER_SIZE	288
 
 struct scp_run {
 	u32 signaled;
@@ -97,6 +96,11 @@ struct scp_ipi_desc {
 
 struct mtk_scp;
 
+struct mtk_scp_sizes_data {
+	size_t max_dram_size;
+	size_t ipi_share_buffer_size;
+};
+
 struct mtk_scp_of_data {
 	int (*scp_clk_get)(struct mtk_scp *scp);
 	int (*scp_before_load)(struct mtk_scp *scp);
@@ -110,6 +114,7 @@ struct mtk_scp_of_data {
 	u32 host_to_scp_int_bit;
 
 	size_t ipi_buf_offset;
+	const struct mtk_scp_sizes_data *scp_sizes;
 };
 
 struct mtk_scp_of_cluster {
@@ -141,10 +146,10 @@ struct mtk_scp {
 	struct scp_ipi_desc ipi_desc[SCP_IPI_MAX];
 	bool ipi_id_ack[SCP_IPI_MAX];
 	wait_queue_head_t ack_wq;
+	u8 *share_buf;
 
 	void *cpu_addr;
 	dma_addr_t dma_addr;
-	size_t dram_size;
 
 	struct rproc_subdev *rpmsg_subdev;
 
@@ -162,7 +167,7 @@ struct mtk_scp {
 struct mtk_share_obj {
 	u32 id;
 	u32 len;
-	u8 share_buf[SCP_SHARE_BUFFER_SIZE];
+	u8 *share_buf;
 };
 
 void scp_memcpy_aligned(void __iomem *dst, const void *src, unsigned int len);

diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c
index 67518291a8..abf7b371b8 100644
--- a/drivers/remoteproc/mtk_scp.c
+++ b/drivers/remoteproc/mtk_scp.c
@@ -20,7 +20,6 @@
 #include "mtk_common.h"
 #include "remoteproc_internal.h"
 
-#define MAX_CODE_SIZE 0x500000
 #define SECTION_NAME_IPI_BUFFER	".ipi_buffer"
 
 /**
@@ -94,14 +93,15 @@ static void scp_ipi_handler(struct mtk_scp *scp)
 {
 	struct mtk_share_obj __iomem *rcv_obj = scp->recv_buf;
 	struct scp_ipi_desc *ipi_desc = scp->ipi_desc;
-	u8 tmp_data[SCP_SHARE_BUFFER_SIZE];
 	scp_ipi_handler_t handler;
 	u32 id = readl(&rcv_obj->id);
 	u32 len = readl(&rcv_obj->len);
+	const struct mtk_scp_sizes_data *scp_sizes;
 
-	if (len > SCP_SHARE_BUFFER_SIZE) {
-		dev_err(scp->dev, "ipi message too long (len %d, max %d)", len,
-			SCP_SHARE_BUFFER_SIZE);
+	scp_sizes = scp->data->scp_sizes;
+	if (len > scp_sizes->ipi_share_buffer_size) {
+		dev_err(scp->dev, "ipi message too long (len %d, max %zd)", len,
+			scp_sizes->ipi_share_buffer_size);
 		return;
 	}
 	if (id >= SCP_IPI_MAX) {
@@ -117,8 +117,9 @@ static void scp_ipi_handler(struct mtk_scp *scp)
 		return;
 	}
 
-	memcpy_fromio(tmp_data, &rcv_obj->share_buf, len);
-	handler(tmp_data, len, ipi_desc[id].priv);
+	memset(scp->share_buf, 0, scp_sizes->ipi_share_buffer_size);
+	memcpy_fromio(scp->share_buf, &rcv_obj->share_buf, len);
+	handler(scp->share_buf, len, ipi_desc[id].priv);
 	scp_ipi_unlock(scp, id);
 
 	scp->ipi_id_ack[id] = true;
@@ -133,6 +134,8 @@ static int scp_ipi_init(struct mtk_scp *scp, const struct firmware *fw)
 {
 	int ret;
 	size_t buf_sz, offset;
+	size_t share_buf_offset;
+	const struct mtk_scp_sizes_data *scp_sizes;
 
 	/* read the ipi buf addr from FW itself first */
 	ret = scp_elf_read_ipi_buf_addr(scp, fw, &offset);
@@ -152,12 +155,15 @@ static int scp_ipi_init(struct mtk_scp *scp, const struct firmware *fw)
 		return -EOVERFLOW;
 	}
 
+	scp_sizes = scp->data->scp_sizes;
 	scp->recv_buf = (struct mtk_share_obj __iomem *)
 			(scp->sram_base + offset);
+	share_buf_offset = sizeof(scp->recv_buf->id) +
+		sizeof(scp->recv_buf->len) + scp_sizes->ipi_share_buffer_size;
 	scp->send_buf = (struct mtk_share_obj __iomem *)
-			(scp->sram_base + offset + sizeof(*scp->recv_buf));
-	memset_io(scp->recv_buf, 0, sizeof(*scp->recv_buf));
-	memset_io(scp->send_buf, 0, sizeof(*scp->send_buf));
+			(scp->sram_base + offset + share_buf_offset);
+	memset_io(scp->recv_buf, 0, share_buf_offset);
+	memset_io(scp->send_buf, 0, share_buf_offset);
 
 	return 0;
 }
@@ -471,6 +477,86 @@ static int mt8186_scp_before_load(struct mtk_scp *scp)
 	return 0;
 }
 
+static int mt8188_scp_l2tcm_on(struct mtk_scp *scp)
+{
+	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
+
+	mutex_lock(&scp_cluster->cluster_lock);
+
+	if (scp_cluster->l2tcm_refcnt == 0) {
+		/* clear SPM interrupt, SCP2SPM_IPC_CLR */
+		writel(0xff, scp->cluster->reg_base + MT8192_SCP2SPM_IPC_CLR);
+
+		/* Power on L2TCM */
+		scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
+		scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
+		scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
+		scp_sram_power_on(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
+	}
+
+	scp_cluster->l2tcm_refcnt += 1;
+
+	mutex_unlock(&scp_cluster->cluster_lock);
+
+	return 0;
+}
+
+static int mt8188_scp_before_load(struct mtk_scp *scp)
+{
+	writel(1, scp->cluster->reg_base + MT8192_CORE0_SW_RSTN_SET);
+
+	mt8188_scp_l2tcm_on(scp);
+
+	scp_sram_power_on(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);
+
+	/* enable MPU for all memory regions */
+	writel(0xff, scp->cluster->reg_base + MT8192_CORE0_MEM_ATT_PREDEF);
+
+	return 0;
+}
+
+static int mt8188_scp_c1_before_load(struct mtk_scp *scp)
+{
+	u32 sec_ctrl;
+	struct mtk_scp *scp_c0;
+	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
+
+	scp->data->scp_reset_assert(scp);
+
+	mt8188_scp_l2tcm_on(scp);
+
+	scp_sram_power_on(scp->cluster->reg_base + MT8195_CPU1_SRAM_PD, 0);
+
+	/* enable MPU for all memory regions */
+	writel(0xff, scp->cluster->reg_base + MT8195_CORE1_MEM_ATT_PREDEF);
+
+	/*
+	 * The L2TCM_OFFSET_RANGE and L2TCM_OFFSET shift the destination address
+	 * on SRAM when SCP core 1 accesses SRAM.
+	 *
+	 * This configuration solves booting the SCP core 0 and core 1 from
+	 * different SRAM addresses because core 0 and core 1 both boot from
+	 * the head of SRAM by default. This must be configured before booting
+	 * SCP core 1.
+	 *
+	 * The value of L2TCM_OFFSET_RANGE is from the viewpoint of SCP core 1.
+	 * When SCP core 1 issues an address within the range (L2TCM_OFFSET_RANGE),
+	 * the address will be added with a fixed offset (L2TCM_OFFSET) on the bus.
+	 * The shift action is transparent to software.
+	 */
+	writel(0, scp->cluster->reg_base + MT8195_L2TCM_OFFSET_RANGE_0_LOW);
+	writel(scp->sram_size, scp->cluster->reg_base + MT8195_L2TCM_OFFSET_RANGE_0_HIGH);
+
+	scp_c0 = list_first_entry(&scp_cluster->mtk_scp_list, struct mtk_scp, elem);
+	writel(scp->sram_phys - scp_c0->sram_phys, scp->cluster->reg_base + MT8195_L2TCM_OFFSET);
+
+	/* enable SRAM offset when fetching instruction and data */
+	sec_ctrl = readl(scp->cluster->reg_base + MT8195_SEC_CTRL);
+	sec_ctrl |= MT8195_CORE_OFFSET_ENABLE_I | MT8195_CORE_OFFSET_ENABLE_D;
+	writel(sec_ctrl, scp->cluster->reg_base + MT8195_SEC_CTRL);
+
+	return 0;
+}
+
 static int mt8192_scp_before_load(struct mtk_scp *scp)
 {
 	/* clear SPM interrupt, SCP2SPM_IPC_CLR */
@@ -661,14 +747,16 @@ stop:
 static void *mt8183_scp_da_to_va(struct mtk_scp *scp, u64 da, size_t len)
 {
 	int offset;
+	const struct mtk_scp_sizes_data *scp_sizes;
 
+	scp_sizes = scp->data->scp_sizes;
 	if (da < scp->sram_size) {
 		offset = da;
 		if (offset >= 0 && (offset + len) <= scp->sram_size)
 			return (void __force *)scp->sram_base + offset;
-	} else if (scp->dram_size) {
+	} else if (scp_sizes->max_dram_size) {
 		offset = da - scp->dma_addr;
-		if (offset >= 0 && (offset + len) <= scp->dram_size)
+		if (offset >= 0 && (offset + len) <= scp_sizes->max_dram_size)
 			return scp->cpu_addr + offset;
 	}
 
@@ -678,7 +766,9 @@ static void *mt8183_scp_da_to_va(struct mtk_scp *scp, u64 da, size_t len)
 static void *mt8192_scp_da_to_va(struct mtk_scp *scp, u64 da, size_t len)
 {
 	int offset;
+	const struct mtk_scp_sizes_data *scp_sizes;
 
+	scp_sizes = scp->data->scp_sizes;
 	if (da >= scp->sram_phys &&
 	    (da + len) <= scp->sram_phys + scp->sram_size) {
 		offset = da - scp->sram_phys;
@@ -694,9 +784,9 @@ static void *mt8192_scp_da_to_va(struct mtk_scp *scp, u64 da, size_t len)
 	}
 
 	/* optional memory region */
-	if (scp->dram_size &&
+	if (scp_sizes->max_dram_size &&
 	    da >= scp->dma_addr &&
-	    (da + len) <= scp->dma_addr + scp->dram_size) {
+	    (da + len) <= scp->dma_addr + scp_sizes->max_dram_size) {
 		offset = da - scp->dma_addr;
 		return scp->cpu_addr + offset;
 	}
@@ -717,6 +807,47 @@ static void mt8183_scp_stop(struct mtk_scp *scp)
 	writel(0, scp->cluster->reg_base + MT8183_WDT_CFG);
 }
 
+static void mt8188_scp_l2tcm_off(struct mtk_scp *scp)
+{
+	struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
+
+	mutex_lock(&scp_cluster->cluster_lock);
+
+	if (scp_cluster->l2tcm_refcnt > 0)
+		scp_cluster->l2tcm_refcnt -= 1;
+
+	if (scp_cluster->l2tcm_refcnt == 0) {
+		/* Power off L2TCM */
+		scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
+		scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
+		scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
+		scp_sram_power_off(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
+	}
+
+	mutex_unlock(&scp_cluster->cluster_lock);
+}
+
+static void mt8188_scp_stop(struct mtk_scp *scp)
+{
+	mt8188_scp_l2tcm_off(scp);
+
+	scp_sram_power_off(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);
+
+	/* Disable SCP watchdog */
+	writel(0, scp->cluster->reg_base + MT8192_CORE0_WDT_CFG);
+}
+
+static void mt8188_scp_c1_stop(struct mtk_scp *scp)
+{
+	mt8188_scp_l2tcm_off(scp);
+
+	/* Power off CPU SRAM */
+	scp_sram_power_off(scp->cluster->reg_base + MT8195_CPU1_SRAM_PD, 0);
+
+	/* Disable SCP watchdog */
+	writel(0, scp->cluster->reg_base + MT8195_CORE1_WDT_CFG);
+}
+
 static void mt8192_scp_stop(struct mtk_scp *scp)
 {
 	/* Disable SRAM clock */
@@ -876,6 +1007,7 @@ EXPORT_SYMBOL_GPL(scp_mapping_dm_addr);
 static int scp_map_memory_region(struct mtk_scp *scp)
 {
 	int ret;
+	const struct mtk_scp_sizes_data *scp_sizes;
 
 	ret = of_reserved_mem_device_init(scp->dev);
@@ -891,8 +1023,8 @@ static int scp_map_memory_region(struct mtk_scp *scp)
 	}
 
 	/* Reserved SCP code size */
-	scp->dram_size = MAX_CODE_SIZE;
-	scp->cpu_addr = dma_alloc_coherent(scp->dev, scp->dram_size,
+	scp_sizes = scp->data->scp_sizes;
+	scp->cpu_addr = dma_alloc_coherent(scp->dev, scp_sizes->max_dram_size,
 					   &scp->dma_addr, GFP_KERNEL);
 	if (!scp->cpu_addr)
 		return -ENOMEM;
@@ -902,10 +1034,13 @@ static int scp_map_memory_region(struct mtk_scp *scp)
 
 static void scp_unmap_memory_region(struct mtk_scp *scp)
 {
-	if (scp->dram_size == 0)
+	const struct mtk_scp_sizes_data *scp_sizes;
+
+	scp_sizes = scp->data->scp_sizes;
+	if (scp_sizes->max_dram_size == 0)
 		return;
 
-	dma_free_coherent(scp->dev, scp->dram_size, scp->cpu_addr,
+	dma_free_coherent(scp->dev, scp_sizes->max_dram_size, scp->cpu_addr,
 			  scp->dma_addr);
 	of_reserved_mem_device_release(scp->dev);
 }
@@ -969,6 +1104,7 @@ static struct mtk_scp *scp_rproc_init(struct platform_device *pdev,
 	struct resource *res;
 	const char *fw_name = "scp.img";
 	int ret, i;
+	const struct mtk_scp_sizes_data *scp_sizes;
 
 	ret = rproc_of_parse_firmware(dev, 0, &fw_name);
 	if (ret < 0 && ret != -EINVAL)
@@ -1016,6 +1152,14 @@ static struct mtk_scp *scp_rproc_init(struct platform_device *pdev,
 		goto release_dev_mem;
 	}
 
+	scp_sizes = scp->data->scp_sizes;
+	scp->share_buf = kzalloc(scp_sizes->ipi_share_buffer_size, GFP_KERNEL);
+	if (!scp->share_buf) {
+		dev_err(dev, "Failed to allocate IPI share buffer\n");
+		ret = -ENOMEM;
+		goto release_dev_mem;
+	}
+
 	init_waitqueue_head(&scp->run.wq);
 	init_waitqueue_head(&scp->ack_wq);
 
@@ -1035,6 +1179,8 @@ static struct mtk_scp *scp_rproc_init(struct platform_device *pdev,
 remove_subdev:
 	scp_remove_rpmsg_subdev(scp);
 	scp_ipi_unregister(scp, SCP_IPI_INIT);
+	kfree(scp->share_buf);
+	scp->share_buf = NULL;
 release_dev_mem:
 	scp_unmap_memory_region(scp);
 	for (i = 0; i < SCP_IPI_MAX; i++)
@@ -1050,6 +1196,8 @@ static void scp_free(struct mtk_scp *scp)
 
 	scp_remove_rpmsg_subdev(scp);
 	scp_ipi_unregister(scp, SCP_IPI_INIT);
+	kfree(scp->share_buf);
+	scp->share_buf = NULL;
 	scp_unmap_memory_region(scp);
 	for (i = 0; i < SCP_IPI_MAX; i++)
 		mutex_destroy(&scp->ipi_desc[i].lock);
@@ -1196,14 +1344,12 @@ static int scp_probe(struct platform_device *pdev)
 
 	/* l1tcm is an optional memory region */
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "l1tcm");
-	scp_cluster->l1tcm_base = devm_ioremap_resource(dev, res);
-	if (IS_ERR(scp_cluster->l1tcm_base)) {
-		ret = PTR_ERR(scp_cluster->l1tcm_base);
-		if (ret != -EINVAL)
-			return dev_err_probe(dev, ret, "Failed to map l1tcm memory\n");
+	if (res) {
+		scp_cluster->l1tcm_base = devm_ioremap_resource(dev, res);
+		if (IS_ERR(scp_cluster->l1tcm_base))
+			return dev_err_probe(dev, PTR_ERR(scp_cluster->l1tcm_base),
+					     "Failed to map l1tcm memory\n");
 
-		scp_cluster->l1tcm_base = NULL;
-	} else {
 		scp_cluster->l1tcm_size = resource_size(res);
 		scp_cluster->l1tcm_phys = res->start;
 	}
@@ -1236,6 +1382,26 @@ static void scp_remove(struct platform_device *pdev)
 	mutex_destroy(&scp_cluster->cluster_lock);
 }
 
+static const struct mtk_scp_sizes_data default_scp_sizes = {
+	.max_dram_size = 0x500000,
+	.ipi_share_buffer_size = 288,
+};
+
+static const struct mtk_scp_sizes_data mt8188_scp_sizes = {
+	.max_dram_size = 0x800000,
+	.ipi_share_buffer_size = 600,
+};
+
+static const struct mtk_scp_sizes_data mt8188_scp_c1_sizes = {
+	.max_dram_size = 0xA00000,
+	.ipi_share_buffer_size = 600,
+};
+
+static const struct mtk_scp_sizes_data mt8195_scp_sizes = {
+	.max_dram_size = 0x800000,
+	.ipi_share_buffer_size = 288,
+};
+
 static const struct mtk_scp_of_data mt8183_of_data = {
 	.scp_clk_get = mt8183_scp_clk_get,
 	.scp_before_load = mt8183_scp_before_load,
@@ -1247,6 +1413,7 @@ static const struct mtk_scp_of_data mt8183_of_data = {
 	.host_to_scp_reg = MT8183_HOST_TO_SCP,
 	.host_to_scp_int_bit = MT8183_HOST_IPC_INT_BIT,
 	.ipi_buf_offset = 0x7bdb0,
+	.scp_sizes = &default_scp_sizes,
 };
 
 static const struct mtk_scp_of_data mt8186_of_data = {
@@ -1260,18 +1427,33 @@ static const struct mtk_scp_of_data mt8186_of_data = {
 	.host_to_scp_reg = MT8183_HOST_TO_SCP,
 	.host_to_scp_int_bit = MT8183_HOST_IPC_INT_BIT,
 	.ipi_buf_offset = 0x3bdb0,
+	.scp_sizes = &default_scp_sizes,
 };
 
 static const struct mtk_scp_of_data mt8188_of_data = {
 	.scp_clk_get = mt8195_scp_clk_get,
-	.scp_before_load = mt8192_scp_before_load,
-	.scp_irq_handler = mt8192_scp_irq_handler,
+	.scp_before_load = mt8188_scp_before_load,
+	.scp_irq_handler = mt8195_scp_irq_handler,
 	.scp_reset_assert = mt8192_scp_reset_assert,
 	.scp_reset_deassert = mt8192_scp_reset_deassert,
-	.scp_stop = mt8192_scp_stop,
+	.scp_stop = mt8188_scp_stop,
 	.scp_da_to_va = mt8192_scp_da_to_va,
 	.host_to_scp_reg = MT8192_GIPC_IN_SET,
 	.host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
+	.scp_sizes = &mt8188_scp_sizes,
+};
+
+static const struct mtk_scp_of_data mt8188_of_data_c1 = {
+	.scp_clk_get = mt8195_scp_clk_get,
+	.scp_before_load = mt8188_scp_c1_before_load,
+	.scp_irq_handler = mt8195_scp_c1_irq_handler,
+	.scp_reset_assert = mt8195_scp_c1_reset_assert,
+	.scp_reset_deassert = mt8195_scp_c1_reset_deassert,
+	.scp_stop = mt8188_scp_c1_stop,
+	.scp_da_to_va = mt8192_scp_da_to_va,
+	.host_to_scp_reg = MT8192_GIPC_IN_SET,
+	.host_to_scp_int_bit = MT8195_CORE1_HOST_IPC_INT_BIT,
+	.scp_sizes = &mt8188_scp_c1_sizes,
 };
 
 static const struct mtk_scp_of_data mt8192_of_data = {
@@ -1284,6 +1466,7 @@ static const struct mtk_scp_of_data mt8192_of_data = {
 	.scp_da_to_va = mt8192_scp_da_to_va,
 	.host_to_scp_reg = MT8192_GIPC_IN_SET,
 	.host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
+	.scp_sizes = &default_scp_sizes,
 };
 
 static const struct mtk_scp_of_data mt8195_of_data = {
@@ -1296,6 +1479,7 @@ static const struct mtk_scp_of_data mt8195_of_data = {
 	.scp_da_to_va = mt8192_scp_da_to_va,
 	.host_to_scp_reg = MT8192_GIPC_IN_SET,
 	.host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
+	.scp_sizes = &mt8195_scp_sizes,
 };
 
 static const struct mtk_scp_of_data mt8195_of_data_c1 = {
@@ -1308,6 +1492,13 @@ static const struct mtk_scp_of_data mt8195_of_data_c1 = {
 	.scp_da_to_va = mt8192_scp_da_to_va,
 	.host_to_scp_reg = MT8192_GIPC_IN_SET,
 	.host_to_scp_int_bit = MT8195_CORE1_HOST_IPC_INT_BIT,
+	.scp_sizes = &default_scp_sizes,
+};
+
+static const struct mtk_scp_of_data *mt8188_of_data_cores[] = {
+	&mt8188_of_data,
+	&mt8188_of_data_c1,
+	NULL
+};
+
 static const struct mtk_scp_of_data *mt8195_of_data_cores[] = {
@@ -1320,6 +1511,7 @@ static const struct of_device_id mtk_scp_of_match[] = {
 	{ .compatible = "mediatek,mt8183-scp", .data = &mt8183_of_data },
 	{ .compatible = "mediatek,mt8186-scp", .data = &mt8186_of_data },
 	{ .compatible = "mediatek,mt8188-scp", .data = &mt8188_of_data },
+	{ .compatible = "mediatek,mt8188-scp-dual", .data = &mt8188_of_data_cores },
 	{ .compatible = "mediatek,mt8192-scp", .data = &mt8192_of_data },
 	{ .compatible = "mediatek,mt8195-scp", .data = &mt8195_of_data },
 	{ .compatible = "mediatek,mt8195-scp-dual", .data = &mt8195_of_data_cores },

diff --git a/drivers/remoteproc/mtk_scp_ipi.c b/drivers/remoteproc/mtk_scp_ipi.c
index cd0b60106e..c068227e25 100644
--- a/drivers/remoteproc/mtk_scp_ipi.c
+++ b/drivers/remoteproc/mtk_scp_ipi.c
@@ -162,10 +162,13 @@ int scp_ipi_send(struct mtk_scp *scp, u32 id, void *buf, unsigned int len,
 	struct mtk_share_obj __iomem *send_obj = scp->send_buf;
 	u32 val;
 	int ret;
+	const struct mtk_scp_sizes_data *scp_sizes;
+
+	scp_sizes = scp->data->scp_sizes;
 
 	if (WARN_ON(id <= SCP_IPI_INIT) || WARN_ON(id >= SCP_IPI_MAX) ||
 	    WARN_ON(id == SCP_IPI_NS_SERVICE) ||
-	    WARN_ON(len > sizeof(send_obj->share_buf)) || WARN_ON(!buf))
+	    WARN_ON(len > scp_sizes->ipi_share_buffer_size) || WARN_ON(!buf))
 		return -EINVAL;
 
 	ret = clk_prepare_enable(scp->clk);
@@ -184,7 +187,7 @@ int scp_ipi_send(struct mtk_scp *scp, u32 id, void *buf, unsigned int len,
 		goto unlock_mutex;
 	}
 
-	scp_memcpy_aligned(send_obj->share_buf, buf, len);
+	scp_memcpy_aligned(&send_obj->share_buf, buf, len);
 	writel(len, &send_obj->len);
 	writel(id, &send_obj->id);
 
diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h
index f62a82d71d..0cd09e67ac 100644
--- a/drivers/remoteproc/remoteproc_internal.h
+++ b/drivers/remoteproc/remoteproc_internal.h
@@ -72,7 +72,7 @@ void rproc_init_debugfs(void);
 void rproc_exit_debugfs(void);
 
 /* from remoteproc_sysfs.c */
-extern struct class rproc_class;
+extern const struct class rproc_class;
 int rproc_init_sysfs(void);
 void rproc_exit_sysfs(void);
 
diff --git a/drivers/remoteproc/remoteproc_sysfs.c b/drivers/remoteproc/remoteproc_sysfs.c
index 8c7ea89226..138e752c5e 100644
--- a/drivers/remoteproc/remoteproc_sysfs.c
+++ b/drivers/remoteproc/remoteproc_sysfs.c
@@ -254,7 +254,7 @@ static const struct attribute_group *rproc_devgroups[] = {
 	NULL
 };
 
-struct class rproc_class = {
+const struct class rproc_class = {
 	.name		= "remoteproc",
 	.dev_groups	= rproc_devgroups,
 };
 
diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
index 88623df7d0..8c7f7950b8 100644
--- a/drivers/remoteproc/stm32_rproc.c
+++ b/drivers/remoteproc/stm32_rproc.c
@@ -294,7 +294,7 @@ static void stm32_rproc_mb_vq_work(struct work_struct *work)
 
 	mutex_lock(&rproc->lock);
 
-	if (rproc->state != RPROC_RUNNING)
+	if (rproc->state != RPROC_RUNNING && rproc->state != RPROC_ATTACHED)
 		goto unlock_mutex;
 
 	if (rproc_vq_interrupt(rproc, mb->vq_id) == IRQ_NONE)
diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
index 50e486bcfa..39a47540c5 100644
--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
@@ -1144,6 +1144,7 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
 	u32 atcm_enable, btcm_enable, loczrama;
 	struct k3_r5_core *core0;
 	enum cluster_mode mode = cluster->mode;
+	int reset_ctrl_status;
 	int ret;
 
 	core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
@@ -1160,11 +1161,11 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
 			r_state, c_state);
 	}
 
-	ret = reset_control_status(core->reset);
-	if (ret < 0) {
+	reset_ctrl_status = reset_control_status(core->reset);
+	if (reset_ctrl_status < 0) {
 		dev_err(cdev, "failed to get initial local reset status, ret = %d\n",
-			ret);
-		return ret;
+			reset_ctrl_status);
+		return reset_ctrl_status;
 	}
 
 	/*
@@ -1199,7 +1200,7 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
 	 * irrelevant if module reset is asserted (POR value has local reset
 	 * deasserted), and is deemed as remoteproc mode
 	 */
-	if (c_state && !ret && !halted) {
+	if (c_state && !reset_ctrl_status && !halted) {
 		dev_info(cdev, "configured R5F for IPC-only mode\n");
 		kproc->rproc->state = RPROC_DETACHED;
 		ret = 1;
@@ -1217,7 +1218,7 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
 		ret = 0;
 	} else {
 		dev_err(cdev, "mismatched mode: local_reset = %s, module_reset = %s, core_state = %s\n",
-			!ret ? "deasserted" : "asserted",
+			!reset_ctrl_status ? "deasserted" : "asserted",
 			c_state ? "deasserted" : "asserted",
 			halted ? "halted" : "unhalted");
 		ret = -EINVAL;
diff --git a/drivers/remoteproc/xlnx_r5_remoteproc.c b/drivers/remoteproc/xlnx_r5_remoteproc.c
index 4395edea9a..84243d1dff 100644
--- a/drivers/remoteproc/xlnx_r5_remoteproc.c
+++ b/drivers/remoteproc/xlnx_r5_remoteproc.c
@@ -74,8 +74,8 @@ struct mbox_info {
 };
 
 /*
- * Hardcoded TCM bank values. This will be removed once TCM bindings are
- * accepted for system-dt specifications and upstreamed in linux kernel
+ * Hardcoded TCM bank values. This will stay in the driver to maintain
+ * backward compatibility with device trees that do not have TCM information.
  */
 static const struct mem_bank_data zynqmp_tcm_banks_split[] = {
 	{0xffe00000UL, 0x0, 0x10000UL, PD_R5_0_ATCM, "atcm0"}, /* TCM 64KB each */
 	{0xffe20000UL, 0x20000, 0x10000UL, PD_R5_0_BTCM, "btcm0"},
 	{0xffe90000UL, 0x0, 0x10000UL, PD_R5_1_ATCM, "atcm1"},
 	{0xffeb0000UL, 0x20000, 0x10000UL, PD_R5_1_BTCM, "btcm1"},
 };
 
-/* In lockstep mode cluster combines each 64KB TCM and makes 128KB TCM */
+/* In lockstep mode cluster uses each 64KB TCM from second core as well */
 static const struct mem_bank_data zynqmp_tcm_banks_lockstep[] = {
-	{0xffe00000UL, 0x0, 0x20000UL, PD_R5_0_ATCM, "atcm0"}, /* TCM 128KB each */
-	{0xffe20000UL, 0x20000, 0x20000UL, PD_R5_0_BTCM, "btcm0"},
-	{0, 0, 0, PD_R5_1_ATCM, ""},
-	{0, 0, 0, PD_R5_1_BTCM, ""},
+	{0xffe00000UL, 0x0, 0x10000UL, PD_R5_0_ATCM, "atcm0"}, /* TCM 64KB each */
+	{0xffe20000UL, 0x20000, 0x10000UL, PD_R5_0_BTCM, "btcm0"},
+	{0xffe10000UL, 0x10000, 0x10000UL, PD_R5_1_ATCM, "atcm1"},
+	{0xffe30000UL, 0x30000, 0x10000UL, PD_R5_1_BTCM, "btcm1"},
 };
 
 /**
@@ -300,36 +300,6 @@ static void zynqmp_r5_rproc_kick(struct rproc *rproc, int vqid)
 		dev_warn(dev, "failed to send message\n");
 }
 
-/*
- * zynqmp_r5_set_mode()
- *
- * set RPU cluster and TCM operation mode
- *
- * @r5_core: pointer to zynqmp_r5_core type object
- * @fw_reg_val: value expected by firmware to configure RPU cluster mode
- * @tcm_mode: value expected by fw to configure TCM mode (lockstep or split)
- *
- * Return: 0 for success and < 0 for failure
- */
-static int zynqmp_r5_set_mode(struct zynqmp_r5_core *r5_core,
-			      enum rpu_oper_mode fw_reg_val,
-			      enum rpu_tcm_comb tcm_mode)
-{
-	int ret;
-
-	ret = zynqmp_pm_set_rpu_mode(r5_core->pm_domain_id, fw_reg_val);
-	if (ret < 0) {
-		dev_err(r5_core->dev, "failed to set RPU mode\n");
-		return ret;
-	}
-
-	ret = zynqmp_pm_set_tcm_config(r5_core->pm_domain_id, tcm_mode);
-	if (ret < 0)
-		dev_err(r5_core->dev, "failed to configure TCM\n");
-
-	return ret;
-}
-
 /*
  * zynqmp_r5_rproc_start()
  * @rproc: single R5 core's corresponding rproc instance
@@ -486,6 +456,7 @@ static int add_mem_regions_carveout(struct rproc *rproc)
 		}
 
 		rproc_add_carveout(rproc, rproc_mem);
+		rproc_coredump_add_segment(rproc, rmem->base, rmem->size);
 
 		dev_dbg(&rproc->dev, "reserved mem carveout %s addr=%llx, size=0x%llx",
 			it.node->name, rmem->base, rmem->size);
@@ -540,14 +511,14 @@ static int tcm_mem_map(struct rproc *rproc,
 }
 
 /*
- * add_tcm_carveout_split_mode()
+ * add_tcm_banks()
  * @rproc: single R5 core's corresponding rproc instance
  *
- * allocate and add remoteproc carveout for TCM memory in split mode
+ * allocate and add remoteproc carveout for TCM memory
 *
 * return 0 on success, otherwise non-zero value on failure
 */
-static int add_tcm_carveout_split_mode(struct rproc *rproc)
+static int add_tcm_banks(struct rproc *rproc)
 {
 	struct rproc_mem_entry *rproc_mem;
 	struct zynqmp_r5_core *r5_core;
@@ -580,10 +551,10 @@ static int add_tcm_carveout_split_mode(struct rproc *rproc)
 					     ZYNQMP_PM_REQUEST_ACK_BLOCKING);
 		if (ret < 0) {
 			dev_err(dev, "failed to turn on TCM 0x%x", pm_domain_id);
-			goto release_tcm_split;
+			goto release_tcm;
 		}
 
-		dev_dbg(dev, "TCM carveout split mode %s addr=%llx, da=0x%x, size=0x%lx",
+		dev_dbg(dev, "TCM carveout %s addr=%llx, da=0x%x, size=0x%lx",
 			bank_name, bank_addr, da, bank_size);
 
 		rproc_mem = rproc_mem_entry_init(dev, NULL, bank_addr,
@@ -593,15 +564,16 @@ static int add_tcm_carveout_split_mode(struct rproc *rproc)
 		if (!rproc_mem) {
 			ret = -ENOMEM;
 			zynqmp_pm_release_node(pm_domain_id);
-			goto release_tcm_split;
+			goto release_tcm;
 		}
 
 		rproc_add_carveout(rproc, rproc_mem);
+
+		rproc_coredump_add_segment(rproc, da, bank_size);
 	}
 
 	return 0;
 
-release_tcm_split:
+release_tcm:
 	/* If failed, Turn off all TCM banks turned on before */
 	for (i--; i >= 0; i--) {
 		pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
@@ -610,127 +582,6 @@ release_tcm_split:
 	return ret;
 }
 
-/*
- * add_tcm_carveout_lockstep_mode()
- * @rproc: single R5 core's corresponding rproc instance
- *
- * allocate and add remoteproc carveout for TCM memory in lockstep mode
- *
- * return 0 on success, otherwise non-zero value on failure
- */
-static int add_tcm_carveout_lockstep_mode(struct rproc *rproc)
-{
-	struct rproc_mem_entry *rproc_mem;
-	struct zynqmp_r5_core *r5_core;
-	int i, num_banks, ret;
-	phys_addr_t bank_addr;
-	size_t bank_size = 0;
-	struct device *dev;
-	u32 pm_domain_id;
-	char *bank_name;
-	u32 da;
-
-	r5_core = rproc->priv;
-	dev = r5_core->dev;
-
-	/* Go through zynqmp banks for r5 node */
-	num_banks = r5_core->tcm_bank_count;
-
-	/*
-	 * In lockstep mode, TCM is contiguous memory block
-	 * However, each TCM block still needs to be enabled individually.
-	 * So, Enable each TCM block individually.
-	 * Although ATCM and BTCM is contiguous memory block, add two separate
-	 * carveouts for both.
-	 */
-	for (i = 0; i < num_banks; i++) {
-		pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
-
-		/* Turn on each TCM bank individually */
-		ret = zynqmp_pm_request_node(pm_domain_id,
-					     ZYNQMP_PM_CAPABILITY_ACCESS, 0,
-					     ZYNQMP_PM_REQUEST_ACK_BLOCKING);
-		if (ret < 0) {
-			dev_err(dev, "failed to turn on TCM 0x%x", pm_domain_id);
-			goto release_tcm_lockstep;
-		}
-
-		bank_size = r5_core->tcm_banks[i]->size;
-		if (bank_size == 0)
-			continue;
-
-		bank_addr = r5_core->tcm_banks[i]->addr;
-		da = r5_core->tcm_banks[i]->da;
-		bank_name = r5_core->tcm_banks[i]->bank_name;
-
-		/* Register TCM address range, TCM map and unmap functions */
-		rproc_mem = rproc_mem_entry_init(dev, NULL, bank_addr,
-						 bank_size, da,
-						 tcm_mem_map, tcm_mem_unmap,
-						 bank_name);
-		if (!rproc_mem) {
-			ret = -ENOMEM;
-			zynqmp_pm_release_node(pm_domain_id);
-			goto release_tcm_lockstep;
-		}
-
-		/* If registration is success, add carveouts */
-		rproc_add_carveout(rproc, rproc_mem);
-
-		dev_dbg(dev, "TCM carveout lockstep mode %s addr=0x%llx, da=0x%x, size=0x%lx",
-			bank_name, bank_addr, da, bank_size);
-	}
-
-	return 0;
-
-release_tcm_lockstep:
-	/* If failed, Turn off all TCM banks turned on before */
-	for (i--; i >= 0; i--) {
-		pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
-		zynqmp_pm_release_node(pm_domain_id);
-	}
-	return ret;
-}
-
-/*
- * add_tcm_banks()
- * @rproc: single R5 core's corresponding rproc instance
- *
- * allocate and add remoteproc carveouts for TCM memory based on cluster mode
- *
- * return 0 on success, otherwise non-zero value on failure
- */
-static int add_tcm_banks(struct rproc *rproc)
-{
-	struct zynqmp_r5_cluster *cluster;
-	struct zynqmp_r5_core *r5_core;
-	struct device *dev;
-
-	r5_core = rproc->priv;
-	if (!r5_core)
-		return -EINVAL;
-
-	dev = r5_core->dev;
-
-	cluster = dev_get_drvdata(dev->parent);
-	if (!cluster) {
-		dev_err(dev->parent, "Invalid driver data\n");
-		return -EINVAL;
-	}
-
-	/*
-	 * In lockstep mode TCM banks are one contiguous memory region of 256Kb
-	 * In split mode, each TCM bank is 64Kb and not contiguous.
-	 * We add memory carveouts accordingly.
-	 */
-	if (cluster->mode == SPLIT_MODE)
-		return add_tcm_carveout_split_mode(rproc);
-	else if (cluster->mode == LOCKSTEP_MODE)
-		return add_tcm_carveout_lockstep_mode(rproc);
-
-	return -EINVAL;
-}
-
 /*
  * zynqmp_r5_parse_fw()
  * @rproc: single R5 core's corresponding rproc instance
@@ -853,6 +704,8 @@ static struct zynqmp_r5_core *zynqmp_r5_add_rproc_core(struct device *cdev)
 		return ERR_PTR(-ENOMEM);
 	}
 
+	rproc_coredump_set_elf_info(r5_rproc, ELFCLASS32, EM_ARM);
+
 	r5_rproc->auto_boot = false;
 	r5_core = r5_rproc->priv;
 	r5_core->dev = cdev;
@@ -878,6 +731,103 @@ free_rproc:
 	return ERR_PTR(ret);
 }
 
+static int zynqmp_r5_get_tcm_node_from_dt(struct zynqmp_r5_cluster *cluster)
+{
+	int i, j, tcm_bank_count, ret, tcm_pd_idx, pd_count;
+	struct of_phandle_args out_args;
+	struct zynqmp_r5_core *r5_core;
+	struct platform_device *cpdev;
+	struct mem_bank_data *tcm;
+	struct device_node *np;
+	struct resource *res;
+	u64 abs_addr, size;
+	struct device *dev;
+
+	for (i = 0; i < cluster->core_count; i++) {
+		r5_core = cluster->r5_cores[i];
+		dev = r5_core->dev;
+		np = r5_core->np;
+
+		pd_count = of_count_phandle_with_args(np, "power-domains",
+						      "#power-domain-cells");
+
+		if (pd_count <= 0) {
+			dev_err(dev, "invalid power-domains property, %d\n", pd_count);
+			return -EINVAL;
+		}
+
+		/* First entry in power-domains list is for r5 core, rest for TCM. */
+		tcm_bank_count = pd_count - 1;
+
+		if (tcm_bank_count <= 0) {
+			dev_err(dev, "invalid TCM count %d\n", tcm_bank_count);
+			return -EINVAL;
+		}
+
+		r5_core->tcm_banks = devm_kcalloc(dev, tcm_bank_count,
+						  sizeof(struct mem_bank_data *),
+						  GFP_KERNEL);
+		if (!r5_core->tcm_banks)
+			return -ENOMEM;
+
+		r5_core->tcm_bank_count = tcm_bank_count;
+		for (j = 0, tcm_pd_idx = 1; j < tcm_bank_count; j++, tcm_pd_idx++) {
+			tcm = devm_kzalloc(dev, sizeof(struct mem_bank_data),
+					   GFP_KERNEL);
+			if (!tcm)
+				return -ENOMEM;
+
+			r5_core->tcm_banks[j] = tcm;
+
+			/* Get power-domains id of TCM. */
+			ret = of_parse_phandle_with_args(np, "power-domains",
+							 "#power-domain-cells",
+							 tcm_pd_idx, &out_args);
+			if (ret) {
+				dev_err(r5_core->dev,
+					"failed to get tcm %d pm domain, ret %d\n",
+					tcm_pd_idx, ret);
+				return ret;
+			}
+			tcm->pm_domain_id = out_args.args[0];
+			of_node_put(out_args.np);
+
+			/* Get TCM address without translation. */
+			ret = of_property_read_reg(np, j, &abs_addr, &size);
+			if (ret) {
+				dev_err(dev, "failed to get reg property\n");
+				return ret;
+			}
+
+			/*
+			 * Remote processor can address only 32 bits
+			 * so convert 64-bits into 32-bits. This will discard
+			 * any unwanted upper 32-bits.
+ */ + tcm->da = (u32)abs_addr; + tcm->size = (u32)size; + + cpdev = to_platform_device(dev); + res = platform_get_resource(cpdev, IORESOURCE_MEM, j); + if (!res) { + dev_err(dev, "failed to get tcm resource\n"); + return -EINVAL; + } + + tcm->addr = (u32)res->start; + tcm->bank_name = (char *)res->name; + res = devm_request_mem_region(dev, tcm->addr, tcm->size, + tcm->bank_name); + if (!res) { + dev_err(dev, "failed to request tcm resource\n"); + return -EINVAL; + } + } + } + + return 0; +} + /** * zynqmp_r5_get_tcm_node() * Ideally this function should parse tcm node and store information @@ -954,11 +904,18 @@ static int zynqmp_r5_core_init(struct zynqmp_r5_cluster *cluster, { struct device *dev = cluster->dev; struct zynqmp_r5_core *r5_core; - int ret, i; + int ret = -EINVAL, i; - ret = zynqmp_r5_get_tcm_node(cluster); - if (ret < 0) { - dev_err(dev, "can't get tcm node, err %d\n", ret); + r5_core = cluster->r5_cores[0]; + + /* Maintain backward compatibility for zynqmp by using hardcode TCM address. */ + if (of_find_property(r5_core->np, "reg", NULL)) + ret = zynqmp_r5_get_tcm_node_from_dt(cluster); + else if (device_is_compatible(dev, "xlnx,zynqmp-r5fss")) + ret = zynqmp_r5_get_tcm_node(cluster); + + if (ret) { + dev_err(dev, "can't get tcm, err %d\n", ret); return ret; } @@ -973,12 +930,21 @@ static int zynqmp_r5_core_init(struct zynqmp_r5_cluster *cluster, return ret; } - ret = zynqmp_r5_set_mode(r5_core, fw_reg_val, tcm_mode); - if (ret) { - dev_err(dev, "failed to set r5 cluster mode %d, err %d\n", - cluster->mode, ret); + ret = zynqmp_pm_set_rpu_mode(r5_core->pm_domain_id, fw_reg_val); + if (ret < 0) { + dev_err(r5_core->dev, "failed to set RPU mode\n"); return ret; } + + if (of_find_property(dev_of_node(dev), "xlnx,tcm-mode", NULL) || + device_is_compatible(dev, "xlnx,zynqmp-r5fss")) { + ret = zynqmp_pm_set_tcm_config(r5_core->pm_domain_id, + tcm_mode); + if (ret < 0) { + dev_err(r5_core->dev, "failed to configure TCM\n"); + return ret; + } + } } return 0; @@ -1023,16 +989,27 @@ static int zynqmp_r5_cluster_init(struct zynqmp_r5_cluster *cluster) * fail driver probe if either of that is not set in dts. */ if (cluster_mode == LOCKSTEP_MODE) { - tcm_mode = PM_RPU_TCM_COMB; fw_reg_val = PM_RPU_MODE_LOCKSTEP; } else if (cluster_mode == SPLIT_MODE) { - tcm_mode = PM_RPU_TCM_SPLIT; fw_reg_val = PM_RPU_MODE_SPLIT; } else { dev_err(dev, "driver does not support cluster mode %d\n", cluster_mode); return -EINVAL; } + if (of_find_property(dev_node, "xlnx,tcm-mode", NULL)) { + ret = of_property_read_u32(dev_node, "xlnx,tcm-mode", (u32 *)&tcm_mode); + if (ret) + return ret; + } else if (device_is_compatible(dev, "xlnx,zynqmp-r5fss")) { + if (cluster_mode == LOCKSTEP_MODE) + tcm_mode = PM_RPU_TCM_COMB; + else + tcm_mode = PM_RPU_TCM_SPLIT; + } else { + tcm_mode = PM_RPU_TCM_COMB; + } + /* * Number of cores is decided by number of child nodes of * r5f subsystem node in dts. If Split mode is used in dts @@ -1216,6 +1193,8 @@ static int zynqmp_r5_remoteproc_probe(struct platform_device *pdev) /* Match table for OF platform binding */ static const struct of_device_id zynqmp_r5_remoteproc_match[] = { + { .compatible = "xlnx,versal-net-r52fss", }, + { .compatible = "xlnx,versal-r5fss", }, { .compatible = "xlnx,zynqmp-r5fss", }, { /* end of list */ }, }; -- cgit v1.2.3