Diffstat (limited to 'drivers/spmi')
-rw-r--r--   drivers/spmi/Kconfig                 |   48
-rw-r--r--   drivers/spmi/Makefile                |    9
-rw-r--r--   drivers/spmi/hisi-spmi-controller.c  |  367
-rw-r--r--   drivers/spmi/spmi-mtk-pmif.c         |  554
-rw-r--r--   drivers/spmi/spmi-pmic-arb.c         | 1505
-rw-r--r--   drivers/spmi/spmi.c                  |  621
6 files changed, 3104 insertions, 0 deletions
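
For context, the controller drivers added below only implement the low-level read_cmd/write_cmd (and cmd) callbacks; client drivers never call these directly but go through the SPMI core's register accessors (spmi_register_read(), spmi_ext_register_readl(), spmi_ext_register_writel(), ...), which the spmi.c core added by this series routes to the matching controller. The following is a minimal, illustrative client sketch against include/linux/spmi.h only; the driver name, compatible string and register offset are hypothetical and are not part of this patch.

/*
 * Illustrative only: a minimal SPMI client driver sketch. The driver name,
 * the "vendor,example-pmic" compatible and the EXAMPLE_REVISION_REG offset
 * are made up; only the spmi_* calls come from include/linux/spmi.h.
 */
#include <linux/module.h>
#include <linux/of.h>
#include <linux/spmi.h>

#define EXAMPLE_REVISION_REG	0x0100	/* hypothetical peripheral register */

static int example_pmic_probe(struct spmi_device *sdev)
{
	u8 rev;
	int ret;

	/* Extended register read; ends up in the controller's ->read_cmd */
	ret = spmi_ext_register_readl(sdev, EXAMPLE_REVISION_REG, &rev, 1);
	if (ret)
		return ret;

	dev_info(&sdev->dev, "revision: %#x\n", rev);
	return 0;
}

static const struct of_device_id example_pmic_of_match[] = {
	{ .compatible = "vendor,example-pmic" },
	{ }
};
MODULE_DEVICE_TABLE(of, example_pmic_of_match);

static struct spmi_driver example_pmic_driver = {
	.probe	= example_pmic_probe,
	.driver	= {
		.name		= "example-pmic",
		.of_match_table	= example_pmic_of_match,
	},
};
module_spmi_driver(example_pmic_driver);

MODULE_DESCRIPTION("Example SPMI client (illustration only)");
MODULE_LICENSE("GPL");
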
diff --git a/drivers/spmi/Kconfig b/drivers/spmi/Kconfig new file mode 100644 index 000000000..737802046 --- /dev/null +++ b/drivers/spmi/Kconfig @@ -0,0 +1,48 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# SPMI driver configuration +# +menuconfig SPMI + tristate "SPMI support" + help + SPMI (System Power Management Interface) is a two-wire + serial interface between baseband and application processors + and Power Management Integrated Circuits (PMIC). + +if SPMI + +config SPMI_HISI3670 + tristate "Hisilicon 3670 SPMI Controller" + select IRQ_DOMAIN_HIERARCHY + depends on HAS_IOMEM + help + If you say yes to this option, support will be included for the + built-in SPMI PMIC Arbiter interface on Hisilicon 3670 + processors. + +config SPMI_MSM_PMIC_ARB + tristate "Qualcomm MSM SPMI Controller (PMIC Arbiter)" + select IRQ_DOMAIN_HIERARCHY + depends on ARCH_QCOM || COMPILE_TEST + depends on HAS_IOMEM + default ARCH_QCOM + help + If you say yes to this option, support will be included for the + built-in SPMI PMIC Arbiter interface on Qualcomm MSM family + processors. + + This is required for communicating with Qualcomm PMICs and + other devices that have the SPMI interface. + +config SPMI_MTK_PMIF + tristate "Mediatek SPMI Controller (PMIC Arbiter)" + depends on ARCH_MEDIATEK || COMPILE_TEST + help + If you say yes to this option, support will be included for the + built-in SPMI PMIC Arbiter interface on Mediatek family + processors. + + This is required for communicating with Mediatek PMICs and + other devices that have the SPMI interface. + +endif diff --git a/drivers/spmi/Makefile b/drivers/spmi/Makefile new file mode 100644 index 000000000..9d974424c --- /dev/null +++ b/drivers/spmi/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile for kernel SPMI framework. 
+# +obj-$(CONFIG_SPMI) += spmi.o + +obj-$(CONFIG_SPMI_HISI3670) += hisi-spmi-controller.o +obj-$(CONFIG_SPMI_MSM_PMIC_ARB) += spmi-pmic-arb.o +obj-$(CONFIG_SPMI_MTK_PMIF) += spmi-mtk-pmif.o diff --git a/drivers/spmi/hisi-spmi-controller.c b/drivers/spmi/hisi-spmi-controller.c new file mode 100644 index 000000000..5bd23262a --- /dev/null +++ b/drivers/spmi/hisi-spmi-controller.c @@ -0,0 +1,367 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/seq_file.h> +#include <linux/slab.h> +#include <linux/spmi.h> + +/* + * SPMI register addr + */ +#define SPMI_CHANNEL_OFFSET 0x0300 +#define SPMI_SLAVE_OFFSET 0x20 + +#define SPMI_APB_SPMI_CMD_BASE_ADDR 0x0100 + +#define SPMI_APB_SPMI_WDATA0_BASE_ADDR 0x0104 +#define SPMI_APB_SPMI_WDATA1_BASE_ADDR 0x0108 +#define SPMI_APB_SPMI_WDATA2_BASE_ADDR 0x010c +#define SPMI_APB_SPMI_WDATA3_BASE_ADDR 0x0110 + +#define SPMI_APB_SPMI_STATUS_BASE_ADDR 0x0200 + +#define SPMI_APB_SPMI_RDATA0_BASE_ADDR 0x0204 +#define SPMI_APB_SPMI_RDATA1_BASE_ADDR 0x0208 +#define SPMI_APB_SPMI_RDATA2_BASE_ADDR 0x020c +#define SPMI_APB_SPMI_RDATA3_BASE_ADDR 0x0210 + +#define SPMI_PER_DATAREG_BYTE 4 +/* + * SPMI cmd register + */ +#define SPMI_APB_SPMI_CMD_EN BIT(31) +#define SPMI_APB_SPMI_CMD_TYPE_OFFSET 24 +#define SPMI_APB_SPMI_CMD_LENGTH_OFFSET 20 +#define SPMI_APB_SPMI_CMD_SLAVEID_OFFSET 16 +#define SPMI_APB_SPMI_CMD_ADDR_OFFSET 0 + +/* Command Opcodes */ + +enum spmi_controller_cmd_op_code { + SPMI_CMD_REG_ZERO_WRITE = 0, + SPMI_CMD_REG_WRITE = 1, + SPMI_CMD_REG_READ = 2, + SPMI_CMD_EXT_REG_WRITE = 3, + SPMI_CMD_EXT_REG_READ = 4, + SPMI_CMD_EXT_REG_WRITE_L = 5, + SPMI_CMD_EXT_REG_READ_L = 6, + SPMI_CMD_REG_RESET = 7, + SPMI_CMD_REG_SLEEP = 8, + SPMI_CMD_REG_SHUTDOWN = 9, + SPMI_CMD_REG_WAKEUP = 10, +}; + +/* + * SPMI status register + */ +#define SPMI_APB_TRANS_DONE BIT(0) +#define SPMI_APB_TRANS_FAIL BIT(2) + +/* Command register fields */ +#define SPMI_CONTROLLER_CMD_MAX_BYTE_COUNT 16 + +/* Maximum number of support PMIC peripherals */ +#define SPMI_CONTROLLER_TIMEOUT_US 1000 +#define SPMI_CONTROLLER_MAX_TRANS_BYTES 16 + +struct spmi_controller_dev { + struct spmi_controller *controller; + struct device *dev; + void __iomem *base; + spinlock_t lock; + u32 channel; +}; + +static int spmi_controller_wait_for_done(struct device *dev, + struct spmi_controller_dev *ctrl_dev, + void __iomem *base, u8 sid, u16 addr) +{ + u32 timeout = SPMI_CONTROLLER_TIMEOUT_US; + u32 status, offset; + + offset = SPMI_APB_SPMI_STATUS_BASE_ADDR; + offset += SPMI_CHANNEL_OFFSET * ctrl_dev->channel + SPMI_SLAVE_OFFSET * sid; + + do { + status = readl(base + offset); + + if (status & SPMI_APB_TRANS_DONE) { + if (status & SPMI_APB_TRANS_FAIL) { + dev_err(dev, "%s: transaction failed (0x%x)\n", + __func__, status); + return -EIO; + } + dev_dbg(dev, "%s: status 0x%x\n", __func__, status); + return 0; + } + udelay(1); + } while (timeout--); + + dev_err(dev, "%s: timeout, status 0x%x\n", __func__, status); + return -ETIMEDOUT; +} + +static int spmi_read_cmd(struct spmi_controller *ctrl, + u8 opc, u8 slave_id, u16 slave_addr, u8 *__buf, size_t bc) +{ + struct spmi_controller_dev *spmi_controller = dev_get_drvdata(&ctrl->dev); + u32 chnl_ofst = SPMI_CHANNEL_OFFSET * spmi_controller->channel; + unsigned long flags; + u8 *buf = __buf; + u32 cmd, data; + int rc; + u8 op_code, i; + + if (bc > 
SPMI_CONTROLLER_MAX_TRANS_BYTES) { + dev_err(&ctrl->dev, + "spmi_controller supports 1..%d bytes per trans, but:%zu requested\n", + SPMI_CONTROLLER_MAX_TRANS_BYTES, bc); + return -EINVAL; + } + + switch (opc) { + case SPMI_CMD_READ: + op_code = SPMI_CMD_REG_READ; + break; + case SPMI_CMD_EXT_READ: + op_code = SPMI_CMD_EXT_REG_READ; + break; + case SPMI_CMD_EXT_READL: + op_code = SPMI_CMD_EXT_REG_READ_L; + break; + default: + dev_err(&ctrl->dev, "invalid read cmd 0x%x\n", opc); + return -EINVAL; + } + + cmd = SPMI_APB_SPMI_CMD_EN | + (op_code << SPMI_APB_SPMI_CMD_TYPE_OFFSET) | + ((bc - 1) << SPMI_APB_SPMI_CMD_LENGTH_OFFSET) | + ((slave_id & 0xf) << SPMI_APB_SPMI_CMD_SLAVEID_OFFSET) | /* slvid */ + ((slave_addr & 0xffff) << SPMI_APB_SPMI_CMD_ADDR_OFFSET); /* slave_addr */ + + spin_lock_irqsave(&spmi_controller->lock, flags); + + writel(cmd, spmi_controller->base + chnl_ofst + SPMI_APB_SPMI_CMD_BASE_ADDR); + + rc = spmi_controller_wait_for_done(&ctrl->dev, spmi_controller, + spmi_controller->base, slave_id, slave_addr); + if (rc) + goto done; + + for (i = 0; bc > i * SPMI_PER_DATAREG_BYTE; i++) { + data = readl(spmi_controller->base + chnl_ofst + + SPMI_SLAVE_OFFSET * slave_id + + SPMI_APB_SPMI_RDATA0_BASE_ADDR + + i * SPMI_PER_DATAREG_BYTE); + data = be32_to_cpu((__be32 __force)data); + if ((bc - i * SPMI_PER_DATAREG_BYTE) >> 2) { + memcpy(buf, &data, sizeof(data)); + buf += sizeof(data); + } else { + memcpy(buf, &data, bc % SPMI_PER_DATAREG_BYTE); + buf += (bc % SPMI_PER_DATAREG_BYTE); + } + } + +done: + spin_unlock_irqrestore(&spmi_controller->lock, flags); + if (rc) + dev_err(&ctrl->dev, + "spmi read wait timeout op:0x%x slave_id:%d slave_addr:0x%x bc:%zu\n", + opc, slave_id, slave_addr, bc + 1); + else + dev_dbg(&ctrl->dev, "%s: id:%d slave_addr:0x%x, read value: %*ph\n", + __func__, slave_id, slave_addr, (int)bc, __buf); + + return rc; +} + +static int spmi_write_cmd(struct spmi_controller *ctrl, + u8 opc, u8 slave_id, u16 slave_addr, const u8 *__buf, size_t bc) +{ + struct spmi_controller_dev *spmi_controller = dev_get_drvdata(&ctrl->dev); + u32 chnl_ofst = SPMI_CHANNEL_OFFSET * spmi_controller->channel; + const u8 *buf = __buf; + unsigned long flags; + u32 cmd, data; + int rc; + u8 op_code, i; + + if (bc > SPMI_CONTROLLER_MAX_TRANS_BYTES) { + dev_err(&ctrl->dev, + "spmi_controller supports 1..%d bytes per trans, but:%zu requested\n", + SPMI_CONTROLLER_MAX_TRANS_BYTES, bc); + return -EINVAL; + } + + switch (opc) { + case SPMI_CMD_WRITE: + op_code = SPMI_CMD_REG_WRITE; + break; + case SPMI_CMD_EXT_WRITE: + op_code = SPMI_CMD_EXT_REG_WRITE; + break; + case SPMI_CMD_EXT_WRITEL: + op_code = SPMI_CMD_EXT_REG_WRITE_L; + break; + default: + dev_err(&ctrl->dev, "invalid write cmd 0x%x\n", opc); + return -EINVAL; + } + + cmd = SPMI_APB_SPMI_CMD_EN | + (op_code << SPMI_APB_SPMI_CMD_TYPE_OFFSET) | + ((bc - 1) << SPMI_APB_SPMI_CMD_LENGTH_OFFSET) | + ((slave_id & 0xf) << SPMI_APB_SPMI_CMD_SLAVEID_OFFSET) | + ((slave_addr & 0xffff) << SPMI_APB_SPMI_CMD_ADDR_OFFSET); + + /* Write data to FIFOs */ + spin_lock_irqsave(&spmi_controller->lock, flags); + + for (i = 0; bc > i * SPMI_PER_DATAREG_BYTE; i++) { + data = 0; + if ((bc - i * SPMI_PER_DATAREG_BYTE) >> 2) { + memcpy(&data, buf, sizeof(data)); + buf += sizeof(data); + } else { + memcpy(&data, buf, bc % SPMI_PER_DATAREG_BYTE); + buf += (bc % SPMI_PER_DATAREG_BYTE); + } + + writel((u32 __force)cpu_to_be32(data), + spmi_controller->base + chnl_ofst + + SPMI_APB_SPMI_WDATA0_BASE_ADDR + + SPMI_PER_DATAREG_BYTE * i); + } + + /* Start the transaction */ + 
writel(cmd, spmi_controller->base + chnl_ofst + SPMI_APB_SPMI_CMD_BASE_ADDR); + + rc = spmi_controller_wait_for_done(&ctrl->dev, spmi_controller, + spmi_controller->base, slave_id, + slave_addr); + spin_unlock_irqrestore(&spmi_controller->lock, flags); + + if (rc) + dev_err(&ctrl->dev, "spmi write wait timeout op:0x%x slave_id:%d slave_addr:0x%x bc:%zu\n", + opc, slave_id, slave_addr, bc); + else + dev_dbg(&ctrl->dev, "%s: id:%d slave_addr:0x%x, wrote value: %*ph\n", + __func__, slave_id, slave_addr, (int)bc, __buf); + + return rc; +} + +static int spmi_controller_probe(struct platform_device *pdev) +{ + struct spmi_controller_dev *spmi_controller; + struct spmi_controller *ctrl; + struct resource *iores; + int ret; + + ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*spmi_controller)); + if (!ctrl) { + dev_err(&pdev->dev, "can not allocate spmi_controller data\n"); + return -ENOMEM; + } + spmi_controller = spmi_controller_get_drvdata(ctrl); + spmi_controller->controller = ctrl; + + iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!iores) { + dev_err(&pdev->dev, "can not get resource!\n"); + ret = -EINVAL; + goto err_put_controller; + } + + spmi_controller->base = devm_ioremap(&pdev->dev, iores->start, + resource_size(iores)); + if (!spmi_controller->base) { + dev_err(&pdev->dev, "can not remap base addr!\n"); + ret = -EADDRNOTAVAIL; + goto err_put_controller; + } + + ret = of_property_read_u32(pdev->dev.of_node, "hisilicon,spmi-channel", + &spmi_controller->channel); + if (ret) { + dev_err(&pdev->dev, "can not get channel\n"); + ret = -ENODEV; + goto err_put_controller; + } + + platform_set_drvdata(pdev, spmi_controller); + dev_set_drvdata(&ctrl->dev, spmi_controller); + + spin_lock_init(&spmi_controller->lock); + + ctrl->nr = spmi_controller->channel; + ctrl->dev.parent = pdev->dev.parent; + ctrl->dev.of_node = of_node_get(pdev->dev.of_node); + + /* Callbacks */ + ctrl->read_cmd = spmi_read_cmd; + ctrl->write_cmd = spmi_write_cmd; + + ret = spmi_controller_add(ctrl); + if (ret) { + dev_err(&pdev->dev, "spmi_controller_add failed with error %d!\n", ret); + goto err_put_controller; + } + + return 0; + +err_put_controller: + spmi_controller_put(ctrl); + return ret; +} + +static int spmi_del_controller(struct platform_device *pdev) +{ + struct spmi_controller *ctrl = platform_get_drvdata(pdev); + + spmi_controller_remove(ctrl); + spmi_controller_put(ctrl); + return 0; +} + +static const struct of_device_id spmi_controller_match_table[] = { + { + .compatible = "hisilicon,kirin970-spmi-controller", + }, + {} +}; +MODULE_DEVICE_TABLE(of, spmi_controller_match_table); + +static struct platform_driver spmi_controller_driver = { + .probe = spmi_controller_probe, + .remove = spmi_del_controller, + .driver = { + .name = "hisi_spmi_controller", + .of_match_table = spmi_controller_match_table, + }, +}; + +static int __init spmi_controller_init(void) +{ + return platform_driver_register(&spmi_controller_driver); +} +postcore_initcall(spmi_controller_init); + +static void __exit spmi_controller_exit(void) +{ + platform_driver_unregister(&spmi_controller_driver); +} +module_exit(spmi_controller_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_VERSION("1.0"); +MODULE_ALIAS("platform:spmi_controller"); diff --git a/drivers/spmi/spmi-mtk-pmif.c b/drivers/spmi/spmi-mtk-pmif.c new file mode 100644 index 000000000..01e8851e6 --- /dev/null +++ b/drivers/spmi/spmi-mtk-pmif.c @@ -0,0 +1,554 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (c) 2021 MediaTek Inc. 
+ +#include <linux/clk.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/property.h> +#include <linux/spmi.h> + +#define SWINF_IDLE 0x00 +#define SWINF_WFVLDCLR 0x06 + +#define GET_SWINF(x) (((x) >> 1) & 0x7) + +#define PMIF_CMD_REG_0 0 +#define PMIF_CMD_REG 1 +#define PMIF_CMD_EXT_REG 2 +#define PMIF_CMD_EXT_REG_LONG 3 + +#define PMIF_DELAY_US 10 +#define PMIF_TIMEOUT_US (10 * 1000) + +#define PMIF_CHAN_OFFSET 0x5 + +#define PMIF_MAX_CLKS 3 + +#define SPMI_OP_ST_BUSY 1 + +struct ch_reg { + u32 ch_sta; + u32 wdata; + u32 rdata; + u32 ch_send; + u32 ch_rdy; +}; + +struct pmif_data { + const u32 *regs; + const u32 *spmimst_regs; + u32 soc_chan; +}; + +struct pmif { + void __iomem *base; + void __iomem *spmimst_base; + struct ch_reg chan; + struct clk_bulk_data clks[PMIF_MAX_CLKS]; + size_t nclks; + const struct pmif_data *data; + raw_spinlock_t lock; +}; + +static const char * const pmif_clock_names[] = { + "pmif_sys_ck", "pmif_tmr_ck", "spmimst_clk_mux", +}; + +enum pmif_regs { + PMIF_INIT_DONE, + PMIF_INF_EN, + PMIF_ARB_EN, + PMIF_CMDISSUE_EN, + PMIF_TIMER_CTRL, + PMIF_SPI_MODE_CTRL, + PMIF_IRQ_EVENT_EN_0, + PMIF_IRQ_FLAG_0, + PMIF_IRQ_CLR_0, + PMIF_IRQ_EVENT_EN_1, + PMIF_IRQ_FLAG_1, + PMIF_IRQ_CLR_1, + PMIF_IRQ_EVENT_EN_2, + PMIF_IRQ_FLAG_2, + PMIF_IRQ_CLR_2, + PMIF_IRQ_EVENT_EN_3, + PMIF_IRQ_FLAG_3, + PMIF_IRQ_CLR_3, + PMIF_IRQ_EVENT_EN_4, + PMIF_IRQ_FLAG_4, + PMIF_IRQ_CLR_4, + PMIF_WDT_EVENT_EN_0, + PMIF_WDT_FLAG_0, + PMIF_WDT_EVENT_EN_1, + PMIF_WDT_FLAG_1, + PMIF_SWINF_0_STA, + PMIF_SWINF_0_WDATA_31_0, + PMIF_SWINF_0_RDATA_31_0, + PMIF_SWINF_0_ACC, + PMIF_SWINF_0_VLD_CLR, + PMIF_SWINF_1_STA, + PMIF_SWINF_1_WDATA_31_0, + PMIF_SWINF_1_RDATA_31_0, + PMIF_SWINF_1_ACC, + PMIF_SWINF_1_VLD_CLR, + PMIF_SWINF_2_STA, + PMIF_SWINF_2_WDATA_31_0, + PMIF_SWINF_2_RDATA_31_0, + PMIF_SWINF_2_ACC, + PMIF_SWINF_2_VLD_CLR, + PMIF_SWINF_3_STA, + PMIF_SWINF_3_WDATA_31_0, + PMIF_SWINF_3_RDATA_31_0, + PMIF_SWINF_3_ACC, + PMIF_SWINF_3_VLD_CLR, +}; + +static const u32 mt6873_regs[] = { + [PMIF_INIT_DONE] = 0x0000, + [PMIF_INF_EN] = 0x0024, + [PMIF_ARB_EN] = 0x0150, + [PMIF_CMDISSUE_EN] = 0x03B4, + [PMIF_TIMER_CTRL] = 0x03E0, + [PMIF_SPI_MODE_CTRL] = 0x0400, + [PMIF_IRQ_EVENT_EN_0] = 0x0418, + [PMIF_IRQ_FLAG_0] = 0x0420, + [PMIF_IRQ_CLR_0] = 0x0424, + [PMIF_IRQ_EVENT_EN_1] = 0x0428, + [PMIF_IRQ_FLAG_1] = 0x0430, + [PMIF_IRQ_CLR_1] = 0x0434, + [PMIF_IRQ_EVENT_EN_2] = 0x0438, + [PMIF_IRQ_FLAG_2] = 0x0440, + [PMIF_IRQ_CLR_2] = 0x0444, + [PMIF_IRQ_EVENT_EN_3] = 0x0448, + [PMIF_IRQ_FLAG_3] = 0x0450, + [PMIF_IRQ_CLR_3] = 0x0454, + [PMIF_IRQ_EVENT_EN_4] = 0x0458, + [PMIF_IRQ_FLAG_4] = 0x0460, + [PMIF_IRQ_CLR_4] = 0x0464, + [PMIF_WDT_EVENT_EN_0] = 0x046C, + [PMIF_WDT_FLAG_0] = 0x0470, + [PMIF_WDT_EVENT_EN_1] = 0x0474, + [PMIF_WDT_FLAG_1] = 0x0478, + [PMIF_SWINF_0_ACC] = 0x0C00, + [PMIF_SWINF_0_WDATA_31_0] = 0x0C04, + [PMIF_SWINF_0_RDATA_31_0] = 0x0C14, + [PMIF_SWINF_0_VLD_CLR] = 0x0C24, + [PMIF_SWINF_0_STA] = 0x0C28, + [PMIF_SWINF_1_ACC] = 0x0C40, + [PMIF_SWINF_1_WDATA_31_0] = 0x0C44, + [PMIF_SWINF_1_RDATA_31_0] = 0x0C54, + [PMIF_SWINF_1_VLD_CLR] = 0x0C64, + [PMIF_SWINF_1_STA] = 0x0C68, + [PMIF_SWINF_2_ACC] = 0x0C80, + [PMIF_SWINF_2_WDATA_31_0] = 0x0C84, + [PMIF_SWINF_2_RDATA_31_0] = 0x0C94, + [PMIF_SWINF_2_VLD_CLR] = 0x0CA4, + [PMIF_SWINF_2_STA] = 0x0CA8, + [PMIF_SWINF_3_ACC] = 0x0CC0, + [PMIF_SWINF_3_WDATA_31_0] = 0x0CC4, + [PMIF_SWINF_3_RDATA_31_0] = 0x0CD4, + [PMIF_SWINF_3_VLD_CLR] = 0x0CE4, + [PMIF_SWINF_3_STA] = 0x0CE8, +}; + 
+static const u32 mt8195_regs[] = { + [PMIF_INIT_DONE] = 0x0000, + [PMIF_INF_EN] = 0x0024, + [PMIF_ARB_EN] = 0x0150, + [PMIF_CMDISSUE_EN] = 0x03B8, + [PMIF_TIMER_CTRL] = 0x03E4, + [PMIF_SPI_MODE_CTRL] = 0x0408, + [PMIF_IRQ_EVENT_EN_0] = 0x0420, + [PMIF_IRQ_FLAG_0] = 0x0428, + [PMIF_IRQ_CLR_0] = 0x042C, + [PMIF_IRQ_EVENT_EN_1] = 0x0430, + [PMIF_IRQ_FLAG_1] = 0x0438, + [PMIF_IRQ_CLR_1] = 0x043C, + [PMIF_IRQ_EVENT_EN_2] = 0x0440, + [PMIF_IRQ_FLAG_2] = 0x0448, + [PMIF_IRQ_CLR_2] = 0x044C, + [PMIF_IRQ_EVENT_EN_3] = 0x0450, + [PMIF_IRQ_FLAG_3] = 0x0458, + [PMIF_IRQ_CLR_3] = 0x045C, + [PMIF_IRQ_EVENT_EN_4] = 0x0460, + [PMIF_IRQ_FLAG_4] = 0x0468, + [PMIF_IRQ_CLR_4] = 0x046C, + [PMIF_WDT_EVENT_EN_0] = 0x0474, + [PMIF_WDT_FLAG_0] = 0x0478, + [PMIF_WDT_EVENT_EN_1] = 0x047C, + [PMIF_WDT_FLAG_1] = 0x0480, + [PMIF_SWINF_0_ACC] = 0x0800, + [PMIF_SWINF_0_WDATA_31_0] = 0x0804, + [PMIF_SWINF_0_RDATA_31_0] = 0x0814, + [PMIF_SWINF_0_VLD_CLR] = 0x0824, + [PMIF_SWINF_0_STA] = 0x0828, + [PMIF_SWINF_1_ACC] = 0x0840, + [PMIF_SWINF_1_WDATA_31_0] = 0x0844, + [PMIF_SWINF_1_RDATA_31_0] = 0x0854, + [PMIF_SWINF_1_VLD_CLR] = 0x0864, + [PMIF_SWINF_1_STA] = 0x0868, + [PMIF_SWINF_2_ACC] = 0x0880, + [PMIF_SWINF_2_WDATA_31_0] = 0x0884, + [PMIF_SWINF_2_RDATA_31_0] = 0x0894, + [PMIF_SWINF_2_VLD_CLR] = 0x08A4, + [PMIF_SWINF_2_STA] = 0x08A8, + [PMIF_SWINF_3_ACC] = 0x08C0, + [PMIF_SWINF_3_WDATA_31_0] = 0x08C4, + [PMIF_SWINF_3_RDATA_31_0] = 0x08D4, + [PMIF_SWINF_3_VLD_CLR] = 0x08E4, + [PMIF_SWINF_3_STA] = 0x08E8, +}; + +enum spmi_regs { + SPMI_OP_ST_CTRL, + SPMI_GRP_ID_EN, + SPMI_OP_ST_STA, + SPMI_MST_SAMPL, + SPMI_MST_REQ_EN, + SPMI_REC_CTRL, + SPMI_REC0, + SPMI_REC1, + SPMI_REC2, + SPMI_REC3, + SPMI_REC4, + SPMI_MST_DBG, + + /* MT8195 spmi regs */ + SPMI_MST_RCS_CTRL, + SPMI_SLV_3_0_EINT, + SPMI_SLV_7_4_EINT, + SPMI_SLV_B_8_EINT, + SPMI_SLV_F_C_EINT, + SPMI_REC_CMD_DEC, + SPMI_DEC_DBG, +}; + +static const u32 mt6873_spmi_regs[] = { + [SPMI_OP_ST_CTRL] = 0x0000, + [SPMI_GRP_ID_EN] = 0x0004, + [SPMI_OP_ST_STA] = 0x0008, + [SPMI_MST_SAMPL] = 0x000c, + [SPMI_MST_REQ_EN] = 0x0010, + [SPMI_REC_CTRL] = 0x0040, + [SPMI_REC0] = 0x0044, + [SPMI_REC1] = 0x0048, + [SPMI_REC2] = 0x004c, + [SPMI_REC3] = 0x0050, + [SPMI_REC4] = 0x0054, + [SPMI_MST_DBG] = 0x00fc, +}; + +static const u32 mt8195_spmi_regs[] = { + [SPMI_OP_ST_CTRL] = 0x0000, + [SPMI_GRP_ID_EN] = 0x0004, + [SPMI_OP_ST_STA] = 0x0008, + [SPMI_MST_SAMPL] = 0x000C, + [SPMI_MST_REQ_EN] = 0x0010, + [SPMI_MST_RCS_CTRL] = 0x0014, + [SPMI_SLV_3_0_EINT] = 0x0020, + [SPMI_SLV_7_4_EINT] = 0x0024, + [SPMI_SLV_B_8_EINT] = 0x0028, + [SPMI_SLV_F_C_EINT] = 0x002C, + [SPMI_REC_CTRL] = 0x0040, + [SPMI_REC0] = 0x0044, + [SPMI_REC1] = 0x0048, + [SPMI_REC2] = 0x004C, + [SPMI_REC3] = 0x0050, + [SPMI_REC4] = 0x0054, + [SPMI_REC_CMD_DEC] = 0x005C, + [SPMI_DEC_DBG] = 0x00F8, + [SPMI_MST_DBG] = 0x00FC, +}; + +static u32 pmif_readl(struct pmif *arb, enum pmif_regs reg) +{ + return readl(arb->base + arb->data->regs[reg]); +} + +static void pmif_writel(struct pmif *arb, u32 val, enum pmif_regs reg) +{ + writel(val, arb->base + arb->data->regs[reg]); +} + +static void mtk_spmi_writel(struct pmif *arb, u32 val, enum spmi_regs reg) +{ + writel(val, arb->spmimst_base + arb->data->spmimst_regs[reg]); +} + +static bool pmif_is_fsm_vldclr(struct pmif *arb) +{ + u32 reg_rdata; + + reg_rdata = pmif_readl(arb, arb->chan.ch_sta); + + return GET_SWINF(reg_rdata) == SWINF_WFVLDCLR; +} + +static int pmif_arb_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid) +{ + struct pmif *arb = spmi_controller_get_drvdata(ctrl); + u32 
rdata, cmd; + int ret; + + /* Check the opcode */ + if (opc < SPMI_CMD_RESET || opc > SPMI_CMD_WAKEUP) + return -EINVAL; + + cmd = opc - SPMI_CMD_RESET; + + mtk_spmi_writel(arb, (cmd << 0x4) | sid, SPMI_OP_ST_CTRL); + ret = readl_poll_timeout_atomic(arb->spmimst_base + arb->data->spmimst_regs[SPMI_OP_ST_STA], + rdata, (rdata & SPMI_OP_ST_BUSY) == SPMI_OP_ST_BUSY, + PMIF_DELAY_US, PMIF_TIMEOUT_US); + if (ret < 0) + dev_err(&ctrl->dev, "timeout, err = %d\n", ret); + + return ret; +} + +static int pmif_spmi_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid, + u16 addr, u8 *buf, size_t len) +{ + struct pmif *arb = spmi_controller_get_drvdata(ctrl); + struct ch_reg *inf_reg; + int ret; + u32 data, cmd; + unsigned long flags; + + /* Check for argument validation. */ + if (sid & ~0xf) { + dev_err(&ctrl->dev, "exceed the max slv id\n"); + return -EINVAL; + } + + if (len > 4) { + dev_err(&ctrl->dev, "pmif supports 1..4 bytes per trans, but:%zu requested", len); + + return -EINVAL; + } + + if (opc >= 0x60 && opc <= 0x7f) + opc = PMIF_CMD_REG; + else if ((opc >= 0x20 && opc <= 0x2f) || (opc >= 0x38 && opc <= 0x3f)) + opc = PMIF_CMD_EXT_REG_LONG; + else + return -EINVAL; + + raw_spin_lock_irqsave(&arb->lock, flags); + /* Wait for Software Interface FSM state to be IDLE. */ + inf_reg = &arb->chan; + ret = readl_poll_timeout_atomic(arb->base + arb->data->regs[inf_reg->ch_sta], + data, GET_SWINF(data) == SWINF_IDLE, + PMIF_DELAY_US, PMIF_TIMEOUT_US); + if (ret < 0) { + /* set channel ready if the data has transferred */ + if (pmif_is_fsm_vldclr(arb)) + pmif_writel(arb, 1, inf_reg->ch_rdy); + raw_spin_unlock_irqrestore(&arb->lock, flags); + dev_err(&ctrl->dev, "failed to wait for SWINF_IDLE\n"); + return ret; + } + + /* Send the command. */ + cmd = (opc << 30) | (sid << 24) | ((len - 1) << 16) | addr; + pmif_writel(arb, cmd, inf_reg->ch_send); + raw_spin_unlock_irqrestore(&arb->lock, flags); + + /* + * Wait for Software Interface FSM state to be WFVLDCLR, + * read the data and clear the valid flag. + */ + ret = readl_poll_timeout_atomic(arb->base + arb->data->regs[inf_reg->ch_sta], + data, GET_SWINF(data) == SWINF_WFVLDCLR, + PMIF_DELAY_US, PMIF_TIMEOUT_US); + if (ret < 0) { + dev_err(&ctrl->dev, "failed to wait for SWINF_WFVLDCLR\n"); + return ret; + } + + data = pmif_readl(arb, inf_reg->rdata); + memcpy(buf, &data, len); + pmif_writel(arb, 1, inf_reg->ch_rdy); + + return 0; +} + +static int pmif_spmi_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid, + u16 addr, const u8 *buf, size_t len) +{ + struct pmif *arb = spmi_controller_get_drvdata(ctrl); + struct ch_reg *inf_reg; + int ret; + u32 data, wdata, cmd; + unsigned long flags; + + if (len > 4) { + dev_err(&ctrl->dev, "pmif supports 1..4 bytes per trans, but:%zu requested", len); + + return -EINVAL; + } + + /* Check the opcode */ + if (opc >= 0x40 && opc <= 0x5F) + opc = PMIF_CMD_REG; + else if ((opc <= 0xF) || (opc >= 0x30 && opc <= 0x37)) + opc = PMIF_CMD_EXT_REG_LONG; + else if (opc >= 0x80) + opc = PMIF_CMD_REG_0; + else + return -EINVAL; + + /* Set the write data. */ + memcpy(&wdata, buf, len); + + raw_spin_lock_irqsave(&arb->lock, flags); + /* Wait for Software Interface FSM state to be IDLE. 
*/ + inf_reg = &arb->chan; + ret = readl_poll_timeout_atomic(arb->base + arb->data->regs[inf_reg->ch_sta], + data, GET_SWINF(data) == SWINF_IDLE, + PMIF_DELAY_US, PMIF_TIMEOUT_US); + if (ret < 0) { + /* set channel ready if the data has transferred */ + if (pmif_is_fsm_vldclr(arb)) + pmif_writel(arb, 1, inf_reg->ch_rdy); + raw_spin_unlock_irqrestore(&arb->lock, flags); + dev_err(&ctrl->dev, "failed to wait for SWINF_IDLE\n"); + return ret; + } + + pmif_writel(arb, wdata, inf_reg->wdata); + + /* Send the command. */ + cmd = (opc << 30) | BIT(29) | (sid << 24) | ((len - 1) << 16) | addr; + pmif_writel(arb, cmd, inf_reg->ch_send); + raw_spin_unlock_irqrestore(&arb->lock, flags); + + return 0; +} + +static const struct pmif_data mt6873_pmif_arb = { + .regs = mt6873_regs, + .spmimst_regs = mt6873_spmi_regs, + .soc_chan = 2, +}; + +static const struct pmif_data mt8195_pmif_arb = { + .regs = mt8195_regs, + .spmimst_regs = mt8195_spmi_regs, + .soc_chan = 2, +}; + +static int mtk_spmi_probe(struct platform_device *pdev) +{ + struct pmif *arb; + struct spmi_controller *ctrl; + int err, i; + u32 chan_offset; + + ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*arb)); + if (!ctrl) + return -ENOMEM; + + arb = spmi_controller_get_drvdata(ctrl); + arb->data = device_get_match_data(&pdev->dev); + if (!arb->data) { + err = -EINVAL; + dev_err(&pdev->dev, "Cannot get drv_data\n"); + goto err_put_ctrl; + } + + arb->base = devm_platform_ioremap_resource_byname(pdev, "pmif"); + if (IS_ERR(arb->base)) { + err = PTR_ERR(arb->base); + goto err_put_ctrl; + } + + arb->spmimst_base = devm_platform_ioremap_resource_byname(pdev, "spmimst"); + if (IS_ERR(arb->spmimst_base)) { + err = PTR_ERR(arb->spmimst_base); + goto err_put_ctrl; + } + + arb->nclks = ARRAY_SIZE(pmif_clock_names); + for (i = 0; i < arb->nclks; i++) + arb->clks[i].id = pmif_clock_names[i]; + + err = devm_clk_bulk_get(&pdev->dev, arb->nclks, arb->clks); + if (err) { + dev_err(&pdev->dev, "Failed to get clocks: %d\n", err); + goto err_put_ctrl; + } + + err = clk_bulk_prepare_enable(arb->nclks, arb->clks); + if (err) { + dev_err(&pdev->dev, "Failed to enable clocks: %d\n", err); + goto err_put_ctrl; + } + + ctrl->cmd = pmif_arb_cmd; + ctrl->read_cmd = pmif_spmi_read_cmd; + ctrl->write_cmd = pmif_spmi_write_cmd; + + chan_offset = PMIF_CHAN_OFFSET * arb->data->soc_chan; + arb->chan.ch_sta = PMIF_SWINF_0_STA + chan_offset; + arb->chan.wdata = PMIF_SWINF_0_WDATA_31_0 + chan_offset; + arb->chan.rdata = PMIF_SWINF_0_RDATA_31_0 + chan_offset; + arb->chan.ch_send = PMIF_SWINF_0_ACC + chan_offset; + arb->chan.ch_rdy = PMIF_SWINF_0_VLD_CLR + chan_offset; + + raw_spin_lock_init(&arb->lock); + + platform_set_drvdata(pdev, ctrl); + + err = spmi_controller_add(ctrl); + if (err) + goto err_domain_remove; + + return 0; + +err_domain_remove: + clk_bulk_disable_unprepare(arb->nclks, arb->clks); +err_put_ctrl: + spmi_controller_put(ctrl); + return err; +} + +static int mtk_spmi_remove(struct platform_device *pdev) +{ + struct spmi_controller *ctrl = platform_get_drvdata(pdev); + struct pmif *arb = spmi_controller_get_drvdata(ctrl); + + clk_bulk_disable_unprepare(arb->nclks, arb->clks); + spmi_controller_remove(ctrl); + spmi_controller_put(ctrl); + return 0; +} + +static const struct of_device_id mtk_spmi_match_table[] = { + { + .compatible = "mediatek,mt6873-spmi", + .data = &mt6873_pmif_arb, + }, { + .compatible = "mediatek,mt8195-spmi", + .data = &mt8195_pmif_arb, + }, { + /* sentinel */ + }, +}; +MODULE_DEVICE_TABLE(of, mtk_spmi_match_table); + +static struct 
platform_driver mtk_spmi_driver = { + .driver = { + .name = "spmi-mtk", + .of_match_table = of_match_ptr(mtk_spmi_match_table), + }, + .probe = mtk_spmi_probe, + .remove = mtk_spmi_remove, +}; +module_platform_driver(mtk_spmi_driver); + +MODULE_AUTHOR("Hsin-Hsiung Wang <hsin-hsiung.wang@mediatek.com>"); +MODULE_DESCRIPTION("MediaTek SPMI Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c new file mode 100644 index 000000000..2cf3203b2 --- /dev/null +++ b/drivers/spmi/spmi-pmic-arb.c @@ -0,0 +1,1505 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2015, 2017, 2021, The Linux Foundation. All rights reserved. + */ +#include <linux/bitmap.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/spmi.h> + +/* PMIC Arbiter configuration registers */ +#define PMIC_ARB_VERSION 0x0000 +#define PMIC_ARB_VERSION_V2_MIN 0x20010000 +#define PMIC_ARB_VERSION_V3_MIN 0x30000000 +#define PMIC_ARB_VERSION_V5_MIN 0x50000000 +#define PMIC_ARB_INT_EN 0x0004 + +/* PMIC Arbiter channel registers offsets */ +#define PMIC_ARB_CMD 0x00 +#define PMIC_ARB_CONFIG 0x04 +#define PMIC_ARB_STATUS 0x08 +#define PMIC_ARB_WDATA0 0x10 +#define PMIC_ARB_WDATA1 0x14 +#define PMIC_ARB_RDATA0 0x18 +#define PMIC_ARB_RDATA1 0x1C + +/* Mapping Table */ +#define SPMI_MAPPING_TABLE_REG(N) (0x0B00 + (4 * (N))) +#define SPMI_MAPPING_BIT_INDEX(X) (((X) >> 18) & 0xF) +#define SPMI_MAPPING_BIT_IS_0_FLAG(X) (((X) >> 17) & 0x1) +#define SPMI_MAPPING_BIT_IS_0_RESULT(X) (((X) >> 9) & 0xFF) +#define SPMI_MAPPING_BIT_IS_1_FLAG(X) (((X) >> 8) & 0x1) +#define SPMI_MAPPING_BIT_IS_1_RESULT(X) (((X) >> 0) & 0xFF) + +#define SPMI_MAPPING_TABLE_TREE_DEPTH 16 /* Maximum of 16-bits */ +#define PMIC_ARB_MAX_PPID BIT(12) /* PPID is 12bit */ +#define PMIC_ARB_APID_VALID BIT(15) +#define PMIC_ARB_CHAN_IS_IRQ_OWNER(reg) ((reg) & BIT(24)) +#define INVALID_EE 0xFF + +/* Ownership Table */ +#define SPMI_OWNERSHIP_TABLE_REG(N) (0x0700 + (4 * (N))) +#define SPMI_OWNERSHIP_PERIPH2OWNER(X) ((X) & 0x7) + +/* Channel Status fields */ +enum pmic_arb_chnl_status { + PMIC_ARB_STATUS_DONE = BIT(0), + PMIC_ARB_STATUS_FAILURE = BIT(1), + PMIC_ARB_STATUS_DENIED = BIT(2), + PMIC_ARB_STATUS_DROPPED = BIT(3), +}; + +/* Command register fields */ +#define PMIC_ARB_CMD_MAX_BYTE_COUNT 8 + +/* Command Opcodes */ +enum pmic_arb_cmd_op_code { + PMIC_ARB_OP_EXT_WRITEL = 0, + PMIC_ARB_OP_EXT_READL = 1, + PMIC_ARB_OP_EXT_WRITE = 2, + PMIC_ARB_OP_RESET = 3, + PMIC_ARB_OP_SLEEP = 4, + PMIC_ARB_OP_SHUTDOWN = 5, + PMIC_ARB_OP_WAKEUP = 6, + PMIC_ARB_OP_AUTHENTICATE = 7, + PMIC_ARB_OP_MSTR_READ = 8, + PMIC_ARB_OP_MSTR_WRITE = 9, + PMIC_ARB_OP_EXT_READ = 13, + PMIC_ARB_OP_WRITE = 14, + PMIC_ARB_OP_READ = 15, + PMIC_ARB_OP_ZERO_WRITE = 16, +}; + +/* + * PMIC arbiter version 5 uses different register offsets for read/write vs + * observer channels. 
+ */ +enum pmic_arb_channel { + PMIC_ARB_CHANNEL_RW, + PMIC_ARB_CHANNEL_OBS, +}; + +/* Maximum number of support PMIC peripherals */ +#define PMIC_ARB_MAX_PERIPHS 512 +#define PMIC_ARB_TIMEOUT_US 1000 +#define PMIC_ARB_MAX_TRANS_BYTES (8) + +#define PMIC_ARB_APID_MASK 0xFF +#define PMIC_ARB_PPID_MASK 0xFFF + +/* interrupt enable bit */ +#define SPMI_PIC_ACC_ENABLE_BIT BIT(0) + +#define spec_to_hwirq(slave_id, periph_id, irq_id, apid) \ + ((((slave_id) & 0xF) << 28) | \ + (((periph_id) & 0xFF) << 20) | \ + (((irq_id) & 0x7) << 16) | \ + (((apid) & 0x1FF) << 0)) + +#define hwirq_to_sid(hwirq) (((hwirq) >> 28) & 0xF) +#define hwirq_to_per(hwirq) (((hwirq) >> 20) & 0xFF) +#define hwirq_to_irq(hwirq) (((hwirq) >> 16) & 0x7) +#define hwirq_to_apid(hwirq) (((hwirq) >> 0) & 0x1FF) + +struct pmic_arb_ver_ops; + +struct apid_data { + u16 ppid; + u8 write_ee; + u8 irq_ee; +}; + +/** + * spmi_pmic_arb - SPMI PMIC Arbiter object + * + * @rd_base: on v1 "core", on v2 "observer" register base off DT. + * @wr_base: on v1 "core", on v2 "chnls" register base off DT. + * @intr: address of the SPMI interrupt control registers. + * @cnfg: address of the PMIC Arbiter configuration registers. + * @lock: lock to synchronize accesses. + * @channel: execution environment channel to use for accesses. + * @irq: PMIC ARB interrupt. + * @ee: the current Execution Environment + * @min_apid: minimum APID (used for bounding IRQ search) + * @max_apid: maximum APID + * @mapping_table: in-memory copy of PPID -> APID mapping table. + * @domain: irq domain object for PMIC IRQ domain + * @spmic: SPMI controller object + * @ver_ops: version dependent operations. + * @ppid_to_apid in-memory copy of PPID -> APID mapping table. + */ +struct spmi_pmic_arb { + void __iomem *rd_base; + void __iomem *wr_base; + void __iomem *intr; + void __iomem *cnfg; + void __iomem *core; + resource_size_t core_size; + raw_spinlock_t lock; + u8 channel; + int irq; + u8 ee; + u16 min_apid; + u16 max_apid; + u32 *mapping_table; + DECLARE_BITMAP(mapping_table_valid, PMIC_ARB_MAX_PERIPHS); + struct irq_domain *domain; + struct spmi_controller *spmic; + const struct pmic_arb_ver_ops *ver_ops; + u16 *ppid_to_apid; + u16 last_apid; + struct apid_data apid_data[PMIC_ARB_MAX_PERIPHS]; +}; + +/** + * pmic_arb_ver: version dependent functionality. + * + * @ver_str: version string. + * @ppid_to_apid: finds the apid for a given ppid. + * @non_data_cmd: on v1 issues an spmi non-data command. + * on v2 no HW support, returns -EOPNOTSUPP. + * @offset: on v1 offset of per-ee channel. + * on v2 offset of per-ee and per-ppid channel. + * @fmt_cmd: formats a GENI/SPMI command. + * @owner_acc_status: on v1 address of PMIC_ARB_SPMI_PIC_OWNERm_ACC_STATUSn + * on v2 address of SPMI_PIC_OWNERm_ACC_STATUSn. + * @acc_enable: on v1 address of PMIC_ARB_SPMI_PIC_ACC_ENABLEn + * on v2 address of SPMI_PIC_ACC_ENABLEn. + * @irq_status: on v1 address of PMIC_ARB_SPMI_PIC_IRQ_STATUSn + * on v2 address of SPMI_PIC_IRQ_STATUSn. + * @irq_clear: on v1 address of PMIC_ARB_SPMI_PIC_IRQ_CLEARn + * on v2 address of SPMI_PIC_IRQ_CLEARn. 
+ * @apid_map_offset: offset of PMIC_ARB_REG_CHNLn + */ +struct pmic_arb_ver_ops { + const char *ver_str; + int (*ppid_to_apid)(struct spmi_pmic_arb *pmic_arb, u16 ppid); + /* spmi commands (read_cmd, write_cmd, cmd) functionality */ + int (*offset)(struct spmi_pmic_arb *pmic_arb, u8 sid, u16 addr, + enum pmic_arb_channel ch_type); + u32 (*fmt_cmd)(u8 opc, u8 sid, u16 addr, u8 bc); + int (*non_data_cmd)(struct spmi_controller *ctrl, u8 opc, u8 sid); + /* Interrupts controller functionality (offset of PIC registers) */ + void __iomem *(*owner_acc_status)(struct spmi_pmic_arb *pmic_arb, u8 m, + u16 n); + void __iomem *(*acc_enable)(struct spmi_pmic_arb *pmic_arb, u16 n); + void __iomem *(*irq_status)(struct spmi_pmic_arb *pmic_arb, u16 n); + void __iomem *(*irq_clear)(struct spmi_pmic_arb *pmic_arb, u16 n); + u32 (*apid_map_offset)(u16 n); +}; + +static inline void pmic_arb_base_write(struct spmi_pmic_arb *pmic_arb, + u32 offset, u32 val) +{ + writel_relaxed(val, pmic_arb->wr_base + offset); +} + +static inline void pmic_arb_set_rd_cmd(struct spmi_pmic_arb *pmic_arb, + u32 offset, u32 val) +{ + writel_relaxed(val, pmic_arb->rd_base + offset); +} + +/** + * pmic_arb_read_data: reads pmic-arb's register and copy 1..4 bytes to buf + * @bc: byte count -1. range: 0..3 + * @reg: register's address + * @buf: output parameter, length must be bc + 1 + */ +static void +pmic_arb_read_data(struct spmi_pmic_arb *pmic_arb, u8 *buf, u32 reg, u8 bc) +{ + u32 data = __raw_readl(pmic_arb->rd_base + reg); + + memcpy(buf, &data, (bc & 3) + 1); +} + +/** + * pmic_arb_write_data: write 1..4 bytes from buf to pmic-arb's register + * @bc: byte-count -1. range: 0..3. + * @reg: register's address. + * @buf: buffer to write. length must be bc + 1. + */ +static void pmic_arb_write_data(struct spmi_pmic_arb *pmic_arb, const u8 *buf, + u32 reg, u8 bc) +{ + u32 data = 0; + + memcpy(&data, buf, (bc & 3) + 1); + __raw_writel(data, pmic_arb->wr_base + reg); +} + +static int pmic_arb_wait_for_done(struct spmi_controller *ctrl, + void __iomem *base, u8 sid, u16 addr, + enum pmic_arb_channel ch_type) +{ + struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl); + u32 status = 0; + u32 timeout = PMIC_ARB_TIMEOUT_US; + u32 offset; + int rc; + + rc = pmic_arb->ver_ops->offset(pmic_arb, sid, addr, ch_type); + if (rc < 0) + return rc; + + offset = rc; + offset += PMIC_ARB_STATUS; + + while (timeout--) { + status = readl_relaxed(base + offset); + + if (status & PMIC_ARB_STATUS_DONE) { + if (status & PMIC_ARB_STATUS_DENIED) { + dev_err(&ctrl->dev, "%s: %#x %#x: transaction denied (%#x)\n", + __func__, sid, addr, status); + return -EPERM; + } + + if (status & PMIC_ARB_STATUS_FAILURE) { + dev_err(&ctrl->dev, "%s: %#x %#x: transaction failed (%#x)\n", + __func__, sid, addr, status); + WARN_ON(1); + return -EIO; + } + + if (status & PMIC_ARB_STATUS_DROPPED) { + dev_err(&ctrl->dev, "%s: %#x %#x: transaction dropped (%#x)\n", + __func__, sid, addr, status); + return -EIO; + } + + return 0; + } + udelay(1); + } + + dev_err(&ctrl->dev, "%s: %#x %#x: timeout, status %#x\n", + __func__, sid, addr, status); + return -ETIMEDOUT; +} + +static int +pmic_arb_non_data_cmd_v1(struct spmi_controller *ctrl, u8 opc, u8 sid) +{ + struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl); + unsigned long flags; + u32 cmd; + int rc; + u32 offset; + + rc = pmic_arb->ver_ops->offset(pmic_arb, sid, 0, PMIC_ARB_CHANNEL_RW); + if (rc < 0) + return rc; + + offset = rc; + cmd = ((opc | 0x40) << 27) | ((sid & 0xf) << 20); + + 
raw_spin_lock_irqsave(&pmic_arb->lock, flags); + pmic_arb_base_write(pmic_arb, offset + PMIC_ARB_CMD, cmd); + rc = pmic_arb_wait_for_done(ctrl, pmic_arb->wr_base, sid, 0, + PMIC_ARB_CHANNEL_RW); + raw_spin_unlock_irqrestore(&pmic_arb->lock, flags); + + return rc; +} + +static int +pmic_arb_non_data_cmd_v2(struct spmi_controller *ctrl, u8 opc, u8 sid) +{ + return -EOPNOTSUPP; +} + +/* Non-data command */ +static int pmic_arb_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid) +{ + struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl); + + dev_dbg(&ctrl->dev, "cmd op:0x%x sid:%d\n", opc, sid); + + /* Check for valid non-data command */ + if (opc < SPMI_CMD_RESET || opc > SPMI_CMD_WAKEUP) + return -EINVAL; + + return pmic_arb->ver_ops->non_data_cmd(ctrl, opc, sid); +} + +static int pmic_arb_fmt_read_cmd(struct spmi_pmic_arb *pmic_arb, u8 opc, u8 sid, + u16 addr, size_t len, u32 *cmd, u32 *offset) +{ + u8 bc = len - 1; + int rc; + + rc = pmic_arb->ver_ops->offset(pmic_arb, sid, addr, + PMIC_ARB_CHANNEL_OBS); + if (rc < 0) + return rc; + + *offset = rc; + if (bc >= PMIC_ARB_MAX_TRANS_BYTES) { + dev_err(&pmic_arb->spmic->dev, "pmic-arb supports 1..%d bytes per trans, but:%zu requested", + PMIC_ARB_MAX_TRANS_BYTES, len); + return -EINVAL; + } + + /* Check the opcode */ + if (opc >= 0x60 && opc <= 0x7F) + opc = PMIC_ARB_OP_READ; + else if (opc >= 0x20 && opc <= 0x2F) + opc = PMIC_ARB_OP_EXT_READ; + else if (opc >= 0x38 && opc <= 0x3F) + opc = PMIC_ARB_OP_EXT_READL; + else + return -EINVAL; + + *cmd = pmic_arb->ver_ops->fmt_cmd(opc, sid, addr, bc); + + return 0; +} + +static int pmic_arb_read_cmd_unlocked(struct spmi_controller *ctrl, u32 cmd, + u32 offset, u8 sid, u16 addr, u8 *buf, + size_t len) +{ + struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl); + u8 bc = len - 1; + int rc; + + pmic_arb_set_rd_cmd(pmic_arb, offset + PMIC_ARB_CMD, cmd); + rc = pmic_arb_wait_for_done(ctrl, pmic_arb->rd_base, sid, addr, + PMIC_ARB_CHANNEL_OBS); + if (rc) + return rc; + + pmic_arb_read_data(pmic_arb, buf, offset + PMIC_ARB_RDATA0, + min_t(u8, bc, 3)); + + if (bc > 3) + pmic_arb_read_data(pmic_arb, buf + 4, offset + PMIC_ARB_RDATA1, + bc - 4); + return 0; +} + +static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid, + u16 addr, u8 *buf, size_t len) +{ + struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl); + unsigned long flags; + u32 cmd, offset; + int rc; + + rc = pmic_arb_fmt_read_cmd(pmic_arb, opc, sid, addr, len, &cmd, + &offset); + if (rc) + return rc; + + raw_spin_lock_irqsave(&pmic_arb->lock, flags); + rc = pmic_arb_read_cmd_unlocked(ctrl, cmd, offset, sid, addr, buf, len); + raw_spin_unlock_irqrestore(&pmic_arb->lock, flags); + + return rc; +} + +static int pmic_arb_fmt_write_cmd(struct spmi_pmic_arb *pmic_arb, u8 opc, + u8 sid, u16 addr, size_t len, u32 *cmd, + u32 *offset) +{ + u8 bc = len - 1; + int rc; + + rc = pmic_arb->ver_ops->offset(pmic_arb, sid, addr, + PMIC_ARB_CHANNEL_RW); + if (rc < 0) + return rc; + + *offset = rc; + if (bc >= PMIC_ARB_MAX_TRANS_BYTES) { + dev_err(&pmic_arb->spmic->dev, "pmic-arb supports 1..%d bytes per trans, but:%zu requested", + PMIC_ARB_MAX_TRANS_BYTES, len); + return -EINVAL; + } + + /* Check the opcode */ + if (opc >= 0x40 && opc <= 0x5F) + opc = PMIC_ARB_OP_WRITE; + else if (opc <= 0x0F) + opc = PMIC_ARB_OP_EXT_WRITE; + else if (opc >= 0x30 && opc <= 0x37) + opc = PMIC_ARB_OP_EXT_WRITEL; + else if (opc >= 0x80) + opc = PMIC_ARB_OP_ZERO_WRITE; + else + return -EINVAL; + + *cmd = 
pmic_arb->ver_ops->fmt_cmd(opc, sid, addr, bc); + + return 0; +} + +static int pmic_arb_write_cmd_unlocked(struct spmi_controller *ctrl, u32 cmd, + u32 offset, u8 sid, u16 addr, + const u8 *buf, size_t len) +{ + struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl); + u8 bc = len - 1; + + /* Write data to FIFOs */ + pmic_arb_write_data(pmic_arb, buf, offset + PMIC_ARB_WDATA0, + min_t(u8, bc, 3)); + if (bc > 3) + pmic_arb_write_data(pmic_arb, buf + 4, offset + PMIC_ARB_WDATA1, + bc - 4); + + /* Start the transaction */ + pmic_arb_base_write(pmic_arb, offset + PMIC_ARB_CMD, cmd); + return pmic_arb_wait_for_done(ctrl, pmic_arb->wr_base, sid, addr, + PMIC_ARB_CHANNEL_RW); +} + +static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid, + u16 addr, const u8 *buf, size_t len) +{ + struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl); + unsigned long flags; + u32 cmd, offset; + int rc; + + rc = pmic_arb_fmt_write_cmd(pmic_arb, opc, sid, addr, len, &cmd, + &offset); + if (rc) + return rc; + + raw_spin_lock_irqsave(&pmic_arb->lock, flags); + rc = pmic_arb_write_cmd_unlocked(ctrl, cmd, offset, sid, addr, buf, + len); + raw_spin_unlock_irqrestore(&pmic_arb->lock, flags); + + return rc; +} + +static int pmic_arb_masked_write(struct spmi_controller *ctrl, u8 sid, u16 addr, + const u8 *buf, const u8 *mask, size_t len) +{ + struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl); + u32 read_cmd, read_offset, write_cmd, write_offset; + u8 temp[PMIC_ARB_MAX_TRANS_BYTES]; + unsigned long flags; + int rc, i; + + rc = pmic_arb_fmt_read_cmd(pmic_arb, SPMI_CMD_EXT_READL, sid, addr, len, + &read_cmd, &read_offset); + if (rc) + return rc; + + rc = pmic_arb_fmt_write_cmd(pmic_arb, SPMI_CMD_EXT_WRITEL, sid, addr, + len, &write_cmd, &write_offset); + if (rc) + return rc; + + raw_spin_lock_irqsave(&pmic_arb->lock, flags); + rc = pmic_arb_read_cmd_unlocked(ctrl, read_cmd, read_offset, sid, addr, + temp, len); + if (rc) + goto done; + + for (i = 0; i < len; i++) + temp[i] = (temp[i] & ~mask[i]) | (buf[i] & mask[i]); + + rc = pmic_arb_write_cmd_unlocked(ctrl, write_cmd, write_offset, sid, + addr, temp, len); +done: + raw_spin_unlock_irqrestore(&pmic_arb->lock, flags); + + return rc; +} + +enum qpnpint_regs { + QPNPINT_REG_RT_STS = 0x10, + QPNPINT_REG_SET_TYPE = 0x11, + QPNPINT_REG_POLARITY_HIGH = 0x12, + QPNPINT_REG_POLARITY_LOW = 0x13, + QPNPINT_REG_LATCHED_CLR = 0x14, + QPNPINT_REG_EN_SET = 0x15, + QPNPINT_REG_EN_CLR = 0x16, + QPNPINT_REG_LATCHED_STS = 0x18, +}; + +struct spmi_pmic_arb_qpnpint_type { + u8 type; /* 1 -> edge */ + u8 polarity_high; + u8 polarity_low; +} __packed; + +/* Simplified accessor functions for irqchip callbacks */ +static void qpnpint_spmi_write(struct irq_data *d, u8 reg, void *buf, + size_t len) +{ + struct spmi_pmic_arb *pmic_arb = irq_data_get_irq_chip_data(d); + u8 sid = hwirq_to_sid(d->hwirq); + u8 per = hwirq_to_per(d->hwirq); + + if (pmic_arb_write_cmd(pmic_arb->spmic, SPMI_CMD_EXT_WRITEL, sid, + (per << 8) + reg, buf, len)) + dev_err_ratelimited(&pmic_arb->spmic->dev, "failed irqchip transaction on %x\n", + d->irq); +} + +static void qpnpint_spmi_read(struct irq_data *d, u8 reg, void *buf, size_t len) +{ + struct spmi_pmic_arb *pmic_arb = irq_data_get_irq_chip_data(d); + u8 sid = hwirq_to_sid(d->hwirq); + u8 per = hwirq_to_per(d->hwirq); + + if (pmic_arb_read_cmd(pmic_arb->spmic, SPMI_CMD_EXT_READL, sid, + (per << 8) + reg, buf, len)) + dev_err_ratelimited(&pmic_arb->spmic->dev, "failed irqchip transaction on %x\n", + d->irq); +} + 
+static int qpnpint_spmi_masked_write(struct irq_data *d, u8 reg, + const void *buf, const void *mask, + size_t len) +{ + struct spmi_pmic_arb *pmic_arb = irq_data_get_irq_chip_data(d); + u8 sid = hwirq_to_sid(d->hwirq); + u8 per = hwirq_to_per(d->hwirq); + int rc; + + rc = pmic_arb_masked_write(pmic_arb->spmic, sid, (per << 8) + reg, buf, + mask, len); + if (rc) + dev_err_ratelimited(&pmic_arb->spmic->dev, "failed irqchip transaction on %x rc=%d\n", + d->irq, rc); + return rc; +} + +static void cleanup_irq(struct spmi_pmic_arb *pmic_arb, u16 apid, int id) +{ + u16 ppid = pmic_arb->apid_data[apid].ppid; + u8 sid = ppid >> 8; + u8 per = ppid & 0xFF; + u8 irq_mask = BIT(id); + + dev_err_ratelimited(&pmic_arb->spmic->dev, "%s apid=%d sid=0x%x per=0x%x irq=%d\n", + __func__, apid, sid, per, id); + writel_relaxed(irq_mask, pmic_arb->ver_ops->irq_clear(pmic_arb, apid)); +} + +static int periph_interrupt(struct spmi_pmic_arb *pmic_arb, u16 apid) +{ + unsigned int irq; + u32 status, id; + int handled = 0; + u8 sid = (pmic_arb->apid_data[apid].ppid >> 8) & 0xF; + u8 per = pmic_arb->apid_data[apid].ppid & 0xFF; + + status = readl_relaxed(pmic_arb->ver_ops->irq_status(pmic_arb, apid)); + while (status) { + id = ffs(status) - 1; + status &= ~BIT(id); + irq = irq_find_mapping(pmic_arb->domain, + spec_to_hwirq(sid, per, id, apid)); + if (irq == 0) { + cleanup_irq(pmic_arb, apid, id); + continue; + } + generic_handle_irq(irq); + handled++; + } + + return handled; +} + +static void pmic_arb_chained_irq(struct irq_desc *desc) +{ + struct spmi_pmic_arb *pmic_arb = irq_desc_get_handler_data(desc); + const struct pmic_arb_ver_ops *ver_ops = pmic_arb->ver_ops; + struct irq_chip *chip = irq_desc_get_chip(desc); + int first = pmic_arb->min_apid; + int last = pmic_arb->max_apid; + u8 ee = pmic_arb->ee; + u32 status, enable, handled = 0; + int i, id, apid; + /* status based dispatch */ + bool acc_valid = false; + u32 irq_status = 0; + + chained_irq_enter(chip, desc); + + for (i = first >> 5; i <= last >> 5; ++i) { + status = readl_relaxed( + ver_ops->owner_acc_status(pmic_arb, ee, i)); + if (status) + acc_valid = true; + + while (status) { + id = ffs(status) - 1; + status &= ~BIT(id); + apid = id + i * 32; + if (apid < first || apid > last) { + WARN_ONCE(true, "spurious spmi irq received for apid=%d\n", + apid); + continue; + } + enable = readl_relaxed( + ver_ops->acc_enable(pmic_arb, apid)); + if (enable & SPMI_PIC_ACC_ENABLE_BIT) + if (periph_interrupt(pmic_arb, apid) != 0) + handled++; + } + } + + /* ACC_STATUS is empty but IRQ fired check IRQ_STATUS */ + if (!acc_valid) { + for (i = first; i <= last; i++) { + /* skip if APPS is not irq owner */ + if (pmic_arb->apid_data[i].irq_ee != pmic_arb->ee) + continue; + + irq_status = readl_relaxed( + ver_ops->irq_status(pmic_arb, i)); + if (irq_status) { + enable = readl_relaxed( + ver_ops->acc_enable(pmic_arb, i)); + if (enable & SPMI_PIC_ACC_ENABLE_BIT) { + dev_dbg(&pmic_arb->spmic->dev, + "Dispatching IRQ for apid=%d status=%x\n", + i, irq_status); + if (periph_interrupt(pmic_arb, i) != 0) + handled++; + } + } + } + } + + if (handled == 0) + handle_bad_irq(desc); + + chained_irq_exit(chip, desc); +} + +static void qpnpint_irq_ack(struct irq_data *d) +{ + struct spmi_pmic_arb *pmic_arb = irq_data_get_irq_chip_data(d); + u8 irq = hwirq_to_irq(d->hwirq); + u16 apid = hwirq_to_apid(d->hwirq); + u8 data; + + writel_relaxed(BIT(irq), pmic_arb->ver_ops->irq_clear(pmic_arb, apid)); + + data = BIT(irq); + qpnpint_spmi_write(d, QPNPINT_REG_LATCHED_CLR, &data, 1); +} + +static 
void qpnpint_irq_mask(struct irq_data *d) +{ + u8 irq = hwirq_to_irq(d->hwirq); + u8 data = BIT(irq); + + qpnpint_spmi_write(d, QPNPINT_REG_EN_CLR, &data, 1); +} + +static void qpnpint_irq_unmask(struct irq_data *d) +{ + struct spmi_pmic_arb *pmic_arb = irq_data_get_irq_chip_data(d); + const struct pmic_arb_ver_ops *ver_ops = pmic_arb->ver_ops; + u8 irq = hwirq_to_irq(d->hwirq); + u16 apid = hwirq_to_apid(d->hwirq); + u8 buf[2]; + + writel_relaxed(SPMI_PIC_ACC_ENABLE_BIT, + ver_ops->acc_enable(pmic_arb, apid)); + + qpnpint_spmi_read(d, QPNPINT_REG_EN_SET, &buf[0], 1); + if (!(buf[0] & BIT(irq))) { + /* + * Since the interrupt is currently disabled, write to both the + * LATCHED_CLR and EN_SET registers so that a spurious interrupt + * cannot be triggered when the interrupt is enabled + */ + buf[0] = BIT(irq); + buf[1] = BIT(irq); + qpnpint_spmi_write(d, QPNPINT_REG_LATCHED_CLR, &buf, 2); + } +} + +static int qpnpint_irq_set_type(struct irq_data *d, unsigned int flow_type) +{ + struct spmi_pmic_arb_qpnpint_type type = {0}; + struct spmi_pmic_arb_qpnpint_type mask; + irq_flow_handler_t flow_handler; + u8 irq_bit = BIT(hwirq_to_irq(d->hwirq)); + int rc; + + if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) { + type.type = irq_bit; + if (flow_type & IRQF_TRIGGER_RISING) + type.polarity_high = irq_bit; + if (flow_type & IRQF_TRIGGER_FALLING) + type.polarity_low = irq_bit; + + flow_handler = handle_edge_irq; + } else { + if ((flow_type & (IRQF_TRIGGER_HIGH)) && + (flow_type & (IRQF_TRIGGER_LOW))) + return -EINVAL; + + if (flow_type & IRQF_TRIGGER_HIGH) + type.polarity_high = irq_bit; + else + type.polarity_low = irq_bit; + + flow_handler = handle_level_irq; + } + + mask.type = irq_bit; + mask.polarity_high = irq_bit; + mask.polarity_low = irq_bit; + + rc = qpnpint_spmi_masked_write(d, QPNPINT_REG_SET_TYPE, &type, &mask, + sizeof(type)); + irq_set_handler_locked(d, flow_handler); + + return rc; +} + +static int qpnpint_irq_set_wake(struct irq_data *d, unsigned int on) +{ + struct spmi_pmic_arb *pmic_arb = irq_data_get_irq_chip_data(d); + + return irq_set_irq_wake(pmic_arb->irq, on); +} + +static int qpnpint_get_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool *state) +{ + u8 irq = hwirq_to_irq(d->hwirq); + u8 status = 0; + + if (which != IRQCHIP_STATE_LINE_LEVEL) + return -EINVAL; + + qpnpint_spmi_read(d, QPNPINT_REG_RT_STS, &status, 1); + *state = !!(status & BIT(irq)); + + return 0; +} + +static int qpnpint_irq_domain_activate(struct irq_domain *domain, + struct irq_data *d, bool reserve) +{ + struct spmi_pmic_arb *pmic_arb = irq_data_get_irq_chip_data(d); + u16 periph = hwirq_to_per(d->hwirq); + u16 apid = hwirq_to_apid(d->hwirq); + u16 sid = hwirq_to_sid(d->hwirq); + u16 irq = hwirq_to_irq(d->hwirq); + u8 buf; + + if (pmic_arb->apid_data[apid].irq_ee != pmic_arb->ee) { + dev_err(&pmic_arb->spmic->dev, "failed to xlate sid = %#x, periph = %#x, irq = %u: ee=%u but owner=%u\n", + sid, periph, irq, pmic_arb->ee, + pmic_arb->apid_data[apid].irq_ee); + return -ENODEV; + } + + buf = BIT(irq); + qpnpint_spmi_write(d, QPNPINT_REG_EN_CLR, &buf, 1); + qpnpint_spmi_write(d, QPNPINT_REG_LATCHED_CLR, &buf, 1); + + return 0; +} + +static struct irq_chip pmic_arb_irqchip = { + .name = "pmic_arb", + .irq_ack = qpnpint_irq_ack, + .irq_mask = qpnpint_irq_mask, + .irq_unmask = qpnpint_irq_unmask, + .irq_set_type = qpnpint_irq_set_type, + .irq_set_wake = qpnpint_irq_set_wake, + .irq_get_irqchip_state = qpnpint_get_irqchip_state, + .flags = IRQCHIP_MASK_ON_SUSPEND, +}; + +static 
int qpnpint_irq_domain_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + unsigned long *out_hwirq, + unsigned int *out_type) +{ + struct spmi_pmic_arb *pmic_arb = d->host_data; + u32 *intspec = fwspec->param; + u16 apid, ppid; + int rc; + + dev_dbg(&pmic_arb->spmic->dev, "intspec[0] 0x%1x intspec[1] 0x%02x intspec[2] 0x%02x\n", + intspec[0], intspec[1], intspec[2]); + + if (irq_domain_get_of_node(d) != pmic_arb->spmic->dev.of_node) + return -EINVAL; + if (fwspec->param_count != 4) + return -EINVAL; + if (intspec[0] > 0xF || intspec[1] > 0xFF || intspec[2] > 0x7) + return -EINVAL; + + ppid = intspec[0] << 8 | intspec[1]; + rc = pmic_arb->ver_ops->ppid_to_apid(pmic_arb, ppid); + if (rc < 0) { + dev_err(&pmic_arb->spmic->dev, "failed to xlate sid = %#x, periph = %#x, irq = %u rc = %d\n", + intspec[0], intspec[1], intspec[2], rc); + return rc; + } + + apid = rc; + /* Keep track of {max,min}_apid for bounding search during interrupt */ + if (apid > pmic_arb->max_apid) + pmic_arb->max_apid = apid; + if (apid < pmic_arb->min_apid) + pmic_arb->min_apid = apid; + + *out_hwirq = spec_to_hwirq(intspec[0], intspec[1], intspec[2], apid); + *out_type = intspec[3] & IRQ_TYPE_SENSE_MASK; + + dev_dbg(&pmic_arb->spmic->dev, "out_hwirq = %lu\n", *out_hwirq); + + return 0; +} + +static struct lock_class_key qpnpint_irq_lock_class, qpnpint_irq_request_class; + +static void qpnpint_irq_domain_map(struct spmi_pmic_arb *pmic_arb, + struct irq_domain *domain, unsigned int virq, + irq_hw_number_t hwirq, unsigned int type) +{ + irq_flow_handler_t handler; + + dev_dbg(&pmic_arb->spmic->dev, "virq = %u, hwirq = %lu, type = %u\n", + virq, hwirq, type); + + if (type & IRQ_TYPE_EDGE_BOTH) + handler = handle_edge_irq; + else + handler = handle_level_irq; + + + irq_set_lockdep_class(virq, &qpnpint_irq_lock_class, + &qpnpint_irq_request_class); + irq_domain_set_info(domain, virq, hwirq, &pmic_arb_irqchip, pmic_arb, + handler, NULL, NULL); +} + +static int qpnpint_irq_domain_alloc(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs, + void *data) +{ + struct spmi_pmic_arb *pmic_arb = domain->host_data; + struct irq_fwspec *fwspec = data; + irq_hw_number_t hwirq; + unsigned int type; + int ret, i; + + ret = qpnpint_irq_domain_translate(domain, fwspec, &hwirq, &type); + if (ret) + return ret; + + for (i = 0; i < nr_irqs; i++) + qpnpint_irq_domain_map(pmic_arb, domain, virq + i, hwirq + i, + type); + + return 0; +} + +static int pmic_arb_ppid_to_apid_v1(struct spmi_pmic_arb *pmic_arb, u16 ppid) +{ + u32 *mapping_table = pmic_arb->mapping_table; + int index = 0, i; + u16 apid_valid; + u16 apid; + u32 data; + + apid_valid = pmic_arb->ppid_to_apid[ppid]; + if (apid_valid & PMIC_ARB_APID_VALID) { + apid = apid_valid & ~PMIC_ARB_APID_VALID; + return apid; + } + + for (i = 0; i < SPMI_MAPPING_TABLE_TREE_DEPTH; ++i) { + if (!test_and_set_bit(index, pmic_arb->mapping_table_valid)) + mapping_table[index] = readl_relaxed(pmic_arb->cnfg + + SPMI_MAPPING_TABLE_REG(index)); + + data = mapping_table[index]; + + if (ppid & BIT(SPMI_MAPPING_BIT_INDEX(data))) { + if (SPMI_MAPPING_BIT_IS_1_FLAG(data)) { + index = SPMI_MAPPING_BIT_IS_1_RESULT(data); + } else { + apid = SPMI_MAPPING_BIT_IS_1_RESULT(data); + pmic_arb->ppid_to_apid[ppid] + = apid | PMIC_ARB_APID_VALID; + pmic_arb->apid_data[apid].ppid = ppid; + return apid; + } + } else { + if (SPMI_MAPPING_BIT_IS_0_FLAG(data)) { + index = SPMI_MAPPING_BIT_IS_0_RESULT(data); + } else { + apid = SPMI_MAPPING_BIT_IS_0_RESULT(data); + pmic_arb->ppid_to_apid[ppid] + = apid 
| PMIC_ARB_APID_VALID; + pmic_arb->apid_data[apid].ppid = ppid; + return apid; + } + } + } + + return -ENODEV; +} + +/* v1 offset per ee */ +static int pmic_arb_offset_v1(struct spmi_pmic_arb *pmic_arb, u8 sid, u16 addr, + enum pmic_arb_channel ch_type) +{ + return 0x800 + 0x80 * pmic_arb->channel; +} + +static u16 pmic_arb_find_apid(struct spmi_pmic_arb *pmic_arb, u16 ppid) +{ + struct apid_data *apidd = &pmic_arb->apid_data[pmic_arb->last_apid]; + u32 regval, offset; + u16 id, apid; + + for (apid = pmic_arb->last_apid; ; apid++, apidd++) { + offset = pmic_arb->ver_ops->apid_map_offset(apid); + if (offset >= pmic_arb->core_size) + break; + + regval = readl_relaxed(pmic_arb->cnfg + + SPMI_OWNERSHIP_TABLE_REG(apid)); + apidd->irq_ee = SPMI_OWNERSHIP_PERIPH2OWNER(regval); + apidd->write_ee = apidd->irq_ee; + + regval = readl_relaxed(pmic_arb->core + offset); + if (!regval) + continue; + + id = (regval >> 8) & PMIC_ARB_PPID_MASK; + pmic_arb->ppid_to_apid[id] = apid | PMIC_ARB_APID_VALID; + apidd->ppid = id; + if (id == ppid) { + apid |= PMIC_ARB_APID_VALID; + break; + } + } + pmic_arb->last_apid = apid & ~PMIC_ARB_APID_VALID; + + return apid; +} + +static int pmic_arb_ppid_to_apid_v2(struct spmi_pmic_arb *pmic_arb, u16 ppid) +{ + u16 apid_valid; + + apid_valid = pmic_arb->ppid_to_apid[ppid]; + if (!(apid_valid & PMIC_ARB_APID_VALID)) + apid_valid = pmic_arb_find_apid(pmic_arb, ppid); + if (!(apid_valid & PMIC_ARB_APID_VALID)) + return -ENODEV; + + return apid_valid & ~PMIC_ARB_APID_VALID; +} + +static int pmic_arb_read_apid_map_v5(struct spmi_pmic_arb *pmic_arb) +{ + struct apid_data *apidd = pmic_arb->apid_data; + struct apid_data *prev_apidd; + u16 i, apid, ppid; + bool valid, is_irq_ee; + u32 regval, offset; + + /* + * In order to allow multiple EEs to write to a single PPID in arbiter + * version 5, there is more than one APID mapped to each PPID. + * The owner field for each of these mappings specifies the EE which is + * allowed to write to the APID. The owner of the last (highest) APID + * which has the IRQ owner bit set for a given PPID will receive + * interrupts from the PPID. + */ + for (i = 0; ; i++, apidd++) { + offset = pmic_arb->ver_ops->apid_map_offset(i); + if (offset >= pmic_arb->core_size) + break; + + regval = readl_relaxed(pmic_arb->core + offset); + if (!regval) + continue; + ppid = (regval >> 8) & PMIC_ARB_PPID_MASK; + is_irq_ee = PMIC_ARB_CHAN_IS_IRQ_OWNER(regval); + + regval = readl_relaxed(pmic_arb->cnfg + + SPMI_OWNERSHIP_TABLE_REG(i)); + apidd->write_ee = SPMI_OWNERSHIP_PERIPH2OWNER(regval); + + apidd->irq_ee = is_irq_ee ? apidd->write_ee : INVALID_EE; + + valid = pmic_arb->ppid_to_apid[ppid] & PMIC_ARB_APID_VALID; + apid = pmic_arb->ppid_to_apid[ppid] & ~PMIC_ARB_APID_VALID; + prev_apidd = &pmic_arb->apid_data[apid]; + + if (!valid || apidd->write_ee == pmic_arb->ee) { + /* First PPID mapping or one for this EE */ + pmic_arb->ppid_to_apid[ppid] = i | PMIC_ARB_APID_VALID; + } else if (valid && is_irq_ee && + prev_apidd->write_ee == pmic_arb->ee) { + /* + * Duplicate PPID mapping after the one for this EE; + * override the irq owner + */ + prev_apidd->irq_ee = apidd->irq_ee; + } + + apidd->ppid = ppid; + pmic_arb->last_apid = i; + } + + /* Dump the mapping table for debug purposes. 
*/ + dev_dbg(&pmic_arb->spmic->dev, "PPID APID Write-EE IRQ-EE\n"); + for (ppid = 0; ppid < PMIC_ARB_MAX_PPID; ppid++) { + apid = pmic_arb->ppid_to_apid[ppid]; + if (apid & PMIC_ARB_APID_VALID) { + apid &= ~PMIC_ARB_APID_VALID; + apidd = &pmic_arb->apid_data[apid]; + dev_dbg(&pmic_arb->spmic->dev, "%#03X %3u %2u %2u\n", + ppid, apid, apidd->write_ee, apidd->irq_ee); + } + } + + return 0; +} + +static int pmic_arb_ppid_to_apid_v5(struct spmi_pmic_arb *pmic_arb, u16 ppid) +{ + if (!(pmic_arb->ppid_to_apid[ppid] & PMIC_ARB_APID_VALID)) + return -ENODEV; + + return pmic_arb->ppid_to_apid[ppid] & ~PMIC_ARB_APID_VALID; +} + +/* v2 offset per ppid and per ee */ +static int pmic_arb_offset_v2(struct spmi_pmic_arb *pmic_arb, u8 sid, u16 addr, + enum pmic_arb_channel ch_type) +{ + u16 apid; + u16 ppid; + int rc; + + ppid = sid << 8 | ((addr >> 8) & 0xFF); + rc = pmic_arb_ppid_to_apid_v2(pmic_arb, ppid); + if (rc < 0) + return rc; + + apid = rc; + return 0x1000 * pmic_arb->ee + 0x8000 * apid; +} + +/* + * v5 offset per ee and per apid for observer channels and per apid for + * read/write channels. + */ +static int pmic_arb_offset_v5(struct spmi_pmic_arb *pmic_arb, u8 sid, u16 addr, + enum pmic_arb_channel ch_type) +{ + u16 apid; + int rc; + u32 offset = 0; + u16 ppid = (sid << 8) | (addr >> 8); + + rc = pmic_arb_ppid_to_apid_v5(pmic_arb, ppid); + if (rc < 0) + return rc; + + apid = rc; + switch (ch_type) { + case PMIC_ARB_CHANNEL_OBS: + offset = 0x10000 * pmic_arb->ee + 0x80 * apid; + break; + case PMIC_ARB_CHANNEL_RW: + if (pmic_arb->apid_data[apid].write_ee != pmic_arb->ee) { + dev_err(&pmic_arb->spmic->dev, "disallowed SPMI write to sid=%u, addr=0x%04X\n", + sid, addr); + return -EPERM; + } + offset = 0x10000 * apid; + break; + } + + return offset; +} + +static u32 pmic_arb_fmt_cmd_v1(u8 opc, u8 sid, u16 addr, u8 bc) +{ + return (opc << 27) | ((sid & 0xf) << 20) | (addr << 4) | (bc & 0x7); +} + +static u32 pmic_arb_fmt_cmd_v2(u8 opc, u8 sid, u16 addr, u8 bc) +{ + return (opc << 27) | ((addr & 0xff) << 4) | (bc & 0x7); +} + +static void __iomem * +pmic_arb_owner_acc_status_v1(struct spmi_pmic_arb *pmic_arb, u8 m, u16 n) +{ + return pmic_arb->intr + 0x20 * m + 0x4 * n; +} + +static void __iomem * +pmic_arb_owner_acc_status_v2(struct spmi_pmic_arb *pmic_arb, u8 m, u16 n) +{ + return pmic_arb->intr + 0x100000 + 0x1000 * m + 0x4 * n; +} + +static void __iomem * +pmic_arb_owner_acc_status_v3(struct spmi_pmic_arb *pmic_arb, u8 m, u16 n) +{ + return pmic_arb->intr + 0x200000 + 0x1000 * m + 0x4 * n; +} + +static void __iomem * +pmic_arb_owner_acc_status_v5(struct spmi_pmic_arb *pmic_arb, u8 m, u16 n) +{ + return pmic_arb->intr + 0x10000 * m + 0x4 * n; +} + +static void __iomem * +pmic_arb_acc_enable_v1(struct spmi_pmic_arb *pmic_arb, u16 n) +{ + return pmic_arb->intr + 0x200 + 0x4 * n; +} + +static void __iomem * +pmic_arb_acc_enable_v2(struct spmi_pmic_arb *pmic_arb, u16 n) +{ + return pmic_arb->intr + 0x1000 * n; +} + +static void __iomem * +pmic_arb_acc_enable_v5(struct spmi_pmic_arb *pmic_arb, u16 n) +{ + return pmic_arb->wr_base + 0x100 + 0x10000 * n; +} + +static void __iomem * +pmic_arb_irq_status_v1(struct spmi_pmic_arb *pmic_arb, u16 n) +{ + return pmic_arb->intr + 0x600 + 0x4 * n; +} + +static void __iomem * +pmic_arb_irq_status_v2(struct spmi_pmic_arb *pmic_arb, u16 n) +{ + return pmic_arb->intr + 0x4 + 0x1000 * n; +} + +static void __iomem * +pmic_arb_irq_status_v5(struct spmi_pmic_arb *pmic_arb, u16 n) +{ + return pmic_arb->wr_base + 0x104 + 0x10000 * n; +} + +static void __iomem * 
+pmic_arb_irq_clear_v1(struct spmi_pmic_arb *pmic_arb, u16 n) +{ + return pmic_arb->intr + 0xA00 + 0x4 * n; +} + +static void __iomem * +pmic_arb_irq_clear_v2(struct spmi_pmic_arb *pmic_arb, u16 n) +{ + return pmic_arb->intr + 0x8 + 0x1000 * n; +} + +static void __iomem * +pmic_arb_irq_clear_v5(struct spmi_pmic_arb *pmic_arb, u16 n) +{ + return pmic_arb->wr_base + 0x108 + 0x10000 * n; +} + +static u32 pmic_arb_apid_map_offset_v2(u16 n) +{ + return 0x800 + 0x4 * n; +} + +static u32 pmic_arb_apid_map_offset_v5(u16 n) +{ + return 0x900 + 0x4 * n; +} + +static const struct pmic_arb_ver_ops pmic_arb_v1 = { + .ver_str = "v1", + .ppid_to_apid = pmic_arb_ppid_to_apid_v1, + .non_data_cmd = pmic_arb_non_data_cmd_v1, + .offset = pmic_arb_offset_v1, + .fmt_cmd = pmic_arb_fmt_cmd_v1, + .owner_acc_status = pmic_arb_owner_acc_status_v1, + .acc_enable = pmic_arb_acc_enable_v1, + .irq_status = pmic_arb_irq_status_v1, + .irq_clear = pmic_arb_irq_clear_v1, + .apid_map_offset = pmic_arb_apid_map_offset_v2, +}; + +static const struct pmic_arb_ver_ops pmic_arb_v2 = { + .ver_str = "v2", + .ppid_to_apid = pmic_arb_ppid_to_apid_v2, + .non_data_cmd = pmic_arb_non_data_cmd_v2, + .offset = pmic_arb_offset_v2, + .fmt_cmd = pmic_arb_fmt_cmd_v2, + .owner_acc_status = pmic_arb_owner_acc_status_v2, + .acc_enable = pmic_arb_acc_enable_v2, + .irq_status = pmic_arb_irq_status_v2, + .irq_clear = pmic_arb_irq_clear_v2, + .apid_map_offset = pmic_arb_apid_map_offset_v2, +}; + +static const struct pmic_arb_ver_ops pmic_arb_v3 = { + .ver_str = "v3", + .ppid_to_apid = pmic_arb_ppid_to_apid_v2, + .non_data_cmd = pmic_arb_non_data_cmd_v2, + .offset = pmic_arb_offset_v2, + .fmt_cmd = pmic_arb_fmt_cmd_v2, + .owner_acc_status = pmic_arb_owner_acc_status_v3, + .acc_enable = pmic_arb_acc_enable_v2, + .irq_status = pmic_arb_irq_status_v2, + .irq_clear = pmic_arb_irq_clear_v2, + .apid_map_offset = pmic_arb_apid_map_offset_v2, +}; + +static const struct pmic_arb_ver_ops pmic_arb_v5 = { + .ver_str = "v5", + .ppid_to_apid = pmic_arb_ppid_to_apid_v5, + .non_data_cmd = pmic_arb_non_data_cmd_v2, + .offset = pmic_arb_offset_v5, + .fmt_cmd = pmic_arb_fmt_cmd_v2, + .owner_acc_status = pmic_arb_owner_acc_status_v5, + .acc_enable = pmic_arb_acc_enable_v5, + .irq_status = pmic_arb_irq_status_v5, + .irq_clear = pmic_arb_irq_clear_v5, + .apid_map_offset = pmic_arb_apid_map_offset_v5, +}; + +static const struct irq_domain_ops pmic_arb_irq_domain_ops = { + .activate = qpnpint_irq_domain_activate, + .alloc = qpnpint_irq_domain_alloc, + .free = irq_domain_free_irqs_common, + .translate = qpnpint_irq_domain_translate, +}; + +static int spmi_pmic_arb_probe(struct platform_device *pdev) +{ + struct spmi_pmic_arb *pmic_arb; + struct spmi_controller *ctrl; + struct resource *res; + void __iomem *core; + u32 *mapping_table; + u32 channel, ee, hw_ver; + int err; + + ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*pmic_arb)); + if (!ctrl) + return -ENOMEM; + + pmic_arb = spmi_controller_get_drvdata(ctrl); + pmic_arb->spmic = ctrl; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core"); + core = devm_ioremap_resource(&ctrl->dev, res); + if (IS_ERR(core)) { + err = PTR_ERR(core); + goto err_put_ctrl; + } + + pmic_arb->core_size = resource_size(res); + + pmic_arb->ppid_to_apid = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PPID, + sizeof(*pmic_arb->ppid_to_apid), + GFP_KERNEL); + if (!pmic_arb->ppid_to_apid) { + err = -ENOMEM; + goto err_put_ctrl; + } + + hw_ver = readl_relaxed(core + PMIC_ARB_VERSION); + + if (hw_ver < PMIC_ARB_VERSION_V2_MIN) { + 
pmic_arb->ver_ops = &pmic_arb_v1; + pmic_arb->wr_base = core; + pmic_arb->rd_base = core; + } else { + pmic_arb->core = core; + + if (hw_ver < PMIC_ARB_VERSION_V3_MIN) + pmic_arb->ver_ops = &pmic_arb_v2; + else if (hw_ver < PMIC_ARB_VERSION_V5_MIN) + pmic_arb->ver_ops = &pmic_arb_v3; + else + pmic_arb->ver_ops = &pmic_arb_v5; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "obsrvr"); + pmic_arb->rd_base = devm_ioremap_resource(&ctrl->dev, res); + if (IS_ERR(pmic_arb->rd_base)) { + err = PTR_ERR(pmic_arb->rd_base); + goto err_put_ctrl; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "chnls"); + pmic_arb->wr_base = devm_ioremap_resource(&ctrl->dev, res); + if (IS_ERR(pmic_arb->wr_base)) { + err = PTR_ERR(pmic_arb->wr_base); + goto err_put_ctrl; + } + } + + dev_info(&ctrl->dev, "PMIC arbiter version %s (0x%x)\n", + pmic_arb->ver_ops->ver_str, hw_ver); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "intr"); + pmic_arb->intr = devm_ioremap_resource(&ctrl->dev, res); + if (IS_ERR(pmic_arb->intr)) { + err = PTR_ERR(pmic_arb->intr); + goto err_put_ctrl; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cnfg"); + pmic_arb->cnfg = devm_ioremap_resource(&ctrl->dev, res); + if (IS_ERR(pmic_arb->cnfg)) { + err = PTR_ERR(pmic_arb->cnfg); + goto err_put_ctrl; + } + + pmic_arb->irq = platform_get_irq_byname(pdev, "periph_irq"); + if (pmic_arb->irq < 0) { + err = pmic_arb->irq; + goto err_put_ctrl; + } + + err = of_property_read_u32(pdev->dev.of_node, "qcom,channel", &channel); + if (err) { + dev_err(&pdev->dev, "channel unspecified.\n"); + goto err_put_ctrl; + } + + if (channel > 5) { + dev_err(&pdev->dev, "invalid channel (%u) specified.\n", + channel); + err = -EINVAL; + goto err_put_ctrl; + } + + pmic_arb->channel = channel; + + err = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &ee); + if (err) { + dev_err(&pdev->dev, "EE unspecified.\n"); + goto err_put_ctrl; + } + + if (ee > 5) { + dev_err(&pdev->dev, "invalid EE (%u) specified\n", ee); + err = -EINVAL; + goto err_put_ctrl; + } + + pmic_arb->ee = ee; + mapping_table = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PERIPHS, + sizeof(*mapping_table), GFP_KERNEL); + if (!mapping_table) { + err = -ENOMEM; + goto err_put_ctrl; + } + + pmic_arb->mapping_table = mapping_table; + /* Initialize max_apid/min_apid to the opposite bounds, during + * the irq domain translation, we are sure to update these */ + pmic_arb->max_apid = 0; + pmic_arb->min_apid = PMIC_ARB_MAX_PERIPHS - 1; + + platform_set_drvdata(pdev, ctrl); + raw_spin_lock_init(&pmic_arb->lock); + + ctrl->cmd = pmic_arb_cmd; + ctrl->read_cmd = pmic_arb_read_cmd; + ctrl->write_cmd = pmic_arb_write_cmd; + + if (hw_ver >= PMIC_ARB_VERSION_V5_MIN) { + err = pmic_arb_read_apid_map_v5(pmic_arb); + if (err) { + dev_err(&pdev->dev, "could not read APID->PPID mapping table, rc= %d\n", + err); + goto err_put_ctrl; + } + } + + dev_dbg(&pdev->dev, "adding irq domain\n"); + pmic_arb->domain = irq_domain_add_tree(pdev->dev.of_node, + &pmic_arb_irq_domain_ops, pmic_arb); + if (!pmic_arb->domain) { + dev_err(&pdev->dev, "unable to create irq_domain\n"); + err = -ENOMEM; + goto err_put_ctrl; + } + + irq_set_chained_handler_and_data(pmic_arb->irq, pmic_arb_chained_irq, + pmic_arb); + err = spmi_controller_add(ctrl); + if (err) + goto err_domain_remove; + + return 0; + +err_domain_remove: + irq_set_chained_handler_and_data(pmic_arb->irq, NULL, NULL); + irq_domain_remove(pmic_arb->domain); +err_put_ctrl: + spmi_controller_put(ctrl); + return err; +} + +static 
int spmi_pmic_arb_remove(struct platform_device *pdev) +{ + struct spmi_controller *ctrl = platform_get_drvdata(pdev); + struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl); + spmi_controller_remove(ctrl); + irq_set_chained_handler_and_data(pmic_arb->irq, NULL, NULL); + irq_domain_remove(pmic_arb->domain); + spmi_controller_put(ctrl); + return 0; +} + +static const struct of_device_id spmi_pmic_arb_match_table[] = { + { .compatible = "qcom,spmi-pmic-arb", }, + {}, +}; +MODULE_DEVICE_TABLE(of, spmi_pmic_arb_match_table); + +static struct platform_driver spmi_pmic_arb_driver = { + .probe = spmi_pmic_arb_probe, + .remove = spmi_pmic_arb_remove, + .driver = { + .name = "spmi_pmic_arb", + .of_match_table = spmi_pmic_arb_match_table, + }, +}; +module_platform_driver(spmi_pmic_arb_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:spmi_pmic_arb"); diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c new file mode 100644 index 000000000..e73d30178 --- /dev/null +++ b/drivers/spmi/spmi.c @@ -0,0 +1,621 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved. + */ +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/idr.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/spmi.h> +#include <linux/pm_runtime.h> + +#include <dt-bindings/spmi/spmi.h> +#define CREATE_TRACE_POINTS +#include <trace/events/spmi.h> + +static bool is_registered; +static DEFINE_IDA(ctrl_ida); + +static void spmi_dev_release(struct device *dev) +{ + struct spmi_device *sdev = to_spmi_device(dev); + + kfree(sdev); +} + +static const struct device_type spmi_dev_type = { + .release = spmi_dev_release, +}; + +static void spmi_ctrl_release(struct device *dev) +{ + struct spmi_controller *ctrl = to_spmi_controller(dev); + + ida_free(&ctrl_ida, ctrl->nr); + kfree(ctrl); +} + +static const struct device_type spmi_ctrl_type = { + .release = spmi_ctrl_release, +}; + +static int spmi_device_match(struct device *dev, struct device_driver *drv) +{ + if (of_driver_match_device(dev, drv)) + return 1; + + if (drv->name) + return strncmp(dev_name(dev), drv->name, + SPMI_NAME_SIZE) == 0; + + return 0; +} + +/** + * spmi_device_add() - add a device previously constructed via spmi_device_alloc() + * @sdev: spmi_device to be added + */ +int spmi_device_add(struct spmi_device *sdev) +{ + struct spmi_controller *ctrl = sdev->ctrl; + int err; + + dev_set_name(&sdev->dev, "%d-%02x", ctrl->nr, sdev->usid); + + err = device_add(&sdev->dev); + if (err < 0) { + dev_err(&sdev->dev, "Can't add %s, status %d\n", + dev_name(&sdev->dev), err); + goto err_device_add; + } + + dev_dbg(&sdev->dev, "device %s registered\n", dev_name(&sdev->dev)); + +err_device_add: + return err; +} +EXPORT_SYMBOL_GPL(spmi_device_add); + +/** + * spmi_device_remove(): remove an SPMI device + * @sdev: spmi_device to be removed + */ +void spmi_device_remove(struct spmi_device *sdev) +{ + device_unregister(&sdev->dev); +} +EXPORT_SYMBOL_GPL(spmi_device_remove); + +static inline int +spmi_cmd(struct spmi_controller *ctrl, u8 opcode, u8 sid) +{ + int ret; + + if (!ctrl || !ctrl->cmd || ctrl->dev.type != &spmi_ctrl_type) + return -EINVAL; + + ret = ctrl->cmd(ctrl, opcode, sid); + trace_spmi_cmd(opcode, sid, ret); + return ret; +} + +static inline int spmi_read_cmd(struct spmi_controller *ctrl, u8 opcode, + u8 sid, u16 addr, u8 *buf, size_t len) +{ + int ret; + + if (!ctrl || 
!ctrl->read_cmd || ctrl->dev.type != &spmi_ctrl_type)
+ return -EINVAL;
+
+ trace_spmi_read_begin(opcode, sid, addr);
+ ret = ctrl->read_cmd(ctrl, opcode, sid, addr, buf, len);
+ trace_spmi_read_end(opcode, sid, addr, ret, len, buf);
+ return ret;
+}
+
+static inline int spmi_write_cmd(struct spmi_controller *ctrl, u8 opcode,
+ u8 sid, u16 addr, const u8 *buf, size_t len)
+{
+ int ret;
+
+ if (!ctrl || !ctrl->write_cmd || ctrl->dev.type != &spmi_ctrl_type)
+ return -EINVAL;
+
+ trace_spmi_write_begin(opcode, sid, addr, len, buf);
+ ret = ctrl->write_cmd(ctrl, opcode, sid, addr, buf, len);
+ trace_spmi_write_end(opcode, sid, addr, ret);
+ return ret;
+}
+
+/**
+ * spmi_register_read() - register read
+ * @sdev: SPMI device.
+ * @addr: slave register address (5-bit address).
+ * @buf: buffer to be populated with data from the Slave.
+ *
+ * Reads 1 byte of data from a Slave device register.
+ */
+int spmi_register_read(struct spmi_device *sdev, u8 addr, u8 *buf)
+{
+ /* 5-bit register address */
+ if (addr > 0x1F)
+ return -EINVAL;
+
+ return spmi_read_cmd(sdev->ctrl, SPMI_CMD_READ, sdev->usid, addr,
+ buf, 1);
+}
+EXPORT_SYMBOL_GPL(spmi_register_read);
+
+/**
+ * spmi_ext_register_read() - extended register read
+ * @sdev: SPMI device.
+ * @addr: slave register address (8-bit address).
+ * @buf: buffer to be populated with data from the Slave.
+ * @len: the requested number of bytes to read (up to 16 bytes).
+ *
+ * Reads up to 16 bytes of data from the extended register space on a
+ * Slave device.
+ */
+int spmi_ext_register_read(struct spmi_device *sdev, u8 addr, u8 *buf,
+ size_t len)
+{
+ /* 8-bit register address, up to 16 bytes */
+ if (len == 0 || len > 16)
+ return -EINVAL;
+
+ return spmi_read_cmd(sdev->ctrl, SPMI_CMD_EXT_READ, sdev->usid, addr,
+ buf, len);
+}
+EXPORT_SYMBOL_GPL(spmi_ext_register_read);
+
+/**
+ * spmi_ext_register_readl() - extended register read long
+ * @sdev: SPMI device.
+ * @addr: slave register address (16-bit address).
+ * @buf: buffer to be populated with data from the Slave.
+ * @len: the requested number of bytes to read (up to 8 bytes).
+ *
+ * Reads up to 8 bytes of data from the extended register space on a
+ * Slave device using 16-bit address.
+ */
+int spmi_ext_register_readl(struct spmi_device *sdev, u16 addr, u8 *buf,
+ size_t len)
+{
+ /* 16-bit register address, up to 8 bytes */
+ if (len == 0 || len > 8)
+ return -EINVAL;
+
+ return spmi_read_cmd(sdev->ctrl, SPMI_CMD_EXT_READL, sdev->usid, addr,
+ buf, len);
+}
+EXPORT_SYMBOL_GPL(spmi_ext_register_readl);
+
+/**
+ * spmi_register_write() - register write
+ * @sdev: SPMI device
+ * @addr: slave register address (5-bit address).
+ * @data: buffer containing the data to be transferred to the Slave.
+ *
+ * Writes 1 byte of data to a Slave device register.
+ */
+int spmi_register_write(struct spmi_device *sdev, u8 addr, u8 data)
+{
+ /* 5-bit register address */
+ if (addr > 0x1F)
+ return -EINVAL;
+
+ return spmi_write_cmd(sdev->ctrl, SPMI_CMD_WRITE, sdev->usid, addr,
+ &data, 1);
+}
+EXPORT_SYMBOL_GPL(spmi_register_write);
+
+/**
+ * spmi_register_zero_write() - register zero write
+ * @sdev: SPMI device.
+ * @data: the data to be written to register 0 (7-bits).
+ *
+ * Writes data to register 0 of the Slave device.
+ */
+int spmi_register_zero_write(struct spmi_device *sdev, u8 data)
+{
+ return spmi_write_cmd(sdev->ctrl, SPMI_CMD_ZERO_WRITE, sdev->usid, 0,
+ &data, 1);
+}
+EXPORT_SYMBOL_GPL(spmi_register_zero_write);
+
+/**
+ * spmi_ext_register_write() - extended register write
+ * @sdev: SPMI device.
+ * @addr: slave register address (8-bit address).
+ * @buf: buffer containing the data to be transferred to the Slave.
+ * @len: the requested number of bytes to write (up to 16 bytes).
+ *
+ * Writes up to 16 bytes of data to the extended register space of a
+ * Slave device.
+ */
+int spmi_ext_register_write(struct spmi_device *sdev, u8 addr, const u8 *buf,
+ size_t len)
+{
+ /* 8-bit register address, up to 16 bytes */
+ if (len == 0 || len > 16)
+ return -EINVAL;
+
+ return spmi_write_cmd(sdev->ctrl, SPMI_CMD_EXT_WRITE, sdev->usid, addr,
+ buf, len);
+}
+EXPORT_SYMBOL_GPL(spmi_ext_register_write);
+
+/**
+ * spmi_ext_register_writel() - extended register write long
+ * @sdev: SPMI device.
+ * @addr: slave register address (16-bit address).
+ * @buf: buffer containing the data to be transferred to the Slave.
+ * @len: the requested number of bytes to write (up to 8 bytes).
+ *
+ * Writes up to 8 bytes of data to the extended register space of a
+ * Slave device using 16-bit address.
+ */
+int spmi_ext_register_writel(struct spmi_device *sdev, u16 addr, const u8 *buf,
+ size_t len)
+{
+ /* 4-bit Slave Identifier, 16-bit register address, up to 8 bytes */
+ if (len == 0 || len > 8)
+ return -EINVAL;
+
+ return spmi_write_cmd(sdev->ctrl, SPMI_CMD_EXT_WRITEL, sdev->usid,
+ addr, buf, len);
+}
+EXPORT_SYMBOL_GPL(spmi_ext_register_writel);
+
+/**
+ * spmi_command_reset() - sends RESET command to the specified slave
+ * @sdev: SPMI device.
+ *
+ * The Reset command initializes the Slave and forces all registers to
+ * their reset values. The Slave shall enter the STARTUP state after
+ * receiving a Reset command.
+ */
+int spmi_command_reset(struct spmi_device *sdev)
+{
+ return spmi_cmd(sdev->ctrl, SPMI_CMD_RESET, sdev->usid);
+}
+EXPORT_SYMBOL_GPL(spmi_command_reset);
+
+/**
+ * spmi_command_sleep() - sends SLEEP command to the specified SPMI device
+ * @sdev: SPMI device.
+ *
+ * The Sleep command causes the Slave to enter the user defined SLEEP state.
+ */
+int spmi_command_sleep(struct spmi_device *sdev)
+{
+ return spmi_cmd(sdev->ctrl, SPMI_CMD_SLEEP, sdev->usid);
+}
+EXPORT_SYMBOL_GPL(spmi_command_sleep);
+
+/**
+ * spmi_command_wakeup() - sends WAKEUP command to the specified SPMI device
+ * @sdev: SPMI device.
+ *
+ * The Wakeup command causes the Slave to move from the SLEEP state to
+ * the ACTIVE state.
+ */
+int spmi_command_wakeup(struct spmi_device *sdev)
+{
+ return spmi_cmd(sdev->ctrl, SPMI_CMD_WAKEUP, sdev->usid);
+}
+EXPORT_SYMBOL_GPL(spmi_command_wakeup);
+
+/**
+ * spmi_command_shutdown() - sends SHUTDOWN command to the specified SPMI device
+ * @sdev: SPMI device.
+ *
+ * The Shutdown command causes the Slave to enter the SHUTDOWN state.
+ */
+int spmi_command_shutdown(struct spmi_device *sdev)
+{
+ return spmi_cmd(sdev->ctrl, SPMI_CMD_SHUTDOWN, sdev->usid);
+}
+EXPORT_SYMBOL_GPL(spmi_command_shutdown);
+
+static int spmi_drv_probe(struct device *dev)
+{
+ const struct spmi_driver *sdrv = to_spmi_driver(dev->driver);
+ struct spmi_device *sdev = to_spmi_device(dev);
+ int err;
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ err = sdrv->probe(sdev);
+ if (err)
+ goto fail_probe;
+
+ return 0;
+
+fail_probe:
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_noidle(dev);
+ return err;
+}
+
+static void spmi_drv_remove(struct device *dev)
+{
+ const struct spmi_driver *sdrv = to_spmi_driver(dev->driver);
+
+ pm_runtime_get_sync(dev);
+ if (sdrv->remove)
+ sdrv->remove(to_spmi_device(dev));
+ pm_runtime_put_noidle(dev);
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_noidle(dev);
+}
+
+static void spmi_drv_shutdown(struct device *dev)
+{
+ const struct spmi_driver *sdrv = to_spmi_driver(dev->driver);
+
+ if (sdrv && sdrv->shutdown)
+ sdrv->shutdown(to_spmi_device(dev));
+}
+
+static int spmi_drv_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ int ret;
+
+ ret = of_device_uevent_modalias(dev, env);
+ if (ret != -ENODEV)
+ return ret;
+
+ return 0;
+}
+
+static struct bus_type spmi_bus_type = {
+ .name = "spmi",
+ .match = spmi_device_match,
+ .probe = spmi_drv_probe,
+ .remove = spmi_drv_remove,
+ .shutdown = spmi_drv_shutdown,
+ .uevent = spmi_drv_uevent,
+};
+
+/**
+ * spmi_device_from_of() - get the associated SPMI device from a device node
+ *
+ * @np: device node
+ *
+ * Returns the struct spmi_device associated with a device node or NULL.
+ */
+struct spmi_device *spmi_device_from_of(struct device_node *np)
+{
+ struct device *dev = bus_find_device_by_of_node(&spmi_bus_type, np);
+
+ if (dev)
+ return to_spmi_device(dev);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(spmi_device_from_of);
+
+/**
+ * spmi_device_alloc() - Allocate a new SPMI device
+ * @ctrl: associated controller
+ *
+ * Caller is responsible for either calling spmi_device_add() to add the
+ * newly allocated device, or calling spmi_device_put() to discard it.
+ */
+struct spmi_device *spmi_device_alloc(struct spmi_controller *ctrl)
+{
+ struct spmi_device *sdev;
+
+ sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
+ if (!sdev)
+ return NULL;
+
+ sdev->ctrl = ctrl;
+ device_initialize(&sdev->dev);
+ sdev->dev.parent = &ctrl->dev;
+ sdev->dev.bus = &spmi_bus_type;
+ sdev->dev.type = &spmi_dev_type;
+ return sdev;
+}
+EXPORT_SYMBOL_GPL(spmi_device_alloc);
+
+/**
+ * spmi_controller_alloc() - Allocate a new SPMI controller
+ * @parent: parent device
+ * @size: size of private data
+ *
+ * Caller is responsible for either calling spmi_controller_add() to add the
+ * newly allocated controller, or calling spmi_controller_put() to discard it.
+ * The allocated private data region may be accessed via + * spmi_controller_get_drvdata() + */ +struct spmi_controller *spmi_controller_alloc(struct device *parent, + size_t size) +{ + struct spmi_controller *ctrl; + int id; + + if (WARN_ON(!parent)) + return NULL; + + ctrl = kzalloc(sizeof(*ctrl) + size, GFP_KERNEL); + if (!ctrl) + return NULL; + + device_initialize(&ctrl->dev); + ctrl->dev.type = &spmi_ctrl_type; + ctrl->dev.bus = &spmi_bus_type; + ctrl->dev.parent = parent; + ctrl->dev.of_node = parent->of_node; + spmi_controller_set_drvdata(ctrl, &ctrl[1]); + + id = ida_alloc(&ctrl_ida, GFP_KERNEL); + if (id < 0) { + dev_err(parent, + "unable to allocate SPMI controller identifier.\n"); + spmi_controller_put(ctrl); + return NULL; + } + + ctrl->nr = id; + dev_set_name(&ctrl->dev, "spmi-%d", id); + + dev_dbg(&ctrl->dev, "allocated controller 0x%p id %d\n", ctrl, id); + return ctrl; +} +EXPORT_SYMBOL_GPL(spmi_controller_alloc); + +static void of_spmi_register_devices(struct spmi_controller *ctrl) +{ + struct device_node *node; + int err; + + if (!ctrl->dev.of_node) + return; + + for_each_available_child_of_node(ctrl->dev.of_node, node) { + struct spmi_device *sdev; + u32 reg[2]; + + dev_dbg(&ctrl->dev, "adding child %pOF\n", node); + + err = of_property_read_u32_array(node, "reg", reg, 2); + if (err) { + dev_err(&ctrl->dev, + "node %pOF err (%d) does not have 'reg' property\n", + node, err); + continue; + } + + if (reg[1] != SPMI_USID) { + dev_err(&ctrl->dev, + "node %pOF contains unsupported 'reg' entry\n", + node); + continue; + } + + if (reg[0] >= SPMI_MAX_SLAVE_ID) { + dev_err(&ctrl->dev, "invalid usid on node %pOF\n", node); + continue; + } + + dev_dbg(&ctrl->dev, "read usid %02x\n", reg[0]); + + sdev = spmi_device_alloc(ctrl); + if (!sdev) + continue; + + sdev->dev.of_node = node; + sdev->usid = (u8)reg[0]; + + err = spmi_device_add(sdev); + if (err) { + dev_err(&sdev->dev, + "failure adding device. status %d\n", err); + spmi_device_put(sdev); + } + } +} + +/** + * spmi_controller_add() - Add an SPMI controller + * @ctrl: controller to be registered. + * + * Register a controller previously allocated via spmi_controller_alloc() with + * the SPMI core. + */ +int spmi_controller_add(struct spmi_controller *ctrl) +{ + int ret; + + /* Can't register until after driver model init */ + if (WARN_ON(!is_registered)) + return -EAGAIN; + + ret = device_add(&ctrl->dev); + if (ret) + return ret; + + if (IS_ENABLED(CONFIG_OF)) + of_spmi_register_devices(ctrl); + + dev_dbg(&ctrl->dev, "spmi-%d registered: dev:%p\n", + ctrl->nr, &ctrl->dev); + + return 0; +}; +EXPORT_SYMBOL_GPL(spmi_controller_add); + +/* Remove a device associated with a controller */ +static int spmi_ctrl_remove_device(struct device *dev, void *data) +{ + struct spmi_device *spmidev = to_spmi_device(dev); + + if (dev->type == &spmi_dev_type) + spmi_device_remove(spmidev); + return 0; +} + +/** + * spmi_controller_remove(): remove an SPMI controller + * @ctrl: controller to remove + * + * Remove a SPMI controller. Caller is responsible for calling + * spmi_controller_put() to discard the allocated controller. + */ +void spmi_controller_remove(struct spmi_controller *ctrl) +{ + if (!ctrl) + return; + + device_for_each_child(&ctrl->dev, NULL, spmi_ctrl_remove_device); + device_del(&ctrl->dev); +} +EXPORT_SYMBOL_GPL(spmi_controller_remove); + +/** + * spmi_driver_register() - Register client driver with SPMI core + * @sdrv: client driver to be associated with client-device. 
+ *
+ * This API will register the client driver with the SPMI framework.
+ * It is typically called from the driver's module-init function.
+ */
+int __spmi_driver_register(struct spmi_driver *sdrv, struct module *owner)
+{
+ sdrv->driver.bus = &spmi_bus_type;
+ sdrv->driver.owner = owner;
+ return driver_register(&sdrv->driver);
+}
+EXPORT_SYMBOL_GPL(__spmi_driver_register);
+
+static void __exit spmi_exit(void)
+{
+ bus_unregister(&spmi_bus_type);
+}
+module_exit(spmi_exit);
+
+static int __init spmi_init(void)
+{
+ int ret;
+
+ ret = bus_register(&spmi_bus_type);
+ if (ret)
+ return ret;
+
+ is_registered = true;
+ return 0;
+}
+postcore_initcall(spmi_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SPMI module");
+MODULE_ALIAS("platform:spmi");
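The channel arithmetic in pmic_arb_offset_v2() and the PPID construction used throughout the arbiter code are easier to see in isolation. The standalone userspace sketch below is not part of the diff; the helper names and the sample SID, address, EE and APID values are invented for illustration. It mirrors the arithmetic shown above: a PPID is the 4-bit slave ID concatenated with the upper byte of the 16-bit register address, and the v2 read/write channel sits at a per-EE, per-APID offset.

/* Standalone illustration; builds with any C compiler, no kernel headers. */
#include <stdint.h>
#include <stdio.h>

/* Mirrors the PPID construction used by pmic_arb_offset_v2() above. */
static uint16_t example_ppid(uint8_t sid, uint16_t addr)
{
	return ((uint16_t)(sid & 0xf) << 8) | ((addr >> 8) & 0xff);
}

/* Mirrors the v2 channel offset: 0x1000 per EE plus 0x8000 per APID. */
static uint32_t example_chnls_offset_v2(uint32_t ee, uint32_t apid)
{
	return 0x1000 * ee + 0x8000 * apid;
}

int main(void)
{
	uint8_t sid = 0x2;		/* sample slave ID */
	uint16_t addr = 0x4046;		/* sample 16-bit register address */

	printf("ppid=0x%03x offset=0x%05x\n",
	       example_ppid(sid, addr),
	       example_chnls_offset_v2(0, 7));	/* ee 0, apid 7, both made up */
	return 0;
}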
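qpnpint_irq_domain_translate() accepts a four-cell interrupt specifier: slave ID (0 to 0xF), peripheral ID (0 to 0xFF), peripheral interrupt number (0 to 7) and IRQ trigger flags. In practice these cells come from an interrupts property in the device tree; purely as a hedged illustration of the cell layout, a kernel-side caller that already holds the arbiter's irq_domain could build the same specifier by hand. The helper name and cell values below are hypothetical.

#include <linux/irq.h>
#include <linux/irqdomain.h>

/*
 * Hypothetical helper: map SID 0, peripheral 0x08, peripheral IRQ 2,
 * level-high trigger, using the four-cell layout that
 * qpnpint_irq_domain_translate() expects. Returns a virq, or 0 on failure.
 */
static unsigned int example_map_qpnp_irq(struct irq_domain *domain)
{
	struct irq_fwspec fwspec = {
		.fwnode = domain->fwnode,
		.param_count = 4,
		.param = { 0x0, 0x08, 0x2, IRQ_TYPE_LEVEL_HIGH },
	};

	return irq_create_fwspec_mapping(&fwspec);
}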
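On the consumer side, the register-access helpers documented in spmi.c (spmi_ext_register_readl(), spmi_ext_register_writel() and friends) are called from a driver bound to an spmi_device, and __spmi_driver_register() is normally reached through the module_spmi_driver() convenience macro. A minimal, hypothetical client is sketched below; the compatible string, driver name and register offset are invented and would have to match a real PMIC binding.

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/spmi.h>

#define EXAMPLE_REV_REG	0x0100	/* hypothetical 16-bit register address */

static int example_pmic_probe(struct spmi_device *sdev)
{
	u8 rev;
	int ret;

	/* Extended READL: 16-bit address, 1 to 8 bytes per transfer */
	ret = spmi_ext_register_readl(sdev, EXAMPLE_REV_REG, &rev, 1);
	if (ret)
		return ret;

	dev_info(&sdev->dev, "revision 0x%02x\n", rev);
	return 0;
}

static const struct of_device_id example_pmic_of_match[] = {
	{ .compatible = "vendor,example-pmic" },	/* hypothetical */
	{ }
};
MODULE_DEVICE_TABLE(of, example_pmic_of_match);

static struct spmi_driver example_pmic_driver = {
	.probe = example_pmic_probe,
	.driver = {
		.name = "example-pmic",
		.of_match_table = example_pmic_of_match,
	},
};
module_spmi_driver(example_pmic_driver);

MODULE_DESCRIPTION("Hypothetical SPMI client shown only as a usage example");
MODULE_LICENSE("GPL");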
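On the provider side, the pattern used by spmi_pmic_arb_probe() above (allocate the controller, fill in the cmd callbacks, register it, and drop the reference on the error path) is common to every controller driver. The skeleton below is a hedged sketch of just that lifecycle; the callback bodies, driver name and platform glue are placeholders rather than a working bus implementation.

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spmi.h>

struct example_ctrl_priv {
	void __iomem *base;	/* placeholder for the real register window */
};

static int example_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
			    u16 addr, u8 *buf, size_t len)
{
	/* A real controller formats and issues the bus command here. */
	return -EOPNOTSUPP;
}

static int example_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
			     u16 addr, const u8 *buf, size_t len)
{
	return -EOPNOTSUPP;
}

static int example_ctrl_probe(struct platform_device *pdev)
{
	struct spmi_controller *ctrl;
	int err;

	ctrl = spmi_controller_alloc(&pdev->dev, sizeof(struct example_ctrl_priv));
	if (!ctrl)
		return -ENOMEM;

	ctrl->read_cmd = example_read_cmd;
	ctrl->write_cmd = example_write_cmd;

	/* Registers the controller and enumerates its DT children. */
	err = spmi_controller_add(ctrl);
	if (err) {
		spmi_controller_put(ctrl);
		return err;
	}

	platform_set_drvdata(pdev, ctrl);
	return 0;
}

static int example_ctrl_remove(struct platform_device *pdev)
{
	struct spmi_controller *ctrl = platform_get_drvdata(pdev);

	spmi_controller_remove(ctrl);
	spmi_controller_put(ctrl);
	return 0;
}

static struct platform_driver example_ctrl_driver = {
	.probe = example_ctrl_probe,
	.remove = example_ctrl_remove,
	.driver = {
		.name = "example-spmi-ctrl",	/* hypothetical */
	},
};
module_platform_driver(example_ctrl_driver);

MODULE_DESCRIPTION("Hypothetical SPMI controller skeleton shown only as a usage example");
MODULE_LICENSE("GPL");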