Diffstat (limited to 'drivers/soc/qcom')
36 files changed, 17953 insertions, 0 deletions
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig new file mode 100644 index 0000000000..715348869d --- /dev/null +++ b/drivers/soc/qcom/Kconfig @@ -0,0 +1,294 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# QCOM Soc drivers +# +menu "Qualcomm SoC drivers" + +config QCOM_AOSS_QMP + tristate "Qualcomm AOSS Driver" + depends on ARCH_QCOM || COMPILE_TEST + depends on MAILBOX + depends on COMMON_CLK && PM + select PM_GENERIC_DOMAINS + help + This driver provides the means of communicating with and controlling + the low-power state for resources related to the remoteproc + subsystems as well as controlling the debug clocks exposed by the Always On + Subsystem (AOSS) using Qualcomm Messaging Protocol (QMP). + +config QCOM_COMMAND_DB + tristate "Qualcomm Command DB" + depends on ARCH_QCOM || COMPILE_TEST + depends on OF_RESERVED_MEM + help + Command DB queries shared memory by key string for shared system + resources. Platform drivers that require to set state of a shared + resource on a RPM-hardened platform must use this database to get + SoC specific identifier and information for the shared resources. + +config QCOM_CPR + tristate "QCOM Core Power Reduction (CPR) support" + depends on ARCH_QCOM && HAS_IOMEM + select PM_OPP + select REGMAP + help + Say Y here to enable support for the CPR hardware found on Qualcomm + SoCs like QCS404. + + This driver populates CPU OPPs tables and makes adjustments to the + tables based on feedback from the CPR hardware. If you want to do + CPUfrequency scaling say Y here. + + To compile this driver as a module, choose M here: the module will + be called qcom-cpr + +config QCOM_GENI_SE + tristate "QCOM GENI Serial Engine Driver" + depends on ARCH_QCOM || COMPILE_TEST + help + This driver is used to manage Generic Interface (GENI) firmware based + Qualcomm Technologies, Inc. Universal Peripheral (QUP) Wrapper. This + driver is also used to manage the common aspects of multiple Serial + Engines present in the QUP. + +config QCOM_GSBI + tristate "QCOM General Serial Bus Interface" + depends on ARCH_QCOM || COMPILE_TEST + select MFD_SYSCON + help + Say y here to enable GSBI support. The GSBI provides control + functions for connecting the underlying serial UART, SPI, and I2C + devices to the output pins. + +config QCOM_LLCC + tristate "Qualcomm Technologies, Inc. LLCC driver" + depends on ARCH_QCOM || COMPILE_TEST + select REGMAP_MMIO + help + Qualcomm Technologies, Inc. platform specific + Last Level Cache Controller(LLCC) driver for platforms such as, + SDM845. This provides interfaces to clients that use the LLCC. + Say yes here to enable LLCC slice driver. + +config QCOM_KRYO_L2_ACCESSORS + bool + depends on (ARCH_QCOM || COMPILE_TEST) && ARM64 + +config QCOM_MDT_LOADER + tristate + select QCOM_SCM + +config QCOM_OCMEM + tristate "Qualcomm On Chip Memory (OCMEM) driver" + depends on ARCH_QCOM + select QCOM_SCM + help + The On Chip Memory (OCMEM) allocator allows various clients to + allocate memory from OCMEM based on performance, latency and power + requirements. This is typically used by the GPU, camera/video, and + audio components on some Snapdragon SoCs. 
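The QCOM_COMMAND_DB entry above describes key-string lookups of shared RPMh resources. Purely as an illustration, a minimal sketch of how a client driver might resolve a resource address, assuming the cmd_db_ready()/cmd_db_read_addr() interface implemented by cmd-db.c later in this diff (the helper name is hypothetical):

#include <linux/errno.h>
#include <linux/types.h>

#include <soc/qcom/cmd-db.h>

/* Hypothetical helper, not part of this patch: resolve the RPMh address
 * of a shared resource identified by its key string. */
static int example_lookup_resource(const char *key, u32 *addr)
{
	int ret = cmd_db_ready();	/* -EPROBE_DEFER until the DB is mapped */

	if (ret)
		return ret;

	*addr = cmd_db_read_addr(key);	/* returns 0 for an unknown key */
	return *addr ? 0 : -ENODEV;
}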
+ +config QCOM_PDR_HELPERS + tristate + select QCOM_QMI_HELPERS + depends on NET + +config QCOM_PMIC_GLINK + tristate "Qualcomm PMIC GLINK driver" + depends on RPMSG + depends on TYPEC + depends on DRM + depends on NET + depends on OF + select AUXILIARY_BUS + select QCOM_PDR_HELPERS + help + The Qualcomm PMIC GLINK driver provides access, over GLINK, to the + USB and battery firmware running on one of the coprocessors in + several modern Qualcomm platforms. + + Say yes here to support USB-C and battery status on modern Qualcomm + platforms. + +config QCOM_QMI_HELPERS + tristate + depends on NET + +config QCOM_RAMP_CTRL + tristate "Qualcomm Ramp Controller driver" + depends on ARCH_QCOM || COMPILE_TEST + help + The Ramp Controller is used to program the sequence ID for pulse + swallowing, enable sequence and link sequence IDs for the CPU + cores on some Qualcomm SoCs. + Say y here to enable support for the ramp controller. + +config QCOM_RMTFS_MEM + tristate "Qualcomm Remote Filesystem memory driver" + depends on ARCH_QCOM + select QCOM_SCM + help + The Qualcomm remote filesystem memory driver is used for allocating + and exposing regions of shared memory with remote processors for the + purpose of exchanging sector-data between the remote filesystem + service and its clients. + + Say y here if you intend to boot the modem remoteproc. + +config QCOM_RPM_MASTER_STATS + tristate "Qualcomm RPM Master stats" + depends on ARCH_QCOM || COMPILE_TEST + help + The RPM Master sleep stats driver provides detailed per-subsystem + sleep/wake data, read from the RPM message RAM. It can be used to + assess whether all the low-power modes available are entered as + expected or to check which part of the SoC prevents it from sleeping. + + Say y here if you intend to debug or monitor platform sleep. + +config QCOM_RPMH + tristate "Qualcomm RPM-Hardened (RPMH) Communication" + depends on ARCH_QCOM || COMPILE_TEST + depends on (QCOM_COMMAND_DB || !QCOM_COMMAND_DB) + help + Support for communication with the hardened-RPM blocks in + Qualcomm Technologies Inc (QTI) SoCs. RPMH communication uses an + internal bus to transmit state requests for shared resources. A set + of hardware components aggregate requests for these resources and + help apply the aggregated state on the resource. + +config QCOM_RPMHPD + tristate "Qualcomm RPMh Power domain driver" + depends on QCOM_RPMH && QCOM_COMMAND_DB + help + QCOM RPMh Power domain driver to support power-domains with + performance states. The driver communicates a performance state + value to RPMh which then translates it into corresponding voltage + for the voltage rail. + +config QCOM_RPMPD + tristate "Qualcomm RPM Power domain driver" + depends on PM && OF + depends on QCOM_SMD_RPM + select PM_GENERIC_DOMAINS + select PM_GENERIC_DOMAINS_OF + help + QCOM RPM Power domain driver to support power-domains with + performance states. The driver communicates a performance state + value to RPM which then translates it into corresponding voltage + for the voltage rail. + +config QCOM_SMEM + tristate "Qualcomm Shared Memory Manager (SMEM)" + depends on ARCH_QCOM || COMPILE_TEST + depends on HWSPINLOCK + help + Say y here to enable support for the Qualcomm Shared Memory Manager. + The driver provides an interface to items in a heap shared among all + processors in a Qualcomm platform. 
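QCOM_SMEM above provides the shared-memory heap that most drivers in this directory build on. A minimal, hypothetical usage sketch, assuming the qcom_smem_get() API from <linux/soc/qcom/smem.h> (that header is not part of the excerpt shown here, and the item ID below is made up for illustration):

#include <linux/err.h>
#include <linux/printk.h>
#include <linux/soc/qcom/smem.h>

#define EXAMPLE_SMEM_ITEM	602	/* hypothetical item ID */

static int example_read_smem_item(void)
{
	size_t size;
	void *ptr;

	/* Look up an item that some host has already allocated. */
	ptr = qcom_smem_get(QCOM_SMEM_HOST_ANY, EXAMPLE_SMEM_ITEM, &size);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);	/* e.g. -EPROBE_DEFER or -ENOENT */

	pr_info("SMEM item %d is %zu bytes\n", EXAMPLE_SMEM_ITEM, size);
	return 0;
}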
+ +config QCOM_SMD_RPM + tristate "Qualcomm Resource Power Manager (RPM) over SMD" + depends on ARCH_QCOM || COMPILE_TEST + depends on RPMSG + depends on RPMSG_QCOM_SMD || RPMSG_QCOM_SMD=n + help + If you say yes to this option, support will be included for the + Resource Power Manager system found in the Qualcomm 8974 based + devices. + + This is required to access many regulators, clocks and bus + frequencies controlled by the RPM on these devices. + + Say M here if you want to include support for the Qualcomm RPM as a + module. This will build a module called "qcom-smd-rpm". + +config QCOM_SMEM_STATE + bool + +config QCOM_SMP2P + tristate "Qualcomm Shared Memory Point to Point support" + depends on MAILBOX + depends on QCOM_SMEM + select QCOM_SMEM_STATE + select IRQ_DOMAIN + help + Say yes here to support the Qualcomm Shared Memory Point to Point + protocol. + +config QCOM_SMSM + tristate "Qualcomm Shared Memory State Machine" + depends on QCOM_SMEM + select QCOM_SMEM_STATE + select IRQ_DOMAIN + help + Say yes here to support the Qualcomm Shared Memory State Machine. + The state machine is represented by bits in shared memory. + +config QCOM_SOCINFO + tristate "Qualcomm socinfo driver" + depends on QCOM_SMEM + select SOC_BUS + help + Say yes here to support the Qualcomm socinfo driver, providing + information about the SoC to user space. + +config QCOM_SPM + tristate "Qualcomm Subsystem Power Manager (SPM)" + depends on ARCH_QCOM || COMPILE_TEST + select QCOM_SCM + help + Enable the support for the Qualcomm Subsystem Power Manager, used + to manage cores, L2 low power modes and to configure the internal + Adaptive Voltage Scaler parameters, where supported. + +config QCOM_STATS + tristate "Qualcomm Technologies, Inc. (QTI) Sleep stats driver" + depends on (ARCH_QCOM && DEBUG_FS) || COMPILE_TEST + depends on QCOM_SMEM + help + Qualcomm Technologies, Inc. (QTI) Sleep stats driver to read + the shared memory exported by the remote processor related to + various SoC level low power modes statistics and export to debugfs + interface. + +config QCOM_WCNSS_CTRL + tristate "Qualcomm WCNSS control driver" + depends on ARCH_QCOM || COMPILE_TEST + depends on RPMSG + help + Client driver for the WCNSS_CTRL SMD channel, used to download nv + firmware to a newly booted WCNSS chip. + +config QCOM_APR + tristate "Qualcomm APR/GPR Bus (Asynchronous/Generic Packet Router)" + depends on ARCH_QCOM || COMPILE_TEST + depends on RPMSG + depends on NET + select QCOM_PDR_HELPERS + help + Enable APR IPC protocol support between + application processor and QDSP6. APR is + used by audio driver to configure QDSP6 + ASM, ADM and AFE modules. + +config QCOM_ICC_BWMON + tristate "QCOM Interconnect Bandwidth Monitor driver" + depends on ARCH_QCOM || COMPILE_TEST + select PM_OPP + select REGMAP_MMIO + help + Sets up driver monitoring bandwidth on various interconnects and + based on that voting for interconnect bandwidth, adjusting their + speed to current demand. + Current implementation brings support for BWMON v4, used for example + on SDM845 to measure bandwidth between CPU (gladiator_noc) and Last + Level Cache (memnoc). Usage of this BWMON allows to remove some of + the fixed bandwidth votes from cpufreq (CPU nodes) thus achieve high + memory throughput even with lower CPU frequencies. 
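The QCOM_ICC_BWMON help text above summarises the monitor: byte counters are compared against low/medium/high thresholds within a fixed sample window. As an illustration of the arithmetic used by icc-bwmon.c later in this diff (see bwmon_kbps_to_count() and bwmon_set_threshold()), a sketch of how a bandwidth in kbyte/s maps onto a threshold register value; the helper name is hypothetical:

#include <linux/math.h>
#include <linux/time64.h>

/*
 * The hardware counts 'count_unit_kb'-sized units per sample window of
 * 'sample_ms' milliseconds, so a bandwidth given in kbyte/s is scaled by
 * the window length before being written to a THRESHOLD register.
 */
static unsigned int example_kbps_to_threshold(unsigned int kbps,
					      unsigned int count_unit_kb,
					      unsigned int sample_ms)
{
	unsigned int count = kbps / count_unit_kb;

	return mult_frac(count, sample_ms, MSEC_PER_SEC);
}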
+ +config QCOM_INLINE_CRYPTO_ENGINE + tristate + select QCOM_SCM + +endmenu diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile new file mode 100644 index 0000000000..bbca2e1e55 --- /dev/null +++ b/drivers/soc/qcom/Makefile @@ -0,0 +1,34 @@ +# SPDX-License-Identifier: GPL-2.0 +CFLAGS_rpmh-rsc.o := -I$(src) +obj-$(CONFIG_QCOM_AOSS_QMP) += qcom_aoss.o +obj-$(CONFIG_QCOM_GENI_SE) += qcom-geni-se.o +obj-$(CONFIG_QCOM_COMMAND_DB) += cmd-db.o +obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o +obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o +obj-$(CONFIG_QCOM_OCMEM) += ocmem.o +obj-$(CONFIG_QCOM_PDR_HELPERS) += pdr_interface.o +obj-$(CONFIG_QCOM_PMIC_GLINK) += pmic_glink.o +obj-$(CONFIG_QCOM_PMIC_GLINK) += pmic_glink_altmode.o +obj-$(CONFIG_QCOM_QMI_HELPERS) += qmi_helpers.o +qmi_helpers-y += qmi_encdec.o qmi_interface.o +obj-$(CONFIG_QCOM_RAMP_CTRL) += ramp_controller.o +obj-$(CONFIG_QCOM_RMTFS_MEM) += rmtfs_mem.o +obj-$(CONFIG_QCOM_RPM_MASTER_STATS) += rpm_master_stats.o +obj-$(CONFIG_QCOM_RPMH) += qcom_rpmh.o +qcom_rpmh-y += rpmh-rsc.o +qcom_rpmh-y += rpmh.o +obj-$(CONFIG_QCOM_SMD_RPM) += rpm-proc.o smd-rpm.o +obj-$(CONFIG_QCOM_SMEM) += smem.o +obj-$(CONFIG_QCOM_SMEM_STATE) += smem_state.o +obj-$(CONFIG_QCOM_SMP2P) += smp2p.o +obj-$(CONFIG_QCOM_SMSM) += smsm.o +obj-$(CONFIG_QCOM_SOCINFO) += socinfo.o +obj-$(CONFIG_QCOM_SPM) += spm.o +obj-$(CONFIG_QCOM_STATS) += qcom_stats.o +obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o +obj-$(CONFIG_QCOM_APR) += apr.o +obj-$(CONFIG_QCOM_LLCC) += llcc-qcom.o +obj-$(CONFIG_QCOM_KRYO_L2_ACCESSORS) += kryo-l2-accessors.o +obj-$(CONFIG_QCOM_ICC_BWMON) += icc-bwmon.o +qcom_ice-objs += ice.o +obj-$(CONFIG_QCOM_INLINE_CRYPTO_ENGINE) += qcom_ice.o diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c new file mode 100644 index 0000000000..30f81d6d9d --- /dev/null +++ b/drivers/soc/qcom/apr.c @@ -0,0 +1,736 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved. +// Copyright (c) 2018, Linaro Limited + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/spinlock.h> +#include <linux/idr.h> +#include <linux/slab.h> +#include <linux/workqueue.h> +#include <linux/of_device.h> +#include <linux/soc/qcom/apr.h> +#include <linux/soc/qcom/pdr.h> +#include <linux/rpmsg.h> +#include <linux/of.h> + +enum { + PR_TYPE_APR = 0, + PR_TYPE_GPR, +}; + +/* Some random values tbh which does not collide with static modules */ +#define GPR_DYNAMIC_PORT_START 0x10000000 +#define GPR_DYNAMIC_PORT_END 0x20000000 + +struct packet_router { + struct rpmsg_endpoint *ch; + struct device *dev; + spinlock_t svcs_lock; + spinlock_t rx_lock; + struct idr svcs_idr; + int dest_domain_id; + int type; + struct pdr_handle *pdr; + struct workqueue_struct *rxwq; + struct work_struct rx_work; + struct list_head rx_list; +}; + +struct apr_rx_buf { + struct list_head node; + int len; + uint8_t buf[]; +}; + +/** + * apr_send_pkt() - Send a apr message from apr device + * + * @adev: Pointer to previously registered apr device. + * @pkt: Pointer to apr packet to send + * + * Return: Will be an negative on packet size on success. 
+ */ +int apr_send_pkt(struct apr_device *adev, struct apr_pkt *pkt) +{ + struct packet_router *apr = dev_get_drvdata(adev->dev.parent); + struct apr_hdr *hdr; + unsigned long flags; + int ret; + + spin_lock_irqsave(&adev->svc.lock, flags); + + hdr = &pkt->hdr; + hdr->src_domain = APR_DOMAIN_APPS; + hdr->src_svc = adev->svc.id; + hdr->dest_domain = adev->domain_id; + hdr->dest_svc = adev->svc.id; + + ret = rpmsg_trysend(apr->ch, pkt, hdr->pkt_size); + spin_unlock_irqrestore(&adev->svc.lock, flags); + + return ret ? ret : hdr->pkt_size; +} +EXPORT_SYMBOL_GPL(apr_send_pkt); + +void gpr_free_port(gpr_port_t *port) +{ + struct packet_router *gpr = port->pr; + unsigned long flags; + + spin_lock_irqsave(&gpr->svcs_lock, flags); + idr_remove(&gpr->svcs_idr, port->id); + spin_unlock_irqrestore(&gpr->svcs_lock, flags); + + kfree(port); +} +EXPORT_SYMBOL_GPL(gpr_free_port); + +gpr_port_t *gpr_alloc_port(struct apr_device *gdev, struct device *dev, + gpr_port_cb cb, void *priv) +{ + struct packet_router *pr = dev_get_drvdata(gdev->dev.parent); + gpr_port_t *port; + struct pkt_router_svc *svc; + int id; + + port = kzalloc(sizeof(*port), GFP_KERNEL); + if (!port) + return ERR_PTR(-ENOMEM); + + svc = port; + svc->callback = cb; + svc->pr = pr; + svc->priv = priv; + svc->dev = dev; + spin_lock_init(&svc->lock); + + spin_lock(&pr->svcs_lock); + id = idr_alloc_cyclic(&pr->svcs_idr, svc, GPR_DYNAMIC_PORT_START, + GPR_DYNAMIC_PORT_END, GFP_ATOMIC); + if (id < 0) { + dev_err(dev, "Unable to allocate dynamic GPR src port\n"); + kfree(port); + spin_unlock(&pr->svcs_lock); + return ERR_PTR(id); + } + + svc->id = id; + spin_unlock(&pr->svcs_lock); + + return port; +} +EXPORT_SYMBOL_GPL(gpr_alloc_port); + +static int pkt_router_send_svc_pkt(struct pkt_router_svc *svc, struct gpr_pkt *pkt) +{ + struct packet_router *pr = svc->pr; + struct gpr_hdr *hdr; + unsigned long flags; + int ret; + + hdr = &pkt->hdr; + + spin_lock_irqsave(&svc->lock, flags); + ret = rpmsg_trysend(pr->ch, pkt, hdr->pkt_size); + spin_unlock_irqrestore(&svc->lock, flags); + + return ret ? 
ret : hdr->pkt_size; +} + +int gpr_send_pkt(struct apr_device *gdev, struct gpr_pkt *pkt) +{ + return pkt_router_send_svc_pkt(&gdev->svc, pkt); +} +EXPORT_SYMBOL_GPL(gpr_send_pkt); + +int gpr_send_port_pkt(gpr_port_t *port, struct gpr_pkt *pkt) +{ + return pkt_router_send_svc_pkt(port, pkt); +} +EXPORT_SYMBOL_GPL(gpr_send_port_pkt); + +static void apr_dev_release(struct device *dev) +{ + struct apr_device *adev = to_apr_device(dev); + + kfree(adev); +} + +static int apr_callback(struct rpmsg_device *rpdev, void *buf, + int len, void *priv, u32 addr) +{ + struct packet_router *apr = dev_get_drvdata(&rpdev->dev); + struct apr_rx_buf *abuf; + unsigned long flags; + + if (len <= APR_HDR_SIZE) { + dev_err(apr->dev, "APR: Improper apr pkt received:%p %d\n", + buf, len); + return -EINVAL; + } + + abuf = kzalloc(sizeof(*abuf) + len, GFP_ATOMIC); + if (!abuf) + return -ENOMEM; + + abuf->len = len; + memcpy(abuf->buf, buf, len); + + spin_lock_irqsave(&apr->rx_lock, flags); + list_add_tail(&abuf->node, &apr->rx_list); + spin_unlock_irqrestore(&apr->rx_lock, flags); + + queue_work(apr->rxwq, &apr->rx_work); + + return 0; +} + +static int apr_do_rx_callback(struct packet_router *apr, struct apr_rx_buf *abuf) +{ + uint16_t hdr_size, msg_type, ver, svc_id; + struct pkt_router_svc *svc; + struct apr_device *adev; + struct apr_driver *adrv = NULL; + struct apr_resp_pkt resp; + struct apr_hdr *hdr; + unsigned long flags; + void *buf = abuf->buf; + int len = abuf->len; + + hdr = buf; + ver = APR_HDR_FIELD_VER(hdr->hdr_field); + if (ver > APR_PKT_VER + 1) + return -EINVAL; + + hdr_size = APR_HDR_FIELD_SIZE_BYTES(hdr->hdr_field); + if (hdr_size < APR_HDR_SIZE) { + dev_err(apr->dev, "APR: Wrong hdr size:%d\n", hdr_size); + return -EINVAL; + } + + if (hdr->pkt_size < APR_HDR_SIZE || hdr->pkt_size != len) { + dev_err(apr->dev, "APR: Wrong packet size\n"); + return -EINVAL; + } + + msg_type = APR_HDR_FIELD_MT(hdr->hdr_field); + if (msg_type >= APR_MSG_TYPE_MAX) { + dev_err(apr->dev, "APR: Wrong message type: %d\n", msg_type); + return -EINVAL; + } + + if (hdr->src_domain >= APR_DOMAIN_MAX || + hdr->dest_domain >= APR_DOMAIN_MAX || + hdr->src_svc >= APR_SVC_MAX || + hdr->dest_svc >= APR_SVC_MAX) { + dev_err(apr->dev, "APR: Wrong APR header\n"); + return -EINVAL; + } + + svc_id = hdr->dest_svc; + spin_lock_irqsave(&apr->svcs_lock, flags); + svc = idr_find(&apr->svcs_idr, svc_id); + if (svc && svc->dev->driver) { + adev = svc_to_apr_device(svc); + adrv = to_apr_driver(adev->dev.driver); + } + spin_unlock_irqrestore(&apr->svcs_lock, flags); + + if (!adrv || !adev) { + dev_err(apr->dev, "APR: service is not registered (%d)\n", + svc_id); + return -EINVAL; + } + + resp.hdr = *hdr; + resp.payload_size = hdr->pkt_size - hdr_size; + + /* + * NOTE: hdr_size is not same as APR_HDR_SIZE as remote can include + * optional headers in to apr_hdr which should be ignored + */ + if (resp.payload_size > 0) + resp.payload = buf + hdr_size; + + adrv->callback(adev, &resp); + + return 0; +} + +static int gpr_do_rx_callback(struct packet_router *gpr, struct apr_rx_buf *abuf) +{ + uint16_t hdr_size, ver; + struct pkt_router_svc *svc = NULL; + struct gpr_resp_pkt resp; + struct gpr_hdr *hdr; + unsigned long flags; + void *buf = abuf->buf; + int len = abuf->len; + + hdr = buf; + ver = hdr->version; + if (ver > GPR_PKT_VER + 1) + return -EINVAL; + + hdr_size = hdr->hdr_size; + if (hdr_size < GPR_PKT_HEADER_WORD_SIZE) { + dev_err(gpr->dev, "GPR: Wrong hdr size:%d\n", hdr_size); + return -EINVAL; + } + + if (hdr->pkt_size < 
GPR_PKT_HEADER_BYTE_SIZE || hdr->pkt_size != len) { + dev_err(gpr->dev, "GPR: Wrong packet size\n"); + return -EINVAL; + } + + resp.hdr = *hdr; + resp.payload_size = hdr->pkt_size - (hdr_size * 4); + + /* + * NOTE: hdr_size is not same as GPR_HDR_SIZE as remote can include + * optional headers in to gpr_hdr which should be ignored + */ + if (resp.payload_size > 0) + resp.payload = buf + (hdr_size * 4); + + + spin_lock_irqsave(&gpr->svcs_lock, flags); + svc = idr_find(&gpr->svcs_idr, hdr->dest_port); + spin_unlock_irqrestore(&gpr->svcs_lock, flags); + + if (!svc) { + dev_err(gpr->dev, "GPR: Port(%x) is not registered\n", + hdr->dest_port); + return -EINVAL; + } + + if (svc->callback) + svc->callback(&resp, svc->priv, 0); + + return 0; +} + +static void apr_rxwq(struct work_struct *work) +{ + struct packet_router *apr = container_of(work, struct packet_router, rx_work); + struct apr_rx_buf *abuf, *b; + unsigned long flags; + + if (!list_empty(&apr->rx_list)) { + list_for_each_entry_safe(abuf, b, &apr->rx_list, node) { + switch (apr->type) { + case PR_TYPE_APR: + apr_do_rx_callback(apr, abuf); + break; + case PR_TYPE_GPR: + gpr_do_rx_callback(apr, abuf); + break; + default: + break; + } + spin_lock_irqsave(&apr->rx_lock, flags); + list_del(&abuf->node); + spin_unlock_irqrestore(&apr->rx_lock, flags); + kfree(abuf); + } + } +} + +static int apr_device_match(struct device *dev, struct device_driver *drv) +{ + struct apr_device *adev = to_apr_device(dev); + struct apr_driver *adrv = to_apr_driver(drv); + const struct apr_device_id *id = adrv->id_table; + + /* Attempt an OF style match first */ + if (of_driver_match_device(dev, drv)) + return 1; + + if (!id) + return 0; + + while (id->domain_id != 0 || id->svc_id != 0) { + if (id->domain_id == adev->domain_id && + id->svc_id == adev->svc.id) + return 1; + id++; + } + + return 0; +} + +static int apr_device_probe(struct device *dev) +{ + struct apr_device *adev = to_apr_device(dev); + struct apr_driver *adrv = to_apr_driver(dev->driver); + int ret; + + ret = adrv->probe(adev); + if (!ret) + adev->svc.callback = adrv->gpr_callback; + + return ret; +} + +static void apr_device_remove(struct device *dev) +{ + struct apr_device *adev = to_apr_device(dev); + struct apr_driver *adrv = to_apr_driver(dev->driver); + struct packet_router *apr = dev_get_drvdata(adev->dev.parent); + + if (adrv->remove) + adrv->remove(adev); + spin_lock(&apr->svcs_lock); + idr_remove(&apr->svcs_idr, adev->svc.id); + spin_unlock(&apr->svcs_lock); +} + +static int apr_uevent(const struct device *dev, struct kobj_uevent_env *env) +{ + const struct apr_device *adev = to_apr_device(dev); + int ret; + + ret = of_device_uevent_modalias(dev, env); + if (ret != -ENODEV) + return ret; + + return add_uevent_var(env, "MODALIAS=apr:%s", adev->name); +} + +struct bus_type aprbus = { + .name = "aprbus", + .match = apr_device_match, + .probe = apr_device_probe, + .uevent = apr_uevent, + .remove = apr_device_remove, +}; +EXPORT_SYMBOL_GPL(aprbus); + +static int apr_add_device(struct device *dev, struct device_node *np, + u32 svc_id, u32 domain_id) +{ + struct packet_router *apr = dev_get_drvdata(dev); + struct apr_device *adev = NULL; + struct pkt_router_svc *svc; + int ret; + + adev = kzalloc(sizeof(*adev), GFP_KERNEL); + if (!adev) + return -ENOMEM; + + adev->svc_id = svc_id; + svc = &adev->svc; + + svc->id = svc_id; + svc->pr = apr; + svc->priv = adev; + svc->dev = dev; + spin_lock_init(&svc->lock); + + adev->domain_id = domain_id; + + if (np) + snprintf(adev->name, APR_NAME_SIZE, "%pOFn", 
np); + + switch (apr->type) { + case PR_TYPE_APR: + dev_set_name(&adev->dev, "aprsvc:%s:%x:%x", adev->name, + domain_id, svc_id); + break; + case PR_TYPE_GPR: + dev_set_name(&adev->dev, "gprsvc:%s:%x:%x", adev->name, + domain_id, svc_id); + break; + default: + break; + } + + adev->dev.bus = &aprbus; + adev->dev.parent = dev; + adev->dev.of_node = np; + adev->dev.release = apr_dev_release; + adev->dev.driver = NULL; + + spin_lock(&apr->svcs_lock); + ret = idr_alloc(&apr->svcs_idr, svc, svc_id, svc_id + 1, GFP_ATOMIC); + spin_unlock(&apr->svcs_lock); + if (ret < 0) { + dev_err(dev, "idr_alloc failed: %d\n", ret); + goto out; + } + + /* Protection domain is optional, it does not exist on older platforms */ + ret = of_property_read_string_index(np, "qcom,protection-domain", + 1, &adev->service_path); + if (ret < 0 && ret != -EINVAL) { + dev_err(dev, "Failed to read second value of qcom,protection-domain\n"); + goto out; + } + + dev_info(dev, "Adding APR/GPR dev: %s\n", dev_name(&adev->dev)); + + ret = device_register(&adev->dev); + if (ret) { + dev_err(dev, "device_register failed: %d\n", ret); + put_device(&adev->dev); + } + +out: + return ret; +} + +static int of_apr_add_pd_lookups(struct device *dev) +{ + const char *service_name, *service_path; + struct packet_router *apr = dev_get_drvdata(dev); + struct device_node *node; + struct pdr_service *pds; + int ret; + + for_each_child_of_node(dev->of_node, node) { + ret = of_property_read_string_index(node, "qcom,protection-domain", + 0, &service_name); + if (ret < 0) + continue; + + ret = of_property_read_string_index(node, "qcom,protection-domain", + 1, &service_path); + if (ret < 0) { + dev_err(dev, "pdr service path missing: %d\n", ret); + of_node_put(node); + return ret; + } + + pds = pdr_add_lookup(apr->pdr, service_name, service_path); + if (IS_ERR(pds) && PTR_ERR(pds) != -EALREADY) { + dev_err(dev, "pdr add lookup failed: %ld\n", PTR_ERR(pds)); + of_node_put(node); + return PTR_ERR(pds); + } + } + + return 0; +} + +static void of_register_apr_devices(struct device *dev, const char *svc_path) +{ + struct packet_router *apr = dev_get_drvdata(dev); + struct device_node *node; + const char *service_path; + int ret; + + for_each_child_of_node(dev->of_node, node) { + u32 svc_id; + u32 domain_id; + + /* + * This function is called with svc_path NULL during + * apr_probe(), in which case we register any apr devices + * without a qcom,protection-domain specified. + * + * Then as the protection domains becomes available + * (if applicable) this function is again called, but with + * svc_path representing the service becoming available. In + * this case we register any apr devices with a matching + * qcom,protection-domain. 
+ */ + + ret = of_property_read_string_index(node, "qcom,protection-domain", + 1, &service_path); + if (svc_path) { + /* skip APR services that are PD independent */ + if (ret) + continue; + + /* skip APR services whose PD paths don't match */ + if (strcmp(service_path, svc_path)) + continue; + } else { + /* skip APR services whose PD lookups are registered */ + if (ret == 0) + continue; + } + + if (of_property_read_u32(node, "reg", &svc_id)) + continue; + + domain_id = apr->dest_domain_id; + + if (apr_add_device(dev, node, svc_id, domain_id)) + dev_err(dev, "Failed to add apr %d svc\n", svc_id); + } +} + +static int apr_remove_device(struct device *dev, void *svc_path) +{ + struct apr_device *adev = to_apr_device(dev); + + if (svc_path && adev->service_path) { + if (!strcmp(adev->service_path, (char *)svc_path)) + device_unregister(&adev->dev); + } else { + device_unregister(&adev->dev); + } + + return 0; +} + +static void apr_pd_status(int state, char *svc_path, void *priv) +{ + struct packet_router *apr = (struct packet_router *)priv; + + switch (state) { + case SERVREG_SERVICE_STATE_UP: + of_register_apr_devices(apr->dev, svc_path); + break; + case SERVREG_SERVICE_STATE_DOWN: + device_for_each_child(apr->dev, svc_path, apr_remove_device); + break; + } +} + +static int apr_probe(struct rpmsg_device *rpdev) +{ + struct device *dev = &rpdev->dev; + struct packet_router *apr; + int ret; + + apr = devm_kzalloc(dev, sizeof(*apr), GFP_KERNEL); + if (!apr) + return -ENOMEM; + + ret = of_property_read_u32(dev->of_node, "qcom,domain", &apr->dest_domain_id); + + if (of_device_is_compatible(dev->of_node, "qcom,gpr")) { + apr->type = PR_TYPE_GPR; + } else { + if (ret) /* try deprecated apr-domain property */ + ret = of_property_read_u32(dev->of_node, "qcom,apr-domain", + &apr->dest_domain_id); + apr->type = PR_TYPE_APR; + } + + if (ret) { + dev_err(dev, "Domain ID not specified in DT\n"); + return ret; + } + + dev_set_drvdata(dev, apr); + apr->ch = rpdev->ept; + apr->dev = dev; + apr->rxwq = create_singlethread_workqueue("qcom_apr_rx"); + if (!apr->rxwq) { + dev_err(apr->dev, "Failed to start Rx WQ\n"); + return -ENOMEM; + } + INIT_WORK(&apr->rx_work, apr_rxwq); + + apr->pdr = pdr_handle_alloc(apr_pd_status, apr); + if (IS_ERR(apr->pdr)) { + dev_err(dev, "Failed to init PDR handle\n"); + ret = PTR_ERR(apr->pdr); + goto destroy_wq; + } + + INIT_LIST_HEAD(&apr->rx_list); + spin_lock_init(&apr->rx_lock); + spin_lock_init(&apr->svcs_lock); + idr_init(&apr->svcs_idr); + + ret = of_apr_add_pd_lookups(dev); + if (ret) + goto handle_release; + + of_register_apr_devices(dev, NULL); + + return 0; + +handle_release: + pdr_handle_release(apr->pdr); +destroy_wq: + destroy_workqueue(apr->rxwq); + return ret; +} + +static void apr_remove(struct rpmsg_device *rpdev) +{ + struct packet_router *apr = dev_get_drvdata(&rpdev->dev); + + pdr_handle_release(apr->pdr); + device_for_each_child(&rpdev->dev, NULL, apr_remove_device); + destroy_workqueue(apr->rxwq); +} + +/* + * __apr_driver_register() - Client driver registration with aprbus + * + * @drv:Client driver to be associated with client-device. + * @owner: owning module/driver + * + * This API will register the client driver with the aprbus + * It is called from the driver's module-init function. 
+ */ +int __apr_driver_register(struct apr_driver *drv, struct module *owner) +{ + drv->driver.bus = &aprbus; + drv->driver.owner = owner; + + return driver_register(&drv->driver); +} +EXPORT_SYMBOL_GPL(__apr_driver_register); + +/* + * apr_driver_unregister() - Undo effect of apr_driver_register + * + * @drv: Client driver to be unregistered + */ +void apr_driver_unregister(struct apr_driver *drv) +{ + driver_unregister(&drv->driver); +} +EXPORT_SYMBOL_GPL(apr_driver_unregister); + +static const struct of_device_id pkt_router_of_match[] = { + { .compatible = "qcom,apr"}, + { .compatible = "qcom,apr-v2"}, + { .compatible = "qcom,gpr"}, + {} +}; +MODULE_DEVICE_TABLE(of, pkt_router_of_match); + +static struct rpmsg_driver packet_router_driver = { + .probe = apr_probe, + .remove = apr_remove, + .callback = apr_callback, + .drv = { + .name = "qcom,apr", + .of_match_table = pkt_router_of_match, + }, +}; + +static int __init apr_init(void) +{ + int ret; + + ret = bus_register(&aprbus); + if (!ret) + ret = register_rpmsg_driver(&packet_router_driver); + else + bus_unregister(&aprbus); + + return ret; +} + +static void __exit apr_exit(void) +{ + bus_unregister(&aprbus); + unregister_rpmsg_driver(&packet_router_driver); +} + +subsys_initcall(apr_init); +module_exit(apr_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Qualcomm APR Bus"); diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c new file mode 100644 index 0000000000..34c40368d5 --- /dev/null +++ b/drivers/soc/qcom/cmd-db.c @@ -0,0 +1,368 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2016-2018, 2020, The Linux Foundation. All rights reserved. */ + +#include <linux/debugfs.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_reserved_mem.h> +#include <linux/platform_device.h> +#include <linux/seq_file.h> +#include <linux/types.h> + +#include <soc/qcom/cmd-db.h> + +#define NUM_PRIORITY 2 +#define MAX_SLV_ID 8 +#define SLAVE_ID_MASK 0x7 +#define SLAVE_ID_SHIFT 16 + +/** + * struct entry_header: header for each entry in cmddb + * + * @id: resource's identifier + * @priority: unused + * @addr: the address of the resource + * @len: length of the data + * @offset: offset from :@data_offset, start of the data + */ +struct entry_header { + u8 id[8]; + __le32 priority[NUM_PRIORITY]; + __le32 addr; + __le16 len; + __le16 offset; +}; + +/** + * struct rsc_hdr: resource header information + * + * @slv_id: id for the resource + * @header_offset: entry's header at offset from the end of the cmd_db_header + * @data_offset: entry's data at offset from the end of the cmd_db_header + * @cnt: number of entries for HW type + * @version: MSB is major, LSB is minor + * @reserved: reserved for future use. + */ +struct rsc_hdr { + __le16 slv_id; + __le16 header_offset; + __le16 data_offset; + __le16 cnt; + __le16 version; + __le16 reserved[3]; +}; + +/** + * struct cmd_db_header: The DB header information + * + * @version: The cmd db version + * @magic: constant expected in the database + * @header: array of resources + * @checksum: checksum for the header. Unused. + * @reserved: reserved memory + * @data: driver specific data + */ +struct cmd_db_header { + __le32 version; + u8 magic[4]; + struct rsc_hdr header[MAX_SLV_ID]; + __le32 checksum; + __le32 reserved; + u8 data[]; +}; + +/** + * DOC: Description of the Command DB database. + * + * At the start of the command DB memory is the cmd_db_header structure. 
+ * The cmd_db_header holds the version, checksum, magic key as well as an + * array for header for each slave (depicted by the rsc_header). Each h/w + * based accelerator is a 'slave' (shared resource) and has slave id indicating + * the type of accelerator. The rsc_header is the header for such individual + * slaves of a given type. The entries for each of these slaves begin at the + * rsc_hdr.header_offset. In addition each slave could have auxiliary data + * that may be needed by the driver. The data for the slave starts at the + * entry_header.offset to the location pointed to by the rsc_hdr.data_offset. + * + * Drivers have a stringified key to a slave/resource. They can query the slave + * information and get the slave id and the auxiliary data and the length of the + * data. Using this information, they can format the request to be sent to the + * h/w accelerator and request a resource state. + */ + +static const u8 CMD_DB_MAGIC[] = { 0xdb, 0x30, 0x03, 0x0c }; + +static bool cmd_db_magic_matches(const struct cmd_db_header *header) +{ + const u8 *magic = header->magic; + + return memcmp(magic, CMD_DB_MAGIC, ARRAY_SIZE(CMD_DB_MAGIC)) == 0; +} + +static struct cmd_db_header *cmd_db_header; + +static inline const void *rsc_to_entry_header(const struct rsc_hdr *hdr) +{ + u16 offset = le16_to_cpu(hdr->header_offset); + + return cmd_db_header->data + offset; +} + +static inline void * +rsc_offset(const struct rsc_hdr *hdr, const struct entry_header *ent) +{ + u16 offset = le16_to_cpu(hdr->data_offset); + u16 loffset = le16_to_cpu(ent->offset); + + return cmd_db_header->data + offset + loffset; +} + +/** + * cmd_db_ready - Indicates if command DB is available + * + * Return: 0 on success, errno otherwise + */ +int cmd_db_ready(void) +{ + if (cmd_db_header == NULL) + return -EPROBE_DEFER; + else if (!cmd_db_magic_matches(cmd_db_header)) + return -EINVAL; + + return 0; +} +EXPORT_SYMBOL(cmd_db_ready); + +static int cmd_db_get_header(const char *id, const struct entry_header **eh, + const struct rsc_hdr **rh) +{ + const struct rsc_hdr *rsc_hdr; + const struct entry_header *ent; + int ret, i, j; + u8 query[sizeof(ent->id)] __nonstring; + + ret = cmd_db_ready(); + if (ret) + return ret; + + /* + * Pad out query string to same length as in DB. NOTE: the output + * query string is not necessarily '\0' terminated if it bumps up + * against the max size. That's OK and expected. + */ + strncpy(query, id, sizeof(query)); + + for (i = 0; i < MAX_SLV_ID; i++) { + rsc_hdr = &cmd_db_header->header[i]; + if (!rsc_hdr->slv_id) + break; + + ent = rsc_to_entry_header(rsc_hdr); + for (j = 0; j < le16_to_cpu(rsc_hdr->cnt); j++, ent++) { + if (memcmp(ent->id, query, sizeof(ent->id)) == 0) { + if (eh) + *eh = ent; + if (rh) + *rh = rsc_hdr; + return 0; + } + } + } + + return -ENODEV; +} + +/** + * cmd_db_read_addr() - Query command db for resource id address. + * + * @id: resource id to query for address + * + * Return: resource address on success, 0 on error + * + * This is used to retrieve resource address based on resource + * id. + */ +u32 cmd_db_read_addr(const char *id) +{ + int ret; + const struct entry_header *ent; + + ret = cmd_db_get_header(id, &ent, NULL); + + return ret < 0 ? 0 : le32_to_cpu(ent->addr); +} +EXPORT_SYMBOL(cmd_db_read_addr); + +/** + * cmd_db_read_aux_data() - Query command db for aux data. 
+ * + * @id: Resource to retrieve AUX Data on + * @len: size of data buffer returned + * + * Return: pointer to data on success, error pointer otherwise + */ +const void *cmd_db_read_aux_data(const char *id, size_t *len) +{ + int ret; + const struct entry_header *ent; + const struct rsc_hdr *rsc_hdr; + + ret = cmd_db_get_header(id, &ent, &rsc_hdr); + if (ret) + return ERR_PTR(ret); + + if (len) + *len = le16_to_cpu(ent->len); + + return rsc_offset(rsc_hdr, ent); +} +EXPORT_SYMBOL(cmd_db_read_aux_data); + +/** + * cmd_db_read_slave_id - Get the slave ID for a given resource address + * + * @id: Resource id to query the DB for version + * + * Return: cmd_db_hw_type enum on success, CMD_DB_HW_INVALID on error + */ +enum cmd_db_hw_type cmd_db_read_slave_id(const char *id) +{ + int ret; + const struct entry_header *ent; + u32 addr; + + ret = cmd_db_get_header(id, &ent, NULL); + if (ret < 0) + return CMD_DB_HW_INVALID; + + addr = le32_to_cpu(ent->addr); + return (addr >> SLAVE_ID_SHIFT) & SLAVE_ID_MASK; +} +EXPORT_SYMBOL(cmd_db_read_slave_id); + +#ifdef CONFIG_DEBUG_FS +static int cmd_db_debugfs_dump(struct seq_file *seq, void *p) +{ + int i, j; + const struct rsc_hdr *rsc; + const struct entry_header *ent; + const char *name; + u16 len, version; + u8 major, minor; + + seq_puts(seq, "Command DB DUMP\n"); + + for (i = 0; i < MAX_SLV_ID; i++) { + rsc = &cmd_db_header->header[i]; + if (!rsc->slv_id) + break; + + switch (le16_to_cpu(rsc->slv_id)) { + case CMD_DB_HW_ARC: + name = "ARC"; + break; + case CMD_DB_HW_VRM: + name = "VRM"; + break; + case CMD_DB_HW_BCM: + name = "BCM"; + break; + default: + name = "Unknown"; + break; + } + + version = le16_to_cpu(rsc->version); + major = version >> 8; + minor = version; + + seq_printf(seq, "Slave %s (v%u.%u)\n", name, major, minor); + seq_puts(seq, "-------------------------\n"); + + ent = rsc_to_entry_header(rsc); + for (j = 0; j < le16_to_cpu(rsc->cnt); j++, ent++) { + seq_printf(seq, "0x%05x: %*pEp", le32_to_cpu(ent->addr), + (int)strnlen(ent->id, sizeof(ent->id)), ent->id); + + len = le16_to_cpu(ent->len); + if (len) { + seq_printf(seq, " [%*ph]", + len, rsc_offset(rsc, ent)); + } + seq_putc(seq, '\n'); + } + } + + return 0; +} + +static int open_cmd_db_debugfs(struct inode *inode, struct file *file) +{ + return single_open(file, cmd_db_debugfs_dump, inode->i_private); +} +#endif + +static const struct file_operations cmd_db_debugfs_ops = { +#ifdef CONFIG_DEBUG_FS + .open = open_cmd_db_debugfs, +#endif + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int cmd_db_dev_probe(struct platform_device *pdev) +{ + struct reserved_mem *rmem; + int ret = 0; + + rmem = of_reserved_mem_lookup(pdev->dev.of_node); + if (!rmem) { + dev_err(&pdev->dev, "failed to acquire memory region\n"); + return -EINVAL; + } + + cmd_db_header = memremap(rmem->base, rmem->size, MEMREMAP_WB); + if (!cmd_db_header) { + ret = -ENOMEM; + cmd_db_header = NULL; + return ret; + } + + if (!cmd_db_magic_matches(cmd_db_header)) { + dev_err(&pdev->dev, "Invalid Command DB Magic\n"); + return -EINVAL; + } + + debugfs_create_file("cmd-db", 0400, NULL, NULL, &cmd_db_debugfs_ops); + + device_set_pm_not_required(&pdev->dev); + + return 0; +} + +static const struct of_device_id cmd_db_match_table[] = { + { .compatible = "qcom,cmd-db" }, + { } +}; +MODULE_DEVICE_TABLE(of, cmd_db_match_table); + +static struct platform_driver cmd_db_dev_driver = { + .probe = cmd_db_dev_probe, + .driver = { + .name = "cmd-db", + .of_match_table = cmd_db_match_table, + 
.suppress_bind_attrs = true, + }, +}; + +static int __init cmd_db_device_init(void) +{ + return platform_driver_register(&cmd_db_dev_driver); +} +arch_initcall(cmd_db_device_init); + +MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Command DB Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/icc-bwmon.c b/drivers/soc/qcom/icc-bwmon.c new file mode 100644 index 0000000000..adf2d523f1 --- /dev/null +++ b/drivers/soc/qcom/icc-bwmon.c @@ -0,0 +1,875 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. + * Copyright (C) 2021-2022 Linaro Ltd + * Author: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>, based on + * previous work of Thara Gopinath and msm-4.9 downstream sources. + */ + +#include <linux/err.h> +#include <linux/interconnect.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pm_opp.h> +#include <linux/regmap.h> +#include <linux/sizes.h> + +/* + * The BWMON samples data throughput within 'sample_ms' time. With three + * configurable thresholds (Low, Medium and High) gives four windows (called + * zones) of current bandwidth: + * + * Zone 0: byte count < THRES_LO + * Zone 1: THRES_LO < byte count < THRES_MED + * Zone 2: THRES_MED < byte count < THRES_HIGH + * Zone 3: THRES_HIGH < byte count + * + * Zones 0 and 2 are not used by this driver. + */ + +/* Internal sampling clock frequency */ +#define HW_TIMER_HZ 19200000 + +#define BWMON_V4_GLOBAL_IRQ_CLEAR 0x108 +#define BWMON_V4_GLOBAL_IRQ_ENABLE 0x10c +/* + * All values here and further are matching regmap fields, so without absolute + * register offsets. + */ +#define BWMON_V4_GLOBAL_IRQ_ENABLE_ENABLE BIT(0) + +/* + * Starting with SDM845, the BWMON4 register space has changed a bit: + * the global registers were jammed into the beginning of the monitor region. + * To keep the proper offsets, one would have to map <GLOBAL_BASE 0x200> and + * <GLOBAL_BASE+0x100 0x300>, which is straight up wrong. + * To facilitate for that, while allowing the older, arguably more proper + * implementations to work, offset the global registers by -0x100 to avoid + * having to map half of the global registers twice. 
+ */ +#define BWMON_V4_845_OFFSET 0x100 +#define BWMON_V4_GLOBAL_IRQ_CLEAR_845 (BWMON_V4_GLOBAL_IRQ_CLEAR - BWMON_V4_845_OFFSET) +#define BWMON_V4_GLOBAL_IRQ_ENABLE_845 (BWMON_V4_GLOBAL_IRQ_ENABLE - BWMON_V4_845_OFFSET) + +#define BWMON_V4_IRQ_STATUS 0x100 +#define BWMON_V4_IRQ_CLEAR 0x108 + +#define BWMON_V4_IRQ_ENABLE 0x10c +#define BWMON_IRQ_ENABLE_MASK (BIT(1) | BIT(3)) +#define BWMON_V5_IRQ_STATUS 0x000 +#define BWMON_V5_IRQ_CLEAR 0x008 +#define BWMON_V5_IRQ_ENABLE 0x00c + +#define BWMON_V4_ENABLE 0x2a0 +#define BWMON_V5_ENABLE 0x010 +#define BWMON_ENABLE_ENABLE BIT(0) + +#define BWMON_V4_CLEAR 0x2a4 +#define BWMON_V5_CLEAR 0x014 +#define BWMON_CLEAR_CLEAR BIT(0) +#define BWMON_CLEAR_CLEAR_ALL BIT(1) + +#define BWMON_V4_SAMPLE_WINDOW 0x2a8 +#define BWMON_V5_SAMPLE_WINDOW 0x020 + +#define BWMON_V4_THRESHOLD_HIGH 0x2ac +#define BWMON_V4_THRESHOLD_MED 0x2b0 +#define BWMON_V4_THRESHOLD_LOW 0x2b4 +#define BWMON_V5_THRESHOLD_HIGH 0x024 +#define BWMON_V5_THRESHOLD_MED 0x028 +#define BWMON_V5_THRESHOLD_LOW 0x02c + +#define BWMON_V4_ZONE_ACTIONS 0x2b8 +#define BWMON_V5_ZONE_ACTIONS 0x030 +/* + * Actions to perform on some zone 'z' when current zone hits the threshold: + * Increment counter of zone 'z' + */ +#define BWMON_ZONE_ACTIONS_INCREMENT(z) (0x2 << ((z) * 2)) +/* Clear counter of zone 'z' */ +#define BWMON_ZONE_ACTIONS_CLEAR(z) (0x1 << ((z) * 2)) + +/* Zone 0 threshold hit: Clear zone count */ +#define BWMON_ZONE_ACTIONS_ZONE0 (BWMON_ZONE_ACTIONS_CLEAR(0)) + +/* Zone 1 threshold hit: Increment zone count & clear lower zones */ +#define BWMON_ZONE_ACTIONS_ZONE1 (BWMON_ZONE_ACTIONS_INCREMENT(1) | \ + BWMON_ZONE_ACTIONS_CLEAR(0)) + +/* Zone 2 threshold hit: Increment zone count & clear lower zones */ +#define BWMON_ZONE_ACTIONS_ZONE2 (BWMON_ZONE_ACTIONS_INCREMENT(2) | \ + BWMON_ZONE_ACTIONS_CLEAR(1) | \ + BWMON_ZONE_ACTIONS_CLEAR(0)) + +/* Zone 3 threshold hit: Increment zone count & clear lower zones */ +#define BWMON_ZONE_ACTIONS_ZONE3 (BWMON_ZONE_ACTIONS_INCREMENT(3) | \ + BWMON_ZONE_ACTIONS_CLEAR(2) | \ + BWMON_ZONE_ACTIONS_CLEAR(1) | \ + BWMON_ZONE_ACTIONS_CLEAR(0)) + +/* + * There is no clear documentation/explanation of BWMON_V4_THRESHOLD_COUNT + * register. Based on observations, this is number of times one threshold has to + * be reached, to trigger interrupt in given zone. + * + * 0xff are maximum values meant to ignore the zones 0 and 2. 
+ */ +#define BWMON_V4_THRESHOLD_COUNT 0x2bc +#define BWMON_V5_THRESHOLD_COUNT 0x034 +#define BWMON_THRESHOLD_COUNT_ZONE0_DEFAULT 0xff +#define BWMON_THRESHOLD_COUNT_ZONE2_DEFAULT 0xff + +#define BWMON_V4_ZONE_MAX(zone) (0x2e0 + 4 * (zone)) +#define BWMON_V5_ZONE_MAX(zone) (0x044 + 4 * (zone)) + +/* Quirks for specific BWMON types */ +#define BWMON_HAS_GLOBAL_IRQ BIT(0) +#define BWMON_NEEDS_FORCE_CLEAR BIT(1) + +enum bwmon_fields { + /* Global region fields, keep them at the top */ + F_GLOBAL_IRQ_CLEAR, + F_GLOBAL_IRQ_ENABLE, + F_NUM_GLOBAL_FIELDS, + + /* Monitor region fields */ + F_IRQ_STATUS = F_NUM_GLOBAL_FIELDS, + F_IRQ_CLEAR, + F_IRQ_ENABLE, + F_ENABLE, + F_CLEAR, + F_SAMPLE_WINDOW, + F_THRESHOLD_HIGH, + F_THRESHOLD_MED, + F_THRESHOLD_LOW, + F_ZONE_ACTIONS_ZONE0, + F_ZONE_ACTIONS_ZONE1, + F_ZONE_ACTIONS_ZONE2, + F_ZONE_ACTIONS_ZONE3, + F_THRESHOLD_COUNT_ZONE0, + F_THRESHOLD_COUNT_ZONE1, + F_THRESHOLD_COUNT_ZONE2, + F_THRESHOLD_COUNT_ZONE3, + F_ZONE0_MAX, + F_ZONE1_MAX, + F_ZONE2_MAX, + F_ZONE3_MAX, + + F_NUM_FIELDS +}; + +struct icc_bwmon_data { + unsigned int sample_ms; + unsigned int count_unit_kb; /* kbytes */ + u8 zone1_thres_count; + u8 zone3_thres_count; + unsigned int quirks; + + const struct regmap_config *regmap_cfg; + const struct reg_field *regmap_fields; + + const struct regmap_config *global_regmap_cfg; + const struct reg_field *global_regmap_fields; +}; + +struct icc_bwmon { + struct device *dev; + const struct icc_bwmon_data *data; + int irq; + + struct regmap_field *regs[F_NUM_FIELDS]; + struct regmap_field *global_regs[F_NUM_GLOBAL_FIELDS]; + + unsigned int max_bw_kbps; + unsigned int min_bw_kbps; + unsigned int target_kbps; + unsigned int current_kbps; +}; + +/* BWMON v4 */ +static const struct reg_field msm8998_bwmon_reg_fields[] = { + [F_GLOBAL_IRQ_CLEAR] = {}, + [F_GLOBAL_IRQ_ENABLE] = {}, + [F_IRQ_STATUS] = REG_FIELD(BWMON_V4_IRQ_STATUS, 4, 7), + [F_IRQ_CLEAR] = REG_FIELD(BWMON_V4_IRQ_CLEAR, 4, 7), + [F_IRQ_ENABLE] = REG_FIELD(BWMON_V4_IRQ_ENABLE, 4, 7), + /* F_ENABLE covers entire register to disable other features */ + [F_ENABLE] = REG_FIELD(BWMON_V4_ENABLE, 0, 31), + [F_CLEAR] = REG_FIELD(BWMON_V4_CLEAR, 0, 1), + [F_SAMPLE_WINDOW] = REG_FIELD(BWMON_V4_SAMPLE_WINDOW, 0, 23), + [F_THRESHOLD_HIGH] = REG_FIELD(BWMON_V4_THRESHOLD_HIGH, 0, 11), + [F_THRESHOLD_MED] = REG_FIELD(BWMON_V4_THRESHOLD_MED, 0, 11), + [F_THRESHOLD_LOW] = REG_FIELD(BWMON_V4_THRESHOLD_LOW, 0, 11), + [F_ZONE_ACTIONS_ZONE0] = REG_FIELD(BWMON_V4_ZONE_ACTIONS, 0, 7), + [F_ZONE_ACTIONS_ZONE1] = REG_FIELD(BWMON_V4_ZONE_ACTIONS, 8, 15), + [F_ZONE_ACTIONS_ZONE2] = REG_FIELD(BWMON_V4_ZONE_ACTIONS, 16, 23), + [F_ZONE_ACTIONS_ZONE3] = REG_FIELD(BWMON_V4_ZONE_ACTIONS, 24, 31), + [F_THRESHOLD_COUNT_ZONE0] = REG_FIELD(BWMON_V4_THRESHOLD_COUNT, 0, 7), + [F_THRESHOLD_COUNT_ZONE1] = REG_FIELD(BWMON_V4_THRESHOLD_COUNT, 8, 15), + [F_THRESHOLD_COUNT_ZONE2] = REG_FIELD(BWMON_V4_THRESHOLD_COUNT, 16, 23), + [F_THRESHOLD_COUNT_ZONE3] = REG_FIELD(BWMON_V4_THRESHOLD_COUNT, 24, 31), + [F_ZONE0_MAX] = REG_FIELD(BWMON_V4_ZONE_MAX(0), 0, 11), + [F_ZONE1_MAX] = REG_FIELD(BWMON_V4_ZONE_MAX(1), 0, 11), + [F_ZONE2_MAX] = REG_FIELD(BWMON_V4_ZONE_MAX(2), 0, 11), + [F_ZONE3_MAX] = REG_FIELD(BWMON_V4_ZONE_MAX(3), 0, 11), +}; + +static const struct regmap_range msm8998_bwmon_reg_noread_ranges[] = { + regmap_reg_range(BWMON_V4_IRQ_CLEAR, BWMON_V4_IRQ_CLEAR), + regmap_reg_range(BWMON_V4_CLEAR, BWMON_V4_CLEAR), +}; + +static const struct regmap_access_table msm8998_bwmon_reg_read_table = { + .no_ranges = 
msm8998_bwmon_reg_noread_ranges, + .n_no_ranges = ARRAY_SIZE(msm8998_bwmon_reg_noread_ranges), +}; + +static const struct regmap_range msm8998_bwmon_reg_volatile_ranges[] = { + regmap_reg_range(BWMON_V4_IRQ_STATUS, BWMON_V4_IRQ_STATUS), + regmap_reg_range(BWMON_V4_ZONE_MAX(0), BWMON_V4_ZONE_MAX(3)), +}; + +static const struct regmap_access_table msm8998_bwmon_reg_volatile_table = { + .yes_ranges = msm8998_bwmon_reg_volatile_ranges, + .n_yes_ranges = ARRAY_SIZE(msm8998_bwmon_reg_volatile_ranges), +}; + +static const struct reg_field msm8998_bwmon_global_reg_fields[] = { + [F_GLOBAL_IRQ_CLEAR] = REG_FIELD(BWMON_V4_GLOBAL_IRQ_CLEAR, 0, 0), + [F_GLOBAL_IRQ_ENABLE] = REG_FIELD(BWMON_V4_GLOBAL_IRQ_ENABLE, 0, 0), +}; + +static const struct regmap_range msm8998_bwmon_global_reg_noread_ranges[] = { + regmap_reg_range(BWMON_V4_GLOBAL_IRQ_CLEAR, BWMON_V4_GLOBAL_IRQ_CLEAR), +}; + +static const struct regmap_access_table msm8998_bwmon_global_reg_read_table = { + .no_ranges = msm8998_bwmon_global_reg_noread_ranges, + .n_no_ranges = ARRAY_SIZE(msm8998_bwmon_global_reg_noread_ranges), +}; + +/* + * Fill the cache for non-readable registers only as rest does not really + * matter and can be read from the device. + */ +static const struct reg_default msm8998_bwmon_reg_defaults[] = { + { BWMON_V4_IRQ_CLEAR, 0x0 }, + { BWMON_V4_CLEAR, 0x0 }, +}; + +static const struct reg_default msm8998_bwmon_global_reg_defaults[] = { + { BWMON_V4_GLOBAL_IRQ_CLEAR, 0x0 }, +}; + +static const struct regmap_config msm8998_bwmon_regmap_cfg = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + /* + * No concurrent access expected - driver has one interrupt handler, + * regmap is not shared, no driver or user-space API. + */ + .disable_locking = true, + .rd_table = &msm8998_bwmon_reg_read_table, + .volatile_table = &msm8998_bwmon_reg_volatile_table, + .reg_defaults = msm8998_bwmon_reg_defaults, + .num_reg_defaults = ARRAY_SIZE(msm8998_bwmon_reg_defaults), + /* + * Cache is necessary for using regmap fields with non-readable + * registers. + */ + .cache_type = REGCACHE_RBTREE, +}; + +static const struct regmap_config msm8998_bwmon_global_regmap_cfg = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + /* + * No concurrent access expected - driver has one interrupt handler, + * regmap is not shared, no driver or user-space API. + */ + .disable_locking = true, + .rd_table = &msm8998_bwmon_global_reg_read_table, + .reg_defaults = msm8998_bwmon_global_reg_defaults, + .num_reg_defaults = ARRAY_SIZE(msm8998_bwmon_global_reg_defaults), + /* + * Cache is necessary for using regmap fields with non-readable + * registers. 
+ */ + .cache_type = REGCACHE_RBTREE, +}; + +static const struct reg_field sdm845_cpu_bwmon_reg_fields[] = { + [F_GLOBAL_IRQ_CLEAR] = REG_FIELD(BWMON_V4_GLOBAL_IRQ_CLEAR_845, 0, 0), + [F_GLOBAL_IRQ_ENABLE] = REG_FIELD(BWMON_V4_GLOBAL_IRQ_ENABLE_845, 0, 0), + [F_IRQ_STATUS] = REG_FIELD(BWMON_V4_IRQ_STATUS, 4, 7), + [F_IRQ_CLEAR] = REG_FIELD(BWMON_V4_IRQ_CLEAR, 4, 7), + [F_IRQ_ENABLE] = REG_FIELD(BWMON_V4_IRQ_ENABLE, 4, 7), + /* F_ENABLE covers entire register to disable other features */ + [F_ENABLE] = REG_FIELD(BWMON_V4_ENABLE, 0, 31), + [F_CLEAR] = REG_FIELD(BWMON_V4_CLEAR, 0, 1), + [F_SAMPLE_WINDOW] = REG_FIELD(BWMON_V4_SAMPLE_WINDOW, 0, 23), + [F_THRESHOLD_HIGH] = REG_FIELD(BWMON_V4_THRESHOLD_HIGH, 0, 11), + [F_THRESHOLD_MED] = REG_FIELD(BWMON_V4_THRESHOLD_MED, 0, 11), + [F_THRESHOLD_LOW] = REG_FIELD(BWMON_V4_THRESHOLD_LOW, 0, 11), + [F_ZONE_ACTIONS_ZONE0] = REG_FIELD(BWMON_V4_ZONE_ACTIONS, 0, 7), + [F_ZONE_ACTIONS_ZONE1] = REG_FIELD(BWMON_V4_ZONE_ACTIONS, 8, 15), + [F_ZONE_ACTIONS_ZONE2] = REG_FIELD(BWMON_V4_ZONE_ACTIONS, 16, 23), + [F_ZONE_ACTIONS_ZONE3] = REG_FIELD(BWMON_V4_ZONE_ACTIONS, 24, 31), + [F_THRESHOLD_COUNT_ZONE0] = REG_FIELD(BWMON_V4_THRESHOLD_COUNT, 0, 7), + [F_THRESHOLD_COUNT_ZONE1] = REG_FIELD(BWMON_V4_THRESHOLD_COUNT, 8, 15), + [F_THRESHOLD_COUNT_ZONE2] = REG_FIELD(BWMON_V4_THRESHOLD_COUNT, 16, 23), + [F_THRESHOLD_COUNT_ZONE3] = REG_FIELD(BWMON_V4_THRESHOLD_COUNT, 24, 31), + [F_ZONE0_MAX] = REG_FIELD(BWMON_V4_ZONE_MAX(0), 0, 11), + [F_ZONE1_MAX] = REG_FIELD(BWMON_V4_ZONE_MAX(1), 0, 11), + [F_ZONE2_MAX] = REG_FIELD(BWMON_V4_ZONE_MAX(2), 0, 11), + [F_ZONE3_MAX] = REG_FIELD(BWMON_V4_ZONE_MAX(3), 0, 11), +}; + +static const struct regmap_range sdm845_cpu_bwmon_reg_noread_ranges[] = { + regmap_reg_range(BWMON_V4_GLOBAL_IRQ_CLEAR_845, BWMON_V4_GLOBAL_IRQ_CLEAR_845), + regmap_reg_range(BWMON_V4_IRQ_CLEAR, BWMON_V4_IRQ_CLEAR), + regmap_reg_range(BWMON_V4_CLEAR, BWMON_V4_CLEAR), +}; + +static const struct regmap_access_table sdm845_cpu_bwmon_reg_read_table = { + .no_ranges = sdm845_cpu_bwmon_reg_noread_ranges, + .n_no_ranges = ARRAY_SIZE(sdm845_cpu_bwmon_reg_noread_ranges), +}; + +/* + * Fill the cache for non-readable registers only as rest does not really + * matter and can be read from the device. + */ +static const struct reg_default sdm845_cpu_bwmon_reg_defaults[] = { + { BWMON_V4_GLOBAL_IRQ_CLEAR_845, 0x0 }, + { BWMON_V4_IRQ_CLEAR, 0x0 }, + { BWMON_V4_CLEAR, 0x0 }, +}; + +static const struct regmap_config sdm845_cpu_bwmon_regmap_cfg = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + /* + * No concurrent access expected - driver has one interrupt handler, + * regmap is not shared, no driver or user-space API. + */ + .disable_locking = true, + .rd_table = &sdm845_cpu_bwmon_reg_read_table, + .volatile_table = &msm8998_bwmon_reg_volatile_table, + .reg_defaults = sdm845_cpu_bwmon_reg_defaults, + .num_reg_defaults = ARRAY_SIZE(sdm845_cpu_bwmon_reg_defaults), + /* + * Cache is necessary for using regmap fields with non-readable + * registers. 
+ */ + .cache_type = REGCACHE_RBTREE, +}; + +/* BWMON v5 */ +static const struct reg_field sdm845_llcc_bwmon_reg_fields[] = { + [F_GLOBAL_IRQ_CLEAR] = {}, + [F_GLOBAL_IRQ_ENABLE] = {}, + [F_IRQ_STATUS] = REG_FIELD(BWMON_V5_IRQ_STATUS, 0, 3), + [F_IRQ_CLEAR] = REG_FIELD(BWMON_V5_IRQ_CLEAR, 0, 3), + [F_IRQ_ENABLE] = REG_FIELD(BWMON_V5_IRQ_ENABLE, 0, 3), + /* F_ENABLE covers entire register to disable other features */ + [F_ENABLE] = REG_FIELD(BWMON_V5_ENABLE, 0, 31), + [F_CLEAR] = REG_FIELD(BWMON_V5_CLEAR, 0, 1), + [F_SAMPLE_WINDOW] = REG_FIELD(BWMON_V5_SAMPLE_WINDOW, 0, 19), + [F_THRESHOLD_HIGH] = REG_FIELD(BWMON_V5_THRESHOLD_HIGH, 0, 11), + [F_THRESHOLD_MED] = REG_FIELD(BWMON_V5_THRESHOLD_MED, 0, 11), + [F_THRESHOLD_LOW] = REG_FIELD(BWMON_V5_THRESHOLD_LOW, 0, 11), + [F_ZONE_ACTIONS_ZONE0] = REG_FIELD(BWMON_V5_ZONE_ACTIONS, 0, 7), + [F_ZONE_ACTIONS_ZONE1] = REG_FIELD(BWMON_V5_ZONE_ACTIONS, 8, 15), + [F_ZONE_ACTIONS_ZONE2] = REG_FIELD(BWMON_V5_ZONE_ACTIONS, 16, 23), + [F_ZONE_ACTIONS_ZONE3] = REG_FIELD(BWMON_V5_ZONE_ACTIONS, 24, 31), + [F_THRESHOLD_COUNT_ZONE0] = REG_FIELD(BWMON_V5_THRESHOLD_COUNT, 0, 7), + [F_THRESHOLD_COUNT_ZONE1] = REG_FIELD(BWMON_V5_THRESHOLD_COUNT, 8, 15), + [F_THRESHOLD_COUNT_ZONE2] = REG_FIELD(BWMON_V5_THRESHOLD_COUNT, 16, 23), + [F_THRESHOLD_COUNT_ZONE3] = REG_FIELD(BWMON_V5_THRESHOLD_COUNT, 24, 31), + [F_ZONE0_MAX] = REG_FIELD(BWMON_V5_ZONE_MAX(0), 0, 11), + [F_ZONE1_MAX] = REG_FIELD(BWMON_V5_ZONE_MAX(1), 0, 11), + [F_ZONE2_MAX] = REG_FIELD(BWMON_V5_ZONE_MAX(2), 0, 11), + [F_ZONE3_MAX] = REG_FIELD(BWMON_V5_ZONE_MAX(3), 0, 11), +}; + +static const struct regmap_range sdm845_llcc_bwmon_reg_noread_ranges[] = { + regmap_reg_range(BWMON_V5_IRQ_CLEAR, BWMON_V5_IRQ_CLEAR), + regmap_reg_range(BWMON_V5_CLEAR, BWMON_V5_CLEAR), +}; + +static const struct regmap_access_table sdm845_llcc_bwmon_reg_read_table = { + .no_ranges = sdm845_llcc_bwmon_reg_noread_ranges, + .n_no_ranges = ARRAY_SIZE(sdm845_llcc_bwmon_reg_noread_ranges), +}; + +static const struct regmap_range sdm845_llcc_bwmon_reg_volatile_ranges[] = { + regmap_reg_range(BWMON_V5_IRQ_STATUS, BWMON_V5_IRQ_STATUS), + regmap_reg_range(BWMON_V5_ZONE_MAX(0), BWMON_V5_ZONE_MAX(3)), +}; + +static const struct regmap_access_table sdm845_llcc_bwmon_reg_volatile_table = { + .yes_ranges = sdm845_llcc_bwmon_reg_volatile_ranges, + .n_yes_ranges = ARRAY_SIZE(sdm845_llcc_bwmon_reg_volatile_ranges), +}; + +/* + * Fill the cache for non-readable registers only as rest does not really + * matter and can be read from the device. + */ +static const struct reg_default sdm845_llcc_bwmon_reg_defaults[] = { + { BWMON_V5_IRQ_CLEAR, 0x0 }, + { BWMON_V5_CLEAR, 0x0 }, +}; + +static const struct regmap_config sdm845_llcc_bwmon_regmap_cfg = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + /* + * No concurrent access expected - driver has one interrupt handler, + * regmap is not shared, no driver or user-space API. + */ + .disable_locking = true, + .rd_table = &sdm845_llcc_bwmon_reg_read_table, + .volatile_table = &sdm845_llcc_bwmon_reg_volatile_table, + .reg_defaults = sdm845_llcc_bwmon_reg_defaults, + .num_reg_defaults = ARRAY_SIZE(sdm845_llcc_bwmon_reg_defaults), + /* + * Cache is necessary for using regmap fields with non-readable + * registers. + */ + .cache_type = REGCACHE_RBTREE, +}; + +static void bwmon_clear_counters(struct icc_bwmon *bwmon, bool clear_all) +{ + unsigned int val = BWMON_CLEAR_CLEAR; + + if (clear_all) + val |= BWMON_CLEAR_CLEAR_ALL; + /* + * Clear counters. The order and barriers are + * important. 
Quoting downstream Qualcomm msm-4.9 tree: + * + * The counter clear and IRQ clear bits are not in the same 4KB + * region. So, we need to make sure the counter clear is completed + * before we try to clear the IRQ or do any other counter operations. + */ + regmap_field_force_write(bwmon->regs[F_CLEAR], val); + if (bwmon->data->quirks & BWMON_NEEDS_FORCE_CLEAR) + regmap_field_force_write(bwmon->regs[F_CLEAR], 0); +} + +static void bwmon_clear_irq(struct icc_bwmon *bwmon) +{ + struct regmap_field *global_irq_clr; + + if (bwmon->data->global_regmap_fields) + global_irq_clr = bwmon->global_regs[F_GLOBAL_IRQ_CLEAR]; + else + global_irq_clr = bwmon->regs[F_GLOBAL_IRQ_CLEAR]; + + /* + * Clear zone and global interrupts. The order and barriers are + * important. Quoting downstream Qualcomm msm-4.9 tree: + * + * Synchronize the local interrupt clear in mon_irq_clear() + * with the global interrupt clear here. Otherwise, the CPU + * may reorder the two writes and clear the global interrupt + * before the local interrupt, causing the global interrupt + * to be retriggered by the local interrupt still being high. + * + * Similarly, because the global registers are in a different + * region than the local registers, we need to ensure any register + * writes to enable the monitor after this call are ordered with the + * clearing here so that local writes don't happen before the + * interrupt is cleared. + */ + regmap_field_force_write(bwmon->regs[F_IRQ_CLEAR], BWMON_IRQ_ENABLE_MASK); + if (bwmon->data->quirks & BWMON_NEEDS_FORCE_CLEAR) + regmap_field_force_write(bwmon->regs[F_IRQ_CLEAR], 0); + if (bwmon->data->quirks & BWMON_HAS_GLOBAL_IRQ) + regmap_field_force_write(global_irq_clr, + BWMON_V4_GLOBAL_IRQ_ENABLE_ENABLE); +} + +static void bwmon_disable(struct icc_bwmon *bwmon) +{ + struct regmap_field *global_irq_en; + + if (bwmon->data->global_regmap_fields) + global_irq_en = bwmon->global_regs[F_GLOBAL_IRQ_ENABLE]; + else + global_irq_en = bwmon->regs[F_GLOBAL_IRQ_ENABLE]; + + /* Disable interrupts. Strict ordering, see bwmon_clear_irq(). */ + if (bwmon->data->quirks & BWMON_HAS_GLOBAL_IRQ) + regmap_field_write(global_irq_en, 0x0); + regmap_field_write(bwmon->regs[F_IRQ_ENABLE], 0x0); + + /* + * Disable bwmon. Must happen before bwmon_clear_irq() to avoid spurious + * IRQ. 
+ */ + regmap_field_write(bwmon->regs[F_ENABLE], 0x0); +} + +static void bwmon_enable(struct icc_bwmon *bwmon, unsigned int irq_enable) +{ + struct regmap_field *global_irq_en; + + if (bwmon->data->global_regmap_fields) + global_irq_en = bwmon->global_regs[F_GLOBAL_IRQ_ENABLE]; + else + global_irq_en = bwmon->regs[F_GLOBAL_IRQ_ENABLE]; + + /* Enable interrupts */ + if (bwmon->data->quirks & BWMON_HAS_GLOBAL_IRQ) + regmap_field_write(global_irq_en, + BWMON_V4_GLOBAL_IRQ_ENABLE_ENABLE); + + regmap_field_write(bwmon->regs[F_IRQ_ENABLE], irq_enable); + + /* Enable bwmon */ + regmap_field_write(bwmon->regs[F_ENABLE], BWMON_ENABLE_ENABLE); +} + +static unsigned int bwmon_kbps_to_count(struct icc_bwmon *bwmon, + unsigned int kbps) +{ + return kbps / bwmon->data->count_unit_kb; +} + +static void bwmon_set_threshold(struct icc_bwmon *bwmon, + struct regmap_field *reg, unsigned int kbps) +{ + unsigned int thres; + + thres = mult_frac(bwmon_kbps_to_count(bwmon, kbps), + bwmon->data->sample_ms, MSEC_PER_SEC); + regmap_field_write(reg, thres); +} + +static void bwmon_start(struct icc_bwmon *bwmon) +{ + const struct icc_bwmon_data *data = bwmon->data; + u32 bw_low = 0; + int window; + + /* No need to check for errors, as this must have succeeded before. */ + dev_pm_opp_find_bw_ceil(bwmon->dev, &bw_low, 0); + + bwmon_clear_counters(bwmon, true); + + window = mult_frac(bwmon->data->sample_ms, HW_TIMER_HZ, MSEC_PER_SEC); + /* Maximum sampling window: 0xffffff for v4 and 0xfffff for v5 */ + regmap_field_write(bwmon->regs[F_SAMPLE_WINDOW], window); + + bwmon_set_threshold(bwmon, bwmon->regs[F_THRESHOLD_HIGH], bw_low); + bwmon_set_threshold(bwmon, bwmon->regs[F_THRESHOLD_MED], bw_low); + bwmon_set_threshold(bwmon, bwmon->regs[F_THRESHOLD_LOW], 0); + + regmap_field_write(bwmon->regs[F_THRESHOLD_COUNT_ZONE0], + BWMON_THRESHOLD_COUNT_ZONE0_DEFAULT); + regmap_field_write(bwmon->regs[F_THRESHOLD_COUNT_ZONE1], + data->zone1_thres_count); + regmap_field_write(bwmon->regs[F_THRESHOLD_COUNT_ZONE2], + BWMON_THRESHOLD_COUNT_ZONE2_DEFAULT); + regmap_field_write(bwmon->regs[F_THRESHOLD_COUNT_ZONE3], + data->zone3_thres_count); + + regmap_field_write(bwmon->regs[F_ZONE_ACTIONS_ZONE0], + BWMON_ZONE_ACTIONS_ZONE0); + regmap_field_write(bwmon->regs[F_ZONE_ACTIONS_ZONE1], + BWMON_ZONE_ACTIONS_ZONE1); + regmap_field_write(bwmon->regs[F_ZONE_ACTIONS_ZONE2], + BWMON_ZONE_ACTIONS_ZONE2); + regmap_field_write(bwmon->regs[F_ZONE_ACTIONS_ZONE3], + BWMON_ZONE_ACTIONS_ZONE3); + + bwmon_clear_irq(bwmon); + bwmon_enable(bwmon, BWMON_IRQ_ENABLE_MASK); +} + +static irqreturn_t bwmon_intr(int irq, void *dev_id) +{ + struct icc_bwmon *bwmon = dev_id; + unsigned int status, max; + int zone; + + if (regmap_field_read(bwmon->regs[F_IRQ_STATUS], &status)) + return IRQ_NONE; + + status &= BWMON_IRQ_ENABLE_MASK; + if (!status) { + /* + * Only zone 1 and zone 3 interrupts are enabled but zone 2 + * threshold could be hit and trigger interrupt even if not + * enabled. + * Such spurious interrupt might come with valuable max count or + * not, so solution would be to always check all + * BWMON_ZONE_MAX() registers to find the highest value. + * Such case is currently ignored. + */ + return IRQ_NONE; + } + + bwmon_disable(bwmon); + + zone = get_bitmask_order(status) - 1; + /* + * Zone max bytes count register returns count units within sampling + * window. Downstream kernel for BWMONv4 (called BWMON type 2 in + * downstream) always increments the max bytes count by one. 
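+ * The same "plus one" adjustment is applied below before the count is converted to kbps.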
+ */ + if (regmap_field_read(bwmon->regs[F_ZONE0_MAX + zone], &max)) + return IRQ_NONE; + + max += 1; + max *= bwmon->data->count_unit_kb; + bwmon->target_kbps = mult_frac(max, MSEC_PER_SEC, bwmon->data->sample_ms); + + return IRQ_WAKE_THREAD; +} + +static irqreturn_t bwmon_intr_thread(int irq, void *dev_id) +{ + struct icc_bwmon *bwmon = dev_id; + unsigned int irq_enable = 0; + struct dev_pm_opp *opp, *target_opp; + unsigned int bw_kbps, up_kbps, down_kbps; + + bw_kbps = bwmon->target_kbps; + + target_opp = dev_pm_opp_find_bw_ceil(bwmon->dev, &bw_kbps, 0); + if (IS_ERR(target_opp) && PTR_ERR(target_opp) == -ERANGE) + target_opp = dev_pm_opp_find_bw_floor(bwmon->dev, &bw_kbps, 0); + + bwmon->target_kbps = bw_kbps; + + bw_kbps--; + opp = dev_pm_opp_find_bw_floor(bwmon->dev, &bw_kbps, 0); + if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE) + down_kbps = bwmon->target_kbps; + else + down_kbps = bw_kbps; + + up_kbps = bwmon->target_kbps + 1; + + if (bwmon->target_kbps >= bwmon->max_bw_kbps) + irq_enable = BIT(1); + else if (bwmon->target_kbps <= bwmon->min_bw_kbps) + irq_enable = BIT(3); + else + irq_enable = BWMON_IRQ_ENABLE_MASK; + + bwmon_set_threshold(bwmon, bwmon->regs[F_THRESHOLD_HIGH], + up_kbps); + bwmon_set_threshold(bwmon, bwmon->regs[F_THRESHOLD_MED], + down_kbps); + bwmon_clear_counters(bwmon, false); + bwmon_clear_irq(bwmon); + bwmon_enable(bwmon, irq_enable); + + if (bwmon->target_kbps == bwmon->current_kbps) + goto out; + + dev_pm_opp_set_opp(bwmon->dev, target_opp); + bwmon->current_kbps = bwmon->target_kbps; + +out: + dev_pm_opp_put(target_opp); + if (!IS_ERR(opp)) + dev_pm_opp_put(opp); + + return IRQ_HANDLED; +} + +static int bwmon_init_regmap(struct platform_device *pdev, + struct icc_bwmon *bwmon) +{ + struct device *dev = &pdev->dev; + void __iomem *base; + struct regmap *map; + int ret; + + /* Map the monitor base */ + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return dev_err_probe(dev, PTR_ERR(base), + "failed to map bwmon registers\n"); + + map = devm_regmap_init_mmio(dev, base, bwmon->data->regmap_cfg); + if (IS_ERR(map)) + return dev_err_probe(dev, PTR_ERR(map), + "failed to initialize regmap\n"); + + BUILD_BUG_ON(ARRAY_SIZE(msm8998_bwmon_global_reg_fields) != F_NUM_GLOBAL_FIELDS); + BUILD_BUG_ON(ARRAY_SIZE(msm8998_bwmon_reg_fields) != F_NUM_FIELDS); + BUILD_BUG_ON(ARRAY_SIZE(sdm845_cpu_bwmon_reg_fields) != F_NUM_FIELDS); + BUILD_BUG_ON(ARRAY_SIZE(sdm845_llcc_bwmon_reg_fields) != F_NUM_FIELDS); + + ret = devm_regmap_field_bulk_alloc(dev, map, bwmon->regs, + bwmon->data->regmap_fields, + F_NUM_FIELDS); + if (ret) + return ret; + + if (bwmon->data->global_regmap_cfg) { + /* Map the global base, if separate */ + base = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(base)) + return dev_err_probe(dev, PTR_ERR(base), + "failed to map bwmon global registers\n"); + + map = devm_regmap_init_mmio(dev, base, bwmon->data->global_regmap_cfg); + if (IS_ERR(map)) + return dev_err_probe(dev, PTR_ERR(map), + "failed to initialize global regmap\n"); + + ret = devm_regmap_field_bulk_alloc(dev, map, bwmon->global_regs, + bwmon->data->global_regmap_fields, + F_NUM_GLOBAL_FIELDS); + } + + return ret; +} + +static int bwmon_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct dev_pm_opp *opp; + struct icc_bwmon *bwmon; + int ret; + + bwmon = devm_kzalloc(dev, sizeof(*bwmon), GFP_KERNEL); + if (!bwmon) + return -ENOMEM; + + bwmon->data = of_device_get_match_data(dev); + + ret = bwmon_init_regmap(pdev, bwmon); + if (ret) + return ret; 
+ + bwmon->irq = platform_get_irq(pdev, 0); + if (bwmon->irq < 0) + return bwmon->irq; + + ret = devm_pm_opp_of_add_table(dev); + if (ret) + return dev_err_probe(dev, ret, "failed to add OPP table\n"); + + bwmon->max_bw_kbps = UINT_MAX; + opp = dev_pm_opp_find_bw_floor(dev, &bwmon->max_bw_kbps, 0); + if (IS_ERR(opp)) + return dev_err_probe(dev, PTR_ERR(opp), "failed to find max peak bandwidth\n"); + + bwmon->min_bw_kbps = 0; + opp = dev_pm_opp_find_bw_ceil(dev, &bwmon->min_bw_kbps, 0); + if (IS_ERR(opp)) + return dev_err_probe(dev, PTR_ERR(opp), "failed to find min peak bandwidth\n"); + + bwmon->dev = dev; + + bwmon_disable(bwmon); + ret = devm_request_threaded_irq(dev, bwmon->irq, bwmon_intr, + bwmon_intr_thread, + IRQF_ONESHOT, dev_name(dev), bwmon); + if (ret) + return dev_err_probe(dev, ret, "failed to request IRQ\n"); + + platform_set_drvdata(pdev, bwmon); + bwmon_start(bwmon); + + return 0; +} + +static int bwmon_remove(struct platform_device *pdev) +{ + struct icc_bwmon *bwmon = platform_get_drvdata(pdev); + + bwmon_disable(bwmon); + + return 0; +} + +static const struct icc_bwmon_data msm8998_bwmon_data = { + .sample_ms = 4, + .count_unit_kb = 1024, + .zone1_thres_count = 16, + .zone3_thres_count = 1, + .quirks = BWMON_HAS_GLOBAL_IRQ, + .regmap_fields = msm8998_bwmon_reg_fields, + .regmap_cfg = &msm8998_bwmon_regmap_cfg, + .global_regmap_fields = msm8998_bwmon_global_reg_fields, + .global_regmap_cfg = &msm8998_bwmon_global_regmap_cfg, +}; + +static const struct icc_bwmon_data sdm845_cpu_bwmon_data = { + .sample_ms = 4, + .count_unit_kb = 64, + .zone1_thres_count = 16, + .zone3_thres_count = 1, + .quirks = BWMON_HAS_GLOBAL_IRQ, + .regmap_fields = sdm845_cpu_bwmon_reg_fields, + .regmap_cfg = &sdm845_cpu_bwmon_regmap_cfg, +}; + +static const struct icc_bwmon_data sdm845_llcc_bwmon_data = { + .sample_ms = 4, + .count_unit_kb = 1024, + .zone1_thres_count = 16, + .zone3_thres_count = 1, + .regmap_fields = sdm845_llcc_bwmon_reg_fields, + .regmap_cfg = &sdm845_llcc_bwmon_regmap_cfg, +}; + +static const struct icc_bwmon_data sc7280_llcc_bwmon_data = { + .sample_ms = 4, + .count_unit_kb = 64, + .zone1_thres_count = 16, + .zone3_thres_count = 1, + .quirks = BWMON_NEEDS_FORCE_CLEAR, + .regmap_fields = sdm845_llcc_bwmon_reg_fields, + .regmap_cfg = &sdm845_llcc_bwmon_regmap_cfg, +}; + +static const struct of_device_id bwmon_of_match[] = { + /* BWMONv4, separate monitor and global register spaces */ + { .compatible = "qcom,msm8998-bwmon", .data = &msm8998_bwmon_data }, + /* BWMONv4, unified register space */ + { .compatible = "qcom,sdm845-bwmon", .data = &sdm845_cpu_bwmon_data }, + /* BWMONv5 */ + { .compatible = "qcom,sdm845-llcc-bwmon", .data = &sdm845_llcc_bwmon_data }, + { .compatible = "qcom,sc7280-llcc-bwmon", .data = &sc7280_llcc_bwmon_data }, + + /* Compatibles kept for legacy reasons */ + { .compatible = "qcom,sc7280-cpu-bwmon", .data = &sdm845_cpu_bwmon_data }, + { .compatible = "qcom,sc8280xp-cpu-bwmon", .data = &sdm845_cpu_bwmon_data }, + { .compatible = "qcom,sm8550-cpu-bwmon", .data = &sdm845_cpu_bwmon_data }, + {} +}; +MODULE_DEVICE_TABLE(of, bwmon_of_match); + +static struct platform_driver bwmon_driver = { + .probe = bwmon_probe, + .remove = bwmon_remove, + .driver = { + .name = "qcom-bwmon", + .of_match_table = bwmon_of_match, + }, +}; +module_platform_driver(bwmon_driver); + +MODULE_AUTHOR("Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>"); +MODULE_DESCRIPTION("QCOM BWMON driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/soc/qcom/ice.c b/drivers/soc/qcom/ice.c new 
file mode 100644 index 0000000000..fbab7fe5c6 --- /dev/null +++ b/drivers/soc/qcom/ice.c @@ -0,0 +1,368 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Qualcomm ICE (Inline Crypto Engine) support. + * + * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2019, Google LLC + * Copyright (c) 2023, Linaro Limited + */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/iopoll.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> + +#include <linux/firmware/qcom/qcom_scm.h> + +#include <soc/qcom/ice.h> + +#define AES_256_XTS_KEY_SIZE 64 + +/* QCOM ICE registers */ +#define QCOM_ICE_REG_VERSION 0x0008 +#define QCOM_ICE_REG_FUSE_SETTING 0x0010 +#define QCOM_ICE_REG_BIST_STATUS 0x0070 +#define QCOM_ICE_REG_ADVANCED_CONTROL 0x1000 + +/* BIST ("built-in self-test") status flags */ +#define QCOM_ICE_BIST_STATUS_MASK GENMASK(31, 28) + +#define QCOM_ICE_FUSE_SETTING_MASK 0x1 +#define QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK 0x2 +#define QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK 0x4 + +#define qcom_ice_writel(engine, val, reg) \ + writel((val), (engine)->base + (reg)) + +#define qcom_ice_readl(engine, reg) \ + readl((engine)->base + (reg)) + +struct qcom_ice { + struct device *dev; + void __iomem *base; + struct device_link *link; + + struct clk *core_clk; +}; + +static bool qcom_ice_check_supported(struct qcom_ice *ice) +{ + u32 regval = qcom_ice_readl(ice, QCOM_ICE_REG_VERSION); + struct device *dev = ice->dev; + int major = FIELD_GET(GENMASK(31, 24), regval); + int minor = FIELD_GET(GENMASK(23, 16), regval); + int step = FIELD_GET(GENMASK(15, 0), regval); + + /* For now this driver only supports ICE version 3 and 4. */ + if (major != 3 && major != 4) { + dev_warn(dev, "Unsupported ICE version: v%d.%d.%d\n", + major, minor, step); + return false; + } + + dev_info(dev, "Found QC Inline Crypto Engine (ICE) v%d.%d.%d\n", + major, minor, step); + + /* If fuses are blown, ICE might not work in the standard way. */ + regval = qcom_ice_readl(ice, QCOM_ICE_REG_FUSE_SETTING); + if (regval & (QCOM_ICE_FUSE_SETTING_MASK | + QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK | + QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK)) { + dev_warn(dev, "Fuses are blown; ICE is unusable!\n"); + return false; + } + + return true; +} + +static void qcom_ice_low_power_mode_enable(struct qcom_ice *ice) +{ + u32 regval; + + regval = qcom_ice_readl(ice, QCOM_ICE_REG_ADVANCED_CONTROL); + + /* Enable low power mode sequence */ + regval |= 0x7000; + qcom_ice_writel(ice, regval, QCOM_ICE_REG_ADVANCED_CONTROL); +} + +static void qcom_ice_optimization_enable(struct qcom_ice *ice) +{ + u32 regval; + + /* ICE Optimizations Enable Sequence */ + regval = qcom_ice_readl(ice, QCOM_ICE_REG_ADVANCED_CONTROL); + regval |= 0xd807100; + /* ICE HPG requires delay before writing */ + udelay(5); + qcom_ice_writel(ice, regval, QCOM_ICE_REG_ADVANCED_CONTROL); + udelay(5); +} + +/* + * Wait until the ICE BIST (built-in self-test) has completed. + * + * This may be necessary before ICE can be used. + * Note that we don't really care whether the BIST passed or failed; + * we really just want to make sure that it isn't still running. 
This is + * because (a) the BIST is a FIPS compliance thing that never fails in + * practice, (b) ICE is documented to reject crypto requests if the BIST + * fails, so we needn't do it in software too, and (c) properly testing + * storage encryption requires testing the full storage stack anyway, + * and not relying on hardware-level self-tests. + */ +static int qcom_ice_wait_bist_status(struct qcom_ice *ice) +{ + u32 regval; + int err; + + err = readl_poll_timeout(ice->base + QCOM_ICE_REG_BIST_STATUS, + regval, !(regval & QCOM_ICE_BIST_STATUS_MASK), + 50, 5000); + if (err) + dev_err(ice->dev, "Timed out waiting for ICE self-test to complete\n"); + + return err; +} + +int qcom_ice_enable(struct qcom_ice *ice) +{ + qcom_ice_low_power_mode_enable(ice); + qcom_ice_optimization_enable(ice); + + return qcom_ice_wait_bist_status(ice); +} +EXPORT_SYMBOL_GPL(qcom_ice_enable); + +int qcom_ice_resume(struct qcom_ice *ice) +{ + struct device *dev = ice->dev; + int err; + + err = clk_prepare_enable(ice->core_clk); + if (err) { + dev_err(dev, "failed to enable core clock (%d)\n", + err); + return err; + } + + return qcom_ice_wait_bist_status(ice); +} +EXPORT_SYMBOL_GPL(qcom_ice_resume); + +int qcom_ice_suspend(struct qcom_ice *ice) +{ + clk_disable_unprepare(ice->core_clk); + + return 0; +} +EXPORT_SYMBOL_GPL(qcom_ice_suspend); + +int qcom_ice_program_key(struct qcom_ice *ice, + u8 algorithm_id, u8 key_size, + const u8 crypto_key[], u8 data_unit_size, + int slot) +{ + struct device *dev = ice->dev; + union { + u8 bytes[AES_256_XTS_KEY_SIZE]; + u32 words[AES_256_XTS_KEY_SIZE / sizeof(u32)]; + } key; + int i; + int err; + + /* Only AES-256-XTS has been tested so far. */ + if (algorithm_id != QCOM_ICE_CRYPTO_ALG_AES_XTS || + key_size != QCOM_ICE_CRYPTO_KEY_SIZE_256) { + dev_err_ratelimited(dev, + "Unhandled crypto capability; algorithm_id=%d, key_size=%d\n", + algorithm_id, key_size); + return -EINVAL; + } + + memcpy(key.bytes, crypto_key, AES_256_XTS_KEY_SIZE); + + /* The SCM call requires that the key words are encoded in big endian */ + for (i = 0; i < ARRAY_SIZE(key.words); i++) + __cpu_to_be32s(&key.words[i]); + + err = qcom_scm_ice_set_key(slot, key.bytes, AES_256_XTS_KEY_SIZE, + QCOM_SCM_ICE_CIPHER_AES_256_XTS, + data_unit_size); + + memzero_explicit(&key, sizeof(key)); + + return err; +} +EXPORT_SYMBOL_GPL(qcom_ice_program_key); + +int qcom_ice_evict_key(struct qcom_ice *ice, int slot) +{ + return qcom_scm_ice_invalidate_key(slot); +} +EXPORT_SYMBOL_GPL(qcom_ice_evict_key); + +static struct qcom_ice *qcom_ice_create(struct device *dev, + void __iomem *base) +{ + struct qcom_ice *engine; + + if (!qcom_scm_is_available()) + return ERR_PTR(-EPROBE_DEFER); + + if (!qcom_scm_ice_available()) { + dev_warn(dev, "ICE SCM interface not found\n"); + return NULL; + } + + engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL); + if (!engine) + return ERR_PTR(-ENOMEM); + + engine->dev = dev; + engine->base = base; + + /* + * Legacy DT binding uses different clk names for each consumer, + * so lets try those first. If none of those are a match, it means + * the we only have one clock and it is part of the dedicated DT node. + * Also, enable the clock before we check what HW version the driver + * supports. 
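+ * Note: devm_clk_get_optional_enabled() returns NULL rather than an error when the named clock is absent, which is what allows falling through to the next lookup below.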
+ */ + engine->core_clk = devm_clk_get_optional_enabled(dev, "ice_core_clk"); + if (!engine->core_clk) + engine->core_clk = devm_clk_get_optional_enabled(dev, "ice"); + if (!engine->core_clk) + engine->core_clk = devm_clk_get_enabled(dev, NULL); + if (IS_ERR(engine->core_clk)) + return ERR_CAST(engine->core_clk); + + if (!qcom_ice_check_supported(engine)) + return ERR_PTR(-EOPNOTSUPP); + + dev_dbg(dev, "Registered Qualcomm Inline Crypto Engine\n"); + + return engine; +} + +/** + * of_qcom_ice_get() - get an ICE instance from a DT node + * @dev: device pointer for the consumer device + * + * This function will provide an ICE instance either by creating one for the + * consumer device if its DT node provides the 'ice' reg range and the 'ice' + * clock (for legacy DT style). On the other hand, if consumer provides a + * phandle via 'qcom,ice' property to an ICE DT, the ICE instance will already + * be created and so this function will return that instead. + * + * Return: ICE pointer on success, NULL if there is no ICE data provided by the + * consumer or ERR_PTR() on error. + */ +struct qcom_ice *of_qcom_ice_get(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct qcom_ice *ice; + struct device_node *node; + struct resource *res; + void __iomem *base; + + if (!dev || !dev->of_node) + return ERR_PTR(-ENODEV); + + /* + * In order to support legacy style devicetree bindings, we need + * to create the ICE instance using the consumer device and the reg + * range called 'ice' it provides. + */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ice"); + if (res) { + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) + return ERR_CAST(base); + + /* create ICE instance using consumer dev */ + return qcom_ice_create(&pdev->dev, base); + } + + /* + * If the consumer node does not provider an 'ice' reg range + * (legacy DT binding), then it must at least provide a phandle + * to the ICE devicetree node, otherwise ICE is not supported. 
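+ * If the ICE device behind the "qcom,ice" phandle has not probed yet, -EPROBE_DEFER is returned so the consumer retries later.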
+ */ + node = of_parse_phandle(dev->of_node, "qcom,ice", 0); + if (!node) + return NULL; + + pdev = of_find_device_by_node(node); + if (!pdev) { + dev_err(dev, "Cannot find device node %s\n", node->name); + ice = ERR_PTR(-EPROBE_DEFER); + goto out; + } + + ice = platform_get_drvdata(pdev); + if (!ice) { + dev_err(dev, "Cannot get ice instance from %s\n", + dev_name(&pdev->dev)); + platform_device_put(pdev); + ice = ERR_PTR(-EPROBE_DEFER); + goto out; + } + + ice->link = device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER); + if (!ice->link) { + dev_err(&pdev->dev, + "Failed to create device link to consumer %s\n", + dev_name(dev)); + platform_device_put(pdev); + ice = ERR_PTR(-EINVAL); + } + +out: + of_node_put(node); + + return ice; +} +EXPORT_SYMBOL_GPL(of_qcom_ice_get); + +static int qcom_ice_probe(struct platform_device *pdev) +{ + struct qcom_ice *engine; + void __iomem *base; + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) { + dev_warn(&pdev->dev, "ICE registers not found\n"); + return PTR_ERR(base); + } + + engine = qcom_ice_create(&pdev->dev, base); + if (IS_ERR(engine)) + return PTR_ERR(engine); + + platform_set_drvdata(pdev, engine); + + return 0; +} + +static const struct of_device_id qcom_ice_of_match_table[] = { + { .compatible = "qcom,inline-crypto-engine" }, + { }, +}; +MODULE_DEVICE_TABLE(of, qcom_ice_of_match_table); + +static struct platform_driver qcom_ice_driver = { + .probe = qcom_ice_probe, + .driver = { + .name = "qcom-ice", + .of_match_table = qcom_ice_of_match_table, + }, +}; + +module_platform_driver(qcom_ice_driver); + +MODULE_DESCRIPTION("Qualcomm Inline Crypto Engine driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/soc/qcom/kryo-l2-accessors.c b/drivers/soc/qcom/kryo-l2-accessors.c new file mode 100644 index 0000000000..7886af4fd7 --- /dev/null +++ b/drivers/soc/qcom/kryo-l2-accessors.c @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + */ + +#include <linux/spinlock.h> +#include <asm/barrier.h> +#include <asm/sysreg.h> +#include <soc/qcom/kryo-l2-accessors.h> + +#define L2CPUSRSELR_EL1 sys_reg(3, 3, 15, 0, 6) +#define L2CPUSRDR_EL1 sys_reg(3, 3, 15, 0, 7) + +static DEFINE_RAW_SPINLOCK(l2_access_lock); + +/** + * kryo_l2_set_indirect_reg() - write value to an L2 register + * @reg: Address of L2 register. + * @val: Value to be written to register. + * + * Use architecturally required barriers for ordering between system register + * accesses, and system registers with respect to device memory + */ +void kryo_l2_set_indirect_reg(u64 reg, u64 val) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&l2_access_lock, flags); + write_sysreg_s(reg, L2CPUSRSELR_EL1); + isb(); + write_sysreg_s(val, L2CPUSRDR_EL1); + isb(); + raw_spin_unlock_irqrestore(&l2_access_lock, flags); +} +EXPORT_SYMBOL(kryo_l2_set_indirect_reg); + +/** + * kryo_l2_get_indirect_reg() - read an L2 register value + * @reg: Address of L2 register. 
+ * + * Use architecturally required barriers for ordering between system register + * accesses, and system registers with respect to device memory + */ +u64 kryo_l2_get_indirect_reg(u64 reg) +{ + u64 val; + unsigned long flags; + + raw_spin_lock_irqsave(&l2_access_lock, flags); + write_sysreg_s(reg, L2CPUSRSELR_EL1); + isb(); + val = read_sysreg_s(L2CPUSRDR_EL1); + raw_spin_unlock_irqrestore(&l2_access_lock, flags); + + return val; +} +EXPORT_SYMBOL(kryo_l2_get_indirect_reg); diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c new file mode 100644 index 0000000000..e877aace11 --- /dev/null +++ b/drivers/soc/qcom/llcc-qcom.c @@ -0,0 +1,1083 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + * + */ + +#include <linux/bitfield.h> +#include <linux/bitmap.h> +#include <linux/bitops.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/regmap.h> +#include <linux/sizes.h> +#include <linux/slab.h> +#include <linux/soc/qcom/llcc-qcom.h> + +#define ACTIVATE BIT(0) +#define DEACTIVATE BIT(1) +#define ACT_CLEAR BIT(0) +#define ACT_COMPLETE BIT(4) +#define ACT_CTRL_OPCODE_ACTIVATE BIT(0) +#define ACT_CTRL_OPCODE_DEACTIVATE BIT(1) +#define ACT_CTRL_ACT_TRIG BIT(0) +#define ACT_CTRL_OPCODE_SHIFT 0x01 +#define ATTR1_PROBE_TARGET_WAYS_SHIFT 0x02 +#define ATTR1_FIXED_SIZE_SHIFT 0x03 +#define ATTR1_PRIORITY_SHIFT 0x04 +#define ATTR1_MAX_CAP_SHIFT 0x10 +#define ATTR0_RES_WAYS_MASK GENMASK(15, 0) +#define ATTR0_BONUS_WAYS_MASK GENMASK(31, 16) +#define ATTR0_BONUS_WAYS_SHIFT 0x10 +#define LLCC_STATUS_READ_DELAY 100 + +#define CACHE_LINE_SIZE_SHIFT 6 + +#define LLCC_LB_CNT_MASK GENMASK(31, 28) +#define LLCC_LB_CNT_SHIFT 28 + +#define MAX_CAP_TO_BYTES(n) (n * SZ_1K) +#define LLCC_TRP_ACT_CTRLn(n) (n * SZ_4K) +#define LLCC_TRP_ACT_CLEARn(n) (8 + n * SZ_4K) +#define LLCC_TRP_STATUSn(n) (4 + n * SZ_4K) +#define LLCC_TRP_ATTR0_CFGn(n) (0x21000 + SZ_8 * n) +#define LLCC_TRP_ATTR1_CFGn(n) (0x21004 + SZ_8 * n) +#define LLCC_TRP_ATTR2_CFGn(n) (0x21100 + SZ_4 * n) + +#define LLCC_TRP_SCID_DIS_CAP_ALLOC 0x21f00 +#define LLCC_TRP_PCB_ACT 0x21f04 +#define LLCC_TRP_ALGO_CFG1 0x21f0c +#define LLCC_TRP_ALGO_CFG2 0x21f10 +#define LLCC_TRP_ALGO_CFG3 0x21f14 +#define LLCC_TRP_ALGO_CFG4 0x21f18 +#define LLCC_TRP_ALGO_CFG5 0x21f1c +#define LLCC_TRP_WRSC_EN 0x21f20 +#define LLCC_TRP_ALGO_CFG6 0x21f24 +#define LLCC_TRP_ALGO_CFG7 0x21f28 +#define LLCC_TRP_WRSC_CACHEABLE_EN 0x21f2c +#define LLCC_TRP_ALGO_CFG8 0x21f30 + +#define LLCC_VERSION_2_0_0_0 0x02000000 +#define LLCC_VERSION_2_1_0_0 0x02010000 +#define LLCC_VERSION_4_1_0_0 0x04010000 + +/** + * struct llcc_slice_config - Data associated with the llcc slice + * @usecase_id: Unique id for the client's use case + * @slice_id: llcc slice id for each client + * @max_cap: The maximum capacity of the cache slice provided in KB + * @priority: Priority of the client used to select victim line for replacement + * @fixed_size: Boolean indicating if the slice has a fixed capacity + * @bonus_ways: Bonus ways are additional ways to be used for any slice, + * if client ends up using more than reserved cache ways. Bonus + * ways are allocated only if they are not reserved for some + * other client. + * @res_ways: Reserved ways for the cache slice, the reserved ways cannot + * be used by any other client than the one its assigned to. 
+ * @cache_mode: Each slice operates as a cache, this controls the mode of the + * slice: normal or TCM(Tightly Coupled Memory) + * @probe_target_ways: Determines what ways to probe for access hit. When + * configured to 1 only bonus and reserved ways are probed. + * When configured to 0 all ways in llcc are probed. + * @dis_cap_alloc: Disable capacity based allocation for a client + * @retain_on_pc: If this bit is set and client has maintained active vote + * then the ways assigned to this client are not flushed on power + * collapse. + * @activate_on_init: Activate the slice immediately after it is programmed + * @write_scid_en: Bit enables write cache support for a given scid. + * @write_scid_cacheable_en: Enables write cache cacheable support for a + * given scid (not supported on v2 or older hardware). + */ +struct llcc_slice_config { + u32 usecase_id; + u32 slice_id; + u32 max_cap; + u32 priority; + bool fixed_size; + u32 bonus_ways; + u32 res_ways; + u32 cache_mode; + u32 probe_target_ways; + bool dis_cap_alloc; + bool retain_on_pc; + bool activate_on_init; + bool write_scid_en; + bool write_scid_cacheable_en; + bool stale_en; + bool stale_cap_en; + bool mru_uncap_en; + bool mru_rollover; + bool alloc_oneway_en; + bool ovcap_en; + bool ovcap_prio; + bool vict_prio; +}; + +struct qcom_llcc_config { + const struct llcc_slice_config *sct_data; + const u32 *reg_offset; + const struct llcc_edac_reg_offset *edac_reg_offset; + int size; + bool need_llcc_cfg; + bool no_edac; +}; + +enum llcc_reg_offset { + LLCC_COMMON_HW_INFO, + LLCC_COMMON_STATUS0, +}; + +static const struct llcc_slice_config sc7180_data[] = { + { LLCC_CPUSS, 1, 256, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 1 }, + { LLCC_MDM, 8, 128, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 0 }, + { LLCC_GPUHTW, 11, 128, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 0 }, + { LLCC_GPU, 12, 128, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 0 }, +}; + +static const struct llcc_slice_config sc7280_data[] = { + { LLCC_CPUSS, 1, 768, 1, 0, 0x3f, 0x0, 0, 0, 0, 1, 1, 0}, + { LLCC_MDMHPGRW, 7, 512, 2, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0}, + { LLCC_CMPT, 10, 768, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0}, + { LLCC_GPUHTW, 11, 256, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0}, + { LLCC_GPU, 12, 512, 1, 0, 0x3f, 0x0, 0, 0, 0, 1, 0, 0}, + { LLCC_MMUHWT, 13, 256, 1, 1, 0x3f, 0x0, 0, 0, 0, 0, 1, 0}, + { LLCC_MDMPNG, 21, 768, 0, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0}, + { LLCC_WLHW, 24, 256, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0}, + { LLCC_MODPE, 29, 64, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0}, +}; + +static const struct llcc_slice_config sc8180x_data[] = { + { LLCC_CPUSS, 1, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 1 }, + { LLCC_VIDSC0, 2, 512, 2, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 }, + { LLCC_VIDSC1, 3, 512, 2, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 }, + { LLCC_AUDIO, 6, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 }, + { LLCC_MDMHPGRW, 7, 3072, 1, 1, 0x3ff, 0xc00, 0, 0, 0, 1, 0 }, + { LLCC_MDM, 8, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 }, + { LLCC_MODHW, 9, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 }, + { LLCC_CMPT, 10, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 }, + { LLCC_GPUHTW, 11, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 }, + { LLCC_GPU, 12, 5120, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 }, + { LLCC_MMUHWT, 13, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1 }, + { LLCC_CMPTDMA, 15, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 }, + { LLCC_DISP, 16, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 }, + { LLCC_VIDFW, 17, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 }, + { LLCC_MDMHPFX, 20, 1024, 2, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 }, + { LLCC_MDMPNG, 21, 1024, 0, 1, 0xc, 0x0, 0, 0, 0, 1, 0 }, + { LLCC_AUDHW, 22, 1024, 1, 1, 0xfff, 
0x0, 0, 0, 0, 1, 0 }, + { LLCC_NPU, 23, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 }, + { LLCC_WLHW, 24, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 }, + { LLCC_MODPE, 29, 512, 1, 1, 0xc, 0x0, 0, 0, 0, 1, 0 }, + { LLCC_APTCM, 30, 512, 3, 1, 0x0, 0x1, 1, 0, 0, 1, 0 }, + { LLCC_WRCACHE, 31, 128, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0 }, +}; + +static const struct llcc_slice_config sc8280xp_data[] = { + { LLCC_CPUSS, 1, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 1, 0 }, + { LLCC_VIDSC0, 2, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 }, + { LLCC_AUDIO, 6, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 }, + { LLCC_CMPT, 10, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 }, + { LLCC_GPUHTW, 11, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 }, + { LLCC_GPU, 12, 4096, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 1 }, + { LLCC_MMUHWT, 13, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, + { LLCC_DISP, 16, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 }, + { LLCC_AUDHW, 22, 2048, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 }, + { LLCC_DRE, 26, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 }, + { LLCC_CVP, 28, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 }, + { LLCC_APTCM, 30, 1024, 3, 1, 0x0, 0x1, 1, 0, 0, 1, 0, 0 }, + { LLCC_WRCACHE, 31, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, + { LLCC_CVPFW, 17, 512, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 }, + { LLCC_CPUSS1, 3, 2048, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 }, + { LLCC_CPUHWT, 5, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, +}; + +static const struct llcc_slice_config sdm845_data[] = { + { LLCC_CPUSS, 1, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 1 }, + { LLCC_VIDSC0, 2, 512, 2, 1, 0x0, 0x0f0, 0, 0, 1, 1, 0 }, + { LLCC_VIDSC1, 3, 512, 2, 1, 0x0, 0x0f0, 0, 0, 1, 1, 0 }, + { LLCC_ROTATOR, 4, 563, 2, 1, 0x0, 0x00e, 2, 0, 1, 1, 0 }, + { LLCC_VOICE, 5, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 }, + { LLCC_AUDIO, 6, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 }, + { LLCC_MDMHPGRW, 7, 1024, 2, 0, 0xfc, 0xf00, 0, 0, 1, 1, 0 }, + { LLCC_MDM, 8, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 }, + { LLCC_CMPT, 10, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 }, + { LLCC_GPUHTW, 11, 512, 1, 1, 0xc, 0x0, 0, 0, 1, 1, 0 }, + { LLCC_GPU, 12, 2304, 1, 0, 0xff0, 0x2, 0, 0, 1, 1, 0 }, + { LLCC_MMUHWT, 13, 256, 2, 0, 0x0, 0x1, 0, 0, 1, 0, 1 }, + { LLCC_CMPTDMA, 15, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 }, + { LLCC_DISP, 16, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 }, + { LLCC_VIDFW, 17, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 }, + { LLCC_MDMHPFX, 20, 1024, 2, 1, 0x0, 0xf00, 0, 0, 1, 1, 0 }, + { LLCC_MDMPNG, 21, 1024, 0, 1, 0x1e, 0x0, 0, 0, 1, 1, 0 }, + { LLCC_AUDHW, 22, 1024, 1, 1, 0xffc, 0x2, 0, 0, 1, 1, 0 }, +}; + +static const struct llcc_slice_config sm6350_data[] = { + { LLCC_CPUSS, 1, 768, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 1 }, + { LLCC_MDM, 8, 512, 2, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 }, + { LLCC_GPUHTW, 11, 256, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 }, + { LLCC_GPU, 12, 512, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 }, + { LLCC_MDMPNG, 21, 768, 0, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 }, + { LLCC_NPU, 23, 768, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 }, + { LLCC_MODPE, 29, 64, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 }, +}; + +static const struct llcc_slice_config sm7150_data[] = { + { LLCC_CPUSS, 1, 512, 1, 0, 0xF, 0x0, 0, 0, 0, 1, 1 }, + { LLCC_MDM, 8, 128, 2, 0, 0xF, 0x0, 0, 0, 0, 1, 0 }, + { LLCC_GPUHTW, 11, 256, 1, 1, 0xF, 0x0, 0, 0, 0, 1, 0 }, + { LLCC_GPU, 12, 256, 1, 1, 0xF, 0x0, 0, 0, 0, 1, 0 }, + { LLCC_NPU, 23, 512, 1, 0, 0xF, 0x0, 0, 0, 0, 1, 0 }, +}; + +static const struct llcc_slice_config sm8150_data[] = { + { LLCC_CPUSS, 1, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 1 }, + { LLCC_VIDSC0, 2, 512, 2, 1, 0xFFF, 
0x0, 0, 0, 0, 1, 0 }, + { LLCC_VIDSC1, 3, 512, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 }, + { LLCC_AUDIO, 6, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 }, + { LLCC_MDMHPGRW, 7, 3072, 1, 0, 0xFF, 0xF00, 0, 0, 0, 1, 0 }, + { LLCC_MDM, 8, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 }, + { LLCC_MODHW, 9, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 }, + { LLCC_CMPT, 10, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 }, + { LLCC_GPUHTW , 11, 512, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 }, + { LLCC_GPU, 12, 2560, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 }, + { LLCC_MMUHWT, 13, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1 }, + { LLCC_CMPTDMA, 15, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 }, + { LLCC_DISP, 16, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 }, + { LLCC_MDMHPFX, 20, 1024, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 }, + { LLCC_MDMHPFX, 21, 1024, 0, 1, 0xF, 0x0, 0, 0, 0, 1, 0 }, + { LLCC_AUDHW, 22, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 }, + { LLCC_NPU, 23, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 }, + { LLCC_WLHW, 24, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 }, + { LLCC_MODPE, 29, 256, 1, 1, 0xF, 0x0, 0, 0, 0, 1, 0 }, + { LLCC_APTCM, 30, 256, 3, 1, 0x0, 0x1, 1, 0, 0, 1, 0 }, + { LLCC_WRCACHE, 31, 128, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0 }, +}; + +static const struct llcc_slice_config sm8250_data[] = { + { LLCC_CPUSS, 1, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 1, 0 }, + { LLCC_VIDSC0, 2, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 }, + { LLCC_AUDIO, 6, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 }, + { LLCC_CMPT, 10, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 }, + { LLCC_GPUHTW, 11, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 }, + { LLCC_GPU, 12, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 1 }, + { LLCC_MMUHWT, 13, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, + { LLCC_CMPTDMA, 15, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 }, + { LLCC_DISP, 16, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 }, + { LLCC_VIDFW, 17, 512, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 }, + { LLCC_AUDHW, 22, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 }, + { LLCC_NPU, 23, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 }, + { LLCC_WLHW, 24, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 }, + { LLCC_CVP, 28, 256, 3, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 }, + { LLCC_APTCM, 30, 128, 3, 0, 0x0, 0x3, 1, 0, 0, 1, 0, 0 }, + { LLCC_WRCACHE, 31, 256, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, +}; + +static const struct llcc_slice_config sm8350_data[] = { + { LLCC_CPUSS, 1, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 1 }, + { LLCC_VIDSC0, 2, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, + { LLCC_AUDIO, 6, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 }, + { LLCC_MDMHPGRW, 7, 1024, 3, 0, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, + { LLCC_MODHW, 9, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, + { LLCC_CMPT, 10, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, + { LLCC_GPUHTW, 11, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, + { LLCC_GPU, 12, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 1, 0 }, + { LLCC_MMUHWT, 13, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 1 }, + { LLCC_DISP, 16, 3072, 2, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, + { LLCC_MDMPNG, 21, 1024, 0, 1, 0xf, 0x0, 0, 0, 0, 0, 1, 0 }, + { LLCC_AUDHW, 22, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, + { LLCC_CVP, 28, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, + { LLCC_MODPE, 29, 256, 1, 1, 0xf, 0x0, 0, 0, 0, 0, 1, 0 }, + { LLCC_APTCM, 30, 1024, 3, 1, 0x0, 0x1, 1, 0, 0, 0, 1, 0 }, + { LLCC_WRCACHE, 31, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 1 }, + { LLCC_CVPFW, 17, 512, 1, 0, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, + { LLCC_CPUSS1, 3, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, + { LLCC_CPUHWT, 5, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 1 }, +}; + +static const struct 
llcc_slice_config sm8450_data[] = { + {LLCC_CPUSS, 1, 3072, 1, 0, 0xFFFF, 0x0, 0, 0, 0, 1, 1, 0, 0 }, + {LLCC_VIDSC0, 2, 512, 3, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_AUDIO, 6, 1024, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0 }, + {LLCC_MDMHPGRW, 7, 1024, 3, 0, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_MODHW, 9, 1024, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_CMPT, 10, 4096, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_GPUHTW, 11, 512, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_GPU, 12, 2048, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 1, 0 }, + {LLCC_MMUHWT, 13, 768, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 1, 0, 0 }, + {LLCC_DISP, 16, 4096, 2, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_MDMPNG, 21, 1024, 1, 1, 0xF000, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_AUDHW, 22, 1024, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0 }, + {LLCC_CVP, 28, 256, 3, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_MODPE, 29, 64, 1, 1, 0xF000, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_APTCM, 30, 1024, 3, 1, 0x0, 0xF0, 1, 0, 0, 1, 0, 0, 0 }, + {LLCC_WRCACHE, 31, 512, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 1, 0, 0 }, + {LLCC_CVPFW, 17, 512, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_CPUSS1, 3, 1024, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_CAMEXP0, 4, 256, 3, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_CPUMTE, 23, 256, 1, 1, 0x0FFF, 0x0, 0, 0, 0, 0, 1, 0, 0 }, + {LLCC_CPUHWT, 5, 512, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 1, 0, 0 }, + {LLCC_CAMEXP1, 27, 256, 3, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 }, + {LLCC_AENPU, 8, 2048, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0 }, +}; + +static const struct llcc_slice_config sm8550_data[] = { + {LLCC_CPUSS, 1, 5120, 1, 0, 0xFFFFFF, 0x0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, + {LLCC_VIDSC0, 2, 512, 4, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, + {LLCC_AUDIO, 6, 1024, 1, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, + {LLCC_MDMHPGRW, 25, 1024, 4, 0, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, + {LLCC_MODHW, 26, 1024, 1, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, + {LLCC_CMPT, 10, 4096, 1, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, + {LLCC_GPUHTW, 11, 512, 1, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, + {LLCC_GPU, 9, 3096, 1, 0, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, }, + {LLCC_MMUHWT, 18, 768, 1, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, + {LLCC_DISP, 16, 6144, 1, 1, 0xFFFFFF, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, + {LLCC_MDMPNG, 27, 1024, 0, 1, 0xF00000, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, + {LLCC_AUDHW, 22, 1024, 1, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, + {LLCC_CVP, 8, 256, 4, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, + {LLCC_MODPE, 29, 64, 1, 1, 0xF00000, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, }, + {LLCC_WRCACHE, 31, 512, 1, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, + {LLCC_CAMEXP0, 4, 256, 4, 1, 0xF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, + {LLCC_CPUHWT, 5, 512, 1, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, + {LLCC_CAMEXP1, 7, 3200, 3, 1, 0xFFFFF0, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, + {LLCC_CMPTHCP, 17, 256, 4, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, + {LLCC_LCPDARE, 30, 128, 4, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, }, + {LLCC_AENPU, 3, 3072, 1, 1, 0xFE01FF, 0x0, 2, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, + {LLCC_ISLAND1, 12, 1792, 7, 1, 0xFE00, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, + {LLCC_ISLAND4, 15, 256, 7, 1, 0x10000, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, + {LLCC_CAMEXP2, 19, 3200, 3, 1, 0xFFFFF0, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, + {LLCC_CAMEXP3, 20, 3200, 2, 1, 0xFFFFF0, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, + {LLCC_CAMEXP4, 21, 3200, 2, 1, 0xFFFFF0, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, + {LLCC_DISP_WB, 23, 1024, 4, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, + {LLCC_DISP_1, 24, 6144, 1, 1, 0xFFFFFF, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, + {LLCC_VIDVSP, 28, 256, 4, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, +}; + +static const struct llcc_edac_reg_offset llcc_v1_edac_reg_offset = { + .trp_ecc_error_status0 = 0x20344, + .trp_ecc_error_status1 = 0x20348, + .trp_ecc_sb_err_syn0 = 0x2304c, + .trp_ecc_db_err_syn0 = 0x20370, + .trp_ecc_error_cntr_clear = 0x20440, + .trp_interrupt_0_status = 0x20480, + .trp_interrupt_0_clear = 0x20484, + .trp_interrupt_0_enable = 0x20488, + + /* LLCC Common registers */ + .cmn_status0 = 0x3000c, + .cmn_interrupt_0_enable = 0x3001c, + .cmn_interrupt_2_enable = 0x3003c, + + /* LLCC DRP registers */ + .drp_ecc_error_cfg = 0x40000, + .drp_ecc_error_cntr_clear = 0x40004, + .drp_interrupt_status = 0x41000, + .drp_interrupt_clear = 0x41008, + .drp_interrupt_enable = 0x4100c, + .drp_ecc_error_status0 = 0x42044, + .drp_ecc_error_status1 = 0x42048, + .drp_ecc_sb_err_syn0 = 0x4204c, + .drp_ecc_db_err_syn0 = 0x42070, +}; + +static const struct llcc_edac_reg_offset llcc_v2_1_edac_reg_offset = { + .trp_ecc_error_status0 = 0x20344, + .trp_ecc_error_status1 = 0x20348, + .trp_ecc_sb_err_syn0 = 0x2034c, + .trp_ecc_db_err_syn0 = 0x20370, + .trp_ecc_error_cntr_clear = 0x20440, + .trp_interrupt_0_status = 0x20480, + .trp_interrupt_0_clear = 0x20484, + .trp_interrupt_0_enable = 0x20488, + + /* LLCC Common registers */ + .cmn_status0 = 0x3400c, + .cmn_interrupt_0_enable = 0x3401c, + .cmn_interrupt_2_enable = 0x3403c, + + /* LLCC DRP registers */ + .drp_ecc_error_cfg = 0x50000, + .drp_ecc_error_cntr_clear = 0x50004, + .drp_interrupt_status = 0x50020, + .drp_interrupt_clear = 0x50028, + .drp_interrupt_enable = 0x5002c, + .drp_ecc_error_status0 = 0x520f4, + .drp_ecc_error_status1 = 0x520f8, + .drp_ecc_sb_err_syn0 = 0x520fc, + .drp_ecc_db_err_syn0 = 0x52120, +}; + +/* LLCC register offset starting from v1.0.0 */ +static const u32 llcc_v1_reg_offset[] = { + [LLCC_COMMON_HW_INFO] = 0x00030000, + [LLCC_COMMON_STATUS0] = 0x0003000c, +}; + +/* LLCC register offset starting from v2.0.1 */ +static const u32 llcc_v2_1_reg_offset[] = { + [LLCC_COMMON_HW_INFO] = 0x00034000, + [LLCC_COMMON_STATUS0] = 0x0003400c, +}; + +static const struct qcom_llcc_config sc7180_cfg = { + .sct_data = sc7180_data, + .size = ARRAY_SIZE(sc7180_data), + .need_llcc_cfg = true, + .reg_offset = llcc_v1_reg_offset, + .edac_reg_offset = &llcc_v1_edac_reg_offset, +}; + +static const struct qcom_llcc_config sc7280_cfg = { + .sct_data = sc7280_data, + .size = ARRAY_SIZE(sc7280_data), + .need_llcc_cfg = true, + .reg_offset = llcc_v1_reg_offset, + .edac_reg_offset = &llcc_v1_edac_reg_offset, +}; + +static const struct qcom_llcc_config sc8180x_cfg = { + .sct_data = sc8180x_data, + .size = ARRAY_SIZE(sc8180x_data), + .need_llcc_cfg = true, + .reg_offset = llcc_v1_reg_offset, + .edac_reg_offset = &llcc_v1_edac_reg_offset, +}; + +static const struct 
qcom_llcc_config sc8280xp_cfg = { + .sct_data = sc8280xp_data, + .size = ARRAY_SIZE(sc8280xp_data), + .need_llcc_cfg = true, + .reg_offset = llcc_v1_reg_offset, + .edac_reg_offset = &llcc_v1_edac_reg_offset, +}; + +static const struct qcom_llcc_config sdm845_cfg = { + .sct_data = sdm845_data, + .size = ARRAY_SIZE(sdm845_data), + .need_llcc_cfg = false, + .reg_offset = llcc_v1_reg_offset, + .edac_reg_offset = &llcc_v1_edac_reg_offset, + .no_edac = true, +}; + +static const struct qcom_llcc_config sm6350_cfg = { + .sct_data = sm6350_data, + .size = ARRAY_SIZE(sm6350_data), + .need_llcc_cfg = true, + .reg_offset = llcc_v1_reg_offset, + .edac_reg_offset = &llcc_v1_edac_reg_offset, +}; + +static const struct qcom_llcc_config sm7150_cfg = { + .sct_data = sm7150_data, + .size = ARRAY_SIZE(sm7150_data), + .need_llcc_cfg = true, + .reg_offset = llcc_v1_reg_offset, + .edac_reg_offset = &llcc_v1_edac_reg_offset, +}; + +static const struct qcom_llcc_config sm8150_cfg = { + .sct_data = sm8150_data, + .size = ARRAY_SIZE(sm8150_data), + .need_llcc_cfg = true, + .reg_offset = llcc_v1_reg_offset, + .edac_reg_offset = &llcc_v1_edac_reg_offset, +}; + +static const struct qcom_llcc_config sm8250_cfg = { + .sct_data = sm8250_data, + .size = ARRAY_SIZE(sm8250_data), + .need_llcc_cfg = true, + .reg_offset = llcc_v1_reg_offset, + .edac_reg_offset = &llcc_v1_edac_reg_offset, +}; + +static const struct qcom_llcc_config sm8350_cfg = { + .sct_data = sm8350_data, + .size = ARRAY_SIZE(sm8350_data), + .need_llcc_cfg = true, + .reg_offset = llcc_v1_reg_offset, + .edac_reg_offset = &llcc_v1_edac_reg_offset, +}; + +static const struct qcom_llcc_config sm8450_cfg = { + .sct_data = sm8450_data, + .size = ARRAY_SIZE(sm8450_data), + .need_llcc_cfg = true, + .reg_offset = llcc_v2_1_reg_offset, + .edac_reg_offset = &llcc_v2_1_edac_reg_offset, +}; + +static const struct qcom_llcc_config sm8550_cfg = { + .sct_data = sm8550_data, + .size = ARRAY_SIZE(sm8550_data), + .need_llcc_cfg = true, + .reg_offset = llcc_v2_1_reg_offset, + .edac_reg_offset = &llcc_v2_1_edac_reg_offset, +}; + +static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER; + +/** + * llcc_slice_getd - get llcc slice descriptor + * @uid: usecase_id for the client + * + * A pointer to llcc slice descriptor will be returned on success + * and error pointer is returned on failure + */ +struct llcc_slice_desc *llcc_slice_getd(u32 uid) +{ + const struct llcc_slice_config *cfg; + struct llcc_slice_desc *desc; + u32 sz, count; + + if (IS_ERR(drv_data)) + return ERR_CAST(drv_data); + + cfg = drv_data->cfg; + sz = drv_data->cfg_size; + + for (count = 0; cfg && count < sz; count++, cfg++) + if (cfg->usecase_id == uid) + break; + + if (count == sz || !cfg) + return ERR_PTR(-ENODEV); + + desc = kzalloc(sizeof(*desc), GFP_KERNEL); + if (!desc) + return ERR_PTR(-ENOMEM); + + desc->slice_id = cfg->slice_id; + desc->slice_size = cfg->max_cap; + + return desc; +} +EXPORT_SYMBOL_GPL(llcc_slice_getd); + +/** + * llcc_slice_putd - llcc slice descritpor + * @desc: Pointer to llcc slice descriptor + */ +void llcc_slice_putd(struct llcc_slice_desc *desc) +{ + if (!IS_ERR_OR_NULL(desc)) + kfree(desc); +} +EXPORT_SYMBOL_GPL(llcc_slice_putd); + +static int llcc_update_act_ctrl(u32 sid, + u32 act_ctrl_reg_val, u32 status) +{ + u32 act_ctrl_reg; + u32 act_clear_reg; + u32 status_reg; + u32 slice_status; + int ret; + + if (IS_ERR(drv_data)) + return PTR_ERR(drv_data); + + act_ctrl_reg = LLCC_TRP_ACT_CTRLn(sid); + act_clear_reg = LLCC_TRP_ACT_CLEARn(sid); + status_reg = 
LLCC_TRP_STATUSn(sid); + + /* Set the ACTIVE trigger */ + act_ctrl_reg_val |= ACT_CTRL_ACT_TRIG; + ret = regmap_write(drv_data->bcast_regmap, act_ctrl_reg, + act_ctrl_reg_val); + if (ret) + return ret; + + /* Clear the ACTIVE trigger */ + act_ctrl_reg_val &= ~ACT_CTRL_ACT_TRIG; + ret = regmap_write(drv_data->bcast_regmap, act_ctrl_reg, + act_ctrl_reg_val); + if (ret) + return ret; + + if (drv_data->version >= LLCC_VERSION_4_1_0_0) { + ret = regmap_read_poll_timeout(drv_data->bcast_regmap, status_reg, + slice_status, (slice_status & ACT_COMPLETE), + 0, LLCC_STATUS_READ_DELAY); + if (ret) + return ret; + } + + ret = regmap_read_poll_timeout(drv_data->bcast_regmap, status_reg, + slice_status, !(slice_status & status), + 0, LLCC_STATUS_READ_DELAY); + + if (drv_data->version >= LLCC_VERSION_4_1_0_0) + ret = regmap_write(drv_data->bcast_regmap, act_clear_reg, + ACT_CLEAR); + + return ret; +} + +/** + * llcc_slice_activate - Activate the llcc slice + * @desc: Pointer to llcc slice descriptor + * + * A value of zero will be returned on success and a negative errno will + * be returned in error cases + */ +int llcc_slice_activate(struct llcc_slice_desc *desc) +{ + int ret; + u32 act_ctrl_val; + + if (IS_ERR(drv_data)) + return PTR_ERR(drv_data); + + if (IS_ERR_OR_NULL(desc)) + return -EINVAL; + + mutex_lock(&drv_data->lock); + if (test_bit(desc->slice_id, drv_data->bitmap)) { + mutex_unlock(&drv_data->lock); + return 0; + } + + act_ctrl_val = ACT_CTRL_OPCODE_ACTIVATE << ACT_CTRL_OPCODE_SHIFT; + + ret = llcc_update_act_ctrl(desc->slice_id, act_ctrl_val, + DEACTIVATE); + if (ret) { + mutex_unlock(&drv_data->lock); + return ret; + } + + __set_bit(desc->slice_id, drv_data->bitmap); + mutex_unlock(&drv_data->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(llcc_slice_activate); + +/** + * llcc_slice_deactivate - Deactivate the llcc slice + * @desc: Pointer to llcc slice descriptor + * + * A value of zero will be returned on success and a negative errno will + * be returned in error cases + */ +int llcc_slice_deactivate(struct llcc_slice_desc *desc) +{ + u32 act_ctrl_val; + int ret; + + if (IS_ERR(drv_data)) + return PTR_ERR(drv_data); + + if (IS_ERR_OR_NULL(desc)) + return -EINVAL; + + mutex_lock(&drv_data->lock); + if (!test_bit(desc->slice_id, drv_data->bitmap)) { + mutex_unlock(&drv_data->lock); + return 0; + } + act_ctrl_val = ACT_CTRL_OPCODE_DEACTIVATE << ACT_CTRL_OPCODE_SHIFT; + + ret = llcc_update_act_ctrl(desc->slice_id, act_ctrl_val, + ACTIVATE); + if (ret) { + mutex_unlock(&drv_data->lock); + return ret; + } + + __clear_bit(desc->slice_id, drv_data->bitmap); + mutex_unlock(&drv_data->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(llcc_slice_deactivate); + +/** + * llcc_get_slice_id - return the slice id + * @desc: Pointer to llcc slice descriptor + */ +int llcc_get_slice_id(struct llcc_slice_desc *desc) +{ + if (IS_ERR_OR_NULL(desc)) + return -EINVAL; + + return desc->slice_id; +} +EXPORT_SYMBOL_GPL(llcc_get_slice_id); + +/** + * llcc_get_slice_size - return the slice id + * @desc: Pointer to llcc slice descriptor + */ +size_t llcc_get_slice_size(struct llcc_slice_desc *desc) +{ + if (IS_ERR_OR_NULL(desc)) + return 0; + + return desc->slice_size; +} +EXPORT_SYMBOL_GPL(llcc_get_slice_size); + +static int _qcom_llcc_cfg_program(const struct llcc_slice_config *config, + const struct qcom_llcc_config *cfg) +{ + int ret; + u32 attr2_cfg; + u32 attr1_cfg; + u32 attr0_cfg; + u32 attr2_val; + u32 attr1_val; + u32 attr0_val; + u32 max_cap_cacheline; + struct llcc_slice_desc desc; + + attr1_val = 
config->cache_mode; + attr1_val |= config->probe_target_ways << ATTR1_PROBE_TARGET_WAYS_SHIFT; + attr1_val |= config->fixed_size << ATTR1_FIXED_SIZE_SHIFT; + attr1_val |= config->priority << ATTR1_PRIORITY_SHIFT; + + max_cap_cacheline = MAX_CAP_TO_BYTES(config->max_cap); + + /* + * LLCC instances can vary for each target. + * The SW writes to broadcast register which gets propagated + * to each llcc instance (llcc0,.. llccN). + * Since the size of the memory is divided equally amongst the + * llcc instances, we need to configure the max cap accordingly. + */ + max_cap_cacheline = max_cap_cacheline / drv_data->num_banks; + max_cap_cacheline >>= CACHE_LINE_SIZE_SHIFT; + attr1_val |= max_cap_cacheline << ATTR1_MAX_CAP_SHIFT; + + attr1_cfg = LLCC_TRP_ATTR1_CFGn(config->slice_id); + + ret = regmap_write(drv_data->bcast_regmap, attr1_cfg, attr1_val); + if (ret) + return ret; + + if (drv_data->version >= LLCC_VERSION_4_1_0_0) { + attr2_cfg = LLCC_TRP_ATTR2_CFGn(config->slice_id); + attr0_val = config->res_ways; + attr2_val = config->bonus_ways; + } else { + attr0_val = config->res_ways & ATTR0_RES_WAYS_MASK; + attr0_val |= config->bonus_ways << ATTR0_BONUS_WAYS_SHIFT; + } + + attr0_cfg = LLCC_TRP_ATTR0_CFGn(config->slice_id); + + ret = regmap_write(drv_data->bcast_regmap, attr0_cfg, attr0_val); + if (ret) + return ret; + + if (drv_data->version >= LLCC_VERSION_4_1_0_0) { + ret = regmap_write(drv_data->bcast_regmap, attr2_cfg, attr2_val); + if (ret) + return ret; + } + + if (cfg->need_llcc_cfg) { + u32 disable_cap_alloc, retain_pc; + + disable_cap_alloc = config->dis_cap_alloc << config->slice_id; + ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_SCID_DIS_CAP_ALLOC, + BIT(config->slice_id), disable_cap_alloc); + if (ret) + return ret; + + if (drv_data->version < LLCC_VERSION_4_1_0_0) { + retain_pc = config->retain_on_pc << config->slice_id; + ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_PCB_ACT, + BIT(config->slice_id), retain_pc); + if (ret) + return ret; + } + } + + if (drv_data->version >= LLCC_VERSION_2_0_0_0) { + u32 wren; + + wren = config->write_scid_en << config->slice_id; + ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_WRSC_EN, + BIT(config->slice_id), wren); + if (ret) + return ret; + } + + if (drv_data->version >= LLCC_VERSION_2_1_0_0) { + u32 wr_cache_en; + + wr_cache_en = config->write_scid_cacheable_en << config->slice_id; + ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_WRSC_CACHEABLE_EN, + BIT(config->slice_id), wr_cache_en); + if (ret) + return ret; + } + + if (drv_data->version >= LLCC_VERSION_4_1_0_0) { + u32 stale_en; + u32 stale_cap_en; + u32 mru_uncap_en; + u32 mru_rollover; + u32 alloc_oneway_en; + u32 ovcap_en; + u32 ovcap_prio; + u32 vict_prio; + + stale_en = config->stale_en << config->slice_id; + ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_ALGO_CFG1, + BIT(config->slice_id), stale_en); + if (ret) + return ret; + + stale_cap_en = config->stale_cap_en << config->slice_id; + ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_ALGO_CFG2, + BIT(config->slice_id), stale_cap_en); + if (ret) + return ret; + + mru_uncap_en = config->mru_uncap_en << config->slice_id; + ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_ALGO_CFG3, + BIT(config->slice_id), mru_uncap_en); + if (ret) + return ret; + + mru_rollover = config->mru_rollover << config->slice_id; + ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_ALGO_CFG4, + BIT(config->slice_id), mru_rollover); + if (ret) + return ret; + + alloc_oneway_en = 
config->alloc_oneway_en << config->slice_id; + ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_ALGO_CFG5, + BIT(config->slice_id), alloc_oneway_en); + if (ret) + return ret; + + ovcap_en = config->ovcap_en << config->slice_id; + ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_ALGO_CFG6, + BIT(config->slice_id), ovcap_en); + if (ret) + return ret; + + ovcap_prio = config->ovcap_prio << config->slice_id; + ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_ALGO_CFG7, + BIT(config->slice_id), ovcap_prio); + if (ret) + return ret; + + vict_prio = config->vict_prio << config->slice_id; + ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_ALGO_CFG8, + BIT(config->slice_id), vict_prio); + if (ret) + return ret; + } + + if (config->activate_on_init) { + desc.slice_id = config->slice_id; + ret = llcc_slice_activate(&desc); + } + + return ret; +} + +static int qcom_llcc_cfg_program(struct platform_device *pdev, + const struct qcom_llcc_config *cfg) +{ + int i; + u32 sz; + int ret = 0; + const struct llcc_slice_config *llcc_table; + + sz = drv_data->cfg_size; + llcc_table = drv_data->cfg; + + for (i = 0; i < sz; i++) { + ret = _qcom_llcc_cfg_program(&llcc_table[i], cfg); + if (ret) + return ret; + } + + return ret; +} + +static int qcom_llcc_remove(struct platform_device *pdev) +{ + /* Set the global pointer to a error code to avoid referencing it */ + drv_data = ERR_PTR(-ENODEV); + return 0; +} + +static struct regmap *qcom_llcc_init_mmio(struct platform_device *pdev, u8 index, + const char *name) +{ + void __iomem *base; + struct regmap_config llcc_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .fast_io = true, + }; + + base = devm_platform_ioremap_resource(pdev, index); + if (IS_ERR(base)) + return ERR_CAST(base); + + llcc_regmap_config.name = name; + return devm_regmap_init_mmio(&pdev->dev, base, &llcc_regmap_config); +} + +static int qcom_llcc_probe(struct platform_device *pdev) +{ + u32 num_banks; + struct device *dev = &pdev->dev; + int ret, i; + struct platform_device *llcc_edac; + const struct qcom_llcc_config *cfg; + const struct llcc_slice_config *llcc_cfg; + u32 sz; + u32 version; + struct regmap *regmap; + + if (!IS_ERR(drv_data)) + return -EBUSY; + + drv_data = devm_kzalloc(dev, sizeof(*drv_data), GFP_KERNEL); + if (!drv_data) { + ret = -ENOMEM; + goto err; + } + + /* Initialize the first LLCC bank regmap */ + regmap = qcom_llcc_init_mmio(pdev, 0, "llcc0_base"); + if (IS_ERR(regmap)) { + ret = PTR_ERR(regmap); + goto err; + } + + cfg = of_device_get_match_data(&pdev->dev); + + ret = regmap_read(regmap, cfg->reg_offset[LLCC_COMMON_STATUS0], &num_banks); + if (ret) + goto err; + + num_banks &= LLCC_LB_CNT_MASK; + num_banks >>= LLCC_LB_CNT_SHIFT; + drv_data->num_banks = num_banks; + + drv_data->regmaps = devm_kcalloc(dev, num_banks, sizeof(*drv_data->regmaps), GFP_KERNEL); + if (!drv_data->regmaps) { + ret = -ENOMEM; + goto err; + } + + drv_data->regmaps[0] = regmap; + + /* Initialize rest of LLCC bank regmaps */ + for (i = 1; i < num_banks; i++) { + char *base = kasprintf(GFP_KERNEL, "llcc%d_base", i); + + drv_data->regmaps[i] = qcom_llcc_init_mmio(pdev, i, base); + if (IS_ERR(drv_data->regmaps[i])) { + ret = PTR_ERR(drv_data->regmaps[i]); + kfree(base); + goto err; + } + + kfree(base); + } + + drv_data->bcast_regmap = qcom_llcc_init_mmio(pdev, i, "llcc_broadcast_base"); + if (IS_ERR(drv_data->bcast_regmap)) { + ret = PTR_ERR(drv_data->bcast_regmap); + goto err; + } + + /* Extract version of the IP */ + ret = 
regmap_read(drv_data->bcast_regmap, cfg->reg_offset[LLCC_COMMON_HW_INFO], + &version); + if (ret) + goto err; + + drv_data->version = version; + + llcc_cfg = cfg->sct_data; + sz = cfg->size; + + for (i = 0; i < sz; i++) + if (llcc_cfg[i].slice_id > drv_data->max_slices) + drv_data->max_slices = llcc_cfg[i].slice_id; + + drv_data->bitmap = devm_bitmap_zalloc(dev, drv_data->max_slices, + GFP_KERNEL); + if (!drv_data->bitmap) { + ret = -ENOMEM; + goto err; + } + + drv_data->cfg = llcc_cfg; + drv_data->cfg_size = sz; + drv_data->edac_reg_offset = cfg->edac_reg_offset; + mutex_init(&drv_data->lock); + platform_set_drvdata(pdev, drv_data); + + ret = qcom_llcc_cfg_program(pdev, cfg); + if (ret) + goto err; + + drv_data->ecc_irq = platform_get_irq_optional(pdev, 0); + + /* + * On some platforms, the access to EDAC registers will be locked by + * the bootloader. So probing the EDAC driver will result in a crash. + * Hence, disable the creation of EDAC platform device for the + * problematic platforms. + */ + if (!cfg->no_edac) { + llcc_edac = platform_device_register_data(&pdev->dev, + "qcom_llcc_edac", -1, drv_data, + sizeof(*drv_data)); + if (IS_ERR(llcc_edac)) + dev_err(dev, "Failed to register llcc edac driver\n"); + } + + return 0; +err: + drv_data = ERR_PTR(-ENODEV); + return ret; +} + +static const struct of_device_id qcom_llcc_of_match[] = { + { .compatible = "qcom,sc7180-llcc", .data = &sc7180_cfg }, + { .compatible = "qcom,sc7280-llcc", .data = &sc7280_cfg }, + { .compatible = "qcom,sc8180x-llcc", .data = &sc8180x_cfg }, + { .compatible = "qcom,sc8280xp-llcc", .data = &sc8280xp_cfg }, + { .compatible = "qcom,sdm845-llcc", .data = &sdm845_cfg }, + { .compatible = "qcom,sm6350-llcc", .data = &sm6350_cfg }, + { .compatible = "qcom,sm7150-llcc", .data = &sm7150_cfg }, + { .compatible = "qcom,sm8150-llcc", .data = &sm8150_cfg }, + { .compatible = "qcom,sm8250-llcc", .data = &sm8250_cfg }, + { .compatible = "qcom,sm8350-llcc", .data = &sm8350_cfg }, + { .compatible = "qcom,sm8450-llcc", .data = &sm8450_cfg }, + { .compatible = "qcom,sm8550-llcc", .data = &sm8550_cfg }, + { } +}; +MODULE_DEVICE_TABLE(of, qcom_llcc_of_match); + +static struct platform_driver qcom_llcc_driver = { + .driver = { + .name = "qcom-llcc", + .of_match_table = qcom_llcc_of_match, + }, + .probe = qcom_llcc_probe, + .remove = qcom_llcc_remove, +}; +module_platform_driver(qcom_llcc_driver); + +MODULE_DESCRIPTION("Qualcomm Last Level Cache Controller"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c new file mode 100644 index 0000000000..6f177e46fa --- /dev/null +++ b/drivers/soc/qcom/mdt_loader.c @@ -0,0 +1,449 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Qualcomm Peripheral Image Loader + * + * Copyright (C) 2016 Linaro Ltd + * Copyright (C) 2015 Sony Mobile Communications Inc + * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. 
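+ *
+ * The helpers below expect an .mdt image consisting of an ELF header plus a
+ * hash segment; the loadable segments either follow in the same file or are
+ * split out into companion .bNN files (one per program header), which
+ * mdt_load_split_segment() requests by name.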
+ */
+
+#include <linux/device.h>
+#include <linux/elf.h>
+#include <linux/firmware.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/firmware/qcom/qcom_scm.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/mdt_loader.h>
+
+static bool mdt_phdr_valid(const struct elf32_phdr *phdr)
+{
+	if (phdr->p_type != PT_LOAD)
+		return false;
+
+	if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
+		return false;
+
+	if (!phdr->p_memsz)
+		return false;
+
+	return true;
+}
+
+static ssize_t mdt_load_split_segment(void *ptr, const struct elf32_phdr *phdrs,
+				      unsigned int segment, const char *fw_name,
+				      struct device *dev)
+{
+	const struct elf32_phdr *phdr = &phdrs[segment];
+	const struct firmware *seg_fw;
+	char *seg_name;
+	ssize_t ret;
+
+	if (strlen(fw_name) < 4)
+		return -EINVAL;
+
+	seg_name = kstrdup(fw_name, GFP_KERNEL);
+	if (!seg_name)
+		return -ENOMEM;
+
+	sprintf(seg_name + strlen(fw_name) - 3, "b%02d", segment);
+	ret = request_firmware_into_buf(&seg_fw, seg_name, dev,
+					ptr, phdr->p_filesz);
+	if (ret) {
+		dev_err(dev, "error %zd loading %s\n", ret, seg_name);
+		kfree(seg_name);
+		return ret;
+	}
+
+	if (seg_fw->size != phdr->p_filesz) {
+		dev_err(dev,
+			"failed to load segment %d from truncated file %s\n",
+			segment, seg_name);
+		ret = -EINVAL;
+	}
+
+	release_firmware(seg_fw);
+	kfree(seg_name);
+
+	return ret;
+}
+
+/**
+ * qcom_mdt_get_size() - acquire size of the memory region needed to load mdt
+ * @fw: firmware object for the mdt file
+ *
+ * Returns the size of the memory region needed to load the firmware, or
+ * -EINVAL on failure.
+ */
+ssize_t qcom_mdt_get_size(const struct firmware *fw)
+{
+	const struct elf32_phdr *phdrs;
+	const struct elf32_phdr *phdr;
+	const struct elf32_hdr *ehdr;
+	phys_addr_t min_addr = PHYS_ADDR_MAX;
+	phys_addr_t max_addr = 0;
+	int i;
+
+	ehdr = (struct elf32_hdr *)fw->data;
+	phdrs = (struct elf32_phdr *)(ehdr + 1);
+
+	for (i = 0; i < ehdr->e_phnum; i++) {
+		phdr = &phdrs[i];
+
+		if (!mdt_phdr_valid(phdr))
+			continue;
+
+		if (phdr->p_paddr < min_addr)
+			min_addr = phdr->p_paddr;
+
+		if (phdr->p_paddr + phdr->p_memsz > max_addr)
+			max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
+	}
+
+	return min_addr < max_addr ? max_addr - min_addr : -EINVAL;
+}
+EXPORT_SYMBOL_GPL(qcom_mdt_get_size);
+
+/**
+ * qcom_mdt_read_metadata() - read header and metadata from mdt or mbn
+ * @fw: firmware of mdt header or mbn
+ * @data_len: length of the read metadata blob
+ * @fw_name: name of the firmware, for construction of segment file names
+ * @dev: device handle to associate resources with
+ *
+ * The mechanism that performs the authentication of the loading firmware
+ * expects an ELF header directly followed by the segment of hashes, with no
+ * padding in between. This function allocates a chunk of memory for this pair
+ * and copies the two pieces into the buffer.
+ *
+ * In the case of split firmware the hash is found directly following the ELF
+ * header, rather than at p_offset described by the second program header.
+ *
+ * The caller is responsible for freeing (kfree()) the returned pointer.
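+ *
+ * A minimal usage sketch (the "modem.mdt" name and the request_firmware()
+ * call that produced @fw are illustrative assumptions, not part of this API):
+ *
+ *	size_t len;
+ *	void *metadata = qcom_mdt_read_metadata(fw, &len, "modem.mdt", dev);
+ *
+ *	if (IS_ERR(metadata))
+ *		return PTR_ERR(metadata);
+ *	...hand metadata and len to the authenticating entity...
+ *	kfree(metadata);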
+ * + * Return: pointer to data, or ERR_PTR() + */ +void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len, + const char *fw_name, struct device *dev) +{ + const struct elf32_phdr *phdrs; + const struct elf32_hdr *ehdr; + unsigned int hash_segment = 0; + size_t hash_offset; + size_t hash_size; + size_t ehdr_size; + unsigned int i; + ssize_t ret; + void *data; + + ehdr = (struct elf32_hdr *)fw->data; + phdrs = (struct elf32_phdr *)(ehdr + 1); + + if (ehdr->e_phnum < 2) + return ERR_PTR(-EINVAL); + + if (phdrs[0].p_type == PT_LOAD) + return ERR_PTR(-EINVAL); + + for (i = 1; i < ehdr->e_phnum; i++) { + if ((phdrs[i].p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH) { + hash_segment = i; + break; + } + } + + if (!hash_segment) { + dev_err(dev, "no hash segment found in %s\n", fw_name); + return ERR_PTR(-EINVAL); + } + + ehdr_size = phdrs[0].p_filesz; + hash_size = phdrs[hash_segment].p_filesz; + + data = kmalloc(ehdr_size + hash_size, GFP_KERNEL); + if (!data) + return ERR_PTR(-ENOMEM); + + /* Copy ELF header */ + memcpy(data, fw->data, ehdr_size); + + if (ehdr_size + hash_size == fw->size) { + /* Firmware is split and hash is packed following the ELF header */ + hash_offset = phdrs[0].p_filesz; + memcpy(data + ehdr_size, fw->data + hash_offset, hash_size); + } else if (phdrs[hash_segment].p_offset + hash_size <= fw->size) { + /* Hash is in its own segment, but within the loaded file */ + hash_offset = phdrs[hash_segment].p_offset; + memcpy(data + ehdr_size, fw->data + hash_offset, hash_size); + } else { + /* Hash is in its own segment, beyond the loaded file */ + ret = mdt_load_split_segment(data + ehdr_size, phdrs, hash_segment, fw_name, dev); + if (ret) { + kfree(data); + return ERR_PTR(ret); + } + } + + *data_len = ehdr_size + hash_size; + + return data; +} +EXPORT_SYMBOL_GPL(qcom_mdt_read_metadata); + +/** + * qcom_mdt_pas_init() - initialize PAS region for firmware loading + * @dev: device handle to associate resources with + * @fw: firmware object for the mdt file + * @fw_name: name of the firmware, for construction of segment file names + * @pas_id: PAS identifier + * @mem_phys: physical address of allocated memory region + * @ctx: PAS metadata context, to be released by caller + * + * Returns 0 on success, negative errno otherwise. 
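+ *
+ * One possible pairing, sketched as an assumption rather than a requirement:
+ * a caller that keeps authentication separate from loading can follow this
+ * with qcom_mdt_load_no_init() using the same firmware and memory parameters
+ * (fw_name, pas_id, mem_region, mem_phys, mem_size, reloc_base and ctx are
+ * caller-provided):
+ *
+ *	ret = qcom_mdt_pas_init(dev, fw, fw_name, pas_id, mem_phys, &ctx);
+ *	if (ret)
+ *		return ret;
+ *	ret = qcom_mdt_load_no_init(dev, fw, fw_name, pas_id, mem_region,
+ *				    mem_phys, mem_size, &reloc_base);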
+ */ +int qcom_mdt_pas_init(struct device *dev, const struct firmware *fw, + const char *fw_name, int pas_id, phys_addr_t mem_phys, + struct qcom_scm_pas_metadata *ctx) +{ + const struct elf32_phdr *phdrs; + const struct elf32_phdr *phdr; + const struct elf32_hdr *ehdr; + phys_addr_t min_addr = PHYS_ADDR_MAX; + phys_addr_t max_addr = 0; + bool relocate = false; + size_t metadata_len; + void *metadata; + int ret; + int i; + + ehdr = (struct elf32_hdr *)fw->data; + phdrs = (struct elf32_phdr *)(ehdr + 1); + + for (i = 0; i < ehdr->e_phnum; i++) { + phdr = &phdrs[i]; + + if (!mdt_phdr_valid(phdr)) + continue; + + if (phdr->p_flags & QCOM_MDT_RELOCATABLE) + relocate = true; + + if (phdr->p_paddr < min_addr) + min_addr = phdr->p_paddr; + + if (phdr->p_paddr + phdr->p_memsz > max_addr) + max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K); + } + + metadata = qcom_mdt_read_metadata(fw, &metadata_len, fw_name, dev); + if (IS_ERR(metadata)) { + ret = PTR_ERR(metadata); + dev_err(dev, "error %d reading firmware %s metadata\n", ret, fw_name); + goto out; + } + + ret = qcom_scm_pas_init_image(pas_id, metadata, metadata_len, ctx); + kfree(metadata); + if (ret) { + /* Invalid firmware metadata */ + dev_err(dev, "error %d initializing firmware %s\n", ret, fw_name); + goto out; + } + + if (relocate) { + ret = qcom_scm_pas_mem_setup(pas_id, mem_phys, max_addr - min_addr); + if (ret) { + /* Unable to set up relocation */ + dev_err(dev, "error %d setting up firmware %s\n", ret, fw_name); + goto out; + } + } + +out: + return ret; +} +EXPORT_SYMBOL_GPL(qcom_mdt_pas_init); + +static bool qcom_mdt_bins_are_split(const struct firmware *fw, const char *fw_name) +{ + const struct elf32_phdr *phdrs; + const struct elf32_hdr *ehdr; + uint64_t seg_start, seg_end; + int i; + + ehdr = (struct elf32_hdr *)fw->data; + phdrs = (struct elf32_phdr *)(ehdr + 1); + + for (i = 0; i < ehdr->e_phnum; i++) { + /* + * The size of the MDT file is not padded to include any + * zero-sized segments at the end. Ignore these, as they should + * not affect the decision about image being split or not. + */ + if (!phdrs[i].p_filesz) + continue; + + seg_start = phdrs[i].p_offset; + seg_end = phdrs[i].p_offset + phdrs[i].p_filesz; + if (seg_start > fw->size || seg_end > fw->size) + return true; + } + + return false; +} + +static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, + const char *fw_name, int pas_id, void *mem_region, + phys_addr_t mem_phys, size_t mem_size, + phys_addr_t *reloc_base, bool pas_init) +{ + const struct elf32_phdr *phdrs; + const struct elf32_phdr *phdr; + const struct elf32_hdr *ehdr; + phys_addr_t mem_reloc; + phys_addr_t min_addr = PHYS_ADDR_MAX; + ssize_t offset; + bool relocate = false; + bool is_split; + void *ptr; + int ret = 0; + int i; + + if (!fw || !mem_region || !mem_phys || !mem_size) + return -EINVAL; + + is_split = qcom_mdt_bins_are_split(fw, fw_name); + ehdr = (struct elf32_hdr *)fw->data; + phdrs = (struct elf32_phdr *)(ehdr + 1); + + for (i = 0; i < ehdr->e_phnum; i++) { + phdr = &phdrs[i]; + + if (!mdt_phdr_valid(phdr)) + continue; + + if (phdr->p_flags & QCOM_MDT_RELOCATABLE) + relocate = true; + + if (phdr->p_paddr < min_addr) + min_addr = phdr->p_paddr; + } + + if (relocate) { + /* + * The image is relocatable, so offset each segment based on + * the lowest segment address. + */ + mem_reloc = min_addr; + } else { + /* + * Image is not relocatable, so offset each segment based on + * the allocated physical chunk of memory. 
+ */ + mem_reloc = mem_phys; + } + + for (i = 0; i < ehdr->e_phnum; i++) { + phdr = &phdrs[i]; + + if (!mdt_phdr_valid(phdr)) + continue; + + offset = phdr->p_paddr - mem_reloc; + if (offset < 0 || offset + phdr->p_memsz > mem_size) { + dev_err(dev, "segment outside memory range\n"); + ret = -EINVAL; + break; + } + + if (phdr->p_filesz > phdr->p_memsz) { + dev_err(dev, + "refusing to load segment %d with p_filesz > p_memsz\n", + i); + ret = -EINVAL; + break; + } + + ptr = mem_region + offset; + + if (phdr->p_filesz && !is_split) { + /* Firmware is large enough to be non-split */ + if (phdr->p_offset + phdr->p_filesz > fw->size) { + dev_err(dev, "file %s segment %d would be truncated\n", + fw_name, i); + ret = -EINVAL; + break; + } + + memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz); + } else if (phdr->p_filesz) { + /* Firmware not large enough, load split-out segments */ + ret = mdt_load_split_segment(ptr, phdrs, i, fw_name, dev); + if (ret) + break; + } + + if (phdr->p_memsz > phdr->p_filesz) + memset(ptr + phdr->p_filesz, 0, phdr->p_memsz - phdr->p_filesz); + } + + if (reloc_base) + *reloc_base = mem_reloc; + + return ret; +} + +/** + * qcom_mdt_load() - load the firmware which header is loaded as fw + * @dev: device handle to associate resources with + * @fw: firmware object for the mdt file + * @firmware: name of the firmware, for construction of segment file names + * @pas_id: PAS identifier + * @mem_region: allocated memory region to load firmware into + * @mem_phys: physical address of allocated memory region + * @mem_size: size of the allocated memory region + * @reloc_base: adjusted physical address after relocation + * + * Returns 0 on success, negative errno otherwise. + */ +int qcom_mdt_load(struct device *dev, const struct firmware *fw, + const char *firmware, int pas_id, void *mem_region, + phys_addr_t mem_phys, size_t mem_size, + phys_addr_t *reloc_base) +{ + int ret; + + ret = qcom_mdt_pas_init(dev, fw, firmware, pas_id, mem_phys, NULL); + if (ret) + return ret; + + return __qcom_mdt_load(dev, fw, firmware, pas_id, mem_region, mem_phys, + mem_size, reloc_base, true); +} +EXPORT_SYMBOL_GPL(qcom_mdt_load); + +/** + * qcom_mdt_load_no_init() - load the firmware which header is loaded as fw + * @dev: device handle to associate resources with + * @fw: firmware object for the mdt file + * @firmware: name of the firmware, for construction of segment file names + * @pas_id: PAS identifier + * @mem_region: allocated memory region to load firmware into + * @mem_phys: physical address of allocated memory region + * @mem_size: size of the allocated memory region + * @reloc_base: adjusted physical address after relocation + * + * Returns 0 on success, negative errno otherwise. 
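+ *
+ * Unlike qcom_mdt_load(), this helper does not call qcom_mdt_pas_init()
+ * first; it only stages the segments into @mem_region, so it suits callers
+ * that perform the PAS initialization step separately or do not need it.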
+ */ +int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw, + const char *firmware, int pas_id, + void *mem_region, phys_addr_t mem_phys, + size_t mem_size, phys_addr_t *reloc_base) +{ + return __qcom_mdt_load(dev, fw, firmware, pas_id, mem_region, mem_phys, + mem_size, reloc_base, false); +} +EXPORT_SYMBOL_GPL(qcom_mdt_load_no_init); + +MODULE_DESCRIPTION("Firmware parser for Qualcomm MDT format"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/ocmem.c b/drivers/soc/qcom/ocmem.c new file mode 100644 index 0000000000..20f5461d46 --- /dev/null +++ b/drivers/soc/qcom/ocmem.c @@ -0,0 +1,459 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * The On Chip Memory (OCMEM) allocator allows various clients to allocate + * memory from OCMEM based on performance, latency and power requirements. + * This is typically used by the GPU, camera/video, and audio components on + * some Snapdragon SoCs. + * + * Copyright (C) 2019 Brian Masney <masneyb@onstation.org> + * Copyright (C) 2015 Red Hat. Author: Rob Clark <robdclark@gmail.com> + */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/firmware/qcom/qcom_scm.h> +#include <linux/sizes.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <soc/qcom/ocmem.h> + +enum region_mode { + WIDE_MODE = 0x0, + THIN_MODE, + MODE_DEFAULT = WIDE_MODE, +}; + +enum ocmem_macro_state { + PASSTHROUGH = 0, + PERI_ON = 1, + CORE_ON = 2, + CLK_OFF = 4, +}; + +struct ocmem_region { + bool interleaved; + enum region_mode mode; + unsigned int num_macros; + enum ocmem_macro_state macro_state[4]; + unsigned long macro_size; + unsigned long region_size; +}; + +struct ocmem_config { + uint8_t num_regions; + unsigned long macro_size; +}; + +struct ocmem { + struct device *dev; + const struct ocmem_config *config; + struct resource *memory; + void __iomem *mmio; + struct clk *core_clk; + struct clk *iface_clk; + unsigned int num_ports; + unsigned int num_macros; + bool interleaved; + struct ocmem_region *regions; + unsigned long active_allocations; +}; + +#define OCMEM_MIN_ALIGN SZ_64K +#define OCMEM_MIN_ALLOC SZ_64K + +#define OCMEM_REG_HW_VERSION 0x00000000 +#define OCMEM_REG_HW_PROFILE 0x00000004 + +#define OCMEM_REG_REGION_MODE_CTL 0x00001000 +#define OCMEM_REGION_MODE_CTL_REG0_THIN 0x00000001 +#define OCMEM_REGION_MODE_CTL_REG1_THIN 0x00000002 +#define OCMEM_REGION_MODE_CTL_REG2_THIN 0x00000004 +#define OCMEM_REGION_MODE_CTL_REG3_THIN 0x00000008 + +#define OCMEM_REG_GFX_MPU_START 0x00001004 +#define OCMEM_REG_GFX_MPU_END 0x00001008 + +#define OCMEM_HW_VERSION_MAJOR(val) FIELD_GET(GENMASK(31, 28), val) +#define OCMEM_HW_VERSION_MINOR(val) FIELD_GET(GENMASK(27, 16), val) +#define OCMEM_HW_VERSION_STEP(val) FIELD_GET(GENMASK(15, 0), val) + +#define OCMEM_HW_PROFILE_NUM_PORTS(val) FIELD_GET(0x0000000f, (val)) +#define OCMEM_HW_PROFILE_NUM_MACROS(val) FIELD_GET(0x00003f00, (val)) + +#define OCMEM_HW_PROFILE_LAST_REGN_HALFSIZE 0x00010000 +#define OCMEM_HW_PROFILE_INTERLEAVING 0x00020000 +#define OCMEM_REG_GEN_STATUS 0x0000000c + +#define OCMEM_REG_PSGSC_STATUS 0x00000038 +#define OCMEM_REG_PSGSC_CTL(i0) (0x0000003c + 0x1*(i0)) + +#define OCMEM_PSGSC_CTL_MACRO0_MODE(val) FIELD_PREP(0x00000007, (val)) +#define OCMEM_PSGSC_CTL_MACRO1_MODE(val) FIELD_PREP(0x00000070, (val)) +#define OCMEM_PSGSC_CTL_MACRO2_MODE(val) FIELD_PREP(0x00000700, (val)) +#define 
OCMEM_PSGSC_CTL_MACRO3_MODE(val) FIELD_PREP(0x00007000, (val)) + +static inline void ocmem_write(struct ocmem *ocmem, u32 reg, u32 data) +{ + writel(data, ocmem->mmio + reg); +} + +static inline u32 ocmem_read(struct ocmem *ocmem, u32 reg) +{ + return readl(ocmem->mmio + reg); +} + +static void update_ocmem(struct ocmem *ocmem) +{ + uint32_t region_mode_ctrl = 0x0; + int i; + + if (!qcom_scm_ocmem_lock_available()) { + for (i = 0; i < ocmem->config->num_regions; i++) { + struct ocmem_region *region = &ocmem->regions[i]; + + if (region->mode == THIN_MODE) + region_mode_ctrl |= BIT(i); + } + + dev_dbg(ocmem->dev, "ocmem_region_mode_control %x\n", + region_mode_ctrl); + ocmem_write(ocmem, OCMEM_REG_REGION_MODE_CTL, region_mode_ctrl); + } + + for (i = 0; i < ocmem->config->num_regions; i++) { + struct ocmem_region *region = &ocmem->regions[i]; + u32 data; + + data = OCMEM_PSGSC_CTL_MACRO0_MODE(region->macro_state[0]) | + OCMEM_PSGSC_CTL_MACRO1_MODE(region->macro_state[1]) | + OCMEM_PSGSC_CTL_MACRO2_MODE(region->macro_state[2]) | + OCMEM_PSGSC_CTL_MACRO3_MODE(region->macro_state[3]); + + ocmem_write(ocmem, OCMEM_REG_PSGSC_CTL(i), data); + } +} + +static unsigned long phys_to_offset(struct ocmem *ocmem, + unsigned long addr) +{ + if (addr < ocmem->memory->start || addr >= ocmem->memory->end) + return 0; + + return addr - ocmem->memory->start; +} + +static unsigned long device_address(struct ocmem *ocmem, + enum ocmem_client client, + unsigned long addr) +{ + WARN_ON(client != OCMEM_GRAPHICS); + + /* TODO: gpu uses phys_to_offset, but others do not.. */ + return phys_to_offset(ocmem, addr); +} + +static void update_range(struct ocmem *ocmem, struct ocmem_buf *buf, + enum ocmem_macro_state mstate, enum region_mode rmode) +{ + unsigned long offset = 0; + int i, j; + + for (i = 0; i < ocmem->config->num_regions; i++) { + struct ocmem_region *region = &ocmem->regions[i]; + + if (buf->offset <= offset && offset < buf->offset + buf->len) + region->mode = rmode; + + for (j = 0; j < region->num_macros; j++) { + if (buf->offset <= offset && + offset < buf->offset + buf->len) + region->macro_state[j] = mstate; + + offset += region->macro_size; + } + } + + update_ocmem(ocmem); +} + +struct ocmem *of_get_ocmem(struct device *dev) +{ + struct platform_device *pdev; + struct device_node *devnode; + struct ocmem *ocmem; + + devnode = of_parse_phandle(dev->of_node, "sram", 0); + if (!devnode || !devnode->parent) { + dev_err(dev, "Cannot look up sram phandle\n"); + of_node_put(devnode); + return ERR_PTR(-ENODEV); + } + + pdev = of_find_device_by_node(devnode->parent); + if (!pdev) { + dev_err(dev, "Cannot find device node %s\n", devnode->name); + of_node_put(devnode); + return ERR_PTR(-EPROBE_DEFER); + } + of_node_put(devnode); + + ocmem = platform_get_drvdata(pdev); + if (!ocmem) { + dev_err(dev, "Cannot get ocmem\n"); + put_device(&pdev->dev); + return ERR_PTR(-ENODEV); + } + return ocmem; +} +EXPORT_SYMBOL(of_get_ocmem); + +struct ocmem_buf *ocmem_allocate(struct ocmem *ocmem, enum ocmem_client client, + unsigned long size) +{ + struct ocmem_buf *buf; + int ret; + + /* TODO: add support for other clients... 
*/ + if (WARN_ON(client != OCMEM_GRAPHICS)) + return ERR_PTR(-ENODEV); + + if (size < OCMEM_MIN_ALLOC || !IS_ALIGNED(size, OCMEM_MIN_ALIGN)) + return ERR_PTR(-EINVAL); + + if (test_and_set_bit_lock(BIT(client), &ocmem->active_allocations)) + return ERR_PTR(-EBUSY); + + buf = kzalloc(sizeof(*buf), GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto err_unlock; + } + + buf->offset = 0; + buf->addr = device_address(ocmem, client, buf->offset); + buf->len = size; + + update_range(ocmem, buf, CORE_ON, WIDE_MODE); + + if (qcom_scm_ocmem_lock_available()) { + ret = qcom_scm_ocmem_lock(QCOM_SCM_OCMEM_GRAPHICS_ID, + buf->offset, buf->len, WIDE_MODE); + if (ret) { + dev_err(ocmem->dev, "could not lock: %d\n", ret); + ret = -EINVAL; + goto err_kfree; + } + } else { + ocmem_write(ocmem, OCMEM_REG_GFX_MPU_START, buf->offset); + ocmem_write(ocmem, OCMEM_REG_GFX_MPU_END, + buf->offset + buf->len); + } + + dev_dbg(ocmem->dev, "using %ldK of OCMEM at 0x%08lx for client %d\n", + size / 1024, buf->addr, client); + + return buf; + +err_kfree: + kfree(buf); +err_unlock: + clear_bit_unlock(BIT(client), &ocmem->active_allocations); + + return ERR_PTR(ret); +} +EXPORT_SYMBOL(ocmem_allocate); + +void ocmem_free(struct ocmem *ocmem, enum ocmem_client client, + struct ocmem_buf *buf) +{ + /* TODO: add support for other clients... */ + if (WARN_ON(client != OCMEM_GRAPHICS)) + return; + + update_range(ocmem, buf, CLK_OFF, MODE_DEFAULT); + + if (qcom_scm_ocmem_lock_available()) { + int ret; + + ret = qcom_scm_ocmem_unlock(QCOM_SCM_OCMEM_GRAPHICS_ID, + buf->offset, buf->len); + if (ret) + dev_err(ocmem->dev, "could not unlock: %d\n", ret); + } else { + ocmem_write(ocmem, OCMEM_REG_GFX_MPU_START, 0x0); + ocmem_write(ocmem, OCMEM_REG_GFX_MPU_END, 0x0); + } + + kfree(buf); + + clear_bit_unlock(BIT(client), &ocmem->active_allocations); +} +EXPORT_SYMBOL(ocmem_free); + +static int ocmem_dev_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + unsigned long reg, region_size; + int i, j, ret, num_banks; + struct ocmem *ocmem; + + if (!qcom_scm_is_available()) + return -EPROBE_DEFER; + + ocmem = devm_kzalloc(dev, sizeof(*ocmem), GFP_KERNEL); + if (!ocmem) + return -ENOMEM; + + ocmem->dev = dev; + ocmem->config = device_get_match_data(dev); + + ocmem->core_clk = devm_clk_get(dev, "core"); + if (IS_ERR(ocmem->core_clk)) + return dev_err_probe(dev, PTR_ERR(ocmem->core_clk), + "Unable to get core clock\n"); + + ocmem->iface_clk = devm_clk_get_optional(dev, "iface"); + if (IS_ERR(ocmem->iface_clk)) + return dev_err_probe(dev, PTR_ERR(ocmem->iface_clk), + "Unable to get iface clock\n"); + + ocmem->mmio = devm_platform_ioremap_resource_byname(pdev, "ctrl"); + if (IS_ERR(ocmem->mmio)) + return dev_err_probe(&pdev->dev, PTR_ERR(ocmem->mmio), + "Failed to ioremap ocmem_ctrl resource\n"); + + ocmem->memory = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "mem"); + if (!ocmem->memory) { + dev_err(dev, "Could not get mem region\n"); + return -ENXIO; + } + + /* The core clock is synchronous with graphics */ + WARN_ON(clk_set_rate(ocmem->core_clk, 1000) < 0); + + ret = clk_prepare_enable(ocmem->core_clk); + if (ret) + return dev_err_probe(ocmem->dev, ret, "Failed to enable core clock\n"); + + ret = clk_prepare_enable(ocmem->iface_clk); + if (ret) { + clk_disable_unprepare(ocmem->core_clk); + return dev_err_probe(ocmem->dev, ret, "Failed to enable iface clock\n"); + } + + if (qcom_scm_restore_sec_cfg_available()) { + dev_dbg(dev, "configuring scm\n"); + ret = qcom_scm_restore_sec_cfg(QCOM_SCM_OCMEM_DEV_ID, 0); + if 
(ret) { + dev_err_probe(dev, ret, "Could not enable secure configuration\n"); + goto err_clk_disable; + } + } + + reg = ocmem_read(ocmem, OCMEM_REG_HW_VERSION); + dev_dbg(dev, "OCMEM hardware version: %lu.%lu.%lu\n", + OCMEM_HW_VERSION_MAJOR(reg), + OCMEM_HW_VERSION_MINOR(reg), + OCMEM_HW_VERSION_STEP(reg)); + + reg = ocmem_read(ocmem, OCMEM_REG_HW_PROFILE); + ocmem->num_ports = OCMEM_HW_PROFILE_NUM_PORTS(reg); + ocmem->num_macros = OCMEM_HW_PROFILE_NUM_MACROS(reg); + ocmem->interleaved = !!(reg & OCMEM_HW_PROFILE_INTERLEAVING); + + num_banks = ocmem->num_ports / 2; + region_size = ocmem->config->macro_size * num_banks; + + dev_info(dev, "%u ports, %u regions, %u macros, %sinterleaved\n", + ocmem->num_ports, ocmem->config->num_regions, + ocmem->num_macros, ocmem->interleaved ? "" : "not "); + + ocmem->regions = devm_kcalloc(dev, ocmem->config->num_regions, + sizeof(struct ocmem_region), GFP_KERNEL); + if (!ocmem->regions) { + ret = -ENOMEM; + goto err_clk_disable; + } + + for (i = 0; i < ocmem->config->num_regions; i++) { + struct ocmem_region *region = &ocmem->regions[i]; + + if (WARN_ON(num_banks > ARRAY_SIZE(region->macro_state))) { + ret = -EINVAL; + goto err_clk_disable; + } + + region->mode = MODE_DEFAULT; + region->num_macros = num_banks; + + if (i == (ocmem->config->num_regions - 1) && + reg & OCMEM_HW_PROFILE_LAST_REGN_HALFSIZE) { + region->macro_size = ocmem->config->macro_size / 2; + region->region_size = region_size / 2; + } else { + region->macro_size = ocmem->config->macro_size; + region->region_size = region_size; + } + + for (j = 0; j < ARRAY_SIZE(region->macro_state); j++) + region->macro_state[j] = CLK_OFF; + } + + platform_set_drvdata(pdev, ocmem); + + return 0; + +err_clk_disable: + clk_disable_unprepare(ocmem->core_clk); + clk_disable_unprepare(ocmem->iface_clk); + return ret; +} + +static int ocmem_dev_remove(struct platform_device *pdev) +{ + struct ocmem *ocmem = platform_get_drvdata(pdev); + + clk_disable_unprepare(ocmem->core_clk); + clk_disable_unprepare(ocmem->iface_clk); + + return 0; +} + +static const struct ocmem_config ocmem_8226_config = { + .num_regions = 1, + .macro_size = SZ_128K, +}; + +static const struct ocmem_config ocmem_8974_config = { + .num_regions = 3, + .macro_size = SZ_128K, +}; + +static const struct of_device_id ocmem_of_match[] = { + { .compatible = "qcom,msm8226-ocmem", .data = &ocmem_8226_config }, + { .compatible = "qcom,msm8974-ocmem", .data = &ocmem_8974_config }, + { } +}; + +MODULE_DEVICE_TABLE(of, ocmem_of_match); + +static struct platform_driver ocmem_driver = { + .probe = ocmem_dev_probe, + .remove = ocmem_dev_remove, + .driver = { + .name = "ocmem", + .of_match_table = ocmem_of_match, + }, +}; + +module_platform_driver(ocmem_driver); + +MODULE_DESCRIPTION("On Chip Memory (OCMEM) allocator for some Snapdragon SoCs"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c new file mode 100644 index 0000000000..0034af927b --- /dev/null +++ b/drivers/soc/qcom/pdr_interface.c @@ -0,0 +1,755 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 The Linux Foundation. All rights reserved. 
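+ *
+ * Protection Domain Restart (PDR) helpers: service paths are resolved
+ * through the servreg locator QMI service, listeners are registered with
+ * the servreg notifier, and state-change indications are acknowledged and
+ * relayed to the client's status callback.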
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/workqueue.h> + +#include "pdr_internal.h" + +struct pdr_service { + char service_name[SERVREG_NAME_LENGTH + 1]; + char service_path[SERVREG_NAME_LENGTH + 1]; + + struct sockaddr_qrtr addr; + + unsigned int instance; + unsigned int service; + u8 service_data_valid; + u32 service_data; + int state; + + bool need_notifier_register; + bool need_notifier_remove; + bool need_locator_lookup; + bool service_connected; + + struct list_head node; +}; + +struct pdr_handle { + struct qmi_handle locator_hdl; + struct qmi_handle notifier_hdl; + + struct sockaddr_qrtr locator_addr; + + struct list_head lookups; + struct list_head indack_list; + + /* control access to pdr lookup/indack lists */ + struct mutex list_lock; + + /* serialize pd status invocation */ + struct mutex status_lock; + + /* control access to the locator state */ + struct mutex lock; + + bool locator_init_complete; + + struct work_struct locator_work; + struct work_struct notifier_work; + struct work_struct indack_work; + + struct workqueue_struct *notifier_wq; + struct workqueue_struct *indack_wq; + + void (*status)(int state, char *service_path, void *priv); + void *priv; +}; + +struct pdr_list_node { + enum servreg_service_state curr_state; + u16 transaction_id; + struct pdr_service *pds; + struct list_head node; +}; + +static int pdr_locator_new_server(struct qmi_handle *qmi, + struct qmi_service *svc) +{ + struct pdr_handle *pdr = container_of(qmi, struct pdr_handle, + locator_hdl); + struct pdr_service *pds; + + /* Create a local client port for QMI communication */ + pdr->locator_addr.sq_family = AF_QIPCRTR; + pdr->locator_addr.sq_node = svc->node; + pdr->locator_addr.sq_port = svc->port; + + mutex_lock(&pdr->lock); + pdr->locator_init_complete = true; + mutex_unlock(&pdr->lock); + + /* Service pending lookup requests */ + mutex_lock(&pdr->list_lock); + list_for_each_entry(pds, &pdr->lookups, node) { + if (pds->need_locator_lookup) + schedule_work(&pdr->locator_work); + } + mutex_unlock(&pdr->list_lock); + + return 0; +} + +static void pdr_locator_del_server(struct qmi_handle *qmi, + struct qmi_service *svc) +{ + struct pdr_handle *pdr = container_of(qmi, struct pdr_handle, + locator_hdl); + + mutex_lock(&pdr->lock); + pdr->locator_init_complete = false; + mutex_unlock(&pdr->lock); + + pdr->locator_addr.sq_node = 0; + pdr->locator_addr.sq_port = 0; +} + +static const struct qmi_ops pdr_locator_ops = { + .new_server = pdr_locator_new_server, + .del_server = pdr_locator_del_server, +}; + +static int pdr_register_listener(struct pdr_handle *pdr, + struct pdr_service *pds, + bool enable) +{ + struct servreg_register_listener_resp resp; + struct servreg_register_listener_req req; + struct qmi_txn txn; + int ret; + + ret = qmi_txn_init(&pdr->notifier_hdl, &txn, + servreg_register_listener_resp_ei, + &resp); + if (ret < 0) + return ret; + + req.enable = enable; + strscpy(req.service_path, pds->service_path, sizeof(req.service_path)); + + ret = qmi_send_request(&pdr->notifier_hdl, &pds->addr, + &txn, SERVREG_REGISTER_LISTENER_REQ, + SERVREG_REGISTER_LISTENER_REQ_LEN, + servreg_register_listener_req_ei, + &req); + if (ret < 0) { + qmi_txn_cancel(&txn); + return ret; + } + + ret = qmi_txn_wait(&txn, 5 * HZ); + if (ret < 0) { + pr_err("PDR: %s register listener txn wait failed: %d\n", + pds->service_path, ret); + return ret; + } + + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + pr_err("PDR: %s register 
listener failed: 0x%x\n", + pds->service_path, resp.resp.error); + return -EREMOTEIO; + } + + pds->state = resp.curr_state; + + return 0; +} + +static void pdr_notifier_work(struct work_struct *work) +{ + struct pdr_handle *pdr = container_of(work, struct pdr_handle, + notifier_work); + struct pdr_service *pds; + int ret; + + mutex_lock(&pdr->list_lock); + list_for_each_entry(pds, &pdr->lookups, node) { + if (pds->service_connected) { + if (!pds->need_notifier_register) + continue; + + pds->need_notifier_register = false; + ret = pdr_register_listener(pdr, pds, true); + if (ret < 0) + pds->state = SERVREG_SERVICE_STATE_DOWN; + } else { + if (!pds->need_notifier_remove) + continue; + + pds->need_notifier_remove = false; + pds->state = SERVREG_SERVICE_STATE_DOWN; + } + + mutex_lock(&pdr->status_lock); + pdr->status(pds->state, pds->service_path, pdr->priv); + mutex_unlock(&pdr->status_lock); + } + mutex_unlock(&pdr->list_lock); +} + +static int pdr_notifier_new_server(struct qmi_handle *qmi, + struct qmi_service *svc) +{ + struct pdr_handle *pdr = container_of(qmi, struct pdr_handle, + notifier_hdl); + struct pdr_service *pds; + + mutex_lock(&pdr->list_lock); + list_for_each_entry(pds, &pdr->lookups, node) { + if (pds->service == svc->service && + pds->instance == svc->instance) { + pds->service_connected = true; + pds->need_notifier_register = true; + pds->addr.sq_family = AF_QIPCRTR; + pds->addr.sq_node = svc->node; + pds->addr.sq_port = svc->port; + queue_work(pdr->notifier_wq, &pdr->notifier_work); + } + } + mutex_unlock(&pdr->list_lock); + + return 0; +} + +static void pdr_notifier_del_server(struct qmi_handle *qmi, + struct qmi_service *svc) +{ + struct pdr_handle *pdr = container_of(qmi, struct pdr_handle, + notifier_hdl); + struct pdr_service *pds; + + mutex_lock(&pdr->list_lock); + list_for_each_entry(pds, &pdr->lookups, node) { + if (pds->service == svc->service && + pds->instance == svc->instance) { + pds->service_connected = false; + pds->need_notifier_remove = true; + pds->addr.sq_node = 0; + pds->addr.sq_port = 0; + queue_work(pdr->notifier_wq, &pdr->notifier_work); + } + } + mutex_unlock(&pdr->list_lock); +} + +static const struct qmi_ops pdr_notifier_ops = { + .new_server = pdr_notifier_new_server, + .del_server = pdr_notifier_del_server, +}; + +static int pdr_send_indack_msg(struct pdr_handle *pdr, struct pdr_service *pds, + u16 tid) +{ + struct servreg_set_ack_resp resp; + struct servreg_set_ack_req req; + struct qmi_txn txn; + int ret; + + ret = qmi_txn_init(&pdr->notifier_hdl, &txn, servreg_set_ack_resp_ei, + &resp); + if (ret < 0) + return ret; + + req.transaction_id = tid; + strscpy(req.service_path, pds->service_path, sizeof(req.service_path)); + + ret = qmi_send_request(&pdr->notifier_hdl, &pds->addr, + &txn, SERVREG_SET_ACK_REQ, + SERVREG_SET_ACK_REQ_LEN, + servreg_set_ack_req_ei, + &req); + + /* Skip waiting for response */ + qmi_txn_cancel(&txn); + return ret; +} + +static void pdr_indack_work(struct work_struct *work) +{ + struct pdr_handle *pdr = container_of(work, struct pdr_handle, + indack_work); + struct pdr_list_node *ind, *tmp; + struct pdr_service *pds; + + list_for_each_entry_safe(ind, tmp, &pdr->indack_list, node) { + pds = ind->pds; + + mutex_lock(&pdr->status_lock); + pds->state = ind->curr_state; + pdr->status(pds->state, pds->service_path, pdr->priv); + mutex_unlock(&pdr->status_lock); + + /* Ack the indication after clients release the PD resources */ + pdr_send_indack_msg(pdr, pds, ind->transaction_id); + + mutex_lock(&pdr->list_lock); + 
list_del(&ind->node); + mutex_unlock(&pdr->list_lock); + + kfree(ind); + } +} + +static void pdr_indication_cb(struct qmi_handle *qmi, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, const void *data) +{ + struct pdr_handle *pdr = container_of(qmi, struct pdr_handle, + notifier_hdl); + const struct servreg_state_updated_ind *ind_msg = data; + struct pdr_list_node *ind; + struct pdr_service *pds = NULL, *iter; + + if (!ind_msg || !ind_msg->service_path[0] || + strlen(ind_msg->service_path) > SERVREG_NAME_LENGTH) + return; + + mutex_lock(&pdr->list_lock); + list_for_each_entry(iter, &pdr->lookups, node) { + if (strcmp(iter->service_path, ind_msg->service_path)) + continue; + + pds = iter; + break; + } + mutex_unlock(&pdr->list_lock); + + if (!pds) + return; + + pr_info("PDR: Indication received from %s, state: 0x%x, trans-id: %d\n", + ind_msg->service_path, ind_msg->curr_state, + ind_msg->transaction_id); + + ind = kzalloc(sizeof(*ind), GFP_KERNEL); + if (!ind) + return; + + ind->transaction_id = ind_msg->transaction_id; + ind->curr_state = ind_msg->curr_state; + ind->pds = pds; + + mutex_lock(&pdr->list_lock); + list_add_tail(&ind->node, &pdr->indack_list); + mutex_unlock(&pdr->list_lock); + + queue_work(pdr->indack_wq, &pdr->indack_work); +} + +static const struct qmi_msg_handler qmi_indication_handler[] = { + { + .type = QMI_INDICATION, + .msg_id = SERVREG_STATE_UPDATED_IND_ID, + .ei = servreg_state_updated_ind_ei, + .decoded_size = sizeof(struct servreg_state_updated_ind), + .fn = pdr_indication_cb, + }, + {} +}; + +static int pdr_get_domain_list(struct servreg_get_domain_list_req *req, + struct servreg_get_domain_list_resp *resp, + struct pdr_handle *pdr) +{ + struct qmi_txn txn; + int ret; + + ret = qmi_txn_init(&pdr->locator_hdl, &txn, + servreg_get_domain_list_resp_ei, resp); + if (ret < 0) + return ret; + + ret = qmi_send_request(&pdr->locator_hdl, + &pdr->locator_addr, + &txn, SERVREG_GET_DOMAIN_LIST_REQ, + SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN, + servreg_get_domain_list_req_ei, + req); + if (ret < 0) { + qmi_txn_cancel(&txn); + return ret; + } + + ret = qmi_txn_wait(&txn, 5 * HZ); + if (ret < 0) { + pr_err("PDR: %s get domain list txn wait failed: %d\n", + req->service_name, ret); + return ret; + } + + if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { + pr_err("PDR: %s get domain list failed: 0x%x\n", + req->service_name, resp->resp.error); + return -EREMOTEIO; + } + + return 0; +} + +static int pdr_locate_service(struct pdr_handle *pdr, struct pdr_service *pds) +{ + struct servreg_get_domain_list_resp *resp; + struct servreg_get_domain_list_req req; + struct servreg_location_entry *entry; + int domains_read = 0; + int ret, i; + + resp = kzalloc(sizeof(*resp), GFP_KERNEL); + if (!resp) + return -ENOMEM; + + /* Prepare req message */ + strscpy(req.service_name, pds->service_name, sizeof(req.service_name)); + req.domain_offset_valid = true; + req.domain_offset = 0; + + do { + req.domain_offset = domains_read; + ret = pdr_get_domain_list(&req, resp, pdr); + if (ret < 0) + goto out; + + for (i = domains_read; i < resp->domain_list_len; i++) { + entry = &resp->domain_list[i]; + + if (strnlen(entry->name, sizeof(entry->name)) == sizeof(entry->name)) + continue; + + if (!strcmp(entry->name, pds->service_path)) { + pds->service_data_valid = entry->service_data_valid; + pds->service_data = entry->service_data; + pds->instance = entry->instance; + goto out; + } + } + + /* Update ret to indicate that the service is not yet found */ + ret = -ENXIO; + + /* Always read total_domains from the 
response msg */ + if (resp->domain_list_len > resp->total_domains) + resp->domain_list_len = resp->total_domains; + + domains_read += resp->domain_list_len; + } while (domains_read < resp->total_domains); +out: + kfree(resp); + return ret; +} + +static void pdr_notify_lookup_failure(struct pdr_handle *pdr, + struct pdr_service *pds, + int err) +{ + pr_err("PDR: service lookup for %s failed: %d\n", + pds->service_name, err); + + if (err == -ENXIO) + return; + + list_del(&pds->node); + pds->state = SERVREG_LOCATOR_ERR; + mutex_lock(&pdr->status_lock); + pdr->status(pds->state, pds->service_path, pdr->priv); + mutex_unlock(&pdr->status_lock); + kfree(pds); +} + +static void pdr_locator_work(struct work_struct *work) +{ + struct pdr_handle *pdr = container_of(work, struct pdr_handle, + locator_work); + struct pdr_service *pds, *tmp; + int ret = 0; + + /* Bail out early if the SERVREG LOCATOR QMI service is not up */ + mutex_lock(&pdr->lock); + if (!pdr->locator_init_complete) { + mutex_unlock(&pdr->lock); + pr_debug("PDR: SERVICE LOCATOR service not available\n"); + return; + } + mutex_unlock(&pdr->lock); + + mutex_lock(&pdr->list_lock); + list_for_each_entry_safe(pds, tmp, &pdr->lookups, node) { + if (!pds->need_locator_lookup) + continue; + + ret = pdr_locate_service(pdr, pds); + if (ret < 0) { + pdr_notify_lookup_failure(pdr, pds, ret); + continue; + } + + ret = qmi_add_lookup(&pdr->notifier_hdl, pds->service, 1, + pds->instance); + if (ret < 0) { + pdr_notify_lookup_failure(pdr, pds, ret); + continue; + } + + pds->need_locator_lookup = false; + } + mutex_unlock(&pdr->list_lock); +} + +/** + * pdr_add_lookup() - register a tracking request for a PD + * @pdr: PDR client handle + * @service_name: service name of the tracking request + * @service_path: service path of the tracking request + * + * Registering a pdr lookup allows for tracking the life cycle of the PD. + * + * Return: pdr_service object on success, ERR_PTR on failure. -EALREADY is + * returned if a lookup is already in progress for the given service path. + */ +struct pdr_service *pdr_add_lookup(struct pdr_handle *pdr, + const char *service_name, + const char *service_path) +{ + struct pdr_service *pds, *tmp; + int ret; + + if (IS_ERR_OR_NULL(pdr)) + return ERR_PTR(-EINVAL); + + if (!service_name || strlen(service_name) > SERVREG_NAME_LENGTH || + !service_path || strlen(service_path) > SERVREG_NAME_LENGTH) + return ERR_PTR(-EINVAL); + + pds = kzalloc(sizeof(*pds), GFP_KERNEL); + if (!pds) + return ERR_PTR(-ENOMEM); + + pds->service = SERVREG_NOTIFIER_SERVICE; + strscpy(pds->service_name, service_name, sizeof(pds->service_name)); + strscpy(pds->service_path, service_path, sizeof(pds->service_path)); + pds->need_locator_lookup = true; + + mutex_lock(&pdr->list_lock); + list_for_each_entry(tmp, &pdr->lookups, node) { + if (strcmp(tmp->service_path, service_path)) + continue; + + mutex_unlock(&pdr->list_lock); + ret = -EALREADY; + goto err; + } + + list_add(&pds->node, &pdr->lookups); + mutex_unlock(&pdr->list_lock); + + schedule_work(&pdr->locator_work); + + return pds; +err: + kfree(pds); + return ERR_PTR(ret); +} +EXPORT_SYMBOL(pdr_add_lookup); + +/** + * pdr_restart_pd() - restart PD + * @pdr: PDR client handle + * @pds: PD service handle + * + * Restarts the PD tracked by the PDR client handle for a given service path. + * + * Return: 0 on success, negative errno on failure. 
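+ * -EOPNOTSUPP is returned when the remote reports that PD restart is
+ * disabled, and -EINVAL when @pds is not a connected lookup on this handle.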
+ */ +int pdr_restart_pd(struct pdr_handle *pdr, struct pdr_service *pds) +{ + struct servreg_restart_pd_resp resp; + struct servreg_restart_pd_req req = { 0 }; + struct sockaddr_qrtr addr; + struct pdr_service *tmp; + struct qmi_txn txn; + int ret; + + if (IS_ERR_OR_NULL(pdr) || IS_ERR_OR_NULL(pds)) + return -EINVAL; + + mutex_lock(&pdr->list_lock); + list_for_each_entry(tmp, &pdr->lookups, node) { + if (tmp != pds) + continue; + + if (!pds->service_connected) + break; + + /* Prepare req message */ + strscpy(req.service_path, pds->service_path, sizeof(req.service_path)); + addr = pds->addr; + break; + } + mutex_unlock(&pdr->list_lock); + + if (!req.service_path[0]) + return -EINVAL; + + ret = qmi_txn_init(&pdr->notifier_hdl, &txn, + servreg_restart_pd_resp_ei, + &resp); + if (ret < 0) + return ret; + + ret = qmi_send_request(&pdr->notifier_hdl, &addr, + &txn, SERVREG_RESTART_PD_REQ, + SERVREG_RESTART_PD_REQ_MAX_LEN, + servreg_restart_pd_req_ei, &req); + if (ret < 0) { + qmi_txn_cancel(&txn); + return ret; + } + + ret = qmi_txn_wait(&txn, 5 * HZ); + if (ret < 0) { + pr_err("PDR: %s PD restart txn wait failed: %d\n", + req.service_path, ret); + return ret; + } + + /* Check response if PDR is disabled */ + if (resp.resp.result == QMI_RESULT_FAILURE_V01 && + resp.resp.error == QMI_ERR_DISABLED_V01) { + pr_err("PDR: %s PD restart is disabled: 0x%x\n", + req.service_path, resp.resp.error); + return -EOPNOTSUPP; + } + + /* Check the response for other error case*/ + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + pr_err("PDR: %s request for PD restart failed: 0x%x\n", + req.service_path, resp.resp.error); + return -EREMOTEIO; + } + + return 0; +} +EXPORT_SYMBOL(pdr_restart_pd); + +/** + * pdr_handle_alloc() - initialize the PDR client handle + * @status: function to be called on PD state change + * @priv: handle for client's use + * + * Initializes the PDR client handle to allow for tracking/restart of PDs. + * + * Return: pdr_handle object on success, ERR_PTR on failure. 
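+ *
+ * A minimal usage sketch (my_status_cb, my_priv and the service/path strings
+ * are caller-supplied placeholders, not defined by this API):
+ *
+ *	pdr = pdr_handle_alloc(my_status_cb, my_priv);
+ *	if (IS_ERR(pdr))
+ *		return PTR_ERR(pdr);
+ *	pds = pdr_add_lookup(pdr, "tms/servreg", "msm/adsp/charger_pd");
+ *	if (IS_ERR(pds))
+ *		pdr_handle_release(pdr);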
+ */ +struct pdr_handle *pdr_handle_alloc(void (*status)(int state, + char *service_path, + void *priv), void *priv) +{ + struct pdr_handle *pdr; + int ret; + + if (!status) + return ERR_PTR(-EINVAL); + + pdr = kzalloc(sizeof(*pdr), GFP_KERNEL); + if (!pdr) + return ERR_PTR(-ENOMEM); + + pdr->status = status; + pdr->priv = priv; + + mutex_init(&pdr->status_lock); + mutex_init(&pdr->list_lock); + mutex_init(&pdr->lock); + + INIT_LIST_HEAD(&pdr->lookups); + INIT_LIST_HEAD(&pdr->indack_list); + + INIT_WORK(&pdr->locator_work, pdr_locator_work); + INIT_WORK(&pdr->notifier_work, pdr_notifier_work); + INIT_WORK(&pdr->indack_work, pdr_indack_work); + + pdr->notifier_wq = create_singlethread_workqueue("pdr_notifier_wq"); + if (!pdr->notifier_wq) { + ret = -ENOMEM; + goto free_pdr_handle; + } + + pdr->indack_wq = alloc_ordered_workqueue("pdr_indack_wq", WQ_HIGHPRI); + if (!pdr->indack_wq) { + ret = -ENOMEM; + goto destroy_notifier; + } + + ret = qmi_handle_init(&pdr->locator_hdl, + SERVREG_GET_DOMAIN_LIST_RESP_MAX_LEN, + &pdr_locator_ops, NULL); + if (ret < 0) + goto destroy_indack; + + ret = qmi_add_lookup(&pdr->locator_hdl, SERVREG_LOCATOR_SERVICE, 1, 1); + if (ret < 0) + goto release_qmi_handle; + + ret = qmi_handle_init(&pdr->notifier_hdl, + SERVREG_STATE_UPDATED_IND_MAX_LEN, + &pdr_notifier_ops, + qmi_indication_handler); + if (ret < 0) + goto release_qmi_handle; + + return pdr; + +release_qmi_handle: + qmi_handle_release(&pdr->locator_hdl); +destroy_indack: + destroy_workqueue(pdr->indack_wq); +destroy_notifier: + destroy_workqueue(pdr->notifier_wq); +free_pdr_handle: + kfree(pdr); + + return ERR_PTR(ret); +} +EXPORT_SYMBOL(pdr_handle_alloc); + +/** + * pdr_handle_release() - release the PDR client handle + * @pdr: PDR client handle + * + * Cleans up pending tracking requests and releases the underlying qmi handles. 
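+ * Passing a NULL or ERR_PTR handle is tolerated and turns this into a no-op.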
+ */ +void pdr_handle_release(struct pdr_handle *pdr) +{ + struct pdr_service *pds, *tmp; + + if (IS_ERR_OR_NULL(pdr)) + return; + + mutex_lock(&pdr->list_lock); + list_for_each_entry_safe(pds, tmp, &pdr->lookups, node) { + list_del(&pds->node); + kfree(pds); + } + mutex_unlock(&pdr->list_lock); + + cancel_work_sync(&pdr->locator_work); + cancel_work_sync(&pdr->notifier_work); + cancel_work_sync(&pdr->indack_work); + + destroy_workqueue(pdr->notifier_wq); + destroy_workqueue(pdr->indack_wq); + + qmi_handle_release(&pdr->locator_hdl); + qmi_handle_release(&pdr->notifier_hdl); + + kfree(pdr); +} +EXPORT_SYMBOL(pdr_handle_release); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Qualcomm Protection Domain Restart helpers"); diff --git a/drivers/soc/qcom/pdr_internal.h b/drivers/soc/qcom/pdr_internal.h new file mode 100644 index 0000000000..03c282b7f1 --- /dev/null +++ b/drivers/soc/qcom/pdr_internal.h @@ -0,0 +1,379 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __QCOM_PDR_HELPER_INTERNAL__ +#define __QCOM_PDR_HELPER_INTERNAL__ + +#include <linux/soc/qcom/pdr.h> + +#define SERVREG_LOCATOR_SERVICE 0x40 +#define SERVREG_NOTIFIER_SERVICE 0x42 + +#define SERVREG_REGISTER_LISTENER_REQ 0x20 +#define SERVREG_GET_DOMAIN_LIST_REQ 0x21 +#define SERVREG_STATE_UPDATED_IND_ID 0x22 +#define SERVREG_SET_ACK_REQ 0x23 +#define SERVREG_RESTART_PD_REQ 0x24 + +#define SERVREG_DOMAIN_LIST_LENGTH 32 +#define SERVREG_RESTART_PD_REQ_MAX_LEN 67 +#define SERVREG_REGISTER_LISTENER_REQ_LEN 71 +#define SERVREG_SET_ACK_REQ_LEN 72 +#define SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN 74 +#define SERVREG_STATE_UPDATED_IND_MAX_LEN 79 +#define SERVREG_GET_DOMAIN_LIST_RESP_MAX_LEN 2389 + +struct servreg_location_entry { + char name[SERVREG_NAME_LENGTH + 1]; + u8 service_data_valid; + u32 service_data; + u32 instance; +}; + +static const struct qmi_elem_info servreg_location_entry_ei[] = { + { + .data_type = QMI_STRING, + .elem_len = SERVREG_NAME_LENGTH + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct servreg_location_entry, + name), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct servreg_location_entry, + instance), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct servreg_location_entry, + service_data_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct servreg_location_entry, + service_data), + }, + {} +}; + +struct servreg_get_domain_list_req { + char service_name[SERVREG_NAME_LENGTH + 1]; + u8 domain_offset_valid; + u32 domain_offset; +}; + +static const struct qmi_elem_info servreg_get_domain_list_req_ei[] = { + { + .data_type = QMI_STRING, + .elem_len = SERVREG_NAME_LENGTH + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct servreg_get_domain_list_req, + service_name), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct servreg_get_domain_list_req, + domain_offset_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct servreg_get_domain_list_req, + 
domain_offset), + }, + {} +}; + +struct servreg_get_domain_list_resp { + struct qmi_response_type_v01 resp; + u8 total_domains_valid; + u16 total_domains; + u8 db_rev_count_valid; + u16 db_rev_count; + u8 domain_list_valid; + u32 domain_list_len; + struct servreg_location_entry domain_list[SERVREG_DOMAIN_LIST_LENGTH]; +}; + +static const struct qmi_elem_info servreg_get_domain_list_resp_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct servreg_get_domain_list_resp, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct servreg_get_domain_list_resp, + total_domains_valid), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct servreg_get_domain_list_resp, + total_domains), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct servreg_get_domain_list_resp, + db_rev_count_valid), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct servreg_get_domain_list_resp, + db_rev_count), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct servreg_get_domain_list_resp, + domain_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct servreg_get_domain_list_resp, + domain_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = SERVREG_DOMAIN_LIST_LENGTH, + .elem_size = sizeof(struct servreg_location_entry), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct servreg_get_domain_list_resp, + domain_list), + .ei_array = servreg_location_entry_ei, + }, + {} +}; + +struct servreg_register_listener_req { + u8 enable; + char service_path[SERVREG_NAME_LENGTH + 1]; +}; + +static const struct qmi_elem_info servreg_register_listener_req_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct servreg_register_listener_req, + enable), + }, + { + .data_type = QMI_STRING, + .elem_len = SERVREG_NAME_LENGTH + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct servreg_register_listener_req, + service_path), + }, + {} +}; + +struct servreg_register_listener_resp { + struct qmi_response_type_v01 resp; + u8 curr_state_valid; + enum servreg_service_state curr_state; +}; + +static const struct qmi_elem_info servreg_register_listener_resp_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct servreg_register_listener_resp, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct servreg_register_listener_resp, + curr_state_valid), + }, + { + .data_type 
= QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum servreg_service_state), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct servreg_register_listener_resp, + curr_state), + }, + {} +}; + +struct servreg_restart_pd_req { + char service_path[SERVREG_NAME_LENGTH + 1]; +}; + +static const struct qmi_elem_info servreg_restart_pd_req_ei[] = { + { + .data_type = QMI_STRING, + .elem_len = SERVREG_NAME_LENGTH + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct servreg_restart_pd_req, + service_path), + }, + {} +}; + +struct servreg_restart_pd_resp { + struct qmi_response_type_v01 resp; +}; + +static const struct qmi_elem_info servreg_restart_pd_resp_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct servreg_restart_pd_resp, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + {} +}; + +struct servreg_state_updated_ind { + enum servreg_service_state curr_state; + char service_path[SERVREG_NAME_LENGTH + 1]; + u16 transaction_id; +}; + +static const struct qmi_elem_info servreg_state_updated_ind_ei[] = { + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct servreg_state_updated_ind, + curr_state), + }, + { + .data_type = QMI_STRING, + .elem_len = SERVREG_NAME_LENGTH + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct servreg_state_updated_ind, + service_path), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0x03, + .offset = offsetof(struct servreg_state_updated_ind, + transaction_id), + }, + {} +}; + +struct servreg_set_ack_req { + char service_path[SERVREG_NAME_LENGTH + 1]; + u16 transaction_id; +}; + +static const struct qmi_elem_info servreg_set_ack_req_ei[] = { + { + .data_type = QMI_STRING, + .elem_len = SERVREG_NAME_LENGTH + 1, + .elem_size = sizeof(char), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct servreg_set_ack_req, + service_path), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct servreg_set_ack_req, + transaction_id), + }, + {} +}; + +struct servreg_set_ack_resp { + struct qmi_response_type_v01 resp; +}; + +static const struct qmi_elem_info servreg_set_ack_resp_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct servreg_set_ack_resp, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + {} +}; + +#endif diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c new file mode 100644 index 0000000000..61c89ddfc7 --- /dev/null +++ b/drivers/soc/qcom/pmic_glink.c @@ -0,0 +1,379 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2022, Linaro Ltd + */ +#include <linux/auxiliary_bus.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/rpmsg.h> +#include <linux/slab.h> +#include <linux/soc/qcom/pdr.h> +#include <linux/soc/qcom/pmic_glink.h> + +enum { + PMIC_GLINK_CLIENT_BATT = 0, + PMIC_GLINK_CLIENT_ALTMODE, + PMIC_GLINK_CLIENT_UCSI, +}; + +#define PMIC_GLINK_CLIENT_DEFAULT (BIT(PMIC_GLINK_CLIENT_BATT) | \ + BIT(PMIC_GLINK_CLIENT_ALTMODE)) + +struct pmic_glink { + struct device *dev; + struct pdr_handle *pdr; + + struct rpmsg_endpoint *ept; + + unsigned long client_mask; + + struct auxiliary_device altmode_aux; + struct auxiliary_device ps_aux; + struct auxiliary_device ucsi_aux; + + /* serializing client_state and pdr_state updates */ + struct mutex state_lock; + unsigned int client_state; + unsigned int pdr_state; + + /* serializing clients list updates */ + struct mutex client_lock; + struct list_head clients; +}; + +static struct pmic_glink *__pmic_glink; +static DEFINE_MUTEX(__pmic_glink_lock); + +struct pmic_glink_client { + struct list_head node; + + struct pmic_glink *pg; + unsigned int id; + + void (*cb)(const void *data, size_t len, void *priv); + void (*pdr_notify)(void *priv, int state); + void *priv; +}; + +static void _devm_pmic_glink_release_client(struct device *dev, void *res) +{ + struct pmic_glink_client *client = (struct pmic_glink_client *)res; + struct pmic_glink *pg = client->pg; + + mutex_lock(&pg->client_lock); + list_del(&client->node); + mutex_unlock(&pg->client_lock); +} + +struct pmic_glink_client *devm_pmic_glink_register_client(struct device *dev, + unsigned int id, + void (*cb)(const void *, size_t, void *), + void (*pdr)(void *, int), + void *priv) +{ + struct pmic_glink_client *client; + struct pmic_glink *pg = dev_get_drvdata(dev->parent); + + client = devres_alloc(_devm_pmic_glink_release_client, sizeof(*client), GFP_KERNEL); + if (!client) + return ERR_PTR(-ENOMEM); + + client->pg = pg; + client->id = id; + client->cb = cb; + client->pdr_notify = pdr; + client->priv = priv; + + mutex_lock(&pg->client_lock); + list_add(&client->node, &pg->clients); + mutex_unlock(&pg->client_lock); + + devres_add(dev, client); + + return client; +} +EXPORT_SYMBOL_GPL(devm_pmic_glink_register_client); + +int pmic_glink_send(struct pmic_glink_client *client, void *data, size_t len) +{ + struct pmic_glink *pg = client->pg; + + return rpmsg_send(pg->ept, data, len); +} +EXPORT_SYMBOL_GPL(pmic_glink_send); + +static int pmic_glink_rpmsg_callback(struct rpmsg_device *rpdev, void *data, + int len, void *priv, u32 addr) +{ + struct pmic_glink_client *client; + struct pmic_glink_hdr *hdr; + struct pmic_glink *pg = dev_get_drvdata(&rpdev->dev); + + if (len < sizeof(*hdr)) { + dev_warn(pg->dev, "ignoring truncated message\n"); + return 0; + } + + hdr = data; + + list_for_each_entry(client, &pg->clients, node) { + if (client->id == le32_to_cpu(hdr->owner)) + client->cb(data, len, client->priv); + } + + return 0; +} + +static void pmic_glink_aux_release(struct device *dev) {} + +static int pmic_glink_add_aux_device(struct pmic_glink *pg, + struct auxiliary_device *aux, + const char *name) +{ + struct device *parent = pg->dev; + int ret; + + aux->name = name; + aux->dev.parent = parent; + aux->dev.release = pmic_glink_aux_release; + device_set_of_node_from_dev(&aux->dev, parent); + ret = auxiliary_device_init(aux); + if (ret) + return ret; + + ret = auxiliary_device_add(aux); + if (ret) + auxiliary_device_uninit(aux); + + return ret; +} + 
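+/* Tear down an auxiliary device previously set up by pmic_glink_add_aux_device() */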
+static void pmic_glink_del_aux_device(struct pmic_glink *pg,
+				      struct auxiliary_device *aux)
+{
+	auxiliary_device_delete(aux);
+	auxiliary_device_uninit(aux);
+}
+
+static void pmic_glink_state_notify_clients(struct pmic_glink *pg)
+{
+	struct pmic_glink_client *client;
+	unsigned int new_state = pg->client_state;
+
+	if (pg->client_state != SERVREG_SERVICE_STATE_UP) {
+		if (pg->pdr_state == SERVREG_SERVICE_STATE_UP && pg->ept)
+			new_state = SERVREG_SERVICE_STATE_UP;
+	} else {
+		if (pg->pdr_state != SERVREG_SERVICE_STATE_UP || !pg->ept)
+			new_state = SERVREG_SERVICE_STATE_DOWN;
+	}
+
+	if (new_state != pg->client_state) {
+		list_for_each_entry(client, &pg->clients, node)
+			client->pdr_notify(client->priv, new_state);
+		pg->client_state = new_state;
+	}
+}
+
+static void pmic_glink_pdr_callback(int state, char *svc_path, void *priv)
+{
+	struct pmic_glink *pg = priv;
+
+	mutex_lock(&pg->state_lock);
+	pg->pdr_state = state;
+
+	pmic_glink_state_notify_clients(pg);
+	mutex_unlock(&pg->state_lock);
+}
+
+static int pmic_glink_rpmsg_probe(struct rpmsg_device *rpdev)
+{
+	struct pmic_glink *pg = __pmic_glink;
+	int ret = 0;
+
+	mutex_lock(&__pmic_glink_lock);
+	if (!pg) {
+		ret = dev_err_probe(&rpdev->dev, -ENODEV, "no pmic_glink device to attach to\n");
+		goto out_unlock;
+	}
+
+	dev_set_drvdata(&rpdev->dev, pg);
+
+	mutex_lock(&pg->state_lock);
+	pg->ept = rpdev->ept;
+	pmic_glink_state_notify_clients(pg);
+	mutex_unlock(&pg->state_lock);
+
+out_unlock:
+	mutex_unlock(&__pmic_glink_lock);
+	return ret;
+}
+
+static void pmic_glink_rpmsg_remove(struct rpmsg_device *rpdev)
+{
+	struct pmic_glink *pg;
+
+	mutex_lock(&__pmic_glink_lock);
+	pg = __pmic_glink;
+	if (!pg)
+		goto out_unlock;
+
+	mutex_lock(&pg->state_lock);
+	pg->ept = NULL;
+	pmic_glink_state_notify_clients(pg);
+	mutex_unlock(&pg->state_lock);
+out_unlock:
+	mutex_unlock(&__pmic_glink_lock);
+}
+
+static const struct rpmsg_device_id pmic_glink_rpmsg_id_match[] = {
+	{ "PMIC_RTR_ADSP_APPS" },
+	{}
+};
+
+static struct rpmsg_driver pmic_glink_rpmsg_driver = {
+	.probe = pmic_glink_rpmsg_probe,
+	.remove = pmic_glink_rpmsg_remove,
+	.callback = pmic_glink_rpmsg_callback,
+	.id_table = pmic_glink_rpmsg_id_match,
+	.drv = {
+		.name = "qcom_pmic_glink_rpmsg",
+	},
+};
+
+static int pmic_glink_probe(struct platform_device *pdev)
+{
+	const unsigned long *match_data;
+	struct pdr_service *service;
+	struct pmic_glink *pg;
+	int ret;
+
+	pg = devm_kzalloc(&pdev->dev, sizeof(*pg), GFP_KERNEL);
+	if (!pg)
+		return -ENOMEM;
+
+	dev_set_drvdata(&pdev->dev, pg);
+
+	pg->dev = &pdev->dev;
+
+	INIT_LIST_HEAD(&pg->clients);
+	mutex_init(&pg->client_lock);
+	mutex_init(&pg->state_lock);
+
+	match_data = (unsigned long *)of_device_get_match_data(&pdev->dev);
+	if (match_data)
+		pg->client_mask = *match_data;
+	else
+		pg->client_mask = PMIC_GLINK_CLIENT_DEFAULT;
+
+	if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_UCSI)) {
+		ret = pmic_glink_add_aux_device(pg, &pg->ucsi_aux, "ucsi");
+		if (ret)
+			return ret;
+	}
+	if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_ALTMODE)) {
+		ret = pmic_glink_add_aux_device(pg, &pg->altmode_aux, "altmode");
+		if (ret)
+			goto out_release_ucsi_aux;
+	}
+	if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_BATT)) {
+		ret = pmic_glink_add_aux_device(pg, &pg->ps_aux, "power-supply");
+		if (ret)
+			goto out_release_altmode_aux;
+	}
+
+	pg->pdr = pdr_handle_alloc(pmic_glink_pdr_callback, pg);
+	if (IS_ERR(pg->pdr)) {
+		ret = dev_err_probe(&pdev->dev, PTR_ERR(pg->pdr), "failed to initialize pdr\n");
+		goto out_release_aux_devices;
+	}
+
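+	/*
+	 * Watch the ADSP "charger_pd" protection domain; PDR up/down events
+	 * arrive in pmic_glink_pdr_callback() and drive the client
+	 * notifications above.
+	 */
+	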
service = pdr_add_lookup(pg->pdr, "tms/servreg", "msm/adsp/charger_pd"); + if (IS_ERR(service)) { + ret = dev_err_probe(&pdev->dev, PTR_ERR(service), + "failed adding pdr lookup for charger_pd\n"); + goto out_release_pdr_handle; + } + + mutex_lock(&__pmic_glink_lock); + __pmic_glink = pg; + mutex_unlock(&__pmic_glink_lock); + + return 0; + +out_release_pdr_handle: + pdr_handle_release(pg->pdr); +out_release_aux_devices: + if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_BATT)) + pmic_glink_del_aux_device(pg, &pg->ps_aux); +out_release_altmode_aux: + if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_ALTMODE)) + pmic_glink_del_aux_device(pg, &pg->altmode_aux); +out_release_ucsi_aux: + if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_UCSI)) + pmic_glink_del_aux_device(pg, &pg->ucsi_aux); + + return ret; +} + +static int pmic_glink_remove(struct platform_device *pdev) +{ + struct pmic_glink *pg = dev_get_drvdata(&pdev->dev); + + pdr_handle_release(pg->pdr); + + if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_BATT)) + pmic_glink_del_aux_device(pg, &pg->ps_aux); + if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_ALTMODE)) + pmic_glink_del_aux_device(pg, &pg->altmode_aux); + if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_UCSI)) + pmic_glink_del_aux_device(pg, &pg->ucsi_aux); + + mutex_lock(&__pmic_glink_lock); + __pmic_glink = NULL; + mutex_unlock(&__pmic_glink_lock); + + return 0; +} + +static const unsigned long pmic_glink_sm8450_client_mask = BIT(PMIC_GLINK_CLIENT_BATT) | + BIT(PMIC_GLINK_CLIENT_ALTMODE) | + BIT(PMIC_GLINK_CLIENT_UCSI); + +static const struct of_device_id pmic_glink_of_match[] = { + { .compatible = "qcom,sm8450-pmic-glink", .data = &pmic_glink_sm8450_client_mask }, + { .compatible = "qcom,sm8550-pmic-glink", .data = &pmic_glink_sm8450_client_mask }, + { .compatible = "qcom,pmic-glink" }, + {} +}; +MODULE_DEVICE_TABLE(of, pmic_glink_of_match); + +static struct platform_driver pmic_glink_driver = { + .probe = pmic_glink_probe, + .remove = pmic_glink_remove, + .driver = { + .name = "qcom_pmic_glink", + .of_match_table = pmic_glink_of_match, + }, +}; + +static int pmic_glink_init(void) +{ + platform_driver_register(&pmic_glink_driver); + register_rpmsg_driver(&pmic_glink_rpmsg_driver); + + return 0; +}; +module_init(pmic_glink_init); + +static void pmic_glink_exit(void) +{ + unregister_rpmsg_driver(&pmic_glink_rpmsg_driver); + platform_driver_unregister(&pmic_glink_driver); +}; +module_exit(pmic_glink_exit); + +MODULE_DESCRIPTION("Qualcomm PMIC GLINK driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/soc/qcom/pmic_glink_altmode.c b/drivers/soc/qcom/pmic_glink_altmode.c new file mode 100644 index 0000000000..9b0000b5f0 --- /dev/null +++ b/drivers/soc/qcom/pmic_glink_altmode.c @@ -0,0 +1,551 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2022, Linaro Ltd + */ +#include <linux/auxiliary_bus.h> +#include <linux/bitfield.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/mutex.h> +#include <linux/property.h> +#include <linux/soc/qcom/pdr.h> +#include <drm/drm_bridge.h> + +#include <linux/usb/typec_altmode.h> +#include <linux/usb/typec_dp.h> +#include <linux/usb/typec_mux.h> +#include <linux/usb/typec_retimer.h> + +#include <linux/soc/qcom/pmic_glink.h> + +#define PMIC_GLINK_MAX_PORTS 2 + +#define USBC_SC8180X_NOTIFY_IND 0x13 +#define USBC_CMD_WRITE_REQ 0x15 +#define USBC_NOTIFY_IND 0x16 + +#define ALTMODE_PAN_EN 0x10 +#define ALTMODE_PAN_ACK 0x11 + +struct usbc_write_req { + struct pmic_glink_hdr hdr; + __le32 cmd; + __le32 arg; + __le32 reserved; +}; + +#define NOTIFY_PAYLOAD_SIZE 16 +struct usbc_notify { + struct pmic_glink_hdr hdr; + char payload[NOTIFY_PAYLOAD_SIZE]; + u32 reserved; +}; + +struct usbc_sc8180x_notify { + struct pmic_glink_hdr hdr; + __le32 notification; + __le32 reserved[2]; +}; + +enum pmic_glink_altmode_pin_assignment { + DPAM_HPD_OUT, + DPAM_HPD_A, + DPAM_HPD_B, + DPAM_HPD_C, + DPAM_HPD_D, + DPAM_HPD_E, + DPAM_HPD_F, +}; + +struct pmic_glink_altmode; + +#define work_to_altmode_port(w) container_of((w), struct pmic_glink_altmode_port, work) + +struct pmic_glink_altmode_port { + struct pmic_glink_altmode *altmode; + unsigned int index; + + struct typec_switch *typec_switch; + struct typec_mux *typec_mux; + struct typec_mux_state state; + struct typec_retimer *typec_retimer; + struct typec_retimer_state retimer_state; + struct typec_altmode dp_alt; + + struct work_struct work; + + struct drm_bridge bridge; + + enum typec_orientation orientation; + u16 svid; + u8 dp_data; + u8 mode; + u8 hpd_state; + u8 hpd_irq; +}; + +#define work_to_altmode(w) container_of((w), struct pmic_glink_altmode, enable_work) + +struct pmic_glink_altmode { + struct device *dev; + + unsigned int owner_id; + + /* To synchronize WRITE_REQ acks */ + struct mutex lock; + + struct completion pan_ack; + struct pmic_glink_client *client; + + struct work_struct enable_work; + + struct pmic_glink_altmode_port ports[PMIC_GLINK_MAX_PORTS]; +}; + +static int pmic_glink_altmode_request(struct pmic_glink_altmode *altmode, u32 cmd, u32 arg) +{ + struct usbc_write_req req = {}; + unsigned long left; + int ret; + + /* + * The USBC_CMD_WRITE_REQ ack doesn't identify the request, so wait for + * one ack at a time. 
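+	 * The request lock is therefore held across both the send and the
+	 * wait for the pan_ack completion below.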
+ */ + mutex_lock(&altmode->lock); + + req.hdr.owner = cpu_to_le32(altmode->owner_id); + req.hdr.type = cpu_to_le32(PMIC_GLINK_REQ_RESP); + req.hdr.opcode = cpu_to_le32(USBC_CMD_WRITE_REQ); + req.cmd = cpu_to_le32(cmd); + req.arg = cpu_to_le32(arg); + + ret = pmic_glink_send(altmode->client, &req, sizeof(req)); + if (ret) { + dev_err(altmode->dev, "failed to send altmode request: %#x (%d)\n", cmd, ret); + goto out_unlock; + } + + left = wait_for_completion_timeout(&altmode->pan_ack, 5 * HZ); + if (!left) { + dev_err(altmode->dev, "timeout waiting for altmode request ack for: %#x\n", cmd); + ret = -ETIMEDOUT; + } + +out_unlock: + mutex_unlock(&altmode->lock); + return ret; +} + +static void pmic_glink_altmode_enable_dp(struct pmic_glink_altmode *altmode, + struct pmic_glink_altmode_port *port, + u8 mode, bool hpd_state, + bool hpd_irq) +{ + struct typec_displayport_data dp_data = {}; + int ret; + + dp_data.status = DP_STATUS_ENABLED; + if (hpd_state) + dp_data.status |= DP_STATUS_HPD_STATE; + if (hpd_irq) + dp_data.status |= DP_STATUS_IRQ_HPD; + dp_data.conf = DP_CONF_SET_PIN_ASSIGN(mode); + + port->state.alt = &port->dp_alt; + port->state.data = &dp_data; + port->state.mode = TYPEC_MODAL_STATE(mode); + + ret = typec_mux_set(port->typec_mux, &port->state); + if (ret) + dev_err(altmode->dev, "failed to switch mux to DP\n"); + + port->retimer_state.alt = &port->dp_alt; + port->retimer_state.data = &dp_data; + port->retimer_state.mode = TYPEC_MODAL_STATE(mode); + + ret = typec_retimer_set(port->typec_retimer, &port->retimer_state); + if (ret) + dev_err(altmode->dev, "failed to setup retimer to DP\n"); +} + +static void pmic_glink_altmode_enable_usb(struct pmic_glink_altmode *altmode, + struct pmic_glink_altmode_port *port) +{ + int ret; + + port->state.alt = NULL; + port->state.data = NULL; + port->state.mode = TYPEC_STATE_USB; + + ret = typec_mux_set(port->typec_mux, &port->state); + if (ret) + dev_err(altmode->dev, "failed to switch mux to USB\n"); + + port->retimer_state.alt = NULL; + port->retimer_state.data = NULL; + port->retimer_state.mode = TYPEC_STATE_USB; + + ret = typec_retimer_set(port->typec_retimer, &port->retimer_state); + if (ret) + dev_err(altmode->dev, "failed to setup retimer to USB\n"); +} + +static void pmic_glink_altmode_safe(struct pmic_glink_altmode *altmode, + struct pmic_glink_altmode_port *port) +{ + int ret; + + port->state.alt = NULL; + port->state.data = NULL; + port->state.mode = TYPEC_STATE_SAFE; + + ret = typec_mux_set(port->typec_mux, &port->state); + if (ret) + dev_err(altmode->dev, "failed to switch mux to safe mode\n"); + + port->retimer_state.alt = NULL; + port->retimer_state.data = NULL; + port->retimer_state.mode = TYPEC_STATE_SAFE; + + ret = typec_retimer_set(port->typec_retimer, &port->retimer_state); + if (ret) + dev_err(altmode->dev, "failed to setup retimer to USB\n"); +} + +static void pmic_glink_altmode_worker(struct work_struct *work) +{ + struct pmic_glink_altmode_port *alt_port = work_to_altmode_port(work); + struct pmic_glink_altmode *altmode = alt_port->altmode; + + typec_switch_set(alt_port->typec_switch, alt_port->orientation); + + if (alt_port->svid == USB_TYPEC_DP_SID && alt_port->mode == 0xff) + pmic_glink_altmode_safe(altmode, alt_port); + else if (alt_port->svid == USB_TYPEC_DP_SID) + pmic_glink_altmode_enable_dp(altmode, alt_port, alt_port->mode, + alt_port->hpd_state, alt_port->hpd_irq); + else + pmic_glink_altmode_enable_usb(altmode, alt_port); + + if (alt_port->hpd_state) + drm_bridge_hpd_notify(&alt_port->bridge, 
connector_status_connected); + else + drm_bridge_hpd_notify(&alt_port->bridge, connector_status_disconnected); + + pmic_glink_altmode_request(altmode, ALTMODE_PAN_ACK, alt_port->index); +}; + +static enum typec_orientation pmic_glink_altmode_orientation(unsigned int orientation) +{ + if (orientation == 0) + return TYPEC_ORIENTATION_NORMAL; + else if (orientation == 1) + return TYPEC_ORIENTATION_REVERSE; + else + return TYPEC_ORIENTATION_NONE; +} + +#define SC8180X_PORT_MASK 0x000000ff +#define SC8180X_ORIENTATION_MASK 0x0000ff00 +#define SC8180X_MUX_MASK 0x00ff0000 +#define SC8180X_MODE_MASK 0x3f000000 +#define SC8180X_HPD_STATE_MASK 0x40000000 +#define SC8180X_HPD_IRQ_MASK 0x80000000 + +static void pmic_glink_altmode_sc8180xp_notify(struct pmic_glink_altmode *altmode, + const void *data, size_t len) +{ + struct pmic_glink_altmode_port *alt_port; + const struct usbc_sc8180x_notify *msg; + u32 notification; + u8 orientation; + u8 hpd_state; + u8 hpd_irq; + u16 svid; + u8 port; + u8 mode; + u8 mux; + + if (len != sizeof(*msg)) { + dev_warn(altmode->dev, "invalid length of USBC_NOTIFY indication: %zd\n", len); + return; + } + + msg = data; + notification = le32_to_cpu(msg->notification); + port = FIELD_GET(SC8180X_PORT_MASK, notification); + orientation = FIELD_GET(SC8180X_ORIENTATION_MASK, notification); + mux = FIELD_GET(SC8180X_MUX_MASK, notification); + mode = FIELD_GET(SC8180X_MODE_MASK, notification); + hpd_state = FIELD_GET(SC8180X_HPD_STATE_MASK, notification); + hpd_irq = FIELD_GET(SC8180X_HPD_IRQ_MASK, notification); + + svid = mux == 2 ? USB_TYPEC_DP_SID : 0; + + if (port >= ARRAY_SIZE(altmode->ports) || !altmode->ports[port].altmode) { + dev_dbg(altmode->dev, "notification on undefined port %d\n", port); + return; + } + + alt_port = &altmode->ports[port]; + alt_port->orientation = pmic_glink_altmode_orientation(orientation); + alt_port->svid = svid; + alt_port->mode = mode; + alt_port->hpd_state = hpd_state; + alt_port->hpd_irq = hpd_irq; + schedule_work(&alt_port->work); +} + +#define SC8280XP_DPAM_MASK 0x3f +#define SC8280XP_HPD_STATE_MASK BIT(6) +#define SC8280XP_HPD_IRQ_MASK BIT(7) + +static void pmic_glink_altmode_sc8280xp_notify(struct pmic_glink_altmode *altmode, + u16 svid, const void *data, size_t len) +{ + struct pmic_glink_altmode_port *alt_port; + const struct usbc_notify *notify; + u8 orientation; + u8 hpd_state; + u8 hpd_irq; + u8 mode; + u8 port; + + if (len != sizeof(*notify)) { + dev_warn(altmode->dev, "invalid length USBC_NOTIFY_IND: %zd\n", + len); + return; + } + + notify = data; + + port = notify->payload[0]; + orientation = notify->payload[1]; + mode = FIELD_GET(SC8280XP_DPAM_MASK, notify->payload[8]) - DPAM_HPD_A; + hpd_state = FIELD_GET(SC8280XP_HPD_STATE_MASK, notify->payload[8]); + hpd_irq = FIELD_GET(SC8280XP_HPD_IRQ_MASK, notify->payload[8]); + + if (port >= ARRAY_SIZE(altmode->ports) || !altmode->ports[port].altmode) { + dev_dbg(altmode->dev, "notification on undefined port %d\n", port); + return; + } + + alt_port = &altmode->ports[port]; + alt_port->orientation = pmic_glink_altmode_orientation(orientation); + alt_port->svid = svid; + alt_port->mode = mode; + alt_port->hpd_state = hpd_state; + alt_port->hpd_irq = hpd_irq; + schedule_work(&alt_port->work); +} + +static void pmic_glink_altmode_callback(const void *data, size_t len, void *priv) +{ + struct pmic_glink_altmode *altmode = priv; + const struct pmic_glink_hdr *hdr = data; + u16 opcode; + u16 svid; + + opcode = le32_to_cpu(hdr->opcode) & 0xff; + svid = le32_to_cpu(hdr->opcode) >> 16; + + switch 
(opcode) { + case USBC_CMD_WRITE_REQ: + complete(&altmode->pan_ack); + break; + case USBC_NOTIFY_IND: + pmic_glink_altmode_sc8280xp_notify(altmode, svid, data, len); + break; + case USBC_SC8180X_NOTIFY_IND: + pmic_glink_altmode_sc8180xp_notify(altmode, data, len); + break; + } +} + +static int pmic_glink_altmode_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL; +} + +static const struct drm_bridge_funcs pmic_glink_altmode_bridge_funcs = { + .attach = pmic_glink_altmode_attach, +}; + +static void pmic_glink_altmode_put_retimer(void *data) +{ + typec_retimer_put(data); +} + +static void pmic_glink_altmode_put_mux(void *data) +{ + typec_mux_put(data); +} + +static void pmic_glink_altmode_put_switch(void *data) +{ + typec_switch_put(data); +} + +static void pmic_glink_altmode_enable_worker(struct work_struct *work) +{ + struct pmic_glink_altmode *altmode = work_to_altmode(work); + int ret; + + ret = pmic_glink_altmode_request(altmode, ALTMODE_PAN_EN, 0); + if (ret) + dev_err(altmode->dev, "failed to request altmode notifications\n"); +} + +static void pmic_glink_altmode_pdr_notify(void *priv, int state) +{ + struct pmic_glink_altmode *altmode = priv; + + if (state == SERVREG_SERVICE_STATE_UP) + schedule_work(&altmode->enable_work); +} + +static const struct of_device_id pmic_glink_altmode_of_quirks[] = { + { .compatible = "qcom,sc8180x-pmic-glink", .data = (void *)PMIC_GLINK_OWNER_USBC }, + {} +}; + +static int pmic_glink_altmode_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) +{ + struct pmic_glink_altmode_port *alt_port; + struct pmic_glink_altmode *altmode; + const struct of_device_id *match; + struct fwnode_handle *fwnode; + struct device *dev = &adev->dev; + u32 port; + int ret; + + altmode = devm_kzalloc(dev, sizeof(*altmode), GFP_KERNEL); + if (!altmode) + return -ENOMEM; + + altmode->dev = dev; + + match = of_match_device(pmic_glink_altmode_of_quirks, dev->parent); + if (match) + altmode->owner_id = (unsigned long)match->data; + else + altmode->owner_id = PMIC_GLINK_OWNER_USBC_PAN; + + INIT_WORK(&altmode->enable_work, pmic_glink_altmode_enable_worker); + init_completion(&altmode->pan_ack); + mutex_init(&altmode->lock); + + device_for_each_child_node(dev, fwnode) { + ret = fwnode_property_read_u32(fwnode, "reg", &port); + if (ret < 0) { + dev_err(dev, "missing reg property of %pOFn\n", fwnode); + fwnode_handle_put(fwnode); + return ret; + } + + if (port >= ARRAY_SIZE(altmode->ports)) { + dev_warn(dev, "invalid connector number, ignoring\n"); + continue; + } + + if (altmode->ports[port].altmode) { + dev_err(dev, "multiple connector definition for port %u\n", port); + fwnode_handle_put(fwnode); + return -EINVAL; + } + + alt_port = &altmode->ports[port]; + alt_port->altmode = altmode; + alt_port->index = port; + INIT_WORK(&alt_port->work, pmic_glink_altmode_worker); + + alt_port->bridge.funcs = &pmic_glink_altmode_bridge_funcs; + alt_port->bridge.of_node = to_of_node(fwnode); + alt_port->bridge.ops = DRM_BRIDGE_OP_HPD; + alt_port->bridge.type = DRM_MODE_CONNECTOR_DisplayPort; + + ret = devm_drm_bridge_add(dev, &alt_port->bridge); + if (ret) { + fwnode_handle_put(fwnode); + return ret; + } + + alt_port->dp_alt.svid = USB_TYPEC_DP_SID; + alt_port->dp_alt.mode = USB_TYPEC_DP_MODE; + alt_port->dp_alt.active = 1; + + alt_port->typec_mux = fwnode_typec_mux_get(fwnode); + if (IS_ERR(alt_port->typec_mux)) { + fwnode_handle_put(fwnode); + return dev_err_probe(dev, 
PTR_ERR(alt_port->typec_mux), + "failed to acquire mode-switch for port: %d\n", + port); + } + + ret = devm_add_action_or_reset(dev, pmic_glink_altmode_put_mux, + alt_port->typec_mux); + if (ret) { + fwnode_handle_put(fwnode); + return ret; + } + + alt_port->typec_retimer = fwnode_typec_retimer_get(fwnode); + if (IS_ERR(alt_port->typec_retimer)) { + fwnode_handle_put(fwnode); + return dev_err_probe(dev, PTR_ERR(alt_port->typec_retimer), + "failed to acquire retimer-switch for port: %d\n", + port); + } + + ret = devm_add_action_or_reset(dev, pmic_glink_altmode_put_retimer, + alt_port->typec_retimer); + if (ret) { + fwnode_handle_put(fwnode); + return ret; + } + + alt_port->typec_switch = fwnode_typec_switch_get(fwnode); + if (IS_ERR(alt_port->typec_switch)) { + fwnode_handle_put(fwnode); + return dev_err_probe(dev, PTR_ERR(alt_port->typec_switch), + "failed to acquire orientation-switch for port: %d\n", + port); + } + + ret = devm_add_action_or_reset(dev, pmic_glink_altmode_put_switch, + alt_port->typec_switch); + if (ret) { + fwnode_handle_put(fwnode); + return ret; + } + } + + altmode->client = devm_pmic_glink_register_client(dev, + altmode->owner_id, + pmic_glink_altmode_callback, + pmic_glink_altmode_pdr_notify, + altmode); + return PTR_ERR_OR_ZERO(altmode->client); +} + +static const struct auxiliary_device_id pmic_glink_altmode_id_table[] = { + { .name = "pmic_glink.altmode", }, + {}, +}; +MODULE_DEVICE_TABLE(auxiliary, pmic_glink_altmode_id_table); + +static struct auxiliary_driver pmic_glink_altmode_driver = { + .name = "pmic_glink_altmode", + .probe = pmic_glink_altmode_probe, + .id_table = pmic_glink_altmode_id_table, +}; + +module_auxiliary_driver(pmic_glink_altmode_driver); + +MODULE_DESCRIPTION("Qualcomm PMIC GLINK Altmode driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c new file mode 100644 index 0000000000..ba78876283 --- /dev/null +++ b/drivers/soc/qcom/qcom-geni-se.c @@ -0,0 +1,982 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + +/* Disable MMIO tracing to prevent excessive logging of unwanted MMIO traces */ +#define __DISABLE_TRACE_MMIO__ + +#include <linux/acpi.h> +#include <linux/clk.h> +#include <linux/slab.h> +#include <linux/dma-mapping.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/pinctrl/consumer.h> +#include <linux/platform_device.h> +#include <linux/soc/qcom/geni-se.h> + +/** + * DOC: Overview + * + * Generic Interface (GENI) Serial Engine (SE) Wrapper driver is introduced + * to manage GENI firmware based Qualcomm Universal Peripheral (QUP) Wrapper + * controller. QUP Wrapper is designed to support various serial bus protocols + * like UART, SPI, I2C, I3C, etc. + */ + +/** + * DOC: Hardware description + * + * GENI based QUP is a highly-flexible and programmable module for supporting + * a wide range of serial interfaces like UART, SPI, I2C, I3C, etc. A single + * QUP module can provide upto 8 serial interfaces, using its internal + * serial engines. The actual configuration is determined by the target + * platform configuration. The protocol supported by each interface is + * determined by the firmware loaded to the serial engine. Each SE consists + * of a DMA Engine and GENI sub modules which enable serial engines to + * support FIFO and DMA modes of operation. 
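+ * In FIFO mode the CPU services the TX and RX FIFOs from watermark
+ * interrupts, while in DMA mode the SE's internal DMA engine moves data
+ * between the FIFOs and system memory directly.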
+ * + * + * +-----------------------------------------+ + * |QUP Wrapper | + * | +----------------------------+ | + * --QUP & SE Clocks--> | Serial Engine N | +-IO------> + * | | ... | | Interface + * <---Clock Perf.----+ +----+-----------------------+ | | + * State Interface | | Serial Engine 1 | | | + * | | | | | + * | | | | | + * <--------AHB-------> | | | | + * | | +----+ | + * | | | | + * | | | | + * <------SE IRQ------+ +----------------------------+ | + * | | + * +-----------------------------------------+ + * + * Figure 1: GENI based QUP Wrapper + * + * The GENI submodules include primary and secondary sequencers which are + * used to drive TX & RX operations. On serial interfaces that operate using + * master-slave model, primary sequencer drives both TX & RX operations. On + * serial interfaces that operate using peer-to-peer model, primary sequencer + * drives TX operation and secondary sequencer drives RX operation. + */ + +/** + * DOC: Software description + * + * GENI SE Wrapper driver is structured into 2 parts: + * + * geni_wrapper represents QUP Wrapper controller. This part of the driver + * manages QUP Wrapper information such as hardware version, clock + * performance table that is common to all the internal serial engines. + * + * geni_se represents serial engine. This part of the driver manages serial + * engine information such as clocks, containing QUP Wrapper, etc. This part + * of driver also supports operations (eg. initialize the concerned serial + * engine, select between FIFO and DMA mode of operation etc.) that are + * common to all the serial engines and are independent of serial interfaces. + */ + +#define MAX_CLK_PERF_LEVEL 32 +#define MAX_CLKS 2 + +/** + * struct geni_wrapper - Data structure to represent the QUP Wrapper Core + * @dev: Device pointer of the QUP wrapper core + * @base: Base address of this instance of QUP wrapper core + * @clks: Handle to the primary & optional secondary AHB clocks + * @num_clks: Count of clocks + * @to_core: Core ICC path + */ +struct geni_wrapper { + struct device *dev; + void __iomem *base; + struct clk_bulk_data clks[MAX_CLKS]; + unsigned int num_clks; +}; + +/** + * struct geni_se_desc - Data structure to represent the QUP Wrapper resources + * @clks: Name of the primary & optional secondary AHB clocks + * @num_clks: Count of clock names + */ +struct geni_se_desc { + unsigned int num_clks; + const char * const *clks; +}; + +static const char * const icc_path_names[] = {"qup-core", "qup-config", + "qup-memory"}; + +#define QUP_HW_VER_REG 0x4 + +/* Common SE registers */ +#define GENI_INIT_CFG_REVISION 0x0 +#define GENI_S_INIT_CFG_REVISION 0x4 +#define GENI_OUTPUT_CTRL 0x24 +#define GENI_CGC_CTRL 0x28 +#define GENI_CLK_CTRL_RO 0x60 +#define GENI_FW_S_REVISION_RO 0x6c +#define SE_GENI_BYTE_GRAN 0x254 +#define SE_GENI_TX_PACKING_CFG0 0x260 +#define SE_GENI_TX_PACKING_CFG1 0x264 +#define SE_GENI_RX_PACKING_CFG0 0x284 +#define SE_GENI_RX_PACKING_CFG1 0x288 +#define SE_GENI_M_GP_LENGTH 0x910 +#define SE_GENI_S_GP_LENGTH 0x914 +#define SE_DMA_TX_PTR_L 0xc30 +#define SE_DMA_TX_PTR_H 0xc34 +#define SE_DMA_TX_ATTR 0xc38 +#define SE_DMA_TX_LEN 0xc3c +#define SE_DMA_TX_IRQ_EN 0xc48 +#define SE_DMA_TX_IRQ_EN_SET 0xc4c +#define SE_DMA_TX_IRQ_EN_CLR 0xc50 +#define SE_DMA_TX_LEN_IN 0xc54 +#define SE_DMA_TX_MAX_BURST 0xc5c +#define SE_DMA_RX_PTR_L 0xd30 +#define SE_DMA_RX_PTR_H 0xd34 +#define SE_DMA_RX_ATTR 0xd38 +#define SE_DMA_RX_LEN 0xd3c +#define SE_DMA_RX_IRQ_EN 0xd48 +#define SE_DMA_RX_IRQ_EN_SET 0xd4c +#define 
SE_DMA_RX_IRQ_EN_CLR 0xd50 +#define SE_DMA_RX_LEN_IN 0xd54 +#define SE_DMA_RX_MAX_BURST 0xd5c +#define SE_DMA_RX_FLUSH 0xd60 +#define SE_GSI_EVENT_EN 0xe18 +#define SE_IRQ_EN 0xe1c +#define SE_DMA_GENERAL_CFG 0xe30 + +/* GENI_OUTPUT_CTRL fields */ +#define DEFAULT_IO_OUTPUT_CTRL_MSK GENMASK(6, 0) + +/* GENI_CGC_CTRL fields */ +#define CFG_AHB_CLK_CGC_ON BIT(0) +#define CFG_AHB_WR_ACLK_CGC_ON BIT(1) +#define DATA_AHB_CLK_CGC_ON BIT(2) +#define SCLK_CGC_ON BIT(3) +#define TX_CLK_CGC_ON BIT(4) +#define RX_CLK_CGC_ON BIT(5) +#define EXT_CLK_CGC_ON BIT(6) +#define PROG_RAM_HCLK_OFF BIT(8) +#define PROG_RAM_SCLK_OFF BIT(9) +#define DEFAULT_CGC_EN GENMASK(6, 0) + +/* SE_GSI_EVENT_EN fields */ +#define DMA_RX_EVENT_EN BIT(0) +#define DMA_TX_EVENT_EN BIT(1) +#define GENI_M_EVENT_EN BIT(2) +#define GENI_S_EVENT_EN BIT(3) + +/* SE_IRQ_EN fields */ +#define DMA_RX_IRQ_EN BIT(0) +#define DMA_TX_IRQ_EN BIT(1) +#define GENI_M_IRQ_EN BIT(2) +#define GENI_S_IRQ_EN BIT(3) + +/* SE_DMA_GENERAL_CFG */ +#define DMA_RX_CLK_CGC_ON BIT(0) +#define DMA_TX_CLK_CGC_ON BIT(1) +#define DMA_AHB_SLV_CFG_ON BIT(2) +#define AHB_SEC_SLV_CLK_CGC_ON BIT(3) +#define DUMMY_RX_NON_BUFFERABLE BIT(4) +#define RX_DMA_ZERO_PADDING_EN BIT(5) +#define RX_DMA_IRQ_DELAY_MSK GENMASK(8, 6) +#define RX_DMA_IRQ_DELAY_SHFT 6 + +/** + * geni_se_get_qup_hw_version() - Read the QUP wrapper Hardware version + * @se: Pointer to the corresponding serial engine. + * + * Return: Hardware Version of the wrapper. + */ +u32 geni_se_get_qup_hw_version(struct geni_se *se) +{ + struct geni_wrapper *wrapper = se->wrapper; + + return readl_relaxed(wrapper->base + QUP_HW_VER_REG); +} +EXPORT_SYMBOL(geni_se_get_qup_hw_version); + +static void geni_se_io_set_mode(void __iomem *base) +{ + u32 val; + + val = readl_relaxed(base + SE_IRQ_EN); + val |= GENI_M_IRQ_EN | GENI_S_IRQ_EN; + val |= DMA_TX_IRQ_EN | DMA_RX_IRQ_EN; + writel_relaxed(val, base + SE_IRQ_EN); + + val = readl_relaxed(base + SE_GENI_DMA_MODE_EN); + val &= ~GENI_DMA_MODE_EN; + writel_relaxed(val, base + SE_GENI_DMA_MODE_EN); + + writel_relaxed(0, base + SE_GSI_EVENT_EN); +} + +static void geni_se_io_init(void __iomem *base) +{ + u32 val; + + val = readl_relaxed(base + GENI_CGC_CTRL); + val |= DEFAULT_CGC_EN; + writel_relaxed(val, base + GENI_CGC_CTRL); + + val = readl_relaxed(base + SE_DMA_GENERAL_CFG); + val |= AHB_SEC_SLV_CLK_CGC_ON | DMA_AHB_SLV_CFG_ON; + val |= DMA_TX_CLK_CGC_ON | DMA_RX_CLK_CGC_ON; + writel_relaxed(val, base + SE_DMA_GENERAL_CFG); + + writel_relaxed(DEFAULT_IO_OUTPUT_CTRL_MSK, base + GENI_OUTPUT_CTRL); + writel_relaxed(FORCE_DEFAULT, base + GENI_FORCE_DEFAULT_REG); +} + +static void geni_se_irq_clear(struct geni_se *se) +{ + writel_relaxed(0, se->base + SE_GSI_EVENT_EN); + writel_relaxed(0xffffffff, se->base + SE_GENI_M_IRQ_CLEAR); + writel_relaxed(0xffffffff, se->base + SE_GENI_S_IRQ_CLEAR); + writel_relaxed(0xffffffff, se->base + SE_DMA_TX_IRQ_CLR); + writel_relaxed(0xffffffff, se->base + SE_DMA_RX_IRQ_CLR); + writel_relaxed(0xffffffff, se->base + SE_IRQ_EN); +} + +/** + * geni_se_init() - Initialize the GENI serial engine + * @se: Pointer to the concerned serial engine. + * @rx_wm: Receive watermark, in units of FIFO words. + * @rx_rfr: Ready-for-receive watermark, in units of FIFO words. + * + * This function is used to initialize the GENI serial engine, configure + * receive watermark and ready-for-receive watermarks. 
+ */ +void geni_se_init(struct geni_se *se, u32 rx_wm, u32 rx_rfr) +{ + u32 val; + + geni_se_irq_clear(se); + geni_se_io_init(se->base); + geni_se_io_set_mode(se->base); + + writel_relaxed(rx_wm, se->base + SE_GENI_RX_WATERMARK_REG); + writel_relaxed(rx_rfr, se->base + SE_GENI_RX_RFR_WATERMARK_REG); + + val = readl_relaxed(se->base + SE_GENI_M_IRQ_EN); + val |= M_COMMON_GENI_M_IRQ_EN; + writel_relaxed(val, se->base + SE_GENI_M_IRQ_EN); + + val = readl_relaxed(se->base + SE_GENI_S_IRQ_EN); + val |= S_COMMON_GENI_S_IRQ_EN; + writel_relaxed(val, se->base + SE_GENI_S_IRQ_EN); +} +EXPORT_SYMBOL(geni_se_init); + +static void geni_se_select_fifo_mode(struct geni_se *se) +{ + u32 proto = geni_se_read_proto(se); + u32 val, val_old; + + geni_se_irq_clear(se); + + /* UART driver manages enabling / disabling interrupts internally */ + if (proto != GENI_SE_UART) { + /* Non-UART use only primary sequencer so dont bother about S_IRQ */ + val_old = val = readl_relaxed(se->base + SE_GENI_M_IRQ_EN); + val |= M_CMD_DONE_EN | M_TX_FIFO_WATERMARK_EN; + val |= M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN; + if (val != val_old) + writel_relaxed(val, se->base + SE_GENI_M_IRQ_EN); + } + + val_old = val = readl_relaxed(se->base + SE_GENI_DMA_MODE_EN); + val &= ~GENI_DMA_MODE_EN; + if (val != val_old) + writel_relaxed(val, se->base + SE_GENI_DMA_MODE_EN); +} + +static void geni_se_select_dma_mode(struct geni_se *se) +{ + u32 proto = geni_se_read_proto(se); + u32 val, val_old; + + geni_se_irq_clear(se); + + /* UART driver manages enabling / disabling interrupts internally */ + if (proto != GENI_SE_UART) { + /* Non-UART use only primary sequencer so dont bother about S_IRQ */ + val_old = val = readl_relaxed(se->base + SE_GENI_M_IRQ_EN); + val &= ~(M_CMD_DONE_EN | M_TX_FIFO_WATERMARK_EN); + val &= ~(M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN); + if (val != val_old) + writel_relaxed(val, se->base + SE_GENI_M_IRQ_EN); + } + + val_old = val = readl_relaxed(se->base + SE_GENI_DMA_MODE_EN); + val |= GENI_DMA_MODE_EN; + if (val != val_old) + writel_relaxed(val, se->base + SE_GENI_DMA_MODE_EN); +} + +static void geni_se_select_gpi_mode(struct geni_se *se) +{ + u32 val; + + geni_se_irq_clear(se); + + writel(0, se->base + SE_IRQ_EN); + + val = readl(se->base + SE_GENI_M_IRQ_EN); + val &= ~(M_CMD_DONE_EN | M_TX_FIFO_WATERMARK_EN | + M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN); + writel(val, se->base + SE_GENI_M_IRQ_EN); + + writel(GENI_DMA_MODE_EN, se->base + SE_GENI_DMA_MODE_EN); + + val = readl(se->base + SE_GSI_EVENT_EN); + val |= (DMA_RX_EVENT_EN | DMA_TX_EVENT_EN | GENI_M_EVENT_EN | GENI_S_EVENT_EN); + writel(val, se->base + SE_GSI_EVENT_EN); +} + +/** + * geni_se_select_mode() - Select the serial engine transfer mode + * @se: Pointer to the concerned serial engine. + * @mode: Transfer mode to be selected. + */ +void geni_se_select_mode(struct geni_se *se, enum geni_se_xfer_mode mode) +{ + WARN_ON(mode != GENI_SE_FIFO && mode != GENI_SE_DMA && mode != GENI_GPI_DMA); + + switch (mode) { + case GENI_SE_FIFO: + geni_se_select_fifo_mode(se); + break; + case GENI_SE_DMA: + geni_se_select_dma_mode(se); + break; + case GENI_GPI_DMA: + geni_se_select_gpi_mode(se); + break; + case GENI_SE_INVALID: + default: + break; + } +} +EXPORT_SYMBOL(geni_se_select_mode); + +/** + * DOC: Overview + * + * GENI FIFO packing is highly configurable. 
TX/RX packing/unpacking consist + * of up to 4 operations, each operation represented by 4 configuration vectors + * of 10 bits programmed in GENI_TX_PACKING_CFG0 and GENI_TX_PACKING_CFG1 for + * TX FIFO and in GENI_RX_PACKING_CFG0 and GENI_RX_PACKING_CFG1 for RX FIFO. + * Refer to below examples for detailed bit-field description. + * + * Example 1: word_size = 7, packing_mode = 4 x 8, msb_to_lsb = 1 + * + * +-----------+-------+-------+-------+-------+ + * | | vec_0 | vec_1 | vec_2 | vec_3 | + * +-----------+-------+-------+-------+-------+ + * | start | 0x6 | 0xe | 0x16 | 0x1e | + * | direction | 1 | 1 | 1 | 1 | + * | length | 6 | 6 | 6 | 6 | + * | stop | 0 | 0 | 0 | 1 | + * +-----------+-------+-------+-------+-------+ + * + * Example 2: word_size = 15, packing_mode = 2 x 16, msb_to_lsb = 0 + * + * +-----------+-------+-------+-------+-------+ + * | | vec_0 | vec_1 | vec_2 | vec_3 | + * +-----------+-------+-------+-------+-------+ + * | start | 0x0 | 0x8 | 0x10 | 0x18 | + * | direction | 0 | 0 | 0 | 0 | + * | length | 7 | 6 | 7 | 6 | + * | stop | 0 | 0 | 0 | 1 | + * +-----------+-------+-------+-------+-------+ + * + * Example 3: word_size = 23, packing_mode = 1 x 32, msb_to_lsb = 1 + * + * +-----------+-------+-------+-------+-------+ + * | | vec_0 | vec_1 | vec_2 | vec_3 | + * +-----------+-------+-------+-------+-------+ + * | start | 0x16 | 0xe | 0x6 | 0x0 | + * | direction | 1 | 1 | 1 | 1 | + * | length | 7 | 7 | 6 | 0 | + * | stop | 0 | 0 | 1 | 0 | + * +-----------+-------+-------+-------+-------+ + * + */ + +#define NUM_PACKING_VECTORS 4 +#define PACKING_START_SHIFT 5 +#define PACKING_DIR_SHIFT 4 +#define PACKING_LEN_SHIFT 1 +#define PACKING_STOP_BIT BIT(0) +#define PACKING_VECTOR_SHIFT 10 +/** + * geni_se_config_packing() - Packing configuration of the serial engine + * @se: Pointer to the concerned serial engine + * @bpw: Bits of data per transfer word. + * @pack_words: Number of words per fifo element. + * @msb_to_lsb: Transfer from MSB to LSB or vice-versa. + * @tx_cfg: Flag to configure the TX Packing. + * @rx_cfg: Flag to configure the RX Packing. + * + * This function is used to configure the packing rules for the current + * transfer. + */ +void geni_se_config_packing(struct geni_se *se, int bpw, int pack_words, + bool msb_to_lsb, bool tx_cfg, bool rx_cfg) +{ + u32 cfg0, cfg1, cfg[NUM_PACKING_VECTORS] = {0}; + int len; + int temp_bpw = bpw; + int idx_start = msb_to_lsb ? bpw - 1 : 0; + int idx = idx_start; + int idx_delta = msb_to_lsb ? 
-BITS_PER_BYTE : BITS_PER_BYTE; + int ceil_bpw = ALIGN(bpw, BITS_PER_BYTE); + int iter = (ceil_bpw * pack_words) / BITS_PER_BYTE; + int i; + + if (iter <= 0 || iter > NUM_PACKING_VECTORS) + return; + + for (i = 0; i < iter; i++) { + len = min_t(int, temp_bpw, BITS_PER_BYTE) - 1; + cfg[i] = idx << PACKING_START_SHIFT; + cfg[i] |= msb_to_lsb << PACKING_DIR_SHIFT; + cfg[i] |= len << PACKING_LEN_SHIFT; + + if (temp_bpw <= BITS_PER_BYTE) { + idx = ((i + 1) * BITS_PER_BYTE) + idx_start; + temp_bpw = bpw; + } else { + idx = idx + idx_delta; + temp_bpw = temp_bpw - BITS_PER_BYTE; + } + } + cfg[iter - 1] |= PACKING_STOP_BIT; + cfg0 = cfg[0] | (cfg[1] << PACKING_VECTOR_SHIFT); + cfg1 = cfg[2] | (cfg[3] << PACKING_VECTOR_SHIFT); + + if (tx_cfg) { + writel_relaxed(cfg0, se->base + SE_GENI_TX_PACKING_CFG0); + writel_relaxed(cfg1, se->base + SE_GENI_TX_PACKING_CFG1); + } + if (rx_cfg) { + writel_relaxed(cfg0, se->base + SE_GENI_RX_PACKING_CFG0); + writel_relaxed(cfg1, se->base + SE_GENI_RX_PACKING_CFG1); + } + + /* + * Number of protocol words in each FIFO entry + * 0 - 4x8, four words in each entry, max word size of 8 bits + * 1 - 2x16, two words in each entry, max word size of 16 bits + * 2 - 1x32, one word in each entry, max word size of 32 bits + * 3 - undefined + */ + if (pack_words || bpw == 32) + writel_relaxed(bpw / 16, se->base + SE_GENI_BYTE_GRAN); +} +EXPORT_SYMBOL(geni_se_config_packing); + +static void geni_se_clks_off(struct geni_se *se) +{ + struct geni_wrapper *wrapper = se->wrapper; + + clk_disable_unprepare(se->clk); + clk_bulk_disable_unprepare(wrapper->num_clks, wrapper->clks); +} + +/** + * geni_se_resources_off() - Turn off resources associated with the serial + * engine + * @se: Pointer to the concerned serial engine. + * + * Return: 0 on success, standard Linux error codes on failure/error. + */ +int geni_se_resources_off(struct geni_se *se) +{ + int ret; + + if (has_acpi_companion(se->dev)) + return 0; + + ret = pinctrl_pm_select_sleep_state(se->dev); + if (ret) + return ret; + + geni_se_clks_off(se); + return 0; +} +EXPORT_SYMBOL(geni_se_resources_off); + +static int geni_se_clks_on(struct geni_se *se) +{ + int ret; + struct geni_wrapper *wrapper = se->wrapper; + + ret = clk_bulk_prepare_enable(wrapper->num_clks, wrapper->clks); + if (ret) + return ret; + + ret = clk_prepare_enable(se->clk); + if (ret) + clk_bulk_disable_unprepare(wrapper->num_clks, wrapper->clks); + return ret; +} + +/** + * geni_se_resources_on() - Turn on resources associated with the serial + * engine + * @se: Pointer to the concerned serial engine. + * + * Return: 0 on success, standard Linux error codes on failure/error. + */ +int geni_se_resources_on(struct geni_se *se) +{ + int ret; + + if (has_acpi_companion(se->dev)) + return 0; + + ret = geni_se_clks_on(se); + if (ret) + return ret; + + ret = pinctrl_pm_select_default_state(se->dev); + if (ret) + geni_se_clks_off(se); + + return ret; +} +EXPORT_SYMBOL(geni_se_resources_on); + +/** + * geni_se_clk_tbl_get() - Get the clock table to program DFS + * @se: Pointer to the concerned serial engine. + * @tbl: Table in which the output is returned. + * + * This function is called by the protocol drivers to determine the different + * clock frequencies supported by serial engine core clock. The protocol + * drivers use the output to determine the clock frequency index to be + * programmed into DFS. + * + * Return: number of valid performance levels in the table on success, + * standard Linux error codes on failure. 
+ */ +int geni_se_clk_tbl_get(struct geni_se *se, unsigned long **tbl) +{ + long freq = 0; + int i; + + if (se->clk_perf_tbl) { + *tbl = se->clk_perf_tbl; + return se->num_clk_levels; + } + + se->clk_perf_tbl = devm_kcalloc(se->dev, MAX_CLK_PERF_LEVEL, + sizeof(*se->clk_perf_tbl), + GFP_KERNEL); + if (!se->clk_perf_tbl) + return -ENOMEM; + + for (i = 0; i < MAX_CLK_PERF_LEVEL; i++) { + freq = clk_round_rate(se->clk, freq + 1); + if (freq <= 0 || freq == se->clk_perf_tbl[i - 1]) + break; + se->clk_perf_tbl[i] = freq; + } + se->num_clk_levels = i; + *tbl = se->clk_perf_tbl; + return se->num_clk_levels; +} +EXPORT_SYMBOL(geni_se_clk_tbl_get); + +/** + * geni_se_clk_freq_match() - Get the matching or closest SE clock frequency + * @se: Pointer to the concerned serial engine. + * @req_freq: Requested clock frequency. + * @index: Index of the resultant frequency in the table. + * @res_freq: Resultant frequency of the source clock. + * @exact: Flag to indicate exact multiple requirement of the requested + * frequency. + * + * This function is called by the protocol drivers to determine the best match + * of the requested frequency as provided by the serial engine clock in order + * to meet the performance requirements. + * + * If we return success: + * - if @exact is true then @res_freq / <an_integer> == @req_freq + * - if @exact is false then @res_freq / <an_integer> <= @req_freq + * + * Return: 0 on success, standard Linux error codes on failure. + */ +int geni_se_clk_freq_match(struct geni_se *se, unsigned long req_freq, + unsigned int *index, unsigned long *res_freq, + bool exact) +{ + unsigned long *tbl; + int num_clk_levels; + int i; + unsigned long best_delta; + unsigned long new_delta; + unsigned int divider; + + num_clk_levels = geni_se_clk_tbl_get(se, &tbl); + if (num_clk_levels < 0) + return num_clk_levels; + + if (num_clk_levels == 0) + return -EINVAL; + + best_delta = ULONG_MAX; + for (i = 0; i < num_clk_levels; i++) { + divider = DIV_ROUND_UP(tbl[i], req_freq); + new_delta = req_freq - tbl[i] / divider; + if (new_delta < best_delta) { + /* We have a new best! */ + *index = i; + *res_freq = tbl[i]; + + /* If the new best is exact then we're done */ + if (new_delta == 0) + return 0; + + /* Record how close we got */ + best_delta = new_delta; + } + } + + if (exact) + return -EINVAL; + + return 0; +} +EXPORT_SYMBOL(geni_se_clk_freq_match); + +#define GENI_SE_DMA_DONE_EN BIT(0) +#define GENI_SE_DMA_EOT_EN BIT(1) +#define GENI_SE_DMA_AHB_ERR_EN BIT(2) +#define GENI_SE_DMA_EOT_BUF BIT(0) + +/** + * geni_se_tx_init_dma() - Initiate TX DMA transfer on the serial engine + * @se: Pointer to the concerned serial engine. + * @iova: Mapped DMA address. + * @len: Length of the TX buffer. + * + * This function is used to initiate DMA TX transfer. + */ +void geni_se_tx_init_dma(struct geni_se *se, dma_addr_t iova, size_t len) +{ + u32 val; + + val = GENI_SE_DMA_DONE_EN; + val |= GENI_SE_DMA_EOT_EN; + val |= GENI_SE_DMA_AHB_ERR_EN; + writel_relaxed(val, se->base + SE_DMA_TX_IRQ_EN_SET); + writel_relaxed(lower_32_bits(iova), se->base + SE_DMA_TX_PTR_L); + writel_relaxed(upper_32_bits(iova), se->base + SE_DMA_TX_PTR_H); + writel_relaxed(GENI_SE_DMA_EOT_BUF, se->base + SE_DMA_TX_ATTR); + writel(len, se->base + SE_DMA_TX_LEN); +} +EXPORT_SYMBOL(geni_se_tx_init_dma); + +/** + * geni_se_tx_dma_prep() - Prepare the serial engine for TX DMA transfer + * @se: Pointer to the concerned serial engine. + * @buf: Pointer to the TX buffer. + * @len: Length of the TX buffer. 
+ * @iova: Pointer to store the mapped DMA address. + * + * This function is used to prepare the buffers for DMA TX. + * + * Return: 0 on success, standard Linux error codes on failure. + */ +int geni_se_tx_dma_prep(struct geni_se *se, void *buf, size_t len, + dma_addr_t *iova) +{ + struct geni_wrapper *wrapper = se->wrapper; + + if (!wrapper) + return -EINVAL; + + *iova = dma_map_single(wrapper->dev, buf, len, DMA_TO_DEVICE); + if (dma_mapping_error(wrapper->dev, *iova)) + return -EIO; + + geni_se_tx_init_dma(se, *iova, len); + return 0; +} +EXPORT_SYMBOL(geni_se_tx_dma_prep); + +/** + * geni_se_rx_init_dma() - Initiate RX DMA transfer on the serial engine + * @se: Pointer to the concerned serial engine. + * @iova: Mapped DMA address. + * @len: Length of the RX buffer. + * + * This function is used to initiate DMA RX transfer. + */ +void geni_se_rx_init_dma(struct geni_se *se, dma_addr_t iova, size_t len) +{ + u32 val; + + val = GENI_SE_DMA_DONE_EN; + val |= GENI_SE_DMA_EOT_EN; + val |= GENI_SE_DMA_AHB_ERR_EN; + writel_relaxed(val, se->base + SE_DMA_RX_IRQ_EN_SET); + writel_relaxed(lower_32_bits(iova), se->base + SE_DMA_RX_PTR_L); + writel_relaxed(upper_32_bits(iova), se->base + SE_DMA_RX_PTR_H); + /* RX does not have EOT buffer type bit. So just reset RX_ATTR */ + writel_relaxed(0, se->base + SE_DMA_RX_ATTR); + writel(len, se->base + SE_DMA_RX_LEN); +} +EXPORT_SYMBOL(geni_se_rx_init_dma); + +/** + * geni_se_rx_dma_prep() - Prepare the serial engine for RX DMA transfer + * @se: Pointer to the concerned serial engine. + * @buf: Pointer to the RX buffer. + * @len: Length of the RX buffer. + * @iova: Pointer to store the mapped DMA address. + * + * This function is used to prepare the buffers for DMA RX. + * + * Return: 0 on success, standard Linux error codes on failure. + */ +int geni_se_rx_dma_prep(struct geni_se *se, void *buf, size_t len, + dma_addr_t *iova) +{ + struct geni_wrapper *wrapper = se->wrapper; + + if (!wrapper) + return -EINVAL; + + *iova = dma_map_single(wrapper->dev, buf, len, DMA_FROM_DEVICE); + if (dma_mapping_error(wrapper->dev, *iova)) + return -EIO; + + geni_se_rx_init_dma(se, *iova, len); + return 0; +} +EXPORT_SYMBOL(geni_se_rx_dma_prep); + +/** + * geni_se_tx_dma_unprep() - Unprepare the serial engine after TX DMA transfer + * @se: Pointer to the concerned serial engine. + * @iova: DMA address of the TX buffer. + * @len: Length of the TX buffer. + * + * This function is used to unprepare the DMA buffers after DMA TX. + */ +void geni_se_tx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len) +{ + struct geni_wrapper *wrapper = se->wrapper; + + if (!dma_mapping_error(wrapper->dev, iova)) + dma_unmap_single(wrapper->dev, iova, len, DMA_TO_DEVICE); +} +EXPORT_SYMBOL(geni_se_tx_dma_unprep); + +/** + * geni_se_rx_dma_unprep() - Unprepare the serial engine after RX DMA transfer + * @se: Pointer to the concerned serial engine. + * @iova: DMA address of the RX buffer. + * @len: Length of the RX buffer. + * + * This function is used to unprepare the DMA buffers after DMA RX. 
+ */ +void geni_se_rx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len) +{ + struct geni_wrapper *wrapper = se->wrapper; + + if (!dma_mapping_error(wrapper->dev, iova)) + dma_unmap_single(wrapper->dev, iova, len, DMA_FROM_DEVICE); +} +EXPORT_SYMBOL(geni_se_rx_dma_unprep); + +int geni_icc_get(struct geni_se *se, const char *icc_ddr) +{ + int i, err; + const char *icc_names[] = {"qup-core", "qup-config", icc_ddr}; + + if (has_acpi_companion(se->dev)) + return 0; + + for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) { + if (!icc_names[i]) + continue; + + se->icc_paths[i].path = devm_of_icc_get(se->dev, icc_names[i]); + if (IS_ERR(se->icc_paths[i].path)) + goto err; + } + + return 0; + +err: + err = PTR_ERR(se->icc_paths[i].path); + if (err != -EPROBE_DEFER) + dev_err_ratelimited(se->dev, "Failed to get ICC path '%s': %d\n", + icc_names[i], err); + return err; + +} +EXPORT_SYMBOL(geni_icc_get); + +int geni_icc_set_bw(struct geni_se *se) +{ + int i, ret; + + for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) { + ret = icc_set_bw(se->icc_paths[i].path, + se->icc_paths[i].avg_bw, se->icc_paths[i].avg_bw); + if (ret) { + dev_err_ratelimited(se->dev, "ICC BW voting failed on path '%s': %d\n", + icc_path_names[i], ret); + return ret; + } + } + + return 0; +} +EXPORT_SYMBOL(geni_icc_set_bw); + +void geni_icc_set_tag(struct geni_se *se, u32 tag) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) + icc_set_tag(se->icc_paths[i].path, tag); +} +EXPORT_SYMBOL(geni_icc_set_tag); + +/* To do: Replace this by icc_bulk_enable once it's implemented in ICC core */ +int geni_icc_enable(struct geni_se *se) +{ + int i, ret; + + for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) { + ret = icc_enable(se->icc_paths[i].path); + if (ret) { + dev_err_ratelimited(se->dev, "ICC enable failed on path '%s': %d\n", + icc_path_names[i], ret); + return ret; + } + } + + return 0; +} +EXPORT_SYMBOL(geni_icc_enable); + +int geni_icc_disable(struct geni_se *se) +{ + int i, ret; + + for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) { + ret = icc_disable(se->icc_paths[i].path); + if (ret) { + dev_err_ratelimited(se->dev, "ICC disable failed on path '%s': %d\n", + icc_path_names[i], ret); + return ret; + } + } + + return 0; +} +EXPORT_SYMBOL(geni_icc_disable); + +static int geni_se_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct geni_wrapper *wrapper; + int ret; + + wrapper = devm_kzalloc(dev, sizeof(*wrapper), GFP_KERNEL); + if (!wrapper) + return -ENOMEM; + + wrapper->dev = dev; + wrapper->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(wrapper->base)) + return PTR_ERR(wrapper->base); + + if (!has_acpi_companion(&pdev->dev)) { + const struct geni_se_desc *desc; + int i; + + desc = device_get_match_data(&pdev->dev); + if (!desc) + return -EINVAL; + + wrapper->num_clks = min_t(unsigned int, desc->num_clks, MAX_CLKS); + + for (i = 0; i < wrapper->num_clks; ++i) + wrapper->clks[i].id = desc->clks[i]; + + ret = of_count_phandle_with_args(dev->of_node, "clocks", "#clock-cells"); + if (ret < 0) { + dev_err(dev, "invalid clocks property at %pOF\n", dev->of_node); + return ret; + } + + if (ret < wrapper->num_clks) { + dev_err(dev, "invalid clocks count at %pOF, expected %d entries\n", + dev->of_node, wrapper->num_clks); + return -EINVAL; + } + + ret = devm_clk_bulk_get(dev, wrapper->num_clks, wrapper->clks); + if (ret) { + dev_err(dev, "Err getting clks %d\n", ret); + return ret; + } + } + + dev_set_drvdata(dev, wrapper); + dev_dbg(dev, "GENI SE Driver probed\n"); + return 
devm_of_platform_populate(dev); +} + +static const char * const qup_clks[] = { + "m-ahb", + "s-ahb", +}; + +static const struct geni_se_desc qup_desc = { + .clks = qup_clks, + .num_clks = ARRAY_SIZE(qup_clks), +}; + +static const char * const i2c_master_hub_clks[] = { + "s-ahb", +}; + +static const struct geni_se_desc i2c_master_hub_desc = { + .clks = i2c_master_hub_clks, + .num_clks = ARRAY_SIZE(i2c_master_hub_clks), +}; + +static const struct of_device_id geni_se_dt_match[] = { + { .compatible = "qcom,geni-se-qup", .data = &qup_desc }, + { .compatible = "qcom,geni-se-i2c-master-hub", .data = &i2c_master_hub_desc }, + {} +}; +MODULE_DEVICE_TABLE(of, geni_se_dt_match); + +static struct platform_driver geni_se_driver = { + .driver = { + .name = "geni_se_qup", + .of_match_table = geni_se_dt_match, + }, + .probe = geni_se_probe, +}; +module_platform_driver(geni_se_driver); + +MODULE_DESCRIPTION("GENI Serial Engine Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c new file mode 100644 index 0000000000..77f0cf1266 --- /dev/null +++ b/drivers/soc/qcom/qcom_aoss.c @@ -0,0 +1,573 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2019, Linaro Ltd + */ +#include <linux/clk-provider.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/mailbox_client.h> +#include <linux/module.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/thermal.h> +#include <linux/slab.h> +#include <linux/soc/qcom/qcom_aoss.h> + +#define QMP_DESC_MAGIC 0x0 +#define QMP_DESC_VERSION 0x4 +#define QMP_DESC_FEATURES 0x8 + +/* AOP-side offsets */ +#define QMP_DESC_UCORE_LINK_STATE 0xc +#define QMP_DESC_UCORE_LINK_STATE_ACK 0x10 +#define QMP_DESC_UCORE_CH_STATE 0x14 +#define QMP_DESC_UCORE_CH_STATE_ACK 0x18 +#define QMP_DESC_UCORE_MBOX_SIZE 0x1c +#define QMP_DESC_UCORE_MBOX_OFFSET 0x20 + +/* Linux-side offsets */ +#define QMP_DESC_MCORE_LINK_STATE 0x24 +#define QMP_DESC_MCORE_LINK_STATE_ACK 0x28 +#define QMP_DESC_MCORE_CH_STATE 0x2c +#define QMP_DESC_MCORE_CH_STATE_ACK 0x30 +#define QMP_DESC_MCORE_MBOX_SIZE 0x34 +#define QMP_DESC_MCORE_MBOX_OFFSET 0x38 + +#define QMP_STATE_UP GENMASK(15, 0) +#define QMP_STATE_DOWN GENMASK(31, 16) + +#define QMP_MAGIC 0x4d41494c /* mail */ +#define QMP_VERSION 1 + +/* 64 bytes is enough to store the requests and provides padding to 4 bytes */ +#define QMP_MSG_LEN 64 + +#define QMP_NUM_COOLING_RESOURCES 2 + +static bool qmp_cdev_max_state = 1; + +struct qmp_cooling_device { + struct thermal_cooling_device *cdev; + struct qmp *qmp; + char *name; + bool state; +}; + +/** + * struct qmp - driver state for QMP implementation + * @msgram: iomem referencing the message RAM used for communication + * @dev: reference to QMP device + * @mbox_client: mailbox client used to ring the doorbell on transmit + * @mbox_chan: mailbox channel used to ring the doorbell on transmit + * @offset: offset within @msgram where messages should be written + * @size: maximum size of the messages to be transmitted + * @event: wait_queue for synchronization with the IRQ + * @tx_lock: provides synchronization between multiple callers of qmp_send() + * @qdss_clk: QDSS clock hw struct + * @cooling_devs: thermal cooling devices + */ +struct qmp { + void __iomem *msgram; + struct device *dev; + + struct mbox_client mbox_client; + struct mbox_chan *mbox_chan; + + size_t offset; + size_t size; + + wait_queue_head_t event; + + struct mutex tx_lock; + + struct clk_hw qdss_clk; + struct qmp_cooling_device 
*cooling_devs; +}; + +static void qmp_kick(struct qmp *qmp) +{ + mbox_send_message(qmp->mbox_chan, NULL); + mbox_client_txdone(qmp->mbox_chan, 0); +} + +static bool qmp_magic_valid(struct qmp *qmp) +{ + return readl(qmp->msgram + QMP_DESC_MAGIC) == QMP_MAGIC; +} + +static bool qmp_link_acked(struct qmp *qmp) +{ + return readl(qmp->msgram + QMP_DESC_MCORE_LINK_STATE_ACK) == QMP_STATE_UP; +} + +static bool qmp_mcore_channel_acked(struct qmp *qmp) +{ + return readl(qmp->msgram + QMP_DESC_MCORE_CH_STATE_ACK) == QMP_STATE_UP; +} + +static bool qmp_ucore_channel_up(struct qmp *qmp) +{ + return readl(qmp->msgram + QMP_DESC_UCORE_CH_STATE) == QMP_STATE_UP; +} + +static int qmp_open(struct qmp *qmp) +{ + int ret; + u32 val; + + if (!qmp_magic_valid(qmp)) { + dev_err(qmp->dev, "QMP magic doesn't match\n"); + return -EINVAL; + } + + val = readl(qmp->msgram + QMP_DESC_VERSION); + if (val != QMP_VERSION) { + dev_err(qmp->dev, "unsupported QMP version %d\n", val); + return -EINVAL; + } + + qmp->offset = readl(qmp->msgram + QMP_DESC_MCORE_MBOX_OFFSET); + qmp->size = readl(qmp->msgram + QMP_DESC_MCORE_MBOX_SIZE); + if (!qmp->size) { + dev_err(qmp->dev, "invalid mailbox size\n"); + return -EINVAL; + } + + /* Ack remote core's link state */ + val = readl(qmp->msgram + QMP_DESC_UCORE_LINK_STATE); + writel(val, qmp->msgram + QMP_DESC_UCORE_LINK_STATE_ACK); + + /* Set local core's link state to up */ + writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_MCORE_LINK_STATE); + + qmp_kick(qmp); + + ret = wait_event_timeout(qmp->event, qmp_link_acked(qmp), HZ); + if (!ret) { + dev_err(qmp->dev, "ucore didn't ack link\n"); + goto timeout_close_link; + } + + writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_MCORE_CH_STATE); + + qmp_kick(qmp); + + ret = wait_event_timeout(qmp->event, qmp_ucore_channel_up(qmp), HZ); + if (!ret) { + dev_err(qmp->dev, "ucore didn't open channel\n"); + goto timeout_close_channel; + } + + /* Ack remote core's channel state */ + writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_UCORE_CH_STATE_ACK); + + qmp_kick(qmp); + + ret = wait_event_timeout(qmp->event, qmp_mcore_channel_acked(qmp), HZ); + if (!ret) { + dev_err(qmp->dev, "ucore didn't ack channel\n"); + goto timeout_close_channel; + } + + return 0; + +timeout_close_channel: + writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_CH_STATE); + +timeout_close_link: + writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_LINK_STATE); + qmp_kick(qmp); + + return -ETIMEDOUT; +} + +static void qmp_close(struct qmp *qmp) +{ + writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_CH_STATE); + writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_LINK_STATE); + qmp_kick(qmp); +} + +static irqreturn_t qmp_intr(int irq, void *data) +{ + struct qmp *qmp = data; + + wake_up_all(&qmp->event); + + return IRQ_HANDLED; +} + +static bool qmp_message_empty(struct qmp *qmp) +{ + return readl(qmp->msgram + qmp->offset) == 0; +} + +/** + * qmp_send() - send a message to the AOSS + * @qmp: qmp context + * @fmt: format string for message to be sent + * @...: arguments for the format string + * + * Transmit message to AOSS and wait for the AOSS to acknowledge the message. + * data must not be longer than the mailbox size. Access is synchronized by + * this implementation. + * + * Return: 0 on success, negative errno on failure + */ +int qmp_send(struct qmp *qmp, const char *fmt, ...) 
+{ + char buf[QMP_MSG_LEN]; + long time_left; + va_list args; + int len; + int ret; + + if (WARN_ON(IS_ERR_OR_NULL(qmp) || !fmt)) + return -EINVAL; + + memset(buf, 0, sizeof(buf)); + va_start(args, fmt); + len = vsnprintf(buf, sizeof(buf), fmt, args); + va_end(args); + + if (WARN_ON(len >= sizeof(buf))) + return -EINVAL; + + mutex_lock(&qmp->tx_lock); + + /* The message RAM only implements 32-bit accesses */ + __iowrite32_copy(qmp->msgram + qmp->offset + sizeof(u32), + buf, sizeof(buf) / sizeof(u32)); + writel(sizeof(buf), qmp->msgram + qmp->offset); + + /* Read back length to confirm data written in message RAM */ + readl(qmp->msgram + qmp->offset); + qmp_kick(qmp); + + time_left = wait_event_interruptible_timeout(qmp->event, + qmp_message_empty(qmp), HZ); + if (!time_left) { + dev_err(qmp->dev, "ucore did not ack channel\n"); + ret = -ETIMEDOUT; + + /* Clear message from buffer */ + writel(0, qmp->msgram + qmp->offset); + } else { + ret = 0; + } + + mutex_unlock(&qmp->tx_lock); + + return ret; +} +EXPORT_SYMBOL(qmp_send); + +static int qmp_qdss_clk_prepare(struct clk_hw *hw) +{ + static const char *buf = "{class: clock, res: qdss, val: 1}"; + struct qmp *qmp = container_of(hw, struct qmp, qdss_clk); + + return qmp_send(qmp, buf); +} + +static void qmp_qdss_clk_unprepare(struct clk_hw *hw) +{ + static const char *buf = "{class: clock, res: qdss, val: 0}"; + struct qmp *qmp = container_of(hw, struct qmp, qdss_clk); + + qmp_send(qmp, buf); +} + +static const struct clk_ops qmp_qdss_clk_ops = { + .prepare = qmp_qdss_clk_prepare, + .unprepare = qmp_qdss_clk_unprepare, +}; + +static int qmp_qdss_clk_add(struct qmp *qmp) +{ + static const struct clk_init_data qdss_init = { + .ops = &qmp_qdss_clk_ops, + .name = "qdss", + }; + int ret; + + qmp->qdss_clk.init = &qdss_init; + ret = clk_hw_register(qmp->dev, &qmp->qdss_clk); + if (ret < 0) { + dev_err(qmp->dev, "failed to register qdss clock\n"); + return ret; + } + + ret = of_clk_add_hw_provider(qmp->dev->of_node, of_clk_hw_simple_get, + &qmp->qdss_clk); + if (ret < 0) { + dev_err(qmp->dev, "unable to register of clk hw provider\n"); + clk_hw_unregister(&qmp->qdss_clk); + } + + return ret; +} + +static void qmp_qdss_clk_remove(struct qmp *qmp) +{ + of_clk_del_provider(qmp->dev->of_node); + clk_hw_unregister(&qmp->qdss_clk); +} + +static int qmp_cdev_get_max_state(struct thermal_cooling_device *cdev, + unsigned long *state) +{ + *state = qmp_cdev_max_state; + return 0; +} + +static int qmp_cdev_get_cur_state(struct thermal_cooling_device *cdev, + unsigned long *state) +{ + struct qmp_cooling_device *qmp_cdev = cdev->devdata; + + *state = qmp_cdev->state; + return 0; +} + +static int qmp_cdev_set_cur_state(struct thermal_cooling_device *cdev, + unsigned long state) +{ + struct qmp_cooling_device *qmp_cdev = cdev->devdata; + bool cdev_state; + int ret; + + /* Normalize state */ + cdev_state = !!state; + + if (qmp_cdev->state == state) + return 0; + + ret = qmp_send(qmp_cdev->qmp, "{class: volt_flr, event:zero_temp, res:%s, value:%s}", + qmp_cdev->name, cdev_state ? 
"on" : "off"); + if (!ret) + qmp_cdev->state = cdev_state; + + return ret; +} + +static const struct thermal_cooling_device_ops qmp_cooling_device_ops = { + .get_max_state = qmp_cdev_get_max_state, + .get_cur_state = qmp_cdev_get_cur_state, + .set_cur_state = qmp_cdev_set_cur_state, +}; + +static int qmp_cooling_device_add(struct qmp *qmp, + struct qmp_cooling_device *qmp_cdev, + struct device_node *node) +{ + char *cdev_name = (char *)node->name; + + qmp_cdev->qmp = qmp; + qmp_cdev->state = !qmp_cdev_max_state; + qmp_cdev->name = cdev_name; + qmp_cdev->cdev = devm_thermal_of_cooling_device_register + (qmp->dev, node, + cdev_name, + qmp_cdev, &qmp_cooling_device_ops); + + if (IS_ERR(qmp_cdev->cdev)) + dev_err(qmp->dev, "unable to register %s cooling device\n", + cdev_name); + + return PTR_ERR_OR_ZERO(qmp_cdev->cdev); +} + +static int qmp_cooling_devices_register(struct qmp *qmp) +{ + struct device_node *np, *child; + int count = 0; + int ret; + + np = qmp->dev->of_node; + + qmp->cooling_devs = devm_kcalloc(qmp->dev, QMP_NUM_COOLING_RESOURCES, + sizeof(*qmp->cooling_devs), + GFP_KERNEL); + + if (!qmp->cooling_devs) + return -ENOMEM; + + for_each_available_child_of_node(np, child) { + if (!of_property_present(child, "#cooling-cells")) + continue; + ret = qmp_cooling_device_add(qmp, &qmp->cooling_devs[count++], + child); + if (ret) { + of_node_put(child); + goto unroll; + } + } + + if (!count) + devm_kfree(qmp->dev, qmp->cooling_devs); + + return 0; + +unroll: + while (--count >= 0) + thermal_cooling_device_unregister + (qmp->cooling_devs[count].cdev); + devm_kfree(qmp->dev, qmp->cooling_devs); + + return ret; +} + +static void qmp_cooling_devices_remove(struct qmp *qmp) +{ + int i; + + for (i = 0; i < QMP_NUM_COOLING_RESOURCES; i++) + thermal_cooling_device_unregister(qmp->cooling_devs[i].cdev); +} + +/** + * qmp_get() - get a qmp handle from a device + * @dev: client device pointer + * + * Return: handle to qmp device on success, ERR_PTR() on failure + */ +struct qmp *qmp_get(struct device *dev) +{ + struct platform_device *pdev; + struct device_node *np; + struct qmp *qmp; + + if (!dev || !dev->of_node) + return ERR_PTR(-EINVAL); + + np = of_parse_phandle(dev->of_node, "qcom,qmp", 0); + if (!np) + return ERR_PTR(-ENODEV); + + pdev = of_find_device_by_node(np); + of_node_put(np); + if (!pdev) + return ERR_PTR(-EINVAL); + + qmp = platform_get_drvdata(pdev); + + if (!qmp) { + put_device(&pdev->dev); + return ERR_PTR(-EPROBE_DEFER); + } + return qmp; +} +EXPORT_SYMBOL(qmp_get); + +/** + * qmp_put() - release a qmp handle + * @qmp: qmp handle obtained from qmp_get() + */ +void qmp_put(struct qmp *qmp) +{ + /* + * Match get_device() inside of_find_device_by_node() in + * qmp_get() + */ + if (!IS_ERR_OR_NULL(qmp)) + put_device(qmp->dev); +} +EXPORT_SYMBOL(qmp_put); + +static int qmp_probe(struct platform_device *pdev) +{ + struct qmp *qmp; + int irq; + int ret; + + qmp = devm_kzalloc(&pdev->dev, sizeof(*qmp), GFP_KERNEL); + if (!qmp) + return -ENOMEM; + + qmp->dev = &pdev->dev; + init_waitqueue_head(&qmp->event); + mutex_init(&qmp->tx_lock); + + qmp->msgram = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(qmp->msgram)) + return PTR_ERR(qmp->msgram); + + qmp->mbox_client.dev = &pdev->dev; + qmp->mbox_client.knows_txdone = true; + qmp->mbox_chan = mbox_request_channel(&qmp->mbox_client, 0); + if (IS_ERR(qmp->mbox_chan)) { + dev_err(&pdev->dev, "failed to acquire ipc mailbox\n"); + return PTR_ERR(qmp->mbox_chan); + } + + irq = platform_get_irq(pdev, 0); + ret = devm_request_irq(&pdev->dev, 
irq, qmp_intr, 0, + "aoss-qmp", qmp); + if (ret < 0) { + dev_err(&pdev->dev, "failed to request interrupt\n"); + goto err_free_mbox; + } + + ret = qmp_open(qmp); + if (ret < 0) + goto err_free_mbox; + + ret = qmp_qdss_clk_add(qmp); + if (ret) + goto err_close_qmp; + + ret = qmp_cooling_devices_register(qmp); + if (ret) + dev_err(&pdev->dev, "failed to register aoss cooling devices\n"); + + platform_set_drvdata(pdev, qmp); + + return 0; + +err_close_qmp: + qmp_close(qmp); +err_free_mbox: + mbox_free_channel(qmp->mbox_chan); + + return ret; +} + +static int qmp_remove(struct platform_device *pdev) +{ + struct qmp *qmp = platform_get_drvdata(pdev); + + qmp_qdss_clk_remove(qmp); + qmp_cooling_devices_remove(qmp); + + qmp_close(qmp); + mbox_free_channel(qmp->mbox_chan); + + return 0; +} + +static const struct of_device_id qmp_dt_match[] = { + { .compatible = "qcom,sc7180-aoss-qmp", }, + { .compatible = "qcom,sc7280-aoss-qmp", }, + { .compatible = "qcom,sdm845-aoss-qmp", }, + { .compatible = "qcom,sm8150-aoss-qmp", }, + { .compatible = "qcom,sm8250-aoss-qmp", }, + { .compatible = "qcom,sm8350-aoss-qmp", }, + { .compatible = "qcom,aoss-qmp", }, + {} +}; +MODULE_DEVICE_TABLE(of, qmp_dt_match); + +static struct platform_driver qmp_driver = { + .driver = { + .name = "qcom_aoss_qmp", + .of_match_table = qmp_dt_match, + .suppress_bind_attrs = true, + }, + .probe = qmp_probe, + .remove = qmp_remove, +}; +module_platform_driver(qmp_driver); + +MODULE_DESCRIPTION("Qualcomm AOSS QMP driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/qcom_gsbi.c b/drivers/soc/qcom/qcom_gsbi.c new file mode 100644 index 0000000000..df7907a83a --- /dev/null +++ b/drivers/soc/qcom/qcom_gsbi.c @@ -0,0 +1,244 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2014, The Linux foundation. All rights reserved. 
+ */ + +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/mfd/syscon.h> +#include <dt-bindings/soc/qcom,gsbi.h> + +#define GSBI_CTRL_REG 0x0000 +#define GSBI_PROTOCOL_SHIFT 4 +#define MAX_GSBI 12 + +#define TCSR_ADM_CRCI_BASE 0x70 + +struct crci_config { + u32 num_rows; + const u32 (*array)[MAX_GSBI]; +}; + +static const u32 crci_ipq8064[][MAX_GSBI] = { + { + 0x000003, 0x00000c, 0x000030, 0x0000c0, + 0x000300, 0x000c00, 0x003000, 0x00c000, + 0x030000, 0x0c0000, 0x300000, 0xc00000 + }, + { + 0x000003, 0x00000c, 0x000030, 0x0000c0, + 0x000300, 0x000c00, 0x003000, 0x00c000, + 0x030000, 0x0c0000, 0x300000, 0xc00000 + }, +}; + +static const struct crci_config config_ipq8064 = { + .num_rows = ARRAY_SIZE(crci_ipq8064), + .array = crci_ipq8064, +}; + +static const unsigned int crci_apq8064[][MAX_GSBI] = { + { + 0x001800, 0x006000, 0x000030, 0x0000c0, + 0x000300, 0x000400, 0x000000, 0x000000, + 0x000000, 0x000000, 0x000000, 0x000000 + }, + { + 0x000000, 0x000000, 0x000000, 0x000000, + 0x000000, 0x000020, 0x0000c0, 0x000000, + 0x000000, 0x000000, 0x000000, 0x000000 + }, +}; + +static const struct crci_config config_apq8064 = { + .num_rows = ARRAY_SIZE(crci_apq8064), + .array = crci_apq8064, +}; + +static const unsigned int crci_msm8960[][MAX_GSBI] = { + { + 0x000003, 0x00000c, 0x000030, 0x0000c0, + 0x000300, 0x000400, 0x000000, 0x000000, + 0x000000, 0x000000, 0x000000, 0x000000 + }, + { + 0x000000, 0x000000, 0x000000, 0x000000, + 0x000000, 0x000020, 0x0000c0, 0x000300, + 0x001800, 0x006000, 0x000000, 0x000000 + }, +}; + +static const struct crci_config config_msm8960 = { + .num_rows = ARRAY_SIZE(crci_msm8960), + .array = crci_msm8960, +}; + +static const unsigned int crci_msm8660[][MAX_GSBI] = { + { /* ADM 0 - B */ + 0x000003, 0x00000c, 0x000030, 0x0000c0, + 0x000300, 0x000c00, 0x003000, 0x00c000, + 0x030000, 0x0c0000, 0x300000, 0xc00000 + }, + { /* ADM 0 - B */ + 0x000003, 0x00000c, 0x000030, 0x0000c0, + 0x000300, 0x000c00, 0x003000, 0x00c000, + 0x030000, 0x0c0000, 0x300000, 0xc00000 + }, + { /* ADM 1 - A */ + 0x000003, 0x00000c, 0x000030, 0x0000c0, + 0x000300, 0x000c00, 0x003000, 0x00c000, + 0x030000, 0x0c0000, 0x300000, 0xc00000 + }, + { /* ADM 1 - B */ + 0x000003, 0x00000c, 0x000030, 0x0000c0, + 0x000300, 0x000c00, 0x003000, 0x00c000, + 0x030000, 0x0c0000, 0x300000, 0xc00000 + }, +}; + +static const struct crci_config config_msm8660 = { + .num_rows = ARRAY_SIZE(crci_msm8660), + .array = crci_msm8660, +}; + +struct gsbi_info { + struct clk *hclk; + u32 mode; + u32 crci; + struct regmap *tcsr; +}; + +static const struct of_device_id tcsr_dt_match[] __maybe_unused = { + { .compatible = "qcom,tcsr-ipq8064", .data = &config_ipq8064}, + { .compatible = "qcom,tcsr-apq8064", .data = &config_apq8064}, + { .compatible = "qcom,tcsr-msm8960", .data = &config_msm8960}, + { .compatible = "qcom,tcsr-msm8660", .data = &config_msm8660}, + { }, +}; + +static int gsbi_probe(struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + struct device_node *tcsr_node; + const struct of_device_id *match; + void __iomem *base; + struct gsbi_info *gsbi; + int i; + u32 mask, gsbi_num; + const struct crci_config *config = NULL; + + gsbi = devm_kzalloc(&pdev->dev, sizeof(*gsbi), GFP_KERNEL); + + if (!gsbi) + return -ENOMEM; + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + + 
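	/*
	 * Worked example of the TCSR CRCI update performed further down
	 * (illustrative; values taken from the crci_ipq8064 table above):
	 * for a hypothetical GSBI5 ("cell-index = <5>") the per-row mask is
	 * config->array[i][5 - 1] == 0x000300, i.e. the RX/TX bit pair for
	 * that port in ADM CRCI row i. With "qcom,mode = <GSBI_PROT_SPI>"
	 * both bits are cleared in TCSR_ADM_CRCI_BASE + 4 * i, otherwise
	 * they are set. GSBI_CTRL_REG itself is programmed below with
	 * (mode << GSBI_PROTOCOL_SHIFT) | crci.
	 */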
/* get the tcsr node and setup the config and regmap */ + gsbi->tcsr = syscon_regmap_lookup_by_phandle(node, "syscon-tcsr"); + + if (!IS_ERR(gsbi->tcsr)) { + tcsr_node = of_parse_phandle(node, "syscon-tcsr", 0); + if (tcsr_node) { + match = of_match_node(tcsr_dt_match, tcsr_node); + if (match) + config = match->data; + else + dev_warn(&pdev->dev, "no matching TCSR\n"); + + of_node_put(tcsr_node); + } + } + + if (of_property_read_u32(node, "cell-index", &gsbi_num)) { + dev_err(&pdev->dev, "missing cell-index\n"); + return -EINVAL; + } + + if (gsbi_num < 1 || gsbi_num > MAX_GSBI) { + dev_err(&pdev->dev, "invalid cell-index\n"); + return -EINVAL; + } + + if (of_property_read_u32(node, "qcom,mode", &gsbi->mode)) { + dev_err(&pdev->dev, "missing mode configuration\n"); + return -EINVAL; + } + + /* not required, so default to 0 if not present */ + of_property_read_u32(node, "qcom,crci", &gsbi->crci); + + dev_info(&pdev->dev, "GSBI port protocol: %d crci: %d\n", + gsbi->mode, gsbi->crci); + gsbi->hclk = devm_clk_get_enabled(&pdev->dev, "iface"); + if (IS_ERR(gsbi->hclk)) + return PTR_ERR(gsbi->hclk); + + writel_relaxed((gsbi->mode << GSBI_PROTOCOL_SHIFT) | gsbi->crci, + base + GSBI_CTRL_REG); + + /* + * modify tcsr to reflect mode and ADM CRCI mux + * Each gsbi contains a pair of bits, one for RX and one for TX + * SPI mode requires both bits cleared, otherwise they are set + */ + if (config) { + for (i = 0; i < config->num_rows; i++) { + mask = config->array[i][gsbi_num - 1]; + + if (gsbi->mode == GSBI_PROT_SPI) + regmap_update_bits(gsbi->tcsr, + TCSR_ADM_CRCI_BASE + 4 * i, mask, 0); + else + regmap_update_bits(gsbi->tcsr, + TCSR_ADM_CRCI_BASE + 4 * i, mask, mask); + + } + } + + /* make sure the gsbi control write is not reordered */ + wmb(); + + platform_set_drvdata(pdev, gsbi); + + return of_platform_populate(node, NULL, NULL, &pdev->dev); +} + +static int gsbi_remove(struct platform_device *pdev) +{ + struct gsbi_info *gsbi = platform_get_drvdata(pdev); + + clk_disable_unprepare(gsbi->hclk); + + return 0; +} + +static const struct of_device_id gsbi_dt_match[] = { + { .compatible = "qcom,gsbi-v1.0.0", }, + { }, +}; + +MODULE_DEVICE_TABLE(of, gsbi_dt_match); + +static struct platform_driver gsbi_driver = { + .driver = { + .name = "gsbi", + .of_match_table = gsbi_dt_match, + }, + .probe = gsbi_probe, + .remove = gsbi_remove, +}; + +module_platform_driver(gsbi_driver); + +MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>"); +MODULE_DESCRIPTION("QCOM GSBI driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/qcom_stats.c b/drivers/soc/qcom/qcom_stats.c new file mode 100644 index 0000000000..c207bb96c5 --- /dev/null +++ b/drivers/soc/qcom/qcom_stats.c @@ -0,0 +1,295 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2011-2021, The Linux Foundation. All rights reserved. 
+ */ + +#include <linux/debugfs.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/seq_file.h> + +#include <linux/soc/qcom/smem.h> +#include <clocksource/arm_arch_timer.h> + +#define RPM_DYNAMIC_ADDR 0x14 +#define RPM_DYNAMIC_ADDR_MASK 0xFFFF + +#define STAT_TYPE_OFFSET 0x0 +#define COUNT_OFFSET 0x4 +#define LAST_ENTERED_AT_OFFSET 0x8 +#define LAST_EXITED_AT_OFFSET 0x10 +#define ACCUMULATED_OFFSET 0x18 +#define CLIENT_VOTES_OFFSET 0x20 + +struct subsystem_data { + const char *name; + u32 smem_item; + u32 pid; +}; + +static const struct subsystem_data subsystems[] = { + { "modem", 605, 1 }, + { "wpss", 605, 13 }, + { "adsp", 606, 2 }, + { "cdsp", 607, 5 }, + { "slpi", 608, 3 }, + { "gpu", 609, 0 }, + { "display", 610, 0 }, + { "adsp_island", 613, 2 }, + { "slpi_island", 613, 3 }, +}; + +struct stats_config { + size_t stats_offset; + size_t num_records; + bool appended_stats_avail; + bool dynamic_offset; + bool subsystem_stats_in_smem; +}; + +struct stats_data { + bool appended_stats_avail; + void __iomem *base; +}; + +struct sleep_stats { + u32 stat_type; + u32 count; + u64 last_entered_at; + u64 last_exited_at; + u64 accumulated; +}; + +struct appended_stats { + u32 client_votes; + u32 reserved[3]; +}; + +static void qcom_print_stats(struct seq_file *s, const struct sleep_stats *stat) +{ + u64 accumulated = stat->accumulated; + /* + * If a subsystem is in sleep when reading the sleep stats adjust + * the accumulated sleep duration to show actual sleep time. + */ + if (stat->last_entered_at > stat->last_exited_at) + accumulated += arch_timer_read_counter() - stat->last_entered_at; + + seq_printf(s, "Count: %u\n", stat->count); + seq_printf(s, "Last Entered At: %llu\n", stat->last_entered_at); + seq_printf(s, "Last Exited At: %llu\n", stat->last_exited_at); + seq_printf(s, "Accumulated Duration: %llu\n", accumulated); +} + +static int qcom_subsystem_sleep_stats_show(struct seq_file *s, void *unused) +{ + struct subsystem_data *subsystem = s->private; + struct sleep_stats *stat; + + /* Items are allocated lazily, so lookup pointer each time */ + stat = qcom_smem_get(subsystem->pid, subsystem->smem_item, NULL); + if (IS_ERR(stat)) + return 0; + + qcom_print_stats(s, stat); + + return 0; +} + +static int qcom_soc_sleep_stats_show(struct seq_file *s, void *unused) +{ + struct stats_data *d = s->private; + void __iomem *reg = d->base; + struct sleep_stats stat; + + memcpy_fromio(&stat, reg, sizeof(stat)); + qcom_print_stats(s, &stat); + + if (d->appended_stats_avail) { + struct appended_stats votes; + + memcpy_fromio(&votes, reg + CLIENT_VOTES_OFFSET, sizeof(votes)); + seq_printf(s, "Client Votes: %#x\n", votes.client_votes); + } + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(qcom_soc_sleep_stats); +DEFINE_SHOW_ATTRIBUTE(qcom_subsystem_sleep_stats); + +static void qcom_create_soc_sleep_stat_files(struct dentry *root, void __iomem *reg, + struct stats_data *d, + const struct stats_config *config) +{ + char stat_type[sizeof(u32) + 1] = {0}; + size_t stats_offset = config->stats_offset; + u32 offset = 0, type; + int i, j; + + /* + * On RPM targets, stats offset location is dynamic and changes from target + * to target and sometimes from build to build for same target. + * + * In such cases the dynamic address is present at 0x14 offset from base + * address in devicetree. The last 16bits indicates the stats_offset. 
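 *
 * Illustration (editorial, hypothetical value): on such a target the read
 * below might yield stats_offset = 0xdba0, the same offset that older
 * firmwares hard-code via rpm_data_dba0 further down. Each record found
 * there begins with a packed 4-character ASCII tag; "vmin", for instance,
 * is stored as the little-endian u32 0x6e696d76
 * ('v' | 'm' << 8 | 'i' << 16 | 'n' << 24), which the byte loop below
 * unpacks into the debugfs file name.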
+ */ + if (config->dynamic_offset) { + stats_offset = readl(reg + RPM_DYNAMIC_ADDR); + stats_offset &= RPM_DYNAMIC_ADDR_MASK; + } + + for (i = 0; i < config->num_records; i++) { + d[i].base = reg + offset + stats_offset; + + /* + * Read the low power mode name and create debugfs file for it. + * The names read could be of below, + * (may change depending on low power mode supported). + * For rpmh-sleep-stats: "aosd", "cxsd" and "ddr". + * For rpm-sleep-stats: "vmin" and "vlow". + */ + type = readl(d[i].base); + for (j = 0; j < sizeof(u32); j++) { + stat_type[j] = type & 0xff; + type = type >> 8; + } + strim(stat_type); + debugfs_create_file(stat_type, 0400, root, &d[i], + &qcom_soc_sleep_stats_fops); + + offset += sizeof(struct sleep_stats); + if (d[i].appended_stats_avail) + offset += sizeof(struct appended_stats); + } +} + +static void qcom_create_subsystem_stat_files(struct dentry *root, + const struct stats_config *config) +{ + int i; + + if (!config->subsystem_stats_in_smem) + return; + + for (i = 0; i < ARRAY_SIZE(subsystems); i++) + debugfs_create_file(subsystems[i].name, 0400, root, (void *)&subsystems[i], + &qcom_subsystem_sleep_stats_fops); +} + +static int qcom_stats_probe(struct platform_device *pdev) +{ + void __iomem *reg; + struct dentry *root; + const struct stats_config *config; + struct stats_data *d; + int i; + + config = device_get_match_data(&pdev->dev); + if (!config) + return -ENODEV; + + reg = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); + if (IS_ERR(reg)) + return -ENOMEM; + + d = devm_kcalloc(&pdev->dev, config->num_records, + sizeof(*d), GFP_KERNEL); + if (!d) + return -ENOMEM; + + for (i = 0; i < config->num_records; i++) + d[i].appended_stats_avail = config->appended_stats_avail; + + root = debugfs_create_dir("qcom_stats", NULL); + + qcom_create_subsystem_stat_files(root, config); + qcom_create_soc_sleep_stat_files(root, reg, d, config); + + platform_set_drvdata(pdev, root); + + device_set_pm_not_required(&pdev->dev); + + return 0; +} + +static int qcom_stats_remove(struct platform_device *pdev) +{ + struct dentry *root = platform_get_drvdata(pdev); + + debugfs_remove_recursive(root); + + return 0; +} + +static const struct stats_config rpm_data = { + .stats_offset = 0, + .num_records = 2, + .appended_stats_avail = true, + .dynamic_offset = true, + .subsystem_stats_in_smem = false, +}; + +/* Older RPM firmwares have the stats at a fixed offset instead */ +static const struct stats_config rpm_data_dba0 = { + .stats_offset = 0xdba0, + .num_records = 2, + .appended_stats_avail = true, + .dynamic_offset = false, + .subsystem_stats_in_smem = false, +}; + +static const struct stats_config rpmh_data_sdm845 = { + .stats_offset = 0x48, + .num_records = 2, + .appended_stats_avail = false, + .dynamic_offset = false, + .subsystem_stats_in_smem = true, +}; + +static const struct stats_config rpmh_data = { + .stats_offset = 0x48, + .num_records = 3, + .appended_stats_avail = false, + .dynamic_offset = false, + .subsystem_stats_in_smem = true, +}; + +static const struct of_device_id qcom_stats_table[] = { + { .compatible = "qcom,apq8084-rpm-stats", .data = &rpm_data_dba0 }, + { .compatible = "qcom,msm8226-rpm-stats", .data = &rpm_data_dba0 }, + { .compatible = "qcom,msm8916-rpm-stats", .data = &rpm_data_dba0 }, + { .compatible = "qcom,msm8974-rpm-stats", .data = &rpm_data_dba0 }, + { .compatible = "qcom,rpm-stats", .data = &rpm_data }, + { .compatible = "qcom,rpmh-stats", .data = &rpmh_data }, + { .compatible = "qcom,sdm845-rpmh-stats", .data = &rpmh_data_sdm845 }, 
+ { } +}; +MODULE_DEVICE_TABLE(of, qcom_stats_table); + +static struct platform_driver qcom_stats = { + .probe = qcom_stats_probe, + .remove = qcom_stats_remove, + .driver = { + .name = "qcom_stats", + .of_match_table = qcom_stats_table, + }, +}; + +static int __init qcom_stats_init(void) +{ + return platform_driver_register(&qcom_stats); +} +late_initcall(qcom_stats_init); + +static void __exit qcom_stats_exit(void) +{ + platform_driver_unregister(&qcom_stats); +} +module_exit(qcom_stats_exit) + +MODULE_DESCRIPTION("Qualcomm Technologies, Inc. (QTI) Stats driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/qmi_encdec.c b/drivers/soc/qcom/qmi_encdec.c new file mode 100644 index 0000000000..5c7161b18b --- /dev/null +++ b/drivers/soc/qcom/qmi_encdec.c @@ -0,0 +1,816 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. + */ +#include <linux/slab.h> +#include <linux/uaccess.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/string.h> +#include <linux/soc/qcom/qmi.h> + +#define QMI_ENCDEC_ENCODE_TLV(type, length, p_dst) do { \ + *p_dst++ = type; \ + *p_dst++ = ((u8)((length) & 0xFF)); \ + *p_dst++ = ((u8)(((length) >> 8) & 0xFF)); \ +} while (0) + +#define QMI_ENCDEC_DECODE_TLV(p_type, p_length, p_src) do { \ + *p_type = (u8)*p_src++; \ + *p_length = (u8)*p_src++; \ + *p_length |= ((u8)*p_src) << 8; \ +} while (0) + +#define QMI_ENCDEC_ENCODE_N_BYTES(p_dst, p_src, size) \ +do { \ + memcpy(p_dst, p_src, size); \ + p_dst = (u8 *)p_dst + size; \ + p_src = (u8 *)p_src + size; \ +} while (0) + +#define QMI_ENCDEC_DECODE_N_BYTES(p_dst, p_src, size) \ +do { \ + memcpy(p_dst, p_src, size); \ + p_dst = (u8 *)p_dst + size; \ + p_src = (u8 *)p_src + size; \ +} while (0) + +#define UPDATE_ENCODE_VARIABLES(temp_si, buf_dst, \ + encoded_bytes, tlv_len, encode_tlv, rc) \ +do { \ + buf_dst = (u8 *)buf_dst + rc; \ + encoded_bytes += rc; \ + tlv_len += rc; \ + temp_si = temp_si + 1; \ + encode_tlv = 1; \ +} while (0) + +#define UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc) \ +do { \ + buf_src = (u8 *)buf_src + rc; \ + decoded_bytes += rc; \ +} while (0) + +#define TLV_LEN_SIZE sizeof(u16) +#define TLV_TYPE_SIZE sizeof(u8) +#define OPTIONAL_TLV_TYPE_START 0x10 + +static int qmi_encode(const struct qmi_elem_info *ei_array, void *out_buf, + const void *in_c_struct, u32 out_buf_len, + int enc_level); + +static int qmi_decode(const struct qmi_elem_info *ei_array, void *out_c_struct, + const void *in_buf, u32 in_buf_len, int dec_level); + +/** + * skip_to_next_elem() - Skip to next element in the structure to be encoded + * @ei_array: Struct info describing the element to be skipped. + * @level: Depth level of encoding/decoding to identify nested structures. + * + * This function is used while encoding optional elements. If the flag + * corresponding to an optional element is not set, then encoding the + * optional element can be skipped. This function can be used to perform + * that operation. + * + * Return: struct info of the next element that can be encoded. 
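 *
 * Illustration (hypothetical ei entries): an optional u32 with tlv_type 0x10
 * is described by two consecutive entries sharing that tlv_type, e.g.
 *	{ .data_type = QMI_OPT_FLAG,        .tlv_type = 0x10, ... }
 *	{ .data_type = QMI_UNSIGNED_4_BYTE, .tlv_type = 0x10, ... }
 * so at the top level this helper advances past the whole group by walking
 * until the tlv_type changes.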
+ */ +static const struct qmi_elem_info * +skip_to_next_elem(const struct qmi_elem_info *ei_array, int level) +{ + const struct qmi_elem_info *temp_ei = ei_array; + u8 tlv_type; + + if (level > 1) { + temp_ei = temp_ei + 1; + } else { + do { + tlv_type = temp_ei->tlv_type; + temp_ei = temp_ei + 1; + } while (tlv_type == temp_ei->tlv_type); + } + + return temp_ei; +} + +/** + * qmi_calc_min_msg_len() - Calculate the minimum length of a QMI message + * @ei_array: Struct info array describing the structure. + * @level: Level to identify the depth of the nested structures. + * + * Return: Expected minimum length of the QMI message or 0 on error. + */ +static int qmi_calc_min_msg_len(const struct qmi_elem_info *ei_array, + int level) +{ + int min_msg_len = 0; + const struct qmi_elem_info *temp_ei = ei_array; + + if (!ei_array) + return min_msg_len; + + while (temp_ei->data_type != QMI_EOTI) { + /* Optional elements do not count in minimum length */ + if (temp_ei->data_type == QMI_OPT_FLAG) { + temp_ei = skip_to_next_elem(temp_ei, level); + continue; + } + + if (temp_ei->data_type == QMI_DATA_LEN) { + min_msg_len += (temp_ei->elem_size == sizeof(u8) ? + sizeof(u8) : sizeof(u16)); + temp_ei++; + continue; + } else if (temp_ei->data_type == QMI_STRUCT) { + min_msg_len += qmi_calc_min_msg_len(temp_ei->ei_array, + (level + 1)); + temp_ei++; + } else if (temp_ei->data_type == QMI_STRING) { + if (level > 1) + min_msg_len += temp_ei->elem_len <= U8_MAX ? + sizeof(u8) : sizeof(u16); + min_msg_len += temp_ei->elem_len * temp_ei->elem_size; + temp_ei++; + } else { + min_msg_len += (temp_ei->elem_len * temp_ei->elem_size); + temp_ei++; + } + + /* + * Type & Length info. not prepended for elements in the + * nested structure. + */ + if (level == 1) + min_msg_len += (TLV_TYPE_SIZE + TLV_LEN_SIZE); + } + + return min_msg_len; +} + +/** + * qmi_encode_basic_elem() - Encodes elements of basic/primary data type + * @buf_dst: Buffer to store the encoded information. + * @buf_src: Buffer containing the elements to be encoded. + * @elem_len: Number of elements, in the buf_src, to be encoded. + * @elem_size: Size of a single instance of the element to be encoded. + * + * This function encodes the "elem_len" number of data elements, each of + * size "elem_size" bytes from the source buffer "buf_src" and stores the + * encoded information in the destination buffer "buf_dst". The elements are + * of primary data type which include u8 - u64 or similar. This + * function returns the number of bytes of encoded information. + * + * Return: The number of bytes of encoded information. + */ +static int qmi_encode_basic_elem(void *buf_dst, const void *buf_src, + u32 elem_len, u32 elem_size) +{ + u32 i, rc = 0; + + for (i = 0; i < elem_len; i++) { + QMI_ENCDEC_ENCODE_N_BYTES(buf_dst, buf_src, elem_size); + rc += elem_size; + } + + return rc; +} + +/** + * qmi_encode_struct_elem() - Encodes elements of struct data type + * @ei_array: Struct info array descibing the struct element. + * @buf_dst: Buffer to store the encoded information. + * @buf_src: Buffer containing the elements to be encoded. + * @elem_len: Number of elements, in the buf_src, to be encoded. + * @out_buf_len: Available space in the encode buffer. + * @enc_level: Depth of the nested structure from the main structure. + * + * This function encodes the "elem_len" number of struct elements, each of + * size "ei_array->elem_size" bytes from the source buffer "buf_src" and + * stores the encoded information in the destination buffer "buf_dst". 
The + * elements are of struct data type which includes any C structure. This + * function returns the number of bytes of encoded information. + * + * Return: The number of bytes of encoded information on success or negative + * errno on error. + */ +static int qmi_encode_struct_elem(const struct qmi_elem_info *ei_array, + void *buf_dst, const void *buf_src, + u32 elem_len, u32 out_buf_len, + int enc_level) +{ + int i, rc, encoded_bytes = 0; + const struct qmi_elem_info *temp_ei = ei_array; + + for (i = 0; i < elem_len; i++) { + rc = qmi_encode(temp_ei->ei_array, buf_dst, buf_src, + out_buf_len - encoded_bytes, enc_level); + if (rc < 0) { + pr_err("%s: STRUCT Encode failure\n", __func__); + return rc; + } + buf_dst = buf_dst + rc; + buf_src = buf_src + temp_ei->elem_size; + encoded_bytes += rc; + } + + return encoded_bytes; +} + +/** + * qmi_encode_string_elem() - Encodes elements of string data type + * @ei_array: Struct info array descibing the string element. + * @buf_dst: Buffer to store the encoded information. + * @buf_src: Buffer containing the elements to be encoded. + * @out_buf_len: Available space in the encode buffer. + * @enc_level: Depth of the string element from the main structure. + * + * This function encodes a string element of maximum length "ei_array->elem_len" + * bytes from the source buffer "buf_src" and stores the encoded information in + * the destination buffer "buf_dst". This function returns the number of bytes + * of encoded information. + * + * Return: The number of bytes of encoded information on success or negative + * errno on error. + */ +static int qmi_encode_string_elem(const struct qmi_elem_info *ei_array, + void *buf_dst, const void *buf_src, + u32 out_buf_len, int enc_level) +{ + int rc; + int encoded_bytes = 0; + const struct qmi_elem_info *temp_ei = ei_array; + u32 string_len = 0; + u32 string_len_sz = 0; + + string_len = strlen(buf_src); + string_len_sz = temp_ei->elem_len <= U8_MAX ? + sizeof(u8) : sizeof(u16); + if (string_len > temp_ei->elem_len) { + pr_err("%s: String to be encoded is longer - %d > %d\n", + __func__, string_len, temp_ei->elem_len); + return -EINVAL; + } + + if (enc_level == 1) { + if (string_len + TLV_LEN_SIZE + TLV_TYPE_SIZE > + out_buf_len) { + pr_err("%s: Output len %d > Out Buf len %d\n", + __func__, string_len, out_buf_len); + return -ETOOSMALL; + } + } else { + if (string_len + string_len_sz > out_buf_len) { + pr_err("%s: Output len %d > Out Buf len %d\n", + __func__, string_len, out_buf_len); + return -ETOOSMALL; + } + rc = qmi_encode_basic_elem(buf_dst, &string_len, + 1, string_len_sz); + encoded_bytes += rc; + } + + rc = qmi_encode_basic_elem(buf_dst + encoded_bytes, buf_src, + string_len, temp_ei->elem_size); + encoded_bytes += rc; + + return encoded_bytes; +} + +/** + * qmi_encode() - Core Encode Function + * @ei_array: Struct info array describing the structure to be encoded. + * @out_buf: Buffer to hold the encoded QMI message. + * @in_c_struct: Pointer to the C structure to be encoded. + * @out_buf_len: Available space in the encode buffer. + * @enc_level: Encode level to indicate the depth of the nested structure, + * within the main structure, being encoded. + * + * Return: The number of bytes of encoded information on success or negative + * errno on error. 
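 *
 * Wire-format illustration (hypothetical message, not from this file):
 * encoding
 *	struct demo_req { u8 enable_valid; u8 enable; u32 id; };
 * with an ei array describing a mandatory u32 "id" (tlv_type 0x01) followed
 * by an optional u8 "enable" (QMI_OPT_FLAG plus QMI_UNSIGNED_1_BYTE, both
 * tlv_type 0x10) yields one TLV per top-level element at enc_level 1:
 *	01 04 00 <4 bytes of id>  10 01 00 <1 byte of enable>
 * Each TLV is <type:1><length:2, little endian><value>, as emitted by
 * QMI_ENCDEC_ENCODE_TLV() above; the optional TLV is written only when
 * enable_valid is non-zero.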
+ */ +static int qmi_encode(const struct qmi_elem_info *ei_array, void *out_buf, + const void *in_c_struct, u32 out_buf_len, + int enc_level) +{ + const struct qmi_elem_info *temp_ei = ei_array; + u8 opt_flag_value = 0; + u32 data_len_value = 0, data_len_sz; + u8 *buf_dst = (u8 *)out_buf; + u8 *tlv_pointer; + u32 tlv_len; + u8 tlv_type; + u32 encoded_bytes = 0; + const void *buf_src; + int encode_tlv = 0; + int rc; + + if (!ei_array) + return 0; + + tlv_pointer = buf_dst; + tlv_len = 0; + if (enc_level == 1) + buf_dst = buf_dst + (TLV_LEN_SIZE + TLV_TYPE_SIZE); + + while (temp_ei->data_type != QMI_EOTI) { + buf_src = in_c_struct + temp_ei->offset; + tlv_type = temp_ei->tlv_type; + + if (temp_ei->array_type == NO_ARRAY) { + data_len_value = 1; + } else if (temp_ei->array_type == STATIC_ARRAY) { + data_len_value = temp_ei->elem_len; + } else if (data_len_value <= 0 || + temp_ei->elem_len < data_len_value) { + pr_err("%s: Invalid data length\n", __func__); + return -EINVAL; + } + + switch (temp_ei->data_type) { + case QMI_OPT_FLAG: + rc = qmi_encode_basic_elem(&opt_flag_value, buf_src, + 1, sizeof(u8)); + if (opt_flag_value) + temp_ei = temp_ei + 1; + else + temp_ei = skip_to_next_elem(temp_ei, enc_level); + break; + + case QMI_DATA_LEN: + memcpy(&data_len_value, buf_src, temp_ei->elem_size); + data_len_sz = temp_ei->elem_size == sizeof(u8) ? + sizeof(u8) : sizeof(u16); + /* Check to avoid out of range buffer access */ + if ((data_len_sz + encoded_bytes + TLV_LEN_SIZE + + TLV_TYPE_SIZE) > out_buf_len) { + pr_err("%s: Too Small Buffer @DATA_LEN\n", + __func__); + return -ETOOSMALL; + } + rc = qmi_encode_basic_elem(buf_dst, &data_len_value, + 1, data_len_sz); + UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst, + encoded_bytes, tlv_len, + encode_tlv, rc); + if (!data_len_value) + temp_ei = skip_to_next_elem(temp_ei, enc_level); + else + encode_tlv = 0; + break; + + case QMI_UNSIGNED_1_BYTE: + case QMI_UNSIGNED_2_BYTE: + case QMI_UNSIGNED_4_BYTE: + case QMI_UNSIGNED_8_BYTE: + case QMI_SIGNED_2_BYTE_ENUM: + case QMI_SIGNED_4_BYTE_ENUM: + /* Check to avoid out of range buffer access */ + if (((data_len_value * temp_ei->elem_size) + + encoded_bytes + TLV_LEN_SIZE + TLV_TYPE_SIZE) > + out_buf_len) { + pr_err("%s: Too Small Buffer @data_type:%d\n", + __func__, temp_ei->data_type); + return -ETOOSMALL; + } + rc = qmi_encode_basic_elem(buf_dst, buf_src, + data_len_value, + temp_ei->elem_size); + UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst, + encoded_bytes, tlv_len, + encode_tlv, rc); + break; + + case QMI_STRUCT: + rc = qmi_encode_struct_elem(temp_ei, buf_dst, buf_src, + data_len_value, + out_buf_len - encoded_bytes, + enc_level + 1); + if (rc < 0) + return rc; + UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst, + encoded_bytes, tlv_len, + encode_tlv, rc); + break; + + case QMI_STRING: + rc = qmi_encode_string_elem(temp_ei, buf_dst, buf_src, + out_buf_len - encoded_bytes, + enc_level); + if (rc < 0) + return rc; + UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst, + encoded_bytes, tlv_len, + encode_tlv, rc); + break; + default: + pr_err("%s: Unrecognized data type\n", __func__); + return -EINVAL; + } + + if (encode_tlv && enc_level == 1) { + QMI_ENCDEC_ENCODE_TLV(tlv_type, tlv_len, tlv_pointer); + encoded_bytes += (TLV_TYPE_SIZE + TLV_LEN_SIZE); + tlv_pointer = buf_dst; + tlv_len = 0; + buf_dst = buf_dst + TLV_LEN_SIZE + TLV_TYPE_SIZE; + encode_tlv = 0; + } + } + + return encoded_bytes; +} + +/** + * qmi_decode_basic_elem() - Decodes elements of basic/primary data type + * @buf_dst: Buffer to store the decoded element. 
+ * @buf_src: Buffer containing the elements in QMI wire format. + * @elem_len: Number of elements to be decoded. + * @elem_size: Size of a single instance of the element to be decoded. + * + * This function decodes the "elem_len" number of elements in QMI wire format, + * each of size "elem_size" bytes from the source buffer "buf_src" and stores + * the decoded elements in the destination buffer "buf_dst". The elements are + * of primary data type which include u8 - u64 or similar. This + * function returns the number of bytes of decoded information. + * + * Return: The total size of the decoded data elements, in bytes. + */ +static int qmi_decode_basic_elem(void *buf_dst, const void *buf_src, + u32 elem_len, u32 elem_size) +{ + u32 i, rc = 0; + + for (i = 0; i < elem_len; i++) { + QMI_ENCDEC_DECODE_N_BYTES(buf_dst, buf_src, elem_size); + rc += elem_size; + } + + return rc; +} + +/** + * qmi_decode_struct_elem() - Decodes elements of struct data type + * @ei_array: Struct info array describing the struct element. + * @buf_dst: Buffer to store the decoded element. + * @buf_src: Buffer containing the elements in QMI wire format. + * @elem_len: Number of elements to be decoded. + * @tlv_len: Total size of the encoded information corresponding to + * this struct element. + * @dec_level: Depth of the nested structure from the main structure. + * + * This function decodes the "elem_len" number of elements in QMI wire format, + * each of size "(tlv_len/elem_len)" bytes from the source buffer "buf_src" + * and stores the decoded elements in the destination buffer "buf_dst". The + * elements are of struct data type which includes any C structure. This + * function returns the number of bytes of decoded information. + * + * Return: The total size of the decoded data elements on success, negative + * errno on error. + */ +static int qmi_decode_struct_elem(const struct qmi_elem_info *ei_array, + void *buf_dst, const void *buf_src, + u32 elem_len, u32 tlv_len, + int dec_level) +{ + int i, rc, decoded_bytes = 0; + const struct qmi_elem_info *temp_ei = ei_array; + + for (i = 0; i < elem_len && decoded_bytes < tlv_len; i++) { + rc = qmi_decode(temp_ei->ei_array, buf_dst, buf_src, + tlv_len - decoded_bytes, dec_level); + if (rc < 0) + return rc; + buf_src = buf_src + rc; + buf_dst = buf_dst + temp_ei->elem_size; + decoded_bytes += rc; + } + + if ((dec_level <= 2 && decoded_bytes != tlv_len) || + (dec_level > 2 && (i < elem_len || decoded_bytes > tlv_len))) { + pr_err("%s: Fault in decoding: dl(%d), db(%d), tl(%d), i(%d), el(%d)\n", + __func__, dec_level, decoded_bytes, tlv_len, + i, elem_len); + return -EFAULT; + } + + return decoded_bytes; +} + +/** + * qmi_decode_string_elem() - Decodes elements of string data type + * @ei_array: Struct info array describing the string element. + * @buf_dst: Buffer to store the decoded element. + * @buf_src: Buffer containing the elements in QMI wire format. + * @tlv_len: Total size of the encoded information corresponding to + * this string element. + * @dec_level: Depth of the string element from the main structure. + * + * This function decodes the string element of maximum length + * "ei_array->elem_len" from the source buffer "buf_src" and puts it into + * the destination buffer "buf_dst". This function returns number of bytes + * decoded from the input buffer. + * + * Return: The total size of the decoded data elements on success, negative + * errno on error. 
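 *
 * Wire-format illustration: at the top level a string carries no separate
 * length field, the TLV length is the string length, so "abc" in a TLV of
 * type 0x02 arrives as 02 03 00 61 62 63. A string nested inside a struct
 * instead starts with its own length prefix (1 byte if the declared
 * elem_len fits in a u8, otherwise 2 bytes), which is the string_len_sz
 * handling below. In both cases string_len must stay strictly below
 * elem_len so the destination has room for the terminating NUL added here.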
+ */ +static int qmi_decode_string_elem(const struct qmi_elem_info *ei_array, + void *buf_dst, const void *buf_src, + u32 tlv_len, int dec_level) +{ + int rc; + int decoded_bytes = 0; + u32 string_len = 0; + u32 string_len_sz = 0; + const struct qmi_elem_info *temp_ei = ei_array; + + if (dec_level == 1) { + string_len = tlv_len; + } else { + string_len_sz = temp_ei->elem_len <= U8_MAX ? + sizeof(u8) : sizeof(u16); + rc = qmi_decode_basic_elem(&string_len, buf_src, + 1, string_len_sz); + decoded_bytes += rc; + } + + if (string_len >= temp_ei->elem_len) { + pr_err("%s: String len %d >= Max Len %d\n", + __func__, string_len, temp_ei->elem_len); + return -ETOOSMALL; + } else if (string_len > tlv_len) { + pr_err("%s: String len %d > Input Buffer Len %d\n", + __func__, string_len, tlv_len); + return -EFAULT; + } + + rc = qmi_decode_basic_elem(buf_dst, buf_src + decoded_bytes, + string_len, temp_ei->elem_size); + *((char *)buf_dst + string_len) = '\0'; + decoded_bytes += rc; + + return decoded_bytes; +} + +/** + * find_ei() - Find element info corresponding to TLV Type + * @ei_array: Struct info array of the message being decoded. + * @type: TLV Type of the element being searched. + * + * Every element that got encoded in the QMI message will have a type + * information associated with it. While decoding the QMI message, + * this function is used to find the struct info regarding the element + * that corresponds to the type being decoded. + * + * Return: Pointer to struct info, if found + */ +static const struct qmi_elem_info *find_ei(const struct qmi_elem_info *ei_array, + u32 type) +{ + const struct qmi_elem_info *temp_ei = ei_array; + + while (temp_ei->data_type != QMI_EOTI) { + if (temp_ei->tlv_type == (u8)type) + return temp_ei; + temp_ei = temp_ei + 1; + } + + return NULL; +} + +/** + * qmi_decode() - Core Decode Function + * @ei_array: Struct info array describing the structure to be decoded. + * @out_c_struct: Buffer to hold the decoded C struct + * @in_buf: Buffer containing the QMI message to be decoded + * @in_buf_len: Length of the QMI message to be decoded + * @dec_level: Decode level to indicate the depth of the nested structure, + * within the main structure, being decoded + * + * Return: The number of bytes of decoded information on success, negative + * errno on error. + */ +static int qmi_decode(const struct qmi_elem_info *ei_array, void *out_c_struct, + const void *in_buf, u32 in_buf_len, + int dec_level) +{ + const struct qmi_elem_info *temp_ei = ei_array; + u8 opt_flag_value = 1; + u32 data_len_value = 0, data_len_sz = 0; + u8 *buf_dst = out_c_struct; + const u8 *tlv_pointer; + u32 tlv_len = 0; + u32 tlv_type; + u32 decoded_bytes = 0; + const void *buf_src = in_buf; + int rc; + + while (decoded_bytes < in_buf_len) { + if (dec_level >= 2 && temp_ei->data_type == QMI_EOTI) + return decoded_bytes; + + if (dec_level == 1) { + tlv_pointer = buf_src; + QMI_ENCDEC_DECODE_TLV(&tlv_type, + &tlv_len, tlv_pointer); + buf_src += (TLV_TYPE_SIZE + TLV_LEN_SIZE); + decoded_bytes += (TLV_TYPE_SIZE + TLV_LEN_SIZE); + temp_ei = find_ei(ei_array, tlv_type); + if (!temp_ei && tlv_type < OPTIONAL_TLV_TYPE_START) { + pr_err("%s: Inval element info\n", __func__); + return -EINVAL; + } else if (!temp_ei) { + UPDATE_DECODE_VARIABLES(buf_src, + decoded_bytes, tlv_len); + continue; + } + } else { + /* + * No length information for elements in nested + * structures. So use remaining decodable buffer space. 
+ */ + tlv_len = in_buf_len - decoded_bytes; + } + + buf_dst = out_c_struct + temp_ei->offset; + if (temp_ei->data_type == QMI_OPT_FLAG) { + memcpy(buf_dst, &opt_flag_value, sizeof(u8)); + temp_ei = temp_ei + 1; + buf_dst = out_c_struct + temp_ei->offset; + } + + if (temp_ei->data_type == QMI_DATA_LEN) { + data_len_sz = temp_ei->elem_size == sizeof(u8) ? + sizeof(u8) : sizeof(u16); + rc = qmi_decode_basic_elem(&data_len_value, buf_src, + 1, data_len_sz); + memcpy(buf_dst, &data_len_value, sizeof(u32)); + temp_ei = temp_ei + 1; + buf_dst = out_c_struct + temp_ei->offset; + tlv_len -= data_len_sz; + UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc); + } + + if (temp_ei->array_type == NO_ARRAY) { + data_len_value = 1; + } else if (temp_ei->array_type == STATIC_ARRAY) { + data_len_value = temp_ei->elem_len; + } else if (data_len_value > temp_ei->elem_len) { + pr_err("%s: Data len %d > max spec %d\n", + __func__, data_len_value, temp_ei->elem_len); + return -ETOOSMALL; + } + + switch (temp_ei->data_type) { + case QMI_UNSIGNED_1_BYTE: + case QMI_UNSIGNED_2_BYTE: + case QMI_UNSIGNED_4_BYTE: + case QMI_UNSIGNED_8_BYTE: + case QMI_SIGNED_2_BYTE_ENUM: + case QMI_SIGNED_4_BYTE_ENUM: + rc = qmi_decode_basic_elem(buf_dst, buf_src, + data_len_value, + temp_ei->elem_size); + UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc); + break; + + case QMI_STRUCT: + rc = qmi_decode_struct_elem(temp_ei, buf_dst, buf_src, + data_len_value, tlv_len, + dec_level + 1); + if (rc < 0) + return rc; + UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc); + break; + + case QMI_STRING: + rc = qmi_decode_string_elem(temp_ei, buf_dst, buf_src, + tlv_len, dec_level); + if (rc < 0) + return rc; + UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc); + break; + + default: + pr_err("%s: Unrecognized data type\n", __func__); + return -EINVAL; + } + temp_ei = temp_ei + 1; + } + + return decoded_bytes; +} + +/** + * qmi_encode_message() - Encode C structure as QMI encoded message + * @type: Type of QMI message + * @msg_id: Message ID of the message + * @len: Passed as max length of the message, updated to actual size + * @txn_id: Transaction ID + * @ei: QMI message descriptor + * @c_struct: Reference to structure to encode + * + * Return: Buffer with encoded message, or negative ERR_PTR() on error + */ +void *qmi_encode_message(int type, unsigned int msg_id, size_t *len, + unsigned int txn_id, const struct qmi_elem_info *ei, + const void *c_struct) +{ + struct qmi_header *hdr; + ssize_t msglen = 0; + void *msg; + int ret; + + /* Check the possibility of a zero length QMI message */ + if (!c_struct) { + ret = qmi_calc_min_msg_len(ei, 1); + if (ret) { + pr_err("%s: Calc. 
len %d != 0, but NULL c_struct\n", + __func__, ret); + return ERR_PTR(-EINVAL); + } + } + + msg = kzalloc(sizeof(*hdr) + *len, GFP_KERNEL); + if (!msg) + return ERR_PTR(-ENOMEM); + + /* Encode message, if we have a message */ + if (c_struct) { + msglen = qmi_encode(ei, msg + sizeof(*hdr), c_struct, *len, 1); + if (msglen < 0) { + kfree(msg); + return ERR_PTR(msglen); + } + } + + hdr = msg; + hdr->type = type; + hdr->txn_id = txn_id; + hdr->msg_id = msg_id; + hdr->msg_len = msglen; + + *len = sizeof(*hdr) + msglen; + + return msg; +} +EXPORT_SYMBOL(qmi_encode_message); + +/** + * qmi_decode_message() - Decode QMI encoded message to C structure + * @buf: Buffer with encoded message + * @len: Amount of data in @buf + * @ei: QMI message descriptor + * @c_struct: Reference to structure to decode into + * + * Return: The number of bytes of decoded information on success, negative + * errno on error. + */ +int qmi_decode_message(const void *buf, size_t len, + const struct qmi_elem_info *ei, void *c_struct) +{ + if (!ei) + return -EINVAL; + + if (!c_struct || !buf || !len) + return -EINVAL; + + return qmi_decode(ei, c_struct, buf + sizeof(struct qmi_header), + len - sizeof(struct qmi_header), 1); +} +EXPORT_SYMBOL(qmi_decode_message); + +/* Common header in all QMI responses */ +const struct qmi_elem_info qmi_response_type_v01_ei[] = { + { + .data_type = QMI_SIGNED_2_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct qmi_response_type_v01, result), + .ei_array = NULL, + }, + { + .data_type = QMI_SIGNED_2_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct qmi_response_type_v01, error), + .ei_array = NULL, + }, + { + .data_type = QMI_EOTI, + .elem_len = 0, + .elem_size = 0, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = 0, + .ei_array = NULL, + }, +}; +EXPORT_SYMBOL(qmi_response_type_v01_ei); + +MODULE_DESCRIPTION("QMI encoder/decoder helper"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/qmi_interface.c b/drivers/soc/qcom/qmi_interface.c new file mode 100644 index 0000000000..78d7361fdc --- /dev/null +++ b/drivers/soc/qcom/qmi_interface.c @@ -0,0 +1,854 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2017 Linaro Ltd. + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/qrtr.h> +#include <linux/net.h> +#include <linux/completion.h> +#include <linux/idr.h> +#include <linux/string.h> +#include <net/sock.h> +#include <linux/workqueue.h> +#include <trace/events/sock.h> +#include <linux/soc/qcom/qmi.h> + +static struct socket *qmi_sock_create(struct qmi_handle *qmi, + struct sockaddr_qrtr *sq); + +/** + * qmi_recv_new_server() - handler of NEW_SERVER control message + * @qmi: qmi handle + * @service: service id of the new server + * @instance: instance id of the new server + * @node: node of the new server + * @port: port of the new server + * + * Calls the new_server callback to inform the client about a newly registered + * server matching the currently registered service lookup. 
+ */ +static void qmi_recv_new_server(struct qmi_handle *qmi, + unsigned int service, unsigned int instance, + unsigned int node, unsigned int port) +{ + struct qmi_ops *ops = &qmi->ops; + struct qmi_service *svc; + int ret; + + if (!ops->new_server) + return; + + /* Ignore EOF marker */ + if (!node && !port) + return; + + svc = kzalloc(sizeof(*svc), GFP_KERNEL); + if (!svc) + return; + + svc->service = service; + svc->version = instance & 0xff; + svc->instance = instance >> 8; + svc->node = node; + svc->port = port; + + ret = ops->new_server(qmi, svc); + if (ret < 0) + kfree(svc); + else + list_add(&svc->list_node, &qmi->lookup_results); +} + +/** + * qmi_recv_del_server() - handler of DEL_SERVER control message + * @qmi: qmi handle + * @node: node of the dying server, a value of -1 matches all nodes + * @port: port of the dying server, a value of -1 matches all ports + * + * Calls the del_server callback for each previously seen server, allowing the + * client to react to the disappearing server. + */ +static void qmi_recv_del_server(struct qmi_handle *qmi, + unsigned int node, unsigned int port) +{ + struct qmi_ops *ops = &qmi->ops; + struct qmi_service *svc; + struct qmi_service *tmp; + + list_for_each_entry_safe(svc, tmp, &qmi->lookup_results, list_node) { + if (node != -1 && svc->node != node) + continue; + if (port != -1 && svc->port != port) + continue; + + if (ops->del_server) + ops->del_server(qmi, svc); + + list_del(&svc->list_node); + kfree(svc); + } +} + +/** + * qmi_recv_bye() - handler of BYE control message + * @qmi: qmi handle + * @node: id of the dying node + * + * Signals the client that all previously registered services on this node are + * now gone and then calls the bye callback to allow the client further + * cleaning up resources associated with this remote. + */ +static void qmi_recv_bye(struct qmi_handle *qmi, + unsigned int node) +{ + struct qmi_ops *ops = &qmi->ops; + + qmi_recv_del_server(qmi, node, -1); + + if (ops->bye) + ops->bye(qmi, node); +} + +/** + * qmi_recv_del_client() - handler of DEL_CLIENT control message + * @qmi: qmi handle + * @node: node of the dying client + * @port: port of the dying client + * + * Signals the client about a dying client, by calling the del_client callback. 
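 *
 * A client usually receives these notifications through callbacks in
 * struct qmi_ops, passed to qmi_handle_init(); a minimal sketch with
 * hypothetical callbacks:
 *
 *	static int demo_new_server(struct qmi_handle *qmi, struct qmi_service *svc)
 *	{
 *		// remember svc->node and svc->port as the request destination
 *		return 0;
 *	}
 *
 *	static void demo_del_server(struct qmi_handle *qmi, struct qmi_service *svc)
 *	{
 *		// forget the previously stored destination
 *	}
 *
 *	static const struct qmi_ops demo_ops = {
 *		.new_server = demo_new_server,
 *		.del_server = demo_del_server,
 *	};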
+ */ +static void qmi_recv_del_client(struct qmi_handle *qmi, + unsigned int node, unsigned int port) +{ + struct qmi_ops *ops = &qmi->ops; + + if (ops->del_client) + ops->del_client(qmi, node, port); +} + +static void qmi_recv_ctrl_pkt(struct qmi_handle *qmi, + const void *buf, size_t len) +{ + const struct qrtr_ctrl_pkt *pkt = buf; + + if (len < sizeof(struct qrtr_ctrl_pkt)) { + pr_debug("ignoring short control packet\n"); + return; + } + + switch (le32_to_cpu(pkt->cmd)) { + case QRTR_TYPE_BYE: + qmi_recv_bye(qmi, le32_to_cpu(pkt->client.node)); + break; + case QRTR_TYPE_NEW_SERVER: + qmi_recv_new_server(qmi, + le32_to_cpu(pkt->server.service), + le32_to_cpu(pkt->server.instance), + le32_to_cpu(pkt->server.node), + le32_to_cpu(pkt->server.port)); + break; + case QRTR_TYPE_DEL_SERVER: + qmi_recv_del_server(qmi, + le32_to_cpu(pkt->server.node), + le32_to_cpu(pkt->server.port)); + break; + case QRTR_TYPE_DEL_CLIENT: + qmi_recv_del_client(qmi, + le32_to_cpu(pkt->client.node), + le32_to_cpu(pkt->client.port)); + break; + } +} + +static void qmi_send_new_lookup(struct qmi_handle *qmi, struct qmi_service *svc) +{ + struct qrtr_ctrl_pkt pkt; + struct sockaddr_qrtr sq; + struct msghdr msg = { }; + struct kvec iv = { &pkt, sizeof(pkt) }; + int ret; + + memset(&pkt, 0, sizeof(pkt)); + pkt.cmd = cpu_to_le32(QRTR_TYPE_NEW_LOOKUP); + pkt.server.service = cpu_to_le32(svc->service); + pkt.server.instance = cpu_to_le32(svc->version | svc->instance << 8); + + sq.sq_family = qmi->sq.sq_family; + sq.sq_node = qmi->sq.sq_node; + sq.sq_port = QRTR_PORT_CTRL; + + msg.msg_name = &sq; + msg.msg_namelen = sizeof(sq); + + mutex_lock(&qmi->sock_lock); + if (qmi->sock) { + ret = kernel_sendmsg(qmi->sock, &msg, &iv, 1, sizeof(pkt)); + if (ret < 0) + pr_err("failed to send lookup registration: %d\n", ret); + } + mutex_unlock(&qmi->sock_lock); +} + +/** + * qmi_add_lookup() - register a new lookup with the name service + * @qmi: qmi handle + * @service: service id of the request + * @instance: instance id of the request + * @version: version number of the request + * + * Registering a lookup query with the name server will cause the name server + * to send NEW_SERVER and DEL_SERVER control messages to this socket as + * matching services are registered. + * + * Return: 0 on success, negative errno on failure. 
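 *
 * A minimal client sketch (hypothetical service, version and instance
 * numbers; demo_ops as in the sketch above):
 *
 *	struct qmi_handle qmi;
 *	int ret;
 *
 *	ret = qmi_handle_init(&qmi, 1024, &demo_ops, NULL);
 *	if (ret < 0)
 *		return ret;
 *
 *	ret = qmi_add_lookup(&qmi, 15, 1, 0);
 *	if (ret < 0)
 *		qmi_handle_release(&qmi);
 *	return ret;
 *
 * Matching services are then reported through demo_ops.new_server().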
+ */ +int qmi_add_lookup(struct qmi_handle *qmi, unsigned int service, + unsigned int version, unsigned int instance) +{ + struct qmi_service *svc; + + svc = kzalloc(sizeof(*svc), GFP_KERNEL); + if (!svc) + return -ENOMEM; + + svc->service = service; + svc->version = version; + svc->instance = instance; + + list_add(&svc->list_node, &qmi->lookups); + + qmi_send_new_lookup(qmi, svc); + + return 0; +} +EXPORT_SYMBOL(qmi_add_lookup); + +static void qmi_send_new_server(struct qmi_handle *qmi, struct qmi_service *svc) +{ + struct qrtr_ctrl_pkt pkt; + struct sockaddr_qrtr sq; + struct msghdr msg = { }; + struct kvec iv = { &pkt, sizeof(pkt) }; + int ret; + + memset(&pkt, 0, sizeof(pkt)); + pkt.cmd = cpu_to_le32(QRTR_TYPE_NEW_SERVER); + pkt.server.service = cpu_to_le32(svc->service); + pkt.server.instance = cpu_to_le32(svc->version | svc->instance << 8); + pkt.server.node = cpu_to_le32(qmi->sq.sq_node); + pkt.server.port = cpu_to_le32(qmi->sq.sq_port); + + sq.sq_family = qmi->sq.sq_family; + sq.sq_node = qmi->sq.sq_node; + sq.sq_port = QRTR_PORT_CTRL; + + msg.msg_name = &sq; + msg.msg_namelen = sizeof(sq); + + mutex_lock(&qmi->sock_lock); + if (qmi->sock) { + ret = kernel_sendmsg(qmi->sock, &msg, &iv, 1, sizeof(pkt)); + if (ret < 0) + pr_err("send service registration failed: %d\n", ret); + } + mutex_unlock(&qmi->sock_lock); +} + +/** + * qmi_add_server() - register a service with the name service + * @qmi: qmi handle + * @service: type of the service + * @instance: instance of the service + * @version: version of the service + * + * Register a new service with the name service. This allows clients to find + * and start sending messages to the client associated with @qmi. + * + * Return: 0 on success, negative errno on failure. + */ +int qmi_add_server(struct qmi_handle *qmi, unsigned int service, + unsigned int version, unsigned int instance) +{ + struct qmi_service *svc; + + svc = kzalloc(sizeof(*svc), GFP_KERNEL); + if (!svc) + return -ENOMEM; + + svc->service = service; + svc->version = version; + svc->instance = instance; + + list_add(&svc->list_node, &qmi->services); + + qmi_send_new_server(qmi, svc); + + return 0; +} +EXPORT_SYMBOL(qmi_add_server); + +/** + * qmi_txn_init() - allocate transaction id within the given QMI handle + * @qmi: QMI handle + * @txn: transaction context + * @ei: description of how to decode a matching response (optional) + * @c_struct: pointer to the object to decode the response into (optional) + * + * This allocates a transaction id within the QMI handle. If @ei and @c_struct + * are specified any responses to this transaction will be decoded as described + * by @ei into @c_struct. + * + * A client calling qmi_txn_init() must call either qmi_txn_wait() or + * qmi_txn_cancel() to free up the allocated resources. + * + * Return: Transaction id on success, negative errno on failure. 
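 *
 * Typical request/response sequence built on this (demo_req/demo_resp and
 * their ei tables are hypothetical; svc_sq is the sockaddr_qrtr built from
 * the node/port reported by new_server()):
 *
 *	struct demo_req req = { .id = 1 };
 *	struct demo_resp resp = {};
 *	struct qmi_txn txn;
 *	int ret;
 *
 *	ret = qmi_txn_init(qmi, &txn, demo_resp_ei, &resp);
 *	if (ret < 0)
 *		return ret;
 *
 *	ret = qmi_send_request(qmi, &svc_sq, &txn, DEMO_REQ_MSG_ID,
 *			       DEMO_REQ_MAX_LEN, demo_req_ei, &req);
 *	if (ret < 0) {
 *		qmi_txn_cancel(&txn);
 *		return ret;
 *	}
 *
 *	ret = qmi_txn_wait(&txn, 5 * HZ);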
+ */ +int qmi_txn_init(struct qmi_handle *qmi, struct qmi_txn *txn, + const struct qmi_elem_info *ei, void *c_struct) +{ + int ret; + + memset(txn, 0, sizeof(*txn)); + + mutex_init(&txn->lock); + init_completion(&txn->completion); + txn->qmi = qmi; + txn->ei = ei; + txn->dest = c_struct; + + mutex_lock(&qmi->txn_lock); + ret = idr_alloc_cyclic(&qmi->txns, txn, 0, U16_MAX, GFP_KERNEL); + if (ret < 0) + pr_err("failed to allocate transaction id\n"); + + txn->id = ret; + mutex_unlock(&qmi->txn_lock); + + return ret; +} +EXPORT_SYMBOL(qmi_txn_init); + +/** + * qmi_txn_wait() - wait for a response on a transaction + * @txn: transaction handle + * @timeout: timeout, in jiffies + * + * If the transaction is decoded by the means of @ei and @c_struct the return + * value will be the returned value of qmi_decode_message(), otherwise it's up + * to the specified message handler to fill out the result. + * + * Return: the transaction response on success, negative errno on failure. + */ +int qmi_txn_wait(struct qmi_txn *txn, unsigned long timeout) +{ + struct qmi_handle *qmi = txn->qmi; + int ret; + + ret = wait_for_completion_timeout(&txn->completion, timeout); + + mutex_lock(&qmi->txn_lock); + mutex_lock(&txn->lock); + idr_remove(&qmi->txns, txn->id); + mutex_unlock(&txn->lock); + mutex_unlock(&qmi->txn_lock); + + if (ret == 0) + return -ETIMEDOUT; + else + return txn->result; +} +EXPORT_SYMBOL(qmi_txn_wait); + +/** + * qmi_txn_cancel() - cancel an ongoing transaction + * @txn: transaction id + */ +void qmi_txn_cancel(struct qmi_txn *txn) +{ + struct qmi_handle *qmi = txn->qmi; + + mutex_lock(&qmi->txn_lock); + mutex_lock(&txn->lock); + idr_remove(&qmi->txns, txn->id); + mutex_unlock(&txn->lock); + mutex_unlock(&qmi->txn_lock); +} +EXPORT_SYMBOL(qmi_txn_cancel); + +/** + * qmi_invoke_handler() - find and invoke a handler for a message + * @qmi: qmi handle + * @sq: sockaddr of the sender + * @txn: transaction object for the message + * @buf: buffer containing the message + * @len: length of @buf + * + * Find handler and invoke handler for the incoming message. + */ +static void qmi_invoke_handler(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, + struct qmi_txn *txn, const void *buf, size_t len) +{ + const struct qmi_msg_handler *handler; + const struct qmi_header *hdr = buf; + void *dest; + int ret; + + if (!qmi->handlers) + return; + + for (handler = qmi->handlers; handler->fn; handler++) { + if (handler->type == hdr->type && + handler->msg_id == hdr->msg_id) + break; + } + + if (!handler->fn) + return; + + dest = kzalloc(handler->decoded_size, GFP_KERNEL); + if (!dest) + return; + + ret = qmi_decode_message(buf, len, handler->ei, dest); + if (ret < 0) + pr_err("failed to decode incoming message\n"); + else + handler->fn(qmi, sq, txn, dest); + + kfree(dest); +} + +/** + * qmi_handle_net_reset() - invoked to handle ENETRESET on a QMI handle + * @qmi: the QMI context + * + * As a result of registering a name service with the QRTR all open sockets are + * flagged with ENETRESET and this function will be called. The typical case is + * the initial boot, where this signals that the local node id has been + * configured and as such any bound sockets needs to be rebound. So close the + * socket, inform the client and re-initialize the socket. + * + * For clients it's generally sufficient to react to the del_server callbacks, + * but server code is expected to treat the net_reset callback as a "bye" from + * all nodes. 
+ * + * Finally the QMI handle will send out registration requests for any lookups + * and services. + */ +static void qmi_handle_net_reset(struct qmi_handle *qmi) +{ + struct sockaddr_qrtr sq; + struct qmi_service *svc; + struct socket *sock; + + sock = qmi_sock_create(qmi, &sq); + if (IS_ERR(sock)) + return; + + mutex_lock(&qmi->sock_lock); + sock_release(qmi->sock); + qmi->sock = NULL; + mutex_unlock(&qmi->sock_lock); + + qmi_recv_del_server(qmi, -1, -1); + + if (qmi->ops.net_reset) + qmi->ops.net_reset(qmi); + + mutex_lock(&qmi->sock_lock); + qmi->sock = sock; + qmi->sq = sq; + mutex_unlock(&qmi->sock_lock); + + list_for_each_entry(svc, &qmi->lookups, list_node) + qmi_send_new_lookup(qmi, svc); + + list_for_each_entry(svc, &qmi->services, list_node) + qmi_send_new_server(qmi, svc); +} + +static void qmi_handle_message(struct qmi_handle *qmi, + struct sockaddr_qrtr *sq, + const void *buf, size_t len) +{ + const struct qmi_header *hdr; + struct qmi_txn tmp_txn; + struct qmi_txn *txn = NULL; + int ret; + + if (len < sizeof(*hdr)) { + pr_err("ignoring short QMI packet\n"); + return; + } + + hdr = buf; + + /* If this is a response, find the matching transaction handle */ + if (hdr->type == QMI_RESPONSE) { + mutex_lock(&qmi->txn_lock); + txn = idr_find(&qmi->txns, hdr->txn_id); + + /* Ignore unexpected responses */ + if (!txn) { + mutex_unlock(&qmi->txn_lock); + return; + } + + mutex_lock(&txn->lock); + mutex_unlock(&qmi->txn_lock); + + if (txn->dest && txn->ei) { + ret = qmi_decode_message(buf, len, txn->ei, txn->dest); + if (ret < 0) + pr_err("failed to decode incoming message\n"); + + txn->result = ret; + complete(&txn->completion); + } else { + qmi_invoke_handler(qmi, sq, txn, buf, len); + } + + mutex_unlock(&txn->lock); + } else { + /* Create a txn based on the txn_id of the incoming message */ + memset(&tmp_txn, 0, sizeof(tmp_txn)); + tmp_txn.id = hdr->txn_id; + + qmi_invoke_handler(qmi, sq, &tmp_txn, buf, len); + } +} + +static void qmi_data_ready_work(struct work_struct *work) +{ + struct qmi_handle *qmi = container_of(work, struct qmi_handle, work); + struct qmi_ops *ops = &qmi->ops; + struct sockaddr_qrtr sq; + struct msghdr msg = { .msg_name = &sq, .msg_namelen = sizeof(sq) }; + struct kvec iv; + ssize_t msglen; + + for (;;) { + iv.iov_base = qmi->recv_buf; + iv.iov_len = qmi->recv_buf_size; + + mutex_lock(&qmi->sock_lock); + if (qmi->sock) + msglen = kernel_recvmsg(qmi->sock, &msg, &iv, 1, + iv.iov_len, MSG_DONTWAIT); + else + msglen = -EPIPE; + mutex_unlock(&qmi->sock_lock); + if (msglen == -EAGAIN) + break; + + if (msglen == -ENETRESET) { + qmi_handle_net_reset(qmi); + + /* The old qmi->sock is gone, our work is done */ + break; + } + + if (msglen < 0) { + pr_err("qmi recvmsg failed: %zd\n", msglen); + break; + } + + if (sq.sq_node == qmi->sq.sq_node && + sq.sq_port == QRTR_PORT_CTRL) { + qmi_recv_ctrl_pkt(qmi, qmi->recv_buf, msglen); + } else if (ops->msg_handler) { + ops->msg_handler(qmi, &sq, qmi->recv_buf, msglen); + } else { + qmi_handle_message(qmi, &sq, qmi->recv_buf, msglen); + } + } +} + +static void qmi_data_ready(struct sock *sk) +{ + struct qmi_handle *qmi = sk->sk_user_data; + + trace_sk_data_ready(sk); + + /* + * This will be NULL if we receive data while being in + * qmi_handle_release() + */ + if (!qmi) + return; + + queue_work(qmi->wq, &qmi->work); +} + +static struct socket *qmi_sock_create(struct qmi_handle *qmi, + struct sockaddr_qrtr *sq) +{ + struct socket *sock; + int ret; + + ret = sock_create_kern(&init_net, AF_QIPCRTR, SOCK_DGRAM, + PF_QIPCRTR, &sock); 
+ if (ret < 0) + return ERR_PTR(ret); + + ret = kernel_getsockname(sock, (struct sockaddr *)sq); + if (ret < 0) { + sock_release(sock); + return ERR_PTR(ret); + } + + sock->sk->sk_user_data = qmi; + sock->sk->sk_data_ready = qmi_data_ready; + sock->sk->sk_error_report = qmi_data_ready; + + return sock; +} + +/** + * qmi_handle_init() - initialize a QMI client handle + * @qmi: QMI handle to initialize + * @recv_buf_size: maximum size of incoming message + * @ops: reference to callbacks for QRTR notifications + * @handlers: NULL-terminated list of QMI message handlers + * + * This initializes the QMI client handle to allow sending and receiving QMI + * messages. As messages are received the appropriate handler will be invoked. + * + * Return: 0 on success, negative errno on failure. + */ +int qmi_handle_init(struct qmi_handle *qmi, size_t recv_buf_size, + const struct qmi_ops *ops, + const struct qmi_msg_handler *handlers) +{ + int ret; + + mutex_init(&qmi->txn_lock); + mutex_init(&qmi->sock_lock); + + idr_init(&qmi->txns); + + INIT_LIST_HEAD(&qmi->lookups); + INIT_LIST_HEAD(&qmi->lookup_results); + INIT_LIST_HEAD(&qmi->services); + + INIT_WORK(&qmi->work, qmi_data_ready_work); + + qmi->handlers = handlers; + if (ops) + qmi->ops = *ops; + + /* Make room for the header */ + recv_buf_size += sizeof(struct qmi_header); + /* Must also be sufficient to hold a control packet */ + if (recv_buf_size < sizeof(struct qrtr_ctrl_pkt)) + recv_buf_size = sizeof(struct qrtr_ctrl_pkt); + + qmi->recv_buf_size = recv_buf_size; + qmi->recv_buf = kzalloc(recv_buf_size, GFP_KERNEL); + if (!qmi->recv_buf) + return -ENOMEM; + + qmi->wq = alloc_ordered_workqueue("qmi_msg_handler", 0); + if (!qmi->wq) { + ret = -ENOMEM; + goto err_free_recv_buf; + } + + qmi->sock = qmi_sock_create(qmi, &qmi->sq); + if (IS_ERR(qmi->sock)) { + if (PTR_ERR(qmi->sock) == -EAFNOSUPPORT) { + ret = -EPROBE_DEFER; + } else { + pr_err("failed to create QMI socket\n"); + ret = PTR_ERR(qmi->sock); + } + goto err_destroy_wq; + } + + return 0; + +err_destroy_wq: + destroy_workqueue(qmi->wq); +err_free_recv_buf: + kfree(qmi->recv_buf); + + return ret; +} +EXPORT_SYMBOL(qmi_handle_init); + +/** + * qmi_handle_release() - release the QMI client handle + * @qmi: QMI client handle + * + * This closes the underlying socket and stops any handling of QMI messages. 
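+ * Registered lookup and service records are freed as part of this, so the handle must not be used afterwards.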
+ */ +void qmi_handle_release(struct qmi_handle *qmi) +{ + struct socket *sock = qmi->sock; + struct qmi_service *svc, *tmp; + + sock->sk->sk_user_data = NULL; + cancel_work_sync(&qmi->work); + + qmi_recv_del_server(qmi, -1, -1); + + mutex_lock(&qmi->sock_lock); + sock_release(sock); + qmi->sock = NULL; + mutex_unlock(&qmi->sock_lock); + + destroy_workqueue(qmi->wq); + + idr_destroy(&qmi->txns); + + kfree(qmi->recv_buf); + + /* Free registered lookup requests */ + list_for_each_entry_safe(svc, tmp, &qmi->lookups, list_node) { + list_del(&svc->list_node); + kfree(svc); + } + + /* Free registered service information */ + list_for_each_entry_safe(svc, tmp, &qmi->services, list_node) { + list_del(&svc->list_node); + kfree(svc); + } +} +EXPORT_SYMBOL(qmi_handle_release); + +/** + * qmi_send_message() - send a QMI message + * @qmi: QMI client handle + * @sq: destination sockaddr + * @txn: transaction object to use for the message + * @type: type of message to send + * @msg_id: message id + * @len: max length of the QMI message + * @ei: QMI message description + * @c_struct: object to be encoded + * + * This function encodes @c_struct using @ei into a message of type @type, + * with @msg_id and @txn into a buffer of maximum size @len, and sends this to + * @sq. + * + * Return: 0 on success, negative errno on failure. + */ +static ssize_t qmi_send_message(struct qmi_handle *qmi, + struct sockaddr_qrtr *sq, struct qmi_txn *txn, + int type, int msg_id, size_t len, + const struct qmi_elem_info *ei, + const void *c_struct) +{ + struct msghdr msghdr = {}; + struct kvec iv; + void *msg; + int ret; + + msg = qmi_encode_message(type, + msg_id, &len, + txn->id, ei, + c_struct); + if (IS_ERR(msg)) + return PTR_ERR(msg); + + iv.iov_base = msg; + iv.iov_len = len; + + if (sq) { + msghdr.msg_name = sq; + msghdr.msg_namelen = sizeof(*sq); + } + + mutex_lock(&qmi->sock_lock); + if (qmi->sock) { + ret = kernel_sendmsg(qmi->sock, &msghdr, &iv, 1, len); + if (ret < 0) + pr_err("failed to send QMI message\n"); + } else { + ret = -EPIPE; + } + mutex_unlock(&qmi->sock_lock); + + kfree(msg); + + return ret < 0 ? ret : 0; +} + +/** + * qmi_send_request() - send a request QMI message + * @qmi: QMI client handle + * @sq: destination sockaddr + * @txn: transaction object to use for the message + * @msg_id: message id + * @len: max length of the QMI message + * @ei: QMI message description + * @c_struct: object to be encoded + * + * Return: 0 on success, negative errno on failure. + */ +ssize_t qmi_send_request(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, + struct qmi_txn *txn, int msg_id, size_t len, + const struct qmi_elem_info *ei, const void *c_struct) +{ + return qmi_send_message(qmi, sq, txn, QMI_REQUEST, msg_id, len, ei, + c_struct); +} +EXPORT_SYMBOL(qmi_send_request); + +/** + * qmi_send_response() - send a response QMI message + * @qmi: QMI client handle + * @sq: destination sockaddr + * @txn: transaction object to use for the message + * @msg_id: message id + * @len: max length of the QMI message + * @ei: QMI message description + * @c_struct: object to be encoded + * + * Return: 0 on success, negative errno on failure. 
+ */ +ssize_t qmi_send_response(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, + struct qmi_txn *txn, int msg_id, size_t len, + const struct qmi_elem_info *ei, const void *c_struct) +{ + return qmi_send_message(qmi, sq, txn, QMI_RESPONSE, msg_id, len, ei, + c_struct); +} +EXPORT_SYMBOL(qmi_send_response); + +/** + * qmi_send_indication() - send an indication QMI message + * @qmi: QMI client handle + * @sq: destination sockaddr + * @msg_id: message id + * @len: max length of the QMI message + * @ei: QMI message description + * @c_struct: object to be encoded + * + * Return: 0 on success, negative errno on failure. + */ +ssize_t qmi_send_indication(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, + int msg_id, size_t len, + const struct qmi_elem_info *ei, + const void *c_struct) +{ + struct qmi_txn txn; + ssize_t rval; + int ret; + + ret = qmi_txn_init(qmi, &txn, NULL, NULL); + if (ret < 0) + return ret; + + rval = qmi_send_message(qmi, sq, &txn, QMI_INDICATION, msg_id, len, ei, + c_struct); + + /* We don't care about future messages on this txn */ + qmi_txn_cancel(&txn); + + return rval; +} +EXPORT_SYMBOL(qmi_send_indication); diff --git a/drivers/soc/qcom/ramp_controller.c b/drivers/soc/qcom/ramp_controller.c new file mode 100644 index 0000000000..e9a0cca071 --- /dev/null +++ b/drivers/soc/qcom/ramp_controller.c @@ -0,0 +1,346 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Qualcomm Ramp Controller driver + * Copyright (c) 2022, AngeloGioacchino Del Regno + * <angelogioacchino.delregno@collabora.com> + */ + +#include <linux/bitfield.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/types.h> + +#define RC_UPDATE_EN BIT(0) +#define RC_ROOT_EN BIT(1) + +#define RC_REG_CFG_UPDATE 0x60 +#define RC_CFG_UPDATE_EN BIT(8) +#define RC_CFG_ACK GENMASK(31, 16) + +#define RC_DCVS_CFG_SID 2 +#define RC_LINK_SID 3 +#define RC_LMH_SID 6 +#define RC_DFS_SID 14 + +#define RC_UPDATE_TIMEOUT_US 500 + +/** + * struct qcom_ramp_controller_desc - SoC specific parameters + * @cfg_dfs_sid: Dynamic Frequency Scaling SID configuration + * @cfg_link_sid: Link SID configuration + * @cfg_lmh_sid: Limits Management hardware SID configuration + * @cfg_ramp_en: Ramp Controller enable sequence + * @cfg_ramp_dis: Ramp Controller disable sequence + * @cmd_reg: Command register offset + * @num_dfs_sids: Number of DFS SIDs (max 8) + * @num_link_sids: Number of Link SIDs (max 3) + * @num_lmh_sids: Number of LMh SIDs (max 8) + * @num_ramp_en: Number of entries in enable sequence + * @num_ramp_dis: Number of entries in disable sequence + */ +struct qcom_ramp_controller_desc { + const struct reg_sequence *cfg_dfs_sid; + const struct reg_sequence *cfg_link_sid; + const struct reg_sequence *cfg_lmh_sid; + const struct reg_sequence *cfg_ramp_en; + const struct reg_sequence *cfg_ramp_dis; + u8 cmd_reg; + u8 num_dfs_sids; + u8 num_link_sids; + u8 num_lmh_sids; + u8 num_ramp_en; + u8 num_ramp_dis; +}; + +/** + * struct qcom_ramp_controller - Main driver structure + * @regmap: Regmap handle + * @desc: SoC specific parameters + */ +struct qcom_ramp_controller { + struct regmap *regmap; + const struct qcom_ramp_controller_desc *desc; +}; + +/** + * rc_wait_for_update() - Wait for Ramp Controller root update + * @qrc: Main driver structure + * + * Return: Zero for success or negative number for failure + */ +static int rc_wait_for_update(struct qcom_ramp_controller *qrc) +{ + const struct 
qcom_ramp_controller_desc *d = qrc->desc; + struct regmap *r = qrc->regmap; + u32 val; + int ret; + + ret = regmap_set_bits(r, d->cmd_reg, RC_ROOT_EN); + if (ret) + return ret; + + return regmap_read_poll_timeout(r, d->cmd_reg, val, !(val & RC_UPDATE_EN), + 1, RC_UPDATE_TIMEOUT_US); +} + +/** + * rc_set_cfg_update() - Ramp Controller configuration update + * @qrc: Main driver structure + * @ce: Configuration entry to update + * + * Return: Zero for success or negative number for failure + */ +static int rc_set_cfg_update(struct qcom_ramp_controller *qrc, u8 ce) +{ + const struct qcom_ramp_controller_desc *d = qrc->desc; + struct regmap *r = qrc->regmap; + u32 ack, val; + int ret; + + /* The ack bit is between bits 16-31 of RC_REG_CFG_UPDATE */ + ack = FIELD_PREP(RC_CFG_ACK, BIT(ce)); + + /* Write the configuration type first... */ + ret = regmap_set_bits(r, d->cmd_reg + RC_REG_CFG_UPDATE, ce); + if (ret) + return ret; + + /* ...and after that, enable the update bit to sync the changes */ + ret = regmap_set_bits(r, d->cmd_reg + RC_REG_CFG_UPDATE, RC_CFG_UPDATE_EN); + if (ret) + return ret; + + /* Wait for the changes to go through */ + ret = regmap_read_poll_timeout(r, d->cmd_reg + RC_REG_CFG_UPDATE, val, + val & ack, 1, RC_UPDATE_TIMEOUT_US); + if (ret) + return ret; + + /* + * Configuration update success! The CFG_UPDATE register will not be + * cleared automatically upon applying the configuration, so we have + * to do that manually in order to leave the ramp controller in a + * predictable and clean state. + */ + ret = regmap_write(r, d->cmd_reg + RC_REG_CFG_UPDATE, 0); + if (ret) + return ret; + + /* Wait for the update bit cleared ack */ + return regmap_read_poll_timeout(r, d->cmd_reg + RC_REG_CFG_UPDATE, + val, !(val & RC_CFG_ACK), 1, + RC_UPDATE_TIMEOUT_US); +} + +/** + * rc_write_cfg - Send configuration sequence + * @qrc: Main driver structure + * @seq: Register sequence to send before asking for update + * @ce: Configuration SID + * @nsids: Total number of SIDs + * + * Returns: Zero for success or negative number for error + */ +static int rc_write_cfg(struct qcom_ramp_controller *qrc, + const struct reg_sequence *seq, + u16 ce, u8 nsids) +{ + int ret; + u8 i; + + /* Check if, and wait until the ramp controller is ready */ + ret = rc_wait_for_update(qrc); + if (ret) + return ret; + + /* Write the sequence */ + ret = regmap_multi_reg_write(qrc->regmap, seq, nsids); + if (ret) + return ret; + + /* Pull the trigger: do config update starting from the last sid */ + for (i = 0; i < nsids; i++) { + ret = rc_set_cfg_update(qrc, (u8)ce - i); + if (ret) + return ret; + } + + return 0; +} + +/** + * rc_ramp_ctrl_enable() - Enable Ramp up/down Control + * @qrc: Main driver structure + * + * Return: Zero for success or negative number for error + */ +static int rc_ramp_ctrl_enable(struct qcom_ramp_controller *qrc) +{ + const struct qcom_ramp_controller_desc *d = qrc->desc; + int i, ret; + + for (i = 0; i < d->num_ramp_en; i++) { + ret = rc_write_cfg(qrc, &d->cfg_ramp_en[i], RC_DCVS_CFG_SID, 1); + if (ret) + return ret; + } + + return 0; +} + +/** + * qcom_ramp_controller_start() - Initialize and start the ramp controller + * @qrc: Main driver structure + * + * The Ramp Controller needs to be initialized by programming the relevant + * registers with SoC-specific configuration: once programming is done, + * the hardware will take care of the rest (no further handling required). 
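+ * The LMh, DFS and Link SID sequences are written first; ramp up/down control is enabled last.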
+ * + * Return: Zero for success or negative number for error + */ +static int qcom_ramp_controller_start(struct qcom_ramp_controller *qrc) +{ + const struct qcom_ramp_controller_desc *d = qrc->desc; + int ret; + + /* Program LMH, DFS, Link SIDs */ + ret = rc_write_cfg(qrc, d->cfg_lmh_sid, RC_LMH_SID, d->num_lmh_sids); + if (ret) + return ret; + + ret = rc_write_cfg(qrc, d->cfg_dfs_sid, RC_DFS_SID, d->num_dfs_sids); + if (ret) + return ret; + + ret = rc_write_cfg(qrc, d->cfg_link_sid, RC_LINK_SID, d->num_link_sids); + if (ret) + return ret; + + /* Everything is ready! Enable the ramp up/down control */ + return rc_ramp_ctrl_enable(qrc); +} + +static const struct regmap_config qrc_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x68, + .fast_io = true, +}; + +static const struct reg_sequence msm8976_cfg_dfs_sid[] = { + { 0x10, 0xfefebff7 }, + { 0x14, 0xfdff7fef }, + { 0x18, 0xfbffdefb }, + { 0x1c, 0xb69b5555 }, + { 0x20, 0x24929249 }, + { 0x24, 0x49241112 }, + { 0x28, 0x11112111 }, + { 0x2c, 0x8102 } +}; + +static const struct reg_sequence msm8976_cfg_link_sid[] = { + { 0x40, 0xfc987 } +}; + +static const struct reg_sequence msm8976_cfg_lmh_sid[] = { + { 0x30, 0x77706db }, + { 0x34, 0x5550249 }, + { 0x38, 0x111 } +}; + +static const struct reg_sequence msm8976_cfg_ramp_en[] = { + { 0x50, 0x800 }, /* pre_en */ + { 0x50, 0xc00 }, /* en */ + { 0x50, 0x400 } /* post_en */ +}; + +static const struct reg_sequence msm8976_cfg_ramp_dis[] = { + { 0x50, 0x0 } +}; + +static const struct qcom_ramp_controller_desc msm8976_rc_cfg = { + .cfg_dfs_sid = msm8976_cfg_dfs_sid, + .num_dfs_sids = ARRAY_SIZE(msm8976_cfg_dfs_sid), + + .cfg_link_sid = msm8976_cfg_link_sid, + .num_link_sids = ARRAY_SIZE(msm8976_cfg_link_sid), + + .cfg_lmh_sid = msm8976_cfg_lmh_sid, + .num_lmh_sids = ARRAY_SIZE(msm8976_cfg_lmh_sid), + + .cfg_ramp_en = msm8976_cfg_ramp_en, + .num_ramp_en = ARRAY_SIZE(msm8976_cfg_ramp_en), + + .cfg_ramp_dis = msm8976_cfg_ramp_dis, + .num_ramp_dis = ARRAY_SIZE(msm8976_cfg_ramp_dis), + + .cmd_reg = 0x0, +}; + +static int qcom_ramp_controller_probe(struct platform_device *pdev) +{ + struct qcom_ramp_controller *qrc; + void __iomem *base; + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + + qrc = devm_kmalloc(&pdev->dev, sizeof(*qrc), GFP_KERNEL); + if (!qrc) + return -ENOMEM; + + qrc->desc = device_get_match_data(&pdev->dev); + if (!qrc->desc) + return -EINVAL; + + qrc->regmap = devm_regmap_init_mmio(&pdev->dev, base, &qrc_regmap_config); + if (IS_ERR(qrc->regmap)) + return PTR_ERR(qrc->regmap); + + platform_set_drvdata(pdev, qrc); + + return qcom_ramp_controller_start(qrc); +} + +static void qcom_ramp_controller_remove(struct platform_device *pdev) +{ + struct qcom_ramp_controller *qrc = platform_get_drvdata(pdev); + int ret; + + ret = rc_write_cfg(qrc, qrc->desc->cfg_ramp_dis, + RC_DCVS_CFG_SID, qrc->desc->num_ramp_dis); + if (ret) + dev_err(&pdev->dev, "Failed to send disable sequence\n"); +} + +static const struct of_device_id qcom_ramp_controller_match_table[] = { + { .compatible = "qcom,msm8976-ramp-controller", .data = &msm8976_rc_cfg }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, qcom_ramp_controller_match_table); + +static struct platform_driver qcom_ramp_controller_driver = { + .driver = { + .name = "qcom-ramp-controller", + .of_match_table = qcom_ramp_controller_match_table, + .suppress_bind_attrs = true, + }, + .probe = qcom_ramp_controller_probe, + .remove_new = qcom_ramp_controller_remove, +}; + 
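+/* Use arch_initcall so the ramp sequences are programmed early during boot. */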
+static int __init qcom_ramp_controller_init(void) +{ + return platform_driver_register(&qcom_ramp_controller_driver); +} +arch_initcall(qcom_ramp_controller_init); + +MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>"); +MODULE_DESCRIPTION("Qualcomm Ramp Controller driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/soc/qcom/rmtfs_mem.c b/drivers/soc/qcom/rmtfs_mem.c new file mode 100644 index 0000000000..f83811f511 --- /dev/null +++ b/drivers/soc/qcom/rmtfs_mem.c @@ -0,0 +1,359 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017 Linaro Ltd. + */ + +#include <linux/kernel.h> +#include <linux/cdev.h> +#include <linux/err.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/of.h> +#include <linux/of_reserved_mem.h> +#include <linux/dma-mapping.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include <linux/io.h> +#include <linux/firmware/qcom/qcom_scm.h> + +#define QCOM_RMTFS_MEM_DEV_MAX (MINORMASK + 1) +#define NUM_MAX_VMIDS 2 + +static dev_t qcom_rmtfs_mem_major; + +struct qcom_rmtfs_mem { + struct device dev; + struct cdev cdev; + + void *base; + phys_addr_t addr; + phys_addr_t size; + + unsigned int client_id; + + u64 perms; +}; + +static ssize_t qcom_rmtfs_mem_show(struct device *dev, + struct device_attribute *attr, + char *buf); + +static DEVICE_ATTR(phys_addr, 0444, qcom_rmtfs_mem_show, NULL); +static DEVICE_ATTR(size, 0444, qcom_rmtfs_mem_show, NULL); +static DEVICE_ATTR(client_id, 0444, qcom_rmtfs_mem_show, NULL); + +static ssize_t qcom_rmtfs_mem_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct qcom_rmtfs_mem *rmtfs_mem = container_of(dev, + struct qcom_rmtfs_mem, + dev); + + if (attr == &dev_attr_phys_addr) + return sprintf(buf, "%pa\n", &rmtfs_mem->addr); + if (attr == &dev_attr_size) + return sprintf(buf, "%pa\n", &rmtfs_mem->size); + if (attr == &dev_attr_client_id) + return sprintf(buf, "%d\n", rmtfs_mem->client_id); + + return -EINVAL; +} + +static struct attribute *qcom_rmtfs_mem_attrs[] = { + &dev_attr_phys_addr.attr, + &dev_attr_size.attr, + &dev_attr_client_id.attr, + NULL +}; +ATTRIBUTE_GROUPS(qcom_rmtfs_mem); + +static int qcom_rmtfs_mem_open(struct inode *inode, struct file *filp) +{ + struct qcom_rmtfs_mem *rmtfs_mem = container_of(inode->i_cdev, + struct qcom_rmtfs_mem, + cdev); + + get_device(&rmtfs_mem->dev); + filp->private_data = rmtfs_mem; + + return 0; +} +static ssize_t qcom_rmtfs_mem_read(struct file *filp, + char __user *buf, size_t count, loff_t *f_pos) +{ + struct qcom_rmtfs_mem *rmtfs_mem = filp->private_data; + + if (*f_pos >= rmtfs_mem->size) + return 0; + + if (*f_pos + count >= rmtfs_mem->size) + count = rmtfs_mem->size - *f_pos; + + if (copy_to_user(buf, rmtfs_mem->base + *f_pos, count)) + return -EFAULT; + + *f_pos += count; + return count; +} + +static ssize_t qcom_rmtfs_mem_write(struct file *filp, + const char __user *buf, size_t count, + loff_t *f_pos) +{ + struct qcom_rmtfs_mem *rmtfs_mem = filp->private_data; + + if (*f_pos >= rmtfs_mem->size) + return 0; + + if (*f_pos + count >= rmtfs_mem->size) + count = rmtfs_mem->size - *f_pos; + + if (copy_from_user(rmtfs_mem->base + *f_pos, buf, count)) + return -EFAULT; + + *f_pos += count; + return count; +} + +static int qcom_rmtfs_mem_release(struct inode *inode, struct file *filp) +{ + struct qcom_rmtfs_mem *rmtfs_mem = filp->private_data; + + put_device(&rmtfs_mem->dev); + + return 0; +} + +static struct class rmtfs_class = { + .name = "rmtfs", +}; + +static int 
qcom_rmtfs_mem_mmap(struct file *filep, struct vm_area_struct *vma) +{ + struct qcom_rmtfs_mem *rmtfs_mem = filep->private_data; + + if (vma->vm_end - vma->vm_start > rmtfs_mem->size) { + dev_dbg(&rmtfs_mem->dev, + "vm_end[%lu] - vm_start[%lu] [%lu] > mem->size[%pa]\n", + vma->vm_end, vma->vm_start, + (vma->vm_end - vma->vm_start), &rmtfs_mem->size); + return -EINVAL; + } + + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + return remap_pfn_range(vma, + vma->vm_start, + rmtfs_mem->addr >> PAGE_SHIFT, + vma->vm_end - vma->vm_start, + vma->vm_page_prot); +} + +static const struct file_operations qcom_rmtfs_mem_fops = { + .owner = THIS_MODULE, + .open = qcom_rmtfs_mem_open, + .read = qcom_rmtfs_mem_read, + .write = qcom_rmtfs_mem_write, + .release = qcom_rmtfs_mem_release, + .llseek = default_llseek, + .mmap = qcom_rmtfs_mem_mmap, +}; + +static void qcom_rmtfs_mem_release_device(struct device *dev) +{ + struct qcom_rmtfs_mem *rmtfs_mem = container_of(dev, + struct qcom_rmtfs_mem, + dev); + + kfree(rmtfs_mem); +} + +static int qcom_rmtfs_mem_probe(struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + struct qcom_scm_vmperm perms[NUM_MAX_VMIDS + 1]; + struct reserved_mem *rmem; + struct qcom_rmtfs_mem *rmtfs_mem; + u32 client_id; + u32 vmid[NUM_MAX_VMIDS]; + int num_vmids; + int ret, i; + + rmem = of_reserved_mem_lookup(node); + if (!rmem) { + dev_err(&pdev->dev, "failed to acquire memory region\n"); + return -EINVAL; + } + + ret = of_property_read_u32(node, "qcom,client-id", &client_id); + if (ret) { + dev_err(&pdev->dev, "failed to parse \"qcom,client-id\"\n"); + return ret; + + } + + rmtfs_mem = kzalloc(sizeof(*rmtfs_mem), GFP_KERNEL); + if (!rmtfs_mem) + return -ENOMEM; + + rmtfs_mem->addr = rmem->base; + rmtfs_mem->client_id = client_id; + rmtfs_mem->size = rmem->size; + + device_initialize(&rmtfs_mem->dev); + rmtfs_mem->dev.parent = &pdev->dev; + rmtfs_mem->dev.groups = qcom_rmtfs_mem_groups; + rmtfs_mem->dev.release = qcom_rmtfs_mem_release_device; + + rmtfs_mem->base = devm_memremap(&rmtfs_mem->dev, rmtfs_mem->addr, + rmtfs_mem->size, MEMREMAP_WC); + if (IS_ERR(rmtfs_mem->base)) { + dev_err(&pdev->dev, "failed to remap rmtfs_mem region\n"); + ret = PTR_ERR(rmtfs_mem->base); + goto put_device; + } + + cdev_init(&rmtfs_mem->cdev, &qcom_rmtfs_mem_fops); + rmtfs_mem->cdev.owner = THIS_MODULE; + + dev_set_name(&rmtfs_mem->dev, "qcom_rmtfs_mem%d", client_id); + rmtfs_mem->dev.id = client_id; + rmtfs_mem->dev.class = &rmtfs_class; + rmtfs_mem->dev.devt = MKDEV(MAJOR(qcom_rmtfs_mem_major), client_id); + + ret = cdev_device_add(&rmtfs_mem->cdev, &rmtfs_mem->dev); + if (ret) { + dev_err(&pdev->dev, "failed to add cdev: %d\n", ret); + goto put_device; + } + + num_vmids = of_property_count_u32_elems(node, "qcom,vmid"); + if (num_vmids == -EINVAL) { + /* qcom,vmid is optional */ + num_vmids = 0; + } else if (num_vmids < 0) { + dev_err(&pdev->dev, "failed to count qcom,vmid elements: %d\n", num_vmids); + ret = num_vmids; + goto remove_cdev; + } else if (num_vmids > NUM_MAX_VMIDS) { + dev_warn(&pdev->dev, + "too many VMIDs (%d) specified! 
Only mapping first %d entries\n", + num_vmids, NUM_MAX_VMIDS); + num_vmids = NUM_MAX_VMIDS; + } + + ret = of_property_read_u32_array(node, "qcom,vmid", vmid, num_vmids); + if (ret < 0 && ret != -EINVAL) { + dev_err(&pdev->dev, "failed to parse qcom,vmid\n"); + goto remove_cdev; + } else if (!ret) { + if (!qcom_scm_is_available()) { + ret = -EPROBE_DEFER; + goto remove_cdev; + } + + perms[0].vmid = QCOM_SCM_VMID_HLOS; + perms[0].perm = QCOM_SCM_PERM_RW; + + for (i = 0; i < num_vmids; i++) { + perms[i + 1].vmid = vmid[i]; + perms[i + 1].perm = QCOM_SCM_PERM_RW; + } + + rmtfs_mem->perms = BIT(QCOM_SCM_VMID_HLOS); + ret = qcom_scm_assign_mem(rmtfs_mem->addr, rmtfs_mem->size, + &rmtfs_mem->perms, perms, num_vmids + 1); + if (ret < 0) { + dev_err(&pdev->dev, "assign memory failed\n"); + goto remove_cdev; + } + } + + dev_set_drvdata(&pdev->dev, rmtfs_mem); + + return 0; + +remove_cdev: + cdev_device_del(&rmtfs_mem->cdev, &rmtfs_mem->dev); +put_device: + put_device(&rmtfs_mem->dev); + + return ret; +} + +static int qcom_rmtfs_mem_remove(struct platform_device *pdev) +{ + struct qcom_rmtfs_mem *rmtfs_mem = dev_get_drvdata(&pdev->dev); + struct qcom_scm_vmperm perm; + + if (rmtfs_mem->perms) { + perm.vmid = QCOM_SCM_VMID_HLOS; + perm.perm = QCOM_SCM_PERM_RW; + + qcom_scm_assign_mem(rmtfs_mem->addr, rmtfs_mem->size, + &rmtfs_mem->perms, &perm, 1); + } + + cdev_device_del(&rmtfs_mem->cdev, &rmtfs_mem->dev); + put_device(&rmtfs_mem->dev); + + return 0; +} + +static const struct of_device_id qcom_rmtfs_mem_of_match[] = { + { .compatible = "qcom,rmtfs-mem" }, + {} +}; +MODULE_DEVICE_TABLE(of, qcom_rmtfs_mem_of_match); + +static struct platform_driver qcom_rmtfs_mem_driver = { + .probe = qcom_rmtfs_mem_probe, + .remove = qcom_rmtfs_mem_remove, + .driver = { + .name = "qcom_rmtfs_mem", + .of_match_table = qcom_rmtfs_mem_of_match, + }, +}; + +static int __init qcom_rmtfs_mem_init(void) +{ + int ret; + + ret = class_register(&rmtfs_class); + if (ret) + return ret; + + ret = alloc_chrdev_region(&qcom_rmtfs_mem_major, 0, + QCOM_RMTFS_MEM_DEV_MAX, "qcom_rmtfs_mem"); + if (ret < 0) { + pr_err("qcom_rmtfs_mem: failed to allocate char dev region\n"); + goto unregister_class; + } + + ret = platform_driver_register(&qcom_rmtfs_mem_driver); + if (ret < 0) { + pr_err("qcom_rmtfs_mem: failed to register rmtfs_mem driver\n"); + goto unregister_chrdev; + } + + return 0; + +unregister_chrdev: + unregister_chrdev_region(qcom_rmtfs_mem_major, QCOM_RMTFS_MEM_DEV_MAX); +unregister_class: + class_unregister(&rmtfs_class); + return ret; +} +module_init(qcom_rmtfs_mem_init); + +static void __exit qcom_rmtfs_mem_exit(void) +{ + platform_driver_unregister(&qcom_rmtfs_mem_driver); + unregister_chrdev_region(qcom_rmtfs_mem_major, QCOM_RMTFS_MEM_DEV_MAX); + class_unregister(&rmtfs_class); +} +module_exit(qcom_rmtfs_mem_exit); + +MODULE_AUTHOR("Linaro Ltd"); +MODULE_DESCRIPTION("Qualcomm Remote Filesystem memory driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/rpm-proc.c b/drivers/soc/qcom/rpm-proc.c new file mode 100644 index 0000000000..2995d9b901 --- /dev/null +++ b/drivers/soc/qcom/rpm-proc.c @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2021-2023, Stephan Gerhold <stephan@gerhold.net> */ + +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/rpmsg/qcom_smd.h> + +static int rpm_proc_probe(struct platform_device *pdev) +{ + struct qcom_smd_edge *edge = NULL; + struct device *dev = &pdev->dev; + struct 
device_node *edge_node; + int ret; + + edge_node = of_get_child_by_name(dev->of_node, "smd-edge"); + if (edge_node) { + edge = qcom_smd_register_edge(dev, edge_node); + of_node_put(edge_node); + if (IS_ERR(edge)) + return dev_err_probe(dev, PTR_ERR(edge), + "Failed to register smd-edge\n"); + } + + ret = devm_of_platform_populate(dev); + if (ret) { + dev_err(dev, "Failed to populate child devices: %d\n", ret); + goto err; + } + + platform_set_drvdata(pdev, edge); + return 0; +err: + if (edge) + qcom_smd_unregister_edge(edge); + return ret; +} + +static void rpm_proc_remove(struct platform_device *pdev) +{ + struct qcom_smd_edge *edge = platform_get_drvdata(pdev); + + if (edge) + qcom_smd_unregister_edge(edge); +} + +static const struct of_device_id rpm_proc_of_match[] = { + { .compatible = "qcom,rpm-proc", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, rpm_proc_of_match); + +static struct platform_driver rpm_proc_driver = { + .probe = rpm_proc_probe, + .remove_new = rpm_proc_remove, + .driver = { + .name = "qcom-rpm-proc", + .of_match_table = rpm_proc_of_match, + }, +}; + +static int __init rpm_proc_init(void) +{ + return platform_driver_register(&rpm_proc_driver); +} +arch_initcall(rpm_proc_init); + +static void __exit rpm_proc_exit(void) +{ + platform_driver_unregister(&rpm_proc_driver); +} +module_exit(rpm_proc_exit); + +MODULE_DESCRIPTION("Qualcomm RPM processor/subsystem driver"); +MODULE_AUTHOR("Stephan Gerhold <stephan@gerhold.net>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/soc/qcom/rpm_master_stats.c b/drivers/soc/qcom/rpm_master_stats.c new file mode 100644 index 0000000000..9ca13bcf67 --- /dev/null +++ b/drivers/soc/qcom/rpm_master_stats.c @@ -0,0 +1,163 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2023, Linaro Limited + * + * This driver supports what is known as "Master Stats v2" in Qualcomm + * downstream kernel terms, which seems to be the only version which has + * ever shipped, all the way from 2013 to 2023. 
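+ * The per-master records are read from the RPM message RAM and exposed via debugfs, one file per master.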
+ */ + +#include <linux/debugfs.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> + +struct master_stats_data { + void __iomem *base; + const char *label; +}; + +struct rpm_master_stats { + u32 active_cores; + u32 num_shutdowns; + u64 shutdown_req; + u64 wakeup_idx; + u64 bringup_req; + u64 bringup_ack; + u32 wakeup_reason; /* 0 = "rude wakeup", 1 = scheduled wakeup */ + u32 last_sleep_trans_dur; + u32 last_wake_trans_dur; + + /* Per-subsystem (*not necessarily* SoC-wide) XO shutdown stats */ + u32 xo_count; + u64 xo_last_enter; + u64 last_exit; + u64 xo_total_dur; +} __packed; + +static int master_stats_show(struct seq_file *s, void *unused) +{ + struct master_stats_data *data = s->private; + struct rpm_master_stats stat; + + memcpy_fromio(&stat, data->base, sizeof(stat)); + + seq_printf(s, "%s:\n", data->label); + + seq_printf(s, "\tLast shutdown @ %llu\n", stat.shutdown_req); + seq_printf(s, "\tLast bringup req @ %llu\n", stat.bringup_req); + seq_printf(s, "\tLast bringup ack @ %llu\n", stat.bringup_ack); + seq_printf(s, "\tLast wakeup idx: %llu\n", stat.wakeup_idx); + seq_printf(s, "\tLast XO shutdown enter @ %llu\n", stat.xo_last_enter); + seq_printf(s, "\tLast XO shutdown exit @ %llu\n", stat.last_exit); + seq_printf(s, "\tXO total duration: %llu\n", stat.xo_total_dur); + seq_printf(s, "\tLast sleep transition duration: %u\n", stat.last_sleep_trans_dur); + seq_printf(s, "\tLast wake transition duration: %u\n", stat.last_wake_trans_dur); + seq_printf(s, "\tXO shutdown count: %u\n", stat.xo_count); + seq_printf(s, "\tWakeup reason: 0x%x\n", stat.wakeup_reason); + seq_printf(s, "\tShutdown count: %u\n", stat.num_shutdowns); + seq_printf(s, "\tActive cores bitmask: 0x%x\n", stat.active_cores); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(master_stats); + +static int master_stats_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct master_stats_data *data; + struct device_node *msgram_np; + struct dentry *dent, *root; + struct resource res; + int count, i, ret; + + count = of_property_count_strings(dev->of_node, "qcom,master-names"); + if (count < 0) + return count; + + data = devm_kzalloc(dev, count * sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + root = debugfs_create_dir("qcom_rpm_master_stats", NULL); + platform_set_drvdata(pdev, root); + + for (i = 0; i < count; i++) { + msgram_np = of_parse_phandle(dev->of_node, "qcom,rpm-msg-ram", i); + if (!msgram_np) { + debugfs_remove_recursive(root); + return dev_err_probe(dev, -ENODEV, + "Couldn't parse MSG RAM phandle idx %d", i); + } + + /* + * Purposefully skip devm_platform helpers as we're using a + * shared resource. + */ + ret = of_address_to_resource(msgram_np, 0, &res); + of_node_put(msgram_np); + if (ret < 0) { + debugfs_remove_recursive(root); + return ret; + } + + data[i].base = devm_ioremap(dev, res.start, resource_size(&res)); + if (!data[i].base) { + debugfs_remove_recursive(root); + return dev_err_probe(dev, -EINVAL, + "Could not map the MSG RAM slice idx %d!\n", i); + } + + ret = of_property_read_string_index(dev->of_node, "qcom,master-names", i, + &data[i].label); + if (ret < 0) { + debugfs_remove_recursive(root); + return dev_err_probe(dev, ret, + "Could not read name idx %d!\n", i); + } + + /* + * Generally it's not advised to fail on debugfs errors, but this + * driver's only job is exposing data therein. 
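+ * A failure to create the file is therefore treated as fatal for the probe.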
+ */ + dent = debugfs_create_file(data[i].label, 0444, root, + &data[i], &master_stats_fops); + if (IS_ERR(dent)) { + debugfs_remove_recursive(root); + return dev_err_probe(dev, PTR_ERR(dent), + "Failed to create debugfs file %s!\n", data[i].label); + } + } + + device_set_pm_not_required(dev); + + return 0; +} + +static void master_stats_remove(struct platform_device *pdev) +{ + struct dentry *root = platform_get_drvdata(pdev); + + debugfs_remove_recursive(root); +} + +static const struct of_device_id rpm_master_table[] = { + { .compatible = "qcom,rpm-master-stats" }, + { }, +}; + +static struct platform_driver master_stats_driver = { + .probe = master_stats_probe, + .remove_new = master_stats_remove, + .driver = { + .name = "qcom_rpm_master_stats", + .of_match_table = rpm_master_table, + }, +}; +module_platform_driver(master_stats_driver); + +MODULE_DESCRIPTION("Qualcomm RPM Master Statistics driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/soc/qcom/rpmh-internal.h b/drivers/soc/qcom/rpmh-internal.h new file mode 100644 index 0000000000..e3cf1beff8 --- /dev/null +++ b/drivers/soc/qcom/rpmh-internal.h @@ -0,0 +1,148 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. + */ + + +#ifndef __RPM_INTERNAL_H__ +#define __RPM_INTERNAL_H__ + +#include <linux/bitmap.h> +#include <linux/wait.h> +#include <soc/qcom/tcs.h> + +#define TCS_TYPE_NR 4 +#define MAX_CMDS_PER_TCS 16 +#define MAX_TCS_PER_TYPE 3 +#define MAX_TCS_NR (MAX_TCS_PER_TYPE * TCS_TYPE_NR) +#define MAX_TCS_SLOTS (MAX_CMDS_PER_TCS * MAX_TCS_PER_TYPE) + +struct rsc_drv; + +/** + * struct tcs_group: group of Trigger Command Sets (TCS) to send state requests + * to the controller + * + * @drv: The controller. + * @type: Type of the TCS in this group - active, sleep, wake. + * @mask: Mask of the TCSes relative to all the TCSes in the RSC. + * @offset: Start of the TCS group relative to the TCSes in the RSC. + * @num_tcs: Number of TCSes in this type. + * @ncpt: Number of commands in each TCS. + * @req: Requests that are sent from the TCS; only used for ACTIVE_ONLY + * transfers (could be on a wake/sleep TCS if we are borrowing for + * an ACTIVE_ONLY transfer). + * Start: grab drv->lock, set req, set tcs_in_use, drop drv->lock, + * trigger + * End: get irq, access req, + * grab drv->lock, clear tcs_in_use, drop drv->lock + * @slots: Indicates which of @cmd_addr are occupied; only used for + * SLEEP / WAKE TCSs. Things are tightly packed in the + * case that (ncpt < MAX_CMDS_PER_TCS). That is if ncpt = 2 and + * MAX_CMDS_PER_TCS = 16 then bit[2] = the first bit in 2nd TCS. 
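+ * In that example bits [0..1] map to the 1st TCS and bits [2..3] to the 2nd.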
+ */ +struct tcs_group { + struct rsc_drv *drv; + int type; + u32 mask; + u32 offset; + int num_tcs; + int ncpt; + const struct tcs_request *req[MAX_TCS_PER_TYPE]; + DECLARE_BITMAP(slots, MAX_TCS_SLOTS); +}; + +/** + * struct rpmh_request: the message to be sent to rpmh-rsc + * + * @msg: the request + * @cmd: the payload that will be part of the @msg + * @completion: triggered when request is done + * @dev: the device making the request + * @needs_free: check to free dynamically allocated request object + */ +struct rpmh_request { + struct tcs_request msg; + struct tcs_cmd cmd[MAX_RPMH_PAYLOAD]; + struct completion *completion; + const struct device *dev; + bool needs_free; +}; + +/** + * struct rpmh_ctrlr: our representation of the controller + * + * @cache: the list of cached requests + * @cache_lock: synchronize access to the cache data + * @dirty: was the cache updated since flush + * @batch_cache: Cache sleep and wake requests sent as batch + */ +struct rpmh_ctrlr { + struct list_head cache; + spinlock_t cache_lock; + bool dirty; + struct list_head batch_cache; +}; + +struct rsc_ver { + u32 major; + u32 minor; +}; + +/** + * struct rsc_drv: the Direct Resource Voter (DRV) of the + * Resource State Coordinator controller (RSC) + * + * @name: Controller identifier. + * @base: Start address of the DRV registers in this controller. + * @tcs_base: Start address of the TCS registers in this controller. + * @id: Instance id in the controller (Direct Resource Voter). + * @num_tcs: Number of TCSes in this DRV. + * @rsc_pm: CPU PM notifier for controller. + * Used when solver mode is not present. + * @cpus_in_pm: Number of CPUs not in idle power collapse. + * Used when solver mode and "power-domains" is not present. + * @genpd_nb: PM Domain notifier for cluster genpd notifications. + * @tcs: TCS groups. + * @tcs_in_use: S/W state of the TCS; only set for ACTIVE_ONLY + * transfers, but might show a sleep/wake TCS in use if + * it was borrowed for an active_only transfer. You + * must hold the lock in this struct (AKA drv->lock) in + * order to update this. + * @lock: Synchronize state of the controller. If RPMH's cache + * lock will also be held, the order is: drv->lock then + * cache_lock. + * @tcs_wait: Wait queue used to wait for @tcs_in_use to free up a + * slot + * @client: Handle to the DRV's client. + * @dev: RSC device. + */ +struct rsc_drv { + const char *name; + void __iomem *base; + void __iomem *tcs_base; + int id; + int num_tcs; + struct notifier_block rsc_pm; + struct notifier_block genpd_nb; + atomic_t cpus_in_pm; + struct tcs_group tcs[TCS_TYPE_NR]; + DECLARE_BITMAP(tcs_in_use, MAX_TCS_NR); + spinlock_t lock; + wait_queue_head_t tcs_wait; + struct rpmh_ctrlr client; + struct device *dev; + struct rsc_ver ver; + u32 *regs; +}; + +int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg); +int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, + const struct tcs_request *msg); +void rpmh_rsc_invalidate(struct rsc_drv *drv); +void rpmh_rsc_write_next_wakeup(struct rsc_drv *drv); + +void rpmh_tx_done(const struct tcs_request *msg); +int rpmh_flush(struct rpmh_ctrlr *ctrlr); + +#endif /* __RPM_INTERNAL_H__ */ diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c new file mode 100644 index 0000000000..a021dc7180 --- /dev/null +++ b/drivers/soc/qcom/rpmh-rsc.c @@ -0,0 +1,1160 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. 
+ */ + +#define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME + +#include <linux/atomic.h> +#include <linux/cpu_pm.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/ktime.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/notifier.h> +#include <linux/of.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/pm_domain.h> +#include <linux/pm_runtime.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/wait.h> + +#include <clocksource/arm_arch_timer.h> +#include <soc/qcom/cmd-db.h> +#include <soc/qcom/tcs.h> +#include <dt-bindings/soc/qcom,rpmh-rsc.h> + +#include "rpmh-internal.h" + +#define CREATE_TRACE_POINTS +#include "trace-rpmh.h" + + +#define RSC_DRV_ID 0 + +#define MAJOR_VER_MASK 0xFF +#define MAJOR_VER_SHIFT 16 +#define MINOR_VER_MASK 0xFF +#define MINOR_VER_SHIFT 8 + +enum { + RSC_DRV_TCS_OFFSET, + RSC_DRV_CMD_OFFSET, + DRV_SOLVER_CONFIG, + DRV_PRNT_CHLD_CONFIG, + RSC_DRV_IRQ_ENABLE, + RSC_DRV_IRQ_STATUS, + RSC_DRV_IRQ_CLEAR, + RSC_DRV_CMD_WAIT_FOR_CMPL, + RSC_DRV_CONTROL, + RSC_DRV_STATUS, + RSC_DRV_CMD_ENABLE, + RSC_DRV_CMD_MSGID, + RSC_DRV_CMD_ADDR, + RSC_DRV_CMD_DATA, + RSC_DRV_CMD_STATUS, + RSC_DRV_CMD_RESP_DATA, +}; + +/* DRV HW Solver Configuration Information Register */ +#define DRV_HW_SOLVER_MASK 1 +#define DRV_HW_SOLVER_SHIFT 24 + +/* DRV TCS Configuration Information Register */ +#define DRV_NUM_TCS_MASK 0x3F +#define DRV_NUM_TCS_SHIFT 6 +#define DRV_NCPT_MASK 0x1F +#define DRV_NCPT_SHIFT 27 + +/* Offsets for CONTROL TCS Registers */ +#define RSC_DRV_CTL_TCS_DATA_HI 0x38 +#define RSC_DRV_CTL_TCS_DATA_HI_MASK 0xFFFFFF +#define RSC_DRV_CTL_TCS_DATA_HI_VALID BIT(31) +#define RSC_DRV_CTL_TCS_DATA_LO 0x40 +#define RSC_DRV_CTL_TCS_DATA_LO_MASK 0xFFFFFFFF +#define RSC_DRV_CTL_TCS_DATA_SIZE 32 + +#define TCS_AMC_MODE_ENABLE BIT(16) +#define TCS_AMC_MODE_TRIGGER BIT(24) + +/* TCS CMD register bit mask */ +#define CMD_MSGID_LEN 8 +#define CMD_MSGID_RESP_REQ BIT(8) +#define CMD_MSGID_WRITE BIT(16) +#define CMD_STATUS_ISSUED BIT(8) +#define CMD_STATUS_COMPL BIT(16) + +/* + * Here's a high level overview of how all the registers in RPMH work + * together: + * + * - The main rpmh-rsc address is the base of a register space that can + * be used to find overall configuration of the hardware + * (DRV_PRNT_CHLD_CONFIG). Also found within the rpmh-rsc register + * space are all the TCS blocks. The offset of the TCS blocks is + * specified in the device tree by "qcom,tcs-offset" and used to + * compute tcs_base. + * - TCS blocks come one after another. Type, count, and order are + * specified by the device tree as "qcom,tcs-config". + * - Each TCS block has some registers, then space for up to 16 commands. + * Note that though address space is reserved for 16 commands, fewer + * might be present. See ncpt (num cmds per TCS). 
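+ *   Each command slot is programmed through its own MSGID, ADDR and DATA registers (see the RSC_DRV_CMD_* offsets).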
+ * + * Here's a picture: + * + * +---------------------------------------------------+ + * |RSC | + * | ctrl | + * | | + * | Drvs: | + * | +-----------------------------------------------+ | + * | |DRV0 | | + * | | ctrl/config | | + * | | IRQ | | + * | | | | + * | | TCSes: | | + * | | +------------------------------------------+ | | + * | | |TCS0 | | | | | | | | | | | | | | | + * | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | | + * | | | | | | | | | | | | | | | | | | + * | | +------------------------------------------+ | | + * | | +------------------------------------------+ | | + * | | |TCS1 | | | | | | | | | | | | | | | + * | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | | + * | | | | | | | | | | | | | | | | | | + * | | +------------------------------------------+ | | + * | | +------------------------------------------+ | | + * | | |TCS2 | | | | | | | | | | | | | | | + * | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | | + * | | | | | | | | | | | | | | | | | | + * | | +------------------------------------------+ | | + * | | ...... | | + * | +-----------------------------------------------+ | + * | +-----------------------------------------------+ | + * | |DRV1 | | + * | | (same as DRV0) | | + * | +-----------------------------------------------+ | + * | ...... | + * +---------------------------------------------------+ + */ + +#define USECS_TO_CYCLES(time_usecs) \ + xloops_to_cycles((time_usecs) * 0x10C7UL) + +static inline unsigned long xloops_to_cycles(u64 xloops) +{ + return (xloops * loops_per_jiffy * HZ) >> 32; +} + +static u32 rpmh_rsc_reg_offset_ver_2_7[] = { + [RSC_DRV_TCS_OFFSET] = 672, + [RSC_DRV_CMD_OFFSET] = 20, + [DRV_SOLVER_CONFIG] = 0x04, + [DRV_PRNT_CHLD_CONFIG] = 0x0C, + [RSC_DRV_IRQ_ENABLE] = 0x00, + [RSC_DRV_IRQ_STATUS] = 0x04, + [RSC_DRV_IRQ_CLEAR] = 0x08, + [RSC_DRV_CMD_WAIT_FOR_CMPL] = 0x10, + [RSC_DRV_CONTROL] = 0x14, + [RSC_DRV_STATUS] = 0x18, + [RSC_DRV_CMD_ENABLE] = 0x1C, + [RSC_DRV_CMD_MSGID] = 0x30, + [RSC_DRV_CMD_ADDR] = 0x34, + [RSC_DRV_CMD_DATA] = 0x38, + [RSC_DRV_CMD_STATUS] = 0x3C, + [RSC_DRV_CMD_RESP_DATA] = 0x40, +}; + +static u32 rpmh_rsc_reg_offset_ver_3_0[] = { + [RSC_DRV_TCS_OFFSET] = 672, + [RSC_DRV_CMD_OFFSET] = 24, + [DRV_SOLVER_CONFIG] = 0x04, + [DRV_PRNT_CHLD_CONFIG] = 0x0C, + [RSC_DRV_IRQ_ENABLE] = 0x00, + [RSC_DRV_IRQ_STATUS] = 0x04, + [RSC_DRV_IRQ_CLEAR] = 0x08, + [RSC_DRV_CMD_WAIT_FOR_CMPL] = 0x20, + [RSC_DRV_CONTROL] = 0x24, + [RSC_DRV_STATUS] = 0x28, + [RSC_DRV_CMD_ENABLE] = 0x2C, + [RSC_DRV_CMD_MSGID] = 0x34, + [RSC_DRV_CMD_ADDR] = 0x38, + [RSC_DRV_CMD_DATA] = 0x3C, + [RSC_DRV_CMD_STATUS] = 0x40, + [RSC_DRV_CMD_RESP_DATA] = 0x44, +}; + +static inline void __iomem * +tcs_reg_addr(const struct rsc_drv *drv, int reg, int tcs_id) +{ + return drv->tcs_base + drv->regs[RSC_DRV_TCS_OFFSET] * tcs_id + reg; +} + +static inline void __iomem * +tcs_cmd_addr(const struct rsc_drv *drv, int reg, int tcs_id, int cmd_id) +{ + return tcs_reg_addr(drv, reg, tcs_id) + drv->regs[RSC_DRV_CMD_OFFSET] * cmd_id; +} + +static u32 read_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id, + int cmd_id) +{ + return readl_relaxed(tcs_cmd_addr(drv, reg, tcs_id, cmd_id)); +} + +static u32 read_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id) +{ + return readl_relaxed(tcs_reg_addr(drv, reg, tcs_id)); +} + +static void write_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id, + int cmd_id, u32 data) +{ + writel_relaxed(data, tcs_cmd_addr(drv, reg, tcs_id, cmd_id)); +} + +static void write_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id, + 
u32 data) +{ + writel_relaxed(data, tcs_reg_addr(drv, reg, tcs_id)); +} + +static void write_tcs_reg_sync(const struct rsc_drv *drv, int reg, int tcs_id, + u32 data) +{ + int i; + + writel(data, tcs_reg_addr(drv, reg, tcs_id)); + + /* + * Wait until we read back the same value. Use a counter rather than + * ktime for timeout since this may be called after timekeeping stops. + */ + for (i = 0; i < USEC_PER_SEC; i++) { + if (readl(tcs_reg_addr(drv, reg, tcs_id)) == data) + return; + udelay(1); + } + pr_err("%s: error writing %#x to %d:%#x\n", drv->name, + data, tcs_id, reg); +} + +/** + * tcs_invalidate() - Invalidate all TCSes of the given type (sleep or wake). + * @drv: The RSC controller. + * @type: SLEEP_TCS or WAKE_TCS + * + * This will clear the "slots" variable of the given tcs_group and also + * tell the hardware to forget about all entries. + * + * The caller must ensure that no other RPMH actions are happening when this + * function is called, since otherwise the device may immediately become + * used again even before this function exits. + */ +static void tcs_invalidate(struct rsc_drv *drv, int type) +{ + int m; + struct tcs_group *tcs = &drv->tcs[type]; + + /* Caller ensures nobody else is running so no lock */ + if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS)) + return; + + for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++) + write_tcs_reg_sync(drv, drv->regs[RSC_DRV_CMD_ENABLE], m, 0); + + bitmap_zero(tcs->slots, MAX_TCS_SLOTS); +} + +/** + * rpmh_rsc_invalidate() - Invalidate sleep and wake TCSes. + * @drv: The RSC controller. + * + * The caller must ensure that no other RPMH actions are happening when this + * function is called, since otherwise the device may immediately become + * used again even before this function exits. + */ +void rpmh_rsc_invalidate(struct rsc_drv *drv) +{ + tcs_invalidate(drv, SLEEP_TCS); + tcs_invalidate(drv, WAKE_TCS); +} + +/** + * get_tcs_for_msg() - Get the tcs_group used to send the given message. + * @drv: The RSC controller. + * @msg: The message we want to send. + * + * This is normally pretty straightforward except if we are trying to send + * an ACTIVE_ONLY message but don't have any active_only TCSes. + * + * Return: A pointer to a tcs_group or an ERR_PTR. + */ +static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv, + const struct tcs_request *msg) +{ + int type; + struct tcs_group *tcs; + + switch (msg->state) { + case RPMH_ACTIVE_ONLY_STATE: + type = ACTIVE_TCS; + break; + case RPMH_WAKE_ONLY_STATE: + type = WAKE_TCS; + break; + case RPMH_SLEEP_STATE: + type = SLEEP_TCS; + break; + default: + return ERR_PTR(-EINVAL); + } + + /* + * If we are making an active request on a RSC that does not have a + * dedicated TCS for active state use, then re-purpose a wake TCS to + * send active votes. This is safe because we ensure any active-only + * transfers have finished before we use it (maybe by running from + * the last CPU in PM code). + */ + tcs = &drv->tcs[type]; + if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs) + tcs = &drv->tcs[WAKE_TCS]; + + return tcs; +} + +/** + * get_req_from_tcs() - Get a stashed request that was xfering on the given TCS. + * @drv: The RSC controller. + * @tcs_id: The global ID of this TCS. + * + * For ACTIVE_ONLY transfers we want to call back into the client when the + * transfer finishes. To do this we need the "request" that the client + * originally provided us. This function grabs the request that we stashed + * when we started the transfer. 
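+ * The request was stashed in tcs->req[] by rpmh_rsc_send_data() when the TCS was claimed.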
+ * + * This only makes sense for ACTIVE_ONLY transfers since those are the only + * ones we track sending (the only ones we enable interrupts for and the only + * ones we call back to the client for). + * + * Return: The stashed request. + */ +static const struct tcs_request *get_req_from_tcs(struct rsc_drv *drv, + int tcs_id) +{ + struct tcs_group *tcs; + int i; + + for (i = 0; i < TCS_TYPE_NR; i++) { + tcs = &drv->tcs[i]; + if (tcs->mask & BIT(tcs_id)) + return tcs->req[tcs_id - tcs->offset]; + } + + return NULL; +} + +/** + * __tcs_set_trigger() - Start xfer on a TCS or unset trigger on a borrowed TCS + * @drv: The controller. + * @tcs_id: The global ID of this TCS. + * @trigger: If true then untrigger/retrigger. If false then just untrigger. + * + * In the normal case we only ever call with "trigger=true" to start a + * transfer. That will un-trigger/disable the TCS from the last transfer + * then trigger/enable for this transfer. + * + * If we borrowed a wake TCS for an active-only transfer we'll also call + * this function with "trigger=false" to just do the un-trigger/disable + * before using the TCS for wake purposes again. + * + * Note that the AP is only in charge of triggering active-only transfers. + * The AP never triggers sleep/wake values using this function. + */ +static void __tcs_set_trigger(struct rsc_drv *drv, int tcs_id, bool trigger) +{ + u32 enable; + u32 reg = drv->regs[RSC_DRV_CONTROL]; + + /* + * HW req: Clear the DRV_CONTROL and enable TCS again + * While clearing ensure that the AMC mode trigger is cleared + * and then the mode enable is cleared. + */ + enable = read_tcs_reg(drv, reg, tcs_id); + enable &= ~TCS_AMC_MODE_TRIGGER; + write_tcs_reg_sync(drv, reg, tcs_id, enable); + enable &= ~TCS_AMC_MODE_ENABLE; + write_tcs_reg_sync(drv, reg, tcs_id, enable); + + if (trigger) { + /* Enable the AMC mode on the TCS and then trigger the TCS */ + enable = TCS_AMC_MODE_ENABLE; + write_tcs_reg_sync(drv, reg, tcs_id, enable); + enable |= TCS_AMC_MODE_TRIGGER; + write_tcs_reg(drv, reg, tcs_id, enable); + } +} + +/** + * enable_tcs_irq() - Enable or disable interrupts on the given TCS. + * @drv: The controller. + * @tcs_id: The global ID of this TCS. + * @enable: If true then enable; if false then disable + * + * We only ever call this when we borrow a wake TCS for an active-only + * transfer. For active-only TCSes interrupts are always left enabled. + */ +static void enable_tcs_irq(struct rsc_drv *drv, int tcs_id, bool enable) +{ + u32 data; + u32 reg = drv->regs[RSC_DRV_IRQ_ENABLE]; + + data = readl_relaxed(drv->tcs_base + reg); + if (enable) + data |= BIT(tcs_id); + else + data &= ~BIT(tcs_id); + writel_relaxed(data, drv->tcs_base + reg); +} + +/** + * tcs_tx_done() - TX Done interrupt handler. + * @irq: The IRQ number (ignored). + * @p: Pointer to "struct rsc_drv". + * + * Called for ACTIVE_ONLY transfers (those are the only ones we enable the + * IRQ for) when a transfer is done. 
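+ * The handler reclaims the TCS, clears the interrupt status and notifies the client through rpmh_tx_done().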
+ * + * Return: IRQ_HANDLED + */ +static irqreturn_t tcs_tx_done(int irq, void *p) +{ + struct rsc_drv *drv = p; + int i; + unsigned long irq_status; + const struct tcs_request *req; + + irq_status = readl_relaxed(drv->tcs_base + drv->regs[RSC_DRV_IRQ_STATUS]); + + for_each_set_bit(i, &irq_status, BITS_PER_TYPE(u32)) { + req = get_req_from_tcs(drv, i); + if (WARN_ON(!req)) + goto skip; + + trace_rpmh_tx_done(drv, i, req); + + /* + * If wake tcs was re-purposed for sending active + * votes, clear AMC trigger & enable modes and + * disable interrupt for this TCS + */ + if (!drv->tcs[ACTIVE_TCS].num_tcs) + __tcs_set_trigger(drv, i, false); +skip: + /* Reclaim the TCS */ + write_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], i, 0); + writel_relaxed(BIT(i), drv->tcs_base + drv->regs[RSC_DRV_IRQ_CLEAR]); + spin_lock(&drv->lock); + clear_bit(i, drv->tcs_in_use); + /* + * Disable interrupt for WAKE TCS to avoid being + * spammed with interrupts coming when the solver + * sends its wake votes. + */ + if (!drv->tcs[ACTIVE_TCS].num_tcs) + enable_tcs_irq(drv, i, false); + spin_unlock(&drv->lock); + wake_up(&drv->tcs_wait); + if (req) + rpmh_tx_done(req); + } + + return IRQ_HANDLED; +} + +/** + * __tcs_buffer_write() - Write to TCS hardware from a request; don't trigger. + * @drv: The controller. + * @tcs_id: The global ID of this TCS. + * @cmd_id: The index within the TCS to start writing. + * @msg: The message we want to send, which will contain several addr/data + * pairs to program (but few enough that they all fit in one TCS). + * + * This is used for all types of transfers (active, sleep, and wake). + */ +static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id, + const struct tcs_request *msg) +{ + u32 msgid; + u32 cmd_msgid = CMD_MSGID_LEN | CMD_MSGID_WRITE; + u32 cmd_enable = 0; + struct tcs_cmd *cmd; + int i, j; + + /* Convert all commands to RR when the request has wait_for_compl set */ + cmd_msgid |= msg->wait_for_compl ? CMD_MSGID_RESP_REQ : 0; + + for (i = 0, j = cmd_id; i < msg->num_cmds; i++, j++) { + cmd = &msg->cmds[i]; + cmd_enable |= BIT(j); + msgid = cmd_msgid; + /* + * Additionally, if the cmd->wait is set, make the command + * response reqd even if the overall request was fire-n-forget. + */ + msgid |= cmd->wait ? CMD_MSGID_RESP_REQ : 0; + + write_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_MSGID], tcs_id, j, msgid); + write_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_ADDR], tcs_id, j, cmd->addr); + write_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_DATA], tcs_id, j, cmd->data); + trace_rpmh_send_msg(drv, tcs_id, msg->state, j, msgid, cmd); + } + + cmd_enable |= read_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], tcs_id); + write_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], tcs_id, cmd_enable); +} + +/** + * check_for_req_inflight() - Look to see if conflicting cmds are in flight. + * @drv: The controller. + * @tcs: A pointer to the tcs_group used for ACTIVE_ONLY transfers. + * @msg: The message we want to send, which will contain several addr/data + * pairs to program (but few enough that they all fit in one TCS). + * + * This will walk through the TCSes in the group and check if any of them + * appear to be sending to addresses referenced in the message. If it finds + * one it'll return -EBUSY. + * + * Only for use for active-only transfers. + * + * Must be called with the drv->lock held since that protects tcs_in_use. + * + * Return: 0 if nothing in flight or -EBUSY if we should try again later. 
+ * The caller must re-enable interrupts between tries since that's + * the only way tcs_in_use will ever be updated and the only way + * RSC_DRV_CMD_ENABLE will ever be cleared. + */ +static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs, + const struct tcs_request *msg) +{ + unsigned long curr_enabled; + u32 addr; + int j, k; + int i = tcs->offset; + + for_each_set_bit_from(i, drv->tcs_in_use, tcs->offset + tcs->num_tcs) { + curr_enabled = read_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], i); + + for_each_set_bit(j, &curr_enabled, MAX_CMDS_PER_TCS) { + addr = read_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_ADDR], i, j); + for (k = 0; k < msg->num_cmds; k++) { + if (addr == msg->cmds[k].addr) + return -EBUSY; + } + } + } + + return 0; +} + +/** + * find_free_tcs() - Find free tcs in the given tcs_group; only for active. + * @tcs: A pointer to the active-only tcs_group (or the wake tcs_group if + * we borrowed it because there are zero active-only ones). + * + * Must be called with the drv->lock held since that protects tcs_in_use. + * + * Return: The first tcs that's free or -EBUSY if all in use. + */ +static int find_free_tcs(struct tcs_group *tcs) +{ + const struct rsc_drv *drv = tcs->drv; + unsigned long i; + unsigned long max = tcs->offset + tcs->num_tcs; + + i = find_next_zero_bit(drv->tcs_in_use, max, tcs->offset); + if (i >= max) + return -EBUSY; + + return i; +} + +/** + * claim_tcs_for_req() - Claim a tcs in the given tcs_group; only for active. + * @drv: The controller. + * @tcs: The tcs_group used for ACTIVE_ONLY transfers. + * @msg: The data to be sent. + * + * Claims a tcs in the given tcs_group while making sure that no existing cmd + * is in flight that would conflict with the one in @msg. + * + * Context: Must be called with the drv->lock held since that protects + * tcs_in_use. + * + * Return: The id of the claimed tcs or -EBUSY if a matching msg is in flight + * or the tcs_group is full. + */ +static int claim_tcs_for_req(struct rsc_drv *drv, struct tcs_group *tcs, + const struct tcs_request *msg) +{ + int ret; + + /* + * The h/w does not like if we send a request to the same address, + * when one is already in-flight or being processed. + */ + ret = check_for_req_inflight(drv, tcs, msg); + if (ret) + return ret; + + return find_free_tcs(tcs); +} + +/** + * rpmh_rsc_send_data() - Write / trigger active-only message. + * @drv: The controller. + * @msg: The data to be sent. + * + * NOTES: + * - This is only used for "ACTIVE_ONLY" since the limitations of this + * function don't make sense for sleep/wake cases. + * - To do the transfer, we will grab a whole TCS for ourselves--we don't + * try to share. If there are none available we'll wait indefinitely + * for a free one. + * - This function will not wait for the commands to be finished, only for + * data to be programmed into the RPMh. See rpmh_tx_done() which will + * be called when the transfer is fully complete. + * - This function must be called with interrupts enabled. If the hardware + * is busy doing someone else's transfer we need that transfer to fully + * finish so that we can have the hardware, and to fully finish it needs + * the interrupt handler to run. If the interrupts is set to run on the + * active CPU this can never happen if interrupts are disabled. + * + * Return: 0 on success, -EINVAL on error. 
+ */ +int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg) +{ + struct tcs_group *tcs; + int tcs_id; + unsigned long flags; + + tcs = get_tcs_for_msg(drv, msg); + if (IS_ERR(tcs)) + return PTR_ERR(tcs); + + spin_lock_irqsave(&drv->lock, flags); + + /* Wait forever for a free tcs. It better be there eventually! */ + wait_event_lock_irq(drv->tcs_wait, + (tcs_id = claim_tcs_for_req(drv, tcs, msg)) >= 0, + drv->lock); + + tcs->req[tcs_id - tcs->offset] = msg; + set_bit(tcs_id, drv->tcs_in_use); + if (msg->state == RPMH_ACTIVE_ONLY_STATE && tcs->type != ACTIVE_TCS) { + /* + * Clear previously programmed WAKE commands in selected + * repurposed TCS to avoid triggering them. tcs->slots will be + * cleaned from rpmh_flush() by invoking rpmh_rsc_invalidate() + */ + write_tcs_reg_sync(drv, drv->regs[RSC_DRV_CMD_ENABLE], tcs_id, 0); + enable_tcs_irq(drv, tcs_id, true); + } + spin_unlock_irqrestore(&drv->lock, flags); + + /* + * These two can be done after the lock is released because: + * - We marked "tcs_in_use" under lock. + * - Once "tcs_in_use" has been marked nobody else could be writing + * to these registers until the interrupt goes off. + * - The interrupt can't go off until we trigger w/ the last line + * of __tcs_set_trigger() below. + */ + __tcs_buffer_write(drv, tcs_id, 0, msg); + __tcs_set_trigger(drv, tcs_id, true); + + return 0; +} + +/** + * find_slots() - Find a place to write the given message. + * @tcs: The tcs group to search. + * @msg: The message we want to find room for. + * @tcs_id: If we return 0 from the function, we return the global ID of the + * TCS to write to here. + * @cmd_id: If we return 0 from the function, we return the index of + * the command array of the returned TCS where the client should + * start writing the message. + * + * Only for use on sleep/wake TCSes since those are the only ones we maintain + * tcs->slots for. + * + * Return: -ENOMEM if there was no room, else 0. + */ +static int find_slots(struct tcs_group *tcs, const struct tcs_request *msg, + int *tcs_id, int *cmd_id) +{ + int slot, offset; + int i = 0; + + /* Do over, until we can fit the full payload in a single TCS */ + do { + slot = bitmap_find_next_zero_area(tcs->slots, MAX_TCS_SLOTS, + i, msg->num_cmds, 0); + if (slot >= tcs->num_tcs * tcs->ncpt) + return -ENOMEM; + i += tcs->ncpt; + } while (slot + msg->num_cmds - 1 >= i); + + bitmap_set(tcs->slots, slot, msg->num_cmds); + + offset = slot / tcs->ncpt; + *tcs_id = offset + tcs->offset; + *cmd_id = slot % tcs->ncpt; + + return 0; +} + +/** + * rpmh_rsc_write_ctrl_data() - Write request to controller but don't trigger. + * @drv: The controller. + * @msg: The data to be written to the controller. + * + * This should only be called for sleep/wake state, never active-only + * state. + * + * The caller must ensure that no other RPMH actions are happening and the + * controller is idle when this function is called since it runs lockless. + * + * Return: 0 if no error; else -error. + */ +int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, const struct tcs_request *msg) +{ + struct tcs_group *tcs; + int tcs_id = 0, cmd_id = 0; + int ret; + + tcs = get_tcs_for_msg(drv, msg); + if (IS_ERR(tcs)) + return PTR_ERR(tcs); + + /* find the TCS id and the command in the TCS to write to */ + ret = find_slots(tcs, msg, &tcs_id, &cmd_id); + if (!ret) + __tcs_buffer_write(drv, tcs_id, cmd_id, msg); + + return ret; +} + +/** + * rpmh_rsc_ctrlr_is_busy() - Check if any of the AMCs are busy. 
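Worked example (hypothetical numbers) of the slot arithmetic in find_slots() above: with ncpt commands per TCS, the flat index found in tcs->slots maps back onto a TCS id and a command offset.

	/* Assume tcs->ncpt == 16, tcs->offset == 4, and slot 35 was found free. */
	int slot   = 35;
	int tcs_id = tcs->offset + slot / tcs->ncpt;   /* 4 + 35 / 16 == 6 */
	int cmd_id = slot % tcs->ncpt;                 /* 35 % 16     == 3 */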
+ * @drv: The controller + * + * Checks if any of the AMCs are busy in handling ACTIVE sets. + * This is called from the last cpu powering down before flushing + * SLEEP and WAKE sets. If AMCs are busy, controller can not enter + * power collapse, so deny from the last cpu's pm notification. + * + * Context: Must be called with the drv->lock held. + * + * Return: + * * False - AMCs are idle + * * True - AMCs are busy + */ +static bool rpmh_rsc_ctrlr_is_busy(struct rsc_drv *drv) +{ + unsigned long set; + const struct tcs_group *tcs = &drv->tcs[ACTIVE_TCS]; + unsigned long max; + + /* + * If we made an active request on a RSC that does not have a + * dedicated TCS for active state use, then re-purposed wake TCSes + * should be checked for not busy, because we used wake TCSes for + * active requests in this case. + */ + if (!tcs->num_tcs) + tcs = &drv->tcs[WAKE_TCS]; + + max = tcs->offset + tcs->num_tcs; + set = find_next_bit(drv->tcs_in_use, max, tcs->offset); + + return set < max; +} + +/** + * rpmh_rsc_write_next_wakeup() - Write next wakeup in CONTROL_TCS. + * @drv: The controller + * + * Writes maximum wakeup cycles when called from suspend. + * Writes earliest hrtimer wakeup when called from idle. + */ +void rpmh_rsc_write_next_wakeup(struct rsc_drv *drv) +{ + ktime_t now, wakeup; + u64 wakeup_us, wakeup_cycles = ~0; + u32 lo, hi; + + if (!drv->tcs[CONTROL_TCS].num_tcs || !drv->genpd_nb.notifier_call) + return; + + /* Set highest time when system (timekeeping) is suspended */ + if (system_state == SYSTEM_SUSPEND) + goto exit; + + /* Find the earliest hrtimer wakeup from online cpus */ + wakeup = dev_pm_genpd_get_next_hrtimer(drv->dev); + + /* Find the relative wakeup in kernel time scale */ + now = ktime_get(); + wakeup = ktime_sub(wakeup, now); + wakeup_us = ktime_to_us(wakeup); + + /* Convert the wakeup to arch timer scale */ + wakeup_cycles = USECS_TO_CYCLES(wakeup_us); + wakeup_cycles += arch_timer_read_counter(); + +exit: + lo = wakeup_cycles & RSC_DRV_CTL_TCS_DATA_LO_MASK; + hi = wakeup_cycles >> RSC_DRV_CTL_TCS_DATA_SIZE; + hi &= RSC_DRV_CTL_TCS_DATA_HI_MASK; + hi |= RSC_DRV_CTL_TCS_DATA_HI_VALID; + + writel_relaxed(lo, drv->base + RSC_DRV_CTL_TCS_DATA_LO); + writel_relaxed(hi, drv->base + RSC_DRV_CTL_TCS_DATA_HI); +} + +/** + * rpmh_rsc_cpu_pm_callback() - Check if any of the AMCs are busy. + * @nfb: Pointer to the notifier block in struct rsc_drv. + * @action: CPU_PM_ENTER, CPU_PM_ENTER_FAILED, or CPU_PM_EXIT. + * @v: Unused + * + * This function is given to cpu_pm_register_notifier so we can be informed + * about when CPUs go down. When all CPUs go down we know no more active + * transfers will be started so we write sleep/wake sets. This function gets + * called from cpuidle code paths and also at system suspend time. + * + * If its last CPU going down and AMCs are not busy then writes cached sleep + * and wake messages to TCSes. The firmware then takes care of triggering + * them when entering deepest low power modes. + * + * Return: See cpu_pm_register_notifier() + */ +static int rpmh_rsc_cpu_pm_callback(struct notifier_block *nfb, + unsigned long action, void *v) +{ + struct rsc_drv *drv = container_of(nfb, struct rsc_drv, rsc_pm); + int ret = NOTIFY_OK; + int cpus_in_pm; + + switch (action) { + case CPU_PM_ENTER: + cpus_in_pm = atomic_inc_return(&drv->cpus_in_pm); + /* + * NOTE: comments for num_online_cpus() point out that it's + * only a snapshot so we need to be careful. It should be OK + * for us to use, though. 
It's important for us not to miss + * if we're the last CPU going down so it would only be a + * problem if a CPU went offline right after we did the check + * AND that CPU was not idle AND that CPU was the last non-idle + * CPU. That can't happen. CPUs would have to come out of idle + * before the CPU could go offline. + */ + if (cpus_in_pm < num_online_cpus()) + return NOTIFY_OK; + break; + case CPU_PM_ENTER_FAILED: + case CPU_PM_EXIT: + atomic_dec(&drv->cpus_in_pm); + return NOTIFY_OK; + default: + return NOTIFY_DONE; + } + + /* + * It's likely we're on the last CPU. Grab the drv->lock and write + * out the sleep/wake commands to RPMH hardware. Grabbing the lock + * means that if we race with another CPU coming up we are still + * guaranteed to be safe. If another CPU came up just after we checked + * and has grabbed the lock or started an active transfer then we'll + * notice we're busy and abort. If another CPU comes up after we start + * flushing it will be blocked from starting an active transfer until + * we're done flushing. If another CPU starts an active transfer after + * we release the lock we're still OK because we're no longer the last + * CPU. + */ + if (spin_trylock(&drv->lock)) { + if (rpmh_rsc_ctrlr_is_busy(drv) || rpmh_flush(&drv->client)) + ret = NOTIFY_BAD; + spin_unlock(&drv->lock); + } else { + /* Another CPU must be up */ + return NOTIFY_OK; + } + + if (ret == NOTIFY_BAD) { + /* Double-check if we're here because someone else is up */ + if (cpus_in_pm < num_online_cpus()) + ret = NOTIFY_OK; + else + /* We won't be called w/ CPU_PM_ENTER_FAILED */ + atomic_dec(&drv->cpus_in_pm); + } + + return ret; +} + +/** + * rpmh_rsc_pd_callback() - Check if any of the AMCs are busy. + * @nfb: Pointer to the genpd notifier block in struct rsc_drv. + * @action: GENPD_NOTIFY_PRE_OFF, GENPD_NOTIFY_OFF, GENPD_NOTIFY_PRE_ON or GENPD_NOTIFY_ON. + * @v: Unused + * + * This function is given to dev_pm_genpd_add_notifier() so we can be informed + * about when cluster-pd is going down. When cluster go down we know no more active + * transfers will be started so we write sleep/wake sets. This function gets + * called from cpuidle code paths and also at system suspend time. + * + * If AMCs are not busy then writes cached sleep and wake messages to TCSes. + * The firmware then takes care of triggering them when entering deepest low power modes. 
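To make the last-CPU accounting above concrete, a hypothetical trace for a 4-CPU system (editorial, not from the source):

	/*
	 * CPU3 idles: cpus_in_pm 0 -> 1, 1 < num_online_cpus(), return NOTIFY_OK
	 * CPU2 idles: cpus_in_pm 1 -> 2, still not last, return NOTIFY_OK
	 * CPU1 idles: cpus_in_pm 2 -> 3, still not last, return NOTIFY_OK
	 * CPU0 idles: cpus_in_pm 3 -> 4, last CPU, so try to flush sleep/wake sets
	 *
	 * If an AMC is still busy or rpmh_flush() fails, NOTIFY_BAD is returned
	 * and the low-power entry is vetoed; cpuidle backs out and retries later.
	 */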
+ * + * Return: + * * NOTIFY_OK - success + * * NOTIFY_BAD - failure + */ +static int rpmh_rsc_pd_callback(struct notifier_block *nfb, + unsigned long action, void *v) +{ + struct rsc_drv *drv = container_of(nfb, struct rsc_drv, genpd_nb); + + /* We don't need to lock as genpd on/off are serialized */ + if ((action == GENPD_NOTIFY_PRE_OFF) && + (rpmh_rsc_ctrlr_is_busy(drv) || rpmh_flush(&drv->client))) + return NOTIFY_BAD; + + return NOTIFY_OK; +} + +static int rpmh_rsc_pd_attach(struct rsc_drv *drv, struct device *dev) +{ + int ret; + + pm_runtime_enable(dev); + drv->genpd_nb.notifier_call = rpmh_rsc_pd_callback; + ret = dev_pm_genpd_add_notifier(dev, &drv->genpd_nb); + if (ret) + pm_runtime_disable(dev); + + return ret; +} + +static int rpmh_probe_tcs_config(struct platform_device *pdev, struct rsc_drv *drv) +{ + struct tcs_type_config { + u32 type; + u32 n; + } tcs_cfg[TCS_TYPE_NR] = { { 0 } }; + struct device_node *dn = pdev->dev.of_node; + u32 config, max_tcs, ncpt, offset; + int i, ret, n, st = 0; + struct tcs_group *tcs; + + ret = of_property_read_u32(dn, "qcom,tcs-offset", &offset); + if (ret) + return ret; + drv->tcs_base = drv->base + offset; + + config = readl_relaxed(drv->base + drv->regs[DRV_PRNT_CHLD_CONFIG]); + + max_tcs = config; + max_tcs &= DRV_NUM_TCS_MASK << (DRV_NUM_TCS_SHIFT * drv->id); + max_tcs = max_tcs >> (DRV_NUM_TCS_SHIFT * drv->id); + + ncpt = config & (DRV_NCPT_MASK << DRV_NCPT_SHIFT); + ncpt = ncpt >> DRV_NCPT_SHIFT; + + n = of_property_count_u32_elems(dn, "qcom,tcs-config"); + if (n != 2 * TCS_TYPE_NR) + return -EINVAL; + + for (i = 0; i < TCS_TYPE_NR; i++) { + ret = of_property_read_u32_index(dn, "qcom,tcs-config", + i * 2, &tcs_cfg[i].type); + if (ret) + return ret; + if (tcs_cfg[i].type >= TCS_TYPE_NR) + return -EINVAL; + + ret = of_property_read_u32_index(dn, "qcom,tcs-config", + i * 2 + 1, &tcs_cfg[i].n); + if (ret) + return ret; + if (tcs_cfg[i].n > MAX_TCS_PER_TYPE) + return -EINVAL; + } + + for (i = 0; i < TCS_TYPE_NR; i++) { + tcs = &drv->tcs[tcs_cfg[i].type]; + if (tcs->drv) + return -EINVAL; + tcs->drv = drv; + tcs->type = tcs_cfg[i].type; + tcs->num_tcs = tcs_cfg[i].n; + tcs->ncpt = ncpt; + + if (!tcs->num_tcs || tcs->type == CONTROL_TCS) + continue; + + if (st + tcs->num_tcs > max_tcs || + st + tcs->num_tcs >= BITS_PER_BYTE * sizeof(tcs->mask)) + return -EINVAL; + + tcs->mask = ((1 << tcs->num_tcs) - 1) << st; + tcs->offset = st; + st += tcs->num_tcs; + } + + drv->num_tcs = st; + + return 0; +} + +static int rpmh_rsc_probe(struct platform_device *pdev) +{ + struct device_node *dn = pdev->dev.of_node; + struct rsc_drv *drv; + char drv_id[10] = {0}; + int ret, irq; + u32 solver_config; + u32 rsc_id; + + /* + * Even though RPMh doesn't directly use cmd-db, all of its children + * do. To avoid adding this check to our children we'll do it now. 
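A worked example (made-up counts) of the mask/offset assignment in rpmh_probe_tcs_config() above, showing how the per-type groups are carved out of the contiguous TCS space:

	/*
	 * Hypothetical qcom,tcs-config listing ACTIVE=2, SLEEP=3, WAKE=3,
	 * CONTROL=1 in that order:
	 *
	 *   ACTIVE : offset 0, mask 0b00000011
	 *   SLEEP  : offset 2, mask 0b00011100
	 *   WAKE   : offset 5, mask 0b11100000
	 *   CONTROL: skipped by the loop (no mask/offset assigned)
	 *
	 * so drv->num_tcs ends up as 8.
	 */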
+ */ + ret = cmd_db_ready(); + if (ret) { + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, "Command DB not available (%d)\n", + ret); + return ret; + } + + drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL); + if (!drv) + return -ENOMEM; + + ret = of_property_read_u32(dn, "qcom,drv-id", &drv->id); + if (ret) + return ret; + + drv->name = of_get_property(dn, "label", NULL); + if (!drv->name) + drv->name = dev_name(&pdev->dev); + + snprintf(drv_id, ARRAY_SIZE(drv_id), "drv-%d", drv->id); + drv->base = devm_platform_ioremap_resource_byname(pdev, drv_id); + if (IS_ERR(drv->base)) + return PTR_ERR(drv->base); + + rsc_id = readl_relaxed(drv->base + RSC_DRV_ID); + drv->ver.major = rsc_id & (MAJOR_VER_MASK << MAJOR_VER_SHIFT); + drv->ver.major >>= MAJOR_VER_SHIFT; + drv->ver.minor = rsc_id & (MINOR_VER_MASK << MINOR_VER_SHIFT); + drv->ver.minor >>= MINOR_VER_SHIFT; + + if (drv->ver.major == 3) + drv->regs = rpmh_rsc_reg_offset_ver_3_0; + else + drv->regs = rpmh_rsc_reg_offset_ver_2_7; + + ret = rpmh_probe_tcs_config(pdev, drv); + if (ret) + return ret; + + spin_lock_init(&drv->lock); + init_waitqueue_head(&drv->tcs_wait); + bitmap_zero(drv->tcs_in_use, MAX_TCS_NR); + + irq = platform_get_irq(pdev, drv->id); + if (irq < 0) + return irq; + + ret = devm_request_irq(&pdev->dev, irq, tcs_tx_done, + IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND, + drv->name, drv); + if (ret) + return ret; + + /* + * CPU PM/genpd notification are not required for controllers that support + * 'HW solver' mode where they can be in autonomous mode executing low + * power mode to power down. + */ + solver_config = readl_relaxed(drv->base + drv->regs[DRV_SOLVER_CONFIG]); + solver_config &= DRV_HW_SOLVER_MASK << DRV_HW_SOLVER_SHIFT; + solver_config = solver_config >> DRV_HW_SOLVER_SHIFT; + if (!solver_config) { + if (pdev->dev.pm_domain) { + ret = rpmh_rsc_pd_attach(drv, &pdev->dev); + if (ret) + return ret; + } else { + drv->rsc_pm.notifier_call = rpmh_rsc_cpu_pm_callback; + cpu_pm_register_notifier(&drv->rsc_pm); + } + } + + /* Enable the active TCS to send requests immediately */ + writel_relaxed(drv->tcs[ACTIVE_TCS].mask, + drv->tcs_base + drv->regs[RSC_DRV_IRQ_ENABLE]); + + spin_lock_init(&drv->client.cache_lock); + INIT_LIST_HEAD(&drv->client.cache); + INIT_LIST_HEAD(&drv->client.batch_cache); + + dev_set_drvdata(&pdev->dev, drv); + drv->dev = &pdev->dev; + + ret = devm_of_platform_populate(&pdev->dev); + if (ret && pdev->dev.pm_domain) { + dev_pm_genpd_remove_notifier(&pdev->dev); + pm_runtime_disable(&pdev->dev); + } + + return ret; +} + +static const struct of_device_id rpmh_drv_match[] = { + { .compatible = "qcom,rpmh-rsc", }, + { } +}; +MODULE_DEVICE_TABLE(of, rpmh_drv_match); + +static struct platform_driver rpmh_driver = { + .probe = rpmh_rsc_probe, + .driver = { + .name = "rpmh", + .of_match_table = rpmh_drv_match, + .suppress_bind_attrs = true, + }, +}; + +static int __init rpmh_driver_init(void) +{ + return platform_driver_register(&rpmh_driver); +} +arch_initcall(rpmh_driver_init); + +MODULE_DESCRIPTION("Qualcomm Technologies, Inc. RPMh Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c new file mode 100644 index 0000000000..08e09642d7 --- /dev/null +++ b/drivers/soc/qcom/rpmh.c @@ -0,0 +1,503 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. 
+ */ + +#include <linux/atomic.h> +#include <linux/bug.h> +#include <linux/interrupt.h> +#include <linux/jiffies.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/lockdep.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/types.h> +#include <linux/wait.h> + +#include <soc/qcom/rpmh.h> + +#include "rpmh-internal.h" + +#define RPMH_TIMEOUT_MS msecs_to_jiffies(10000) + +#define DEFINE_RPMH_MSG_ONSTACK(device, s, q, name) \ + struct rpmh_request name = { \ + .msg = { \ + .state = s, \ + .cmds = name.cmd, \ + .num_cmds = 0, \ + .wait_for_compl = true, \ + }, \ + .cmd = { { 0 } }, \ + .completion = q, \ + .dev = device, \ + .needs_free = false, \ + } + +#define ctrlr_to_drv(ctrlr) container_of(ctrlr, struct rsc_drv, client) + +/** + * struct cache_req: the request object for caching + * + * @addr: the address of the resource + * @sleep_val: the sleep vote + * @wake_val: the wake vote + * @list: linked list obj + */ +struct cache_req { + u32 addr; + u32 sleep_val; + u32 wake_val; + struct list_head list; +}; + +/** + * struct batch_cache_req - An entry in our batch catch + * + * @list: linked list obj + * @count: number of messages + * @rpm_msgs: the messages + */ + +struct batch_cache_req { + struct list_head list; + int count; + struct rpmh_request rpm_msgs[]; +}; + +static struct rpmh_ctrlr *get_rpmh_ctrlr(const struct device *dev) +{ + struct rsc_drv *drv = dev_get_drvdata(dev->parent); + + return &drv->client; +} + +void rpmh_tx_done(const struct tcs_request *msg) +{ + struct rpmh_request *rpm_msg = container_of(msg, struct rpmh_request, + msg); + struct completion *compl = rpm_msg->completion; + bool free = rpm_msg->needs_free; + + if (!compl) + goto exit; + + /* Signal the blocking thread we are done */ + complete(compl); + +exit: + if (free) + kfree(rpm_msg); +} + +static struct cache_req *__find_req(struct rpmh_ctrlr *ctrlr, u32 addr) +{ + struct cache_req *p, *req = NULL; + + list_for_each_entry(p, &ctrlr->cache, list) { + if (p->addr == addr) { + req = p; + break; + } + } + + return req; +} + +static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr, + enum rpmh_state state, + struct tcs_cmd *cmd) +{ + struct cache_req *req; + unsigned long flags; + u32 old_sleep_val, old_wake_val; + + spin_lock_irqsave(&ctrlr->cache_lock, flags); + req = __find_req(ctrlr, cmd->addr); + if (req) + goto existing; + + req = kzalloc(sizeof(*req), GFP_ATOMIC); + if (!req) { + req = ERR_PTR(-ENOMEM); + goto unlock; + } + + req->addr = cmd->addr; + req->sleep_val = req->wake_val = UINT_MAX; + list_add_tail(&req->list, &ctrlr->cache); + +existing: + old_sleep_val = req->sleep_val; + old_wake_val = req->wake_val; + + switch (state) { + case RPMH_ACTIVE_ONLY_STATE: + case RPMH_WAKE_ONLY_STATE: + req->wake_val = cmd->data; + break; + case RPMH_SLEEP_STATE: + req->sleep_val = cmd->data; + break; + } + + ctrlr->dirty |= (req->sleep_val != old_sleep_val || + req->wake_val != old_wake_val) && + req->sleep_val != UINT_MAX && + req->wake_val != UINT_MAX; + +unlock: + spin_unlock_irqrestore(&ctrlr->cache_lock, flags); + + return req; +} + +/** + * __rpmh_write: Cache and send the RPMH request + * + * @dev: The device making the request + * @state: Active/Sleep request type + * @rpm_msg: The data that needs to be sent (cmds). + * + * Cache the RPMH request and send if the state is ACTIVE_ONLY. + * SLEEP/WAKE_ONLY requests are not sent to the controller at + * this time. 
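To make the caching rules above concrete, a hedged walk-through of cache_rpm_request() for one hypothetical address:

	/*
	 * rpmh_write(dev, RPMH_SLEEP_STATE,     {0x30000, 0x0}, 1)  -> sleep_val = 0x0
	 * rpmh_write(dev, RPMH_WAKE_ONLY_STATE, {0x30000, 0x3}, 1)  -> wake_val  = 0x3
	 *
	 * ctrlr->dirty only becomes true once both sleep_val and wake_val are
	 * valid (!= UINT_MAX) and at least one of them changed; that flag is what
	 * later tells rpmh_flush() the sleep/wake contents must be rewritten.
	 */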
Use rpmh_flush() to send them to the controller. + */ +static int __rpmh_write(const struct device *dev, enum rpmh_state state, + struct rpmh_request *rpm_msg) +{ + struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev); + int ret = -EINVAL; + struct cache_req *req; + int i; + + /* Cache the request in our store and link the payload */ + for (i = 0; i < rpm_msg->msg.num_cmds; i++) { + req = cache_rpm_request(ctrlr, state, &rpm_msg->msg.cmds[i]); + if (IS_ERR(req)) + return PTR_ERR(req); + } + + if (state == RPMH_ACTIVE_ONLY_STATE) { + WARN_ON(irqs_disabled()); + ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg); + } else { + /* Clean up our call by spoofing tx_done */ + ret = 0; + rpmh_tx_done(&rpm_msg->msg); + } + + return ret; +} + +static int __fill_rpmh_msg(struct rpmh_request *req, enum rpmh_state state, + const struct tcs_cmd *cmd, u32 n) +{ + if (!cmd || !n || n > MAX_RPMH_PAYLOAD) + return -EINVAL; + + memcpy(req->cmd, cmd, n * sizeof(*cmd)); + + req->msg.state = state; + req->msg.cmds = req->cmd; + req->msg.num_cmds = n; + + return 0; +} + +/** + * rpmh_write_async: Write a set of RPMH commands + * + * @dev: The device making the request + * @state: Active/sleep set + * @cmd: The payload data + * @n: The number of elements in payload + * + * Write a set of RPMH commands, the order of commands is maintained + * and will be sent as a single shot. + */ +int rpmh_write_async(const struct device *dev, enum rpmh_state state, + const struct tcs_cmd *cmd, u32 n) +{ + struct rpmh_request *rpm_msg; + int ret; + + rpm_msg = kzalloc(sizeof(*rpm_msg), GFP_ATOMIC); + if (!rpm_msg) + return -ENOMEM; + rpm_msg->needs_free = true; + + ret = __fill_rpmh_msg(rpm_msg, state, cmd, n); + if (ret) { + kfree(rpm_msg); + return ret; + } + + return __rpmh_write(dev, state, rpm_msg); +} +EXPORT_SYMBOL(rpmh_write_async); + +/** + * rpmh_write: Write a set of RPMH commands and block until response + * + * @dev: The device making the request + * @state: Active/sleep set + * @cmd: The payload data + * @n: The number of elements in @cmd + * + * May sleep. Do not call from atomic contexts. + */ +int rpmh_write(const struct device *dev, enum rpmh_state state, + const struct tcs_cmd *cmd, u32 n) +{ + DECLARE_COMPLETION_ONSTACK(compl); + DEFINE_RPMH_MSG_ONSTACK(dev, state, &compl, rpm_msg); + int ret; + + ret = __fill_rpmh_msg(&rpm_msg, state, cmd, n); + if (ret) + return ret; + + ret = __rpmh_write(dev, state, &rpm_msg); + if (ret) + return ret; + + ret = wait_for_completion_timeout(&compl, RPMH_TIMEOUT_MS); + WARN_ON(!ret); + return (ret > 0) ? 0 : -ETIMEDOUT; +} +EXPORT_SYMBOL(rpmh_write); + +static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req) +{ + unsigned long flags; + + spin_lock_irqsave(&ctrlr->cache_lock, flags); + list_add_tail(&req->list, &ctrlr->batch_cache); + ctrlr->dirty = true; + spin_unlock_irqrestore(&ctrlr->cache_lock, flags); +} + +static int flush_batch(struct rpmh_ctrlr *ctrlr) +{ + struct batch_cache_req *req; + const struct rpmh_request *rpm_msg; + int ret = 0; + int i; + + /* Send Sleep/Wake requests to the controller, expect no response */ + list_for_each_entry(req, &ctrlr->batch_cache, list) { + for (i = 0; i < req->count; i++) { + rpm_msg = req->rpm_msgs + i; + ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr), + &rpm_msg->msg); + if (ret) + break; + } + } + + return ret; +} + +/** + * rpmh_write_batch: Write multiple sets of RPMH commands and wait for the + * batch to finish. 
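A hedged consumer-side sketch of the two single-request entry points above; the calling device is assumed to be a child of the RSC node (so dev->parent carries the rsc_drv drvdata) and the address is a made-up cmd_db resource.

static int example_vote(struct device *dev, u32 addr)
{
	struct tcs_cmd cmd = { .addr = addr, .data = 0x1 };
	int ret;

	/* Blocking active vote: returns once the request has been acked. */
	ret = rpmh_write(dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
	if (ret)
		return ret;

	/* Sleep vote: only cached here, written out later by rpmh_flush(). */
	return rpmh_write_async(dev, RPMH_SLEEP_STATE, &cmd, 1);
}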
+ * + * @dev: the device making the request + * @state: Active/sleep set + * @cmd: The payload data + * @n: The array of count of elements in each batch, 0 terminated. + * + * Write a request to the RSC controller without caching. If the request + * state is ACTIVE, then the requests are treated as completion request + * and sent to the controller immediately. The function waits until all the + * commands are complete. If the request was to SLEEP or WAKE_ONLY, then the + * request is sent as fire-n-forget and no ack is expected. + * + * May sleep. Do not call from atomic contexts for ACTIVE_ONLY requests. + */ +int rpmh_write_batch(const struct device *dev, enum rpmh_state state, + const struct tcs_cmd *cmd, u32 *n) +{ + struct batch_cache_req *req; + struct rpmh_request *rpm_msgs; + struct completion *compls; + struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev); + unsigned long time_left; + int count = 0; + int ret, i; + void *ptr; + + if (!cmd || !n) + return -EINVAL; + + while (n[count] > 0) + count++; + if (!count) + return -EINVAL; + + ptr = kzalloc(sizeof(*req) + + count * (sizeof(req->rpm_msgs[0]) + sizeof(*compls)), + GFP_ATOMIC); + if (!ptr) + return -ENOMEM; + + req = ptr; + compls = ptr + sizeof(*req) + count * sizeof(*rpm_msgs); + + req->count = count; + rpm_msgs = req->rpm_msgs; + + for (i = 0; i < count; i++) { + __fill_rpmh_msg(rpm_msgs + i, state, cmd, n[i]); + cmd += n[i]; + } + + if (state != RPMH_ACTIVE_ONLY_STATE) { + cache_batch(ctrlr, req); + return 0; + } + + for (i = 0; i < count; i++) { + struct completion *compl = &compls[i]; + + init_completion(compl); + rpm_msgs[i].completion = compl; + ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg); + if (ret) { + pr_err("Error(%d) sending RPMH message addr=%#x\n", + ret, rpm_msgs[i].msg.cmds[0].addr); + break; + } + } + + time_left = RPMH_TIMEOUT_MS; + while (i--) { + time_left = wait_for_completion_timeout(&compls[i], time_left); + if (!time_left) { + /* + * Better hope they never finish because they'll signal + * the completion that we're going to free once + * we've returned from this function. + */ + WARN_ON(1); + ret = -ETIMEDOUT; + goto exit; + } + } + +exit: + kfree(ptr); + + return ret; +} +EXPORT_SYMBOL(rpmh_write_batch); + +static int is_req_valid(struct cache_req *req) +{ + return (req->sleep_val != UINT_MAX && + req->wake_val != UINT_MAX && + req->sleep_val != req->wake_val); +} + +static int send_single(struct rpmh_ctrlr *ctrlr, enum rpmh_state state, + u32 addr, u32 data) +{ + DEFINE_RPMH_MSG_ONSTACK(NULL, state, NULL, rpm_msg); + + /* Wake sets are always complete and sleep sets are not */ + rpm_msg.msg.wait_for_compl = (state == RPMH_WAKE_ONLY_STATE); + rpm_msg.cmd[0].addr = addr; + rpm_msg.cmd[0].data = data; + rpm_msg.msg.num_cmds = 1; + + return rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr), &rpm_msg.msg); +} + +/** + * rpmh_flush() - Flushes the buffered sleep and wake sets to TCSes + * + * @ctrlr: Controller making request to flush cached data + * + * Return: + * * 0 - Success + * * Error code - Otherwise + */ +int rpmh_flush(struct rpmh_ctrlr *ctrlr) +{ + struct cache_req *p; + int ret = 0; + + lockdep_assert_irqs_disabled(); + + /* + * Currently rpmh_flush() is only called when we think we're running + * on the last processor. If the lock is busy it means another + * processor is up and it's better to abort than spin. 
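A matching sketch for the batch interface defined above; n[] is the zero-terminated list of per-batch command counts and the addresses are again made up.

static int example_batch(struct device *dev)
{
	struct tcs_cmd cmds[] = {
		{ .addr = 0x30000, .data = 0x1 },
		{ .addr = 0x30010, .data = 0x2 },
		{ .addr = 0x30020, .data = 0x3 },
	};
	u32 n[] = { 2, 1, 0 };  /* a batch of 2 commands, then a batch of 1 */

	/*
	 * ACTIVE_ONLY batches are sent immediately and waited on; other states
	 * are only cached for rpmh_flush().
	 */
	return rpmh_write_batch(dev, RPMH_ACTIVE_ONLY_STATE, cmds, n);
}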
+ */ + if (!spin_trylock(&ctrlr->cache_lock)) + return -EBUSY; + + if (!ctrlr->dirty) { + pr_debug("Skipping flush, TCS has latest data.\n"); + goto write_next_wakeup; + } + + /* Invalidate the TCSes first to avoid stale data */ + rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr)); + + /* First flush the cached batch requests */ + ret = flush_batch(ctrlr); + if (ret) + goto exit; + + list_for_each_entry(p, &ctrlr->cache, list) { + if (!is_req_valid(p)) { + pr_debug("%s: skipping RPMH req: a:%#x s:%#x w:%#x", + __func__, p->addr, p->sleep_val, p->wake_val); + continue; + } + ret = send_single(ctrlr, RPMH_SLEEP_STATE, p->addr, + p->sleep_val); + if (ret) + goto exit; + ret = send_single(ctrlr, RPMH_WAKE_ONLY_STATE, p->addr, + p->wake_val); + if (ret) + goto exit; + } + + ctrlr->dirty = false; + +write_next_wakeup: + rpmh_rsc_write_next_wakeup(ctrlr_to_drv(ctrlr)); +exit: + spin_unlock(&ctrlr->cache_lock); + return ret; +} + +/** + * rpmh_invalidate: Invalidate sleep and wake sets in batch_cache + * + * @dev: The device making the request + * + * Invalidate the sleep and wake values in batch_cache. + */ +void rpmh_invalidate(const struct device *dev) +{ + struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev); + struct batch_cache_req *req, *tmp; + unsigned long flags; + + spin_lock_irqsave(&ctrlr->cache_lock, flags); + list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list) + kfree(req); + INIT_LIST_HEAD(&ctrlr->batch_cache); + ctrlr->dirty = true; + spin_unlock_irqrestore(&ctrlr->cache_lock, flags); +} +EXPORT_SYMBOL(rpmh_invalidate); diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c new file mode 100644 index 0000000000..f9fd617711 --- /dev/null +++ b/drivers/soc/qcom/smd-rpm.c @@ -0,0 +1,249 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015, Sony Mobile Communications AB. + * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. + */ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/of_platform.h> +#include <linux/io.h> +#include <linux/interrupt.h> +#include <linux/slab.h> + +#include <linux/rpmsg.h> +#include <linux/soc/qcom/smd-rpm.h> + +#define RPM_REQUEST_TIMEOUT (5 * HZ) + +/** + * struct qcom_smd_rpm - state of the rpm device driver + * @rpm_channel: reference to the smd channel + * @dev: rpm device + * @ack: completion for acks + * @lock: mutual exclusion around the send/complete pair + * @ack_status: result of the rpm request + */ +struct qcom_smd_rpm { + struct rpmsg_endpoint *rpm_channel; + struct device *dev; + + struct completion ack; + struct mutex lock; + int ack_status; +}; + +/** + * struct qcom_rpm_header - header for all rpm requests and responses + * @service_type: identifier of the service + * @length: length of the payload + */ +struct qcom_rpm_header { + __le32 service_type; + __le32 length; +}; + +/** + * struct qcom_rpm_request - request message to the rpm + * @msg_id: identifier of the outgoing message + * @flags: active/sleep state flags + * @type: resource type + * @id: resource id + * @data_len: length of the payload following this header + */ +struct qcom_rpm_request { + __le32 msg_id; + __le32 flags; + __le32 type; + __le32 id; + __le32 data_len; +}; + +/** + * struct qcom_rpm_message - response message from the rpm + * @msg_type: indicator of the type of message + * @length: the size of this message, including the message header + * @msg_id: message id + * @message: textual message from the rpm + * + * Multiple of these messages can be stacked in an rpm message. 
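For reference, a rough sketch of how one request ends up on the wire, as assembled by qcom_rpm_smd_write() below (editorial; sizes follow from the struct definitions above):

	/*
	 * struct qcom_rpm_header    8 bytes   service_type = "req\0",
	 *                                     length = 20 + count
	 * struct qcom_rpm_request  20 bytes   msg_id, flags (state), type, id,
	 *                                     data_len = count
	 * payload                  count bytes, copied from the caller's buffer
	 */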
+ */ +struct qcom_rpm_message { + __le32 msg_type; + __le32 length; + union { + __le32 msg_id; + DECLARE_FLEX_ARRAY(u8, message); + }; +}; + +#define RPM_SERVICE_TYPE_REQUEST 0x00716572 /* "req\0" */ + +#define RPM_MSG_TYPE_ERR 0x00727265 /* "err\0" */ +#define RPM_MSG_TYPE_MSG_ID 0x2367736d /* "msg#" */ + +/** + * qcom_rpm_smd_write - write @buf to @type:@id + * @rpm: rpm handle + * @state: active/sleep state flags + * @type: resource type + * @id: resource identifier + * @buf: the data to be written + * @count: number of bytes in @buf + */ +int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm, + int state, + u32 type, u32 id, + void *buf, + size_t count) +{ + static unsigned msg_id = 1; + int left; + int ret; + struct { + struct qcom_rpm_header hdr; + struct qcom_rpm_request req; + u8 payload[]; + } *pkt; + size_t size = sizeof(*pkt) + count; + + /* SMD packets to the RPM may not exceed 256 bytes */ + if (WARN_ON(size >= 256)) + return -EINVAL; + + pkt = kmalloc(size, GFP_ATOMIC); + if (!pkt) + return -ENOMEM; + + mutex_lock(&rpm->lock); + + pkt->hdr.service_type = cpu_to_le32(RPM_SERVICE_TYPE_REQUEST); + pkt->hdr.length = cpu_to_le32(sizeof(struct qcom_rpm_request) + count); + + pkt->req.msg_id = cpu_to_le32(msg_id++); + pkt->req.flags = cpu_to_le32(state); + pkt->req.type = cpu_to_le32(type); + pkt->req.id = cpu_to_le32(id); + pkt->req.data_len = cpu_to_le32(count); + memcpy(pkt->payload, buf, count); + + ret = rpmsg_send(rpm->rpm_channel, pkt, size); + if (ret) + goto out; + + left = wait_for_completion_timeout(&rpm->ack, RPM_REQUEST_TIMEOUT); + if (!left) + ret = -ETIMEDOUT; + else + ret = rpm->ack_status; + +out: + kfree(pkt); + mutex_unlock(&rpm->lock); + return ret; +} +EXPORT_SYMBOL(qcom_rpm_smd_write); + +static int qcom_smd_rpm_callback(struct rpmsg_device *rpdev, + void *data, + int count, + void *priv, + u32 addr) +{ + const struct qcom_rpm_header *hdr = data; + size_t hdr_length = le32_to_cpu(hdr->length); + const struct qcom_rpm_message *msg; + struct qcom_smd_rpm *rpm = dev_get_drvdata(&rpdev->dev); + const u8 *buf = data + sizeof(struct qcom_rpm_header); + const u8 *end = buf + hdr_length; + char msgbuf[32]; + int status = 0; + u32 len, msg_length; + + if (le32_to_cpu(hdr->service_type) != RPM_SERVICE_TYPE_REQUEST || + hdr_length < sizeof(struct qcom_rpm_message)) { + dev_err(rpm->dev, "invalid request\n"); + return 0; + } + + while (buf < end) { + msg = (struct qcom_rpm_message *)buf; + msg_length = le32_to_cpu(msg->length); + switch (le32_to_cpu(msg->msg_type)) { + case RPM_MSG_TYPE_MSG_ID: + break; + case RPM_MSG_TYPE_ERR: + len = min_t(u32, ALIGN(msg_length, 4), sizeof(msgbuf)); + memcpy_fromio(msgbuf, msg->message, len); + msgbuf[len - 1] = 0; + + if (!strcmp(msgbuf, "resource does not exist")) + status = -ENXIO; + else + status = -EINVAL; + break; + } + + buf = PTR_ALIGN(buf + 2 * sizeof(u32) + msg_length, 4); + } + + rpm->ack_status = status; + complete(&rpm->ack); + return 0; +} + +static int qcom_smd_rpm_probe(struct rpmsg_device *rpdev) +{ + struct qcom_smd_rpm *rpm; + + if (!rpdev->dev.of_node) + return -EINVAL; + + rpm = devm_kzalloc(&rpdev->dev, sizeof(*rpm), GFP_KERNEL); + if (!rpm) + return -ENOMEM; + + mutex_init(&rpm->lock); + init_completion(&rpm->ack); + + rpm->dev = &rpdev->dev; + rpm->rpm_channel = rpdev->ept; + dev_set_drvdata(&rpdev->dev, rpm); + + return of_platform_populate(rpdev->dev.of_node, NULL, NULL, &rpdev->dev); +} + +static void qcom_smd_rpm_remove(struct rpmsg_device *rpdev) +{ + of_platform_depopulate(&rpdev->dev); +} + +static const struct 
rpmsg_device_id qcom_smd_rpm_id_table[] = { + { .name = "rpm_requests", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(rpmsg, qcom_smd_rpm_id_table); + +static struct rpmsg_driver qcom_smd_rpm_driver = { + .probe = qcom_smd_rpm_probe, + .remove = qcom_smd_rpm_remove, + .callback = qcom_smd_rpm_callback, + .id_table = qcom_smd_rpm_id_table, + .drv.name = "qcom_smd_rpm", +}; + +static int __init qcom_smd_rpm_init(void) +{ + return register_rpmsg_driver(&qcom_smd_rpm_driver); +} +arch_initcall(qcom_smd_rpm_init); + +static void __exit qcom_smd_rpm_exit(void) +{ + unregister_rpmsg_driver(&qcom_smd_rpm_driver); +} +module_exit(qcom_smd_rpm_exit); + +MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>"); +MODULE_DESCRIPTION("Qualcomm SMD backed RPM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c new file mode 100644 index 0000000000..d4a89d2bb4 --- /dev/null +++ b/drivers/soc/qcom/smem.c @@ -0,0 +1,1230 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015, Sony Mobile Communications AB. + * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. + */ + +#include <linux/hwspinlock.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_reserved_mem.h> +#include <linux/platform_device.h> +#include <linux/sizes.h> +#include <linux/slab.h> +#include <linux/soc/qcom/smem.h> +#include <linux/soc/qcom/socinfo.h> + +/* + * The Qualcomm shared memory system is a allocate only heap structure that + * consists of one of more memory areas that can be accessed by the processors + * in the SoC. + * + * All systems contains a global heap, accessible by all processors in the SoC, + * with a table of contents data structure (@smem_header) at the beginning of + * the main shared memory block. + * + * The global header contains meta data for allocations as well as a fixed list + * of 512 entries (@smem_global_entry) that can be initialized to reference + * parts of the shared memory space. + * + * + * In addition to this global heap a set of "private" heaps can be set up at + * boot time with access restrictions so that only certain processor pairs can + * access the data. + * + * These partitions are referenced from an optional partition table + * (@smem_ptable), that is found 4kB from the end of the main smem region. The + * partition table entries (@smem_ptable_entry) lists the involved processors + * (or hosts) and their location in the main shared memory region. + * + * Each partition starts with a header (@smem_partition_header) that identifies + * the partition and holds properties for the two internal memory regions. The + * two regions are cached and non-cached memory respectively. Each region + * contain a link list of allocation headers (@smem_private_entry) followed by + * their data. + * + * Items in the non-cached region are allocated from the start of the partition + * while items in the cached region are allocated from the end. The free area + * is hence the region between the cached and non-cached offsets. The header of + * cached items comes after the data. + * + * Version 12 (SMEM_GLOBAL_PART_VERSION) changes the item alloc/get procedure + * for the global heap. A new global partition is created from the global heap + * region with partition type (SMEM_GLOBAL_HOST) and the max smem item count is + * set by the bootloader. 
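An editorial sketch of the layout just described (not part of the patch):

	/*
	 *   main smem region
	 *   +------------------------------+
	 *   | struct smem_header           |  global TOC with 512 entries
	 *   | global heap / global part.   |
	 *   | private partitions           |  within each partition, uncached
	 *   |   ...                        |  items are allocated from the start
	 *   |                              |  and cached items from the end
	 *   +------------------------------+
	 *   | struct smem_ptable           |  last 4kB of the region
	 *   +------------------------------+
	 */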
+ * + * To synchronize allocations in the shared memory heaps a remote spinlock must + * be held - currently lock number 3 of the sfpb or tcsr is used for this on all + * platforms. + * + */ + +/* + * The version member of the smem header contains an array of versions for the + * various software components in the SoC. We verify that the boot loader + * version is a valid version as a sanity check. + */ +#define SMEM_MASTER_SBL_VERSION_INDEX 7 +#define SMEM_GLOBAL_HEAP_VERSION 11 +#define SMEM_GLOBAL_PART_VERSION 12 + +/* + * The first 8 items are only to be allocated by the boot loader while + * initializing the heap. + */ +#define SMEM_ITEM_LAST_FIXED 8 + +/* Highest accepted item number, for both global and private heaps */ +#define SMEM_ITEM_COUNT 512 + +/* Processor/host identifier for the application processor */ +#define SMEM_HOST_APPS 0 + +/* Processor/host identifier for the global partition */ +#define SMEM_GLOBAL_HOST 0xfffe + +/* Max number of processors/hosts in a system */ +#define SMEM_HOST_COUNT 20 + +/** + * struct smem_proc_comm - proc_comm communication struct (legacy) + * @command: current command to be executed + * @status: status of the currently requested command + * @params: parameters to the command + */ +struct smem_proc_comm { + __le32 command; + __le32 status; + __le32 params[2]; +}; + +/** + * struct smem_global_entry - entry to reference smem items on the heap + * @allocated: boolean to indicate if this entry is used + * @offset: offset to the allocated space + * @size: size of the allocated space, 8 byte aligned + * @aux_base: base address for the memory region used by this unit, or 0 for + * the default region. bits 0,1 are reserved + */ +struct smem_global_entry { + __le32 allocated; + __le32 offset; + __le32 size; + __le32 aux_base; /* bits 1:0 reserved */ +}; +#define AUX_BASE_MASK 0xfffffffc + +/** + * struct smem_header - header found in beginning of primary smem region + * @proc_comm: proc_comm communication interface (legacy) + * @version: array of versions for the various subsystems + * @initialized: boolean to indicate that smem is initialized + * @free_offset: index of the first unallocated byte in smem + * @available: number of bytes available for allocation + * @reserved: reserved field, must be 0 + * @toc: array of references to items + */ +struct smem_header { + struct smem_proc_comm proc_comm[4]; + __le32 version[32]; + __le32 initialized; + __le32 free_offset; + __le32 available; + __le32 reserved; + struct smem_global_entry toc[SMEM_ITEM_COUNT]; +}; + +/** + * struct smem_ptable_entry - one entry in the @smem_ptable list + * @offset: offset, within the main shared memory region, of the partition + * @size: size of the partition + * @flags: flags for the partition (currently unused) + * @host0: first processor/host with access to this partition + * @host1: second processor/host with access to this partition + * @cacheline: alignment for "cached" entries + * @reserved: reserved entries for later use + */ +struct smem_ptable_entry { + __le32 offset; + __le32 size; + __le32 flags; + __le16 host0; + __le16 host1; + __le32 cacheline; + __le32 reserved[7]; +}; + +/** + * struct smem_ptable - partition table for the private partitions + * @magic: magic number, must be SMEM_PTABLE_MAGIC + * @version: version of the partition table + * @num_entries: number of partitions in the table + * @reserved: for now reserved entries + * @entry: list of @smem_ptable_entry for the @num_entries partitions + */ +struct smem_ptable { + u8 magic[4]; + __le32 version; 
+ __le32 num_entries; + __le32 reserved[5]; + struct smem_ptable_entry entry[]; +}; + +static const u8 SMEM_PTABLE_MAGIC[] = { 0x24, 0x54, 0x4f, 0x43 }; /* "$TOC" */ + +/** + * struct smem_partition_header - header of the partitions + * @magic: magic number, must be SMEM_PART_MAGIC + * @host0: first processor/host with access to this partition + * @host1: second processor/host with access to this partition + * @size: size of the partition + * @offset_free_uncached: offset to the first free byte of uncached memory in + * this partition + * @offset_free_cached: offset to the first free byte of cached memory in this + * partition + * @reserved: for now reserved entries + */ +struct smem_partition_header { + u8 magic[4]; + __le16 host0; + __le16 host1; + __le32 size; + __le32 offset_free_uncached; + __le32 offset_free_cached; + __le32 reserved[3]; +}; + +/** + * struct smem_partition - describes smem partition + * @virt_base: starting virtual address of partition + * @phys_base: starting physical address of partition + * @cacheline: alignment for "cached" entries + * @size: size of partition + */ +struct smem_partition { + void __iomem *virt_base; + phys_addr_t phys_base; + size_t cacheline; + size_t size; +}; + +static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 }; + +/** + * struct smem_private_entry - header of each item in the private partition + * @canary: magic number, must be SMEM_PRIVATE_CANARY + * @item: identifying number of the smem item + * @size: size of the data, including padding bytes + * @padding_data: number of bytes of padding of data + * @padding_hdr: number of bytes of padding between the header and the data + * @reserved: for now reserved entry + */ +struct smem_private_entry { + u16 canary; /* bytes are the same so no swapping needed */ + __le16 item; + __le32 size; /* includes padding bytes */ + __le16 padding_data; + __le16 padding_hdr; + __le32 reserved; +}; +#define SMEM_PRIVATE_CANARY 0xa5a5 + +/** + * struct smem_info - smem region info located after the table of contents + * @magic: magic number, must be SMEM_INFO_MAGIC + * @size: size of the smem region + * @base_addr: base address of the smem region + * @reserved: for now reserved entry + * @num_items: highest accepted item number + */ +struct smem_info { + u8 magic[4]; + __le32 size; + __le32 base_addr; + __le32 reserved; + __le16 num_items; +}; + +static const u8 SMEM_INFO_MAGIC[] = { 0x53, 0x49, 0x49, 0x49 }; /* SIII */ + +/** + * struct smem_region - representation of a chunk of memory used for smem + * @aux_base: identifier of aux_mem base + * @virt_base: virtual base address of memory with this aux_mem identifier + * @size: size of the memory region + */ +struct smem_region { + phys_addr_t aux_base; + void __iomem *virt_base; + size_t size; +}; + +/** + * struct qcom_smem - device data for the smem device + * @dev: device pointer + * @hwlock: reference to a hwspinlock + * @ptable: virtual base of partition table + * @global_partition: describes for global partition when in use + * @partitions: list of partitions of current processor/host + * @item_count: max accepted item number + * @socinfo: platform device pointer + * @num_regions: number of @regions + * @regions: list of the memory regions defining the shared memory + */ +struct qcom_smem { + struct device *dev; + + struct hwspinlock *hwlock; + + u32 item_count; + struct platform_device *socinfo; + struct smem_ptable *ptable; + struct smem_partition global_partition; + struct smem_partition partitions[SMEM_HOST_COUNT]; + + unsigned 
num_regions; + struct smem_region regions[]; +}; + +static void * +phdr_to_last_uncached_entry(struct smem_partition_header *phdr) +{ + void *p = phdr; + + return p + le32_to_cpu(phdr->offset_free_uncached); +} + +static struct smem_private_entry * +phdr_to_first_cached_entry(struct smem_partition_header *phdr, + size_t cacheline) +{ + void *p = phdr; + struct smem_private_entry *e; + + return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*e), cacheline); +} + +static void * +phdr_to_last_cached_entry(struct smem_partition_header *phdr) +{ + void *p = phdr; + + return p + le32_to_cpu(phdr->offset_free_cached); +} + +static struct smem_private_entry * +phdr_to_first_uncached_entry(struct smem_partition_header *phdr) +{ + void *p = phdr; + + return p + sizeof(*phdr); +} + +static struct smem_private_entry * +uncached_entry_next(struct smem_private_entry *e) +{ + void *p = e; + + return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) + + le32_to_cpu(e->size); +} + +static struct smem_private_entry * +cached_entry_next(struct smem_private_entry *e, size_t cacheline) +{ + void *p = e; + + return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline); +} + +static void *uncached_entry_to_item(struct smem_private_entry *e) +{ + void *p = e; + + return p + sizeof(*e) + le16_to_cpu(e->padding_hdr); +} + +static void *cached_entry_to_item(struct smem_private_entry *e) +{ + void *p = e; + + return p - le32_to_cpu(e->size); +} + +/* Pointer to the one and only smem handle */ +static struct qcom_smem *__smem; + +/* Timeout (ms) for the trylock of remote spinlocks */ +#define HWSPINLOCK_TIMEOUT 1000 + +/** + * qcom_smem_is_available() - Check if SMEM is available + * + * Return: true if SMEM is available, false otherwise. + */ +bool qcom_smem_is_available(void) +{ + return !!__smem; +} +EXPORT_SYMBOL(qcom_smem_is_available); + +static int qcom_smem_alloc_private(struct qcom_smem *smem, + struct smem_partition *part, + unsigned item, + size_t size) +{ + struct smem_private_entry *hdr, *end; + struct smem_partition_header *phdr; + size_t alloc_size; + void *cached; + void *p_end; + + phdr = (struct smem_partition_header __force *)part->virt_base; + p_end = (void *)phdr + part->size; + + hdr = phdr_to_first_uncached_entry(phdr); + end = phdr_to_last_uncached_entry(phdr); + cached = phdr_to_last_cached_entry(phdr); + + if (WARN_ON((void *)end > p_end || cached > p_end)) + return -EINVAL; + + while (hdr < end) { + if (hdr->canary != SMEM_PRIVATE_CANARY) + goto bad_canary; + if (le16_to_cpu(hdr->item) == item) + return -EEXIST; + + hdr = uncached_entry_next(hdr); + } + + if (WARN_ON((void *)hdr > p_end)) + return -EINVAL; + + /* Check that we don't grow into the cached region */ + alloc_size = sizeof(*hdr) + ALIGN(size, 8); + if ((void *)hdr + alloc_size > cached) { + dev_err(smem->dev, "Out of memory\n"); + return -ENOSPC; + } + + hdr->canary = SMEM_PRIVATE_CANARY; + hdr->item = cpu_to_le16(item); + hdr->size = cpu_to_le32(ALIGN(size, 8)); + hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size); + hdr->padding_hdr = 0; + + /* + * Ensure the header is written before we advance the free offset, so + * that remote processors that does not take the remote spinlock still + * gets a consistent view of the linked list. 
+ */ + wmb(); + le32_add_cpu(&phdr->offset_free_uncached, alloc_size); + + return 0; +bad_canary: + dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n", + le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1)); + + return -EINVAL; +} + +static int qcom_smem_alloc_global(struct qcom_smem *smem, + unsigned item, + size_t size) +{ + struct smem_global_entry *entry; + struct smem_header *header; + + header = smem->regions[0].virt_base; + entry = &header->toc[item]; + if (entry->allocated) + return -EEXIST; + + size = ALIGN(size, 8); + if (WARN_ON(size > le32_to_cpu(header->available))) + return -ENOMEM; + + entry->offset = header->free_offset; + entry->size = cpu_to_le32(size); + + /* + * Ensure the header is consistent before we mark the item allocated, + * so that remote processors will get a consistent view of the item + * even though they do not take the spinlock on read. + */ + wmb(); + entry->allocated = cpu_to_le32(1); + + le32_add_cpu(&header->free_offset, size); + le32_add_cpu(&header->available, -size); + + return 0; +} + +/** + * qcom_smem_alloc() - allocate space for a smem item + * @host: remote processor id, or -1 + * @item: smem item handle + * @size: number of bytes to be allocated + * + * Allocate space for a given smem item of size @size, given that the item is + * not yet allocated. + */ +int qcom_smem_alloc(unsigned host, unsigned item, size_t size) +{ + struct smem_partition *part; + unsigned long flags; + int ret; + + if (!__smem) + return -EPROBE_DEFER; + + if (item < SMEM_ITEM_LAST_FIXED) { + dev_err(__smem->dev, + "Rejecting allocation of static entry %d\n", item); + return -EINVAL; + } + + if (WARN_ON(item >= __smem->item_count)) + return -EINVAL; + + ret = hwspin_lock_timeout_irqsave(__smem->hwlock, + HWSPINLOCK_TIMEOUT, + &flags); + if (ret) + return ret; + + if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) { + part = &__smem->partitions[host]; + ret = qcom_smem_alloc_private(__smem, part, item, size); + } else if (__smem->global_partition.virt_base) { + part = &__smem->global_partition; + ret = qcom_smem_alloc_private(__smem, part, item, size); + } else { + ret = qcom_smem_alloc_global(__smem, item, size); + } + + hwspin_unlock_irqrestore(__smem->hwlock, &flags); + + return ret; +} +EXPORT_SYMBOL_GPL(qcom_smem_alloc); + +static void *qcom_smem_get_global(struct qcom_smem *smem, + unsigned item, + size_t *size) +{ + struct smem_header *header; + struct smem_region *region; + struct smem_global_entry *entry; + u64 entry_offset; + u32 e_size; + u32 aux_base; + unsigned i; + + header = smem->regions[0].virt_base; + entry = &header->toc[item]; + if (!entry->allocated) + return ERR_PTR(-ENXIO); + + aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK; + + for (i = 0; i < smem->num_regions; i++) { + region = &smem->regions[i]; + + if ((u32)region->aux_base == aux_base || !aux_base) { + e_size = le32_to_cpu(entry->size); + entry_offset = le32_to_cpu(entry->offset); + + if (WARN_ON(e_size + entry_offset > region->size)) + return ERR_PTR(-EINVAL); + + if (size != NULL) + *size = e_size; + + return region->virt_base + entry_offset; + } + } + + return ERR_PTR(-ENOENT); +} + +static void *qcom_smem_get_private(struct qcom_smem *smem, + struct smem_partition *part, + unsigned item, + size_t *size) +{ + struct smem_private_entry *e, *end; + struct smem_partition_header *phdr; + void *item_ptr, *p_end; + u32 padding_data; + u32 e_size; + + phdr = (struct smem_partition_header __force *)part->virt_base; + p_end = (void *)phdr + part->size; + + e 
= phdr_to_first_uncached_entry(phdr); + end = phdr_to_last_uncached_entry(phdr); + + while (e < end) { + if (e->canary != SMEM_PRIVATE_CANARY) + goto invalid_canary; + + if (le16_to_cpu(e->item) == item) { + if (size != NULL) { + e_size = le32_to_cpu(e->size); + padding_data = le16_to_cpu(e->padding_data); + + if (WARN_ON(e_size > part->size || padding_data > e_size)) + return ERR_PTR(-EINVAL); + + *size = e_size - padding_data; + } + + item_ptr = uncached_entry_to_item(e); + if (WARN_ON(item_ptr > p_end)) + return ERR_PTR(-EINVAL); + + return item_ptr; + } + + e = uncached_entry_next(e); + } + + if (WARN_ON((void *)e > p_end)) + return ERR_PTR(-EINVAL); + + /* Item was not found in the uncached list, search the cached list */ + + e = phdr_to_first_cached_entry(phdr, part->cacheline); + end = phdr_to_last_cached_entry(phdr); + + if (WARN_ON((void *)e < (void *)phdr || (void *)end > p_end)) + return ERR_PTR(-EINVAL); + + while (e > end) { + if (e->canary != SMEM_PRIVATE_CANARY) + goto invalid_canary; + + if (le16_to_cpu(e->item) == item) { + if (size != NULL) { + e_size = le32_to_cpu(e->size); + padding_data = le16_to_cpu(e->padding_data); + + if (WARN_ON(e_size > part->size || padding_data > e_size)) + return ERR_PTR(-EINVAL); + + *size = e_size - padding_data; + } + + item_ptr = cached_entry_to_item(e); + if (WARN_ON(item_ptr < (void *)phdr)) + return ERR_PTR(-EINVAL); + + return item_ptr; + } + + e = cached_entry_next(e, part->cacheline); + } + + if (WARN_ON((void *)e < (void *)phdr)) + return ERR_PTR(-EINVAL); + + return ERR_PTR(-ENOENT); + +invalid_canary: + dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n", + le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1)); + + return ERR_PTR(-EINVAL); +} + +/** + * qcom_smem_get() - resolve ptr of size of a smem item + * @host: the remote processor, or -1 + * @item: smem item handle + * @size: pointer to be filled out with size of the item + * + * Looks up smem item and returns pointer to it. Size of smem + * item is returned in @size. + */ +void *qcom_smem_get(unsigned host, unsigned item, size_t *size) +{ + struct smem_partition *part; + unsigned long flags; + int ret; + void *ptr = ERR_PTR(-EPROBE_DEFER); + + if (!__smem) + return ptr; + + if (WARN_ON(item >= __smem->item_count)) + return ERR_PTR(-EINVAL); + + ret = hwspin_lock_timeout_irqsave(__smem->hwlock, + HWSPINLOCK_TIMEOUT, + &flags); + if (ret) + return ERR_PTR(ret); + + if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) { + part = &__smem->partitions[host]; + ptr = qcom_smem_get_private(__smem, part, item, size); + } else if (__smem->global_partition.virt_base) { + part = &__smem->global_partition; + ptr = qcom_smem_get_private(__smem, part, item, size); + } else { + ptr = qcom_smem_get_global(__smem, item, size); + } + + hwspin_unlock_irqrestore(__smem->hwlock, &flags); + + return ptr; + +} +EXPORT_SYMBOL_GPL(qcom_smem_get); + +/** + * qcom_smem_get_free_space() - retrieve amount of free space in a partition + * @host: the remote processor identifying a partition, or -1 + * + * To be used by smem clients as a quick way to determine if any new + * allocations has been made. 
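A hedged client-side sketch of the allocator/lookup pair above; the host and item numbers are made up, and -EEXIST from qcom_smem_alloc() is the normal "already allocated" case.

static void *example_get_item(void)
{
	const unsigned int host = 1;    /* hypothetical remote host id */
	const unsigned int item = 473;  /* hypothetical item number */
	size_t size;
	int ret;

	ret = qcom_smem_alloc(host, item, 64);
	if (ret && ret != -EEXIST)
		return ERR_PTR(ret);

	/* Returns a pointer into shared memory and fills in the item size. */
	return qcom_smem_get(host, item, &size);
}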
+ */ +int qcom_smem_get_free_space(unsigned host) +{ + struct smem_partition *part; + struct smem_partition_header *phdr; + struct smem_header *header; + unsigned ret; + + if (!__smem) + return -EPROBE_DEFER; + + if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) { + part = &__smem->partitions[host]; + phdr = part->virt_base; + ret = le32_to_cpu(phdr->offset_free_cached) - + le32_to_cpu(phdr->offset_free_uncached); + + if (ret > le32_to_cpu(part->size)) + return -EINVAL; + } else if (__smem->global_partition.virt_base) { + part = &__smem->global_partition; + phdr = part->virt_base; + ret = le32_to_cpu(phdr->offset_free_cached) - + le32_to_cpu(phdr->offset_free_uncached); + + if (ret > le32_to_cpu(part->size)) + return -EINVAL; + } else { + header = __smem->regions[0].virt_base; + ret = le32_to_cpu(header->available); + + if (ret > __smem->regions[0].size) + return -EINVAL; + } + + return ret; +} +EXPORT_SYMBOL_GPL(qcom_smem_get_free_space); + +static bool addr_in_range(void __iomem *base, size_t size, void *addr) +{ + return base && ((void __iomem *)addr >= base && (void __iomem *)addr < base + size); +} + +/** + * qcom_smem_virt_to_phys() - return the physical address associated + * with an smem item pointer (previously returned by qcom_smem_get() + * @p: the virtual address to convert + * + * Returns 0 if the pointer provided is not within any smem region. + */ +phys_addr_t qcom_smem_virt_to_phys(void *p) +{ + struct smem_partition *part; + struct smem_region *area; + u64 offset; + u32 i; + + for (i = 0; i < SMEM_HOST_COUNT; i++) { + part = &__smem->partitions[i]; + + if (addr_in_range(part->virt_base, part->size, p)) { + offset = p - part->virt_base; + + return (phys_addr_t)part->phys_base + offset; + } + } + + part = &__smem->global_partition; + + if (addr_in_range(part->virt_base, part->size, p)) { + offset = p - part->virt_base; + + return (phys_addr_t)part->phys_base + offset; + } + + for (i = 0; i < __smem->num_regions; i++) { + area = &__smem->regions[i]; + + if (addr_in_range(area->virt_base, area->size, p)) { + offset = p - area->virt_base; + + return (phys_addr_t)area->aux_base + offset; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(qcom_smem_virt_to_phys); + +/** + * qcom_smem_get_soc_id() - return the SoC ID + * @id: On success, we return the SoC ID here. + * + * Look up SoC ID from HW/SW build ID and return it. + * + * Return: 0 on success, negative errno on failure. 
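A small sketch of pairing qcom_smem_get() with qcom_smem_virt_to_phys() above, for the case where the physical address of an item has to be handed to other hardware; the names here are hypothetical.

static int example_item_phys(unsigned int host, unsigned int item, phys_addr_t *out)
{
	size_t size;
	void *vaddr;
	phys_addr_t paddr;

	vaddr = qcom_smem_get(host, item, &size);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	paddr = qcom_smem_virt_to_phys(vaddr);
	if (!paddr)
		return -EINVAL; /* not inside any smem region */

	*out = paddr;
	return 0;
}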
+ */ +int qcom_smem_get_soc_id(u32 *id) +{ + struct socinfo *info; + + info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_HW_SW_BUILD_ID, NULL); + if (IS_ERR(info)) + return PTR_ERR(info); + + *id = __le32_to_cpu(info->id); + + return 0; +} +EXPORT_SYMBOL_GPL(qcom_smem_get_soc_id); + +static int qcom_smem_get_sbl_version(struct qcom_smem *smem) +{ + struct smem_header *header; + __le32 *versions; + + header = smem->regions[0].virt_base; + versions = header->version; + + return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]); +} + +static struct smem_ptable *qcom_smem_get_ptable(struct qcom_smem *smem) +{ + struct smem_ptable *ptable; + u32 version; + + ptable = smem->ptable; + if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic))) + return ERR_PTR(-ENOENT); + + version = le32_to_cpu(ptable->version); + if (version != 1) { + dev_err(smem->dev, + "Unsupported partition header version %d\n", version); + return ERR_PTR(-EINVAL); + } + return ptable; +} + +static u32 qcom_smem_get_item_count(struct qcom_smem *smem) +{ + struct smem_ptable *ptable; + struct smem_info *info; + + ptable = qcom_smem_get_ptable(smem); + if (IS_ERR_OR_NULL(ptable)) + return SMEM_ITEM_COUNT; + + info = (struct smem_info *)&ptable->entry[ptable->num_entries]; + if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic))) + return SMEM_ITEM_COUNT; + + return le16_to_cpu(info->num_items); +} + +/* + * Validate the partition header for a partition whose partition + * table entry is supplied. Returns a pointer to its header if + * valid, or a null pointer otherwise. + */ +static struct smem_partition_header * +qcom_smem_partition_header(struct qcom_smem *smem, + struct smem_ptable_entry *entry, u16 host0, u16 host1) +{ + struct smem_partition_header *header; + u32 phys_addr; + u32 size; + + phys_addr = smem->regions[0].aux_base + le32_to_cpu(entry->offset); + header = devm_ioremap_wc(smem->dev, phys_addr, le32_to_cpu(entry->size)); + + if (!header) + return NULL; + + if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) { + dev_err(smem->dev, "bad partition magic %4ph\n", header->magic); + return NULL; + } + + if (host0 != le16_to_cpu(header->host0)) { + dev_err(smem->dev, "bad host0 (%hu != %hu)\n", + host0, le16_to_cpu(header->host0)); + return NULL; + } + if (host1 != le16_to_cpu(header->host1)) { + dev_err(smem->dev, "bad host1 (%hu != %hu)\n", + host1, le16_to_cpu(header->host1)); + return NULL; + } + + size = le32_to_cpu(header->size); + if (size != le32_to_cpu(entry->size)) { + dev_err(smem->dev, "bad partition size (%u != %u)\n", + size, le32_to_cpu(entry->size)); + return NULL; + } + + if (le32_to_cpu(header->offset_free_uncached) > size) { + dev_err(smem->dev, "bad partition free uncached (%u > %u)\n", + le32_to_cpu(header->offset_free_uncached), size); + return NULL; + } + + return header; +} + +static int qcom_smem_set_global_partition(struct qcom_smem *smem) +{ + struct smem_partition_header *header; + struct smem_ptable_entry *entry; + struct smem_ptable *ptable; + bool found = false; + int i; + + if (smem->global_partition.virt_base) { + dev_err(smem->dev, "Already found the global partition\n"); + return -EINVAL; + } + + ptable = qcom_smem_get_ptable(smem); + if (IS_ERR(ptable)) + return PTR_ERR(ptable); + + for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) { + entry = &ptable->entry[i]; + if (!le32_to_cpu(entry->offset)) + continue; + if (!le32_to_cpu(entry->size)) + continue; + + if (le16_to_cpu(entry->host0) != SMEM_GLOBAL_HOST) + continue; + + if 
(le16_to_cpu(entry->host1) == SMEM_GLOBAL_HOST) { + found = true; + break; + } + } + + if (!found) { + dev_err(smem->dev, "Missing entry for global partition\n"); + return -EINVAL; + } + + header = qcom_smem_partition_header(smem, entry, + SMEM_GLOBAL_HOST, SMEM_GLOBAL_HOST); + if (!header) + return -EINVAL; + + smem->global_partition.virt_base = (void __iomem *)header; + smem->global_partition.phys_base = smem->regions[0].aux_base + + le32_to_cpu(entry->offset); + smem->global_partition.size = le32_to_cpu(entry->size); + smem->global_partition.cacheline = le32_to_cpu(entry->cacheline); + + return 0; +} + +static int +qcom_smem_enumerate_partitions(struct qcom_smem *smem, u16 local_host) +{ + struct smem_partition_header *header; + struct smem_ptable_entry *entry; + struct smem_ptable *ptable; + u16 remote_host; + u16 host0, host1; + int i; + + ptable = qcom_smem_get_ptable(smem); + if (IS_ERR(ptable)) + return PTR_ERR(ptable); + + for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) { + entry = &ptable->entry[i]; + if (!le32_to_cpu(entry->offset)) + continue; + if (!le32_to_cpu(entry->size)) + continue; + + host0 = le16_to_cpu(entry->host0); + host1 = le16_to_cpu(entry->host1); + if (host0 == local_host) + remote_host = host1; + else if (host1 == local_host) + remote_host = host0; + else + continue; + + if (remote_host >= SMEM_HOST_COUNT) { + dev_err(smem->dev, "bad host %u\n", remote_host); + return -EINVAL; + } + + if (smem->partitions[remote_host].virt_base) { + dev_err(smem->dev, "duplicate host %u\n", remote_host); + return -EINVAL; + } + + header = qcom_smem_partition_header(smem, entry, host0, host1); + if (!header) + return -EINVAL; + + smem->partitions[remote_host].virt_base = (void __iomem *)header; + smem->partitions[remote_host].phys_base = smem->regions[0].aux_base + + le32_to_cpu(entry->offset); + smem->partitions[remote_host].size = le32_to_cpu(entry->size); + smem->partitions[remote_host].cacheline = le32_to_cpu(entry->cacheline); + } + + return 0; +} + +static int qcom_smem_map_toc(struct qcom_smem *smem, struct smem_region *region) +{ + u32 ptable_start; + + /* map starting 4K for smem header */ + region->virt_base = devm_ioremap_wc(smem->dev, region->aux_base, SZ_4K); + ptable_start = region->aux_base + region->size - SZ_4K; + /* map last 4k for toc */ + smem->ptable = devm_ioremap_wc(smem->dev, ptable_start, SZ_4K); + + if (!region->virt_base || !smem->ptable) + return -ENOMEM; + + return 0; +} + +static int qcom_smem_map_global(struct qcom_smem *smem, u32 size) +{ + u32 phys_addr; + + phys_addr = smem->regions[0].aux_base; + + smem->regions[0].size = size; + smem->regions[0].virt_base = devm_ioremap_wc(smem->dev, phys_addr, size); + + if (!smem->regions[0].virt_base) + return -ENOMEM; + + return 0; +} + +static int qcom_smem_resolve_mem(struct qcom_smem *smem, const char *name, + struct smem_region *region) +{ + struct device *dev = smem->dev; + struct device_node *np; + struct resource r; + int ret; + + np = of_parse_phandle(dev->of_node, name, 0); + if (!np) { + dev_err(dev, "No %s specified\n", name); + return -EINVAL; + } + + ret = of_address_to_resource(np, 0, &r); + of_node_put(np); + if (ret) + return ret; + + region->aux_base = r.start; + region->size = resource_size(&r); + + return 0; +} + +static int qcom_smem_probe(struct platform_device *pdev) +{ + struct smem_header *header; + struct reserved_mem *rmem; + struct qcom_smem *smem; + unsigned long flags; + int num_regions; + int hwlock_id; + u32 version; + u32 size; + int ret; + int i; + + num_regions = 1; 
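+
+	/*
+	 * Region 0 is the main SMEM area; an optional second region is
+	 * counted here when the RPM message RAM is described in devicetree.
+	 */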
+ if (of_property_present(pdev->dev.of_node, "qcom,rpm-msg-ram")) + num_regions++; + + smem = devm_kzalloc(&pdev->dev, struct_size(smem, regions, num_regions), + GFP_KERNEL); + if (!smem) + return -ENOMEM; + + smem->dev = &pdev->dev; + smem->num_regions = num_regions; + + rmem = of_reserved_mem_lookup(pdev->dev.of_node); + if (rmem) { + smem->regions[0].aux_base = rmem->base; + smem->regions[0].size = rmem->size; + } else { + /* + * Fall back to the memory-region reference, if we're not a + * reserved-memory node. + */ + ret = qcom_smem_resolve_mem(smem, "memory-region", &smem->regions[0]); + if (ret) + return ret; + } + + if (num_regions > 1) { + ret = qcom_smem_resolve_mem(smem, "qcom,rpm-msg-ram", &smem->regions[1]); + if (ret) + return ret; + } + + + ret = qcom_smem_map_toc(smem, &smem->regions[0]); + if (ret) + return ret; + + for (i = 1; i < num_regions; i++) { + smem->regions[i].virt_base = devm_ioremap_wc(&pdev->dev, + smem->regions[i].aux_base, + smem->regions[i].size); + if (!smem->regions[i].virt_base) { + dev_err(&pdev->dev, "failed to remap %pa\n", &smem->regions[i].aux_base); + return -ENOMEM; + } + } + + header = smem->regions[0].virt_base; + if (le32_to_cpu(header->initialized) != 1 || + le32_to_cpu(header->reserved)) { + dev_err(&pdev->dev, "SMEM is not initialized by SBL\n"); + return -EINVAL; + } + + hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0); + if (hwlock_id < 0) { + if (hwlock_id != -EPROBE_DEFER) + dev_err(&pdev->dev, "failed to retrieve hwlock\n"); + return hwlock_id; + } + + smem->hwlock = hwspin_lock_request_specific(hwlock_id); + if (!smem->hwlock) + return -ENXIO; + + ret = hwspin_lock_timeout_irqsave(smem->hwlock, HWSPINLOCK_TIMEOUT, &flags); + if (ret) + return ret; + size = readl_relaxed(&header->available) + readl_relaxed(&header->free_offset); + hwspin_unlock_irqrestore(smem->hwlock, &flags); + + version = qcom_smem_get_sbl_version(smem); + /* + * smem header mapping is required only in heap version scheme, so unmap + * it here. It will be remapped in qcom_smem_map_global() when whole + * partition is mapped again. 
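+ * The size read back above (free_offset + available) is the full extent
+ * of the legacy heap and is what qcom_smem_map_global() maps.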
+ */ + devm_iounmap(smem->dev, smem->regions[0].virt_base); + switch (version >> 16) { + case SMEM_GLOBAL_PART_VERSION: + ret = qcom_smem_set_global_partition(smem); + if (ret < 0) + return ret; + smem->item_count = qcom_smem_get_item_count(smem); + break; + case SMEM_GLOBAL_HEAP_VERSION: + qcom_smem_map_global(smem, size); + smem->item_count = SMEM_ITEM_COUNT; + break; + default: + dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version); + return -EINVAL; + } + + BUILD_BUG_ON(SMEM_HOST_APPS >= SMEM_HOST_COUNT); + ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS); + if (ret < 0 && ret != -ENOENT) + return ret; + + __smem = smem; + + smem->socinfo = platform_device_register_data(&pdev->dev, "qcom-socinfo", + PLATFORM_DEVID_NONE, NULL, + 0); + if (IS_ERR(smem->socinfo)) + dev_dbg(&pdev->dev, "failed to register socinfo device\n"); + + return 0; +} + +static int qcom_smem_remove(struct platform_device *pdev) +{ + platform_device_unregister(__smem->socinfo); + + hwspin_lock_free(__smem->hwlock); + __smem = NULL; + + return 0; +} + +static const struct of_device_id qcom_smem_of_match[] = { + { .compatible = "qcom,smem" }, + {} +}; +MODULE_DEVICE_TABLE(of, qcom_smem_of_match); + +static struct platform_driver qcom_smem_driver = { + .probe = qcom_smem_probe, + .remove = qcom_smem_remove, + .driver = { + .name = "qcom-smem", + .of_match_table = qcom_smem_of_match, + .suppress_bind_attrs = true, + }, +}; + +static int __init qcom_smem_init(void) +{ + return platform_driver_register(&qcom_smem_driver); +} +arch_initcall(qcom_smem_init); + +static void __exit qcom_smem_exit(void) +{ + platform_driver_unregister(&qcom_smem_driver); +} +module_exit(qcom_smem_exit) + +MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>"); +MODULE_DESCRIPTION("Qualcomm Shared Memory Manager"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/smem_state.c b/drivers/soc/qcom/smem_state.c new file mode 100644 index 0000000000..e848cc9a3c --- /dev/null +++ b/drivers/soc/qcom/smem_state.c @@ -0,0 +1,230 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015, Sony Mobile Communications Inc. + * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. + */ +#include <linux/device.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/slab.h> +#include <linux/soc/qcom/smem_state.h> + +static LIST_HEAD(smem_states); +static DEFINE_MUTEX(list_lock); + +/** + * struct qcom_smem_state - state context + * @refcount: refcount for the state + * @orphan: boolean indicator that this state has been unregistered + * @list: entry in smem_states list + * @of_node: of_node to use for matching the state in DT + * @priv: implementation private data + * @ops: ops for the state + */ +struct qcom_smem_state { + struct kref refcount; + bool orphan; + + struct list_head list; + struct device_node *of_node; + + void *priv; + + struct qcom_smem_state_ops ops; +}; + +/** + * qcom_smem_state_update_bits() - update the masked bits in state with value + * @state: state handle acquired by calling qcom_smem_state_get() + * @mask: bit mask for the change + * @value: new value for the masked bits + * + * Returns 0 on success, otherwise negative errno. 
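+ *
+ * A minimal usage sketch (the "stop" state name is illustrative only):
+ *
+ *	state = qcom_smem_state_get(dev, "stop", &bit);
+ *	if (!IS_ERR(state))
+ *		qcom_smem_state_update_bits(state, BIT(bit), BIT(bit));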
+ */ +int qcom_smem_state_update_bits(struct qcom_smem_state *state, + u32 mask, + u32 value) +{ + if (state->orphan) + return -ENXIO; + + if (!state->ops.update_bits) + return -ENOTSUPP; + + return state->ops.update_bits(state->priv, mask, value); +} +EXPORT_SYMBOL_GPL(qcom_smem_state_update_bits); + +static struct qcom_smem_state *of_node_to_state(struct device_node *np) +{ + struct qcom_smem_state *state; + + mutex_lock(&list_lock); + + list_for_each_entry(state, &smem_states, list) { + if (state->of_node == np) { + kref_get(&state->refcount); + goto unlock; + } + } + state = ERR_PTR(-EPROBE_DEFER); + +unlock: + mutex_unlock(&list_lock); + + return state; +} + +/** + * qcom_smem_state_get() - acquire handle to a state + * @dev: client device pointer + * @con_id: name of the state to lookup + * @bit: flags from the state reference, indicating which bit's affected + * + * Returns handle to the state, or ERR_PTR(). qcom_smem_state_put() must be + * called to release the returned state handle. + */ +struct qcom_smem_state *qcom_smem_state_get(struct device *dev, + const char *con_id, + unsigned *bit) +{ + struct qcom_smem_state *state; + struct of_phandle_args args; + int index = 0; + int ret; + + if (con_id) { + index = of_property_match_string(dev->of_node, + "qcom,smem-state-names", + con_id); + if (index < 0) { + dev_err(dev, "missing qcom,smem-state-names\n"); + return ERR_PTR(index); + } + } + + ret = of_parse_phandle_with_args(dev->of_node, + "qcom,smem-states", + "#qcom,smem-state-cells", + index, + &args); + if (ret) { + dev_err(dev, "failed to parse qcom,smem-states property\n"); + return ERR_PTR(ret); + } + + if (args.args_count != 1) { + dev_err(dev, "invalid #qcom,smem-state-cells\n"); + return ERR_PTR(-EINVAL); + } + + state = of_node_to_state(args.np); + if (IS_ERR(state)) + goto put; + + *bit = args.args[0]; + +put: + of_node_put(args.np); + return state; +} +EXPORT_SYMBOL_GPL(qcom_smem_state_get); + +static void qcom_smem_state_release(struct kref *ref) +{ + struct qcom_smem_state *state = container_of(ref, struct qcom_smem_state, refcount); + + list_del(&state->list); + of_node_put(state->of_node); + kfree(state); +} + +/** + * qcom_smem_state_put() - release state handle + * @state: state handle to be released + */ +void qcom_smem_state_put(struct qcom_smem_state *state) +{ + mutex_lock(&list_lock); + kref_put(&state->refcount, qcom_smem_state_release); + mutex_unlock(&list_lock); +} +EXPORT_SYMBOL_GPL(qcom_smem_state_put); + +static void devm_qcom_smem_state_release(struct device *dev, void *res) +{ + qcom_smem_state_put(*(struct qcom_smem_state **)res); +} + +/** + * devm_qcom_smem_state_get() - acquire handle to a devres managed state + * @dev: client device pointer + * @con_id: name of the state to lookup + * @bit: flags from the state reference, indicating which bit's affected + * + * Returns handle to the state, or ERR_PTR(). qcom_smem_state_put() is called + * automatically when @dev is removed. 
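+ *
+ * This is typically the variant client drivers use in their probe path,
+ * e.g. (illustrative sketch):
+ *
+ *	state = devm_qcom_smem_state_get(&pdev->dev, "stop", &bit);
+ *	if (IS_ERR(state))
+ *		return PTR_ERR(state);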
+ */ +struct qcom_smem_state *devm_qcom_smem_state_get(struct device *dev, + const char *con_id, + unsigned *bit) +{ + struct qcom_smem_state **ptr, *state; + + ptr = devres_alloc(devm_qcom_smem_state_release, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return ERR_PTR(-ENOMEM); + + state = qcom_smem_state_get(dev, con_id, bit); + if (!IS_ERR(state)) { + *ptr = state; + devres_add(dev, ptr); + } else { + devres_free(ptr); + } + + return state; +} +EXPORT_SYMBOL_GPL(devm_qcom_smem_state_get); + +/** + * qcom_smem_state_register() - register a new state + * @of_node: of_node used for matching client lookups + * @ops: implementation ops + * @priv: implementation specific private data + */ +struct qcom_smem_state *qcom_smem_state_register(struct device_node *of_node, + const struct qcom_smem_state_ops *ops, + void *priv) +{ + struct qcom_smem_state *state; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return ERR_PTR(-ENOMEM); + + kref_init(&state->refcount); + + state->of_node = of_node_get(of_node); + state->ops = *ops; + state->priv = priv; + + mutex_lock(&list_lock); + list_add(&state->list, &smem_states); + mutex_unlock(&list_lock); + + return state; +} +EXPORT_SYMBOL_GPL(qcom_smem_state_register); + +/** + * qcom_smem_state_unregister() - unregister a registered state + * @state: state handle to be unregistered + */ +void qcom_smem_state_unregister(struct qcom_smem_state *state) +{ + state->orphan = true; + qcom_smem_state_put(state); +} +EXPORT_SYMBOL_GPL(qcom_smem_state_unregister); diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c new file mode 100644 index 0000000000..e9c8030d50 --- /dev/null +++ b/drivers/soc/qcom/smp2p.c @@ -0,0 +1,700 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015, Sony Mobile Communications AB. + * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. + */ + +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/mailbox_client.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/pm_wakeirq.h> +#include <linux/regmap.h> +#include <linux/soc/qcom/smem.h> +#include <linux/soc/qcom/smem_state.h> +#include <linux/spinlock.h> + +/* + * The Shared Memory Point to Point (SMP2P) protocol facilitates communication + * of a single 32-bit value between two processors. Each value has a single + * writer (the local side) and a single reader (the remote side). Values are + * uniquely identified in the system by the directed edge (local processor ID + * to remote processor ID) and a string identifier. + * + * Each processor is responsible for creating the outgoing SMEM items and each + * item is writable by the local processor and readable by the remote + * processor. By using two separate SMEM items that are single-reader and + * single-writer, SMP2P does not require any remote locking mechanisms. + * + * The driver uses the Linux GPIO and interrupt framework to expose a virtual + * GPIO for each outbound entry and a virtual interrupt controller for each + * inbound entry. 
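+ *
+ * Rough layout of one smem item (see struct smp2p_smem_item below):
+ *
+ *	[ header: magic, version, features, pids, entry counts, flags ]
+ *	[ entry  0: 16-byte name, 32-bit value                        ]
+ *	[ ...                                                         ]
+ *	[ entry 15: 16-byte name, 32-bit value                        ]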
+ */ + +#define SMP2P_MAX_ENTRY 16 +#define SMP2P_MAX_ENTRY_NAME 16 + +#define SMP2P_FEATURE_SSR_ACK 0x1 +#define SMP2P_FLAGS_RESTART_DONE_BIT 0 +#define SMP2P_FLAGS_RESTART_ACK_BIT 1 + +#define SMP2P_MAGIC 0x504d5324 +#define SMP2P_ALL_FEATURES SMP2P_FEATURE_SSR_ACK + +/** + * struct smp2p_smem_item - in memory communication structure + * @magic: magic number + * @version: version - must be 1 + * @features: features flag - currently unused + * @local_pid: processor id of sending end + * @remote_pid: processor id of receiving end + * @total_entries: number of entries - always SMP2P_MAX_ENTRY + * @valid_entries: number of allocated entries + * @flags: + * @entries: individual communication entries + * @name: name of the entry + * @value: content of the entry + */ +struct smp2p_smem_item { + u32 magic; + u8 version; + unsigned features:24; + u16 local_pid; + u16 remote_pid; + u16 total_entries; + u16 valid_entries; + u32 flags; + + struct { + u8 name[SMP2P_MAX_ENTRY_NAME]; + u32 value; + } entries[SMP2P_MAX_ENTRY]; +} __packed; + +/** + * struct smp2p_entry - driver context matching one entry + * @node: list entry to keep track of allocated entries + * @smp2p: reference to the device driver context + * @name: name of the entry, to match against smp2p_smem_item + * @value: pointer to smp2p_smem_item entry value + * @last_value: last handled value + * @domain: irq_domain for inbound entries + * @irq_enabled:bitmap to track enabled irq bits + * @irq_rising: bitmap to mark irq bits for rising detection + * @irq_falling:bitmap to mark irq bits for falling detection + * @state: smem state handle + * @lock: spinlock to protect read-modify-write of the value + */ +struct smp2p_entry { + struct list_head node; + struct qcom_smp2p *smp2p; + + const char *name; + u32 *value; + u32 last_value; + + struct irq_domain *domain; + DECLARE_BITMAP(irq_enabled, 32); + DECLARE_BITMAP(irq_rising, 32); + DECLARE_BITMAP(irq_falling, 32); + + struct qcom_smem_state *state; + + spinlock_t lock; +}; + +#define SMP2P_INBOUND 0 +#define SMP2P_OUTBOUND 1 + +/** + * struct qcom_smp2p - device driver context + * @dev: device driver handle + * @in: pointer to the inbound smem item + * @out: pointer to the outbound smem item + * @smem_items: ids of the two smem items + * @valid_entries: already scanned inbound entries + * @ssr_ack_enabled: SMP2P_FEATURE_SSR_ACK feature is supported and was enabled + * @ssr_ack: current cached state of the local ack bit + * @negotiation_done: whether negotiating finished + * @local_pid: processor id of the inbound edge + * @remote_pid: processor id of the outbound edge + * @ipc_regmap: regmap for the outbound ipc + * @ipc_offset: offset within the regmap + * @ipc_bit: bit in regmap@offset to kick to signal remote processor + * @mbox_client: mailbox client handle + * @mbox_chan: apcs ipc mailbox channel handle + * @inbound: list of inbound entries + * @outbound: list of outbound entries + */ +struct qcom_smp2p { + struct device *dev; + + struct smp2p_smem_item *in; + struct smp2p_smem_item *out; + + unsigned smem_items[SMP2P_OUTBOUND + 1]; + + unsigned valid_entries; + + bool ssr_ack_enabled; + bool ssr_ack; + bool negotiation_done; + + unsigned local_pid; + unsigned remote_pid; + + struct regmap *ipc_regmap; + int ipc_offset; + int ipc_bit; + + struct mbox_client mbox_client; + struct mbox_chan *mbox_chan; + + struct list_head inbound; + struct list_head outbound; +}; + +static void qcom_smp2p_kick(struct qcom_smp2p *smp2p) +{ + /* Make sure any updated data is written before the kick */ + 
wmb(); + + if (smp2p->mbox_chan) { + mbox_send_message(smp2p->mbox_chan, NULL); + mbox_client_txdone(smp2p->mbox_chan, 0); + } else { + regmap_write(smp2p->ipc_regmap, smp2p->ipc_offset, BIT(smp2p->ipc_bit)); + } +} + +static bool qcom_smp2p_check_ssr(struct qcom_smp2p *smp2p) +{ + struct smp2p_smem_item *in = smp2p->in; + bool restart; + + if (!smp2p->ssr_ack_enabled) + return false; + + restart = in->flags & BIT(SMP2P_FLAGS_RESTART_DONE_BIT); + + return restart != smp2p->ssr_ack; +} + +static void qcom_smp2p_do_ssr_ack(struct qcom_smp2p *smp2p) +{ + struct smp2p_smem_item *out = smp2p->out; + u32 val; + + smp2p->ssr_ack = !smp2p->ssr_ack; + + val = out->flags & ~BIT(SMP2P_FLAGS_RESTART_ACK_BIT); + if (smp2p->ssr_ack) + val |= BIT(SMP2P_FLAGS_RESTART_ACK_BIT); + out->flags = val; + + qcom_smp2p_kick(smp2p); +} + +static void qcom_smp2p_negotiate(struct qcom_smp2p *smp2p) +{ + struct smp2p_smem_item *out = smp2p->out; + struct smp2p_smem_item *in = smp2p->in; + + if (in->version == out->version) { + out->features &= in->features; + + if (out->features & SMP2P_FEATURE_SSR_ACK) + smp2p->ssr_ack_enabled = true; + + smp2p->negotiation_done = true; + } +} + +static void qcom_smp2p_notify_in(struct qcom_smp2p *smp2p) +{ + struct smp2p_smem_item *in; + struct smp2p_entry *entry; + int irq_pin; + u32 status; + char buf[SMP2P_MAX_ENTRY_NAME]; + u32 val; + int i; + + in = smp2p->in; + + /* Match newly created entries */ + for (i = smp2p->valid_entries; i < in->valid_entries; i++) { + list_for_each_entry(entry, &smp2p->inbound, node) { + memcpy(buf, in->entries[i].name, sizeof(buf)); + if (!strcmp(buf, entry->name)) { + entry->value = &in->entries[i].value; + break; + } + } + } + smp2p->valid_entries = i; + + /* Fire interrupts based on any value changes */ + list_for_each_entry(entry, &smp2p->inbound, node) { + /* Ignore entries not yet allocated by the remote side */ + if (!entry->value) + continue; + + val = readl(entry->value); + + status = val ^ entry->last_value; + entry->last_value = val; + + /* No changes of this entry? */ + if (!status) + continue; + + for_each_set_bit(i, entry->irq_enabled, 32) { + if (!(status & BIT(i))) + continue; + + if ((val & BIT(i) && test_bit(i, entry->irq_rising)) || + (!(val & BIT(i)) && test_bit(i, entry->irq_falling))) { + irq_pin = irq_find_mapping(entry->domain, i); + handle_nested_irq(irq_pin); + } + } + } +} + +/** + * qcom_smp2p_intr() - interrupt handler for incoming notifications + * @irq: unused + * @data: smp2p driver context + * + * Handle notifications from the remote side to handle newly allocated entries + * or any changes to the state bits of existing entries. 
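+ *
+ * On the first interrupt the inbound smem item may not exist yet, so it is
+ * looked up here and version/feature negotiation is completed before any
+ * per-entry notifications are delivered.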
+ */ +static irqreturn_t qcom_smp2p_intr(int irq, void *data) +{ + struct smp2p_smem_item *in; + struct qcom_smp2p *smp2p = data; + unsigned int smem_id = smp2p->smem_items[SMP2P_INBOUND]; + unsigned int pid = smp2p->remote_pid; + bool ack_restart; + size_t size; + + in = smp2p->in; + + /* Acquire smem item, if not already found */ + if (!in) { + in = qcom_smem_get(pid, smem_id, &size); + if (IS_ERR(in)) { + dev_err(smp2p->dev, + "Unable to acquire remote smp2p item\n"); + goto out; + } + + smp2p->in = in; + } + + if (!smp2p->negotiation_done) + qcom_smp2p_negotiate(smp2p); + + if (smp2p->negotiation_done) { + ack_restart = qcom_smp2p_check_ssr(smp2p); + qcom_smp2p_notify_in(smp2p); + + if (ack_restart) + qcom_smp2p_do_ssr_ack(smp2p); + } + +out: + return IRQ_HANDLED; +} + +static void smp2p_mask_irq(struct irq_data *irqd) +{ + struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd); + irq_hw_number_t irq = irqd_to_hwirq(irqd); + + clear_bit(irq, entry->irq_enabled); +} + +static void smp2p_unmask_irq(struct irq_data *irqd) +{ + struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd); + irq_hw_number_t irq = irqd_to_hwirq(irqd); + + set_bit(irq, entry->irq_enabled); +} + +static int smp2p_set_irq_type(struct irq_data *irqd, unsigned int type) +{ + struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd); + irq_hw_number_t irq = irqd_to_hwirq(irqd); + + if (!(type & IRQ_TYPE_EDGE_BOTH)) + return -EINVAL; + + if (type & IRQ_TYPE_EDGE_RISING) + set_bit(irq, entry->irq_rising); + else + clear_bit(irq, entry->irq_rising); + + if (type & IRQ_TYPE_EDGE_FALLING) + set_bit(irq, entry->irq_falling); + else + clear_bit(irq, entry->irq_falling); + + return 0; +} + +static struct irq_chip smp2p_irq_chip = { + .name = "smp2p", + .irq_mask = smp2p_mask_irq, + .irq_unmask = smp2p_unmask_irq, + .irq_set_type = smp2p_set_irq_type, +}; + +static int smp2p_irq_map(struct irq_domain *d, + unsigned int irq, + irq_hw_number_t hw) +{ + struct smp2p_entry *entry = d->host_data; + + irq_set_chip_and_handler(irq, &smp2p_irq_chip, handle_level_irq); + irq_set_chip_data(irq, entry); + irq_set_nested_thread(irq, 1); + irq_set_noprobe(irq); + + return 0; +} + +static const struct irq_domain_ops smp2p_irq_ops = { + .map = smp2p_irq_map, + .xlate = irq_domain_xlate_twocell, +}; + +static int qcom_smp2p_inbound_entry(struct qcom_smp2p *smp2p, + struct smp2p_entry *entry, + struct device_node *node) +{ + entry->domain = irq_domain_add_linear(node, 32, &smp2p_irq_ops, entry); + if (!entry->domain) { + dev_err(smp2p->dev, "failed to add irq_domain\n"); + return -ENOMEM; + } + + return 0; +} + +static int smp2p_update_bits(void *data, u32 mask, u32 value) +{ + struct smp2p_entry *entry = data; + unsigned long flags; + u32 orig; + u32 val; + + spin_lock_irqsave(&entry->lock, flags); + val = orig = readl(entry->value); + val &= ~mask; + val |= value; + writel(val, entry->value); + spin_unlock_irqrestore(&entry->lock, flags); + + if (val != orig) + qcom_smp2p_kick(entry->smp2p); + + return 0; +} + +static const struct qcom_smem_state_ops smp2p_state_ops = { + .update_bits = smp2p_update_bits, +}; + +static int qcom_smp2p_outbound_entry(struct qcom_smp2p *smp2p, + struct smp2p_entry *entry, + struct device_node *node) +{ + struct smp2p_smem_item *out = smp2p->out; + char buf[SMP2P_MAX_ENTRY_NAME] = {}; + + /* Allocate an entry from the smem item */ + strscpy(buf, entry->name, SMP2P_MAX_ENTRY_NAME); + memcpy(out->entries[out->valid_entries].name, buf, SMP2P_MAX_ENTRY_NAME); + + /* Make the logical entry reference the 
physical value */ + entry->value = &out->entries[out->valid_entries].value; + + out->valid_entries++; + + entry->state = qcom_smem_state_register(node, &smp2p_state_ops, entry); + if (IS_ERR(entry->state)) { + dev_err(smp2p->dev, "failed to register qcom_smem_state\n"); + return PTR_ERR(entry->state); + } + + return 0; +} + +static int qcom_smp2p_alloc_outbound_item(struct qcom_smp2p *smp2p) +{ + struct smp2p_smem_item *out; + unsigned smem_id = smp2p->smem_items[SMP2P_OUTBOUND]; + unsigned pid = smp2p->remote_pid; + int ret; + + ret = qcom_smem_alloc(pid, smem_id, sizeof(*out)); + if (ret < 0 && ret != -EEXIST) { + if (ret != -EPROBE_DEFER) + dev_err(smp2p->dev, + "unable to allocate local smp2p item\n"); + return ret; + } + + out = qcom_smem_get(pid, smem_id, NULL); + if (IS_ERR(out)) { + dev_err(smp2p->dev, "Unable to acquire local smp2p item\n"); + return PTR_ERR(out); + } + + memset(out, 0, sizeof(*out)); + out->magic = SMP2P_MAGIC; + out->local_pid = smp2p->local_pid; + out->remote_pid = smp2p->remote_pid; + out->total_entries = SMP2P_MAX_ENTRY; + out->valid_entries = 0; + out->features = SMP2P_ALL_FEATURES; + + /* + * Make sure the rest of the header is written before we validate the + * item by writing a valid version number. + */ + wmb(); + out->version = 1; + + qcom_smp2p_kick(smp2p); + + smp2p->out = out; + + return 0; +} + +static int smp2p_parse_ipc(struct qcom_smp2p *smp2p) +{ + struct device_node *syscon; + struct device *dev = smp2p->dev; + const char *key; + int ret; + + syscon = of_parse_phandle(dev->of_node, "qcom,ipc", 0); + if (!syscon) { + dev_err(dev, "no qcom,ipc node\n"); + return -ENODEV; + } + + smp2p->ipc_regmap = syscon_node_to_regmap(syscon); + of_node_put(syscon); + if (IS_ERR(smp2p->ipc_regmap)) + return PTR_ERR(smp2p->ipc_regmap); + + key = "qcom,ipc"; + ret = of_property_read_u32_index(dev->of_node, key, 1, &smp2p->ipc_offset); + if (ret < 0) { + dev_err(dev, "no offset in %s\n", key); + return -EINVAL; + } + + ret = of_property_read_u32_index(dev->of_node, key, 2, &smp2p->ipc_bit); + if (ret < 0) { + dev_err(dev, "no bit in %s\n", key); + return -EINVAL; + } + + return 0; +} + +static int qcom_smp2p_probe(struct platform_device *pdev) +{ + struct smp2p_entry *entry; + struct device_node *node; + struct qcom_smp2p *smp2p; + const char *key; + int irq; + int ret; + + smp2p = devm_kzalloc(&pdev->dev, sizeof(*smp2p), GFP_KERNEL); + if (!smp2p) + return -ENOMEM; + + smp2p->dev = &pdev->dev; + INIT_LIST_HEAD(&smp2p->inbound); + INIT_LIST_HEAD(&smp2p->outbound); + + platform_set_drvdata(pdev, smp2p); + + key = "qcom,smem"; + ret = of_property_read_u32_array(pdev->dev.of_node, key, + smp2p->smem_items, 2); + if (ret) + return ret; + + key = "qcom,local-pid"; + ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->local_pid); + if (ret) + goto report_read_failure; + + key = "qcom,remote-pid"; + ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->remote_pid); + if (ret) + goto report_read_failure; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + smp2p->mbox_client.dev = &pdev->dev; + smp2p->mbox_client.knows_txdone = true; + smp2p->mbox_chan = mbox_request_channel(&smp2p->mbox_client, 0); + if (IS_ERR(smp2p->mbox_chan)) { + if (PTR_ERR(smp2p->mbox_chan) != -ENODEV) + return PTR_ERR(smp2p->mbox_chan); + + smp2p->mbox_chan = NULL; + + ret = smp2p_parse_ipc(smp2p); + if (ret) + return ret; + } + + ret = qcom_smp2p_alloc_outbound_item(smp2p); + if (ret < 0) + goto release_mbox; + + for_each_available_child_of_node(pdev->dev.of_node, 
node) { + entry = devm_kzalloc(&pdev->dev, sizeof(*entry), GFP_KERNEL); + if (!entry) { + ret = -ENOMEM; + of_node_put(node); + goto unwind_interfaces; + } + + entry->smp2p = smp2p; + spin_lock_init(&entry->lock); + + ret = of_property_read_string(node, "qcom,entry-name", &entry->name); + if (ret < 0) { + of_node_put(node); + goto unwind_interfaces; + } + + if (of_property_read_bool(node, "interrupt-controller")) { + ret = qcom_smp2p_inbound_entry(smp2p, entry, node); + if (ret < 0) { + of_node_put(node); + goto unwind_interfaces; + } + + list_add(&entry->node, &smp2p->inbound); + } else { + ret = qcom_smp2p_outbound_entry(smp2p, entry, node); + if (ret < 0) { + of_node_put(node); + goto unwind_interfaces; + } + + list_add(&entry->node, &smp2p->outbound); + } + } + + /* Kick the outgoing edge after allocating entries */ + qcom_smp2p_kick(smp2p); + + ret = devm_request_threaded_irq(&pdev->dev, irq, + NULL, qcom_smp2p_intr, + IRQF_ONESHOT, + "smp2p", (void *)smp2p); + if (ret) { + dev_err(&pdev->dev, "failed to request interrupt\n"); + goto unwind_interfaces; + } + + /* + * Treat smp2p interrupt as wakeup source, but keep it disabled + * by default. User space can decide enabling it depending on its + * use cases. For example if remoteproc crashes and device wants + * to handle it immediatedly (e.g. to not miss phone calls) it can + * enable wakeup source from user space, while other devices which + * do not have proper autosleep feature may want to handle it with + * other wakeup events (e.g. Power button) instead waking up immediately. + */ + device_set_wakeup_capable(&pdev->dev, true); + + ret = dev_pm_set_wake_irq(&pdev->dev, irq); + if (ret) + goto set_wake_irq_fail; + + return 0; + +set_wake_irq_fail: + dev_pm_clear_wake_irq(&pdev->dev); + +unwind_interfaces: + list_for_each_entry(entry, &smp2p->inbound, node) + irq_domain_remove(entry->domain); + + list_for_each_entry(entry, &smp2p->outbound, node) + qcom_smem_state_unregister(entry->state); + + smp2p->out->valid_entries = 0; + +release_mbox: + mbox_free_channel(smp2p->mbox_chan); + + return ret; + +report_read_failure: + dev_err(&pdev->dev, "failed to read %s\n", key); + return -EINVAL; +} + +static int qcom_smp2p_remove(struct platform_device *pdev) +{ + struct qcom_smp2p *smp2p = platform_get_drvdata(pdev); + struct smp2p_entry *entry; + + dev_pm_clear_wake_irq(&pdev->dev); + + list_for_each_entry(entry, &smp2p->inbound, node) + irq_domain_remove(entry->domain); + + list_for_each_entry(entry, &smp2p->outbound, node) + qcom_smem_state_unregister(entry->state); + + mbox_free_channel(smp2p->mbox_chan); + + smp2p->out->valid_entries = 0; + + return 0; +} + +static const struct of_device_id qcom_smp2p_of_match[] = { + { .compatible = "qcom,smp2p" }, + {} +}; +MODULE_DEVICE_TABLE(of, qcom_smp2p_of_match); + +static struct platform_driver qcom_smp2p_driver = { + .probe = qcom_smp2p_probe, + .remove = qcom_smp2p_remove, + .driver = { + .name = "qcom_smp2p", + .of_match_table = qcom_smp2p_of_match, + }, +}; +module_platform_driver(qcom_smp2p_driver); + +MODULE_DESCRIPTION("Qualcomm Shared Memory Point to Point driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c new file mode 100644 index 0000000000..c58cfff648 --- /dev/null +++ b/drivers/soc/qcom/smsm.c @@ -0,0 +1,647 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015, Sony Mobile Communications Inc. + * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. 
+ */ + +#include <linux/interrupt.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of_irq.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> +#include <linux/regmap.h> +#include <linux/soc/qcom/smem.h> +#include <linux/soc/qcom/smem_state.h> + +/* + * This driver implements the Qualcomm Shared Memory State Machine, a mechanism + * for communicating single bit state information to remote processors. + * + * The implementation is based on two sections of shared memory; the first + * holding the state bits and the second holding a matrix of subscription bits. + * + * The state bits are structured in entries of 32 bits, each belonging to one + * system in the SoC. The entry belonging to the local system is considered + * read-write, while the rest should be considered read-only. + * + * The subscription matrix consists of N bitmaps per entry, denoting interest + * in updates of the entry for each of the N hosts. Upon updating a state bit + * each host's subscription bitmap should be queried and the remote system + * should be interrupted if they request so. + * + * The subscription matrix is laid out in entry-major order: + * entry0: [host0 ... hostN] + * . + * . + * entryM: [host0 ... hostN] + * + * A third, optional, shared memory region might contain information regarding + * the number of entries in the state bitmap as well as number of columns in + * the subscription matrix. + */ + +/* + * Shared memory identifiers, used to acquire handles to respective memory + * region. + */ +#define SMEM_SMSM_SHARED_STATE 85 +#define SMEM_SMSM_CPU_INTR_MASK 333 +#define SMEM_SMSM_SIZE_INFO 419 + +/* + * Default sizes, in case SMEM_SMSM_SIZE_INFO is not found. + */ +#define SMSM_DEFAULT_NUM_ENTRIES 8 +#define SMSM_DEFAULT_NUM_HOSTS 3 + +struct smsm_entry; +struct smsm_host; + +/** + * struct qcom_smsm - smsm driver context + * @dev: smsm device pointer + * @local_host: column in the subscription matrix representing this system + * @num_hosts: number of columns in the subscription matrix + * @num_entries: number of entries in the state map and rows in the subscription + * matrix + * @local_state: pointer to the local processor's state bits + * @subscription: pointer to local processor's row in subscription matrix + * @state: smem state handle + * @lock: spinlock for read-modify-write of the outgoing state + * @entries: context for each of the entries + * @hosts: context for each of the hosts + */ +struct qcom_smsm { + struct device *dev; + + u32 local_host; + + u32 num_hosts; + u32 num_entries; + + u32 *local_state; + u32 *subscription; + struct qcom_smem_state *state; + + spinlock_t lock; + + struct smsm_entry *entries; + struct smsm_host *hosts; +}; + +/** + * struct smsm_entry - per remote processor entry context + * @smsm: back-reference to driver context + * @domain: IRQ domain for this entry, if representing a remote system + * @irq_enabled: bitmap of which state bits IRQs are enabled + * @irq_rising: bitmap tracking if rising bits should be propagated + * @irq_falling: bitmap tracking if falling bits should be propagated + * @last_value: snapshot of state bits last time the interrupts where propagated + * @remote_state: pointer to this entry's state bits + * @subscription: pointer to a row in the subscription matrix representing this + * entry + */ +struct smsm_entry { + struct qcom_smsm *smsm; + + struct irq_domain *domain; + DECLARE_BITMAP(irq_enabled, 32); + DECLARE_BITMAP(irq_rising, 32); + DECLARE_BITMAP(irq_falling, 32); + unsigned long 
last_value; + + u32 *remote_state; + u32 *subscription; +}; + +/** + * struct smsm_host - representation of a remote host + * @ipc_regmap: regmap for outgoing interrupt + * @ipc_offset: offset in @ipc_regmap for outgoing interrupt + * @ipc_bit: bit in @ipc_regmap + @ipc_offset for outgoing interrupt + */ +struct smsm_host { + struct regmap *ipc_regmap; + int ipc_offset; + int ipc_bit; +}; + +/** + * smsm_update_bits() - change bit in outgoing entry and inform subscribers + * @data: smsm context pointer + * @mask: value mask + * @value: new value + * + * Used to set and clear the bits in the outgoing/local entry and inform + * subscribers about the change. + */ +static int smsm_update_bits(void *data, u32 mask, u32 value) +{ + struct qcom_smsm *smsm = data; + struct smsm_host *hostp; + unsigned long flags; + u32 changes; + u32 host; + u32 orig; + u32 val; + + spin_lock_irqsave(&smsm->lock, flags); + + /* Update the entry */ + val = orig = readl(smsm->local_state); + val &= ~mask; + val |= value; + + /* Don't signal if we didn't change the value */ + changes = val ^ orig; + if (!changes) { + spin_unlock_irqrestore(&smsm->lock, flags); + goto done; + } + + /* Write out the new value */ + writel(val, smsm->local_state); + spin_unlock_irqrestore(&smsm->lock, flags); + + /* Make sure the value update is ordered before any kicks */ + wmb(); + + /* Iterate over all hosts to check whom wants a kick */ + for (host = 0; host < smsm->num_hosts; host++) { + hostp = &smsm->hosts[host]; + + val = readl(smsm->subscription + host); + if (val & changes && hostp->ipc_regmap) { + regmap_write(hostp->ipc_regmap, + hostp->ipc_offset, + BIT(hostp->ipc_bit)); + } + } + +done: + return 0; +} + +static const struct qcom_smem_state_ops smsm_state_ops = { + .update_bits = smsm_update_bits, +}; + +/** + * smsm_intr() - cascading IRQ handler for SMSM + * @irq: unused + * @data: entry related to this IRQ + * + * This function cascades an incoming interrupt from a remote system, based on + * the state bits and configuration. + */ +static irqreturn_t smsm_intr(int irq, void *data) +{ + struct smsm_entry *entry = data; + unsigned i; + int irq_pin; + u32 changed; + u32 val; + + val = readl(entry->remote_state); + changed = val ^ xchg(&entry->last_value, val); + + for_each_set_bit(i, entry->irq_enabled, 32) { + if (!(changed & BIT(i))) + continue; + + if (val & BIT(i)) { + if (test_bit(i, entry->irq_rising)) { + irq_pin = irq_find_mapping(entry->domain, i); + handle_nested_irq(irq_pin); + } + } else { + if (test_bit(i, entry->irq_falling)) { + irq_pin = irq_find_mapping(entry->domain, i); + handle_nested_irq(irq_pin); + } + } + } + + return IRQ_HANDLED; +} + +/** + * smsm_mask_irq() - un-subscribe from cascades of IRQs of a certain staus bit + * @irqd: IRQ handle to be masked + * + * This un-subscribes the local CPU from interrupts upon changes to the defines + * status bit. The bit is also cleared from cascading. 
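+ * If a subscription matrix is present, the local host's interest bit for
+ * this entry is cleared as well, so the remote side stops signalling us
+ * for changes to it.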
+ */ +static void smsm_mask_irq(struct irq_data *irqd) +{ + struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd); + irq_hw_number_t irq = irqd_to_hwirq(irqd); + struct qcom_smsm *smsm = entry->smsm; + u32 val; + + if (entry->subscription) { + val = readl(entry->subscription + smsm->local_host); + val &= ~BIT(irq); + writel(val, entry->subscription + smsm->local_host); + } + + clear_bit(irq, entry->irq_enabled); +} + +/** + * smsm_unmask_irq() - subscribe to cascades of IRQs of a certain status bit + * @irqd: IRQ handle to be unmasked + * + * This subscribes the local CPU to interrupts upon changes to the defined + * status bit. The bit is also marked for cascading. + */ +static void smsm_unmask_irq(struct irq_data *irqd) +{ + struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd); + irq_hw_number_t irq = irqd_to_hwirq(irqd); + struct qcom_smsm *smsm = entry->smsm; + u32 val; + + /* Make sure our last cached state is up-to-date */ + if (readl(entry->remote_state) & BIT(irq)) + set_bit(irq, &entry->last_value); + else + clear_bit(irq, &entry->last_value); + + set_bit(irq, entry->irq_enabled); + + if (entry->subscription) { + val = readl(entry->subscription + smsm->local_host); + val |= BIT(irq); + writel(val, entry->subscription + smsm->local_host); + } +} + +/** + * smsm_set_irq_type() - updates the requested IRQ type for the cascading + * @irqd: consumer interrupt handle + * @type: requested flags + */ +static int smsm_set_irq_type(struct irq_data *irqd, unsigned int type) +{ + struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd); + irq_hw_number_t irq = irqd_to_hwirq(irqd); + + if (!(type & IRQ_TYPE_EDGE_BOTH)) + return -EINVAL; + + if (type & IRQ_TYPE_EDGE_RISING) + set_bit(irq, entry->irq_rising); + else + clear_bit(irq, entry->irq_rising); + + if (type & IRQ_TYPE_EDGE_FALLING) + set_bit(irq, entry->irq_falling); + else + clear_bit(irq, entry->irq_falling); + + return 0; +} + +static int smsm_get_irqchip_state(struct irq_data *irqd, + enum irqchip_irq_state which, bool *state) +{ + struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd); + irq_hw_number_t irq = irqd_to_hwirq(irqd); + u32 val; + + if (which != IRQCHIP_STATE_LINE_LEVEL) + return -EINVAL; + + val = readl(entry->remote_state); + *state = !!(val & BIT(irq)); + + return 0; +} + +static struct irq_chip smsm_irq_chip = { + .name = "smsm", + .irq_mask = smsm_mask_irq, + .irq_unmask = smsm_unmask_irq, + .irq_set_type = smsm_set_irq_type, + .irq_get_irqchip_state = smsm_get_irqchip_state, +}; + +/** + * smsm_irq_map() - sets up a mapping for a cascaded IRQ + * @d: IRQ domain representing an entry + * @irq: IRQ to set up + * @hw: unused + */ +static int smsm_irq_map(struct irq_domain *d, + unsigned int irq, + irq_hw_number_t hw) +{ + struct smsm_entry *entry = d->host_data; + + irq_set_chip_and_handler(irq, &smsm_irq_chip, handle_level_irq); + irq_set_chip_data(irq, entry); + irq_set_nested_thread(irq, 1); + + return 0; +} + +static const struct irq_domain_ops smsm_irq_ops = { + .map = smsm_irq_map, + .xlate = irq_domain_xlate_twocell, +}; + +/** + * smsm_parse_ipc() - parses a qcom,ipc-%d device tree property + * @smsm: smsm driver context + * @host_id: index of the remote host to be resolved + * + * Parses device tree to acquire the information needed for sending the + * outgoing interrupts to a remote host - identified by @host_id. 
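+ *
+ * The property is a <phandle offset bit> triplet, for example (values are
+ * illustrative only):
+ *
+ *	qcom,ipc-1 = <&apcs 8 13>;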
+ */ +static int smsm_parse_ipc(struct qcom_smsm *smsm, unsigned host_id) +{ + struct device_node *syscon; + struct device_node *node = smsm->dev->of_node; + struct smsm_host *host = &smsm->hosts[host_id]; + char key[16]; + int ret; + + snprintf(key, sizeof(key), "qcom,ipc-%d", host_id); + syscon = of_parse_phandle(node, key, 0); + if (!syscon) + return 0; + + host->ipc_regmap = syscon_node_to_regmap(syscon); + of_node_put(syscon); + if (IS_ERR(host->ipc_regmap)) + return PTR_ERR(host->ipc_regmap); + + ret = of_property_read_u32_index(node, key, 1, &host->ipc_offset); + if (ret < 0) { + dev_err(smsm->dev, "no offset in %s\n", key); + return -EINVAL; + } + + ret = of_property_read_u32_index(node, key, 2, &host->ipc_bit); + if (ret < 0) { + dev_err(smsm->dev, "no bit in %s\n", key); + return -EINVAL; + } + + return 0; +} + +/** + * smsm_inbound_entry() - parse DT and set up an entry representing a remote system + * @smsm: smsm driver context + * @entry: entry context to be set up + * @node: dt node containing the entry's properties + */ +static int smsm_inbound_entry(struct qcom_smsm *smsm, + struct smsm_entry *entry, + struct device_node *node) +{ + int ret; + int irq; + + irq = irq_of_parse_and_map(node, 0); + if (!irq) { + dev_err(smsm->dev, "failed to parse smsm interrupt\n"); + return -EINVAL; + } + + ret = devm_request_threaded_irq(smsm->dev, irq, + NULL, smsm_intr, + IRQF_ONESHOT, + "smsm", (void *)entry); + if (ret) { + dev_err(smsm->dev, "failed to request interrupt\n"); + return ret; + } + + entry->domain = irq_domain_add_linear(node, 32, &smsm_irq_ops, entry); + if (!entry->domain) { + dev_err(smsm->dev, "failed to add irq_domain\n"); + return -ENOMEM; + } + + return 0; +} + +/** + * smsm_get_size_info() - parse the optional memory segment for sizes + * @smsm: smsm driver context + * + * Attempt to acquire the number of hosts and entries from the optional shared + * memory location. Not being able to find this segment should indicate that + * we're on a older system where these values was hard coded to + * SMSM_DEFAULT_NUM_ENTRIES and SMSM_DEFAULT_NUM_HOSTS. + * + * Returns 0 on success, negative errno on failure. 
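+ *
+ * The optional item is a four-word structure; only the first two words
+ * (num_hosts, num_entries) are consumed here.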
+ */ +static int smsm_get_size_info(struct qcom_smsm *smsm) +{ + size_t size; + struct { + u32 num_hosts; + u32 num_entries; + u32 reserved0; + u32 reserved1; + } *info; + + info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SIZE_INFO, &size); + if (IS_ERR(info) && PTR_ERR(info) != -ENOENT) + return dev_err_probe(smsm->dev, PTR_ERR(info), + "unable to retrieve smsm size info\n"); + else if (IS_ERR(info) || size != sizeof(*info)) { + dev_warn(smsm->dev, "no smsm size info, using defaults\n"); + smsm->num_entries = SMSM_DEFAULT_NUM_ENTRIES; + smsm->num_hosts = SMSM_DEFAULT_NUM_HOSTS; + return 0; + } + + smsm->num_entries = info->num_entries; + smsm->num_hosts = info->num_hosts; + + dev_dbg(smsm->dev, + "found custom size of smsm: %d entries %d hosts\n", + smsm->num_entries, smsm->num_hosts); + + return 0; +} + +static int qcom_smsm_probe(struct platform_device *pdev) +{ + struct device_node *local_node; + struct device_node *node; + struct smsm_entry *entry; + struct qcom_smsm *smsm; + u32 *intr_mask; + size_t size; + u32 *states; + u32 id; + int ret; + + smsm = devm_kzalloc(&pdev->dev, sizeof(*smsm), GFP_KERNEL); + if (!smsm) + return -ENOMEM; + smsm->dev = &pdev->dev; + spin_lock_init(&smsm->lock); + + ret = smsm_get_size_info(smsm); + if (ret) + return ret; + + smsm->entries = devm_kcalloc(&pdev->dev, + smsm->num_entries, + sizeof(struct smsm_entry), + GFP_KERNEL); + if (!smsm->entries) + return -ENOMEM; + + smsm->hosts = devm_kcalloc(&pdev->dev, + smsm->num_hosts, + sizeof(struct smsm_host), + GFP_KERNEL); + if (!smsm->hosts) + return -ENOMEM; + + for_each_child_of_node(pdev->dev.of_node, local_node) { + if (of_property_present(local_node, "#qcom,smem-state-cells")) + break; + } + if (!local_node) { + dev_err(&pdev->dev, "no state entry\n"); + return -EINVAL; + } + + of_property_read_u32(pdev->dev.of_node, + "qcom,local-host", + &smsm->local_host); + + /* Parse the host properties */ + for (id = 0; id < smsm->num_hosts; id++) { + ret = smsm_parse_ipc(smsm, id); + if (ret < 0) + goto out_put; + } + + /* Acquire the main SMSM state vector */ + ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SHARED_STATE, + smsm->num_entries * sizeof(u32)); + if (ret < 0 && ret != -EEXIST) { + dev_err(&pdev->dev, "unable to allocate shared state entry\n"); + goto out_put; + } + + states = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SHARED_STATE, NULL); + if (IS_ERR(states)) { + dev_err(&pdev->dev, "Unable to acquire shared state entry\n"); + ret = PTR_ERR(states); + goto out_put; + } + + /* Acquire the list of interrupt mask vectors */ + size = smsm->num_entries * smsm->num_hosts * sizeof(u32); + ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, SMEM_SMSM_CPU_INTR_MASK, size); + if (ret < 0 && ret != -EEXIST) { + dev_err(&pdev->dev, "unable to allocate smsm interrupt mask\n"); + goto out_put; + } + + intr_mask = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_CPU_INTR_MASK, NULL); + if (IS_ERR(intr_mask)) { + dev_err(&pdev->dev, "unable to acquire shared memory interrupt mask\n"); + ret = PTR_ERR(intr_mask); + goto out_put; + } + + /* Setup the reference to the local state bits */ + smsm->local_state = states + smsm->local_host; + smsm->subscription = intr_mask + smsm->local_host * smsm->num_hosts; + + /* Register the outgoing state */ + smsm->state = qcom_smem_state_register(local_node, &smsm_state_ops, smsm); + if (IS_ERR(smsm->state)) { + dev_err(smsm->dev, "failed to register qcom_smem_state\n"); + ret = PTR_ERR(smsm->state); + goto out_put; + } + + /* Register handlers for remote processor entries of interest. 
*/ + for_each_available_child_of_node(pdev->dev.of_node, node) { + if (!of_property_read_bool(node, "interrupt-controller")) + continue; + + ret = of_property_read_u32(node, "reg", &id); + if (ret || id >= smsm->num_entries) { + dev_err(&pdev->dev, "invalid reg of entry\n"); + if (!ret) + ret = -EINVAL; + goto unwind_interfaces; + } + entry = &smsm->entries[id]; + + entry->smsm = smsm; + entry->remote_state = states + id; + + /* Setup subscription pointers and unsubscribe to any kicks */ + entry->subscription = intr_mask + id * smsm->num_hosts; + writel(0, entry->subscription + smsm->local_host); + + ret = smsm_inbound_entry(smsm, entry, node); + if (ret < 0) + goto unwind_interfaces; + } + + platform_set_drvdata(pdev, smsm); + of_node_put(local_node); + + return 0; + +unwind_interfaces: + of_node_put(node); + for (id = 0; id < smsm->num_entries; id++) + if (smsm->entries[id].domain) + irq_domain_remove(smsm->entries[id].domain); + + qcom_smem_state_unregister(smsm->state); +out_put: + of_node_put(local_node); + return ret; +} + +static int qcom_smsm_remove(struct platform_device *pdev) +{ + struct qcom_smsm *smsm = platform_get_drvdata(pdev); + unsigned id; + + for (id = 0; id < smsm->num_entries; id++) + if (smsm->entries[id].domain) + irq_domain_remove(smsm->entries[id].domain); + + qcom_smem_state_unregister(smsm->state); + + return 0; +} + +static const struct of_device_id qcom_smsm_of_match[] = { + { .compatible = "qcom,smsm" }, + {} +}; +MODULE_DEVICE_TABLE(of, qcom_smsm_of_match); + +static struct platform_driver qcom_smsm_driver = { + .probe = qcom_smsm_probe, + .remove = qcom_smsm_remove, + .driver = { + .name = "qcom-smsm", + .of_match_table = qcom_smsm_of_match, + }, +}; +module_platform_driver(qcom_smsm_driver); + +MODULE_DESCRIPTION("Qualcomm Shared Memory State Machine driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c new file mode 100644 index 0000000000..497cfb720f --- /dev/null +++ b/drivers/soc/qcom/socinfo.c @@ -0,0 +1,802 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2009-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2019, Linaro Ltd. + */ + +#include <linux/debugfs.h> +#include <linux/err.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/random.h> +#include <linux/slab.h> +#include <linux/soc/qcom/smem.h> +#include <linux/soc/qcom/socinfo.h> +#include <linux/string.h> +#include <linux/stringify.h> +#include <linux/sys_soc.h> +#include <linux/types.h> + +#include <asm/unaligned.h> + +#include <dt-bindings/arm/qcom,ids.h> + +/* + * SoC version type with major number in the upper 16 bits and minor + * number in the lower 16 bits. 
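+ * For example, 0x00020001 decodes as major 2, minor 1.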
+ */ +#define SOCINFO_MAJOR(ver) (((ver) >> 16) & 0xffff) +#define SOCINFO_MINOR(ver) ((ver) & 0xffff) +#define SOCINFO_VERSION(maj, min) ((((maj) & 0xffff) << 16)|((min) & 0xffff)) + +/* Helper macros to create soc_id table */ +#define qcom_board_id(id) QCOM_ID_ ## id, __stringify(id) +#define qcom_board_id_named(id, name) QCOM_ID_ ## id, (name) + +#ifdef CONFIG_DEBUG_FS +#define SMEM_IMAGE_VERSION_BLOCKS_COUNT 32 +#define SMEM_IMAGE_VERSION_SIZE 4096 +#define SMEM_IMAGE_VERSION_NAME_SIZE 75 +#define SMEM_IMAGE_VERSION_VARIANT_SIZE 20 +#define SMEM_IMAGE_VERSION_OEM_SIZE 32 + +/* + * SMEM Image table indices + */ +#define SMEM_IMAGE_TABLE_BOOT_INDEX 0 +#define SMEM_IMAGE_TABLE_TZ_INDEX 1 +#define SMEM_IMAGE_TABLE_RPM_INDEX 3 +#define SMEM_IMAGE_TABLE_APPS_INDEX 10 +#define SMEM_IMAGE_TABLE_MPSS_INDEX 11 +#define SMEM_IMAGE_TABLE_ADSP_INDEX 12 +#define SMEM_IMAGE_TABLE_CNSS_INDEX 13 +#define SMEM_IMAGE_TABLE_VIDEO_INDEX 14 +#define SMEM_IMAGE_VERSION_TABLE 469 + +/* + * SMEM Image table names + */ +static const char *const socinfo_image_names[] = { + [SMEM_IMAGE_TABLE_ADSP_INDEX] = "adsp", + [SMEM_IMAGE_TABLE_APPS_INDEX] = "apps", + [SMEM_IMAGE_TABLE_BOOT_INDEX] = "boot", + [SMEM_IMAGE_TABLE_CNSS_INDEX] = "cnss", + [SMEM_IMAGE_TABLE_MPSS_INDEX] = "mpss", + [SMEM_IMAGE_TABLE_RPM_INDEX] = "rpm", + [SMEM_IMAGE_TABLE_TZ_INDEX] = "tz", + [SMEM_IMAGE_TABLE_VIDEO_INDEX] = "video", +}; + +static const char *const pmic_models[] = { + [0] = "Unknown PMIC model", + [1] = "PM8941", + [2] = "PM8841", + [3] = "PM8019", + [4] = "PM8226", + [5] = "PM8110", + [6] = "PMA8084", + [7] = "PMI8962", + [8] = "PMD9635", + [9] = "PM8994", + [10] = "PMI8994", + [11] = "PM8916", + [12] = "PM8004", + [13] = "PM8909/PM8058", + [14] = "PM8028", + [15] = "PM8901", + [16] = "PM8950/PM8027", + [17] = "PMI8950/ISL9519", + [18] = "PMK8001/PM8921", + [19] = "PMI8996/PM8018", + [20] = "PM8998/PM8015", + [21] = "PMI8998/PM8014", + [22] = "PM8821", + [23] = "PM8038", + [24] = "PM8005/PM8922", + [25] = "PM8917", + [26] = "PM660L", + [27] = "PM660", + [30] = "PM8150", + [31] = "PM8150L", + [32] = "PM8150B", + [33] = "PMK8002", + [36] = "PM8009", + [37] = "PMI632", + [38] = "PM8150C", + [40] = "PM6150", + [41] = "SMB2351", + [44] = "PM8008", + [45] = "PM6125", + [46] = "PM7250B", + [47] = "PMK8350", + [48] = "PM8350", + [49] = "PM8350C", + [50] = "PM8350B", + [51] = "PMR735A", + [52] = "PMR735B", + [55] = "PM2250", + [58] = "PM8450", + [65] = "PM8010", +}; + +struct socinfo_params { + u32 raw_device_family; + u32 hw_plat_subtype; + u32 accessory_chip; + u32 raw_device_num; + u32 chip_family; + u32 foundry_id; + u32 plat_ver; + u32 raw_ver; + u32 hw_plat; + u32 fmt; + u32 nproduct_id; + u32 num_clusters; + u32 ncluster_array_offset; + u32 num_subset_parts; + u32 nsubset_parts_array_offset; + u32 nmodem_supported; + u32 feature_code; + u32 pcode; + u32 oem_variant; + u32 num_func_clusters; + u32 boot_cluster; + u32 boot_core; +}; + +struct smem_image_version { + char name[SMEM_IMAGE_VERSION_NAME_SIZE]; + char variant[SMEM_IMAGE_VERSION_VARIANT_SIZE]; + char pad; + char oem[SMEM_IMAGE_VERSION_OEM_SIZE]; +}; +#endif /* CONFIG_DEBUG_FS */ + +struct qcom_socinfo { + struct soc_device *soc_dev; + struct soc_device_attribute attr; +#ifdef CONFIG_DEBUG_FS + struct dentry *dbg_root; + struct socinfo_params info; +#endif /* CONFIG_DEBUG_FS */ +}; + +struct soc_id { + unsigned int id; + const char *name; +}; + +static const struct soc_id soc_id[] = { + { qcom_board_id(MSM8260) }, + { qcom_board_id(MSM8660) }, + { qcom_board_id(APQ8060) }, + { 
qcom_board_id(MSM8960) }, + { qcom_board_id(APQ8064) }, + { qcom_board_id(MSM8930) }, + { qcom_board_id(MSM8630) }, + { qcom_board_id(MSM8230) }, + { qcom_board_id(APQ8030) }, + { qcom_board_id(MSM8627) }, + { qcom_board_id(MSM8227) }, + { qcom_board_id(MSM8660A) }, + { qcom_board_id(MSM8260A) }, + { qcom_board_id(APQ8060A) }, + { qcom_board_id(MSM8974) }, + { qcom_board_id(MSM8225) }, + { qcom_board_id(MSM8625) }, + { qcom_board_id(MPQ8064) }, + { qcom_board_id(MSM8960AB) }, + { qcom_board_id(APQ8060AB) }, + { qcom_board_id(MSM8260AB) }, + { qcom_board_id(MSM8660AB) }, + { qcom_board_id(MSM8930AA) }, + { qcom_board_id(MSM8630AA) }, + { qcom_board_id(MSM8230AA) }, + { qcom_board_id(MSM8626) }, + { qcom_board_id(MSM8610) }, + { qcom_board_id(APQ8064AB) }, + { qcom_board_id(MSM8930AB) }, + { qcom_board_id(MSM8630AB) }, + { qcom_board_id(MSM8230AB) }, + { qcom_board_id(APQ8030AB) }, + { qcom_board_id(MSM8226) }, + { qcom_board_id(MSM8526) }, + { qcom_board_id(APQ8030AA) }, + { qcom_board_id(MSM8110) }, + { qcom_board_id(MSM8210) }, + { qcom_board_id(MSM8810) }, + { qcom_board_id(MSM8212) }, + { qcom_board_id(MSM8612) }, + { qcom_board_id(MSM8112) }, + { qcom_board_id(MSM8125) }, + { qcom_board_id(MSM8225Q) }, + { qcom_board_id(MSM8625Q) }, + { qcom_board_id(MSM8125Q) }, + { qcom_board_id(APQ8064AA) }, + { qcom_board_id(APQ8084) }, + { qcom_board_id(MSM8130) }, + { qcom_board_id(MSM8130AA) }, + { qcom_board_id(MSM8130AB) }, + { qcom_board_id(MSM8627AA) }, + { qcom_board_id(MSM8227AA) }, + { qcom_board_id(APQ8074) }, + { qcom_board_id(MSM8274) }, + { qcom_board_id(MSM8674) }, + { qcom_board_id(MDM9635) }, + { qcom_board_id_named(MSM8974PRO_AC, "MSM8974PRO-AC") }, + { qcom_board_id(MSM8126) }, + { qcom_board_id(APQ8026) }, + { qcom_board_id(MSM8926) }, + { qcom_board_id(IPQ8062) }, + { qcom_board_id(IPQ8064) }, + { qcom_board_id(IPQ8066) }, + { qcom_board_id(IPQ8068) }, + { qcom_board_id(MSM8326) }, + { qcom_board_id(MSM8916) }, + { qcom_board_id(MSM8994) }, + { qcom_board_id_named(APQ8074PRO_AA, "APQ8074PRO-AA") }, + { qcom_board_id_named(APQ8074PRO_AB, "APQ8074PRO-AB") }, + { qcom_board_id_named(APQ8074PRO_AC, "APQ8074PRO-AC") }, + { qcom_board_id_named(MSM8274PRO_AA, "MSM8274PRO-AA") }, + { qcom_board_id_named(MSM8274PRO_AB, "MSM8274PRO-AB") }, + { qcom_board_id_named(MSM8274PRO_AC, "MSM8274PRO-AC") }, + { qcom_board_id_named(MSM8674PRO_AA, "MSM8674PRO-AA") }, + { qcom_board_id_named(MSM8674PRO_AB, "MSM8674PRO-AB") }, + { qcom_board_id_named(MSM8674PRO_AC, "MSM8674PRO-AC") }, + { qcom_board_id_named(MSM8974PRO_AA, "MSM8974PRO-AA") }, + { qcom_board_id_named(MSM8974PRO_AB, "MSM8974PRO-AB") }, + { qcom_board_id(APQ8028) }, + { qcom_board_id(MSM8128) }, + { qcom_board_id(MSM8228) }, + { qcom_board_id(MSM8528) }, + { qcom_board_id(MSM8628) }, + { qcom_board_id(MSM8928) }, + { qcom_board_id(MSM8510) }, + { qcom_board_id(MSM8512) }, + { qcom_board_id(MSM8936) }, + { qcom_board_id(MDM9640) }, + { qcom_board_id(MSM8939) }, + { qcom_board_id(APQ8036) }, + { qcom_board_id(APQ8039) }, + { qcom_board_id(MSM8236) }, + { qcom_board_id(MSM8636) }, + { qcom_board_id(MSM8909) }, + { qcom_board_id(MSM8996) }, + { qcom_board_id(APQ8016) }, + { qcom_board_id(MSM8216) }, + { qcom_board_id(MSM8116) }, + { qcom_board_id(MSM8616) }, + { qcom_board_id(MSM8992) }, + { qcom_board_id(APQ8092) }, + { qcom_board_id(APQ8094) }, + { qcom_board_id(MSM8209) }, + { qcom_board_id(MSM8208) }, + { qcom_board_id(MDM9209) }, + { qcom_board_id(MDM9309) }, + { qcom_board_id(MDM9609) }, + { qcom_board_id(MSM8239) }, + { 
qcom_board_id(MSM8952) }, + { qcom_board_id(APQ8009) }, + { qcom_board_id(MSM8956) }, + { qcom_board_id(MSM8929) }, + { qcom_board_id(MSM8629) }, + { qcom_board_id(MSM8229) }, + { qcom_board_id(APQ8029) }, + { qcom_board_id(APQ8056) }, + { qcom_board_id(MSM8609) }, + { qcom_board_id(APQ8076) }, + { qcom_board_id(MSM8976) }, + { qcom_board_id(IPQ8065) }, + { qcom_board_id(IPQ8069) }, + { qcom_board_id(MDM9650) }, + { qcom_board_id(MDM9655) }, + { qcom_board_id(MDM9250) }, + { qcom_board_id(MDM9255) }, + { qcom_board_id(MDM9350) }, + { qcom_board_id(APQ8052) }, + { qcom_board_id(MDM9607) }, + { qcom_board_id(APQ8096) }, + { qcom_board_id(MSM8998) }, + { qcom_board_id(MSM8953) }, + { qcom_board_id(MSM8937) }, + { qcom_board_id(APQ8037) }, + { qcom_board_id(MDM8207) }, + { qcom_board_id(MDM9207) }, + { qcom_board_id(MDM9307) }, + { qcom_board_id(MDM9628) }, + { qcom_board_id(MSM8909W) }, + { qcom_board_id(APQ8009W) }, + { qcom_board_id(MSM8996L) }, + { qcom_board_id(MSM8917) }, + { qcom_board_id(APQ8053) }, + { qcom_board_id(MSM8996SG) }, + { qcom_board_id(APQ8017) }, + { qcom_board_id(MSM8217) }, + { qcom_board_id(MSM8617) }, + { qcom_board_id(MSM8996AU) }, + { qcom_board_id(APQ8096AU) }, + { qcom_board_id(APQ8096SG) }, + { qcom_board_id(MSM8940) }, + { qcom_board_id(SDX201) }, + { qcom_board_id(SDM660) }, + { qcom_board_id(SDM630) }, + { qcom_board_id(APQ8098) }, + { qcom_board_id(MSM8920) }, + { qcom_board_id(SDM845) }, + { qcom_board_id(MDM9206) }, + { qcom_board_id(IPQ8074) }, + { qcom_board_id(SDA660) }, + { qcom_board_id(SDM658) }, + { qcom_board_id(SDA658) }, + { qcom_board_id(SDA630) }, + { qcom_board_id(MSM8905) }, + { qcom_board_id(SDX202) }, + { qcom_board_id(SDM450) }, + { qcom_board_id(SM8150) }, + { qcom_board_id(SDA845) }, + { qcom_board_id(IPQ8072) }, + { qcom_board_id(IPQ8076) }, + { qcom_board_id(IPQ8078) }, + { qcom_board_id(SDM636) }, + { qcom_board_id(SDA636) }, + { qcom_board_id(SDM632) }, + { qcom_board_id(SDA632) }, + { qcom_board_id(SDA450) }, + { qcom_board_id(SDM439) }, + { qcom_board_id(SDM429) }, + { qcom_board_id(SM8250) }, + { qcom_board_id(SA8155) }, + { qcom_board_id(SDA439) }, + { qcom_board_id(SDA429) }, + { qcom_board_id(SM7150) }, + { qcom_board_id(IPQ8070) }, + { qcom_board_id(IPQ8071) }, + { qcom_board_id(QM215) }, + { qcom_board_id(IPQ8072A) }, + { qcom_board_id(IPQ8074A) }, + { qcom_board_id(IPQ8076A) }, + { qcom_board_id(IPQ8078A) }, + { qcom_board_id(SM6125) }, + { qcom_board_id(IPQ8070A) }, + { qcom_board_id(IPQ8071A) }, + { qcom_board_id(IPQ6018) }, + { qcom_board_id(IPQ6028) }, + { qcom_board_id(SDM429W) }, + { qcom_board_id(SM4250) }, + { qcom_board_id(IPQ6000) }, + { qcom_board_id(IPQ6010) }, + { qcom_board_id(SC7180) }, + { qcom_board_id(SM6350) }, + { qcom_board_id(QCM2150) }, + { qcom_board_id(SDA429W) }, + { qcom_board_id(SM8350) }, + { qcom_board_id(QCM2290) }, + { qcom_board_id(SM7125) }, + { qcom_board_id(SM6115) }, + { qcom_board_id(IPQ5010) }, + { qcom_board_id(IPQ5018) }, + { qcom_board_id(IPQ5028) }, + { qcom_board_id(SC8280XP) }, + { qcom_board_id(IPQ6005) }, + { qcom_board_id(QRB5165) }, + { qcom_board_id(SM8450) }, + { qcom_board_id(SM7225) }, + { qcom_board_id(SA8295P) }, + { qcom_board_id(SA8540P) }, + { qcom_board_id(QCM4290) }, + { qcom_board_id(QCS4290) }, + { qcom_board_id_named(SM8450_2, "SM8450") }, + { qcom_board_id_named(SM8450_3, "SM8450") }, + { qcom_board_id(SC7280) }, + { qcom_board_id(SC7180P) }, + { qcom_board_id(IPQ5000) }, + { qcom_board_id(IPQ0509) }, + { qcom_board_id(IPQ0518) }, + { qcom_board_id(SM6375) }, + { 
qcom_board_id(IPQ9514) }, + { qcom_board_id(IPQ9550) }, + { qcom_board_id(IPQ9554) }, + { qcom_board_id(IPQ9570) }, + { qcom_board_id(IPQ9574) }, + { qcom_board_id(SM8550) }, + { qcom_board_id(IPQ5016) }, + { qcom_board_id(IPQ9510) }, + { qcom_board_id(QRB4210) }, + { qcom_board_id(QRB2210) }, + { qcom_board_id(SA8775P) }, + { qcom_board_id(QRU1000) }, + { qcom_board_id(QDU1000) }, + { qcom_board_id(SM4450) }, + { qcom_board_id(QDU1010) }, + { qcom_board_id(QRU1032) }, + { qcom_board_id(QRU1052) }, + { qcom_board_id(QRU1062) }, + { qcom_board_id(IPQ5332) }, + { qcom_board_id(IPQ5322) }, + { qcom_board_id(IPQ5312) }, + { qcom_board_id(IPQ5302) }, + { qcom_board_id(IPQ5300) }, +}; + +static const char *socinfo_machine(struct device *dev, unsigned int id) +{ + int idx; + + for (idx = 0; idx < ARRAY_SIZE(soc_id); idx++) { + if (soc_id[idx].id == id) + return soc_id[idx].name; + } + + return NULL; +} + +#ifdef CONFIG_DEBUG_FS + +#define QCOM_OPEN(name, _func) \ +static int qcom_open_##name(struct inode *inode, struct file *file) \ +{ \ + return single_open(file, _func, inode->i_private); \ +} \ + \ +static const struct file_operations qcom_ ##name## _ops = { \ + .open = qcom_open_##name, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ +} + +#define DEBUGFS_ADD(info, name) \ + debugfs_create_file(__stringify(name), 0444, \ + qcom_socinfo->dbg_root, \ + info, &qcom_ ##name## _ops) + + +static int qcom_show_build_id(struct seq_file *seq, void *p) +{ + struct socinfo *socinfo = seq->private; + + seq_printf(seq, "%s\n", socinfo->build_id); + + return 0; +} + +static int qcom_show_pmic_model(struct seq_file *seq, void *p) +{ + struct socinfo *socinfo = seq->private; + int model = SOCINFO_MINOR(le32_to_cpu(socinfo->pmic_model)); + + if (model < 0) + return -EINVAL; + + if (model < ARRAY_SIZE(pmic_models) && pmic_models[model]) + seq_printf(seq, "%s\n", pmic_models[model]); + else + seq_printf(seq, "unknown (%d)\n", model); + + return 0; +} + +static int qcom_show_pmic_model_array(struct seq_file *seq, void *p) +{ + struct socinfo *socinfo = seq->private; + unsigned int num_pmics = le32_to_cpu(socinfo->num_pmics); + unsigned int pmic_array_offset = le32_to_cpu(socinfo->pmic_array_offset); + int i; + void *ptr = socinfo; + + ptr += pmic_array_offset; + + /* No need for bounds checking, it happened at socinfo_debugfs_init */ + for (i = 0; i < num_pmics; i++) { + unsigned int model = SOCINFO_MINOR(get_unaligned_le32(ptr + 2 * i * sizeof(u32))); + unsigned int die_rev = get_unaligned_le32(ptr + (2 * i + 1) * sizeof(u32)); + + if (model < ARRAY_SIZE(pmic_models) && pmic_models[model]) + seq_printf(seq, "%s %u.%u\n", pmic_models[model], + SOCINFO_MAJOR(die_rev), + SOCINFO_MINOR(die_rev)); + else + seq_printf(seq, "unknown (%d)\n", model); + } + + return 0; +} + +static int qcom_show_pmic_die_revision(struct seq_file *seq, void *p) +{ + struct socinfo *socinfo = seq->private; + + seq_printf(seq, "%u.%u\n", + SOCINFO_MAJOR(le32_to_cpu(socinfo->pmic_die_rev)), + SOCINFO_MINOR(le32_to_cpu(socinfo->pmic_die_rev))); + + return 0; +} + +static int qcom_show_chip_id(struct seq_file *seq, void *p) +{ + struct socinfo *socinfo = seq->private; + + seq_printf(seq, "%s\n", socinfo->chip_id); + + return 0; +} + +QCOM_OPEN(build_id, qcom_show_build_id); +QCOM_OPEN(pmic_model, qcom_show_pmic_model); +QCOM_OPEN(pmic_model_array, qcom_show_pmic_model_array); +QCOM_OPEN(pmic_die_rev, qcom_show_pmic_die_revision); +QCOM_OPEN(chip_id, qcom_show_chip_id); + +#define DEFINE_IMAGE_OPS(type) \ +static 
int show_image_##type(struct seq_file *seq, void *p) \ +{ \ + struct smem_image_version *image_version = seq->private; \ + if (image_version->type[0] != '\0') \ + seq_printf(seq, "%s\n", image_version->type); \ + return 0; \ +} \ +static int open_image_##type(struct inode *inode, struct file *file) \ +{ \ + return single_open(file, show_image_##type, inode->i_private); \ +} \ + \ +static const struct file_operations qcom_image_##type##_ops = { \ + .open = open_image_##type, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ +} + +DEFINE_IMAGE_OPS(name); +DEFINE_IMAGE_OPS(variant); +DEFINE_IMAGE_OPS(oem); + +static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo, + struct socinfo *info, size_t info_size) +{ + struct smem_image_version *versions; + struct dentry *dentry; + size_t size; + int i; + unsigned int num_pmics; + unsigned int pmic_array_offset; + + qcom_socinfo->dbg_root = debugfs_create_dir("qcom_socinfo", NULL); + + qcom_socinfo->info.fmt = __le32_to_cpu(info->fmt); + + debugfs_create_x32("info_fmt", 0444, qcom_socinfo->dbg_root, + &qcom_socinfo->info.fmt); + + switch (qcom_socinfo->info.fmt) { + case SOCINFO_VERSION(0, 19): + qcom_socinfo->info.num_func_clusters = __le32_to_cpu(info->num_func_clusters); + qcom_socinfo->info.boot_cluster = __le32_to_cpu(info->boot_cluster); + qcom_socinfo->info.boot_core = __le32_to_cpu(info->boot_core); + + debugfs_create_u32("num_func_clusters", 0444, qcom_socinfo->dbg_root, + &qcom_socinfo->info.num_func_clusters); + debugfs_create_u32("boot_cluster", 0444, qcom_socinfo->dbg_root, + &qcom_socinfo->info.boot_cluster); + debugfs_create_u32("boot_core", 0444, qcom_socinfo->dbg_root, + &qcom_socinfo->info.boot_core); + fallthrough; + case SOCINFO_VERSION(0, 18): + case SOCINFO_VERSION(0, 17): + qcom_socinfo->info.oem_variant = __le32_to_cpu(info->oem_variant); + debugfs_create_u32("oem_variant", 0444, qcom_socinfo->dbg_root, + &qcom_socinfo->info.oem_variant); + fallthrough; + case SOCINFO_VERSION(0, 16): + qcom_socinfo->info.feature_code = __le32_to_cpu(info->feature_code); + qcom_socinfo->info.pcode = __le32_to_cpu(info->pcode); + + debugfs_create_u32("feature_code", 0444, qcom_socinfo->dbg_root, + &qcom_socinfo->info.feature_code); + debugfs_create_u32("pcode", 0444, qcom_socinfo->dbg_root, + &qcom_socinfo->info.pcode); + fallthrough; + case SOCINFO_VERSION(0, 15): + qcom_socinfo->info.nmodem_supported = __le32_to_cpu(info->nmodem_supported); + + debugfs_create_u32("nmodem_supported", 0444, qcom_socinfo->dbg_root, + &qcom_socinfo->info.nmodem_supported); + fallthrough; + case SOCINFO_VERSION(0, 14): + qcom_socinfo->info.num_clusters = __le32_to_cpu(info->num_clusters); + qcom_socinfo->info.ncluster_array_offset = __le32_to_cpu(info->ncluster_array_offset); + qcom_socinfo->info.num_subset_parts = __le32_to_cpu(info->num_subset_parts); + qcom_socinfo->info.nsubset_parts_array_offset = + __le32_to_cpu(info->nsubset_parts_array_offset); + + debugfs_create_u32("num_clusters", 0444, qcom_socinfo->dbg_root, + &qcom_socinfo->info.num_clusters); + debugfs_create_u32("ncluster_array_offset", 0444, qcom_socinfo->dbg_root, + &qcom_socinfo->info.ncluster_array_offset); + debugfs_create_u32("num_subset_parts", 0444, qcom_socinfo->dbg_root, + &qcom_socinfo->info.num_subset_parts); + debugfs_create_u32("nsubset_parts_array_offset", 0444, qcom_socinfo->dbg_root, + &qcom_socinfo->info.nsubset_parts_array_offset); + fallthrough; + case SOCINFO_VERSION(0, 13): + qcom_socinfo->info.nproduct_id = 
__le32_to_cpu(info->nproduct_id); + + debugfs_create_u32("nproduct_id", 0444, qcom_socinfo->dbg_root, + &qcom_socinfo->info.nproduct_id); + DEBUGFS_ADD(info, chip_id); + fallthrough; + case SOCINFO_VERSION(0, 12): + qcom_socinfo->info.chip_family = + __le32_to_cpu(info->chip_family); + qcom_socinfo->info.raw_device_family = + __le32_to_cpu(info->raw_device_family); + qcom_socinfo->info.raw_device_num = + __le32_to_cpu(info->raw_device_num); + + debugfs_create_x32("chip_family", 0444, qcom_socinfo->dbg_root, + &qcom_socinfo->info.chip_family); + debugfs_create_x32("raw_device_family", 0444, + qcom_socinfo->dbg_root, + &qcom_socinfo->info.raw_device_family); + debugfs_create_x32("raw_device_number", 0444, + qcom_socinfo->dbg_root, + &qcom_socinfo->info.raw_device_num); + fallthrough; + case SOCINFO_VERSION(0, 11): + num_pmics = le32_to_cpu(info->num_pmics); + pmic_array_offset = le32_to_cpu(info->pmic_array_offset); + if (pmic_array_offset + 2 * num_pmics * sizeof(u32) <= info_size) + DEBUGFS_ADD(info, pmic_model_array); + fallthrough; + case SOCINFO_VERSION(0, 10): + case SOCINFO_VERSION(0, 9): + qcom_socinfo->info.foundry_id = __le32_to_cpu(info->foundry_id); + + debugfs_create_u32("foundry_id", 0444, qcom_socinfo->dbg_root, + &qcom_socinfo->info.foundry_id); + fallthrough; + case SOCINFO_VERSION(0, 8): + case SOCINFO_VERSION(0, 7): + DEBUGFS_ADD(info, pmic_model); + DEBUGFS_ADD(info, pmic_die_rev); + fallthrough; + case SOCINFO_VERSION(0, 6): + qcom_socinfo->info.hw_plat_subtype = + __le32_to_cpu(info->hw_plat_subtype); + + debugfs_create_u32("hardware_platform_subtype", 0444, + qcom_socinfo->dbg_root, + &qcom_socinfo->info.hw_plat_subtype); + fallthrough; + case SOCINFO_VERSION(0, 5): + qcom_socinfo->info.accessory_chip = + __le32_to_cpu(info->accessory_chip); + + debugfs_create_u32("accessory_chip", 0444, + qcom_socinfo->dbg_root, + &qcom_socinfo->info.accessory_chip); + fallthrough; + case SOCINFO_VERSION(0, 4): + qcom_socinfo->info.plat_ver = __le32_to_cpu(info->plat_ver); + + debugfs_create_u32("platform_version", 0444, + qcom_socinfo->dbg_root, + &qcom_socinfo->info.plat_ver); + fallthrough; + case SOCINFO_VERSION(0, 3): + qcom_socinfo->info.hw_plat = __le32_to_cpu(info->hw_plat); + + debugfs_create_u32("hardware_platform", 0444, + qcom_socinfo->dbg_root, + &qcom_socinfo->info.hw_plat); + fallthrough; + case SOCINFO_VERSION(0, 2): + qcom_socinfo->info.raw_ver = __le32_to_cpu(info->raw_ver); + + debugfs_create_u32("raw_version", 0444, qcom_socinfo->dbg_root, + &qcom_socinfo->info.raw_ver); + fallthrough; + case SOCINFO_VERSION(0, 1): + DEBUGFS_ADD(info, build_id); + break; + } + + versions = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_IMAGE_VERSION_TABLE, + &size); + + for (i = 0; i < ARRAY_SIZE(socinfo_image_names); i++) { + if (!socinfo_image_names[i]) + continue; + + dentry = debugfs_create_dir(socinfo_image_names[i], + qcom_socinfo->dbg_root); + debugfs_create_file("name", 0444, dentry, &versions[i], + &qcom_image_name_ops); + debugfs_create_file("variant", 0444, dentry, &versions[i], + &qcom_image_variant_ops); + debugfs_create_file("oem", 0444, dentry, &versions[i], + &qcom_image_oem_ops); + } +} + +static void socinfo_debugfs_exit(struct qcom_socinfo *qcom_socinfo) +{ + debugfs_remove_recursive(qcom_socinfo->dbg_root); +} +#else +static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo, + struct socinfo *info, size_t info_size) +{ +} +static void socinfo_debugfs_exit(struct qcom_socinfo *qcom_socinfo) { } +#endif /* CONFIG_DEBUG_FS */ + +static int 
qcom_socinfo_probe(struct platform_device *pdev) +{ + struct qcom_socinfo *qs; + struct socinfo *info; + size_t item_size; + + info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_HW_SW_BUILD_ID, + &item_size); + if (IS_ERR(info)) { + dev_err(&pdev->dev, "Couldn't find socinfo\n"); + return PTR_ERR(info); + } + + qs = devm_kzalloc(&pdev->dev, sizeof(*qs), GFP_KERNEL); + if (!qs) + return -ENOMEM; + + qs->attr.family = "Snapdragon"; + qs->attr.machine = socinfo_machine(&pdev->dev, + le32_to_cpu(info->id)); + qs->attr.soc_id = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u", + le32_to_cpu(info->id)); + qs->attr.revision = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u.%u", + SOCINFO_MAJOR(le32_to_cpu(info->ver)), + SOCINFO_MINOR(le32_to_cpu(info->ver))); + if (offsetof(struct socinfo, serial_num) <= item_size) + qs->attr.serial_number = devm_kasprintf(&pdev->dev, GFP_KERNEL, + "%u", + le32_to_cpu(info->serial_num)); + + qs->soc_dev = soc_device_register(&qs->attr); + if (IS_ERR(qs->soc_dev)) + return PTR_ERR(qs->soc_dev); + + socinfo_debugfs_init(qs, info, item_size); + + /* Feed the soc specific unique data into entropy pool */ + add_device_randomness(info, item_size); + + platform_set_drvdata(pdev, qs); + + return 0; +} + +static int qcom_socinfo_remove(struct platform_device *pdev) +{ + struct qcom_socinfo *qs = platform_get_drvdata(pdev); + + soc_device_unregister(qs->soc_dev); + + socinfo_debugfs_exit(qs); + + return 0; +} + +static struct platform_driver qcom_socinfo_driver = { + .probe = qcom_socinfo_probe, + .remove = qcom_socinfo_remove, + .driver = { + .name = "qcom-socinfo", + }, +}; + +module_platform_driver(qcom_socinfo_driver); + +MODULE_DESCRIPTION("Qualcomm SoCinfo driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:qcom-socinfo"); diff --git a/drivers/soc/qcom/spm.c b/drivers/soc/qcom/spm.c new file mode 100644 index 0000000000..2f0b1bfe76 --- /dev/null +++ b/drivers/soc/qcom/spm.c @@ -0,0 +1,335 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved. + * Copyright (c) 2014,2015, Linaro Ltd. 
+ * + * SAW power controller driver + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/of.h> +#include <linux/err.h> +#include <linux/platform_device.h> +#include <soc/qcom/spm.h> + +#define SPM_CTL_INDEX 0x7f +#define SPM_CTL_INDEX_SHIFT 4 +#define SPM_CTL_EN BIT(0) + +enum spm_reg { + SPM_REG_CFG, + SPM_REG_SPM_CTL, + SPM_REG_DLY, + SPM_REG_PMIC_DLY, + SPM_REG_PMIC_DATA_0, + SPM_REG_PMIC_DATA_1, + SPM_REG_VCTL, + SPM_REG_SEQ_ENTRY, + SPM_REG_SPM_STS, + SPM_REG_PMIC_STS, + SPM_REG_AVS_CTL, + SPM_REG_AVS_LIMIT, + SPM_REG_NR, +}; + +static const u16 spm_reg_offset_v4_1[SPM_REG_NR] = { + [SPM_REG_AVS_CTL] = 0x904, + [SPM_REG_AVS_LIMIT] = 0x908, +}; + +static const struct spm_reg_data spm_reg_660_gold_l2 = { + .reg_offset = spm_reg_offset_v4_1, + .avs_ctl = 0x1010031, + .avs_limit = 0x4580458, +}; + +static const struct spm_reg_data spm_reg_660_silver_l2 = { + .reg_offset = spm_reg_offset_v4_1, + .avs_ctl = 0x101c031, + .avs_limit = 0x4580458, +}; + +static const struct spm_reg_data spm_reg_8998_gold_l2 = { + .reg_offset = spm_reg_offset_v4_1, + .avs_ctl = 0x1010031, + .avs_limit = 0x4700470, +}; + +static const struct spm_reg_data spm_reg_8998_silver_l2 = { + .reg_offset = spm_reg_offset_v4_1, + .avs_ctl = 0x1010031, + .avs_limit = 0x4200420, +}; + +static const u16 spm_reg_offset_v3_0[SPM_REG_NR] = { + [SPM_REG_CFG] = 0x08, + [SPM_REG_SPM_CTL] = 0x30, + [SPM_REG_DLY] = 0x34, + [SPM_REG_SEQ_ENTRY] = 0x400, +}; + +/* SPM register data for 8909 */ +static const struct spm_reg_data spm_reg_8909_cpu = { + .reg_offset = spm_reg_offset_v3_0, + .spm_cfg = 0x1, + .spm_dly = 0x3C102800, + .seq = { 0x60, 0x03, 0x60, 0x0B, 0x0F, 0x20, 0x10, 0x80, 0x30, 0x90, + 0x5B, 0x60, 0x03, 0x60, 0x76, 0x76, 0x0B, 0x94, 0x5B, 0x80, + 0x10, 0x26, 0x30, 0x0F }, + .start_index[PM_SLEEP_MODE_STBY] = 0, + .start_index[PM_SLEEP_MODE_SPC] = 5, +}; + +/* SPM register data for 8916 */ +static const struct spm_reg_data spm_reg_8916_cpu = { + .reg_offset = spm_reg_offset_v3_0, + .spm_cfg = 0x1, + .spm_dly = 0x3C102800, + .seq = { 0x60, 0x03, 0x60, 0x0B, 0x0F, 0x20, 0x10, 0x80, 0x30, 0x90, + 0x5B, 0x60, 0x03, 0x60, 0x3B, 0x76, 0x76, 0x0B, 0x94, 0x5B, + 0x80, 0x10, 0x26, 0x30, 0x0F }, + .start_index[PM_SLEEP_MODE_STBY] = 0, + .start_index[PM_SLEEP_MODE_SPC] = 5, +}; + +static const struct spm_reg_data spm_reg_8939_cpu = { + .reg_offset = spm_reg_offset_v3_0, + .spm_cfg = 0x1, + .spm_dly = 0x3C102800, + .seq = { 0x60, 0x03, 0x60, 0x0B, 0x0F, 0x20, 0x50, 0x1B, 0x10, 0x80, + 0x30, 0x90, 0x5B, 0x60, 0x50, 0x03, 0x60, 0x76, 0x76, 0x0B, + 0x50, 0x1B, 0x94, 0x5B, 0x80, 0x10, 0x26, 0x30, 0x50, 0x0F }, + .start_index[PM_SLEEP_MODE_STBY] = 0, + .start_index[PM_SLEEP_MODE_SPC] = 5, +}; + +static const u16 spm_reg_offset_v2_3[SPM_REG_NR] = { + [SPM_REG_CFG] = 0x08, + [SPM_REG_SPM_CTL] = 0x30, + [SPM_REG_DLY] = 0x34, + [SPM_REG_PMIC_DATA_0] = 0x40, + [SPM_REG_PMIC_DATA_1] = 0x44, +}; + +/* SPM register data for 8976 */ +static const struct spm_reg_data spm_reg_8976_gold_l2 = { + .reg_offset = spm_reg_offset_v2_3, + .spm_cfg = 0x14, + .spm_dly = 0x3c11840a, + .pmic_data[0] = 0x03030080, + .pmic_data[1] = 0x00030000, + .start_index[PM_SLEEP_MODE_STBY] = 0, + .start_index[PM_SLEEP_MODE_SPC] = 3, +}; + +static const struct spm_reg_data spm_reg_8976_silver_l2 = { + .reg_offset = spm_reg_offset_v2_3, + .spm_cfg = 0x14, + .spm_dly = 0x3c102800, + .pmic_data[0] = 0x03030080, + .pmic_data[1] = 0x00030000, + .start_index[PM_SLEEP_MODE_STBY] = 0, + 
.start_index[PM_SLEEP_MODE_SPC] = 2, +}; + +static const u16 spm_reg_offset_v2_1[SPM_REG_NR] = { + [SPM_REG_CFG] = 0x08, + [SPM_REG_SPM_CTL] = 0x30, + [SPM_REG_DLY] = 0x34, + [SPM_REG_SEQ_ENTRY] = 0x80, +}; + +/* SPM register data for 8974, 8084 */ +static const struct spm_reg_data spm_reg_8974_8084_cpu = { + .reg_offset = spm_reg_offset_v2_1, + .spm_cfg = 0x1, + .spm_dly = 0x3C102800, + .seq = { 0x03, 0x0B, 0x0F, 0x00, 0x20, 0x80, 0x10, 0xE8, 0x5B, 0x03, + 0x3B, 0xE8, 0x5B, 0x82, 0x10, 0x0B, 0x30, 0x06, 0x26, 0x30, + 0x0F }, + .start_index[PM_SLEEP_MODE_STBY] = 0, + .start_index[PM_SLEEP_MODE_SPC] = 3, +}; + +/* SPM register data for 8226 */ +static const struct spm_reg_data spm_reg_8226_cpu = { + .reg_offset = spm_reg_offset_v2_1, + .spm_cfg = 0x0, + .spm_dly = 0x3C102800, + .seq = { 0x60, 0x03, 0x60, 0x0B, 0x0F, 0x20, 0x10, 0x80, 0x30, 0x90, + 0x5B, 0x60, 0x03, 0x60, 0x3B, 0x76, 0x76, 0x0B, 0x94, 0x5B, + 0x80, 0x10, 0x26, 0x30, 0x0F }, + .start_index[PM_SLEEP_MODE_STBY] = 0, + .start_index[PM_SLEEP_MODE_SPC] = 5, +}; + +static const u16 spm_reg_offset_v1_1[SPM_REG_NR] = { + [SPM_REG_CFG] = 0x08, + [SPM_REG_SPM_CTL] = 0x20, + [SPM_REG_PMIC_DLY] = 0x24, + [SPM_REG_PMIC_DATA_0] = 0x28, + [SPM_REG_PMIC_DATA_1] = 0x2C, + [SPM_REG_SEQ_ENTRY] = 0x80, +}; + +/* SPM register data for 8064 */ +static const struct spm_reg_data spm_reg_8064_cpu = { + .reg_offset = spm_reg_offset_v1_1, + .spm_cfg = 0x1F, + .pmic_dly = 0x02020004, + .pmic_data[0] = 0x0084009C, + .pmic_data[1] = 0x00A4001C, + .seq = { 0x03, 0x0F, 0x00, 0x24, 0x54, 0x10, 0x09, 0x03, 0x01, + 0x10, 0x54, 0x30, 0x0C, 0x24, 0x30, 0x0F }, + .start_index[PM_SLEEP_MODE_STBY] = 0, + .start_index[PM_SLEEP_MODE_SPC] = 2, +}; + +static inline void spm_register_write(struct spm_driver_data *drv, + enum spm_reg reg, u32 val) +{ + if (drv->reg_data->reg_offset[reg]) + writel_relaxed(val, drv->reg_base + + drv->reg_data->reg_offset[reg]); +} + +/* Ensure a guaranteed write, before return */ +static inline void spm_register_write_sync(struct spm_driver_data *drv, + enum spm_reg reg, u32 val) +{ + u32 ret; + + if (!drv->reg_data->reg_offset[reg]) + return; + + do { + writel_relaxed(val, drv->reg_base + + drv->reg_data->reg_offset[reg]); + ret = readl_relaxed(drv->reg_base + + drv->reg_data->reg_offset[reg]); + if (ret == val) + break; + cpu_relax(); + } while (1); +} + +static inline u32 spm_register_read(struct spm_driver_data *drv, + enum spm_reg reg) +{ + return readl_relaxed(drv->reg_base + drv->reg_data->reg_offset[reg]); +} + +void spm_set_low_power_mode(struct spm_driver_data *drv, + enum pm_sleep_mode mode) +{ + u32 start_index; + u32 ctl_val; + + start_index = drv->reg_data->start_index[mode]; + + ctl_val = spm_register_read(drv, SPM_REG_SPM_CTL); + ctl_val &= ~(SPM_CTL_INDEX << SPM_CTL_INDEX_SHIFT); + ctl_val |= start_index << SPM_CTL_INDEX_SHIFT; + ctl_val |= SPM_CTL_EN; + spm_register_write_sync(drv, SPM_REG_SPM_CTL, ctl_val); +} + +static const struct of_device_id spm_match_table[] = { + { .compatible = "qcom,sdm660-gold-saw2-v4.1-l2", + .data = &spm_reg_660_gold_l2 }, + { .compatible = "qcom,sdm660-silver-saw2-v4.1-l2", + .data = &spm_reg_660_silver_l2 }, + { .compatible = "qcom,msm8226-saw2-v2.1-cpu", + .data = &spm_reg_8226_cpu }, + { .compatible = "qcom,msm8909-saw2-v3.0-cpu", + .data = &spm_reg_8909_cpu }, + { .compatible = "qcom,msm8916-saw2-v3.0-cpu", + .data = &spm_reg_8916_cpu }, + { .compatible = "qcom,msm8939-saw2-v3.0-cpu", + .data = &spm_reg_8939_cpu }, + { .compatible = "qcom,msm8974-saw2-v2.1-cpu", + .data = 
&spm_reg_8974_8084_cpu }, + { .compatible = "qcom,msm8976-gold-saw2-v2.3-l2", + .data = &spm_reg_8976_gold_l2 }, + { .compatible = "qcom,msm8976-silver-saw2-v2.3-l2", + .data = &spm_reg_8976_silver_l2 }, + { .compatible = "qcom,msm8998-gold-saw2-v4.1-l2", + .data = &spm_reg_8998_gold_l2 }, + { .compatible = "qcom,msm8998-silver-saw2-v4.1-l2", + .data = &spm_reg_8998_silver_l2 }, + { .compatible = "qcom,apq8084-saw2-v2.1-cpu", + .data = &spm_reg_8974_8084_cpu }, + { .compatible = "qcom,apq8064-saw2-v1.1-cpu", + .data = &spm_reg_8064_cpu }, + { }, +}; +MODULE_DEVICE_TABLE(of, spm_match_table); + +static int spm_dev_probe(struct platform_device *pdev) +{ + const struct of_device_id *match_id; + struct spm_driver_data *drv; + void __iomem *addr; + + drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL); + if (!drv) + return -ENOMEM; + + drv->reg_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(drv->reg_base)) + return PTR_ERR(drv->reg_base); + + match_id = of_match_node(spm_match_table, pdev->dev.of_node); + if (!match_id) + return -ENODEV; + + drv->reg_data = match_id->data; + platform_set_drvdata(pdev, drv); + + /* Write the SPM sequences first.. */ + addr = drv->reg_base + drv->reg_data->reg_offset[SPM_REG_SEQ_ENTRY]; + __iowrite32_copy(addr, drv->reg_data->seq, + ARRAY_SIZE(drv->reg_data->seq) / 4); + + /* + * ..and then the control registers. + * On some SoC if the control registers are written first and if the + * CPU was held in reset, the reset signal could trigger the SPM state + * machine, before the sequences are completely written. + */ + spm_register_write(drv, SPM_REG_AVS_CTL, drv->reg_data->avs_ctl); + spm_register_write(drv, SPM_REG_AVS_LIMIT, drv->reg_data->avs_limit); + spm_register_write(drv, SPM_REG_CFG, drv->reg_data->spm_cfg); + spm_register_write(drv, SPM_REG_DLY, drv->reg_data->spm_dly); + spm_register_write(drv, SPM_REG_PMIC_DLY, drv->reg_data->pmic_dly); + spm_register_write(drv, SPM_REG_PMIC_DATA_0, + drv->reg_data->pmic_data[0]); + spm_register_write(drv, SPM_REG_PMIC_DATA_1, + drv->reg_data->pmic_data[1]); + + /* Set up Standby as the default low power mode */ + if (drv->reg_data->reg_offset[SPM_REG_SPM_CTL]) + spm_set_low_power_mode(drv, PM_SLEEP_MODE_STBY); + + return 0; +} + +static struct platform_driver spm_driver = { + .probe = spm_dev_probe, + .driver = { + .name = "qcom_spm", + .of_match_table = spm_match_table, + }, +}; + +static int __init qcom_spm_init(void) +{ + return platform_driver_register(&spm_driver); +} +arch_initcall(qcom_spm_init); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/trace-rpmh.h b/drivers/soc/qcom/trace-rpmh.h new file mode 100644 index 0000000000..be6b42ecc1 --- /dev/null +++ b/drivers/soc/qcom/trace-rpmh.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. 
+ */ + +#if !defined(_TRACE_RPMH_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_RPMH_H + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM rpmh + +#include <linux/tracepoint.h> +#include "rpmh-internal.h" + +TRACE_EVENT(rpmh_tx_done, + + TP_PROTO(struct rsc_drv *d, int m, const struct tcs_request *r), + + TP_ARGS(d, m, r), + + TP_STRUCT__entry( + __string(name, d->name) + __field(int, m) + __field(u32, addr) + __field(u32, data) + ), + + TP_fast_assign( + __assign_str(name, d->name); + __entry->m = m; + __entry->addr = r->cmds[0].addr; + __entry->data = r->cmds[0].data; + ), + + TP_printk("%s: ack: tcs-m: %d addr: %#x data: %#x", + __get_str(name), __entry->m, __entry->addr, __entry->data) +); + +TRACE_EVENT(rpmh_send_msg, + + TP_PROTO(struct rsc_drv *d, int m, enum rpmh_state state, int n, u32 h, + const struct tcs_cmd *c), + + TP_ARGS(d, m, state, n, h, c), + + TP_STRUCT__entry( + __string(name, d->name) + __field(int, m) + __field(u32, state) + __field(int, n) + __field(u32, hdr) + __field(u32, addr) + __field(u32, data) + __field(bool, wait) + ), + + TP_fast_assign( + __assign_str(name, d->name); + __entry->m = m; + __entry->state = state; + __entry->n = n; + __entry->hdr = h; + __entry->addr = c->addr; + __entry->data = c->data; + __entry->wait = c->wait; + ), + + TP_printk("%s: tcs(m): %d [%s] cmd(n): %d msgid: %#x addr: %#x data: %#x complete: %d", + __get_str(name), __entry->m, + __print_symbolic(__entry->state, + { RPMH_SLEEP_STATE, "sleep" }, + { RPMH_WAKE_ONLY_STATE, "wake" }, + { RPMH_ACTIVE_ONLY_STATE, "active" }), + __entry->n, + __entry->hdr, + __entry->addr, __entry->data, __entry->wait) +); + +#endif /* _TRACE_RPMH_H */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . + +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace-rpmh + +#include <trace/define_trace.h> diff --git a/drivers/soc/qcom/wcnss_ctrl.c b/drivers/soc/qcom/wcnss_ctrl.c new file mode 100644 index 0000000000..ad9942412c --- /dev/null +++ b/drivers/soc/qcom/wcnss_ctrl.c @@ -0,0 +1,366 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2016, Linaro Ltd. + * Copyright (c) 2015, Sony Mobile Communications Inc. 
+ */ +#include <linux/firmware.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/rpmsg.h> +#include <linux/soc/qcom/wcnss_ctrl.h> + +#define WCNSS_REQUEST_TIMEOUT (5 * HZ) +#define WCNSS_CBC_TIMEOUT (10 * HZ) + +#define WCNSS_ACK_DONE_BOOTING 1 +#define WCNSS_ACK_COLD_BOOTING 2 + +#define NV_FRAGMENT_SIZE 3072 +#define NVBIN_FILE "wlan/prima/WCNSS_qcom_wlan_nv.bin" + +/** + * struct wcnss_ctrl - driver context + * @dev: device handle + * @channel: SMD channel handle + * @ack: completion for outstanding requests + * @cbc: completion for cbc complete indication + * @ack_status: status of the outstanding request + * @probe_work: worker for uploading nv binary + */ +struct wcnss_ctrl { + struct device *dev; + struct rpmsg_endpoint *channel; + + struct completion ack; + struct completion cbc; + int ack_status; + + struct work_struct probe_work; +}; + +/* message types */ +enum { + WCNSS_VERSION_REQ = 0x01000000, + WCNSS_VERSION_RESP, + WCNSS_DOWNLOAD_NV_REQ, + WCNSS_DOWNLOAD_NV_RESP, + WCNSS_UPLOAD_CAL_REQ, + WCNSS_UPLOAD_CAL_RESP, + WCNSS_DOWNLOAD_CAL_REQ, + WCNSS_DOWNLOAD_CAL_RESP, + WCNSS_VBAT_LEVEL_IND, + WCNSS_BUILD_VERSION_REQ, + WCNSS_BUILD_VERSION_RESP, + WCNSS_PM_CONFIG_REQ, + WCNSS_CBC_COMPLETE_IND, +}; + +/** + * struct wcnss_msg_hdr - common packet header for requests and responses + * @type: packet message type + * @len: total length of the packet, including this header + */ +struct wcnss_msg_hdr { + u32 type; + u32 len; +} __packed; + +/* + * struct wcnss_version_resp - version request response + */ +struct wcnss_version_resp { + struct wcnss_msg_hdr hdr; + u8 major; + u8 minor; + u8 version; + u8 revision; +} __packed; + +/** + * struct wcnss_download_nv_req - firmware fragment request + * @hdr: common packet wcnss_msg_hdr header + * @seq: sequence number of this fragment + * @last: boolean indicator of this being the last fragment of the binary + * @frag_size: length of this fragment + * @fragment: fragment data + */ +struct wcnss_download_nv_req { + struct wcnss_msg_hdr hdr; + u16 seq; + u16 last; + u32 frag_size; + u8 fragment[]; +} __packed; + +/** + * struct wcnss_download_nv_resp - firmware download response + * @hdr: common packet wcnss_msg_hdr header + * @status: boolean to indicate success of the download + */ +struct wcnss_download_nv_resp { + struct wcnss_msg_hdr hdr; + u8 status; +} __packed; + +/** + * wcnss_ctrl_smd_callback() - handler from SMD responses + * @rpdev: remote processor message device pointer + * @data: pointer to the incoming data packet + * @count: size of the incoming data packet + * @priv: unused + * @addr: unused + * + * Handles any incoming packets from the remote WCNSS_CTRL service. 
+ */ +static int wcnss_ctrl_smd_callback(struct rpmsg_device *rpdev, + void *data, + int count, + void *priv, + u32 addr) +{ + struct wcnss_ctrl *wcnss = dev_get_drvdata(&rpdev->dev); + const struct wcnss_download_nv_resp *nvresp; + const struct wcnss_version_resp *version; + const struct wcnss_msg_hdr *hdr = data; + + switch (hdr->type) { + case WCNSS_VERSION_RESP: + if (count != sizeof(*version)) { + dev_err(wcnss->dev, + "invalid size of version response\n"); + break; + } + + version = data; + dev_info(wcnss->dev, "WCNSS Version %d.%d %d.%d\n", + version->major, version->minor, + version->version, version->revision); + + complete(&wcnss->ack); + break; + case WCNSS_DOWNLOAD_NV_RESP: + if (count != sizeof(*nvresp)) { + dev_err(wcnss->dev, + "invalid size of download response\n"); + break; + } + + nvresp = data; + wcnss->ack_status = nvresp->status; + complete(&wcnss->ack); + break; + case WCNSS_CBC_COMPLETE_IND: + dev_dbg(wcnss->dev, "cold boot complete\n"); + complete(&wcnss->cbc); + break; + default: + dev_info(wcnss->dev, "unknown message type %d\n", hdr->type); + break; + } + + return 0; +} + +/** + * wcnss_request_version() - send a version request to WCNSS + * @wcnss: wcnss ctrl driver context + */ +static int wcnss_request_version(struct wcnss_ctrl *wcnss) +{ + struct wcnss_msg_hdr msg; + int ret; + + msg.type = WCNSS_VERSION_REQ; + msg.len = sizeof(msg); + ret = rpmsg_send(wcnss->channel, &msg, sizeof(msg)); + if (ret < 0) + return ret; + + ret = wait_for_completion_timeout(&wcnss->ack, WCNSS_CBC_TIMEOUT); + if (!ret) { + dev_err(wcnss->dev, "timeout waiting for version response\n"); + return -ETIMEDOUT; + } + + return 0; +} + +/** + * wcnss_download_nv() - send nv binary to WCNSS + * @wcnss: wcnss_ctrl state handle + * @expect_cbc: indicator to caller that an cbc event is expected + * + * Returns 0 on success. Negative errno on failure. 
+ */ +static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc) +{ + struct wcnss_download_nv_req *req; + const struct firmware *fw; + struct device *dev = wcnss->dev; + const char *nvbin = NVBIN_FILE; + const void *data; + ssize_t left; + int ret; + + req = kzalloc(sizeof(*req) + NV_FRAGMENT_SIZE, GFP_KERNEL); + if (!req) + return -ENOMEM; + + ret = of_property_read_string(dev->of_node, "firmware-name", &nvbin); + if (ret < 0 && ret != -EINVAL) + goto free_req; + + ret = request_firmware(&fw, nvbin, dev); + if (ret < 0) { + dev_err(dev, "Failed to load nv file %s: %d\n", nvbin, ret); + goto free_req; + } + + data = fw->data; + left = fw->size; + + req->hdr.type = WCNSS_DOWNLOAD_NV_REQ; + req->hdr.len = sizeof(*req) + NV_FRAGMENT_SIZE; + + req->last = 0; + req->frag_size = NV_FRAGMENT_SIZE; + + req->seq = 0; + do { + if (left <= NV_FRAGMENT_SIZE) { + req->last = 1; + req->frag_size = left; + req->hdr.len = sizeof(*req) + left; + } + + memcpy(req->fragment, data, req->frag_size); + + ret = rpmsg_send(wcnss->channel, req, req->hdr.len); + if (ret < 0) { + dev_err(dev, "failed to send smd packet\n"); + goto release_fw; + } + + /* Increment for next fragment */ + req->seq++; + + data += NV_FRAGMENT_SIZE; + left -= NV_FRAGMENT_SIZE; + } while (left > 0); + + ret = wait_for_completion_timeout(&wcnss->ack, WCNSS_REQUEST_TIMEOUT); + if (!ret) { + dev_err(dev, "timeout waiting for nv upload ack\n"); + ret = -ETIMEDOUT; + } else { + *expect_cbc = wcnss->ack_status == WCNSS_ACK_COLD_BOOTING; + ret = 0; + } + +release_fw: + release_firmware(fw); +free_req: + kfree(req); + + return ret; +} + +/** + * qcom_wcnss_open_channel() - open additional SMD channel to WCNSS + * @wcnss: wcnss handle, retrieved from drvdata + * @name: SMD channel name + * @cb: callback to handle incoming data on the channel + * @priv: private data for use in the call-back + */ +struct rpmsg_endpoint *qcom_wcnss_open_channel(void *wcnss, const char *name, rpmsg_rx_cb_t cb, void *priv) +{ + struct rpmsg_channel_info chinfo; + struct wcnss_ctrl *_wcnss = wcnss; + + strscpy(chinfo.name, name, sizeof(chinfo.name)); + chinfo.src = RPMSG_ADDR_ANY; + chinfo.dst = RPMSG_ADDR_ANY; + + return rpmsg_create_ept(_wcnss->channel->rpdev, cb, priv, chinfo); +} +EXPORT_SYMBOL(qcom_wcnss_open_channel); + +static void wcnss_async_probe(struct work_struct *work) +{ + struct wcnss_ctrl *wcnss = container_of(work, struct wcnss_ctrl, probe_work); + bool expect_cbc; + int ret; + + ret = wcnss_request_version(wcnss); + if (ret < 0) + return; + + ret = wcnss_download_nv(wcnss, &expect_cbc); + if (ret < 0) + return; + + /* Wait for pending cold boot completion if indicated by the nv downloader */ + if (expect_cbc) { + ret = wait_for_completion_timeout(&wcnss->cbc, WCNSS_REQUEST_TIMEOUT); + if (!ret) + dev_err(wcnss->dev, "expected cold boot completion\n"); + } + + of_platform_populate(wcnss->dev->of_node, NULL, NULL, wcnss->dev); +} + +static int wcnss_ctrl_probe(struct rpmsg_device *rpdev) +{ + struct wcnss_ctrl *wcnss; + + wcnss = devm_kzalloc(&rpdev->dev, sizeof(*wcnss), GFP_KERNEL); + if (!wcnss) + return -ENOMEM; + + wcnss->dev = &rpdev->dev; + wcnss->channel = rpdev->ept; + + init_completion(&wcnss->ack); + init_completion(&wcnss->cbc); + INIT_WORK(&wcnss->probe_work, wcnss_async_probe); + + dev_set_drvdata(&rpdev->dev, wcnss); + + schedule_work(&wcnss->probe_work); + + return 0; +} + +static void wcnss_ctrl_remove(struct rpmsg_device *rpdev) +{ + struct wcnss_ctrl *wcnss = dev_get_drvdata(&rpdev->dev); + + 
cancel_work_sync(&wcnss->probe_work); +	of_platform_depopulate(&rpdev->dev); +} + +static const struct of_device_id wcnss_ctrl_of_match[] = { +	{ .compatible = "qcom,wcnss", }, +	{} +}; +MODULE_DEVICE_TABLE(of, wcnss_ctrl_of_match); + +static struct rpmsg_driver wcnss_ctrl_driver = { +	.probe = wcnss_ctrl_probe, +	.remove = wcnss_ctrl_remove, +	.callback = wcnss_ctrl_smd_callback, +	.drv = { +		.name = "qcom_wcnss_ctrl", +		.owner = THIS_MODULE, +		.of_match_table = wcnss_ctrl_of_match, +	}, +}; + +module_rpmsg_driver(wcnss_ctrl_driver); + +MODULE_DESCRIPTION("Qualcomm WCNSS control driver"); +MODULE_LICENSE("GPL v2");
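
Editor's note: for context on how the exported qcom_wcnss_open_channel() helper above is typically consumed, here is a minimal sketch of a hypothetical child driver (one of the devices populated by of_platform_populate() in wcnss_async_probe()). The example_* identifiers, the probe flow and the "WLAN_CTRL" channel name are illustrative assumptions, not part of this patch series.

/* Hypothetical client of qcom_wcnss_open_channel(); sketch only. */
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/rpmsg.h>
#include <linux/soc/qcom/wcnss_ctrl.h>

static int example_rx_cb(struct rpmsg_device *rpdev, void *data,
			 int len, void *priv, u32 addr)
{
	/* Handle payloads arriving on the additional channel. */
	return 0;
}

static int example_probe(struct platform_device *pdev)
{
	/* The parent wcnss_ctrl device keeps its context in drvdata. */
	void *wcnss = dev_get_drvdata(pdev->dev.parent);
	struct rpmsg_endpoint *ept;

	ept = qcom_wcnss_open_channel(wcnss, "WLAN_CTRL", example_rx_cb, pdev);
	if (IS_ERR(ept))
		return PTR_ERR(ept);

	/* Use rpmsg_send(ept, ...) as needed; rpmsg_destroy_ept(ept) on remove. */
	return 0;
}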