path: root/drivers/soc/qcom
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit    2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree      848558de17fb3008cdf4d861b01ac7781903ce39  /drivers/soc/qcom
parent    Initial commit. (diff)
Adding upstream version 6.1.76. (upstream/6.1.76, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/soc/qcom')
-rw-r--r--  drivers/soc/qcom/Kconfig | 251
-rw-r--r--  drivers/soc/qcom/Makefile | 31
-rw-r--r--  drivers/soc/qcom/apr.c | 736
-rw-r--r--  drivers/soc/qcom/cmd-db.c | 366
-rw-r--r--  drivers/soc/qcom/cpr.c | 1757
-rw-r--r--  drivers/soc/qcom/icc-bwmon.c | 702
-rw-r--r--  drivers/soc/qcom/kryo-l2-accessors.c | 57
-rw-r--r--  drivers/soc/qcom/llcc-qcom.c | 915
-rw-r--r--  drivers/soc/qcom/mdt_loader.c | 420
-rw-r--r--  drivers/soc/qcom/ocmem.c | 451
-rw-r--r--  drivers/soc/qcom/pdr_interface.c | 755
-rw-r--r--  drivers/soc/qcom/pdr_internal.h | 379
-rw-r--r--  drivers/soc/qcom/qcom-geni-se.c | 920
-rw-r--r--  drivers/soc/qcom/qcom_aoss.c | 574
-rw-r--r--  drivers/soc/qcom/qcom_gsbi.c | 249
-rw-r--r--  drivers/soc/qcom/qcom_stats.c | 293
-rw-r--r--  drivers/soc/qcom/qmi_encdec.c | 816
-rw-r--r--  drivers/soc/qcom/qmi_interface.c | 851
-rw-r--r--  drivers/soc/qcom/rmtfs_mem.c | 340
-rw-r--r--  drivers/soc/qcom/rpmh-internal.h | 136
-rw-r--r--  drivers/soc/qcom/rpmh-rsc.c | 1013
-rw-r--r--  drivers/soc/qcom/rpmh.c | 507
-rw-r--r--  drivers/soc/qcom/rpmhpd.c | 727
-rw-r--r--  drivers/soc/qcom/rpmpd.c | 695
-rw-r--r--  drivers/soc/qcom/smd-rpm.c | 279
-rw-r--r--  drivers/soc/qcom/smem.c | 1197
-rw-r--r--  drivers/soc/qcom/smem_state.c | 230
-rw-r--r--  drivers/soc/qcom/smp2p.c | 700
-rw-r--r--  drivers/soc/qcom/smsm.c | 648
-rw-r--r--  drivers/soc/qcom/socinfo.c | 694
-rw-r--r--  drivers/soc/qcom/spm.c | 293
-rw-r--r--  drivers/soc/qcom/trace-rpmh.h | 82
-rw-r--r--  drivers/soc/qcom/wcnss_ctrl.c | 365
33 files changed, 18429 insertions, 0 deletions
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
new file mode 100644
index 000000000..ae504c43d
--- /dev/null
+++ b/drivers/soc/qcom/Kconfig
@@ -0,0 +1,251 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# QCOM SoC drivers
+#
+menu "Qualcomm SoC drivers"
+
+config QCOM_AOSS_QMP
+ tristate "Qualcomm AOSS Driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on MAILBOX
+ depends on COMMON_CLK && PM
+ select PM_GENERIC_DOMAINS
+ help
+ This driver provides the means of communicating with and controlling
+ the low-power state for resources related to the remoteproc
+ subsystems as well as controlling the debug clocks exposed by the Always On
+ Subsystem (AOSS) using Qualcomm Messaging Protocol (QMP).
+
+config QCOM_COMMAND_DB
+ tristate "Qualcomm Command DB"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on OF_RESERVED_MEM
+ help
+ Command DB queries shared memory by key string for shared system
+ resources. Platform drivers that need to set the state of a shared
+ resource on an RPM-hardened platform must use this database to get
+ the SoC-specific identifier and information for the shared resources.
+
+config QCOM_CPR
+ tristate "QCOM Core Power Reduction (CPR) support"
+ depends on ARCH_QCOM && HAS_IOMEM
+ select PM_OPP
+ select REGMAP
+ help
+ Say Y here to enable support for the CPR hardware found on Qualcomm
+ SoCs like QCS404.
+
+ This driver populates the CPU OPP tables and makes adjustments to the
+ tables based on feedback from the CPR hardware. If you want to do
+ CPU frequency scaling, say Y here.
+
+ To compile this driver as a module, choose M here: the module will
+ be called qcom-cpr.
+
+config QCOM_GENI_SE
+ tristate "QCOM GENI Serial Engine Driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ help
+ This driver is used to manage Generic Interface (GENI) firmware based
+ Qualcomm Technologies, Inc. Universal Peripheral (QUP) Wrapper. This
+ driver is also used to manage the common aspects of multiple Serial
+ Engines present in the QUP.
+
+config QCOM_GSBI
+ tristate "QCOM General Serial Bus Interface"
+ depends on ARCH_QCOM || COMPILE_TEST
+ select MFD_SYSCON
+ help
+ Say y here to enable GSBI support. The GSBI provides control
+ functions for connecting the underlying serial UART, SPI, and I2C
+ devices to the output pins.
+
+config QCOM_LLCC
+ tristate "Qualcomm Technologies, Inc. LLCC driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+ Qualcomm Technologies, Inc. platform-specific
+ Last Level Cache Controller (LLCC) driver for platforms such as
+ SDM845. This provides interfaces to clients that use the LLCC.
+ Say yes here to enable the LLCC slice driver.
+
+config QCOM_KRYO_L2_ACCESSORS
+ bool
+ depends on ARCH_QCOM && ARM64 || COMPILE_TEST
+
+config QCOM_MDT_LOADER
+ tristate
+ select QCOM_SCM
+
+config QCOM_OCMEM
+ tristate "Qualcomm On Chip Memory (OCMEM) driver"
+ depends on ARCH_QCOM
+ select QCOM_SCM
+ help
+ The On Chip Memory (OCMEM) allocator allows various clients to
+ allocate memory from OCMEM based on performance, latency and power
+ requirements. This is typically used by the GPU, camera/video, and
+ audio components on some Snapdragon SoCs.
+
+config QCOM_PDR_HELPERS
+ tristate
+ select QCOM_QMI_HELPERS
+
+config QCOM_QMI_HELPERS
+ tristate
+ depends on NET
+
+config QCOM_RMTFS_MEM
+ tristate "Qualcomm Remote Filesystem memory driver"
+ depends on ARCH_QCOM
+ select QCOM_SCM
+ help
+ The Qualcomm remote filesystem memory driver is used for allocating
+ and exposing regions of shared memory with remote processors for the
+ purpose of exchanging sector-data between the remote filesystem
+ service and its clients.
+
+ Say y here if you intend to boot the modem remoteproc.
+
+config QCOM_RPMH
+ tristate "Qualcomm RPM-Hardened (RPMH) Communication"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on (QCOM_COMMAND_DB || !QCOM_COMMAND_DB)
+ help
+ Support for communication with the hardened-RPM blocks in
+ Qualcomm Technologies Inc (QTI) SoCs. RPMH communication uses an
+ internal bus to transmit state requests for shared resources. A set
+ of hardware components aggregate requests for these resources and
+ help apply the aggregated state on the resource.
+
+config QCOM_RPMHPD
+ tristate "Qualcomm RPMh Power domain driver"
+ depends on QCOM_RPMH && QCOM_COMMAND_DB
+ help
+ QCOM RPMh Power domain driver to support power-domains with
+ performance states. The driver communicates a performance state
+ value to RPMh which then translates it into corresponding voltage
+ for the voltage rail.
+
+config QCOM_RPMPD
+ tristate "Qualcomm RPM Power domain driver"
+ depends on PM && OF
+ depends on QCOM_SMD_RPM
+ select PM_GENERIC_DOMAINS
+ select PM_GENERIC_DOMAINS_OF
+ help
+ QCOM RPM Power domain driver to support power-domains with
+ performance states. The driver communicates a performance state
+ value to RPM which then translates it into corresponding voltage
+ for the voltage rail.
+
+config QCOM_SMEM
+ tristate "Qualcomm Shared Memory Manager (SMEM)"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on HWSPINLOCK
+ help
+ Say y here to enable support for the Qualcomm Shared Memory Manager.
+ The driver provides an interface to items in a heap shared among all
+ processors in a Qualcomm platform.
+
+config QCOM_SMD_RPM
+ tristate "Qualcomm Resource Power Manager (RPM) over SMD"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on RPMSG
+ help
+ If you say yes to this option, support will be included for the
+ Resource Power Manager system found in the Qualcomm 8974 based
+ devices.
+
+ This is required to access many regulators, clocks and bus
+ frequencies controlled by the RPM on these devices.
+
+ Say M here if you want to include support for the Qualcomm RPM as a
+ module. This will build a module called "qcom-smd-rpm".
+
+config QCOM_SMEM_STATE
+ bool
+
+config QCOM_SMP2P
+ tristate "Qualcomm Shared Memory Point to Point support"
+ depends on MAILBOX
+ depends on QCOM_SMEM
+ select QCOM_SMEM_STATE
+ select IRQ_DOMAIN
+ help
+ Say yes here to support the Qualcomm Shared Memory Point to Point
+ protocol.
+
+config QCOM_SMSM
+ tristate "Qualcomm Shared Memory State Machine"
+ depends on QCOM_SMEM
+ select QCOM_SMEM_STATE
+ select IRQ_DOMAIN
+ help
+ Say yes here to support the Qualcomm Shared Memory State Machine.
+ The state machine is represented by bits in shared memory.
+
+config QCOM_SOCINFO
+ tristate "Qualcomm socinfo driver"
+ depends on QCOM_SMEM
+ select SOC_BUS
+ help
+ Say yes here to support the Qualcomm socinfo driver, providing
+ information about the SoC to user space.
+
+config QCOM_SPM
+ tristate "Qualcomm Subsystem Power Manager (SPM)"
+ depends on ARCH_QCOM || COMPILE_TEST
+ select QCOM_SCM
+ help
+ Enable the support for the Qualcomm Subsystem Power Manager, used
+ to manage cores, L2 low power modes and to configure the internal
+ Adaptive Voltage Scaler parameters, where supported.
+
+config QCOM_STATS
+ tristate "Qualcomm Technologies, Inc. (QTI) Sleep stats driver"
+ depends on (ARCH_QCOM && DEBUG_FS) || COMPILE_TEST
+ depends on QCOM_SMEM
+ help
+ Qualcomm Technologies, Inc. (QTI) sleep stats driver that reads
+ the shared memory exported by the remote processor, which holds
+ statistics about various SoC-level low power modes, and exposes
+ them through a debugfs interface.
+
+config QCOM_WCNSS_CTRL
+ tristate "Qualcomm WCNSS control driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on RPMSG
+ help
+ Client driver for the WCNSS_CTRL SMD channel, used to download nv
+ firmware to a newly booted WCNSS chip.
+
+config QCOM_APR
+ tristate "Qualcomm APR/GPR Bus (Asynchronous/Generic Packet Router)"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on RPMSG
+ depends on NET
+ select QCOM_PDR_HELPERS
+ help
+ Enable APR IPC protocol support between the
+ application processor and the QDSP6. APR is
+ used by the audio driver to configure the QDSP6
+ ASM, ADM and AFE modules.
+
+config QCOM_ICC_BWMON
+ tristate "QCOM Interconnect Bandwidth Monitor driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ select PM_OPP
+ select REGMAP_MMIO
+ help
+ Sets up a driver that monitors bandwidth on various interconnects
+ and, based on the measurements, votes for interconnect bandwidth,
+ adjusting their speed to the current demand.
+ The current implementation supports BWMON v4, used for example on
+ SDM845 to measure bandwidth between the CPU (gladiator_noc) and the
+ Last Level Cache (memnoc). Using this BWMON allows some of the fixed
+ bandwidth votes to be removed from cpufreq (CPU nodes), thus achieving
+ high memory throughput even at lower CPU frequencies.
+
+endmenu
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
new file mode 100644
index 000000000..d66604aff
--- /dev/null
+++ b/drivers/soc/qcom/Makefile
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0
+CFLAGS_rpmh-rsc.o := -I$(src)
+obj-$(CONFIG_QCOM_AOSS_QMP) += qcom_aoss.o
+obj-$(CONFIG_QCOM_GENI_SE) += qcom-geni-se.o
+obj-$(CONFIG_QCOM_COMMAND_DB) += cmd-db.o
+obj-$(CONFIG_QCOM_CPR) += cpr.o
+obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
+obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o
+obj-$(CONFIG_QCOM_OCMEM) += ocmem.o
+obj-$(CONFIG_QCOM_PDR_HELPERS) += pdr_interface.o
+obj-$(CONFIG_QCOM_QMI_HELPERS) += qmi_helpers.o
+qmi_helpers-y += qmi_encdec.o qmi_interface.o
+obj-$(CONFIG_QCOM_RMTFS_MEM) += rmtfs_mem.o
+obj-$(CONFIG_QCOM_RPMH) += qcom_rpmh.o
+qcom_rpmh-y += rpmh-rsc.o
+qcom_rpmh-y += rpmh.o
+obj-$(CONFIG_QCOM_SMD_RPM) += smd-rpm.o
+obj-$(CONFIG_QCOM_SMEM) += smem.o
+obj-$(CONFIG_QCOM_SMEM_STATE) += smem_state.o
+obj-$(CONFIG_QCOM_SMP2P) += smp2p.o
+obj-$(CONFIG_QCOM_SMSM) += smsm.o
+obj-$(CONFIG_QCOM_SOCINFO) += socinfo.o
+obj-$(CONFIG_QCOM_SPM) += spm.o
+obj-$(CONFIG_QCOM_STATS) += qcom_stats.o
+obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o
+obj-$(CONFIG_QCOM_APR) += apr.o
+obj-$(CONFIG_QCOM_LLCC) += llcc-qcom.o
+obj-$(CONFIG_QCOM_RPMHPD) += rpmhpd.o
+obj-$(CONFIG_QCOM_RPMPD) += rpmpd.o
+obj-$(CONFIG_QCOM_KRYO_L2_ACCESSORS) += kryo-l2-accessors.o
+obj-$(CONFIG_QCOM_ICC_BWMON) += icc-bwmon.o
diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c
new file mode 100644
index 000000000..d51abb462
--- /dev/null
+++ b/drivers/soc/qcom/apr.c
@@ -0,0 +1,736 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+// Copyright (c) 2018, Linaro Limited
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/idr.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/of_device.h>
+#include <linux/soc/qcom/apr.h>
+#include <linux/soc/qcom/pdr.h>
+#include <linux/rpmsg.h>
+#include <linux/of.h>
+
+enum {
+ PR_TYPE_APR = 0,
+ PR_TYPE_GPR,
+};
+
+/* Arbitrary values which do not collide with static modules */
+#define GPR_DYNAMIC_PORT_START 0x10000000
+#define GPR_DYNAMIC_PORT_END 0x20000000
+
+struct packet_router {
+ struct rpmsg_endpoint *ch;
+ struct device *dev;
+ spinlock_t svcs_lock;
+ spinlock_t rx_lock;
+ struct idr svcs_idr;
+ int dest_domain_id;
+ int type;
+ struct pdr_handle *pdr;
+ struct workqueue_struct *rxwq;
+ struct work_struct rx_work;
+ struct list_head rx_list;
+};
+
+struct apr_rx_buf {
+ struct list_head node;
+ int len;
+ uint8_t buf[];
+};
+
+/**
+ * apr_send_pkt() - Send an APR message from an APR device
+ *
+ * @adev: Pointer to previously registered apr device.
+ * @pkt: Pointer to apr packet to send
+ *
+ * Return: packet size on success, a negative errno on failure.
+ */
+int apr_send_pkt(struct apr_device *adev, struct apr_pkt *pkt)
+{
+ struct packet_router *apr = dev_get_drvdata(adev->dev.parent);
+ struct apr_hdr *hdr;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&adev->svc.lock, flags);
+
+ hdr = &pkt->hdr;
+ hdr->src_domain = APR_DOMAIN_APPS;
+ hdr->src_svc = adev->svc.id;
+ hdr->dest_domain = adev->domain_id;
+ hdr->dest_svc = adev->svc.id;
+
+ ret = rpmsg_trysend(apr->ch, pkt, hdr->pkt_size);
+ spin_unlock_irqrestore(&adev->svc.lock, flags);
+
+ return ret ? ret : hdr->pkt_size;
+}
+EXPORT_SYMBOL_GPL(apr_send_pkt);
+
+void gpr_free_port(gpr_port_t *port)
+{
+ struct packet_router *gpr = port->pr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gpr->svcs_lock, flags);
+ idr_remove(&gpr->svcs_idr, port->id);
+ spin_unlock_irqrestore(&gpr->svcs_lock, flags);
+
+ kfree(port);
+}
+EXPORT_SYMBOL_GPL(gpr_free_port);
+
+gpr_port_t *gpr_alloc_port(struct apr_device *gdev, struct device *dev,
+ gpr_port_cb cb, void *priv)
+{
+ struct packet_router *pr = dev_get_drvdata(gdev->dev.parent);
+ gpr_port_t *port;
+ struct pkt_router_svc *svc;
+ int id;
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return ERR_PTR(-ENOMEM);
+
+ svc = port;
+ svc->callback = cb;
+ svc->pr = pr;
+ svc->priv = priv;
+ svc->dev = dev;
+ spin_lock_init(&svc->lock);
+
+ spin_lock(&pr->svcs_lock);
+ id = idr_alloc_cyclic(&pr->svcs_idr, svc, GPR_DYNAMIC_PORT_START,
+ GPR_DYNAMIC_PORT_END, GFP_ATOMIC);
+ if (id < 0) {
+ dev_err(dev, "Unable to allocate dynamic GPR src port\n");
+ kfree(port);
+ spin_unlock(&pr->svcs_lock);
+ return ERR_PTR(id);
+ }
+
+ svc->id = id;
+ spin_unlock(&pr->svcs_lock);
+
+ return port;
+}
+EXPORT_SYMBOL_GPL(gpr_alloc_port);
+
+static int pkt_router_send_svc_pkt(struct pkt_router_svc *svc, struct gpr_pkt *pkt)
+{
+ struct packet_router *pr = svc->pr;
+ struct gpr_hdr *hdr;
+ unsigned long flags;
+ int ret;
+
+ hdr = &pkt->hdr;
+
+ spin_lock_irqsave(&svc->lock, flags);
+ ret = rpmsg_trysend(pr->ch, pkt, hdr->pkt_size);
+ spin_unlock_irqrestore(&svc->lock, flags);
+
+ return ret ? ret : hdr->pkt_size;
+}
+
+int gpr_send_pkt(struct apr_device *gdev, struct gpr_pkt *pkt)
+{
+ return pkt_router_send_svc_pkt(&gdev->svc, pkt);
+}
+EXPORT_SYMBOL_GPL(gpr_send_pkt);
+
+int gpr_send_port_pkt(gpr_port_t *port, struct gpr_pkt *pkt)
+{
+ return pkt_router_send_svc_pkt(port, pkt);
+}
+EXPORT_SYMBOL_GPL(gpr_send_port_pkt);
+
+static void apr_dev_release(struct device *dev)
+{
+ struct apr_device *adev = to_apr_device(dev);
+
+ kfree(adev);
+}
+
+static int apr_callback(struct rpmsg_device *rpdev, void *buf,
+ int len, void *priv, u32 addr)
+{
+ struct packet_router *apr = dev_get_drvdata(&rpdev->dev);
+ struct apr_rx_buf *abuf;
+ unsigned long flags;
+
+ if (len <= APR_HDR_SIZE) {
+ dev_err(apr->dev, "APR: Improper apr pkt received:%p %d\n",
+ buf, len);
+ return -EINVAL;
+ }
+
+ abuf = kzalloc(sizeof(*abuf) + len, GFP_ATOMIC);
+ if (!abuf)
+ return -ENOMEM;
+
+ abuf->len = len;
+ memcpy(abuf->buf, buf, len);
+
+ spin_lock_irqsave(&apr->rx_lock, flags);
+ list_add_tail(&abuf->node, &apr->rx_list);
+ spin_unlock_irqrestore(&apr->rx_lock, flags);
+
+ queue_work(apr->rxwq, &apr->rx_work);
+
+ return 0;
+}
+
+static int apr_do_rx_callback(struct packet_router *apr, struct apr_rx_buf *abuf)
+{
+ uint16_t hdr_size, msg_type, ver, svc_id;
+ struct pkt_router_svc *svc;
+ struct apr_device *adev;
+ struct apr_driver *adrv = NULL;
+ struct apr_resp_pkt resp;
+ struct apr_hdr *hdr;
+ unsigned long flags;
+ void *buf = abuf->buf;
+ int len = abuf->len;
+
+ hdr = buf;
+ ver = APR_HDR_FIELD_VER(hdr->hdr_field);
+ if (ver > APR_PKT_VER + 1)
+ return -EINVAL;
+
+ hdr_size = APR_HDR_FIELD_SIZE_BYTES(hdr->hdr_field);
+ if (hdr_size < APR_HDR_SIZE) {
+ dev_err(apr->dev, "APR: Wrong hdr size:%d\n", hdr_size);
+ return -EINVAL;
+ }
+
+ if (hdr->pkt_size < APR_HDR_SIZE || hdr->pkt_size != len) {
+ dev_err(apr->dev, "APR: Wrong packet size\n");
+ return -EINVAL;
+ }
+
+ msg_type = APR_HDR_FIELD_MT(hdr->hdr_field);
+ if (msg_type >= APR_MSG_TYPE_MAX) {
+ dev_err(apr->dev, "APR: Wrong message type: %d\n", msg_type);
+ return -EINVAL;
+ }
+
+ if (hdr->src_domain >= APR_DOMAIN_MAX ||
+ hdr->dest_domain >= APR_DOMAIN_MAX ||
+ hdr->src_svc >= APR_SVC_MAX ||
+ hdr->dest_svc >= APR_SVC_MAX) {
+ dev_err(apr->dev, "APR: Wrong APR header\n");
+ return -EINVAL;
+ }
+
+ svc_id = hdr->dest_svc;
+ spin_lock_irqsave(&apr->svcs_lock, flags);
+ svc = idr_find(&apr->svcs_idr, svc_id);
+ if (svc && svc->dev->driver) {
+ adev = svc_to_apr_device(svc);
+ adrv = to_apr_driver(adev->dev.driver);
+ }
+ spin_unlock_irqrestore(&apr->svcs_lock, flags);
+
+ if (!adrv || !adev) {
+ dev_err(apr->dev, "APR: service is not registered (%d)\n",
+ svc_id);
+ return -EINVAL;
+ }
+
+ resp.hdr = *hdr;
+ resp.payload_size = hdr->pkt_size - hdr_size;
+
+ /*
+ * NOTE: hdr_size is not the same as APR_HDR_SIZE, as the remote can include
+ * optional headers in apr_hdr which should be ignored
+ */
+ if (resp.payload_size > 0)
+ resp.payload = buf + hdr_size;
+
+ adrv->callback(adev, &resp);
+
+ return 0;
+}
+
+static int gpr_do_rx_callback(struct packet_router *gpr, struct apr_rx_buf *abuf)
+{
+ uint16_t hdr_size, ver;
+ struct pkt_router_svc *svc = NULL;
+ struct gpr_resp_pkt resp;
+ struct gpr_hdr *hdr;
+ unsigned long flags;
+ void *buf = abuf->buf;
+ int len = abuf->len;
+
+ hdr = buf;
+ ver = hdr->version;
+ if (ver > GPR_PKT_VER + 1)
+ return -EINVAL;
+
+ hdr_size = hdr->hdr_size;
+ if (hdr_size < GPR_PKT_HEADER_WORD_SIZE) {
+ dev_err(gpr->dev, "GPR: Wrong hdr size:%d\n", hdr_size);
+ return -EINVAL;
+ }
+
+ if (hdr->pkt_size < GPR_PKT_HEADER_BYTE_SIZE || hdr->pkt_size != len) {
+ dev_err(gpr->dev, "GPR: Wrong packet size\n");
+ return -EINVAL;
+ }
+
+ resp.hdr = *hdr;
+ resp.payload_size = hdr->pkt_size - (hdr_size * 4);
+
+ /*
+ * NOTE: hdr_size is not the same as GPR_HDR_SIZE, as the remote can include
+ * optional headers in gpr_hdr which should be ignored
+ */
+ if (resp.payload_size > 0)
+ resp.payload = buf + (hdr_size * 4);
+
+
+ spin_lock_irqsave(&gpr->svcs_lock, flags);
+ svc = idr_find(&gpr->svcs_idr, hdr->dest_port);
+ spin_unlock_irqrestore(&gpr->svcs_lock, flags);
+
+ if (!svc) {
+ dev_err(gpr->dev, "GPR: Port(%x) is not registered\n",
+ hdr->dest_port);
+ return -EINVAL;
+ }
+
+ if (svc->callback)
+ svc->callback(&resp, svc->priv, 0);
+
+ return 0;
+}
+
+static void apr_rxwq(struct work_struct *work)
+{
+ struct packet_router *apr = container_of(work, struct packet_router, rx_work);
+ struct apr_rx_buf *abuf, *b;
+ unsigned long flags;
+
+ if (!list_empty(&apr->rx_list)) {
+ list_for_each_entry_safe(abuf, b, &apr->rx_list, node) {
+ switch (apr->type) {
+ case PR_TYPE_APR:
+ apr_do_rx_callback(apr, abuf);
+ break;
+ case PR_TYPE_GPR:
+ gpr_do_rx_callback(apr, abuf);
+ break;
+ default:
+ break;
+ }
+ spin_lock_irqsave(&apr->rx_lock, flags);
+ list_del(&abuf->node);
+ spin_unlock_irqrestore(&apr->rx_lock, flags);
+ kfree(abuf);
+ }
+ }
+}
+
+static int apr_device_match(struct device *dev, struct device_driver *drv)
+{
+ struct apr_device *adev = to_apr_device(dev);
+ struct apr_driver *adrv = to_apr_driver(drv);
+ const struct apr_device_id *id = adrv->id_table;
+
+ /* Attempt an OF style match first */
+ if (of_driver_match_device(dev, drv))
+ return 1;
+
+ if (!id)
+ return 0;
+
+ while (id->domain_id != 0 || id->svc_id != 0) {
+ if (id->domain_id == adev->domain_id &&
+ id->svc_id == adev->svc.id)
+ return 1;
+ id++;
+ }
+
+ return 0;
+}
+
+static int apr_device_probe(struct device *dev)
+{
+ struct apr_device *adev = to_apr_device(dev);
+ struct apr_driver *adrv = to_apr_driver(dev->driver);
+ int ret;
+
+ ret = adrv->probe(adev);
+ if (!ret)
+ adev->svc.callback = adrv->gpr_callback;
+
+ return ret;
+}
+
+static void apr_device_remove(struct device *dev)
+{
+ struct apr_device *adev = to_apr_device(dev);
+ struct apr_driver *adrv = to_apr_driver(dev->driver);
+ struct packet_router *apr = dev_get_drvdata(adev->dev.parent);
+
+ if (adrv->remove)
+ adrv->remove(adev);
+ spin_lock(&apr->svcs_lock);
+ idr_remove(&apr->svcs_idr, adev->svc.id);
+ spin_unlock(&apr->svcs_lock);
+}
+
+static int apr_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct apr_device *adev = to_apr_device(dev);
+ int ret;
+
+ ret = of_device_uevent_modalias(dev, env);
+ if (ret != -ENODEV)
+ return ret;
+
+ return add_uevent_var(env, "MODALIAS=apr:%s", adev->name);
+}
+
+struct bus_type aprbus = {
+ .name = "aprbus",
+ .match = apr_device_match,
+ .probe = apr_device_probe,
+ .uevent = apr_uevent,
+ .remove = apr_device_remove,
+};
+EXPORT_SYMBOL_GPL(aprbus);
+
+static int apr_add_device(struct device *dev, struct device_node *np,
+ u32 svc_id, u32 domain_id)
+{
+ struct packet_router *apr = dev_get_drvdata(dev);
+ struct apr_device *adev = NULL;
+ struct pkt_router_svc *svc;
+ int ret;
+
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return -ENOMEM;
+
+ adev->svc_id = svc_id;
+ svc = &adev->svc;
+
+ svc->id = svc_id;
+ svc->pr = apr;
+ svc->priv = adev;
+ svc->dev = dev;
+ spin_lock_init(&svc->lock);
+
+ adev->domain_id = domain_id;
+
+ if (np)
+ snprintf(adev->name, APR_NAME_SIZE, "%pOFn", np);
+
+ switch (apr->type) {
+ case PR_TYPE_APR:
+ dev_set_name(&adev->dev, "aprsvc:%s:%x:%x", adev->name,
+ domain_id, svc_id);
+ break;
+ case PR_TYPE_GPR:
+ dev_set_name(&adev->dev, "gprsvc:%s:%x:%x", adev->name,
+ domain_id, svc_id);
+ break;
+ default:
+ break;
+ }
+
+ adev->dev.bus = &aprbus;
+ adev->dev.parent = dev;
+ adev->dev.of_node = np;
+ adev->dev.release = apr_dev_release;
+ adev->dev.driver = NULL;
+
+ spin_lock(&apr->svcs_lock);
+ ret = idr_alloc(&apr->svcs_idr, svc, svc_id, svc_id + 1, GFP_ATOMIC);
+ spin_unlock(&apr->svcs_lock);
+ if (ret < 0) {
+ dev_err(dev, "idr_alloc failed: %d\n", ret);
+ goto out;
+ }
+
+ /* Protection domain is optional, it does not exist on older platforms */
+ ret = of_property_read_string_index(np, "qcom,protection-domain",
+ 1, &adev->service_path);
+ if (ret < 0 && ret != -EINVAL) {
+ dev_err(dev, "Failed to read second value of qcom,protection-domain\n");
+ goto out;
+ }
+
+ dev_info(dev, "Adding APR/GPR dev: %s\n", dev_name(&adev->dev));
+
+ ret = device_register(&adev->dev);
+ if (ret) {
+ dev_err(dev, "device_register failed: %d\n", ret);
+ put_device(&adev->dev);
+ }
+
+out:
+ return ret;
+}
+
+static int of_apr_add_pd_lookups(struct device *dev)
+{
+ const char *service_name, *service_path;
+ struct packet_router *apr = dev_get_drvdata(dev);
+ struct device_node *node;
+ struct pdr_service *pds;
+ int ret;
+
+ for_each_child_of_node(dev->of_node, node) {
+ ret = of_property_read_string_index(node, "qcom,protection-domain",
+ 0, &service_name);
+ if (ret < 0)
+ continue;
+
+ ret = of_property_read_string_index(node, "qcom,protection-domain",
+ 1, &service_path);
+ if (ret < 0) {
+ dev_err(dev, "pdr service path missing: %d\n", ret);
+ of_node_put(node);
+ return ret;
+ }
+
+ pds = pdr_add_lookup(apr->pdr, service_name, service_path);
+ if (IS_ERR(pds) && PTR_ERR(pds) != -EALREADY) {
+ dev_err(dev, "pdr add lookup failed: %ld\n", PTR_ERR(pds));
+ of_node_put(node);
+ return PTR_ERR(pds);
+ }
+ }
+
+ return 0;
+}
+
+static void of_register_apr_devices(struct device *dev, const char *svc_path)
+{
+ struct packet_router *apr = dev_get_drvdata(dev);
+ struct device_node *node;
+ const char *service_path;
+ int ret;
+
+ for_each_child_of_node(dev->of_node, node) {
+ u32 svc_id;
+ u32 domain_id;
+
+ /*
+ * This function is called with svc_path NULL during
+ * apr_probe(), in which case we register any apr devices
+ * without a qcom,protection-domain specified.
+ *
+ * Then as the protection domains becomes available
+ * (if applicable) this function is again called, but with
+ * svc_path representing the service becoming available. In
+ * this case we register any apr devices with a matching
+ * qcom,protection-domain.
+ */
+
+ ret = of_property_read_string_index(node, "qcom,protection-domain",
+ 1, &service_path);
+ if (svc_path) {
+ /* skip APR services that are PD independent */
+ if (ret)
+ continue;
+
+ /* skip APR services whose PD paths don't match */
+ if (strcmp(service_path, svc_path))
+ continue;
+ } else {
+ /* skip APR services whose PD lookups are registered */
+ if (ret == 0)
+ continue;
+ }
+
+ if (of_property_read_u32(node, "reg", &svc_id))
+ continue;
+
+ domain_id = apr->dest_domain_id;
+
+ if (apr_add_device(dev, node, svc_id, domain_id))
+ dev_err(dev, "Failed to add apr %d svc\n", svc_id);
+ }
+}
+
+static int apr_remove_device(struct device *dev, void *svc_path)
+{
+ struct apr_device *adev = to_apr_device(dev);
+
+ if (svc_path && adev->service_path) {
+ if (!strcmp(adev->service_path, (char *)svc_path))
+ device_unregister(&adev->dev);
+ } else {
+ device_unregister(&adev->dev);
+ }
+
+ return 0;
+}
+
+static void apr_pd_status(int state, char *svc_path, void *priv)
+{
+ struct packet_router *apr = (struct packet_router *)priv;
+
+ switch (state) {
+ case SERVREG_SERVICE_STATE_UP:
+ of_register_apr_devices(apr->dev, svc_path);
+ break;
+ case SERVREG_SERVICE_STATE_DOWN:
+ device_for_each_child(apr->dev, svc_path, apr_remove_device);
+ break;
+ }
+}
+
+static int apr_probe(struct rpmsg_device *rpdev)
+{
+ struct device *dev = &rpdev->dev;
+ struct packet_router *apr;
+ int ret;
+
+ apr = devm_kzalloc(dev, sizeof(*apr), GFP_KERNEL);
+ if (!apr)
+ return -ENOMEM;
+
+ ret = of_property_read_u32(dev->of_node, "qcom,domain", &apr->dest_domain_id);
+
+ if (of_device_is_compatible(dev->of_node, "qcom,gpr")) {
+ apr->type = PR_TYPE_GPR;
+ } else {
+ if (ret) /* try deprecated apr-domain property */
+ ret = of_property_read_u32(dev->of_node, "qcom,apr-domain",
+ &apr->dest_domain_id);
+ apr->type = PR_TYPE_APR;
+ }
+
+ if (ret) {
+ dev_err(dev, "Domain ID not specified in DT\n");
+ return ret;
+ }
+
+ dev_set_drvdata(dev, apr);
+ apr->ch = rpdev->ept;
+ apr->dev = dev;
+ apr->rxwq = create_singlethread_workqueue("qcom_apr_rx");
+ if (!apr->rxwq) {
+ dev_err(apr->dev, "Failed to start Rx WQ\n");
+ return -ENOMEM;
+ }
+ INIT_WORK(&apr->rx_work, apr_rxwq);
+
+ apr->pdr = pdr_handle_alloc(apr_pd_status, apr);
+ if (IS_ERR(apr->pdr)) {
+ dev_err(dev, "Failed to init PDR handle\n");
+ ret = PTR_ERR(apr->pdr);
+ goto destroy_wq;
+ }
+
+ INIT_LIST_HEAD(&apr->rx_list);
+ spin_lock_init(&apr->rx_lock);
+ spin_lock_init(&apr->svcs_lock);
+ idr_init(&apr->svcs_idr);
+
+ ret = of_apr_add_pd_lookups(dev);
+ if (ret)
+ goto handle_release;
+
+ of_register_apr_devices(dev, NULL);
+
+ return 0;
+
+handle_release:
+ pdr_handle_release(apr->pdr);
+destroy_wq:
+ destroy_workqueue(apr->rxwq);
+ return ret;
+}
+
+static void apr_remove(struct rpmsg_device *rpdev)
+{
+ struct packet_router *apr = dev_get_drvdata(&rpdev->dev);
+
+ pdr_handle_release(apr->pdr);
+ device_for_each_child(&rpdev->dev, NULL, apr_remove_device);
+ destroy_workqueue(apr->rxwq);
+}
+
+/*
+ * __apr_driver_register() - Client driver registration with aprbus
+ *
+ * @drv: Client driver to be associated with the client device.
+ * @owner: owning module/driver
+ *
+ * This API will register the client driver with the aprbus.
+ * It is called from the driver's module-init function.
+ */
+int __apr_driver_register(struct apr_driver *drv, struct module *owner)
+{
+ drv->driver.bus = &aprbus;
+ drv->driver.owner = owner;
+
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(__apr_driver_register);
+
+/*
+ * apr_driver_unregister() - Undo effect of apr_driver_register
+ *
+ * @drv: Client driver to be unregistered
+ */
+void apr_driver_unregister(struct apr_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(apr_driver_unregister);
+
+static const struct of_device_id pkt_router_of_match[] = {
+ { .compatible = "qcom,apr"},
+ { .compatible = "qcom,apr-v2"},
+ { .compatible = "qcom,gpr"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, pkt_router_of_match);
+
+static struct rpmsg_driver packet_router_driver = {
+ .probe = apr_probe,
+ .remove = apr_remove,
+ .callback = apr_callback,
+ .drv = {
+ .name = "qcom,apr",
+ .of_match_table = pkt_router_of_match,
+ },
+};
+
+static int __init apr_init(void)
+{
+ int ret;
+
+ ret = bus_register(&aprbus);
+ if (!ret)
+ ret = register_rpmsg_driver(&packet_router_driver);
+ else
+ bus_unregister(&aprbus);
+
+ return ret;
+}
+
+static void __exit apr_exit(void)
+{
+ bus_unregister(&aprbus);
+ unregister_rpmsg_driver(&packet_router_driver);
+}
+
+subsys_initcall(apr_init);
+module_exit(apr_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm APR Bus");
diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c
new file mode 100644
index 000000000..629a7188b
--- /dev/null
+++ b/drivers/soc/qcom/cmd-db.c
@@ -0,0 +1,366 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2016-2018, 2020, The Linux Foundation. All rights reserved. */
+
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/types.h>
+
+#include <soc/qcom/cmd-db.h>
+
+#define NUM_PRIORITY 2
+#define MAX_SLV_ID 8
+#define SLAVE_ID_MASK 0x7
+#define SLAVE_ID_SHIFT 16
+
+/**
+ * struct entry_header: header for each entry in cmddb
+ *
+ * @id: resource's identifier
+ * @priority: unused
+ * @addr: the address of the resource
+ * @len: length of the data
+ * @offset: offset from :@data_offset, start of the data
+ */
+struct entry_header {
+ u8 id[8];
+ __le32 priority[NUM_PRIORITY];
+ __le32 addr;
+ __le16 len;
+ __le16 offset;
+};
+
+/**
+ * struct rsc_hdr: resource header information
+ *
+ * @slv_id: id for the resource
+ * @header_offset: entry's header at offset from the end of the cmd_db_header
+ * @data_offset: entry's data at offset from the end of the cmd_db_header
+ * @cnt: number of entries for HW type
+ * @version: MSB is major, LSB is minor
+ * @reserved: reserved for future use.
+ */
+struct rsc_hdr {
+ __le16 slv_id;
+ __le16 header_offset;
+ __le16 data_offset;
+ __le16 cnt;
+ __le16 version;
+ __le16 reserved[3];
+};
+
+/**
+ * struct cmd_db_header: The DB header information
+ *
+ * @version: The cmd db version
+ * @magic: constant expected in the database
+ * @header: array of resources
+ * @checksum: checksum for the header. Unused.
+ * @reserved: reserved memory
+ * @data: driver specific data
+ */
+struct cmd_db_header {
+ __le32 version;
+ u8 magic[4];
+ struct rsc_hdr header[MAX_SLV_ID];
+ __le32 checksum;
+ __le32 reserved;
+ u8 data[];
+};
+
+/**
+ * DOC: Description of the Command DB database.
+ *
+ * At the start of the command DB memory is the cmd_db_header structure.
+ * The cmd_db_header holds the version, checksum and magic key, as well as an
+ * array of headers, one for each slave (depicted by struct rsc_hdr). Each h/w
+ * based accelerator is a 'slave' (shared resource) and has a slave id indicating
+ * the type of accelerator. The rsc_hdr is the header for such individual
+ * slaves of a given type. The entries for each of these slaves begin at the
+ * rsc_hdr.header_offset. In addition each slave could have auxiliary data
+ * that may be needed by the driver. The data for the slave starts at the
+ * entry_header.offset to the location pointed to by the rsc_hdr.data_offset.
+ *
+ * Drivers have a stringified key to a slave/resource. They can query the slave
+ * information and get the slave id and the auxiliary data and the length of the
+ * data. Using this information, they can format the request to be sent to the
+ * h/w accelerator and request a resource state.
+ */
+
+static const u8 CMD_DB_MAGIC[] = { 0xdb, 0x30, 0x03, 0x0c };
+
+static bool cmd_db_magic_matches(const struct cmd_db_header *header)
+{
+ const u8 *magic = header->magic;
+
+ return memcmp(magic, CMD_DB_MAGIC, ARRAY_SIZE(CMD_DB_MAGIC)) == 0;
+}
+
+static struct cmd_db_header *cmd_db_header;
+
+static inline const void *rsc_to_entry_header(const struct rsc_hdr *hdr)
+{
+ u16 offset = le16_to_cpu(hdr->header_offset);
+
+ return cmd_db_header->data + offset;
+}
+
+static inline void *
+rsc_offset(const struct rsc_hdr *hdr, const struct entry_header *ent)
+{
+ u16 offset = le16_to_cpu(hdr->data_offset);
+ u16 loffset = le16_to_cpu(ent->offset);
+
+ return cmd_db_header->data + offset + loffset;
+}
+
+/**
+ * cmd_db_ready - Indicates if command DB is available
+ *
+ * Return: 0 on success, errno otherwise
+ */
+int cmd_db_ready(void)
+{
+ if (cmd_db_header == NULL)
+ return -EPROBE_DEFER;
+ else if (!cmd_db_magic_matches(cmd_db_header))
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(cmd_db_ready);
+
+static int cmd_db_get_header(const char *id, const struct entry_header **eh,
+ const struct rsc_hdr **rh)
+{
+ const struct rsc_hdr *rsc_hdr;
+ const struct entry_header *ent;
+ int ret, i, j;
+ u8 query[sizeof(ent->id)] __nonstring;
+
+ ret = cmd_db_ready();
+ if (ret)
+ return ret;
+
+ /*
+ * Pad out query string to same length as in DB. NOTE: the output
+ * query string is not necessarily '\0' terminated if it bumps up
+ * against the max size. That's OK and expected.
+ */
+ strncpy(query, id, sizeof(query));
+
+ for (i = 0; i < MAX_SLV_ID; i++) {
+ rsc_hdr = &cmd_db_header->header[i];
+ if (!rsc_hdr->slv_id)
+ break;
+
+ ent = rsc_to_entry_header(rsc_hdr);
+ for (j = 0; j < le16_to_cpu(rsc_hdr->cnt); j++, ent++) {
+ if (memcmp(ent->id, query, sizeof(ent->id)) == 0) {
+ if (eh)
+ *eh = ent;
+ if (rh)
+ *rh = rsc_hdr;
+ return 0;
+ }
+ }
+ }
+
+ return -ENODEV;
+}
+
+/**
+ * cmd_db_read_addr() - Query command db for resource id address.
+ *
+ * @id: resource id to query for address
+ *
+ * Return: resource address on success, 0 on error
+ *
+ * This is used to retrieve resource address based on resource
+ * id.
+ */
+u32 cmd_db_read_addr(const char *id)
+{
+ int ret;
+ const struct entry_header *ent;
+
+ ret = cmd_db_get_header(id, &ent, NULL);
+
+ return ret < 0 ? 0 : le32_to_cpu(ent->addr);
+}
+EXPORT_SYMBOL(cmd_db_read_addr);
+
+/**
+ * cmd_db_read_aux_data() - Query command db for aux data.
+ *
+ * @id: Resource to retrieve AUX Data on
+ * @len: size of data buffer returned
+ *
+ * Return: pointer to data on success, error pointer otherwise
+ */
+const void *cmd_db_read_aux_data(const char *id, size_t *len)
+{
+ int ret;
+ const struct entry_header *ent;
+ const struct rsc_hdr *rsc_hdr;
+
+ ret = cmd_db_get_header(id, &ent, &rsc_hdr);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (len)
+ *len = le16_to_cpu(ent->len);
+
+ return rsc_offset(rsc_hdr, ent);
+}
+EXPORT_SYMBOL(cmd_db_read_aux_data);
+
+/**
+ * cmd_db_read_slave_id - Get the slave ID for a given resource address
+ *
+ * @id: Resource id to query the DB for version
+ *
+ * Return: cmd_db_hw_type enum on success, CMD_DB_HW_INVALID on error
+ */
+enum cmd_db_hw_type cmd_db_read_slave_id(const char *id)
+{
+ int ret;
+ const struct entry_header *ent;
+ u32 addr;
+
+ ret = cmd_db_get_header(id, &ent, NULL);
+ if (ret < 0)
+ return CMD_DB_HW_INVALID;
+
+ addr = le32_to_cpu(ent->addr);
+ return (addr >> SLAVE_ID_SHIFT) & SLAVE_ID_MASK;
+}
+EXPORT_SYMBOL(cmd_db_read_slave_id);
+
+#ifdef CONFIG_DEBUG_FS
+static int cmd_db_debugfs_dump(struct seq_file *seq, void *p)
+{
+ int i, j;
+ const struct rsc_hdr *rsc;
+ const struct entry_header *ent;
+ const char *name;
+ u16 len, version;
+ u8 major, minor;
+
+ seq_puts(seq, "Command DB DUMP\n");
+
+ for (i = 0; i < MAX_SLV_ID; i++) {
+ rsc = &cmd_db_header->header[i];
+ if (!rsc->slv_id)
+ break;
+
+ switch (le16_to_cpu(rsc->slv_id)) {
+ case CMD_DB_HW_ARC:
+ name = "ARC";
+ break;
+ case CMD_DB_HW_VRM:
+ name = "VRM";
+ break;
+ case CMD_DB_HW_BCM:
+ name = "BCM";
+ break;
+ default:
+ name = "Unknown";
+ break;
+ }
+
+ version = le16_to_cpu(rsc->version);
+ major = version >> 8;
+ minor = version;
+
+ seq_printf(seq, "Slave %s (v%u.%u)\n", name, major, minor);
+ seq_puts(seq, "-------------------------\n");
+
+ ent = rsc_to_entry_header(rsc);
+ for (j = 0; j < le16_to_cpu(rsc->cnt); j++, ent++) {
+ seq_printf(seq, "0x%05x: %*pEp", le32_to_cpu(ent->addr),
+ (int)sizeof(ent->id), ent->id);
+
+ len = le16_to_cpu(ent->len);
+ if (len) {
+ seq_printf(seq, " [%*ph]",
+ len, rsc_offset(rsc, ent));
+ }
+ seq_putc(seq, '\n');
+ }
+ }
+
+ return 0;
+}
+
+static int open_cmd_db_debugfs(struct inode *inode, struct file *file)
+{
+ return single_open(file, cmd_db_debugfs_dump, inode->i_private);
+}
+#endif
+
+static const struct file_operations cmd_db_debugfs_ops = {
+#ifdef CONFIG_DEBUG_FS
+ .open = open_cmd_db_debugfs,
+#endif
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int cmd_db_dev_probe(struct platform_device *pdev)
+{
+ struct reserved_mem *rmem;
+ int ret = 0;
+
+ rmem = of_reserved_mem_lookup(pdev->dev.of_node);
+ if (!rmem) {
+ dev_err(&pdev->dev, "failed to acquire memory region\n");
+ return -EINVAL;
+ }
+
+ cmd_db_header = memremap(rmem->base, rmem->size, MEMREMAP_WB);
+ if (!cmd_db_header) {
+ ret = -ENOMEM;
+ cmd_db_header = NULL;
+ return ret;
+ }
+
+ if (!cmd_db_magic_matches(cmd_db_header)) {
+ dev_err(&pdev->dev, "Invalid Command DB Magic\n");
+ return -EINVAL;
+ }
+
+ debugfs_create_file("cmd-db", 0400, NULL, NULL, &cmd_db_debugfs_ops);
+
+ return 0;
+}
+
+static const struct of_device_id cmd_db_match_table[] = {
+ { .compatible = "qcom,cmd-db" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cmd_db_match_table);
+
+static struct platform_driver cmd_db_dev_driver = {
+ .probe = cmd_db_dev_probe,
+ .driver = {
+ .name = "cmd-db",
+ .of_match_table = cmd_db_match_table,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init cmd_db_device_init(void)
+{
+ return platform_driver_register(&cmd_db_dev_driver);
+}
+arch_initcall(cmd_db_device_init);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Command DB Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/cpr.c b/drivers/soc/qcom/cpr.c
new file mode 100644
index 000000000..144ea68e0
--- /dev/null
+++ b/drivers/soc/qcom/cpr.c
@@ -0,0 +1,1757 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019, Linaro Limited
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/debugfs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_opp.h>
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
+#include <linux/nvmem-consumer.h>
+
+/* Register Offsets for RB-CPR and Bit Definitions */
+
+/* RBCPR Version Register */
+#define REG_RBCPR_VERSION 0
+#define RBCPR_VER_2 0x02
+#define FLAGS_IGNORE_1ST_IRQ_STATUS BIT(0)
+
+/* RBCPR Gate Count and Target Registers */
+#define REG_RBCPR_GCNT_TARGET(n) (0x60 + 4 * (n))
+
+#define RBCPR_GCNT_TARGET_TARGET_SHIFT 0
+#define RBCPR_GCNT_TARGET_TARGET_MASK GENMASK(11, 0)
+#define RBCPR_GCNT_TARGET_GCNT_SHIFT 12
+#define RBCPR_GCNT_TARGET_GCNT_MASK GENMASK(9, 0)
+
+/* RBCPR Timer Control */
+#define REG_RBCPR_TIMER_INTERVAL 0x44
+#define REG_RBIF_TIMER_ADJUST 0x4c
+
+#define RBIF_TIMER_ADJ_CONS_UP_MASK GENMASK(3, 0)
+#define RBIF_TIMER_ADJ_CONS_UP_SHIFT 0
+#define RBIF_TIMER_ADJ_CONS_DOWN_MASK GENMASK(3, 0)
+#define RBIF_TIMER_ADJ_CONS_DOWN_SHIFT 4
+#define RBIF_TIMER_ADJ_CLAMP_INT_MASK GENMASK(7, 0)
+#define RBIF_TIMER_ADJ_CLAMP_INT_SHIFT 8
+
+/* RBCPR Config Register */
+#define REG_RBIF_LIMIT 0x48
+#define RBIF_LIMIT_CEILING_MASK GENMASK(5, 0)
+#define RBIF_LIMIT_CEILING_SHIFT 6
+#define RBIF_LIMIT_FLOOR_BITS 6
+#define RBIF_LIMIT_FLOOR_MASK GENMASK(5, 0)
+
+#define RBIF_LIMIT_CEILING_DEFAULT RBIF_LIMIT_CEILING_MASK
+#define RBIF_LIMIT_FLOOR_DEFAULT 0
+
+#define REG_RBIF_SW_VLEVEL 0x94
+#define RBIF_SW_VLEVEL_DEFAULT 0x20
+
+#define REG_RBCPR_STEP_QUOT 0x80
+#define RBCPR_STEP_QUOT_STEPQUOT_MASK GENMASK(7, 0)
+#define RBCPR_STEP_QUOT_IDLE_CLK_MASK GENMASK(3, 0)
+#define RBCPR_STEP_QUOT_IDLE_CLK_SHIFT 8
+
+/* RBCPR Control Register */
+#define REG_RBCPR_CTL 0x90
+
+#define RBCPR_CTL_LOOP_EN BIT(0)
+#define RBCPR_CTL_TIMER_EN BIT(3)
+#define RBCPR_CTL_SW_AUTO_CONT_ACK_EN BIT(5)
+#define RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN BIT(6)
+#define RBCPR_CTL_COUNT_MODE BIT(10)
+#define RBCPR_CTL_UP_THRESHOLD_MASK GENMASK(3, 0)
+#define RBCPR_CTL_UP_THRESHOLD_SHIFT 24
+#define RBCPR_CTL_DN_THRESHOLD_MASK GENMASK(3, 0)
+#define RBCPR_CTL_DN_THRESHOLD_SHIFT 28
+
+/* RBCPR Ack/Nack Response */
+#define REG_RBIF_CONT_ACK_CMD 0x98
+#define REG_RBIF_CONT_NACK_CMD 0x9c
+
+/* RBCPR Result status Register */
+#define REG_RBCPR_RESULT_0 0xa0
+
+#define RBCPR_RESULT0_BUSY_SHIFT 19
+#define RBCPR_RESULT0_BUSY_MASK BIT(RBCPR_RESULT0_BUSY_SHIFT)
+#define RBCPR_RESULT0_ERROR_LT0_SHIFT 18
+#define RBCPR_RESULT0_ERROR_SHIFT 6
+#define RBCPR_RESULT0_ERROR_MASK GENMASK(11, 0)
+#define RBCPR_RESULT0_ERROR_STEPS_SHIFT 2
+#define RBCPR_RESULT0_ERROR_STEPS_MASK GENMASK(3, 0)
+#define RBCPR_RESULT0_STEP_UP_SHIFT 1
+
+/* RBCPR Interrupt Control Register */
+#define REG_RBIF_IRQ_EN(n) (0x100 + 4 * (n))
+#define REG_RBIF_IRQ_CLEAR 0x110
+#define REG_RBIF_IRQ_STATUS 0x114
+
+#define CPR_INT_DONE BIT(0)
+#define CPR_INT_MIN BIT(1)
+#define CPR_INT_DOWN BIT(2)
+#define CPR_INT_MID BIT(3)
+#define CPR_INT_UP BIT(4)
+#define CPR_INT_MAX BIT(5)
+#define CPR_INT_CLAMP BIT(6)
+#define CPR_INT_ALL (CPR_INT_DONE | CPR_INT_MIN | CPR_INT_DOWN | \
+ CPR_INT_MID | CPR_INT_UP | CPR_INT_MAX | CPR_INT_CLAMP)
+#define CPR_INT_DEFAULT (CPR_INT_UP | CPR_INT_DOWN)
+
+#define CPR_NUM_RING_OSC 8
+
+/* CPR eFuse parameters */
+#define CPR_FUSE_TARGET_QUOT_BITS_MASK GENMASK(11, 0)
+
+#define CPR_FUSE_MIN_QUOT_DIFF 50
+
+#define FUSE_REVISION_UNKNOWN (-1)
+
+enum voltage_change_dir {
+ NO_CHANGE,
+ DOWN,
+ UP,
+};
+
+struct cpr_fuse {
+ char *ring_osc;
+ char *init_voltage;
+ char *quotient;
+ char *quotient_offset;
+};
+
+struct fuse_corner_data {
+ int ref_uV;
+ int max_uV;
+ int min_uV;
+ int max_volt_scale;
+ int max_quot_scale;
+ /* fuse quot */
+ int quot_offset;
+ int quot_scale;
+ int quot_adjust;
+ /* fuse quot_offset */
+ int quot_offset_scale;
+ int quot_offset_adjust;
+};
+
+struct cpr_fuses {
+ int init_voltage_step;
+ int init_voltage_width;
+ struct fuse_corner_data *fuse_corner_data;
+};
+
+struct corner_data {
+ unsigned int fuse_corner;
+ unsigned long freq;
+};
+
+struct cpr_desc {
+ unsigned int num_fuse_corners;
+ int min_diff_quot;
+ int *step_quot;
+
+ unsigned int timer_delay_us;
+ unsigned int timer_cons_up;
+ unsigned int timer_cons_down;
+ unsigned int up_threshold;
+ unsigned int down_threshold;
+ unsigned int idle_clocks;
+ unsigned int gcnt_us;
+ unsigned int vdd_apc_step_up_limit;
+ unsigned int vdd_apc_step_down_limit;
+ unsigned int clamp_timer_interval;
+
+ struct cpr_fuses cpr_fuses;
+ bool reduce_to_fuse_uV;
+ bool reduce_to_corner_uV;
+};
+
+struct acc_desc {
+ unsigned int enable_reg;
+ u32 enable_mask;
+
+ struct reg_sequence *config;
+ struct reg_sequence *settings;
+ int num_regs_per_fuse;
+};
+
+struct cpr_acc_desc {
+ const struct cpr_desc *cpr_desc;
+ const struct acc_desc *acc_desc;
+};
+
+struct fuse_corner {
+ int min_uV;
+ int max_uV;
+ int uV;
+ int quot;
+ int step_quot;
+ const struct reg_sequence *accs;
+ int num_accs;
+ unsigned long max_freq;
+ u8 ring_osc_idx;
+};
+
+struct corner {
+ int min_uV;
+ int max_uV;
+ int uV;
+ int last_uV;
+ int quot_adjust;
+ u32 save_ctl;
+ u32 save_irq;
+ unsigned long freq;
+ struct fuse_corner *fuse_corner;
+};
+
+struct cpr_drv {
+ unsigned int num_corners;
+ unsigned int ref_clk_khz;
+
+ struct generic_pm_domain pd;
+ struct device *dev;
+ struct device *attached_cpu_dev;
+ struct mutex lock;
+ void __iomem *base;
+ struct corner *corner;
+ struct regulator *vdd_apc;
+ struct clk *cpu_clk;
+ struct regmap *tcsr;
+ bool loop_disabled;
+ u32 gcnt;
+ unsigned long flags;
+
+ struct fuse_corner *fuse_corners;
+ struct corner *corners;
+
+ const struct cpr_desc *desc;
+ const struct acc_desc *acc_desc;
+ const struct cpr_fuse *cpr_fuses;
+
+ struct dentry *debugfs;
+};
+
+static bool cpr_is_allowed(struct cpr_drv *drv)
+{
+ return !drv->loop_disabled;
+}
+
+static void cpr_write(struct cpr_drv *drv, u32 offset, u32 value)
+{
+ writel_relaxed(value, drv->base + offset);
+}
+
+static u32 cpr_read(struct cpr_drv *drv, u32 offset)
+{
+ return readl_relaxed(drv->base + offset);
+}
+
+static void
+cpr_masked_write(struct cpr_drv *drv, u32 offset, u32 mask, u32 value)
+{
+ u32 val;
+
+ val = readl_relaxed(drv->base + offset);
+ val &= ~mask;
+ val |= value & mask;
+ writel_relaxed(val, drv->base + offset);
+}
+
+static void cpr_irq_clr(struct cpr_drv *drv)
+{
+ cpr_write(drv, REG_RBIF_IRQ_CLEAR, CPR_INT_ALL);
+}
+
+static void cpr_irq_clr_nack(struct cpr_drv *drv)
+{
+ cpr_irq_clr(drv);
+ cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1);
+}
+
+static void cpr_irq_clr_ack(struct cpr_drv *drv)
+{
+ cpr_irq_clr(drv);
+ cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1);
+}
+
+static void cpr_irq_set(struct cpr_drv *drv, u32 int_bits)
+{
+ cpr_write(drv, REG_RBIF_IRQ_EN(0), int_bits);
+}
+
+static void cpr_ctl_modify(struct cpr_drv *drv, u32 mask, u32 value)
+{
+ cpr_masked_write(drv, REG_RBCPR_CTL, mask, value);
+}
+
+static void cpr_ctl_enable(struct cpr_drv *drv, struct corner *corner)
+{
+ u32 val, mask;
+ const struct cpr_desc *desc = drv->desc;
+
+ /* Program Consecutive Up & Down */
+ val = desc->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT;
+ val |= desc->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT;
+ mask = RBIF_TIMER_ADJ_CONS_UP_MASK | RBIF_TIMER_ADJ_CONS_DOWN_MASK;
+ cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST, mask, val);
+ cpr_masked_write(drv, REG_RBCPR_CTL,
+ RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
+ RBCPR_CTL_SW_AUTO_CONT_ACK_EN,
+ corner->save_ctl);
+ cpr_irq_set(drv, corner->save_irq);
+
+ if (cpr_is_allowed(drv) && corner->max_uV > corner->min_uV)
+ val = RBCPR_CTL_LOOP_EN;
+ else
+ val = 0;
+ cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, val);
+}
+
+static void cpr_ctl_disable(struct cpr_drv *drv)
+{
+ cpr_irq_set(drv, 0);
+ cpr_ctl_modify(drv, RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
+ RBCPR_CTL_SW_AUTO_CONT_ACK_EN, 0);
+ cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST,
+ RBIF_TIMER_ADJ_CONS_UP_MASK |
+ RBIF_TIMER_ADJ_CONS_DOWN_MASK, 0);
+ cpr_irq_clr(drv);
+ cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1);
+ cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1);
+ cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, 0);
+}
+
+static bool cpr_ctl_is_enabled(struct cpr_drv *drv)
+{
+ u32 reg_val;
+
+ reg_val = cpr_read(drv, REG_RBCPR_CTL);
+ return reg_val & RBCPR_CTL_LOOP_EN;
+}
+
+static bool cpr_ctl_is_busy(struct cpr_drv *drv)
+{
+ u32 reg_val;
+
+ reg_val = cpr_read(drv, REG_RBCPR_RESULT_0);
+ return reg_val & RBCPR_RESULT0_BUSY_MASK;
+}
+
+static void cpr_corner_save(struct cpr_drv *drv, struct corner *corner)
+{
+ corner->save_ctl = cpr_read(drv, REG_RBCPR_CTL);
+ corner->save_irq = cpr_read(drv, REG_RBIF_IRQ_EN(0));
+}
+
+static void cpr_corner_restore(struct cpr_drv *drv, struct corner *corner)
+{
+ u32 gcnt, ctl, irq, ro_sel, step_quot;
+ struct fuse_corner *fuse = corner->fuse_corner;
+ const struct cpr_desc *desc = drv->desc;
+ int i;
+
+ ro_sel = fuse->ring_osc_idx;
+ gcnt = drv->gcnt;
+ gcnt |= fuse->quot - corner->quot_adjust;
+
+ /* Program the step quotient and idle clocks */
+ step_quot = desc->idle_clocks << RBCPR_STEP_QUOT_IDLE_CLK_SHIFT;
+ step_quot |= fuse->step_quot & RBCPR_STEP_QUOT_STEPQUOT_MASK;
+ cpr_write(drv, REG_RBCPR_STEP_QUOT, step_quot);
+
+ /* Clear the target quotient value and gate count of all ROs */
+ for (i = 0; i < CPR_NUM_RING_OSC; i++)
+ cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0);
+
+ cpr_write(drv, REG_RBCPR_GCNT_TARGET(ro_sel), gcnt);
+ ctl = corner->save_ctl;
+ cpr_write(drv, REG_RBCPR_CTL, ctl);
+ irq = corner->save_irq;
+ cpr_irq_set(drv, irq);
+ dev_dbg(drv->dev, "gcnt = %#08x, ctl = %#08x, irq = %#08x\n", gcnt,
+ ctl, irq);
+}
+
+static void cpr_set_acc(struct regmap *tcsr, struct fuse_corner *f,
+ struct fuse_corner *end)
+{
+ if (f == end)
+ return;
+
+ if (f < end) {
+ for (f += 1; f <= end; f++)
+ regmap_multi_reg_write(tcsr, f->accs, f->num_accs);
+ } else {
+ for (f -= 1; f >= end; f--)
+ regmap_multi_reg_write(tcsr, f->accs, f->num_accs);
+ }
+}
+
+static int cpr_pre_voltage(struct cpr_drv *drv,
+ struct fuse_corner *fuse_corner,
+ enum voltage_change_dir dir)
+{
+ struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner;
+
+ if (drv->tcsr && dir == DOWN)
+ cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner);
+
+ return 0;
+}
+
+static int cpr_post_voltage(struct cpr_drv *drv,
+ struct fuse_corner *fuse_corner,
+ enum voltage_change_dir dir)
+{
+ struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner;
+
+ if (drv->tcsr && dir == UP)
+ cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner);
+
+ return 0;
+}
+
+static int cpr_scale_voltage(struct cpr_drv *drv, struct corner *corner,
+ int new_uV, enum voltage_change_dir dir)
+{
+ int ret;
+ struct fuse_corner *fuse_corner = corner->fuse_corner;
+
+ ret = cpr_pre_voltage(drv, fuse_corner, dir);
+ if (ret)
+ return ret;
+
+ ret = regulator_set_voltage(drv->vdd_apc, new_uV, new_uV);
+ if (ret) {
+ dev_err_ratelimited(drv->dev, "failed to set apc voltage %d\n",
+ new_uV);
+ return ret;
+ }
+
+ ret = cpr_post_voltage(drv, fuse_corner, dir);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static unsigned int cpr_get_cur_perf_state(struct cpr_drv *drv)
+{
+ return drv->corner ? drv->corner - drv->corners + 1 : 0;
+}
+
+static int cpr_scale(struct cpr_drv *drv, enum voltage_change_dir dir)
+{
+ u32 val, error_steps, reg_mask;
+ int last_uV, new_uV, step_uV, ret;
+ struct corner *corner;
+ const struct cpr_desc *desc = drv->desc;
+
+ if (dir != UP && dir != DOWN)
+ return 0;
+
+ step_uV = regulator_get_linear_step(drv->vdd_apc);
+ if (!step_uV)
+ return -EINVAL;
+
+ corner = drv->corner;
+
+ val = cpr_read(drv, REG_RBCPR_RESULT_0);
+
+ error_steps = val >> RBCPR_RESULT0_ERROR_STEPS_SHIFT;
+ error_steps &= RBCPR_RESULT0_ERROR_STEPS_MASK;
+ last_uV = corner->last_uV;
+
+ if (dir == UP) {
+ if (desc->clamp_timer_interval &&
+ error_steps < desc->up_threshold) {
+ /*
+ * Handle the case where another measurement started
+ * after the interrupt was triggered due to a core
+ * exiting from power collapse.
+ */
+ error_steps = max(desc->up_threshold,
+ desc->vdd_apc_step_up_limit);
+ }
+
+ if (last_uV >= corner->max_uV) {
+ cpr_irq_clr_nack(drv);
+
+ /* Maximize the UP threshold */
+ reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK;
+ reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ val = reg_mask;
+ cpr_ctl_modify(drv, reg_mask, val);
+
+ /* Disable UP interrupt */
+ cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_UP);
+
+ return 0;
+ }
+
+ if (error_steps > desc->vdd_apc_step_up_limit)
+ error_steps = desc->vdd_apc_step_up_limit;
+
+ /* Calculate new voltage */
+ new_uV = last_uV + error_steps * step_uV;
+ new_uV = min(new_uV, corner->max_uV);
+
+ dev_dbg(drv->dev,
+ "UP: -> new_uV: %d last_uV: %d perf state: %u\n",
+ new_uV, last_uV, cpr_get_cur_perf_state(drv));
+ } else {
+ if (desc->clamp_timer_interval &&
+ error_steps < desc->down_threshold) {
+ /*
+ * Handle the case where another measurement started
+ * after the interrupt was triggered due to a core
+ * exiting from power collapse.
+ */
+ error_steps = max(desc->down_threshold,
+ desc->vdd_apc_step_down_limit);
+ }
+
+ if (last_uV <= corner->min_uV) {
+ cpr_irq_clr_nack(drv);
+
+ /* Enable auto nack down */
+ reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+ val = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+
+ cpr_ctl_modify(drv, reg_mask, val);
+
+ /* Disable DOWN interrupt */
+ cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_DOWN);
+
+ return 0;
+ }
+
+ if (error_steps > desc->vdd_apc_step_down_limit)
+ error_steps = desc->vdd_apc_step_down_limit;
+
+ /* Calculate new voltage */
+ new_uV = last_uV - error_steps * step_uV;
+ new_uV = max(new_uV, corner->min_uV);
+
+ dev_dbg(drv->dev,
+ "DOWN: -> new_uV: %d last_uV: %d perf state: %u\n",
+ new_uV, last_uV, cpr_get_cur_perf_state(drv));
+ }
+
+ ret = cpr_scale_voltage(drv, corner, new_uV, dir);
+ if (ret) {
+ cpr_irq_clr_nack(drv);
+ return ret;
+ }
+ drv->corner->last_uV = new_uV;
+
+ if (dir == UP) {
+ /* Disable auto nack down */
+ reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+ val = 0;
+ } else {
+ /* Restore default threshold for UP */
+ reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK;
+ reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ val = desc->up_threshold;
+ val <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ }
+
+ cpr_ctl_modify(drv, reg_mask, val);
+
+ /* Re-enable default interrupts */
+ cpr_irq_set(drv, CPR_INT_DEFAULT);
+
+ /* Ack */
+ cpr_irq_clr_ack(drv);
+
+ return 0;
+}
+
+static irqreturn_t cpr_irq_handler(int irq, void *dev)
+{
+ struct cpr_drv *drv = dev;
+ const struct cpr_desc *desc = drv->desc;
+ irqreturn_t ret = IRQ_HANDLED;
+ u32 val;
+
+ mutex_lock(&drv->lock);
+
+ val = cpr_read(drv, REG_RBIF_IRQ_STATUS);
+ if (drv->flags & FLAGS_IGNORE_1ST_IRQ_STATUS)
+ val = cpr_read(drv, REG_RBIF_IRQ_STATUS);
+
+ dev_dbg(drv->dev, "IRQ_STATUS = %#02x\n", val);
+
+ if (!cpr_ctl_is_enabled(drv)) {
+ dev_dbg(drv->dev, "CPR is disabled\n");
+ ret = IRQ_NONE;
+ } else if (cpr_ctl_is_busy(drv) && !desc->clamp_timer_interval) {
+ dev_dbg(drv->dev, "CPR measurement is not ready\n");
+ } else if (!cpr_is_allowed(drv)) {
+ val = cpr_read(drv, REG_RBCPR_CTL);
+ dev_err_ratelimited(drv->dev,
+ "Interrupt broken? RBCPR_CTL = %#02x\n",
+ val);
+ ret = IRQ_NONE;
+ } else {
+ /*
+ * Following sequence of handling is as per each IRQ's
+ * priority
+ */
+ if (val & CPR_INT_UP) {
+ cpr_scale(drv, UP);
+ } else if (val & CPR_INT_DOWN) {
+ cpr_scale(drv, DOWN);
+ } else if (val & CPR_INT_MIN) {
+ cpr_irq_clr_nack(drv);
+ } else if (val & CPR_INT_MAX) {
+ cpr_irq_clr_nack(drv);
+ } else if (val & CPR_INT_MID) {
+ /* RBCPR_CTL_SW_AUTO_CONT_ACK_EN is enabled */
+ dev_dbg(drv->dev, "IRQ occurred for Mid Flag\n");
+ } else {
+ dev_dbg(drv->dev,
+ "IRQ occurred for unknown flag (%#08x)\n", val);
+ }
+
+ /* Save register values for the corner */
+ cpr_corner_save(drv, drv->corner);
+ }
+
+ mutex_unlock(&drv->lock);
+
+ return ret;
+}
+
+static int cpr_enable(struct cpr_drv *drv)
+{
+ int ret;
+
+ ret = regulator_enable(drv->vdd_apc);
+ if (ret)
+ return ret;
+
+ mutex_lock(&drv->lock);
+
+ if (cpr_is_allowed(drv) && drv->corner) {
+ cpr_irq_clr(drv);
+ cpr_corner_restore(drv, drv->corner);
+ cpr_ctl_enable(drv, drv->corner);
+ }
+
+ mutex_unlock(&drv->lock);
+
+ return 0;
+}
+
+static int cpr_disable(struct cpr_drv *drv)
+{
+ mutex_lock(&drv->lock);
+
+ if (cpr_is_allowed(drv)) {
+ cpr_ctl_disable(drv);
+ cpr_irq_clr(drv);
+ }
+
+ mutex_unlock(&drv->lock);
+
+ return regulator_disable(drv->vdd_apc);
+}
+
+static int cpr_config(struct cpr_drv *drv)
+{
+ int i;
+ u32 val, gcnt;
+ struct corner *corner;
+ const struct cpr_desc *desc = drv->desc;
+
+ /* Disable interrupt and CPR */
+ cpr_write(drv, REG_RBIF_IRQ_EN(0), 0);
+ cpr_write(drv, REG_RBCPR_CTL, 0);
+
+ /* Program the default HW ceiling, floor and vlevel */
+ val = (RBIF_LIMIT_CEILING_DEFAULT & RBIF_LIMIT_CEILING_MASK)
+ << RBIF_LIMIT_CEILING_SHIFT;
+ val |= RBIF_LIMIT_FLOOR_DEFAULT & RBIF_LIMIT_FLOOR_MASK;
+ cpr_write(drv, REG_RBIF_LIMIT, val);
+ cpr_write(drv, REG_RBIF_SW_VLEVEL, RBIF_SW_VLEVEL_DEFAULT);
+
+ /*
+ * Clear the target quotient value and gate count of all
+ * ring oscillators
+ */
+ for (i = 0; i < CPR_NUM_RING_OSC; i++)
+ cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0);
+
+ /* Init and save gcnt */
+ gcnt = (drv->ref_clk_khz * desc->gcnt_us) / 1000;
+ gcnt = gcnt & RBCPR_GCNT_TARGET_GCNT_MASK;
+ gcnt <<= RBCPR_GCNT_TARGET_GCNT_SHIFT;
+ drv->gcnt = gcnt;
+
+ /* Program the delay count for the timer */
+ val = (drv->ref_clk_khz * desc->timer_delay_us) / 1000;
+ cpr_write(drv, REG_RBCPR_TIMER_INTERVAL, val);
+ dev_dbg(drv->dev, "Timer count: %#0x (for %d us)\n", val,
+ desc->timer_delay_us);
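+	/*
+	 * For example, with the qcs404 descriptor below (gcnt_us = 1,
+	 * timer_delay_us = 5000) and assuming a hypothetical 19.2 MHz
+	 * reference clock (ref_clk_khz = 19200): gcnt = 19200 * 1 / 1000 =
+	 * 19 and the timer count = 19200 * 5000 / 1000 = 96000.
+	 */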
+
+ /* Program Consecutive Up & Down */
+ val = desc->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT;
+ val |= desc->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT;
+ val |= desc->clamp_timer_interval << RBIF_TIMER_ADJ_CLAMP_INT_SHIFT;
+ cpr_write(drv, REG_RBIF_TIMER_ADJUST, val);
+
+ /* Program the control register */
+ val = desc->up_threshold << RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ val |= desc->down_threshold << RBCPR_CTL_DN_THRESHOLD_SHIFT;
+ val |= RBCPR_CTL_TIMER_EN | RBCPR_CTL_COUNT_MODE;
+ val |= RBCPR_CTL_SW_AUTO_CONT_ACK_EN;
+ cpr_write(drv, REG_RBCPR_CTL, val);
+
+ for (i = 0; i < drv->num_corners; i++) {
+ corner = &drv->corners[i];
+ corner->save_ctl = val;
+ corner->save_irq = CPR_INT_DEFAULT;
+ }
+
+ cpr_irq_set(drv, CPR_INT_DEFAULT);
+
+ val = cpr_read(drv, REG_RBCPR_VERSION);
+ if (val <= RBCPR_VER_2)
+ drv->flags |= FLAGS_IGNORE_1ST_IRQ_STATUS;
+
+ return 0;
+}
+
+static int cpr_set_performance_state(struct generic_pm_domain *domain,
+ unsigned int state)
+{
+ struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
+ struct corner *corner, *end;
+ enum voltage_change_dir dir;
+ int ret = 0, new_uV;
+
+ mutex_lock(&drv->lock);
+
+ dev_dbg(drv->dev, "%s: setting perf state: %u (prev state: %u)\n",
+ __func__, state, cpr_get_cur_perf_state(drv));
+
+ /*
+ * Determine new corner we're going to.
+ * Remove one since lowest performance state is 1.
+ */
+ corner = drv->corners + state - 1;
+ end = &drv->corners[drv->num_corners - 1];
+ if (corner > end || corner < drv->corners) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ /* Determine direction */
+ if (drv->corner > corner)
+ dir = DOWN;
+ else if (drv->corner < corner)
+ dir = UP;
+ else
+ dir = NO_CHANGE;
+
+ if (cpr_is_allowed(drv))
+ new_uV = corner->last_uV;
+ else
+ new_uV = corner->uV;
+
+ if (cpr_is_allowed(drv))
+ cpr_ctl_disable(drv);
+
+ ret = cpr_scale_voltage(drv, corner, new_uV, dir);
+ if (ret)
+ goto unlock;
+
+ if (cpr_is_allowed(drv)) {
+ cpr_irq_clr(drv);
+ if (drv->corner != corner)
+ cpr_corner_restore(drv, corner);
+ cpr_ctl_enable(drv, corner);
+ }
+
+ drv->corner = corner;
+
+unlock:
+ mutex_unlock(&drv->lock);
+
+ return ret;
+}
+
+static int
+cpr_populate_ring_osc_idx(struct cpr_drv *drv)
+{
+ struct fuse_corner *fuse = drv->fuse_corners;
+ struct fuse_corner *end = fuse + drv->desc->num_fuse_corners;
+ const struct cpr_fuse *fuses = drv->cpr_fuses;
+ u32 data;
+ int ret;
+
+ for (; fuse < end; fuse++, fuses++) {
+ ret = nvmem_cell_read_variable_le_u32(drv->dev, fuses->ring_osc, &data);
+ if (ret)
+ return ret;
+ fuse->ring_osc_idx = data;
+ }
+
+ return 0;
+}
+
+static int cpr_read_fuse_uV(const struct cpr_desc *desc,
+ const struct fuse_corner_data *fdata,
+ const char *init_v_efuse,
+ int step_volt,
+ struct cpr_drv *drv)
+{
+ int step_size_uV, steps, uV;
+ u32 bits = 0;
+ int ret;
+
+ ret = nvmem_cell_read_variable_le_u32(drv->dev, init_v_efuse, &bits);
+ if (ret)
+ return ret;
+
+ steps = bits & ~BIT(desc->cpr_fuses.init_voltage_width - 1);
+	/* Not two's complement; instead, the highest bit is the sign bit */
+ if (bits & BIT(desc->cpr_fuses.init_voltage_width - 1))
+ steps = -steps;
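+	/*
+	 * For example, with qcs404's init_voltage_width = 6 and
+	 * init_voltage_step = 8000 uV, a hypothetical fuse value of 0x23
+	 * has the sign bit (BIT(5)) set and a magnitude of 3, so steps = -3
+	 * and the fused voltage below is ref_uV - 3 * 8000 uV.
+	 */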
+
+ step_size_uV = desc->cpr_fuses.init_voltage_step;
+
+ uV = fdata->ref_uV + steps * step_size_uV;
+ return DIV_ROUND_UP(uV, step_volt) * step_volt;
+}
+
+static int cpr_fuse_corner_init(struct cpr_drv *drv)
+{
+ const struct cpr_desc *desc = drv->desc;
+ const struct cpr_fuse *fuses = drv->cpr_fuses;
+ const struct acc_desc *acc_desc = drv->acc_desc;
+ int i;
+ unsigned int step_volt;
+ struct fuse_corner_data *fdata;
+ struct fuse_corner *fuse, *end;
+ int uV;
+ const struct reg_sequence *accs;
+ int ret;
+
+ accs = acc_desc->settings;
+
+ step_volt = regulator_get_linear_step(drv->vdd_apc);
+ if (!step_volt)
+ return -EINVAL;
+
+ /* Populate fuse_corner members */
+ fuse = drv->fuse_corners;
+ end = &fuse[desc->num_fuse_corners - 1];
+ fdata = desc->cpr_fuses.fuse_corner_data;
+
+ for (i = 0; fuse <= end; fuse++, fuses++, i++, fdata++) {
+ /*
+		 * Update SoC voltages: platforms might choose a different
+		 * regulator than the one used to characterize the algorithms
+		 * (i.e., init_voltage_step).
+ */
+ fdata->min_uV = roundup(fdata->min_uV, step_volt);
+ fdata->max_uV = roundup(fdata->max_uV, step_volt);
+
+ /* Populate uV */
+ uV = cpr_read_fuse_uV(desc, fdata, fuses->init_voltage,
+ step_volt, drv);
+ if (uV < 0)
+ return uV;
+
+ fuse->min_uV = fdata->min_uV;
+ fuse->max_uV = fdata->max_uV;
+ fuse->uV = clamp(uV, fuse->min_uV, fuse->max_uV);
+
+ if (fuse == end) {
+ /*
+ * Allow the highest fuse corner's PVS voltage to
+ * define the ceiling voltage for that corner in order
+			 * to support SoCs in which variable ceiling values
+ * are required.
+ */
+ end->max_uV = max(end->max_uV, end->uV);
+ }
+
+ /* Populate target quotient by scaling */
+ ret = nvmem_cell_read_variable_le_u32(drv->dev, fuses->quotient, &fuse->quot);
+ if (ret)
+ return ret;
+
+ fuse->quot *= fdata->quot_scale;
+ fuse->quot += fdata->quot_offset;
+ fuse->quot += fdata->quot_adjust;
+ fuse->step_quot = desc->step_quot[fuse->ring_osc_idx];
+
+ /* Populate acc settings */
+ fuse->accs = accs;
+ fuse->num_accs = acc_desc->num_regs_per_fuse;
+ accs += acc_desc->num_regs_per_fuse;
+ }
+
+ /*
+ * Restrict all fuse corner PVS voltages based upon per corner
+ * ceiling and floor voltages.
+ */
+ for (fuse = drv->fuse_corners, i = 0; fuse <= end; fuse++, i++) {
+ if (fuse->uV > fuse->max_uV)
+ fuse->uV = fuse->max_uV;
+ else if (fuse->uV < fuse->min_uV)
+ fuse->uV = fuse->min_uV;
+
+ ret = regulator_is_supported_voltage(drv->vdd_apc,
+ fuse->min_uV,
+ fuse->min_uV);
+ if (!ret) {
+ dev_err(drv->dev,
+ "min uV: %d (fuse corner: %d) not supported by regulator\n",
+ fuse->min_uV, i);
+ return -EINVAL;
+ }
+
+ ret = regulator_is_supported_voltage(drv->vdd_apc,
+ fuse->max_uV,
+ fuse->max_uV);
+ if (!ret) {
+ dev_err(drv->dev,
+ "max uV: %d (fuse corner: %d) not supported by regulator\n",
+ fuse->max_uV, i);
+ return -EINVAL;
+ }
+
+ dev_dbg(drv->dev,
+ "fuse corner %d: [%d %d %d] RO%hhu quot %d squot %d\n",
+ i, fuse->min_uV, fuse->uV, fuse->max_uV,
+ fuse->ring_osc_idx, fuse->quot, fuse->step_quot);
+ }
+
+ return 0;
+}
+
+static int cpr_calculate_scaling(const char *quot_offset,
+ struct cpr_drv *drv,
+ const struct fuse_corner_data *fdata,
+ const struct corner *corner)
+{
+ u32 quot_diff = 0;
+ unsigned long freq_diff;
+ int scaling;
+ const struct fuse_corner *fuse, *prev_fuse;
+ int ret;
+
+ fuse = corner->fuse_corner;
+ prev_fuse = fuse - 1;
+
+ if (quot_offset) {
+ ret = nvmem_cell_read_variable_le_u32(drv->dev, quot_offset, &quot_diff);
+ if (ret)
+ return ret;
+
+ quot_diff *= fdata->quot_offset_scale;
+ quot_diff += fdata->quot_offset_adjust;
+ } else {
+ quot_diff = fuse->quot - prev_fuse->quot;
+ }
+
+ freq_diff = fuse->max_freq - prev_fuse->max_freq;
+ freq_diff /= 1000000; /* Convert to MHz */
+ scaling = 1000 * quot_diff / freq_diff;
+ return min(scaling, fdata->max_quot_scale);
+}
+
+static int cpr_interpolate(const struct corner *corner, int step_volt,
+ const struct fuse_corner_data *fdata)
+{
+ unsigned long f_high, f_low, f_diff;
+ int uV_high, uV_low, uV;
+ u64 temp, temp_limit;
+ const struct fuse_corner *fuse, *prev_fuse;
+
+ fuse = corner->fuse_corner;
+ prev_fuse = fuse - 1;
+
+ f_high = fuse->max_freq;
+ f_low = prev_fuse->max_freq;
+ uV_high = fuse->uV;
+ uV_low = prev_fuse->uV;
+ f_diff = fuse->max_freq - corner->freq;
+
+ /*
+ * Don't interpolate in the wrong direction. This could happen
+ * if the adjusted fuse voltage overlaps with the previous fuse's
+ * adjusted voltage.
+ */
+ if (f_high <= f_low || uV_high <= uV_low || f_high <= corner->freq)
+ return corner->uV;
+
+ temp = f_diff * (uV_high - uV_low);
+ temp = div64_ul(temp, f_high - f_low);
+
+ /*
+	 * max_volt_scale has units of uV/MHz while freq values
+	 * have units of Hz. Divide by 1000000 to convert Hz to MHz so
+	 * the limit below is in uV.
+ */
+ temp_limit = f_diff * fdata->max_volt_scale;
+ do_div(temp_limit, 1000000);
+
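+	/*
+	 * For example (hypothetical numbers): with f_high = 1.4 GHz,
+	 * f_low = 1.0 GHz, uV_high = 1288000, uV_low = 1224000 and
+	 * corner->freq = 1.2 GHz, f_diff = 200 MHz, so
+	 * temp = 200000000 * 64000 / 400000000 = 32000 uV and, with
+	 * max_volt_scale = 2000 uV/MHz, temp_limit = 400000 uV; the
+	 * interpolated voltage below is 1288000 - 32000 = 1256000 uV.
+	 */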
+ uV = uV_high - min(temp, temp_limit);
+ return roundup(uV, step_volt);
+}
+
+static unsigned int cpr_get_fuse_corner(struct dev_pm_opp *opp)
+{
+ struct device_node *np;
+ unsigned int fuse_corner = 0;
+
+ np = dev_pm_opp_get_of_node(opp);
+ if (of_property_read_u32(np, "qcom,opp-fuse-level", &fuse_corner))
+ pr_err("%s: missing 'qcom,opp-fuse-level' property\n",
+ __func__);
+
+ of_node_put(np);
+
+ return fuse_corner;
+}
+
+static unsigned long cpr_get_opp_hz_for_req(struct dev_pm_opp *ref,
+ struct device *cpu_dev)
+{
+ u64 rate = 0;
+ struct device_node *ref_np;
+ struct device_node *desc_np;
+ struct device_node *child_np = NULL;
+ struct device_node *child_req_np = NULL;
+
+ desc_np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
+ if (!desc_np)
+ return 0;
+
+ ref_np = dev_pm_opp_get_of_node(ref);
+ if (!ref_np)
+ goto out_ref;
+
+ do {
+ of_node_put(child_req_np);
+ child_np = of_get_next_available_child(desc_np, child_np);
+ child_req_np = of_parse_phandle(child_np, "required-opps", 0);
+ } while (child_np && child_req_np != ref_np);
+
+ if (child_np && child_req_np == ref_np)
+ of_property_read_u64(child_np, "opp-hz", &rate);
+
+ of_node_put(child_req_np);
+ of_node_put(child_np);
+ of_node_put(ref_np);
+out_ref:
+ of_node_put(desc_np);
+
+ return (unsigned long) rate;
+}
+
+static int cpr_corner_init(struct cpr_drv *drv)
+{
+ const struct cpr_desc *desc = drv->desc;
+ const struct cpr_fuse *fuses = drv->cpr_fuses;
+ int i, level, scaling = 0;
+ unsigned int fnum, fc;
+ const char *quot_offset;
+ struct fuse_corner *fuse, *prev_fuse;
+ struct corner *corner, *end;
+ struct corner_data *cdata;
+ const struct fuse_corner_data *fdata;
+ bool apply_scaling;
+ unsigned long freq_diff, freq_diff_mhz;
+ unsigned long freq;
+ int step_volt = regulator_get_linear_step(drv->vdd_apc);
+ struct dev_pm_opp *opp;
+
+ if (!step_volt)
+ return -EINVAL;
+
+ corner = drv->corners;
+ end = &corner[drv->num_corners - 1];
+
+ cdata = devm_kcalloc(drv->dev, drv->num_corners,
+ sizeof(struct corner_data),
+ GFP_KERNEL);
+ if (!cdata)
+ return -ENOMEM;
+
+ /*
+ * Store maximum frequency for each fuse corner based on the frequency
+ * plan
+ */
+ for (level = 1; level <= drv->num_corners; level++) {
+ opp = dev_pm_opp_find_level_exact(&drv->pd.dev, level);
+ if (IS_ERR(opp))
+ return -EINVAL;
+ fc = cpr_get_fuse_corner(opp);
+ if (!fc) {
+ dev_pm_opp_put(opp);
+ return -EINVAL;
+ }
+ fnum = fc - 1;
+ freq = cpr_get_opp_hz_for_req(opp, drv->attached_cpu_dev);
+ if (!freq) {
+ dev_pm_opp_put(opp);
+ return -EINVAL;
+ }
+ cdata[level - 1].fuse_corner = fnum;
+ cdata[level - 1].freq = freq;
+
+ fuse = &drv->fuse_corners[fnum];
+ dev_dbg(drv->dev, "freq: %lu level: %u fuse level: %u\n",
+ freq, dev_pm_opp_get_level(opp) - 1, fnum);
+ if (freq > fuse->max_freq)
+ fuse->max_freq = freq;
+ dev_pm_opp_put(opp);
+ }
+
+ /*
+ * Get the quotient adjustment scaling factor, according to:
+ *
+ * scaling = min(1000 * (QUOT(corner_N) - QUOT(corner_N-1))
+ * / (freq(corner_N) - freq(corner_N-1)), max_factor)
+ *
+ * QUOT(corner_N): quotient read from fuse for fuse corner N
+ * QUOT(corner_N-1): quotient read from fuse for fuse corner (N - 1)
+ * freq(corner_N): max frequency in MHz supported by fuse corner N
+ * freq(corner_N-1): max frequency in MHz supported by fuse corner
+ * (N - 1)
+ *
+ * Then walk through the corners mapped to each fuse corner
+ * and calculate the quotient adjustment for each one using the
+ * following formula:
+ *
+ * quot_adjust = (freq_max - freq_corner) * scaling / 1000
+ *
+ * freq_max: max frequency in MHz supported by the fuse corner
+ * freq_corner: frequency in MHz corresponding to the corner
+ * scaling: calculated from above equation
+ *
+ *
+ * + +
+ * | v |
+ * q | f c o | f c
+ * u | c l | c
+ * o | f t | f
+ * t | c a | c
+ * | c f g | c f
+ * | e |
+ * +--------------- +----------------
+ * 0 1 2 3 4 5 6 0 1 2 3 4 5 6
+ * corner corner
+ *
+ * c = corner
+ * f = fuse corner
+ *
+ */
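+	/*
+	 * For example (hypothetical numbers): if the fused quotients of two
+	 * adjacent fuse corners differ by 100 and their max frequencies by
+	 * 400 MHz, then scaling = 1000 * 100 / 400 = 250 (capped at
+	 * max_quot_scale). A corner running 200 MHz below its fuse corner's
+	 * maximum then gets quot_adjust = 200 * 250 / 1000 = 50.
+	 */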
+ for (apply_scaling = false, i = 0; corner <= end; corner++, i++) {
+ fnum = cdata[i].fuse_corner;
+ fdata = &desc->cpr_fuses.fuse_corner_data[fnum];
+ quot_offset = fuses[fnum].quotient_offset;
+ fuse = &drv->fuse_corners[fnum];
+ if (fnum)
+ prev_fuse = &drv->fuse_corners[fnum - 1];
+ else
+ prev_fuse = NULL;
+
+ corner->fuse_corner = fuse;
+ corner->freq = cdata[i].freq;
+ corner->uV = fuse->uV;
+
+ if (prev_fuse && cdata[i - 1].freq == prev_fuse->max_freq) {
+ scaling = cpr_calculate_scaling(quot_offset, drv,
+ fdata, corner);
+ if (scaling < 0)
+ return scaling;
+
+ apply_scaling = true;
+ } else if (corner->freq == fuse->max_freq) {
+ /* This is a fuse corner; don't scale anything */
+ apply_scaling = false;
+ }
+
+ if (apply_scaling) {
+ freq_diff = fuse->max_freq - corner->freq;
+ freq_diff_mhz = freq_diff / 1000000;
+ corner->quot_adjust = scaling * freq_diff_mhz / 1000;
+
+ corner->uV = cpr_interpolate(corner, step_volt, fdata);
+ }
+
+ corner->max_uV = fuse->max_uV;
+ corner->min_uV = fuse->min_uV;
+ corner->uV = clamp(corner->uV, corner->min_uV, corner->max_uV);
+ corner->last_uV = corner->uV;
+
+ /* Reduce the ceiling voltage if needed */
+ if (desc->reduce_to_corner_uV && corner->uV < corner->max_uV)
+ corner->max_uV = corner->uV;
+ else if (desc->reduce_to_fuse_uV && fuse->uV < corner->max_uV)
+ corner->max_uV = max(corner->min_uV, fuse->uV);
+
+ dev_dbg(drv->dev, "corner %d: [%d %d %d] quot %d\n", i,
+ corner->min_uV, corner->uV, corner->max_uV,
+ fuse->quot - corner->quot_adjust);
+ }
+
+ return 0;
+}
+
+static const struct cpr_fuse *cpr_get_fuses(struct cpr_drv *drv)
+{
+ const struct cpr_desc *desc = drv->desc;
+ struct cpr_fuse *fuses;
+ int i;
+
+ fuses = devm_kcalloc(drv->dev, desc->num_fuse_corners,
+ sizeof(struct cpr_fuse),
+ GFP_KERNEL);
+ if (!fuses)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < desc->num_fuse_corners; i++) {
+ char tbuf[32];
+
+ snprintf(tbuf, 32, "cpr_ring_osc%d", i + 1);
+ fuses[i].ring_osc = devm_kstrdup(drv->dev, tbuf, GFP_KERNEL);
+ if (!fuses[i].ring_osc)
+ return ERR_PTR(-ENOMEM);
+
+ snprintf(tbuf, 32, "cpr_init_voltage%d", i + 1);
+ fuses[i].init_voltage = devm_kstrdup(drv->dev, tbuf,
+ GFP_KERNEL);
+ if (!fuses[i].init_voltage)
+ return ERR_PTR(-ENOMEM);
+
+ snprintf(tbuf, 32, "cpr_quotient%d", i + 1);
+ fuses[i].quotient = devm_kstrdup(drv->dev, tbuf, GFP_KERNEL);
+ if (!fuses[i].quotient)
+ return ERR_PTR(-ENOMEM);
+
+ snprintf(tbuf, 32, "cpr_quotient_offset%d", i + 1);
+ fuses[i].quotient_offset = devm_kstrdup(drv->dev, tbuf,
+ GFP_KERNEL);
+ if (!fuses[i].quotient_offset)
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return fuses;
+}
+
+static void cpr_set_loop_allowed(struct cpr_drv *drv)
+{
+ drv->loop_disabled = false;
+}
+
+static int cpr_init_parameters(struct cpr_drv *drv)
+{
+ const struct cpr_desc *desc = drv->desc;
+ struct clk *clk;
+
+ clk = clk_get(drv->dev, "ref");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ drv->ref_clk_khz = clk_get_rate(clk) / 1000;
+ clk_put(clk);
+
+ if (desc->timer_cons_up > RBIF_TIMER_ADJ_CONS_UP_MASK ||
+ desc->timer_cons_down > RBIF_TIMER_ADJ_CONS_DOWN_MASK ||
+ desc->up_threshold > RBCPR_CTL_UP_THRESHOLD_MASK ||
+ desc->down_threshold > RBCPR_CTL_DN_THRESHOLD_MASK ||
+ desc->idle_clocks > RBCPR_STEP_QUOT_IDLE_CLK_MASK ||
+ desc->clamp_timer_interval > RBIF_TIMER_ADJ_CLAMP_INT_MASK)
+ return -EINVAL;
+
+ dev_dbg(drv->dev, "up threshold = %u, down threshold = %u\n",
+ desc->up_threshold, desc->down_threshold);
+
+ return 0;
+}
+
+static int cpr_find_initial_corner(struct cpr_drv *drv)
+{
+ unsigned long rate;
+ const struct corner *end;
+ struct corner *iter;
+ unsigned int i = 0;
+
+ if (!drv->cpu_clk) {
+ dev_err(drv->dev, "cannot get rate from NULL clk\n");
+ return -EINVAL;
+ }
+
+ end = &drv->corners[drv->num_corners - 1];
+ rate = clk_get_rate(drv->cpu_clk);
+
+ /*
+ * Some bootloaders set a CPU clock frequency that is not defined
+ * in the OPP table. When running at an unlisted frequency,
+ * cpufreq_online() will change to the OPP which has the lowest
+ * frequency, at or above the unlisted frequency.
+ * Since cpufreq_online() always "rounds up" in the case of an
+ * unlisted frequency, this function always "rounds down" in case
+ * of an unlisted frequency. That way, when cpufreq_online()
+ * triggers the first ever call to cpr_set_performance_state(),
+ * it will correctly determine the direction as UP.
+ */
+ for (iter = drv->corners; iter <= end; iter++) {
+ if (iter->freq > rate)
+ break;
+ i++;
+ if (iter->freq == rate) {
+ drv->corner = iter;
+ break;
+ }
+ if (iter->freq < rate)
+ drv->corner = iter;
+ }
+
+ if (!drv->corner) {
+ dev_err(drv->dev, "boot up corner not found\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(drv->dev, "boot up perf state: %u\n", i);
+
+ return 0;
+}
+
+static const struct cpr_desc qcs404_cpr_desc = {
+ .num_fuse_corners = 3,
+ .min_diff_quot = CPR_FUSE_MIN_QUOT_DIFF,
+ .step_quot = (int []){ 25, 25, 25, },
+ .timer_delay_us = 5000,
+ .timer_cons_up = 0,
+ .timer_cons_down = 2,
+ .up_threshold = 1,
+ .down_threshold = 3,
+ .idle_clocks = 15,
+ .gcnt_us = 1,
+ .vdd_apc_step_up_limit = 1,
+ .vdd_apc_step_down_limit = 1,
+ .cpr_fuses = {
+ .init_voltage_step = 8000,
+ .init_voltage_width = 6,
+ .fuse_corner_data = (struct fuse_corner_data[]){
+ /* fuse corner 0 */
+ {
+ .ref_uV = 1224000,
+ .max_uV = 1224000,
+ .min_uV = 1048000,
+ .max_volt_scale = 0,
+ .max_quot_scale = 0,
+ .quot_offset = 0,
+ .quot_scale = 1,
+ .quot_adjust = 0,
+ .quot_offset_scale = 5,
+ .quot_offset_adjust = 0,
+ },
+ /* fuse corner 1 */
+ {
+ .ref_uV = 1288000,
+ .max_uV = 1288000,
+ .min_uV = 1048000,
+ .max_volt_scale = 2000,
+ .max_quot_scale = 1400,
+ .quot_offset = 0,
+ .quot_scale = 1,
+ .quot_adjust = -20,
+ .quot_offset_scale = 5,
+ .quot_offset_adjust = 0,
+ },
+ /* fuse corner 2 */
+ {
+ .ref_uV = 1352000,
+ .max_uV = 1384000,
+ .min_uV = 1088000,
+ .max_volt_scale = 2000,
+ .max_quot_scale = 1400,
+ .quot_offset = 0,
+ .quot_scale = 1,
+ .quot_adjust = 0,
+ .quot_offset_scale = 5,
+ .quot_offset_adjust = 0,
+ },
+ },
+ },
+};
+
+static const struct acc_desc qcs404_acc_desc = {
+ .settings = (struct reg_sequence[]){
+ { 0xb120, 0x1041040 },
+ { 0xb124, 0x41 },
+ { 0xb120, 0x0 },
+ { 0xb124, 0x0 },
+ { 0xb120, 0x0 },
+ { 0xb124, 0x0 },
+ },
+ .config = (struct reg_sequence[]){
+ { 0xb138, 0xff },
+ { 0xb130, 0x5555 },
+ },
+ .num_regs_per_fuse = 2,
+};
+
+static const struct cpr_acc_desc qcs404_cpr_acc_desc = {
+ .cpr_desc = &qcs404_cpr_desc,
+ .acc_desc = &qcs404_acc_desc,
+};
+
+static unsigned int cpr_get_performance_state(struct generic_pm_domain *genpd,
+ struct dev_pm_opp *opp)
+{
+ return dev_pm_opp_get_level(opp);
+}
+
+static int cpr_power_off(struct generic_pm_domain *domain)
+{
+ struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
+
+ return cpr_disable(drv);
+}
+
+static int cpr_power_on(struct generic_pm_domain *domain)
+{
+ struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
+
+ return cpr_enable(drv);
+}
+
+static int cpr_pd_attach_dev(struct generic_pm_domain *domain,
+ struct device *dev)
+{
+ struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
+ const struct acc_desc *acc_desc = drv->acc_desc;
+ int ret = 0;
+
+ mutex_lock(&drv->lock);
+
+ dev_dbg(drv->dev, "attach callback for: %s\n", dev_name(dev));
+
+ /*
+ * This driver only supports scaling voltage for a CPU cluster
+ * where all CPUs in the cluster share a single regulator.
+ * Therefore, save the struct device pointer only for the first
+ * CPU device that gets attached. There is no need to do any
+ * additional initialization when further CPUs get attached.
+ */
+ if (drv->attached_cpu_dev)
+ goto unlock;
+
+ /*
+ * cpr_scale_voltage() requires the direction (if we are changing
+ * to a higher or lower OPP). The first time
+ * cpr_set_performance_state() is called, there is no previous
+ * performance state defined. Therefore, we call
+ * cpr_find_initial_corner() that gets the CPU clock frequency
+ * set by the bootloader, so that we can determine the direction
+ * the first time cpr_set_performance_state() is called.
+ */
+ drv->cpu_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(drv->cpu_clk)) {
+ ret = PTR_ERR(drv->cpu_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(drv->dev, "could not get cpu clk: %d\n", ret);
+ goto unlock;
+ }
+ drv->attached_cpu_dev = dev;
+
+ dev_dbg(drv->dev, "using cpu clk from: %s\n",
+ dev_name(drv->attached_cpu_dev));
+
+ /*
+ * Everything related to (virtual) corners has to be initialized
+	 * here, when attaching to the power domain, since we need to know
+	 * the maximum frequency of each fuse corner, and that is only
+	 * available after the cpufreq driver has attached to us.
+ */
+ ret = dev_pm_opp_get_opp_count(&drv->pd.dev);
+ if (ret < 0) {
+ dev_err(drv->dev, "could not get OPP count\n");
+ goto unlock;
+ }
+ drv->num_corners = ret;
+
+ if (drv->num_corners < 2) {
+ dev_err(drv->dev, "need at least 2 OPPs to use CPR\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ drv->corners = devm_kcalloc(drv->dev, drv->num_corners,
+ sizeof(*drv->corners),
+ GFP_KERNEL);
+ if (!drv->corners) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ ret = cpr_corner_init(drv);
+ if (ret)
+ goto unlock;
+
+ cpr_set_loop_allowed(drv);
+
+ ret = cpr_init_parameters(drv);
+ if (ret)
+ goto unlock;
+
+ /* Configure CPR HW but keep it disabled */
+ ret = cpr_config(drv);
+ if (ret)
+ goto unlock;
+
+ ret = cpr_find_initial_corner(drv);
+ if (ret)
+ goto unlock;
+
+ if (acc_desc->config)
+ regmap_multi_reg_write(drv->tcsr, acc_desc->config,
+ acc_desc->num_regs_per_fuse);
+
+ /* Enable ACC if required */
+ if (acc_desc->enable_mask)
+ regmap_update_bits(drv->tcsr, acc_desc->enable_reg,
+ acc_desc->enable_mask,
+ acc_desc->enable_mask);
+
+ dev_info(drv->dev, "driver initialized with %u OPPs\n",
+ drv->num_corners);
+
+unlock:
+ mutex_unlock(&drv->lock);
+
+ return ret;
+}
+
+static int cpr_debug_info_show(struct seq_file *s, void *unused)
+{
+ u32 gcnt, ro_sel, ctl, irq_status, reg, error_steps;
+ u32 step_dn, step_up, error, error_lt0, busy;
+ struct cpr_drv *drv = s->private;
+ struct fuse_corner *fuse_corner;
+ struct corner *corner;
+
+ corner = drv->corner;
+ fuse_corner = corner->fuse_corner;
+
+ seq_printf(s, "corner, current_volt = %d uV\n",
+ corner->last_uV);
+
+ ro_sel = fuse_corner->ring_osc_idx;
+ gcnt = cpr_read(drv, REG_RBCPR_GCNT_TARGET(ro_sel));
+ seq_printf(s, "rbcpr_gcnt_target (%u) = %#02X\n", ro_sel, gcnt);
+
+ ctl = cpr_read(drv, REG_RBCPR_CTL);
+ seq_printf(s, "rbcpr_ctl = %#02X\n", ctl);
+
+ irq_status = cpr_read(drv, REG_RBIF_IRQ_STATUS);
+ seq_printf(s, "rbcpr_irq_status = %#02X\n", irq_status);
+
+ reg = cpr_read(drv, REG_RBCPR_RESULT_0);
+ seq_printf(s, "rbcpr_result_0 = %#02X\n", reg);
+
+ step_dn = reg & 0x01;
+ step_up = (reg >> RBCPR_RESULT0_STEP_UP_SHIFT) & 0x01;
+ seq_printf(s, " [step_dn = %u", step_dn);
+
+ seq_printf(s, ", step_up = %u", step_up);
+
+ error_steps = (reg >> RBCPR_RESULT0_ERROR_STEPS_SHIFT)
+ & RBCPR_RESULT0_ERROR_STEPS_MASK;
+ seq_printf(s, ", error_steps = %u", error_steps);
+
+ error = (reg >> RBCPR_RESULT0_ERROR_SHIFT) & RBCPR_RESULT0_ERROR_MASK;
+ seq_printf(s, ", error = %u", error);
+
+ error_lt0 = (reg >> RBCPR_RESULT0_ERROR_LT0_SHIFT) & 0x01;
+ seq_printf(s, ", error_lt_0 = %u", error_lt0);
+
+ busy = (reg >> RBCPR_RESULT0_BUSY_SHIFT) & 0x01;
+ seq_printf(s, ", busy = %u]\n", busy);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(cpr_debug_info);
+
+static void cpr_debugfs_init(struct cpr_drv *drv)
+{
+ drv->debugfs = debugfs_create_dir("qcom_cpr", NULL);
+
+ debugfs_create_file("debug_info", 0444, drv->debugfs,
+ drv, &cpr_debug_info_fops);
+}
+
+static int cpr_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cpr_drv *drv;
+ int irq, ret;
+ const struct cpr_acc_desc *data;
+ struct device_node *np;
+ u32 cpr_rev = FUSE_REVISION_UNKNOWN;
+
+ data = of_device_get_match_data(dev);
+ if (!data || !data->cpr_desc || !data->acc_desc)
+ return -EINVAL;
+
+ drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
+ if (!drv)
+ return -ENOMEM;
+ drv->dev = dev;
+ drv->desc = data->cpr_desc;
+ drv->acc_desc = data->acc_desc;
+
+ drv->fuse_corners = devm_kcalloc(dev, drv->desc->num_fuse_corners,
+ sizeof(*drv->fuse_corners),
+ GFP_KERNEL);
+ if (!drv->fuse_corners)
+ return -ENOMEM;
+
+ np = of_parse_phandle(dev->of_node, "acc-syscon", 0);
+ if (!np)
+ return -ENODEV;
+
+ drv->tcsr = syscon_node_to_regmap(np);
+ of_node_put(np);
+ if (IS_ERR(drv->tcsr))
+ return PTR_ERR(drv->tcsr);
+
+ drv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(drv->base))
+ return PTR_ERR(drv->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -EINVAL;
+
+ drv->vdd_apc = devm_regulator_get(dev, "vdd-apc");
+ if (IS_ERR(drv->vdd_apc))
+ return PTR_ERR(drv->vdd_apc);
+
+ /*
+	 * Initialize the fuse corners, since they simply depend
+	 * on data in the efuses.
+ * Everything related to (virtual) corners has to be
+ * initialized after attaching to the power domain,
+ * since it depends on the CPU's OPP table.
+ */
+ ret = nvmem_cell_read_variable_le_u32(dev, "cpr_fuse_revision", &cpr_rev);
+ if (ret)
+ return ret;
+
+ drv->cpr_fuses = cpr_get_fuses(drv);
+ if (IS_ERR(drv->cpr_fuses))
+ return PTR_ERR(drv->cpr_fuses);
+
+ ret = cpr_populate_ring_osc_idx(drv);
+ if (ret)
+ return ret;
+
+ ret = cpr_fuse_corner_init(drv);
+ if (ret)
+ return ret;
+
+ mutex_init(&drv->lock);
+
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ cpr_irq_handler,
+ IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+ "cpr", drv);
+ if (ret)
+ return ret;
+
+ drv->pd.name = devm_kstrdup_const(dev, dev->of_node->full_name,
+ GFP_KERNEL);
+ if (!drv->pd.name)
+ return -EINVAL;
+
+ drv->pd.power_off = cpr_power_off;
+ drv->pd.power_on = cpr_power_on;
+ drv->pd.set_performance_state = cpr_set_performance_state;
+ drv->pd.opp_to_performance_state = cpr_get_performance_state;
+ drv->pd.attach_dev = cpr_pd_attach_dev;
+
+ ret = pm_genpd_init(&drv->pd, NULL, true);
+ if (ret)
+ return ret;
+
+ ret = of_genpd_add_provider_simple(dev->of_node, &drv->pd);
+ if (ret)
+ goto err_remove_genpd;
+
+ platform_set_drvdata(pdev, drv);
+ cpr_debugfs_init(drv);
+
+ return 0;
+
+err_remove_genpd:
+ pm_genpd_remove(&drv->pd);
+ return ret;
+}
+
+static int cpr_remove(struct platform_device *pdev)
+{
+ struct cpr_drv *drv = platform_get_drvdata(pdev);
+
+ if (cpr_is_allowed(drv)) {
+ cpr_ctl_disable(drv);
+ cpr_irq_set(drv, 0);
+ }
+
+ of_genpd_del_provider(pdev->dev.of_node);
+ pm_genpd_remove(&drv->pd);
+
+ debugfs_remove_recursive(drv->debugfs);
+
+ return 0;
+}
+
+static const struct of_device_id cpr_match_table[] = {
+ { .compatible = "qcom,qcs404-cpr", .data = &qcs404_cpr_acc_desc },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cpr_match_table);
+
+static struct platform_driver cpr_driver = {
+ .probe = cpr_probe,
+ .remove = cpr_remove,
+ .driver = {
+ .name = "qcom-cpr",
+ .of_match_table = cpr_match_table,
+ },
+};
+module_platform_driver(cpr_driver);
+
+MODULE_DESCRIPTION("Core Power Reduction (CPR) driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/icc-bwmon.c b/drivers/soc/qcom/icc-bwmon.c
new file mode 100644
index 000000000..782954999
--- /dev/null
+++ b/drivers/soc/qcom/icc-bwmon.c
@@ -0,0 +1,702 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2021-2022 Linaro Ltd
+ * Author: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>, based on
+ * previous work of Thara Gopinath and msm-4.9 downstream sources.
+ */
+
+#include <linux/err.h>
+#include <linux/interconnect.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/regmap.h>
+#include <linux/sizes.h>
+
+/*
+ * The BWMON samples data throughput within a 'sample_ms' window. Its three
+ * configurable thresholds (Low, Medium and High) give four windows (called
+ * zones) of current bandwidth:
+ *
+ * Zone 0: byte count < THRES_LO
+ * Zone 1: THRES_LO < byte count < THRES_MED
+ * Zone 2: THRES_MED < byte count < THRES_HIGH
+ * Zone 3: THRES_HIGH < byte count
+ *
+ * Zones 0 and 2 are not used by this driver.
+ */
+
+/* Internal sampling clock frequency */
+#define HW_TIMER_HZ 19200000
+
+#define BWMON_V4_GLOBAL_IRQ_CLEAR 0x008
+#define BWMON_V4_GLOBAL_IRQ_ENABLE 0x00c
+/*
+ * All values from here on match regmap fields, so they are given without
+ * absolute register offsets.
+ */
+#define BWMON_V4_GLOBAL_IRQ_ENABLE_ENABLE BIT(0)
+
+#define BWMON_V4_IRQ_STATUS 0x100
+#define BWMON_V4_IRQ_CLEAR 0x108
+
+#define BWMON_V4_IRQ_ENABLE 0x10c
+#define BWMON_IRQ_ENABLE_MASK (BIT(1) | BIT(3))
+#define BWMON_V5_IRQ_STATUS 0x000
+#define BWMON_V5_IRQ_CLEAR 0x008
+#define BWMON_V5_IRQ_ENABLE 0x00c
+
+#define BWMON_V4_ENABLE 0x2a0
+#define BWMON_V5_ENABLE 0x010
+#define BWMON_ENABLE_ENABLE BIT(0)
+
+#define BWMON_V4_CLEAR 0x2a4
+#define BWMON_V5_CLEAR 0x014
+#define BWMON_CLEAR_CLEAR BIT(0)
+#define BWMON_CLEAR_CLEAR_ALL BIT(1)
+
+#define BWMON_V4_SAMPLE_WINDOW 0x2a8
+#define BWMON_V5_SAMPLE_WINDOW 0x020
+
+#define BWMON_V4_THRESHOLD_HIGH 0x2ac
+#define BWMON_V4_THRESHOLD_MED 0x2b0
+#define BWMON_V4_THRESHOLD_LOW 0x2b4
+#define BWMON_V5_THRESHOLD_HIGH 0x024
+#define BWMON_V5_THRESHOLD_MED 0x028
+#define BWMON_V5_THRESHOLD_LOW 0x02c
+
+#define BWMON_V4_ZONE_ACTIONS 0x2b8
+#define BWMON_V5_ZONE_ACTIONS 0x030
+/*
+ * Actions to perform on some zone 'z' when the current zone hits its
+ * threshold: increment the counter of zone 'z'
+ */
+#define BWMON_ZONE_ACTIONS_INCREMENT(z) (0x2 << ((z) * 2))
+/* Clear counter of zone 'z' */
+#define BWMON_ZONE_ACTIONS_CLEAR(z) (0x1 << ((z) * 2))
+
+/* Zone 0 threshold hit: Clear zone count */
+#define BWMON_ZONE_ACTIONS_ZONE0 (BWMON_ZONE_ACTIONS_CLEAR(0))
+
+/* Zone 1 threshold hit: Increment zone count & clear lower zones */
+#define BWMON_ZONE_ACTIONS_ZONE1 (BWMON_ZONE_ACTIONS_INCREMENT(1) | \
+ BWMON_ZONE_ACTIONS_CLEAR(0))
+
+/* Zone 2 threshold hit: Increment zone count & clear lower zones */
+#define BWMON_ZONE_ACTIONS_ZONE2 (BWMON_ZONE_ACTIONS_INCREMENT(2) | \
+ BWMON_ZONE_ACTIONS_CLEAR(1) | \
+ BWMON_ZONE_ACTIONS_CLEAR(0))
+
+/* Zone 3 threshold hit: Increment zone count & clear lower zones */
+#define BWMON_ZONE_ACTIONS_ZONE3 (BWMON_ZONE_ACTIONS_INCREMENT(3) | \
+ BWMON_ZONE_ACTIONS_CLEAR(2) | \
+ BWMON_ZONE_ACTIONS_CLEAR(1) | \
+ BWMON_ZONE_ACTIONS_CLEAR(0))
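+
+/*
+ * For example, BWMON_ZONE_ACTIONS_ZONE1 above expands to
+ * (0x2 << 2) | (0x1 << 0) = 0x9: increment the zone 1 counter and clear
+ * the zone 0 counter whenever the zone 1 threshold is crossed.
+ */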
+
+/*
+ * There is no clear documentation/explanation of the BWMON_V4_THRESHOLD_COUNT
+ * register. Based on observations, it is the number of times a threshold has
+ * to be reached to trigger an interrupt in a given zone.
+ *
+ * 0xff is the maximum value and is used to effectively ignore zones 0 and 2.
+ */
+#define BWMON_V4_THRESHOLD_COUNT 0x2bc
+#define BWMON_V5_THRESHOLD_COUNT 0x034
+#define BWMON_THRESHOLD_COUNT_ZONE0_DEFAULT 0xff
+#define BWMON_THRESHOLD_COUNT_ZONE2_DEFAULT 0xff
+
+#define BWMON_V4_ZONE_MAX(zone) (0x2e0 + 4 * (zone))
+#define BWMON_V5_ZONE_MAX(zone) (0x044 + 4 * (zone))
+
+/* Quirks for specific BWMON types */
+#define BWMON_HAS_GLOBAL_IRQ BIT(0)
+#define BWMON_NEEDS_FORCE_CLEAR BIT(1)
+
+enum bwmon_fields {
+ F_GLOBAL_IRQ_CLEAR,
+ F_GLOBAL_IRQ_ENABLE,
+ F_IRQ_STATUS,
+ F_IRQ_CLEAR,
+ F_IRQ_ENABLE,
+ F_ENABLE,
+ F_CLEAR,
+ F_SAMPLE_WINDOW,
+ F_THRESHOLD_HIGH,
+ F_THRESHOLD_MED,
+ F_THRESHOLD_LOW,
+ F_ZONE_ACTIONS_ZONE0,
+ F_ZONE_ACTIONS_ZONE1,
+ F_ZONE_ACTIONS_ZONE2,
+ F_ZONE_ACTIONS_ZONE3,
+ F_THRESHOLD_COUNT_ZONE0,
+ F_THRESHOLD_COUNT_ZONE1,
+ F_THRESHOLD_COUNT_ZONE2,
+ F_THRESHOLD_COUNT_ZONE3,
+ F_ZONE0_MAX,
+ F_ZONE1_MAX,
+ F_ZONE2_MAX,
+ F_ZONE3_MAX,
+
+ F_NUM_FIELDS
+};
+
+struct icc_bwmon_data {
+ unsigned int sample_ms;
+ unsigned int count_unit_kb; /* kbytes */
+ unsigned int default_highbw_kbps;
+ unsigned int default_medbw_kbps;
+ unsigned int default_lowbw_kbps;
+ u8 zone1_thres_count;
+ u8 zone3_thres_count;
+ unsigned int quirks;
+
+ const struct regmap_config *regmap_cfg;
+ const struct reg_field *regmap_fields;
+};
+
+struct icc_bwmon {
+ struct device *dev;
+ const struct icc_bwmon_data *data;
+ int irq;
+
+ struct regmap *regmap;
+ struct regmap_field *regs[F_NUM_FIELDS];
+
+ unsigned int max_bw_kbps;
+ unsigned int min_bw_kbps;
+ unsigned int target_kbps;
+ unsigned int current_kbps;
+};
+
+/* BWMON v4 */
+static const struct reg_field msm8998_bwmon_reg_fields[] = {
+ [F_GLOBAL_IRQ_CLEAR] = REG_FIELD(BWMON_V4_GLOBAL_IRQ_CLEAR, 0, 0),
+ [F_GLOBAL_IRQ_ENABLE] = REG_FIELD(BWMON_V4_GLOBAL_IRQ_ENABLE, 0, 0),
+ [F_IRQ_STATUS] = REG_FIELD(BWMON_V4_IRQ_STATUS, 4, 7),
+ [F_IRQ_CLEAR] = REG_FIELD(BWMON_V4_IRQ_CLEAR, 4, 7),
+ [F_IRQ_ENABLE] = REG_FIELD(BWMON_V4_IRQ_ENABLE, 4, 7),
+ /* F_ENABLE covers entire register to disable other features */
+ [F_ENABLE] = REG_FIELD(BWMON_V4_ENABLE, 0, 31),
+ [F_CLEAR] = REG_FIELD(BWMON_V4_CLEAR, 0, 1),
+ [F_SAMPLE_WINDOW] = REG_FIELD(BWMON_V4_SAMPLE_WINDOW, 0, 23),
+ [F_THRESHOLD_HIGH] = REG_FIELD(BWMON_V4_THRESHOLD_HIGH, 0, 11),
+ [F_THRESHOLD_MED] = REG_FIELD(BWMON_V4_THRESHOLD_MED, 0, 11),
+ [F_THRESHOLD_LOW] = REG_FIELD(BWMON_V4_THRESHOLD_LOW, 0, 11),
+ [F_ZONE_ACTIONS_ZONE0] = REG_FIELD(BWMON_V4_ZONE_ACTIONS, 0, 7),
+ [F_ZONE_ACTIONS_ZONE1] = REG_FIELD(BWMON_V4_ZONE_ACTIONS, 8, 15),
+ [F_ZONE_ACTIONS_ZONE2] = REG_FIELD(BWMON_V4_ZONE_ACTIONS, 16, 23),
+ [F_ZONE_ACTIONS_ZONE3] = REG_FIELD(BWMON_V4_ZONE_ACTIONS, 24, 31),
+ [F_THRESHOLD_COUNT_ZONE0] = REG_FIELD(BWMON_V4_THRESHOLD_COUNT, 0, 7),
+ [F_THRESHOLD_COUNT_ZONE1] = REG_FIELD(BWMON_V4_THRESHOLD_COUNT, 8, 15),
+ [F_THRESHOLD_COUNT_ZONE2] = REG_FIELD(BWMON_V4_THRESHOLD_COUNT, 16, 23),
+ [F_THRESHOLD_COUNT_ZONE3] = REG_FIELD(BWMON_V4_THRESHOLD_COUNT, 24, 31),
+ [F_ZONE0_MAX] = REG_FIELD(BWMON_V4_ZONE_MAX(0), 0, 11),
+ [F_ZONE1_MAX] = REG_FIELD(BWMON_V4_ZONE_MAX(1), 0, 11),
+ [F_ZONE2_MAX] = REG_FIELD(BWMON_V4_ZONE_MAX(2), 0, 11),
+ [F_ZONE3_MAX] = REG_FIELD(BWMON_V4_ZONE_MAX(3), 0, 11),
+};
+
+static const struct regmap_range msm8998_bwmon_reg_noread_ranges[] = {
+ regmap_reg_range(BWMON_V4_GLOBAL_IRQ_CLEAR, BWMON_V4_GLOBAL_IRQ_CLEAR),
+ regmap_reg_range(BWMON_V4_IRQ_CLEAR, BWMON_V4_IRQ_CLEAR),
+ regmap_reg_range(BWMON_V4_CLEAR, BWMON_V4_CLEAR),
+};
+
+static const struct regmap_access_table msm8998_bwmon_reg_read_table = {
+ .no_ranges = msm8998_bwmon_reg_noread_ranges,
+ .n_no_ranges = ARRAY_SIZE(msm8998_bwmon_reg_noread_ranges),
+};
+
+static const struct regmap_range msm8998_bwmon_reg_volatile_ranges[] = {
+ regmap_reg_range(BWMON_V4_IRQ_STATUS, BWMON_V4_IRQ_STATUS),
+ regmap_reg_range(BWMON_V4_ZONE_MAX(0), BWMON_V4_ZONE_MAX(3)),
+};
+
+static const struct regmap_access_table msm8998_bwmon_reg_volatile_table = {
+ .yes_ranges = msm8998_bwmon_reg_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(msm8998_bwmon_reg_volatile_ranges),
+};
+
+/*
+ * Fill the cache for the non-readable registers only, as the rest does not
+ * really matter and can be read from the device.
+ */
+static const struct reg_default msm8998_bwmon_reg_defaults[] = {
+ { BWMON_V4_GLOBAL_IRQ_CLEAR, 0x0 },
+ { BWMON_V4_IRQ_CLEAR, 0x0 },
+ { BWMON_V4_CLEAR, 0x0 },
+};
+
+static const struct regmap_config msm8998_bwmon_regmap_cfg = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ /*
+ * No concurrent access expected - driver has one interrupt handler,
+ * regmap is not shared, no driver or user-space API.
+ */
+ .disable_locking = true,
+ .rd_table = &msm8998_bwmon_reg_read_table,
+ .volatile_table = &msm8998_bwmon_reg_volatile_table,
+ .reg_defaults = msm8998_bwmon_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(msm8998_bwmon_reg_defaults),
+ /*
+ * Cache is necessary for using regmap fields with non-readable
+ * registers.
+ */
+ .cache_type = REGCACHE_RBTREE,
+};
+
+/* BWMON v5 */
+static const struct reg_field sdm845_llcc_bwmon_reg_fields[] = {
+ [F_GLOBAL_IRQ_CLEAR] = {},
+ [F_GLOBAL_IRQ_ENABLE] = {},
+ [F_IRQ_STATUS] = REG_FIELD(BWMON_V5_IRQ_STATUS, 0, 3),
+ [F_IRQ_CLEAR] = REG_FIELD(BWMON_V5_IRQ_CLEAR, 0, 3),
+ [F_IRQ_ENABLE] = REG_FIELD(BWMON_V5_IRQ_ENABLE, 0, 3),
+ /* F_ENABLE covers entire register to disable other features */
+ [F_ENABLE] = REG_FIELD(BWMON_V5_ENABLE, 0, 31),
+ [F_CLEAR] = REG_FIELD(BWMON_V5_CLEAR, 0, 1),
+ [F_SAMPLE_WINDOW] = REG_FIELD(BWMON_V5_SAMPLE_WINDOW, 0, 19),
+ [F_THRESHOLD_HIGH] = REG_FIELD(BWMON_V5_THRESHOLD_HIGH, 0, 11),
+ [F_THRESHOLD_MED] = REG_FIELD(BWMON_V5_THRESHOLD_MED, 0, 11),
+ [F_THRESHOLD_LOW] = REG_FIELD(BWMON_V5_THRESHOLD_LOW, 0, 11),
+ [F_ZONE_ACTIONS_ZONE0] = REG_FIELD(BWMON_V5_ZONE_ACTIONS, 0, 7),
+ [F_ZONE_ACTIONS_ZONE1] = REG_FIELD(BWMON_V5_ZONE_ACTIONS, 8, 15),
+ [F_ZONE_ACTIONS_ZONE2] = REG_FIELD(BWMON_V5_ZONE_ACTIONS, 16, 23),
+ [F_ZONE_ACTIONS_ZONE3] = REG_FIELD(BWMON_V5_ZONE_ACTIONS, 24, 31),
+ [F_THRESHOLD_COUNT_ZONE0] = REG_FIELD(BWMON_V5_THRESHOLD_COUNT, 0, 7),
+ [F_THRESHOLD_COUNT_ZONE1] = REG_FIELD(BWMON_V5_THRESHOLD_COUNT, 8, 15),
+ [F_THRESHOLD_COUNT_ZONE2] = REG_FIELD(BWMON_V5_THRESHOLD_COUNT, 16, 23),
+ [F_THRESHOLD_COUNT_ZONE3] = REG_FIELD(BWMON_V5_THRESHOLD_COUNT, 24, 31),
+ [F_ZONE0_MAX] = REG_FIELD(BWMON_V5_ZONE_MAX(0), 0, 11),
+ [F_ZONE1_MAX] = REG_FIELD(BWMON_V5_ZONE_MAX(1), 0, 11),
+ [F_ZONE2_MAX] = REG_FIELD(BWMON_V5_ZONE_MAX(2), 0, 11),
+ [F_ZONE3_MAX] = REG_FIELD(BWMON_V5_ZONE_MAX(3), 0, 11),
+};
+
+static const struct regmap_range sdm845_llcc_bwmon_reg_noread_ranges[] = {
+ regmap_reg_range(BWMON_V5_IRQ_CLEAR, BWMON_V5_IRQ_CLEAR),
+ regmap_reg_range(BWMON_V5_CLEAR, BWMON_V5_CLEAR),
+};
+
+static const struct regmap_access_table sdm845_llcc_bwmon_reg_read_table = {
+ .no_ranges = sdm845_llcc_bwmon_reg_noread_ranges,
+ .n_no_ranges = ARRAY_SIZE(sdm845_llcc_bwmon_reg_noread_ranges),
+};
+
+static const struct regmap_range sdm845_llcc_bwmon_reg_volatile_ranges[] = {
+ regmap_reg_range(BWMON_V5_IRQ_STATUS, BWMON_V5_IRQ_STATUS),
+ regmap_reg_range(BWMON_V5_ZONE_MAX(0), BWMON_V5_ZONE_MAX(3)),
+};
+
+static const struct regmap_access_table sdm845_llcc_bwmon_reg_volatile_table = {
+ .yes_ranges = sdm845_llcc_bwmon_reg_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(sdm845_llcc_bwmon_reg_volatile_ranges),
+};
+
+/*
+ * Fill the cache for the non-readable registers only, as the rest does not
+ * really matter and can be read from the device.
+ */
+static const struct reg_default sdm845_llcc_bwmon_reg_defaults[] = {
+ { BWMON_V5_IRQ_CLEAR, 0x0 },
+ { BWMON_V5_CLEAR, 0x0 },
+};
+
+static const struct regmap_config sdm845_llcc_bwmon_regmap_cfg = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ /*
+ * No concurrent access expected - driver has one interrupt handler,
+ * regmap is not shared, no driver or user-space API.
+ */
+ .disable_locking = true,
+ .rd_table = &sdm845_llcc_bwmon_reg_read_table,
+ .volatile_table = &sdm845_llcc_bwmon_reg_volatile_table,
+ .reg_defaults = sdm845_llcc_bwmon_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(sdm845_llcc_bwmon_reg_defaults),
+ /*
+ * Cache is necessary for using regmap fields with non-readable
+ * registers.
+ */
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static void bwmon_clear_counters(struct icc_bwmon *bwmon, bool clear_all)
+{
+ unsigned int val = BWMON_CLEAR_CLEAR;
+
+ if (clear_all)
+ val |= BWMON_CLEAR_CLEAR_ALL;
+ /*
+ * Clear counters. The order and barriers are
+ * important. Quoting downstream Qualcomm msm-4.9 tree:
+ *
+ * The counter clear and IRQ clear bits are not in the same 4KB
+ * region. So, we need to make sure the counter clear is completed
+ * before we try to clear the IRQ or do any other counter operations.
+ */
+ regmap_field_force_write(bwmon->regs[F_CLEAR], val);
+ if (bwmon->data->quirks & BWMON_NEEDS_FORCE_CLEAR)
+ regmap_field_force_write(bwmon->regs[F_CLEAR], 0);
+}
+
+static void bwmon_clear_irq(struct icc_bwmon *bwmon)
+{
+ /*
+ * Clear zone and global interrupts. The order and barriers are
+ * important. Quoting downstream Qualcomm msm-4.9 tree:
+ *
+ * Synchronize the local interrupt clear in mon_irq_clear()
+ * with the global interrupt clear here. Otherwise, the CPU
+ * may reorder the two writes and clear the global interrupt
+ * before the local interrupt, causing the global interrupt
+ * to be retriggered by the local interrupt still being high.
+ *
+ * Similarly, because the global registers are in a different
+ * region than the local registers, we need to ensure any register
+ * writes to enable the monitor after this call are ordered with the
+ * clearing here so that local writes don't happen before the
+ * interrupt is cleared.
+ */
+ regmap_field_force_write(bwmon->regs[F_IRQ_CLEAR], BWMON_IRQ_ENABLE_MASK);
+ if (bwmon->data->quirks & BWMON_NEEDS_FORCE_CLEAR)
+ regmap_field_force_write(bwmon->regs[F_IRQ_CLEAR], 0);
+ if (bwmon->data->quirks & BWMON_HAS_GLOBAL_IRQ)
+ regmap_field_force_write(bwmon->regs[F_GLOBAL_IRQ_CLEAR],
+ BWMON_V4_GLOBAL_IRQ_ENABLE_ENABLE);
+}
+
+static void bwmon_disable(struct icc_bwmon *bwmon)
+{
+ /* Disable interrupts. Strict ordering, see bwmon_clear_irq(). */
+ if (bwmon->data->quirks & BWMON_HAS_GLOBAL_IRQ)
+ regmap_field_write(bwmon->regs[F_GLOBAL_IRQ_ENABLE], 0x0);
+ regmap_field_write(bwmon->regs[F_IRQ_ENABLE], 0x0);
+
+ /*
+ * Disable bwmon. Must happen before bwmon_clear_irq() to avoid spurious
+ * IRQ.
+ */
+ regmap_field_write(bwmon->regs[F_ENABLE], 0x0);
+}
+
+static void bwmon_enable(struct icc_bwmon *bwmon, unsigned int irq_enable)
+{
+ /* Enable interrupts */
+ if (bwmon->data->quirks & BWMON_HAS_GLOBAL_IRQ)
+ regmap_field_write(bwmon->regs[F_GLOBAL_IRQ_ENABLE],
+ BWMON_V4_GLOBAL_IRQ_ENABLE_ENABLE);
+ regmap_field_write(bwmon->regs[F_IRQ_ENABLE], irq_enable);
+
+ /* Enable bwmon */
+ regmap_field_write(bwmon->regs[F_ENABLE], BWMON_ENABLE_ENABLE);
+}
+
+static unsigned int bwmon_kbps_to_count(struct icc_bwmon *bwmon,
+ unsigned int kbps)
+{
+ return kbps / bwmon->data->count_unit_kb;
+}
+
+static void bwmon_set_threshold(struct icc_bwmon *bwmon,
+ struct regmap_field *reg, unsigned int kbps)
+{
+ unsigned int thres;
+
+ thres = mult_frac(bwmon_kbps_to_count(bwmon, kbps),
+ bwmon->data->sample_ms, MSEC_PER_SEC);
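+	/*
+	 * For example, with msm8998's count_unit_kb = 64 and sample_ms = 4,
+	 * a 512 MB/s threshold (512 * 1024 kB/s) becomes 524288 / 64 = 8192
+	 * count units per second, scaled to the 4 ms window:
+	 * 8192 * 4 / 1000 = 32.
+	 */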
+ regmap_field_write(reg, thres);
+}
+
+static void bwmon_start(struct icc_bwmon *bwmon)
+{
+ const struct icc_bwmon_data *data = bwmon->data;
+ int window;
+
+ bwmon_clear_counters(bwmon, true);
+
+ window = mult_frac(bwmon->data->sample_ms, HW_TIMER_HZ, MSEC_PER_SEC);
+ /* Maximum sampling window: 0xffffff for v4 and 0xfffff for v5 */
+ regmap_field_write(bwmon->regs[F_SAMPLE_WINDOW], window);
+
+ bwmon_set_threshold(bwmon, bwmon->regs[F_THRESHOLD_HIGH],
+ data->default_highbw_kbps);
+ bwmon_set_threshold(bwmon, bwmon->regs[F_THRESHOLD_MED],
+ data->default_medbw_kbps);
+ bwmon_set_threshold(bwmon, bwmon->regs[F_THRESHOLD_LOW],
+ data->default_lowbw_kbps);
+
+ regmap_field_write(bwmon->regs[F_THRESHOLD_COUNT_ZONE0],
+ BWMON_THRESHOLD_COUNT_ZONE0_DEFAULT);
+ regmap_field_write(bwmon->regs[F_THRESHOLD_COUNT_ZONE1],
+ data->zone1_thres_count);
+ regmap_field_write(bwmon->regs[F_THRESHOLD_COUNT_ZONE2],
+ BWMON_THRESHOLD_COUNT_ZONE2_DEFAULT);
+ regmap_field_write(bwmon->regs[F_THRESHOLD_COUNT_ZONE3],
+ data->zone3_thres_count);
+
+ regmap_field_write(bwmon->regs[F_ZONE_ACTIONS_ZONE0],
+ BWMON_ZONE_ACTIONS_ZONE0);
+ regmap_field_write(bwmon->regs[F_ZONE_ACTIONS_ZONE1],
+ BWMON_ZONE_ACTIONS_ZONE1);
+ regmap_field_write(bwmon->regs[F_ZONE_ACTIONS_ZONE2],
+ BWMON_ZONE_ACTIONS_ZONE2);
+ regmap_field_write(bwmon->regs[F_ZONE_ACTIONS_ZONE3],
+ BWMON_ZONE_ACTIONS_ZONE3);
+
+ bwmon_clear_irq(bwmon);
+ bwmon_enable(bwmon, BWMON_IRQ_ENABLE_MASK);
+}
+
+static irqreturn_t bwmon_intr(int irq, void *dev_id)
+{
+ struct icc_bwmon *bwmon = dev_id;
+ unsigned int status, max;
+ int zone;
+
+ if (regmap_field_read(bwmon->regs[F_IRQ_STATUS], &status))
+ return IRQ_NONE;
+
+ status &= BWMON_IRQ_ENABLE_MASK;
+ if (!status) {
+ /*
+		 * Only zone 1 and zone 3 interrupts are enabled, but the
+		 * zone 2 threshold could still be hit and trigger an
+		 * interrupt even though it is not enabled.
+		 * Such a spurious interrupt may or may not carry a valuable
+		 * max count, so the solution would be to always check all
+		 * BWMON_ZONE_MAX() registers and pick the highest value.
+		 * That case is currently ignored.
+ */
+ return IRQ_NONE;
+ }
+
+ bwmon_disable(bwmon);
+
+ zone = get_bitmask_order(status) - 1;
+ /*
+	 * The zone max byte count register returns count units within the
+	 * sampling window. The downstream kernel for BWMON v4 (called BWMON
+	 * type 2 downstream) always increments the max byte count by one.
+ */
+ if (regmap_field_read(bwmon->regs[F_ZONE0_MAX + zone], &max))
+ return IRQ_NONE;
+
+ max += 1;
+ max *= bwmon->data->count_unit_kb;
+ bwmon->target_kbps = mult_frac(max, MSEC_PER_SEC, bwmon->data->sample_ms);
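+	/*
+	 * For example, with msm8998's count_unit_kb = 64 and sample_ms = 4,
+	 * a hypothetical zone max reading of 31 gives (31 + 1) * 64 =
+	 * 2048 kB observed in the 4 ms window, i.e. a target of
+	 * 2048 * 1000 / 4 = 512000 kB/s.
+	 */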
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t bwmon_intr_thread(int irq, void *dev_id)
+{
+ struct icc_bwmon *bwmon = dev_id;
+ unsigned int irq_enable = 0;
+ struct dev_pm_opp *opp, *target_opp;
+ unsigned int bw_kbps, up_kbps, down_kbps;
+
+ bw_kbps = bwmon->target_kbps;
+
+ target_opp = dev_pm_opp_find_bw_ceil(bwmon->dev, &bw_kbps, 0);
+ if (IS_ERR(target_opp) && PTR_ERR(target_opp) == -ERANGE)
+ target_opp = dev_pm_opp_find_bw_floor(bwmon->dev, &bw_kbps, 0);
+
+ bwmon->target_kbps = bw_kbps;
+
+ bw_kbps--;
+ opp = dev_pm_opp_find_bw_floor(bwmon->dev, &bw_kbps, 0);
+ if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE)
+ down_kbps = bwmon->target_kbps;
+ else
+ down_kbps = bw_kbps;
+
+ up_kbps = bwmon->target_kbps + 1;
+
+ if (bwmon->target_kbps >= bwmon->max_bw_kbps)
+ irq_enable = BIT(1);
+ else if (bwmon->target_kbps <= bwmon->min_bw_kbps)
+ irq_enable = BIT(3);
+ else
+ irq_enable = BWMON_IRQ_ENABLE_MASK;
+
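+	/*
+	 * Re-arm the monitor around the new bandwidth: zone 3 fires when
+	 * usage climbs above THRESHOLD_HIGH (just above the current OPP's
+	 * bandwidth) and zone 1 fires when it drops below THRESHOLD_MED
+	 * (the next lower OPP), so the OPP is re-evaluated only when usage
+	 * leaves the current window. At the extremes, only the interrupt
+	 * that can still fire stays enabled.
+	 */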
+ bwmon_set_threshold(bwmon, bwmon->regs[F_THRESHOLD_HIGH],
+ up_kbps);
+ bwmon_set_threshold(bwmon, bwmon->regs[F_THRESHOLD_MED],
+ down_kbps);
+ bwmon_clear_counters(bwmon, false);
+ bwmon_clear_irq(bwmon);
+ bwmon_enable(bwmon, irq_enable);
+
+ if (bwmon->target_kbps == bwmon->current_kbps)
+ goto out;
+
+ dev_pm_opp_set_opp(bwmon->dev, target_opp);
+ bwmon->current_kbps = bwmon->target_kbps;
+
+out:
+ dev_pm_opp_put(target_opp);
+ if (!IS_ERR(opp))
+ dev_pm_opp_put(opp);
+
+ return IRQ_HANDLED;
+}
+
+static int bwmon_init_regmap(struct platform_device *pdev,
+ struct icc_bwmon *bwmon)
+{
+ struct device *dev = &pdev->dev;
+ void __iomem *base;
+ struct regmap *map;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return dev_err_probe(dev, PTR_ERR(base),
+ "failed to map bwmon registers\n");
+
+ map = devm_regmap_init_mmio(dev, base, bwmon->data->regmap_cfg);
+ if (IS_ERR(map))
+ return dev_err_probe(dev, PTR_ERR(map),
+ "failed to initialize regmap\n");
+
+ BUILD_BUG_ON(ARRAY_SIZE(msm8998_bwmon_reg_fields) != F_NUM_FIELDS);
+ BUILD_BUG_ON(ARRAY_SIZE(sdm845_llcc_bwmon_reg_fields) != F_NUM_FIELDS);
+
+ return devm_regmap_field_bulk_alloc(dev, map, bwmon->regs,
+ bwmon->data->regmap_fields,
+ F_NUM_FIELDS);
+}
+
+static int bwmon_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dev_pm_opp *opp;
+ struct icc_bwmon *bwmon;
+ int ret;
+
+ bwmon = devm_kzalloc(dev, sizeof(*bwmon), GFP_KERNEL);
+ if (!bwmon)
+ return -ENOMEM;
+
+ bwmon->data = of_device_get_match_data(dev);
+
+ ret = bwmon_init_regmap(pdev, bwmon);
+ if (ret)
+ return ret;
+
+ bwmon->irq = platform_get_irq(pdev, 0);
+ if (bwmon->irq < 0)
+ return bwmon->irq;
+
+ ret = devm_pm_opp_of_add_table(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add OPP table\n");
+
+ bwmon->max_bw_kbps = UINT_MAX;
+ opp = dev_pm_opp_find_bw_floor(dev, &bwmon->max_bw_kbps, 0);
+ if (IS_ERR(opp))
+ return dev_err_probe(dev, PTR_ERR(opp), "failed to find max peak bandwidth\n");
+
+ bwmon->min_bw_kbps = 0;
+ opp = dev_pm_opp_find_bw_ceil(dev, &bwmon->min_bw_kbps, 0);
+ if (IS_ERR(opp))
+ return dev_err_probe(dev, PTR_ERR(opp), "failed to find min peak bandwidth\n");
+
+ bwmon->dev = dev;
+
+ bwmon_disable(bwmon);
+ ret = devm_request_threaded_irq(dev, bwmon->irq, bwmon_intr,
+ bwmon_intr_thread,
+ IRQF_ONESHOT, dev_name(dev), bwmon);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request IRQ\n");
+
+ platform_set_drvdata(pdev, bwmon);
+ bwmon_start(bwmon);
+
+ return 0;
+}
+
+static int bwmon_remove(struct platform_device *pdev)
+{
+ struct icc_bwmon *bwmon = platform_get_drvdata(pdev);
+
+ bwmon_disable(bwmon);
+
+ return 0;
+}
+
+static const struct icc_bwmon_data msm8998_bwmon_data = {
+ .sample_ms = 4,
+ .count_unit_kb = 64,
+ .default_highbw_kbps = 4800 * 1024, /* 4.8 GBps */
+ .default_medbw_kbps = 512 * 1024, /* 512 MBps */
+ .default_lowbw_kbps = 0,
+ .zone1_thres_count = 16,
+ .zone3_thres_count = 1,
+ .quirks = BWMON_HAS_GLOBAL_IRQ,
+ .regmap_fields = msm8998_bwmon_reg_fields,
+ .regmap_cfg = &msm8998_bwmon_regmap_cfg,
+};
+
+static const struct icc_bwmon_data sdm845_llcc_bwmon_data = {
+ .sample_ms = 4,
+ .count_unit_kb = 1024,
+ .default_highbw_kbps = 800 * 1024, /* 800 MBps */
+ .default_medbw_kbps = 256 * 1024, /* 256 MBps */
+ .default_lowbw_kbps = 0,
+ .zone1_thres_count = 16,
+ .zone3_thres_count = 1,
+ .regmap_fields = sdm845_llcc_bwmon_reg_fields,
+ .regmap_cfg = &sdm845_llcc_bwmon_regmap_cfg,
+};
+
+static const struct icc_bwmon_data sc7280_llcc_bwmon_data = {
+ .sample_ms = 4,
+ .count_unit_kb = 64,
+ .default_highbw_kbps = 800 * 1024, /* 800 MBps */
+ .default_medbw_kbps = 256 * 1024, /* 256 MBps */
+ .default_lowbw_kbps = 0,
+ .zone1_thres_count = 16,
+ .zone3_thres_count = 1,
+ .quirks = BWMON_NEEDS_FORCE_CLEAR,
+ .regmap_fields = sdm845_llcc_bwmon_reg_fields,
+ .regmap_cfg = &sdm845_llcc_bwmon_regmap_cfg,
+};
+
+static const struct of_device_id bwmon_of_match[] = {
+ {
+ .compatible = "qcom,msm8998-bwmon",
+ .data = &msm8998_bwmon_data
+ }, {
+ .compatible = "qcom,sdm845-llcc-bwmon",
+ .data = &sdm845_llcc_bwmon_data
+ }, {
+ .compatible = "qcom,sc7280-llcc-bwmon",
+ .data = &sc7280_llcc_bwmon_data
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, bwmon_of_match);
+
+static struct platform_driver bwmon_driver = {
+ .probe = bwmon_probe,
+ .remove = bwmon_remove,
+ .driver = {
+ .name = "qcom-bwmon",
+ .of_match_table = bwmon_of_match,
+ },
+};
+module_platform_driver(bwmon_driver);
+
+MODULE_AUTHOR("Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>");
+MODULE_DESCRIPTION("QCOM BWMON driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/qcom/kryo-l2-accessors.c b/drivers/soc/qcom/kryo-l2-accessors.c
new file mode 100644
index 000000000..7886af4fd
--- /dev/null
+++ b/drivers/soc/qcom/kryo-l2-accessors.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/spinlock.h>
+#include <asm/barrier.h>
+#include <asm/sysreg.h>
+#include <soc/qcom/kryo-l2-accessors.h>
+
+#define L2CPUSRSELR_EL1 sys_reg(3, 3, 15, 0, 6)
+#define L2CPUSRDR_EL1 sys_reg(3, 3, 15, 0, 7)
+
+static DEFINE_RAW_SPINLOCK(l2_access_lock);
+
+/**
+ * kryo_l2_set_indirect_reg() - write value to an L2 register
+ * @reg: Address of L2 register.
+ * @val: Value to be written to register.
+ *
+ * Use architecturally required barriers for ordering between system register
+ * accesses, and system registers with respect to device memory
+ */
+void kryo_l2_set_indirect_reg(u64 reg, u64 val)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&l2_access_lock, flags);
+ write_sysreg_s(reg, L2CPUSRSELR_EL1);
+ isb();
+ write_sysreg_s(val, L2CPUSRDR_EL1);
+ isb();
+ raw_spin_unlock_irqrestore(&l2_access_lock, flags);
+}
+EXPORT_SYMBOL(kryo_l2_set_indirect_reg);
+
+/**
+ * kryo_l2_get_indirect_reg() - read an L2 register value
+ * @reg: Address of L2 register.
+ *
+ * Use architecturally required barriers for ordering between system register
+ * accesses, and system registers with respect to device memory
+ */
+u64 kryo_l2_get_indirect_reg(u64 reg)
+{
+ u64 val;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&l2_access_lock, flags);
+ write_sysreg_s(reg, L2CPUSRSELR_EL1);
+ isb();
+ val = read_sysreg_s(L2CPUSRDR_EL1);
+ raw_spin_unlock_irqrestore(&l2_access_lock, flags);
+
+ return val;
+}
+EXPORT_SYMBOL(kryo_l2_get_indirect_reg);
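+
+/*
+ * Usage sketch (the register address below is SoC-specific and purely
+ * illustrative):
+ *
+ *	u64 val = kryo_l2_get_indirect_reg(reg);
+ *
+ *	kryo_l2_set_indirect_reg(reg, val | BIT(0));
+ */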
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
new file mode 100644
index 000000000..16a05143d
--- /dev/null
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -0,0 +1,915 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/llcc-qcom.h>
+
+#define ACTIVATE BIT(0)
+#define DEACTIVATE BIT(1)
+#define ACT_CTRL_OPCODE_ACTIVATE BIT(0)
+#define ACT_CTRL_OPCODE_DEACTIVATE BIT(1)
+#define ACT_CTRL_ACT_TRIG BIT(0)
+#define ACT_CTRL_OPCODE_SHIFT 0x01
+#define ATTR1_PROBE_TARGET_WAYS_SHIFT 0x02
+#define ATTR1_FIXED_SIZE_SHIFT 0x03
+#define ATTR1_PRIORITY_SHIFT 0x04
+#define ATTR1_MAX_CAP_SHIFT 0x10
+#define ATTR0_RES_WAYS_MASK GENMASK(15, 0)
+#define ATTR0_BONUS_WAYS_MASK GENMASK(31, 16)
+#define ATTR0_BONUS_WAYS_SHIFT 0x10
+#define LLCC_STATUS_READ_DELAY 100
+
+#define CACHE_LINE_SIZE_SHIFT 6
+
+#define LLCC_LB_CNT_MASK GENMASK(31, 28)
+#define LLCC_LB_CNT_SHIFT 28
+
+#define MAX_CAP_TO_BYTES(n) (n * SZ_1K)
+#define LLCC_TRP_ACT_CTRLn(n) (n * SZ_4K)
+#define LLCC_TRP_STATUSn(n) (4 + n * SZ_4K)
+#define LLCC_TRP_ATTR0_CFGn(n) (0x21000 + SZ_8 * n)
+#define LLCC_TRP_ATTR1_CFGn(n) (0x21004 + SZ_8 * n)
+
+#define LLCC_TRP_SCID_DIS_CAP_ALLOC 0x21f00
+#define LLCC_TRP_PCB_ACT 0x21f04
+#define LLCC_TRP_WRSC_EN 0x21f20
+#define LLCC_TRP_WRSC_CACHEABLE_EN 0x21f2c
+
+#define LLCC_VERSION_2_0_0_0 0x02000000
+#define LLCC_VERSION_2_1_0_0 0x02010000
+
+/**
+ * struct llcc_slice_config - Data associated with the llcc slice
+ * @usecase_id: Unique id for the client's use case
+ * @slice_id: llcc slice id for each client
+ * @max_cap: The maximum capacity of the cache slice provided in KB
+ * @priority: Priority of the client used to select victim line for replacement
+ * @fixed_size: Boolean indicating if the slice has a fixed capacity
+ * @bonus_ways: Bonus ways are additional ways to be used for any slice,
+ *		if a client ends up using more than its reserved cache ways.
+ *		Bonus ways are allocated only if they are not reserved for
+ *		some other client.
+ * @res_ways: Reserved ways for the cache slice; the reserved ways cannot
+ *		be used by any client other than the one they are assigned to.
+ * @cache_mode: Each slice operates as a cache; this controls the mode of
+ *		the slice: normal or TCM (Tightly Coupled Memory)
+ * @probe_target_ways: Determines which ways to probe for an access hit.
+ *		When configured to 1, only bonus and reserved ways are probed.
+ *		When configured to 0, all ways in the LLCC are probed.
+ * @dis_cap_alloc: Disable capacity-based allocation for a client
+ * @retain_on_pc: If this bit is set and the client has maintained an active
+ *		vote, then the ways assigned to this client are not flushed
+ *		on power collapse.
+ * @activate_on_init: Activate the slice immediately after it is programmed
+ * @write_scid_en: Bit enables write cache support for a given scid.
+ * @write_scid_cacheable_en: Enables write cache cacheable support for a
+ * given scid (not supported on v2 or older hardware).
+ */
+struct llcc_slice_config {
+ u32 usecase_id;
+ u32 slice_id;
+ u32 max_cap;
+ u32 priority;
+ bool fixed_size;
+ u32 bonus_ways;
+ u32 res_ways;
+ u32 cache_mode;
+ u32 probe_target_ways;
+ bool dis_cap_alloc;
+ bool retain_on_pc;
+ bool activate_on_init;
+ bool write_scid_en;
+ bool write_scid_cacheable_en;
+};
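+
+/*
+ * For example, the first sc7180_data entry below,
+ * { LLCC_CPUSS, 1, 256, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 1 }, describes the CPU
+ * subsystem client: slice id 1, 256 KB max capacity, priority 1, not fixed
+ * size, bonus ways 0xf, no reserved ways, normal cache mode, retained on
+ * power collapse and activated at init.
+ */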
+
+struct qcom_llcc_config {
+ const struct llcc_slice_config *sct_data;
+ const u32 *reg_offset;
+ const struct llcc_edac_reg_offset *edac_reg_offset;
+ int size;
+ bool need_llcc_cfg;
+ bool no_edac;
+};
+
+enum llcc_reg_offset {
+ LLCC_COMMON_HW_INFO,
+ LLCC_COMMON_STATUS0,
+};
+
+static const struct llcc_slice_config sc7180_data[] = {
+ { LLCC_CPUSS, 1, 256, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 1 },
+ { LLCC_MDM, 8, 128, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_GPUHTW, 11, 128, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_GPU, 12, 128, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 0 },
+};
+
+static const struct llcc_slice_config sc7280_data[] = {
+ { LLCC_CPUSS, 1, 768, 1, 0, 0x3f, 0x0, 0, 0, 0, 1, 1, 0},
+ { LLCC_MDMHPGRW, 7, 512, 2, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
+ { LLCC_CMPT, 10, 768, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
+ { LLCC_GPUHTW, 11, 256, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
+ { LLCC_GPU, 12, 512, 1, 0, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
+ { LLCC_MMUHWT, 13, 256, 1, 1, 0x3f, 0x0, 0, 0, 0, 0, 1, 0},
+ { LLCC_MDMPNG, 21, 768, 0, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
+ { LLCC_WLHW, 24, 256, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
+ { LLCC_MODPE, 29, 64, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
+};
+
+static const struct llcc_slice_config sc8180x_data[] = {
+ { LLCC_CPUSS, 1, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 1 },
+ { LLCC_VIDSC0, 2, 512, 2, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_VIDSC1, 3, 512, 2, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_AUDIO, 6, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MDMHPGRW, 7, 3072, 1, 1, 0x3ff, 0xc00, 0, 0, 0, 1, 0 },
+ { LLCC_MDM, 8, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MODHW, 9, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_CMPT, 10, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_GPUHTW, 11, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_GPU, 12, 5120, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MMUHWT, 13, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1 },
+ { LLCC_CMPTDMA, 15, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_DISP, 16, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_VIDFW, 17, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MDMHPFX, 20, 1024, 2, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MDMPNG, 21, 1024, 0, 1, 0xc, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_AUDHW, 22, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_NPU, 23, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_WLHW, 24, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MODPE, 29, 512, 1, 1, 0xc, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_APTCM, 30, 512, 3, 1, 0x0, 0x1, 1, 0, 0, 1, 0 },
+ { LLCC_WRCACHE, 31, 128, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0 },
+};
+
+static const struct llcc_slice_config sc8280xp_data[] = {
+ { LLCC_CPUSS, 1, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 1, 0 },
+ { LLCC_VIDSC0, 2, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_AUDIO, 6, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 },
+ { LLCC_CMPT, 10, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 },
+ { LLCC_GPUHTW, 11, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_GPU, 12, 4096, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 1 },
+ { LLCC_MMUHWT, 13, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_DISP, 16, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_AUDHW, 22, 2048, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_DRE, 26, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_CVP, 28, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_APTCM, 30, 1024, 3, 1, 0x0, 0x1, 1, 0, 0, 1, 0, 0 },
+ { LLCC_WRCACHE, 31, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_CVPFW, 17, 512, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_CPUSS1, 3, 2048, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_CPUHWT, 5, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+};
+
+static const struct llcc_slice_config sdm845_data[] = {
+ { LLCC_CPUSS, 1, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 1 },
+ { LLCC_VIDSC0, 2, 512, 2, 1, 0x0, 0x0f0, 0, 0, 1, 1, 0 },
+ { LLCC_VIDSC1, 3, 512, 2, 1, 0x0, 0x0f0, 0, 0, 1, 1, 0 },
+ { LLCC_ROTATOR, 4, 563, 2, 1, 0x0, 0x00e, 2, 0, 1, 1, 0 },
+ { LLCC_VOICE, 5, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_AUDIO, 6, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_MDMHPGRW, 7, 1024, 2, 0, 0xfc, 0xf00, 0, 0, 1, 1, 0 },
+ { LLCC_MDM, 8, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_CMPT, 10, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_GPUHTW, 11, 512, 1, 1, 0xc, 0x0, 0, 0, 1, 1, 0 },
+ { LLCC_GPU, 12, 2304, 1, 0, 0xff0, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_MMUHWT, 13, 256, 2, 0, 0x0, 0x1, 0, 0, 1, 0, 1 },
+ { LLCC_CMPTDMA, 15, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_DISP, 16, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_VIDFW, 17, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_MDMHPFX, 20, 1024, 2, 1, 0x0, 0xf00, 0, 0, 1, 1, 0 },
+ { LLCC_MDMPNG, 21, 1024, 0, 1, 0x1e, 0x0, 0, 0, 1, 1, 0 },
+ { LLCC_AUDHW, 22, 1024, 1, 1, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+};
+
+static const struct llcc_slice_config sm6350_data[] = {
+ { LLCC_CPUSS, 1, 768, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 1 },
+ { LLCC_MDM, 8, 512, 2, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_GPUHTW, 11, 256, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_GPU, 12, 512, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_MDMPNG, 21, 768, 0, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_NPU, 23, 768, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_MODPE, 29, 64, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
+};
+
+static const struct llcc_slice_config sm8150_data[] = {
+ { LLCC_CPUSS, 1, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 1 },
+ { LLCC_VIDSC0, 2, 512, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_VIDSC1, 3, 512, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_AUDIO, 6, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MDMHPGRW, 7, 3072, 1, 0, 0xFF, 0xF00, 0, 0, 0, 1, 0 },
+ { LLCC_MDM, 8, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MODHW, 9, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_CMPT, 10, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_GPUHTW, 11, 512, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_GPU, 12, 2560, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MMUHWT, 13, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1 },
+ { LLCC_CMPTDMA, 15, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_DISP, 16, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MDMHPFX, 20, 1024, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MDMHPFX, 21, 1024, 0, 1, 0xF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_AUDHW, 22, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_NPU, 23, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_WLHW, 24, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MODPE, 29, 256, 1, 1, 0xF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_APTCM, 30, 256, 3, 1, 0x0, 0x1, 1, 0, 0, 1, 0 },
+ { LLCC_WRCACHE, 31, 128, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0 },
+};
+
+static const struct llcc_slice_config sm8250_data[] = {
+ { LLCC_CPUSS, 1, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 1, 0 },
+ { LLCC_VIDSC0, 2, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_AUDIO, 6, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 },
+ { LLCC_CMPT, 10, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 },
+ { LLCC_GPUHTW, 11, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_GPU, 12, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 1 },
+ { LLCC_MMUHWT, 13, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_CMPTDMA, 15, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_DISP, 16, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_VIDFW, 17, 512, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_AUDHW, 22, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_NPU, 23, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_WLHW, 24, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_CVP, 28, 256, 3, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_APTCM, 30, 128, 3, 0, 0x0, 0x3, 1, 0, 0, 1, 0, 0 },
+ { LLCC_WRCACHE, 31, 256, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+};
+
+static const struct llcc_slice_config sm8350_data[] = {
+ { LLCC_CPUSS, 1, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 1 },
+ { LLCC_VIDSC0, 2, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_AUDIO, 6, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 },
+ { LLCC_MDMHPGRW, 7, 1024, 3, 0, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_MODHW, 9, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_CMPT, 10, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_GPUHTW, 11, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_GPU, 12, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 1, 0 },
+ { LLCC_MMUHWT, 13, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 1 },
+ { LLCC_DISP, 16, 3072, 2, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_MDMPNG, 21, 1024, 0, 1, 0xf, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_AUDHW, 22, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_CVP, 28, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_MODPE, 29, 256, 1, 1, 0xf, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_APTCM, 30, 1024, 3, 1, 0x0, 0x1, 1, 0, 0, 0, 1, 0 },
+ { LLCC_WRCACHE, 31, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 1 },
+ { LLCC_CVPFW, 17, 512, 1, 0, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_CPUSS1, 3, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_CPUHWT, 5, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 1 },
+};
+
+static const struct llcc_slice_config sm8450_data[] = {
+ {LLCC_CPUSS, 1, 3072, 1, 0, 0xFFFF, 0x0, 0, 0, 0, 1, 1, 0, 0 },
+ {LLCC_VIDSC0, 2, 512, 3, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_AUDIO, 6, 1024, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0 },
+ {LLCC_MDMHPGRW, 7, 1024, 3, 0, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_MODHW, 9, 1024, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_CMPT, 10, 4096, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_GPUHTW, 11, 512, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_GPU, 12, 2048, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 1, 0 },
+ {LLCC_MMUHWT, 13, 768, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 1, 0, 0 },
+ {LLCC_DISP, 16, 4096, 2, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_MDMPNG, 21, 1024, 1, 1, 0xF000, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_AUDHW, 22, 1024, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0 },
+ {LLCC_CVP, 28, 256, 3, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_MODPE, 29, 64, 1, 1, 0xF000, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_APTCM, 30, 1024, 3, 1, 0x0, 0xF0, 1, 0, 0, 1, 0, 0, 0 },
+ {LLCC_WRCACHE, 31, 512, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 1, 0, 0 },
+ {LLCC_CVPFW, 17, 512, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_CPUSS1, 3, 1024, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_CAMEXP0, 4, 256, 3, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_CPUMTE, 23, 256, 1, 1, 0x0FFF, 0x0, 0, 0, 0, 0, 1, 0, 0 },
+ {LLCC_CPUHWT, 5, 512, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 1, 0, 0 },
+ {LLCC_CAMEXP1, 27, 256, 3, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_AENPU, 8, 2048, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0 },
+};
+
+static const struct llcc_edac_reg_offset llcc_v1_edac_reg_offset = {
+ .trp_ecc_error_status0 = 0x20344,
+ .trp_ecc_error_status1 = 0x20348,
+ .trp_ecc_sb_err_syn0 = 0x2304c,
+ .trp_ecc_db_err_syn0 = 0x20370,
+ .trp_ecc_error_cntr_clear = 0x20440,
+ .trp_interrupt_0_status = 0x20480,
+ .trp_interrupt_0_clear = 0x20484,
+ .trp_interrupt_0_enable = 0x20488,
+
+ /* LLCC Common registers */
+ .cmn_status0 = 0x3000c,
+ .cmn_interrupt_0_enable = 0x3001c,
+ .cmn_interrupt_2_enable = 0x3003c,
+
+ /* LLCC DRP registers */
+ .drp_ecc_error_cfg = 0x40000,
+ .drp_ecc_error_cntr_clear = 0x40004,
+ .drp_interrupt_status = 0x41000,
+ .drp_interrupt_clear = 0x41008,
+ .drp_interrupt_enable = 0x4100c,
+ .drp_ecc_error_status0 = 0x42044,
+ .drp_ecc_error_status1 = 0x42048,
+ .drp_ecc_sb_err_syn0 = 0x4204c,
+ .drp_ecc_db_err_syn0 = 0x42070,
+};
+
+static const struct llcc_edac_reg_offset llcc_v2_1_edac_reg_offset = {
+ .trp_ecc_error_status0 = 0x20344,
+ .trp_ecc_error_status1 = 0x20348,
+ .trp_ecc_sb_err_syn0 = 0x2034c,
+ .trp_ecc_db_err_syn0 = 0x20370,
+ .trp_ecc_error_cntr_clear = 0x20440,
+ .trp_interrupt_0_status = 0x20480,
+ .trp_interrupt_0_clear = 0x20484,
+ .trp_interrupt_0_enable = 0x20488,
+
+ /* LLCC Common registers */
+ .cmn_status0 = 0x3400c,
+ .cmn_interrupt_0_enable = 0x3401c,
+ .cmn_interrupt_2_enable = 0x3403c,
+
+ /* LLCC DRP registers */
+ .drp_ecc_error_cfg = 0x50000,
+ .drp_ecc_error_cntr_clear = 0x50004,
+ .drp_interrupt_status = 0x50020,
+ .drp_interrupt_clear = 0x50028,
+ .drp_interrupt_enable = 0x5002c,
+ .drp_ecc_error_status0 = 0x520f4,
+ .drp_ecc_error_status1 = 0x520f8,
+ .drp_ecc_sb_err_syn0 = 0x520fc,
+ .drp_ecc_db_err_syn0 = 0x52120,
+};
+
+/* LLCC register offset starting from v1.0.0 */
+static const u32 llcc_v1_reg_offset[] = {
+ [LLCC_COMMON_HW_INFO] = 0x00030000,
+ [LLCC_COMMON_STATUS0] = 0x0003000c,
+};
+
+/* LLCC register offset starting from v2.0.1 */
+static const u32 llcc_v2_1_reg_offset[] = {
+ [LLCC_COMMON_HW_INFO] = 0x00034000,
+ [LLCC_COMMON_STATUS0] = 0x0003400c,
+};
+
+static const struct qcom_llcc_config sc7180_cfg = {
+ .sct_data = sc7180_data,
+ .size = ARRAY_SIZE(sc7180_data),
+ .need_llcc_cfg = true,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
+};
+
+static const struct qcom_llcc_config sc7280_cfg = {
+ .sct_data = sc7280_data,
+ .size = ARRAY_SIZE(sc7280_data),
+ .need_llcc_cfg = true,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
+};
+
+static const struct qcom_llcc_config sc8180x_cfg = {
+ .sct_data = sc8180x_data,
+ .size = ARRAY_SIZE(sc8180x_data),
+ .need_llcc_cfg = true,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
+};
+
+static const struct qcom_llcc_config sc8280xp_cfg = {
+ .sct_data = sc8280xp_data,
+ .size = ARRAY_SIZE(sc8280xp_data),
+ .need_llcc_cfg = true,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
+};
+
+static const struct qcom_llcc_config sdm845_cfg = {
+ .sct_data = sdm845_data,
+ .size = ARRAY_SIZE(sdm845_data),
+ .need_llcc_cfg = false,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
+ .no_edac = true,
+};
+
+static const struct qcom_llcc_config sm6350_cfg = {
+ .sct_data = sm6350_data,
+ .size = ARRAY_SIZE(sm6350_data),
+ .need_llcc_cfg = true,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
+};
+
+static const struct qcom_llcc_config sm8150_cfg = {
+ .sct_data = sm8150_data,
+ .size = ARRAY_SIZE(sm8150_data),
+ .need_llcc_cfg = true,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
+};
+
+static const struct qcom_llcc_config sm8250_cfg = {
+ .sct_data = sm8250_data,
+ .size = ARRAY_SIZE(sm8250_data),
+ .need_llcc_cfg = true,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
+};
+
+static const struct qcom_llcc_config sm8350_cfg = {
+ .sct_data = sm8350_data,
+ .size = ARRAY_SIZE(sm8350_data),
+ .need_llcc_cfg = true,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
+};
+
+static const struct qcom_llcc_config sm8450_cfg = {
+ .sct_data = sm8450_data,
+ .size = ARRAY_SIZE(sm8450_data),
+ .need_llcc_cfg = true,
+ .reg_offset = llcc_v2_1_reg_offset,
+ .edac_reg_offset = &llcc_v2_1_edac_reg_offset,
+};
+
+static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER;
+
+/**
+ * llcc_slice_getd - get llcc slice descriptor
+ * @uid: usecase_id for the client
+ *
+ * A pointer to the llcc slice descriptor is returned on success
+ * and an error pointer on failure
+ */
+struct llcc_slice_desc *llcc_slice_getd(u32 uid)
+{
+ const struct llcc_slice_config *cfg;
+ struct llcc_slice_desc *desc;
+ u32 sz, count;
+
+ if (IS_ERR(drv_data))
+ return ERR_CAST(drv_data);
+
+ cfg = drv_data->cfg;
+ sz = drv_data->cfg_size;
+
+ for (count = 0; cfg && count < sz; count++, cfg++)
+ if (cfg->usecase_id == uid)
+ break;
+
+ if (count == sz || !cfg)
+ return ERR_PTR(-ENODEV);
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return ERR_PTR(-ENOMEM);
+
+ desc->slice_id = cfg->slice_id;
+ desc->slice_size = cfg->max_cap;
+
+ return desc;
+}
+EXPORT_SYMBOL_GPL(llcc_slice_getd);
+
+/**
+ * llcc_slice_putd - put llcc slice descriptor
+ * @desc: Pointer to llcc slice descriptor
+ */
+void llcc_slice_putd(struct llcc_slice_desc *desc)
+{
+ if (!IS_ERR_OR_NULL(desc))
+ kfree(desc);
+}
+EXPORT_SYMBOL_GPL(llcc_slice_putd);
+
+static int llcc_update_act_ctrl(u32 sid,
+ u32 act_ctrl_reg_val, u32 status)
+{
+ u32 act_ctrl_reg;
+ u32 status_reg;
+ u32 slice_status;
+ int ret;
+
+ if (IS_ERR(drv_data))
+ return PTR_ERR(drv_data);
+
+ act_ctrl_reg = LLCC_TRP_ACT_CTRLn(sid);
+ status_reg = LLCC_TRP_STATUSn(sid);
+
+ /* Set the ACTIVE trigger */
+ act_ctrl_reg_val |= ACT_CTRL_ACT_TRIG;
+ ret = regmap_write(drv_data->bcast_regmap, act_ctrl_reg,
+ act_ctrl_reg_val);
+ if (ret)
+ return ret;
+
+ /* Clear the ACTIVE trigger */
+ act_ctrl_reg_val &= ~ACT_CTRL_ACT_TRIG;
+ ret = regmap_write(drv_data->bcast_regmap, act_ctrl_reg,
+ act_ctrl_reg_val);
+ if (ret)
+ return ret;
+
+ ret = regmap_read_poll_timeout(drv_data->bcast_regmap, status_reg,
+ slice_status, !(slice_status & status),
+ 0, LLCC_STATUS_READ_DELAY);
+ return ret;
+}
+
+/**
+ * llcc_slice_activate - Activate the llcc slice
+ * @desc: Pointer to llcc slice descriptor
+ *
+ * A value of zero will be returned on success and a negative errno will
+ * be returned in error cases
+ */
+int llcc_slice_activate(struct llcc_slice_desc *desc)
+{
+ int ret;
+ u32 act_ctrl_val;
+
+ if (IS_ERR(drv_data))
+ return PTR_ERR(drv_data);
+
+ if (IS_ERR_OR_NULL(desc))
+ return -EINVAL;
+
+ mutex_lock(&drv_data->lock);
+ if (test_bit(desc->slice_id, drv_data->bitmap)) {
+ mutex_unlock(&drv_data->lock);
+ return 0;
+ }
+
+ act_ctrl_val = ACT_CTRL_OPCODE_ACTIVATE << ACT_CTRL_OPCODE_SHIFT;
+
+ ret = llcc_update_act_ctrl(desc->slice_id, act_ctrl_val,
+ DEACTIVATE);
+ if (ret) {
+ mutex_unlock(&drv_data->lock);
+ return ret;
+ }
+
+ __set_bit(desc->slice_id, drv_data->bitmap);
+ mutex_unlock(&drv_data->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(llcc_slice_activate);
+
+/**
+ * llcc_slice_deactivate - Deactivate the llcc slice
+ * @desc: Pointer to llcc slice descriptor
+ *
+ * A value of zero will be returned on success and a negative errno will
+ * be returned in error cases
+ */
+int llcc_slice_deactivate(struct llcc_slice_desc *desc)
+{
+ u32 act_ctrl_val;
+ int ret;
+
+ if (IS_ERR(drv_data))
+ return PTR_ERR(drv_data);
+
+ if (IS_ERR_OR_NULL(desc))
+ return -EINVAL;
+
+ mutex_lock(&drv_data->lock);
+ if (!test_bit(desc->slice_id, drv_data->bitmap)) {
+ mutex_unlock(&drv_data->lock);
+ return 0;
+ }
+ act_ctrl_val = ACT_CTRL_OPCODE_DEACTIVATE << ACT_CTRL_OPCODE_SHIFT;
+
+ ret = llcc_update_act_ctrl(desc->slice_id, act_ctrl_val,
+ ACTIVATE);
+ if (ret) {
+ mutex_unlock(&drv_data->lock);
+ return ret;
+ }
+
+ __clear_bit(desc->slice_id, drv_data->bitmap);
+ mutex_unlock(&drv_data->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(llcc_slice_deactivate);
+
+/**
+ * llcc_get_slice_id - return the slice id
+ * @desc: Pointer to llcc slice descriptor
+ */
+int llcc_get_slice_id(struct llcc_slice_desc *desc)
+{
+ if (IS_ERR_OR_NULL(desc))
+ return -EINVAL;
+
+ return desc->slice_id;
+}
+EXPORT_SYMBOL_GPL(llcc_get_slice_id);
+
+/**
+ * llcc_get_slice_size - return the slice size
+ * @desc: Pointer to llcc slice descriptor
+ */
+size_t llcc_get_slice_size(struct llcc_slice_desc *desc)
+{
+ if (IS_ERR_OR_NULL(desc))
+ return 0;
+
+ return desc->slice_size;
+}
+EXPORT_SYMBOL_GPL(llcc_get_slice_size);
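+
+/*
+ * Illustrative usage sketch (not part of this driver): a client such as a
+ * GPU driver would typically pair the slice helpers roughly as below. The
+ * LLCC_GPU usecase id comes from the llcc-qcom.h header; error handling is
+ * elided for brevity.
+ *
+ *   struct llcc_slice_desc *gpu_llc = llcc_slice_getd(LLCC_GPU);
+ *
+ *   if (!IS_ERR(gpu_llc)) {
+ *           llcc_slice_activate(gpu_llc);
+ *           // program llcc_get_slice_id(gpu_llc) into the client's SCID field
+ *           llcc_slice_deactivate(gpu_llc);
+ *           llcc_slice_putd(gpu_llc);
+ *   }
+ */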
+
+static int _qcom_llcc_cfg_program(const struct llcc_slice_config *config,
+ const struct qcom_llcc_config *cfg)
+{
+ int ret;
+ u32 attr1_cfg;
+ u32 attr0_cfg;
+ u32 attr1_val;
+ u32 attr0_val;
+ u32 max_cap_cacheline;
+ struct llcc_slice_desc desc;
+
+ attr1_val = config->cache_mode;
+ attr1_val |= config->probe_target_ways << ATTR1_PROBE_TARGET_WAYS_SHIFT;
+ attr1_val |= config->fixed_size << ATTR1_FIXED_SIZE_SHIFT;
+ attr1_val |= config->priority << ATTR1_PRIORITY_SHIFT;
+
+ max_cap_cacheline = MAX_CAP_TO_BYTES(config->max_cap);
+
+ /*
+ * The number of LLCC instances can vary for each target.
+ * The SW writes to the broadcast register, which gets propagated
+ * to each LLCC instance (llcc0..llccN).
+ * Since the size of the memory is divided equally amongst the
+ * LLCC instances, the max cap must be configured accordingly.
+ */
+ max_cap_cacheline = max_cap_cacheline / drv_data->num_banks;
+ max_cap_cacheline >>= CACHE_LINE_SIZE_SHIFT;
+ attr1_val |= max_cap_cacheline << ATTR1_MAX_CAP_SHIFT;
+
+ attr1_cfg = LLCC_TRP_ATTR1_CFGn(config->slice_id);
+
+ ret = regmap_write(drv_data->bcast_regmap, attr1_cfg, attr1_val);
+ if (ret)
+ return ret;
+
+ attr0_val = config->res_ways & ATTR0_RES_WAYS_MASK;
+ attr0_val |= config->bonus_ways << ATTR0_BONUS_WAYS_SHIFT;
+
+ attr0_cfg = LLCC_TRP_ATTR0_CFGn(config->slice_id);
+
+ ret = regmap_write(drv_data->bcast_regmap, attr0_cfg, attr0_val);
+ if (ret)
+ return ret;
+
+ if (cfg->need_llcc_cfg) {
+ u32 disable_cap_alloc, retain_pc;
+
+ disable_cap_alloc = config->dis_cap_alloc << config->slice_id;
+ ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_SCID_DIS_CAP_ALLOC,
+ BIT(config->slice_id), disable_cap_alloc);
+ if (ret)
+ return ret;
+
+ retain_pc = config->retain_on_pc << config->slice_id;
+ ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_PCB_ACT,
+ BIT(config->slice_id), retain_pc);
+ if (ret)
+ return ret;
+ }
+
+ if (drv_data->version >= LLCC_VERSION_2_0_0_0) {
+ u32 wren;
+
+ wren = config->write_scid_en << config->slice_id;
+ ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_WRSC_EN,
+ BIT(config->slice_id), wren);
+ if (ret)
+ return ret;
+ }
+
+ if (drv_data->version >= LLCC_VERSION_2_1_0_0) {
+ u32 wr_cache_en;
+
+ wr_cache_en = config->write_scid_cacheable_en << config->slice_id;
+ ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_WRSC_CACHEABLE_EN,
+ BIT(config->slice_id), wr_cache_en);
+ if (ret)
+ return ret;
+ }
+
+ if (config->activate_on_init) {
+ desc.slice_id = config->slice_id;
+ ret = llcc_slice_activate(&desc);
+ }
+
+ return ret;
+}
+
+static int qcom_llcc_cfg_program(struct platform_device *pdev,
+ const struct qcom_llcc_config *cfg)
+{
+ int i;
+ u32 sz;
+ int ret = 0;
+ const struct llcc_slice_config *llcc_table;
+
+ sz = drv_data->cfg_size;
+ llcc_table = drv_data->cfg;
+
+ for (i = 0; i < sz; i++) {
+ ret = _qcom_llcc_cfg_program(&llcc_table[i], cfg);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int qcom_llcc_remove(struct platform_device *pdev)
+{
+ /* Set the global pointer to an error code to avoid referencing it */
+ drv_data = ERR_PTR(-ENODEV);
+ return 0;
+}
+
+static struct regmap *qcom_llcc_init_mmio(struct platform_device *pdev, u8 index,
+ const char *name)
+{
+ void __iomem *base;
+ struct regmap_config llcc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ };
+
+ base = devm_platform_ioremap_resource(pdev, index);
+ if (IS_ERR(base))
+ return ERR_CAST(base);
+
+ llcc_regmap_config.name = name;
+ return devm_regmap_init_mmio(&pdev->dev, base, &llcc_regmap_config);
+}
+
+static int qcom_llcc_probe(struct platform_device *pdev)
+{
+ u32 num_banks;
+ struct device *dev = &pdev->dev;
+ int ret, i;
+ struct platform_device *llcc_edac;
+ const struct qcom_llcc_config *cfg;
+ const struct llcc_slice_config *llcc_cfg;
+ u32 sz;
+ u32 version;
+ struct regmap *regmap;
+
+ if (!IS_ERR(drv_data))
+ return -EBUSY;
+
+ drv_data = devm_kzalloc(dev, sizeof(*drv_data), GFP_KERNEL);
+ if (!drv_data) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /* Initialize the first LLCC bank regmap */
+ regmap = qcom_llcc_init_mmio(pdev, 0, "llcc0_base");
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ goto err;
+ }
+
+ cfg = of_device_get_match_data(&pdev->dev);
+
+ ret = regmap_read(regmap, cfg->reg_offset[LLCC_COMMON_STATUS0], &num_banks);
+ if (ret)
+ goto err;
+
+ num_banks &= LLCC_LB_CNT_MASK;
+ num_banks >>= LLCC_LB_CNT_SHIFT;
+ drv_data->num_banks = num_banks;
+
+ drv_data->regmaps = devm_kcalloc(dev, num_banks, sizeof(*drv_data->regmaps), GFP_KERNEL);
+ if (!drv_data->regmaps) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ drv_data->regmaps[0] = regmap;
+
+ /* Initialize rest of LLCC bank regmaps */
+ for (i = 1; i < num_banks; i++) {
+ char *base = kasprintf(GFP_KERNEL, "llcc%d_base", i);
+
+ drv_data->regmaps[i] = qcom_llcc_init_mmio(pdev, i, base);
+ if (IS_ERR(drv_data->regmaps[i])) {
+ ret = PTR_ERR(drv_data->regmaps[i]);
+ kfree(base);
+ goto err;
+ }
+
+ kfree(base);
+ }
+
+ drv_data->bcast_regmap = qcom_llcc_init_mmio(pdev, i, "llcc_broadcast_base");
+ if (IS_ERR(drv_data->bcast_regmap)) {
+ ret = PTR_ERR(drv_data->bcast_regmap);
+ goto err;
+ }
+
+ /* Extract version of the IP */
+ ret = regmap_read(drv_data->bcast_regmap, cfg->reg_offset[LLCC_COMMON_HW_INFO],
+ &version);
+ if (ret)
+ goto err;
+
+ drv_data->version = version;
+
+ llcc_cfg = cfg->sct_data;
+ sz = cfg->size;
+
+ for (i = 0; i < sz; i++)
+ if (llcc_cfg[i].slice_id > drv_data->max_slices)
+ drv_data->max_slices = llcc_cfg[i].slice_id;
+
+ drv_data->bitmap = devm_bitmap_zalloc(dev, drv_data->max_slices,
+ GFP_KERNEL);
+ if (!drv_data->bitmap) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ drv_data->cfg = llcc_cfg;
+ drv_data->cfg_size = sz;
+ drv_data->edac_reg_offset = cfg->edac_reg_offset;
+ mutex_init(&drv_data->lock);
+ platform_set_drvdata(pdev, drv_data);
+
+ ret = qcom_llcc_cfg_program(pdev, cfg);
+ if (ret)
+ goto err;
+
+ drv_data->ecc_irq = platform_get_irq_optional(pdev, 0);
+
+ /*
+ * On some platforms, access to the EDAC registers is locked by
+ * the bootloader, so probing the EDAC driver would result in a crash.
+ * Hence, skip creating the EDAC platform device on those
+ * problematic platforms.
+ */
+ if (!cfg->no_edac) {
+ llcc_edac = platform_device_register_data(&pdev->dev,
+ "qcom_llcc_edac", -1, drv_data,
+ sizeof(*drv_data));
+ if (IS_ERR(llcc_edac))
+ dev_err(dev, "Failed to register llcc edac driver\n");
+ }
+
+ return 0;
+err:
+ drv_data = ERR_PTR(-ENODEV);
+ return ret;
+}
+
+static const struct of_device_id qcom_llcc_of_match[] = {
+ { .compatible = "qcom,sc7180-llcc", .data = &sc7180_cfg },
+ { .compatible = "qcom,sc7280-llcc", .data = &sc7280_cfg },
+ { .compatible = "qcom,sc8180x-llcc", .data = &sc8180x_cfg },
+ { .compatible = "qcom,sc8280xp-llcc", .data = &sc8280xp_cfg },
+ { .compatible = "qcom,sdm845-llcc", .data = &sdm845_cfg },
+ { .compatible = "qcom,sm6350-llcc", .data = &sm6350_cfg },
+ { .compatible = "qcom,sm8150-llcc", .data = &sm8150_cfg },
+ { .compatible = "qcom,sm8250-llcc", .data = &sm8250_cfg },
+ { .compatible = "qcom,sm8350-llcc", .data = &sm8350_cfg },
+ { .compatible = "qcom,sm8450-llcc", .data = &sm8450_cfg },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qcom_llcc_of_match);
+
+static struct platform_driver qcom_llcc_driver = {
+ .driver = {
+ .name = "qcom-llcc",
+ .of_match_table = qcom_llcc_of_match,
+ },
+ .probe = qcom_llcc_probe,
+ .remove = qcom_llcc_remove,
+};
+module_platform_driver(qcom_llcc_driver);
+
+MODULE_DESCRIPTION("Qualcomm Last Level Cache Controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c
new file mode 100644
index 000000000..10235b36d
--- /dev/null
+++ b/drivers/soc/qcom/mdt_loader.c
@@ -0,0 +1,420 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Qualcomm Peripheral Image Loader
+ *
+ * Copyright (C) 2016 Linaro Ltd
+ * Copyright (C) 2015 Sony Mobile Communications Inc
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/elf.h>
+#include <linux/firmware.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/qcom_scm.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/mdt_loader.h>
+
+static bool mdt_phdr_valid(const struct elf32_phdr *phdr)
+{
+ if (phdr->p_type != PT_LOAD)
+ return false;
+
+ if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
+ return false;
+
+ if (!phdr->p_memsz)
+ return false;
+
+ return true;
+}
+
+static ssize_t mdt_load_split_segment(void *ptr, const struct elf32_phdr *phdrs,
+ unsigned int segment, const char *fw_name,
+ struct device *dev)
+{
+ const struct elf32_phdr *phdr = &phdrs[segment];
+ const struct firmware *seg_fw;
+ char *seg_name;
+ ssize_t ret;
+
+ if (strlen(fw_name) < 4)
+ return -EINVAL;
+
+ seg_name = kstrdup(fw_name, GFP_KERNEL);
+ if (!seg_name)
+ return -ENOMEM;
+
+ sprintf(seg_name + strlen(fw_name) - 3, "b%02d", segment);
+ ret = request_firmware_into_buf(&seg_fw, seg_name, dev,
+ ptr, phdr->p_filesz);
+ if (ret) {
+ dev_err(dev, "error %zd loading %s\n", ret, seg_name);
+ kfree(seg_name);
+ return ret;
+ }
+
+ if (seg_fw->size != phdr->p_filesz) {
+ dev_err(dev,
+ "failed to load segment %d from truncated file %s\n",
+ segment, seg_name);
+ ret = -EINVAL;
+ }
+
+ release_firmware(seg_fw);
+ kfree(seg_name);
+
+ return ret;
+}
+
+/**
+ * qcom_mdt_get_size() - acquire size of the memory region needed to load mdt
+ * @fw: firmware object for the mdt file
+ *
+ * Returns size of the loaded firmware blob, or -EINVAL on failure.
+ */
+ssize_t qcom_mdt_get_size(const struct firmware *fw)
+{
+ const struct elf32_phdr *phdrs;
+ const struct elf32_phdr *phdr;
+ const struct elf32_hdr *ehdr;
+ phys_addr_t min_addr = PHYS_ADDR_MAX;
+ phys_addr_t max_addr = 0;
+ int i;
+
+ ehdr = (struct elf32_hdr *)fw->data;
+ phdrs = (struct elf32_phdr *)(ehdr + 1);
+
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ phdr = &phdrs[i];
+
+ if (!mdt_phdr_valid(phdr))
+ continue;
+
+ if (phdr->p_paddr < min_addr)
+ min_addr = phdr->p_paddr;
+
+ if (phdr->p_paddr + phdr->p_memsz > max_addr)
+ max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
+ }
+
+ return min_addr < max_addr ? max_addr - min_addr : -EINVAL;
+}
+EXPORT_SYMBOL_GPL(qcom_mdt_get_size);
+
+/**
+ * qcom_mdt_read_metadata() - read header and metadata from mdt or mbn
+ * @fw: firmware of mdt header or mbn
+ * @data_len: length of the read metadata blob
+ * @fw_name: name of the firmware, for construction of segment file names
+ * @dev: device handle to associate resources with
+ *
+ * The mechanism that performs the authentication of the loading firmware
+ * expects an ELF header directly followed by the segment of hashes, with no
+ * padding in between. This function allocates a chunk of memory for this pair
+ * and copies the two pieces into the buffer.
+ *
+ * In the case of split firmware the hash is found directly following the ELF
+ * header, rather than at p_offset described by the second program header.
+ *
+ * The caller is responsible for freeing the returned pointer with kfree().
+ *
+ * Return: pointer to data, or ERR_PTR()
+ */
+void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len,
+ const char *fw_name, struct device *dev)
+{
+ const struct elf32_phdr *phdrs;
+ const struct elf32_hdr *ehdr;
+ unsigned int hash_segment = 0;
+ size_t hash_offset;
+ size_t hash_size;
+ size_t ehdr_size;
+ unsigned int i;
+ ssize_t ret;
+ void *data;
+
+ ehdr = (struct elf32_hdr *)fw->data;
+ phdrs = (struct elf32_phdr *)(ehdr + 1);
+
+ if (ehdr->e_phnum < 2)
+ return ERR_PTR(-EINVAL);
+
+ if (phdrs[0].p_type == PT_LOAD)
+ return ERR_PTR(-EINVAL);
+
+ for (i = 1; i < ehdr->e_phnum; i++) {
+ if ((phdrs[i].p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH) {
+ hash_segment = i;
+ break;
+ }
+ }
+
+ if (!hash_segment) {
+ dev_err(dev, "no hash segment found in %s\n", fw_name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ ehdr_size = phdrs[0].p_filesz;
+ hash_size = phdrs[hash_segment].p_filesz;
+
+ data = kmalloc(ehdr_size + hash_size, GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ /* Copy ELF header */
+ memcpy(data, fw->data, ehdr_size);
+
+ if (ehdr_size + hash_size == fw->size) {
+ /* Firmware is split and hash is packed following the ELF header */
+ hash_offset = phdrs[0].p_filesz;
+ memcpy(data + ehdr_size, fw->data + hash_offset, hash_size);
+ } else if (phdrs[hash_segment].p_offset + hash_size <= fw->size) {
+ /* Hash is in its own segment, but within the loaded file */
+ hash_offset = phdrs[hash_segment].p_offset;
+ memcpy(data + ehdr_size, fw->data + hash_offset, hash_size);
+ } else {
+ /* Hash is in its own segment, beyond the loaded file */
+ ret = mdt_load_split_segment(data + ehdr_size, phdrs, hash_segment, fw_name, dev);
+ if (ret) {
+ kfree(data);
+ return ERR_PTR(ret);
+ }
+ }
+
+ *data_len = ehdr_size + hash_size;
+
+ return data;
+}
+EXPORT_SYMBOL_GPL(qcom_mdt_read_metadata);
+
+/**
+ * qcom_mdt_pas_init() - initialize PAS region for firmware loading
+ * @dev: device handle to associate resources with
+ * @fw: firmware object for the mdt file
+ * @fw_name: name of the firmware, for construction of segment file names
+ * @pas_id: PAS identifier
+ * @mem_phys: physical address of allocated memory region
+ * @ctx: PAS metadata context, to be released by caller
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+int qcom_mdt_pas_init(struct device *dev, const struct firmware *fw,
+ const char *fw_name, int pas_id, phys_addr_t mem_phys,
+ struct qcom_scm_pas_metadata *ctx)
+{
+ const struct elf32_phdr *phdrs;
+ const struct elf32_phdr *phdr;
+ const struct elf32_hdr *ehdr;
+ phys_addr_t min_addr = PHYS_ADDR_MAX;
+ phys_addr_t max_addr = 0;
+ bool relocate = false;
+ size_t metadata_len;
+ void *metadata;
+ int ret;
+ int i;
+
+ ehdr = (struct elf32_hdr *)fw->data;
+ phdrs = (struct elf32_phdr *)(ehdr + 1);
+
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ phdr = &phdrs[i];
+
+ if (!mdt_phdr_valid(phdr))
+ continue;
+
+ if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
+ relocate = true;
+
+ if (phdr->p_paddr < min_addr)
+ min_addr = phdr->p_paddr;
+
+ if (phdr->p_paddr + phdr->p_memsz > max_addr)
+ max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
+ }
+
+ metadata = qcom_mdt_read_metadata(fw, &metadata_len, fw_name, dev);
+ if (IS_ERR(metadata)) {
+ ret = PTR_ERR(metadata);
+ dev_err(dev, "error %d reading firmware %s metadata\n", ret, fw_name);
+ goto out;
+ }
+
+ ret = qcom_scm_pas_init_image(pas_id, metadata, metadata_len, ctx);
+ kfree(metadata);
+ if (ret) {
+ /* Invalid firmware metadata */
+ dev_err(dev, "error %d initializing firmware %s\n", ret, fw_name);
+ goto out;
+ }
+
+ if (relocate) {
+ ret = qcom_scm_pas_mem_setup(pas_id, mem_phys, max_addr - min_addr);
+ if (ret) {
+ /* Unable to set up relocation */
+ dev_err(dev, "error %d setting up firmware %s\n", ret, fw_name);
+ goto out;
+ }
+ }
+
+out:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_mdt_pas_init);
+
+static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
+ const char *fw_name, int pas_id, void *mem_region,
+ phys_addr_t mem_phys, size_t mem_size,
+ phys_addr_t *reloc_base, bool pas_init)
+{
+ const struct elf32_phdr *phdrs;
+ const struct elf32_phdr *phdr;
+ const struct elf32_hdr *ehdr;
+ phys_addr_t mem_reloc;
+ phys_addr_t min_addr = PHYS_ADDR_MAX;
+ ssize_t offset;
+ bool relocate = false;
+ void *ptr;
+ int ret = 0;
+ int i;
+
+ if (!fw || !mem_region || !mem_phys || !mem_size)
+ return -EINVAL;
+
+ ehdr = (struct elf32_hdr *)fw->data;
+ phdrs = (struct elf32_phdr *)(ehdr + 1);
+
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ phdr = &phdrs[i];
+
+ if (!mdt_phdr_valid(phdr))
+ continue;
+
+ if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
+ relocate = true;
+
+ if (phdr->p_paddr < min_addr)
+ min_addr = phdr->p_paddr;
+ }
+
+ if (relocate) {
+ /*
+ * The image is relocatable, so offset each segment based on
+ * the lowest segment address.
+ */
+ mem_reloc = min_addr;
+ } else {
+ /*
+ * Image is not relocatable, so offset each segment based on
+ * the allocated physical chunk of memory.
+ */
+ mem_reloc = mem_phys;
+ }
+
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ phdr = &phdrs[i];
+
+ if (!mdt_phdr_valid(phdr))
+ continue;
+
+ offset = phdr->p_paddr - mem_reloc;
+ if (offset < 0 || offset + phdr->p_memsz > mem_size) {
+ dev_err(dev, "segment outside memory range\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ if (phdr->p_filesz > phdr->p_memsz) {
+ dev_err(dev,
+ "refusing to load segment %d with p_filesz > p_memsz\n",
+ i);
+ ret = -EINVAL;
+ break;
+ }
+
+ ptr = mem_region + offset;
+
+ if (phdr->p_filesz && phdr->p_offset < fw->size &&
+ phdr->p_offset + phdr->p_filesz <= fw->size) {
+ /* Firmware is large enough to be non-split */
+ if (phdr->p_offset + phdr->p_filesz > fw->size) {
+ dev_err(dev, "file %s segment %d would be truncated\n",
+ fw_name, i);
+ ret = -EINVAL;
+ break;
+ }
+
+ memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz);
+ } else if (phdr->p_filesz) {
+ /* Firmware not large enough, load split-out segments */
+ ret = mdt_load_split_segment(ptr, phdrs, i, fw_name, dev);
+ if (ret)
+ break;
+ }
+
+ if (phdr->p_memsz > phdr->p_filesz)
+ memset(ptr + phdr->p_filesz, 0, phdr->p_memsz - phdr->p_filesz);
+ }
+
+ if (reloc_base)
+ *reloc_base = mem_reloc;
+
+ return ret;
+}
+
+/**
+ * qcom_mdt_load() - load the firmware whose header is loaded as @fw
+ * @dev: device handle to associate resources with
+ * @fw: firmware object for the mdt file
+ * @firmware: name of the firmware, for construction of segment file names
+ * @pas_id: PAS identifier
+ * @mem_region: allocated memory region to load firmware into
+ * @mem_phys: physical address of allocated memory region
+ * @mem_size: size of the allocated memory region
+ * @reloc_base: adjusted physical address after relocation
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+int qcom_mdt_load(struct device *dev, const struct firmware *fw,
+ const char *firmware, int pas_id, void *mem_region,
+ phys_addr_t mem_phys, size_t mem_size,
+ phys_addr_t *reloc_base)
+{
+ int ret;
+
+ ret = qcom_mdt_pas_init(dev, fw, firmware, pas_id, mem_phys, NULL);
+ if (ret)
+ return ret;
+
+ return __qcom_mdt_load(dev, fw, firmware, pas_id, mem_region, mem_phys,
+ mem_size, reloc_base, true);
+}
+EXPORT_SYMBOL_GPL(qcom_mdt_load);
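+
+/*
+ * Illustrative usage sketch (not part of this file): a remoteproc driver
+ * would typically feed this loader from a reserved-memory carveout. The
+ * firmware name "adsp.mdt" and the pas_id/mem_* variables below are
+ * placeholders for values the caller already owns.
+ *
+ *   const struct firmware *fw;
+ *   ssize_t size;
+ *
+ *   if (!request_firmware(&fw, "adsp.mdt", dev)) {
+ *           size = qcom_mdt_get_size(fw);
+ *           if (size > 0 && size <= mem_size)
+ *                   qcom_mdt_load(dev, fw, "adsp.mdt", pas_id, mem_region,
+ *                                 mem_phys, mem_size, &mem_reloc);
+ *           release_firmware(fw);
+ *   }
+ */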
+
+/**
+ * qcom_mdt_load_no_init() - load the firmware whose header is loaded as @fw
+ * @dev: device handle to associate resources with
+ * @fw: firmware object for the mdt file
+ * @firmware: name of the firmware, for construction of segment file names
+ * @pas_id: PAS identifier
+ * @mem_region: allocated memory region to load firmware into
+ * @mem_phys: physical address of allocated memory region
+ * @mem_size: size of the allocated memory region
+ * @reloc_base: adjusted physical address after relocation
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw,
+ const char *firmware, int pas_id,
+ void *mem_region, phys_addr_t mem_phys,
+ size_t mem_size, phys_addr_t *reloc_base)
+{
+ return __qcom_mdt_load(dev, fw, firmware, pas_id, mem_region, mem_phys,
+ mem_size, reloc_base, false);
+}
+EXPORT_SYMBOL_GPL(qcom_mdt_load_no_init);
+
+MODULE_DESCRIPTION("Firmware parser for Qualcomm MDT format");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/ocmem.c b/drivers/soc/qcom/ocmem.c
new file mode 100644
index 000000000..27c668eac
--- /dev/null
+++ b/drivers/soc/qcom/ocmem.c
@@ -0,0 +1,451 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * The On Chip Memory (OCMEM) allocator allows various clients to allocate
+ * memory from OCMEM based on performance, latency and power requirements.
+ * This is typically used by the GPU, camera/video, and audio components on
+ * some Snapdragon SoCs.
+ *
+ * Copyright (C) 2019 Brian Masney <masneyb@onstation.org>
+ * Copyright (C) 2015 Red Hat. Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/qcom_scm.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <soc/qcom/ocmem.h>
+
+enum region_mode {
+ WIDE_MODE = 0x0,
+ THIN_MODE,
+ MODE_DEFAULT = WIDE_MODE,
+};
+
+enum ocmem_macro_state {
+ PASSTHROUGH = 0,
+ PERI_ON = 1,
+ CORE_ON = 2,
+ CLK_OFF = 4,
+};
+
+struct ocmem_region {
+ bool interleaved;
+ enum region_mode mode;
+ unsigned int num_macros;
+ enum ocmem_macro_state macro_state[4];
+ unsigned long macro_size;
+ unsigned long region_size;
+};
+
+struct ocmem_config {
+ uint8_t num_regions;
+ unsigned long macro_size;
+};
+
+struct ocmem {
+ struct device *dev;
+ const struct ocmem_config *config;
+ struct resource *memory;
+ void __iomem *mmio;
+ unsigned int num_ports;
+ unsigned int num_macros;
+ bool interleaved;
+ struct ocmem_region *regions;
+ unsigned long active_allocations;
+};
+
+#define OCMEM_MIN_ALIGN SZ_64K
+#define OCMEM_MIN_ALLOC SZ_64K
+
+#define OCMEM_REG_HW_VERSION 0x00000000
+#define OCMEM_REG_HW_PROFILE 0x00000004
+
+#define OCMEM_REG_REGION_MODE_CTL 0x00001000
+#define OCMEM_REGION_MODE_CTL_REG0_THIN 0x00000001
+#define OCMEM_REGION_MODE_CTL_REG1_THIN 0x00000002
+#define OCMEM_REGION_MODE_CTL_REG2_THIN 0x00000004
+#define OCMEM_REGION_MODE_CTL_REG3_THIN 0x00000008
+
+#define OCMEM_REG_GFX_MPU_START 0x00001004
+#define OCMEM_REG_GFX_MPU_END 0x00001008
+
+#define OCMEM_HW_VERSION_MAJOR(val) FIELD_GET(GENMASK(31, 28), val)
+#define OCMEM_HW_VERSION_MINOR(val) FIELD_GET(GENMASK(27, 16), val)
+#define OCMEM_HW_VERSION_STEP(val) FIELD_GET(GENMASK(15, 0), val)
+
+#define OCMEM_HW_PROFILE_NUM_PORTS(val) FIELD_GET(0x0000000f, (val))
+#define OCMEM_HW_PROFILE_NUM_MACROS(val) FIELD_GET(0x00003f00, (val))
+
+#define OCMEM_HW_PROFILE_LAST_REGN_HALFSIZE 0x00010000
+#define OCMEM_HW_PROFILE_INTERLEAVING 0x00020000
+#define OCMEM_REG_GEN_STATUS 0x0000000c
+
+#define OCMEM_REG_PSGSC_STATUS 0x00000038
+#define OCMEM_REG_PSGSC_CTL(i0) (0x0000003c + 0x1*(i0))
+
+#define OCMEM_PSGSC_CTL_MACRO0_MODE(val) FIELD_PREP(0x00000007, (val))
+#define OCMEM_PSGSC_CTL_MACRO1_MODE(val) FIELD_PREP(0x00000070, (val))
+#define OCMEM_PSGSC_CTL_MACRO2_MODE(val) FIELD_PREP(0x00000700, (val))
+#define OCMEM_PSGSC_CTL_MACRO3_MODE(val) FIELD_PREP(0x00007000, (val))
+
+#define OCMEM_CLK_CORE_IDX 0
+static struct clk_bulk_data ocmem_clks[] = {
+ {
+ .id = "core",
+ },
+ {
+ .id = "iface",
+ },
+};
+
+static inline void ocmem_write(struct ocmem *ocmem, u32 reg, u32 data)
+{
+ writel(data, ocmem->mmio + reg);
+}
+
+static inline u32 ocmem_read(struct ocmem *ocmem, u32 reg)
+{
+ return readl(ocmem->mmio + reg);
+}
+
+static void update_ocmem(struct ocmem *ocmem)
+{
+ uint32_t region_mode_ctrl = 0x0;
+ int i;
+
+ if (!qcom_scm_ocmem_lock_available()) {
+ for (i = 0; i < ocmem->config->num_regions; i++) {
+ struct ocmem_region *region = &ocmem->regions[i];
+
+ if (region->mode == THIN_MODE)
+ region_mode_ctrl |= BIT(i);
+ }
+
+ dev_dbg(ocmem->dev, "ocmem_region_mode_control %x\n",
+ region_mode_ctrl);
+ ocmem_write(ocmem, OCMEM_REG_REGION_MODE_CTL, region_mode_ctrl);
+ }
+
+ for (i = 0; i < ocmem->config->num_regions; i++) {
+ struct ocmem_region *region = &ocmem->regions[i];
+ u32 data;
+
+ data = OCMEM_PSGSC_CTL_MACRO0_MODE(region->macro_state[0]) |
+ OCMEM_PSGSC_CTL_MACRO1_MODE(region->macro_state[1]) |
+ OCMEM_PSGSC_CTL_MACRO2_MODE(region->macro_state[2]) |
+ OCMEM_PSGSC_CTL_MACRO3_MODE(region->macro_state[3]);
+
+ ocmem_write(ocmem, OCMEM_REG_PSGSC_CTL(i), data);
+ }
+}
+
+static unsigned long phys_to_offset(struct ocmem *ocmem,
+ unsigned long addr)
+{
+ if (addr < ocmem->memory->start || addr >= ocmem->memory->end)
+ return 0;
+
+ return addr - ocmem->memory->start;
+}
+
+static unsigned long device_address(struct ocmem *ocmem,
+ enum ocmem_client client,
+ unsigned long addr)
+{
+ WARN_ON(client != OCMEM_GRAPHICS);
+
+ /* TODO: gpu uses phys_to_offset, but others do not.. */
+ return phys_to_offset(ocmem, addr);
+}
+
+static void update_range(struct ocmem *ocmem, struct ocmem_buf *buf,
+ enum ocmem_macro_state mstate, enum region_mode rmode)
+{
+ unsigned long offset = 0;
+ int i, j;
+
+ for (i = 0; i < ocmem->config->num_regions; i++) {
+ struct ocmem_region *region = &ocmem->regions[i];
+
+ if (buf->offset <= offset && offset < buf->offset + buf->len)
+ region->mode = rmode;
+
+ for (j = 0; j < region->num_macros; j++) {
+ if (buf->offset <= offset &&
+ offset < buf->offset + buf->len)
+ region->macro_state[j] = mstate;
+
+ offset += region->macro_size;
+ }
+ }
+
+ update_ocmem(ocmem);
+}
+
+struct ocmem *of_get_ocmem(struct device *dev)
+{
+ struct platform_device *pdev;
+ struct device_node *devnode;
+ struct ocmem *ocmem;
+
+ devnode = of_parse_phandle(dev->of_node, "sram", 0);
+ if (!devnode || !devnode->parent) {
+ dev_err(dev, "Cannot look up sram phandle\n");
+ of_node_put(devnode);
+ return ERR_PTR(-ENODEV);
+ }
+
+ pdev = of_find_device_by_node(devnode->parent);
+ if (!pdev) {
+ dev_err(dev, "Cannot find device node %s\n", devnode->name);
+ of_node_put(devnode);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+ of_node_put(devnode);
+
+ ocmem = platform_get_drvdata(pdev);
+ if (!ocmem) {
+ dev_err(dev, "Cannot get ocmem\n");
+ put_device(&pdev->dev);
+ return ERR_PTR(-ENODEV);
+ }
+ return ocmem;
+}
+EXPORT_SYMBOL(of_get_ocmem);
+
+struct ocmem_buf *ocmem_allocate(struct ocmem *ocmem, enum ocmem_client client,
+ unsigned long size)
+{
+ struct ocmem_buf *buf;
+ int ret;
+
+ /* TODO: add support for other clients... */
+ if (WARN_ON(client != OCMEM_GRAPHICS))
+ return ERR_PTR(-ENODEV);
+
+ if (size < OCMEM_MIN_ALLOC || !IS_ALIGNED(size, OCMEM_MIN_ALIGN))
+ return ERR_PTR(-EINVAL);
+
+ if (test_and_set_bit_lock(BIT(client), &ocmem->active_allocations))
+ return ERR_PTR(-EBUSY);
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ buf->offset = 0;
+ buf->addr = device_address(ocmem, client, buf->offset);
+ buf->len = size;
+
+ update_range(ocmem, buf, CORE_ON, WIDE_MODE);
+
+ if (qcom_scm_ocmem_lock_available()) {
+ ret = qcom_scm_ocmem_lock(QCOM_SCM_OCMEM_GRAPHICS_ID,
+ buf->offset, buf->len, WIDE_MODE);
+ if (ret) {
+ dev_err(ocmem->dev, "could not lock: %d\n", ret);
+ ret = -EINVAL;
+ goto err_kfree;
+ }
+ } else {
+ ocmem_write(ocmem, OCMEM_REG_GFX_MPU_START, buf->offset);
+ ocmem_write(ocmem, OCMEM_REG_GFX_MPU_END,
+ buf->offset + buf->len);
+ }
+
+ dev_dbg(ocmem->dev, "using %ldK of OCMEM at 0x%08lx for client %d\n",
+ size / 1024, buf->addr, client);
+
+ return buf;
+
+err_kfree:
+ kfree(buf);
+err_unlock:
+ clear_bit_unlock(BIT(client), &ocmem->active_allocations);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(ocmem_allocate);
+
+void ocmem_free(struct ocmem *ocmem, enum ocmem_client client,
+ struct ocmem_buf *buf)
+{
+ /* TODO: add support for other clients... */
+ if (WARN_ON(client != OCMEM_GRAPHICS))
+ return;
+
+ update_range(ocmem, buf, CLK_OFF, MODE_DEFAULT);
+
+ if (qcom_scm_ocmem_lock_available()) {
+ int ret;
+
+ ret = qcom_scm_ocmem_unlock(QCOM_SCM_OCMEM_GRAPHICS_ID,
+ buf->offset, buf->len);
+ if (ret)
+ dev_err(ocmem->dev, "could not unlock: %d\n", ret);
+ } else {
+ ocmem_write(ocmem, OCMEM_REG_GFX_MPU_START, 0x0);
+ ocmem_write(ocmem, OCMEM_REG_GFX_MPU_END, 0x0);
+ }
+
+ kfree(buf);
+
+ clear_bit_unlock(BIT(client), &ocmem->active_allocations);
+}
+EXPORT_SYMBOL(ocmem_free);
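+
+/*
+ * Illustrative usage sketch (not part of this file): the GPU is the only
+ * supported client today and would use the allocator roughly as below; the
+ * gmu pointer and the SZ_1M size are placeholders.
+ *
+ *   struct ocmem *ocmem = of_get_ocmem(gmu->dev);
+ *   struct ocmem_buf *buf;
+ *
+ *   if (!IS_ERR(ocmem)) {
+ *           buf = ocmem_allocate(ocmem, OCMEM_GRAPHICS, SZ_1M);
+ *           if (!IS_ERR(buf)) {
+ *                   // program buf->addr and buf->len into the GPU
+ *                   ocmem_free(ocmem, OCMEM_GRAPHICS, buf);
+ *           }
+ *   }
+ */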
+
+static int ocmem_dev_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ unsigned long reg, region_size;
+ int i, j, ret, num_banks;
+ struct ocmem *ocmem;
+
+ if (!qcom_scm_is_available())
+ return -EPROBE_DEFER;
+
+ ocmem = devm_kzalloc(dev, sizeof(*ocmem), GFP_KERNEL);
+ if (!ocmem)
+ return -ENOMEM;
+
+ ocmem->dev = dev;
+ ocmem->config = device_get_match_data(dev);
+
+ ret = devm_clk_bulk_get(dev, ARRAY_SIZE(ocmem_clks), ocmem_clks);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Unable to get clocks\n");
+
+ return ret;
+ }
+
+ ocmem->mmio = devm_platform_ioremap_resource_byname(pdev, "ctrl");
+ if (IS_ERR(ocmem->mmio)) {
+ dev_err(&pdev->dev, "Failed to ioremap ocmem_ctrl resource\n");
+ return PTR_ERR(ocmem->mmio);
+ }
+
+ ocmem->memory = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "mem");
+ if (!ocmem->memory) {
+ dev_err(dev, "Could not get mem region\n");
+ return -ENXIO;
+ }
+
+ /* The core clock is synchronous with graphics */
+ WARN_ON(clk_set_rate(ocmem_clks[OCMEM_CLK_CORE_IDX].clk, 1000) < 0);
+
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(ocmem_clks), ocmem_clks);
+ if (ret) {
+ dev_info(ocmem->dev, "Failed to enable clocks\n");
+ return ret;
+ }
+
+ if (qcom_scm_restore_sec_cfg_available()) {
+ dev_dbg(dev, "configuring scm\n");
+ ret = qcom_scm_restore_sec_cfg(QCOM_SCM_OCMEM_DEV_ID, 0);
+ if (ret) {
+ dev_err(dev, "Could not enable secure configuration\n");
+ goto err_clk_disable;
+ }
+ }
+
+ reg = ocmem_read(ocmem, OCMEM_REG_HW_VERSION);
+ dev_dbg(dev, "OCMEM hardware version: %lu.%lu.%lu\n",
+ OCMEM_HW_VERSION_MAJOR(reg),
+ OCMEM_HW_VERSION_MINOR(reg),
+ OCMEM_HW_VERSION_STEP(reg));
+
+ reg = ocmem_read(ocmem, OCMEM_REG_HW_PROFILE);
+ ocmem->num_ports = OCMEM_HW_PROFILE_NUM_PORTS(reg);
+ ocmem->num_macros = OCMEM_HW_PROFILE_NUM_MACROS(reg);
+ ocmem->interleaved = !!(reg & OCMEM_HW_PROFILE_INTERLEAVING);
+
+ num_banks = ocmem->num_ports / 2;
+ region_size = ocmem->config->macro_size * num_banks;
+
+ dev_info(dev, "%u ports, %u regions, %u macros, %sinterleaved\n",
+ ocmem->num_ports, ocmem->config->num_regions,
+ ocmem->num_macros, ocmem->interleaved ? "" : "not ");
+
+ ocmem->regions = devm_kcalloc(dev, ocmem->config->num_regions,
+ sizeof(struct ocmem_region), GFP_KERNEL);
+ if (!ocmem->regions) {
+ ret = -ENOMEM;
+ goto err_clk_disable;
+ }
+
+ for (i = 0; i < ocmem->config->num_regions; i++) {
+ struct ocmem_region *region = &ocmem->regions[i];
+
+ if (WARN_ON(num_banks > ARRAY_SIZE(region->macro_state))) {
+ ret = -EINVAL;
+ goto err_clk_disable;
+ }
+
+ region->mode = MODE_DEFAULT;
+ region->num_macros = num_banks;
+
+ if (i == (ocmem->config->num_regions - 1) &&
+ reg & OCMEM_HW_PROFILE_LAST_REGN_HALFSIZE) {
+ region->macro_size = ocmem->config->macro_size / 2;
+ region->region_size = region_size / 2;
+ } else {
+ region->macro_size = ocmem->config->macro_size;
+ region->region_size = region_size;
+ }
+
+ for (j = 0; j < ARRAY_SIZE(region->macro_state); j++)
+ region->macro_state[j] = CLK_OFF;
+ }
+
+ platform_set_drvdata(pdev, ocmem);
+
+ return 0;
+
+err_clk_disable:
+ clk_bulk_disable_unprepare(ARRAY_SIZE(ocmem_clks), ocmem_clks);
+ return ret;
+}
+
+static int ocmem_dev_remove(struct platform_device *pdev)
+{
+ clk_bulk_disable_unprepare(ARRAY_SIZE(ocmem_clks), ocmem_clks);
+
+ return 0;
+}
+
+static const struct ocmem_config ocmem_8974_config = {
+ .num_regions = 3,
+ .macro_size = SZ_128K,
+};
+
+static const struct of_device_id ocmem_of_match[] = {
+ { .compatible = "qcom,msm8974-ocmem", .data = &ocmem_8974_config },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, ocmem_of_match);
+
+static struct platform_driver ocmem_driver = {
+ .probe = ocmem_dev_probe,
+ .remove = ocmem_dev_remove,
+ .driver = {
+ .name = "ocmem",
+ .of_match_table = ocmem_of_match,
+ },
+};
+
+module_platform_driver(ocmem_driver);
+
+MODULE_DESCRIPTION("On Chip Memory (OCMEM) allocator for some Snapdragon SoCs");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c
new file mode 100644
index 000000000..0034af927
--- /dev/null
+++ b/drivers/soc/qcom/pdr_interface.c
@@ -0,0 +1,755 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+
+#include "pdr_internal.h"
+
+struct pdr_service {
+ char service_name[SERVREG_NAME_LENGTH + 1];
+ char service_path[SERVREG_NAME_LENGTH + 1];
+
+ struct sockaddr_qrtr addr;
+
+ unsigned int instance;
+ unsigned int service;
+ u8 service_data_valid;
+ u32 service_data;
+ int state;
+
+ bool need_notifier_register;
+ bool need_notifier_remove;
+ bool need_locator_lookup;
+ bool service_connected;
+
+ struct list_head node;
+};
+
+struct pdr_handle {
+ struct qmi_handle locator_hdl;
+ struct qmi_handle notifier_hdl;
+
+ struct sockaddr_qrtr locator_addr;
+
+ struct list_head lookups;
+ struct list_head indack_list;
+
+ /* control access to pdr lookup/indack lists */
+ struct mutex list_lock;
+
+ /* serialize pd status invocation */
+ struct mutex status_lock;
+
+ /* control access to the locator state */
+ struct mutex lock;
+
+ bool locator_init_complete;
+
+ struct work_struct locator_work;
+ struct work_struct notifier_work;
+ struct work_struct indack_work;
+
+ struct workqueue_struct *notifier_wq;
+ struct workqueue_struct *indack_wq;
+
+ void (*status)(int state, char *service_path, void *priv);
+ void *priv;
+};
+
+struct pdr_list_node {
+ enum servreg_service_state curr_state;
+ u16 transaction_id;
+ struct pdr_service *pds;
+ struct list_head node;
+};
+
+static int pdr_locator_new_server(struct qmi_handle *qmi,
+ struct qmi_service *svc)
+{
+ struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
+ locator_hdl);
+ struct pdr_service *pds;
+
+ /* Create a local client port for QMI communication */
+ pdr->locator_addr.sq_family = AF_QIPCRTR;
+ pdr->locator_addr.sq_node = svc->node;
+ pdr->locator_addr.sq_port = svc->port;
+
+ mutex_lock(&pdr->lock);
+ pdr->locator_init_complete = true;
+ mutex_unlock(&pdr->lock);
+
+ /* Service pending lookup requests */
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry(pds, &pdr->lookups, node) {
+ if (pds->need_locator_lookup)
+ schedule_work(&pdr->locator_work);
+ }
+ mutex_unlock(&pdr->list_lock);
+
+ return 0;
+}
+
+static void pdr_locator_del_server(struct qmi_handle *qmi,
+ struct qmi_service *svc)
+{
+ struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
+ locator_hdl);
+
+ mutex_lock(&pdr->lock);
+ pdr->locator_init_complete = false;
+ mutex_unlock(&pdr->lock);
+
+ pdr->locator_addr.sq_node = 0;
+ pdr->locator_addr.sq_port = 0;
+}
+
+static const struct qmi_ops pdr_locator_ops = {
+ .new_server = pdr_locator_new_server,
+ .del_server = pdr_locator_del_server,
+};
+
+static int pdr_register_listener(struct pdr_handle *pdr,
+ struct pdr_service *pds,
+ bool enable)
+{
+ struct servreg_register_listener_resp resp;
+ struct servreg_register_listener_req req;
+ struct qmi_txn txn;
+ int ret;
+
+ ret = qmi_txn_init(&pdr->notifier_hdl, &txn,
+ servreg_register_listener_resp_ei,
+ &resp);
+ if (ret < 0)
+ return ret;
+
+ req.enable = enable;
+ strscpy(req.service_path, pds->service_path, sizeof(req.service_path));
+
+ ret = qmi_send_request(&pdr->notifier_hdl, &pds->addr,
+ &txn, SERVREG_REGISTER_LISTENER_REQ,
+ SERVREG_REGISTER_LISTENER_REQ_LEN,
+ servreg_register_listener_req_ei,
+ &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ return ret;
+ }
+
+ ret = qmi_txn_wait(&txn, 5 * HZ);
+ if (ret < 0) {
+ pr_err("PDR: %s register listener txn wait failed: %d\n",
+ pds->service_path, ret);
+ return ret;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ pr_err("PDR: %s register listener failed: 0x%x\n",
+ pds->service_path, resp.resp.error);
+ return -EREMOTEIO;
+ }
+
+ pds->state = resp.curr_state;
+
+ return 0;
+}
+
+static void pdr_notifier_work(struct work_struct *work)
+{
+ struct pdr_handle *pdr = container_of(work, struct pdr_handle,
+ notifier_work);
+ struct pdr_service *pds;
+ int ret;
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry(pds, &pdr->lookups, node) {
+ if (pds->service_connected) {
+ if (!pds->need_notifier_register)
+ continue;
+
+ pds->need_notifier_register = false;
+ ret = pdr_register_listener(pdr, pds, true);
+ if (ret < 0)
+ pds->state = SERVREG_SERVICE_STATE_DOWN;
+ } else {
+ if (!pds->need_notifier_remove)
+ continue;
+
+ pds->need_notifier_remove = false;
+ pds->state = SERVREG_SERVICE_STATE_DOWN;
+ }
+
+ mutex_lock(&pdr->status_lock);
+ pdr->status(pds->state, pds->service_path, pdr->priv);
+ mutex_unlock(&pdr->status_lock);
+ }
+ mutex_unlock(&pdr->list_lock);
+}
+
+static int pdr_notifier_new_server(struct qmi_handle *qmi,
+ struct qmi_service *svc)
+{
+ struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
+ notifier_hdl);
+ struct pdr_service *pds;
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry(pds, &pdr->lookups, node) {
+ if (pds->service == svc->service &&
+ pds->instance == svc->instance) {
+ pds->service_connected = true;
+ pds->need_notifier_register = true;
+ pds->addr.sq_family = AF_QIPCRTR;
+ pds->addr.sq_node = svc->node;
+ pds->addr.sq_port = svc->port;
+ queue_work(pdr->notifier_wq, &pdr->notifier_work);
+ }
+ }
+ mutex_unlock(&pdr->list_lock);
+
+ return 0;
+}
+
+static void pdr_notifier_del_server(struct qmi_handle *qmi,
+ struct qmi_service *svc)
+{
+ struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
+ notifier_hdl);
+ struct pdr_service *pds;
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry(pds, &pdr->lookups, node) {
+ if (pds->service == svc->service &&
+ pds->instance == svc->instance) {
+ pds->service_connected = false;
+ pds->need_notifier_remove = true;
+ pds->addr.sq_node = 0;
+ pds->addr.sq_port = 0;
+ queue_work(pdr->notifier_wq, &pdr->notifier_work);
+ }
+ }
+ mutex_unlock(&pdr->list_lock);
+}
+
+static const struct qmi_ops pdr_notifier_ops = {
+ .new_server = pdr_notifier_new_server,
+ .del_server = pdr_notifier_del_server,
+};
+
+static int pdr_send_indack_msg(struct pdr_handle *pdr, struct pdr_service *pds,
+ u16 tid)
+{
+ struct servreg_set_ack_resp resp;
+ struct servreg_set_ack_req req;
+ struct qmi_txn txn;
+ int ret;
+
+ ret = qmi_txn_init(&pdr->notifier_hdl, &txn, servreg_set_ack_resp_ei,
+ &resp);
+ if (ret < 0)
+ return ret;
+
+ req.transaction_id = tid;
+ strscpy(req.service_path, pds->service_path, sizeof(req.service_path));
+
+ ret = qmi_send_request(&pdr->notifier_hdl, &pds->addr,
+ &txn, SERVREG_SET_ACK_REQ,
+ SERVREG_SET_ACK_REQ_LEN,
+ servreg_set_ack_req_ei,
+ &req);
+
+ /* Skip waiting for response */
+ qmi_txn_cancel(&txn);
+ return ret;
+}
+
+static void pdr_indack_work(struct work_struct *work)
+{
+ struct pdr_handle *pdr = container_of(work, struct pdr_handle,
+ indack_work);
+ struct pdr_list_node *ind, *tmp;
+ struct pdr_service *pds;
+
+ list_for_each_entry_safe(ind, tmp, &pdr->indack_list, node) {
+ pds = ind->pds;
+
+ mutex_lock(&pdr->status_lock);
+ pds->state = ind->curr_state;
+ pdr->status(pds->state, pds->service_path, pdr->priv);
+ mutex_unlock(&pdr->status_lock);
+
+ /* Ack the indication after clients release the PD resources */
+ pdr_send_indack_msg(pdr, pds, ind->transaction_id);
+
+ mutex_lock(&pdr->list_lock);
+ list_del(&ind->node);
+ mutex_unlock(&pdr->list_lock);
+
+ kfree(ind);
+ }
+}
+
+static void pdr_indication_cb(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, const void *data)
+{
+ struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
+ notifier_hdl);
+ const struct servreg_state_updated_ind *ind_msg = data;
+ struct pdr_list_node *ind;
+ struct pdr_service *pds = NULL, *iter;
+
+ if (!ind_msg || !ind_msg->service_path[0] ||
+ strlen(ind_msg->service_path) > SERVREG_NAME_LENGTH)
+ return;
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry(iter, &pdr->lookups, node) {
+ if (strcmp(iter->service_path, ind_msg->service_path))
+ continue;
+
+ pds = iter;
+ break;
+ }
+ mutex_unlock(&pdr->list_lock);
+
+ if (!pds)
+ return;
+
+ pr_info("PDR: Indication received from %s, state: 0x%x, trans-id: %d\n",
+ ind_msg->service_path, ind_msg->curr_state,
+ ind_msg->transaction_id);
+
+ ind = kzalloc(sizeof(*ind), GFP_KERNEL);
+ if (!ind)
+ return;
+
+ ind->transaction_id = ind_msg->transaction_id;
+ ind->curr_state = ind_msg->curr_state;
+ ind->pds = pds;
+
+ mutex_lock(&pdr->list_lock);
+ list_add_tail(&ind->node, &pdr->indack_list);
+ mutex_unlock(&pdr->list_lock);
+
+ queue_work(pdr->indack_wq, &pdr->indack_work);
+}
+
+static const struct qmi_msg_handler qmi_indication_handler[] = {
+ {
+ .type = QMI_INDICATION,
+ .msg_id = SERVREG_STATE_UPDATED_IND_ID,
+ .ei = servreg_state_updated_ind_ei,
+ .decoded_size = sizeof(struct servreg_state_updated_ind),
+ .fn = pdr_indication_cb,
+ },
+ {}
+};
+
+static int pdr_get_domain_list(struct servreg_get_domain_list_req *req,
+ struct servreg_get_domain_list_resp *resp,
+ struct pdr_handle *pdr)
+{
+ struct qmi_txn txn;
+ int ret;
+
+ ret = qmi_txn_init(&pdr->locator_hdl, &txn,
+ servreg_get_domain_list_resp_ei, resp);
+ if (ret < 0)
+ return ret;
+
+ ret = qmi_send_request(&pdr->locator_hdl,
+ &pdr->locator_addr,
+ &txn, SERVREG_GET_DOMAIN_LIST_REQ,
+ SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN,
+ servreg_get_domain_list_req_ei,
+ req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ return ret;
+ }
+
+ ret = qmi_txn_wait(&txn, 5 * HZ);
+ if (ret < 0) {
+ pr_err("PDR: %s get domain list txn wait failed: %d\n",
+ req->service_name, ret);
+ return ret;
+ }
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ pr_err("PDR: %s get domain list failed: 0x%x\n",
+ req->service_name, resp->resp.error);
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+
+static int pdr_locate_service(struct pdr_handle *pdr, struct pdr_service *pds)
+{
+ struct servreg_get_domain_list_resp *resp;
+ struct servreg_get_domain_list_req req;
+ struct servreg_location_entry *entry;
+ int domains_read = 0;
+ int ret, i;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ /* Prepare req message */
+ strscpy(req.service_name, pds->service_name, sizeof(req.service_name));
+ req.domain_offset_valid = true;
+ req.domain_offset = 0;
+
+ do {
+ req.domain_offset = domains_read;
+ ret = pdr_get_domain_list(&req, resp, pdr);
+ if (ret < 0)
+ goto out;
+
+ for (i = domains_read; i < resp->domain_list_len; i++) {
+ entry = &resp->domain_list[i];
+
+ if (strnlen(entry->name, sizeof(entry->name)) == sizeof(entry->name))
+ continue;
+
+ if (!strcmp(entry->name, pds->service_path)) {
+ pds->service_data_valid = entry->service_data_valid;
+ pds->service_data = entry->service_data;
+ pds->instance = entry->instance;
+ goto out;
+ }
+ }
+
+ /* Update ret to indicate that the service is not yet found */
+ ret = -ENXIO;
+
+ /* Always read total_domains from the response msg */
+ if (resp->domain_list_len > resp->total_domains)
+ resp->domain_list_len = resp->total_domains;
+
+ domains_read += resp->domain_list_len;
+ } while (domains_read < resp->total_domains);
+out:
+ kfree(resp);
+ return ret;
+}
+
+static void pdr_notify_lookup_failure(struct pdr_handle *pdr,
+ struct pdr_service *pds,
+ int err)
+{
+ pr_err("PDR: service lookup for %s failed: %d\n",
+ pds->service_name, err);
+
+ if (err == -ENXIO)
+ return;
+
+ list_del(&pds->node);
+ pds->state = SERVREG_LOCATOR_ERR;
+ mutex_lock(&pdr->status_lock);
+ pdr->status(pds->state, pds->service_path, pdr->priv);
+ mutex_unlock(&pdr->status_lock);
+ kfree(pds);
+}
+
+static void pdr_locator_work(struct work_struct *work)
+{
+ struct pdr_handle *pdr = container_of(work, struct pdr_handle,
+ locator_work);
+ struct pdr_service *pds, *tmp;
+ int ret = 0;
+
+ /* Bail out early if the SERVREG LOCATOR QMI service is not up */
+ mutex_lock(&pdr->lock);
+ if (!pdr->locator_init_complete) {
+ mutex_unlock(&pdr->lock);
+ pr_debug("PDR: SERVICE LOCATOR service not available\n");
+ return;
+ }
+ mutex_unlock(&pdr->lock);
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry_safe(pds, tmp, &pdr->lookups, node) {
+ if (!pds->need_locator_lookup)
+ continue;
+
+ ret = pdr_locate_service(pdr, pds);
+ if (ret < 0) {
+ pdr_notify_lookup_failure(pdr, pds, ret);
+ continue;
+ }
+
+ ret = qmi_add_lookup(&pdr->notifier_hdl, pds->service, 1,
+ pds->instance);
+ if (ret < 0) {
+ pdr_notify_lookup_failure(pdr, pds, ret);
+ continue;
+ }
+
+ pds->need_locator_lookup = false;
+ }
+ mutex_unlock(&pdr->list_lock);
+}
+
+/**
+ * pdr_add_lookup() - register a tracking request for a PD
+ * @pdr: PDR client handle
+ * @service_name: service name of the tracking request
+ * @service_path: service path of the tracking request
+ *
+ * Registering a pdr lookup allows for tracking the life cycle of the PD.
+ *
+ * Return: pdr_service object on success, ERR_PTR on failure. -EALREADY is
+ * returned if a lookup is already in progress for the given service path.
+ */
+struct pdr_service *pdr_add_lookup(struct pdr_handle *pdr,
+ const char *service_name,
+ const char *service_path)
+{
+ struct pdr_service *pds, *tmp;
+ int ret;
+
+ if (IS_ERR_OR_NULL(pdr))
+ return ERR_PTR(-EINVAL);
+
+ if (!service_name || strlen(service_name) > SERVREG_NAME_LENGTH ||
+ !service_path || strlen(service_path) > SERVREG_NAME_LENGTH)
+ return ERR_PTR(-EINVAL);
+
+ pds = kzalloc(sizeof(*pds), GFP_KERNEL);
+ if (!pds)
+ return ERR_PTR(-ENOMEM);
+
+ pds->service = SERVREG_NOTIFIER_SERVICE;
+ strscpy(pds->service_name, service_name, sizeof(pds->service_name));
+ strscpy(pds->service_path, service_path, sizeof(pds->service_path));
+ pds->need_locator_lookup = true;
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry(tmp, &pdr->lookups, node) {
+ if (strcmp(tmp->service_path, service_path))
+ continue;
+
+ mutex_unlock(&pdr->list_lock);
+ ret = -EALREADY;
+ goto err;
+ }
+
+ list_add(&pds->node, &pdr->lookups);
+ mutex_unlock(&pdr->list_lock);
+
+ schedule_work(&pdr->locator_work);
+
+ return pds;
+err:
+ kfree(pds);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(pdr_add_lookup);
+
+/**
+ * pdr_restart_pd() - restart PD
+ * @pdr: PDR client handle
+ * @pds: PD service handle
+ *
+ * Restarts the PD tracked by the PDR client handle for a given service path.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int pdr_restart_pd(struct pdr_handle *pdr, struct pdr_service *pds)
+{
+ struct servreg_restart_pd_resp resp;
+ struct servreg_restart_pd_req req = { 0 };
+ struct sockaddr_qrtr addr;
+ struct pdr_service *tmp;
+ struct qmi_txn txn;
+ int ret;
+
+ if (IS_ERR_OR_NULL(pdr) || IS_ERR_OR_NULL(pds))
+ return -EINVAL;
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry(tmp, &pdr->lookups, node) {
+ if (tmp != pds)
+ continue;
+
+ if (!pds->service_connected)
+ break;
+
+ /* Prepare req message */
+ strscpy(req.service_path, pds->service_path, sizeof(req.service_path));
+ addr = pds->addr;
+ break;
+ }
+ mutex_unlock(&pdr->list_lock);
+
+ if (!req.service_path[0])
+ return -EINVAL;
+
+ ret = qmi_txn_init(&pdr->notifier_hdl, &txn,
+ servreg_restart_pd_resp_ei,
+ &resp);
+ if (ret < 0)
+ return ret;
+
+ ret = qmi_send_request(&pdr->notifier_hdl, &addr,
+ &txn, SERVREG_RESTART_PD_REQ,
+ SERVREG_RESTART_PD_REQ_MAX_LEN,
+ servreg_restart_pd_req_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ return ret;
+ }
+
+ ret = qmi_txn_wait(&txn, 5 * HZ);
+ if (ret < 0) {
+ pr_err("PDR: %s PD restart txn wait failed: %d\n",
+ req.service_path, ret);
+ return ret;
+ }
+
+ /* Check response if PDR is disabled */
+ if (resp.resp.result == QMI_RESULT_FAILURE_V01 &&
+ resp.resp.error == QMI_ERR_DISABLED_V01) {
+ pr_err("PDR: %s PD restart is disabled: 0x%x\n",
+ req.service_path, resp.resp.error);
+ return -EOPNOTSUPP;
+ }
+
+ /* Check the response for other error cases */
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ pr_err("PDR: %s request for PD restart failed: 0x%x\n",
+ req.service_path, resp.resp.error);
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(pdr_restart_pd);
+
+/**
+ * pdr_handle_alloc() - initialize the PDR client handle
+ * @status: function to be called on PD state change
+ * @priv: handle for client's use
+ *
+ * Initializes the PDR client handle to allow for tracking/restart of PDs.
+ *
+ * Return: pdr_handle object on success, ERR_PTR on failure.
+ */
+struct pdr_handle *pdr_handle_alloc(void (*status)(int state,
+ char *service_path,
+ void *priv), void *priv)
+{
+ struct pdr_handle *pdr;
+ int ret;
+
+ if (!status)
+ return ERR_PTR(-EINVAL);
+
+ pdr = kzalloc(sizeof(*pdr), GFP_KERNEL);
+ if (!pdr)
+ return ERR_PTR(-ENOMEM);
+
+ pdr->status = status;
+ pdr->priv = priv;
+
+ mutex_init(&pdr->status_lock);
+ mutex_init(&pdr->list_lock);
+ mutex_init(&pdr->lock);
+
+ INIT_LIST_HEAD(&pdr->lookups);
+ INIT_LIST_HEAD(&pdr->indack_list);
+
+ INIT_WORK(&pdr->locator_work, pdr_locator_work);
+ INIT_WORK(&pdr->notifier_work, pdr_notifier_work);
+ INIT_WORK(&pdr->indack_work, pdr_indack_work);
+
+ pdr->notifier_wq = create_singlethread_workqueue("pdr_notifier_wq");
+ if (!pdr->notifier_wq) {
+ ret = -ENOMEM;
+ goto free_pdr_handle;
+ }
+
+ pdr->indack_wq = alloc_ordered_workqueue("pdr_indack_wq", WQ_HIGHPRI);
+ if (!pdr->indack_wq) {
+ ret = -ENOMEM;
+ goto destroy_notifier;
+ }
+
+ ret = qmi_handle_init(&pdr->locator_hdl,
+ SERVREG_GET_DOMAIN_LIST_RESP_MAX_LEN,
+ &pdr_locator_ops, NULL);
+ if (ret < 0)
+ goto destroy_indack;
+
+ ret = qmi_add_lookup(&pdr->locator_hdl, SERVREG_LOCATOR_SERVICE, 1, 1);
+ if (ret < 0)
+ goto release_qmi_handle;
+
+ ret = qmi_handle_init(&pdr->notifier_hdl,
+ SERVREG_STATE_UPDATED_IND_MAX_LEN,
+ &pdr_notifier_ops,
+ qmi_indication_handler);
+ if (ret < 0)
+ goto release_qmi_handle;
+
+ return pdr;
+
+release_qmi_handle:
+ qmi_handle_release(&pdr->locator_hdl);
+destroy_indack:
+ destroy_workqueue(pdr->indack_wq);
+destroy_notifier:
+ destroy_workqueue(pdr->notifier_wq);
+free_pdr_handle:
+ kfree(pdr);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(pdr_handle_alloc);
+
+/**
+ * pdr_handle_release() - release the PDR client handle
+ * @pdr: PDR client handle
+ *
+ * Cleans up pending tracking requests and releases the underlying qmi handles.
+ */
+void pdr_handle_release(struct pdr_handle *pdr)
+{
+ struct pdr_service *pds, *tmp;
+
+ if (IS_ERR_OR_NULL(pdr))
+ return;
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry_safe(pds, tmp, &pdr->lookups, node) {
+ list_del(&pds->node);
+ kfree(pds);
+ }
+ mutex_unlock(&pdr->list_lock);
+
+ cancel_work_sync(&pdr->locator_work);
+ cancel_work_sync(&pdr->notifier_work);
+ cancel_work_sync(&pdr->indack_work);
+
+ destroy_workqueue(pdr->notifier_wq);
+ destroy_workqueue(pdr->indack_wq);
+
+ qmi_handle_release(&pdr->locator_hdl);
+ qmi_handle_release(&pdr->notifier_hdl);
+
+ kfree(pdr);
+}
+EXPORT_SYMBOL(pdr_handle_release);
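+
+/*
+ * Example: a minimal sketch of a PDR client built on the helpers above. The
+ * callback example_pd_status() is hypothetical and the "avs/audio" /
+ * "msm/adsp/audio_pd" names are placeholders; real clients must use the
+ * names published by their firmware. An explicit restart of the tracked PD
+ * could later be requested with pdr_restart_pd(pdr, pds).
+ */
+static void __maybe_unused example_pd_status(int state, char *service_path,
+ void *priv)
+{
+ pr_info("PDR example: %s is now in state 0x%x\n", service_path, state);
+}
+
+static int __maybe_unused example_pdr_client_init(void)
+{
+ struct pdr_handle *pdr;
+ struct pdr_service *pds;
+
+ pdr = pdr_handle_alloc(example_pd_status, NULL);
+ if (IS_ERR(pdr))
+ return PTR_ERR(pdr);
+
+ pds = pdr_add_lookup(pdr, "avs/audio", "msm/adsp/audio_pd");
+ if (IS_ERR(pds)) {
+ pdr_handle_release(pdr);
+ return PTR_ERR(pds);
+ }
+
+ /* On teardown the client calls pdr_handle_release(pdr) */
+ return 0;
+}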
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm Protection Domain Restart helpers");
diff --git a/drivers/soc/qcom/pdr_internal.h b/drivers/soc/qcom/pdr_internal.h
new file mode 100644
index 000000000..a30422214
--- /dev/null
+++ b/drivers/soc/qcom/pdr_internal.h
@@ -0,0 +1,379 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __QCOM_PDR_HELPER_INTERNAL__
+#define __QCOM_PDR_HELPER_INTERNAL__
+
+#include <linux/soc/qcom/pdr.h>
+
+#define SERVREG_LOCATOR_SERVICE 0x40
+#define SERVREG_NOTIFIER_SERVICE 0x42
+
+#define SERVREG_REGISTER_LISTENER_REQ 0x20
+#define SERVREG_GET_DOMAIN_LIST_REQ 0x21
+#define SERVREG_STATE_UPDATED_IND_ID 0x22
+#define SERVREG_SET_ACK_REQ 0x23
+#define SERVREG_RESTART_PD_REQ 0x24
+
+#define SERVREG_DOMAIN_LIST_LENGTH 32
+#define SERVREG_RESTART_PD_REQ_MAX_LEN 67
+#define SERVREG_REGISTER_LISTENER_REQ_LEN 71
+#define SERVREG_SET_ACK_REQ_LEN 72
+#define SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN 74
+#define SERVREG_STATE_UPDATED_IND_MAX_LEN 79
+#define SERVREG_GET_DOMAIN_LIST_RESP_MAX_LEN 2389
+
+struct servreg_location_entry {
+ char name[SERVREG_NAME_LENGTH + 1];
+ u8 service_data_valid;
+ u32 service_data;
+ u32 instance;
+};
+
+static struct qmi_elem_info servreg_location_entry_ei[] = {
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct servreg_location_entry,
+ name),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct servreg_location_entry,
+ instance),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct servreg_location_entry,
+ service_data_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct servreg_location_entry,
+ service_data),
+ },
+ {}
+};
+
+struct servreg_get_domain_list_req {
+ char service_name[SERVREG_NAME_LENGTH + 1];
+ u8 domain_offset_valid;
+ u32 domain_offset;
+};
+
+static struct qmi_elem_info servreg_get_domain_list_req_ei[] = {
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct servreg_get_domain_list_req,
+ service_name),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_get_domain_list_req,
+ domain_offset_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_get_domain_list_req,
+ domain_offset),
+ },
+ {}
+};
+
+struct servreg_get_domain_list_resp {
+ struct qmi_response_type_v01 resp;
+ u8 total_domains_valid;
+ u16 total_domains;
+ u8 db_rev_count_valid;
+ u16 db_rev_count;
+ u8 domain_list_valid;
+ u32 domain_list_len;
+ struct servreg_location_entry domain_list[SERVREG_DOMAIN_LIST_LENGTH];
+};
+
+static struct qmi_elem_info servreg_get_domain_list_resp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ total_domains_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ total_domains),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ db_rev_count_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ db_rev_count),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ domain_list_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ domain_list_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = SERVREG_DOMAIN_LIST_LENGTH,
+ .elem_size = sizeof(struct servreg_location_entry),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ domain_list),
+ .ei_array = servreg_location_entry_ei,
+ },
+ {}
+};
+
+struct servreg_register_listener_req {
+ u8 enable;
+ char service_path[SERVREG_NAME_LENGTH + 1];
+};
+
+static struct qmi_elem_info servreg_register_listener_req_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct servreg_register_listener_req,
+ enable),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_register_listener_req,
+ service_path),
+ },
+ {}
+};
+
+struct servreg_register_listener_resp {
+ struct qmi_response_type_v01 resp;
+ u8 curr_state_valid;
+ enum servreg_service_state curr_state;
+};
+
+static struct qmi_elem_info servreg_register_listener_resp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_register_listener_resp,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_register_listener_resp,
+ curr_state_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum servreg_service_state),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_register_listener_resp,
+ curr_state),
+ },
+ {}
+};
+
+struct servreg_restart_pd_req {
+ char service_path[SERVREG_NAME_LENGTH + 1];
+};
+
+static struct qmi_elem_info servreg_restart_pd_req_ei[] = {
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct servreg_restart_pd_req,
+ service_path),
+ },
+ {}
+};
+
+struct servreg_restart_pd_resp {
+ struct qmi_response_type_v01 resp;
+};
+
+static struct qmi_elem_info servreg_restart_pd_resp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_restart_pd_resp,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+struct servreg_state_updated_ind {
+ enum servreg_service_state curr_state;
+ char service_path[SERVREG_NAME_LENGTH + 1];
+ u16 transaction_id;
+};
+
+static struct qmi_elem_info servreg_state_updated_ind_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct servreg_state_updated_ind,
+ curr_state),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_state_updated_ind,
+ service_path),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct servreg_state_updated_ind,
+ transaction_id),
+ },
+ {}
+};
+
+struct servreg_set_ack_req {
+ char service_path[SERVREG_NAME_LENGTH + 1];
+ u16 transaction_id;
+};
+
+static struct qmi_elem_info servreg_set_ack_req_ei[] = {
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct servreg_set_ack_req,
+ service_path),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_set_ack_req,
+ transaction_id),
+ },
+ {}
+};
+
+struct servreg_set_ack_resp {
+ struct qmi_response_type_v01 resp;
+};
+
+static struct qmi_elem_info servreg_set_ack_resp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_set_ack_resp,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+#endif
diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
new file mode 100644
index 000000000..a0ceeede4
--- /dev/null
+++ b/drivers/soc/qcom/qcom-geni-se.c
@@ -0,0 +1,920 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+
+/* Disable MMIO tracing to prevent excessive logging of unwanted MMIO traces */
+#define __DISABLE_TRACE_MMIO__
+
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/qcom-geni-se.h>
+
+/**
+ * DOC: Overview
+ *
+ * Generic Interface (GENI) Serial Engine (SE) Wrapper driver is introduced
+ * to manage GENI firmware based Qualcomm Universal Peripheral (QUP) Wrapper
+ * controller. QUP Wrapper is designed to support various serial bus protocols
+ * like UART, SPI, I2C, I3C, etc.
+ */
+
+/**
+ * DOC: Hardware description
+ *
+ * GENI based QUP is a highly-flexible and programmable module for supporting
+ * a wide range of serial interfaces like UART, SPI, I2C, I3C, etc. A single
+ * QUP module can provide up to 8 serial interfaces, using its internal
+ * serial engines. The actual configuration is determined by the target
+ * platform configuration. The protocol supported by each interface is
+ * determined by the firmware loaded to the serial engine. Each SE consists
+ * of a DMA Engine and GENI sub modules which enable serial engines to
+ * support FIFO and DMA modes of operation.
+ *
+ *
+ * +-----------------------------------------+
+ * |QUP Wrapper |
+ * | +----------------------------+ |
+ * --QUP & SE Clocks--> | Serial Engine N | +-IO------>
+ * | | ... | | Interface
+ * <---Clock Perf.----+ +----+-----------------------+ | |
+ * State Interface | | Serial Engine 1 | | |
+ * | | | | |
+ * | | | | |
+ * <--------AHB-------> | | | |
+ * | | +----+ |
+ * | | | |
+ * | | | |
+ * <------SE IRQ------+ +----------------------------+ |
+ * | |
+ * +-----------------------------------------+
+ *
+ * Figure 1: GENI based QUP Wrapper
+ *
+ * The GENI submodules include primary and secondary sequencers which are
+ * used to drive TX & RX operations. On serial interfaces that operate using
+ * master-slave model, primary sequencer drives both TX & RX operations. On
+ * serial interfaces that operate using peer-to-peer model, primary sequencer
+ * drives TX operation and secondary sequencer drives RX operation.
+ */
+
+/**
+ * DOC: Software description
+ *
+ * GENI SE Wrapper driver is structured into 2 parts:
+ *
+ * geni_wrapper represents QUP Wrapper controller. This part of the driver
+ * manages QUP Wrapper information such as hardware version, clock
+ * performance table that is common to all the internal serial engines.
+ *
+ * geni_se represents serial engine. This part of the driver manages serial
+ * engine information such as clocks, containing QUP Wrapper, etc. This part
+ * of driver also supports operations (eg. initialize the concerned serial
+ * engine, select between FIFO and DMA mode of operation etc.) that are
+ * common to all the serial engines and are independent of serial interfaces.
+ */
+
+#define MAX_CLK_PERF_LEVEL 32
+#define NUM_AHB_CLKS 2
+
+/**
+ * struct geni_wrapper - Data structure to represent the QUP Wrapper Core
+ * @dev: Device pointer of the QUP wrapper core
+ * @base: Base address of this instance of QUP wrapper core
+ * @ahb_clks: Handle to the primary & secondary AHB clocks
+ */
+struct geni_wrapper {
+ struct device *dev;
+ void __iomem *base;
+ struct clk_bulk_data ahb_clks[NUM_AHB_CLKS];
+};
+
+static const char * const icc_path_names[] = {"qup-core", "qup-config",
+ "qup-memory"};
+
+#define QUP_HW_VER_REG 0x4
+
+/* Common SE registers */
+#define GENI_INIT_CFG_REVISION 0x0
+#define GENI_S_INIT_CFG_REVISION 0x4
+#define GENI_OUTPUT_CTRL 0x24
+#define GENI_CGC_CTRL 0x28
+#define GENI_CLK_CTRL_RO 0x60
+#define GENI_FW_S_REVISION_RO 0x6c
+#define SE_GENI_BYTE_GRAN 0x254
+#define SE_GENI_TX_PACKING_CFG0 0x260
+#define SE_GENI_TX_PACKING_CFG1 0x264
+#define SE_GENI_RX_PACKING_CFG0 0x284
+#define SE_GENI_RX_PACKING_CFG1 0x288
+#define SE_GENI_M_GP_LENGTH 0x910
+#define SE_GENI_S_GP_LENGTH 0x914
+#define SE_DMA_TX_PTR_L 0xc30
+#define SE_DMA_TX_PTR_H 0xc34
+#define SE_DMA_TX_ATTR 0xc38
+#define SE_DMA_TX_LEN 0xc3c
+#define SE_DMA_TX_IRQ_EN 0xc48
+#define SE_DMA_TX_IRQ_EN_SET 0xc4c
+#define SE_DMA_TX_IRQ_EN_CLR 0xc50
+#define SE_DMA_TX_LEN_IN 0xc54
+#define SE_DMA_TX_MAX_BURST 0xc5c
+#define SE_DMA_RX_PTR_L 0xd30
+#define SE_DMA_RX_PTR_H 0xd34
+#define SE_DMA_RX_ATTR 0xd38
+#define SE_DMA_RX_LEN 0xd3c
+#define SE_DMA_RX_IRQ_EN 0xd48
+#define SE_DMA_RX_IRQ_EN_SET 0xd4c
+#define SE_DMA_RX_IRQ_EN_CLR 0xd50
+#define SE_DMA_RX_LEN_IN 0xd54
+#define SE_DMA_RX_MAX_BURST 0xd5c
+#define SE_DMA_RX_FLUSH 0xd60
+#define SE_GSI_EVENT_EN 0xe18
+#define SE_IRQ_EN 0xe1c
+#define SE_DMA_GENERAL_CFG 0xe30
+
+/* GENI_OUTPUT_CTRL fields */
+#define DEFAULT_IO_OUTPUT_CTRL_MSK GENMASK(6, 0)
+
+/* GENI_CGC_CTRL fields */
+#define CFG_AHB_CLK_CGC_ON BIT(0)
+#define CFG_AHB_WR_ACLK_CGC_ON BIT(1)
+#define DATA_AHB_CLK_CGC_ON BIT(2)
+#define SCLK_CGC_ON BIT(3)
+#define TX_CLK_CGC_ON BIT(4)
+#define RX_CLK_CGC_ON BIT(5)
+#define EXT_CLK_CGC_ON BIT(6)
+#define PROG_RAM_HCLK_OFF BIT(8)
+#define PROG_RAM_SCLK_OFF BIT(9)
+#define DEFAULT_CGC_EN GENMASK(6, 0)
+
+/* SE_GSI_EVENT_EN fields */
+#define DMA_RX_EVENT_EN BIT(0)
+#define DMA_TX_EVENT_EN BIT(1)
+#define GENI_M_EVENT_EN BIT(2)
+#define GENI_S_EVENT_EN BIT(3)
+
+/* SE_IRQ_EN fields */
+#define DMA_RX_IRQ_EN BIT(0)
+#define DMA_TX_IRQ_EN BIT(1)
+#define GENI_M_IRQ_EN BIT(2)
+#define GENI_S_IRQ_EN BIT(3)
+
+/* SE_DMA_GENERAL_CFG */
+#define DMA_RX_CLK_CGC_ON BIT(0)
+#define DMA_TX_CLK_CGC_ON BIT(1)
+#define DMA_AHB_SLV_CFG_ON BIT(2)
+#define AHB_SEC_SLV_CLK_CGC_ON BIT(3)
+#define DUMMY_RX_NON_BUFFERABLE BIT(4)
+#define RX_DMA_ZERO_PADDING_EN BIT(5)
+#define RX_DMA_IRQ_DELAY_MSK GENMASK(8, 6)
+#define RX_DMA_IRQ_DELAY_SHFT 6
+
+/**
+ * geni_se_get_qup_hw_version() - Read the QUP wrapper Hardware version
+ * @se: Pointer to the corresponding serial engine.
+ *
+ * Return: Hardware Version of the wrapper.
+ */
+u32 geni_se_get_qup_hw_version(struct geni_se *se)
+{
+ struct geni_wrapper *wrapper = se->wrapper;
+
+ return readl_relaxed(wrapper->base + QUP_HW_VER_REG);
+}
+EXPORT_SYMBOL(geni_se_get_qup_hw_version);
+
+static void geni_se_io_set_mode(void __iomem *base)
+{
+ u32 val;
+
+ val = readl_relaxed(base + SE_IRQ_EN);
+ val |= GENI_M_IRQ_EN | GENI_S_IRQ_EN;
+ val |= DMA_TX_IRQ_EN | DMA_RX_IRQ_EN;
+ writel_relaxed(val, base + SE_IRQ_EN);
+
+ val = readl_relaxed(base + SE_GENI_DMA_MODE_EN);
+ val &= ~GENI_DMA_MODE_EN;
+ writel_relaxed(val, base + SE_GENI_DMA_MODE_EN);
+
+ writel_relaxed(0, base + SE_GSI_EVENT_EN);
+}
+
+static void geni_se_io_init(void __iomem *base)
+{
+ u32 val;
+
+ val = readl_relaxed(base + GENI_CGC_CTRL);
+ val |= DEFAULT_CGC_EN;
+ writel_relaxed(val, base + GENI_CGC_CTRL);
+
+ val = readl_relaxed(base + SE_DMA_GENERAL_CFG);
+ val |= AHB_SEC_SLV_CLK_CGC_ON | DMA_AHB_SLV_CFG_ON;
+ val |= DMA_TX_CLK_CGC_ON | DMA_RX_CLK_CGC_ON;
+ writel_relaxed(val, base + SE_DMA_GENERAL_CFG);
+
+ writel_relaxed(DEFAULT_IO_OUTPUT_CTRL_MSK, base + GENI_OUTPUT_CTRL);
+ writel_relaxed(FORCE_DEFAULT, base + GENI_FORCE_DEFAULT_REG);
+}
+
+static void geni_se_irq_clear(struct geni_se *se)
+{
+ writel_relaxed(0, se->base + SE_GSI_EVENT_EN);
+ writel_relaxed(0xffffffff, se->base + SE_GENI_M_IRQ_CLEAR);
+ writel_relaxed(0xffffffff, se->base + SE_GENI_S_IRQ_CLEAR);
+ writel_relaxed(0xffffffff, se->base + SE_DMA_TX_IRQ_CLR);
+ writel_relaxed(0xffffffff, se->base + SE_DMA_RX_IRQ_CLR);
+ writel_relaxed(0xffffffff, se->base + SE_IRQ_EN);
+}
+
+/**
+ * geni_se_init() - Initialize the GENI serial engine
+ * @se: Pointer to the concerned serial engine.
+ * @rx_wm: Receive watermark, in units of FIFO words.
+ * @rx_rfr: Ready-for-receive watermark, in units of FIFO words.
+ *
+ * This function is used to initialize the GENI serial engine and configure
+ * the receive and ready-for-receive watermarks.
+ */
+void geni_se_init(struct geni_se *se, u32 rx_wm, u32 rx_rfr)
+{
+ u32 val;
+
+ geni_se_irq_clear(se);
+ geni_se_io_init(se->base);
+ geni_se_io_set_mode(se->base);
+
+ writel_relaxed(rx_wm, se->base + SE_GENI_RX_WATERMARK_REG);
+ writel_relaxed(rx_rfr, se->base + SE_GENI_RX_RFR_WATERMARK_REG);
+
+ val = readl_relaxed(se->base + SE_GENI_M_IRQ_EN);
+ val |= M_COMMON_GENI_M_IRQ_EN;
+ writel_relaxed(val, se->base + SE_GENI_M_IRQ_EN);
+
+ val = readl_relaxed(se->base + SE_GENI_S_IRQ_EN);
+ val |= S_COMMON_GENI_S_IRQ_EN;
+ writel_relaxed(val, se->base + SE_GENI_S_IRQ_EN);
+}
+EXPORT_SYMBOL(geni_se_init);
+
+static void geni_se_select_fifo_mode(struct geni_se *se)
+{
+ u32 proto = geni_se_read_proto(se);
+ u32 val, val_old;
+
+ geni_se_irq_clear(se);
+
+ /*
+ * The RX path for the UART is asynchronous and so needs more
+ * complex logic for enabling / disabling its interrupts.
+ *
+ * Specific notes:
+ * - The done and TX-related interrupts are managed manually.
+ * - We don't RX from the main sequencer (we use the secondary) so
+ * we don't need the RX-related interrupts enabled in the main
+ * sequencer for UART.
+ */
+ if (proto != GENI_SE_UART) {
+ val_old = val = readl_relaxed(se->base + SE_GENI_M_IRQ_EN);
+ val |= M_CMD_DONE_EN | M_TX_FIFO_WATERMARK_EN;
+ val |= M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN;
+ if (val != val_old)
+ writel_relaxed(val, se->base + SE_GENI_M_IRQ_EN);
+
+ val_old = val = readl_relaxed(se->base + SE_GENI_S_IRQ_EN);
+ val |= S_CMD_DONE_EN;
+ if (val != val_old)
+ writel_relaxed(val, se->base + SE_GENI_S_IRQ_EN);
+ }
+
+ val_old = val = readl_relaxed(se->base + SE_GENI_DMA_MODE_EN);
+ val &= ~GENI_DMA_MODE_EN;
+ if (val != val_old)
+ writel_relaxed(val, se->base + SE_GENI_DMA_MODE_EN);
+}
+
+static void geni_se_select_dma_mode(struct geni_se *se)
+{
+ u32 proto = geni_se_read_proto(se);
+ u32 val, val_old;
+
+ geni_se_irq_clear(se);
+
+ if (proto != GENI_SE_UART) {
+ val_old = val = readl_relaxed(se->base + SE_GENI_M_IRQ_EN);
+ val &= ~(M_CMD_DONE_EN | M_TX_FIFO_WATERMARK_EN);
+ val &= ~(M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN);
+ if (val != val_old)
+ writel_relaxed(val, se->base + SE_GENI_M_IRQ_EN);
+
+ val_old = val = readl_relaxed(se->base + SE_GENI_S_IRQ_EN);
+ val &= ~S_CMD_DONE_EN;
+ if (val != val_old)
+ writel_relaxed(val, se->base + SE_GENI_S_IRQ_EN);
+ }
+
+ val_old = val = readl_relaxed(se->base + SE_GENI_DMA_MODE_EN);
+ val |= GENI_DMA_MODE_EN;
+ if (val != val_old)
+ writel_relaxed(val, se->base + SE_GENI_DMA_MODE_EN);
+}
+
+static void geni_se_select_gpi_mode(struct geni_se *se)
+{
+ u32 val;
+
+ geni_se_irq_clear(se);
+
+ writel(0, se->base + SE_IRQ_EN);
+
+ val = readl(se->base + SE_GENI_S_IRQ_EN);
+ val &= ~S_CMD_DONE_EN;
+ writel(val, se->base + SE_GENI_S_IRQ_EN);
+
+ val = readl(se->base + SE_GENI_M_IRQ_EN);
+ val &= ~(M_CMD_DONE_EN | M_TX_FIFO_WATERMARK_EN |
+ M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN);
+ writel(val, se->base + SE_GENI_M_IRQ_EN);
+
+ writel(GENI_DMA_MODE_EN, se->base + SE_GENI_DMA_MODE_EN);
+
+ val = readl(se->base + SE_GSI_EVENT_EN);
+ val |= (DMA_RX_EVENT_EN | DMA_TX_EVENT_EN | GENI_M_EVENT_EN | GENI_S_EVENT_EN);
+ writel(val, se->base + SE_GSI_EVENT_EN);
+}
+
+/**
+ * geni_se_select_mode() - Select the serial engine transfer mode
+ * @se: Pointer to the concerned serial engine.
+ * @mode: Transfer mode to be selected.
+ */
+void geni_se_select_mode(struct geni_se *se, enum geni_se_xfer_mode mode)
+{
+ WARN_ON(mode != GENI_SE_FIFO && mode != GENI_SE_DMA && mode != GENI_GPI_DMA);
+
+ switch (mode) {
+ case GENI_SE_FIFO:
+ geni_se_select_fifo_mode(se);
+ break;
+ case GENI_SE_DMA:
+ geni_se_select_dma_mode(se);
+ break;
+ case GENI_GPI_DMA:
+ geni_se_select_gpi_mode(se);
+ break;
+ case GENI_SE_INVALID:
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL(geni_se_select_mode);
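+
+/*
+ * Example: a minimal sketch of how a protocol driver, having populated its
+ * struct geni_se during probe, might bring the engine up for FIFO based
+ * transfers. The watermark values are placeholders; real drivers derive them
+ * from their FIFO depth and transfer sizes.
+ */
+static void __maybe_unused example_geni_se_setup(struct geni_se *se)
+{
+ /* Clear stale interrupt state and program the RX watermarks (FIFO words) */
+ geni_se_init(se, 0x1, 0x10);
+
+ /* Route data through the programmed-IO FIFOs rather than the SE DMA */
+ geni_se_select_mode(se, GENI_SE_FIFO);
+}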
+
+/**
+ * DOC: Overview
+ *
+ * GENI FIFO packing is highly configurable. TX/RX packing/unpacking consist
+ * of up to 4 operations, each operation represented by 4 configuration vectors
+ * of 10 bits programmed in GENI_TX_PACKING_CFG0 and GENI_TX_PACKING_CFG1 for
+ * TX FIFO and in GENI_RX_PACKING_CFG0 and GENI_RX_PACKING_CFG1 for RX FIFO.
+ * Refer to the examples below for a detailed bit-field description.
+ *
+ * Example 1: word_size = 7, packing_mode = 4 x 8, msb_to_lsb = 1
+ *
+ * +-----------+-------+-------+-------+-------+
+ * | | vec_0 | vec_1 | vec_2 | vec_3 |
+ * +-----------+-------+-------+-------+-------+
+ * | start | 0x6 | 0xe | 0x16 | 0x1e |
+ * | direction | 1 | 1 | 1 | 1 |
+ * | length | 6 | 6 | 6 | 6 |
+ * | stop | 0 | 0 | 0 | 1 |
+ * +-----------+-------+-------+-------+-------+
+ *
+ * Example 2: word_size = 15, packing_mode = 2 x 16, msb_to_lsb = 0
+ *
+ * +-----------+-------+-------+-------+-------+
+ * | | vec_0 | vec_1 | vec_2 | vec_3 |
+ * +-----------+-------+-------+-------+-------+
+ * | start | 0x0 | 0x8 | 0x10 | 0x18 |
+ * | direction | 0 | 0 | 0 | 0 |
+ * | length | 7 | 6 | 7 | 6 |
+ * | stop | 0 | 0 | 0 | 1 |
+ * +-----------+-------+-------+-------+-------+
+ *
+ * Example 3: word_size = 23, packing_mode = 1 x 32, msb_to_lsb = 1
+ *
+ * +-----------+-------+-------+-------+-------+
+ * | | vec_0 | vec_1 | vec_2 | vec_3 |
+ * +-----------+-------+-------+-------+-------+
+ * | start | 0x16 | 0xe | 0x6 | 0x0 |
+ * | direction | 1 | 1 | 1 | 1 |
+ * | length | 7 | 7 | 6 | 0 |
+ * | stop | 0 | 0 | 1 | 0 |
+ * +-----------+-------+-------+-------+-------+
+ *
+ */
+
+#define NUM_PACKING_VECTORS 4
+#define PACKING_START_SHIFT 5
+#define PACKING_DIR_SHIFT 4
+#define PACKING_LEN_SHIFT 1
+#define PACKING_STOP_BIT BIT(0)
+#define PACKING_VECTOR_SHIFT 10
+/**
+ * geni_se_config_packing() - Packing configuration of the serial engine
+ * @se: Pointer to the concerned serial engine
+ * @bpw: Bits of data per transfer word.
+ * @pack_words: Number of words per fifo element.
+ * @msb_to_lsb: Transfer from MSB to LSB or vice-versa.
+ * @tx_cfg: Flag to configure the TX Packing.
+ * @rx_cfg: Flag to configure the RX Packing.
+ *
+ * This function is used to configure the packing rules for the current
+ * transfer.
+ */
+void geni_se_config_packing(struct geni_se *se, int bpw, int pack_words,
+ bool msb_to_lsb, bool tx_cfg, bool rx_cfg)
+{
+ u32 cfg0, cfg1, cfg[NUM_PACKING_VECTORS] = {0};
+ int len;
+ int temp_bpw = bpw;
+ int idx_start = msb_to_lsb ? bpw - 1 : 0;
+ int idx = idx_start;
+ int idx_delta = msb_to_lsb ? -BITS_PER_BYTE : BITS_PER_BYTE;
+ int ceil_bpw = ALIGN(bpw, BITS_PER_BYTE);
+ int iter = (ceil_bpw * pack_words) / BITS_PER_BYTE;
+ int i;
+
+ if (iter <= 0 || iter > NUM_PACKING_VECTORS)
+ return;
+
+ for (i = 0; i < iter; i++) {
+ len = min_t(int, temp_bpw, BITS_PER_BYTE) - 1;
+ cfg[i] = idx << PACKING_START_SHIFT;
+ cfg[i] |= msb_to_lsb << PACKING_DIR_SHIFT;
+ cfg[i] |= len << PACKING_LEN_SHIFT;
+
+ if (temp_bpw <= BITS_PER_BYTE) {
+ idx = ((i + 1) * BITS_PER_BYTE) + idx_start;
+ temp_bpw = bpw;
+ } else {
+ idx = idx + idx_delta;
+ temp_bpw = temp_bpw - BITS_PER_BYTE;
+ }
+ }
+ cfg[iter - 1] |= PACKING_STOP_BIT;
+ cfg0 = cfg[0] | (cfg[1] << PACKING_VECTOR_SHIFT);
+ cfg1 = cfg[2] | (cfg[3] << PACKING_VECTOR_SHIFT);
+
+ if (tx_cfg) {
+ writel_relaxed(cfg0, se->base + SE_GENI_TX_PACKING_CFG0);
+ writel_relaxed(cfg1, se->base + SE_GENI_TX_PACKING_CFG1);
+ }
+ if (rx_cfg) {
+ writel_relaxed(cfg0, se->base + SE_GENI_RX_PACKING_CFG0);
+ writel_relaxed(cfg1, se->base + SE_GENI_RX_PACKING_CFG1);
+ }
+
+ /*
+ * Number of protocol words in each FIFO entry
+ * 0 - 4x8, four words in each entry, max word size of 8 bits
+ * 1 - 2x16, two words in each entry, max word size of 16 bits
+ * 2 - 1x32, one word in each entry, max word size of 32 bits
+ * 3 - undefined
+ */
+ if (pack_words || bpw == 32)
+ writel_relaxed(bpw / 16, se->base + SE_GENI_BYTE_GRAN);
+}
+EXPORT_SYMBOL(geni_se_config_packing);
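+
+/*
+ * Example: a sketch reproducing Example 1 from the packing DOC block above,
+ * i.e. 7-bit words packed four to a FIFO entry (4 x 8 granularity), MSB
+ * first, applied to both the TX and RX FIFOs.
+ */
+static void __maybe_unused example_geni_se_packing(struct geni_se *se)
+{
+ geni_se_config_packing(se, 7, 4, true, true, true);
+}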
+
+static void geni_se_clks_off(struct geni_se *se)
+{
+ struct geni_wrapper *wrapper = se->wrapper;
+
+ clk_disable_unprepare(se->clk);
+ clk_bulk_disable_unprepare(ARRAY_SIZE(wrapper->ahb_clks),
+ wrapper->ahb_clks);
+}
+
+/**
+ * geni_se_resources_off() - Turn off resources associated with the serial
+ * engine
+ * @se: Pointer to the concerned serial engine.
+ *
+ * Return: 0 on success, standard Linux error codes on failure/error.
+ */
+int geni_se_resources_off(struct geni_se *se)
+{
+ int ret;
+
+ if (has_acpi_companion(se->dev))
+ return 0;
+
+ ret = pinctrl_pm_select_sleep_state(se->dev);
+ if (ret)
+ return ret;
+
+ geni_se_clks_off(se);
+ return 0;
+}
+EXPORT_SYMBOL(geni_se_resources_off);
+
+static int geni_se_clks_on(struct geni_se *se)
+{
+ int ret;
+ struct geni_wrapper *wrapper = se->wrapper;
+
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(wrapper->ahb_clks),
+ wrapper->ahb_clks);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(se->clk);
+ if (ret)
+ clk_bulk_disable_unprepare(ARRAY_SIZE(wrapper->ahb_clks),
+ wrapper->ahb_clks);
+ return ret;
+}
+
+/**
+ * geni_se_resources_on() - Turn on resources associated with the serial
+ * engine
+ * @se: Pointer to the concerned serial engine.
+ *
+ * Return: 0 on success, standard Linux error codes on failure/error.
+ */
+int geni_se_resources_on(struct geni_se *se)
+{
+ int ret;
+
+ if (has_acpi_companion(se->dev))
+ return 0;
+
+ ret = geni_se_clks_on(se);
+ if (ret)
+ return ret;
+
+ ret = pinctrl_pm_select_default_state(se->dev);
+ if (ret)
+ geni_se_clks_off(se);
+
+ return ret;
+}
+EXPORT_SYMBOL(geni_se_resources_on);
+
+/**
+ * geni_se_clk_tbl_get() - Get the clock table to program DFS
+ * @se: Pointer to the concerned serial engine.
+ * @tbl: Table in which the output is returned.
+ *
+ * This function is called by the protocol drivers to determine the different
+ * clock frequencies supported by the serial engine core clock. The protocol
+ * drivers use the output to determine the clock frequency index to be
+ * programmed into DFS.
+ *
+ * Return: number of valid performance levels in the table on success,
+ * standard Linux error codes on failure.
+ */
+int geni_se_clk_tbl_get(struct geni_se *se, unsigned long **tbl)
+{
+ long freq = 0;
+ int i;
+
+ if (se->clk_perf_tbl) {
+ *tbl = se->clk_perf_tbl;
+ return se->num_clk_levels;
+ }
+
+ se->clk_perf_tbl = devm_kcalloc(se->dev, MAX_CLK_PERF_LEVEL,
+ sizeof(*se->clk_perf_tbl),
+ GFP_KERNEL);
+ if (!se->clk_perf_tbl)
+ return -ENOMEM;
+
+ for (i = 0; i < MAX_CLK_PERF_LEVEL; i++) {
+ freq = clk_round_rate(se->clk, freq + 1);
+ if (freq <= 0 || freq == se->clk_perf_tbl[i - 1])
+ break;
+ se->clk_perf_tbl[i] = freq;
+ }
+ se->num_clk_levels = i;
+ *tbl = se->clk_perf_tbl;
+ return se->num_clk_levels;
+}
+EXPORT_SYMBOL(geni_se_clk_tbl_get);
+
+/**
+ * geni_se_clk_freq_match() - Get the matching or closest SE clock frequency
+ * @se: Pointer to the concerned serial engine.
+ * @req_freq: Requested clock frequency.
+ * @index: Index of the resultant frequency in the table.
+ * @res_freq: Resultant frequency of the source clock.
+ * @exact: Flag to indicate exact multiple requirement of the requested
+ * frequency.
+ *
+ * This function is called by the protocol drivers to determine the best match
+ * of the requested frequency as provided by the serial engine clock in order
+ * to meet the performance requirements.
+ *
+ * If we return success:
+ * - if @exact is true then @res_freq / <an_integer> == @req_freq
+ * - if @exact is false then @res_freq / <an_integer> <= @req_freq
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int geni_se_clk_freq_match(struct geni_se *se, unsigned long req_freq,
+ unsigned int *index, unsigned long *res_freq,
+ bool exact)
+{
+ unsigned long *tbl;
+ int num_clk_levels;
+ int i;
+ unsigned long best_delta;
+ unsigned long new_delta;
+ unsigned int divider;
+
+ num_clk_levels = geni_se_clk_tbl_get(se, &tbl);
+ if (num_clk_levels < 0)
+ return num_clk_levels;
+
+ if (num_clk_levels == 0)
+ return -EINVAL;
+
+ best_delta = ULONG_MAX;
+ for (i = 0; i < num_clk_levels; i++) {
+ divider = DIV_ROUND_UP(tbl[i], req_freq);
+ new_delta = req_freq - tbl[i] / divider;
+ if (new_delta < best_delta) {
+ /* We have a new best! */
+ *index = i;
+ *res_freq = tbl[i];
+
+ /* If the new best is exact then we're done */
+ if (new_delta == 0)
+ return 0;
+
+ /* Record how close we got */
+ best_delta = new_delta;
+ }
+ }
+
+ if (exact)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(geni_se_clk_freq_match);
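+
+/*
+ * Example: a sketch of picking a source clock rate that divides down exactly
+ * to a 19.2 MHz interface clock. The target rate is a placeholder; protocol
+ * drivers compute it from the requested bus speed.
+ */
+static int __maybe_unused example_geni_se_pick_rate(struct geni_se *se)
+{
+ unsigned long res_freq;
+ unsigned int index;
+ int ret;
+
+ ret = geni_se_clk_freq_match(se, 19200000, &index, &res_freq, true);
+ if (ret)
+ return ret;
+
+ /* res_freq / <an integer> == 19200000; @index selects the DFS entry */
+ return clk_set_rate(se->clk, res_freq);
+}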
+
+#define GENI_SE_DMA_DONE_EN BIT(0)
+#define GENI_SE_DMA_EOT_EN BIT(1)
+#define GENI_SE_DMA_AHB_ERR_EN BIT(2)
+#define GENI_SE_DMA_EOT_BUF BIT(0)
+/**
+ * geni_se_tx_dma_prep() - Prepare the serial engine for TX DMA transfer
+ * @se: Pointer to the concerned serial engine.
+ * @buf: Pointer to the TX buffer.
+ * @len: Length of the TX buffer.
+ * @iova: Pointer to store the mapped DMA address.
+ *
+ * This function is used to prepare the buffers for DMA TX.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int geni_se_tx_dma_prep(struct geni_se *se, void *buf, size_t len,
+ dma_addr_t *iova)
+{
+ struct geni_wrapper *wrapper = se->wrapper;
+ u32 val;
+
+ if (!wrapper)
+ return -EINVAL;
+
+ *iova = dma_map_single(wrapper->dev, buf, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(wrapper->dev, *iova))
+ return -EIO;
+
+ val = GENI_SE_DMA_DONE_EN;
+ val |= GENI_SE_DMA_EOT_EN;
+ val |= GENI_SE_DMA_AHB_ERR_EN;
+ writel_relaxed(val, se->base + SE_DMA_TX_IRQ_EN_SET);
+ writel_relaxed(lower_32_bits(*iova), se->base + SE_DMA_TX_PTR_L);
+ writel_relaxed(upper_32_bits(*iova), se->base + SE_DMA_TX_PTR_H);
+ writel_relaxed(GENI_SE_DMA_EOT_BUF, se->base + SE_DMA_TX_ATTR);
+ writel(len, se->base + SE_DMA_TX_LEN);
+ return 0;
+}
+EXPORT_SYMBOL(geni_se_tx_dma_prep);
+
+/**
+ * geni_se_rx_dma_prep() - Prepare the serial engine for RX DMA transfer
+ * @se: Pointer to the concerned serial engine.
+ * @buf: Pointer to the RX buffer.
+ * @len: Length of the RX buffer.
+ * @iova: Pointer to store the mapped DMA address.
+ *
+ * This function is used to prepare the buffers for DMA RX.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int geni_se_rx_dma_prep(struct geni_se *se, void *buf, size_t len,
+ dma_addr_t *iova)
+{
+ struct geni_wrapper *wrapper = se->wrapper;
+ u32 val;
+
+ if (!wrapper)
+ return -EINVAL;
+
+ *iova = dma_map_single(wrapper->dev, buf, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(wrapper->dev, *iova))
+ return -EIO;
+
+ val = GENI_SE_DMA_DONE_EN;
+ val |= GENI_SE_DMA_EOT_EN;
+ val |= GENI_SE_DMA_AHB_ERR_EN;
+ writel_relaxed(val, se->base + SE_DMA_RX_IRQ_EN_SET);
+ writel_relaxed(lower_32_bits(*iova), se->base + SE_DMA_RX_PTR_L);
+ writel_relaxed(upper_32_bits(*iova), se->base + SE_DMA_RX_PTR_H);
+ /* RX does not have EOT buffer type bit. So just reset RX_ATTR */
+ writel_relaxed(0, se->base + SE_DMA_RX_ATTR);
+ writel(len, se->base + SE_DMA_RX_LEN);
+ return 0;
+}
+EXPORT_SYMBOL(geni_se_rx_dma_prep);
+
+/**
+ * geni_se_tx_dma_unprep() - Unprepare the serial engine after TX DMA transfer
+ * @se: Pointer to the concerned serial engine.
+ * @iova: DMA address of the TX buffer.
+ * @len: Length of the TX buffer.
+ *
+ * This function is used to unprepare the DMA buffers after DMA TX.
+ */
+void geni_se_tx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len)
+{
+ struct geni_wrapper *wrapper = se->wrapper;
+
+ if (!dma_mapping_error(wrapper->dev, iova))
+ dma_unmap_single(wrapper->dev, iova, len, DMA_TO_DEVICE);
+}
+EXPORT_SYMBOL(geni_se_tx_dma_unprep);
+
+/**
+ * geni_se_rx_dma_unprep() - Unprepare the serial engine after RX DMA transfer
+ * @se: Pointer to the concerned serial engine.
+ * @iova: DMA address of the RX buffer.
+ * @len: Length of the RX buffer.
+ *
+ * This function is used to unprepare the DMA buffers after DMA RX.
+ */
+void geni_se_rx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len)
+{
+ struct geni_wrapper *wrapper = se->wrapper;
+
+ if (!dma_mapping_error(wrapper->dev, iova))
+ dma_unmap_single(wrapper->dev, iova, len, DMA_FROM_DEVICE);
+}
+EXPORT_SYMBOL(geni_se_rx_dma_unprep);
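+
+/*
+ * Example: a sketch of the prepare/unprepare pairing around a TX DMA
+ * transfer. In a real driver the unprepare call is normally made from the
+ * DMA done interrupt handler rather than inline as shown here.
+ */
+static int __maybe_unused example_geni_se_dma_tx(struct geni_se *se,
+ void *buf, size_t len)
+{
+ dma_addr_t iova;
+ int ret;
+
+ geni_se_select_mode(se, GENI_SE_DMA);
+
+ ret = geni_se_tx_dma_prep(se, buf, len, &iova);
+ if (ret)
+ return ret;
+
+ /* ... wait for the TX DMA done interrupt ... */
+
+ geni_se_tx_dma_unprep(se, iova, len);
+ return 0;
+}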
+
+int geni_icc_get(struct geni_se *se, const char *icc_ddr)
+{
+ int i, err;
+ const char *icc_names[] = {"qup-core", "qup-config", icc_ddr};
+
+ if (has_acpi_companion(se->dev))
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) {
+ if (!icc_names[i])
+ continue;
+
+ se->icc_paths[i].path = devm_of_icc_get(se->dev, icc_names[i]);
+ if (IS_ERR(se->icc_paths[i].path))
+ goto err;
+ }
+
+ return 0;
+
+err:
+ err = PTR_ERR(se->icc_paths[i].path);
+ if (err != -EPROBE_DEFER)
+ dev_err_ratelimited(se->dev, "Failed to get ICC path '%s': %d\n",
+ icc_names[i], err);
+ return err;
+
+}
+EXPORT_SYMBOL(geni_icc_get);
+
+int geni_icc_set_bw(struct geni_se *se)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) {
+ ret = icc_set_bw(se->icc_paths[i].path,
+ se->icc_paths[i].avg_bw, se->icc_paths[i].avg_bw);
+ if (ret) {
+ dev_err_ratelimited(se->dev, "ICC BW voting failed on path '%s': %d\n",
+ icc_path_names[i], ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(geni_icc_set_bw);
+
+void geni_icc_set_tag(struct geni_se *se, u32 tag)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++)
+ icc_set_tag(se->icc_paths[i].path, tag);
+}
+EXPORT_SYMBOL(geni_icc_set_tag);
+
+/* To do: Replace this by icc_bulk_enable once it's implemented in ICC core */
+int geni_icc_enable(struct geni_se *se)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) {
+ ret = icc_enable(se->icc_paths[i].path);
+ if (ret) {
+ dev_err_ratelimited(se->dev, "ICC enable failed on path '%s': %d\n",
+ icc_path_names[i], ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(geni_icc_enable);
+
+int geni_icc_disable(struct geni_se *se)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) {
+ ret = icc_disable(se->icc_paths[i].path);
+ if (ret) {
+ dev_err_ratelimited(se->dev, "ICC disable failed on path '%s': %d\n",
+ icc_path_names[i], ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(geni_icc_disable);
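+
+/*
+ * Example: a sketch of a protocol driver's ICC setup using the helpers
+ * above, assuming the GENI_TO_CORE/CPU_TO_GENI path indices and the
+ * GENI_DEFAULT_BW minimal vote provided by <linux/qcom-geni-se.h>. Real
+ * drivers scale the bandwidth with the selected clock rate.
+ */
+static int __maybe_unused example_geni_icc_setup(struct geni_se *se)
+{
+ int ret;
+
+ ret = geni_icc_get(se, "qup-memory");
+ if (ret)
+ return ret;
+
+ se->icc_paths[GENI_TO_CORE].avg_bw = GENI_DEFAULT_BW;
+ se->icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW;
+
+ ret = geni_icc_set_bw(se);
+ if (ret)
+ return ret;
+
+ return geni_icc_enable(se);
+}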
+
+static int geni_se_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct geni_wrapper *wrapper;
+ int ret;
+
+ wrapper = devm_kzalloc(dev, sizeof(*wrapper), GFP_KERNEL);
+ if (!wrapper)
+ return -ENOMEM;
+
+ wrapper->dev = dev;
+ wrapper->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(wrapper->base))
+ return PTR_ERR(wrapper->base);
+
+ if (!has_acpi_companion(&pdev->dev)) {
+ wrapper->ahb_clks[0].id = "m-ahb";
+ wrapper->ahb_clks[1].id = "s-ahb";
+ ret = devm_clk_bulk_get(dev, NUM_AHB_CLKS, wrapper->ahb_clks);
+ if (ret) {
+ dev_err(dev, "Err getting AHB clks %d\n", ret);
+ return ret;
+ }
+ }
+
+ dev_set_drvdata(dev, wrapper);
+ dev_dbg(dev, "GENI SE Driver probed\n");
+ return devm_of_platform_populate(dev);
+}
+
+static const struct of_device_id geni_se_dt_match[] = {
+ { .compatible = "qcom,geni-se-qup", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, geni_se_dt_match);
+
+static struct platform_driver geni_se_driver = {
+ .driver = {
+ .name = "geni_se_qup",
+ .of_match_table = geni_se_dt_match,
+ },
+ .probe = geni_se_probe,
+};
+module_platform_driver(geni_se_driver);
+
+MODULE_DESCRIPTION("GENI Serial Engine Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c
new file mode 100644
index 000000000..18c856056
--- /dev/null
+++ b/drivers/soc/qcom/qcom_aoss.c
@@ -0,0 +1,574 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019, Linaro Ltd
+ */
+#include <linux/clk-provider.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/thermal.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/qcom_aoss.h>
+
+#define QMP_DESC_MAGIC 0x0
+#define QMP_DESC_VERSION 0x4
+#define QMP_DESC_FEATURES 0x8
+
+/* AOP-side offsets */
+#define QMP_DESC_UCORE_LINK_STATE 0xc
+#define QMP_DESC_UCORE_LINK_STATE_ACK 0x10
+#define QMP_DESC_UCORE_CH_STATE 0x14
+#define QMP_DESC_UCORE_CH_STATE_ACK 0x18
+#define QMP_DESC_UCORE_MBOX_SIZE 0x1c
+#define QMP_DESC_UCORE_MBOX_OFFSET 0x20
+
+/* Linux-side offsets */
+#define QMP_DESC_MCORE_LINK_STATE 0x24
+#define QMP_DESC_MCORE_LINK_STATE_ACK 0x28
+#define QMP_DESC_MCORE_CH_STATE 0x2c
+#define QMP_DESC_MCORE_CH_STATE_ACK 0x30
+#define QMP_DESC_MCORE_MBOX_SIZE 0x34
+#define QMP_DESC_MCORE_MBOX_OFFSET 0x38
+
+#define QMP_STATE_UP GENMASK(15, 0)
+#define QMP_STATE_DOWN GENMASK(31, 16)
+
+#define QMP_MAGIC 0x4d41494c /* mail */
+#define QMP_VERSION 1
+
+/* 64 bytes is enough to store the requests and keeps the length a multiple of 4 */
+#define QMP_MSG_LEN 64
+
+#define QMP_NUM_COOLING_RESOURCES 2
+
+static bool qmp_cdev_max_state = 1;
+
+struct qmp_cooling_device {
+ struct thermal_cooling_device *cdev;
+ struct qmp *qmp;
+ char *name;
+ bool state;
+};
+
+/**
+ * struct qmp - driver state for QMP implementation
+ * @msgram: iomem referencing the message RAM used for communication
+ * @dev: reference to QMP device
+ * @mbox_client: mailbox client used to ring the doorbell on transmit
+ * @mbox_chan: mailbox channel used to ring the doorbell on transmit
+ * @offset: offset within @msgram where messages should be written
+ * @size: maximum size of the messages to be transmitted
+ * @event: wait_queue for synchronization with the IRQ
+ * @tx_lock: provides synchronization between multiple callers of qmp_send()
+ * @qdss_clk: QDSS clock hw struct
+ * @cooling_devs: thermal cooling devices
+ */
+struct qmp {
+ void __iomem *msgram;
+ struct device *dev;
+
+ struct mbox_client mbox_client;
+ struct mbox_chan *mbox_chan;
+
+ size_t offset;
+ size_t size;
+
+ wait_queue_head_t event;
+
+ struct mutex tx_lock;
+
+ struct clk_hw qdss_clk;
+ struct qmp_cooling_device *cooling_devs;
+};
+
+static void qmp_kick(struct qmp *qmp)
+{
+ mbox_send_message(qmp->mbox_chan, NULL);
+ mbox_client_txdone(qmp->mbox_chan, 0);
+}
+
+static bool qmp_magic_valid(struct qmp *qmp)
+{
+ return readl(qmp->msgram + QMP_DESC_MAGIC) == QMP_MAGIC;
+}
+
+static bool qmp_link_acked(struct qmp *qmp)
+{
+ return readl(qmp->msgram + QMP_DESC_MCORE_LINK_STATE_ACK) == QMP_STATE_UP;
+}
+
+static bool qmp_mcore_channel_acked(struct qmp *qmp)
+{
+ return readl(qmp->msgram + QMP_DESC_MCORE_CH_STATE_ACK) == QMP_STATE_UP;
+}
+
+static bool qmp_ucore_channel_up(struct qmp *qmp)
+{
+ return readl(qmp->msgram + QMP_DESC_UCORE_CH_STATE) == QMP_STATE_UP;
+}
+
+static int qmp_open(struct qmp *qmp)
+{
+ int ret;
+ u32 val;
+
+ if (!qmp_magic_valid(qmp)) {
+ dev_err(qmp->dev, "QMP magic doesn't match\n");
+ return -EINVAL;
+ }
+
+ val = readl(qmp->msgram + QMP_DESC_VERSION);
+ if (val != QMP_VERSION) {
+ dev_err(qmp->dev, "unsupported QMP version %d\n", val);
+ return -EINVAL;
+ }
+
+ qmp->offset = readl(qmp->msgram + QMP_DESC_MCORE_MBOX_OFFSET);
+ qmp->size = readl(qmp->msgram + QMP_DESC_MCORE_MBOX_SIZE);
+ if (!qmp->size) {
+ dev_err(qmp->dev, "invalid mailbox size\n");
+ return -EINVAL;
+ }
+
+ /* Ack remote core's link state */
+ val = readl(qmp->msgram + QMP_DESC_UCORE_LINK_STATE);
+ writel(val, qmp->msgram + QMP_DESC_UCORE_LINK_STATE_ACK);
+
+ /* Set local core's link state to up */
+ writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);
+
+ qmp_kick(qmp);
+
+ ret = wait_event_timeout(qmp->event, qmp_link_acked(qmp), HZ);
+ if (!ret) {
+ dev_err(qmp->dev, "ucore didn't ack link\n");
+ goto timeout_close_link;
+ }
+
+ writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_MCORE_CH_STATE);
+
+ qmp_kick(qmp);
+
+ ret = wait_event_timeout(qmp->event, qmp_ucore_channel_up(qmp), HZ);
+ if (!ret) {
+ dev_err(qmp->dev, "ucore didn't open channel\n");
+ goto timeout_close_channel;
+ }
+
+ /* Ack remote core's channel state */
+ writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_UCORE_CH_STATE_ACK);
+
+ qmp_kick(qmp);
+
+ ret = wait_event_timeout(qmp->event, qmp_mcore_channel_acked(qmp), HZ);
+ if (!ret) {
+ dev_err(qmp->dev, "ucore didn't ack channel\n");
+ goto timeout_close_channel;
+ }
+
+ return 0;
+
+timeout_close_channel:
+ writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_CH_STATE);
+
+timeout_close_link:
+ writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);
+ qmp_kick(qmp);
+
+ return -ETIMEDOUT;
+}
+
+static void qmp_close(struct qmp *qmp)
+{
+ writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_CH_STATE);
+ writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);
+ qmp_kick(qmp);
+}
+
+static irqreturn_t qmp_intr(int irq, void *data)
+{
+ struct qmp *qmp = data;
+
+ wake_up_all(&qmp->event);
+
+ return IRQ_HANDLED;
+}
+
+static bool qmp_message_empty(struct qmp *qmp)
+{
+ return readl(qmp->msgram + qmp->offset) == 0;
+}
+
+/**
+ * qmp_send() - send a message to the AOSS
+ * @qmp: qmp context
+ * @data: message to be sent
+ * @len: length of the message
+ *
+ * Transmit @data to AOSS and wait for the AOSS to acknowledge the message.
+ * @len must be a multiple of 4 and not longer than the mailbox size. Access is
+ * synchronized by this implementation.
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+int qmp_send(struct qmp *qmp, const void *data, size_t len)
+{
+ long time_left;
+ int ret;
+
+ if (WARN_ON(IS_ERR_OR_NULL(qmp) || !data))
+ return -EINVAL;
+
+ if (WARN_ON(len + sizeof(u32) > qmp->size))
+ return -EINVAL;
+
+ if (WARN_ON(len % sizeof(u32)))
+ return -EINVAL;
+
+ mutex_lock(&qmp->tx_lock);
+
+ /* The message RAM only implements 32-bit accesses */
+ __iowrite32_copy(qmp->msgram + qmp->offset + sizeof(u32),
+ data, len / sizeof(u32));
+ writel(len, qmp->msgram + qmp->offset);
+
+ /* Read back len to confirm data written in message RAM */
+ readl(qmp->msgram + qmp->offset);
+ qmp_kick(qmp);
+
+ time_left = wait_event_interruptible_timeout(qmp->event,
+ qmp_message_empty(qmp), HZ);
+ if (!time_left) {
+ dev_err(qmp->dev, "ucore did not ack channel\n");
+ ret = -ETIMEDOUT;
+
+ /* Clear message from buffer */
+ writel(0, qmp->msgram + qmp->offset);
+ } else {
+ ret = 0;
+ }
+
+ mutex_unlock(&qmp->tx_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(qmp_send);
+
+static int qmp_qdss_clk_prepare(struct clk_hw *hw)
+{
+ static const char buf[QMP_MSG_LEN] = "{class: clock, res: qdss, val: 1}";
+ struct qmp *qmp = container_of(hw, struct qmp, qdss_clk);
+
+ return qmp_send(qmp, buf, sizeof(buf));
+}
+
+static void qmp_qdss_clk_unprepare(struct clk_hw *hw)
+{
+ static const char buf[QMP_MSG_LEN] = "{class: clock, res: qdss, val: 0}";
+ struct qmp *qmp = container_of(hw, struct qmp, qdss_clk);
+
+ qmp_send(qmp, buf, sizeof(buf));
+}
+
+static const struct clk_ops qmp_qdss_clk_ops = {
+ .prepare = qmp_qdss_clk_prepare,
+ .unprepare = qmp_qdss_clk_unprepare,
+};
+
+static int qmp_qdss_clk_add(struct qmp *qmp)
+{
+ static const struct clk_init_data qdss_init = {
+ .ops = &qmp_qdss_clk_ops,
+ .name = "qdss",
+ };
+ int ret;
+
+ qmp->qdss_clk.init = &qdss_init;
+ ret = clk_hw_register(qmp->dev, &qmp->qdss_clk);
+ if (ret < 0) {
+ dev_err(qmp->dev, "failed to register qdss clock\n");
+ return ret;
+ }
+
+ ret = of_clk_add_hw_provider(qmp->dev->of_node, of_clk_hw_simple_get,
+ &qmp->qdss_clk);
+ if (ret < 0) {
+ dev_err(qmp->dev, "unable to register of clk hw provider\n");
+ clk_hw_unregister(&qmp->qdss_clk);
+ }
+
+ return ret;
+}
+
+static void qmp_qdss_clk_remove(struct qmp *qmp)
+{
+ of_clk_del_provider(qmp->dev->of_node);
+ clk_hw_unregister(&qmp->qdss_clk);
+}
+
+static int qmp_cdev_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = qmp_cdev_max_state;
+ return 0;
+}
+
+static int qmp_cdev_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct qmp_cooling_device *qmp_cdev = cdev->devdata;
+
+ *state = qmp_cdev->state;
+ return 0;
+}
+
+static int qmp_cdev_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ struct qmp_cooling_device *qmp_cdev = cdev->devdata;
+ char buf[QMP_MSG_LEN] = {};
+ bool cdev_state;
+ int ret;
+
+ /* Normalize state */
+ cdev_state = !!state;
+
+ if (qmp_cdev->state == state)
+ return 0;
+
+ snprintf(buf, sizeof(buf),
+ "{class: volt_flr, event:zero_temp, res:%s, value:%s}",
+ qmp_cdev->name,
+ cdev_state ? "on" : "off");
+
+ ret = qmp_send(qmp_cdev->qmp, buf, sizeof(buf));
+
+ if (!ret)
+ qmp_cdev->state = cdev_state;
+
+ return ret;
+}
+
+static const struct thermal_cooling_device_ops qmp_cooling_device_ops = {
+ .get_max_state = qmp_cdev_get_max_state,
+ .get_cur_state = qmp_cdev_get_cur_state,
+ .set_cur_state = qmp_cdev_set_cur_state,
+};
+
+static int qmp_cooling_device_add(struct qmp *qmp,
+ struct qmp_cooling_device *qmp_cdev,
+ struct device_node *node)
+{
+ char *cdev_name = (char *)node->name;
+
+ qmp_cdev->qmp = qmp;
+ qmp_cdev->state = !qmp_cdev_max_state;
+ qmp_cdev->name = cdev_name;
+ qmp_cdev->cdev = devm_thermal_of_cooling_device_register
+ (qmp->dev, node,
+ cdev_name,
+ qmp_cdev, &qmp_cooling_device_ops);
+
+ if (IS_ERR(qmp_cdev->cdev))
+ dev_err(qmp->dev, "unable to register %s cooling device\n",
+ cdev_name);
+
+ return PTR_ERR_OR_ZERO(qmp_cdev->cdev);
+}
+
+static int qmp_cooling_devices_register(struct qmp *qmp)
+{
+ struct device_node *np, *child;
+ int count = 0;
+ int ret;
+
+ np = qmp->dev->of_node;
+
+ qmp->cooling_devs = devm_kcalloc(qmp->dev, QMP_NUM_COOLING_RESOURCES,
+ sizeof(*qmp->cooling_devs),
+ GFP_KERNEL);
+
+ if (!qmp->cooling_devs)
+ return -ENOMEM;
+
+ for_each_available_child_of_node(np, child) {
+ if (!of_find_property(child, "#cooling-cells", NULL))
+ continue;
+ ret = qmp_cooling_device_add(qmp, &qmp->cooling_devs[count++],
+ child);
+ if (ret) {
+ of_node_put(child);
+ goto unroll;
+ }
+ }
+
+ if (!count)
+ devm_kfree(qmp->dev, qmp->cooling_devs);
+
+ return 0;
+
+unroll:
+ while (--count >= 0)
+ thermal_cooling_device_unregister
+ (qmp->cooling_devs[count].cdev);
+ devm_kfree(qmp->dev, qmp->cooling_devs);
+
+ return ret;
+}
+
+static void qmp_cooling_devices_remove(struct qmp *qmp)
+{
+ int i;
+
+ for (i = 0; i < QMP_NUM_COOLING_RESOURCES; i++)
+ thermal_cooling_device_unregister(qmp->cooling_devs[i].cdev);
+}
+
+/**
+ * qmp_get() - get a qmp handle from a device
+ * @dev: client device pointer
+ *
+ * Return: handle to qmp device on success, ERR_PTR() on failure
+ */
+struct qmp *qmp_get(struct device *dev)
+{
+ struct platform_device *pdev;
+ struct device_node *np;
+ struct qmp *qmp;
+
+ if (!dev || !dev->of_node)
+ return ERR_PTR(-EINVAL);
+
+ np = of_parse_phandle(dev->of_node, "qcom,qmp", 0);
+ if (!np)
+ return ERR_PTR(-ENODEV);
+
+ pdev = of_find_device_by_node(np);
+ of_node_put(np);
+ if (!pdev)
+ return ERR_PTR(-EINVAL);
+
+ qmp = platform_get_drvdata(pdev);
+
+ if (!qmp) {
+ put_device(&pdev->dev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+ return qmp;
+}
+EXPORT_SYMBOL(qmp_get);
+
+/**
+ * qmp_put() - release a qmp handle
+ * @qmp: qmp handle obtained from qmp_get()
+ */
+void qmp_put(struct qmp *qmp)
+{
+ /*
+ * Match get_device() inside of_find_device_by_node() in
+ * qmp_get()
+ */
+ if (!IS_ERR_OR_NULL(qmp))
+ put_device(qmp->dev);
+}
+EXPORT_SYMBOL(qmp_put);
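A rough sketch of the expected client pattern: a consumer platform driver whose devicetree node carries a "qcom,qmp" phandle grabs the handle in probe and drops it in remove. The example driver is hypothetical; only qmp_get() and qmp_put() come from this file.

static int example_client_probe(struct platform_device *pdev)
{
	struct qmp *qmp;

	/* Resolves the "qcom,qmp" phandle of the client's DT node */
	qmp = qmp_get(&pdev->dev);
	if (IS_ERR(qmp))
		return PTR_ERR(qmp);	/* may be -EPROBE_DEFER */

	platform_set_drvdata(pdev, qmp);

	return 0;
}

static int example_client_remove(struct platform_device *pdev)
{
	/* Drops the device reference taken by qmp_get() */
	qmp_put(platform_get_drvdata(pdev));

	return 0;
}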
+
+static int qmp_probe(struct platform_device *pdev)
+{
+ struct qmp *qmp;
+ int irq;
+ int ret;
+
+ qmp = devm_kzalloc(&pdev->dev, sizeof(*qmp), GFP_KERNEL);
+ if (!qmp)
+ return -ENOMEM;
+
+ qmp->dev = &pdev->dev;
+ init_waitqueue_head(&qmp->event);
+ mutex_init(&qmp->tx_lock);
+
+ qmp->msgram = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(qmp->msgram))
+ return PTR_ERR(qmp->msgram);
+
+ qmp->mbox_client.dev = &pdev->dev;
+ qmp->mbox_client.knows_txdone = true;
+ qmp->mbox_chan = mbox_request_channel(&qmp->mbox_client, 0);
+ if (IS_ERR(qmp->mbox_chan)) {
+ dev_err(&pdev->dev, "failed to acquire ipc mailbox\n");
+ return PTR_ERR(qmp->mbox_chan);
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(&pdev->dev, irq, qmp_intr, 0,
+ "aoss-qmp", qmp);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request interrupt\n");
+ goto err_free_mbox;
+ }
+
+ ret = qmp_open(qmp);
+ if (ret < 0)
+ goto err_free_mbox;
+
+ ret = qmp_qdss_clk_add(qmp);
+ if (ret)
+ goto err_close_qmp;
+
+ ret = qmp_cooling_devices_register(qmp);
+ if (ret)
+ dev_err(&pdev->dev, "failed to register aoss cooling devices\n");
+
+ platform_set_drvdata(pdev, qmp);
+
+ return 0;
+
+err_close_qmp:
+ qmp_close(qmp);
+err_free_mbox:
+ mbox_free_channel(qmp->mbox_chan);
+
+ return ret;
+}
+
+static int qmp_remove(struct platform_device *pdev)
+{
+ struct qmp *qmp = platform_get_drvdata(pdev);
+
+ qmp_qdss_clk_remove(qmp);
+ qmp_cooling_devices_remove(qmp);
+
+ qmp_close(qmp);
+ mbox_free_channel(qmp->mbox_chan);
+
+ return 0;
+}
+
+static const struct of_device_id qmp_dt_match[] = {
+ { .compatible = "qcom,sc7180-aoss-qmp", },
+ { .compatible = "qcom,sc7280-aoss-qmp", },
+ { .compatible = "qcom,sdm845-aoss-qmp", },
+ { .compatible = "qcom,sm8150-aoss-qmp", },
+ { .compatible = "qcom,sm8250-aoss-qmp", },
+ { .compatible = "qcom,sm8350-aoss-qmp", },
+ { .compatible = "qcom,aoss-qmp", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qmp_dt_match);
+
+static struct platform_driver qmp_driver = {
+ .driver = {
+ .name = "qcom_aoss_qmp",
+ .of_match_table = qmp_dt_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = qmp_probe,
+ .remove = qmp_remove,
+};
+module_platform_driver(qmp_driver);
+
+MODULE_DESCRIPTION("Qualcomm AOSS QMP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/qcom_gsbi.c b/drivers/soc/qcom/qcom_gsbi.c
new file mode 100644
index 000000000..290bdefbf
--- /dev/null
+++ b/drivers/soc/qcom/qcom_gsbi.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014, The Linux foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <dt-bindings/soc/qcom,gsbi.h>
+
+#define GSBI_CTRL_REG 0x0000
+#define GSBI_PROTOCOL_SHIFT 4
+#define MAX_GSBI 12
+
+#define TCSR_ADM_CRCI_BASE 0x70
+
+struct crci_config {
+ u32 num_rows;
+ const u32 (*array)[MAX_GSBI];
+};
+
+static const u32 crci_ipq8064[][MAX_GSBI] = {
+ {
+ 0x000003, 0x00000c, 0x000030, 0x0000c0,
+ 0x000300, 0x000c00, 0x003000, 0x00c000,
+ 0x030000, 0x0c0000, 0x300000, 0xc00000
+ },
+ {
+ 0x000003, 0x00000c, 0x000030, 0x0000c0,
+ 0x000300, 0x000c00, 0x003000, 0x00c000,
+ 0x030000, 0x0c0000, 0x300000, 0xc00000
+ },
+};
+
+static const struct crci_config config_ipq8064 = {
+ .num_rows = ARRAY_SIZE(crci_ipq8064),
+ .array = crci_ipq8064,
+};
+
+static const unsigned int crci_apq8064[][MAX_GSBI] = {
+ {
+ 0x001800, 0x006000, 0x000030, 0x0000c0,
+ 0x000300, 0x000400, 0x000000, 0x000000,
+ 0x000000, 0x000000, 0x000000, 0x000000
+ },
+ {
+ 0x000000, 0x000000, 0x000000, 0x000000,
+ 0x000000, 0x000020, 0x0000c0, 0x000000,
+ 0x000000, 0x000000, 0x000000, 0x000000
+ },
+};
+
+static const struct crci_config config_apq8064 = {
+ .num_rows = ARRAY_SIZE(crci_apq8064),
+ .array = crci_apq8064,
+};
+
+static const unsigned int crci_msm8960[][MAX_GSBI] = {
+ {
+ 0x000003, 0x00000c, 0x000030, 0x0000c0,
+ 0x000300, 0x000400, 0x000000, 0x000000,
+ 0x000000, 0x000000, 0x000000, 0x000000
+ },
+ {
+ 0x000000, 0x000000, 0x000000, 0x000000,
+ 0x000000, 0x000020, 0x0000c0, 0x000300,
+ 0x001800, 0x006000, 0x000000, 0x000000
+ },
+};
+
+static const struct crci_config config_msm8960 = {
+ .num_rows = ARRAY_SIZE(crci_msm8960),
+ .array = crci_msm8960,
+};
+
+static const unsigned int crci_msm8660[][MAX_GSBI] = {
+ { /* ADM 0 - B */
+ 0x000003, 0x00000c, 0x000030, 0x0000c0,
+ 0x000300, 0x000c00, 0x003000, 0x00c000,
+ 0x030000, 0x0c0000, 0x300000, 0xc00000
+ },
+ { /* ADM 0 - B */
+ 0x000003, 0x00000c, 0x000030, 0x0000c0,
+ 0x000300, 0x000c00, 0x003000, 0x00c000,
+ 0x030000, 0x0c0000, 0x300000, 0xc00000
+ },
+ { /* ADM 1 - A */
+ 0x000003, 0x00000c, 0x000030, 0x0000c0,
+ 0x000300, 0x000c00, 0x003000, 0x00c000,
+ 0x030000, 0x0c0000, 0x300000, 0xc00000
+ },
+ { /* ADM 1 - B */
+ 0x000003, 0x00000c, 0x000030, 0x0000c0,
+ 0x000300, 0x000c00, 0x003000, 0x00c000,
+ 0x030000, 0x0c0000, 0x300000, 0xc00000
+ },
+};
+
+static const struct crci_config config_msm8660 = {
+ .num_rows = ARRAY_SIZE(crci_msm8660),
+ .array = crci_msm8660,
+};
+
+struct gsbi_info {
+ struct clk *hclk;
+ u32 mode;
+ u32 crci;
+ struct regmap *tcsr;
+};
+
+static const struct of_device_id tcsr_dt_match[] = {
+ { .compatible = "qcom,tcsr-ipq8064", .data = &config_ipq8064},
+ { .compatible = "qcom,tcsr-apq8064", .data = &config_apq8064},
+ { .compatible = "qcom,tcsr-msm8960", .data = &config_msm8960},
+ { .compatible = "qcom,tcsr-msm8660", .data = &config_msm8660},
+ { },
+};
+
+static int gsbi_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *tcsr_node;
+ const struct of_device_id *match;
+ void __iomem *base;
+ struct gsbi_info *gsbi;
+ int i, ret;
+ u32 mask, gsbi_num;
+ const struct crci_config *config = NULL;
+
+ gsbi = devm_kzalloc(&pdev->dev, sizeof(*gsbi), GFP_KERNEL);
+
+ if (!gsbi)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ /* get the tcsr node and setup the config and regmap */
+ gsbi->tcsr = syscon_regmap_lookup_by_phandle(node, "syscon-tcsr");
+
+ if (!IS_ERR(gsbi->tcsr)) {
+ tcsr_node = of_parse_phandle(node, "syscon-tcsr", 0);
+ if (tcsr_node) {
+ match = of_match_node(tcsr_dt_match, tcsr_node);
+ if (match)
+ config = match->data;
+ else
+ dev_warn(&pdev->dev, "no matching TCSR\n");
+
+ of_node_put(tcsr_node);
+ }
+ }
+
+ if (of_property_read_u32(node, "cell-index", &gsbi_num)) {
+ dev_err(&pdev->dev, "missing cell-index\n");
+ return -EINVAL;
+ }
+
+ if (gsbi_num < 1 || gsbi_num > MAX_GSBI) {
+ dev_err(&pdev->dev, "invalid cell-index\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32(node, "qcom,mode", &gsbi->mode)) {
+ dev_err(&pdev->dev, "missing mode configuration\n");
+ return -EINVAL;
+ }
+
+ /* not required, so default to 0 if not present */
+ of_property_read_u32(node, "qcom,crci", &gsbi->crci);
+
+ dev_info(&pdev->dev, "GSBI port protocol: %d crci: %d\n",
+ gsbi->mode, gsbi->crci);
+ gsbi->hclk = devm_clk_get(&pdev->dev, "iface");
+ if (IS_ERR(gsbi->hclk))
+ return PTR_ERR(gsbi->hclk);
+
+ clk_prepare_enable(gsbi->hclk);
+
+ writel_relaxed((gsbi->mode << GSBI_PROTOCOL_SHIFT) | gsbi->crci,
+ base + GSBI_CTRL_REG);
+
+ /*
+	 * Modify the TCSR to reflect the mode and the ADM CRCI mux.
+	 * Each GSBI has a pair of bits, one for RX and one for TX;
+	 * SPI mode requires both bits cleared, otherwise they are set.
+ */
+ if (config) {
+ for (i = 0; i < config->num_rows; i++) {
+ mask = config->array[i][gsbi_num - 1];
+
+ if (gsbi->mode == GSBI_PROT_SPI)
+ regmap_update_bits(gsbi->tcsr,
+ TCSR_ADM_CRCI_BASE + 4 * i, mask, 0);
+ else
+ regmap_update_bits(gsbi->tcsr,
+ TCSR_ADM_CRCI_BASE + 4 * i, mask, mask);
+
+ }
+ }
+
+ /* make sure the gsbi control write is not reordered */
+ wmb();
+
+ platform_set_drvdata(pdev, gsbi);
+
+ ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
+ if (ret)
+ clk_disable_unprepare(gsbi->hclk);
+ return ret;
+}
+
+static int gsbi_remove(struct platform_device *pdev)
+{
+ struct gsbi_info *gsbi = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(gsbi->hclk);
+
+ return 0;
+}
+
+static const struct of_device_id gsbi_dt_match[] = {
+ { .compatible = "qcom,gsbi-v1.0.0", },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, gsbi_dt_match);
+
+static struct platform_driver gsbi_driver = {
+ .driver = {
+ .name = "gsbi",
+ .of_match_table = gsbi_dt_match,
+ },
+ .probe = gsbi_probe,
+ .remove = gsbi_remove,
+};
+
+module_platform_driver(gsbi_driver);
+
+MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
+MODULE_DESCRIPTION("QCOM GSBI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/qcom_stats.c b/drivers/soc/qcom/qcom_stats.c
new file mode 100644
index 000000000..b252bedf0
--- /dev/null
+++ b/drivers/soc/qcom/qcom_stats.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2011-2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+
+#include <linux/soc/qcom/smem.h>
+#include <clocksource/arm_arch_timer.h>
+
+#define RPM_DYNAMIC_ADDR 0x14
+#define RPM_DYNAMIC_ADDR_MASK 0xFFFF
+
+#define STAT_TYPE_OFFSET 0x0
+#define COUNT_OFFSET 0x4
+#define LAST_ENTERED_AT_OFFSET 0x8
+#define LAST_EXITED_AT_OFFSET 0x10
+#define ACCUMULATED_OFFSET 0x18
+#define CLIENT_VOTES_OFFSET 0x20
+
+struct subsystem_data {
+ const char *name;
+ u32 smem_item;
+ u32 pid;
+};
+
+static const struct subsystem_data subsystems[] = {
+ { "modem", 605, 1 },
+ { "wpss", 605, 13 },
+ { "adsp", 606, 2 },
+ { "cdsp", 607, 5 },
+ { "slpi", 608, 3 },
+ { "gpu", 609, 0 },
+ { "display", 610, 0 },
+ { "adsp_island", 613, 2 },
+ { "slpi_island", 613, 3 },
+};
+
+struct stats_config {
+ size_t stats_offset;
+ size_t num_records;
+ bool appended_stats_avail;
+ bool dynamic_offset;
+ bool subsystem_stats_in_smem;
+};
+
+struct stats_data {
+ bool appended_stats_avail;
+ void __iomem *base;
+};
+
+struct sleep_stats {
+ u32 stat_type;
+ u32 count;
+ u64 last_entered_at;
+ u64 last_exited_at;
+ u64 accumulated;
+};
+
+struct appended_stats {
+ u32 client_votes;
+ u32 reserved[3];
+};
+
+static void qcom_print_stats(struct seq_file *s, const struct sleep_stats *stat)
+{
+ u64 accumulated = stat->accumulated;
+ /*
+	 * If a subsystem is in sleep when reading the sleep stats, adjust
+	 * the accumulated sleep duration to show the actual sleep time.
+ */
+ if (stat->last_entered_at > stat->last_exited_at)
+ accumulated += arch_timer_read_counter() - stat->last_entered_at;
+
+ seq_printf(s, "Count: %u\n", stat->count);
+ seq_printf(s, "Last Entered At: %llu\n", stat->last_entered_at);
+ seq_printf(s, "Last Exited At: %llu\n", stat->last_exited_at);
+ seq_printf(s, "Accumulated Duration: %llu\n", accumulated);
+}
+
+static int qcom_subsystem_sleep_stats_show(struct seq_file *s, void *unused)
+{
+ struct subsystem_data *subsystem = s->private;
+ struct sleep_stats *stat;
+
+	/* Items are allocated lazily, so look up the pointer each time */
+ stat = qcom_smem_get(subsystem->pid, subsystem->smem_item, NULL);
+ if (IS_ERR(stat))
+ return 0;
+
+ qcom_print_stats(s, stat);
+
+ return 0;
+}
+
+static int qcom_soc_sleep_stats_show(struct seq_file *s, void *unused)
+{
+ struct stats_data *d = s->private;
+ void __iomem *reg = d->base;
+ struct sleep_stats stat;
+
+ memcpy_fromio(&stat, reg, sizeof(stat));
+ qcom_print_stats(s, &stat);
+
+ if (d->appended_stats_avail) {
+ struct appended_stats votes;
+
+ memcpy_fromio(&votes, reg + CLIENT_VOTES_OFFSET, sizeof(votes));
+ seq_printf(s, "Client Votes: %#x\n", votes.client_votes);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(qcom_soc_sleep_stats);
+DEFINE_SHOW_ATTRIBUTE(qcom_subsystem_sleep_stats);
+
+static void qcom_create_soc_sleep_stat_files(struct dentry *root, void __iomem *reg,
+ struct stats_data *d,
+ const struct stats_config *config)
+{
+ char stat_type[sizeof(u32) + 1] = {0};
+ size_t stats_offset = config->stats_offset;
+ u32 offset = 0, type;
+ int i, j;
+
+ /*
+	 * On RPM targets, the stats offset location is dynamic and changes
+	 * from target to target, and sometimes from build to build for the
+	 * same target.
+	 *
+	 * In such cases the dynamic address is present at offset 0x14 from
+	 * the base address given in the devicetree. The lower 16 bits hold
+	 * the stats_offset.
+ */
+ if (config->dynamic_offset) {
+ stats_offset = readl(reg + RPM_DYNAMIC_ADDR);
+ stats_offset &= RPM_DYNAMIC_ADDR_MASK;
+ }
+
+ for (i = 0; i < config->num_records; i++) {
+ d[i].base = reg + offset + stats_offset;
+
+ /*
+ * Read the low power mode name and create debugfs file for it.
+		 * The names read can vary depending on the low power modes
+		 * supported by the target, for example:
+ * For rpmh-sleep-stats: "aosd", "cxsd" and "ddr".
+ * For rpm-sleep-stats: "vmin" and "vlow".
+ */
+ type = readl(d[i].base);
+ for (j = 0; j < sizeof(u32); j++) {
+ stat_type[j] = type & 0xff;
+ type = type >> 8;
+ }
+ strim(stat_type);
+ debugfs_create_file(stat_type, 0400, root, &d[i],
+ &qcom_soc_sleep_stats_fops);
+
+ offset += sizeof(struct sleep_stats);
+ if (d[i].appended_stats_avail)
+ offset += sizeof(struct appended_stats);
+ }
+}
+
+static void qcom_create_subsystem_stat_files(struct dentry *root,
+ const struct stats_config *config)
+{
+ int i;
+
+ if (!config->subsystem_stats_in_smem)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(subsystems); i++)
+ debugfs_create_file(subsystems[i].name, 0400, root, (void *)&subsystems[i],
+ &qcom_subsystem_sleep_stats_fops);
+}
+
+static int qcom_stats_probe(struct platform_device *pdev)
+{
+ void __iomem *reg;
+ struct dentry *root;
+ const struct stats_config *config;
+ struct stats_data *d;
+ int i;
+
+ config = device_get_match_data(&pdev->dev);
+ if (!config)
+ return -ENODEV;
+
+ reg = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(reg))
+ return -ENOMEM;
+
+ d = devm_kcalloc(&pdev->dev, config->num_records,
+ sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+
+ for (i = 0; i < config->num_records; i++)
+ d[i].appended_stats_avail = config->appended_stats_avail;
+
+ root = debugfs_create_dir("qcom_stats", NULL);
+
+ qcom_create_subsystem_stat_files(root, config);
+ qcom_create_soc_sleep_stat_files(root, reg, d, config);
+
+ platform_set_drvdata(pdev, root);
+
+ return 0;
+}
+
+static int qcom_stats_remove(struct platform_device *pdev)
+{
+ struct dentry *root = platform_get_drvdata(pdev);
+
+ debugfs_remove_recursive(root);
+
+ return 0;
+}
+
+static const struct stats_config rpm_data = {
+ .stats_offset = 0,
+ .num_records = 2,
+ .appended_stats_avail = true,
+ .dynamic_offset = true,
+ .subsystem_stats_in_smem = false,
+};
+
+/* Older RPM firmwares have the stats at a fixed offset instead */
+static const struct stats_config rpm_data_dba0 = {
+ .stats_offset = 0xdba0,
+ .num_records = 2,
+ .appended_stats_avail = true,
+ .dynamic_offset = false,
+ .subsystem_stats_in_smem = false,
+};
+
+static const struct stats_config rpmh_data_sdm845 = {
+ .stats_offset = 0x48,
+ .num_records = 2,
+ .appended_stats_avail = false,
+ .dynamic_offset = false,
+ .subsystem_stats_in_smem = true,
+};
+
+static const struct stats_config rpmh_data = {
+ .stats_offset = 0x48,
+ .num_records = 3,
+ .appended_stats_avail = false,
+ .dynamic_offset = false,
+ .subsystem_stats_in_smem = true,
+};
+
+static const struct of_device_id qcom_stats_table[] = {
+ { .compatible = "qcom,apq8084-rpm-stats", .data = &rpm_data_dba0 },
+ { .compatible = "qcom,msm8226-rpm-stats", .data = &rpm_data_dba0 },
+ { .compatible = "qcom,msm8916-rpm-stats", .data = &rpm_data_dba0 },
+ { .compatible = "qcom,msm8974-rpm-stats", .data = &rpm_data_dba0 },
+ { .compatible = "qcom,rpm-stats", .data = &rpm_data },
+ { .compatible = "qcom,rpmh-stats", .data = &rpmh_data },
+ { .compatible = "qcom,sdm845-rpmh-stats", .data = &rpmh_data_sdm845 },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qcom_stats_table);
+
+static struct platform_driver qcom_stats = {
+ .probe = qcom_stats_probe,
+ .remove = qcom_stats_remove,
+ .driver = {
+ .name = "qcom_stats",
+ .of_match_table = qcom_stats_table,
+ },
+};
+
+static int __init qcom_stats_init(void)
+{
+ return platform_driver_register(&qcom_stats);
+}
+late_initcall(qcom_stats_init);
+
+static void __exit qcom_stats_exit(void)
+{
+ platform_driver_unregister(&qcom_stats);
+}
+module_exit(qcom_stats_exit)
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. (QTI) Stats driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/qmi_encdec.c b/drivers/soc/qcom/qmi_encdec.c
new file mode 100644
index 000000000..5c7161b18
--- /dev/null
+++ b/drivers/soc/qcom/qmi_encdec.c
@@ -0,0 +1,816 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ */
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/soc/qcom/qmi.h>
+
+#define QMI_ENCDEC_ENCODE_TLV(type, length, p_dst) do { \
+ *p_dst++ = type; \
+ *p_dst++ = ((u8)((length) & 0xFF)); \
+ *p_dst++ = ((u8)(((length) >> 8) & 0xFF)); \
+} while (0)
+
+#define QMI_ENCDEC_DECODE_TLV(p_type, p_length, p_src) do { \
+ *p_type = (u8)*p_src++; \
+ *p_length = (u8)*p_src++; \
+ *p_length |= ((u8)*p_src) << 8; \
+} while (0)
+
+#define QMI_ENCDEC_ENCODE_N_BYTES(p_dst, p_src, size) \
+do { \
+ memcpy(p_dst, p_src, size); \
+ p_dst = (u8 *)p_dst + size; \
+ p_src = (u8 *)p_src + size; \
+} while (0)
+
+#define QMI_ENCDEC_DECODE_N_BYTES(p_dst, p_src, size) \
+do { \
+ memcpy(p_dst, p_src, size); \
+ p_dst = (u8 *)p_dst + size; \
+ p_src = (u8 *)p_src + size; \
+} while (0)
+
+#define UPDATE_ENCODE_VARIABLES(temp_si, buf_dst, \
+ encoded_bytes, tlv_len, encode_tlv, rc) \
+do { \
+ buf_dst = (u8 *)buf_dst + rc; \
+ encoded_bytes += rc; \
+ tlv_len += rc; \
+ temp_si = temp_si + 1; \
+ encode_tlv = 1; \
+} while (0)
+
+#define UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc) \
+do { \
+ buf_src = (u8 *)buf_src + rc; \
+ decoded_bytes += rc; \
+} while (0)
+
+#define TLV_LEN_SIZE sizeof(u16)
+#define TLV_TYPE_SIZE sizeof(u8)
+#define OPTIONAL_TLV_TYPE_START 0x10
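To make the TLV framing produced by the macros above concrete, a worked example (the values are illustrative and assume a little-endian host, which the raw memcpy()-based copying below effectively relies on):

/*
 * A u32 member with value 0x11223344, carried in TLV type 0x10, is
 * emitted on the wire as:
 *
 *   10           TLV type   (TLV_TYPE_SIZE, 1 byte)
 *   04 00        TLV length (TLV_LEN_SIZE, 2 bytes, little endian)
 *   44 33 22 11  payload, copied byte-for-byte from the C struct
 *
 * QMI_ENCDEC_DECODE_TLV() strips exactly this 3-byte header on receive.
 */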
+
+static int qmi_encode(const struct qmi_elem_info *ei_array, void *out_buf,
+ const void *in_c_struct, u32 out_buf_len,
+ int enc_level);
+
+static int qmi_decode(const struct qmi_elem_info *ei_array, void *out_c_struct,
+ const void *in_buf, u32 in_buf_len, int dec_level);
+
+/**
+ * skip_to_next_elem() - Skip to next element in the structure to be encoded
+ * @ei_array: Struct info describing the element to be skipped.
+ * @level: Depth level of encoding/decoding to identify nested structures.
+ *
+ * This function is used while encoding optional elements. If the flag
+ * corresponding to an optional element is not set, then encoding the
+ * optional element can be skipped. This function can be used to perform
+ * that operation.
+ *
+ * Return: struct info of the next element that can be encoded.
+ */
+static const struct qmi_elem_info *
+skip_to_next_elem(const struct qmi_elem_info *ei_array, int level)
+{
+ const struct qmi_elem_info *temp_ei = ei_array;
+ u8 tlv_type;
+
+ if (level > 1) {
+ temp_ei = temp_ei + 1;
+ } else {
+ do {
+ tlv_type = temp_ei->tlv_type;
+ temp_ei = temp_ei + 1;
+ } while (tlv_type == temp_ei->tlv_type);
+ }
+
+ return temp_ei;
+}
+
+/**
+ * qmi_calc_min_msg_len() - Calculate the minimum length of a QMI message
+ * @ei_array: Struct info array describing the structure.
+ * @level: Level to identify the depth of the nested structures.
+ *
+ * Return: Expected minimum length of the QMI message or 0 on error.
+ */
+static int qmi_calc_min_msg_len(const struct qmi_elem_info *ei_array,
+ int level)
+{
+ int min_msg_len = 0;
+ const struct qmi_elem_info *temp_ei = ei_array;
+
+ if (!ei_array)
+ return min_msg_len;
+
+ while (temp_ei->data_type != QMI_EOTI) {
+ /* Optional elements do not count in minimum length */
+ if (temp_ei->data_type == QMI_OPT_FLAG) {
+ temp_ei = skip_to_next_elem(temp_ei, level);
+ continue;
+ }
+
+ if (temp_ei->data_type == QMI_DATA_LEN) {
+ min_msg_len += (temp_ei->elem_size == sizeof(u8) ?
+ sizeof(u8) : sizeof(u16));
+ temp_ei++;
+ continue;
+ } else if (temp_ei->data_type == QMI_STRUCT) {
+ min_msg_len += qmi_calc_min_msg_len(temp_ei->ei_array,
+ (level + 1));
+ temp_ei++;
+ } else if (temp_ei->data_type == QMI_STRING) {
+ if (level > 1)
+ min_msg_len += temp_ei->elem_len <= U8_MAX ?
+ sizeof(u8) : sizeof(u16);
+ min_msg_len += temp_ei->elem_len * temp_ei->elem_size;
+ temp_ei++;
+ } else {
+ min_msg_len += (temp_ei->elem_len * temp_ei->elem_size);
+ temp_ei++;
+ }
+
+ /*
+		 * Type & length information is not prepended for elements in the
+ * nested structure.
+ */
+ if (level == 1)
+ min_msg_len += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
+ }
+
+ return min_msg_len;
+}
+
+/**
+ * qmi_encode_basic_elem() - Encodes elements of basic/primary data type
+ * @buf_dst: Buffer to store the encoded information.
+ * @buf_src: Buffer containing the elements to be encoded.
+ * @elem_len: Number of elements, in the buf_src, to be encoded.
+ * @elem_size: Size of a single instance of the element to be encoded.
+ *
+ * This function encodes the "elem_len" number of data elements, each of
+ * size "elem_size" bytes from the source buffer "buf_src" and stores the
+ * encoded information in the destination buffer "buf_dst". The elements are
+ * of primary data type which include u8 - u64 or similar. This
+ * function returns the number of bytes of encoded information.
+ *
+ * Return: The number of bytes of encoded information.
+ */
+static int qmi_encode_basic_elem(void *buf_dst, const void *buf_src,
+ u32 elem_len, u32 elem_size)
+{
+ u32 i, rc = 0;
+
+ for (i = 0; i < elem_len; i++) {
+ QMI_ENCDEC_ENCODE_N_BYTES(buf_dst, buf_src, elem_size);
+ rc += elem_size;
+ }
+
+ return rc;
+}
+
+/**
+ * qmi_encode_struct_elem() - Encodes elements of struct data type
+ * @ei_array: Struct info array describing the struct element.
+ * @buf_dst: Buffer to store the encoded information.
+ * @buf_src: Buffer containing the elements to be encoded.
+ * @elem_len: Number of elements, in the buf_src, to be encoded.
+ * @out_buf_len: Available space in the encode buffer.
+ * @enc_level: Depth of the nested structure from the main structure.
+ *
+ * This function encodes the "elem_len" number of struct elements, each of
+ * size "ei_array->elem_size" bytes from the source buffer "buf_src" and
+ * stores the encoded information in the destination buffer "buf_dst". The
+ * elements are of struct data type which includes any C structure. This
+ * function returns the number of bytes of encoded information.
+ *
+ * Return: The number of bytes of encoded information on success or negative
+ * errno on error.
+ */
+static int qmi_encode_struct_elem(const struct qmi_elem_info *ei_array,
+ void *buf_dst, const void *buf_src,
+ u32 elem_len, u32 out_buf_len,
+ int enc_level)
+{
+ int i, rc, encoded_bytes = 0;
+ const struct qmi_elem_info *temp_ei = ei_array;
+
+ for (i = 0; i < elem_len; i++) {
+ rc = qmi_encode(temp_ei->ei_array, buf_dst, buf_src,
+ out_buf_len - encoded_bytes, enc_level);
+ if (rc < 0) {
+ pr_err("%s: STRUCT Encode failure\n", __func__);
+ return rc;
+ }
+ buf_dst = buf_dst + rc;
+ buf_src = buf_src + temp_ei->elem_size;
+ encoded_bytes += rc;
+ }
+
+ return encoded_bytes;
+}
+
+/**
+ * qmi_encode_string_elem() - Encodes elements of string data type
+ * @ei_array: Struct info array describing the string element.
+ * @buf_dst: Buffer to store the encoded information.
+ * @buf_src: Buffer containing the elements to be encoded.
+ * @out_buf_len: Available space in the encode buffer.
+ * @enc_level: Depth of the string element from the main structure.
+ *
+ * This function encodes a string element of maximum length "ei_array->elem_len"
+ * bytes from the source buffer "buf_src" and stores the encoded information in
+ * the destination buffer "buf_dst". This function returns the number of bytes
+ * of encoded information.
+ *
+ * Return: The number of bytes of encoded information on success or negative
+ * errno on error.
+ */
+static int qmi_encode_string_elem(const struct qmi_elem_info *ei_array,
+ void *buf_dst, const void *buf_src,
+ u32 out_buf_len, int enc_level)
+{
+ int rc;
+ int encoded_bytes = 0;
+ const struct qmi_elem_info *temp_ei = ei_array;
+ u32 string_len = 0;
+ u32 string_len_sz = 0;
+
+ string_len = strlen(buf_src);
+ string_len_sz = temp_ei->elem_len <= U8_MAX ?
+ sizeof(u8) : sizeof(u16);
+ if (string_len > temp_ei->elem_len) {
+ pr_err("%s: String to be encoded is longer - %d > %d\n",
+ __func__, string_len, temp_ei->elem_len);
+ return -EINVAL;
+ }
+
+ if (enc_level == 1) {
+ if (string_len + TLV_LEN_SIZE + TLV_TYPE_SIZE >
+ out_buf_len) {
+ pr_err("%s: Output len %d > Out Buf len %d\n",
+ __func__, string_len, out_buf_len);
+ return -ETOOSMALL;
+ }
+ } else {
+ if (string_len + string_len_sz > out_buf_len) {
+ pr_err("%s: Output len %d > Out Buf len %d\n",
+ __func__, string_len, out_buf_len);
+ return -ETOOSMALL;
+ }
+ rc = qmi_encode_basic_elem(buf_dst, &string_len,
+ 1, string_len_sz);
+ encoded_bytes += rc;
+ }
+
+ rc = qmi_encode_basic_elem(buf_dst + encoded_bytes, buf_src,
+ string_len, temp_ei->elem_size);
+ encoded_bytes += rc;
+
+ return encoded_bytes;
+}
+
+/**
+ * qmi_encode() - Core Encode Function
+ * @ei_array: Struct info array describing the structure to be encoded.
+ * @out_buf: Buffer to hold the encoded QMI message.
+ * @in_c_struct: Pointer to the C structure to be encoded.
+ * @out_buf_len: Available space in the encode buffer.
+ * @enc_level: Encode level to indicate the depth of the nested structure,
+ * within the main structure, being encoded.
+ *
+ * Return: The number of bytes of encoded information on success or negative
+ * errno on error.
+ */
+static int qmi_encode(const struct qmi_elem_info *ei_array, void *out_buf,
+ const void *in_c_struct, u32 out_buf_len,
+ int enc_level)
+{
+ const struct qmi_elem_info *temp_ei = ei_array;
+ u8 opt_flag_value = 0;
+ u32 data_len_value = 0, data_len_sz;
+ u8 *buf_dst = (u8 *)out_buf;
+ u8 *tlv_pointer;
+ u32 tlv_len;
+ u8 tlv_type;
+ u32 encoded_bytes = 0;
+ const void *buf_src;
+ int encode_tlv = 0;
+ int rc;
+
+ if (!ei_array)
+ return 0;
+
+ tlv_pointer = buf_dst;
+ tlv_len = 0;
+ if (enc_level == 1)
+ buf_dst = buf_dst + (TLV_LEN_SIZE + TLV_TYPE_SIZE);
+
+ while (temp_ei->data_type != QMI_EOTI) {
+ buf_src = in_c_struct + temp_ei->offset;
+ tlv_type = temp_ei->tlv_type;
+
+ if (temp_ei->array_type == NO_ARRAY) {
+ data_len_value = 1;
+ } else if (temp_ei->array_type == STATIC_ARRAY) {
+ data_len_value = temp_ei->elem_len;
+ } else if (data_len_value <= 0 ||
+ temp_ei->elem_len < data_len_value) {
+ pr_err("%s: Invalid data length\n", __func__);
+ return -EINVAL;
+ }
+
+ switch (temp_ei->data_type) {
+ case QMI_OPT_FLAG:
+ rc = qmi_encode_basic_elem(&opt_flag_value, buf_src,
+ 1, sizeof(u8));
+ if (opt_flag_value)
+ temp_ei = temp_ei + 1;
+ else
+ temp_ei = skip_to_next_elem(temp_ei, enc_level);
+ break;
+
+ case QMI_DATA_LEN:
+ memcpy(&data_len_value, buf_src, temp_ei->elem_size);
+ data_len_sz = temp_ei->elem_size == sizeof(u8) ?
+ sizeof(u8) : sizeof(u16);
+ /* Check to avoid out of range buffer access */
+ if ((data_len_sz + encoded_bytes + TLV_LEN_SIZE +
+ TLV_TYPE_SIZE) > out_buf_len) {
+ pr_err("%s: Too Small Buffer @DATA_LEN\n",
+ __func__);
+ return -ETOOSMALL;
+ }
+ rc = qmi_encode_basic_elem(buf_dst, &data_len_value,
+ 1, data_len_sz);
+ UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst,
+ encoded_bytes, tlv_len,
+ encode_tlv, rc);
+ if (!data_len_value)
+ temp_ei = skip_to_next_elem(temp_ei, enc_level);
+ else
+ encode_tlv = 0;
+ break;
+
+ case QMI_UNSIGNED_1_BYTE:
+ case QMI_UNSIGNED_2_BYTE:
+ case QMI_UNSIGNED_4_BYTE:
+ case QMI_UNSIGNED_8_BYTE:
+ case QMI_SIGNED_2_BYTE_ENUM:
+ case QMI_SIGNED_4_BYTE_ENUM:
+ /* Check to avoid out of range buffer access */
+ if (((data_len_value * temp_ei->elem_size) +
+ encoded_bytes + TLV_LEN_SIZE + TLV_TYPE_SIZE) >
+ out_buf_len) {
+ pr_err("%s: Too Small Buffer @data_type:%d\n",
+ __func__, temp_ei->data_type);
+ return -ETOOSMALL;
+ }
+ rc = qmi_encode_basic_elem(buf_dst, buf_src,
+ data_len_value,
+ temp_ei->elem_size);
+ UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst,
+ encoded_bytes, tlv_len,
+ encode_tlv, rc);
+ break;
+
+ case QMI_STRUCT:
+ rc = qmi_encode_struct_elem(temp_ei, buf_dst, buf_src,
+ data_len_value,
+ out_buf_len - encoded_bytes,
+ enc_level + 1);
+ if (rc < 0)
+ return rc;
+ UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst,
+ encoded_bytes, tlv_len,
+ encode_tlv, rc);
+ break;
+
+ case QMI_STRING:
+ rc = qmi_encode_string_elem(temp_ei, buf_dst, buf_src,
+ out_buf_len - encoded_bytes,
+ enc_level);
+ if (rc < 0)
+ return rc;
+ UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst,
+ encoded_bytes, tlv_len,
+ encode_tlv, rc);
+ break;
+ default:
+ pr_err("%s: Unrecognized data type\n", __func__);
+ return -EINVAL;
+ }
+
+ if (encode_tlv && enc_level == 1) {
+ QMI_ENCDEC_ENCODE_TLV(tlv_type, tlv_len, tlv_pointer);
+ encoded_bytes += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
+ tlv_pointer = buf_dst;
+ tlv_len = 0;
+ buf_dst = buf_dst + TLV_LEN_SIZE + TLV_TYPE_SIZE;
+ encode_tlv = 0;
+ }
+ }
+
+ return encoded_bytes;
+}
+
+/**
+ * qmi_decode_basic_elem() - Decodes elements of basic/primary data type
+ * @buf_dst: Buffer to store the decoded element.
+ * @buf_src: Buffer containing the elements in QMI wire format.
+ * @elem_len: Number of elements to be decoded.
+ * @elem_size: Size of a single instance of the element to be decoded.
+ *
+ * This function decodes the "elem_len" number of elements in QMI wire format,
+ * each of size "elem_size" bytes from the source buffer "buf_src" and stores
+ * the decoded elements in the destination buffer "buf_dst". The elements are
+ * of primary data type which include u8 - u64 or similar. This
+ * function returns the number of bytes of decoded information.
+ *
+ * Return: The total size of the decoded data elements, in bytes.
+ */
+static int qmi_decode_basic_elem(void *buf_dst, const void *buf_src,
+ u32 elem_len, u32 elem_size)
+{
+ u32 i, rc = 0;
+
+ for (i = 0; i < elem_len; i++) {
+ QMI_ENCDEC_DECODE_N_BYTES(buf_dst, buf_src, elem_size);
+ rc += elem_size;
+ }
+
+ return rc;
+}
+
+/**
+ * qmi_decode_struct_elem() - Decodes elements of struct data type
+ * @ei_array: Struct info array describing the struct element.
+ * @buf_dst: Buffer to store the decoded element.
+ * @buf_src: Buffer containing the elements in QMI wire format.
+ * @elem_len: Number of elements to be decoded.
+ * @tlv_len: Total size of the encoded information corresponding to
+ * this struct element.
+ * @dec_level: Depth of the nested structure from the main structure.
+ *
+ * This function decodes the "elem_len" number of elements in QMI wire format,
+ * each of size "(tlv_len/elem_len)" bytes from the source buffer "buf_src"
+ * and stores the decoded elements in the destination buffer "buf_dst". The
+ * elements are of struct data type which includes any C structure. This
+ * function returns the number of bytes of decoded information.
+ *
+ * Return: The total size of the decoded data elements on success, negative
+ * errno on error.
+ */
+static int qmi_decode_struct_elem(const struct qmi_elem_info *ei_array,
+ void *buf_dst, const void *buf_src,
+ u32 elem_len, u32 tlv_len,
+ int dec_level)
+{
+ int i, rc, decoded_bytes = 0;
+ const struct qmi_elem_info *temp_ei = ei_array;
+
+ for (i = 0; i < elem_len && decoded_bytes < tlv_len; i++) {
+ rc = qmi_decode(temp_ei->ei_array, buf_dst, buf_src,
+ tlv_len - decoded_bytes, dec_level);
+ if (rc < 0)
+ return rc;
+ buf_src = buf_src + rc;
+ buf_dst = buf_dst + temp_ei->elem_size;
+ decoded_bytes += rc;
+ }
+
+ if ((dec_level <= 2 && decoded_bytes != tlv_len) ||
+ (dec_level > 2 && (i < elem_len || decoded_bytes > tlv_len))) {
+ pr_err("%s: Fault in decoding: dl(%d), db(%d), tl(%d), i(%d), el(%d)\n",
+ __func__, dec_level, decoded_bytes, tlv_len,
+ i, elem_len);
+ return -EFAULT;
+ }
+
+ return decoded_bytes;
+}
+
+/**
+ * qmi_decode_string_elem() - Decodes elements of string data type
+ * @ei_array: Struct info array describing the string element.
+ * @buf_dst: Buffer to store the decoded element.
+ * @buf_src: Buffer containing the elements in QMI wire format.
+ * @tlv_len: Total size of the encoded information corresponding to
+ * this string element.
+ * @dec_level: Depth of the string element from the main structure.
+ *
+ * This function decodes the string element of maximum length
+ * "ei_array->elem_len" from the source buffer "buf_src" and puts it into
+ * the destination buffer "buf_dst". This function returns number of bytes
+ * decoded from the input buffer.
+ *
+ * Return: The total size of the decoded data elements on success, negative
+ * errno on error.
+ */
+static int qmi_decode_string_elem(const struct qmi_elem_info *ei_array,
+ void *buf_dst, const void *buf_src,
+ u32 tlv_len, int dec_level)
+{
+ int rc;
+ int decoded_bytes = 0;
+ u32 string_len = 0;
+ u32 string_len_sz = 0;
+ const struct qmi_elem_info *temp_ei = ei_array;
+
+ if (dec_level == 1) {
+ string_len = tlv_len;
+ } else {
+ string_len_sz = temp_ei->elem_len <= U8_MAX ?
+ sizeof(u8) : sizeof(u16);
+ rc = qmi_decode_basic_elem(&string_len, buf_src,
+ 1, string_len_sz);
+ decoded_bytes += rc;
+ }
+
+ if (string_len >= temp_ei->elem_len) {
+ pr_err("%s: String len %d >= Max Len %d\n",
+ __func__, string_len, temp_ei->elem_len);
+ return -ETOOSMALL;
+ } else if (string_len > tlv_len) {
+ pr_err("%s: String len %d > Input Buffer Len %d\n",
+ __func__, string_len, tlv_len);
+ return -EFAULT;
+ }
+
+ rc = qmi_decode_basic_elem(buf_dst, buf_src + decoded_bytes,
+ string_len, temp_ei->elem_size);
+ *((char *)buf_dst + string_len) = '\0';
+ decoded_bytes += rc;
+
+ return decoded_bytes;
+}
+
+/**
+ * find_ei() - Find element info corresponding to TLV Type
+ * @ei_array: Struct info array of the message being decoded.
+ * @type: TLV Type of the element being searched.
+ *
+ * Every element that got encoded in the QMI message will have a type
+ * information associated with it. While decoding the QMI message,
+ * this function is used to find the struct info regarding the element
+ * that corresponds to the type being decoded.
+ *
+ * Return: Pointer to struct info, if found
+ */
+static const struct qmi_elem_info *find_ei(const struct qmi_elem_info *ei_array,
+ u32 type)
+{
+ const struct qmi_elem_info *temp_ei = ei_array;
+
+ while (temp_ei->data_type != QMI_EOTI) {
+ if (temp_ei->tlv_type == (u8)type)
+ return temp_ei;
+ temp_ei = temp_ei + 1;
+ }
+
+ return NULL;
+}
+
+/**
+ * qmi_decode() - Core Decode Function
+ * @ei_array: Struct info array describing the structure to be decoded.
+ * @out_c_struct: Buffer to hold the decoded C struct
+ * @in_buf: Buffer containing the QMI message to be decoded
+ * @in_buf_len: Length of the QMI message to be decoded
+ * @dec_level: Decode level to indicate the depth of the nested structure,
+ * within the main structure, being decoded
+ *
+ * Return: The number of bytes of decoded information on success, negative
+ * errno on error.
+ */
+static int qmi_decode(const struct qmi_elem_info *ei_array, void *out_c_struct,
+ const void *in_buf, u32 in_buf_len,
+ int dec_level)
+{
+ const struct qmi_elem_info *temp_ei = ei_array;
+ u8 opt_flag_value = 1;
+ u32 data_len_value = 0, data_len_sz = 0;
+ u8 *buf_dst = out_c_struct;
+ const u8 *tlv_pointer;
+ u32 tlv_len = 0;
+ u32 tlv_type;
+ u32 decoded_bytes = 0;
+ const void *buf_src = in_buf;
+ int rc;
+
+ while (decoded_bytes < in_buf_len) {
+ if (dec_level >= 2 && temp_ei->data_type == QMI_EOTI)
+ return decoded_bytes;
+
+ if (dec_level == 1) {
+ tlv_pointer = buf_src;
+ QMI_ENCDEC_DECODE_TLV(&tlv_type,
+ &tlv_len, tlv_pointer);
+ buf_src += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
+ decoded_bytes += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
+ temp_ei = find_ei(ei_array, tlv_type);
+ if (!temp_ei && tlv_type < OPTIONAL_TLV_TYPE_START) {
+ pr_err("%s: Inval element info\n", __func__);
+ return -EINVAL;
+ } else if (!temp_ei) {
+ UPDATE_DECODE_VARIABLES(buf_src,
+ decoded_bytes, tlv_len);
+ continue;
+ }
+ } else {
+ /*
+ * No length information for elements in nested
+ * structures. So use remaining decodable buffer space.
+ */
+ tlv_len = in_buf_len - decoded_bytes;
+ }
+
+ buf_dst = out_c_struct + temp_ei->offset;
+ if (temp_ei->data_type == QMI_OPT_FLAG) {
+ memcpy(buf_dst, &opt_flag_value, sizeof(u8));
+ temp_ei = temp_ei + 1;
+ buf_dst = out_c_struct + temp_ei->offset;
+ }
+
+ if (temp_ei->data_type == QMI_DATA_LEN) {
+ data_len_sz = temp_ei->elem_size == sizeof(u8) ?
+ sizeof(u8) : sizeof(u16);
+ rc = qmi_decode_basic_elem(&data_len_value, buf_src,
+ 1, data_len_sz);
+ memcpy(buf_dst, &data_len_value, sizeof(u32));
+ temp_ei = temp_ei + 1;
+ buf_dst = out_c_struct + temp_ei->offset;
+ tlv_len -= data_len_sz;
+ UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc);
+ }
+
+ if (temp_ei->array_type == NO_ARRAY) {
+ data_len_value = 1;
+ } else if (temp_ei->array_type == STATIC_ARRAY) {
+ data_len_value = temp_ei->elem_len;
+ } else if (data_len_value > temp_ei->elem_len) {
+ pr_err("%s: Data len %d > max spec %d\n",
+ __func__, data_len_value, temp_ei->elem_len);
+ return -ETOOSMALL;
+ }
+
+ switch (temp_ei->data_type) {
+ case QMI_UNSIGNED_1_BYTE:
+ case QMI_UNSIGNED_2_BYTE:
+ case QMI_UNSIGNED_4_BYTE:
+ case QMI_UNSIGNED_8_BYTE:
+ case QMI_SIGNED_2_BYTE_ENUM:
+ case QMI_SIGNED_4_BYTE_ENUM:
+ rc = qmi_decode_basic_elem(buf_dst, buf_src,
+ data_len_value,
+ temp_ei->elem_size);
+ UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc);
+ break;
+
+ case QMI_STRUCT:
+ rc = qmi_decode_struct_elem(temp_ei, buf_dst, buf_src,
+ data_len_value, tlv_len,
+ dec_level + 1);
+ if (rc < 0)
+ return rc;
+ UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc);
+ break;
+
+ case QMI_STRING:
+ rc = qmi_decode_string_elem(temp_ei, buf_dst, buf_src,
+ tlv_len, dec_level);
+ if (rc < 0)
+ return rc;
+ UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc);
+ break;
+
+ default:
+ pr_err("%s: Unrecognized data type\n", __func__);
+ return -EINVAL;
+ }
+ temp_ei = temp_ei + 1;
+ }
+
+ return decoded_bytes;
+}
+
+/**
+ * qmi_encode_message() - Encode C structure as QMI encoded message
+ * @type: Type of QMI message
+ * @msg_id: Message ID of the message
+ * @len: Passed as max length of the message, updated to actual size
+ * @txn_id: Transaction ID
+ * @ei: QMI message descriptor
+ * @c_struct: Reference to structure to encode
+ *
+ * Return: Buffer with encoded message, or negative ERR_PTR() on error
+ */
+void *qmi_encode_message(int type, unsigned int msg_id, size_t *len,
+ unsigned int txn_id, const struct qmi_elem_info *ei,
+ const void *c_struct)
+{
+ struct qmi_header *hdr;
+ ssize_t msglen = 0;
+ void *msg;
+ int ret;
+
+ /* Check the possibility of a zero length QMI message */
+ if (!c_struct) {
+ ret = qmi_calc_min_msg_len(ei, 1);
+ if (ret) {
+ pr_err("%s: Calc. len %d != 0, but NULL c_struct\n",
+ __func__, ret);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ msg = kzalloc(sizeof(*hdr) + *len, GFP_KERNEL);
+ if (!msg)
+ return ERR_PTR(-ENOMEM);
+
+ /* Encode message, if we have a message */
+ if (c_struct) {
+ msglen = qmi_encode(ei, msg + sizeof(*hdr), c_struct, *len, 1);
+ if (msglen < 0) {
+ kfree(msg);
+ return ERR_PTR(msglen);
+ }
+ }
+
+ hdr = msg;
+ hdr->type = type;
+ hdr->txn_id = txn_id;
+ hdr->msg_id = msg_id;
+ hdr->msg_len = msglen;
+
+ *len = sizeof(*hdr) + msglen;
+
+ return msg;
+}
+EXPORT_SYMBOL(qmi_encode_message);
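An illustrative caller, assuming a hypothetical request structure example_req, its element-info array example_req_ei and message id EXAMPLE_MSG_ID (none of which exist in this file); only qmi_encode_message() itself is real here:

static void *example_encode(unsigned int txn_id, size_t *len)
{
	struct example_req req = { .value = 1 };	/* hypothetical request */

	*len = 64;	/* maximum room for the encoded payload */

	/* On success *len is updated to header size + encoded bytes and
	 * the returned buffer must be kfree()d by the caller. */
	return qmi_encode_message(QMI_REQUEST, EXAMPLE_MSG_ID, len,
				  txn_id, example_req_ei, &req);
}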
+
+/**
+ * qmi_decode_message() - Decode QMI encoded message to C structure
+ * @buf: Buffer with encoded message
+ * @len: Amount of data in @buf
+ * @ei: QMI message descriptor
+ * @c_struct: Reference to structure to decode into
+ *
+ * Return: The number of bytes of decoded information on success, negative
+ * errno on error.
+ */
+int qmi_decode_message(const void *buf, size_t len,
+ const struct qmi_elem_info *ei, void *c_struct)
+{
+ if (!ei)
+ return -EINVAL;
+
+ if (!c_struct || !buf || !len)
+ return -EINVAL;
+
+ return qmi_decode(ei, c_struct, buf + sizeof(struct qmi_header),
+ len - sizeof(struct qmi_header), 1);
+}
+EXPORT_SYMBOL(qmi_decode_message);
+
+/* Common header in all QMI responses */
+const struct qmi_elem_info qmi_response_type_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_2_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct qmi_response_type_v01, result),
+ .ei_array = NULL,
+ },
+ {
+ .data_type = QMI_SIGNED_2_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct qmi_response_type_v01, error),
+ .ei_array = NULL,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .elem_len = 0,
+ .elem_size = 0,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = 0,
+ .ei_array = NULL,
+ },
+};
+EXPORT_SYMBOL(qmi_response_type_v01_ei);
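For reference, a sketch of how a driver-side message description might chain onto this common header, assuming a hypothetical response that carries only the standard result TLV (type 0x02); the struct and array names are placeholders:

/* Hypothetical response: nothing but the common result TLV */
struct example_resp {
	struct qmi_response_type_v01 resp;
};

static const struct qmi_elem_info example_resp_ei[] = {
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct example_resp, resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
	},
};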
+
+MODULE_DESCRIPTION("QMI encoder/decoder helper");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/qmi_interface.c b/drivers/soc/qcom/qmi_interface.c
new file mode 100644
index 000000000..570527262
--- /dev/null
+++ b/drivers/soc/qcom/qmi_interface.c
@@ -0,0 +1,851 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Linaro Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/qrtr.h>
+#include <linux/net.h>
+#include <linux/completion.h>
+#include <linux/idr.h>
+#include <linux/string.h>
+#include <net/sock.h>
+#include <linux/workqueue.h>
+#include <linux/soc/qcom/qmi.h>
+
+static struct socket *qmi_sock_create(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq);
+
+/**
+ * qmi_recv_new_server() - handler of NEW_SERVER control message
+ * @qmi: qmi handle
+ * @service: service id of the new server
+ * @instance: instance id of the new server
+ * @node: node of the new server
+ * @port: port of the new server
+ *
+ * Calls the new_server callback to inform the client about a newly registered
+ * server matching the currently registered service lookup.
+ */
+static void qmi_recv_new_server(struct qmi_handle *qmi,
+ unsigned int service, unsigned int instance,
+ unsigned int node, unsigned int port)
+{
+ struct qmi_ops *ops = &qmi->ops;
+ struct qmi_service *svc;
+ int ret;
+
+ if (!ops->new_server)
+ return;
+
+ /* Ignore EOF marker */
+ if (!node && !port)
+ return;
+
+ svc = kzalloc(sizeof(*svc), GFP_KERNEL);
+ if (!svc)
+ return;
+
+ svc->service = service;
+ svc->version = instance & 0xff;
+ svc->instance = instance >> 8;
+ svc->node = node;
+ svc->port = port;
+
+ ret = ops->new_server(qmi, svc);
+ if (ret < 0)
+ kfree(svc);
+ else
+ list_add(&svc->list_node, &qmi->lookup_results);
+}
+
+/**
+ * qmi_recv_del_server() - handler of DEL_SERVER control message
+ * @qmi: qmi handle
+ * @node: node of the dying server, a value of -1 matches all nodes
+ * @port: port of the dying server, a value of -1 matches all ports
+ *
+ * Calls the del_server callback for each previously seen server, allowing the
+ * client to react to the disappearing server.
+ */
+static void qmi_recv_del_server(struct qmi_handle *qmi,
+ unsigned int node, unsigned int port)
+{
+ struct qmi_ops *ops = &qmi->ops;
+ struct qmi_service *svc;
+ struct qmi_service *tmp;
+
+ list_for_each_entry_safe(svc, tmp, &qmi->lookup_results, list_node) {
+ if (node != -1 && svc->node != node)
+ continue;
+ if (port != -1 && svc->port != port)
+ continue;
+
+ if (ops->del_server)
+ ops->del_server(qmi, svc);
+
+ list_del(&svc->list_node);
+ kfree(svc);
+ }
+}
+
+/**
+ * qmi_recv_bye() - handler of BYE control message
+ * @qmi: qmi handle
+ * @node: id of the dying node
+ *
+ * Signals the client that all previously registered services on this node are
+ * now gone and then calls the bye callback to allow the client further
+ * cleaning up resources associated with this remote.
+ */
+static void qmi_recv_bye(struct qmi_handle *qmi,
+ unsigned int node)
+{
+ struct qmi_ops *ops = &qmi->ops;
+
+ qmi_recv_del_server(qmi, node, -1);
+
+ if (ops->bye)
+ ops->bye(qmi, node);
+}
+
+/**
+ * qmi_recv_del_client() - handler of DEL_CLIENT control message
+ * @qmi: qmi handle
+ * @node: node of the dying client
+ * @port: port of the dying client
+ *
+ * Signals the client about a dying client, by calling the del_client callback.
+ */
+static void qmi_recv_del_client(struct qmi_handle *qmi,
+ unsigned int node, unsigned int port)
+{
+ struct qmi_ops *ops = &qmi->ops;
+
+ if (ops->del_client)
+ ops->del_client(qmi, node, port);
+}
+
+static void qmi_recv_ctrl_pkt(struct qmi_handle *qmi,
+ const void *buf, size_t len)
+{
+ const struct qrtr_ctrl_pkt *pkt = buf;
+
+ if (len < sizeof(struct qrtr_ctrl_pkt)) {
+ pr_debug("ignoring short control packet\n");
+ return;
+ }
+
+ switch (le32_to_cpu(pkt->cmd)) {
+ case QRTR_TYPE_BYE:
+ qmi_recv_bye(qmi, le32_to_cpu(pkt->client.node));
+ break;
+ case QRTR_TYPE_NEW_SERVER:
+ qmi_recv_new_server(qmi,
+ le32_to_cpu(pkt->server.service),
+ le32_to_cpu(pkt->server.instance),
+ le32_to_cpu(pkt->server.node),
+ le32_to_cpu(pkt->server.port));
+ break;
+ case QRTR_TYPE_DEL_SERVER:
+ qmi_recv_del_server(qmi,
+ le32_to_cpu(pkt->server.node),
+ le32_to_cpu(pkt->server.port));
+ break;
+ case QRTR_TYPE_DEL_CLIENT:
+ qmi_recv_del_client(qmi,
+ le32_to_cpu(pkt->client.node),
+ le32_to_cpu(pkt->client.port));
+ break;
+ }
+}
+
+static void qmi_send_new_lookup(struct qmi_handle *qmi, struct qmi_service *svc)
+{
+ struct qrtr_ctrl_pkt pkt;
+ struct sockaddr_qrtr sq;
+ struct msghdr msg = { };
+ struct kvec iv = { &pkt, sizeof(pkt) };
+ int ret;
+
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.cmd = cpu_to_le32(QRTR_TYPE_NEW_LOOKUP);
+ pkt.server.service = cpu_to_le32(svc->service);
+ pkt.server.instance = cpu_to_le32(svc->version | svc->instance << 8);
+
+ sq.sq_family = qmi->sq.sq_family;
+ sq.sq_node = qmi->sq.sq_node;
+ sq.sq_port = QRTR_PORT_CTRL;
+
+ msg.msg_name = &sq;
+ msg.msg_namelen = sizeof(sq);
+
+ mutex_lock(&qmi->sock_lock);
+ if (qmi->sock) {
+ ret = kernel_sendmsg(qmi->sock, &msg, &iv, 1, sizeof(pkt));
+ if (ret < 0)
+ pr_err("failed to send lookup registration: %d\n", ret);
+ }
+ mutex_unlock(&qmi->sock_lock);
+}
+
+/**
+ * qmi_add_lookup() - register a new lookup with the name service
+ * @qmi: qmi handle
+ * @service: service id of the request
+ * @instance: instance id of the request
+ * @version: version number of the request
+ *
+ * Registering a lookup query with the name server will cause the name server
+ * to send NEW_SERVER and DEL_SERVER control messages to this socket as
+ * matching services are registered.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int qmi_add_lookup(struct qmi_handle *qmi, unsigned int service,
+ unsigned int version, unsigned int instance)
+{
+ struct qmi_service *svc;
+
+ svc = kzalloc(sizeof(*svc), GFP_KERNEL);
+ if (!svc)
+ return -ENOMEM;
+
+ svc->service = service;
+ svc->version = version;
+ svc->instance = instance;
+
+ list_add(&svc->list_node, &qmi->lookups);
+
+ qmi_send_new_lookup(qmi, svc);
+
+ return 0;
+}
+EXPORT_SYMBOL(qmi_add_lookup);
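A rough client-side sketch of how a lookup and its new_server notification fit together; the service/version/instance numbers are placeholders, and the callback is assumed to have been wired into the handle's struct qmi_ops when the handle was initialised:

static int example_new_server(struct qmi_handle *qmi, struct qmi_service *svc)
{
	/* svc->node and svc->port identify the remote server; returning 0
	 * keeps svc on qmi->lookup_results until a matching DEL_SERVER. */
	pr_info("example: service up at node %u port %u\n",
		svc->node, svc->port);

	return 0;
}

static int example_watch(struct qmi_handle *qmi)
{
	/* Ask the name service for servers of service 15, version 1,
	 * instance 0 (all values are illustrative) */
	return qmi_add_lookup(qmi, 15, 1, 0);
}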
+
+static void qmi_send_new_server(struct qmi_handle *qmi, struct qmi_service *svc)
+{
+ struct qrtr_ctrl_pkt pkt;
+ struct sockaddr_qrtr sq;
+ struct msghdr msg = { };
+ struct kvec iv = { &pkt, sizeof(pkt) };
+ int ret;
+
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.cmd = cpu_to_le32(QRTR_TYPE_NEW_SERVER);
+ pkt.server.service = cpu_to_le32(svc->service);
+ pkt.server.instance = cpu_to_le32(svc->version | svc->instance << 8);
+ pkt.server.node = cpu_to_le32(qmi->sq.sq_node);
+ pkt.server.port = cpu_to_le32(qmi->sq.sq_port);
+
+ sq.sq_family = qmi->sq.sq_family;
+ sq.sq_node = qmi->sq.sq_node;
+ sq.sq_port = QRTR_PORT_CTRL;
+
+ msg.msg_name = &sq;
+ msg.msg_namelen = sizeof(sq);
+
+ mutex_lock(&qmi->sock_lock);
+ if (qmi->sock) {
+ ret = kernel_sendmsg(qmi->sock, &msg, &iv, 1, sizeof(pkt));
+ if (ret < 0)
+ pr_err("send service registration failed: %d\n", ret);
+ }
+ mutex_unlock(&qmi->sock_lock);
+}
+
+/**
+ * qmi_add_server() - register a service with the name service
+ * @qmi: qmi handle
+ * @service: type of the service
+ * @instance: instance of the service
+ * @version: version of the service
+ *
+ * Register a new service with the name service. This allows clients to find
+ * and start sending messages to the client associated with @qmi.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int qmi_add_server(struct qmi_handle *qmi, unsigned int service,
+ unsigned int version, unsigned int instance)
+{
+ struct qmi_service *svc;
+
+ svc = kzalloc(sizeof(*svc), GFP_KERNEL);
+ if (!svc)
+ return -ENOMEM;
+
+ svc->service = service;
+ svc->version = version;
+ svc->instance = instance;
+
+ list_add(&svc->list_node, &qmi->services);
+
+ qmi_send_new_server(qmi, svc);
+
+ return 0;
+}
+EXPORT_SYMBOL(qmi_add_server);
+
+/**
+ * qmi_txn_init() - allocate transaction id within the given QMI handle
+ * @qmi: QMI handle
+ * @txn: transaction context
+ * @ei: description of how to decode a matching response (optional)
+ * @c_struct: pointer to the object to decode the response into (optional)
+ *
+ * This allocates a transaction id within the QMI handle. If @ei and @c_struct
+ * are specified any responses to this transaction will be decoded as described
+ * by @ei into @c_struct.
+ *
+ * A client calling qmi_txn_init() must call either qmi_txn_wait() or
+ * qmi_txn_cancel() to free up the allocated resources.
+ *
+ * Return: Transaction id on success, negative errno on failure.
+ */
+int qmi_txn_init(struct qmi_handle *qmi, struct qmi_txn *txn,
+ const struct qmi_elem_info *ei, void *c_struct)
+{
+ int ret;
+
+ memset(txn, 0, sizeof(*txn));
+
+ mutex_init(&txn->lock);
+ init_completion(&txn->completion);
+ txn->qmi = qmi;
+ txn->ei = ei;
+ txn->dest = c_struct;
+
+ mutex_lock(&qmi->txn_lock);
+ ret = idr_alloc_cyclic(&qmi->txns, txn, 0, U16_MAX, GFP_KERNEL);
+ if (ret < 0)
+ pr_err("failed to allocate transaction id\n");
+
+ txn->id = ret;
+ mutex_unlock(&qmi->txn_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(qmi_txn_init);
+
+/**
+ * qmi_txn_wait() - wait for a response on a transaction
+ * @txn: transaction handle
+ * @timeout: timeout, in jiffies
+ *
+ * If the transaction is decoded by means of @ei and @c_struct, the return
+ * value will be the return value of qmi_decode_message(), otherwise it's up
+ * to the specified message handler to fill out the result.
+ *
+ * Return: the transaction response on success, negative errno on failure.
+ */
+int qmi_txn_wait(struct qmi_txn *txn, unsigned long timeout)
+{
+ struct qmi_handle *qmi = txn->qmi;
+ int ret;
+
+ ret = wait_for_completion_timeout(&txn->completion, timeout);
+
+ mutex_lock(&qmi->txn_lock);
+ mutex_lock(&txn->lock);
+ idr_remove(&qmi->txns, txn->id);
+ mutex_unlock(&txn->lock);
+ mutex_unlock(&qmi->txn_lock);
+
+ if (ret == 0)
+ return -ETIMEDOUT;
+ else
+ return txn->result;
+}
+EXPORT_SYMBOL(qmi_txn_wait);
+
+/**
+ * qmi_txn_cancel() - cancel an ongoing transaction
+ * @txn: transaction id
+ */
+void qmi_txn_cancel(struct qmi_txn *txn)
+{
+ struct qmi_handle *qmi = txn->qmi;
+
+ mutex_lock(&qmi->txn_lock);
+ mutex_lock(&txn->lock);
+ idr_remove(&qmi->txns, txn->id);
+ mutex_unlock(&txn->lock);
+ mutex_unlock(&qmi->txn_lock);
+}
+EXPORT_SYMBOL(qmi_txn_cancel);
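A rough outline of the request/response pattern these three helpers support. The response struct, its ei array and the send step are placeholders (the actual transmission would normally go through the request-sending helper that follows later in this file); only qmi_txn_init(), qmi_txn_wait() and qmi_txn_cancel() are real:

static int example_request(struct qmi_handle *qmi)
{
	struct example_resp resp = {};		/* hypothetical response */
	struct qmi_txn txn;
	int ret;

	ret = qmi_txn_init(qmi, &txn, example_resp_ei, &resp);
	if (ret < 0)
		return ret;

	ret = example_send(qmi, &txn);		/* placeholder send step */
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		return ret;
	}

	/* Blocks until the matching response arrives or 5 seconds pass */
	return qmi_txn_wait(&txn, 5 * HZ);
}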
+
+/**
+ * qmi_invoke_handler() - find and invoke a handler for a message
+ * @qmi: qmi handle
+ * @sq: sockaddr of the sender
+ * @txn: transaction object for the message
+ * @buf: buffer containing the message
+ * @len: length of @buf
+ *
+ * Find handler and invoke handler for the incoming message.
+ */
+static void qmi_invoke_handler(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, const void *buf, size_t len)
+{
+ const struct qmi_msg_handler *handler;
+ const struct qmi_header *hdr = buf;
+ void *dest;
+ int ret;
+
+ if (!qmi->handlers)
+ return;
+
+ for (handler = qmi->handlers; handler->fn; handler++) {
+ if (handler->type == hdr->type &&
+ handler->msg_id == hdr->msg_id)
+ break;
+ }
+
+ if (!handler->fn)
+ return;
+
+ dest = kzalloc(handler->decoded_size, GFP_KERNEL);
+ if (!dest)
+ return;
+
+ ret = qmi_decode_message(buf, len, handler->ei, dest);
+ if (ret < 0)
+ pr_err("failed to decode incoming message\n");
+ else
+ handler->fn(qmi, sq, txn, dest);
+
+ kfree(dest);
+}
+
+/**
+ * qmi_handle_net_reset() - invoked to handle ENETRESET on a QMI handle
+ * @qmi: the QMI context
+ *
+ * As a result of registering a name service with the QRTR all open sockets are
+ * flagged with ENETRESET and this function will be called. The typical case is
+ * the initial boot, where this signals that the local node id has been
+ * configured and as such any bound sockets need to be rebound. So close the
+ * socket, inform the client and re-initialize the socket.
+ *
+ * For clients it's generally sufficient to react to the del_server callbacks,
+ * but server code is expected to treat the net_reset callback as a "bye" from
+ * all nodes.
+ *
+ * Finally the QMI handle will send out registration requests for any lookups
+ * and services.
+ */
+static void qmi_handle_net_reset(struct qmi_handle *qmi)
+{
+ struct sockaddr_qrtr sq;
+ struct qmi_service *svc;
+ struct socket *sock;
+
+ sock = qmi_sock_create(qmi, &sq);
+ if (IS_ERR(sock))
+ return;
+
+ mutex_lock(&qmi->sock_lock);
+ sock_release(qmi->sock);
+ qmi->sock = NULL;
+ mutex_unlock(&qmi->sock_lock);
+
+ qmi_recv_del_server(qmi, -1, -1);
+
+ if (qmi->ops.net_reset)
+ qmi->ops.net_reset(qmi);
+
+ mutex_lock(&qmi->sock_lock);
+ qmi->sock = sock;
+ qmi->sq = sq;
+ mutex_unlock(&qmi->sock_lock);
+
+ list_for_each_entry(svc, &qmi->lookups, list_node)
+ qmi_send_new_lookup(qmi, svc);
+
+ list_for_each_entry(svc, &qmi->services, list_node)
+ qmi_send_new_server(qmi, svc);
+}
+
+static void qmi_handle_message(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq,
+ const void *buf, size_t len)
+{
+ const struct qmi_header *hdr;
+ struct qmi_txn tmp_txn;
+ struct qmi_txn *txn = NULL;
+ int ret;
+
+ if (len < sizeof(*hdr)) {
+ pr_err("ignoring short QMI packet\n");
+ return;
+ }
+
+ hdr = buf;
+
+ /* If this is a response, find the matching transaction handle */
+ if (hdr->type == QMI_RESPONSE) {
+ mutex_lock(&qmi->txn_lock);
+ txn = idr_find(&qmi->txns, hdr->txn_id);
+
+ /* Ignore unexpected responses */
+ if (!txn) {
+ mutex_unlock(&qmi->txn_lock);
+ return;
+ }
+
+ mutex_lock(&txn->lock);
+ mutex_unlock(&qmi->txn_lock);
+
+ if (txn->dest && txn->ei) {
+ ret = qmi_decode_message(buf, len, txn->ei, txn->dest);
+ if (ret < 0)
+ pr_err("failed to decode incoming message\n");
+
+ txn->result = ret;
+ complete(&txn->completion);
+ } else {
+ qmi_invoke_handler(qmi, sq, txn, buf, len);
+ }
+
+ mutex_unlock(&txn->lock);
+ } else {
+ /* Create a txn based on the txn_id of the incoming message */
+ memset(&tmp_txn, 0, sizeof(tmp_txn));
+ tmp_txn.id = hdr->txn_id;
+
+ qmi_invoke_handler(qmi, sq, &tmp_txn, buf, len);
+ }
+}
+
+static void qmi_data_ready_work(struct work_struct *work)
+{
+ struct qmi_handle *qmi = container_of(work, struct qmi_handle, work);
+ struct qmi_ops *ops = &qmi->ops;
+ struct sockaddr_qrtr sq;
+ struct msghdr msg = { .msg_name = &sq, .msg_namelen = sizeof(sq) };
+ struct kvec iv;
+ ssize_t msglen;
+
+ for (;;) {
+ iv.iov_base = qmi->recv_buf;
+ iv.iov_len = qmi->recv_buf_size;
+
+ mutex_lock(&qmi->sock_lock);
+ if (qmi->sock)
+ msglen = kernel_recvmsg(qmi->sock, &msg, &iv, 1,
+ iv.iov_len, MSG_DONTWAIT);
+ else
+ msglen = -EPIPE;
+ mutex_unlock(&qmi->sock_lock);
+ if (msglen == -EAGAIN)
+ break;
+
+ if (msglen == -ENETRESET) {
+ qmi_handle_net_reset(qmi);
+
+ /* The old qmi->sock is gone, our work is done */
+ break;
+ }
+
+ if (msglen < 0) {
+ pr_err("qmi recvmsg failed: %zd\n", msglen);
+ break;
+ }
+
+ if (sq.sq_node == qmi->sq.sq_node &&
+ sq.sq_port == QRTR_PORT_CTRL) {
+ qmi_recv_ctrl_pkt(qmi, qmi->recv_buf, msglen);
+ } else if (ops->msg_handler) {
+ ops->msg_handler(qmi, &sq, qmi->recv_buf, msglen);
+ } else {
+ qmi_handle_message(qmi, &sq, qmi->recv_buf, msglen);
+ }
+ }
+}
+
+static void qmi_data_ready(struct sock *sk)
+{
+ struct qmi_handle *qmi = sk->sk_user_data;
+
+ /*
+ * This will be NULL if we receive data while being in
+ * qmi_handle_release()
+ */
+ if (!qmi)
+ return;
+
+ queue_work(qmi->wq, &qmi->work);
+}
+
+static struct socket *qmi_sock_create(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq)
+{
+ struct socket *sock;
+ int ret;
+
+ ret = sock_create_kern(&init_net, AF_QIPCRTR, SOCK_DGRAM,
+ PF_QIPCRTR, &sock);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ ret = kernel_getsockname(sock, (struct sockaddr *)sq);
+ if (ret < 0) {
+ sock_release(sock);
+ return ERR_PTR(ret);
+ }
+
+ sock->sk->sk_user_data = qmi;
+ sock->sk->sk_data_ready = qmi_data_ready;
+ sock->sk->sk_error_report = qmi_data_ready;
+
+ return sock;
+}
+
+/**
+ * qmi_handle_init() - initialize a QMI client handle
+ * @qmi: QMI handle to initialize
+ * @recv_buf_size: maximum size of incoming message
+ * @ops: reference to callbacks for QRTR notifications
+ * @handlers: NULL-terminated list of QMI message handlers
+ *
+ * This initializes the QMI client handle to allow sending and receiving QMI
+ * messages. As messages are received the appropriate handler will be invoked.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int qmi_handle_init(struct qmi_handle *qmi, size_t recv_buf_size,
+ const struct qmi_ops *ops,
+ const struct qmi_msg_handler *handlers)
+{
+ int ret;
+
+ mutex_init(&qmi->txn_lock);
+ mutex_init(&qmi->sock_lock);
+
+ idr_init(&qmi->txns);
+
+ INIT_LIST_HEAD(&qmi->lookups);
+ INIT_LIST_HEAD(&qmi->lookup_results);
+ INIT_LIST_HEAD(&qmi->services);
+
+ INIT_WORK(&qmi->work, qmi_data_ready_work);
+
+ qmi->handlers = handlers;
+ if (ops)
+ qmi->ops = *ops;
+
+ /* Make room for the header */
+ recv_buf_size += sizeof(struct qmi_header);
+ /* Must also be sufficient to hold a control packet */
+ if (recv_buf_size < sizeof(struct qrtr_ctrl_pkt))
+ recv_buf_size = sizeof(struct qrtr_ctrl_pkt);
+
+ qmi->recv_buf_size = recv_buf_size;
+ qmi->recv_buf = kzalloc(recv_buf_size, GFP_KERNEL);
+ if (!qmi->recv_buf)
+ return -ENOMEM;
+
+ qmi->wq = alloc_workqueue("qmi_msg_handler", WQ_UNBOUND, 1);
+ if (!qmi->wq) {
+ ret = -ENOMEM;
+ goto err_free_recv_buf;
+ }
+
+ qmi->sock = qmi_sock_create(qmi, &qmi->sq);
+ if (IS_ERR(qmi->sock)) {
+ if (PTR_ERR(qmi->sock) == -EAFNOSUPPORT) {
+ ret = -EPROBE_DEFER;
+ } else {
+ pr_err("failed to create QMI socket\n");
+ ret = PTR_ERR(qmi->sock);
+ }
+ goto err_destroy_wq;
+ }
+
+ return 0;
+
+err_destroy_wq:
+ destroy_workqueue(qmi->wq);
+err_free_recv_buf:
+ kfree(qmi->recv_buf);
+
+ return ret;
+}
+EXPORT_SYMBOL(qmi_handle_init);
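+
+/*
+ * Usage sketch (editorial illustration, not part of this file): a client
+ * that wants unsolicited indications registers a handler table when it
+ * creates the handle. The identifiers MY_IND_MSG_ID, my_ind_msg,
+ * my_ind_msg_ei, my_ind_cb, my_qmi and MY_MAX_MSG_LEN are hypothetical
+ * placeholders that a real service definition would provide.
+ *
+ *	static const struct qmi_msg_handler my_handlers[] = {
+ *		{
+ *			.type = QMI_INDICATION,
+ *			.msg_id = MY_IND_MSG_ID,
+ *			.ei = my_ind_msg_ei,
+ *			.decoded_size = sizeof(struct my_ind_msg),
+ *			.fn = my_ind_cb,
+ *		},
+ *		{}
+ *	};
+ *
+ *	ret = qmi_handle_init(&my_qmi, MY_MAX_MSG_LEN, NULL, my_handlers);
+ */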
+
+/**
+ * qmi_handle_release() - release the QMI client handle
+ * @qmi: QMI client handle
+ *
+ * This closes the underlying socket and stops any handling of QMI messages.
+ */
+void qmi_handle_release(struct qmi_handle *qmi)
+{
+ struct socket *sock = qmi->sock;
+ struct qmi_service *svc, *tmp;
+
+ sock->sk->sk_user_data = NULL;
+ cancel_work_sync(&qmi->work);
+
+ qmi_recv_del_server(qmi, -1, -1);
+
+ mutex_lock(&qmi->sock_lock);
+ sock_release(sock);
+ qmi->sock = NULL;
+ mutex_unlock(&qmi->sock_lock);
+
+ destroy_workqueue(qmi->wq);
+
+ idr_destroy(&qmi->txns);
+
+ kfree(qmi->recv_buf);
+
+ /* Free registered lookup requests */
+ list_for_each_entry_safe(svc, tmp, &qmi->lookups, list_node) {
+ list_del(&svc->list_node);
+ kfree(svc);
+ }
+
+ /* Free registered service information */
+ list_for_each_entry_safe(svc, tmp, &qmi->services, list_node) {
+ list_del(&svc->list_node);
+ kfree(svc);
+ }
+}
+EXPORT_SYMBOL(qmi_handle_release);
+
+/**
+ * qmi_send_message() - send a QMI message
+ * @qmi: QMI client handle
+ * @sq: destination sockaddr
+ * @txn: transaction object to use for the message
+ * @type: type of message to send
+ * @msg_id: message id
+ * @len: max length of the QMI message
+ * @ei: QMI message description
+ * @c_struct: object to be encoded
+ *
+ * This function encodes @c_struct using @ei into a message of type @type,
+ * with @msg_id and @txn into a buffer of maximum size @len, and sends this to
+ * @sq.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static ssize_t qmi_send_message(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq, struct qmi_txn *txn,
+ int type, int msg_id, size_t len,
+ const struct qmi_elem_info *ei,
+ const void *c_struct)
+{
+ struct msghdr msghdr = {};
+ struct kvec iv;
+ void *msg;
+ int ret;
+
+ msg = qmi_encode_message(type,
+ msg_id, &len,
+ txn->id, ei,
+ c_struct);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ iv.iov_base = msg;
+ iv.iov_len = len;
+
+ if (sq) {
+ msghdr.msg_name = sq;
+ msghdr.msg_namelen = sizeof(*sq);
+ }
+
+ mutex_lock(&qmi->sock_lock);
+ if (qmi->sock) {
+ ret = kernel_sendmsg(qmi->sock, &msghdr, &iv, 1, len);
+ if (ret < 0)
+ pr_err("failed to send QMI message\n");
+ } else {
+ ret = -EPIPE;
+ }
+ mutex_unlock(&qmi->sock_lock);
+
+ kfree(msg);
+
+ return ret < 0 ? ret : 0;
+}
+
+/**
+ * qmi_send_request() - send a request QMI message
+ * @qmi: QMI client handle
+ * @sq: destination sockaddr
+ * @txn: transaction object to use for the message
+ * @msg_id: message id
+ * @len: max length of the QMI message
+ * @ei: QMI message description
+ * @c_struct: object to be encoded
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+ssize_t qmi_send_request(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, int msg_id, size_t len,
+ const struct qmi_elem_info *ei, const void *c_struct)
+{
+ return qmi_send_message(qmi, sq, txn, QMI_REQUEST, msg_id, len, ei,
+ c_struct);
+}
+EXPORT_SYMBOL(qmi_send_request);
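+
+/*
+ * Usage sketch (editorial illustration, not part of this file): given an
+ * initialized struct qmi_handle *qmi, a blocking request/response exchange
+ * combines qmi_txn_init(), qmi_send_request() and qmi_txn_wait(). The
+ * request/response structs, their qmi_elem_info tables, the MY_* constants
+ * and the server address svc_sq (as reported by the new_server callback)
+ * are hypothetical placeholders.
+ *
+ *	struct my_resp resp = {};
+ *	struct my_req req = { .enable = 1 };
+ *	struct qmi_txn txn;
+ *	int ret;
+ *
+ *	ret = qmi_txn_init(qmi, &txn, my_resp_ei, &resp);
+ *	if (ret < 0)
+ *		return ret;
+ *
+ *	ret = qmi_send_request(qmi, &svc_sq, &txn, MY_REQ_MSG_ID,
+ *			       MY_REQ_MAX_LEN, my_req_ei, &req);
+ *	if (ret < 0) {
+ *		qmi_txn_cancel(&txn);
+ *		return ret;
+ *	}
+ *
+ *	ret = qmi_txn_wait(&txn, 5 * HZ);
+ */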
+
+/**
+ * qmi_send_response() - send a response QMI message
+ * @qmi: QMI client handle
+ * @sq: destination sockaddr
+ * @txn: transaction object to use for the message
+ * @msg_id: message id
+ * @len: max length of the QMI message
+ * @ei: QMI message description
+ * @c_struct: object to be encoded
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+ssize_t qmi_send_response(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, int msg_id, size_t len,
+ const struct qmi_elem_info *ei, const void *c_struct)
+{
+ return qmi_send_message(qmi, sq, txn, QMI_RESPONSE, msg_id, len, ei,
+ c_struct);
+}
+EXPORT_SYMBOL(qmi_send_response);
+
+/**
+ * qmi_send_indication() - send an indication QMI message
+ * @qmi: QMI client handle
+ * @sq: destination sockaddr
+ * @msg_id: message id
+ * @len: max length of the QMI message
+ * @ei: QMI message description
+ * @c_struct: object to be encoded
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+ssize_t qmi_send_indication(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
+ int msg_id, size_t len,
+ const struct qmi_elem_info *ei,
+ const void *c_struct)
+{
+ struct qmi_txn txn;
+ ssize_t rval;
+ int ret;
+
+ ret = qmi_txn_init(qmi, &txn, NULL, NULL);
+ if (ret < 0)
+ return ret;
+
+ rval = qmi_send_message(qmi, sq, &txn, QMI_INDICATION, msg_id, len, ei,
+ c_struct);
+
+ /* We don't care about future messages on this txn */
+ qmi_txn_cancel(&txn);
+
+ return rval;
+}
+EXPORT_SYMBOL(qmi_send_indication);
diff --git a/drivers/soc/qcom/rmtfs_mem.c b/drivers/soc/qcom/rmtfs_mem.c
new file mode 100644
index 000000000..0feaae357
--- /dev/null
+++ b/drivers/soc/qcom/rmtfs_mem.c
@@ -0,0 +1,340 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017 Linaro Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/cdev.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/qcom_scm.h>
+
+#define QCOM_RMTFS_MEM_DEV_MAX (MINORMASK + 1)
+
+static dev_t qcom_rmtfs_mem_major;
+
+struct qcom_rmtfs_mem {
+ struct device dev;
+ struct cdev cdev;
+
+ void *base;
+ phys_addr_t addr;
+ phys_addr_t size;
+
+ unsigned int client_id;
+
+ unsigned int perms;
+};
+
+static ssize_t qcom_rmtfs_mem_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+static DEVICE_ATTR(phys_addr, 0444, qcom_rmtfs_mem_show, NULL);
+static DEVICE_ATTR(size, 0444, qcom_rmtfs_mem_show, NULL);
+static DEVICE_ATTR(client_id, 0444, qcom_rmtfs_mem_show, NULL);
+
+static ssize_t qcom_rmtfs_mem_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = container_of(dev,
+ struct qcom_rmtfs_mem,
+ dev);
+
+ if (attr == &dev_attr_phys_addr)
+ return sprintf(buf, "%pa\n", &rmtfs_mem->addr);
+ if (attr == &dev_attr_size)
+ return sprintf(buf, "%pa\n", &rmtfs_mem->size);
+ if (attr == &dev_attr_client_id)
+ return sprintf(buf, "%d\n", rmtfs_mem->client_id);
+
+ return -EINVAL;
+}
+
+static struct attribute *qcom_rmtfs_mem_attrs[] = {
+ &dev_attr_phys_addr.attr,
+ &dev_attr_size.attr,
+ &dev_attr_client_id.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(qcom_rmtfs_mem);
+
+static int qcom_rmtfs_mem_open(struct inode *inode, struct file *filp)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = container_of(inode->i_cdev,
+ struct qcom_rmtfs_mem,
+ cdev);
+
+ get_device(&rmtfs_mem->dev);
+ filp->private_data = rmtfs_mem;
+
+ return 0;
+}
+static ssize_t qcom_rmtfs_mem_read(struct file *filp,
+ char __user *buf, size_t count, loff_t *f_pos)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = filp->private_data;
+
+ if (*f_pos >= rmtfs_mem->size)
+ return 0;
+
+ if (*f_pos + count >= rmtfs_mem->size)
+ count = rmtfs_mem->size - *f_pos;
+
+ if (copy_to_user(buf, rmtfs_mem->base + *f_pos, count))
+ return -EFAULT;
+
+ *f_pos += count;
+ return count;
+}
+
+static ssize_t qcom_rmtfs_mem_write(struct file *filp,
+ const char __user *buf, size_t count,
+ loff_t *f_pos)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = filp->private_data;
+
+ if (*f_pos >= rmtfs_mem->size)
+ return 0;
+
+ if (*f_pos + count >= rmtfs_mem->size)
+ count = rmtfs_mem->size - *f_pos;
+
+ if (copy_from_user(rmtfs_mem->base + *f_pos, buf, count))
+ return -EFAULT;
+
+ *f_pos += count;
+ return count;
+}
+
+static int qcom_rmtfs_mem_release(struct inode *inode, struct file *filp)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = filp->private_data;
+
+ put_device(&rmtfs_mem->dev);
+
+ return 0;
+}
+
+static struct class rmtfs_class = {
+ .owner = THIS_MODULE,
+ .name = "rmtfs",
+};
+
+static int qcom_rmtfs_mem_mmap(struct file *filep, struct vm_area_struct *vma)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = filep->private_data;
+
+ if (vma->vm_end - vma->vm_start > rmtfs_mem->size) {
+ dev_dbg(&rmtfs_mem->dev,
+ "vm_end[%lu] - vm_start[%lu] [%lu] > mem->size[%pa]\n",
+ vma->vm_end, vma->vm_start,
+ (vma->vm_end - vma->vm_start), &rmtfs_mem->size);
+ return -EINVAL;
+ }
+
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ return remap_pfn_range(vma,
+ vma->vm_start,
+ rmtfs_mem->addr >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+}
+
+static const struct file_operations qcom_rmtfs_mem_fops = {
+ .owner = THIS_MODULE,
+ .open = qcom_rmtfs_mem_open,
+ .read = qcom_rmtfs_mem_read,
+ .write = qcom_rmtfs_mem_write,
+ .release = qcom_rmtfs_mem_release,
+ .llseek = default_llseek,
+ .mmap = qcom_rmtfs_mem_mmap,
+};
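+
+/*
+ * Userspace sketch (editorial illustration; the device node name below is an
+ * assumption based on the "qcom_rmtfs_mem%d" device name): the remote
+ * filesystem daemon typically maps the whole region read/write and uses the
+ * sysfs "size" and "client_id" attributes to discover its geometry.
+ *
+ *	int fd = open("/dev/qcom_rmtfs_mem1", O_RDWR);
+ *	void *base = mmap(NULL, region_size, PROT_READ | PROT_WRITE,
+ *			  MAP_SHARED, fd, 0);
+ */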
+
+static void qcom_rmtfs_mem_release_device(struct device *dev)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = container_of(dev,
+ struct qcom_rmtfs_mem,
+ dev);
+
+ kfree(rmtfs_mem);
+}
+
+static int qcom_rmtfs_mem_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct qcom_scm_vmperm perms[2];
+ struct reserved_mem *rmem;
+ struct qcom_rmtfs_mem *rmtfs_mem;
+ u32 client_id;
+ u32 vmid;
+ int ret;
+
+ rmem = of_reserved_mem_lookup(node);
+ if (!rmem) {
+ dev_err(&pdev->dev, "failed to acquire memory region\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "qcom,client-id", &client_id);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to parse \"qcom,client-id\"\n");
+ return ret;
+
+ }
+
+ rmtfs_mem = kzalloc(sizeof(*rmtfs_mem), GFP_KERNEL);
+ if (!rmtfs_mem)
+ return -ENOMEM;
+
+ rmtfs_mem->addr = rmem->base;
+ rmtfs_mem->client_id = client_id;
+ rmtfs_mem->size = rmem->size;
+
+ device_initialize(&rmtfs_mem->dev);
+ rmtfs_mem->dev.parent = &pdev->dev;
+ rmtfs_mem->dev.groups = qcom_rmtfs_mem_groups;
+ rmtfs_mem->dev.release = qcom_rmtfs_mem_release_device;
+
+ rmtfs_mem->base = devm_memremap(&rmtfs_mem->dev, rmtfs_mem->addr,
+ rmtfs_mem->size, MEMREMAP_WC);
+ if (IS_ERR(rmtfs_mem->base)) {
+ dev_err(&pdev->dev, "failed to remap rmtfs_mem region\n");
+ ret = PTR_ERR(rmtfs_mem->base);
+ goto put_device;
+ }
+
+ cdev_init(&rmtfs_mem->cdev, &qcom_rmtfs_mem_fops);
+ rmtfs_mem->cdev.owner = THIS_MODULE;
+
+ dev_set_name(&rmtfs_mem->dev, "qcom_rmtfs_mem%d", client_id);
+ rmtfs_mem->dev.id = client_id;
+ rmtfs_mem->dev.class = &rmtfs_class;
+ rmtfs_mem->dev.devt = MKDEV(MAJOR(qcom_rmtfs_mem_major), client_id);
+
+ ret = cdev_device_add(&rmtfs_mem->cdev, &rmtfs_mem->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add cdev: %d\n", ret);
+ goto put_device;
+ }
+
+ ret = of_property_read_u32(node, "qcom,vmid", &vmid);
+ if (ret < 0 && ret != -EINVAL) {
+ dev_err(&pdev->dev, "failed to parse qcom,vmid\n");
+ goto remove_cdev;
+ } else if (!ret) {
+ if (!qcom_scm_is_available()) {
+ ret = -EPROBE_DEFER;
+ goto remove_cdev;
+ }
+
+ perms[0].vmid = QCOM_SCM_VMID_HLOS;
+ perms[0].perm = QCOM_SCM_PERM_RW;
+ perms[1].vmid = vmid;
+ perms[1].perm = QCOM_SCM_PERM_RW;
+
+ rmtfs_mem->perms = BIT(QCOM_SCM_VMID_HLOS);
+ ret = qcom_scm_assign_mem(rmtfs_mem->addr, rmtfs_mem->size,
+ &rmtfs_mem->perms, perms, 2);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "assign memory failed\n");
+ goto remove_cdev;
+ }
+ }
+
+ dev_set_drvdata(&pdev->dev, rmtfs_mem);
+
+ return 0;
+
+remove_cdev:
+ cdev_device_del(&rmtfs_mem->cdev, &rmtfs_mem->dev);
+put_device:
+ put_device(&rmtfs_mem->dev);
+
+ return ret;
+}
+
+static int qcom_rmtfs_mem_remove(struct platform_device *pdev)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = dev_get_drvdata(&pdev->dev);
+ struct qcom_scm_vmperm perm;
+
+ if (rmtfs_mem->perms) {
+ perm.vmid = QCOM_SCM_VMID_HLOS;
+ perm.perm = QCOM_SCM_PERM_RW;
+
+ qcom_scm_assign_mem(rmtfs_mem->addr, rmtfs_mem->size,
+ &rmtfs_mem->perms, &perm, 1);
+ }
+
+ cdev_device_del(&rmtfs_mem->cdev, &rmtfs_mem->dev);
+ put_device(&rmtfs_mem->dev);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_rmtfs_mem_of_match[] = {
+ { .compatible = "qcom,rmtfs-mem" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_rmtfs_mem_of_match);
+
+static struct platform_driver qcom_rmtfs_mem_driver = {
+ .probe = qcom_rmtfs_mem_probe,
+ .remove = qcom_rmtfs_mem_remove,
+ .driver = {
+ .name = "qcom_rmtfs_mem",
+ .of_match_table = qcom_rmtfs_mem_of_match,
+ },
+};
+
+static int __init qcom_rmtfs_mem_init(void)
+{
+ int ret;
+
+ ret = class_register(&rmtfs_class);
+ if (ret)
+ return ret;
+
+ ret = alloc_chrdev_region(&qcom_rmtfs_mem_major, 0,
+ QCOM_RMTFS_MEM_DEV_MAX, "qcom_rmtfs_mem");
+ if (ret < 0) {
+ pr_err("qcom_rmtfs_mem: failed to allocate char dev region\n");
+ goto unregister_class;
+ }
+
+ ret = platform_driver_register(&qcom_rmtfs_mem_driver);
+ if (ret < 0) {
+ pr_err("qcom_rmtfs_mem: failed to register rmtfs_mem driver\n");
+ goto unregister_chrdev;
+ }
+
+ return 0;
+
+unregister_chrdev:
+ unregister_chrdev_region(qcom_rmtfs_mem_major, QCOM_RMTFS_MEM_DEV_MAX);
+unregister_class:
+ class_unregister(&rmtfs_class);
+ return ret;
+}
+module_init(qcom_rmtfs_mem_init);
+
+static void __exit qcom_rmtfs_mem_exit(void)
+{
+ platform_driver_unregister(&qcom_rmtfs_mem_driver);
+ unregister_chrdev_region(qcom_rmtfs_mem_major, QCOM_RMTFS_MEM_DEV_MAX);
+ class_unregister(&rmtfs_class);
+}
+module_exit(qcom_rmtfs_mem_exit);
+
+MODULE_AUTHOR("Linaro Ltd");
+MODULE_DESCRIPTION("Qualcomm Remote Filesystem memory driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/rpmh-internal.h b/drivers/soc/qcom/rpmh-internal.h
new file mode 100644
index 000000000..344ba687c
--- /dev/null
+++ b/drivers/soc/qcom/rpmh-internal.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+
+#ifndef __RPM_INTERNAL_H__
+#define __RPM_INTERNAL_H__
+
+#include <linux/bitmap.h>
+#include <linux/wait.h>
+#include <soc/qcom/tcs.h>
+
+#define TCS_TYPE_NR 4
+#define MAX_CMDS_PER_TCS 16
+#define MAX_TCS_PER_TYPE 3
+#define MAX_TCS_NR (MAX_TCS_PER_TYPE * TCS_TYPE_NR)
+#define MAX_TCS_SLOTS (MAX_CMDS_PER_TCS * MAX_TCS_PER_TYPE)
+
+struct rsc_drv;
+
+/**
+ * struct tcs_group: group of Trigger Command Sets (TCS) to send state requests
+ * to the controller
+ *
+ * @drv: The controller.
+ * @type: Type of the TCS in this group - active, sleep, wake.
+ * @mask: Mask of the TCSes relative to all the TCSes in the RSC.
+ * @offset: Start of the TCS group relative to the TCSes in the RSC.
+ * @num_tcs: Number of TCSes in this type.
+ * @ncpt: Number of commands in each TCS.
+ * @req: Requests that are sent from the TCS; only used for ACTIVE_ONLY
+ * transfers (could be on a wake/sleep TCS if we are borrowing for
+ * an ACTIVE_ONLY transfer).
+ * Start: grab drv->lock, set req, set tcs_in_use, drop drv->lock,
+ * trigger
+ * End: get irq, access req,
+ * grab drv->lock, clear tcs_in_use, drop drv->lock
+ * @slots: Indicates which command slots in the group are occupied; only
+ * used for SLEEP / WAKE TCSs. Things are tightly packed in the
+ * case that (ncpt < MAX_CMDS_PER_TCS). That is, if ncpt = 2 and
+ * MAX_CMDS_PER_TCS = 16, then bit[2] is the first bit of the 2nd TCS.
+ */
+struct tcs_group {
+ struct rsc_drv *drv;
+ int type;
+ u32 mask;
+ u32 offset;
+ int num_tcs;
+ int ncpt;
+ const struct tcs_request *req[MAX_TCS_PER_TYPE];
+ DECLARE_BITMAP(slots, MAX_TCS_SLOTS);
+};
+
+/**
+ * struct rpmh_request: the message to be sent to rpmh-rsc
+ *
+ * @msg: the request
+ * @cmd: the payload that will be part of the @msg
+ * @completion: triggered when request is done
+ * @dev: the device making the request
+ * @err: err return from the controller
+ * @needs_free: check to free dynamically allocated request object
+ */
+struct rpmh_request {
+ struct tcs_request msg;
+ struct tcs_cmd cmd[MAX_RPMH_PAYLOAD];
+ struct completion *completion;
+ const struct device *dev;
+ int err;
+ bool needs_free;
+};
+
+/**
+ * struct rpmh_ctrlr: our representation of the controller
+ *
+ * @cache: the list of cached requests
+ * @cache_lock: synchronize access to the cache data
+ * @dirty: was the cache updated since flush
+ * @batch_cache: Cache sleep and wake requests sent as batch
+ */
+struct rpmh_ctrlr {
+ struct list_head cache;
+ spinlock_t cache_lock;
+ bool dirty;
+ struct list_head batch_cache;
+};
+
+/**
+ * struct rsc_drv: the Direct Resource Voter (DRV) of the
+ * Resource State Coordinator controller (RSC)
+ *
+ * @name: Controller identifier.
+ * @tcs_base: Start address of the TCS registers in this controller.
+ * @id: Instance id in the controller (Direct Resource Voter).
+ * @num_tcs: Number of TCSes in this DRV.
+ * @rsc_pm: CPU PM notifier for controller.
+ * Used when solver mode is not present.
+ * @cpus_in_pm: Number of CPUs that have entered idle power collapse (CPU PM).
+ * Used when solver mode is not present.
+ * @tcs: TCS groups.
+ * @tcs_in_use: S/W state of the TCS; only set for ACTIVE_ONLY
+ * transfers, but might show a sleep/wake TCS in use if
+ * it was borrowed for an active_only transfer. You
+ * must hold the lock in this struct (AKA drv->lock) in
+ * order to update this.
+ * @lock: Synchronize state of the controller. If RPMH's cache
+ * lock will also be held, the order is: drv->lock then
+ * cache_lock.
+ * @tcs_wait: Wait queue used to wait for @tcs_in_use to free up a
+ * slot
+ * @client: Handle to the DRV's client.
+ */
+struct rsc_drv {
+ const char *name;
+ void __iomem *tcs_base;
+ int id;
+ int num_tcs;
+ struct notifier_block rsc_pm;
+ atomic_t cpus_in_pm;
+ struct tcs_group tcs[TCS_TYPE_NR];
+ DECLARE_BITMAP(tcs_in_use, MAX_TCS_NR);
+ spinlock_t lock;
+ wait_queue_head_t tcs_wait;
+ struct rpmh_ctrlr client;
+};
+
+int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg);
+int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv,
+ const struct tcs_request *msg);
+void rpmh_rsc_invalidate(struct rsc_drv *drv);
+
+void rpmh_tx_done(const struct tcs_request *msg, int r);
+int rpmh_flush(struct rpmh_ctrlr *ctrlr);
+
+#endif /* __RPM_INTERNAL_H__ */
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
new file mode 100644
index 000000000..01c2f50cb
--- /dev/null
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -0,0 +1,1013 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME
+
+#include <linux/atomic.h>
+#include <linux/cpu_pm.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+
+#include <soc/qcom/cmd-db.h>
+#include <soc/qcom/tcs.h>
+#include <dt-bindings/soc/qcom,rpmh-rsc.h>
+
+#include "rpmh-internal.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace-rpmh.h"
+
+#define RSC_DRV_TCS_OFFSET 672
+#define RSC_DRV_CMD_OFFSET 20
+
+/* DRV HW Solver Configuration Information Register */
+#define DRV_SOLVER_CONFIG 0x04
+#define DRV_HW_SOLVER_MASK 1
+#define DRV_HW_SOLVER_SHIFT 24
+
+/* DRV TCS Configuration Information Register */
+#define DRV_PRNT_CHLD_CONFIG 0x0C
+#define DRV_NUM_TCS_MASK 0x3F
+#define DRV_NUM_TCS_SHIFT 6
+#define DRV_NCPT_MASK 0x1F
+#define DRV_NCPT_SHIFT 27
+
+/* Offsets for common TCS Registers, one bit per TCS */
+#define RSC_DRV_IRQ_ENABLE 0x00
+#define RSC_DRV_IRQ_STATUS 0x04
+#define RSC_DRV_IRQ_CLEAR 0x08 /* w/o; write 1 to clear */
+
+/*
+ * Offsets for per TCS Registers.
+ *
+ * TCSes start at 0x10 from tcs_base and are stored one after another.
+ * Multiply tcs_id by RSC_DRV_TCS_OFFSET to find a given TCS and add one
+ * of the below to find a register.
+ */
+#define RSC_DRV_CMD_WAIT_FOR_CMPL 0x10 /* 1 bit per command */
+#define RSC_DRV_CONTROL 0x14
+#define RSC_DRV_STATUS 0x18 /* zero if tcs is busy */
+#define RSC_DRV_CMD_ENABLE 0x1C /* 1 bit per command */
+
+/*
+ * Offsets for per command in a TCS.
+ *
+ * Commands (up to 16) start at 0x30 in a TCS; multiply command index
+ * by RSC_DRV_CMD_OFFSET and add one of the below to find a register.
+ */
+#define RSC_DRV_CMD_MSGID 0x30
+#define RSC_DRV_CMD_ADDR 0x34
+#define RSC_DRV_CMD_DATA 0x38
+#define RSC_DRV_CMD_STATUS 0x3C
+#define RSC_DRV_CMD_RESP_DATA 0x40
+
+#define TCS_AMC_MODE_ENABLE BIT(16)
+#define TCS_AMC_MODE_TRIGGER BIT(24)
+
+/* TCS CMD register bit mask */
+#define CMD_MSGID_LEN 8
+#define CMD_MSGID_RESP_REQ BIT(8)
+#define CMD_MSGID_WRITE BIT(16)
+#define CMD_STATUS_ISSUED BIT(8)
+#define CMD_STATUS_COMPL BIT(16)
+
+/*
+ * Here's a high level overview of how all the registers in RPMH work
+ * together:
+ *
+ * - The main rpmh-rsc address is the base of a register space that can
+ * be used to find overall configuration of the hardware
+ * (DRV_PRNT_CHLD_CONFIG). Also found within the rpmh-rsc register
+ * space are all the TCS blocks. The offset of the TCS blocks is
+ * specified in the device tree by "qcom,tcs-offset" and used to
+ * compute tcs_base.
+ * - TCS blocks come one after another. Type, count, and order are
+ * specified by the device tree as "qcom,tcs-config".
+ * - Each TCS block has some registers, then space for up to 16 commands.
+ * Note that though address space is reserved for 16 commands, fewer
+ * might be present. See ncpt (num cmds per TCS).
+ *
+ * Here's a picture:
+ *
+ * +---------------------------------------------------+
+ * |RSC |
+ * | ctrl |
+ * | |
+ * | Drvs: |
+ * | +-----------------------------------------------+ |
+ * | |DRV0 | |
+ * | | ctrl/config | |
+ * | | IRQ | |
+ * | | | |
+ * | | TCSes: | |
+ * | | +------------------------------------------+ | |
+ * | | |TCS0 | | | | | | | | | | | | | | |
+ * | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | |
+ * | | | | | | | | | | | | | | | | | |
+ * | | +------------------------------------------+ | |
+ * | | +------------------------------------------+ | |
+ * | | |TCS1 | | | | | | | | | | | | | | |
+ * | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | |
+ * | | | | | | | | | | | | | | | | | |
+ * | | +------------------------------------------+ | |
+ * | | +------------------------------------------+ | |
+ * | | |TCS2 | | | | | | | | | | | | | | |
+ * | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | |
+ * | | | | | | | | | | | | | | | | | |
+ * | | +------------------------------------------+ | |
+ * | | ...... | |
+ * | +-----------------------------------------------+ |
+ * | +-----------------------------------------------+ |
+ * | |DRV1 | |
+ * | | (same as DRV0) | |
+ * | +-----------------------------------------------+ |
+ * | ...... |
+ * +---------------------------------------------------+
+ */
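+
+/*
+ * Worked example (editorial note, not in the original source): using the
+ * offsets defined above, the ADDR register of command 3 in TCS 2 lives at
+ * tcs_base + 2 * RSC_DRV_TCS_OFFSET + 3 * RSC_DRV_CMD_OFFSET +
+ * RSC_DRV_CMD_ADDR = tcs_base + 1344 + 60 + 0x34, which is exactly what
+ * tcs_cmd_addr() below computes.
+ */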
+
+static inline void __iomem *
+tcs_reg_addr(const struct rsc_drv *drv, int reg, int tcs_id)
+{
+ return drv->tcs_base + RSC_DRV_TCS_OFFSET * tcs_id + reg;
+}
+
+static inline void __iomem *
+tcs_cmd_addr(const struct rsc_drv *drv, int reg, int tcs_id, int cmd_id)
+{
+ return tcs_reg_addr(drv, reg, tcs_id) + RSC_DRV_CMD_OFFSET * cmd_id;
+}
+
+static u32 read_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id,
+ int cmd_id)
+{
+ return readl_relaxed(tcs_cmd_addr(drv, reg, tcs_id, cmd_id));
+}
+
+static u32 read_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id)
+{
+ return readl_relaxed(tcs_reg_addr(drv, reg, tcs_id));
+}
+
+static void write_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id,
+ int cmd_id, u32 data)
+{
+ writel_relaxed(data, tcs_cmd_addr(drv, reg, tcs_id, cmd_id));
+}
+
+static void write_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id,
+ u32 data)
+{
+ writel_relaxed(data, tcs_reg_addr(drv, reg, tcs_id));
+}
+
+static void write_tcs_reg_sync(const struct rsc_drv *drv, int reg, int tcs_id,
+ u32 data)
+{
+ int i;
+
+ writel(data, tcs_reg_addr(drv, reg, tcs_id));
+
+ /*
+ * Wait until we read back the same value. Use a counter rather than
+ * ktime for timeout since this may be called after timekeeping stops.
+ */
+ for (i = 0; i < USEC_PER_SEC; i++) {
+ if (readl(tcs_reg_addr(drv, reg, tcs_id)) == data)
+ return;
+ udelay(1);
+ }
+ pr_err("%s: error writing %#x to %d:%#x\n", drv->name,
+ data, tcs_id, reg);
+}
+
+/**
+ * tcs_invalidate() - Invalidate all TCSes of the given type (sleep or wake).
+ * @drv: The RSC controller.
+ * @type: SLEEP_TCS or WAKE_TCS
+ *
+ * This will clear the "slots" variable of the given tcs_group and also
+ * tell the hardware to forget about all entries.
+ *
+ * The caller must ensure that no other RPMH actions are happening when this
+ * function is called, since otherwise the device may immediately become
+ * used again even before this function exits.
+ */
+static void tcs_invalidate(struct rsc_drv *drv, int type)
+{
+ int m;
+ struct tcs_group *tcs = &drv->tcs[type];
+
+ /* Caller ensures nobody else is running so no lock */
+ if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS))
+ return;
+
+ for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++)
+ write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, m, 0);
+
+ bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
+}
+
+/**
+ * rpmh_rsc_invalidate() - Invalidate sleep and wake TCSes.
+ * @drv: The RSC controller.
+ *
+ * The caller must ensure that no other RPMH actions are happening when this
+ * function is called, since otherwise the device may immediately become
+ * used again even before this function exits.
+ */
+void rpmh_rsc_invalidate(struct rsc_drv *drv)
+{
+ tcs_invalidate(drv, SLEEP_TCS);
+ tcs_invalidate(drv, WAKE_TCS);
+}
+
+/**
+ * get_tcs_for_msg() - Get the tcs_group used to send the given message.
+ * @drv: The RSC controller.
+ * @msg: The message we want to send.
+ *
+ * This is normally pretty straightforward except if we are trying to send
+ * an ACTIVE_ONLY message but don't have any active_only TCSes.
+ *
+ * Return: A pointer to a tcs_group or an ERR_PTR.
+ */
+static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv,
+ const struct tcs_request *msg)
+{
+ int type;
+ struct tcs_group *tcs;
+
+ switch (msg->state) {
+ case RPMH_ACTIVE_ONLY_STATE:
+ type = ACTIVE_TCS;
+ break;
+ case RPMH_WAKE_ONLY_STATE:
+ type = WAKE_TCS;
+ break;
+ case RPMH_SLEEP_STATE:
+ type = SLEEP_TCS;
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+ * If we are making an active request on a RSC that does not have a
+ * dedicated TCS for active state use, then re-purpose a wake TCS to
+ * send active votes. This is safe because we ensure any active-only
+ * transfers have finished before we use it (maybe by running from
+ * the last CPU in PM code).
+ */
+ tcs = &drv->tcs[type];
+ if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs)
+ tcs = &drv->tcs[WAKE_TCS];
+
+ return tcs;
+}
+
+/**
+ * get_req_from_tcs() - Get a stashed request that was xfering on the given TCS.
+ * @drv: The RSC controller.
+ * @tcs_id: The global ID of this TCS.
+ *
+ * For ACTIVE_ONLY transfers we want to call back into the client when the
+ * transfer finishes. To do this we need the "request" that the client
+ * originally provided us. This function grabs the request that we stashed
+ * when we started the transfer.
+ *
+ * This only makes sense for ACTIVE_ONLY transfers since those are the only
+ * ones we track sending (the only ones we enable interrupts for and the only
+ * ones we call back to the client for).
+ *
+ * Return: The stashed request.
+ */
+static const struct tcs_request *get_req_from_tcs(struct rsc_drv *drv,
+ int tcs_id)
+{
+ struct tcs_group *tcs;
+ int i;
+
+ for (i = 0; i < TCS_TYPE_NR; i++) {
+ tcs = &drv->tcs[i];
+ if (tcs->mask & BIT(tcs_id))
+ return tcs->req[tcs_id - tcs->offset];
+ }
+
+ return NULL;
+}
+
+/**
+ * __tcs_set_trigger() - Start xfer on a TCS or unset trigger on a borrowed TCS
+ * @drv: The controller.
+ * @tcs_id: The global ID of this TCS.
+ * @trigger: If true then untrigger/retrigger. If false then just untrigger.
+ *
+ * In the normal case we only ever call with "trigger=true" to start a
+ * transfer. That will un-trigger/disable the TCS from the last transfer
+ * then trigger/enable for this transfer.
+ *
+ * If we borrowed a wake TCS for an active-only transfer we'll also call
+ * this function with "trigger=false" to just do the un-trigger/disable
+ * before using the TCS for wake purposes again.
+ *
+ * Note that the AP is only in charge of triggering active-only transfers.
+ * The AP never triggers sleep/wake values using this function.
+ */
+static void __tcs_set_trigger(struct rsc_drv *drv, int tcs_id, bool trigger)
+{
+ u32 enable;
+
+ /*
+ * HW req: Clear the DRV_CONTROL and enable TCS again
+ * While clearing ensure that the AMC mode trigger is cleared
+ * and then the mode enable is cleared.
+ */
+ enable = read_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id);
+ enable &= ~TCS_AMC_MODE_TRIGGER;
+ write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
+ enable &= ~TCS_AMC_MODE_ENABLE;
+ write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
+
+ if (trigger) {
+ /* Enable the AMC mode on the TCS and then trigger the TCS */
+ enable = TCS_AMC_MODE_ENABLE;
+ write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
+ enable |= TCS_AMC_MODE_TRIGGER;
+ write_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id, enable);
+ }
+}
+
+/**
+ * enable_tcs_irq() - Enable or disable interrupts on the given TCS.
+ * @drv: The controller.
+ * @tcs_id: The global ID of this TCS.
+ * @enable: If true then enable; if false then disable
+ *
+ * We only ever call this when we borrow a wake TCS for an active-only
+ * transfer. For active-only TCSes interrupts are always left enabled.
+ */
+static void enable_tcs_irq(struct rsc_drv *drv, int tcs_id, bool enable)
+{
+ u32 data;
+
+ data = readl_relaxed(drv->tcs_base + RSC_DRV_IRQ_ENABLE);
+ if (enable)
+ data |= BIT(tcs_id);
+ else
+ data &= ~BIT(tcs_id);
+ writel_relaxed(data, drv->tcs_base + RSC_DRV_IRQ_ENABLE);
+}
+
+/**
+ * tcs_tx_done() - TX Done interrupt handler.
+ * @irq: The IRQ number (ignored).
+ * @p: Pointer to "struct rsc_drv".
+ *
+ * Called for ACTIVE_ONLY transfers (those are the only ones we enable the
+ * IRQ for) when a transfer is done.
+ *
+ * Return: IRQ_HANDLED
+ */
+static irqreturn_t tcs_tx_done(int irq, void *p)
+{
+ struct rsc_drv *drv = p;
+ int i, j, err = 0;
+ unsigned long irq_status;
+ const struct tcs_request *req;
+ struct tcs_cmd *cmd;
+
+ irq_status = readl_relaxed(drv->tcs_base + RSC_DRV_IRQ_STATUS);
+
+ for_each_set_bit(i, &irq_status, BITS_PER_TYPE(u32)) {
+ req = get_req_from_tcs(drv, i);
+ if (WARN_ON(!req))
+ goto skip;
+
+ err = 0;
+ for (j = 0; j < req->num_cmds; j++) {
+ u32 sts;
+
+ cmd = &req->cmds[j];
+ sts = read_tcs_cmd(drv, RSC_DRV_CMD_STATUS, i, j);
+ if (!(sts & CMD_STATUS_ISSUED) ||
+ ((req->wait_for_compl || cmd->wait) &&
+ !(sts & CMD_STATUS_COMPL))) {
+ pr_err("Incomplete request: %s: addr=%#x data=%#x",
+ drv->name, cmd->addr, cmd->data);
+ err = -EIO;
+ }
+ }
+
+ trace_rpmh_tx_done(drv, i, req, err);
+
+ /*
+ * If wake tcs was re-purposed for sending active
+ * votes, clear AMC trigger & enable modes and
+ * disable interrupt for this TCS
+ */
+ if (!drv->tcs[ACTIVE_TCS].num_tcs)
+ __tcs_set_trigger(drv, i, false);
+skip:
+ /* Reclaim the TCS */
+ write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, i, 0);
+ writel_relaxed(BIT(i), drv->tcs_base + RSC_DRV_IRQ_CLEAR);
+ spin_lock(&drv->lock);
+ clear_bit(i, drv->tcs_in_use);
+ /*
+ * Disable interrupt for WAKE TCS to avoid being
+ * spammed with interrupts coming when the solver
+ * sends its wake votes.
+ */
+ if (!drv->tcs[ACTIVE_TCS].num_tcs)
+ enable_tcs_irq(drv, i, false);
+ spin_unlock(&drv->lock);
+ wake_up(&drv->tcs_wait);
+ if (req)
+ rpmh_tx_done(req, err);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * __tcs_buffer_write() - Write to TCS hardware from a request; don't trigger.
+ * @drv: The controller.
+ * @tcs_id: The global ID of this TCS.
+ * @cmd_id: The index within the TCS to start writing.
+ * @msg: The message we want to send, which will contain several addr/data
+ * pairs to program (but few enough that they all fit in one TCS).
+ *
+ * This is used for all types of transfers (active, sleep, and wake).
+ */
+static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
+ const struct tcs_request *msg)
+{
+ u32 msgid;
+ u32 cmd_msgid = CMD_MSGID_LEN | CMD_MSGID_WRITE;
+ u32 cmd_enable = 0;
+ struct tcs_cmd *cmd;
+ int i, j;
+
+ /* Convert all commands to RR when the request has wait_for_compl set */
+ cmd_msgid |= msg->wait_for_compl ? CMD_MSGID_RESP_REQ : 0;
+
+ for (i = 0, j = cmd_id; i < msg->num_cmds; i++, j++) {
+ cmd = &msg->cmds[i];
+ cmd_enable |= BIT(j);
+ msgid = cmd_msgid;
+ /*
+ * Additionally, if the cmd->wait is set, make the command
+ * response reqd even if the overall request was fire-n-forget.
+ */
+ msgid |= cmd->wait ? CMD_MSGID_RESP_REQ : 0;
+
+ write_tcs_cmd(drv, RSC_DRV_CMD_MSGID, tcs_id, j, msgid);
+ write_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j, cmd->addr);
+ write_tcs_cmd(drv, RSC_DRV_CMD_DATA, tcs_id, j, cmd->data);
+ trace_rpmh_send_msg(drv, tcs_id, j, msgid, cmd);
+ }
+
+ cmd_enable |= read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id);
+ write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, cmd_enable);
+}
+
+/**
+ * check_for_req_inflight() - Look to see if conflicting cmds are in flight.
+ * @drv: The controller.
+ * @tcs: A pointer to the tcs_group used for ACTIVE_ONLY transfers.
+ * @msg: The message we want to send, which will contain several addr/data
+ * pairs to program (but few enough that they all fit in one TCS).
+ *
+ * This will walk through the TCSes in the group and check if any of them
+ * appear to be sending to addresses referenced in the message. If it finds
+ * one it'll return -EBUSY.
+ *
+ * Only for use for active-only transfers.
+ *
+ * Must be called with the drv->lock held since that protects tcs_in_use.
+ *
+ * Return: 0 if nothing in flight or -EBUSY if we should try again later.
+ * The caller must re-enable interrupts between tries since that's
+ * the only way tcs_in_use will ever be updated and the only way
+ * RSC_DRV_CMD_ENABLE will ever be cleared.
+ */
+static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
+ const struct tcs_request *msg)
+{
+ unsigned long curr_enabled;
+ u32 addr;
+ int j, k;
+ int i = tcs->offset;
+
+ for_each_set_bit_from(i, drv->tcs_in_use, tcs->offset + tcs->num_tcs) {
+ curr_enabled = read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, i);
+
+ for_each_set_bit(j, &curr_enabled, MAX_CMDS_PER_TCS) {
+ addr = read_tcs_cmd(drv, RSC_DRV_CMD_ADDR, i, j);
+ for (k = 0; k < msg->num_cmds; k++) {
+ if (addr == msg->cmds[k].addr)
+ return -EBUSY;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * find_free_tcs() - Find free tcs in the given tcs_group; only for active.
+ * @tcs: A pointer to the active-only tcs_group (or the wake tcs_group if
+ * we borrowed it because there are zero active-only ones).
+ *
+ * Must be called with the drv->lock held since that protects tcs_in_use.
+ *
+ * Return: The first tcs that's free or -EBUSY if all in use.
+ */
+static int find_free_tcs(struct tcs_group *tcs)
+{
+ const struct rsc_drv *drv = tcs->drv;
+ unsigned long i;
+ unsigned long max = tcs->offset + tcs->num_tcs;
+
+ i = find_next_zero_bit(drv->tcs_in_use, max, tcs->offset);
+ if (i >= max)
+ return -EBUSY;
+
+ return i;
+}
+
+/**
+ * claim_tcs_for_req() - Claim a tcs in the given tcs_group; only for active.
+ * @drv: The controller.
+ * @tcs: The tcs_group used for ACTIVE_ONLY transfers.
+ * @msg: The data to be sent.
+ *
+ * Claims a tcs in the given tcs_group while making sure that no existing cmd
+ * is in flight that would conflict with the one in @msg.
+ *
+ * Context: Must be called with the drv->lock held since that protects
+ * tcs_in_use.
+ *
+ * Return: The id of the claimed tcs or -EBUSY if a matching msg is in flight
+ * or the tcs_group is full.
+ */
+static int claim_tcs_for_req(struct rsc_drv *drv, struct tcs_group *tcs,
+ const struct tcs_request *msg)
+{
+ int ret;
+
+ /*
+ * The h/w does not like it if we send a request to the same address
+ * when one is already in-flight or being processed.
+ */
+ ret = check_for_req_inflight(drv, tcs, msg);
+ if (ret)
+ return ret;
+
+ return find_free_tcs(tcs);
+}
+
+/**
+ * rpmh_rsc_send_data() - Write / trigger active-only message.
+ * @drv: The controller.
+ * @msg: The data to be sent.
+ *
+ * NOTES:
+ * - This is only used for "ACTIVE_ONLY" since the limitations of this
+ * function don't make sense for sleep/wake cases.
+ * - To do the transfer, we will grab a whole TCS for ourselves--we don't
+ * try to share. If there are none available we'll wait indefinitely
+ * for a free one.
+ * - This function will not wait for the commands to be finished, only for
+ * data to be programmed into the RPMh. See rpmh_tx_done() which will
+ * be called when the transfer is fully complete.
+ * - This function must be called with interrupts enabled. If the hardware
+ * is busy doing someone else's transfer we need that transfer to fully
+ * finish so that we can have the hardware, and to fully finish it needs
+ * the interrupt handler to run. If the interrupt is set to run on the
+ * active CPU, this can never happen while interrupts are disabled.
+ *
+ * Return: 0 on success, -EINVAL on error.
+ */
+int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
+{
+ struct tcs_group *tcs;
+ int tcs_id;
+ unsigned long flags;
+
+ tcs = get_tcs_for_msg(drv, msg);
+ if (IS_ERR(tcs))
+ return PTR_ERR(tcs);
+
+ spin_lock_irqsave(&drv->lock, flags);
+
+ /* Wait forever for a free tcs. It better be there eventually! */
+ wait_event_lock_irq(drv->tcs_wait,
+ (tcs_id = claim_tcs_for_req(drv, tcs, msg)) >= 0,
+ drv->lock);
+
+ tcs->req[tcs_id - tcs->offset] = msg;
+ set_bit(tcs_id, drv->tcs_in_use);
+ if (msg->state == RPMH_ACTIVE_ONLY_STATE && tcs->type != ACTIVE_TCS) {
+ /*
+ * Clear previously programmed WAKE commands in selected
+ * repurposed TCS to avoid triggering them. tcs->slots will be
+ * cleaned from rpmh_flush() by invoking rpmh_rsc_invalidate()
+ */
+ write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
+ enable_tcs_irq(drv, tcs_id, true);
+ }
+ spin_unlock_irqrestore(&drv->lock, flags);
+
+ /*
+ * These two can be done after the lock is released because:
+ * - We marked "tcs_in_use" under lock.
+ * - Once "tcs_in_use" has been marked nobody else could be writing
+ * to these registers until the interrupt goes off.
+ * - The interrupt can't go off until we trigger w/ the last line
+ * of __tcs_set_trigger() below.
+ */
+ __tcs_buffer_write(drv, tcs_id, 0, msg);
+ __tcs_set_trigger(drv, tcs_id, true);
+
+ return 0;
+}
+
+/**
+ * find_slots() - Find a place to write the given message.
+ * @tcs: The tcs group to search.
+ * @msg: The message we want to find room for.
+ * @tcs_id: If the function returns 0, the global ID of the TCS to write
+ * to is returned here.
+ * @cmd_id: If the function returns 0, the index within the command array
+ * of the returned TCS where the client should start writing the
+ * message is returned here.
+ *
+ * Only for use on sleep/wake TCSes since those are the only ones we maintain
+ * tcs->slots for.
+ *
+ * Return: -ENOMEM if there was no room, else 0.
+ */
+static int find_slots(struct tcs_group *tcs, const struct tcs_request *msg,
+ int *tcs_id, int *cmd_id)
+{
+ int slot, offset;
+ int i = 0;
+
+ /* Do over, until we can fit the full payload in a single TCS */
+ do {
+ slot = bitmap_find_next_zero_area(tcs->slots, MAX_TCS_SLOTS,
+ i, msg->num_cmds, 0);
+ if (slot >= tcs->num_tcs * tcs->ncpt)
+ return -ENOMEM;
+ i += tcs->ncpt;
+ } while (slot + msg->num_cmds - 1 >= i);
+
+ bitmap_set(tcs->slots, slot, msg->num_cmds);
+
+ offset = slot / tcs->ncpt;
+ *tcs_id = offset + tcs->offset;
+ *cmd_id = slot % tcs->ncpt;
+
+ return 0;
+}
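+
+/*
+ * Worked example (editorial note, not in the original source): with
+ * ncpt = 16, a 3-command message placed at slot 21 of a sleep/wake
+ * tcs_group maps to *tcs_id = tcs->offset + 21 / 16 = tcs->offset + 1 and
+ * *cmd_id = 21 % 16 = 5, i.e. commands 5..7 of the second TCS in the group.
+ */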
+
+/**
+ * rpmh_rsc_write_ctrl_data() - Write request to controller but don't trigger.
+ * @drv: The controller.
+ * @msg: The data to be written to the controller.
+ *
+ * This should only be called for sleep/wake state, never active-only
+ * state.
+ *
+ * The caller must ensure that no other RPMH actions are happening and the
+ * controller is idle when this function is called since it runs lockless.
+ *
+ * Return: 0 if no error; else -error.
+ */
+int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, const struct tcs_request *msg)
+{
+ struct tcs_group *tcs;
+ int tcs_id = 0, cmd_id = 0;
+ int ret;
+
+ tcs = get_tcs_for_msg(drv, msg);
+ if (IS_ERR(tcs))
+ return PTR_ERR(tcs);
+
+ /* find the TCS id and the command in the TCS to write to */
+ ret = find_slots(tcs, msg, &tcs_id, &cmd_id);
+ if (!ret)
+ __tcs_buffer_write(drv, tcs_id, cmd_id, msg);
+
+ return ret;
+}
+
+/**
+ * rpmh_rsc_ctrlr_is_busy() - Check if any of the AMCs are busy.
+ * @drv: The controller
+ *
+ * Checks if any of the AMCs are busy in handling ACTIVE sets.
+ * This is called from the last CPU powering down, before flushing the
+ * SLEEP and WAKE sets. If the AMCs are busy, the controller cannot enter
+ * power collapse, so deny the last CPU's PM notification.
+ *
+ * Context: Must be called with the drv->lock held.
+ *
+ * Return:
+ * * False - AMCs are idle
+ * * True - AMCs are busy
+ */
+static bool rpmh_rsc_ctrlr_is_busy(struct rsc_drv *drv)
+{
+ unsigned long set;
+ const struct tcs_group *tcs = &drv->tcs[ACTIVE_TCS];
+ unsigned long max;
+
+ /*
+ * If we made an active request on a RSC that does not have a
+ * dedicated TCS for active state use, then re-purposed wake TCSes
+ * should be checked for not busy, because we used wake TCSes for
+ * active requests in this case.
+ */
+ if (!tcs->num_tcs)
+ tcs = &drv->tcs[WAKE_TCS];
+
+ max = tcs->offset + tcs->num_tcs;
+ set = find_next_bit(drv->tcs_in_use, max, tcs->offset);
+
+ return set < max;
+}
+
+/**
+ * rpmh_rsc_cpu_pm_callback() - Check if any of the AMCs are busy.
+ * @nfb: Pointer to the notifier block in struct rsc_drv.
+ * @action: CPU_PM_ENTER, CPU_PM_ENTER_FAILED, or CPU_PM_EXIT.
+ * @v: Unused
+ *
+ * This function is given to cpu_pm_register_notifier so we can be informed
+ * about when CPUs go down. When all CPUs go down we know no more active
+ * transfers will be started so we write sleep/wake sets. This function gets
+ * called from cpuidle code paths and also at system suspend time.
+ *
+ * If it is the last CPU going down and the AMCs are not busy, then the cached
+ * sleep and wake messages are written to the TCSes. The firmware then takes
+ * care of triggering them when entering the deepest low power modes.
+ *
+ * Return: See cpu_pm_register_notifier()
+ */
+static int rpmh_rsc_cpu_pm_callback(struct notifier_block *nfb,
+ unsigned long action, void *v)
+{
+ struct rsc_drv *drv = container_of(nfb, struct rsc_drv, rsc_pm);
+ int ret = NOTIFY_OK;
+ int cpus_in_pm;
+
+ switch (action) {
+ case CPU_PM_ENTER:
+ cpus_in_pm = atomic_inc_return(&drv->cpus_in_pm);
+ /*
+ * NOTE: comments for num_online_cpus() point out that it's
+ * only a snapshot so we need to be careful. It should be OK
+ * for us to use, though. It's important for us not to miss
+ * if we're the last CPU going down so it would only be a
+ * problem if a CPU went offline right after we did the check
+ * AND that CPU was not idle AND that CPU was the last non-idle
+ * CPU. That can't happen. CPUs would have to come out of idle
+ * before the CPU could go offline.
+ */
+ if (cpus_in_pm < num_online_cpus())
+ return NOTIFY_OK;
+ break;
+ case CPU_PM_ENTER_FAILED:
+ case CPU_PM_EXIT:
+ atomic_dec(&drv->cpus_in_pm);
+ return NOTIFY_OK;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ /*
+ * It's likely we're on the last CPU. Grab the drv->lock and write
+ * out the sleep/wake commands to RPMH hardware. Grabbing the lock
+ * means that if we race with another CPU coming up we are still
+ * guaranteed to be safe. If another CPU came up just after we checked
+ * and has grabbed the lock or started an active transfer then we'll
+ * notice we're busy and abort. If another CPU comes up after we start
+ * flushing it will be blocked from starting an active transfer until
+ * we're done flushing. If another CPU starts an active transfer after
+ * we release the lock we're still OK because we're no longer the last
+ * CPU.
+ */
+ if (spin_trylock(&drv->lock)) {
+ if (rpmh_rsc_ctrlr_is_busy(drv) || rpmh_flush(&drv->client))
+ ret = NOTIFY_BAD;
+ spin_unlock(&drv->lock);
+ } else {
+ /* Another CPU must be up */
+ return NOTIFY_OK;
+ }
+
+ if (ret == NOTIFY_BAD) {
+ /* Double-check if we're here because someone else is up */
+ if (cpus_in_pm < num_online_cpus())
+ ret = NOTIFY_OK;
+ else
+ /* We won't be called w/ CPU_PM_ENTER_FAILED */
+ atomic_dec(&drv->cpus_in_pm);
+ }
+
+ return ret;
+}
+
+static int rpmh_probe_tcs_config(struct platform_device *pdev,
+ struct rsc_drv *drv, void __iomem *base)
+{
+ struct tcs_type_config {
+ u32 type;
+ u32 n;
+ } tcs_cfg[TCS_TYPE_NR] = { { 0 } };
+ struct device_node *dn = pdev->dev.of_node;
+ u32 config, max_tcs, ncpt, offset;
+ int i, ret, n, st = 0;
+ struct tcs_group *tcs;
+
+ ret = of_property_read_u32(dn, "qcom,tcs-offset", &offset);
+ if (ret)
+ return ret;
+ drv->tcs_base = base + offset;
+
+ config = readl_relaxed(base + DRV_PRNT_CHLD_CONFIG);
+
+ max_tcs = config;
+ max_tcs &= DRV_NUM_TCS_MASK << (DRV_NUM_TCS_SHIFT * drv->id);
+ max_tcs = max_tcs >> (DRV_NUM_TCS_SHIFT * drv->id);
+
+ ncpt = config & (DRV_NCPT_MASK << DRV_NCPT_SHIFT);
+ ncpt = ncpt >> DRV_NCPT_SHIFT;
+
+ n = of_property_count_u32_elems(dn, "qcom,tcs-config");
+ if (n != 2 * TCS_TYPE_NR)
+ return -EINVAL;
+
+ for (i = 0; i < TCS_TYPE_NR; i++) {
+ ret = of_property_read_u32_index(dn, "qcom,tcs-config",
+ i * 2, &tcs_cfg[i].type);
+ if (ret)
+ return ret;
+ if (tcs_cfg[i].type >= TCS_TYPE_NR)
+ return -EINVAL;
+
+ ret = of_property_read_u32_index(dn, "qcom,tcs-config",
+ i * 2 + 1, &tcs_cfg[i].n);
+ if (ret)
+ return ret;
+ if (tcs_cfg[i].n > MAX_TCS_PER_TYPE)
+ return -EINVAL;
+ }
+
+ for (i = 0; i < TCS_TYPE_NR; i++) {
+ tcs = &drv->tcs[tcs_cfg[i].type];
+ if (tcs->drv)
+ return -EINVAL;
+ tcs->drv = drv;
+ tcs->type = tcs_cfg[i].type;
+ tcs->num_tcs = tcs_cfg[i].n;
+ tcs->ncpt = ncpt;
+
+ if (!tcs->num_tcs || tcs->type == CONTROL_TCS)
+ continue;
+
+ if (st + tcs->num_tcs > max_tcs ||
+ st + tcs->num_tcs >= BITS_PER_BYTE * sizeof(tcs->mask))
+ return -EINVAL;
+
+ tcs->mask = ((1 << tcs->num_tcs) - 1) << st;
+ tcs->offset = st;
+ st += tcs->num_tcs;
+ }
+
+ drv->num_tcs = st;
+
+ return 0;
+}
+
+static int rpmh_rsc_probe(struct platform_device *pdev)
+{
+ struct device_node *dn = pdev->dev.of_node;
+ struct rsc_drv *drv;
+ char drv_id[10] = {0};
+ int ret, irq;
+ u32 solver_config;
+ void __iomem *base;
+
+ /*
+ * Even though RPMh doesn't directly use cmd-db, all of its children
+ * do. To avoid adding this check to our children we'll do it now.
+ */
+ ret = cmd_db_ready();
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Command DB not available (%d)\n",
+ ret);
+ return ret;
+ }
+
+ drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
+ if (!drv)
+ return -ENOMEM;
+
+ ret = of_property_read_u32(dn, "qcom,drv-id", &drv->id);
+ if (ret)
+ return ret;
+
+ drv->name = of_get_property(dn, "label", NULL);
+ if (!drv->name)
+ drv->name = dev_name(&pdev->dev);
+
+ snprintf(drv_id, ARRAY_SIZE(drv_id), "drv-%d", drv->id);
+ base = devm_platform_ioremap_resource_byname(pdev, drv_id);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ ret = rpmh_probe_tcs_config(pdev, drv, base);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&drv->lock);
+ init_waitqueue_head(&drv->tcs_wait);
+ bitmap_zero(drv->tcs_in_use, MAX_TCS_NR);
+
+ irq = platform_get_irq(pdev, drv->id);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(&pdev->dev, irq, tcs_tx_done,
+ IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND,
+ drv->name, drv);
+ if (ret)
+ return ret;
+
+ /*
+ * CPU PM notifications are not required for controllers that support
+ * 'HW solver' mode, where the controller can autonomously execute low
+ * power modes on power down.
+ */
+ solver_config = readl_relaxed(base + DRV_SOLVER_CONFIG);
+ solver_config &= DRV_HW_SOLVER_MASK << DRV_HW_SOLVER_SHIFT;
+ solver_config = solver_config >> DRV_HW_SOLVER_SHIFT;
+ if (!solver_config) {
+ drv->rsc_pm.notifier_call = rpmh_rsc_cpu_pm_callback;
+ cpu_pm_register_notifier(&drv->rsc_pm);
+ }
+
+ /* Enable the active TCS to send requests immediately */
+ writel_relaxed(drv->tcs[ACTIVE_TCS].mask,
+ drv->tcs_base + RSC_DRV_IRQ_ENABLE);
+
+ spin_lock_init(&drv->client.cache_lock);
+ INIT_LIST_HEAD(&drv->client.cache);
+ INIT_LIST_HEAD(&drv->client.batch_cache);
+
+ dev_set_drvdata(&pdev->dev, drv);
+
+ return devm_of_platform_populate(&pdev->dev);
+}
+
+static const struct of_device_id rpmh_drv_match[] = {
+ { .compatible = "qcom,rpmh-rsc", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, rpmh_drv_match);
+
+static struct platform_driver rpmh_driver = {
+ .probe = rpmh_rsc_probe,
+ .driver = {
+ .name = "rpmh",
+ .of_match_table = rpmh_drv_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init rpmh_driver_init(void)
+{
+ return platform_driver_register(&rpmh_driver);
+}
+arch_initcall(rpmh_driver_init);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. RPMh Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
new file mode 100644
index 000000000..01765ee9c
--- /dev/null
+++ b/drivers/soc/qcom/rpmh.c
@@ -0,0 +1,507 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include <soc/qcom/rpmh.h>
+
+#include "rpmh-internal.h"
+
+#define RPMH_TIMEOUT_MS msecs_to_jiffies(10000)
+
+#define DEFINE_RPMH_MSG_ONSTACK(device, s, q, name) \
+ struct rpmh_request name = { \
+ .msg = { \
+ .state = s, \
+ .cmds = name.cmd, \
+ .num_cmds = 0, \
+ .wait_for_compl = true, \
+ }, \
+ .cmd = { { 0 } }, \
+ .completion = q, \
+ .dev = device, \
+ .needs_free = false, \
+ }
+
+#define ctrlr_to_drv(ctrlr) container_of(ctrlr, struct rsc_drv, client)
+
+/**
+ * struct cache_req: the request object for caching
+ *
+ * @addr: the address of the resource
+ * @sleep_val: the sleep vote
+ * @wake_val: the wake vote
+ * @list: linked list obj
+ */
+struct cache_req {
+ u32 addr;
+ u32 sleep_val;
+ u32 wake_val;
+ struct list_head list;
+};
+
+/**
+ * struct batch_cache_req - An entry in our batch cache
+ *
+ * @list: linked list obj
+ * @count: number of messages
+ * @rpm_msgs: the messages
+ */
+struct batch_cache_req {
+ struct list_head list;
+ int count;
+ struct rpmh_request rpm_msgs[];
+};
+
+static struct rpmh_ctrlr *get_rpmh_ctrlr(const struct device *dev)
+{
+ struct rsc_drv *drv = dev_get_drvdata(dev->parent);
+
+ return &drv->client;
+}
+
+void rpmh_tx_done(const struct tcs_request *msg, int r)
+{
+ struct rpmh_request *rpm_msg = container_of(msg, struct rpmh_request,
+ msg);
+ struct completion *compl = rpm_msg->completion;
+ bool free = rpm_msg->needs_free;
+
+ rpm_msg->err = r;
+
+ if (r)
+ dev_err(rpm_msg->dev, "RPMH TX fail in msg addr=%#x, err=%d\n",
+ rpm_msg->msg.cmds[0].addr, r);
+
+ if (!compl)
+ goto exit;
+
+ /* Signal the blocking thread we are done */
+ complete(compl);
+
+exit:
+ if (free)
+ kfree(rpm_msg);
+}
+
+static struct cache_req *__find_req(struct rpmh_ctrlr *ctrlr, u32 addr)
+{
+ struct cache_req *p, *req = NULL;
+
+ list_for_each_entry(p, &ctrlr->cache, list) {
+ if (p->addr == addr) {
+ req = p;
+ break;
+ }
+ }
+
+ return req;
+}
+
+static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
+ enum rpmh_state state,
+ struct tcs_cmd *cmd)
+{
+ struct cache_req *req;
+ unsigned long flags;
+ u32 old_sleep_val, old_wake_val;
+
+ spin_lock_irqsave(&ctrlr->cache_lock, flags);
+ req = __find_req(ctrlr, cmd->addr);
+ if (req)
+ goto existing;
+
+ req = kzalloc(sizeof(*req), GFP_ATOMIC);
+ if (!req) {
+ req = ERR_PTR(-ENOMEM);
+ goto unlock;
+ }
+
+ req->addr = cmd->addr;
+ req->sleep_val = req->wake_val = UINT_MAX;
+ list_add_tail(&req->list, &ctrlr->cache);
+
+existing:
+ old_sleep_val = req->sleep_val;
+ old_wake_val = req->wake_val;
+
+ switch (state) {
+ case RPMH_ACTIVE_ONLY_STATE:
+ case RPMH_WAKE_ONLY_STATE:
+ req->wake_val = cmd->data;
+ break;
+ case RPMH_SLEEP_STATE:
+ req->sleep_val = cmd->data;
+ break;
+ }
+
+ ctrlr->dirty |= (req->sleep_val != old_sleep_val ||
+ req->wake_val != old_wake_val) &&
+ req->sleep_val != UINT_MAX &&
+ req->wake_val != UINT_MAX;
+
+unlock:
+ spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+
+ return req;
+}
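+
+/*
+ * Editor's note: worked example of the dirty tracking above (illustrative).
+ * Caching a first SLEEP vote for an address leaves wake_val at UINT_MAX, so
+ * ctrlr->dirty is not set by it; only once both a sleep and a wake value
+ * have been cached for the address, and at least one of them differs from
+ * its previous value, is the controller marked dirty so that a later
+ * rpmh_flush() re-programs the TCSes.
+ */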
+
+/**
+ * __rpmh_write: Cache and send the RPMH request
+ *
+ * @dev: The device making the request
+ * @state: Active/Sleep request type
+ * @rpm_msg: The data that needs to be sent (cmds).
+ *
+ * Cache the RPMH request and send if the state is ACTIVE_ONLY.
+ * SLEEP/WAKE_ONLY requests are not sent to the controller at
+ * this time. Use rpmh_flush() to send them to the controller.
+ */
+static int __rpmh_write(const struct device *dev, enum rpmh_state state,
+ struct rpmh_request *rpm_msg)
+{
+ struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+ int ret = -EINVAL;
+ struct cache_req *req;
+ int i;
+
+ /* Cache the request in our store and link the payload */
+ for (i = 0; i < rpm_msg->msg.num_cmds; i++) {
+ req = cache_rpm_request(ctrlr, state, &rpm_msg->msg.cmds[i]);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ }
+
+ if (state == RPMH_ACTIVE_ONLY_STATE) {
+ WARN_ON(irqs_disabled());
+ ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
+ } else {
+ /* Clean up our call by spoofing tx_done */
+ ret = 0;
+ rpmh_tx_done(&rpm_msg->msg, ret);
+ }
+
+ return ret;
+}
+
+static int __fill_rpmh_msg(struct rpmh_request *req, enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 n)
+{
+ if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
+ return -EINVAL;
+
+ memcpy(req->cmd, cmd, n * sizeof(*cmd));
+
+ req->msg.state = state;
+ req->msg.cmds = req->cmd;
+ req->msg.num_cmds = n;
+
+ return 0;
+}
+
+/**
+ * rpmh_write_async: Write a set of RPMH commands
+ *
+ * @dev: The device making the request
+ * @state: Active/sleep set
+ * @cmd: The payload data
+ * @n: The number of elements in payload
+ *
+ * Write a set of RPMH commands; the order of commands is maintained
+ * and they will be sent as a single shot.
+ */
+int rpmh_write_async(const struct device *dev, enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 n)
+{
+ struct rpmh_request *rpm_msg;
+ int ret;
+
+ rpm_msg = kzalloc(sizeof(*rpm_msg), GFP_ATOMIC);
+ if (!rpm_msg)
+ return -ENOMEM;
+ rpm_msg->needs_free = true;
+
+ ret = __fill_rpmh_msg(rpm_msg, state, cmd, n);
+ if (ret) {
+ kfree(rpm_msg);
+ return ret;
+ }
+
+ return __rpmh_write(dev, state, rpm_msg);
+}
+EXPORT_SYMBOL(rpmh_write_async);
+
+/**
+ * rpmh_write: Write a set of RPMH commands and block until response
+ *
+ * @dev: The device making the request
+ * @state: Active/sleep set
+ * @cmd: The payload data
+ * @n: The number of elements in @cmd
+ *
+ * May sleep. Do not call from atomic contexts.
+ */
+int rpmh_write(const struct device *dev, enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 n)
+{
+ DECLARE_COMPLETION_ONSTACK(compl);
+ DEFINE_RPMH_MSG_ONSTACK(dev, state, &compl, rpm_msg);
+ int ret;
+
+ ret = __fill_rpmh_msg(&rpm_msg, state, cmd, n);
+ if (ret)
+ return ret;
+
+ ret = __rpmh_write(dev, state, &rpm_msg);
+ if (ret)
+ return ret;
+
+ ret = wait_for_completion_timeout(&compl, RPMH_TIMEOUT_MS);
+ WARN_ON(!ret);
+ return (ret > 0) ? 0 : -ETIMEDOUT;
+}
+EXPORT_SYMBOL(rpmh_write);
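+
+/*
+ * Editor's note: illustrative sketch only, not part of the upstream driver.
+ * It shows how a hypothetical client of the API above might cast a blocking
+ * active vote; "example_addr" stands in for an address obtained elsewhere
+ * (e.g. from cmd-db).
+ */
+static int __maybe_unused example_rpmh_vote(const struct device *dev,
+ u32 example_addr, u32 level)
+{
+ struct tcs_cmd cmd = {
+ .addr = example_addr,
+ .data = level,
+ };
+
+ /* Blocks until the controller acks or the 10 second timeout expires */
+ return rpmh_write(dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
+}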
+
+static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrlr->cache_lock, flags);
+ list_add_tail(&req->list, &ctrlr->batch_cache);
+ ctrlr->dirty = true;
+ spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+}
+
+static int flush_batch(struct rpmh_ctrlr *ctrlr)
+{
+ struct batch_cache_req *req;
+ const struct rpmh_request *rpm_msg;
+ int ret = 0;
+ int i;
+
+ /* Send Sleep/Wake requests to the controller, expect no response */
+ list_for_each_entry(req, &ctrlr->batch_cache, list) {
+ for (i = 0; i < req->count; i++) {
+ rpm_msg = req->rpm_msgs + i;
+ ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
+ &rpm_msg->msg);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * rpmh_write_batch: Write multiple sets of RPMH commands and wait for the
+ * batch to finish.
+ *
+ * @dev: the device making the request
+ * @state: Active/sleep set
+ * @cmd: The payload data
+ * @n: The array of counts of elements in each batch, 0 terminated.
+ *
+ * Write a request to the RSC controller without caching. If the request
+ * state is ACTIVE, then the requests are treated as completion requests
+ * and sent to the controller immediately. The function waits until all the
+ * commands are complete. If the request was to SLEEP or WAKE_ONLY, then the
+ * request is sent as fire-n-forget and no ack is expected.
+ *
+ * May sleep. Do not call from atomic contexts for ACTIVE_ONLY requests.
+ */
+int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 *n)
+{
+ struct batch_cache_req *req;
+ struct rpmh_request *rpm_msgs;
+ struct completion *compls;
+ struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+ unsigned long time_left;
+ int count = 0;
+ int ret, i;
+ void *ptr;
+
+ if (!cmd || !n)
+ return -EINVAL;
+
+ while (n[count] > 0)
+ count++;
+ if (!count)
+ return -EINVAL;
+
+ ptr = kzalloc(sizeof(*req) +
+ count * (sizeof(req->rpm_msgs[0]) + sizeof(*compls)),
+ GFP_ATOMIC);
+ if (!ptr)
+ return -ENOMEM;
+
+ req = ptr;
+ compls = ptr + sizeof(*req) + count * sizeof(*rpm_msgs);
+
+ req->count = count;
+ rpm_msgs = req->rpm_msgs;
+
+ for (i = 0; i < count; i++) {
+ __fill_rpmh_msg(rpm_msgs + i, state, cmd, n[i]);
+ cmd += n[i];
+ }
+
+ if (state != RPMH_ACTIVE_ONLY_STATE) {
+ cache_batch(ctrlr, req);
+ return 0;
+ }
+
+ for (i = 0; i < count; i++) {
+ struct completion *compl = &compls[i];
+
+ init_completion(compl);
+ rpm_msgs[i].completion = compl;
+ ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
+ if (ret) {
+ pr_err("Error(%d) sending RPMH message addr=%#x\n",
+ ret, rpm_msgs[i].msg.cmds[0].addr);
+ break;
+ }
+ }
+
+ time_left = RPMH_TIMEOUT_MS;
+ while (i--) {
+ time_left = wait_for_completion_timeout(&compls[i], time_left);
+ if (!time_left) {
+ /*
+ * Better hope they never finish because they'll signal
+ * the completion that we're going to free once
+ * we've returned from this function.
+ */
+ WARN_ON(1);
+ ret = -ETIMEDOUT;
+ goto exit;
+ }
+ }
+
+exit:
+ kfree(ptr);
+
+ return ret;
+}
+EXPORT_SYMBOL(rpmh_write_batch);
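+
+/*
+ * Editor's note: illustrative sketch only, not part of the upstream driver.
+ * A hypothetical caller batches two single-command requests for the sleep
+ * set; the addresses are made up and the count array is 0-terminated as
+ * required by rpmh_write_batch() above.
+ */
+static int __maybe_unused example_rpmh_batch(const struct device *dev)
+{
+ struct tcs_cmd cmds[] = {
+ { .addr = 0x30010, .data = 0x1 }, /* assumed address */
+ { .addr = 0x30014, .data = 0x2 }, /* assumed address */
+ };
+ u32 n[] = { 1, 1, 0 };
+
+ /* SLEEP requests are only cached here and sent later by rpmh_flush() */
+ return rpmh_write_batch(dev, RPMH_SLEEP_STATE, cmds, n);
+}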
+
+static int is_req_valid(struct cache_req *req)
+{
+ return (req->sleep_val != UINT_MAX &&
+ req->wake_val != UINT_MAX &&
+ req->sleep_val != req->wake_val);
+}
+
+static int send_single(struct rpmh_ctrlr *ctrlr, enum rpmh_state state,
+ u32 addr, u32 data)
+{
+ DEFINE_RPMH_MSG_ONSTACK(NULL, state, NULL, rpm_msg);
+
+ /* Wake sets are always complete and sleep sets are not */
+ rpm_msg.msg.wait_for_compl = (state == RPMH_WAKE_ONLY_STATE);
+ rpm_msg.cmd[0].addr = addr;
+ rpm_msg.cmd[0].data = data;
+ rpm_msg.msg.num_cmds = 1;
+
+ return rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr), &rpm_msg.msg);
+}
+
+/**
+ * rpmh_flush() - Flushes the buffered sleep and wake sets to TCSes
+ *
+ * @ctrlr: Controller making request to flush cached data
+ *
+ * Return:
+ * * 0 - Success
+ * * Error code - Otherwise
+ */
+int rpmh_flush(struct rpmh_ctrlr *ctrlr)
+{
+ struct cache_req *p;
+ int ret = 0;
+
+ lockdep_assert_irqs_disabled();
+
+ /*
+ * Currently rpmh_flush() is only called when we think we're running
+ * on the last processor. If the lock is busy it means another
+ * processor is up and it's better to abort than spin.
+ */
+ if (!spin_trylock(&ctrlr->cache_lock))
+ return -EBUSY;
+
+ if (!ctrlr->dirty) {
+ pr_debug("Skipping flush, TCS has latest data.\n");
+ goto exit;
+ }
+
+ /* Invalidate the TCSes first to avoid stale data */
+ rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
+
+ /* First flush the cached batch requests */
+ ret = flush_batch(ctrlr);
+ if (ret)
+ goto exit;
+
+ list_for_each_entry(p, &ctrlr->cache, list) {
+ if (!is_req_valid(p)) {
+ pr_debug("%s: skipping RPMH req: a:%#x s:%#x w:%#x",
+ __func__, p->addr, p->sleep_val, p->wake_val);
+ continue;
+ }
+ ret = send_single(ctrlr, RPMH_SLEEP_STATE, p->addr,
+ p->sleep_val);
+ if (ret)
+ goto exit;
+ ret = send_single(ctrlr, RPMH_WAKE_ONLY_STATE, p->addr,
+ p->wake_val);
+ if (ret)
+ goto exit;
+ }
+
+ ctrlr->dirty = false;
+
+exit:
+ spin_unlock(&ctrlr->cache_lock);
+ return ret;
+}
+
+/**
+ * rpmh_invalidate: Invalidate sleep and wake sets in batch_cache
+ *
+ * @dev: The device making the request
+ *
+ * Invalidate the sleep and wake values in batch_cache.
+ */
+void rpmh_invalidate(const struct device *dev)
+{
+ struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+ struct batch_cache_req *req, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrlr->cache_lock, flags);
+ list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
+ kfree(req);
+ INIT_LIST_HEAD(&ctrlr->batch_cache);
+ ctrlr->dirty = true;
+ spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+}
+EXPORT_SYMBOL(rpmh_invalidate);
diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c
new file mode 100644
index 000000000..092f6ab09
--- /dev/null
+++ b/drivers/soc/qcom/rpmhpd.c
@@ -0,0 +1,727 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.*/
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <soc/qcom/cmd-db.h>
+#include <soc/qcom/rpmh.h>
+#include <dt-bindings/power/qcom-rpmpd.h>
+
+#define domain_to_rpmhpd(domain) container_of(domain, struct rpmhpd, pd)
+
+#define RPMH_ARC_MAX_LEVELS 16
+
+/**
+ * struct rpmhpd - top level RPMh power domain resource data structure
+ * @dev: rpmh power domain controller device
+ * @pd: generic_pm_domain corresponding to the power domain
+ * @parent: generic_pm_domain corresponding to the parent's power domain
+ * @peer: A peer power domain in case active-only voting is
+ * supported
+ * @active_only: True if it represents an active-only peer
+ * @corner: current corner
+ * @active_corner: current active corner
+ * @enable_corner: lowest non-zero corner
+ * @level: An array of level (vlvl) to corner (hlvl) mappings
+ * derived from cmd-db
+ * @level_count: Number of levels supported by the power domain, with a
+ * maximum of 16 (0 - 15)
+ * @enabled: true if the power domain is enabled
+ * @res_name: Resource name used for cmd-db lookup
+ * @addr: Resource address as looked up using the resource name from
+ * cmd-db
+ */
+struct rpmhpd {
+ struct device *dev;
+ struct generic_pm_domain pd;
+ struct generic_pm_domain *parent;
+ struct rpmhpd *peer;
+ const bool active_only;
+ unsigned int corner;
+ unsigned int active_corner;
+ unsigned int enable_corner;
+ u32 level[RPMH_ARC_MAX_LEVELS];
+ size_t level_count;
+ bool enabled;
+ const char *res_name;
+ u32 addr;
+};
+
+struct rpmhpd_desc {
+ struct rpmhpd **rpmhpds;
+ size_t num_pds;
+};
+
+static DEFINE_MUTEX(rpmhpd_lock);
+
+/* RPMH powerdomains */
+
+static struct rpmhpd cx_ao;
+static struct rpmhpd mx;
+static struct rpmhpd mx_ao;
+static struct rpmhpd cx = {
+ .pd = { .name = "cx", },
+ .peer = &cx_ao,
+ .res_name = "cx.lvl",
+};
+
+static struct rpmhpd cx_ao = {
+ .pd = { .name = "cx_ao", },
+ .active_only = true,
+ .peer = &cx,
+ .res_name = "cx.lvl",
+};
+
+static struct rpmhpd cx_ao_w_mx_parent;
+static struct rpmhpd cx_w_mx_parent = {
+ .pd = { .name = "cx", },
+ .peer = &cx_ao_w_mx_parent,
+ .parent = &mx.pd,
+ .res_name = "cx.lvl",
+};
+
+static struct rpmhpd cx_ao_w_mx_parent = {
+ .pd = { .name = "cx_ao", },
+ .active_only = true,
+ .peer = &cx_w_mx_parent,
+ .parent = &mx_ao.pd,
+ .res_name = "cx.lvl",
+};
+
+static struct rpmhpd ebi = {
+ .pd = { .name = "ebi", },
+ .res_name = "ebi.lvl",
+};
+
+static struct rpmhpd gfx = {
+ .pd = { .name = "gfx", },
+ .res_name = "gfx.lvl",
+};
+
+static struct rpmhpd lcx = {
+ .pd = { .name = "lcx", },
+ .res_name = "lcx.lvl",
+};
+
+static struct rpmhpd lmx = {
+ .pd = { .name = "lmx", },
+ .res_name = "lmx.lvl",
+};
+
+static struct rpmhpd mmcx_ao;
+static struct rpmhpd mmcx = {
+ .pd = { .name = "mmcx", },
+ .peer = &mmcx_ao,
+ .res_name = "mmcx.lvl",
+};
+
+static struct rpmhpd mmcx_ao = {
+ .pd = { .name = "mmcx_ao", },
+ .active_only = true,
+ .peer = &mmcx,
+ .res_name = "mmcx.lvl",
+};
+
+static struct rpmhpd mmcx_ao_w_cx_parent;
+static struct rpmhpd mmcx_w_cx_parent = {
+ .pd = { .name = "mmcx", },
+ .peer = &mmcx_ao_w_cx_parent,
+ .parent = &cx.pd,
+ .res_name = "mmcx.lvl",
+};
+
+static struct rpmhpd mmcx_ao_w_cx_parent = {
+ .pd = { .name = "mmcx_ao", },
+ .active_only = true,
+ .peer = &mmcx_w_cx_parent,
+ .parent = &cx_ao.pd,
+ .res_name = "mmcx.lvl",
+};
+
+static struct rpmhpd mss = {
+ .pd = { .name = "mss", },
+ .res_name = "mss.lvl",
+};
+
+static struct rpmhpd mx_ao;
+static struct rpmhpd mx = {
+ .pd = { .name = "mx", },
+ .peer = &mx_ao,
+ .res_name = "mx.lvl",
+};
+
+static struct rpmhpd mx_ao = {
+ .pd = { .name = "mx_ao", },
+ .active_only = true,
+ .peer = &mx,
+ .res_name = "mx.lvl",
+};
+
+static struct rpmhpd mxc_ao;
+static struct rpmhpd mxc = {
+ .pd = { .name = "mxc", },
+ .peer = &mxc_ao,
+ .res_name = "mxc.lvl",
+};
+
+static struct rpmhpd mxc_ao = {
+ .pd = { .name = "mxc_ao", },
+ .active_only = true,
+ .peer = &mxc,
+ .res_name = "mxc.lvl",
+};
+
+static struct rpmhpd nsp = {
+ .pd = { .name = "nsp", },
+ .res_name = "nsp.lvl",
+};
+
+static struct rpmhpd qphy = {
+ .pd = { .name = "qphy", },
+ .res_name = "qphy.lvl",
+};
+
+/* SA8540P RPMH powerdomains */
+static struct rpmhpd *sa8540p_rpmhpds[] = {
+ [SC8280XP_CX] = &cx,
+ [SC8280XP_CX_AO] = &cx_ao,
+ [SC8280XP_EBI] = &ebi,
+ [SC8280XP_GFX] = &gfx,
+ [SC8280XP_LCX] = &lcx,
+ [SC8280XP_LMX] = &lmx,
+ [SC8280XP_MMCX] = &mmcx,
+ [SC8280XP_MMCX_AO] = &mmcx_ao,
+ [SC8280XP_MX] = &mx,
+ [SC8280XP_MX_AO] = &mx_ao,
+ [SC8280XP_NSP] = &nsp,
+};
+
+static const struct rpmhpd_desc sa8540p_desc = {
+ .rpmhpds = sa8540p_rpmhpds,
+ .num_pds = ARRAY_SIZE(sa8540p_rpmhpds),
+};
+
+/* SDM845 RPMH powerdomains */
+static struct rpmhpd *sdm845_rpmhpds[] = {
+ [SDM845_CX] = &cx_w_mx_parent,
+ [SDM845_CX_AO] = &cx_ao_w_mx_parent,
+ [SDM845_EBI] = &ebi,
+ [SDM845_GFX] = &gfx,
+ [SDM845_LCX] = &lcx,
+ [SDM845_LMX] = &lmx,
+ [SDM845_MSS] = &mss,
+ [SDM845_MX] = &mx,
+ [SDM845_MX_AO] = &mx_ao,
+};
+
+static const struct rpmhpd_desc sdm845_desc = {
+ .rpmhpds = sdm845_rpmhpds,
+ .num_pds = ARRAY_SIZE(sdm845_rpmhpds),
+};
+
+/* SDX55 RPMH powerdomains */
+static struct rpmhpd *sdx55_rpmhpds[] = {
+ [SDX55_CX] = &cx_w_mx_parent,
+ [SDX55_MSS] = &mss,
+ [SDX55_MX] = &mx,
+};
+
+static const struct rpmhpd_desc sdx55_desc = {
+ .rpmhpds = sdx55_rpmhpds,
+ .num_pds = ARRAY_SIZE(sdx55_rpmhpds),
+};
+
+/* SDX65 RPMH powerdomains */
+static struct rpmhpd *sdx65_rpmhpds[] = {
+ [SDX65_CX] = &cx_w_mx_parent,
+ [SDX65_CX_AO] = &cx_ao_w_mx_parent,
+ [SDX65_MSS] = &mss,
+ [SDX65_MX] = &mx,
+ [SDX65_MX_AO] = &mx_ao,
+ [SDX65_MXC] = &mxc,
+};
+
+static const struct rpmhpd_desc sdx65_desc = {
+ .rpmhpds = sdx65_rpmhpds,
+ .num_pds = ARRAY_SIZE(sdx65_rpmhpds),
+};
+
+/* SM6350 RPMH powerdomains */
+static struct rpmhpd *sm6350_rpmhpds[] = {
+ [SM6350_CX] = &cx_w_mx_parent,
+ [SM6350_GFX] = &gfx,
+ [SM6350_LCX] = &lcx,
+ [SM6350_LMX] = &lmx,
+ [SM6350_MSS] = &mss,
+ [SM6350_MX] = &mx,
+};
+
+static const struct rpmhpd_desc sm6350_desc = {
+ .rpmhpds = sm6350_rpmhpds,
+ .num_pds = ARRAY_SIZE(sm6350_rpmhpds),
+};
+
+/* SM8150 RPMH powerdomains */
+static struct rpmhpd *sm8150_rpmhpds[] = {
+ [SM8150_CX] = &cx_w_mx_parent,
+ [SM8150_CX_AO] = &cx_ao_w_mx_parent,
+ [SM8150_EBI] = &ebi,
+ [SM8150_GFX] = &gfx,
+ [SM8150_LCX] = &lcx,
+ [SM8150_LMX] = &lmx,
+ [SM8150_MMCX] = &mmcx,
+ [SM8150_MMCX_AO] = &mmcx_ao,
+ [SM8150_MSS] = &mss,
+ [SM8150_MX] = &mx,
+ [SM8150_MX_AO] = &mx_ao,
+};
+
+static const struct rpmhpd_desc sm8150_desc = {
+ .rpmhpds = sm8150_rpmhpds,
+ .num_pds = ARRAY_SIZE(sm8150_rpmhpds),
+};
+
+/* SM8250 RPMH powerdomains */
+static struct rpmhpd *sm8250_rpmhpds[] = {
+ [SM8250_CX] = &cx_w_mx_parent,
+ [SM8250_CX_AO] = &cx_ao_w_mx_parent,
+ [SM8250_EBI] = &ebi,
+ [SM8250_GFX] = &gfx,
+ [SM8250_LCX] = &lcx,
+ [SM8250_LMX] = &lmx,
+ [SM8250_MMCX] = &mmcx,
+ [SM8250_MMCX_AO] = &mmcx_ao,
+ [SM8250_MX] = &mx,
+ [SM8250_MX_AO] = &mx_ao,
+};
+
+static const struct rpmhpd_desc sm8250_desc = {
+ .rpmhpds = sm8250_rpmhpds,
+ .num_pds = ARRAY_SIZE(sm8250_rpmhpds),
+};
+
+/* SM8350 Power domains */
+static struct rpmhpd *sm8350_rpmhpds[] = {
+ [SM8350_CX] = &cx_w_mx_parent,
+ [SM8350_CX_AO] = &cx_ao_w_mx_parent,
+ [SM8350_EBI] = &ebi,
+ [SM8350_GFX] = &gfx,
+ [SM8350_LCX] = &lcx,
+ [SM8350_LMX] = &lmx,
+ [SM8350_MMCX] = &mmcx,
+ [SM8350_MMCX_AO] = &mmcx_ao,
+ [SM8350_MSS] = &mss,
+ [SM8350_MX] = &mx,
+ [SM8350_MX_AO] = &mx_ao,
+ [SM8350_MXC] = &mxc,
+ [SM8350_MXC_AO] = &mxc_ao,
+};
+
+static const struct rpmhpd_desc sm8350_desc = {
+ .rpmhpds = sm8350_rpmhpds,
+ .num_pds = ARRAY_SIZE(sm8350_rpmhpds),
+};
+
+/* SM8450 RPMH powerdomains */
+static struct rpmhpd *sm8450_rpmhpds[] = {
+ [SM8450_CX] = &cx,
+ [SM8450_CX_AO] = &cx_ao,
+ [SM8450_EBI] = &ebi,
+ [SM8450_GFX] = &gfx,
+ [SM8450_LCX] = &lcx,
+ [SM8450_LMX] = &lmx,
+ [SM8450_MMCX] = &mmcx_w_cx_parent,
+ [SM8450_MMCX_AO] = &mmcx_ao_w_cx_parent,
+ [SM8450_MSS] = &mss,
+ [SM8450_MX] = &mx,
+ [SM8450_MX_AO] = &mx_ao,
+ [SM8450_MXC] = &mxc,
+ [SM8450_MXC_AO] = &mxc_ao,
+};
+
+static const struct rpmhpd_desc sm8450_desc = {
+ .rpmhpds = sm8450_rpmhpds,
+ .num_pds = ARRAY_SIZE(sm8450_rpmhpds),
+};
+
+/* SC7180 RPMH powerdomains */
+static struct rpmhpd *sc7180_rpmhpds[] = {
+ [SC7180_CX] = &cx_w_mx_parent,
+ [SC7180_CX_AO] = &cx_ao_w_mx_parent,
+ [SC7180_GFX] = &gfx,
+ [SC7180_LCX] = &lcx,
+ [SC7180_LMX] = &lmx,
+ [SC7180_MSS] = &mss,
+ [SC7180_MX] = &mx,
+ [SC7180_MX_AO] = &mx_ao,
+};
+
+static const struct rpmhpd_desc sc7180_desc = {
+ .rpmhpds = sc7180_rpmhpds,
+ .num_pds = ARRAY_SIZE(sc7180_rpmhpds),
+};
+
+/* SC7280 RPMH powerdomains */
+static struct rpmhpd *sc7280_rpmhpds[] = {
+ [SC7280_CX] = &cx,
+ [SC7280_CX_AO] = &cx_ao,
+ [SC7280_EBI] = &ebi,
+ [SC7280_GFX] = &gfx,
+ [SC7280_LCX] = &lcx,
+ [SC7280_LMX] = &lmx,
+ [SC7280_MSS] = &mss,
+ [SC7280_MX] = &mx,
+ [SC7280_MX_AO] = &mx_ao,
+};
+
+static const struct rpmhpd_desc sc7280_desc = {
+ .rpmhpds = sc7280_rpmhpds,
+ .num_pds = ARRAY_SIZE(sc7280_rpmhpds),
+};
+
+/* SC8180x RPMH powerdomains */
+static struct rpmhpd *sc8180x_rpmhpds[] = {
+ [SC8180X_CX] = &cx_w_mx_parent,
+ [SC8180X_CX_AO] = &cx_ao_w_mx_parent,
+ [SC8180X_EBI] = &ebi,
+ [SC8180X_GFX] = &gfx,
+ [SC8180X_LCX] = &lcx,
+ [SC8180X_LMX] = &lmx,
+ [SC8180X_MMCX] = &mmcx,
+ [SC8180X_MMCX_AO] = &mmcx_ao,
+ [SC8180X_MSS] = &mss,
+ [SC8180X_MX] = &mx,
+ [SC8180X_MX_AO] = &mx_ao,
+};
+
+static const struct rpmhpd_desc sc8180x_desc = {
+ .rpmhpds = sc8180x_rpmhpds,
+ .num_pds = ARRAY_SIZE(sc8180x_rpmhpds),
+};
+
+/* SC8280xp RPMH powerdomains */
+static struct rpmhpd *sc8280xp_rpmhpds[] = {
+ [SC8280XP_CX] = &cx,
+ [SC8280XP_CX_AO] = &cx_ao,
+ [SC8280XP_EBI] = &ebi,
+ [SC8280XP_GFX] = &gfx,
+ [SC8280XP_LCX] = &lcx,
+ [SC8280XP_LMX] = &lmx,
+ [SC8280XP_MMCX] = &mmcx,
+ [SC8280XP_MMCX_AO] = &mmcx_ao,
+ [SC8280XP_MX] = &mx,
+ [SC8280XP_MX_AO] = &mx_ao,
+ [SC8280XP_NSP] = &nsp,
+ [SC8280XP_QPHY] = &qphy,
+};
+
+static const struct rpmhpd_desc sc8280xp_desc = {
+ .rpmhpds = sc8280xp_rpmhpds,
+ .num_pds = ARRAY_SIZE(sc8280xp_rpmhpds),
+};
+
+static const struct of_device_id rpmhpd_match_table[] = {
+ { .compatible = "qcom,sa8540p-rpmhpd", .data = &sa8540p_desc },
+ { .compatible = "qcom,sc7180-rpmhpd", .data = &sc7180_desc },
+ { .compatible = "qcom,sc7280-rpmhpd", .data = &sc7280_desc },
+ { .compatible = "qcom,sc8180x-rpmhpd", .data = &sc8180x_desc },
+ { .compatible = "qcom,sc8280xp-rpmhpd", .data = &sc8280xp_desc },
+ { .compatible = "qcom,sdm845-rpmhpd", .data = &sdm845_desc },
+ { .compatible = "qcom,sdx55-rpmhpd", .data = &sdx55_desc},
+ { .compatible = "qcom,sdx65-rpmhpd", .data = &sdx65_desc},
+ { .compatible = "qcom,sm6350-rpmhpd", .data = &sm6350_desc },
+ { .compatible = "qcom,sm8150-rpmhpd", .data = &sm8150_desc },
+ { .compatible = "qcom,sm8250-rpmhpd", .data = &sm8250_desc },
+ { .compatible = "qcom,sm8350-rpmhpd", .data = &sm8350_desc },
+ { .compatible = "qcom,sm8450-rpmhpd", .data = &sm8450_desc },
+ { }
+};
+MODULE_DEVICE_TABLE(of, rpmhpd_match_table);
+
+static int rpmhpd_send_corner(struct rpmhpd *pd, int state,
+ unsigned int corner, bool sync)
+{
+ struct tcs_cmd cmd = {
+ .addr = pd->addr,
+ .data = corner,
+ };
+
+ /*
+ * Wait for an ack only when we are increasing the
+ * perf state of the power domain
+ */
+ if (sync)
+ return rpmh_write(pd->dev, state, &cmd, 1);
+ else
+ return rpmh_write_async(pd->dev, state, &cmd, 1);
+}
+
+static void to_active_sleep(struct rpmhpd *pd, unsigned int corner,
+ unsigned int *active, unsigned int *sleep)
+{
+ *active = corner;
+
+ if (pd->active_only)
+ *sleep = 0;
+ else
+ *sleep = *active;
+}
+
+/*
+ * This function is used to aggregate the votes across a resource and its
+ * active-only peer. The aggregated votes are sent to RPMh as ACTIVE_ONLY
+ * votes (which take effect immediately), as WAKE_ONLY votes (applied by
+ * RPMh on system wakeup) and as SLEEP votes (applied by RPMh on system
+ * sleep).
+ * We send ACTIVE_ONLY votes for resources without any peers. For others,
+ * which have an active-only peer, all 3 votes are sent.
+ */
+static int rpmhpd_aggregate_corner(struct rpmhpd *pd, unsigned int corner)
+{
+ int ret;
+ struct rpmhpd *peer = pd->peer;
+ unsigned int active_corner, sleep_corner;
+ unsigned int this_active_corner = 0, this_sleep_corner = 0;
+ unsigned int peer_active_corner = 0, peer_sleep_corner = 0;
+
+ to_active_sleep(pd, corner, &this_active_corner, &this_sleep_corner);
+
+ if (peer && peer->enabled)
+ to_active_sleep(peer, peer->corner, &peer_active_corner,
+ &peer_sleep_corner);
+
+ active_corner = max(this_active_corner, peer_active_corner);
+
+ ret = rpmhpd_send_corner(pd, RPMH_ACTIVE_ONLY_STATE, active_corner,
+ active_corner > pd->active_corner);
+ if (ret)
+ return ret;
+
+ pd->active_corner = active_corner;
+
+ if (peer) {
+ peer->active_corner = active_corner;
+
+ ret = rpmhpd_send_corner(pd, RPMH_WAKE_ONLY_STATE,
+ active_corner, false);
+ if (ret)
+ return ret;
+
+ sleep_corner = max(this_sleep_corner, peer_sleep_corner);
+
+ return rpmhpd_send_corner(pd, RPMH_SLEEP_STATE, sleep_corner,
+ false);
+ }
+
+ return ret;
+}
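+
+/*
+ * Editor's note: worked example of the aggregation above (assumed numbers).
+ * If "cx" is enabled at corner 3 while its active-only peer "cx_ao" is
+ * enabled at corner 5, the ACTIVE_ONLY and WAKE_ONLY votes become
+ * max(3, 5) = 5, whereas the SLEEP vote is max(3, 0) = 3, because
+ * to_active_sleep() maps the active-only peer's sleep contribution to 0.
+ */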
+
+static int rpmhpd_power_on(struct generic_pm_domain *domain)
+{
+ struct rpmhpd *pd = domain_to_rpmhpd(domain);
+ unsigned int corner;
+ int ret;
+
+ mutex_lock(&rpmhpd_lock);
+
+ corner = max(pd->corner, pd->enable_corner);
+ ret = rpmhpd_aggregate_corner(pd, corner);
+ if (!ret)
+ pd->enabled = true;
+
+ mutex_unlock(&rpmhpd_lock);
+
+ return ret;
+}
+
+static int rpmhpd_power_off(struct generic_pm_domain *domain)
+{
+ struct rpmhpd *pd = domain_to_rpmhpd(domain);
+ int ret;
+
+ mutex_lock(&rpmhpd_lock);
+
+ ret = rpmhpd_aggregate_corner(pd, 0);
+ if (!ret)
+ pd->enabled = false;
+
+ mutex_unlock(&rpmhpd_lock);
+
+ return ret;
+}
+
+static int rpmhpd_set_performance_state(struct generic_pm_domain *domain,
+ unsigned int level)
+{
+ struct rpmhpd *pd = domain_to_rpmhpd(domain);
+ int ret = 0, i;
+
+ mutex_lock(&rpmhpd_lock);
+
+ for (i = 0; i < pd->level_count; i++)
+ if (level <= pd->level[i])
+ break;
+
+ /*
+ * If the level requested is more than that supported by the
+ * max corner, just set it to max anyway.
+ */
+ if (i == pd->level_count)
+ i--;
+
+ if (pd->enabled) {
+ /* Ensure that the domain isn't turned off */
+ if (i < pd->enable_corner)
+ i = pd->enable_corner;
+
+ ret = rpmhpd_aggregate_corner(pd, i);
+ if (ret)
+ goto out;
+ }
+
+ pd->corner = i;
+out:
+ mutex_unlock(&rpmhpd_lock);
+
+ return ret;
+}
+
+static unsigned int rpmhpd_get_performance_state(struct generic_pm_domain *genpd,
+ struct dev_pm_opp *opp)
+{
+ return dev_pm_opp_get_level(opp);
+}
+
+static int rpmhpd_update_level_mapping(struct rpmhpd *rpmhpd)
+{
+ int i;
+ const u16 *buf;
+
+ buf = cmd_db_read_aux_data(rpmhpd->res_name, &rpmhpd->level_count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ /* 2 bytes used for each command DB aux data entry */
+ rpmhpd->level_count >>= 1;
+
+ if (rpmhpd->level_count > RPMH_ARC_MAX_LEVELS)
+ return -EINVAL;
+
+ for (i = 0; i < rpmhpd->level_count; i++) {
+ rpmhpd->level[i] = buf[i];
+
+ /* Remember the first corner with non-zero level */
+ if (!rpmhpd->level[rpmhpd->enable_corner] && rpmhpd->level[i])
+ rpmhpd->enable_corner = i;
+
+ /*
+ * The AUX data may be zero padded. These 0 valued entries at
+ * the end of the map must be ignored.
+ */
+ if (i > 0 && rpmhpd->level[i] == 0) {
+ rpmhpd->level_count = i;
+ break;
+ }
+ pr_debug("%s: ARC hlvl=%2d --> vlvl=%4u\n", rpmhpd->res_name, i,
+ rpmhpd->level[i]);
+ }
+
+ return 0;
+}
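+
+/*
+ * Editor's note: worked example of the mapping above (assumed aux data). If
+ * cmd-db returned the six 16-bit entries { 0, 48, 64, 128, 0, 0 } for a
+ * resource, the loop would produce level[] = { 0, 48, 64, 128 } with
+ * level_count = 4 and enable_corner = 1 (the first corner with a non-zero
+ * vlvl); the trailing zero padding is dropped.
+ */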
+
+static int rpmhpd_probe(struct platform_device *pdev)
+{
+ int i, ret;
+ size_t num_pds;
+ struct device *dev = &pdev->dev;
+ struct genpd_onecell_data *data;
+ struct rpmhpd **rpmhpds;
+ const struct rpmhpd_desc *desc;
+
+ desc = of_device_get_match_data(dev);
+ if (!desc)
+ return -EINVAL;
+
+ rpmhpds = desc->rpmhpds;
+ num_pds = desc->num_pds;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->domains = devm_kcalloc(dev, num_pds, sizeof(*data->domains),
+ GFP_KERNEL);
+ if (!data->domains)
+ return -ENOMEM;
+
+ data->num_domains = num_pds;
+
+ for (i = 0; i < num_pds; i++) {
+ if (!rpmhpds[i])
+ continue;
+
+ rpmhpds[i]->dev = dev;
+ rpmhpds[i]->addr = cmd_db_read_addr(rpmhpds[i]->res_name);
+ if (!rpmhpds[i]->addr) {
+ dev_err(dev, "Could not find RPMh address for resource %s\n",
+ rpmhpds[i]->res_name);
+ return -ENODEV;
+ }
+
+ ret = cmd_db_read_slave_id(rpmhpds[i]->res_name);
+ if (ret != CMD_DB_HW_ARC) {
+ dev_err(dev, "RPMh slave ID mismatch\n");
+ return -EINVAL;
+ }
+
+ ret = rpmhpd_update_level_mapping(rpmhpds[i]);
+ if (ret)
+ return ret;
+
+ rpmhpds[i]->pd.power_off = rpmhpd_power_off;
+ rpmhpds[i]->pd.power_on = rpmhpd_power_on;
+ rpmhpds[i]->pd.set_performance_state = rpmhpd_set_performance_state;
+ rpmhpds[i]->pd.opp_to_performance_state = rpmhpd_get_performance_state;
+ pm_genpd_init(&rpmhpds[i]->pd, NULL, true);
+
+ data->domains[i] = &rpmhpds[i]->pd;
+ }
+
+ /* Add subdomains */
+ for (i = 0; i < num_pds; i++) {
+ if (!rpmhpds[i])
+ continue;
+ if (rpmhpds[i]->parent)
+ pm_genpd_add_subdomain(rpmhpds[i]->parent,
+ &rpmhpds[i]->pd);
+ }
+
+ return of_genpd_add_provider_onecell(pdev->dev.of_node, data);
+}
+
+static struct platform_driver rpmhpd_driver = {
+ .driver = {
+ .name = "qcom-rpmhpd",
+ .of_match_table = rpmhpd_match_table,
+ .suppress_bind_attrs = true,
+ },
+ .probe = rpmhpd_probe,
+};
+
+static int __init rpmhpd_init(void)
+{
+ return platform_driver_register(&rpmhpd_driver);
+}
+core_initcall(rpmhpd_init);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. RPMh Power Domain Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c
new file mode 100644
index 000000000..337b1ad1c
--- /dev/null
+++ b/drivers/soc/qcom/rpmpd.c
@@ -0,0 +1,695 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pm_domain.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/soc/qcom/smd-rpm.h>
+
+#include <dt-bindings/power/qcom-rpmpd.h>
+
+#define domain_to_rpmpd(domain) container_of(domain, struct rpmpd, pd)
+
+/*
+ * Resource types:
+ * RPMPD_X is X encoded as a little-endian, lower-case, ASCII string
+ */
+#define RPMPD_SMPA 0x61706d73
+#define RPMPD_LDOA 0x616f646c
+#define RPMPD_SMPB 0x62706d73
+#define RPMPD_LDOB 0x626f646c
+#define RPMPD_RWCX 0x78637772
+#define RPMPD_RWMX 0x786d7772
+#define RPMPD_RWLC 0x636c7772
+#define RPMPD_RWLM 0x6d6c7772
+#define RPMPD_RWSC 0x63737772
+#define RPMPD_RWSM 0x6d737772
+#define RPMPD_RWGX 0x78677772
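+
+/*
+ * Editor's note: the resource type values above are simply four ASCII
+ * characters stored little-endian, e.g. 0x61706d73 reads "smpa" from the
+ * lowest byte up and 0x78637772 reads "rwcx"; no new resource types are
+ * implied here.
+ */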
+
+/* Operation Keys */
+#define KEY_CORNER 0x6e726f63 /* corn */
+#define KEY_ENABLE 0x6e657773 /* swen */
+#define KEY_FLOOR_CORNER 0x636676 /* vfc */
+#define KEY_FLOOR_LEVEL 0x6c6676 /* vfl */
+#define KEY_LEVEL 0x6c766c76 /* vlvl */
+
+#define MAX_CORNER_RPMPD_STATE 6
+
+#define DEFINE_RPMPD_PAIR(_platform, _name, _active, r_type, r_key, \
+ r_id) \
+ static struct rpmpd _platform##_##_active; \
+ static struct rpmpd _platform##_##_name = { \
+ .pd = { .name = #_name, }, \
+ .peer = &_platform##_##_active, \
+ .res_type = RPMPD_##r_type, \
+ .res_id = r_id, \
+ .key = KEY_##r_key, \
+ }; \
+ static struct rpmpd _platform##_##_active = { \
+ .pd = { .name = #_active, }, \
+ .peer = &_platform##_##_name, \
+ .active_only = true, \
+ .res_type = RPMPD_##r_type, \
+ .res_id = r_id, \
+ .key = KEY_##r_key, \
+ }
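+
+/*
+ * Editor's note: for illustration, DEFINE_RPMPD_PAIR(msm8998, vddcx,
+ * vddcx_ao, RWCX, LEVEL, 0) below expands to two rpmpd instances,
+ * msm8998_vddcx and msm8998_vddcx_ao, that reference each other through
+ * ->peer and share the "rwcx" resource id 0 with the "vlvl" key.
+ */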
+
+#define DEFINE_RPMPD_CORNER(_platform, _name, r_type, r_id) \
+ static struct rpmpd _platform##_##_name = { \
+ .pd = { .name = #_name, }, \
+ .res_type = RPMPD_##r_type, \
+ .res_id = r_id, \
+ .key = KEY_CORNER, \
+ }
+
+#define DEFINE_RPMPD_LEVEL(_platform, _name, r_type, r_id) \
+ static struct rpmpd _platform##_##_name = { \
+ .pd = { .name = #_name, }, \
+ .res_type = RPMPD_##r_type, \
+ .res_id = r_id, \
+ .key = KEY_LEVEL, \
+ }
+
+#define DEFINE_RPMPD_VFC(_platform, _name, r_type, r_id) \
+ static struct rpmpd _platform##_##_name = { \
+ .pd = { .name = #_name, }, \
+ .res_type = RPMPD_##r_type, \
+ .res_id = r_id, \
+ .key = KEY_FLOOR_CORNER, \
+ }
+
+#define DEFINE_RPMPD_VFL(_platform, _name, r_type, r_id) \
+ static struct rpmpd _platform##_##_name = { \
+ .pd = { .name = #_name, }, \
+ .res_type = RPMPD_##r_type, \
+ .res_id = r_id, \
+ .key = KEY_FLOOR_LEVEL, \
+ }
+
+struct rpmpd_req {
+ __le32 key;
+ __le32 nbytes;
+ __le32 value;
+};
+
+struct rpmpd {
+ struct generic_pm_domain pd;
+ struct rpmpd *peer;
+ const bool active_only;
+ unsigned int corner;
+ bool enabled;
+ const int res_type;
+ const int res_id;
+ struct qcom_smd_rpm *rpm;
+ unsigned int max_state;
+ __le32 key;
+};
+
+struct rpmpd_desc {
+ struct rpmpd **rpmpds;
+ size_t num_pds;
+ unsigned int max_state;
+};
+
+static DEFINE_MUTEX(rpmpd_lock);
+
+/* mdm9607 RPM Power Domains */
+DEFINE_RPMPD_PAIR(mdm9607, vddcx, vddcx_ao, SMPA, LEVEL, 3);
+DEFINE_RPMPD_VFL(mdm9607, vddcx_vfl, SMPA, 3);
+
+DEFINE_RPMPD_PAIR(mdm9607, vddmx, vddmx_ao, LDOA, LEVEL, 12);
+DEFINE_RPMPD_VFL(mdm9607, vddmx_vfl, LDOA, 12);
+static struct rpmpd *mdm9607_rpmpds[] = {
+ [MDM9607_VDDCX] = &mdm9607_vddcx,
+ [MDM9607_VDDCX_AO] = &mdm9607_vddcx_ao,
+ [MDM9607_VDDCX_VFL] = &mdm9607_vddcx_vfl,
+ [MDM9607_VDDMX] = &mdm9607_vddmx,
+ [MDM9607_VDDMX_AO] = &mdm9607_vddmx_ao,
+ [MDM9607_VDDMX_VFL] = &mdm9607_vddmx_vfl,
+};
+
+static const struct rpmpd_desc mdm9607_desc = {
+ .rpmpds = mdm9607_rpmpds,
+ .num_pds = ARRAY_SIZE(mdm9607_rpmpds),
+ .max_state = RPM_SMD_LEVEL_TURBO,
+};
+
+/* msm8226 RPM Power Domains */
+DEFINE_RPMPD_PAIR(msm8226, vddcx, vddcx_ao, SMPA, CORNER, 1);
+DEFINE_RPMPD_VFC(msm8226, vddcx_vfc, SMPA, 1);
+
+static struct rpmpd *msm8226_rpmpds[] = {
+ [MSM8226_VDDCX] = &msm8226_vddcx,
+ [MSM8226_VDDCX_AO] = &msm8226_vddcx_ao,
+ [MSM8226_VDDCX_VFC] = &msm8226_vddcx_vfc,
+};
+
+static const struct rpmpd_desc msm8226_desc = {
+ .rpmpds = msm8226_rpmpds,
+ .num_pds = ARRAY_SIZE(msm8226_rpmpds),
+ .max_state = MAX_CORNER_RPMPD_STATE,
+};
+
+/* msm8939 RPM Power Domains */
+DEFINE_RPMPD_PAIR(msm8939, vddmd, vddmd_ao, SMPA, CORNER, 1);
+DEFINE_RPMPD_VFC(msm8939, vddmd_vfc, SMPA, 1);
+
+DEFINE_RPMPD_PAIR(msm8939, vddcx, vddcx_ao, SMPA, CORNER, 2);
+DEFINE_RPMPD_VFC(msm8939, vddcx_vfc, SMPA, 2);
+
+DEFINE_RPMPD_PAIR(msm8939, vddmx, vddmx_ao, LDOA, CORNER, 3);
+
+static struct rpmpd *msm8939_rpmpds[] = {
+ [MSM8939_VDDMDCX] = &msm8939_vddmd,
+ [MSM8939_VDDMDCX_AO] = &msm8939_vddmd_ao,
+ [MSM8939_VDDMDCX_VFC] = &msm8939_vddmd_vfc,
+ [MSM8939_VDDCX] = &msm8939_vddcx,
+ [MSM8939_VDDCX_AO] = &msm8939_vddcx_ao,
+ [MSM8939_VDDCX_VFC] = &msm8939_vddcx_vfc,
+ [MSM8939_VDDMX] = &msm8939_vddmx,
+ [MSM8939_VDDMX_AO] = &msm8939_vddmx_ao,
+};
+
+static const struct rpmpd_desc msm8939_desc = {
+ .rpmpds = msm8939_rpmpds,
+ .num_pds = ARRAY_SIZE(msm8939_rpmpds),
+ .max_state = MAX_CORNER_RPMPD_STATE,
+};
+
+/* msm8916 RPM Power Domains */
+DEFINE_RPMPD_PAIR(msm8916, vddcx, vddcx_ao, SMPA, CORNER, 1);
+DEFINE_RPMPD_PAIR(msm8916, vddmx, vddmx_ao, LDOA, CORNER, 3);
+
+DEFINE_RPMPD_VFC(msm8916, vddcx_vfc, SMPA, 1);
+
+static struct rpmpd *msm8916_rpmpds[] = {
+ [MSM8916_VDDCX] = &msm8916_vddcx,
+ [MSM8916_VDDCX_AO] = &msm8916_vddcx_ao,
+ [MSM8916_VDDCX_VFC] = &msm8916_vddcx_vfc,
+ [MSM8916_VDDMX] = &msm8916_vddmx,
+ [MSM8916_VDDMX_AO] = &msm8916_vddmx_ao,
+};
+
+static const struct rpmpd_desc msm8916_desc = {
+ .rpmpds = msm8916_rpmpds,
+ .num_pds = ARRAY_SIZE(msm8916_rpmpds),
+ .max_state = MAX_CORNER_RPMPD_STATE,
+};
+
+/* msm8953 RPM Power Domains */
+DEFINE_RPMPD_PAIR(msm8953, vddmd, vddmd_ao, SMPA, LEVEL, 1);
+DEFINE_RPMPD_PAIR(msm8953, vddcx, vddcx_ao, SMPA, LEVEL, 2);
+DEFINE_RPMPD_PAIR(msm8953, vddmx, vddmx_ao, SMPA, LEVEL, 7);
+
+DEFINE_RPMPD_VFL(msm8953, vddcx_vfl, SMPA, 2);
+
+static struct rpmpd *msm8953_rpmpds[] = {
+ [MSM8953_VDDMD] = &msm8953_vddmd,
+ [MSM8953_VDDMD_AO] = &msm8953_vddmd_ao,
+ [MSM8953_VDDCX] = &msm8953_vddcx,
+ [MSM8953_VDDCX_AO] = &msm8953_vddcx_ao,
+ [MSM8953_VDDCX_VFL] = &msm8953_vddcx_vfl,
+ [MSM8953_VDDMX] = &msm8953_vddmx,
+ [MSM8953_VDDMX_AO] = &msm8953_vddmx_ao,
+};
+
+static const struct rpmpd_desc msm8953_desc = {
+ .rpmpds = msm8953_rpmpds,
+ .num_pds = ARRAY_SIZE(msm8953_rpmpds),
+ .max_state = RPM_SMD_LEVEL_TURBO,
+};
+
+/* msm8976 RPM Power Domains */
+DEFINE_RPMPD_PAIR(msm8976, vddcx, vddcx_ao, SMPA, LEVEL, 2);
+DEFINE_RPMPD_PAIR(msm8976, vddmx, vddmx_ao, SMPA, LEVEL, 6);
+
+DEFINE_RPMPD_VFL(msm8976, vddcx_vfl, RWSC, 2);
+DEFINE_RPMPD_VFL(msm8976, vddmx_vfl, RWSM, 6);
+
+static struct rpmpd *msm8976_rpmpds[] = {
+ [MSM8976_VDDCX] = &msm8976_vddcx,
+ [MSM8976_VDDCX_AO] = &msm8976_vddcx_ao,
+ [MSM8976_VDDCX_VFL] = &msm8976_vddcx_vfl,
+ [MSM8976_VDDMX] = &msm8976_vddmx,
+ [MSM8976_VDDMX_AO] = &msm8976_vddmx_ao,
+ [MSM8976_VDDMX_VFL] = &msm8976_vddmx_vfl,
+};
+
+static const struct rpmpd_desc msm8976_desc = {
+ .rpmpds = msm8976_rpmpds,
+ .num_pds = ARRAY_SIZE(msm8976_rpmpds),
+ .max_state = RPM_SMD_LEVEL_TURBO_HIGH,
+};
+
+/* msm8994 RPM Power domains */
+DEFINE_RPMPD_PAIR(msm8994, vddcx, vddcx_ao, SMPA, CORNER, 1);
+DEFINE_RPMPD_PAIR(msm8994, vddmx, vddmx_ao, SMPA, CORNER, 2);
+/* Attention! *Some* 8994 boards with pm8004 may use SMPC here! */
+DEFINE_RPMPD_CORNER(msm8994, vddgfx, SMPB, 2);
+
+DEFINE_RPMPD_VFC(msm8994, vddcx_vfc, SMPA, 1);
+DEFINE_RPMPD_VFC(msm8994, vddgfx_vfc, SMPB, 2);
+
+static struct rpmpd *msm8994_rpmpds[] = {
+ [MSM8994_VDDCX] = &msm8994_vddcx,
+ [MSM8994_VDDCX_AO] = &msm8994_vddcx_ao,
+ [MSM8994_VDDCX_VFC] = &msm8994_vddcx_vfc,
+ [MSM8994_VDDMX] = &msm8994_vddmx,
+ [MSM8994_VDDMX_AO] = &msm8994_vddmx_ao,
+ [MSM8994_VDDGFX] = &msm8994_vddgfx,
+ [MSM8994_VDDGFX_VFC] = &msm8994_vddgfx_vfc,
+};
+
+static const struct rpmpd_desc msm8994_desc = {
+ .rpmpds = msm8994_rpmpds,
+ .num_pds = ARRAY_SIZE(msm8994_rpmpds),
+ .max_state = MAX_CORNER_RPMPD_STATE,
+};
+
+/* msm8996 RPM Power domains */
+DEFINE_RPMPD_PAIR(msm8996, vddcx, vddcx_ao, SMPA, CORNER, 1);
+DEFINE_RPMPD_PAIR(msm8996, vddmx, vddmx_ao, SMPA, CORNER, 2);
+DEFINE_RPMPD_CORNER(msm8996, vddsscx, LDOA, 26);
+
+DEFINE_RPMPD_VFC(msm8996, vddcx_vfc, SMPA, 1);
+DEFINE_RPMPD_VFC(msm8996, vddsscx_vfc, LDOA, 26);
+
+static struct rpmpd *msm8996_rpmpds[] = {
+ [MSM8996_VDDCX] = &msm8996_vddcx,
+ [MSM8996_VDDCX_AO] = &msm8996_vddcx_ao,
+ [MSM8996_VDDCX_VFC] = &msm8996_vddcx_vfc,
+ [MSM8996_VDDMX] = &msm8996_vddmx,
+ [MSM8996_VDDMX_AO] = &msm8996_vddmx_ao,
+ [MSM8996_VDDSSCX] = &msm8996_vddsscx,
+ [MSM8996_VDDSSCX_VFC] = &msm8996_vddsscx_vfc,
+};
+
+static const struct rpmpd_desc msm8996_desc = {
+ .rpmpds = msm8996_rpmpds,
+ .num_pds = ARRAY_SIZE(msm8996_rpmpds),
+ .max_state = MAX_CORNER_RPMPD_STATE,
+};
+
+/* msm8998 RPM Power domains */
+DEFINE_RPMPD_PAIR(msm8998, vddcx, vddcx_ao, RWCX, LEVEL, 0);
+DEFINE_RPMPD_VFL(msm8998, vddcx_vfl, RWCX, 0);
+
+DEFINE_RPMPD_PAIR(msm8998, vddmx, vddmx_ao, RWMX, LEVEL, 0);
+DEFINE_RPMPD_VFL(msm8998, vddmx_vfl, RWMX, 0);
+
+DEFINE_RPMPD_LEVEL(msm8998, vdd_ssccx, RWSC, 0);
+DEFINE_RPMPD_VFL(msm8998, vdd_ssccx_vfl, RWSC, 0);
+
+DEFINE_RPMPD_LEVEL(msm8998, vdd_sscmx, RWSM, 0);
+DEFINE_RPMPD_VFL(msm8998, vdd_sscmx_vfl, RWSM, 0);
+
+static struct rpmpd *msm8998_rpmpds[] = {
+ [MSM8998_VDDCX] = &msm8998_vddcx,
+ [MSM8998_VDDCX_AO] = &msm8998_vddcx_ao,
+ [MSM8998_VDDCX_VFL] = &msm8998_vddcx_vfl,
+ [MSM8998_VDDMX] = &msm8998_vddmx,
+ [MSM8998_VDDMX_AO] = &msm8998_vddmx_ao,
+ [MSM8998_VDDMX_VFL] = &msm8998_vddmx_vfl,
+ [MSM8998_SSCCX] = &msm8998_vdd_ssccx,
+ [MSM8998_SSCCX_VFL] = &msm8998_vdd_ssccx_vfl,
+ [MSM8998_SSCMX] = &msm8998_vdd_sscmx,
+ [MSM8998_SSCMX_VFL] = &msm8998_vdd_sscmx_vfl,
+};
+
+static const struct rpmpd_desc msm8998_desc = {
+ .rpmpds = msm8998_rpmpds,
+ .num_pds = ARRAY_SIZE(msm8998_rpmpds),
+ .max_state = RPM_SMD_LEVEL_BINNING,
+};
+
+/* qcs404 RPM Power domains */
+DEFINE_RPMPD_PAIR(qcs404, vddmx, vddmx_ao, RWMX, LEVEL, 0);
+DEFINE_RPMPD_VFL(qcs404, vddmx_vfl, RWMX, 0);
+
+DEFINE_RPMPD_LEVEL(qcs404, vdd_lpicx, RWLC, 0);
+DEFINE_RPMPD_VFL(qcs404, vdd_lpicx_vfl, RWLC, 0);
+
+DEFINE_RPMPD_LEVEL(qcs404, vdd_lpimx, RWLM, 0);
+DEFINE_RPMPD_VFL(qcs404, vdd_lpimx_vfl, RWLM, 0);
+
+static struct rpmpd *qcs404_rpmpds[] = {
+ [QCS404_VDDMX] = &qcs404_vddmx,
+ [QCS404_VDDMX_AO] = &qcs404_vddmx_ao,
+ [QCS404_VDDMX_VFL] = &qcs404_vddmx_vfl,
+ [QCS404_LPICX] = &qcs404_vdd_lpicx,
+ [QCS404_LPICX_VFL] = &qcs404_vdd_lpicx_vfl,
+ [QCS404_LPIMX] = &qcs404_vdd_lpimx,
+ [QCS404_LPIMX_VFL] = &qcs404_vdd_lpimx_vfl,
+};
+
+static const struct rpmpd_desc qcs404_desc = {
+ .rpmpds = qcs404_rpmpds,
+ .num_pds = ARRAY_SIZE(qcs404_rpmpds),
+ .max_state = RPM_SMD_LEVEL_BINNING,
+};
+
+/* sdm660 RPM Power domains */
+DEFINE_RPMPD_PAIR(sdm660, vddcx, vddcx_ao, RWCX, LEVEL, 0);
+DEFINE_RPMPD_VFL(sdm660, vddcx_vfl, RWCX, 0);
+
+DEFINE_RPMPD_PAIR(sdm660, vddmx, vddmx_ao, RWMX, LEVEL, 0);
+DEFINE_RPMPD_VFL(sdm660, vddmx_vfl, RWMX, 0);
+
+DEFINE_RPMPD_LEVEL(sdm660, vdd_ssccx, RWLC, 0);
+DEFINE_RPMPD_VFL(sdm660, vdd_ssccx_vfl, RWLC, 0);
+
+DEFINE_RPMPD_LEVEL(sdm660, vdd_sscmx, RWLM, 0);
+DEFINE_RPMPD_VFL(sdm660, vdd_sscmx_vfl, RWLM, 0);
+
+static struct rpmpd *sdm660_rpmpds[] = {
+ [SDM660_VDDCX] = &sdm660_vddcx,
+ [SDM660_VDDCX_AO] = &sdm660_vddcx_ao,
+ [SDM660_VDDCX_VFL] = &sdm660_vddcx_vfl,
+ [SDM660_VDDMX] = &sdm660_vddmx,
+ [SDM660_VDDMX_AO] = &sdm660_vddmx_ao,
+ [SDM660_VDDMX_VFL] = &sdm660_vddmx_vfl,
+ [SDM660_SSCCX] = &sdm660_vdd_ssccx,
+ [SDM660_SSCCX_VFL] = &sdm660_vdd_ssccx_vfl,
+ [SDM660_SSCMX] = &sdm660_vdd_sscmx,
+ [SDM660_SSCMX_VFL] = &sdm660_vdd_sscmx_vfl,
+};
+
+static const struct rpmpd_desc sdm660_desc = {
+ .rpmpds = sdm660_rpmpds,
+ .num_pds = ARRAY_SIZE(sdm660_rpmpds),
+ .max_state = RPM_SMD_LEVEL_TURBO,
+};
+
+/* sm4250/6115 RPM Power domains */
+DEFINE_RPMPD_PAIR(sm6115, vddcx, vddcx_ao, RWCX, LEVEL, 0);
+DEFINE_RPMPD_VFL(sm6115, vddcx_vfl, RWCX, 0);
+
+DEFINE_RPMPD_PAIR(sm6115, vddmx, vddmx_ao, RWMX, LEVEL, 0);
+DEFINE_RPMPD_VFL(sm6115, vddmx_vfl, RWMX, 0);
+
+DEFINE_RPMPD_LEVEL(sm6115, vdd_lpi_cx, RWLC, 0);
+DEFINE_RPMPD_LEVEL(sm6115, vdd_lpi_mx, RWLM, 0);
+
+static struct rpmpd *sm6115_rpmpds[] = {
+ [SM6115_VDDCX] = &sm6115_vddcx,
+ [SM6115_VDDCX_AO] = &sm6115_vddcx_ao,
+ [SM6115_VDDCX_VFL] = &sm6115_vddcx_vfl,
+ [SM6115_VDDMX] = &sm6115_vddmx,
+ [SM6115_VDDMX_AO] = &sm6115_vddmx_ao,
+ [SM6115_VDDMX_VFL] = &sm6115_vddmx_vfl,
+ [SM6115_VDD_LPI_CX] = &sm6115_vdd_lpi_cx,
+ [SM6115_VDD_LPI_MX] = &sm6115_vdd_lpi_mx,
+};
+
+static const struct rpmpd_desc sm6115_desc = {
+ .rpmpds = sm6115_rpmpds,
+ .num_pds = ARRAY_SIZE(sm6115_rpmpds),
+ .max_state = RPM_SMD_LEVEL_TURBO_NO_CPR,
+};
+
+/* sm6125 RPM Power domains */
+DEFINE_RPMPD_PAIR(sm6125, vddcx, vddcx_ao, RWCX, LEVEL, 0);
+DEFINE_RPMPD_VFL(sm6125, vddcx_vfl, RWCX, 0);
+
+DEFINE_RPMPD_PAIR(sm6125, vddmx, vddmx_ao, RWMX, LEVEL, 0);
+DEFINE_RPMPD_VFL(sm6125, vddmx_vfl, RWMX, 0);
+
+static struct rpmpd *sm6125_rpmpds[] = {
+ [SM6125_VDDCX] = &sm6125_vddcx,
+ [SM6125_VDDCX_AO] = &sm6125_vddcx_ao,
+ [SM6125_VDDCX_VFL] = &sm6125_vddcx_vfl,
+ [SM6125_VDDMX] = &sm6125_vddmx,
+ [SM6125_VDDMX_AO] = &sm6125_vddmx_ao,
+ [SM6125_VDDMX_VFL] = &sm6125_vddmx_vfl,
+};
+
+static const struct rpmpd_desc sm6125_desc = {
+ .rpmpds = sm6125_rpmpds,
+ .num_pds = ARRAY_SIZE(sm6125_rpmpds),
+ .max_state = RPM_SMD_LEVEL_BINNING,
+};
+
+DEFINE_RPMPD_PAIR(sm6375, vddgx, vddgx_ao, RWGX, LEVEL, 0);
+static struct rpmpd *sm6375_rpmpds[] = {
+ [SM6375_VDDCX] = &sm6125_vddcx,
+ [SM6375_VDDCX_AO] = &sm6125_vddcx_ao,
+ [SM6375_VDDCX_VFL] = &sm6125_vddcx_vfl,
+ [SM6375_VDDMX] = &sm6125_vddmx,
+ [SM6375_VDDMX_AO] = &sm6125_vddmx_ao,
+ [SM6375_VDDMX_VFL] = &sm6125_vddmx_vfl,
+ [SM6375_VDDGX] = &sm6375_vddgx,
+ [SM6375_VDDGX_AO] = &sm6375_vddgx_ao,
+ [SM6375_VDD_LPI_CX] = &sm6115_vdd_lpi_cx,
+ [SM6375_VDD_LPI_MX] = &sm6115_vdd_lpi_mx,
+};
+
+static const struct rpmpd_desc sm6375_desc = {
+ .rpmpds = sm6375_rpmpds,
+ .num_pds = ARRAY_SIZE(sm6375_rpmpds),
+ .max_state = RPM_SMD_LEVEL_TURBO_NO_CPR,
+};
+
+static struct rpmpd *qcm2290_rpmpds[] = {
+ [QCM2290_VDDCX] = &sm6115_vddcx,
+ [QCM2290_VDDCX_AO] = &sm6115_vddcx_ao,
+ [QCM2290_VDDCX_VFL] = &sm6115_vddcx_vfl,
+ [QCM2290_VDDMX] = &sm6115_vddmx,
+ [QCM2290_VDDMX_AO] = &sm6115_vddmx_ao,
+ [QCM2290_VDDMX_VFL] = &sm6115_vddmx_vfl,
+ [QCM2290_VDD_LPI_CX] = &sm6115_vdd_lpi_cx,
+ [QCM2290_VDD_LPI_MX] = &sm6115_vdd_lpi_mx,
+};
+
+static const struct rpmpd_desc qcm2290_desc = {
+ .rpmpds = qcm2290_rpmpds,
+ .num_pds = ARRAY_SIZE(qcm2290_rpmpds),
+ .max_state = RPM_SMD_LEVEL_TURBO_NO_CPR,
+};
+
+static const struct of_device_id rpmpd_match_table[] = {
+ { .compatible = "qcom,mdm9607-rpmpd", .data = &mdm9607_desc },
+ { .compatible = "qcom,msm8226-rpmpd", .data = &msm8226_desc },
+ { .compatible = "qcom,msm8909-rpmpd", .data = &msm8916_desc },
+ { .compatible = "qcom,msm8916-rpmpd", .data = &msm8916_desc },
+ { .compatible = "qcom,msm8939-rpmpd", .data = &msm8939_desc },
+ { .compatible = "qcom,msm8953-rpmpd", .data = &msm8953_desc },
+ { .compatible = "qcom,msm8976-rpmpd", .data = &msm8976_desc },
+ { .compatible = "qcom,msm8994-rpmpd", .data = &msm8994_desc },
+ { .compatible = "qcom,msm8996-rpmpd", .data = &msm8996_desc },
+ { .compatible = "qcom,msm8998-rpmpd", .data = &msm8998_desc },
+ { .compatible = "qcom,qcm2290-rpmpd", .data = &qcm2290_desc },
+ { .compatible = "qcom,qcs404-rpmpd", .data = &qcs404_desc },
+ { .compatible = "qcom,sdm660-rpmpd", .data = &sdm660_desc },
+ { .compatible = "qcom,sm6115-rpmpd", .data = &sm6115_desc },
+ { .compatible = "qcom,sm6125-rpmpd", .data = &sm6125_desc },
+ { .compatible = "qcom,sm6375-rpmpd", .data = &sm6375_desc },
+ { }
+};
+MODULE_DEVICE_TABLE(of, rpmpd_match_table);
+
+static int rpmpd_send_enable(struct rpmpd *pd, bool enable)
+{
+ struct rpmpd_req req = {
+ .key = KEY_ENABLE,
+ .nbytes = cpu_to_le32(sizeof(u32)),
+ .value = cpu_to_le32(enable),
+ };
+
+ return qcom_rpm_smd_write(pd->rpm, QCOM_SMD_RPM_ACTIVE_STATE,
+ pd->res_type, pd->res_id, &req, sizeof(req));
+}
+
+static int rpmpd_send_corner(struct rpmpd *pd, int state, unsigned int corner)
+{
+ struct rpmpd_req req = {
+ .key = pd->key,
+ .nbytes = cpu_to_le32(sizeof(u32)),
+ .value = cpu_to_le32(corner),
+ };
+
+ return qcom_rpm_smd_write(pd->rpm, state, pd->res_type, pd->res_id,
+ &req, sizeof(req));
+}
+
+static void to_active_sleep(struct rpmpd *pd, unsigned int corner,
+ unsigned int *active, unsigned int *sleep)
+{
+ *active = corner;
+
+ if (pd->active_only)
+ *sleep = 0;
+ else
+ *sleep = *active;
+}
+
+static int rpmpd_aggregate_corner(struct rpmpd *pd)
+{
+ int ret;
+ struct rpmpd *peer = pd->peer;
+ unsigned int active_corner, sleep_corner;
+ unsigned int this_active_corner = 0, this_sleep_corner = 0;
+ unsigned int peer_active_corner = 0, peer_sleep_corner = 0;
+
+ to_active_sleep(pd, pd->corner, &this_active_corner, &this_sleep_corner);
+
+ if (peer && peer->enabled)
+ to_active_sleep(peer, peer->corner, &peer_active_corner,
+ &peer_sleep_corner);
+
+ active_corner = max(this_active_corner, peer_active_corner);
+
+ ret = rpmpd_send_corner(pd, QCOM_SMD_RPM_ACTIVE_STATE, active_corner);
+ if (ret)
+ return ret;
+
+ sleep_corner = max(this_sleep_corner, peer_sleep_corner);
+
+ return rpmpd_send_corner(pd, QCOM_SMD_RPM_SLEEP_STATE, sleep_corner);
+}
+
+static int rpmpd_power_on(struct generic_pm_domain *domain)
+{
+ int ret;
+ struct rpmpd *pd = domain_to_rpmpd(domain);
+
+ mutex_lock(&rpmpd_lock);
+
+ ret = rpmpd_send_enable(pd, true);
+ if (ret)
+ goto out;
+
+ pd->enabled = true;
+
+ if (pd->corner)
+ ret = rpmpd_aggregate_corner(pd);
+
+out:
+ mutex_unlock(&rpmpd_lock);
+
+ return ret;
+}
+
+static int rpmpd_power_off(struct generic_pm_domain *domain)
+{
+ int ret;
+ struct rpmpd *pd = domain_to_rpmpd(domain);
+
+ mutex_lock(&rpmpd_lock);
+
+ ret = rpmpd_send_enable(pd, false);
+ if (!ret)
+ pd->enabled = false;
+
+ mutex_unlock(&rpmpd_lock);
+
+ return ret;
+}
+
+static int rpmpd_set_performance(struct generic_pm_domain *domain,
+ unsigned int state)
+{
+ int ret = 0;
+ struct rpmpd *pd = domain_to_rpmpd(domain);
+
+ if (state > pd->max_state)
+ state = pd->max_state;
+
+ mutex_lock(&rpmpd_lock);
+
+ pd->corner = state;
+
+ /* Always send updates for vfc and vfl */
+ if (!pd->enabled && pd->key != KEY_FLOOR_CORNER &&
+ pd->key != KEY_FLOOR_LEVEL)
+ goto out;
+
+ ret = rpmpd_aggregate_corner(pd);
+
+out:
+ mutex_unlock(&rpmpd_lock);
+
+ return ret;
+}
+
+static unsigned int rpmpd_get_performance(struct generic_pm_domain *genpd,
+ struct dev_pm_opp *opp)
+{
+ return dev_pm_opp_get_level(opp);
+}
+
+static int rpmpd_probe(struct platform_device *pdev)
+{
+ int i;
+ size_t num;
+ struct genpd_onecell_data *data;
+ struct qcom_smd_rpm *rpm;
+ struct rpmpd **rpmpds;
+ const struct rpmpd_desc *desc;
+
+ rpm = dev_get_drvdata(pdev->dev.parent);
+ if (!rpm) {
+ dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n");
+ return -ENODEV;
+ }
+
+ desc = of_device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ rpmpds = desc->rpmpds;
+ num = desc->num_pds;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->domains = devm_kcalloc(&pdev->dev, num, sizeof(*data->domains),
+ GFP_KERNEL);
+ if (!data->domains)
+ return -ENOMEM;
+
+ data->num_domains = num;
+
+ for (i = 0; i < num; i++) {
+ if (!rpmpds[i]) {
+ dev_warn(&pdev->dev, "rpmpds[] with empty entry at index=%d\n",
+ i);
+ continue;
+ }
+
+ rpmpds[i]->rpm = rpm;
+ rpmpds[i]->max_state = desc->max_state;
+ rpmpds[i]->pd.power_off = rpmpd_power_off;
+ rpmpds[i]->pd.power_on = rpmpd_power_on;
+ rpmpds[i]->pd.set_performance_state = rpmpd_set_performance;
+ rpmpds[i]->pd.opp_to_performance_state = rpmpd_get_performance;
+ pm_genpd_init(&rpmpds[i]->pd, NULL, true);
+
+ data->domains[i] = &rpmpds[i]->pd;
+ }
+
+ return of_genpd_add_provider_onecell(pdev->dev.of_node, data);
+}
+
+static struct platform_driver rpmpd_driver = {
+ .driver = {
+ .name = "qcom-rpmpd",
+ .of_match_table = rpmpd_match_table,
+ .suppress_bind_attrs = true,
+ },
+ .probe = rpmpd_probe,
+};
+
+static int __init rpmpd_init(void)
+{
+ return platform_driver_register(&rpmpd_driver);
+}
+core_initcall(rpmpd_init);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. RPM Power Domain Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c
new file mode 100644
index 000000000..413f9f4ae
--- /dev/null
+++ b/drivers/soc/qcom/smd-rpm.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+
+#include <linux/rpmsg.h>
+#include <linux/soc/qcom/smd-rpm.h>
+
+#define RPM_REQUEST_TIMEOUT (5 * HZ)
+
+/**
+ * struct qcom_smd_rpm - state of the rpm device driver
+ * @rpm_channel: reference to the smd channel
+ * @icc: interconnect proxy device
+ * @dev: rpm device
+ * @ack: completion for acks
+ * @lock: mutual exclusion around the send/complete pair
+ * @ack_status: result of the rpm request
+ */
+struct qcom_smd_rpm {
+ struct rpmsg_endpoint *rpm_channel;
+ struct platform_device *icc;
+ struct device *dev;
+
+ struct completion ack;
+ struct mutex lock;
+ int ack_status;
+};
+
+/**
+ * struct qcom_rpm_header - header for all rpm requests and responses
+ * @service_type: identifier of the service
+ * @length: length of the payload
+ */
+struct qcom_rpm_header {
+ __le32 service_type;
+ __le32 length;
+};
+
+/**
+ * struct qcom_rpm_request - request message to the rpm
+ * @msg_id: identifier of the outgoing message
+ * @flags: active/sleep state flags
+ * @type: resource type
+ * @id: resource id
+ * @data_len: length of the payload following this header
+ */
+struct qcom_rpm_request {
+ __le32 msg_id;
+ __le32 flags;
+ __le32 type;
+ __le32 id;
+ __le32 data_len;
+};
+
+/**
+ * struct qcom_rpm_message - response message from the rpm
+ * @msg_type: indicator of the type of message
+ * @length: the size of this message, including the message header
+ * @msg_id: message id
+ * @message: textual message from the rpm
+ *
+ * Multiple of these messages can be stacked in an rpm message.
+ */
+struct qcom_rpm_message {
+ __le32 msg_type;
+ __le32 length;
+ union {
+ __le32 msg_id;
+ u8 message[0];
+ };
+};
+
+#define RPM_SERVICE_TYPE_REQUEST 0x00716572 /* "req\0" */
+
+#define RPM_MSG_TYPE_ERR 0x00727265 /* "err\0" */
+#define RPM_MSG_TYPE_MSG_ID 0x2367736d /* "msg#" */
+
+/**
+ * qcom_rpm_smd_write - write @buf to @type:@id
+ * @rpm: rpm handle
+ * @state: active/sleep state flags
+ * @type: resource type
+ * @id: resource identifier
+ * @buf: the data to be written
+ * @count: number of bytes in @buf
+ */
+int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
+ int state,
+ u32 type, u32 id,
+ void *buf,
+ size_t count)
+{
+ static unsigned msg_id = 1;
+ int left;
+ int ret;
+ struct {
+ struct qcom_rpm_header hdr;
+ struct qcom_rpm_request req;
+ u8 payload[];
+ } *pkt;
+ size_t size = sizeof(*pkt) + count;
+
+ /* SMD packets to the RPM may not exceed 256 bytes */
+ if (WARN_ON(size >= 256))
+ return -EINVAL;
+
+ pkt = kmalloc(size, GFP_KERNEL);
+ if (!pkt)
+ return -ENOMEM;
+
+ mutex_lock(&rpm->lock);
+
+ pkt->hdr.service_type = cpu_to_le32(RPM_SERVICE_TYPE_REQUEST);
+ pkt->hdr.length = cpu_to_le32(sizeof(struct qcom_rpm_request) + count);
+
+ pkt->req.msg_id = cpu_to_le32(msg_id++);
+ pkt->req.flags = cpu_to_le32(state);
+ pkt->req.type = cpu_to_le32(type);
+ pkt->req.id = cpu_to_le32(id);
+ pkt->req.data_len = cpu_to_le32(count);
+ memcpy(pkt->payload, buf, count);
+
+ ret = rpmsg_send(rpm->rpm_channel, pkt, size);
+ if (ret)
+ goto out;
+
+ left = wait_for_completion_timeout(&rpm->ack, RPM_REQUEST_TIMEOUT);
+ if (!left)
+ ret = -ETIMEDOUT;
+ else
+ ret = rpm->ack_status;
+
+out:
+ kfree(pkt);
+ mutex_unlock(&rpm->lock);
+ return ret;
+}
+EXPORT_SYMBOL(qcom_rpm_smd_write);
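+
+/*
+ * Editor's note: illustrative sketch only, not part of the upstream driver.
+ * It shows how a hypothetical client might use the exported call above to
+ * enable an assumed "smpa" resource with id 1; the key/value layout mirrors
+ * the one used by the rpmpd driver elsewhere in this series.
+ */
+static int __maybe_unused example_rpm_smd_enable(struct qcom_smd_rpm *rpm)
+{
+ struct {
+ __le32 key;
+ __le32 nbytes;
+ __le32 value;
+ } req = {
+ .key = cpu_to_le32(0x6e657773), /* "swen" */
+ .nbytes = cpu_to_le32(sizeof(u32)),
+ .value = cpu_to_le32(1),
+ };
+
+ return qcom_rpm_smd_write(rpm, QCOM_SMD_RPM_ACTIVE_STATE,
+ 0x61706d73 /* "smpa" */, 1, &req, sizeof(req));
+}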
+
+static int qcom_smd_rpm_callback(struct rpmsg_device *rpdev,
+ void *data,
+ int count,
+ void *priv,
+ u32 addr)
+{
+ const struct qcom_rpm_header *hdr = data;
+ size_t hdr_length = le32_to_cpu(hdr->length);
+ const struct qcom_rpm_message *msg;
+ struct qcom_smd_rpm *rpm = dev_get_drvdata(&rpdev->dev);
+ const u8 *buf = data + sizeof(struct qcom_rpm_header);
+ const u8 *end = buf + hdr_length;
+ char msgbuf[32];
+ int status = 0;
+ u32 len, msg_length;
+
+ if (le32_to_cpu(hdr->service_type) != RPM_SERVICE_TYPE_REQUEST ||
+ hdr_length < sizeof(struct qcom_rpm_message)) {
+ dev_err(rpm->dev, "invalid request\n");
+ return 0;
+ }
+
+ while (buf < end) {
+ msg = (struct qcom_rpm_message *)buf;
+ msg_length = le32_to_cpu(msg->length);
+ switch (le32_to_cpu(msg->msg_type)) {
+ case RPM_MSG_TYPE_MSG_ID:
+ break;
+ case RPM_MSG_TYPE_ERR:
+ len = min_t(u32, ALIGN(msg_length, 4), sizeof(msgbuf));
+ memcpy_fromio(msgbuf, msg->message, len);
+ msgbuf[len - 1] = 0;
+
+ if (!strcmp(msgbuf, "resource does not exist"))
+ status = -ENXIO;
+ else
+ status = -EINVAL;
+ break;
+ }
+
+ buf = PTR_ALIGN(buf + 2 * sizeof(u32) + msg_length, 4);
+ }
+
+ rpm->ack_status = status;
+ complete(&rpm->ack);
+ return 0;
+}
+
+static int qcom_smd_rpm_probe(struct rpmsg_device *rpdev)
+{
+ struct qcom_smd_rpm *rpm;
+ int ret;
+
+ rpm = devm_kzalloc(&rpdev->dev, sizeof(*rpm), GFP_KERNEL);
+ if (!rpm)
+ return -ENOMEM;
+
+ mutex_init(&rpm->lock);
+ init_completion(&rpm->ack);
+
+ rpm->dev = &rpdev->dev;
+ rpm->rpm_channel = rpdev->ept;
+ dev_set_drvdata(&rpdev->dev, rpm);
+
+ rpm->icc = platform_device_register_data(&rpdev->dev, "icc_smd_rpm", -1,
+ NULL, 0);
+ if (IS_ERR(rpm->icc))
+ return PTR_ERR(rpm->icc);
+
+ ret = of_platform_populate(rpdev->dev.of_node, NULL, NULL, &rpdev->dev);
+ if (ret)
+ platform_device_unregister(rpm->icc);
+
+ return ret;
+}
+
+static void qcom_smd_rpm_remove(struct rpmsg_device *rpdev)
+{
+ struct qcom_smd_rpm *rpm = dev_get_drvdata(&rpdev->dev);
+
+ platform_device_unregister(rpm->icc);
+ of_platform_depopulate(&rpdev->dev);
+}
+
+static const struct of_device_id qcom_smd_rpm_of_match[] = {
+ { .compatible = "qcom,rpm-apq8084" },
+ { .compatible = "qcom,rpm-ipq6018" },
+ { .compatible = "qcom,rpm-msm8226" },
+ { .compatible = "qcom,rpm-msm8909" },
+ { .compatible = "qcom,rpm-msm8916" },
+ { .compatible = "qcom,rpm-msm8936" },
+ { .compatible = "qcom,rpm-msm8953" },
+ { .compatible = "qcom,rpm-msm8974" },
+ { .compatible = "qcom,rpm-msm8976" },
+ { .compatible = "qcom,rpm-msm8994" },
+ { .compatible = "qcom,rpm-msm8996" },
+ { .compatible = "qcom,rpm-msm8998" },
+ { .compatible = "qcom,rpm-sdm660" },
+ { .compatible = "qcom,rpm-sm6115" },
+ { .compatible = "qcom,rpm-sm6125" },
+ { .compatible = "qcom,rpm-qcm2290" },
+ { .compatible = "qcom,rpm-qcs404" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_smd_rpm_of_match);
+
+static struct rpmsg_driver qcom_smd_rpm_driver = {
+ .probe = qcom_smd_rpm_probe,
+ .remove = qcom_smd_rpm_remove,
+ .callback = qcom_smd_rpm_callback,
+ .drv = {
+ .name = "qcom_smd_rpm",
+ .of_match_table = qcom_smd_rpm_of_match,
+ },
+};
+
+static int __init qcom_smd_rpm_init(void)
+{
+ return register_rpmsg_driver(&qcom_smd_rpm_driver);
+}
+arch_initcall(qcom_smd_rpm_init);
+
+static void __exit qcom_smd_rpm_exit(void)
+{
+ unregister_rpmsg_driver(&qcom_smd_rpm_driver);
+}
+module_exit(qcom_smd_rpm_exit);
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
+MODULE_DESCRIPTION("Qualcomm SMD backed RPM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
new file mode 100644
index 000000000..af8d90efd
--- /dev/null
+++ b/drivers/soc/qcom/smem.c
@@ -0,0 +1,1197 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/hwspinlock.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/smem.h>
+
+/*
+ * The Qualcomm shared memory system is an allocate-only heap structure that
+ * consists of one or more memory areas that can be accessed by the processors
+ * in the SoC.
+ *
+ * All systems contain a global heap, accessible by all processors in the SoC,
+ * with a table of contents data structure (@smem_header) at the beginning of
+ * the main shared memory block.
+ *
+ * The global header contains meta data for allocations as well as a fixed list
+ * of 512 entries (@smem_global_entry) that can be initialized to reference
+ * parts of the shared memory space.
+ *
+ *
+ * In addition to this global heap a set of "private" heaps can be set up at
+ * boot time with access restrictions so that only certain processor pairs can
+ * access the data.
+ *
+ * These partitions are referenced from an optional partition table
+ * (@smem_ptable), that is found 4kB from the end of the main smem region. The
+ * partition table entries (@smem_ptable_entry) lists the involved processors
+ * (or hosts) and their location in the main shared memory region.
+ *
+ * Each partition starts with a header (@smem_partition_header) that identifies
+ * the partition and holds properties for the two internal memory regions. The
+ * two regions are cached and non-cached memory respectively. Each region
+ * contains a linked list of allocation headers (@smem_private_entry) followed by
+ * their data.
+ *
+ * Items in the non-cached region are allocated from the start of the partition
+ * while items in the cached region are allocated from the end. The free area
+ * is hence the region between the cached and non-cached offsets. The header of
+ * cached items comes after the data.
+ *
+ * Version 12 (SMEM_GLOBAL_PART_VERSION) changes the item alloc/get procedure
+ * for the global heap. A new global partition is created from the global heap
+ * region with partition type (SMEM_GLOBAL_HOST) and the max smem item count is
+ * set by the bootloader.
+ *
+ * To synchronize allocations in the shared memory heaps a remote spinlock must
+ * be held - currently lock number 3 of the sfpb or tcsr is used for this on all
+ * platforms.
+ *
+ */
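+
+/*
+ * Address arithmetic sketch (illustrative only; it restates what the
+ * phdr_to_* and *_entry_* helpers further down implement): with @phdr
+ * pointing at a mapped partition header and @e at an uncached item header,
+ *
+ *   first uncached header: (void *)phdr + sizeof(*phdr)
+ *   item data:             (void *)e + sizeof(*e) + le16_to_cpu(e->padding_hdr)
+ *   next uncached header:  item data + le32_to_cpu(e->size)
+ *
+ * while a cached item header @e is stored after its data, so:
+ *
+ *   item data:             (void *)e - le32_to_cpu(e->size)
+ */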
+
+/*
+ * The version member of the smem header contains an array of versions for the
+ * various software components in the SoC. We verify that the boot loader
+ * version is a valid version as a sanity check.
+ */
+#define SMEM_MASTER_SBL_VERSION_INDEX 7
+#define SMEM_GLOBAL_HEAP_VERSION 11
+#define SMEM_GLOBAL_PART_VERSION 12
+
+/*
+ * The first 8 items are only to be allocated by the boot loader while
+ * initializing the heap.
+ */
+#define SMEM_ITEM_LAST_FIXED 8
+
+/* Highest accepted item number, for both global and private heaps */
+#define SMEM_ITEM_COUNT 512
+
+/* Processor/host identifier for the application processor */
+#define SMEM_HOST_APPS 0
+
+/* Processor/host identifier for the global partition */
+#define SMEM_GLOBAL_HOST 0xfffe
+
+/* Max number of processors/hosts in a system */
+#define SMEM_HOST_COUNT 15
+
+/**
+ * struct smem_proc_comm - proc_comm communication struct (legacy)
+ * @command: current command to be executed
+ * @status: status of the currently requested command
+ * @params: parameters to the command
+ */
+struct smem_proc_comm {
+ __le32 command;
+ __le32 status;
+ __le32 params[2];
+};
+
+/**
+ * struct smem_global_entry - entry to reference smem items on the heap
+ * @allocated: boolean to indicate if this entry is used
+ * @offset: offset to the allocated space
+ * @size: size of the allocated space, 8 byte aligned
+ * @aux_base: base address for the memory region used by this unit, or 0 for
+ * the default region. bits 0,1 are reserved
+ */
+struct smem_global_entry {
+ __le32 allocated;
+ __le32 offset;
+ __le32 size;
+ __le32 aux_base; /* bits 1:0 reserved */
+};
+#define AUX_BASE_MASK 0xfffffffc
+
+/**
+ * struct smem_header - header found in beginning of primary smem region
+ * @proc_comm: proc_comm communication interface (legacy)
+ * @version: array of versions for the various subsystems
+ * @initialized: boolean to indicate that smem is initialized
+ * @free_offset: index of the first unallocated byte in smem
+ * @available: number of bytes available for allocation
+ * @reserved: reserved field, must be 0
+ * @toc: array of references to items
+ */
+struct smem_header {
+ struct smem_proc_comm proc_comm[4];
+ __le32 version[32];
+ __le32 initialized;
+ __le32 free_offset;
+ __le32 available;
+ __le32 reserved;
+ struct smem_global_entry toc[SMEM_ITEM_COUNT];
+};
+
+/**
+ * struct smem_ptable_entry - one entry in the @smem_ptable list
+ * @offset: offset, within the main shared memory region, of the partition
+ * @size: size of the partition
+ * @flags: flags for the partition (currently unused)
+ * @host0: first processor/host with access to this partition
+ * @host1: second processor/host with access to this partition
+ * @cacheline: alignment for "cached" entries
+ * @reserved: reserved entries for later use
+ */
+struct smem_ptable_entry {
+ __le32 offset;
+ __le32 size;
+ __le32 flags;
+ __le16 host0;
+ __le16 host1;
+ __le32 cacheline;
+ __le32 reserved[7];
+};
+
+/**
+ * struct smem_ptable - partition table for the private partitions
+ * @magic: magic number, must be SMEM_PTABLE_MAGIC
+ * @version: version of the partition table
+ * @num_entries: number of partitions in the table
+ * @reserved: for now reserved entries
+ * @entry: list of @smem_ptable_entry for the @num_entries partitions
+ */
+struct smem_ptable {
+ u8 magic[4];
+ __le32 version;
+ __le32 num_entries;
+ __le32 reserved[5];
+ struct smem_ptable_entry entry[];
+};
+
+static const u8 SMEM_PTABLE_MAGIC[] = { 0x24, 0x54, 0x4f, 0x43 }; /* "$TOC" */
+
+/**
+ * struct smem_partition_header - header of the partitions
+ * @magic: magic number, must be SMEM_PART_MAGIC
+ * @host0: first processor/host with access to this partition
+ * @host1: second processor/host with access to this partition
+ * @size: size of the partition
+ * @offset_free_uncached: offset to the first free byte of uncached memory in
+ * this partition
+ * @offset_free_cached: offset to the first free byte of cached memory in this
+ * partition
+ * @reserved: for now reserved entries
+ */
+struct smem_partition_header {
+ u8 magic[4];
+ __le16 host0;
+ __le16 host1;
+ __le32 size;
+ __le32 offset_free_uncached;
+ __le32 offset_free_cached;
+ __le32 reserved[3];
+};
+
+/**
+ * struct smem_partition - describes smem partition
+ * @virt_base: starting virtual address of partition
+ * @phys_base: starting physical address of partition
+ * @cacheline: alignment for "cached" entries
+ * @size: size of partition
+ */
+struct smem_partition {
+ void __iomem *virt_base;
+ phys_addr_t phys_base;
+ size_t cacheline;
+ size_t size;
+};
+
+static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 };
+
+/**
+ * struct smem_private_entry - header of each item in the private partition
+ * @canary: magic number, must be SMEM_PRIVATE_CANARY
+ * @item: identifying number of the smem item
+ * @size: size of the data, including padding bytes
+ * @padding_data: number of bytes of padding of data
+ * @padding_hdr: number of bytes of padding between the header and the data
+ * @reserved: for now reserved entry
+ */
+struct smem_private_entry {
+ u16 canary; /* bytes are the same so no swapping needed */
+ __le16 item;
+ __le32 size; /* includes padding bytes */
+ __le16 padding_data;
+ __le16 padding_hdr;
+ __le32 reserved;
+};
+#define SMEM_PRIVATE_CANARY 0xa5a5
+
+/**
+ * struct smem_info - smem region info located after the table of contents
+ * @magic: magic number, must be SMEM_INFO_MAGIC
+ * @size: size of the smem region
+ * @base_addr: base address of the smem region
+ * @reserved: for now reserved entry
+ * @num_items: highest accepted item number
+ */
+struct smem_info {
+ u8 magic[4];
+ __le32 size;
+ __le32 base_addr;
+ __le32 reserved;
+ __le16 num_items;
+};
+
+static const u8 SMEM_INFO_MAGIC[] = { 0x53, 0x49, 0x49, 0x49 }; /* SIII */
+
+/**
+ * struct smem_region - representation of a chunk of memory used for smem
+ * @aux_base: identifier of aux_mem base
+ * @virt_base: virtual base address of memory with this aux_mem identifier
+ * @size: size of the memory region
+ */
+struct smem_region {
+ phys_addr_t aux_base;
+ void __iomem *virt_base;
+ size_t size;
+};
+
+/**
+ * struct qcom_smem - device data for the smem device
+ * @dev: device pointer
+ * @hwlock: reference to a hwspinlock
+ * @ptable: virtual base of partition table
+ * @global_partition: describes the global partition when in use
+ * @partitions: list of partitions of current processor/host
+ * @item_count: max accepted item number
+ * @socinfo: platform device pointer
+ * @num_regions: number of @regions
+ * @regions: list of the memory regions defining the shared memory
+ */
+struct qcom_smem {
+ struct device *dev;
+
+ struct hwspinlock *hwlock;
+
+ u32 item_count;
+ struct platform_device *socinfo;
+ struct smem_ptable *ptable;
+ struct smem_partition global_partition;
+ struct smem_partition partitions[SMEM_HOST_COUNT];
+
+ unsigned num_regions;
+ struct smem_region regions[];
+};
+
+static void *
+phdr_to_last_uncached_entry(struct smem_partition_header *phdr)
+{
+ void *p = phdr;
+
+ return p + le32_to_cpu(phdr->offset_free_uncached);
+}
+
+static struct smem_private_entry *
+phdr_to_first_cached_entry(struct smem_partition_header *phdr,
+ size_t cacheline)
+{
+ void *p = phdr;
+ struct smem_private_entry *e;
+
+ return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*e), cacheline);
+}
+
+static void *
+phdr_to_last_cached_entry(struct smem_partition_header *phdr)
+{
+ void *p = phdr;
+
+ return p + le32_to_cpu(phdr->offset_free_cached);
+}
+
+static struct smem_private_entry *
+phdr_to_first_uncached_entry(struct smem_partition_header *phdr)
+{
+ void *p = phdr;
+
+ return p + sizeof(*phdr);
+}
+
+static struct smem_private_entry *
+uncached_entry_next(struct smem_private_entry *e)
+{
+ void *p = e;
+
+ return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) +
+ le32_to_cpu(e->size);
+}
+
+static struct smem_private_entry *
+cached_entry_next(struct smem_private_entry *e, size_t cacheline)
+{
+ void *p = e;
+
+ return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline);
+}
+
+static void *uncached_entry_to_item(struct smem_private_entry *e)
+{
+ void *p = e;
+
+ return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
+}
+
+static void *cached_entry_to_item(struct smem_private_entry *e)
+{
+ void *p = e;
+
+ return p - le32_to_cpu(e->size);
+}
+
+/* Pointer to the one and only smem handle */
+static struct qcom_smem *__smem;
+
+/* Timeout (ms) for the trylock of remote spinlocks */
+#define HWSPINLOCK_TIMEOUT 1000
+
+static int qcom_smem_alloc_private(struct qcom_smem *smem,
+ struct smem_partition *part,
+ unsigned item,
+ size_t size)
+{
+ struct smem_private_entry *hdr, *end;
+ struct smem_partition_header *phdr;
+ size_t alloc_size;
+ void *cached;
+ void *p_end;
+
+ phdr = (struct smem_partition_header __force *)part->virt_base;
+ p_end = (void *)phdr + part->size;
+
+ hdr = phdr_to_first_uncached_entry(phdr);
+ end = phdr_to_last_uncached_entry(phdr);
+ cached = phdr_to_last_cached_entry(phdr);
+
+ if (WARN_ON((void *)end > p_end || cached > p_end))
+ return -EINVAL;
+
+ while (hdr < end) {
+ if (hdr->canary != SMEM_PRIVATE_CANARY)
+ goto bad_canary;
+ if (le16_to_cpu(hdr->item) == item)
+ return -EEXIST;
+
+ hdr = uncached_entry_next(hdr);
+ }
+
+ if (WARN_ON((void *)hdr > p_end))
+ return -EINVAL;
+
+ /* Check that we don't grow into the cached region */
+ alloc_size = sizeof(*hdr) + ALIGN(size, 8);
+ if ((void *)hdr + alloc_size > cached) {
+ dev_err(smem->dev, "Out of memory\n");
+ return -ENOSPC;
+ }
+
+ hdr->canary = SMEM_PRIVATE_CANARY;
+ hdr->item = cpu_to_le16(item);
+ hdr->size = cpu_to_le32(ALIGN(size, 8));
+ hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size);
+ hdr->padding_hdr = 0;
+
+ /*
+ * Ensure the header is written before we advance the free offset, so
+ * that remote processors that do not take the remote spinlock still
+ * get a consistent view of the linked list.
+ */
+ wmb();
+ le32_add_cpu(&phdr->offset_free_uncached, alloc_size);
+
+ return 0;
+bad_canary:
+ dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
+ le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));
+
+ return -EINVAL;
+}
+
+static int qcom_smem_alloc_global(struct qcom_smem *smem,
+ unsigned item,
+ size_t size)
+{
+ struct smem_global_entry *entry;
+ struct smem_header *header;
+
+ header = smem->regions[0].virt_base;
+ entry = &header->toc[item];
+ if (entry->allocated)
+ return -EEXIST;
+
+ size = ALIGN(size, 8);
+ if (WARN_ON(size > le32_to_cpu(header->available)))
+ return -ENOMEM;
+
+ entry->offset = header->free_offset;
+ entry->size = cpu_to_le32(size);
+
+ /*
+ * Ensure the header is consistent before we mark the item allocated,
+ * so that remote processors will get a consistent view of the item
+ * even though they do not take the spinlock on read.
+ */
+ wmb();
+ entry->allocated = cpu_to_le32(1);
+
+ le32_add_cpu(&header->free_offset, size);
+ le32_add_cpu(&header->available, -size);
+
+ return 0;
+}
+
+/**
+ * qcom_smem_alloc() - allocate space for a smem item
+ * @host: remote processor id, or -1
+ * @item: smem item handle
+ * @size: number of bytes to be allocated
+ *
+ * Allocate space for a given smem item of size @size, given that the item is
+ * not yet allocated.
+ */
+int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
+{
+ struct smem_partition *part;
+ unsigned long flags;
+ int ret;
+
+ if (!__smem)
+ return -EPROBE_DEFER;
+
+ if (item < SMEM_ITEM_LAST_FIXED) {
+ dev_err(__smem->dev,
+ "Rejecting allocation of static entry %d\n", item);
+ return -EINVAL;
+ }
+
+ if (WARN_ON(item >= __smem->item_count))
+ return -EINVAL;
+
+ ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
+ HWSPINLOCK_TIMEOUT,
+ &flags);
+ if (ret)
+ return ret;
+
+ if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
+ part = &__smem->partitions[host];
+ ret = qcom_smem_alloc_private(__smem, part, item, size);
+ } else if (__smem->global_partition.virt_base) {
+ part = &__smem->global_partition;
+ ret = qcom_smem_alloc_private(__smem, part, item, size);
+ } else {
+ ret = qcom_smem_alloc_global(__smem, item, size);
+ }
+
+ hwspin_unlock_irqrestore(__smem->hwlock, &flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_smem_alloc);
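+
+/*
+ * Usage sketch for qcom_smem_alloc() (illustrative only): SMEM_EXAMPLE_ITEM is
+ * a made-up item number, and the caller is assumed to include
+ * <linux/soc/qcom/smem.h>, which provides QCOM_SMEM_HOST_ANY (-1) for items
+ * outside any host-specific partition:
+ *
+ *   ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, SMEM_EXAMPLE_ITEM, sizeof(u32));
+ *   if (ret < 0 && ret != -EEXIST)
+ *       return ret;
+ *
+ * -EEXIST only means the item has already been allocated, which callers
+ * typically treat as success.
+ */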
+
+static void *qcom_smem_get_global(struct qcom_smem *smem,
+ unsigned item,
+ size_t *size)
+{
+ struct smem_header *header;
+ struct smem_region *region;
+ struct smem_global_entry *entry;
+ u64 entry_offset;
+ u32 e_size;
+ u32 aux_base;
+ unsigned i;
+
+ header = smem->regions[0].virt_base;
+ entry = &header->toc[item];
+ if (!entry->allocated)
+ return ERR_PTR(-ENXIO);
+
+ aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK;
+
+ for (i = 0; i < smem->num_regions; i++) {
+ region = &smem->regions[i];
+
+ if ((u32)region->aux_base == aux_base || !aux_base) {
+ e_size = le32_to_cpu(entry->size);
+ entry_offset = le32_to_cpu(entry->offset);
+
+ if (WARN_ON(e_size + entry_offset > region->size))
+ return ERR_PTR(-EINVAL);
+
+ if (size != NULL)
+ *size = e_size;
+
+ return region->virt_base + entry_offset;
+ }
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+
+static void *qcom_smem_get_private(struct qcom_smem *smem,
+ struct smem_partition *part,
+ unsigned item,
+ size_t *size)
+{
+ struct smem_private_entry *e, *end;
+ struct smem_partition_header *phdr;
+ void *item_ptr, *p_end;
+ u32 padding_data;
+ u32 e_size;
+
+ phdr = (struct smem_partition_header __force *)part->virt_base;
+ p_end = (void *)phdr + part->size;
+
+ e = phdr_to_first_uncached_entry(phdr);
+ end = phdr_to_last_uncached_entry(phdr);
+
+ while (e < end) {
+ if (e->canary != SMEM_PRIVATE_CANARY)
+ goto invalid_canary;
+
+ if (le16_to_cpu(e->item) == item) {
+ if (size != NULL) {
+ e_size = le32_to_cpu(e->size);
+ padding_data = le16_to_cpu(e->padding_data);
+
+ if (WARN_ON(e_size > part->size || padding_data > e_size))
+ return ERR_PTR(-EINVAL);
+
+ *size = e_size - padding_data;
+ }
+
+ item_ptr = uncached_entry_to_item(e);
+ if (WARN_ON(item_ptr > p_end))
+ return ERR_PTR(-EINVAL);
+
+ return item_ptr;
+ }
+
+ e = uncached_entry_next(e);
+ }
+
+ if (WARN_ON((void *)e > p_end))
+ return ERR_PTR(-EINVAL);
+
+ /* Item was not found in the uncached list, search the cached list */
+
+ e = phdr_to_first_cached_entry(phdr, part->cacheline);
+ end = phdr_to_last_cached_entry(phdr);
+
+ if (WARN_ON((void *)e < (void *)phdr || (void *)end > p_end))
+ return ERR_PTR(-EINVAL);
+
+ while (e > end) {
+ if (e->canary != SMEM_PRIVATE_CANARY)
+ goto invalid_canary;
+
+ if (le16_to_cpu(e->item) == item) {
+ if (size != NULL) {
+ e_size = le32_to_cpu(e->size);
+ padding_data = le16_to_cpu(e->padding_data);
+
+ if (WARN_ON(e_size > part->size || padding_data > e_size))
+ return ERR_PTR(-EINVAL);
+
+ *size = e_size - padding_data;
+ }
+
+ item_ptr = cached_entry_to_item(e);
+ if (WARN_ON(item_ptr < (void *)phdr))
+ return ERR_PTR(-EINVAL);
+
+ return item_ptr;
+ }
+
+ e = cached_entry_next(e, part->cacheline);
+ }
+
+ if (WARN_ON((void *)e < (void *)phdr))
+ return ERR_PTR(-EINVAL);
+
+ return ERR_PTR(-ENOENT);
+
+invalid_canary:
+ dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
+ le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));
+
+ return ERR_PTR(-EINVAL);
+}
+
+/**
+ * qcom_smem_get() - resolve the pointer and size of a smem item
+ * @host: the remote processor, or -1
+ * @item: smem item handle
+ * @size: pointer to be filled out with size of the item
+ *
+ * Looks up the smem item and returns a pointer to it. The size of the smem
+ * item is returned in @size.
+ */
+void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
+{
+ struct smem_partition *part;
+ unsigned long flags;
+ int ret;
+ void *ptr = ERR_PTR(-EPROBE_DEFER);
+
+ if (!__smem)
+ return ptr;
+
+ if (WARN_ON(item >= __smem->item_count))
+ return ERR_PTR(-EINVAL);
+
+ ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
+ HWSPINLOCK_TIMEOUT,
+ &flags);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
+ part = &__smem->partitions[host];
+ ptr = qcom_smem_get_private(__smem, part, item, size);
+ } else if (__smem->global_partition.virt_base) {
+ part = &__smem->global_partition;
+ ptr = qcom_smem_get_private(__smem, part, item, size);
+ } else {
+ ptr = qcom_smem_get_global(__smem, item, size);
+ }
+
+ hwspin_unlock_irqrestore(__smem->hwlock, &flags);
+
+ return ptr;
+
+}
+EXPORT_SYMBOL(qcom_smem_get);
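+
+/*
+ * Usage sketch for qcom_smem_get() (illustrative only; SMEM_EXAMPLE_ITEM and
+ * remote_pid are placeholders for a real item number and host id):
+ *
+ *   size_t size;
+ *   u32 *val;
+ *
+ *   val = qcom_smem_get(remote_pid, SMEM_EXAMPLE_ITEM, &size);
+ *   if (IS_ERR(val))
+ *       return PTR_ERR(val);
+ *
+ * The returned pointer references the shared memory directly, and for private
+ * partitions @size is the allocation size minus the padding bytes.
+ */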
+
+/**
+ * qcom_smem_get_free_space() - retrieve amount of free space in a partition
+ * @host: the remote processor identifying a partition, or -1
+ *
+ * To be used by smem clients as a quick way to determine if any new
+ * allocations have been made.
+ */
+int qcom_smem_get_free_space(unsigned host)
+{
+ struct smem_partition *part;
+ struct smem_partition_header *phdr;
+ struct smem_header *header;
+ unsigned ret;
+
+ if (!__smem)
+ return -EPROBE_DEFER;
+
+ if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
+ part = &__smem->partitions[host];
+ phdr = part->virt_base;
+ ret = le32_to_cpu(phdr->offset_free_cached) -
+ le32_to_cpu(phdr->offset_free_uncached);
+
+ if (ret > part->size)
+ return -EINVAL;
+ } else if (__smem->global_partition.virt_base) {
+ part = &__smem->global_partition;
+ phdr = part->virt_base;
+ ret = le32_to_cpu(phdr->offset_free_cached) -
+ le32_to_cpu(phdr->offset_free_uncached);
+
+ if (ret > part->size)
+ return -EINVAL;
+ } else {
+ header = __smem->regions[0].virt_base;
+ ret = le32_to_cpu(header->available);
+
+ if (ret > __smem->regions[0].size)
+ return -EINVAL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_smem_get_free_space);
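+
+/*
+ * Usage sketch (illustrative; remote_pid, last_free and scan_for_new_items()
+ * are hypothetical caller-side names): clients can poll the free space to
+ * detect that new allocations have appeared in the shared partition:
+ *
+ *   int free = qcom_smem_get_free_space(remote_pid);
+ *
+ *   if (free >= 0 && free < last_free)
+ *       scan_for_new_items();
+ *
+ * where scan_for_new_items() stands in for whatever the client does with the
+ * information.
+ */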
+
+static bool addr_in_range(void __iomem *base, size_t size, void *addr)
+{
+ return base && ((void __iomem *)addr >= base && (void __iomem *)addr < base + size);
+}
+
+/**
+ * qcom_smem_virt_to_phys() - return the physical address associated
+ * with an smem item pointer (previously returned by qcom_smem_get())
+ * @p: the virtual address to convert
+ *
+ * Returns 0 if the pointer provided is not within any smem region.
+ */
+phys_addr_t qcom_smem_virt_to_phys(void *p)
+{
+ struct smem_partition *part;
+ struct smem_region *area;
+ u64 offset;
+ u32 i;
+
+ for (i = 0; i < SMEM_HOST_COUNT; i++) {
+ part = &__smem->partitions[i];
+
+ if (addr_in_range(part->virt_base, part->size, p)) {
+ offset = p - part->virt_base;
+
+ return (phys_addr_t)part->phys_base + offset;
+ }
+ }
+
+ part = &__smem->global_partition;
+
+ if (addr_in_range(part->virt_base, part->size, p)) {
+ offset = p - part->virt_base;
+
+ return (phys_addr_t)part->phys_base + offset;
+ }
+
+ for (i = 0; i < __smem->num_regions; i++) {
+ area = &__smem->regions[i];
+
+ if (addr_in_range(area->virt_base, area->size, p)) {
+ offset = p - area->virt_base;
+
+ return (phys_addr_t)area->aux_base + offset;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(qcom_smem_virt_to_phys);
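+
+/*
+ * Usage sketch (illustrative; SMEM_EXAMPLE_ITEM and remote_pid are carried
+ * over from the sketches above): the physical address can be handed to
+ * hardware or a remote entity that cannot use the local virtual mapping:
+ *
+ *   void *buf = qcom_smem_get(remote_pid, SMEM_EXAMPLE_ITEM, &size);
+ *
+ *   if (IS_ERR(buf))
+ *       return PTR_ERR(buf);
+ *   phys_addr_t pa = qcom_smem_virt_to_phys(buf);
+ *
+ * A returned physical address of 0 means @buf does not point into any smem
+ * region.
+ */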
+
+static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
+{
+ struct smem_header *header;
+ __le32 *versions;
+
+ header = smem->regions[0].virt_base;
+ versions = header->version;
+
+ return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
+}
+
+static struct smem_ptable *qcom_smem_get_ptable(struct qcom_smem *smem)
+{
+ struct smem_ptable *ptable;
+ u32 version;
+
+ ptable = smem->ptable;
+ if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
+ return ERR_PTR(-ENOENT);
+
+ version = le32_to_cpu(ptable->version);
+ if (version != 1) {
+ dev_err(smem->dev,
+ "Unsupported partition header version %d\n", version);
+ return ERR_PTR(-EINVAL);
+ }
+ return ptable;
+}
+
+static u32 qcom_smem_get_item_count(struct qcom_smem *smem)
+{
+ struct smem_ptable *ptable;
+ struct smem_info *info;
+
+ ptable = qcom_smem_get_ptable(smem);
+ if (IS_ERR_OR_NULL(ptable))
+ return SMEM_ITEM_COUNT;
+
+ info = (struct smem_info *)&ptable->entry[ptable->num_entries];
+ if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic)))
+ return SMEM_ITEM_COUNT;
+
+ return le16_to_cpu(info->num_items);
+}
+
+/*
+ * Validate the partition header for a partition whose partition
+ * table entry is supplied. Returns a pointer to its header if
+ * valid, or a null pointer otherwise.
+ */
+static struct smem_partition_header *
+qcom_smem_partition_header(struct qcom_smem *smem,
+ struct smem_ptable_entry *entry, u16 host0, u16 host1)
+{
+ struct smem_partition_header *header;
+ u32 phys_addr;
+ u32 size;
+
+ phys_addr = smem->regions[0].aux_base + le32_to_cpu(entry->offset);
+ header = devm_ioremap_wc(smem->dev, phys_addr, le32_to_cpu(entry->size));
+
+ if (!header)
+ return NULL;
+
+ if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) {
+ dev_err(smem->dev, "bad partition magic %4ph\n", header->magic);
+ return NULL;
+ }
+
+ if (host0 != le16_to_cpu(header->host0)) {
+ dev_err(smem->dev, "bad host0 (%hu != %hu)\n",
+ host0, le16_to_cpu(header->host0));
+ return NULL;
+ }
+ if (host1 != le16_to_cpu(header->host1)) {
+ dev_err(smem->dev, "bad host1 (%hu != %hu)\n",
+ host1, le16_to_cpu(header->host1));
+ return NULL;
+ }
+
+ size = le32_to_cpu(header->size);
+ if (size != le32_to_cpu(entry->size)) {
+ dev_err(smem->dev, "bad partition size (%u != %u)\n",
+ size, le32_to_cpu(entry->size));
+ return NULL;
+ }
+
+ if (le32_to_cpu(header->offset_free_uncached) > size) {
+ dev_err(smem->dev, "bad partition free uncached (%u > %u)\n",
+ le32_to_cpu(header->offset_free_uncached), size);
+ return NULL;
+ }
+
+ return header;
+}
+
+static int qcom_smem_set_global_partition(struct qcom_smem *smem)
+{
+ struct smem_partition_header *header;
+ struct smem_ptable_entry *entry;
+ struct smem_ptable *ptable;
+ bool found = false;
+ int i;
+
+ if (smem->global_partition.virt_base) {
+ dev_err(smem->dev, "Already found the global partition\n");
+ return -EINVAL;
+ }
+
+ ptable = qcom_smem_get_ptable(smem);
+ if (IS_ERR(ptable))
+ return PTR_ERR(ptable);
+
+ for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
+ entry = &ptable->entry[i];
+ if (!le32_to_cpu(entry->offset))
+ continue;
+ if (!le32_to_cpu(entry->size))
+ continue;
+
+ if (le16_to_cpu(entry->host0) != SMEM_GLOBAL_HOST)
+ continue;
+
+ if (le16_to_cpu(entry->host1) == SMEM_GLOBAL_HOST) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ dev_err(smem->dev, "Missing entry for global partition\n");
+ return -EINVAL;
+ }
+
+ header = qcom_smem_partition_header(smem, entry,
+ SMEM_GLOBAL_HOST, SMEM_GLOBAL_HOST);
+ if (!header)
+ return -EINVAL;
+
+ smem->global_partition.virt_base = (void __iomem *)header;
+ smem->global_partition.phys_base = smem->regions[0].aux_base +
+ le32_to_cpu(entry->offset);
+ smem->global_partition.size = le32_to_cpu(entry->size);
+ smem->global_partition.cacheline = le32_to_cpu(entry->cacheline);
+
+ return 0;
+}
+
+static int
+qcom_smem_enumerate_partitions(struct qcom_smem *smem, u16 local_host)
+{
+ struct smem_partition_header *header;
+ struct smem_ptable_entry *entry;
+ struct smem_ptable *ptable;
+ u16 remote_host;
+ u16 host0, host1;
+ int i;
+
+ ptable = qcom_smem_get_ptable(smem);
+ if (IS_ERR(ptable))
+ return PTR_ERR(ptable);
+
+ for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
+ entry = &ptable->entry[i];
+ if (!le32_to_cpu(entry->offset))
+ continue;
+ if (!le32_to_cpu(entry->size))
+ continue;
+
+ host0 = le16_to_cpu(entry->host0);
+ host1 = le16_to_cpu(entry->host1);
+ if (host0 == local_host)
+ remote_host = host1;
+ else if (host1 == local_host)
+ remote_host = host0;
+ else
+ continue;
+
+ if (remote_host >= SMEM_HOST_COUNT) {
+ dev_err(smem->dev, "bad host %u\n", remote_host);
+ return -EINVAL;
+ }
+
+ if (smem->partitions[remote_host].virt_base) {
+ dev_err(smem->dev, "duplicate host %u\n", remote_host);
+ return -EINVAL;
+ }
+
+ header = qcom_smem_partition_header(smem, entry, host0, host1);
+ if (!header)
+ return -EINVAL;
+
+ smem->partitions[remote_host].virt_base = (void __iomem *)header;
+ smem->partitions[remote_host].phys_base = smem->regions[0].aux_base +
+ le32_to_cpu(entry->offset);
+ smem->partitions[remote_host].size = le32_to_cpu(entry->size);
+ smem->partitions[remote_host].cacheline = le32_to_cpu(entry->cacheline);
+ }
+
+ return 0;
+}
+
+static int qcom_smem_map_toc(struct qcom_smem *smem, struct smem_region *region)
+{
+ u32 ptable_start;
+
+ /* map starting 4K for smem header */
+ region->virt_base = devm_ioremap_wc(smem->dev, region->aux_base, SZ_4K);
+ ptable_start = region->aux_base + region->size - SZ_4K;
+ /* map last 4k for toc */
+ smem->ptable = devm_ioremap_wc(smem->dev, ptable_start, SZ_4K);
+
+ if (!region->virt_base || !smem->ptable)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int qcom_smem_map_global(struct qcom_smem *smem, u32 size)
+{
+ u32 phys_addr;
+
+ phys_addr = smem->regions[0].aux_base;
+
+ smem->regions[0].size = size;
+ smem->regions[0].virt_base = devm_ioremap_wc(smem->dev, phys_addr, size);
+
+ if (!smem->regions[0].virt_base)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int qcom_smem_resolve_mem(struct qcom_smem *smem, const char *name,
+ struct smem_region *region)
+{
+ struct device *dev = smem->dev;
+ struct device_node *np;
+ struct resource r;
+ int ret;
+
+ np = of_parse_phandle(dev->of_node, name, 0);
+ if (!np) {
+ dev_err(dev, "No %s specified\n", name);
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(np, 0, &r);
+ of_node_put(np);
+ if (ret)
+ return ret;
+
+ region->aux_base = r.start;
+ region->size = resource_size(&r);
+
+ return 0;
+}
+
+static int qcom_smem_probe(struct platform_device *pdev)
+{
+ struct smem_header *header;
+ struct reserved_mem *rmem;
+ struct qcom_smem *smem;
+ unsigned long flags;
+ size_t array_size;
+ int num_regions;
+ int hwlock_id;
+ u32 version;
+ u32 size;
+ int ret;
+ int i;
+
+ num_regions = 1;
+ if (of_find_property(pdev->dev.of_node, "qcom,rpm-msg-ram", NULL))
+ num_regions++;
+
+ array_size = num_regions * sizeof(struct smem_region);
+ smem = devm_kzalloc(&pdev->dev, sizeof(*smem) + array_size, GFP_KERNEL);
+ if (!smem)
+ return -ENOMEM;
+
+ smem->dev = &pdev->dev;
+ smem->num_regions = num_regions;
+
+ rmem = of_reserved_mem_lookup(pdev->dev.of_node);
+ if (rmem) {
+ smem->regions[0].aux_base = rmem->base;
+ smem->regions[0].size = rmem->size;
+ } else {
+ /*
+ * Fall back to the memory-region reference, if we're not a
+ * reserved-memory node.
+ */
+ ret = qcom_smem_resolve_mem(smem, "memory-region", &smem->regions[0]);
+ if (ret)
+ return ret;
+ }
+
+ if (num_regions > 1) {
+ ret = qcom_smem_resolve_mem(smem, "qcom,rpm-msg-ram", &smem->regions[1]);
+ if (ret)
+ return ret;
+ }
+
+
+ ret = qcom_smem_map_toc(smem, &smem->regions[0]);
+ if (ret)
+ return ret;
+
+ for (i = 1; i < num_regions; i++) {
+ smem->regions[i].virt_base = devm_ioremap_wc(&pdev->dev,
+ smem->regions[i].aux_base,
+ smem->regions[i].size);
+ if (!smem->regions[i].virt_base) {
+ dev_err(&pdev->dev, "failed to remap %pa\n", &smem->regions[i].aux_base);
+ return -ENOMEM;
+ }
+ }
+
+ header = smem->regions[0].virt_base;
+ if (le32_to_cpu(header->initialized) != 1 ||
+ le32_to_cpu(header->reserved)) {
+ dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
+ return -EINVAL;
+ }
+
+ hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
+ if (hwlock_id < 0) {
+ if (hwlock_id != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to retrieve hwlock\n");
+ return hwlock_id;
+ }
+
+ smem->hwlock = hwspin_lock_request_specific(hwlock_id);
+ if (!smem->hwlock)
+ return -ENXIO;
+
+ ret = hwspin_lock_timeout_irqsave(smem->hwlock, HWSPINLOCK_TIMEOUT, &flags);
+ if (ret)
+ return ret;
+ size = readl_relaxed(&header->available) + readl_relaxed(&header->free_offset);
+ hwspin_unlock_irqrestore(smem->hwlock, &flags);
+
+ version = qcom_smem_get_sbl_version(smem);
+ /*
+ * The smem header mapping is required only in the heap version scheme, so
+ * unmap it here. It will be remapped in qcom_smem_map_global() when the
+ * whole partition is mapped again.
+ */
+ devm_iounmap(smem->dev, smem->regions[0].virt_base);
+ switch (version >> 16) {
+ case SMEM_GLOBAL_PART_VERSION:
+ ret = qcom_smem_set_global_partition(smem);
+ if (ret < 0)
+ return ret;
+ smem->item_count = qcom_smem_get_item_count(smem);
+ break;
+ case SMEM_GLOBAL_HEAP_VERSION:
+ qcom_smem_map_global(smem, size);
+ smem->item_count = SMEM_ITEM_COUNT;
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
+ return -EINVAL;
+ }
+
+ BUILD_BUG_ON(SMEM_HOST_APPS >= SMEM_HOST_COUNT);
+ ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
+ if (ret < 0 && ret != -ENOENT)
+ return ret;
+
+ __smem = smem;
+
+ smem->socinfo = platform_device_register_data(&pdev->dev, "qcom-socinfo",
+ PLATFORM_DEVID_NONE, NULL,
+ 0);
+ if (IS_ERR(smem->socinfo))
+ dev_dbg(&pdev->dev, "failed to register socinfo device\n");
+
+ return 0;
+}
+
+static int qcom_smem_remove(struct platform_device *pdev)
+{
+ platform_device_unregister(__smem->socinfo);
+
+ hwspin_lock_free(__smem->hwlock);
+ __smem = NULL;
+
+ return 0;
+}
+
+static const struct of_device_id qcom_smem_of_match[] = {
+ { .compatible = "qcom,smem" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_smem_of_match);
+
+static struct platform_driver qcom_smem_driver = {
+ .probe = qcom_smem_probe,
+ .remove = qcom_smem_remove,
+ .driver = {
+ .name = "qcom-smem",
+ .of_match_table = qcom_smem_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init qcom_smem_init(void)
+{
+ return platform_driver_register(&qcom_smem_driver);
+}
+arch_initcall(qcom_smem_init);
+
+static void __exit qcom_smem_exit(void)
+{
+ platform_driver_unregister(&qcom_smem_driver);
+}
+module_exit(qcom_smem_exit)
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
+MODULE_DESCRIPTION("Qualcomm Shared Memory Manager");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/smem_state.c b/drivers/soc/qcom/smem_state.c
new file mode 100644
index 000000000..e848cc9a3
--- /dev/null
+++ b/drivers/soc/qcom/smem_state.c
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, Sony Mobile Communications Inc.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ */
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/smem_state.h>
+
+static LIST_HEAD(smem_states);
+static DEFINE_MUTEX(list_lock);
+
+/**
+ * struct qcom_smem_state - state context
+ * @refcount: refcount for the state
+ * @orphan: boolean indicator that this state has been unregistered
+ * @list: entry in smem_states list
+ * @of_node: of_node to use for matching the state in DT
+ * @priv: implementation private data
+ * @ops: ops for the state
+ */
+struct qcom_smem_state {
+ struct kref refcount;
+ bool orphan;
+
+ struct list_head list;
+ struct device_node *of_node;
+
+ void *priv;
+
+ struct qcom_smem_state_ops ops;
+};
+
+/**
+ * qcom_smem_state_update_bits() - update the masked bits in state with value
+ * @state: state handle acquired by calling qcom_smem_state_get()
+ * @mask: bit mask for the change
+ * @value: new value for the masked bits
+ *
+ * Returns 0 on success, otherwise negative errno.
+ */
+int qcom_smem_state_update_bits(struct qcom_smem_state *state,
+ u32 mask,
+ u32 value)
+{
+ if (state->orphan)
+ return -ENXIO;
+
+ if (!state->ops.update_bits)
+ return -ENOTSUPP;
+
+ return state->ops.update_bits(state->priv, mask, value);
+}
+EXPORT_SYMBOL_GPL(qcom_smem_state_update_bits);
+
+static struct qcom_smem_state *of_node_to_state(struct device_node *np)
+{
+ struct qcom_smem_state *state;
+
+ mutex_lock(&list_lock);
+
+ list_for_each_entry(state, &smem_states, list) {
+ if (state->of_node == np) {
+ kref_get(&state->refcount);
+ goto unlock;
+ }
+ }
+ state = ERR_PTR(-EPROBE_DEFER);
+
+unlock:
+ mutex_unlock(&list_lock);
+
+ return state;
+}
+
+/**
+ * qcom_smem_state_get() - acquire handle to a state
+ * @dev: client device pointer
+ * @con_id: name of the state to lookup
+ * @bit: flags from the state reference, indicating which bit is affected
+ *
+ * Returns handle to the state, or ERR_PTR(). qcom_smem_state_put() must be
+ * called to release the returned state handle.
+ */
+struct qcom_smem_state *qcom_smem_state_get(struct device *dev,
+ const char *con_id,
+ unsigned *bit)
+{
+ struct qcom_smem_state *state;
+ struct of_phandle_args args;
+ int index = 0;
+ int ret;
+
+ if (con_id) {
+ index = of_property_match_string(dev->of_node,
+ "qcom,smem-state-names",
+ con_id);
+ if (index < 0) {
+ dev_err(dev, "missing qcom,smem-state-names\n");
+ return ERR_PTR(index);
+ }
+ }
+
+ ret = of_parse_phandle_with_args(dev->of_node,
+ "qcom,smem-states",
+ "#qcom,smem-state-cells",
+ index,
+ &args);
+ if (ret) {
+ dev_err(dev, "failed to parse qcom,smem-states property\n");
+ return ERR_PTR(ret);
+ }
+
+ if (args.args_count != 1) {
+ dev_err(dev, "invalid #qcom,smem-state-cells\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ state = of_node_to_state(args.np);
+ if (IS_ERR(state))
+ goto put;
+
+ *bit = args.args[0];
+
+put:
+ of_node_put(args.np);
+ return state;
+}
+EXPORT_SYMBOL_GPL(qcom_smem_state_get);
+
+static void qcom_smem_state_release(struct kref *ref)
+{
+ struct qcom_smem_state *state = container_of(ref, struct qcom_smem_state, refcount);
+
+ list_del(&state->list);
+ of_node_put(state->of_node);
+ kfree(state);
+}
+
+/**
+ * qcom_smem_state_put() - release state handle
+ * @state: state handle to be released
+ */
+void qcom_smem_state_put(struct qcom_smem_state *state)
+{
+ mutex_lock(&list_lock);
+ kref_put(&state->refcount, qcom_smem_state_release);
+ mutex_unlock(&list_lock);
+}
+EXPORT_SYMBOL_GPL(qcom_smem_state_put);
+
+static void devm_qcom_smem_state_release(struct device *dev, void *res)
+{
+ qcom_smem_state_put(*(struct qcom_smem_state **)res);
+}
+
+/**
+ * devm_qcom_smem_state_get() - acquire handle to a devres managed state
+ * @dev: client device pointer
+ * @con_id: name of the state to lookup
+ * @bit: flags from the state reference, indicating which bit is affected
+ *
+ * Returns handle to the state, or ERR_PTR(). qcom_smem_state_put() is called
+ * automatically when @dev is removed.
+ */
+struct qcom_smem_state *devm_qcom_smem_state_get(struct device *dev,
+ const char *con_id,
+ unsigned *bit)
+{
+ struct qcom_smem_state **ptr, *state;
+
+ ptr = devres_alloc(devm_qcom_smem_state_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ state = qcom_smem_state_get(dev, con_id, bit);
+ if (!IS_ERR(state)) {
+ *ptr = state;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return state;
+}
+EXPORT_SYMBOL_GPL(devm_qcom_smem_state_get);
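+
+/*
+ * Consumer-side usage sketch (illustrative only; the "stop" name is an
+ * assumption and must match an entry in the consumer's
+ * qcom,smem-state-names/qcom,smem-states device tree properties):
+ *
+ *   unsigned int bit;
+ *   struct qcom_smem_state *state;
+ *
+ *   state = devm_qcom_smem_state_get(dev, "stop", &bit);
+ *   if (IS_ERR(state))
+ *       return PTR_ERR(state);
+ *
+ *   qcom_smem_state_update_bits(state, BIT(bit), BIT(bit));
+ *
+ * The last call asserts the referenced bit; passing 0 as the value clears it.
+ */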
+
+/**
+ * qcom_smem_state_register() - register a new state
+ * @of_node: of_node used for matching client lookups
+ * @ops: implementation ops
+ * @priv: implementation specific private data
+ */
+struct qcom_smem_state *qcom_smem_state_register(struct device_node *of_node,
+ const struct qcom_smem_state_ops *ops,
+ void *priv)
+{
+ struct qcom_smem_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&state->refcount);
+
+ state->of_node = of_node_get(of_node);
+ state->ops = *ops;
+ state->priv = priv;
+
+ mutex_lock(&list_lock);
+ list_add(&state->list, &smem_states);
+ mutex_unlock(&list_lock);
+
+ return state;
+}
+EXPORT_SYMBOL_GPL(qcom_smem_state_register);
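+
+/*
+ * Provider-side usage sketch (illustrative only; my_update_bits() and my_priv
+ * are placeholders for the provider's own implementation - the smp2p driver
+ * is a real example of this pattern):
+ *
+ *   static const struct qcom_smem_state_ops my_state_ops = {
+ *       .update_bits = my_update_bits,
+ *   };
+ *
+ *   state = qcom_smem_state_register(node, &my_state_ops, my_priv);
+ *   if (IS_ERR(state))
+ *       return PTR_ERR(state);
+ *
+ * and qcom_smem_state_unregister(state) on teardown.
+ */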
+
+/**
+ * qcom_smem_state_unregister() - unregister a registered state
+ * @state: state handle to be unregistered
+ */
+void qcom_smem_state_unregister(struct qcom_smem_state *state)
+{
+ state->orphan = true;
+ qcom_smem_state_put(state);
+}
+EXPORT_SYMBOL_GPL(qcom_smem_state_unregister);
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
new file mode 100644
index 000000000..d9c28a8a7
--- /dev/null
+++ b/drivers/soc/qcom/smp2p.c
@@ -0,0 +1,700 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mailbox_client.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
+#include <linux/regmap.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/soc/qcom/smem_state.h>
+#include <linux/spinlock.h>
+
+/*
+ * The Shared Memory Point to Point (SMP2P) protocol facilitates communication
+ * of a single 32-bit value between two processors. Each value has a single
+ * writer (the local side) and a single reader (the remote side). Values are
+ * uniquely identified in the system by the directed edge (local processor ID
+ * to remote processor ID) and a string identifier.
+ *
+ * Each processor is responsible for creating the outgoing SMEM items and each
+ * item is writable by the local processor and readable by the remote
+ * processor. By using two separate SMEM items that are single-reader and
+ * single-writer, SMP2P does not require any remote locking mechanisms.
+ *
+ * The driver exposes each outbound entry through the qcom_smem_state
+ * framework and each inbound entry as a virtual interrupt controller, built
+ * on the Linux interrupt framework.
+ */
+
+#define SMP2P_MAX_ENTRY 16
+#define SMP2P_MAX_ENTRY_NAME 16
+
+#define SMP2P_FEATURE_SSR_ACK 0x1
+#define SMP2P_FLAGS_RESTART_DONE_BIT 0
+#define SMP2P_FLAGS_RESTART_ACK_BIT 1
+
+#define SMP2P_MAGIC 0x504d5324
+#define SMP2P_ALL_FEATURES SMP2P_FEATURE_SSR_ACK
+
+/**
+ * struct smp2p_smem_item - in memory communication structure
+ * @magic: magic number
+ * @version: version - must be 1
+ * @features: feature flags (SMP2P_FEATURE_*), negotiated with the remote side
+ * @local_pid: processor id of sending end
+ * @remote_pid: processor id of receiving end
+ * @total_entries: number of entries - always SMP2P_MAX_ENTRY
+ * @valid_entries: number of allocated entries
+ * @flags: restart done and restart ack bits (SMP2P_FLAGS_RESTART_*)
+ * @entries: individual communication entries
+ * @name: name of the entry
+ * @value: content of the entry
+ */
+struct smp2p_smem_item {
+ u32 magic;
+ u8 version;
+ unsigned features:24;
+ u16 local_pid;
+ u16 remote_pid;
+ u16 total_entries;
+ u16 valid_entries;
+ u32 flags;
+
+ struct {
+ u8 name[SMP2P_MAX_ENTRY_NAME];
+ u32 value;
+ } entries[SMP2P_MAX_ENTRY];
+} __packed;
+
+/**
+ * struct smp2p_entry - driver context matching one entry
+ * @node: list entry to keep track of allocated entries
+ * @smp2p: reference to the device driver context
+ * @name: name of the entry, to match against smp2p_smem_item
+ * @value: pointer to smp2p_smem_item entry value
+ * @last_value: last handled value
+ * @domain: irq_domain for inbound entries
+ * @irq_enabled: bitmap to track enabled irq bits
+ * @irq_rising: bitmap to mark irq bits for rising detection
+ * @irq_falling: bitmap to mark irq bits for falling detection
+ * @state: smem state handle
+ * @lock: spinlock to protect read-modify-write of the value
+ */
+struct smp2p_entry {
+ struct list_head node;
+ struct qcom_smp2p *smp2p;
+
+ const char *name;
+ u32 *value;
+ u32 last_value;
+
+ struct irq_domain *domain;
+ DECLARE_BITMAP(irq_enabled, 32);
+ DECLARE_BITMAP(irq_rising, 32);
+ DECLARE_BITMAP(irq_falling, 32);
+
+ struct qcom_smem_state *state;
+
+ spinlock_t lock;
+};
+
+#define SMP2P_INBOUND 0
+#define SMP2P_OUTBOUND 1
+
+/**
+ * struct qcom_smp2p - device driver context
+ * @dev: device driver handle
+ * @in: pointer to the inbound smem item
+ * @out: pointer to the outbound smem item
+ * @smem_items: ids of the two smem items
+ * @valid_entries: already scanned inbound entries
+ * @ssr_ack_enabled: SMP2P_FEATURE_SSR_ACK feature is supported and was enabled
+ * @ssr_ack: current cached state of the local ack bit
+ * @negotiation_done: whether negotiation has finished
+ * @local_pid: processor id of the inbound edge
+ * @remote_pid: processor id of the outbound edge
+ * @ipc_regmap: regmap for the outbound ipc
+ * @ipc_offset: offset within the regmap
+ * @ipc_bit: bit in regmap@offset to kick to signal remote processor
+ * @mbox_client: mailbox client handle
+ * @mbox_chan: apcs ipc mailbox channel handle
+ * @inbound: list of inbound entries
+ * @outbound: list of outbound entries
+ */
+struct qcom_smp2p {
+ struct device *dev;
+
+ struct smp2p_smem_item *in;
+ struct smp2p_smem_item *out;
+
+ unsigned smem_items[SMP2P_OUTBOUND + 1];
+
+ unsigned valid_entries;
+
+ bool ssr_ack_enabled;
+ bool ssr_ack;
+ bool negotiation_done;
+
+ unsigned local_pid;
+ unsigned remote_pid;
+
+ struct regmap *ipc_regmap;
+ int ipc_offset;
+ int ipc_bit;
+
+ struct mbox_client mbox_client;
+ struct mbox_chan *mbox_chan;
+
+ struct list_head inbound;
+ struct list_head outbound;
+};
+
+static void qcom_smp2p_kick(struct qcom_smp2p *smp2p)
+{
+ /* Make sure any updated data is written before the kick */
+ wmb();
+
+ if (smp2p->mbox_chan) {
+ mbox_send_message(smp2p->mbox_chan, NULL);
+ mbox_client_txdone(smp2p->mbox_chan, 0);
+ } else {
+ regmap_write(smp2p->ipc_regmap, smp2p->ipc_offset, BIT(smp2p->ipc_bit));
+ }
+}
+
+static bool qcom_smp2p_check_ssr(struct qcom_smp2p *smp2p)
+{
+ struct smp2p_smem_item *in = smp2p->in;
+ bool restart;
+
+ if (!smp2p->ssr_ack_enabled)
+ return false;
+
+ restart = in->flags & BIT(SMP2P_FLAGS_RESTART_DONE_BIT);
+
+ return restart != smp2p->ssr_ack;
+}
+
+static void qcom_smp2p_do_ssr_ack(struct qcom_smp2p *smp2p)
+{
+ struct smp2p_smem_item *out = smp2p->out;
+ u32 val;
+
+ smp2p->ssr_ack = !smp2p->ssr_ack;
+
+ val = out->flags & ~BIT(SMP2P_FLAGS_RESTART_ACK_BIT);
+ if (smp2p->ssr_ack)
+ val |= BIT(SMP2P_FLAGS_RESTART_ACK_BIT);
+ out->flags = val;
+
+ qcom_smp2p_kick(smp2p);
+}
+
+static void qcom_smp2p_negotiate(struct qcom_smp2p *smp2p)
+{
+ struct smp2p_smem_item *out = smp2p->out;
+ struct smp2p_smem_item *in = smp2p->in;
+
+ if (in->version == out->version) {
+ out->features &= in->features;
+
+ if (out->features & SMP2P_FEATURE_SSR_ACK)
+ smp2p->ssr_ack_enabled = true;
+
+ smp2p->negotiation_done = true;
+ }
+}
+
+static void qcom_smp2p_notify_in(struct qcom_smp2p *smp2p)
+{
+ struct smp2p_smem_item *in;
+ struct smp2p_entry *entry;
+ int irq_pin;
+ u32 status;
+ char buf[SMP2P_MAX_ENTRY_NAME];
+ u32 val;
+ int i;
+
+ in = smp2p->in;
+
+ /* Match newly created entries */
+ for (i = smp2p->valid_entries; i < in->valid_entries; i++) {
+ list_for_each_entry(entry, &smp2p->inbound, node) {
+ memcpy(buf, in->entries[i].name, sizeof(buf));
+ if (!strcmp(buf, entry->name)) {
+ entry->value = &in->entries[i].value;
+ break;
+ }
+ }
+ }
+ smp2p->valid_entries = i;
+
+ /* Fire interrupts based on any value changes */
+ list_for_each_entry(entry, &smp2p->inbound, node) {
+ /* Ignore entries not yet allocated by the remote side */
+ if (!entry->value)
+ continue;
+
+ val = readl(entry->value);
+
+ status = val ^ entry->last_value;
+ entry->last_value = val;
+
+ /* No changes of this entry? */
+ if (!status)
+ continue;
+
+ for_each_set_bit(i, entry->irq_enabled, 32) {
+ if (!(status & BIT(i)))
+ continue;
+
+ if ((val & BIT(i) && test_bit(i, entry->irq_rising)) ||
+ (!(val & BIT(i)) && test_bit(i, entry->irq_falling))) {
+ irq_pin = irq_find_mapping(entry->domain, i);
+ handle_nested_irq(irq_pin);
+ }
+ }
+ }
+}
+
+/**
+ * qcom_smp2p_intr() - interrupt handler for incoming notifications
+ * @irq: unused
+ * @data: smp2p driver context
+ *
+ * Handle notifications from the remote side about newly allocated entries
+ * or any changes to the state bits of existing entries.
+ */
+static irqreturn_t qcom_smp2p_intr(int irq, void *data)
+{
+ struct smp2p_smem_item *in;
+ struct qcom_smp2p *smp2p = data;
+ unsigned int smem_id = smp2p->smem_items[SMP2P_INBOUND];
+ unsigned int pid = smp2p->remote_pid;
+ bool ack_restart;
+ size_t size;
+
+ in = smp2p->in;
+
+ /* Acquire smem item, if not already found */
+ if (!in) {
+ in = qcom_smem_get(pid, smem_id, &size);
+ if (IS_ERR(in)) {
+ dev_err(smp2p->dev,
+ "Unable to acquire remote smp2p item\n");
+ goto out;
+ }
+
+ smp2p->in = in;
+ }
+
+ if (!smp2p->negotiation_done)
+ qcom_smp2p_negotiate(smp2p);
+
+ if (smp2p->negotiation_done) {
+ ack_restart = qcom_smp2p_check_ssr(smp2p);
+ qcom_smp2p_notify_in(smp2p);
+
+ if (ack_restart)
+ qcom_smp2p_do_ssr_ack(smp2p);
+ }
+
+out:
+ return IRQ_HANDLED;
+}
+
+static void smp2p_mask_irq(struct irq_data *irqd)
+{
+ struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
+
+ clear_bit(irq, entry->irq_enabled);
+}
+
+static void smp2p_unmask_irq(struct irq_data *irqd)
+{
+ struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
+
+ set_bit(irq, entry->irq_enabled);
+}
+
+static int smp2p_set_irq_type(struct irq_data *irqd, unsigned int type)
+{
+ struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
+
+ if (!(type & IRQ_TYPE_EDGE_BOTH))
+ return -EINVAL;
+
+ if (type & IRQ_TYPE_EDGE_RISING)
+ set_bit(irq, entry->irq_rising);
+ else
+ clear_bit(irq, entry->irq_rising);
+
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ set_bit(irq, entry->irq_falling);
+ else
+ clear_bit(irq, entry->irq_falling);
+
+ return 0;
+}
+
+static struct irq_chip smp2p_irq_chip = {
+ .name = "smp2p",
+ .irq_mask = smp2p_mask_irq,
+ .irq_unmask = smp2p_unmask_irq,
+ .irq_set_type = smp2p_set_irq_type,
+};
+
+static int smp2p_irq_map(struct irq_domain *d,
+ unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct smp2p_entry *entry = d->host_data;
+
+ irq_set_chip_and_handler(irq, &smp2p_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, entry);
+ irq_set_nested_thread(irq, 1);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops smp2p_irq_ops = {
+ .map = smp2p_irq_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static int qcom_smp2p_inbound_entry(struct qcom_smp2p *smp2p,
+ struct smp2p_entry *entry,
+ struct device_node *node)
+{
+ entry->domain = irq_domain_add_linear(node, 32, &smp2p_irq_ops, entry);
+ if (!entry->domain) {
+ dev_err(smp2p->dev, "failed to add irq_domain\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int smp2p_update_bits(void *data, u32 mask, u32 value)
+{
+ struct smp2p_entry *entry = data;
+ unsigned long flags;
+ u32 orig;
+ u32 val;
+
+ spin_lock_irqsave(&entry->lock, flags);
+ val = orig = readl(entry->value);
+ val &= ~mask;
+ val |= value;
+ writel(val, entry->value);
+ spin_unlock_irqrestore(&entry->lock, flags);
+
+ if (val != orig)
+ qcom_smp2p_kick(entry->smp2p);
+
+ return 0;
+}
+
+static const struct qcom_smem_state_ops smp2p_state_ops = {
+ .update_bits = smp2p_update_bits,
+};
+
+static int qcom_smp2p_outbound_entry(struct qcom_smp2p *smp2p,
+ struct smp2p_entry *entry,
+ struct device_node *node)
+{
+ struct smp2p_smem_item *out = smp2p->out;
+ char buf[SMP2P_MAX_ENTRY_NAME] = {};
+
+ /* Allocate an entry from the smem item */
+ strlcpy(buf, entry->name, SMP2P_MAX_ENTRY_NAME);
+ memcpy(out->entries[out->valid_entries].name, buf, SMP2P_MAX_ENTRY_NAME);
+
+ /* Make the logical entry reference the physical value */
+ entry->value = &out->entries[out->valid_entries].value;
+
+ out->valid_entries++;
+
+ entry->state = qcom_smem_state_register(node, &smp2p_state_ops, entry);
+ if (IS_ERR(entry->state)) {
+ dev_err(smp2p->dev, "failed to register qcom_smem_state\n");
+ return PTR_ERR(entry->state);
+ }
+
+ return 0;
+}
+
+static int qcom_smp2p_alloc_outbound_item(struct qcom_smp2p *smp2p)
+{
+ struct smp2p_smem_item *out;
+ unsigned smem_id = smp2p->smem_items[SMP2P_OUTBOUND];
+ unsigned pid = smp2p->remote_pid;
+ int ret;
+
+ ret = qcom_smem_alloc(pid, smem_id, sizeof(*out));
+ if (ret < 0 && ret != -EEXIST) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(smp2p->dev,
+ "unable to allocate local smp2p item\n");
+ return ret;
+ }
+
+ out = qcom_smem_get(pid, smem_id, NULL);
+ if (IS_ERR(out)) {
+ dev_err(smp2p->dev, "Unable to acquire local smp2p item\n");
+ return PTR_ERR(out);
+ }
+
+ memset(out, 0, sizeof(*out));
+ out->magic = SMP2P_MAGIC;
+ out->local_pid = smp2p->local_pid;
+ out->remote_pid = smp2p->remote_pid;
+ out->total_entries = SMP2P_MAX_ENTRY;
+ out->valid_entries = 0;
+ out->features = SMP2P_ALL_FEATURES;
+
+ /*
+ * Make sure the rest of the header is written before we validate the
+ * item by writing a valid version number.
+ */
+ wmb();
+ out->version = 1;
+
+ qcom_smp2p_kick(smp2p);
+
+ smp2p->out = out;
+
+ return 0;
+}
+
+static int smp2p_parse_ipc(struct qcom_smp2p *smp2p)
+{
+ struct device_node *syscon;
+ struct device *dev = smp2p->dev;
+ const char *key;
+ int ret;
+
+ syscon = of_parse_phandle(dev->of_node, "qcom,ipc", 0);
+ if (!syscon) {
+ dev_err(dev, "no qcom,ipc node\n");
+ return -ENODEV;
+ }
+
+ smp2p->ipc_regmap = syscon_node_to_regmap(syscon);
+ of_node_put(syscon);
+ if (IS_ERR(smp2p->ipc_regmap))
+ return PTR_ERR(smp2p->ipc_regmap);
+
+ key = "qcom,ipc";
+ ret = of_property_read_u32_index(dev->of_node, key, 1, &smp2p->ipc_offset);
+ if (ret < 0) {
+ dev_err(dev, "no offset in %s\n", key);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_index(dev->of_node, key, 2, &smp2p->ipc_bit);
+ if (ret < 0) {
+ dev_err(dev, "no bit in %s\n", key);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qcom_smp2p_probe(struct platform_device *pdev)
+{
+ struct smp2p_entry *entry;
+ struct device_node *node;
+ struct qcom_smp2p *smp2p;
+ const char *key;
+ int irq;
+ int ret;
+
+ smp2p = devm_kzalloc(&pdev->dev, sizeof(*smp2p), GFP_KERNEL);
+ if (!smp2p)
+ return -ENOMEM;
+
+ smp2p->dev = &pdev->dev;
+ INIT_LIST_HEAD(&smp2p->inbound);
+ INIT_LIST_HEAD(&smp2p->outbound);
+
+ platform_set_drvdata(pdev, smp2p);
+
+ key = "qcom,smem";
+ ret = of_property_read_u32_array(pdev->dev.of_node, key,
+ smp2p->smem_items, 2);
+ if (ret)
+ return ret;
+
+ key = "qcom,local-pid";
+ ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->local_pid);
+ if (ret)
+ goto report_read_failure;
+
+ key = "qcom,remote-pid";
+ ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->remote_pid);
+ if (ret)
+ goto report_read_failure;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ smp2p->mbox_client.dev = &pdev->dev;
+ smp2p->mbox_client.knows_txdone = true;
+ smp2p->mbox_chan = mbox_request_channel(&smp2p->mbox_client, 0);
+ if (IS_ERR(smp2p->mbox_chan)) {
+ if (PTR_ERR(smp2p->mbox_chan) != -ENODEV)
+ return PTR_ERR(smp2p->mbox_chan);
+
+ smp2p->mbox_chan = NULL;
+
+ ret = smp2p_parse_ipc(smp2p);
+ if (ret)
+ return ret;
+ }
+
+ ret = qcom_smp2p_alloc_outbound_item(smp2p);
+ if (ret < 0)
+ goto release_mbox;
+
+ for_each_available_child_of_node(pdev->dev.of_node, node) {
+ entry = devm_kzalloc(&pdev->dev, sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ ret = -ENOMEM;
+ of_node_put(node);
+ goto unwind_interfaces;
+ }
+
+ entry->smp2p = smp2p;
+ spin_lock_init(&entry->lock);
+
+ ret = of_property_read_string(node, "qcom,entry-name", &entry->name);
+ if (ret < 0) {
+ of_node_put(node);
+ goto unwind_interfaces;
+ }
+
+ if (of_property_read_bool(node, "interrupt-controller")) {
+ ret = qcom_smp2p_inbound_entry(smp2p, entry, node);
+ if (ret < 0) {
+ of_node_put(node);
+ goto unwind_interfaces;
+ }
+
+ list_add(&entry->node, &smp2p->inbound);
+ } else {
+ ret = qcom_smp2p_outbound_entry(smp2p, entry, node);
+ if (ret < 0) {
+ of_node_put(node);
+ goto unwind_interfaces;
+ }
+
+ list_add(&entry->node, &smp2p->outbound);
+ }
+ }
+
+ /* Kick the outgoing edge after allocating entries */
+ qcom_smp2p_kick(smp2p);
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq,
+ NULL, qcom_smp2p_intr,
+ IRQF_ONESHOT,
+ "smp2p", (void *)smp2p);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request interrupt\n");
+ goto unwind_interfaces;
+ }
+
+ /*
+ * Treat the smp2p interrupt as a wakeup source, but keep it disabled
+ * by default. User space can decide to enable it depending on its
+ * use cases. For example, if the remoteproc crashes and the device wants
+ * to handle it immediately (e.g. to not miss phone calls), it can
+ * enable the wakeup source from user space, while other devices which
+ * do not have a proper autosleep feature may want to handle it together
+ * with other wakeup events (e.g. the power button) instead of waking up
+ * immediately.
+ */
+ device_set_wakeup_capable(&pdev->dev, true);
+
+ ret = dev_pm_set_wake_irq(&pdev->dev, irq);
+ if (ret)
+ goto set_wake_irq_fail;
+
+ return 0;
+
+set_wake_irq_fail:
+ dev_pm_clear_wake_irq(&pdev->dev);
+
+unwind_interfaces:
+ list_for_each_entry(entry, &smp2p->inbound, node)
+ irq_domain_remove(entry->domain);
+
+ list_for_each_entry(entry, &smp2p->outbound, node)
+ qcom_smem_state_unregister(entry->state);
+
+ smp2p->out->valid_entries = 0;
+
+release_mbox:
+ mbox_free_channel(smp2p->mbox_chan);
+
+ return ret;
+
+report_read_failure:
+ dev_err(&pdev->dev, "failed to read %s\n", key);
+ return -EINVAL;
+}
+
+static int qcom_smp2p_remove(struct platform_device *pdev)
+{
+ struct qcom_smp2p *smp2p = platform_get_drvdata(pdev);
+ struct smp2p_entry *entry;
+
+ dev_pm_clear_wake_irq(&pdev->dev);
+
+ list_for_each_entry(entry, &smp2p->inbound, node)
+ irq_domain_remove(entry->domain);
+
+ list_for_each_entry(entry, &smp2p->outbound, node)
+ qcom_smem_state_unregister(entry->state);
+
+ mbox_free_channel(smp2p->mbox_chan);
+
+ smp2p->out->valid_entries = 0;
+
+ return 0;
+}
+
+static const struct of_device_id qcom_smp2p_of_match[] = {
+ { .compatible = "qcom,smp2p" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_smp2p_of_match);
+
+static struct platform_driver qcom_smp2p_driver = {
+ .probe = qcom_smp2p_probe,
+ .remove = qcom_smp2p_remove,
+ .driver = {
+ .name = "qcom_smp2p",
+ .of_match_table = qcom_smp2p_of_match,
+ },
+};
+module_platform_driver(qcom_smp2p_driver);
+
+MODULE_DESCRIPTION("Qualcomm Shared Memory Point to Point driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c
new file mode 100644
index 000000000..3e8994d61
--- /dev/null
+++ b/drivers/soc/qcom/smsm.c
@@ -0,0 +1,648 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, Sony Mobile Communications Inc.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/regmap.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/soc/qcom/smem_state.h>
+
+/*
+ * This driver implements the Qualcomm Shared Memory State Machine, a mechanism
+ * for communicating single bit state information to remote processors.
+ *
+ * The implementation is based on two sections of shared memory; the first
+ * holding the state bits and the second holding a matrix of subscription bits.
+ *
+ * The state bits are structured in entries of 32 bits, each belonging to one
+ * system in the SoC. The entry belonging to the local system is considered
+ * read-write, while the rest should be considered read-only.
+ *
+ * The subscription matrix consists of N bitmaps per entry, denoting interest
+ * in updates of the entry for each of the N hosts. Upon updating a state bit,
+ * each host's subscription bitmap should be queried and the remote system
+ * should be interrupted if it has requested so.
+ *
+ * The subscription matrix is laid out in entry-major order:
+ * entry0: [host0 ... hostN]
+ * .
+ * .
+ * entryM: [host0 ... hostN]
+ *
+ * A third, optional, shared memory region might contain information regarding
+ * the number of entries in the state bitmap as well as the number of columns in
+ * the subscription matrix.
+ */
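+
+/*
+ * Illustrative sketch (not used by the driver itself): with the entry-major
+ * layout described above, the 32-bit word recording host @host's interest in
+ * entry @entry would be found at
+ *
+ *	u32 *word = intr_mask + entry * num_hosts + host;
+ *
+ * which is how qcom_smsm_probe() below derives entry->subscription and how
+ * smsm_update_bits() indexes the local row. The variable names here are for
+ * illustration only.
+ */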
+
+/*
+ * Shared memory identifiers, used to acquire handles to respective memory
+ * region.
+ */
+#define SMEM_SMSM_SHARED_STATE 85
+#define SMEM_SMSM_CPU_INTR_MASK 333
+#define SMEM_SMSM_SIZE_INFO 419
+
+/*
+ * Default sizes, in case SMEM_SMSM_SIZE_INFO is not found.
+ */
+#define SMSM_DEFAULT_NUM_ENTRIES 8
+#define SMSM_DEFAULT_NUM_HOSTS 3
+
+struct smsm_entry;
+struct smsm_host;
+
+/**
+ * struct qcom_smsm - smsm driver context
+ * @dev: smsm device pointer
+ * @local_host: column in the subscription matrix representing this system
+ * @num_hosts: number of columns in the subscription matrix
+ * @num_entries: number of entries in the state map and rows in the subscription
+ * matrix
+ * @local_state: pointer to the local processor's state bits
+ * @subscription: pointer to local processor's row in subscription matrix
+ * @state: smem state handle
+ * @lock: spinlock for read-modify-write of the outgoing state
+ * @entries: context for each of the entries
+ * @hosts: context for each of the hosts
+ */
+struct qcom_smsm {
+ struct device *dev;
+
+ u32 local_host;
+
+ u32 num_hosts;
+ u32 num_entries;
+
+ u32 *local_state;
+ u32 *subscription;
+ struct qcom_smem_state *state;
+
+ spinlock_t lock;
+
+ struct smsm_entry *entries;
+ struct smsm_host *hosts;
+};
+
+/**
+ * struct smsm_entry - per remote processor entry context
+ * @smsm: back-reference to driver context
+ * @domain: IRQ domain for this entry, if representing a remote system
+ * @irq_enabled: bitmap of which state bits IRQs are enabled
+ * @irq_rising: bitmap tracking if rising bits should be propagated
+ * @irq_falling: bitmap tracking if falling bits should be propagated
+ * @last_value: snapshot of state bits last time the interrupts were propagated
+ * @remote_state: pointer to this entry's state bits
+ * @subscription: pointer to a row in the subscription matrix representing this
+ * entry
+ */
+struct smsm_entry {
+ struct qcom_smsm *smsm;
+
+ struct irq_domain *domain;
+ DECLARE_BITMAP(irq_enabled, 32);
+ DECLARE_BITMAP(irq_rising, 32);
+ DECLARE_BITMAP(irq_falling, 32);
+ unsigned long last_value;
+
+ u32 *remote_state;
+ u32 *subscription;
+};
+
+/**
+ * struct smsm_host - representation of a remote host
+ * @ipc_regmap: regmap for outgoing interrupt
+ * @ipc_offset: offset in @ipc_regmap for outgoing interrupt
+ * @ipc_bit: bit in @ipc_regmap + @ipc_offset for outgoing interrupt
+ */
+struct smsm_host {
+ struct regmap *ipc_regmap;
+ int ipc_offset;
+ int ipc_bit;
+};
+
+/**
+ * smsm_update_bits() - change bit in outgoing entry and inform subscribers
+ * @data: smsm context pointer
+ * @mask: value mask
+ * @value: new value
+ *
+ * Used to set and clear the bits in the outgoing/local entry and inform
+ * subscribers about the change.
+ */
+static int smsm_update_bits(void *data, u32 mask, u32 value)
+{
+ struct qcom_smsm *smsm = data;
+ struct smsm_host *hostp;
+ unsigned long flags;
+ u32 changes;
+ u32 host;
+ u32 orig;
+ u32 val;
+
+ spin_lock_irqsave(&smsm->lock, flags);
+
+ /* Update the entry */
+ val = orig = readl(smsm->local_state);
+ val &= ~mask;
+ val |= value;
+
+ /* Don't signal if we didn't change the value */
+ changes = val ^ orig;
+ if (!changes) {
+ spin_unlock_irqrestore(&smsm->lock, flags);
+ goto done;
+ }
+
+ /* Write out the new value */
+ writel(val, smsm->local_state);
+ spin_unlock_irqrestore(&smsm->lock, flags);
+
+ /* Make sure the value update is ordered before any kicks */
+ wmb();
+
+	/* Iterate over all hosts to check who wants a kick */
+ for (host = 0; host < smsm->num_hosts; host++) {
+ hostp = &smsm->hosts[host];
+
+ val = readl(smsm->subscription + host);
+ if (val & changes && hostp->ipc_regmap) {
+ regmap_write(hostp->ipc_regmap,
+ hostp->ipc_offset,
+ BIT(hostp->ipc_bit));
+ }
+ }
+
+done:
+ return 0;
+}
+
+static const struct qcom_smem_state_ops smsm_state_ops = {
+ .update_bits = smsm_update_bits,
+};
+
+/**
+ * smsm_intr() - cascading IRQ handler for SMSM
+ * @irq: unused
+ * @data: entry related to this IRQ
+ *
+ * This function cascades an incoming interrupt from a remote system, based on
+ * the state bits and configuration.
+ */
+static irqreturn_t smsm_intr(int irq, void *data)
+{
+ struct smsm_entry *entry = data;
+ unsigned i;
+ int irq_pin;
+ u32 changed;
+ u32 val;
+
+ val = readl(entry->remote_state);
+ changed = val ^ xchg(&entry->last_value, val);
+
+ for_each_set_bit(i, entry->irq_enabled, 32) {
+ if (!(changed & BIT(i)))
+ continue;
+
+ if (val & BIT(i)) {
+ if (test_bit(i, entry->irq_rising)) {
+ irq_pin = irq_find_mapping(entry->domain, i);
+ handle_nested_irq(irq_pin);
+ }
+ } else {
+ if (test_bit(i, entry->irq_falling)) {
+ irq_pin = irq_find_mapping(entry->domain, i);
+ handle_nested_irq(irq_pin);
+ }
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * smsm_mask_irq() - unsubscribe from cascades of IRQs of a certain status bit
+ * @irqd: IRQ handle to be masked
+ *
+ * This unsubscribes the local CPU from interrupts upon changes to the defined
+ * status bit. The bit is also cleared from cascading.
+ */
+static void smsm_mask_irq(struct irq_data *irqd)
+{
+ struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
+ struct qcom_smsm *smsm = entry->smsm;
+ u32 val;
+
+ if (entry->subscription) {
+ val = readl(entry->subscription + smsm->local_host);
+ val &= ~BIT(irq);
+ writel(val, entry->subscription + smsm->local_host);
+ }
+
+ clear_bit(irq, entry->irq_enabled);
+}
+
+/**
+ * smsm_unmask_irq() - subscribe to cascades of IRQs of a certain status bit
+ * @irqd: IRQ handle to be unmasked
+ *
+ * This subscribes the local CPU to interrupts upon changes to the defined
+ * status bit. The bit is also marked for cascading.
+ */
+static void smsm_unmask_irq(struct irq_data *irqd)
+{
+ struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
+ struct qcom_smsm *smsm = entry->smsm;
+ u32 val;
+
+ /* Make sure our last cached state is up-to-date */
+ if (readl(entry->remote_state) & BIT(irq))
+ set_bit(irq, &entry->last_value);
+ else
+ clear_bit(irq, &entry->last_value);
+
+ set_bit(irq, entry->irq_enabled);
+
+ if (entry->subscription) {
+ val = readl(entry->subscription + smsm->local_host);
+ val |= BIT(irq);
+ writel(val, entry->subscription + smsm->local_host);
+ }
+}
+
+/**
+ * smsm_set_irq_type() - updates the requested IRQ type for cascading
+ * @irqd: consumer interrupt handle
+ * @type: requested flags
+ */
+static int smsm_set_irq_type(struct irq_data *irqd, unsigned int type)
+{
+ struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
+
+ if (!(type & IRQ_TYPE_EDGE_BOTH))
+ return -EINVAL;
+
+ if (type & IRQ_TYPE_EDGE_RISING)
+ set_bit(irq, entry->irq_rising);
+ else
+ clear_bit(irq, entry->irq_rising);
+
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ set_bit(irq, entry->irq_falling);
+ else
+ clear_bit(irq, entry->irq_falling);
+
+ return 0;
+}
+
+static int smsm_get_irqchip_state(struct irq_data *irqd,
+ enum irqchip_irq_state which, bool *state)
+{
+ struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
+ u32 val;
+
+ if (which != IRQCHIP_STATE_LINE_LEVEL)
+ return -EINVAL;
+
+ val = readl(entry->remote_state);
+ *state = !!(val & BIT(irq));
+
+ return 0;
+}
+
+static struct irq_chip smsm_irq_chip = {
+ .name = "smsm",
+ .irq_mask = smsm_mask_irq,
+ .irq_unmask = smsm_unmask_irq,
+ .irq_set_type = smsm_set_irq_type,
+ .irq_get_irqchip_state = smsm_get_irqchip_state,
+};
+
+/**
+ * smsm_irq_map() - sets up a mapping for a cascaded IRQ
+ * @d: IRQ domain representing an entry
+ * @irq: IRQ to set up
+ * @hw: unused
+ */
+static int smsm_irq_map(struct irq_domain *d,
+ unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct smsm_entry *entry = d->host_data;
+
+ irq_set_chip_and_handler(irq, &smsm_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, entry);
+ irq_set_nested_thread(irq, 1);
+
+ return 0;
+}
+
+static const struct irq_domain_ops smsm_irq_ops = {
+ .map = smsm_irq_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+/**
+ * smsm_parse_ipc() - parses a qcom,ipc-%d device tree property
+ * @smsm: smsm driver context
+ * @host_id: index of the remote host to be resolved
+ *
+ * Parses device tree to acquire the information needed for sending the
+ * outgoing interrupts to a remote host - identified by @host_id.
+ */
+static int smsm_parse_ipc(struct qcom_smsm *smsm, unsigned host_id)
+{
+ struct device_node *syscon;
+ struct device_node *node = smsm->dev->of_node;
+ struct smsm_host *host = &smsm->hosts[host_id];
+ char key[16];
+ int ret;
+
+ snprintf(key, sizeof(key), "qcom,ipc-%d", host_id);
+ syscon = of_parse_phandle(node, key, 0);
+ if (!syscon)
+ return 0;
+
+ host->ipc_regmap = syscon_node_to_regmap(syscon);
+ of_node_put(syscon);
+ if (IS_ERR(host->ipc_regmap))
+ return PTR_ERR(host->ipc_regmap);
+
+ ret = of_property_read_u32_index(node, key, 1, &host->ipc_offset);
+ if (ret < 0) {
+ dev_err(smsm->dev, "no offset in %s\n", key);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_index(node, key, 2, &host->ipc_bit);
+ if (ret < 0) {
+ dev_err(smsm->dev, "no bit in %s\n", key);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * smsm_inbound_entry() - parse DT and set up an entry representing a remote system
+ * @smsm: smsm driver context
+ * @entry: entry context to be set up
+ * @node: dt node containing the entry's properties
+ */
+static int smsm_inbound_entry(struct qcom_smsm *smsm,
+ struct smsm_entry *entry,
+ struct device_node *node)
+{
+ int ret;
+ int irq;
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (!irq) {
+ dev_err(smsm->dev, "failed to parse smsm interrupt\n");
+ return -EINVAL;
+ }
+
+ ret = devm_request_threaded_irq(smsm->dev, irq,
+ NULL, smsm_intr,
+ IRQF_ONESHOT,
+ "smsm", (void *)entry);
+ if (ret) {
+ dev_err(smsm->dev, "failed to request interrupt\n");
+ return ret;
+ }
+
+ entry->domain = irq_domain_add_linear(node, 32, &smsm_irq_ops, entry);
+ if (!entry->domain) {
+ dev_err(smsm->dev, "failed to add irq_domain\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * smsm_get_size_info() - parse the optional memory segment for sizes
+ * @smsm: smsm driver context
+ *
+ * Attempt to acquire the number of hosts and entries from the optional shared
+ * memory location. Not being able to find this segment should indicate that
+ * we're on an older system where these values were hard coded to
+ * SMSM_DEFAULT_NUM_ENTRIES and SMSM_DEFAULT_NUM_HOSTS.
+ *
+ * Returns 0 on success, negative errno on failure.
+ */
+static int smsm_get_size_info(struct qcom_smsm *smsm)
+{
+ size_t size;
+ struct {
+ u32 num_hosts;
+ u32 num_entries;
+ u32 reserved0;
+ u32 reserved1;
+ } *info;
+
+ info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SIZE_INFO, &size);
+ if (IS_ERR(info) && PTR_ERR(info) != -ENOENT) {
+ if (PTR_ERR(info) != -EPROBE_DEFER)
+ dev_err(smsm->dev, "unable to retrieve smsm size info\n");
+ return PTR_ERR(info);
+ } else if (IS_ERR(info) || size != sizeof(*info)) {
+ dev_warn(smsm->dev, "no smsm size info, using defaults\n");
+ smsm->num_entries = SMSM_DEFAULT_NUM_ENTRIES;
+ smsm->num_hosts = SMSM_DEFAULT_NUM_HOSTS;
+ return 0;
+ }
+
+ smsm->num_entries = info->num_entries;
+ smsm->num_hosts = info->num_hosts;
+
+ dev_dbg(smsm->dev,
+ "found custom size of smsm: %d entries %d hosts\n",
+ smsm->num_entries, smsm->num_hosts);
+
+ return 0;
+}
+
+static int qcom_smsm_probe(struct platform_device *pdev)
+{
+ struct device_node *local_node;
+ struct device_node *node;
+ struct smsm_entry *entry;
+ struct qcom_smsm *smsm;
+ u32 *intr_mask;
+ size_t size;
+ u32 *states;
+ u32 id;
+ int ret;
+
+ smsm = devm_kzalloc(&pdev->dev, sizeof(*smsm), GFP_KERNEL);
+ if (!smsm)
+ return -ENOMEM;
+ smsm->dev = &pdev->dev;
+ spin_lock_init(&smsm->lock);
+
+ ret = smsm_get_size_info(smsm);
+ if (ret)
+ return ret;
+
+ smsm->entries = devm_kcalloc(&pdev->dev,
+ smsm->num_entries,
+ sizeof(struct smsm_entry),
+ GFP_KERNEL);
+ if (!smsm->entries)
+ return -ENOMEM;
+
+ smsm->hosts = devm_kcalloc(&pdev->dev,
+ smsm->num_hosts,
+ sizeof(struct smsm_host),
+ GFP_KERNEL);
+ if (!smsm->hosts)
+ return -ENOMEM;
+
+ for_each_child_of_node(pdev->dev.of_node, local_node) {
+ if (of_find_property(local_node, "#qcom,smem-state-cells", NULL))
+ break;
+ }
+ if (!local_node) {
+ dev_err(&pdev->dev, "no state entry\n");
+ return -EINVAL;
+ }
+
+ of_property_read_u32(pdev->dev.of_node,
+ "qcom,local-host",
+ &smsm->local_host);
+
+ /* Parse the host properties */
+ for (id = 0; id < smsm->num_hosts; id++) {
+ ret = smsm_parse_ipc(smsm, id);
+ if (ret < 0)
+ goto out_put;
+ }
+
+ /* Acquire the main SMSM state vector */
+ ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SHARED_STATE,
+ smsm->num_entries * sizeof(u32));
+ if (ret < 0 && ret != -EEXIST) {
+ dev_err(&pdev->dev, "unable to allocate shared state entry\n");
+ goto out_put;
+ }
+
+ states = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SHARED_STATE, NULL);
+ if (IS_ERR(states)) {
+ dev_err(&pdev->dev, "Unable to acquire shared state entry\n");
+ ret = PTR_ERR(states);
+ goto out_put;
+ }
+
+ /* Acquire the list of interrupt mask vectors */
+ size = smsm->num_entries * smsm->num_hosts * sizeof(u32);
+ ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, SMEM_SMSM_CPU_INTR_MASK, size);
+ if (ret < 0 && ret != -EEXIST) {
+ dev_err(&pdev->dev, "unable to allocate smsm interrupt mask\n");
+ goto out_put;
+ }
+
+ intr_mask = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_CPU_INTR_MASK, NULL);
+ if (IS_ERR(intr_mask)) {
+ dev_err(&pdev->dev, "unable to acquire shared memory interrupt mask\n");
+ ret = PTR_ERR(intr_mask);
+ goto out_put;
+ }
+
+ /* Setup the reference to the local state bits */
+ smsm->local_state = states + smsm->local_host;
+ smsm->subscription = intr_mask + smsm->local_host * smsm->num_hosts;
+
+ /* Register the outgoing state */
+ smsm->state = qcom_smem_state_register(local_node, &smsm_state_ops, smsm);
+ if (IS_ERR(smsm->state)) {
+ dev_err(smsm->dev, "failed to register qcom_smem_state\n");
+ ret = PTR_ERR(smsm->state);
+ goto out_put;
+ }
+
+ /* Register handlers for remote processor entries of interest. */
+ for_each_available_child_of_node(pdev->dev.of_node, node) {
+ if (!of_property_read_bool(node, "interrupt-controller"))
+ continue;
+
+ ret = of_property_read_u32(node, "reg", &id);
+ if (ret || id >= smsm->num_entries) {
+ dev_err(&pdev->dev, "invalid reg of entry\n");
+ if (!ret)
+ ret = -EINVAL;
+ goto unwind_interfaces;
+ }
+ entry = &smsm->entries[id];
+
+ entry->smsm = smsm;
+ entry->remote_state = states + id;
+
+		/* Setup subscription pointers and unsubscribe from any kicks */
+ entry->subscription = intr_mask + id * smsm->num_hosts;
+ writel(0, entry->subscription + smsm->local_host);
+
+ ret = smsm_inbound_entry(smsm, entry, node);
+ if (ret < 0)
+ goto unwind_interfaces;
+ }
+
+ platform_set_drvdata(pdev, smsm);
+ of_node_put(local_node);
+
+ return 0;
+
+unwind_interfaces:
+ of_node_put(node);
+ for (id = 0; id < smsm->num_entries; id++)
+ if (smsm->entries[id].domain)
+ irq_domain_remove(smsm->entries[id].domain);
+
+ qcom_smem_state_unregister(smsm->state);
+out_put:
+ of_node_put(local_node);
+ return ret;
+}
+
+static int qcom_smsm_remove(struct platform_device *pdev)
+{
+ struct qcom_smsm *smsm = platform_get_drvdata(pdev);
+ unsigned id;
+
+ for (id = 0; id < smsm->num_entries; id++)
+ if (smsm->entries[id].domain)
+ irq_domain_remove(smsm->entries[id].domain);
+
+ qcom_smem_state_unregister(smsm->state);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_smsm_of_match[] = {
+ { .compatible = "qcom,smsm" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_smsm_of_match);
+
+static struct platform_driver qcom_smsm_driver = {
+ .probe = qcom_smsm_probe,
+ .remove = qcom_smsm_remove,
+ .driver = {
+ .name = "qcom-smsm",
+ .of_match_table = qcom_smsm_of_match,
+ },
+};
+module_platform_driver(qcom_smsm_driver);
+
+MODULE_DESCRIPTION("Qualcomm Shared Memory State Machine driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
new file mode 100644
index 000000000..aa37e1bad
--- /dev/null
+++ b/drivers/soc/qcom/socinfo.c
@@ -0,0 +1,694 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2009-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, Linaro Ltd.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/string.h>
+#include <linux/sys_soc.h>
+#include <linux/types.h>
+
+#include <asm/unaligned.h>
+
+/*
+ * SoC version type with major number in the upper 16 bits and minor
+ * number in the lower 16 bits.
+ */
+#define SOCINFO_MAJOR(ver) (((ver) >> 16) & 0xffff)
+#define SOCINFO_MINOR(ver) ((ver) & 0xffff)
+#define SOCINFO_VERSION(maj, min) ((((maj) & 0xffff) << 16)|((min) & 0xffff))
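+/*
+ * For example (illustrative only), SOCINFO_VERSION(0, 15) encodes to
+ * 0x0000000f, so SOCINFO_MAJOR() yields 0 and SOCINFO_MINOR() yields 15;
+ * the switch in socinfo_debugfs_init() below compares the reported format
+ * against these encoded values.
+ */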
+
+#define SMEM_SOCINFO_BUILD_ID_LENGTH 32
+#define SMEM_SOCINFO_CHIP_ID_LENGTH 32
+
+/*
+ * SMEM item id, used to acquire handles to respective
+ * SMEM region.
+ */
+#define SMEM_HW_SW_BUILD_ID 137
+
+#ifdef CONFIG_DEBUG_FS
+#define SMEM_IMAGE_VERSION_BLOCKS_COUNT 32
+#define SMEM_IMAGE_VERSION_SIZE 4096
+#define SMEM_IMAGE_VERSION_NAME_SIZE 75
+#define SMEM_IMAGE_VERSION_VARIANT_SIZE 20
+#define SMEM_IMAGE_VERSION_OEM_SIZE 32
+
+/*
+ * SMEM Image table indices
+ */
+#define SMEM_IMAGE_TABLE_BOOT_INDEX 0
+#define SMEM_IMAGE_TABLE_TZ_INDEX 1
+#define SMEM_IMAGE_TABLE_RPM_INDEX 3
+#define SMEM_IMAGE_TABLE_APPS_INDEX 10
+#define SMEM_IMAGE_TABLE_MPSS_INDEX 11
+#define SMEM_IMAGE_TABLE_ADSP_INDEX 12
+#define SMEM_IMAGE_TABLE_CNSS_INDEX 13
+#define SMEM_IMAGE_TABLE_VIDEO_INDEX 14
+#define SMEM_IMAGE_VERSION_TABLE 469
+
+/*
+ * SMEM Image table names
+ */
+static const char *const socinfo_image_names[] = {
+ [SMEM_IMAGE_TABLE_ADSP_INDEX] = "adsp",
+ [SMEM_IMAGE_TABLE_APPS_INDEX] = "apps",
+ [SMEM_IMAGE_TABLE_BOOT_INDEX] = "boot",
+ [SMEM_IMAGE_TABLE_CNSS_INDEX] = "cnss",
+ [SMEM_IMAGE_TABLE_MPSS_INDEX] = "mpss",
+ [SMEM_IMAGE_TABLE_RPM_INDEX] = "rpm",
+ [SMEM_IMAGE_TABLE_TZ_INDEX] = "tz",
+ [SMEM_IMAGE_TABLE_VIDEO_INDEX] = "video",
+};
+
+static const char *const pmic_models[] = {
+ [0] = "Unknown PMIC model",
+ [1] = "PM8941",
+ [2] = "PM8841",
+ [3] = "PM8019",
+ [4] = "PM8226",
+ [5] = "PM8110",
+ [6] = "PMA8084",
+ [7] = "PMI8962",
+ [8] = "PMD9635",
+ [9] = "PM8994",
+ [10] = "PMI8994",
+ [11] = "PM8916",
+ [12] = "PM8004",
+ [13] = "PM8909/PM8058",
+ [14] = "PM8028",
+ [15] = "PM8901",
+ [16] = "PM8950/PM8027",
+ [17] = "PMI8950/ISL9519",
+ [18] = "PMK8001/PM8921",
+ [19] = "PMI8996/PM8018",
+ [20] = "PM8998/PM8015",
+ [21] = "PMI8998/PM8014",
+ [22] = "PM8821",
+ [23] = "PM8038",
+ [24] = "PM8005/PM8922",
+ [25] = "PM8917",
+ [26] = "PM660L",
+ [27] = "PM660",
+ [30] = "PM8150",
+ [31] = "PM8150L",
+ [32] = "PM8150B",
+ [33] = "PMK8002",
+ [36] = "PM8009",
+ [38] = "PM8150C",
+ [41] = "SMB2351",
+ [45] = "PM6125",
+ [47] = "PMK8350",
+ [48] = "PM8350",
+ [49] = "PM8350C",
+ [50] = "PM8350B",
+ [51] = "PMR735A",
+ [52] = "PMR735B",
+ [58] = "PM8450",
+ [65] = "PM8010",
+};
+#endif /* CONFIG_DEBUG_FS */
+
+/* Socinfo SMEM item structure */
+struct socinfo {
+ __le32 fmt;
+ __le32 id;
+ __le32 ver;
+ char build_id[SMEM_SOCINFO_BUILD_ID_LENGTH];
+ /* Version 2 */
+ __le32 raw_id;
+ __le32 raw_ver;
+ /* Version 3 */
+ __le32 hw_plat;
+ /* Version 4 */
+ __le32 plat_ver;
+ /* Version 5 */
+ __le32 accessory_chip;
+ /* Version 6 */
+ __le32 hw_plat_subtype;
+ /* Version 7 */
+ __le32 pmic_model;
+ __le32 pmic_die_rev;
+ /* Version 8 */
+ __le32 pmic_model_1;
+ __le32 pmic_die_rev_1;
+ __le32 pmic_model_2;
+ __le32 pmic_die_rev_2;
+ /* Version 9 */
+ __le32 foundry_id;
+ /* Version 10 */
+ __le32 serial_num;
+ /* Version 11 */
+ __le32 num_pmics;
+ __le32 pmic_array_offset;
+ /* Version 12 */
+ __le32 chip_family;
+ __le32 raw_device_family;
+ __le32 raw_device_num;
+ /* Version 13 */
+ __le32 nproduct_id;
+ char chip_id[SMEM_SOCINFO_CHIP_ID_LENGTH];
+ /* Version 14 */
+ __le32 num_clusters;
+ __le32 ncluster_array_offset;
+ __le32 num_defective_parts;
+ __le32 ndefective_parts_array_offset;
+ /* Version 15 */
+ __le32 nmodem_supported;
+};
+
+#ifdef CONFIG_DEBUG_FS
+struct socinfo_params {
+ u32 raw_device_family;
+ u32 hw_plat_subtype;
+ u32 accessory_chip;
+ u32 raw_device_num;
+ u32 chip_family;
+ u32 foundry_id;
+ u32 plat_ver;
+ u32 raw_ver;
+ u32 hw_plat;
+ u32 fmt;
+ u32 nproduct_id;
+ u32 num_clusters;
+ u32 ncluster_array_offset;
+ u32 num_defective_parts;
+ u32 ndefective_parts_array_offset;
+ u32 nmodem_supported;
+};
+
+struct smem_image_version {
+ char name[SMEM_IMAGE_VERSION_NAME_SIZE];
+ char variant[SMEM_IMAGE_VERSION_VARIANT_SIZE];
+ char pad;
+ char oem[SMEM_IMAGE_VERSION_OEM_SIZE];
+};
+#endif /* CONFIG_DEBUG_FS */
+
+struct qcom_socinfo {
+ struct soc_device *soc_dev;
+ struct soc_device_attribute attr;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *dbg_root;
+ struct socinfo_params info;
+#endif /* CONFIG_DEBUG_FS */
+};
+
+struct soc_id {
+ unsigned int id;
+ const char *name;
+};
+
+static const struct soc_id soc_id[] = {
+ { 87, "MSM8960" },
+ { 109, "APQ8064" },
+ { 122, "MSM8660A" },
+ { 123, "MSM8260A" },
+ { 124, "APQ8060A" },
+ { 126, "MSM8974" },
+ { 130, "MPQ8064" },
+ { 138, "MSM8960AB" },
+ { 139, "APQ8060AB" },
+ { 140, "MSM8260AB" },
+ { 141, "MSM8660AB" },
+ { 145, "MSM8626" },
+ { 147, "MSM8610" },
+ { 153, "APQ8064AB" },
+ { 158, "MSM8226" },
+ { 159, "MSM8526" },
+ { 161, "MSM8110" },
+ { 162, "MSM8210" },
+ { 163, "MSM8810" },
+ { 164, "MSM8212" },
+ { 165, "MSM8612" },
+ { 166, "MSM8112" },
+ { 168, "MSM8225Q" },
+ { 169, "MSM8625Q" },
+ { 170, "MSM8125Q" },
+ { 172, "APQ8064AA" },
+ { 178, "APQ8084" },
+ { 184, "APQ8074" },
+ { 185, "MSM8274" },
+ { 186, "MSM8674" },
+ { 194, "MSM8974PRO-AC" },
+ { 198, "MSM8126" },
+ { 199, "APQ8026" },
+ { 200, "MSM8926" },
+ { 205, "MSM8326" },
+ { 206, "MSM8916" },
+ { 207, "MSM8994" },
+ { 208, "APQ8074PRO-AA" },
+ { 209, "APQ8074PRO-AB" },
+ { 210, "APQ8074PRO-AC" },
+ { 211, "MSM8274PRO-AA" },
+ { 212, "MSM8274PRO-AB" },
+ { 213, "MSM8274PRO-AC" },
+ { 214, "MSM8674PRO-AA" },
+ { 215, "MSM8674PRO-AB" },
+ { 216, "MSM8674PRO-AC" },
+ { 217, "MSM8974PRO-AA" },
+ { 218, "MSM8974PRO-AB" },
+ { 219, "APQ8028" },
+ { 220, "MSM8128" },
+ { 221, "MSM8228" },
+ { 222, "MSM8528" },
+ { 223, "MSM8628" },
+ { 224, "MSM8928" },
+ { 225, "MSM8510" },
+ { 226, "MSM8512" },
+ { 233, "MSM8936" },
+ { 239, "MSM8939" },
+ { 240, "APQ8036" },
+ { 241, "APQ8039" },
+ { 246, "MSM8996" },
+ { 247, "APQ8016" },
+ { 248, "MSM8216" },
+ { 249, "MSM8116" },
+ { 250, "MSM8616" },
+ { 251, "MSM8992" },
+ { 253, "APQ8094" },
+ { 290, "MDM9607" },
+ { 291, "APQ8096" },
+ { 292, "MSM8998" },
+ { 293, "MSM8953" },
+ { 296, "MDM8207" },
+ { 297, "MDM9207" },
+ { 298, "MDM9307" },
+ { 299, "MDM9628" },
+ { 304, "APQ8053" },
+ { 305, "MSM8996SG" },
+ { 310, "MSM8996AU" },
+ { 311, "APQ8096AU" },
+ { 312, "APQ8096SG" },
+ { 317, "SDM660" },
+ { 318, "SDM630" },
+ { 319, "APQ8098" },
+ { 321, "SDM845" },
+ { 322, "MDM9206" },
+ { 323, "IPQ8074" },
+ { 324, "SDA660" },
+ { 325, "SDM658" },
+ { 326, "SDA658" },
+ { 327, "SDA630" },
+ { 338, "SDM450" },
+ { 341, "SDA845" },
+ { 342, "IPQ8072" },
+ { 343, "IPQ8076" },
+ { 344, "IPQ8078" },
+ { 345, "SDM636" },
+ { 346, "SDA636" },
+ { 349, "SDM632" },
+ { 350, "SDA632" },
+ { 351, "SDA450" },
+ { 356, "SM8250" },
+ { 375, "IPQ8070" },
+ { 376, "IPQ8071" },
+ { 389, "IPQ8072A" },
+ { 390, "IPQ8074A" },
+ { 391, "IPQ8076A" },
+ { 392, "IPQ8078A" },
+ { 394, "SM6125" },
+ { 395, "IPQ8070A" },
+ { 396, "IPQ8071A" },
+ { 402, "IPQ6018" },
+ { 403, "IPQ6028" },
+ { 421, "IPQ6000" },
+ { 422, "IPQ6010" },
+ { 425, "SC7180" },
+ { 434, "SM6350" },
+ { 439, "SM8350" },
+ { 449, "SC8280XP" },
+ { 453, "IPQ6005" },
+ { 455, "QRB5165" },
+ { 457, "SM8450" },
+ { 459, "SM7225" },
+ { 460, "SA8295P" },
+ { 461, "SA8540P" },
+ { 480, "SM8450" },
+ { 482, "SM8450" },
+ { 487, "SC7280" },
+ { 495, "SC7180P" },
+ { 507, "SM6375" },
+};
+
+static const char *socinfo_machine(struct device *dev, unsigned int id)
+{
+ int idx;
+
+ for (idx = 0; idx < ARRAY_SIZE(soc_id); idx++) {
+ if (soc_id[idx].id == id)
+ return soc_id[idx].name;
+ }
+
+ return NULL;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+#define QCOM_OPEN(name, _func) \
+static int qcom_open_##name(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, _func, inode->i_private); \
+} \
+ \
+static const struct file_operations qcom_ ##name## _ops = { \
+ .open = qcom_open_##name, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
+#define DEBUGFS_ADD(info, name) \
+ debugfs_create_file(__stringify(name), 0444, \
+ qcom_socinfo->dbg_root, \
+ info, &qcom_ ##name## _ops)
+
+
+static int qcom_show_build_id(struct seq_file *seq, void *p)
+{
+ struct socinfo *socinfo = seq->private;
+
+ seq_printf(seq, "%s\n", socinfo->build_id);
+
+ return 0;
+}
+
+static int qcom_show_pmic_model(struct seq_file *seq, void *p)
+{
+ struct socinfo *socinfo = seq->private;
+ int model = SOCINFO_MINOR(le32_to_cpu(socinfo->pmic_model));
+
+ if (model < 0)
+ return -EINVAL;
+
+ if (model < ARRAY_SIZE(pmic_models) && pmic_models[model])
+ seq_printf(seq, "%s\n", pmic_models[model]);
+ else
+ seq_printf(seq, "unknown (%d)\n", model);
+
+ return 0;
+}
+
+static int qcom_show_pmic_model_array(struct seq_file *seq, void *p)
+{
+ struct socinfo *socinfo = seq->private;
+ unsigned int num_pmics = le32_to_cpu(socinfo->num_pmics);
+ unsigned int pmic_array_offset = le32_to_cpu(socinfo->pmic_array_offset);
+ int i;
+ void *ptr = socinfo;
+
+ ptr += pmic_array_offset;
+
+	/* No need for bounds checking, it was already done in socinfo_debugfs_init() */
+ for (i = 0; i < num_pmics; i++) {
+ unsigned int model = SOCINFO_MINOR(get_unaligned_le32(ptr + 2 * i * sizeof(u32)));
+ unsigned int die_rev = get_unaligned_le32(ptr + (2 * i + 1) * sizeof(u32));
+
+ if (model < ARRAY_SIZE(pmic_models) && pmic_models[model])
+ seq_printf(seq, "%s %u.%u\n", pmic_models[model],
+ SOCINFO_MAJOR(die_rev),
+ SOCINFO_MINOR(die_rev));
+ else
+ seq_printf(seq, "unknown (%d)\n", model);
+ }
+
+ return 0;
+}
+
+static int qcom_show_pmic_die_revision(struct seq_file *seq, void *p)
+{
+ struct socinfo *socinfo = seq->private;
+
+ seq_printf(seq, "%u.%u\n",
+ SOCINFO_MAJOR(le32_to_cpu(socinfo->pmic_die_rev)),
+ SOCINFO_MINOR(le32_to_cpu(socinfo->pmic_die_rev)));
+
+ return 0;
+}
+
+static int qcom_show_chip_id(struct seq_file *seq, void *p)
+{
+ struct socinfo *socinfo = seq->private;
+
+ seq_printf(seq, "%s\n", socinfo->chip_id);
+
+ return 0;
+}
+
+QCOM_OPEN(build_id, qcom_show_build_id);
+QCOM_OPEN(pmic_model, qcom_show_pmic_model);
+QCOM_OPEN(pmic_model_array, qcom_show_pmic_model_array);
+QCOM_OPEN(pmic_die_rev, qcom_show_pmic_die_revision);
+QCOM_OPEN(chip_id, qcom_show_chip_id);
+
+#define DEFINE_IMAGE_OPS(type) \
+static int show_image_##type(struct seq_file *seq, void *p) \
+{ \
+ struct smem_image_version *image_version = seq->private; \
+ if (image_version->type[0] != '\0') \
+ seq_printf(seq, "%s\n", image_version->type); \
+ return 0; \
+} \
+static int open_image_##type(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, show_image_##type, inode->i_private); \
+} \
+ \
+static const struct file_operations qcom_image_##type##_ops = { \
+ .open = open_image_##type, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
+DEFINE_IMAGE_OPS(name);
+DEFINE_IMAGE_OPS(variant);
+DEFINE_IMAGE_OPS(oem);
+
+static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo,
+ struct socinfo *info, size_t info_size)
+{
+ struct smem_image_version *versions;
+ struct dentry *dentry;
+ size_t size;
+ int i;
+ unsigned int num_pmics;
+ unsigned int pmic_array_offset;
+
+ qcom_socinfo->dbg_root = debugfs_create_dir("qcom_socinfo", NULL);
+
+ qcom_socinfo->info.fmt = __le32_to_cpu(info->fmt);
+
+ debugfs_create_x32("info_fmt", 0444, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.fmt);
+
+ switch (qcom_socinfo->info.fmt) {
+ case SOCINFO_VERSION(0, 15):
+ qcom_socinfo->info.nmodem_supported = __le32_to_cpu(info->nmodem_supported);
+
+ debugfs_create_u32("nmodem_supported", 0444, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.nmodem_supported);
+ fallthrough;
+ case SOCINFO_VERSION(0, 14):
+ qcom_socinfo->info.num_clusters = __le32_to_cpu(info->num_clusters);
+ qcom_socinfo->info.ncluster_array_offset = __le32_to_cpu(info->ncluster_array_offset);
+ qcom_socinfo->info.num_defective_parts = __le32_to_cpu(info->num_defective_parts);
+ qcom_socinfo->info.ndefective_parts_array_offset = __le32_to_cpu(info->ndefective_parts_array_offset);
+
+ debugfs_create_u32("num_clusters", 0444, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.num_clusters);
+ debugfs_create_u32("ncluster_array_offset", 0444, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.ncluster_array_offset);
+ debugfs_create_u32("num_defective_parts", 0444, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.num_defective_parts);
+ debugfs_create_u32("ndefective_parts_array_offset", 0444, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.ndefective_parts_array_offset);
+ fallthrough;
+ case SOCINFO_VERSION(0, 13):
+ qcom_socinfo->info.nproduct_id = __le32_to_cpu(info->nproduct_id);
+
+ debugfs_create_u32("nproduct_id", 0444, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.nproduct_id);
+ DEBUGFS_ADD(info, chip_id);
+ fallthrough;
+ case SOCINFO_VERSION(0, 12):
+ qcom_socinfo->info.chip_family =
+ __le32_to_cpu(info->chip_family);
+ qcom_socinfo->info.raw_device_family =
+ __le32_to_cpu(info->raw_device_family);
+ qcom_socinfo->info.raw_device_num =
+ __le32_to_cpu(info->raw_device_num);
+
+ debugfs_create_x32("chip_family", 0444, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.chip_family);
+ debugfs_create_x32("raw_device_family", 0444,
+ qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.raw_device_family);
+ debugfs_create_x32("raw_device_number", 0444,
+ qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.raw_device_num);
+ fallthrough;
+ case SOCINFO_VERSION(0, 11):
+ num_pmics = le32_to_cpu(info->num_pmics);
+ pmic_array_offset = le32_to_cpu(info->pmic_array_offset);
+ if (pmic_array_offset + 2 * num_pmics * sizeof(u32) <= info_size)
+ DEBUGFS_ADD(info, pmic_model_array);
+ fallthrough;
+ case SOCINFO_VERSION(0, 10):
+ case SOCINFO_VERSION(0, 9):
+ qcom_socinfo->info.foundry_id = __le32_to_cpu(info->foundry_id);
+
+ debugfs_create_u32("foundry_id", 0444, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.foundry_id);
+ fallthrough;
+ case SOCINFO_VERSION(0, 8):
+ case SOCINFO_VERSION(0, 7):
+ DEBUGFS_ADD(info, pmic_model);
+ DEBUGFS_ADD(info, pmic_die_rev);
+ fallthrough;
+ case SOCINFO_VERSION(0, 6):
+ qcom_socinfo->info.hw_plat_subtype =
+ __le32_to_cpu(info->hw_plat_subtype);
+
+ debugfs_create_u32("hardware_platform_subtype", 0444,
+ qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.hw_plat_subtype);
+ fallthrough;
+ case SOCINFO_VERSION(0, 5):
+ qcom_socinfo->info.accessory_chip =
+ __le32_to_cpu(info->accessory_chip);
+
+ debugfs_create_u32("accessory_chip", 0444,
+ qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.accessory_chip);
+ fallthrough;
+ case SOCINFO_VERSION(0, 4):
+ qcom_socinfo->info.plat_ver = __le32_to_cpu(info->plat_ver);
+
+ debugfs_create_u32("platform_version", 0444,
+ qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.plat_ver);
+ fallthrough;
+ case SOCINFO_VERSION(0, 3):
+ qcom_socinfo->info.hw_plat = __le32_to_cpu(info->hw_plat);
+
+ debugfs_create_u32("hardware_platform", 0444,
+ qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.hw_plat);
+ fallthrough;
+ case SOCINFO_VERSION(0, 2):
+ qcom_socinfo->info.raw_ver = __le32_to_cpu(info->raw_ver);
+
+ debugfs_create_u32("raw_version", 0444, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.raw_ver);
+ fallthrough;
+ case SOCINFO_VERSION(0, 1):
+ DEBUGFS_ADD(info, build_id);
+ break;
+ }
+
+ versions = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_IMAGE_VERSION_TABLE,
+ &size);
+
+ for (i = 0; i < ARRAY_SIZE(socinfo_image_names); i++) {
+ if (!socinfo_image_names[i])
+ continue;
+
+ dentry = debugfs_create_dir(socinfo_image_names[i],
+ qcom_socinfo->dbg_root);
+ debugfs_create_file("name", 0444, dentry, &versions[i],
+ &qcom_image_name_ops);
+ debugfs_create_file("variant", 0444, dentry, &versions[i],
+ &qcom_image_variant_ops);
+ debugfs_create_file("oem", 0444, dentry, &versions[i],
+ &qcom_image_oem_ops);
+ }
+}
+
+static void socinfo_debugfs_exit(struct qcom_socinfo *qcom_socinfo)
+{
+ debugfs_remove_recursive(qcom_socinfo->dbg_root);
+}
+#else
+static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo,
+ struct socinfo *info, size_t info_size)
+{
+}
+static void socinfo_debugfs_exit(struct qcom_socinfo *qcom_socinfo) { }
+#endif /* CONFIG_DEBUG_FS */
+
+static int qcom_socinfo_probe(struct platform_device *pdev)
+{
+ struct qcom_socinfo *qs;
+ struct socinfo *info;
+ size_t item_size;
+
+ info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_HW_SW_BUILD_ID,
+ &item_size);
+ if (IS_ERR(info)) {
+ dev_err(&pdev->dev, "Couldn't find socinfo\n");
+ return PTR_ERR(info);
+ }
+
+ qs = devm_kzalloc(&pdev->dev, sizeof(*qs), GFP_KERNEL);
+ if (!qs)
+ return -ENOMEM;
+
+ qs->attr.family = "Snapdragon";
+ qs->attr.machine = socinfo_machine(&pdev->dev,
+ le32_to_cpu(info->id));
+ qs->attr.soc_id = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u",
+ le32_to_cpu(info->id));
+ qs->attr.revision = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u.%u",
+ SOCINFO_MAJOR(le32_to_cpu(info->ver)),
+ SOCINFO_MINOR(le32_to_cpu(info->ver)));
+ if (offsetof(struct socinfo, serial_num) <= item_size)
+ qs->attr.serial_number = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "%u",
+ le32_to_cpu(info->serial_num));
+
+ qs->soc_dev = soc_device_register(&qs->attr);
+ if (IS_ERR(qs->soc_dev))
+ return PTR_ERR(qs->soc_dev);
+
+ socinfo_debugfs_init(qs, info, item_size);
+
+	/* Feed the SoC-specific unique data into the entropy pool */
+ add_device_randomness(info, item_size);
+
+ platform_set_drvdata(pdev, qs);
+
+ return 0;
+}
+
+static int qcom_socinfo_remove(struct platform_device *pdev)
+{
+ struct qcom_socinfo *qs = platform_get_drvdata(pdev);
+
+ soc_device_unregister(qs->soc_dev);
+
+ socinfo_debugfs_exit(qs);
+
+ return 0;
+}
+
+static struct platform_driver qcom_socinfo_driver = {
+ .probe = qcom_socinfo_probe,
+ .remove = qcom_socinfo_remove,
+ .driver = {
+ .name = "qcom-socinfo",
+ },
+};
+
+module_platform_driver(qcom_socinfo_driver);
+
+MODULE_DESCRIPTION("Qualcomm SoCinfo driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qcom-socinfo");
diff --git a/drivers/soc/qcom/spm.c b/drivers/soc/qcom/spm.c
new file mode 100644
index 000000000..484b42b74
--- /dev/null
+++ b/drivers/soc/qcom/spm.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2015, Linaro Ltd.
+ *
+ * SAW power controller driver
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <soc/qcom/spm.h>
+
+#define SPM_CTL_INDEX 0x7f
+#define SPM_CTL_INDEX_SHIFT 4
+#define SPM_CTL_EN BIT(0)
+
+enum spm_reg {
+ SPM_REG_CFG,
+ SPM_REG_SPM_CTL,
+ SPM_REG_DLY,
+ SPM_REG_PMIC_DLY,
+ SPM_REG_PMIC_DATA_0,
+ SPM_REG_PMIC_DATA_1,
+ SPM_REG_VCTL,
+ SPM_REG_SEQ_ENTRY,
+ SPM_REG_SPM_STS,
+ SPM_REG_PMIC_STS,
+ SPM_REG_AVS_CTL,
+ SPM_REG_AVS_LIMIT,
+ SPM_REG_NR,
+};
+
+static const u16 spm_reg_offset_v4_1[SPM_REG_NR] = {
+ [SPM_REG_AVS_CTL] = 0x904,
+ [SPM_REG_AVS_LIMIT] = 0x908,
+};
+
+static const struct spm_reg_data spm_reg_660_gold_l2 = {
+ .reg_offset = spm_reg_offset_v4_1,
+ .avs_ctl = 0x1010031,
+ .avs_limit = 0x4580458,
+};
+
+static const struct spm_reg_data spm_reg_660_silver_l2 = {
+ .reg_offset = spm_reg_offset_v4_1,
+ .avs_ctl = 0x101c031,
+ .avs_limit = 0x4580458,
+};
+
+static const struct spm_reg_data spm_reg_8998_gold_l2 = {
+ .reg_offset = spm_reg_offset_v4_1,
+ .avs_ctl = 0x1010031,
+ .avs_limit = 0x4700470,
+};
+
+static const struct spm_reg_data spm_reg_8998_silver_l2 = {
+ .reg_offset = spm_reg_offset_v4_1,
+ .avs_ctl = 0x1010031,
+ .avs_limit = 0x4200420,
+};
+
+static const u16 spm_reg_offset_v3_0[SPM_REG_NR] = {
+ [SPM_REG_CFG] = 0x08,
+ [SPM_REG_SPM_CTL] = 0x30,
+ [SPM_REG_DLY] = 0x34,
+ [SPM_REG_SEQ_ENTRY] = 0x400,
+};
+
+/* SPM register data for 8909 */
+static const struct spm_reg_data spm_reg_8909_cpu = {
+ .reg_offset = spm_reg_offset_v3_0,
+ .spm_cfg = 0x1,
+ .spm_dly = 0x3C102800,
+ .seq = { 0x60, 0x03, 0x60, 0x0B, 0x0F, 0x20, 0x10, 0x80, 0x30, 0x90,
+ 0x5B, 0x60, 0x03, 0x60, 0x76, 0x76, 0x0B, 0x94, 0x5B, 0x80,
+ 0x10, 0x26, 0x30, 0x0F },
+ .start_index[PM_SLEEP_MODE_STBY] = 0,
+ .start_index[PM_SLEEP_MODE_SPC] = 5,
+};
+
+/* SPM register data for 8916 */
+static const struct spm_reg_data spm_reg_8916_cpu = {
+ .reg_offset = spm_reg_offset_v3_0,
+ .spm_cfg = 0x1,
+ .spm_dly = 0x3C102800,
+ .seq = { 0x60, 0x03, 0x60, 0x0B, 0x0F, 0x20, 0x10, 0x80, 0x30, 0x90,
+ 0x5B, 0x60, 0x03, 0x60, 0x3B, 0x76, 0x76, 0x0B, 0x94, 0x5B,
+ 0x80, 0x10, 0x26, 0x30, 0x0F },
+ .start_index[PM_SLEEP_MODE_STBY] = 0,
+ .start_index[PM_SLEEP_MODE_SPC] = 5,
+};
+
+static const u16 spm_reg_offset_v2_1[SPM_REG_NR] = {
+ [SPM_REG_CFG] = 0x08,
+ [SPM_REG_SPM_CTL] = 0x30,
+ [SPM_REG_DLY] = 0x34,
+ [SPM_REG_SEQ_ENTRY] = 0x80,
+};
+
+/* SPM register data for 8974, 8084 */
+static const struct spm_reg_data spm_reg_8974_8084_cpu = {
+ .reg_offset = spm_reg_offset_v2_1,
+ .spm_cfg = 0x1,
+ .spm_dly = 0x3C102800,
+ .seq = { 0x03, 0x0B, 0x0F, 0x00, 0x20, 0x80, 0x10, 0xE8, 0x5B, 0x03,
+ 0x3B, 0xE8, 0x5B, 0x82, 0x10, 0x0B, 0x30, 0x06, 0x26, 0x30,
+ 0x0F },
+ .start_index[PM_SLEEP_MODE_STBY] = 0,
+ .start_index[PM_SLEEP_MODE_SPC] = 3,
+};
+
+/* SPM register data for 8226 */
+static const struct spm_reg_data spm_reg_8226_cpu = {
+ .reg_offset = spm_reg_offset_v2_1,
+ .spm_cfg = 0x0,
+ .spm_dly = 0x3C102800,
+ .seq = { 0x60, 0x03, 0x60, 0x0B, 0x0F, 0x20, 0x10, 0x80, 0x30, 0x90,
+ 0x5B, 0x60, 0x03, 0x60, 0x3B, 0x76, 0x76, 0x0B, 0x94, 0x5B,
+ 0x80, 0x10, 0x26, 0x30, 0x0F },
+ .start_index[PM_SLEEP_MODE_STBY] = 0,
+ .start_index[PM_SLEEP_MODE_SPC] = 5,
+};
+
+static const u16 spm_reg_offset_v1_1[SPM_REG_NR] = {
+ [SPM_REG_CFG] = 0x08,
+ [SPM_REG_SPM_CTL] = 0x20,
+ [SPM_REG_PMIC_DLY] = 0x24,
+ [SPM_REG_PMIC_DATA_0] = 0x28,
+ [SPM_REG_PMIC_DATA_1] = 0x2C,
+ [SPM_REG_SEQ_ENTRY] = 0x80,
+};
+
+/* SPM register data for 8064 */
+static const struct spm_reg_data spm_reg_8064_cpu = {
+ .reg_offset = spm_reg_offset_v1_1,
+ .spm_cfg = 0x1F,
+ .pmic_dly = 0x02020004,
+ .pmic_data[0] = 0x0084009C,
+ .pmic_data[1] = 0x00A4001C,
+ .seq = { 0x03, 0x0F, 0x00, 0x24, 0x54, 0x10, 0x09, 0x03, 0x01,
+ 0x10, 0x54, 0x30, 0x0C, 0x24, 0x30, 0x0F },
+ .start_index[PM_SLEEP_MODE_STBY] = 0,
+ .start_index[PM_SLEEP_MODE_SPC] = 2,
+};
+
+static inline void spm_register_write(struct spm_driver_data *drv,
+ enum spm_reg reg, u32 val)
+{
+ if (drv->reg_data->reg_offset[reg])
+ writel_relaxed(val, drv->reg_base +
+ drv->reg_data->reg_offset[reg]);
+}
+
+/* Ensure a guaranteed write before returning */
+static inline void spm_register_write_sync(struct spm_driver_data *drv,
+ enum spm_reg reg, u32 val)
+{
+ u32 ret;
+
+ if (!drv->reg_data->reg_offset[reg])
+ return;
+
+ do {
+ writel_relaxed(val, drv->reg_base +
+ drv->reg_data->reg_offset[reg]);
+ ret = readl_relaxed(drv->reg_base +
+ drv->reg_data->reg_offset[reg]);
+ if (ret == val)
+ break;
+ cpu_relax();
+ } while (1);
+}
+
+static inline u32 spm_register_read(struct spm_driver_data *drv,
+ enum spm_reg reg)
+{
+ return readl_relaxed(drv->reg_base + drv->reg_data->reg_offset[reg]);
+}
+
+void spm_set_low_power_mode(struct spm_driver_data *drv,
+ enum pm_sleep_mode mode)
+{
+ u32 start_index;
+ u32 ctl_val;
+
+ start_index = drv->reg_data->start_index[mode];
+
+ ctl_val = spm_register_read(drv, SPM_REG_SPM_CTL);
+ ctl_val &= ~(SPM_CTL_INDEX << SPM_CTL_INDEX_SHIFT);
+ ctl_val |= start_index << SPM_CTL_INDEX_SHIFT;
+ ctl_val |= SPM_CTL_EN;
+ spm_register_write_sync(drv, SPM_REG_SPM_CTL, ctl_val);
+}
+
+static const struct of_device_id spm_match_table[] = {
+ { .compatible = "qcom,sdm660-gold-saw2-v4.1-l2",
+ .data = &spm_reg_660_gold_l2 },
+ { .compatible = "qcom,sdm660-silver-saw2-v4.1-l2",
+ .data = &spm_reg_660_silver_l2 },
+ { .compatible = "qcom,msm8226-saw2-v2.1-cpu",
+ .data = &spm_reg_8226_cpu },
+ { .compatible = "qcom,msm8909-saw2-v3.0-cpu",
+ .data = &spm_reg_8909_cpu },
+ { .compatible = "qcom,msm8916-saw2-v3.0-cpu",
+ .data = &spm_reg_8916_cpu },
+ { .compatible = "qcom,msm8974-saw2-v2.1-cpu",
+ .data = &spm_reg_8974_8084_cpu },
+ { .compatible = "qcom,msm8998-gold-saw2-v4.1-l2",
+ .data = &spm_reg_8998_gold_l2 },
+ { .compatible = "qcom,msm8998-silver-saw2-v4.1-l2",
+ .data = &spm_reg_8998_silver_l2 },
+ { .compatible = "qcom,apq8084-saw2-v2.1-cpu",
+ .data = &spm_reg_8974_8084_cpu },
+ { .compatible = "qcom,apq8064-saw2-v1.1-cpu",
+ .data = &spm_reg_8064_cpu },
+ { },
+};
+MODULE_DEVICE_TABLE(of, spm_match_table);
+
+static int spm_dev_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match_id;
+ struct spm_driver_data *drv;
+ struct resource *res;
+ void __iomem *addr;
+
+ drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
+ if (!drv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ drv->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(drv->reg_base))
+ return PTR_ERR(drv->reg_base);
+
+ match_id = of_match_node(spm_match_table, pdev->dev.of_node);
+ if (!match_id)
+ return -ENODEV;
+
+ drv->reg_data = match_id->data;
+ platform_set_drvdata(pdev, drv);
+
+ /* Write the SPM sequences first.. */
+ addr = drv->reg_base + drv->reg_data->reg_offset[SPM_REG_SEQ_ENTRY];
+ __iowrite32_copy(addr, drv->reg_data->seq,
+ ARRAY_SIZE(drv->reg_data->seq) / 4);
+
+ /*
+ * ..and then the control registers.
+	 * On some SoCs, if the control registers are written first and the
+	 * CPU was held in reset, the reset signal could trigger the SPM state
+	 * machine before the sequences are completely written.
+ */
+ spm_register_write(drv, SPM_REG_AVS_CTL, drv->reg_data->avs_ctl);
+ spm_register_write(drv, SPM_REG_AVS_LIMIT, drv->reg_data->avs_limit);
+ spm_register_write(drv, SPM_REG_CFG, drv->reg_data->spm_cfg);
+ spm_register_write(drv, SPM_REG_DLY, drv->reg_data->spm_dly);
+ spm_register_write(drv, SPM_REG_PMIC_DLY, drv->reg_data->pmic_dly);
+ spm_register_write(drv, SPM_REG_PMIC_DATA_0,
+ drv->reg_data->pmic_data[0]);
+ spm_register_write(drv, SPM_REG_PMIC_DATA_1,
+ drv->reg_data->pmic_data[1]);
+
+ /* Set up Standby as the default low power mode */
+ if (drv->reg_data->reg_offset[SPM_REG_SPM_CTL])
+ spm_set_low_power_mode(drv, PM_SLEEP_MODE_STBY);
+
+ return 0;
+}
+
+static struct platform_driver spm_driver = {
+ .probe = spm_dev_probe,
+ .driver = {
+ .name = "qcom_spm",
+ .of_match_table = spm_match_table,
+ },
+};
+
+static int __init qcom_spm_init(void)
+{
+ return platform_driver_register(&spm_driver);
+}
+arch_initcall(qcom_spm_init);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/trace-rpmh.h b/drivers/soc/qcom/trace-rpmh.h
new file mode 100644
index 000000000..feb0cb455
--- /dev/null
+++ b/drivers/soc/qcom/trace-rpmh.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#if !defined(_TRACE_RPMH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RPMH_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rpmh
+
+#include <linux/tracepoint.h>
+#include "rpmh-internal.h"
+
+TRACE_EVENT(rpmh_tx_done,
+
+ TP_PROTO(struct rsc_drv *d, int m, const struct tcs_request *r, int e),
+
+ TP_ARGS(d, m, r, e),
+
+ TP_STRUCT__entry(
+ __string(name, d->name)
+ __field(int, m)
+ __field(u32, addr)
+ __field(u32, data)
+ __field(int, err)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, d->name);
+ __entry->m = m;
+ __entry->addr = r->cmds[0].addr;
+ __entry->data = r->cmds[0].data;
+ __entry->err = e;
+ ),
+
+ TP_printk("%s: ack: tcs-m: %d addr: %#x data: %#x errno: %d",
+ __get_str(name), __entry->m, __entry->addr, __entry->data,
+ __entry->err)
+);
+
+TRACE_EVENT(rpmh_send_msg,
+
+ TP_PROTO(struct rsc_drv *d, int m, int n, u32 h,
+ const struct tcs_cmd *c),
+
+ TP_ARGS(d, m, n, h, c),
+
+ TP_STRUCT__entry(
+ __string(name, d->name)
+ __field(int, m)
+ __field(int, n)
+ __field(u32, hdr)
+ __field(u32, addr)
+ __field(u32, data)
+ __field(bool, wait)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, d->name);
+ __entry->m = m;
+ __entry->n = n;
+ __entry->hdr = h;
+ __entry->addr = c->addr;
+ __entry->data = c->data;
+ __entry->wait = c->wait;
+ ),
+
+ TP_printk("%s: send-msg: tcs(m): %d cmd(n): %d msgid: %#x addr: %#x data: %#x complete: %d",
+ __get_str(name), __entry->m, __entry->n, __entry->hdr,
+ __entry->addr, __entry->data, __entry->wait)
+);
+
+#endif /* _TRACE_RPMH_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace-rpmh
+
+#include <trace/define_trace.h>
diff --git a/drivers/soc/qcom/wcnss_ctrl.c b/drivers/soc/qcom/wcnss_ctrl.c
new file mode 100644
index 000000000..2a06d631e
--- /dev/null
+++ b/drivers/soc/qcom/wcnss_ctrl.c
@@ -0,0 +1,365 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016, Linaro Ltd.
+ * Copyright (c) 2015, Sony Mobile Communications Inc.
+ */
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/rpmsg.h>
+#include <linux/soc/qcom/wcnss_ctrl.h>
+
+#define WCNSS_REQUEST_TIMEOUT (5 * HZ)
+#define WCNSS_CBC_TIMEOUT (10 * HZ)
+
+#define WCNSS_ACK_DONE_BOOTING 1
+#define WCNSS_ACK_COLD_BOOTING 2
+
+#define NV_FRAGMENT_SIZE 3072
+#define NVBIN_FILE "wlan/prima/WCNSS_qcom_wlan_nv.bin"
+
+/**
+ * struct wcnss_ctrl - driver context
+ * @dev: device handle
+ * @channel: SMD channel handle
+ * @ack: completion for outstanding requests
+ * @cbc: completion for cbc complete indication
+ * @ack_status: status of the outstanding request
+ * @probe_work: worker for uploading nv binary
+ */
+struct wcnss_ctrl {
+ struct device *dev;
+ struct rpmsg_endpoint *channel;
+
+ struct completion ack;
+ struct completion cbc;
+ int ack_status;
+
+ struct work_struct probe_work;
+};
+
+/* message types */
+enum {
+ WCNSS_VERSION_REQ = 0x01000000,
+ WCNSS_VERSION_RESP,
+ WCNSS_DOWNLOAD_NV_REQ,
+ WCNSS_DOWNLOAD_NV_RESP,
+ WCNSS_UPLOAD_CAL_REQ,
+ WCNSS_UPLOAD_CAL_RESP,
+ WCNSS_DOWNLOAD_CAL_REQ,
+ WCNSS_DOWNLOAD_CAL_RESP,
+ WCNSS_VBAT_LEVEL_IND,
+ WCNSS_BUILD_VERSION_REQ,
+ WCNSS_BUILD_VERSION_RESP,
+ WCNSS_PM_CONFIG_REQ,
+ WCNSS_CBC_COMPLETE_IND,
+};
+
+/**
+ * struct wcnss_msg_hdr - common packet header for requests and responses
+ * @type: packet message type
+ * @len: total length of the packet, including this header
+ */
+struct wcnss_msg_hdr {
+ u32 type;
+ u32 len;
+} __packed;
+
+/*
+ * struct wcnss_version_resp - version request response
+ */
+struct wcnss_version_resp {
+ struct wcnss_msg_hdr hdr;
+ u8 major;
+ u8 minor;
+ u8 version;
+ u8 revision;
+} __packed;
+
+/**
+ * struct wcnss_download_nv_req - firmware fragment request
+ * @hdr: common packet wcnss_msg_hdr header
+ * @seq: sequence number of this fragment
+ * @last: boolean indicator of this being the last fragment of the binary
+ * @frag_size: length of this fragment
+ * @fragment: fragment data
+ */
+struct wcnss_download_nv_req {
+ struct wcnss_msg_hdr hdr;
+ u16 seq;
+ u16 last;
+ u32 frag_size;
+ u8 fragment[];
+} __packed;
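+
+/*
+ * Illustrative sketch of the fragmentation performed by wcnss_download_nv()
+ * below, assuming a firmware blob of fw->size bytes: the blob is cut into
+ * NV_FRAGMENT_SIZE (3072 byte) chunks sent with an increasing @seq, and the
+ * final request carries @last = 1 with @frag_size and hdr.len trimmed to the
+ * remaining bytes. A hypothetical 7000 byte image would therefore go out as
+ * three requests with frag_size 3072, 3072 and 856.
+ */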
+
+/**
+ * struct wcnss_download_nv_resp - firmware download response
+ * @hdr: common packet wcnss_msg_hdr header
+ * @status: boolean to indicate success of the download
+ */
+struct wcnss_download_nv_resp {
+ struct wcnss_msg_hdr hdr;
+ u8 status;
+} __packed;
+
+/**
+ * wcnss_ctrl_smd_callback() - handler for SMD responses
+ * @rpdev: remote processor message device pointer
+ * @data: pointer to the incoming data packet
+ * @count: size of the incoming data packet
+ * @priv: unused
+ * @addr: unused
+ *
+ * Handles any incoming packets from the remote WCNSS_CTRL service.
+ */
+static int wcnss_ctrl_smd_callback(struct rpmsg_device *rpdev,
+ void *data,
+ int count,
+ void *priv,
+ u32 addr)
+{
+ struct wcnss_ctrl *wcnss = dev_get_drvdata(&rpdev->dev);
+ const struct wcnss_download_nv_resp *nvresp;
+ const struct wcnss_version_resp *version;
+ const struct wcnss_msg_hdr *hdr = data;
+
+ switch (hdr->type) {
+ case WCNSS_VERSION_RESP:
+ if (count != sizeof(*version)) {
+ dev_err(wcnss->dev,
+ "invalid size of version response\n");
+ break;
+ }
+
+ version = data;
+ dev_info(wcnss->dev, "WCNSS Version %d.%d %d.%d\n",
+ version->major, version->minor,
+ version->version, version->revision);
+
+ complete(&wcnss->ack);
+ break;
+ case WCNSS_DOWNLOAD_NV_RESP:
+ if (count != sizeof(*nvresp)) {
+ dev_err(wcnss->dev,
+ "invalid size of download response\n");
+ break;
+ }
+
+ nvresp = data;
+ wcnss->ack_status = nvresp->status;
+ complete(&wcnss->ack);
+ break;
+ case WCNSS_CBC_COMPLETE_IND:
+ dev_dbg(wcnss->dev, "cold boot complete\n");
+ complete(&wcnss->cbc);
+ break;
+ default:
+ dev_info(wcnss->dev, "unknown message type %d\n", hdr->type);
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * wcnss_request_version() - send a version request to WCNSS
+ * @wcnss: wcnss ctrl driver context
+ */
+static int wcnss_request_version(struct wcnss_ctrl *wcnss)
+{
+ struct wcnss_msg_hdr msg;
+ int ret;
+
+ msg.type = WCNSS_VERSION_REQ;
+ msg.len = sizeof(msg);
+ ret = rpmsg_send(wcnss->channel, &msg, sizeof(msg));
+ if (ret < 0)
+ return ret;
+
+ ret = wait_for_completion_timeout(&wcnss->ack, WCNSS_CBC_TIMEOUT);
+ if (!ret) {
+ dev_err(wcnss->dev, "timeout waiting for version response\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/**
+ * wcnss_download_nv() - send nv binary to WCNSS
+ * @wcnss: wcnss_ctrl state handle
+ * @expect_cbc: indicator to the caller that a cbc event is expected
+ *
+ * Returns 0 on success. Negative errno on failure.
+ */
+static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc)
+{
+ struct wcnss_download_nv_req *req;
+ const struct firmware *fw;
+ struct device *dev = wcnss->dev;
+ const char *nvbin = NVBIN_FILE;
+ const void *data;
+ ssize_t left;
+ int ret;
+
+ req = kzalloc(sizeof(*req) + NV_FRAGMENT_SIZE, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ ret = of_property_read_string(dev->of_node, "firmware-name", &nvbin);
+ if (ret < 0 && ret != -EINVAL)
+ goto free_req;
+
+ ret = request_firmware(&fw, nvbin, dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to load nv file %s: %d\n", nvbin, ret);
+ goto free_req;
+ }
+
+ data = fw->data;
+ left = fw->size;
+
+ req->hdr.type = WCNSS_DOWNLOAD_NV_REQ;
+ req->hdr.len = sizeof(*req) + NV_FRAGMENT_SIZE;
+
+ req->last = 0;
+ req->frag_size = NV_FRAGMENT_SIZE;
+
+ req->seq = 0;
+ do {
+ if (left <= NV_FRAGMENT_SIZE) {
+ req->last = 1;
+ req->frag_size = left;
+ req->hdr.len = sizeof(*req) + left;
+ }
+
+ memcpy(req->fragment, data, req->frag_size);
+
+ ret = rpmsg_send(wcnss->channel, req, req->hdr.len);
+ if (ret < 0) {
+ dev_err(dev, "failed to send smd packet\n");
+ goto release_fw;
+ }
+
+ /* Increment for next fragment */
+ req->seq++;
+
+ data += NV_FRAGMENT_SIZE;
+ left -= NV_FRAGMENT_SIZE;
+ } while (left > 0);
+
+ ret = wait_for_completion_timeout(&wcnss->ack, WCNSS_REQUEST_TIMEOUT);
+ if (!ret) {
+ dev_err(dev, "timeout waiting for nv upload ack\n");
+ ret = -ETIMEDOUT;
+ } else {
+ *expect_cbc = wcnss->ack_status == WCNSS_ACK_COLD_BOOTING;
+ ret = 0;
+ }
+
+release_fw:
+ release_firmware(fw);
+free_req:
+ kfree(req);
+
+ return ret;
+}
+
+/**
+ * qcom_wcnss_open_channel() - open additional SMD channel to WCNSS
+ * @wcnss: wcnss handle, retrieved from drvdata
+ * @name: SMD channel name
+ * @cb: callback to handle incoming data on the channel
+ * @priv: private data for use in the callback
+ */
+struct rpmsg_endpoint *qcom_wcnss_open_channel(void *wcnss, const char *name, rpmsg_rx_cb_t cb, void *priv)
+{
+ struct rpmsg_channel_info chinfo;
+ struct wcnss_ctrl *_wcnss = wcnss;
+
+ strscpy(chinfo.name, name, sizeof(chinfo.name));
+ chinfo.src = RPMSG_ADDR_ANY;
+ chinfo.dst = RPMSG_ADDR_ANY;
+
+ return rpmsg_create_ept(_wcnss->channel->rpdev, cb, priv, chinfo);
+}
+EXPORT_SYMBOL(qcom_wcnss_open_channel);
+
+static void wcnss_async_probe(struct work_struct *work)
+{
+ struct wcnss_ctrl *wcnss = container_of(work, struct wcnss_ctrl, probe_work);
+ bool expect_cbc;
+ int ret;
+
+ ret = wcnss_request_version(wcnss);
+ if (ret < 0)
+ return;
+
+ ret = wcnss_download_nv(wcnss, &expect_cbc);
+ if (ret < 0)
+ return;
+
+ /* Wait for pending cold boot completion if indicated by the nv downloader */
+ if (expect_cbc) {
+ ret = wait_for_completion_timeout(&wcnss->cbc, WCNSS_REQUEST_TIMEOUT);
+ if (!ret)
+ dev_err(wcnss->dev, "expected cold boot completion\n");
+ }
+
+ of_platform_populate(wcnss->dev->of_node, NULL, NULL, wcnss->dev);
+}
+
+static int wcnss_ctrl_probe(struct rpmsg_device *rpdev)
+{
+ struct wcnss_ctrl *wcnss;
+
+ wcnss = devm_kzalloc(&rpdev->dev, sizeof(*wcnss), GFP_KERNEL);
+ if (!wcnss)
+ return -ENOMEM;
+
+ wcnss->dev = &rpdev->dev;
+ wcnss->channel = rpdev->ept;
+
+ init_completion(&wcnss->ack);
+ init_completion(&wcnss->cbc);
+ INIT_WORK(&wcnss->probe_work, wcnss_async_probe);
+
+ dev_set_drvdata(&rpdev->dev, wcnss);
+
+ schedule_work(&wcnss->probe_work);
+
+ return 0;
+}
+
+static void wcnss_ctrl_remove(struct rpmsg_device *rpdev)
+{
+ struct wcnss_ctrl *wcnss = dev_get_drvdata(&rpdev->dev);
+
+ cancel_work_sync(&wcnss->probe_work);
+ of_platform_depopulate(&rpdev->dev);
+}
+
+static const struct of_device_id wcnss_ctrl_of_match[] = {
+ { .compatible = "qcom,wcnss", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, wcnss_ctrl_of_match);
+
+static struct rpmsg_driver wcnss_ctrl_driver = {
+ .probe = wcnss_ctrl_probe,
+ .remove = wcnss_ctrl_remove,
+ .callback = wcnss_ctrl_smd_callback,
+ .drv = {
+ .name = "qcom_wcnss_ctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = wcnss_ctrl_of_match,
+ },
+};
+
+module_rpmsg_driver(wcnss_ctrl_driver);
+
+MODULE_DESCRIPTION("Qualcomm WCNSS control driver");
+MODULE_LICENSE("GPL v2");