Diffstat
-rw-r--r--  drivers/soc/ti/Kconfig               103
-rw-r--r--  drivers/soc/ti/Makefile               16
-rw-r--r--  drivers/soc/ti/k3-ringacc.c         1555
-rw-r--r--  drivers/soc/ti/k3-socinfo.c          156
-rw-r--r--  drivers/soc/ti/knav_dma.c            811
-rw-r--r--  drivers/soc/ti/knav_qmss.h           387
-rw-r--r--  drivers/soc/ti/knav_qmss_acc.c       584
-rw-r--r--  drivers/soc/ti/knav_qmss_queue.c    1908
-rw-r--r--  drivers/soc/ti/omap_prm.c            994
-rw-r--r--  drivers/soc/ti/pm33xx.c              611
-rw-r--r--  drivers/soc/ti/pruss.c               358
-rw-r--r--  drivers/soc/ti/smartreflex.c        1037
-rw-r--r--  drivers/soc/ti/ti_sci_inta_msi.c     124
-rw-r--r--  drivers/soc/ti/ti_sci_pm_domains.c   208
-rw-r--r--  drivers/soc/ti/wkup_m3_ipc.c         777
15 files changed, 9629 insertions, 0 deletions
diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig
new file mode 100644
index 000000000..7e2fb1c16
--- /dev/null
+++ b/drivers/soc/ti/Kconfig
@@ -0,0 +1,103 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+#
+# TI SOC drivers
+#
+menuconfig SOC_TI
+ bool "TI SOC drivers support"
+
+if SOC_TI
+
+config KEYSTONE_NAVIGATOR_QMSS
+ tristate "Keystone Queue Manager Sub System"
+ depends on ARCH_KEYSTONE
+ help
+ Say y here to enable support for the Keystone multicore Navigator
+ Queue Manager. The Queue Manager is a hardware module responsible
+ for accelerating management of the packet queues. Packets are
+ queued/de-queued by writing/reading a descriptor address to/from a
+ particular memory-mapped location in the Queue Manager module.
+
+ If unsure, say N.
+
+config KEYSTONE_NAVIGATOR_DMA
+ tristate "TI Keystone Navigator Packet DMA support"
+ depends on ARCH_KEYSTONE
+ help
+ Say y to enable support for the Keystone Navigator Packet DMA on
+ the Keystone family of devices. It sets up the DMA channels for the
+ Queue Manager Sub System.
+
+ If unsure, say N.
+
+config AMX3_PM
+ tristate "AMx3 Power Management"
+ depends on SOC_AM33XX || SOC_AM43XX
+ depends on WKUP_M3_IPC && TI_EMIF_SRAM && SRAM && RTC_DRV_OMAP
+ help
+ Enable power management on AM335x and AM437x. Required for the
+ suspend-to-mem and standby states on both AM335x and AM437x
+ platforms, and for deeper cpuidle C-states on AM335x. Also required
+ for the RTC and DDR-in-self-refresh low power mode on AM437x
+ platforms.
+
+config WKUP_M3_IPC
+ tristate "TI AMx3 Wkup-M3 IPC Driver"
+ depends on WKUP_M3_RPROC
+ depends on OMAP2PLUS_MBOX
+ help
+ TI AM33XX and AM43XX have a Cortex M3, the Wakeup M3, to handle
+ low power transitions. This IPC driver provides the necessary API
+ to communicate with and use the Wakeup M3 for PM features like
+ suspend/resume, and boots it using the wkup_m3_rproc driver.
+
+config TI_SCI_PM_DOMAINS
+ tristate "TI SCI PM Domains Driver"
+ depends on TI_SCI_PROTOCOL
+ depends on PM_GENERIC_DOMAINS
+ help
+ Generic power domain implementation for TI devices implementing
+ the TI SCI protocol.
+
+ To compile this as a module, choose M here. The module will be
+ called ti_sci_pm_domains. Note this is needed early in boot before
+ rootfs may be available.
+
+config TI_K3_RINGACC
+ bool "K3 Ring accelerator Sub System"
+ depends on ARCH_K3 || COMPILE_TEST
+ depends on TI_SCI_INTA_IRQCHIP
+ help
+ Say y here to support the K3 Ring accelerator module.
+ The Ring Accelerator (RINGACC or RA) provides hardware acceleration
+ to enable straightforward passing of work between a producer
+ and a consumer. There is one RINGACC module per NAVSS on TI AM65x SoCs.
+
+ If unsure, say N.
+
+config TI_K3_SOCINFO
+ bool
+ depends on ARCH_K3 || COMPILE_TEST
+ select SOC_BUS
+ select MFD_SYSCON
+ help
+ Include support for the SoC bus socinfo for the TI K3 Multicore SoC
+ platforms to provide information about the SoC family and
+ variant to user space.
+
+config TI_PRUSS
+ tristate "TI PRU-ICSS Subsystem Platform drivers"
+ depends on SOC_AM33XX || SOC_AM43XX || SOC_DRA7XX || ARCH_KEYSTONE || ARCH_K3
+ select MFD_SYSCON
+ help
+ TI PRU-ICSS Subsystem platform specific support.
+
+ Say Y or M here to support the Programmable Realtime Unit (PRU)
+ processors on various TI SoCs. It's safe to say N here if you're
+ not interested in the PRU or if you are unsure.
+
+endif # SOC_TI
+
+config TI_SCI_INTA_MSI_DOMAIN
+ bool
+ select GENERIC_MSI_IRQ_DOMAIN
+ help
+ Driver to enable Interrupt Aggregator specific MSI Domain.
diff --git a/drivers/soc/ti/Makefile b/drivers/soc/ti/Makefile
new file mode 100644
index 000000000..cc3c972fa
--- /dev/null
+++ b/drivers/soc/ti/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# TI Keystone SOC drivers
+#
+obj-$(CONFIG_KEYSTONE_NAVIGATOR_QMSS) += knav_qmss.o
+knav_qmss-y := knav_qmss_queue.o knav_qmss_acc.o
+obj-$(CONFIG_KEYSTONE_NAVIGATOR_DMA) += knav_dma.o
+obj-$(CONFIG_AMX3_PM) += pm33xx.o
+obj-$(CONFIG_ARCH_OMAP2PLUS) += omap_prm.o
+obj-$(CONFIG_WKUP_M3_IPC) += wkup_m3_ipc.o
+obj-$(CONFIG_TI_SCI_PM_DOMAINS) += ti_sci_pm_domains.o
+obj-$(CONFIG_TI_SCI_INTA_MSI_DOMAIN) += ti_sci_inta_msi.o
+obj-$(CONFIG_TI_K3_RINGACC) += k3-ringacc.o
+obj-$(CONFIG_TI_K3_SOCINFO) += k3-socinfo.o
+obj-$(CONFIG_TI_PRUSS) += pruss.o
+obj-$(CONFIG_POWER_AVS_OMAP) += smartreflex.o
diff --git a/drivers/soc/ti/k3-ringacc.c b/drivers/soc/ti/k3-ringacc.c
new file mode 100644
index 000000000..f7bf18b82
--- /dev/null
+++ b/drivers/soc/ti/k3-ringacc.c
@@ -0,0 +1,1555 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TI K3 NAVSS Ring Accelerator subsystem driver
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/sys_soc.h>
+#include <linux/dma/ti-cppi5.h>
+#include <linux/soc/ti/k3-ringacc.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+#include <linux/soc/ti/ti_sci_inta_msi.h>
+#include <linux/of_irq.h>
+#include <linux/irqdomain.h>
+
+static LIST_HEAD(k3_ringacc_list);
+static DEFINE_MUTEX(k3_ringacc_list_lock);
+
+#define K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK GENMASK(19, 0)
+#define K3_DMARING_CFG_RING_SIZE_ELCNT_MASK GENMASK(15, 0)
+
+/**
+ * struct k3_ring_rt_regs - The RA realtime Control/Status Registers region
+ *
+ * @resv_16: Reserved
+ * @db: Ring Doorbell Register
+ * @resv_4: Reserved
+ * @occ: Ring Occupancy Register
+ * @indx: Ring Current Index Register
+ * @hwocc: Ring Hardware Occupancy Register
+ * @hwindx: Ring Hardware Current Index Register
+ */
+struct k3_ring_rt_regs {
+ u32 resv_16[4];
+ u32 db;
+ u32 resv_4[1];
+ u32 occ;
+ u32 indx;
+ u32 hwocc;
+ u32 hwindx;
+};
+
+#define K3_RINGACC_RT_REGS_STEP 0x1000
+#define K3_DMARING_RT_REGS_STEP 0x2000
+#define K3_DMARING_RT_REGS_REVERSE_OFS 0x1000
+#define K3_RINGACC_RT_OCC_MASK GENMASK(20, 0)
+#define K3_DMARING_RT_OCC_TDOWN_COMPLETE BIT(31)
+#define K3_DMARING_RT_DB_ENTRY_MASK GENMASK(7, 0)
+#define K3_DMARING_RT_DB_TDOWN_ACK BIT(31)
+
+/**
+ * struct k3_ring_fifo_regs - The Ring Accelerator Queues Registers region
+ *
+ * @head_data: Ring Head Entry Data Registers
+ * @tail_data: Ring Tail Entry Data Registers
+ * @peek_head_data: Ring Peek Head Entry Data Regs
+ * @peek_tail_data: Ring Peek Tail Entry Data Regs
+ */
+struct k3_ring_fifo_regs {
+ u32 head_data[128];
+ u32 tail_data[128];
+ u32 peek_head_data[128];
+ u32 peek_tail_data[128];
+};
+
+/**
+ * struct k3_ringacc_proxy_gcfg_regs - RA Proxy Global Config MMIO Region
+ *
+ * @revision: Revision Register
+ * @config: Config Register
+ */
+struct k3_ringacc_proxy_gcfg_regs {
+ u32 revision;
+ u32 config;
+};
+
+#define K3_RINGACC_PROXY_CFG_THREADS_MASK GENMASK(15, 0)
+
+/**
+ * struct k3_ringacc_proxy_target_regs - Proxy Datapath MMIO Region
+ *
+ * @control: Proxy Control Register
+ * @status: Proxy Status Register
+ * @resv_512: Reserved
+ * @data: Proxy Data Register
+ */
+struct k3_ringacc_proxy_target_regs {
+ u32 control;
+ u32 status;
+ u8 resv_512[504];
+ u32 data[128];
+};
+
+#define K3_RINGACC_PROXY_TARGET_STEP 0x1000
+#define K3_RINGACC_PROXY_NOT_USED (-1)
+
+enum k3_ringacc_proxy_access_mode {
+ PROXY_ACCESS_MODE_HEAD = 0,
+ PROXY_ACCESS_MODE_TAIL = 1,
+ PROXY_ACCESS_MODE_PEEK_HEAD = 2,
+ PROXY_ACCESS_MODE_PEEK_TAIL = 3,
+};
+
+#define K3_RINGACC_FIFO_WINDOW_SIZE_BYTES (512U)
+#define K3_RINGACC_FIFO_REGS_STEP 0x1000
+#define K3_RINGACC_MAX_DB_RING_CNT (127U)
+
+struct k3_ring_ops {
+ int (*push_tail)(struct k3_ring *ring, void *elm);
+ int (*push_head)(struct k3_ring *ring, void *elm);
+ int (*pop_tail)(struct k3_ring *ring, void *elm);
+ int (*pop_head)(struct k3_ring *ring, void *elm);
+};
+
+/**
+ * struct k3_ring_state - Internal state tracking structure
+ *
+ * @free: Number of free entries
+ * @occ: Occupancy
+ * @windex: Write index
+ * @rindex: Read index
+ */
+struct k3_ring_state {
+ u32 free;
+ u32 occ;
+ u32 windex;
+ u32 rindex;
+ u32 tdown_complete:1;
+};
+
+/**
+ * struct k3_ring - RA Ring descriptor
+ *
+ * @rt: Ring control/status registers
+ * @fifos: Ring queues registers
+ * @proxy: Ring Proxy Datapath registers
+ * @ring_mem_dma: Ring buffer dma address
+ * @ring_mem_virt: Ring buffer virt address
+ * @ops: Ring operations
+ * @size: Ring size in elements
+ * @elm_size: Size of the ring element
+ * @mode: Ring mode
+ * @flags: flags
+ * @state: Ring state
+ * @ring_id: Ring Id
+ * @parent: Pointer to struct @k3_ringacc
+ * @use_count: Use count for shared rings
+ * @proxy_id: RA Ring Proxy Id (only if @K3_RINGACC_RING_USE_PROXY)
+ * @dma_dev: device to be used for DMA API (allocation, mapping)
+ * @asel: Address Space Select value for physical addresses
+ */
+struct k3_ring {
+ struct k3_ring_rt_regs __iomem *rt;
+ struct k3_ring_fifo_regs __iomem *fifos;
+ struct k3_ringacc_proxy_target_regs __iomem *proxy;
+ dma_addr_t ring_mem_dma;
+ void *ring_mem_virt;
+ struct k3_ring_ops *ops;
+ u32 size;
+ enum k3_ring_size elm_size;
+ enum k3_ring_mode mode;
+ u32 flags;
+#define K3_RING_FLAG_BUSY BIT(1)
+#define K3_RING_FLAG_SHARED BIT(2)
+#define K3_RING_FLAG_REVERSE BIT(3)
+ struct k3_ring_state state;
+ u32 ring_id;
+ struct k3_ringacc *parent;
+ u32 use_count;
+ int proxy_id;
+ struct device *dma_dev;
+ u32 asel;
+#define K3_ADDRESS_ASEL_SHIFT 48
+};
+
+struct k3_ringacc_ops {
+ int (*init)(struct platform_device *pdev, struct k3_ringacc *ringacc);
+};
+
+/**
+ * struct k3_ringacc - Rings accelerator descriptor
+ *
+ * @dev: pointer to RA device
+ * @proxy_gcfg: RA proxy global config registers
+ * @proxy_target_base: RA proxy datapath region
+ * @num_rings: number of rings in RA
+ * @rings_inuse: bitfield for ring usage tracking
+ * @rm_gp_range: general purpose rings range from tisci
+ * @dma_ring_reset_quirk: DMA reset w/a enable
+ * @num_proxies: number of RA proxies
+ * @proxy_inuse: bitfield for proxy usage tracking
+ * @rings: array of rings descriptors (struct @k3_ring)
+ * @list: list of RAs in the system
+ * @req_lock: protect rings allocation
+ * @tisci: pointer to ti-sci handle
+ * @tisci_ring_ops: ti-sci rings ops
+ * @tisci_dev_id: ti-sci device id
+ * @ops: SoC specific ringacc operation
+ * @dma_rings: indicate DMA ring (dual ring within BCDMA/PKTDMA)
+ */
+struct k3_ringacc {
+ struct device *dev;
+ struct k3_ringacc_proxy_gcfg_regs __iomem *proxy_gcfg;
+ void __iomem *proxy_target_base;
+ u32 num_rings; /* number of rings in Ringacc module */
+ unsigned long *rings_inuse;
+ struct ti_sci_resource *rm_gp_range;
+
+ bool dma_ring_reset_quirk;
+ u32 num_proxies;
+ unsigned long *proxy_inuse;
+
+ struct k3_ring *rings;
+ struct list_head list;
+ struct mutex req_lock; /* protect rings allocation */
+
+ const struct ti_sci_handle *tisci;
+ const struct ti_sci_rm_ringacc_ops *tisci_ring_ops;
+ u32 tisci_dev_id;
+
+ const struct k3_ringacc_ops *ops;
+ bool dma_rings;
+};
+
+/**
+ * struct k3_ringacc_soc_data - Rings accelerator SoC data
+ *
+ * @dma_ring_reset_quirk: DMA reset w/a enable
+ */
+struct k3_ringacc_soc_data {
+ unsigned dma_ring_reset_quirk:1;
+};
+
+static int k3_ringacc_ring_read_occ(struct k3_ring *ring)
+{
+ return readl(&ring->rt->occ) & K3_RINGACC_RT_OCC_MASK;
+}
+
+static void k3_ringacc_ring_update_occ(struct k3_ring *ring)
+{
+ u32 val;
+
+ val = readl(&ring->rt->occ);
+
+ ring->state.occ = val & K3_RINGACC_RT_OCC_MASK;
+ ring->state.tdown_complete = !!(val & K3_DMARING_RT_OCC_TDOWN_COMPLETE);
+}
+
+static long k3_ringacc_ring_get_fifo_pos(struct k3_ring *ring)
+{
+ return K3_RINGACC_FIFO_WINDOW_SIZE_BYTES -
+ (4 << ring->elm_size);
+}
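+
+/*
+ * Note (illustrative): elements are always accessed at the end of the
+ * 512-byte FIFO window, e.g. 8-byte elements (elm_size == 1) are read or
+ * written at offset 512 - 8 = 504.
+ */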
+
+static void *k3_ringacc_get_elm_addr(struct k3_ring *ring, u32 idx)
+{
+ return (ring->ring_mem_virt + idx * (4 << ring->elm_size));
+}
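+
+/*
+ * Note (illustrative): an element occupies (4 << elm_size) bytes, so
+ * elm_size 0..6 maps to 4..256 bytes; e.g. idx 3 with 8-byte elements
+ * (elm_size == 1) lives at byte offset 3 * 8 = 24 in the ring memory.
+ */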
+
+static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem);
+static int k3_dmaring_fwd_pop(struct k3_ring *ring, void *elem);
+static int k3_dmaring_reverse_pop(struct k3_ring *ring, void *elem);
+
+static struct k3_ring_ops k3_ring_mode_ring_ops = {
+ .push_tail = k3_ringacc_ring_push_mem,
+ .pop_head = k3_ringacc_ring_pop_mem,
+};
+
+static struct k3_ring_ops k3_dmaring_fwd_ops = {
+ .push_tail = k3_ringacc_ring_push_mem,
+ .pop_head = k3_dmaring_fwd_pop,
+};
+
+static struct k3_ring_ops k3_dmaring_reverse_ops = {
+ /* Reverse side of the DMA ring can only be popped by SW */
+ .pop_head = k3_dmaring_reverse_pop,
+};
+
+static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem);
+
+static struct k3_ring_ops k3_ring_mode_msg_ops = {
+ .push_tail = k3_ringacc_ring_push_io,
+ .push_head = k3_ringacc_ring_push_head_io,
+ .pop_tail = k3_ringacc_ring_pop_tail_io,
+ .pop_head = k3_ringacc_ring_pop_io,
+};
+
+static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem);
+
+static struct k3_ring_ops k3_ring_mode_proxy_ops = {
+ .push_tail = k3_ringacc_ring_push_tail_proxy,
+ .push_head = k3_ringacc_ring_push_head_proxy,
+ .pop_tail = k3_ringacc_ring_pop_tail_proxy,
+ .pop_head = k3_ringacc_ring_pop_head_proxy,
+};
+
+static void k3_ringacc_ring_dump(struct k3_ring *ring)
+{
+ struct device *dev = ring->parent->dev;
+
+ dev_dbg(dev, "dump ring: %d\n", ring->ring_id);
+ dev_dbg(dev, "dump mem virt %p, dma %pad\n", ring->ring_mem_virt,
+ &ring->ring_mem_dma);
+ dev_dbg(dev, "dump elmsize %d, size %d, mode %d, proxy_id %d\n",
+ ring->elm_size, ring->size, ring->mode, ring->proxy_id);
+ dev_dbg(dev, "dump flags %08X\n", ring->flags);
+
+ dev_dbg(dev, "dump ring_rt_regs: db%08x\n", readl(&ring->rt->db));
+ dev_dbg(dev, "dump occ%08x\n", readl(&ring->rt->occ));
+ dev_dbg(dev, "dump indx%08x\n", readl(&ring->rt->indx));
+ dev_dbg(dev, "dump hwocc%08x\n", readl(&ring->rt->hwocc));
+ dev_dbg(dev, "dump hwindx%08x\n", readl(&ring->rt->hwindx));
+
+ if (ring->ring_mem_virt)
+ print_hex_dump_debug("dump ring_mem_virt ", DUMP_PREFIX_NONE,
+ 16, 1, ring->ring_mem_virt, 16 * 8, false);
+}
+
+struct k3_ring *k3_ringacc_request_ring(struct k3_ringacc *ringacc,
+ int id, u32 flags)
+{
+ int proxy_id = K3_RINGACC_PROXY_NOT_USED;
+
+ mutex_lock(&ringacc->req_lock);
+
+ if (id == K3_RINGACC_RING_ID_ANY) {
+ /* Request for any general purpose ring */
+ struct ti_sci_resource_desc *gp_rings =
+ &ringacc->rm_gp_range->desc[0];
+ unsigned long size;
+
+ size = gp_rings->start + gp_rings->num;
+ id = find_next_zero_bit(ringacc->rings_inuse, size,
+ gp_rings->start);
+ if (id == size)
+ goto error;
+ } else if (id < 0) {
+ goto error;
+ }
+
+ if (test_bit(id, ringacc->rings_inuse) &&
+ !(ringacc->rings[id].flags & K3_RING_FLAG_SHARED))
+ goto error;
+ else if (ringacc->rings[id].flags & K3_RING_FLAG_SHARED)
+ goto out;
+
+ if (flags & K3_RINGACC_RING_USE_PROXY) {
+ proxy_id = find_first_zero_bit(ringacc->proxy_inuse,
+ ringacc->num_proxies);
+ if (proxy_id == ringacc->num_proxies)
+ goto error;
+ }
+
+ if (proxy_id != K3_RINGACC_PROXY_NOT_USED) {
+ set_bit(proxy_id, ringacc->proxy_inuse);
+ ringacc->rings[id].proxy_id = proxy_id;
+ dev_dbg(ringacc->dev, "Giving ring#%d proxy#%d\n", id,
+ proxy_id);
+ } else {
+ dev_dbg(ringacc->dev, "Giving ring#%d\n", id);
+ }
+
+ set_bit(id, ringacc->rings_inuse);
+out:
+ ringacc->rings[id].use_count++;
+ mutex_unlock(&ringacc->req_lock);
+ return &ringacc->rings[id];
+
+error:
+ mutex_unlock(&ringacc->req_lock);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_request_ring);
+
+static int k3_dmaring_request_dual_ring(struct k3_ringacc *ringacc, int fwd_id,
+ struct k3_ring **fwd_ring,
+ struct k3_ring **compl_ring)
+{
+ int ret = 0;
+
+ /*
+ * DMA rings must be requested by ID, completion ring is the reverse
+ * side of the forward ring
+ */
+ if (fwd_id < 0)
+ return -EINVAL;
+
+ mutex_lock(&ringacc->req_lock);
+
+ if (test_bit(fwd_id, ringacc->rings_inuse)) {
+ ret = -EBUSY;
+ goto error;
+ }
+
+ *fwd_ring = &ringacc->rings[fwd_id];
+ *compl_ring = &ringacc->rings[fwd_id + ringacc->num_rings];
+ set_bit(fwd_id, ringacc->rings_inuse);
+ ringacc->rings[fwd_id].use_count++;
+ dev_dbg(ringacc->dev, "Giving ring#%d\n", fwd_id);
+
+ mutex_unlock(&ringacc->req_lock);
+ return 0;
+
+error:
+ mutex_unlock(&ringacc->req_lock);
+ return ret;
+}
+
+int k3_ringacc_request_rings_pair(struct k3_ringacc *ringacc,
+ int fwd_id, int compl_id,
+ struct k3_ring **fwd_ring,
+ struct k3_ring **compl_ring)
+{
+ int ret = 0;
+
+ if (!fwd_ring || !compl_ring)
+ return -EINVAL;
+
+ if (ringacc->dma_rings)
+ return k3_dmaring_request_dual_ring(ringacc, fwd_id,
+ fwd_ring, compl_ring);
+
+ *fwd_ring = k3_ringacc_request_ring(ringacc, fwd_id, 0);
+ if (!(*fwd_ring))
+ return -ENODEV;
+
+ *compl_ring = k3_ringacc_request_ring(ringacc, compl_id, 0);
+ if (!(*compl_ring)) {
+ k3_ringacc_ring_free(*fwd_ring);
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_request_rings_pair);
+
+static void k3_ringacc_ring_reset_sci(struct k3_ring *ring)
+{
+ struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
+ struct k3_ringacc *ringacc = ring->parent;
+ int ret;
+
+ ring_cfg.nav_id = ringacc->tisci_dev_id;
+ ring_cfg.index = ring->ring_id;
+ ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID;
+ ring_cfg.count = ring->size;
+
+ ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
+ if (ret)
+ dev_err(ringacc->dev, "TISCI reset ring fail (%d) ring_idx %d\n",
+ ret, ring->ring_id);
+}
+
+void k3_ringacc_ring_reset(struct k3_ring *ring)
+{
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return;
+
+ memset(&ring->state, 0, sizeof(ring->state));
+
+ k3_ringacc_ring_reset_sci(ring);
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset);
+
+static void k3_ringacc_ring_reconfig_qmode_sci(struct k3_ring *ring,
+ enum k3_ring_mode mode)
+{
+ struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
+ struct k3_ringacc *ringacc = ring->parent;
+ int ret;
+
+ ring_cfg.nav_id = ringacc->tisci_dev_id;
+ ring_cfg.index = ring->ring_id;
+ ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_RING_MODE_VALID;
+ ring_cfg.mode = mode;
+
+ ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
+ if (ret)
+ dev_err(ringacc->dev, "TISCI reconf qmode fail (%d) ring_idx %d\n",
+ ret, ring->ring_id);
+}
+
+void k3_ringacc_ring_reset_dma(struct k3_ring *ring, u32 occ)
+{
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return;
+
+ if (!ring->parent->dma_ring_reset_quirk)
+ goto reset;
+
+ if (!occ)
+ occ = k3_ringacc_ring_read_occ(ring);
+
+ if (occ) {
+ u32 db_ring_cnt, db_ring_cnt_cur;
+
+ dev_dbg(ring->parent->dev, "%s %u occ: %u\n", __func__,
+ ring->ring_id, occ);
+ /* TI-SCI ring reset */
+ k3_ringacc_ring_reset_sci(ring);
+
+ /*
+ * Setup the ring in ring/doorbell mode (if not already in this
+ * mode)
+ */
+ if (ring->mode != K3_RINGACC_RING_MODE_RING)
+ k3_ringacc_ring_reconfig_qmode_sci(
+ ring, K3_RINGACC_RING_MODE_RING);
+ /*
+ * Ring the doorbell 2**22 - ringOcc times.
+ * This will wrap the internal UDMAP ring state occupancy
+ * counter (which is 21-bits wide) to 0.
+ */
+ db_ring_cnt = (1U << 22) - occ;
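+ /* e.g. occ == 5: db_ring_cnt = 4194304 - 5 = 4194299 */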
+
+ while (db_ring_cnt != 0) {
+ /*
+ * Ring the doorbell with the maximum count each
+ * iteration if possible to minimize the total
+ * number of writes
+ */
+ if (db_ring_cnt > K3_RINGACC_MAX_DB_RING_CNT)
+ db_ring_cnt_cur = K3_RINGACC_MAX_DB_RING_CNT;
+ else
+ db_ring_cnt_cur = db_ring_cnt;
+
+ writel(db_ring_cnt_cur, &ring->rt->db);
+ db_ring_cnt -= db_ring_cnt_cur;
+ }
+
+ /* Restore the original ring mode (if not ring mode) */
+ if (ring->mode != K3_RINGACC_RING_MODE_RING)
+ k3_ringacc_ring_reconfig_qmode_sci(ring, ring->mode);
+ }
+
+reset:
+ /* Reset the ring */
+ k3_ringacc_ring_reset(ring);
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset_dma);
+
+static void k3_ringacc_ring_free_sci(struct k3_ring *ring)
+{
+ struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
+ struct k3_ringacc *ringacc = ring->parent;
+ int ret;
+
+ ring_cfg.nav_id = ringacc->tisci_dev_id;
+ ring_cfg.index = ring->ring_id;
+ ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER;
+
+ ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
+ if (ret)
+ dev_err(ringacc->dev, "TISCI ring free fail (%d) ring_idx %d\n",
+ ret, ring->ring_id);
+}
+
+int k3_ringacc_ring_free(struct k3_ring *ring)
+{
+ struct k3_ringacc *ringacc;
+
+ if (!ring)
+ return -EINVAL;
+
+ ringacc = ring->parent;
+
+ /*
+ * DMA rings: the forward and reverse rings share memory and
+ * configuration; only the forward ring is configured, and the reverse
+ * ring is treated as its slave.
+ */
+ if (ringacc->dma_rings && (ring->flags & K3_RING_FLAG_REVERSE))
+ return 0;
+
+ dev_dbg(ring->parent->dev, "flags: 0x%08x\n", ring->flags);
+
+ if (!test_bit(ring->ring_id, ringacc->rings_inuse))
+ return -EINVAL;
+
+ mutex_lock(&ringacc->req_lock);
+
+ if (--ring->use_count)
+ goto out;
+
+ if (!(ring->flags & K3_RING_FLAG_BUSY))
+ goto no_init;
+
+ k3_ringacc_ring_free_sci(ring);
+
+ dma_free_coherent(ring->dma_dev,
+ ring->size * (4 << ring->elm_size),
+ ring->ring_mem_virt, ring->ring_mem_dma);
+ ring->flags = 0;
+ ring->ops = NULL;
+ ring->dma_dev = NULL;
+ ring->asel = 0;
+
+ if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED) {
+ clear_bit(ring->proxy_id, ringacc->proxy_inuse);
+ ring->proxy = NULL;
+ ring->proxy_id = K3_RINGACC_PROXY_NOT_USED;
+ }
+
+no_init:
+ clear_bit(ring->ring_id, ringacc->rings_inuse);
+
+out:
+ mutex_unlock(&ringacc->req_lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_free);
+
+u32 k3_ringacc_get_ring_id(struct k3_ring *ring)
+{
+ if (!ring)
+ return -EINVAL;
+
+ return ring->ring_id;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_id);
+
+u32 k3_ringacc_get_tisci_dev_id(struct k3_ring *ring)
+{
+ if (!ring)
+ return -EINVAL;
+
+ return ring->parent->tisci_dev_id;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_get_tisci_dev_id);
+
+int k3_ringacc_get_ring_irq_num(struct k3_ring *ring)
+{
+ int irq_num;
+
+ if (!ring)
+ return -EINVAL;
+
+ irq_num = msi_get_virq(ring->parent->dev, ring->ring_id);
+ if (irq_num <= 0)
+ irq_num = -EINVAL;
+ return irq_num;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_irq_num);
+
+static int k3_ringacc_ring_cfg_sci(struct k3_ring *ring)
+{
+ struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
+ struct k3_ringacc *ringacc = ring->parent;
+ int ret;
+
+ if (!ringacc->tisci)
+ return -EINVAL;
+
+ ring_cfg.nav_id = ringacc->tisci_dev_id;
+ ring_cfg.index = ring->ring_id;
+ ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER;
+ ring_cfg.addr_lo = lower_32_bits(ring->ring_mem_dma);
+ ring_cfg.addr_hi = upper_32_bits(ring->ring_mem_dma);
+ ring_cfg.count = ring->size;
+ ring_cfg.mode = ring->mode;
+ ring_cfg.size = ring->elm_size;
+ ring_cfg.asel = ring->asel;
+
+ ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
+ if (ret)
+ dev_err(ringacc->dev, "TISCI config ring fail (%d) ring_idx %d\n",
+ ret, ring->ring_id);
+
+ return ret;
+}
+
+static int k3_dmaring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
+{
+ struct k3_ringacc *ringacc;
+ struct k3_ring *reverse_ring;
+ int ret = 0;
+
+ if (cfg->elm_size != K3_RINGACC_RING_ELSIZE_8 ||
+ cfg->mode != K3_RINGACC_RING_MODE_RING ||
+ cfg->size & ~K3_DMARING_CFG_RING_SIZE_ELCNT_MASK)
+ return -EINVAL;
+
+ ringacc = ring->parent;
+
+ /*
+ * DMA rings: the forward and reverse rings share memory and
+ * configuration; only the forward ring is configured, and the reverse
+ * ring is treated as its slave.
+ */
+ if (ringacc->dma_rings && (ring->flags & K3_RING_FLAG_REVERSE))
+ return 0;
+
+ if (!test_bit(ring->ring_id, ringacc->rings_inuse))
+ return -EINVAL;
+
+ ring->size = cfg->size;
+ ring->elm_size = cfg->elm_size;
+ ring->mode = cfg->mode;
+ ring->asel = cfg->asel;
+ ring->dma_dev = cfg->dma_dev;
+ if (!ring->dma_dev) {
+ dev_warn(ringacc->dev, "dma_dev is not provided for ring%d\n",
+ ring->ring_id);
+ ring->dma_dev = ringacc->dev;
+ }
+
+ memset(&ring->state, 0, sizeof(ring->state));
+
+ ring->ops = &k3_dmaring_fwd_ops;
+
+ ring->ring_mem_virt = dma_alloc_coherent(ring->dma_dev,
+ ring->size * (4 << ring->elm_size),
+ &ring->ring_mem_dma, GFP_KERNEL);
+ if (!ring->ring_mem_virt) {
+ dev_err(ringacc->dev, "Failed to alloc ring mem\n");
+ ret = -ENOMEM;
+ goto err_free_ops;
+ }
+
+ ret = k3_ringacc_ring_cfg_sci(ring);
+ if (ret)
+ goto err_free_mem;
+
+ ring->flags |= K3_RING_FLAG_BUSY;
+
+ k3_ringacc_ring_dump(ring);
+
+ /* DMA rings: configure reverse ring */
+ reverse_ring = &ringacc->rings[ring->ring_id + ringacc->num_rings];
+ reverse_ring->size = cfg->size;
+ reverse_ring->elm_size = cfg->elm_size;
+ reverse_ring->mode = cfg->mode;
+ reverse_ring->asel = cfg->asel;
+ memset(&reverse_ring->state, 0, sizeof(reverse_ring->state));
+ reverse_ring->ops = &k3_dmaring_reverse_ops;
+
+ reverse_ring->ring_mem_virt = ring->ring_mem_virt;
+ reverse_ring->ring_mem_dma = ring->ring_mem_dma;
+ reverse_ring->flags |= K3_RING_FLAG_BUSY;
+ k3_ringacc_ring_dump(reverse_ring);
+
+ return 0;
+
+err_free_mem:
+ dma_free_coherent(ring->dma_dev,
+ ring->size * (4 << ring->elm_size),
+ ring->ring_mem_virt,
+ ring->ring_mem_dma);
+err_free_ops:
+ ring->ops = NULL;
+ ring->proxy = NULL;
+ ring->dma_dev = NULL;
+ ring->asel = 0;
+ return ret;
+}
+
+int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
+{
+ struct k3_ringacc *ringacc;
+ int ret = 0;
+
+ if (!ring || !cfg)
+ return -EINVAL;
+
+ ringacc = ring->parent;
+
+ if (ringacc->dma_rings)
+ return k3_dmaring_cfg(ring, cfg);
+
+ if (cfg->elm_size > K3_RINGACC_RING_ELSIZE_256 ||
+ cfg->mode >= K3_RINGACC_RING_MODE_INVALID ||
+ cfg->size & ~K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK ||
+ !test_bit(ring->ring_id, ringacc->rings_inuse))
+ return -EINVAL;
+
+ if (cfg->mode == K3_RINGACC_RING_MODE_MESSAGE &&
+ ring->proxy_id == K3_RINGACC_PROXY_NOT_USED &&
+ cfg->elm_size > K3_RINGACC_RING_ELSIZE_8) {
+ dev_err(ringacc->dev,
+ "Message mode must use proxy for %u element size\n",
+ 4 << ring->elm_size);
+ return -EINVAL;
+ }
+
+ /*
+ * In case of shared ring only the first user (master user) can
+ * configure the ring. The sequence should be by the client:
+ * ring = k3_ringacc_request_ring(ringacc, ring_id, 0); # master user
+ * k3_ringacc_ring_cfg(ring, cfg); # master configuration
+ * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED);
+ * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED);
+ */
+ if (ring->use_count != 1)
+ return 0;
+
+ ring->size = cfg->size;
+ ring->elm_size = cfg->elm_size;
+ ring->mode = cfg->mode;
+ memset(&ring->state, 0, sizeof(ring->state));
+
+ if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED)
+ ring->proxy = ringacc->proxy_target_base +
+ ring->proxy_id * K3_RINGACC_PROXY_TARGET_STEP;
+
+ switch (ring->mode) {
+ case K3_RINGACC_RING_MODE_RING:
+ ring->ops = &k3_ring_mode_ring_ops;
+ ring->dma_dev = cfg->dma_dev;
+ if (!ring->dma_dev)
+ ring->dma_dev = ringacc->dev;
+ break;
+ case K3_RINGACC_RING_MODE_MESSAGE:
+ ring->dma_dev = ringacc->dev;
+ if (ring->proxy)
+ ring->ops = &k3_ring_mode_proxy_ops;
+ else
+ ring->ops = &k3_ring_mode_msg_ops;
+ break;
+ default:
+ ring->ops = NULL;
+ ret = -EINVAL;
+ goto err_free_proxy;
+ }
+
+ ring->ring_mem_virt = dma_alloc_coherent(ring->dma_dev,
+ ring->size * (4 << ring->elm_size),
+ &ring->ring_mem_dma, GFP_KERNEL);
+ if (!ring->ring_mem_virt) {
+ dev_err(ringacc->dev, "Failed to alloc ring mem\n");
+ ret = -ENOMEM;
+ goto err_free_ops;
+ }
+
+ ret = k3_ringacc_ring_cfg_sci(ring);
+
+ if (ret)
+ goto err_free_mem;
+
+ ring->flags |= K3_RING_FLAG_BUSY;
+ ring->flags |= (cfg->flags & K3_RINGACC_RING_SHARED) ?
+ K3_RING_FLAG_SHARED : 0;
+
+ k3_ringacc_ring_dump(ring);
+
+ return 0;
+
+err_free_mem:
+ dma_free_coherent(ring->dma_dev,
+ ring->size * (4 << ring->elm_size),
+ ring->ring_mem_virt,
+ ring->ring_mem_dma);
+err_free_ops:
+ ring->ops = NULL;
+ ring->dma_dev = NULL;
+err_free_proxy:
+ ring->proxy = NULL;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_cfg);
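+
+/*
+ * A minimal client-side usage sketch (illustrative only; the ring size
+ * and error handling are assumptions, not taken from this file):
+ *
+ *	struct k3_ring_cfg cfg = {
+ *		.size = 128,
+ *		.elm_size = K3_RINGACC_RING_ELSIZE_8,
+ *		.mode = K3_RINGACC_RING_MODE_RING,
+ *	};
+ *	struct k3_ring *ring;
+ *
+ *	ring = k3_ringacc_request_ring(ringacc, K3_RINGACC_RING_ID_ANY, 0);
+ *	if (!ring || k3_ringacc_ring_cfg(ring, &cfg))
+ *		goto fail;
+ */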
+
+u32 k3_ringacc_ring_get_size(struct k3_ring *ring)
+{
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ return ring->size;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_size);
+
+u32 k3_ringacc_ring_get_free(struct k3_ring *ring)
+{
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ if (!ring->state.free)
+ ring->state.free = ring->size - k3_ringacc_ring_read_occ(ring);
+
+ return ring->state.free;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_free);
+
+u32 k3_ringacc_ring_get_occ(struct k3_ring *ring)
+{
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ return k3_ringacc_ring_read_occ(ring);
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_occ);
+
+u32 k3_ringacc_ring_is_full(struct k3_ring *ring)
+{
+ return !k3_ringacc_ring_get_free(ring);
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_is_full);
+
+enum k3_ringacc_access_mode {
+ K3_RINGACC_ACCESS_MODE_PUSH_HEAD,
+ K3_RINGACC_ACCESS_MODE_POP_HEAD,
+ K3_RINGACC_ACCESS_MODE_PUSH_TAIL,
+ K3_RINGACC_ACCESS_MODE_POP_TAIL,
+ K3_RINGACC_ACCESS_MODE_PEEK_HEAD,
+ K3_RINGACC_ACCESS_MODE_PEEK_TAIL,
+};
+
+#define K3_RINGACC_PROXY_MODE(x) (((x) & 0x3) << 16)
+#define K3_RINGACC_PROXY_ELSIZE(x) (((x) & 0x7) << 24)
+static int k3_ringacc_ring_cfg_proxy(struct k3_ring *ring,
+ enum k3_ringacc_proxy_access_mode mode)
+{
+ u32 val;
+
+ val = ring->ring_id;
+ val |= K3_RINGACC_PROXY_MODE(mode);
+ val |= K3_RINGACC_PROXY_ELSIZE(ring->elm_size);
+ writel(val, &ring->proxy->control);
+ return 0;
+}
+
+static int k3_ringacc_ring_access_proxy(struct k3_ring *ring, void *elem,
+ enum k3_ringacc_access_mode access_mode)
+{
+ void __iomem *ptr;
+
+ ptr = (void __iomem *)&ring->proxy->data;
+
+ switch (access_mode) {
+ case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
+ case K3_RINGACC_ACCESS_MODE_POP_HEAD:
+ k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_HEAD);
+ break;
+ case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
+ case K3_RINGACC_ACCESS_MODE_POP_TAIL:
+ k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_TAIL);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ptr += k3_ringacc_ring_get_fifo_pos(ring);
+
+ switch (access_mode) {
+ case K3_RINGACC_ACCESS_MODE_POP_HEAD:
+ case K3_RINGACC_ACCESS_MODE_POP_TAIL:
+ dev_dbg(ring->parent->dev,
+ "proxy:memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr,
+ access_mode);
+ memcpy_fromio(elem, ptr, (4 << ring->elm_size));
+ ring->state.occ--;
+ break;
+ case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
+ case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
+ dev_dbg(ring->parent->dev,
+ "proxy:memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr,
+ access_mode);
+ memcpy_toio(ptr, elem, (4 << ring->elm_size));
+ ring->state.free--;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(ring->parent->dev, "proxy: free%d occ%d\n", ring->state.free,
+ ring->state.occ);
+ return 0;
+}
+
+static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_proxy(ring, elem,
+ K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
+}
+
+static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_proxy(ring, elem,
+ K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
+}
+
+static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_proxy(ring, elem,
+ K3_RINGACC_ACCESS_MODE_POP_HEAD);
+}
+
+static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_proxy(ring, elem,
+ K3_RINGACC_ACCESS_MODE_POP_TAIL);
+}
+
+static int k3_ringacc_ring_access_io(struct k3_ring *ring, void *elem,
+ enum k3_ringacc_access_mode access_mode)
+{
+ void __iomem *ptr;
+
+ switch (access_mode) {
+ case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
+ case K3_RINGACC_ACCESS_MODE_POP_HEAD:
+ ptr = (void __iomem *)&ring->fifos->head_data;
+ break;
+ case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
+ case K3_RINGACC_ACCESS_MODE_POP_TAIL:
+ ptr = (void __iomem *)&ring->fifos->tail_data;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ptr += k3_ringacc_ring_get_fifo_pos(ring);
+
+ switch (access_mode) {
+ case K3_RINGACC_ACCESS_MODE_POP_HEAD:
+ case K3_RINGACC_ACCESS_MODE_POP_TAIL:
+ dev_dbg(ring->parent->dev,
+ "memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr,
+ access_mode);
+ memcpy_fromio(elem, ptr, (4 << ring->elm_size));
+ ring->state.occ--;
+ break;
+ case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
+ case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
+ dev_dbg(ring->parent->dev,
+ "memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr,
+ access_mode);
+ memcpy_toio(ptr, elem, (4 << ring->elm_size));
+ ring->state.free--;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(ring->parent->dev, "free%d index%d occ%d index%d\n",
+ ring->state.free, ring->state.windex, ring->state.occ,
+ ring->state.rindex);
+ return 0;
+}
+
+static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_io(ring, elem,
+ K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
+}
+
+static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_io(ring, elem,
+ K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
+}
+
+static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_io(ring, elem,
+ K3_RINGACC_ACCESS_MODE_POP_HEAD);
+}
+
+static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_io(ring, elem,
+ K3_RINGACC_ACCESS_MODE_POP_TAIL);
+}
+
+/*
+ * The element is 48 bits of address + ASEL bits in the ring.
+ * ASEL is used by the DMAs and should be removed for the kernel as it is not
+ * part of the physical memory address.
+ */
+static void k3_dmaring_remove_asel_from_elem(u64 *elem)
+{
+ *elem &= GENMASK_ULL(K3_ADDRESS_ASEL_SHIFT - 1, 0);
+}
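+
+/*
+ * Worked example (illustrative): with K3_ADDRESS_ASEL_SHIFT == 48,
+ * an element of 0x000F000090000000 (ASEL 15 in the upper bits) is
+ * reduced to the plain physical address 0x0000000090000000.
+ */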
+
+static int k3_dmaring_fwd_pop(struct k3_ring *ring, void *elem)
+{
+ void *elem_ptr;
+ u32 elem_idx;
+
+ /*
+ * DMA rings: the forward ring is always tied to a DMA channel, and the
+ * HW does not maintain any state data required for the POP operation;
+ * it is unknown how many elements have been consumed by the HW. So, to
+ * actually do a POP, the read pointer has to be recalculated every time.
+ */
+ ring->state.occ = k3_ringacc_ring_read_occ(ring);
+ if (ring->state.windex >= ring->state.occ)
+ elem_idx = ring->state.windex - ring->state.occ;
+ else
+ elem_idx = ring->size - (ring->state.occ - ring->state.windex);
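+ /* e.g. size 16, windex 2, occ 5: elem_idx = 16 - (5 - 2) = 13 */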
+
+ elem_ptr = k3_ringacc_get_elm_addr(ring, elem_idx);
+ memcpy(elem, elem_ptr, (4 << ring->elm_size));
+ k3_dmaring_remove_asel_from_elem(elem);
+
+ ring->state.occ--;
+ writel(-1, &ring->rt->db);
+
+ dev_dbg(ring->parent->dev, "%s: occ%d Windex%d Rindex%d pos_ptr%px\n",
+ __func__, ring->state.occ, ring->state.windex, elem_idx,
+ elem_ptr);
+ return 0;
+}
+
+static int k3_dmaring_reverse_pop(struct k3_ring *ring, void *elem)
+{
+ void *elem_ptr;
+
+ elem_ptr = k3_ringacc_get_elm_addr(ring, ring->state.rindex);
+
+ if (ring->state.occ) {
+ memcpy(elem, elem_ptr, (4 << ring->elm_size));
+ k3_dmaring_remove_asel_from_elem(elem);
+
+ ring->state.rindex = (ring->state.rindex + 1) % ring->size;
+ ring->state.occ--;
+ writel(-1 & K3_DMARING_RT_DB_ENTRY_MASK, &ring->rt->db);
+ } else if (ring->state.tdown_complete) {
+ dma_addr_t *value = elem;
+
+ *value = CPPI5_TDCM_MARKER;
+ writel(K3_DMARING_RT_DB_TDOWN_ACK, &ring->rt->db);
+ ring->state.tdown_complete = false;
+ }
+
+ dev_dbg(ring->parent->dev, "%s: occ%d index%d pos_ptr%px\n",
+ __func__, ring->state.occ, ring->state.rindex, elem_ptr);
+ return 0;
+}
+
+static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem)
+{
+ void *elem_ptr;
+
+ elem_ptr = k3_ringacc_get_elm_addr(ring, ring->state.windex);
+
+ memcpy(elem_ptr, elem, (4 << ring->elm_size));
+ if (ring->parent->dma_rings) {
+ u64 *addr = elem_ptr;
+
+ *addr |= ((u64)ring->asel << K3_ADDRESS_ASEL_SHIFT);
+ }
+
+ ring->state.windex = (ring->state.windex + 1) % ring->size;
+ ring->state.free--;
+ writel(1, &ring->rt->db);
+
+ dev_dbg(ring->parent->dev, "ring_push_mem: free%d index%d\n",
+ ring->state.free, ring->state.windex);
+
+ return 0;
+}
+
+static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem)
+{
+ void *elem_ptr;
+
+ elem_ptr = k3_ringacc_get_elm_addr(ring, ring->state.rindex);
+
+ memcpy(elem, elem_ptr, (4 << ring->elm_size));
+
+ ring->state.rindex = (ring->state.rindex + 1) % ring->size;
+ ring->state.occ--;
+ writel(-1, &ring->rt->db);
+
+ dev_dbg(ring->parent->dev, "ring_pop_mem: occ%d index%d pos_ptr%p\n",
+ ring->state.occ, ring->state.rindex, elem_ptr);
+ return 0;
+}
+
+int k3_ringacc_ring_push(struct k3_ring *ring, void *elem)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ dev_dbg(ring->parent->dev, "ring_push: free%d index%d\n",
+ ring->state.free, ring->state.windex);
+
+ if (k3_ringacc_ring_is_full(ring))
+ return -ENOMEM;
+
+ if (ring->ops && ring->ops->push_tail)
+ ret = ring->ops->push_tail(ring, elem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_push);
+
+int k3_ringacc_ring_push_head(struct k3_ring *ring, void *elem)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ dev_dbg(ring->parent->dev, "ring_push_head: free%d index%d\n",
+ ring->state.free, ring->state.windex);
+
+ if (k3_ringacc_ring_is_full(ring))
+ return -ENOMEM;
+
+ if (ring->ops && ring->ops->push_head)
+ ret = ring->ops->push_head(ring, elem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_push_head);
+
+int k3_ringacc_ring_pop(struct k3_ring *ring, void *elem)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ if (!ring->state.occ)
+ k3_ringacc_ring_update_occ(ring);
+
+ dev_dbg(ring->parent->dev, "ring_pop: occ%d index%d\n", ring->state.occ,
+ ring->state.rindex);
+
+ if (!ring->state.occ && !ring->state.tdown_complete)
+ return -ENODATA;
+
+ if (ring->ops && ring->ops->pop_head)
+ ret = ring->ops->pop_head(ring, elem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop);
+
+int k3_ringacc_ring_pop_tail(struct k3_ring *ring, void *elem)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ if (!ring->state.occ)
+ k3_ringacc_ring_update_occ(ring);
+
+ dev_dbg(ring->parent->dev, "ring_pop_tail: occ%d index%d\n",
+ ring->state.occ, ring->state.rindex);
+
+ if (!ring->state.occ)
+ return -ENODATA;
+
+ if (ring->ops && ring->ops->pop_tail)
+ ret = ring->ops->pop_tail(ring, elem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop_tail);
+
+struct k3_ringacc *of_k3_ringacc_get_by_phandle(struct device_node *np,
+ const char *property)
+{
+ struct device_node *ringacc_np;
+ struct k3_ringacc *ringacc = ERR_PTR(-EPROBE_DEFER);
+ struct k3_ringacc *entry;
+
+ ringacc_np = of_parse_phandle(np, property, 0);
+ if (!ringacc_np)
+ return ERR_PTR(-ENODEV);
+
+ mutex_lock(&k3_ringacc_list_lock);
+ list_for_each_entry(entry, &k3_ringacc_list, list)
+ if (entry->dev->of_node == ringacc_np) {
+ ringacc = entry;
+ break;
+ }
+ mutex_unlock(&k3_ringacc_list_lock);
+ of_node_put(ringacc_np);
+
+ return ringacc;
+}
+EXPORT_SYMBOL_GPL(of_k3_ringacc_get_by_phandle);
+
+static int k3_ringacc_probe_dt(struct k3_ringacc *ringacc)
+{
+ struct device_node *node = ringacc->dev->of_node;
+ struct device *dev = ringacc->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int ret;
+
+ if (!node) {
+ dev_err(dev, "device tree info unavailable\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_u32(node, "ti,num-rings", &ringacc->num_rings);
+ if (ret) {
+ dev_err(dev, "ti,num-rings read failure %d\n", ret);
+ return ret;
+ }
+
+ ringacc->tisci = ti_sci_get_by_phandle(node, "ti,sci");
+ if (IS_ERR(ringacc->tisci)) {
+ ret = PTR_ERR(ringacc->tisci);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "ti,sci read fail %d\n", ret);
+ ringacc->tisci = NULL;
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "ti,sci-dev-id",
+ &ringacc->tisci_dev_id);
+ if (ret) {
+ dev_err(dev, "ti,sci-dev-id read fail %d\n", ret);
+ return ret;
+ }
+
+ pdev->id = ringacc->tisci_dev_id;
+
+ ringacc->rm_gp_range = devm_ti_sci_get_of_resource(ringacc->tisci, dev,
+ ringacc->tisci_dev_id,
+ "ti,sci-rm-range-gp-rings");
+ if (IS_ERR(ringacc->rm_gp_range)) {
+ dev_err(dev, "Failed to allocate MSI interrupts\n");
+ return PTR_ERR(ringacc->rm_gp_range);
+ }
+
+ return ti_sci_inta_msi_domain_alloc_irqs(ringacc->dev,
+ ringacc->rm_gp_range);
+}
+
+static const struct k3_ringacc_soc_data k3_ringacc_soc_data_sr1 = {
+ .dma_ring_reset_quirk = 1,
+};
+
+static const struct soc_device_attribute k3_ringacc_socinfo[] = {
+ { .family = "AM65X",
+ .revision = "SR1.0",
+ .data = &k3_ringacc_soc_data_sr1
+ },
+ {/* sentinel */}
+};
+
+static int k3_ringacc_init(struct platform_device *pdev,
+ struct k3_ringacc *ringacc)
+{
+ const struct soc_device_attribute *soc;
+ void __iomem *base_fifo, *base_rt;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int ret, i;
+
+ dev->msi.domain = of_msi_get_domain(dev, dev->of_node,
+ DOMAIN_BUS_TI_SCI_INTA_MSI);
+ if (!dev->msi.domain) {
+ dev_err(dev, "Failed to get MSI domain\n");
+ return -EPROBE_DEFER;
+ }
+
+ ret = k3_ringacc_probe_dt(ringacc);
+ if (ret)
+ return ret;
+
+ soc = soc_device_match(k3_ringacc_socinfo);
+ if (soc && soc->data) {
+ const struct k3_ringacc_soc_data *soc_data = soc->data;
+
+ ringacc->dma_ring_reset_quirk = soc_data->dma_ring_reset_quirk;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rt");
+ base_rt = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base_rt))
+ return PTR_ERR(base_rt);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fifos");
+ base_fifo = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base_fifo))
+ return PTR_ERR(base_fifo);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "proxy_gcfg");
+ ringacc->proxy_gcfg = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ringacc->proxy_gcfg))
+ return PTR_ERR(ringacc->proxy_gcfg);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "proxy_target");
+ ringacc->proxy_target_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ringacc->proxy_target_base))
+ return PTR_ERR(ringacc->proxy_target_base);
+
+ ringacc->num_proxies = readl(&ringacc->proxy_gcfg->config) &
+ K3_RINGACC_PROXY_CFG_THREADS_MASK;
+
+ ringacc->rings = devm_kzalloc(dev,
+ sizeof(*ringacc->rings) *
+ ringacc->num_rings,
+ GFP_KERNEL);
+ ringacc->rings_inuse = devm_bitmap_zalloc(dev, ringacc->num_rings,
+ GFP_KERNEL);
+ ringacc->proxy_inuse = devm_bitmap_zalloc(dev, ringacc->num_proxies,
+ GFP_KERNEL);
+
+ if (!ringacc->rings || !ringacc->rings_inuse || !ringacc->proxy_inuse)
+ return -ENOMEM;
+
+ for (i = 0; i < ringacc->num_rings; i++) {
+ ringacc->rings[i].rt = base_rt +
+ K3_RINGACC_RT_REGS_STEP * i;
+ ringacc->rings[i].fifos = base_fifo +
+ K3_RINGACC_FIFO_REGS_STEP * i;
+ ringacc->rings[i].parent = ringacc;
+ ringacc->rings[i].ring_id = i;
+ ringacc->rings[i].proxy_id = K3_RINGACC_PROXY_NOT_USED;
+ }
+
+ ringacc->tisci_ring_ops = &ringacc->tisci->ops.rm_ring_ops;
+
+ dev_info(dev, "Ring Accelerator probed rings:%u, gp-rings[%u,%u] sci-dev-id:%u\n",
+ ringacc->num_rings,
+ ringacc->rm_gp_range->desc[0].start,
+ ringacc->rm_gp_range->desc[0].num,
+ ringacc->tisci_dev_id);
+ dev_info(dev, "dma-ring-reset-quirk: %s\n",
+ ringacc->dma_ring_reset_quirk ? "enabled" : "disabled");
+ dev_info(dev, "RA Proxy rev. %08x, num_proxies:%u\n",
+ readl(&ringacc->proxy_gcfg->revision), ringacc->num_proxies);
+
+ return 0;
+}
+
+struct ringacc_match_data {
+ struct k3_ringacc_ops ops;
+};
+
+static struct ringacc_match_data k3_ringacc_data = {
+ .ops = {
+ .init = k3_ringacc_init,
+ },
+};
+
+/* Match table for of_platform binding */
+static const struct of_device_id k3_ringacc_of_match[] = {
+ { .compatible = "ti,am654-navss-ringacc", .data = &k3_ringacc_data, },
+ {},
+};
+
+struct k3_ringacc *k3_ringacc_dmarings_init(struct platform_device *pdev,
+ struct k3_ringacc_init_data *data)
+{
+ struct device *dev = &pdev->dev;
+ struct k3_ringacc *ringacc;
+ void __iomem *base_rt;
+ struct resource *res;
+ int i;
+
+ ringacc = devm_kzalloc(dev, sizeof(*ringacc), GFP_KERNEL);
+ if (!ringacc)
+ return ERR_PTR(-ENOMEM);
+
+ ringacc->dev = dev;
+ ringacc->dma_rings = true;
+ ringacc->num_rings = data->num_rings;
+ ringacc->tisci = data->tisci;
+ ringacc->tisci_dev_id = data->tisci_dev_id;
+
+ mutex_init(&ringacc->req_lock);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ringrt");
+ base_rt = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base_rt))
+ return ERR_CAST(base_rt);
+
+ ringacc->rings = devm_kzalloc(dev,
+ sizeof(*ringacc->rings) *
+ ringacc->num_rings * 2,
+ GFP_KERNEL);
+ ringacc->rings_inuse = devm_bitmap_zalloc(dev, ringacc->num_rings,
+ GFP_KERNEL);
+
+ if (!ringacc->rings || !ringacc->rings_inuse)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < ringacc->num_rings; i++) {
+ struct k3_ring *ring = &ringacc->rings[i];
+
+ ring->rt = base_rt + K3_DMARING_RT_REGS_STEP * i;
+ ring->parent = ringacc;
+ ring->ring_id = i;
+ ring->proxy_id = K3_RINGACC_PROXY_NOT_USED;
+
+ ring = &ringacc->rings[ringacc->num_rings + i];
+ ring->rt = base_rt + K3_DMARING_RT_REGS_STEP * i +
+ K3_DMARING_RT_REGS_REVERSE_OFS;
+ ring->parent = ringacc;
+ ring->ring_id = i;
+ ring->proxy_id = K3_RINGACC_PROXY_NOT_USED;
+ ring->flags = K3_RING_FLAG_REVERSE;
+ }
+
+ ringacc->tisci_ring_ops = &ringacc->tisci->ops.rm_ring_ops;
+
+ dev_info(dev, "Number of rings: %u\n", ringacc->num_rings);
+
+ return ringacc;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_dmarings_init);
+
+static int k3_ringacc_probe(struct platform_device *pdev)
+{
+ const struct ringacc_match_data *match_data;
+ struct device *dev = &pdev->dev;
+ struct k3_ringacc *ringacc;
+ int ret;
+
+ match_data = of_device_get_match_data(&pdev->dev);
+ if (!match_data)
+ return -ENODEV;
+
+ ringacc = devm_kzalloc(dev, sizeof(*ringacc), GFP_KERNEL);
+ if (!ringacc)
+ return -ENOMEM;
+
+ ringacc->dev = dev;
+ mutex_init(&ringacc->req_lock);
+ ringacc->ops = &match_data->ops;
+
+ ret = ringacc->ops->init(pdev, ringacc);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dev, ringacc);
+
+ mutex_lock(&k3_ringacc_list_lock);
+ list_add_tail(&ringacc->list, &k3_ringacc_list);
+ mutex_unlock(&k3_ringacc_list_lock);
+
+ return 0;
+}
+
+static struct platform_driver k3_ringacc_driver = {
+ .probe = k3_ringacc_probe,
+ .driver = {
+ .name = "k3-ringacc",
+ .of_match_table = k3_ringacc_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(k3_ringacc_driver);
diff --git a/drivers/soc/ti/k3-socinfo.c b/drivers/soc/ti/k3-socinfo.c
new file mode 100644
index 000000000..91f441ee6
--- /dev/null
+++ b/drivers/soc/ti/k3-socinfo.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TI K3 SoC info driver
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/regmap.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/sys_soc.h>
+
+#define CTRLMMR_WKUP_JTAGID_REG 0
+/*
+ * Bits:
+ * 31-28 VARIANT Device variant
+ * 27-12 PARTNO Part number
+ * 11-1 MFG Indicates TI as manufacturer (0x17)
+ * 0 Always 1
+ */
+#define CTRLMMR_WKUP_JTAGID_VARIANT_SHIFT (28)
+#define CTRLMMR_WKUP_JTAGID_VARIANT_MASK GENMASK(31, 28)
+
+#define CTRLMMR_WKUP_JTAGID_PARTNO_SHIFT (12)
+#define CTRLMMR_WKUP_JTAGID_PARTNO_MASK GENMASK(27, 12)
+
+#define CTRLMMR_WKUP_JTAGID_MFG_SHIFT (1)
+#define CTRLMMR_WKUP_JTAGID_MFG_MASK GENMASK(11, 1)
+
+#define CTRLMMR_WKUP_JTAGID_MFG_TI 0x17
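+
+/*
+ * Decode example (illustrative): JTAGID 0x0BB5A02F -> VARIANT 0 (reported
+ * as "SR1.0"), PARTNO 0xBB5A (AM65X), MFG 0x17 (TI), bit 0 == 1.
+ */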
+
+static const struct k3_soc_id {
+ unsigned int id;
+ const char *family_name;
+} k3_soc_ids[] = {
+ { 0xBB5A, "AM65X" },
+ { 0xBB64, "J721E" },
+ { 0xBB6D, "J7200" },
+ { 0xBB38, "AM64X" },
+ { 0xBB75, "J721S2"},
+ { 0xBB7E, "AM62X" },
+};
+
+static int
+k3_chipinfo_partno_to_names(unsigned int partno,
+ struct soc_device_attribute *soc_dev_attr)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(k3_soc_ids); i++)
+ if (partno == k3_soc_ids[i].id) {
+ soc_dev_attr->family = k3_soc_ids[i].family_name;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int k3_chipinfo_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct soc_device_attribute *soc_dev_attr;
+ struct device *dev = &pdev->dev;
+ struct soc_device *soc_dev;
+ struct regmap *regmap;
+ u32 partno_id;
+ u32 variant;
+ u32 jtag_id;
+ u32 mfg;
+ int ret;
+
+ regmap = device_node_to_regmap(node);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ret = regmap_read(regmap, CTRLMMR_WKUP_JTAGID_REG, &jtag_id);
+ if (ret < 0)
+ return ret;
+
+ mfg = (jtag_id & CTRLMMR_WKUP_JTAGID_MFG_MASK) >>
+ CTRLMMR_WKUP_JTAGID_MFG_SHIFT;
+
+ if (mfg != CTRLMMR_WKUP_JTAGID_MFG_TI) {
+ dev_err(dev, "Invalid MFG SoC\n");
+ return -ENODEV;
+ }
+
+ variant = (jtag_id & CTRLMMR_WKUP_JTAGID_VARIANT_MASK) >>
+ CTRLMMR_WKUP_JTAGID_VARIANT_SHIFT;
+ variant++;
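+ /* the JTAGID variant field is zero-based: 0 -> "SR1.0", 1 -> "SR2.0" */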
+
+ partno_id = (jtag_id & CTRLMMR_WKUP_JTAGID_PARTNO_MASK) >>
+ CTRLMMR_WKUP_JTAGID_PARTNO_SHIFT;
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "SR%x.0", variant);
+ if (!soc_dev_attr->revision) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = k3_chipinfo_partno_to_names(partno_id, soc_dev_attr);
+ if (ret) {
+ dev_err(dev, "Unknown SoC JTAGID[0x%08X]\n", jtag_id);
+ ret = -ENODEV;
+ goto err_free_rev;
+ }
+
+ node = of_find_node_by_path("/");
+ of_property_read_string(node, "model", &soc_dev_attr->machine);
+ of_node_put(node);
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ ret = PTR_ERR(soc_dev);
+ goto err_free_rev;
+ }
+
+ dev_info(dev, "Family:%s rev:%s JTAGID[0x%08x] Detected\n",
+ soc_dev_attr->family,
+ soc_dev_attr->revision, jtag_id);
+
+ return 0;
+
+err_free_rev:
+ kfree(soc_dev_attr->revision);
+err:
+ kfree(soc_dev_attr);
+ return ret;
+}
+
+static const struct of_device_id k3_chipinfo_of_match[] = {
+ { .compatible = "ti,am654-chipid", },
+ { /* sentinel */ },
+};
+
+static struct platform_driver k3_chipinfo_driver = {
+ .driver = {
+ .name = "k3-chipinfo",
+ .of_match_table = k3_chipinfo_of_match,
+ },
+ .probe = k3_chipinfo_probe,
+};
+
+static int __init k3_chipinfo_init(void)
+{
+ return platform_driver_register(&k3_chipinfo_driver);
+}
+subsys_initcall(k3_chipinfo_init);
diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c
new file mode 100644
index 000000000..84afebd35
--- /dev/null
+++ b/drivers/soc/ti/knav_dma.c
@@ -0,0 +1,811 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Texas Instruments Incorporated
+ * Authors: Santosh Shilimkar <santosh.shilimkar@ti.com>
+ * Sandeep Nair <sandeep_n@ti.com>
+ * Cyril Chemparathy <cyril@ti.com>
+ */
+
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/dma-direction.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
+#include <linux/of_dma.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/soc/ti/knav_dma.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#define REG_MASK 0xffffffff
+
+#define DMA_LOOPBACK BIT(31)
+#define DMA_ENABLE BIT(31)
+#define DMA_TEARDOWN BIT(30)
+
+#define DMA_TX_FILT_PSWORDS BIT(29)
+#define DMA_TX_FILT_EINFO BIT(30)
+#define DMA_TX_PRIO_SHIFT 0
+#define DMA_RX_PRIO_SHIFT 16
+#define DMA_PRIO_MASK GENMASK(3, 0)
+#define DMA_PRIO_DEFAULT 0
+#define DMA_RX_TIMEOUT_DEFAULT 17500 /* cycles */
+#define DMA_RX_TIMEOUT_MASK GENMASK(16, 0)
+#define DMA_RX_TIMEOUT_SHIFT 0
+
+#define CHAN_HAS_EPIB BIT(30)
+#define CHAN_HAS_PSINFO BIT(29)
+#define CHAN_ERR_RETRY BIT(28)
+#define CHAN_PSINFO_AT_SOP BIT(25)
+#define CHAN_SOP_OFF_SHIFT 16
+#define CHAN_SOP_OFF_MASK GENMASK(9, 0)
+#define DESC_TYPE_SHIFT 26
+#define DESC_TYPE_MASK GENMASK(2, 0)
+
+/*
+ * QMGR & QNUM together make up 14 bits, with QMGR as the 2 MSBs, in the
+ * logical navigator cloud mapping scheme. Using the 14-bit physical queue
+ * numbers directly maps into this scheme.
+ */
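+/* e.g. physical queue 8704 (0x2200): QMGR = 8704 >> 12 = 2, QNUM = 512 */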
+#define CHAN_QNUM_MASK GENMASK(14, 0)
+#define DMA_MAX_QMS 4
+#define DMA_TIMEOUT 1 /* msecs */
+#define DMA_INVALID_ID 0xffff
+
+struct reg_global {
+ u32 revision;
+ u32 perf_control;
+ u32 emulation_control;
+ u32 priority_control;
+ u32 qm_base_address[DMA_MAX_QMS];
+};
+
+struct reg_chan {
+ u32 control;
+ u32 mode;
+ u32 __rsvd[6];
+};
+
+struct reg_tx_sched {
+ u32 prio;
+};
+
+struct reg_rx_flow {
+ u32 control;
+ u32 tags;
+ u32 tag_sel;
+ u32 fdq_sel[2];
+ u32 thresh[3];
+};
+
+struct knav_dma_pool_device {
+ struct device *dev;
+ struct list_head list;
+};
+
+struct knav_dma_device {
+ bool loopback, enable_all;
+ unsigned tx_priority, rx_priority, rx_timeout;
+ unsigned logical_queue_managers;
+ unsigned qm_base_address[DMA_MAX_QMS];
+ struct reg_global __iomem *reg_global;
+ struct reg_chan __iomem *reg_tx_chan;
+ struct reg_rx_flow __iomem *reg_rx_flow;
+ struct reg_chan __iomem *reg_rx_chan;
+ struct reg_tx_sched __iomem *reg_tx_sched;
+ unsigned max_rx_chan, max_tx_chan;
+ unsigned max_rx_flow;
+ char name[32];
+ atomic_t ref_count;
+ struct list_head list;
+ struct list_head chan_list;
+ spinlock_t lock;
+};
+
+struct knav_dma_chan {
+ enum dma_transfer_direction direction;
+ struct knav_dma_device *dma;
+ atomic_t ref_count;
+
+ /* registers */
+ struct reg_chan __iomem *reg_chan;
+ struct reg_tx_sched __iomem *reg_tx_sched;
+ struct reg_rx_flow __iomem *reg_rx_flow;
+
+ /* configuration stuff */
+ unsigned channel, flow;
+ struct knav_dma_cfg cfg;
+ struct list_head list;
+ spinlock_t lock;
+};
+
+#define chan_number(ch) ((ch->direction == DMA_MEM_TO_DEV) ? \
+ ch->channel : ch->flow)
+
+static struct knav_dma_pool_device *kdev;
+
+static bool device_ready;
+bool knav_dma_device_ready(void)
+{
+ return device_ready;
+}
+EXPORT_SYMBOL_GPL(knav_dma_device_ready);
+
+static bool check_config(struct knav_dma_chan *chan, struct knav_dma_cfg *cfg)
+{
+	return !memcmp(&chan->cfg, cfg, sizeof(*cfg));
+}
+
+static int chan_start(struct knav_dma_chan *chan,
+ struct knav_dma_cfg *cfg)
+{
+ u32 v = 0;
+
+ spin_lock(&chan->lock);
+ if ((chan->direction == DMA_MEM_TO_DEV) && chan->reg_chan) {
+ if (cfg->u.tx.filt_pswords)
+ v |= DMA_TX_FILT_PSWORDS;
+ if (cfg->u.tx.filt_einfo)
+ v |= DMA_TX_FILT_EINFO;
+ writel_relaxed(v, &chan->reg_chan->mode);
+ writel_relaxed(DMA_ENABLE, &chan->reg_chan->control);
+ }
+
+ if (chan->reg_tx_sched)
+ writel_relaxed(cfg->u.tx.priority, &chan->reg_tx_sched->prio);
+
+ if (chan->reg_rx_flow) {
+ v = 0;
+
+ if (cfg->u.rx.einfo_present)
+ v |= CHAN_HAS_EPIB;
+ if (cfg->u.rx.psinfo_present)
+ v |= CHAN_HAS_PSINFO;
+ if (cfg->u.rx.err_mode == DMA_RETRY)
+ v |= CHAN_ERR_RETRY;
+ v |= (cfg->u.rx.desc_type & DESC_TYPE_MASK) << DESC_TYPE_SHIFT;
+ if (cfg->u.rx.psinfo_at_sop)
+ v |= CHAN_PSINFO_AT_SOP;
+ v |= (cfg->u.rx.sop_offset & CHAN_SOP_OFF_MASK)
+ << CHAN_SOP_OFF_SHIFT;
+ v |= cfg->u.rx.dst_q & CHAN_QNUM_MASK;
+
+ writel_relaxed(v, &chan->reg_rx_flow->control);
+ writel_relaxed(0, &chan->reg_rx_flow->tags);
+ writel_relaxed(0, &chan->reg_rx_flow->tag_sel);
+
+ v = cfg->u.rx.fdq[0] << 16;
+ v |= cfg->u.rx.fdq[1] & CHAN_QNUM_MASK;
+ writel_relaxed(v, &chan->reg_rx_flow->fdq_sel[0]);
+
+ v = cfg->u.rx.fdq[2] << 16;
+ v |= cfg->u.rx.fdq[3] & CHAN_QNUM_MASK;
+ writel_relaxed(v, &chan->reg_rx_flow->fdq_sel[1]);
+
+ writel_relaxed(0, &chan->reg_rx_flow->thresh[0]);
+ writel_relaxed(0, &chan->reg_rx_flow->thresh[1]);
+ writel_relaxed(0, &chan->reg_rx_flow->thresh[2]);
+ }
+
+ /* Keep a copy of the cfg */
+ memcpy(&chan->cfg, cfg, sizeof(*cfg));
+ spin_unlock(&chan->lock);
+
+ return 0;
+}
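+
+/*
+ * A minimal RX-flow configuration sketch (queue numbers and field values
+ * are illustrative assumptions, not taken from this driver):
+ *
+ *	struct knav_dma_cfg cfg = {
+ *		.u.rx.einfo_present = true,
+ *		.u.rx.psinfo_present = true,
+ *		.u.rx.err_mode = DMA_RETRY,
+ *		.u.rx.dst_q = 528,
+ *		.u.rx.fdq = { 528, 528, 528, 528 },
+ *	};
+ *
+ *	chan_start(chan, &cfg);
+ */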
+
+static int chan_teardown(struct knav_dma_chan *chan)
+{
+ unsigned long end, value;
+
+ if (!chan->reg_chan)
+ return 0;
+
+ /* indicate teardown */
+ writel_relaxed(DMA_TEARDOWN, &chan->reg_chan->control);
+
+ /* wait for the dma to shut itself down */
+ end = jiffies + msecs_to_jiffies(DMA_TIMEOUT);
+ do {
+ value = readl_relaxed(&chan->reg_chan->control);
+ if ((value & DMA_ENABLE) == 0)
+ break;
+ } while (time_after(end, jiffies));
+
+ if (readl_relaxed(&chan->reg_chan->control) & DMA_ENABLE) {
+ dev_err(kdev->dev, "timeout waiting for teardown\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void chan_stop(struct knav_dma_chan *chan)
+{
+ spin_lock(&chan->lock);
+ if (chan->reg_rx_flow) {
+ /* first detach fdqs, starve out the flow */
+ writel_relaxed(0, &chan->reg_rx_flow->fdq_sel[0]);
+ writel_relaxed(0, &chan->reg_rx_flow->fdq_sel[1]);
+ writel_relaxed(0, &chan->reg_rx_flow->thresh[0]);
+ writel_relaxed(0, &chan->reg_rx_flow->thresh[1]);
+ writel_relaxed(0, &chan->reg_rx_flow->thresh[2]);
+ }
+
+ /* teardown the dma channel */
+ chan_teardown(chan);
+
+ /* then disconnect the completion side */
+ if (chan->reg_rx_flow) {
+ writel_relaxed(0, &chan->reg_rx_flow->control);
+ writel_relaxed(0, &chan->reg_rx_flow->tags);
+ writel_relaxed(0, &chan->reg_rx_flow->tag_sel);
+ }
+
+ memset(&chan->cfg, 0, sizeof(struct knav_dma_cfg));
+ spin_unlock(&chan->lock);
+
+ dev_dbg(kdev->dev, "channel stopped\n");
+}
+
+static void dma_hw_enable_all(struct knav_dma_device *dma)
+{
+ int i;
+
+ for (i = 0; i < dma->max_tx_chan; i++) {
+ writel_relaxed(0, &dma->reg_tx_chan[i].mode);
+ writel_relaxed(DMA_ENABLE, &dma->reg_tx_chan[i].control);
+ }
+}
+
+
+static void knav_dma_hw_init(struct knav_dma_device *dma)
+{
+ unsigned v;
+ int i;
+
+ spin_lock(&dma->lock);
+ v = dma->loopback ? DMA_LOOPBACK : 0;
+ writel_relaxed(v, &dma->reg_global->emulation_control);
+
+ v = readl_relaxed(&dma->reg_global->perf_control);
+ v |= ((dma->rx_timeout & DMA_RX_TIMEOUT_MASK) << DMA_RX_TIMEOUT_SHIFT);
+ writel_relaxed(v, &dma->reg_global->perf_control);
+
+ v = ((dma->tx_priority << DMA_TX_PRIO_SHIFT) |
+ (dma->rx_priority << DMA_RX_PRIO_SHIFT));
+
+ writel_relaxed(v, &dma->reg_global->priority_control);
+
+ /* Always enable all Rx channels. Rx paths are managed using flows */
+ for (i = 0; i < dma->max_rx_chan; i++)
+ writel_relaxed(DMA_ENABLE, &dma->reg_rx_chan[i].control);
+
+ for (i = 0; i < dma->logical_queue_managers; i++)
+ writel_relaxed(dma->qm_base_address[i],
+ &dma->reg_global->qm_base_address[i]);
+ spin_unlock(&dma->lock);
+}
+
+static void knav_dma_hw_destroy(struct knav_dma_device *dma)
+{
+ int i;
+ unsigned v;
+
+ spin_lock(&dma->lock);
+ v = ~DMA_ENABLE & REG_MASK;
+
+ for (i = 0; i < dma->max_rx_chan; i++)
+ writel_relaxed(v, &dma->reg_rx_chan[i].control);
+
+ for (i = 0; i < dma->max_tx_chan; i++)
+ writel_relaxed(v, &dma->reg_tx_chan[i].control);
+ spin_unlock(&dma->lock);
+}
+
+static void dma_debug_show_channels(struct seq_file *s,
+ struct knav_dma_chan *chan)
+{
+ int i;
+
+ seq_printf(s, "\t%s %d:\t",
+ ((chan->direction == DMA_MEM_TO_DEV) ? "tx chan" : "rx flow"),
+ chan_number(chan));
+
+ if (chan->direction == DMA_MEM_TO_DEV) {
+ seq_printf(s, "einfo - %d, pswords - %d, priority - %d\n",
+ chan->cfg.u.tx.filt_einfo,
+ chan->cfg.u.tx.filt_pswords,
+ chan->cfg.u.tx.priority);
+ } else {
+ seq_printf(s, "einfo - %d, psinfo - %d, desc_type - %d\n",
+ chan->cfg.u.rx.einfo_present,
+ chan->cfg.u.rx.psinfo_present,
+ chan->cfg.u.rx.desc_type);
+ seq_printf(s, "\t\t\tdst_q: [%d], thresh: %d fdq: ",
+ chan->cfg.u.rx.dst_q,
+ chan->cfg.u.rx.thresh);
+ for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; i++)
+ seq_printf(s, "[%d]", chan->cfg.u.rx.fdq[i]);
+ seq_printf(s, "\n");
+ }
+}
+
+static void dma_debug_show_devices(struct seq_file *s,
+ struct knav_dma_device *dma)
+{
+ struct knav_dma_chan *chan;
+
+ list_for_each_entry(chan, &dma->chan_list, list) {
+ if (atomic_read(&chan->ref_count))
+ dma_debug_show_channels(s, chan);
+ }
+}
+
+static int knav_dma_debug_show(struct seq_file *s, void *v)
+{
+ struct knav_dma_device *dma;
+
+ list_for_each_entry(dma, &kdev->list, list) {
+ if (atomic_read(&dma->ref_count)) {
+ seq_printf(s, "%s : max_tx_chan: (%d), max_rx_flows: (%d)\n",
+ dma->name, dma->max_tx_chan, dma->max_rx_flow);
+ dma_debug_show_devices(s, dma);
+ }
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(knav_dma_debug);
+
+static int of_channel_match_helper(struct device_node *np, const char *name,
+ const char **dma_instance)
+{
+ struct of_phandle_args args;
+ struct device_node *dma_node;
+ int index;
+
+ dma_node = of_parse_phandle(np, "ti,navigator-dmas", 0);
+ if (!dma_node)
+ return -ENODEV;
+
+ *dma_instance = dma_node->name;
+ index = of_property_match_string(np, "ti,navigator-dma-names", name);
+ if (index < 0) {
+ dev_err(kdev->dev, "No 'ti,navigator-dma-names' property\n");
+ return -ENODEV;
+ }
+
+ if (of_parse_phandle_with_fixed_args(np, "ti,navigator-dmas",
+ 1, index, &args)) {
+ dev_err(kdev->dev, "Missing the phandle args name %s\n", name);
+ return -ENODEV;
+ }
+
+ if (args.args[0] < 0) {
+ dev_err(kdev->dev, "Missing args for %s\n", name);
+ return -ENODEV;
+ }
+
+ return args.args[0];
+}
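+
+/*
+ * The helper above resolves a channel number from two client properties.
+ * An illustrative (hypothetical) client node might carry:
+ *
+ *	ti,navigator-dmas = <&dma_gbe 22>, <&dma_gbe 23>;
+ *	ti,navigator-dma-names = "netrx0", "nettx0";
+ *
+ * Matching "netrx0" returns the fixed argument 22 as the rx flow (or
+ * tx channel) number, while the name of the node the phandle points at
+ * selects the DMA instance.
+ */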
+
+/**
+ * knav_dma_open_channel() - try to setup an exclusive slave channel
+ * @dev: pointer to client device structure
+ * @name: slave channel name
+ * @config: dma configuration parameters
+ *
+ * Returns a pointer to the appropriate DMA channel on success, or an
+ * error pointer otherwise.
+ */
+void *knav_dma_open_channel(struct device *dev, const char *name,
+ struct knav_dma_cfg *config)
+{
+ struct knav_dma_device *dma = NULL, *iter1;
+ struct knav_dma_chan *chan = NULL, *iter2;
+ int chan_num = -1;
+ const char *instance;
+
+ if (!kdev) {
+ pr_err("keystone-navigator-dma driver not registered\n");
+ return (void *)-EINVAL;
+ }
+
+ chan_num = of_channel_match_helper(dev->of_node, name, &instance);
+ if (chan_num < 0) {
+ dev_err(kdev->dev, "No DMA instance with name %s\n", name);
+ return (void *)-EINVAL;
+ }
+
+ dev_dbg(kdev->dev, "initializing %s channel %d from DMA %s\n",
+ config->direction == DMA_MEM_TO_DEV ? "transmit" :
+ config->direction == DMA_DEV_TO_MEM ? "receive" :
+ "unknown", chan_num, instance);
+
+ if (config->direction != DMA_MEM_TO_DEV &&
+ config->direction != DMA_DEV_TO_MEM) {
+ dev_err(kdev->dev, "bad direction\n");
+ return (void *)-EINVAL;
+ }
+
+ /* Look for correct dma instance */
+ list_for_each_entry(iter1, &kdev->list, list) {
+ if (!strcmp(iter1->name, instance)) {
+ dma = iter1;
+ break;
+ }
+ }
+ if (!dma) {
+ dev_err(kdev->dev, "No DMA instance with name %s\n", instance);
+ return (void *)-EINVAL;
+ }
+
+ /* Look for correct dma channel from dma instance */
+ list_for_each_entry(iter2, &dma->chan_list, list) {
+ if (config->direction == DMA_MEM_TO_DEV) {
+ if (iter2->channel == chan_num) {
+ chan = iter2;
+ break;
+ }
+ } else {
+ if (iter2->flow == chan_num) {
+ chan = iter2;
+ break;
+ }
+ }
+ }
+ if (!chan) {
+ dev_err(kdev->dev, "channel %d is not in DMA %s\n",
+ chan_num, instance);
+ return (void *)-EINVAL;
+ }
+
+ if (atomic_read(&chan->ref_count) >= 1) {
+ if (!check_config(chan, config)) {
+ dev_err(kdev->dev, "channel %d config miss-match\n",
+ chan_num);
+ return (void *)-EINVAL;
+ }
+ }
+
+ if (atomic_inc_return(&chan->dma->ref_count) <= 1)
+ knav_dma_hw_init(chan->dma);
+
+ if (atomic_inc_return(&chan->ref_count) <= 1)
+ chan_start(chan, config);
+
+ dev_dbg(kdev->dev, "channel %d opened from DMA %s\n",
+ chan_num, instance);
+
+ return chan;
+}
+EXPORT_SYMBOL_GPL(knav_dma_open_channel);
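+
+/*
+ * A minimal open sketch, assuming a client device node wired up as in
+ * the earlier binding example; the name and queue numbers below are
+ * hypothetical:
+ *
+ *	struct knav_dma_cfg cfg = { 0 };
+ *	void *chan;
+ *
+ *	cfg.direction = DMA_DEV_TO_MEM;
+ *	cfg.u.rx.dst_q = 528;	(completion queue, hypothetical)
+ *	cfg.u.rx.fdq[0] = 144;	(free descriptor queue, hypothetical)
+ *	chan = knav_dma_open_channel(dev, "netrx0", &cfg);
+ *	if (IS_ERR(chan))
+ *		return PTR_ERR(chan);
+ */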
+
+/**
+ * knav_dma_close_channel() - Destroy a dma channel
+ *
+ * @channel: dma channel handle
+ *
+ */
+void knav_dma_close_channel(void *channel)
+{
+ struct knav_dma_chan *chan = channel;
+
+ if (!kdev) {
+ pr_err("keystone-navigator-dma driver not registered\n");
+ return;
+ }
+
+ if (atomic_dec_return(&chan->ref_count) <= 0)
+ chan_stop(chan);
+
+ if (atomic_dec_return(&chan->dma->ref_count) <= 0)
+ knav_dma_hw_destroy(chan->dma);
+
+ dev_dbg(kdev->dev, "channel %d or flow %d closed from DMA %s\n",
+ chan->channel, chan->flow, chan->dma->name);
+}
+EXPORT_SYMBOL_GPL(knav_dma_close_channel);
+
+static void __iomem *pktdma_get_regs(struct knav_dma_device *dma,
+ struct device_node *node,
+ unsigned index, resource_size_t *_size)
+{
+ struct device *dev = kdev->dev;
+ struct resource res;
+ void __iomem *regs;
+ int ret;
+
+ ret = of_address_to_resource(node, index, &res);
+ if (ret) {
+ dev_err(dev, "Can't translate of node(%pOFn) address for index(%d)\n",
+ node, index);
+ return ERR_PTR(ret);
+ }
+
+ regs = devm_ioremap_resource(kdev->dev, &res);
+ if (IS_ERR(regs))
+ dev_err(dev, "Failed to map register base for index(%d) node(%pOFn)\n",
+ index, node);
+ if (_size)
+ *_size = resource_size(&res);
+
+ return regs;
+}
+
+static int pktdma_init_rx_chan(struct knav_dma_chan *chan, u32 flow)
+{
+ struct knav_dma_device *dma = chan->dma;
+
+ chan->flow = flow;
+ chan->reg_rx_flow = dma->reg_rx_flow + flow;
+ chan->channel = DMA_INVALID_ID;
+ dev_dbg(kdev->dev, "rx flow(%d) (%p)\n", chan->flow, chan->reg_rx_flow);
+
+ return 0;
+}
+
+static int pktdma_init_tx_chan(struct knav_dma_chan *chan, u32 channel)
+{
+ struct knav_dma_device *dma = chan->dma;
+
+ chan->channel = channel;
+ chan->reg_chan = dma->reg_tx_chan + channel;
+ chan->reg_tx_sched = dma->reg_tx_sched + channel;
+ chan->flow = DMA_INVALID_ID;
+ dev_dbg(kdev->dev, "tx channel(%d) (%p)\n", chan->channel, chan->reg_chan);
+
+ return 0;
+}
+
+static int pktdma_init_chan(struct knav_dma_device *dma,
+ enum dma_transfer_direction dir,
+ unsigned chan_num)
+{
+ struct device *dev = kdev->dev;
+ struct knav_dma_chan *chan;
+ int ret = -EINVAL;
+
+ chan = devm_kzalloc(dev, sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&chan->list);
+ chan->dma = dma;
+ chan->direction = DMA_TRANS_NONE;
+ atomic_set(&chan->ref_count, 0);
+ spin_lock_init(&chan->lock);
+
+ if (dir == DMA_MEM_TO_DEV) {
+ chan->direction = dir;
+ ret = pktdma_init_tx_chan(chan, chan_num);
+ } else if (dir == DMA_DEV_TO_MEM) {
+ chan->direction = dir;
+ ret = pktdma_init_rx_chan(chan, chan_num);
+ } else {
+ dev_err(dev, "channel(%d) direction unknown\n", chan_num);
+ }
+
+ list_add_tail(&chan->list, &dma->chan_list);
+
+ return ret;
+}
+
+static int dma_init(struct device_node *cloud, struct device_node *dma_node)
+{
+ unsigned max_tx_chan, max_rx_chan, max_rx_flow, max_tx_sched;
+ struct device_node *node = dma_node;
+ struct knav_dma_device *dma;
+ int ret, len, num_chan = 0;
+ resource_size_t size;
+ u32 timeout;
+ u32 i;
+
+ dma = devm_kzalloc(kdev->dev, sizeof(*dma), GFP_KERNEL);
+ if (!dma) {
+ dev_err(kdev->dev, "could not allocate driver mem\n");
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(&dma->list);
+ INIT_LIST_HEAD(&dma->chan_list);
+
+ if (!of_find_property(cloud, "ti,navigator-cloud-address", &len)) {
+ dev_err(kdev->dev, "unspecified navigator cloud addresses\n");
+ return -ENODEV;
+ }
+
+ dma->logical_queue_managers = len / sizeof(u32);
+ if (dma->logical_queue_managers > DMA_MAX_QMS) {
+ dev_warn(kdev->dev, "too many queue mgrs(>%d) rest ignored\n",
+ dma->logical_queue_managers);
+ dma->logical_queue_managers = DMA_MAX_QMS;
+ }
+
+ ret = of_property_read_u32_array(cloud, "ti,navigator-cloud-address",
+ dma->qm_base_address,
+ dma->logical_queue_managers);
+ if (ret) {
+ dev_err(kdev->dev, "invalid navigator cloud addresses\n");
+ return -ENODEV;
+ }
+
+ dma->reg_global = pktdma_get_regs(dma, node, 0, &size);
+ if (IS_ERR(dma->reg_global))
+ return PTR_ERR(dma->reg_global);
+ if (size < sizeof(struct reg_global)) {
+ dev_err(kdev->dev, "bad size %pa for global regs\n", &size);
+ return -ENODEV;
+ }
+
+ dma->reg_tx_chan = pktdma_get_regs(dma, node, 1, &size);
+ if (IS_ERR(dma->reg_tx_chan))
+ return PTR_ERR(dma->reg_tx_chan);
+
+ max_tx_chan = size / sizeof(struct reg_chan);
+ dma->reg_rx_chan = pktdma_get_regs(dma, node, 2, &size);
+ if (IS_ERR(dma->reg_rx_chan))
+ return PTR_ERR(dma->reg_rx_chan);
+
+ max_rx_chan = size / sizeof(struct reg_chan);
+ dma->reg_tx_sched = pktdma_get_regs(dma, node, 3, &size);
+ if (IS_ERR(dma->reg_tx_sched))
+ return PTR_ERR(dma->reg_tx_sched);
+
+ max_tx_sched = size / sizeof(struct reg_tx_sched);
+ dma->reg_rx_flow = pktdma_get_regs(dma, node, 4, &size);
+ if (IS_ERR(dma->reg_rx_flow))
+ return PTR_ERR(dma->reg_rx_flow);
+
+ max_rx_flow = size / sizeof(struct reg_rx_flow);
+ dma->rx_priority = DMA_PRIO_DEFAULT;
+ dma->tx_priority = DMA_PRIO_DEFAULT;
+
+ dma->enable_all = (of_get_property(node, "ti,enable-all", NULL) != NULL);
+ dma->loopback = (of_get_property(node, "ti,loop-back", NULL) != NULL);
+
+ ret = of_property_read_u32(node, "ti,rx-retry-timeout", &timeout);
+ if (ret < 0) {
+ dev_dbg(kdev->dev, "unspecified rx timeout using value %d\n",
+ DMA_RX_TIMEOUT_DEFAULT);
+ timeout = DMA_RX_TIMEOUT_DEFAULT;
+ }
+
+ dma->rx_timeout = timeout;
+ dma->max_rx_chan = max_rx_chan;
+ dma->max_rx_flow = max_rx_flow;
+ dma->max_tx_chan = min(max_tx_chan, max_tx_sched);
+ atomic_set(&dma->ref_count, 0);
+ strcpy(dma->name, node->name);
+ spin_lock_init(&dma->lock);
+
+ for (i = 0; i < dma->max_tx_chan; i++) {
+ if (pktdma_init_chan(dma, DMA_MEM_TO_DEV, i) >= 0)
+ num_chan++;
+ }
+
+ for (i = 0; i < dma->max_rx_flow; i++) {
+ if (pktdma_init_chan(dma, DMA_DEV_TO_MEM, i) >= 0)
+ num_chan++;
+ }
+
+ list_add_tail(&dma->list, &kdev->list);
+
+	/*
+	 * For DSP software use cases or userspace transport software, set
+	 * up all the DMA hardware resources.
+	 */
+ if (dma->enable_all) {
+ atomic_inc(&dma->ref_count);
+ knav_dma_hw_init(dma);
+ dma_hw_enable_all(dma);
+ }
+
+ dev_info(kdev->dev, "DMA %s registered %d logical channels, flows %d, tx chans: %d, rx chans: %d%s\n",
+ dma->name, num_chan, dma->max_rx_flow,
+ dma->max_tx_chan, dma->max_rx_chan,
+ dma->loopback ? ", loopback" : "");
+
+ return 0;
+}
+
+static int knav_dma_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *child;
+ int ret = 0;
+
+ if (!node) {
+ dev_err(&pdev->dev, "could not find device info\n");
+ return -EINVAL;
+ }
+
+ kdev = devm_kzalloc(dev,
+ sizeof(struct knav_dma_pool_device), GFP_KERNEL);
+ if (!kdev) {
+ dev_err(dev, "could not allocate driver mem\n");
+ return -ENOMEM;
+ }
+
+ kdev->dev = dev;
+ INIT_LIST_HEAD(&kdev->list);
+
+ pm_runtime_enable(kdev->dev);
+ ret = pm_runtime_resume_and_get(kdev->dev);
+ if (ret < 0) {
+ dev_err(kdev->dev, "unable to enable pktdma, err %d\n", ret);
+ goto err_pm_disable;
+ }
+
+ /* Initialise all packet dmas */
+ for_each_child_of_node(node, child) {
+ ret = dma_init(node, child);
+ if (ret) {
+ of_node_put(child);
+ dev_err(&pdev->dev, "init failed with %d\n", ret);
+ break;
+ }
+ }
+
+ if (list_empty(&kdev->list)) {
+ dev_err(dev, "no valid dma instance\n");
+ ret = -ENODEV;
+ goto err_put_sync;
+ }
+
+ debugfs_create_file("knav_dma", S_IFREG | S_IRUGO, NULL, NULL,
+ &knav_dma_debug_fops);
+
+ device_ready = true;
+ return ret;
+
+err_put_sync:
+ pm_runtime_put_sync(kdev->dev);
+err_pm_disable:
+ pm_runtime_disable(kdev->dev);
+
+ return ret;
+}
+
+static int knav_dma_remove(struct platform_device *pdev)
+{
+ struct knav_dma_device *dma;
+
+ list_for_each_entry(dma, &kdev->list, list) {
+ if (atomic_dec_return(&dma->ref_count) == 0)
+ knav_dma_hw_destroy(dma);
+ }
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static struct of_device_id of_match[] = {
+ { .compatible = "ti,keystone-navigator-dma", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, of_match);
+
+static struct platform_driver knav_dma_driver = {
+ .probe = knav_dma_probe,
+ .remove = knav_dma_remove,
+ .driver = {
+ .name = "keystone-navigator-dma",
+ .of_match_table = of_match,
+ },
+};
+module_platform_driver(knav_dma_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI Keystone Navigator Packet DMA driver");
+MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
+MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");
diff --git a/drivers/soc/ti/knav_qmss.h b/drivers/soc/ti/knav_qmss.h
new file mode 100644
index 000000000..a01eda720
--- /dev/null
+++ b/drivers/soc/ti/knav_qmss.h
@@ -0,0 +1,387 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Keystone Navigator QMSS driver internal header
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Sandeep Nair <sandeep_n@ti.com>
+ * Cyril Chemparathy <cyril@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ */
+
+#ifndef __KNAV_QMSS_H__
+#define __KNAV_QMSS_H__
+
+#include <linux/percpu.h>
+
+#define THRESH_GTE BIT(7)
+#define THRESH_LT 0
+
+#define PDSP_CTRL_PC_MASK 0xffff0000
+#define PDSP_CTRL_SOFT_RESET BIT(0)
+#define PDSP_CTRL_ENABLE BIT(1)
+#define PDSP_CTRL_RUNNING BIT(15)
+
+#define ACC_MAX_CHANNEL 48
+#define ACC_DEFAULT_PERIOD 25 /* usecs */
+
+#define ACC_CHANNEL_INT_BASE 2
+
+#define ACC_LIST_ENTRY_TYPE 1
+#define ACC_LIST_ENTRY_WORDS (1 << ACC_LIST_ENTRY_TYPE)
+#define ACC_LIST_ENTRY_QUEUE_IDX 0
+#define ACC_LIST_ENTRY_DESC_IDX (ACC_LIST_ENTRY_WORDS - 1)
+
+#define ACC_CMD_DISABLE_CHANNEL 0x80
+#define ACC_CMD_ENABLE_CHANNEL 0x81
+#define ACC_CFG_MULTI_QUEUE BIT(21)
+
+#define ACC_INTD_OFFSET_EOI (0x0010)
+#define ACC_INTD_OFFSET_COUNT(ch) (0x0300 + 4 * (ch))
+#define ACC_INTD_OFFSET_STATUS(ch) (0x0200 + 4 * ((ch) / 32))
+
+#define RANGE_MAX_IRQS 64
+
+#define ACC_DESCS_MAX SZ_1K
+#define ACC_DESCS_MASK (ACC_DESCS_MAX - 1)
+#define DESC_SIZE_MASK 0xful
+#define DESC_PTR_MASK (~DESC_SIZE_MASK)
+
+#define KNAV_NAME_SIZE 32
+
+enum knav_acc_result {
+ ACC_RET_IDLE,
+ ACC_RET_SUCCESS,
+ ACC_RET_INVALID_COMMAND,
+ ACC_RET_INVALID_CHANNEL,
+ ACC_RET_INACTIVE_CHANNEL,
+ ACC_RET_ACTIVE_CHANNEL,
+ ACC_RET_INVALID_QUEUE,
+ ACC_RET_INVALID_RET,
+};
+
+struct knav_reg_config {
+ u32 revision;
+ u32 __pad1;
+ u32 divert;
+ u32 link_ram_base0;
+ u32 link_ram_size0;
+ u32 link_ram_base1;
+ u32 __pad2[2];
+ u32 starvation[];
+};
+
+struct knav_reg_region {
+ u32 base;
+ u32 start_index;
+ u32 size_count;
+ u32 __pad;
+};
+
+struct knav_reg_pdsp_regs {
+ u32 control;
+ u32 status;
+ u32 cycle_count;
+ u32 stall_count;
+};
+
+struct knav_reg_acc_command {
+ u32 command;
+ u32 queue_mask;
+ u32 list_dma;
+ u32 queue_num;
+ u32 timer_config;
+};
+
+struct knav_link_ram_block {
+ dma_addr_t dma;
+ void *virt;
+ size_t size;
+};
+
+struct knav_acc_info {
+ u32 pdsp_id;
+ u32 start_channel;
+ u32 list_entries;
+ u32 pacing_mode;
+ u32 timer_count;
+ int mem_size;
+ int list_size;
+ struct knav_pdsp_info *pdsp;
+};
+
+struct knav_acc_channel {
+ u32 channel;
+ u32 list_index;
+ u32 open_mask;
+ u32 *list_cpu[2];
+ dma_addr_t list_dma[2];
+ char name[KNAV_NAME_SIZE];
+ atomic_t retrigger_count;
+};
+
+struct knav_pdsp_info {
+ const char *name;
+ struct knav_reg_pdsp_regs __iomem *regs;
+ union {
+ void __iomem *command;
+ struct knav_reg_acc_command __iomem *acc_command;
+ u32 __iomem *qos_command;
+ };
+ void __iomem *intd;
+ u32 __iomem *iram;
+ u32 id;
+ struct list_head list;
+ bool loaded;
+ bool started;
+};
+
+struct knav_qmgr_info {
+ unsigned start_queue;
+ unsigned num_queues;
+ struct knav_reg_config __iomem *reg_config;
+ struct knav_reg_region __iomem *reg_region;
+ struct knav_reg_queue __iomem *reg_push, *reg_pop, *reg_peek;
+ void __iomem *reg_status;
+ struct list_head list;
+};
+
+#define KNAV_NUM_LINKRAM 2
+
+/**
+ * struct knav_queue_stats: queue statistics
+ * @pushes: number of push operations
+ * @pops: number of pop operations
+ * @push_errors: number of push errors
+ * @pop_errors: number of pop errors
+ * @notifies: notifier counts
+ */
+struct knav_queue_stats {
+ unsigned int pushes;
+ unsigned int pops;
+ unsigned int push_errors;
+ unsigned int pop_errors;
+ unsigned int notifies;
+};
+
+/**
+ * struct knav_reg_queue: queue registers
+ * @entry_count: valid entries in the queue
+ * @byte_count: total byte count in the queue
+ * @packet_size: packet size for the queue
+ * @ptr_size_thresh: packet pointer size threshold
+ */
+struct knav_reg_queue {
+ u32 entry_count;
+ u32 byte_count;
+ u32 packet_size;
+ u32 ptr_size_thresh;
+};
+
+/**
+ * struct knav_region: qmss region info
+ * @dma_start, dma_end: start and end dma address
+ * @virt_start, virt_end: start and end virtual address
+ * @desc_size: descriptor size
+ * @used_desc: consumed descriptors
+ * @id: region number
+ * @num_desc: total descriptors
+ * @link_index: index of the first descriptor
+ * @name: region name
+ * @list: instance in the device's region list
+ * @pools: list of descriptor pools in the region
+ */
+struct knav_region {
+ dma_addr_t dma_start, dma_end;
+ void *virt_start, *virt_end;
+ unsigned desc_size;
+ unsigned used_desc;
+ unsigned id;
+ unsigned num_desc;
+ unsigned link_index;
+ const char *name;
+ struct list_head list;
+ struct list_head pools;
+};
+
+/**
+ * struct knav_pool: qmss pools
+ * @dev: device pointer
+ * @region: qmss region info
+ * @queue: queue registers
+ * @kdev: qmss device pointer
+ * @region_offset: offset from the base
+ * @num_desc: total descriptors
+ * @desc_size: descriptor size
+ * @region_id: region number
+ * @name: pool name
+ * @list: list head
+ * @region_inst: instance in the region's pool list
+ */
+struct knav_pool {
+ struct device *dev;
+ struct knav_region *region;
+ struct knav_queue *queue;
+ struct knav_device *kdev;
+ int region_offset;
+ int num_desc;
+ int desc_size;
+ int region_id;
+ const char *name;
+ struct list_head list;
+ struct list_head region_inst;
+};
+
+/**
+ * struct knav_queue_inst: qmss queue instance properties
+ * @descs: descriptor pointer
+ * @desc_head, desc_tail, desc_count: descriptor counters
+ * @acc: accumulator channel pointer
+ * @kdev: qmss device pointer
+ * @range: range info
+ * @qmgr: queue manager info
+ * @id: queue instance id
+ * @irq_num: irq line number
+ * @notify_needed: notifier needed based on queue type
+ * @num_notifiers: total notifiers
+ * @handles: list head
+ * @name: queue instance name
+ * @irq_name: irq line name
+ */
+struct knav_queue_inst {
+ u32 *descs;
+ atomic_t desc_head, desc_tail, desc_count;
+ struct knav_acc_channel *acc;
+ struct knav_device *kdev;
+ struct knav_range_info *range;
+ struct knav_qmgr_info *qmgr;
+ u32 id;
+ int irq_num;
+ int notify_needed;
+ atomic_t num_notifiers;
+ struct list_head handles;
+ const char *name;
+ const char *irq_name;
+};
+
+/**
+ * struct knav_queue: qmss queue properties
+ * @reg_push, reg_pop, reg_peek: push, pop and peek queue registers
+ * @inst: qmss queue instance properties
+ * @stats: per-cpu queue statistics
+ * @notifier_fn: notifier function
+ * @notifier_fn_arg: notifier function argument
+ * @notifier_enabled: notifier enabled for a given queue
+ * @rcu: rcu head
+ * @flags: queue flags
+ * @list: list head
+ */
+struct knav_queue {
+ struct knav_reg_queue __iomem *reg_push, *reg_pop, *reg_peek;
+ struct knav_queue_inst *inst;
+ struct knav_queue_stats __percpu *stats;
+ knav_queue_notify_fn notifier_fn;
+ void *notifier_fn_arg;
+ atomic_t notifier_enabled;
+ struct rcu_head rcu;
+ unsigned flags;
+ struct list_head list;
+};
+
+enum qmss_version {
+ QMSS,
+ QMSS_66AK2G,
+};
+
+struct knav_device {
+ struct device *dev;
+ unsigned base_id;
+ unsigned num_queues;
+ unsigned num_queues_in_use;
+ unsigned inst_shift;
+ struct knav_link_ram_block link_rams[KNAV_NUM_LINKRAM];
+ void *instances;
+ struct list_head regions;
+ struct list_head queue_ranges;
+ struct list_head pools;
+ struct list_head pdsps;
+ struct list_head qmgrs;
+ enum qmss_version version;
+};
+
+struct knav_range_ops {
+ int (*init_range)(struct knav_range_info *range);
+ int (*free_range)(struct knav_range_info *range);
+ int (*init_queue)(struct knav_range_info *range,
+ struct knav_queue_inst *inst);
+ int (*open_queue)(struct knav_range_info *range,
+ struct knav_queue_inst *inst, unsigned flags);
+ int (*close_queue)(struct knav_range_info *range,
+ struct knav_queue_inst *inst);
+ int (*set_notify)(struct knav_range_info *range,
+ struct knav_queue_inst *inst, bool enabled);
+};
+
+struct knav_irq_info {
+ int irq;
+ struct cpumask *cpu_mask;
+};
+
+struct knav_range_info {
+ const char *name;
+ struct knav_device *kdev;
+ unsigned queue_base;
+ unsigned num_queues;
+ void *queue_base_inst;
+ unsigned flags;
+ struct list_head list;
+ struct knav_range_ops *ops;
+ struct knav_acc_info acc_info;
+ struct knav_acc_channel *acc;
+ unsigned num_irqs;
+ struct knav_irq_info irqs[RANGE_MAX_IRQS];
+};
+
+#define RANGE_RESERVED BIT(0)
+#define RANGE_HAS_IRQ BIT(1)
+#define RANGE_HAS_ACCUMULATOR BIT(2)
+#define RANGE_MULTI_QUEUE BIT(3)
+
+#define for_each_region(kdev, region) \
+ list_for_each_entry(region, &kdev->regions, list)
+
+#define first_region(kdev) \
+ list_first_entry_or_null(&kdev->regions, \
+ struct knav_region, list)
+
+#define for_each_queue_range(kdev, range) \
+ list_for_each_entry(range, &kdev->queue_ranges, list)
+
+#define first_queue_range(kdev) \
+ list_first_entry_or_null(&kdev->queue_ranges, \
+ struct knav_range_info, list)
+
+#define for_each_pool(kdev, pool) \
+ list_for_each_entry(pool, &kdev->pools, list)
+
+#define for_each_pdsp(kdev, pdsp) \
+ list_for_each_entry(pdsp, &kdev->pdsps, list)
+
+#define for_each_qmgr(kdev, qmgr) \
+ list_for_each_entry(qmgr, &kdev->qmgrs, list)
+
+static inline struct knav_pdsp_info *
+knav_find_pdsp(struct knav_device *kdev, unsigned pdsp_id)
+{
+ struct knav_pdsp_info *pdsp;
+
+ for_each_pdsp(kdev, pdsp)
+ if (pdsp_id == pdsp->id)
+ return pdsp;
+ return NULL;
+}
+
+extern int knav_init_acc_range(struct knav_device *kdev,
+ struct device_node *node,
+ struct knav_range_info *range);
+extern void knav_queue_notify(struct knav_queue_inst *inst);
+
+#endif /* __KNAV_QMSS_H__ */
diff --git a/drivers/soc/ti/knav_qmss_acc.c b/drivers/soc/ti/knav_qmss_acc.c
new file mode 100644
index 000000000..fde66e28e
--- /dev/null
+++ b/drivers/soc/ti/knav_qmss_acc.c
@@ -0,0 +1,584 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Keystone accumulator queue manager
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Sandeep Nair <sandeep_n@ti.com>
+ * Cyril Chemparathy <cyril@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/soc/ti/knav_qmss.h>
+
+#include "knav_qmss.h"
+
+#define knav_range_offset_to_inst(kdev, range, q) \
+ (range->queue_base_inst + (q << kdev->inst_shift))
+
+static void __knav_acc_notify(struct knav_range_info *range,
+ struct knav_acc_channel *acc)
+{
+ struct knav_device *kdev = range->kdev;
+ struct knav_queue_inst *inst;
+ int range_base, queue;
+
+ range_base = kdev->base_id + range->queue_base;
+
+ if (range->flags & RANGE_MULTI_QUEUE) {
+ for (queue = 0; queue < range->num_queues; queue++) {
+ inst = knav_range_offset_to_inst(kdev, range,
+ queue);
+ if (inst->notify_needed) {
+ inst->notify_needed = 0;
+ dev_dbg(kdev->dev, "acc-irq: notifying %d\n",
+ range_base + queue);
+ knav_queue_notify(inst);
+ }
+ }
+ } else {
+ queue = acc->channel - range->acc_info.start_channel;
+ inst = knav_range_offset_to_inst(kdev, range, queue);
+ dev_dbg(kdev->dev, "acc-irq: notifying %d\n",
+ range_base + queue);
+ knav_queue_notify(inst);
+ }
+}
+
+static int knav_acc_set_notify(struct knav_range_info *range,
+ struct knav_queue_inst *kq,
+ bool enabled)
+{
+ struct knav_pdsp_info *pdsp = range->acc_info.pdsp;
+ struct knav_device *kdev = range->kdev;
+ u32 mask, offset;
+
+ /*
+ * when enabling, we need to re-trigger an interrupt if we
+ * have descriptors pending
+ */
+ if (!enabled || atomic_read(&kq->desc_count) <= 0)
+ return 0;
+
+ kq->notify_needed = 1;
+ atomic_inc(&kq->acc->retrigger_count);
+ mask = BIT(kq->acc->channel % 32);
+ offset = ACC_INTD_OFFSET_STATUS(kq->acc->channel);
+ dev_dbg(kdev->dev, "setup-notify: re-triggering irq for %s\n",
+ kq->acc->name);
+ writel_relaxed(mask, pdsp->intd + offset);
+ return 0;
+}
+
+static irqreturn_t knav_acc_int_handler(int irq, void *_instdata)
+{
+ struct knav_acc_channel *acc;
+ struct knav_queue_inst *kq = NULL;
+ struct knav_range_info *range;
+ struct knav_pdsp_info *pdsp;
+ struct knav_acc_info *info;
+ struct knav_device *kdev;
+
+ u32 *list, *list_cpu, val, idx, notifies;
+ int range_base, channel, queue = 0;
+ dma_addr_t list_dma;
+
+ range = _instdata;
+ info = &range->acc_info;
+ kdev = range->kdev;
+ pdsp = range->acc_info.pdsp;
+ acc = range->acc;
+
+ range_base = kdev->base_id + range->queue_base;
+ if ((range->flags & RANGE_MULTI_QUEUE) == 0) {
+ for (queue = 0; queue < range->num_irqs; queue++)
+ if (range->irqs[queue].irq == irq)
+ break;
+ kq = knav_range_offset_to_inst(kdev, range, queue);
+ acc += queue;
+ }
+
+ channel = acc->channel;
+ list_dma = acc->list_dma[acc->list_index];
+ list_cpu = acc->list_cpu[acc->list_index];
+ dev_dbg(kdev->dev, "acc-irq: channel %d, list %d, virt %p, dma %pad\n",
+ channel, acc->list_index, list_cpu, &list_dma);
+ if (atomic_read(&acc->retrigger_count)) {
+ atomic_dec(&acc->retrigger_count);
+ __knav_acc_notify(range, acc);
+ writel_relaxed(1, pdsp->intd + ACC_INTD_OFFSET_COUNT(channel));
+ /* ack the interrupt */
+ writel_relaxed(ACC_CHANNEL_INT_BASE + channel,
+ pdsp->intd + ACC_INTD_OFFSET_EOI);
+
+ return IRQ_HANDLED;
+ }
+
+ notifies = readl_relaxed(pdsp->intd + ACC_INTD_OFFSET_COUNT(channel));
+ WARN_ON(!notifies);
+ dma_sync_single_for_cpu(kdev->dev, list_dma, info->list_size,
+ DMA_FROM_DEVICE);
+
+ for (list = list_cpu; list < list_cpu + (info->list_size / sizeof(u32));
+ list += ACC_LIST_ENTRY_WORDS) {
+ if (ACC_LIST_ENTRY_WORDS == 1) {
+ dev_dbg(kdev->dev,
+ "acc-irq: list %d, entry @%p, %08x\n",
+ acc->list_index, list, list[0]);
+ } else if (ACC_LIST_ENTRY_WORDS == 2) {
+ dev_dbg(kdev->dev,
+ "acc-irq: list %d, entry @%p, %08x %08x\n",
+ acc->list_index, list, list[0], list[1]);
+ } else if (ACC_LIST_ENTRY_WORDS == 4) {
+ dev_dbg(kdev->dev,
+ "acc-irq: list %d, entry @%p, %08x %08x %08x %08x\n",
+ acc->list_index, list, list[0], list[1],
+ list[2], list[3]);
+ }
+
+ val = list[ACC_LIST_ENTRY_DESC_IDX];
+ if (!val)
+ break;
+
+ if (range->flags & RANGE_MULTI_QUEUE) {
+ queue = list[ACC_LIST_ENTRY_QUEUE_IDX] >> 16;
+ if (queue < range_base ||
+ queue >= range_base + range->num_queues) {
+ dev_err(kdev->dev,
+ "bad queue %d, expecting %d-%d\n",
+ queue, range_base,
+ range_base + range->num_queues);
+ break;
+ }
+ queue -= range_base;
+ kq = knav_range_offset_to_inst(kdev, range,
+ queue);
+ }
+
+ if (atomic_inc_return(&kq->desc_count) >= ACC_DESCS_MAX) {
+ atomic_dec(&kq->desc_count);
+ dev_err(kdev->dev,
+ "acc-irq: queue %d full, entry dropped\n",
+ queue + range_base);
+ continue;
+ }
+
+ idx = atomic_inc_return(&kq->desc_tail) & ACC_DESCS_MASK;
+ kq->descs[idx] = val;
+ kq->notify_needed = 1;
+ dev_dbg(kdev->dev, "acc-irq: enqueue %08x at %d, queue %d\n",
+ val, idx, queue + range_base);
+ }
+
+ __knav_acc_notify(range, acc);
+ memset(list_cpu, 0, info->list_size);
+ dma_sync_single_for_device(kdev->dev, list_dma, info->list_size,
+ DMA_TO_DEVICE);
+
+ /* flip to the other list */
+ acc->list_index ^= 1;
+
+ /* reset the interrupt counter */
+ writel_relaxed(1, pdsp->intd + ACC_INTD_OFFSET_COUNT(channel));
+
+ /* ack the interrupt */
+ writel_relaxed(ACC_CHANNEL_INT_BASE + channel,
+ pdsp->intd + ACC_INTD_OFFSET_EOI);
+
+ return IRQ_HANDLED;
+}
+
+static int knav_range_setup_acc_irq(struct knav_range_info *range,
+ int queue, bool enabled)
+{
+ struct knav_device *kdev = range->kdev;
+ struct knav_acc_channel *acc;
+ struct cpumask *cpu_mask;
+ int ret = 0, irq;
+ u32 old, new;
+
+ if (range->flags & RANGE_MULTI_QUEUE) {
+ acc = range->acc;
+ irq = range->irqs[0].irq;
+ cpu_mask = range->irqs[0].cpu_mask;
+ } else {
+ acc = range->acc + queue;
+ irq = range->irqs[queue].irq;
+ cpu_mask = range->irqs[queue].cpu_mask;
+ }
+
+ old = acc->open_mask;
+ if (enabled)
+ new = old | BIT(queue);
+ else
+ new = old & ~BIT(queue);
+ acc->open_mask = new;
+
+ dev_dbg(kdev->dev,
+ "setup-acc-irq: open mask old %08x, new %08x, channel %s\n",
+ old, new, acc->name);
+
+ if (likely(new == old))
+ return 0;
+
+ if (new && !old) {
+ dev_dbg(kdev->dev,
+ "setup-acc-irq: requesting %s for channel %s\n",
+ acc->name, acc->name);
+ ret = request_irq(irq, knav_acc_int_handler, 0, acc->name,
+ range);
+ if (!ret && cpu_mask) {
+ ret = irq_set_affinity_hint(irq, cpu_mask);
+ if (ret) {
+ dev_warn(range->kdev->dev,
+ "Failed to set IRQ affinity\n");
+ return ret;
+ }
+ }
+ }
+
+ if (old && !new) {
+ dev_dbg(kdev->dev, "setup-acc-irq: freeing %s for channel %s\n",
+ acc->name, acc->name);
+ ret = irq_set_affinity_hint(irq, NULL);
+ if (ret)
+ dev_warn(range->kdev->dev,
+ "Failed to set IRQ affinity\n");
+ free_irq(irq, range);
+ }
+
+ return ret;
+}
+
+static const char *knav_acc_result_str(enum knav_acc_result result)
+{
+ static const char * const result_str[] = {
+ [ACC_RET_IDLE] = "idle",
+ [ACC_RET_SUCCESS] = "success",
+ [ACC_RET_INVALID_COMMAND] = "invalid command",
+ [ACC_RET_INVALID_CHANNEL] = "invalid channel",
+ [ACC_RET_INACTIVE_CHANNEL] = "inactive channel",
+ [ACC_RET_ACTIVE_CHANNEL] = "active channel",
+ [ACC_RET_INVALID_QUEUE] = "invalid queue",
+ [ACC_RET_INVALID_RET] = "invalid return code",
+ };
+
+ if (result >= ARRAY_SIZE(result_str))
+ return result_str[ACC_RET_INVALID_RET];
+ else
+ return result_str[result];
+}
+
+static enum knav_acc_result
+knav_acc_write(struct knav_device *kdev, struct knav_pdsp_info *pdsp,
+ struct knav_reg_acc_command *cmd)
+{
+ u32 result;
+
+ dev_dbg(kdev->dev, "acc command %08x %08x %08x %08x %08x\n",
+ cmd->command, cmd->queue_mask, cmd->list_dma,
+ cmd->queue_num, cmd->timer_config);
+
+ writel_relaxed(cmd->timer_config, &pdsp->acc_command->timer_config);
+ writel_relaxed(cmd->queue_num, &pdsp->acc_command->queue_num);
+ writel_relaxed(cmd->list_dma, &pdsp->acc_command->list_dma);
+ writel_relaxed(cmd->queue_mask, &pdsp->acc_command->queue_mask);
+ writel_relaxed(cmd->command, &pdsp->acc_command->command);
+
+ /* wait for the command to clear */
+ do {
+ result = readl_relaxed(&pdsp->acc_command->command);
+ } while ((result >> 8) & 0xff);
+
+ return (result >> 24) & 0xff;
+}
+
+static void knav_acc_setup_cmd(struct knav_device *kdev,
+ struct knav_range_info *range,
+ struct knav_reg_acc_command *cmd,
+ int queue)
+{
+ struct knav_acc_info *info = &range->acc_info;
+ struct knav_acc_channel *acc;
+ int queue_base;
+ u32 queue_mask;
+
+ if (range->flags & RANGE_MULTI_QUEUE) {
+ acc = range->acc;
+ queue_base = range->queue_base;
+ queue_mask = BIT(range->num_queues) - 1;
+ } else {
+ acc = range->acc + queue;
+ queue_base = range->queue_base + queue;
+ queue_mask = 0;
+ }
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->command = acc->channel;
+ cmd->queue_mask = queue_mask;
+ cmd->list_dma = (u32)acc->list_dma[0];
+ cmd->queue_num = info->list_entries << 16;
+ cmd->queue_num |= queue_base;
+
+ cmd->timer_config = ACC_LIST_ENTRY_TYPE << 18;
+ if (range->flags & RANGE_MULTI_QUEUE)
+ cmd->timer_config |= ACC_CFG_MULTI_QUEUE;
+ cmd->timer_config |= info->pacing_mode << 16;
+ cmd->timer_config |= info->timer_count;
+}
+
+static void knav_acc_stop(struct knav_device *kdev,
+ struct knav_range_info *range,
+ int queue)
+{
+ struct knav_reg_acc_command cmd;
+ struct knav_acc_channel *acc;
+ enum knav_acc_result result;
+
+ acc = range->acc + queue;
+
+ knav_acc_setup_cmd(kdev, range, &cmd, queue);
+ cmd.command |= ACC_CMD_DISABLE_CHANNEL << 8;
+ result = knav_acc_write(kdev, range->acc_info.pdsp, &cmd);
+
+ dev_dbg(kdev->dev, "stopped acc channel %s, result %s\n",
+ acc->name, knav_acc_result_str(result));
+}
+
+static enum knav_acc_result knav_acc_start(struct knav_device *kdev,
+ struct knav_range_info *range,
+ int queue)
+{
+ struct knav_reg_acc_command cmd;
+ struct knav_acc_channel *acc;
+ enum knav_acc_result result;
+
+ acc = range->acc + queue;
+
+ knav_acc_setup_cmd(kdev, range, &cmd, queue);
+ cmd.command |= ACC_CMD_ENABLE_CHANNEL << 8;
+ result = knav_acc_write(kdev, range->acc_info.pdsp, &cmd);
+
+ dev_dbg(kdev->dev, "started acc channel %s, result %s\n",
+ acc->name, knav_acc_result_str(result));
+
+ return result;
+}
+
+static int knav_acc_init_range(struct knav_range_info *range)
+{
+ struct knav_device *kdev = range->kdev;
+ struct knav_acc_channel *acc;
+ enum knav_acc_result result;
+ int queue;
+
+ for (queue = 0; queue < range->num_queues; queue++) {
+ acc = range->acc + queue;
+
+ knav_acc_stop(kdev, range, queue);
+ acc->list_index = 0;
+ result = knav_acc_start(kdev, range, queue);
+
+ if (result != ACC_RET_SUCCESS)
+ return -EIO;
+
+ if (range->flags & RANGE_MULTI_QUEUE)
+ return 0;
+ }
+ return 0;
+}
+
+static int knav_acc_init_queue(struct knav_range_info *range,
+ struct knav_queue_inst *kq)
+{
+ unsigned id = kq->id - range->queue_base;
+
+ kq->descs = devm_kcalloc(range->kdev->dev,
+ ACC_DESCS_MAX, sizeof(u32), GFP_KERNEL);
+ if (!kq->descs)
+ return -ENOMEM;
+
+ kq->acc = range->acc;
+ if ((range->flags & RANGE_MULTI_QUEUE) == 0)
+ kq->acc += id;
+ return 0;
+}
+
+static int knav_acc_open_queue(struct knav_range_info *range,
+ struct knav_queue_inst *inst, unsigned flags)
+{
+ unsigned id = inst->id - range->queue_base;
+
+ return knav_range_setup_acc_irq(range, id, true);
+}
+
+static int knav_acc_close_queue(struct knav_range_info *range,
+ struct knav_queue_inst *inst)
+{
+ unsigned id = inst->id - range->queue_base;
+
+ return knav_range_setup_acc_irq(range, id, false);
+}
+
+static int knav_acc_free_range(struct knav_range_info *range)
+{
+ struct knav_device *kdev = range->kdev;
+ struct knav_acc_channel *acc;
+ struct knav_acc_info *info;
+ int channel, channels;
+
+ info = &range->acc_info;
+
+ if (range->flags & RANGE_MULTI_QUEUE)
+ channels = 1;
+ else
+ channels = range->num_queues;
+
+ for (channel = 0; channel < channels; channel++) {
+ acc = range->acc + channel;
+ if (!acc->list_cpu[0])
+ continue;
+ dma_unmap_single(kdev->dev, acc->list_dma[0],
+ info->mem_size, DMA_BIDIRECTIONAL);
+ free_pages_exact(acc->list_cpu[0], info->mem_size);
+ }
+ devm_kfree(range->kdev->dev, range->acc);
+ return 0;
+}
+
+static struct knav_range_ops knav_acc_range_ops = {
+ .set_notify = knav_acc_set_notify,
+ .init_queue = knav_acc_init_queue,
+ .open_queue = knav_acc_open_queue,
+ .close_queue = knav_acc_close_queue,
+ .init_range = knav_acc_init_range,
+ .free_range = knav_acc_free_range,
+};
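+
+/*
+ * The five-cell "accumulator" property parsed below carries, in order,
+ * the pdsp id, start channel, list entries, pacing mode and timer
+ * period in usecs. An illustrative (hypothetical) value:
+ *
+ *	accumulator = <0 36 16 2 50>;
+ *
+ * i.e. PDSP 0, channel 36, 16 list entries, pacing mode 2 and a 50 us
+ * period, which becomes timer_count = 50 / 25 = 2 with the default
+ * 25 us accumulator tick.
+ */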
+
+/**
+ * knav_init_acc_range: Initialise accumulator ranges
+ *
+ * @kdev: qmss device
+ * @node: device node
+ * @range: qmss range information
+ *
+ * Returns 0 on success or an error code
+ */
+int knav_init_acc_range(struct knav_device *kdev,
+ struct device_node *node,
+ struct knav_range_info *range)
+{
+ struct knav_acc_channel *acc;
+ struct knav_pdsp_info *pdsp;
+ struct knav_acc_info *info;
+ int ret, channel, channels;
+ int list_size, mem_size;
+ dma_addr_t list_dma;
+ void *list_mem;
+ u32 config[5];
+
+ range->flags |= RANGE_HAS_ACCUMULATOR;
+ info = &range->acc_info;
+
+ ret = of_property_read_u32_array(node, "accumulator", config, 5);
+ if (ret)
+ return ret;
+
+ info->pdsp_id = config[0];
+ info->start_channel = config[1];
+ info->list_entries = config[2];
+ info->pacing_mode = config[3];
+ info->timer_count = config[4] / ACC_DEFAULT_PERIOD;
+
+ if (info->start_channel > ACC_MAX_CHANNEL) {
+ dev_err(kdev->dev, "channel %d invalid for range %s\n",
+ info->start_channel, range->name);
+ return -EINVAL;
+ }
+
+ if (info->pacing_mode > 3) {
+ dev_err(kdev->dev, "pacing mode %d invalid for range %s\n",
+ info->pacing_mode, range->name);
+ return -EINVAL;
+ }
+
+ pdsp = knav_find_pdsp(kdev, info->pdsp_id);
+ if (!pdsp) {
+ dev_err(kdev->dev, "pdsp id %d not found for range %s\n",
+ info->pdsp_id, range->name);
+ return -EINVAL;
+ }
+
+ if (!pdsp->started) {
+ dev_err(kdev->dev, "pdsp id %d not started for range %s\n",
+ info->pdsp_id, range->name);
+ return -ENODEV;
+ }
+
+ info->pdsp = pdsp;
+ channels = range->num_queues;
+ if (of_get_property(node, "multi-queue", NULL)) {
+ range->flags |= RANGE_MULTI_QUEUE;
+ channels = 1;
+ if (range->queue_base & (32 - 1)) {
+ dev_err(kdev->dev,
+ "misaligned multi-queue accumulator range %s\n",
+ range->name);
+ return -EINVAL;
+ }
+ if (range->num_queues > 32) {
+ dev_err(kdev->dev,
+ "too many queues in accumulator range %s\n",
+ range->name);
+ return -EINVAL;
+ }
+ }
+
+ /* figure out list size */
+ list_size = info->list_entries;
+ list_size *= ACC_LIST_ENTRY_WORDS * sizeof(u32);
+ info->list_size = list_size;
+ mem_size = PAGE_ALIGN(list_size * 2);
+ info->mem_size = mem_size;
+ range->acc = devm_kcalloc(kdev->dev, channels, sizeof(*range->acc),
+ GFP_KERNEL);
+ if (!range->acc)
+ return -ENOMEM;
+
+ for (channel = 0; channel < channels; channel++) {
+ acc = range->acc + channel;
+ acc->channel = info->start_channel + channel;
+
+ /* allocate memory for the two lists */
+ list_mem = alloc_pages_exact(mem_size, GFP_KERNEL | GFP_DMA);
+ if (!list_mem)
+ return -ENOMEM;
+
+ list_dma = dma_map_single(kdev->dev, list_mem, mem_size,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(kdev->dev, list_dma)) {
+ free_pages_exact(list_mem, mem_size);
+ return -ENOMEM;
+ }
+
+ memset(list_mem, 0, mem_size);
+ dma_sync_single_for_device(kdev->dev, list_dma, mem_size,
+ DMA_TO_DEVICE);
+ scnprintf(acc->name, sizeof(acc->name), "hwqueue-acc-%d",
+ acc->channel);
+ acc->list_cpu[0] = list_mem;
+ acc->list_cpu[1] = list_mem + list_size;
+ acc->list_dma[0] = list_dma;
+ acc->list_dma[1] = list_dma + list_size;
+ dev_dbg(kdev->dev, "%s: channel %d, dma %pad, virt %8p\n",
+ acc->name, acc->channel, &list_dma, list_mem);
+ }
+
+ range->ops = &knav_acc_range_ops;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(knav_init_acc_range);
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
new file mode 100644
index 000000000..8fb76908b
--- /dev/null
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -0,0 +1,1908 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Keystone Queue Manager subsystem driver
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
+ * Authors: Sandeep Nair <sandeep_n@ti.com>
+ * Cyril Chemparathy <cyril@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/soc/ti/knav_qmss.h>
+
+#include "knav_qmss.h"
+
+static struct knav_device *kdev;
+static DEFINE_MUTEX(knav_dev_lock);
+#define knav_dev_lock_held() \
+ lockdep_is_held(&knav_dev_lock)
+
+/* Queue manager register indices in DTS */
+#define KNAV_QUEUE_PEEK_REG_INDEX 0
+#define KNAV_QUEUE_STATUS_REG_INDEX 1
+#define KNAV_QUEUE_CONFIG_REG_INDEX 2
+#define KNAV_QUEUE_REGION_REG_INDEX 3
+#define KNAV_QUEUE_PUSH_REG_INDEX 4
+#define KNAV_QUEUE_POP_REG_INDEX 5
+
+/* Queue manager register indices in DTS for QMSS in K2G NAVSS.
+ * There are no status and vbusm push registers on this version
+ * of QMSS. Push registers are the same as pop, so all indices
+ * above 1 are re-defined.
+ */
+#define KNAV_L_QUEUE_CONFIG_REG_INDEX 1
+#define KNAV_L_QUEUE_REGION_REG_INDEX 2
+#define KNAV_L_QUEUE_PUSH_REG_INDEX 3
+
+/* PDSP register indices in DTS */
+#define KNAV_QUEUE_PDSP_IRAM_REG_INDEX 0
+#define KNAV_QUEUE_PDSP_REGS_REG_INDEX 1
+#define KNAV_QUEUE_PDSP_INTD_REG_INDEX 2
+#define KNAV_QUEUE_PDSP_CMD_REG_INDEX 3
+
+#define knav_queue_idx_to_inst(kdev, idx) \
+ (kdev->instances + (idx << kdev->inst_shift))
+
+#define for_each_handle_rcu(qh, inst) \
+ list_for_each_entry_rcu(qh, &inst->handles, list, \
+ knav_dev_lock_held())
+
+#define for_each_instance(idx, inst, kdev) \
+ for (idx = 0, inst = kdev->instances; \
+ idx < (kdev)->num_queues_in_use; \
+ idx++, inst = knav_queue_idx_to_inst(kdev, idx))
+
+/* All firmware file names are listed here, newest first. The search
+ * walks the array from the start until a firmware file is found.
+ */
+static const char * const knav_acc_firmwares[] = {"ks2_qmss_pdsp_acc48.bin"};
+
+static bool device_ready;
+bool knav_qmss_device_ready(void)
+{
+ return device_ready;
+}
+EXPORT_SYMBOL_GPL(knav_qmss_device_ready);
+
+/**
+ * knav_queue_notify: qmss queue notifier call
+ *
+ * @inst: - qmss queue instance, e.g. an accumulator queue
+ */
+void knav_queue_notify(struct knav_queue_inst *inst)
+{
+ struct knav_queue *qh;
+
+ if (!inst)
+ return;
+
+ rcu_read_lock();
+ for_each_handle_rcu(qh, inst) {
+ if (atomic_read(&qh->notifier_enabled) <= 0)
+ continue;
+ if (WARN_ON(!qh->notifier_fn))
+ continue;
+ this_cpu_inc(qh->stats->notifies);
+ qh->notifier_fn(qh->notifier_fn_arg);
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(knav_queue_notify);
+
+static irqreturn_t knav_queue_int_handler(int irq, void *_instdata)
+{
+ struct knav_queue_inst *inst = _instdata;
+
+ knav_queue_notify(inst);
+ return IRQ_HANDLED;
+}
+
+static int knav_queue_setup_irq(struct knav_range_info *range,
+ struct knav_queue_inst *inst)
+{
+ unsigned queue = inst->id - range->queue_base;
+ int ret = 0, irq;
+
+ if (range->flags & RANGE_HAS_IRQ) {
+ irq = range->irqs[queue].irq;
+ ret = request_irq(irq, knav_queue_int_handler, 0,
+ inst->irq_name, inst);
+ if (ret)
+ return ret;
+ disable_irq(irq);
+ if (range->irqs[queue].cpu_mask) {
+ ret = irq_set_affinity_hint(irq, range->irqs[queue].cpu_mask);
+ if (ret) {
+ dev_warn(range->kdev->dev,
+ "Failed to set IRQ affinity\n");
+ return ret;
+ }
+ }
+ }
+ return ret;
+}
+
+static void knav_queue_free_irq(struct knav_queue_inst *inst)
+{
+ struct knav_range_info *range = inst->range;
+ unsigned queue = inst->id - inst->range->queue_base;
+ int irq;
+
+ if (range->flags & RANGE_HAS_IRQ) {
+ irq = range->irqs[queue].irq;
+ irq_set_affinity_hint(irq, NULL);
+ free_irq(irq, inst);
+ }
+}
+
+static inline bool knav_queue_is_busy(struct knav_queue_inst *inst)
+{
+ return !list_empty(&inst->handles);
+}
+
+static inline bool knav_queue_is_reserved(struct knav_queue_inst *inst)
+{
+ return inst->range->flags & RANGE_RESERVED;
+}
+
+static inline bool knav_queue_is_shared(struct knav_queue_inst *inst)
+{
+ struct knav_queue *tmp;
+
+ rcu_read_lock();
+ for_each_handle_rcu(tmp, inst) {
+ if (tmp->flags & KNAV_QUEUE_SHARED) {
+ rcu_read_unlock();
+ return true;
+ }
+ }
+ rcu_read_unlock();
+ return false;
+}
+
+static inline bool knav_queue_match_type(struct knav_queue_inst *inst,
+ unsigned type)
+{
+ if ((type == KNAV_QUEUE_QPEND) &&
+ (inst->range->flags & RANGE_HAS_IRQ)) {
+ return true;
+ } else if ((type == KNAV_QUEUE_ACC) &&
+ (inst->range->flags & RANGE_HAS_ACCUMULATOR)) {
+ return true;
+ } else if ((type == KNAV_QUEUE_GP) &&
+ !(inst->range->flags &
+ (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ))) {
+ return true;
+ }
+ return false;
+}
+
+static inline struct knav_queue_inst *
+knav_queue_match_id_to_inst(struct knav_device *kdev, unsigned id)
+{
+ struct knav_queue_inst *inst;
+ int idx;
+
+ for_each_instance(idx, inst, kdev) {
+ if (inst->id == id)
+ return inst;
+ }
+ return NULL;
+}
+
+static inline struct knav_queue_inst *knav_queue_find_by_id(int id)
+{
+ if (kdev->base_id <= id &&
+ kdev->base_id + kdev->num_queues > id) {
+ id -= kdev->base_id;
+ return knav_queue_match_id_to_inst(kdev, id);
+ }
+ return NULL;
+}
+
+static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst,
+ const char *name, unsigned flags)
+{
+ struct knav_queue *qh;
+ unsigned id;
+ int ret = 0;
+
+ qh = devm_kzalloc(inst->kdev->dev, sizeof(*qh), GFP_KERNEL);
+ if (!qh)
+ return ERR_PTR(-ENOMEM);
+
+ qh->stats = alloc_percpu(struct knav_queue_stats);
+ if (!qh->stats) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ qh->flags = flags;
+ qh->inst = inst;
+ id = inst->id - inst->qmgr->start_queue;
+ qh->reg_push = &inst->qmgr->reg_push[id];
+ qh->reg_pop = &inst->qmgr->reg_pop[id];
+ qh->reg_peek = &inst->qmgr->reg_peek[id];
+
+ /* first opener? */
+ if (!knav_queue_is_busy(inst)) {
+ struct knav_range_info *range = inst->range;
+
+ inst->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
+ if (range->ops && range->ops->open_queue)
+ ret = range->ops->open_queue(range, inst, flags);
+
+ if (ret)
+ goto err;
+ }
+ list_add_tail_rcu(&qh->list, &inst->handles);
+ return qh;
+
+err:
+ if (qh->stats)
+ free_percpu(qh->stats);
+ devm_kfree(inst->kdev->dev, qh);
+ return ERR_PTR(ret);
+}
+
+static struct knav_queue *
+knav_queue_open_by_id(const char *name, unsigned id, unsigned flags)
+{
+ struct knav_queue_inst *inst;
+ struct knav_queue *qh;
+
+ mutex_lock(&knav_dev_lock);
+
+ qh = ERR_PTR(-ENODEV);
+ inst = knav_queue_find_by_id(id);
+ if (!inst)
+ goto unlock_ret;
+
+ qh = ERR_PTR(-EEXIST);
+ if (!(flags & KNAV_QUEUE_SHARED) && knav_queue_is_busy(inst))
+ goto unlock_ret;
+
+ qh = ERR_PTR(-EBUSY);
+ if ((flags & KNAV_QUEUE_SHARED) &&
+ (knav_queue_is_busy(inst) && !knav_queue_is_shared(inst)))
+ goto unlock_ret;
+
+ qh = __knav_queue_open(inst, name, flags);
+
+unlock_ret:
+ mutex_unlock(&knav_dev_lock);
+
+ return qh;
+}
+
+static struct knav_queue *knav_queue_open_by_type(const char *name,
+ unsigned type, unsigned flags)
+{
+ struct knav_queue_inst *inst;
+ struct knav_queue *qh = ERR_PTR(-EINVAL);
+ int idx;
+
+ mutex_lock(&knav_dev_lock);
+
+ for_each_instance(idx, inst, kdev) {
+ if (knav_queue_is_reserved(inst))
+ continue;
+ if (!knav_queue_match_type(inst, type))
+ continue;
+ if (knav_queue_is_busy(inst))
+ continue;
+ qh = __knav_queue_open(inst, name, flags);
+ goto unlock_ret;
+ }
+
+unlock_ret:
+ mutex_unlock(&knav_dev_lock);
+ return qh;
+}
+
+static void knav_queue_set_notify(struct knav_queue_inst *inst, bool enabled)
+{
+ struct knav_range_info *range = inst->range;
+
+ if (range->ops && range->ops->set_notify)
+ range->ops->set_notify(range, inst, enabled);
+}
+
+static int knav_queue_enable_notifier(struct knav_queue *qh)
+{
+ struct knav_queue_inst *inst = qh->inst;
+ bool first;
+
+ if (WARN_ON(!qh->notifier_fn))
+ return -EINVAL;
+
+ /* Adjust the per handle notifier count */
+ first = (atomic_inc_return(&qh->notifier_enabled) == 1);
+ if (!first)
+ return 0; /* nothing to do */
+
+ /* Now adjust the per instance notifier count */
+ first = (atomic_inc_return(&inst->num_notifiers) == 1);
+ if (first)
+ knav_queue_set_notify(inst, true);
+
+ return 0;
+}
+
+static int knav_queue_disable_notifier(struct knav_queue *qh)
+{
+ struct knav_queue_inst *inst = qh->inst;
+ bool last;
+
+ last = (atomic_dec_return(&qh->notifier_enabled) == 0);
+ if (!last)
+ return 0; /* nothing to do */
+
+ last = (atomic_dec_return(&inst->num_notifiers) == 0);
+ if (last)
+ knav_queue_set_notify(inst, false);
+
+ return 0;
+}
+
+static int knav_queue_set_notifier(struct knav_queue *qh,
+ struct knav_queue_notify_config *cfg)
+{
+ knav_queue_notify_fn old_fn = qh->notifier_fn;
+
+ if (!cfg)
+ return -EINVAL;
+
+ if (!(qh->inst->range->flags & (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ)))
+ return -ENOTSUPP;
+
+ if (!cfg->fn && old_fn)
+ knav_queue_disable_notifier(qh);
+
+ qh->notifier_fn = cfg->fn;
+ qh->notifier_fn_arg = cfg->fn_arg;
+
+ if (cfg->fn && !old_fn)
+ knav_queue_enable_notifier(qh);
+
+ return 0;
+}
+
+static int knav_gp_set_notify(struct knav_range_info *range,
+ struct knav_queue_inst *inst,
+ bool enabled)
+{
+ unsigned queue;
+
+ if (range->flags & RANGE_HAS_IRQ) {
+ queue = inst->id - range->queue_base;
+ if (enabled)
+ enable_irq(range->irqs[queue].irq);
+ else
+ disable_irq_nosync(range->irqs[queue].irq);
+ }
+ return 0;
+}
+
+static int knav_gp_open_queue(struct knav_range_info *range,
+ struct knav_queue_inst *inst, unsigned flags)
+{
+ return knav_queue_setup_irq(range, inst);
+}
+
+static int knav_gp_close_queue(struct knav_range_info *range,
+ struct knav_queue_inst *inst)
+{
+ knav_queue_free_irq(inst);
+ return 0;
+}
+
+static struct knav_range_ops knav_gp_range_ops = {
+ .set_notify = knav_gp_set_notify,
+ .open_queue = knav_gp_open_queue,
+ .close_queue = knav_gp_close_queue,
+};
+
+
+static int knav_queue_get_count(void *qhandle)
+{
+ struct knav_queue *qh = qhandle;
+ struct knav_queue_inst *inst = qh->inst;
+
+ return readl_relaxed(&qh->reg_peek[0].entry_count) +
+ atomic_read(&inst->desc_count);
+}
+
+static void knav_queue_debug_show_instance(struct seq_file *s,
+ struct knav_queue_inst *inst)
+{
+ struct knav_device *kdev = inst->kdev;
+ struct knav_queue *qh;
+ int cpu = 0;
+ int pushes = 0;
+ int pops = 0;
+ int push_errors = 0;
+ int pop_errors = 0;
+ int notifies = 0;
+
+ if (!knav_queue_is_busy(inst))
+ return;
+
+ seq_printf(s, "\tqueue id %d (%s)\n",
+ kdev->base_id + inst->id, inst->name);
+ for_each_handle_rcu(qh, inst) {
+ for_each_possible_cpu(cpu) {
+ pushes += per_cpu_ptr(qh->stats, cpu)->pushes;
+ pops += per_cpu_ptr(qh->stats, cpu)->pops;
+ push_errors += per_cpu_ptr(qh->stats, cpu)->push_errors;
+ pop_errors += per_cpu_ptr(qh->stats, cpu)->pop_errors;
+ notifies += per_cpu_ptr(qh->stats, cpu)->notifies;
+ }
+
+ seq_printf(s, "\t\thandle %p: pushes %8d, pops %8d, count %8d, notifies %8d, push errors %8d, pop errors %8d\n",
+ qh,
+ pushes,
+ pops,
+ knav_queue_get_count(qh),
+ notifies,
+ push_errors,
+ pop_errors);
+ }
+}
+
+static int knav_queue_debug_show(struct seq_file *s, void *v)
+{
+ struct knav_queue_inst *inst;
+ int idx;
+
+ mutex_lock(&knav_dev_lock);
+ seq_printf(s, "%s: %u-%u\n",
+ dev_name(kdev->dev), kdev->base_id,
+ kdev->base_id + kdev->num_queues - 1);
+ for_each_instance(idx, inst, kdev)
+ knav_queue_debug_show_instance(s, inst);
+ mutex_unlock(&knav_dev_lock);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(knav_queue_debug);
+
+static inline int knav_queue_pdsp_wait(u32 __iomem *addr, unsigned timeout,
+					u32 flags)
+{
+ unsigned long end;
+ u32 val = 0;
+
+ end = jiffies + msecs_to_jiffies(timeout);
+ while (time_after(end, jiffies)) {
+ val = readl_relaxed(addr);
+ if (flags)
+ val &= flags;
+ if (!val)
+ break;
+ cpu_relax();
+ }
+ return val ? -ETIMEDOUT : 0;
+}
+
+
+static int knav_queue_flush(struct knav_queue *qh)
+{
+ struct knav_queue_inst *inst = qh->inst;
+ unsigned id = inst->id - inst->qmgr->start_queue;
+
+ atomic_set(&inst->desc_count, 0);
+ writel_relaxed(0, &inst->qmgr->reg_push[id].ptr_size_thresh);
+ return 0;
+}
+
+/**
+ * knav_queue_open() - open a hardware queue
+ * @name: - name to give the queue handle
+ * @id: - desired queue number if any or specifies the type
+ * of queue
+ * @flags: - the following flags are applicable to queues:
+ * KNAV_QUEUE_SHARED - allow the queue to be shared. Queues are
+ * exclusive by default.
+ * Subsequent attempts to open a shared queue should
+ * also have this flag.
+ *
+ * Returns a handle to the open hardware queue if successful. Use IS_ERR()
+ * to check the returned value for error codes.
+ */
+void *knav_queue_open(const char *name, unsigned id,
+ unsigned flags)
+{
+ struct knav_queue *qh = ERR_PTR(-EINVAL);
+
+ switch (id) {
+ case KNAV_QUEUE_QPEND:
+ case KNAV_QUEUE_ACC:
+ case KNAV_QUEUE_GP:
+ qh = knav_queue_open_by_type(name, id, flags);
+ break;
+
+ default:
+ qh = knav_queue_open_by_id(name, id, flags);
+ break;
+ }
+ return qh;
+}
+EXPORT_SYMBOL_GPL(knav_queue_open);
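+
+/*
+ * A minimal sketch of opening by type (the handle name is arbitrary;
+ * opening by KNAV_QUEUE_GP picks any free general purpose queue, while
+ * opening by number claims that exact queue):
+ *
+ *	void *q = knav_queue_open("my-gp-queue", KNAV_QUEUE_GP, 0);
+ *
+ *	if (IS_ERR(q))
+ *		return PTR_ERR(q);
+ */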
+
+/**
+ * knav_queue_close() - close a hardware queue handle
+ * @qhandle: - handle to close
+ */
+void knav_queue_close(void *qhandle)
+{
+ struct knav_queue *qh = qhandle;
+ struct knav_queue_inst *inst = qh->inst;
+
+ while (atomic_read(&qh->notifier_enabled) > 0)
+ knav_queue_disable_notifier(qh);
+
+ mutex_lock(&knav_dev_lock);
+ list_del_rcu(&qh->list);
+ mutex_unlock(&knav_dev_lock);
+ synchronize_rcu();
+ if (!knav_queue_is_busy(inst)) {
+ struct knav_range_info *range = inst->range;
+
+ if (range->ops && range->ops->close_queue)
+ range->ops->close_queue(range, inst);
+ }
+ free_percpu(qh->stats);
+ devm_kfree(inst->kdev->dev, qh);
+}
+EXPORT_SYMBOL_GPL(knav_queue_close);
+
+/**
+ * knav_queue_device_control() - Perform control operations on a queue
+ * @qhandle: - queue handle
+ * @cmd: - control commands
+ * @arg: - command argument
+ *
+ * Returns 0 on success, errno otherwise.
+ */
+int knav_queue_device_control(void *qhandle, enum knav_queue_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct knav_queue *qh = qhandle;
+ struct knav_queue_notify_config *cfg;
+ int ret;
+
+ switch ((int)cmd) {
+ case KNAV_QUEUE_GET_ID:
+ ret = qh->inst->kdev->base_id + qh->inst->id;
+ break;
+
+ case KNAV_QUEUE_FLUSH:
+ ret = knav_queue_flush(qh);
+ break;
+
+ case KNAV_QUEUE_SET_NOTIFIER:
+ cfg = (void *)arg;
+ ret = knav_queue_set_notifier(qh, cfg);
+ break;
+
+ case KNAV_QUEUE_ENABLE_NOTIFY:
+ ret = knav_queue_enable_notifier(qh);
+ break;
+
+ case KNAV_QUEUE_DISABLE_NOTIFY:
+ ret = knav_queue_disable_notifier(qh);
+ break;
+
+ case KNAV_QUEUE_GET_COUNT:
+ ret = knav_queue_get_count(qh);
+ break;
+
+ default:
+ ret = -ENOTSUPP;
+ break;
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(knav_queue_device_control);
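+
+/*
+ * Installing a completion callback goes through the control interface;
+ * a hedged sketch, assuming a my_complete() handler defined elsewhere:
+ *
+ *	struct knav_queue_notify_config cfg = {
+ *		.fn	= my_complete,	(hypothetical callback)
+ *		.fn_arg	= priv,
+ *	};
+ *
+ *	knav_queue_device_control(q, KNAV_QUEUE_SET_NOTIFIER,
+ *				  (unsigned long)&cfg);
+ */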
+
+/**
+ * knav_queue_push() - push data (or descriptor) to the tail of a queue
+ * @qhandle: - hardware queue handle
+ * @dma: - DMA data to push
+ * @size: - size of data to push
+ * @flags: - can be used to pass additional information
+ *
+ * Returns 0 on success, errno otherwise.
+ */
+int knav_queue_push(void *qhandle, dma_addr_t dma,
+ unsigned size, unsigned flags)
+{
+ struct knav_queue *qh = qhandle;
+ u32 val;
+
+ val = (u32)dma | ((size / 16) - 1);
+ writel_relaxed(val, &qh->reg_push[0].ptr_size_thresh);
+
+ this_cpu_inc(qh->stats->pushes);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(knav_queue_push);
+
+/**
+ * knav_queue_pop() - pop data (or descriptor) from the head of a queue
+ * @qhandle: - hardware queue handle
+ * @size:	- (optional) size of the data popped.
+ *
+ * Returns a DMA address on success, 0 on failure.
+ */
+dma_addr_t knav_queue_pop(void *qhandle, unsigned *size)
+{
+ struct knav_queue *qh = qhandle;
+ struct knav_queue_inst *inst = qh->inst;
+ dma_addr_t dma;
+ u32 val, idx;
+
+	/* is this an accumulator-backed queue? */
+ if (inst->descs) {
+ if (unlikely(atomic_dec_return(&inst->desc_count) < 0)) {
+ atomic_inc(&inst->desc_count);
+ return 0;
+ }
+ idx = atomic_inc_return(&inst->desc_head);
+ idx &= ACC_DESCS_MASK;
+ val = inst->descs[idx];
+ } else {
+ val = readl_relaxed(&qh->reg_pop[0].ptr_size_thresh);
+ if (unlikely(!val))
+ return 0;
+ }
+
+ dma = val & DESC_PTR_MASK;
+ if (size)
+ *size = ((val & DESC_SIZE_MASK) + 1) * 16;
+
+ this_cpu_inc(qh->stats->pops);
+ return dma;
+}
+EXPORT_SYMBOL_GPL(knav_queue_pop);
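+
+/*
+ * Size encoding worked through (values illustrative): pushing a 64-byte
+ * descriptor encodes (64 / 16) - 1 = 3 into the low bits alongside the
+ * DMA address, and the pop path decodes (3 + 1) * 16 = 64 again.
+ *
+ *	unsigned size;
+ *	dma_addr_t dma;
+ *
+ *	knav_queue_push(qh, desc_dma, 64, 0);
+ *	dma = knav_queue_pop(qh, &size);	(size comes back as 64)
+ */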
+
+/* carve out descriptors and push into queue */
+static void kdesc_fill_pool(struct knav_pool *pool)
+{
+ struct knav_region *region;
+ int i;
+
+ region = pool->region;
+ pool->desc_size = region->desc_size;
+ for (i = 0; i < pool->num_desc; i++) {
+ int index = pool->region_offset + i;
+ dma_addr_t dma_addr;
+ unsigned dma_size;
+ dma_addr = region->dma_start + (region->desc_size * index);
+ dma_size = ALIGN(pool->desc_size, SMP_CACHE_BYTES);
+ dma_sync_single_for_device(pool->dev, dma_addr, dma_size,
+ DMA_TO_DEVICE);
+ knav_queue_push(pool->queue, dma_addr, dma_size, 0);
+ }
+}
+
+/* pop out descriptors and close the queue */
+static void kdesc_empty_pool(struct knav_pool *pool)
+{
+ dma_addr_t dma;
+ unsigned size;
+ void *desc;
+ int i;
+
+ if (!pool->queue)
+ return;
+
+ for (i = 0;; i++) {
+ dma = knav_queue_pop(pool->queue, &size);
+ if (!dma)
+ break;
+ desc = knav_pool_desc_dma_to_virt(pool, dma);
+ if (!desc) {
+ dev_dbg(pool->kdev->dev,
+ "couldn't unmap desc, continuing\n");
+ continue;
+ }
+ }
+ WARN_ON(i != pool->num_desc);
+ knav_queue_close(pool->queue);
+}
+
+
+/* Get the DMA address of a descriptor */
+dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt)
+{
+ struct knav_pool *pool = ph;
+ return pool->region->dma_start + (virt - pool->region->virt_start);
+}
+EXPORT_SYMBOL_GPL(knav_pool_desc_virt_to_dma);
+
+void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma)
+{
+ struct knav_pool *pool = ph;
+ return pool->region->virt_start + (dma - pool->region->dma_start);
+}
+EXPORT_SYMBOL_GPL(knav_pool_desc_dma_to_virt);
+
+/**
+ * knav_pool_create() - Create a pool of descriptors
+ * @name: - name to give the pool handle
+ * @num_desc: - numbers of descriptors in the pool
+ * @region_id: - QMSS region id from which the descriptors are to be
+ * allocated.
+ *
+ * Returns a pool handle on success.
+ * Use IS_ERR_OR_NULL() to identify error values on return.
+ */
+void *knav_pool_create(const char *name,
+ int num_desc, int region_id)
+{
+ struct knav_region *reg_itr, *region = NULL;
+ struct knav_pool *pool, *pi = NULL, *iter;
+ struct list_head *node;
+ unsigned last_offset;
+ int ret;
+
+ if (!kdev)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ if (!kdev->dev)
+ return ERR_PTR(-ENODEV);
+
+ pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
+ if (!pool) {
+ dev_err(kdev->dev, "out of memory allocating pool\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for_each_region(kdev, reg_itr) {
+ if (reg_itr->id != region_id)
+ continue;
+ region = reg_itr;
+ break;
+ }
+
+ if (!region) {
+ dev_err(kdev->dev, "region-id(%d) not found\n", region_id);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0);
+ if (IS_ERR(pool->queue)) {
+ dev_err(kdev->dev,
+ "failed to open queue for pool(%s), error %ld\n",
+ name, PTR_ERR(pool->queue));
+ ret = PTR_ERR(pool->queue);
+ goto err;
+ }
+
+ pool->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
+ pool->kdev = kdev;
+ pool->dev = kdev->dev;
+
+ mutex_lock(&knav_dev_lock);
+
+ if (num_desc > (region->num_desc - region->used_desc)) {
+ dev_err(kdev->dev, "out of descs in region(%d) for pool(%s)\n",
+ region_id, name);
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+	/* The region maintains a list of pools sorted by region offset;
+	 * use the first free slot which is large enough to accommodate
+	 * the request.
+	 */
+ last_offset = 0;
+ node = &region->pools;
+ list_for_each_entry(iter, &region->pools, region_inst) {
+ if ((iter->region_offset - last_offset) >= num_desc) {
+ pi = iter;
+ break;
+ }
+ last_offset = iter->region_offset + iter->num_desc;
+ }
+
+ if (pi) {
+ node = &pi->region_inst;
+ pool->region = region;
+ pool->num_desc = num_desc;
+ pool->region_offset = last_offset;
+ region->used_desc += num_desc;
+ list_add_tail(&pool->list, &kdev->pools);
+ list_add_tail(&pool->region_inst, node);
+ } else {
+ dev_err(kdev->dev, "pool(%s) create failed: fragmented desc pool in region(%d)\n",
+ name, region_id);
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ mutex_unlock(&knav_dev_lock);
+ kdesc_fill_pool(pool);
+ return pool;
+
+err_unlock:
+ mutex_unlock(&knav_dev_lock);
+err:
+ kfree(pool->name);
+ devm_kfree(kdev->dev, pool);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(knav_pool_create);
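+
+/*
+ * Pool sketch (pool name, count and region id are illustrative): carve
+ * descriptors out of a region, borrow one and return it.
+ *
+ *	void *pool, *desc;
+ *
+ *	pool = knav_pool_create("tx-pool", 512, 12);
+ *	if (IS_ERR_OR_NULL(pool))
+ *		return -ENODEV;
+ *	desc = knav_pool_desc_get(pool);
+ *	if (!IS_ERR(desc))
+ *		knav_pool_desc_put(pool, desc);
+ *	knav_pool_destroy(pool);
+ */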
+
+/**
+ * knav_pool_destroy() - Free a pool of descriptors
+ * @ph: - pool handle
+ */
+void knav_pool_destroy(void *ph)
+{
+ struct knav_pool *pool = ph;
+
+ if (!pool)
+ return;
+
+ if (!pool->region)
+ return;
+
+ kdesc_empty_pool(pool);
+ mutex_lock(&knav_dev_lock);
+
+ pool->region->used_desc -= pool->num_desc;
+ list_del(&pool->region_inst);
+ list_del(&pool->list);
+
+ mutex_unlock(&knav_dev_lock);
+ kfree(pool->name);
+ devm_kfree(kdev->dev, pool);
+}
+EXPORT_SYMBOL_GPL(knav_pool_destroy);
+
+
+/**
+ * knav_pool_desc_get() - Get a descriptor from the pool
+ * @ph: - pool handle
+ *
+ * Returns descriptor from the pool.
+ */
+void *knav_pool_desc_get(void *ph)
+{
+ struct knav_pool *pool = ph;
+ dma_addr_t dma;
+ unsigned size;
+ void *data;
+
+ dma = knav_queue_pop(pool->queue, &size);
+ if (unlikely(!dma))
+ return ERR_PTR(-ENOMEM);
+ data = knav_pool_desc_dma_to_virt(pool, dma);
+ return data;
+}
+EXPORT_SYMBOL_GPL(knav_pool_desc_get);
+
+/**
+ * knav_pool_desc_put() - return a descriptor to the pool
+ * @ph: - pool handle
+ * @desc: - virtual address
+ */
+void knav_pool_desc_put(void *ph, void *desc)
+{
+ struct knav_pool *pool = ph;
+ dma_addr_t dma;
+ dma = knav_pool_desc_virt_to_dma(pool, desc);
+ knav_queue_push(pool->queue, dma, pool->region->desc_size, 0);
+}
+EXPORT_SYMBOL_GPL(knav_pool_desc_put);
+
+/**
+ * knav_pool_desc_map() - Map descriptor for DMA transfer
+ * @ph: - pool handle
+ * @desc: - address of descriptor to map
+ * @size: - size of descriptor to map
+ * @dma: - DMA address return pointer
+ * @dma_sz:	- return pointer for the adjusted (aligned) size
+ *
+ * Returns 0 on success, errno otherwise.
+ */
+int knav_pool_desc_map(void *ph, void *desc, unsigned size,
+ dma_addr_t *dma, unsigned *dma_sz)
+{
+ struct knav_pool *pool = ph;
+ *dma = knav_pool_desc_virt_to_dma(pool, desc);
+ size = min(size, pool->region->desc_size);
+ size = ALIGN(size, SMP_CACHE_BYTES);
+ *dma_sz = size;
+ dma_sync_single_for_device(pool->dev, *dma, size, DMA_TO_DEVICE);
+
+	/* Ensure the descriptor reaches memory */
+ __iowmb();
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(knav_pool_desc_map);
+
+/**
+ * knav_pool_desc_unmap() - Unmap descriptor after DMA transfer
+ * @ph: - pool handle
+ * @dma: - DMA address of descriptor to unmap
+ * @dma_sz: - size of descriptor to unmap
+ *
+ * Returns descriptor address on success. Use IS_ERR_OR_NULL() to identify
+ * error values on return.
+ */
+void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz)
+{
+ struct knav_pool *pool = ph;
+ unsigned desc_sz;
+ void *desc;
+
+ desc_sz = min(dma_sz, pool->region->desc_size);
+ desc = knav_pool_desc_dma_to_virt(pool, dma);
+ dma_sync_single_for_cpu(pool->dev, dma, desc_sz, DMA_FROM_DEVICE);
+ prefetch(desc);
+ return desc;
+}
+EXPORT_SYMBOL_GPL(knav_pool_desc_unmap);
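+
+/*
+ * DMA hand-off sketch (variable names illustrative): map a descriptor
+ * before handing it to hardware, unmap it again on completion.
+ *
+ *	dma_addr_t dma;
+ *	unsigned dma_sz;
+ *
+ *	knav_pool_desc_map(pool, desc, desc_size, &dma, &dma_sz);
+ *	... hardware consumes the descriptor ...
+ *	desc = knav_pool_desc_unmap(pool, dma, dma_sz);
+ */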
+
+/**
+ * knav_pool_count() - Get the number of descriptors in pool.
+ * @ph: - pool handle
+ * Returns number of elements in the pool.
+ */
+int knav_pool_count(void *ph)
+{
+ struct knav_pool *pool = ph;
+ return knav_queue_get_count(pool->queue);
+}
+EXPORT_SYMBOL_GPL(knav_pool_count);
+
+static void knav_queue_setup_region(struct knav_device *kdev,
+ struct knav_region *region)
+{
+ unsigned hw_num_desc, hw_desc_size, size;
+ struct knav_reg_region __iomem *regs;
+ struct knav_qmgr_info *qmgr;
+ struct knav_pool *pool;
+ int id = region->id;
+ struct page *page;
+
+ /* unused region? */
+ if (!region->num_desc) {
+ dev_warn(kdev->dev, "unused region %s\n", region->name);
+ return;
+ }
+
+ /* get hardware descriptor value */
+ hw_num_desc = ilog2(region->num_desc - 1) + 1;
+
+ /* did we force fit ourselves into nothingness? */
+ if (region->num_desc < 32) {
+ region->num_desc = 0;
+ dev_warn(kdev->dev, "too few descriptors in region %s\n",
+ region->name);
+ return;
+ }
+
+ size = region->num_desc * region->desc_size;
+ region->virt_start = alloc_pages_exact(size, GFP_KERNEL | GFP_DMA |
+ GFP_DMA32);
+ if (!region->virt_start) {
+ region->num_desc = 0;
+ dev_err(kdev->dev, "memory alloc failed for region %s\n",
+ region->name);
+ return;
+ }
+ region->virt_end = region->virt_start + size;
+ page = virt_to_page(region->virt_start);
+
+ region->dma_start = dma_map_page(kdev->dev, page, 0, size,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(kdev->dev, region->dma_start)) {
+ dev_err(kdev->dev, "dma map failed for region %s\n",
+ region->name);
+ goto fail;
+ }
+ region->dma_end = region->dma_start + size;
+
+ pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
+ if (!pool) {
+ dev_err(kdev->dev, "out of memory allocating dummy pool\n");
+ goto fail;
+ }
+ pool->num_desc = 0;
+ pool->region_offset = region->num_desc;
+ list_add(&pool->region_inst, &region->pools);
+
+ dev_dbg(kdev->dev,
+ "region %s (%d): size:%d, link:%d@%d, dma:%pad-%pad, virt:%p-%p\n",
+ region->name, id, region->desc_size, region->num_desc,
+ region->link_index, &region->dma_start, &region->dma_end,
+ region->virt_start, region->virt_end);
+
+ hw_desc_size = (region->desc_size / 16) - 1;
+ hw_num_desc -= 5;
+
+ for_each_qmgr(kdev, qmgr) {
+ regs = qmgr->reg_region + id;
+ writel_relaxed((u32)region->dma_start, &regs->base);
+ writel_relaxed(region->link_index, &regs->start_index);
+ writel_relaxed(hw_desc_size << 16 | hw_num_desc,
+ &regs->size_count);
+ }
+ return;
+
+fail:
+ if (region->dma_start)
+ dma_unmap_page(kdev->dev, region->dma_start, size,
+ DMA_BIDIRECTIONAL);
+ if (region->virt_start)
+ free_pages_exact(region->virt_start, size);
+ region->num_desc = 0;
+ return;
+}
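+
+/*
+ * Worked example of the encodings above (values illustrative):
+ * num_desc = 4096 gives hw_num_desc = ilog2(4095) + 1 - 5 = 7, and
+ * desc_size = 128 gives hw_desc_size = (128 / 16) - 1 = 7, so
+ * size_count is programmed as (7 << 16) | 7.
+ */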
+
+static const char *knav_queue_find_name(struct device_node *node)
+{
+ const char *name;
+
+ if (of_property_read_string(node, "label", &name) < 0)
+ name = node->name;
+ if (!name)
+ name = "unknown";
+ return name;
+}
+
+static int knav_queue_setup_regions(struct knav_device *kdev,
+ struct device_node *regions)
+{
+ struct device *dev = kdev->dev;
+ struct knav_region *region;
+ struct device_node *child;
+ u32 temp[2];
+ int ret;
+
+ for_each_child_of_node(regions, child) {
+ region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
+ if (!region) {
+ of_node_put(child);
+ dev_err(dev, "out of memory allocating region\n");
+ return -ENOMEM;
+ }
+
+ region->name = knav_queue_find_name(child);
+ of_property_read_u32(child, "id", &region->id);
+ ret = of_property_read_u32_array(child, "region-spec", temp, 2);
+ if (!ret) {
+ region->num_desc = temp[0];
+ region->desc_size = temp[1];
+ } else {
+ dev_err(dev, "invalid region info %s\n", region->name);
+ devm_kfree(dev, region);
+ continue;
+ }
+
+ if (!of_get_property(child, "link-index", NULL)) {
+ dev_err(dev, "No link info for %s\n", region->name);
+ devm_kfree(dev, region);
+ continue;
+ }
+ ret = of_property_read_u32(child, "link-index",
+ &region->link_index);
+ if (ret) {
+ dev_err(dev, "link index not found for %s\n",
+ region->name);
+ devm_kfree(dev, region);
+ continue;
+ }
+
+ INIT_LIST_HEAD(&region->pools);
+ list_add_tail(&region->list, &kdev->regions);
+ }
+ if (list_empty(&kdev->regions)) {
+ dev_err(dev, "no valid region information found\n");
+ return -ENODEV;
+ }
+
+ /* Next, we run through the regions and set things up */
+ for_each_region(kdev, region)
+ knav_queue_setup_region(kdev, region);
+
+ return 0;
+}
+
+static int knav_get_link_ram(struct knav_device *kdev,
+ const char *name,
+ struct knav_link_ram_block *block)
+{
+ struct platform_device *pdev = to_platform_device(kdev->dev);
+ struct device_node *node = pdev->dev.of_node;
+ u32 temp[2];
+
+ /*
+ * Note: link ram resources are specified in "entry" sized units. In
+ * reality, although entries are ~40 bits in hardware, we treat them as
+ * 64-bit entities here.
+ *
+ * For example, to specify the internal link ram for Keystone-I class
+ * devices, we would set the linkram0 resource to 0x80000-0x83fff.
+ *
+ * This gets a bit weird when other link rams are used. For example,
+ * if the range specified is 0x0c000000-0x0c003fff (i.e., 16K entries
+ * in MSMC SRAM), the actual memory used is 0x0c000000-0x0c020000,
+ * which accounts for 64-bits per entry, for 16K entries.
+ */
+	if (!of_property_read_u32_array(node, name, temp, 2)) {
+ if (temp[0]) {
+ /*
+ * queue_base specified => using internal or onchip
+ * link ram WARNING - we do not "reserve" this block
+ */
+ block->dma = (dma_addr_t)temp[0];
+ block->virt = NULL;
+ block->size = temp[1];
+ } else {
+ block->size = temp[1];
+			/* queue_base not specified => allocate requested size */
+ block->virt = dmam_alloc_coherent(kdev->dev,
+ 8 * block->size, &block->dma,
+ GFP_KERNEL);
+ if (!block->virt) {
+ dev_err(kdev->dev, "failed to alloc linkram\n");
+ return -ENOMEM;
+ }
+ }
+ } else {
+ return -ENODEV;
+ }
+ return 0;
+}
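+
+/*
+ * Device tree sketch for the two cases above (values illustrative):
+ *
+ *	linkram0 = <0x00100000 0x8000>;	(fixed base, block not reserved)
+ *	linkram1 = <0x0 0x10000>;	(base 0 => dmam_alloc_coherent())
+ */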
+
+static int knav_queue_setup_link_ram(struct knav_device *kdev)
+{
+ struct knav_link_ram_block *block;
+ struct knav_qmgr_info *qmgr;
+
+ for_each_qmgr(kdev, qmgr) {
+ block = &kdev->link_rams[0];
+ dev_dbg(kdev->dev, "linkram0: dma:%pad, virt:%p, size:%x\n",
+ &block->dma, block->virt, block->size);
+ writel_relaxed((u32)block->dma, &qmgr->reg_config->link_ram_base0);
+ if (kdev->version == QMSS_66AK2G)
+ writel_relaxed(block->size,
+ &qmgr->reg_config->link_ram_size0);
+ else
+ writel_relaxed(block->size - 1,
+ &qmgr->reg_config->link_ram_size0);
+ block++;
+ if (!block->size)
+ continue;
+
+ dev_dbg(kdev->dev, "linkram1: dma:%pad, virt:%p, size:%x\n",
+ &block->dma, block->virt, block->size);
+ writel_relaxed(block->dma, &qmgr->reg_config->link_ram_base1);
+ }
+
+ return 0;
+}
+
+static int knav_setup_queue_range(struct knav_device *kdev,
+ struct device_node *node)
+{
+ struct device *dev = kdev->dev;
+ struct knav_range_info *range;
+ struct knav_qmgr_info *qmgr;
+ u32 temp[2], start, end, id, index;
+ int ret, i;
+
+ range = devm_kzalloc(dev, sizeof(*range), GFP_KERNEL);
+ if (!range) {
+ dev_err(dev, "out of memory allocating range\n");
+ return -ENOMEM;
+ }
+
+ range->kdev = kdev;
+ range->name = knav_queue_find_name(node);
+ ret = of_property_read_u32_array(node, "qrange", temp, 2);
+ if (!ret) {
+ range->queue_base = temp[0] - kdev->base_id;
+ range->num_queues = temp[1];
+ } else {
+ dev_err(dev, "invalid queue range %s\n", range->name);
+ devm_kfree(dev, range);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < RANGE_MAX_IRQS; i++) {
+ struct of_phandle_args oirq;
+
+ if (of_irq_parse_one(node, i, &oirq))
+ break;
+
+ range->irqs[i].irq = irq_create_of_mapping(&oirq);
+ if (range->irqs[i].irq == IRQ_NONE)
+ break;
+
+ range->num_irqs++;
+
+ if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3) {
+ unsigned long mask;
+ int bit;
+
+ range->irqs[i].cpu_mask = devm_kzalloc(dev,
+ cpumask_size(), GFP_KERNEL);
+ if (!range->irqs[i].cpu_mask)
+ return -ENOMEM;
+
+ mask = (oirq.args[2] & 0x0000ff00) >> 8;
+ for_each_set_bit(bit, &mask, BITS_PER_LONG)
+ cpumask_set_cpu(bit, range->irqs[i].cpu_mask);
+ }
+ }
+
+ range->num_irqs = min(range->num_irqs, range->num_queues);
+ if (range->num_irqs)
+ range->flags |= RANGE_HAS_IRQ;
+
+ if (of_get_property(node, "qalloc-by-id", NULL))
+ range->flags |= RANGE_RESERVED;
+
+ if (of_get_property(node, "accumulator", NULL)) {
+ ret = knav_init_acc_range(kdev, node, range);
+ if (ret < 0) {
+ devm_kfree(dev, range);
+ return ret;
+ }
+ } else {
+ range->ops = &knav_gp_range_ops;
+ }
+
+ /* set threshold to 1, and flush out the queues */
+ for_each_qmgr(kdev, qmgr) {
+ start = max(qmgr->start_queue, range->queue_base);
+ end = min(qmgr->start_queue + qmgr->num_queues,
+ range->queue_base + range->num_queues);
+ for (id = start; id < end; id++) {
+ index = id - qmgr->start_queue;
+ writel_relaxed(THRESH_GTE | 1,
+ &qmgr->reg_peek[index].ptr_size_thresh);
+ writel_relaxed(0,
+ &qmgr->reg_push[index].ptr_size_thresh);
+ }
+ }
+
+ list_add_tail(&range->list, &kdev->queue_ranges);
+ dev_dbg(dev, "added range %s: %d-%d, %d irqs%s%s%s\n",
+ range->name, range->queue_base,
+ range->queue_base + range->num_queues - 1,
+ range->num_irqs,
+ (range->flags & RANGE_HAS_IRQ) ? ", has irq" : "",
+ (range->flags & RANGE_RESERVED) ? ", reserved" : "",
+ (range->flags & RANGE_HAS_ACCUMULATOR) ? ", acc" : "");
+ kdev->num_queues_in_use += range->num_queues;
+ return 0;
+}
+
+static int knav_setup_queue_pools(struct knav_device *kdev,
+ struct device_node *queue_pools)
+{
+ struct device_node *type, *range;
+
+ for_each_child_of_node(queue_pools, type) {
+ for_each_child_of_node(type, range) {
+ /* return value ignored, we init the rest... */
+ knav_setup_queue_range(kdev, range);
+ }
+ }
+
+ /* ... and barf if they all failed! */
+ if (list_empty(&kdev->queue_ranges)) {
+ dev_err(kdev->dev, "no valid queue range found\n");
+ return -ENODEV;
+ }
+ return 0;
+}
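+
+/*
+ * Device tree shape consumed above (all nodes and values illustrative):
+ *
+ *	queue-pools {
+ *		qpend {
+ *			qpend-0 {
+ *				qrange = <658 8>;
+ *			};
+ *		};
+ *		general-purpose {
+ *			gp-0 {
+ *				qrange = <4000 64>;
+ *			};
+ *		};
+ *	};
+ */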
+
+static void knav_free_queue_range(struct knav_device *kdev,
+ struct knav_range_info *range)
+{
+ if (range->ops && range->ops->free_range)
+ range->ops->free_range(range);
+ list_del(&range->list);
+ devm_kfree(kdev->dev, range);
+}
+
+static void knav_free_queue_ranges(struct knav_device *kdev)
+{
+ struct knav_range_info *range;
+
+ for (;;) {
+ range = first_queue_range(kdev);
+ if (!range)
+ break;
+ knav_free_queue_range(kdev, range);
+ }
+}
+
+static void knav_queue_free_regions(struct knav_device *kdev)
+{
+ struct knav_region *region;
+ struct knav_pool *pool, *tmp;
+ unsigned size;
+
+ for (;;) {
+ region = first_region(kdev);
+ if (!region)
+ break;
+ list_for_each_entry_safe(pool, tmp, &region->pools, region_inst)
+ knav_pool_destroy(pool);
+
+ size = region->virt_end - region->virt_start;
+ if (size)
+ free_pages_exact(region->virt_start, size);
+ list_del(&region->list);
+ devm_kfree(kdev->dev, region);
+ }
+}
+
+static void __iomem *knav_queue_map_reg(struct knav_device *kdev,
+ struct device_node *node, int index)
+{
+ struct resource res;
+ void __iomem *regs;
+ int ret;
+
+ ret = of_address_to_resource(node, index, &res);
+ if (ret) {
+ dev_err(kdev->dev, "Can't translate of node(%pOFn) address for index(%d)\n",
+ node, index);
+ return ERR_PTR(ret);
+ }
+
+ regs = devm_ioremap_resource(kdev->dev, &res);
+ if (IS_ERR(regs))
+ dev_err(kdev->dev, "Failed to map register base for index(%d) node(%pOFn)\n",
+ index, node);
+ return regs;
+}
+
+static int knav_queue_init_qmgrs(struct knav_device *kdev,
+ struct device_node *qmgrs)
+{
+ struct device *dev = kdev->dev;
+ struct knav_qmgr_info *qmgr;
+ struct device_node *child;
+ u32 temp[2];
+ int ret;
+
+ for_each_child_of_node(qmgrs, child) {
+ qmgr = devm_kzalloc(dev, sizeof(*qmgr), GFP_KERNEL);
+ if (!qmgr) {
+ of_node_put(child);
+ dev_err(dev, "out of memory allocating qmgr\n");
+ return -ENOMEM;
+ }
+
+ ret = of_property_read_u32_array(child, "managed-queues",
+ temp, 2);
+ if (!ret) {
+ qmgr->start_queue = temp[0];
+ qmgr->num_queues = temp[1];
+ } else {
+ dev_err(dev, "invalid qmgr queue range\n");
+ devm_kfree(dev, qmgr);
+ continue;
+ }
+
+ dev_info(dev, "qmgr start queue %d, number of queues %d\n",
+ qmgr->start_queue, qmgr->num_queues);
+
+ qmgr->reg_peek =
+ knav_queue_map_reg(kdev, child,
+ KNAV_QUEUE_PEEK_REG_INDEX);
+
+ if (kdev->version == QMSS) {
+ qmgr->reg_status =
+ knav_queue_map_reg(kdev, child,
+ KNAV_QUEUE_STATUS_REG_INDEX);
+ }
+
+ qmgr->reg_config =
+ knav_queue_map_reg(kdev, child,
+ (kdev->version == QMSS_66AK2G) ?
+ KNAV_L_QUEUE_CONFIG_REG_INDEX :
+ KNAV_QUEUE_CONFIG_REG_INDEX);
+ qmgr->reg_region =
+ knav_queue_map_reg(kdev, child,
+ (kdev->version == QMSS_66AK2G) ?
+ KNAV_L_QUEUE_REGION_REG_INDEX :
+ KNAV_QUEUE_REGION_REG_INDEX);
+
+ qmgr->reg_push =
+ knav_queue_map_reg(kdev, child,
+ (kdev->version == QMSS_66AK2G) ?
+ KNAV_L_QUEUE_PUSH_REG_INDEX :
+ KNAV_QUEUE_PUSH_REG_INDEX);
+
+ if (kdev->version == QMSS) {
+ qmgr->reg_pop =
+ knav_queue_map_reg(kdev, child,
+ KNAV_QUEUE_POP_REG_INDEX);
+ }
+
+ if (IS_ERR(qmgr->reg_peek) ||
+ ((kdev->version == QMSS) &&
+ (IS_ERR(qmgr->reg_status) || IS_ERR(qmgr->reg_pop))) ||
+ IS_ERR(qmgr->reg_config) || IS_ERR(qmgr->reg_region) ||
+ IS_ERR(qmgr->reg_push)) {
+ dev_err(dev, "failed to map qmgr regs\n");
+ if (kdev->version == QMSS) {
+ if (!IS_ERR(qmgr->reg_status))
+ devm_iounmap(dev, qmgr->reg_status);
+ if (!IS_ERR(qmgr->reg_pop))
+ devm_iounmap(dev, qmgr->reg_pop);
+ }
+ if (!IS_ERR(qmgr->reg_peek))
+ devm_iounmap(dev, qmgr->reg_peek);
+ if (!IS_ERR(qmgr->reg_config))
+ devm_iounmap(dev, qmgr->reg_config);
+ if (!IS_ERR(qmgr->reg_region))
+ devm_iounmap(dev, qmgr->reg_region);
+ if (!IS_ERR(qmgr->reg_push))
+ devm_iounmap(dev, qmgr->reg_push);
+ devm_kfree(dev, qmgr);
+ continue;
+ }
+
+ /* Use same push register for pop as well */
+ if (kdev->version == QMSS_66AK2G)
+ qmgr->reg_pop = qmgr->reg_push;
+
+ list_add_tail(&qmgr->list, &kdev->qmgrs);
+ dev_info(dev, "added qmgr start queue %d, num of queues %d, reg_peek %p, reg_status %p, reg_config %p, reg_region %p, reg_push %p, reg_pop %p\n",
+ qmgr->start_queue, qmgr->num_queues,
+ qmgr->reg_peek, qmgr->reg_status,
+ qmgr->reg_config, qmgr->reg_region,
+ qmgr->reg_push, qmgr->reg_pop);
+ }
+ return 0;
+}
+
+static int knav_queue_init_pdsps(struct knav_device *kdev,
+ struct device_node *pdsps)
+{
+ struct device *dev = kdev->dev;
+ struct knav_pdsp_info *pdsp;
+ struct device_node *child;
+
+ for_each_child_of_node(pdsps, child) {
+ pdsp = devm_kzalloc(dev, sizeof(*pdsp), GFP_KERNEL);
+ if (!pdsp) {
+ of_node_put(child);
+ dev_err(dev, "out of memory allocating pdsp\n");
+ return -ENOMEM;
+ }
+ pdsp->name = knav_queue_find_name(child);
+ pdsp->iram =
+ knav_queue_map_reg(kdev, child,
+ KNAV_QUEUE_PDSP_IRAM_REG_INDEX);
+ pdsp->regs =
+ knav_queue_map_reg(kdev, child,
+ KNAV_QUEUE_PDSP_REGS_REG_INDEX);
+ pdsp->intd =
+ knav_queue_map_reg(kdev, child,
+ KNAV_QUEUE_PDSP_INTD_REG_INDEX);
+ pdsp->command =
+ knav_queue_map_reg(kdev, child,
+ KNAV_QUEUE_PDSP_CMD_REG_INDEX);
+
+ if (IS_ERR(pdsp->command) || IS_ERR(pdsp->iram) ||
+ IS_ERR(pdsp->regs) || IS_ERR(pdsp->intd)) {
+ dev_err(dev, "failed to map pdsp %s regs\n",
+ pdsp->name);
+ if (!IS_ERR(pdsp->command))
+ devm_iounmap(dev, pdsp->command);
+ if (!IS_ERR(pdsp->iram))
+ devm_iounmap(dev, pdsp->iram);
+ if (!IS_ERR(pdsp->regs))
+ devm_iounmap(dev, pdsp->regs);
+ if (!IS_ERR(pdsp->intd))
+ devm_iounmap(dev, pdsp->intd);
+ devm_kfree(dev, pdsp);
+ continue;
+ }
+ of_property_read_u32(child, "id", &pdsp->id);
+ list_add_tail(&pdsp->list, &kdev->pdsps);
+ dev_dbg(dev, "added pdsp %s: command %p, iram %p, regs %p, intd %p\n",
+ pdsp->name, pdsp->command, pdsp->iram, pdsp->regs,
+ pdsp->intd);
+ }
+ return 0;
+}
+
+static int knav_queue_stop_pdsp(struct knav_device *kdev,
+ struct knav_pdsp_info *pdsp)
+{
+ u32 val, timeout = 1000;
+ int ret;
+
+ val = readl_relaxed(&pdsp->regs->control) & ~PDSP_CTRL_ENABLE;
+ writel_relaxed(val, &pdsp->regs->control);
+ ret = knav_queue_pdsp_wait(&pdsp->regs->control, timeout,
+ PDSP_CTRL_RUNNING);
+ if (ret < 0) {
+ dev_err(kdev->dev, "timed out on pdsp %s stop\n", pdsp->name);
+ return ret;
+ }
+ pdsp->loaded = false;
+ pdsp->started = false;
+ return 0;
+}
+
+static int knav_queue_load_pdsp(struct knav_device *kdev,
+ struct knav_pdsp_info *pdsp)
+{
+ int i, ret, fwlen;
+ const struct firmware *fw;
+ bool found = false;
+ u32 *fwdata;
+
+ for (i = 0; i < ARRAY_SIZE(knav_acc_firmwares); i++) {
+ if (knav_acc_firmwares[i]) {
+ ret = request_firmware_direct(&fw,
+ knav_acc_firmwares[i],
+ kdev->dev);
+ if (!ret) {
+ found = true;
+ break;
+ }
+ }
+ }
+
+ if (!found) {
+ dev_err(kdev->dev, "failed to get firmware for pdsp\n");
+ return -ENODEV;
+ }
+
+ dev_info(kdev->dev, "firmware file %s downloaded for PDSP\n",
+ knav_acc_firmwares[i]);
+
+ writel_relaxed(pdsp->id + 1, pdsp->command + 0x18);
+ /* download the firmware */
+ fwdata = (u32 *)fw->data;
+ fwlen = (fw->size + sizeof(u32) - 1) / sizeof(u32);
+ for (i = 0; i < fwlen; i++)
+ writel_relaxed(be32_to_cpu(fwdata[i]), pdsp->iram + i);
+
+ release_firmware(fw);
+ return 0;
+}
+
+static int knav_queue_start_pdsp(struct knav_device *kdev,
+ struct knav_pdsp_info *pdsp)
+{
+ u32 val, timeout = 1000;
+ int ret;
+
+ /* write a command for sync */
+ writel_relaxed(0xffffffff, pdsp->command);
+ while (readl_relaxed(pdsp->command) != 0xffffffff)
+ cpu_relax();
+
+ /* soft reset the PDSP */
+ val = readl_relaxed(&pdsp->regs->control);
+ val &= ~(PDSP_CTRL_PC_MASK | PDSP_CTRL_SOFT_RESET);
+ writel_relaxed(val, &pdsp->regs->control);
+
+ /* enable pdsp */
+ val = readl_relaxed(&pdsp->regs->control) | PDSP_CTRL_ENABLE;
+ writel_relaxed(val, &pdsp->regs->control);
+
+ /* wait for command register to clear */
+ ret = knav_queue_pdsp_wait(pdsp->command, timeout, 0);
+ if (ret < 0) {
+ dev_err(kdev->dev,
+ "timed out on pdsp %s command register wait\n",
+ pdsp->name);
+ return ret;
+ }
+ return 0;
+}
+
+static void knav_queue_stop_pdsps(struct knav_device *kdev)
+{
+ struct knav_pdsp_info *pdsp;
+
+ /* disable all pdsps */
+ for_each_pdsp(kdev, pdsp)
+ knav_queue_stop_pdsp(kdev, pdsp);
+}
+
+static int knav_queue_start_pdsps(struct knav_device *kdev)
+{
+ struct knav_pdsp_info *pdsp;
+ int ret;
+
+ knav_queue_stop_pdsps(kdev);
+	/* Now load them all. We return success even if a pdsp fails to
+	 * load, since the accumulator channels are optional and depend
+	 * on firmware availability in the system. We set the loaded and
+	 * started flags here; accumulator range init checks them and
+	 * only sets up a range whose pdsp has actually started.
+	 */
+ for_each_pdsp(kdev, pdsp) {
+ ret = knav_queue_load_pdsp(kdev, pdsp);
+ if (!ret)
+ pdsp->loaded = true;
+ }
+
+ for_each_pdsp(kdev, pdsp) {
+ if (pdsp->loaded) {
+ ret = knav_queue_start_pdsp(kdev, pdsp);
+ if (!ret)
+ pdsp->started = true;
+ }
+ }
+ return 0;
+}
+
+static inline struct knav_qmgr_info *knav_find_qmgr(unsigned id)
+{
+ struct knav_qmgr_info *qmgr;
+
+ for_each_qmgr(kdev, qmgr) {
+ if ((id >= qmgr->start_queue) &&
+ (id < qmgr->start_queue + qmgr->num_queues))
+ return qmgr;
+ }
+ return NULL;
+}
+
+static int knav_queue_init_queue(struct knav_device *kdev,
+ struct knav_range_info *range,
+ struct knav_queue_inst *inst,
+ unsigned id)
+{
+ char irq_name[KNAV_NAME_SIZE];
+ inst->qmgr = knav_find_qmgr(id);
+ if (!inst->qmgr)
+ return -1;
+
+ INIT_LIST_HEAD(&inst->handles);
+ inst->kdev = kdev;
+ inst->range = range;
+ inst->irq_num = -1;
+ inst->id = id;
+ scnprintf(irq_name, sizeof(irq_name), "hwqueue-%d", id);
+ inst->irq_name = kstrndup(irq_name, sizeof(irq_name), GFP_KERNEL);
+
+ if (range->ops && range->ops->init_queue)
+ return range->ops->init_queue(range, inst);
+ else
+ return 0;
+}
+
+static int knav_queue_init_queues(struct knav_device *kdev)
+{
+ struct knav_range_info *range;
+ int size, id, base_idx;
+ int idx = 0, ret = 0;
+
+ /* how much do we need for instance data? */
+ size = sizeof(struct knav_queue_inst);
+
+	/* round this up to a power of 2, keeping the index-to-instance
+	 * arithmetic fast
+	 */
+ kdev->inst_shift = order_base_2(size);
+ size = (1 << kdev->inst_shift) * kdev->num_queues_in_use;
+ kdev->instances = devm_kzalloc(kdev->dev, size, GFP_KERNEL);
+ if (!kdev->instances)
+ return -ENOMEM;
+
+ for_each_queue_range(kdev, range) {
+ if (range->ops && range->ops->init_range)
+ range->ops->init_range(range);
+ base_idx = idx;
+ for (id = range->queue_base;
+ id < range->queue_base + range->num_queues; id++, idx++) {
+ ret = knav_queue_init_queue(kdev, range,
+ knav_queue_idx_to_inst(kdev, idx), id);
+ if (ret < 0)
+ return ret;
+ }
+ range->queue_base_inst =
+ knav_queue_idx_to_inst(kdev, base_idx);
+ }
+ return 0;
+}
+
+/* Match table for of_platform binding */
+static const struct of_device_id keystone_qmss_of_match[] = {
+ {
+ .compatible = "ti,keystone-navigator-qmss",
+ },
+ {
+ .compatible = "ti,66ak2g-navss-qm",
+ .data = (void *)QMSS_66AK2G,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, keystone_qmss_of_match);
+
+static int knav_queue_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *qmgrs, *queue_pools, *regions, *pdsps;
+ const struct of_device_id *match;
+ struct device *dev = &pdev->dev;
+ u32 temp[2];
+ int ret;
+
+ if (!node) {
+ dev_err(dev, "device tree info unavailable\n");
+ return -ENODEV;
+ }
+
+ kdev = devm_kzalloc(dev, sizeof(struct knav_device), GFP_KERNEL);
+ if (!kdev) {
+ dev_err(dev, "memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ match = of_match_device(of_match_ptr(keystone_qmss_of_match), dev);
+ if (match && match->data)
+ kdev->version = QMSS_66AK2G;
+
+ platform_set_drvdata(pdev, kdev);
+ kdev->dev = dev;
+ INIT_LIST_HEAD(&kdev->queue_ranges);
+ INIT_LIST_HEAD(&kdev->qmgrs);
+ INIT_LIST_HEAD(&kdev->pools);
+ INIT_LIST_HEAD(&kdev->regions);
+ INIT_LIST_HEAD(&kdev->pdsps);
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0) {
+ pm_runtime_disable(&pdev->dev);
+ dev_err(dev, "Failed to enable QMSS\n");
+ return ret;
+ }
+
+ if (of_property_read_u32_array(node, "queue-range", temp, 2)) {
+ dev_err(dev, "queue-range not specified\n");
+ ret = -ENODEV;
+ goto err;
+ }
+ kdev->base_id = temp[0];
+ kdev->num_queues = temp[1];
+
+ /* Initialize queue managers using device tree configuration */
+ qmgrs = of_get_child_by_name(node, "qmgrs");
+ if (!qmgrs) {
+ dev_err(dev, "queue manager info not specified\n");
+ ret = -ENODEV;
+ goto err;
+ }
+ ret = knav_queue_init_qmgrs(kdev, qmgrs);
+ of_node_put(qmgrs);
+ if (ret)
+ goto err;
+
+ /* get pdsp configuration values from device tree */
+ pdsps = of_get_child_by_name(node, "pdsps");
+ if (pdsps) {
+ ret = knav_queue_init_pdsps(kdev, pdsps);
+ if (ret)
+ goto err;
+
+ ret = knav_queue_start_pdsps(kdev);
+ if (ret)
+ goto err;
+ }
+ of_node_put(pdsps);
+
+ /* get usable queue range values from device tree */
+ queue_pools = of_get_child_by_name(node, "queue-pools");
+ if (!queue_pools) {
+ dev_err(dev, "queue-pools not specified\n");
+ ret = -ENODEV;
+ goto err;
+ }
+ ret = knav_setup_queue_pools(kdev, queue_pools);
+ of_node_put(queue_pools);
+ if (ret)
+ goto err;
+
+ ret = knav_get_link_ram(kdev, "linkram0", &kdev->link_rams[0]);
+ if (ret) {
+ dev_err(kdev->dev, "could not setup linking ram\n");
+ goto err;
+ }
+
+ ret = knav_get_link_ram(kdev, "linkram1", &kdev->link_rams[1]);
+ if (ret) {
+ /*
+ * nothing really, we have one linking ram already, so we just
+ * live within our means
+ */
+ }
+
+ ret = knav_queue_setup_link_ram(kdev);
+ if (ret)
+ goto err;
+
+ regions = of_get_child_by_name(node, "descriptor-regions");
+ if (!regions) {
+ dev_err(dev, "descriptor-regions not specified\n");
+ ret = -ENODEV;
+ goto err;
+ }
+ ret = knav_queue_setup_regions(kdev, regions);
+ of_node_put(regions);
+ if (ret)
+ goto err;
+
+ ret = knav_queue_init_queues(kdev);
+ if (ret < 0) {
+ dev_err(dev, "hwqueue initialization failed\n");
+ goto err;
+ }
+
+ debugfs_create_file("qmss", S_IFREG | S_IRUGO, NULL, NULL,
+ &knav_queue_debug_fops);
+ device_ready = true;
+ return 0;
+
+err:
+ knav_queue_stop_pdsps(kdev);
+ knav_queue_free_regions(kdev);
+ knav_free_queue_ranges(kdev);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+}
+
+static int knav_queue_remove(struct platform_device *pdev)
+{
+ /* TODO: Free resources */
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static struct platform_driver keystone_qmss_driver = {
+ .probe = knav_queue_probe,
+ .remove = knav_queue_remove,
+ .driver = {
+ .name = "keystone-navigator-qmss",
+ .of_match_table = keystone_qmss_of_match,
+ },
+};
+module_platform_driver(keystone_qmss_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI QMSS driver for Keystone SOCs");
+MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
+MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");
diff --git a/drivers/soc/ti/omap_prm.c b/drivers/soc/ti/omap_prm.c
new file mode 100644
index 000000000..913b96437
--- /dev/null
+++ b/drivers/soc/ti/omap_prm.c
@@ -0,0 +1,994 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * OMAP2+ PRM driver
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Tero Kristo <t-kristo@ti.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_domain.h>
+#include <linux/reset-controller.h>
+#include <linux/delay.h>
+
+#include <linux/platform_data/ti-prm.h>
+
+enum omap_prm_domain_mode {
+ OMAP_PRMD_OFF,
+ OMAP_PRMD_RETENTION,
+ OMAP_PRMD_ON_INACTIVE,
+ OMAP_PRMD_ON_ACTIVE,
+};
+
+struct omap_prm_domain_map {
+ unsigned int usable_modes; /* Mask of hardware supported modes */
+ unsigned long statechange:1; /* Optional low-power state change */
+ unsigned long logicretstate:1; /* Optional logic off mode */
+};
+
+struct omap_prm_domain {
+ struct device *dev;
+ struct omap_prm *prm;
+ struct generic_pm_domain pd;
+ u16 pwrstctrl;
+ u16 pwrstst;
+ const struct omap_prm_domain_map *cap;
+ u32 pwrstctrl_saved;
+ unsigned int uses_pm_clk:1;
+};
+
+struct omap_rst_map {
+ s8 rst;
+ s8 st;
+};
+
+struct omap_prm_data {
+ u32 base;
+ const char *name;
+ const char *clkdm_name;
+ u16 pwrstctrl;
+ u16 pwrstst;
+ const struct omap_prm_domain_map *dmap;
+ u16 rstctrl;
+ u16 rstst;
+ const struct omap_rst_map *rstmap;
+ u8 flags;
+};
+
+struct omap_prm {
+ const struct omap_prm_data *data;
+ void __iomem *base;
+ struct omap_prm_domain *prmd;
+};
+
+struct omap_reset_data {
+ struct reset_controller_dev rcdev;
+ struct omap_prm *prm;
+ u32 mask;
+ spinlock_t lock;
+ struct clockdomain *clkdm;
+ struct device *dev;
+};
+
+#define genpd_to_prm_domain(gpd) container_of(gpd, struct omap_prm_domain, pd)
+#define to_omap_reset_data(p) container_of((p), struct omap_reset_data, rcdev)
+
+#define OMAP_MAX_RESETS 8
+#define OMAP_RESET_MAX_WAIT 10000
+
+#define OMAP_PRM_HAS_RSTCTRL BIT(0)
+#define OMAP_PRM_HAS_RSTST BIT(1)
+#define OMAP_PRM_HAS_NO_CLKDM BIT(2)
+#define OMAP_PRM_RET_WHEN_IDLE BIT(3)
+
+#define OMAP_PRM_HAS_RESETS (OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_RSTST)
+
+#define PRM_STATE_MAX_WAIT 10000
+#define PRM_LOGICRETSTATE BIT(2)
+#define PRM_LOWPOWERSTATECHANGE BIT(4)
+#define PRM_POWERSTATE_MASK OMAP_PRMD_ON_ACTIVE
+
+#define PRM_ST_INTRANSITION BIT(20)
+
+static const struct omap_prm_domain_map omap_prm_all = {
+ .usable_modes = BIT(OMAP_PRMD_ON_ACTIVE) | BIT(OMAP_PRMD_ON_INACTIVE) |
+ BIT(OMAP_PRMD_RETENTION) | BIT(OMAP_PRMD_OFF),
+ .statechange = 1,
+ .logicretstate = 1,
+};
+
+static const struct omap_prm_domain_map omap_prm_noinact = {
+ .usable_modes = BIT(OMAP_PRMD_ON_ACTIVE) | BIT(OMAP_PRMD_RETENTION) |
+ BIT(OMAP_PRMD_OFF),
+ .statechange = 1,
+ .logicretstate = 1,
+};
+
+static const struct omap_prm_domain_map omap_prm_nooff = {
+ .usable_modes = BIT(OMAP_PRMD_ON_ACTIVE) | BIT(OMAP_PRMD_ON_INACTIVE) |
+ BIT(OMAP_PRMD_RETENTION),
+ .statechange = 1,
+ .logicretstate = 1,
+};
+
+static const struct omap_prm_domain_map omap_prm_onoff_noauto = {
+ .usable_modes = BIT(OMAP_PRMD_ON_ACTIVE) | BIT(OMAP_PRMD_OFF),
+ .statechange = 1,
+};
+
+static const struct omap_prm_domain_map omap_prm_alwon = {
+ .usable_modes = BIT(OMAP_PRMD_ON_ACTIVE),
+};
+
+static const struct omap_prm_domain_map omap_prm_reton = {
+ .usable_modes = BIT(OMAP_PRMD_ON_ACTIVE) | BIT(OMAP_PRMD_RETENTION),
+ .statechange = 1,
+ .logicretstate = 1,
+};
+
+static const struct omap_rst_map rst_map_0[] = {
+ { .rst = 0, .st = 0 },
+ { .rst = -1 },
+};
+
+static const struct omap_rst_map rst_map_01[] = {
+ { .rst = 0, .st = 0 },
+ { .rst = 1, .st = 1 },
+ { .rst = -1 },
+};
+
+static const struct omap_rst_map rst_map_012[] = {
+ { .rst = 0, .st = 0 },
+ { .rst = 1, .st = 1 },
+ { .rst = 2, .st = 2 },
+ { .rst = -1 },
+};
+
+static const struct omap_prm_data omap4_prm_data[] = {
+ {
+ .name = "mpu", .base = 0x4a306300,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_reton,
+ },
+ {
+ .name = "tesla", .base = 0x4a306400,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_noinact,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01
+ },
+ {
+ .name = "abe", .base = 0x4a306500,
+ .pwrstctrl = 0, .pwrstst = 0x4, .dmap = &omap_prm_all,
+ },
+ {
+ .name = "always_on_core", .base = 0x4a306600,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+ },
+ {
+ .name = "core", .base = 0x4a306700,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_reton,
+ .rstctrl = 0x210, .rstst = 0x214, .clkdm_name = "ducati",
+ .rstmap = rst_map_012,
+ .flags = OMAP_PRM_RET_WHEN_IDLE,
+ },
+ {
+ .name = "ivahd", .base = 0x4a306f00,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_noinact,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012
+ },
+ {
+ .name = "cam", .base = 0x4a307000,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ },
+ {
+ .name = "dss", .base = 0x4a307100,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_noinact
+ },
+ {
+ .name = "gfx", .base = 0x4a307200,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto
+ },
+ {
+ .name = "l3init", .base = 0x4a307300,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_reton
+ },
+ {
+ .name = "l4per", .base = 0x4a307400,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_reton,
+ .flags = OMAP_PRM_RET_WHEN_IDLE,
+ },
+ {
+ .name = "cefuse", .base = 0x4a307600,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto
+ },
+ {
+ .name = "wkup", .base = 0x4a307700,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon
+ },
+ {
+ .name = "emu", .base = 0x4a307900,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto
+ },
+ {
+ .name = "device", .base = 0x4a307b00,
+ .rstctrl = 0x0, .rstst = 0x4, .rstmap = rst_map_01,
+ .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM
+ },
+ { },
+};
+
+static const struct omap_prm_data omap5_prm_data[] = {
+ {
+ .name = "mpu", .base = 0x4ae06300,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_reton,
+ },
+ {
+ .name = "dsp", .base = 0x4ae06400,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_noinact,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01
+ },
+ {
+ .name = "abe", .base = 0x4ae06500,
+ .pwrstctrl = 0, .pwrstst = 0x4, .dmap = &omap_prm_nooff,
+ },
+ {
+ .name = "coreaon", .base = 0x4ae06600,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon
+ },
+ {
+ .name = "core", .base = 0x4ae06700,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_reton,
+ .rstctrl = 0x210, .rstst = 0x214, .clkdm_name = "ipu",
+ .rstmap = rst_map_012
+ },
+ {
+ .name = "iva", .base = 0x4ae07200,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_noinact,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012
+ },
+ {
+ .name = "cam", .base = 0x4ae07300,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto
+ },
+ {
+ .name = "dss", .base = 0x4ae07400,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_noinact
+ },
+ {
+ .name = "gpu", .base = 0x4ae07500,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto
+ },
+ {
+ .name = "l3init", .base = 0x4ae07600,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_reton
+ },
+ {
+ .name = "custefuse", .base = 0x4ae07700,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto
+ },
+ {
+ .name = "wkupaon", .base = 0x4ae07800,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon
+ },
+ {
+ .name = "emu", .base = 0x4ae07a00,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto
+ },
+ {
+ .name = "device", .base = 0x4ae07c00,
+ .rstctrl = 0x0, .rstst = 0x4, .rstmap = rst_map_01,
+ .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM
+ },
+ { },
+};
+
+static const struct omap_prm_data dra7_prm_data[] = {
+ {
+ .name = "mpu", .base = 0x4ae06300,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_reton,
+ },
+ {
+ .name = "dsp1", .base = 0x4ae06400,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01,
+ },
+ {
+ .name = "ipu", .base = 0x4ae06500,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012,
+ .clkdm_name = "ipu1"
+ },
+ {
+ .name = "coreaon", .base = 0x4ae06628,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+ },
+ {
+ .name = "core", .base = 0x4ae06700,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+ .rstctrl = 0x210, .rstst = 0x214, .rstmap = rst_map_012,
+ .clkdm_name = "ipu2"
+ },
+ {
+ .name = "iva", .base = 0x4ae06f00,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012,
+ },
+ {
+ .name = "cam", .base = 0x4ae07000,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ },
+ {
+ .name = "dss", .base = 0x4ae07100,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ },
+ {
+ .name = "gpu", .base = 0x4ae07200,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ },
+ {
+ .name = "l3init", .base = 0x4ae07300,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01,
+ .clkdm_name = "pcie"
+ },
+ {
+ .name = "l4per", .base = 0x4ae07400,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+ },
+ {
+ .name = "custefuse", .base = 0x4ae07600,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ },
+ {
+ .name = "wkupaon", .base = 0x4ae07724,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+ },
+ {
+ .name = "emu", .base = 0x4ae07900,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ },
+ {
+ .name = "dsp2", .base = 0x4ae07b00,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01
+ },
+ {
+ .name = "eve1", .base = 0x4ae07b40,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01
+ },
+ {
+ .name = "eve2", .base = 0x4ae07b80,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01
+ },
+ {
+ .name = "eve3", .base = 0x4ae07bc0,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01
+ },
+ {
+ .name = "eve4", .base = 0x4ae07c00,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01
+ },
+ {
+ .name = "rtc", .base = 0x4ae07c60,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+ },
+ {
+ .name = "vpe", .base = 0x4ae07c80,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ },
+ { },
+};
+
+static const struct omap_rst_map am3_per_rst_map[] = {
+ { .rst = 1 },
+ { .rst = -1 },
+};
+
+static const struct omap_rst_map am3_wkup_rst_map[] = {
+ { .rst = 3, .st = 5 },
+ { .rst = -1 },
+};
+
+static const struct omap_prm_data am3_prm_data[] = {
+ {
+ .name = "per", .base = 0x44e00c00,
+ .pwrstctrl = 0xc, .pwrstst = 0x8, .dmap = &omap_prm_noinact,
+ .rstctrl = 0x0, .rstmap = am3_per_rst_map,
+ .flags = OMAP_PRM_HAS_RSTCTRL, .clkdm_name = "pruss_ocp"
+ },
+ {
+ .name = "wkup", .base = 0x44e00d00,
+ .pwrstctrl = 0x4, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+ .rstctrl = 0x0, .rstst = 0xc, .rstmap = am3_wkup_rst_map,
+ .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM
+ },
+ {
+ .name = "mpu", .base = 0x44e00e00,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_noinact,
+ },
+ {
+ .name = "device", .base = 0x44e00f00,
+ .rstctrl = 0x0, .rstst = 0x8, .rstmap = rst_map_01,
+ .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM
+ },
+ {
+ .name = "rtc", .base = 0x44e01000,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+ },
+ {
+ .name = "gfx", .base = 0x44e01100,
+ .pwrstctrl = 0, .pwrstst = 0x10, .dmap = &omap_prm_noinact,
+ .rstctrl = 0x4, .rstst = 0x14, .rstmap = rst_map_0, .clkdm_name = "gfx_l3",
+ },
+ {
+ .name = "cefuse", .base = 0x44e01200,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ },
+ { },
+};
+
+static const struct omap_rst_map am4_per_rst_map[] = {
+ { .rst = 1, .st = 0 },
+ { .rst = -1 },
+};
+
+static const struct omap_rst_map am4_device_rst_map[] = {
+ { .rst = 0, .st = 1 },
+ { .rst = 1, .st = 0 },
+ { .rst = -1 },
+};
+
+static const struct omap_prm_data am4_prm_data[] = {
+ {
+ .name = "mpu", .base = 0x44df0300,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_noinact,
+ },
+ {
+ .name = "gfx", .base = 0x44df0400,
+ .pwrstctrl = 0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_0, .clkdm_name = "gfx_l3",
+ },
+ {
+ .name = "rtc", .base = 0x44df0500,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+ },
+ {
+ .name = "tamper", .base = 0x44df0600,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+ },
+ {
+ .name = "cefuse", .base = 0x44df0700,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ },
+ {
+ .name = "per", .base = 0x44df0800,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_noinact,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = am4_per_rst_map,
+ .clkdm_name = "pruss_ocp"
+ },
+ {
+ .name = "wkup", .base = 0x44df2000,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = am3_wkup_rst_map,
+ .flags = OMAP_PRM_HAS_NO_CLKDM
+ },
+ {
+ .name = "device", .base = 0x44df4000,
+ .rstctrl = 0x0, .rstst = 0x4, .rstmap = am4_device_rst_map,
+ .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM
+ },
+ { },
+};
+
+static const struct of_device_id omap_prm_id_table[] = {
+ { .compatible = "ti,omap4-prm-inst", .data = omap4_prm_data },
+ { .compatible = "ti,omap5-prm-inst", .data = omap5_prm_data },
+ { .compatible = "ti,dra7-prm-inst", .data = dra7_prm_data },
+ { .compatible = "ti,am3-prm-inst", .data = am3_prm_data },
+ { .compatible = "ti,am4-prm-inst", .data = am4_prm_data },
+ { },
+};
+
+#ifdef DEBUG
+static void omap_prm_domain_show_state(struct omap_prm_domain *prmd,
+ const char *desc)
+{
+ dev_dbg(prmd->dev, "%s %s: %08x/%08x\n",
+ prmd->pd.name, desc,
+ readl_relaxed(prmd->prm->base + prmd->pwrstctrl),
+ readl_relaxed(prmd->prm->base + prmd->pwrstst));
+}
+#else
+static inline void omap_prm_domain_show_state(struct omap_prm_domain *prmd,
+ const char *desc)
+{
+}
+#endif
+
+static int omap_prm_domain_power_on(struct generic_pm_domain *domain)
+{
+ struct omap_prm_domain *prmd;
+ int ret;
+ u32 v, mode;
+
+ prmd = genpd_to_prm_domain(domain);
+ if (!prmd->cap)
+ return 0;
+
+ omap_prm_domain_show_state(prmd, "on: previous state");
+
+ if (prmd->pwrstctrl_saved)
+ v = prmd->pwrstctrl_saved;
+ else
+ v = readl_relaxed(prmd->prm->base + prmd->pwrstctrl);
+
+ if (prmd->prm->data->flags & OMAP_PRM_RET_WHEN_IDLE)
+ mode = OMAP_PRMD_RETENTION;
+ else
+ mode = OMAP_PRMD_ON_ACTIVE;
+
+ writel_relaxed((v & ~PRM_POWERSTATE_MASK) | mode,
+ prmd->prm->base + prmd->pwrstctrl);
+
+ /* wait for the transition bit to get cleared */
+ ret = readl_relaxed_poll_timeout(prmd->prm->base + prmd->pwrstst,
+ v, !(v & PRM_ST_INTRANSITION), 1,
+ PRM_STATE_MAX_WAIT);
+ if (ret)
+ dev_err(prmd->dev, "%s: %s timed out\n",
+ prmd->pd.name, __func__);
+
+ omap_prm_domain_show_state(prmd, "on: new state");
+
+ return ret;
+}
+
+/* No need to check for holes in the mask for the lowest mode */
+static int omap_prm_domain_find_lowest(struct omap_prm_domain *prmd)
+{
+ return __ffs(prmd->cap->usable_modes);
+}
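+
+/*
+ * Worked example: omap_prm_noinact has usable_modes
+ * BIT(OMAP_PRMD_ON_ACTIVE) | BIT(OMAP_PRMD_RETENTION) |
+ * BIT(OMAP_PRMD_OFF) = 0b1011, so __ffs() returns 0, i.e.
+ * OMAP_PRMD_OFF is the lowest reachable mode.
+ */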
+
+static int omap_prm_domain_power_off(struct generic_pm_domain *domain)
+{
+ struct omap_prm_domain *prmd;
+ int ret;
+ u32 v;
+
+ prmd = genpd_to_prm_domain(domain);
+ if (!prmd->cap)
+ return 0;
+
+ omap_prm_domain_show_state(prmd, "off: previous state");
+
+ v = readl_relaxed(prmd->prm->base + prmd->pwrstctrl);
+ prmd->pwrstctrl_saved = v;
+
+ v &= ~PRM_POWERSTATE_MASK;
+ v |= omap_prm_domain_find_lowest(prmd);
+
+ if (prmd->cap->statechange)
+ v |= PRM_LOWPOWERSTATECHANGE;
+ if (prmd->cap->logicretstate)
+ v &= ~PRM_LOGICRETSTATE;
+ else
+ v |= PRM_LOGICRETSTATE;
+
+ writel_relaxed(v, prmd->prm->base + prmd->pwrstctrl);
+
+ /* wait for the transition bit to get cleared */
+ ret = readl_relaxed_poll_timeout(prmd->prm->base + prmd->pwrstst,
+ v, !(v & PRM_ST_INTRANSITION), 1,
+ PRM_STATE_MAX_WAIT);
+ if (ret)
+ dev_warn(prmd->dev, "%s: %s timed out\n",
+ __func__, prmd->pd.name);
+
+ omap_prm_domain_show_state(prmd, "off: new state");
+
+ return 0;
+}
+
+/*
+ * Note that ti-sysc already manages the module clocks separately so
+ * no need to manage those. Interconnect instances need clocks managed
+ * for simple-pm-bus.
+ */
+static int omap_prm_domain_attach_clock(struct device *dev,
+ struct omap_prm_domain *prmd)
+{
+ struct device_node *np = dev->of_node;
+ int error;
+
+ if (!of_device_is_compatible(np, "simple-pm-bus"))
+ return 0;
+
+ if (!of_property_read_bool(np, "clocks"))
+ return 0;
+
+ error = pm_clk_create(dev);
+ if (error)
+ return error;
+
+ error = of_pm_clk_add_clks(dev);
+ if (error < 0) {
+ pm_clk_destroy(dev);
+ return error;
+ }
+
+ prmd->uses_pm_clk = 1;
+
+ return 0;
+}
+
+static int omap_prm_domain_attach_dev(struct generic_pm_domain *domain,
+ struct device *dev)
+{
+ struct generic_pm_domain_data *genpd_data;
+ struct of_phandle_args pd_args;
+ struct omap_prm_domain *prmd;
+ struct device_node *np;
+ int ret;
+
+ prmd = genpd_to_prm_domain(domain);
+ np = dev->of_node;
+
+ ret = of_parse_phandle_with_args(np, "power-domains",
+ "#power-domain-cells", 0, &pd_args);
+ if (ret < 0)
+ return ret;
+
+ if (pd_args.args_count != 0)
+		dev_warn(dev, "%s: unsupported #power-domain-cells: %i\n",
+ prmd->pd.name, pd_args.args_count);
+
+ genpd_data = dev_gpd_data(dev);
+ genpd_data->data = NULL;
+
+ ret = omap_prm_domain_attach_clock(dev, prmd);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void omap_prm_domain_detach_dev(struct generic_pm_domain *domain,
+ struct device *dev)
+{
+ struct generic_pm_domain_data *genpd_data;
+ struct omap_prm_domain *prmd;
+
+ prmd = genpd_to_prm_domain(domain);
+ if (prmd->uses_pm_clk)
+ pm_clk_destroy(dev);
+ genpd_data = dev_gpd_data(dev);
+ genpd_data->data = NULL;
+}
+
+static int omap_prm_domain_init(struct device *dev, struct omap_prm *prm)
+{
+ struct omap_prm_domain *prmd;
+ struct device_node *np = dev->of_node;
+ const struct omap_prm_data *data;
+ const char *name;
+ int error;
+
+ if (!of_find_property(dev->of_node, "#power-domain-cells", NULL))
+ return 0;
+
+ of_node_put(dev->of_node);
+
+ prmd = devm_kzalloc(dev, sizeof(*prmd), GFP_KERNEL);
+ if (!prmd)
+ return -ENOMEM;
+
+ data = prm->data;
+ name = devm_kasprintf(dev, GFP_KERNEL, "prm_%s",
+ data->name);
+
+ prmd->dev = dev;
+ prmd->prm = prm;
+ prmd->cap = prmd->prm->data->dmap;
+ prmd->pwrstctrl = prmd->prm->data->pwrstctrl;
+ prmd->pwrstst = prmd->prm->data->pwrstst;
+
+ prmd->pd.name = name;
+ prmd->pd.power_on = omap_prm_domain_power_on;
+ prmd->pd.power_off = omap_prm_domain_power_off;
+ prmd->pd.attach_dev = omap_prm_domain_attach_dev;
+ prmd->pd.detach_dev = omap_prm_domain_detach_dev;
+ prmd->pd.flags = GENPD_FLAG_PM_CLK;
+
+ pm_genpd_init(&prmd->pd, NULL, true);
+ error = of_genpd_add_provider_simple(np, &prmd->pd);
+ if (error)
+ pm_genpd_remove(&prmd->pd);
+ else
+ prm->prmd = prmd;
+
+ return error;
+}
+
+static bool _is_valid_reset(struct omap_reset_data *reset, unsigned long id)
+{
+ if (reset->mask & BIT(id))
+ return true;
+
+ return false;
+}
+
+static int omap_reset_get_st_bit(struct omap_reset_data *reset,
+ unsigned long id)
+{
+ const struct omap_rst_map *map = reset->prm->data->rstmap;
+
+ while (map->rst >= 0) {
+ if (map->rst == id)
+ return map->st;
+
+ map++;
+ }
+
+ return id;
+}
+
+static int omap_reset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct omap_reset_data *reset = to_omap_reset_data(rcdev);
+ u32 v;
+ int st_bit = omap_reset_get_st_bit(reset, id);
+ bool has_rstst = reset->prm->data->rstst ||
+ (reset->prm->data->flags & OMAP_PRM_HAS_RSTST);
+
+ /* Check if we have rstst */
+ if (!has_rstst)
+ return -ENOTSUPP;
+
+ /* Check if hw reset line is asserted */
+ v = readl_relaxed(reset->prm->base + reset->prm->data->rstctrl);
+ if (v & BIT(id))
+ return 1;
+
+ /*
+ * Check reset status, high value means reset sequence has been
+ * completed successfully so we can return 0 here (reset deasserted)
+ */
+ v = readl_relaxed(reset->prm->base + reset->prm->data->rstst);
+ v >>= st_bit;
+ v &= 1;
+
+ return !v;
+}
+
+static int omap_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct omap_reset_data *reset = to_omap_reset_data(rcdev);
+ u32 v;
+ unsigned long flags;
+
+ /* assert the reset control line */
+ spin_lock_irqsave(&reset->lock, flags);
+ v = readl_relaxed(reset->prm->base + reset->prm->data->rstctrl);
+ v |= 1 << id;
+ writel_relaxed(v, reset->prm->base + reset->prm->data->rstctrl);
+ spin_unlock_irqrestore(&reset->lock, flags);
+
+ return 0;
+}
+
+static int omap_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct omap_reset_data *reset = to_omap_reset_data(rcdev);
+ u32 v;
+ int st_bit;
+ bool has_rstst;
+ unsigned long flags;
+ struct ti_prm_platform_data *pdata = dev_get_platdata(reset->dev);
+ int ret = 0;
+
+ /* Nothing to do if the reset is already deasserted */
+ if (!omap_reset_status(rcdev, id))
+ return 0;
+
+ has_rstst = reset->prm->data->rstst ||
+ (reset->prm->data->flags & OMAP_PRM_HAS_RSTST);
+
+ if (has_rstst) {
+ st_bit = omap_reset_get_st_bit(reset, id);
+
+ /* Clear the reset status by writing 1 to the status bit */
+ v = 1 << st_bit;
+ writel_relaxed(v, reset->prm->base + reset->prm->data->rstst);
+ }
+
+ if (reset->clkdm)
+ pdata->clkdm_deny_idle(reset->clkdm);
+
+ /* de-assert the reset control line */
+ spin_lock_irqsave(&reset->lock, flags);
+ v = readl_relaxed(reset->prm->base + reset->prm->data->rstctrl);
+ v &= ~(1 << id);
+ writel_relaxed(v, reset->prm->base + reset->prm->data->rstctrl);
+ spin_unlock_irqrestore(&reset->lock, flags);
+
+ /* wait for the reset bit to clear */
+ ret = readl_relaxed_poll_timeout_atomic(reset->prm->base +
+ reset->prm->data->rstctrl,
+ v, !(v & BIT(id)), 1,
+ OMAP_RESET_MAX_WAIT);
+ if (ret)
+		pr_err("%s: timed out waiting for %s:%lu\n", __func__,
+ reset->prm->data->name, id);
+
+ /* wait for the status to be set */
+ if (has_rstst) {
+ ret = readl_relaxed_poll_timeout_atomic(reset->prm->base +
+ reset->prm->data->rstst,
+ v, v & BIT(st_bit), 1,
+ OMAP_RESET_MAX_WAIT);
+ if (ret)
+			pr_err("%s: timed out waiting for %s:%lu\n", __func__,
+ reset->prm->data->name, id);
+ }
+
+ if (reset->clkdm)
+ pdata->clkdm_allow_idle(reset->clkdm);
+
+ return ret;
+}
+
+static const struct reset_control_ops omap_reset_ops = {
+ .assert = omap_reset_assert,
+ .deassert = omap_reset_deassert,
+ .status = omap_reset_status,
+};
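+
+/*
+ * Consumer sketch: with of_reset_n_cells = 1, a client device node
+ * references one of these lines by index (phandle name illustrative):
+ *
+ *	resets = <&prm_dsp1 0>;
+ *
+ * and toggles it via reset_control_assert()/reset_control_deassert()
+ * on the handle returned by devm_reset_control_get_exclusive().
+ */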
+
+static int omap_prm_reset_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ struct omap_reset_data *reset = to_omap_reset_data(rcdev);
+
+ if (!_is_valid_reset(reset, reset_spec->args[0]))
+ return -EINVAL;
+
+ return reset_spec->args[0];
+}
+
+static int omap_prm_reset_init(struct platform_device *pdev,
+ struct omap_prm *prm)
+{
+ struct omap_reset_data *reset;
+ const struct omap_rst_map *map;
+ struct ti_prm_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ char buf[32];
+ u32 v;
+
+ /*
+ * Check if we have controllable resets. If either rstctrl is non-zero
+ * or OMAP_PRM_HAS_RSTCTRL flag is set, we have reset control register
+ * for the domain.
+ */
+ if (!prm->data->rstctrl && !(prm->data->flags & OMAP_PRM_HAS_RSTCTRL))
+ return 0;
+
+ /* Check if we have the pdata callbacks in place */
+ if (!pdata || !pdata->clkdm_lookup || !pdata->clkdm_deny_idle ||
+ !pdata->clkdm_allow_idle)
+ return -EINVAL;
+
+ map = prm->data->rstmap;
+ if (!map)
+ return -EINVAL;
+
+ reset = devm_kzalloc(&pdev->dev, sizeof(*reset), GFP_KERNEL);
+ if (!reset)
+ return -ENOMEM;
+
+ reset->rcdev.owner = THIS_MODULE;
+ reset->rcdev.ops = &omap_reset_ops;
+ reset->rcdev.of_node = pdev->dev.of_node;
+ reset->rcdev.nr_resets = OMAP_MAX_RESETS;
+ reset->rcdev.of_xlate = omap_prm_reset_xlate;
+ reset->rcdev.of_reset_n_cells = 1;
+ reset->dev = &pdev->dev;
+ spin_lock_init(&reset->lock);
+
+ reset->prm = prm;
+
+ sprintf(buf, "%s_clkdm", prm->data->clkdm_name ? prm->data->clkdm_name :
+ prm->data->name);
+
+ if (!(prm->data->flags & OMAP_PRM_HAS_NO_CLKDM)) {
+ reset->clkdm = pdata->clkdm_lookup(buf);
+ if (!reset->clkdm)
+ return -EINVAL;
+ }
+
+ while (map->rst >= 0) {
+ reset->mask |= BIT(map->rst);
+ map++;
+ }
+
+	/* Quirk: assert all rst_map_012 reset bits at init to avoid errors */
+ if (prm->data->rstmap == rst_map_012) {
+ v = readl_relaxed(reset->prm->base + reset->prm->data->rstctrl);
+ if ((v & reset->mask) != reset->mask) {
+ dev_dbg(&pdev->dev, "Asserting all resets: %08x\n", v);
+ writel_relaxed(reset->mask, reset->prm->base +
+ reset->prm->data->rstctrl);
+ }
+ }
+
+ return devm_reset_controller_register(&pdev->dev, &reset->rcdev);
+}
+
+static int omap_prm_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ const struct omap_prm_data *data;
+ struct omap_prm *prm;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data)
+ return -ENOTSUPP;
+
+ prm = devm_kzalloc(&pdev->dev, sizeof(*prm), GFP_KERNEL);
+ if (!prm)
+ return -ENOMEM;
+
+ while (data->base != res->start) {
+ if (!data->base)
+ return -EINVAL;
+ data++;
+ }
+
+ prm->data = data;
+
+ prm->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(prm->base))
+ return PTR_ERR(prm->base);
+
+ ret = omap_prm_domain_init(&pdev->dev, prm);
+ if (ret)
+ return ret;
+
+ ret = omap_prm_reset_init(pdev, prm);
+ if (ret)
+ goto err_domain;
+
+ return 0;
+
+err_domain:
+ of_genpd_del_provider(pdev->dev.of_node);
+ pm_genpd_remove(&prm->prmd->pd);
+
+ return ret;
+}
+
+static struct platform_driver omap_prm_driver = {
+ .probe = omap_prm_probe,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = omap_prm_id_table,
+ },
+};
+builtin_platform_driver(omap_prm_driver);
diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c
new file mode 100644
index 000000000..f04c21157
--- /dev/null
+++ b/drivers/soc/ti/pm33xx.c
@@ -0,0 +1,611 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AM33XX Power Management Routines
+ *
+ * Copyright (C) 2012-2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Vaibhav Bedia, Dave Gerlach
+ */
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_data/pm33xx.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/rtc.h>
+#include <linux/rtc/rtc-omap.h>
+#include <linux/sizes.h>
+#include <linux/sram.h>
+#include <linux/suspend.h>
+#include <linux/ti-emif-sram.h>
+#include <linux/wkup_m3_ipc.h>
+
+#include <asm/proc-fns.h>
+#include <asm/suspend.h>
+#include <asm/system_misc.h>
+
+#define AMX3_PM_SRAM_SYMBOL_OFFSET(sym) ((unsigned long)(sym) - \
+ (unsigned long)pm_sram->do_wfi)
+
+#define RTC_SCRATCH_RESUME_REG 0
+#define RTC_SCRATCH_MAGIC_REG 1
+#define RTC_REG_BOOT_MAGIC 0x8cd0 /* RTC */
+#define GIC_INT_SET_PENDING_BASE 0x200
+#define AM43XX_GIC_DIST_BASE 0x48241000
+
+static void __iomem *rtc_base_virt;
+static struct clk *rtc_fck;
+static u32 rtc_magic_val;
+
+static int (*am33xx_do_wfi_sram)(unsigned long unused);
+static phys_addr_t am33xx_do_wfi_sram_phys;
+
+static struct gen_pool *sram_pool, *sram_pool_data;
+static unsigned long ocmcram_location, ocmcram_location_data;
+
+static struct rtc_device *omap_rtc;
+static void __iomem *gic_dist_base;
+
+static struct am33xx_pm_platform_data *pm_ops;
+static struct am33xx_pm_sram_addr *pm_sram;
+
+static struct device *pm33xx_dev;
+static struct wkup_m3_ipc *m3_ipc;
+
+#ifdef CONFIG_SUSPEND
+static int rtc_only_idle;
+static int retrigger_irq;
+static unsigned long suspend_wfi_flags;
+
+static struct wkup_m3_wakeup_src wakeup_src = {.irq_nr = 0,
+ .src = "Unknown",
+};
+
+static struct wkup_m3_wakeup_src rtc_alarm_wakeup = {
+ .irq_nr = 108, .src = "RTC Alarm",
+};
+
+static struct wkup_m3_wakeup_src rtc_ext_wakeup = {
+ .irq_nr = 0, .src = "Ext wakeup",
+};
+#endif
+
+static u32 sram_suspend_address(unsigned long addr)
+{
+ return ((unsigned long)am33xx_do_wfi_sram +
+ AMX3_PM_SRAM_SYMBOL_OFFSET(addr));
+}
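+
+/*
+ * Worked example (addresses are hypothetical): if pm_sram->do_wfi is at
+ * 0xc0100000 in the kernel image and a symbol sits at 0xc0100040, its
+ * AMX3_PM_SRAM_SYMBOL_OFFSET() is 0x40; once the code has been copied
+ * to SRAM, sram_suspend_address() resolves that symbol to
+ * am33xx_do_wfi_sram + 0x40.
+ */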
+
+static int am33xx_push_sram_idle(void)
+{
+ struct am33xx_pm_ro_sram_data ro_sram_data;
+ int ret;
+ u32 table_addr, ro_data_addr;
+ void *copy_addr;
+
+ ro_sram_data.amx3_pm_sram_data_virt = ocmcram_location_data;
+ ro_sram_data.amx3_pm_sram_data_phys =
+ gen_pool_virt_to_phys(sram_pool_data, ocmcram_location_data);
+ ro_sram_data.rtc_base_virt = rtc_base_virt;
+
+ /* Save physical address to calculate resume offset during pm init */
+ am33xx_do_wfi_sram_phys = gen_pool_virt_to_phys(sram_pool,
+ ocmcram_location);
+
+ am33xx_do_wfi_sram = sram_exec_copy(sram_pool, (void *)ocmcram_location,
+ pm_sram->do_wfi,
+ *pm_sram->do_wfi_sz);
+ if (!am33xx_do_wfi_sram) {
+ dev_err(pm33xx_dev,
+ "PM: %s: am33xx_do_wfi copy to sram failed\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ table_addr =
+ sram_suspend_address((unsigned long)pm_sram->emif_sram_table);
+ ret = ti_emif_copy_pm_function_table(sram_pool, (void *)table_addr);
+ if (ret) {
+ dev_dbg(pm33xx_dev,
+ "PM: %s: EMIF function copy failed\n", __func__);
+ return -EPROBE_DEFER;
+ }
+
+ ro_data_addr =
+ sram_suspend_address((unsigned long)pm_sram->ro_sram_data);
+ copy_addr = sram_exec_copy(sram_pool, (void *)ro_data_addr,
+ &ro_sram_data,
+ sizeof(ro_sram_data));
+ if (!copy_addr) {
+ dev_err(pm33xx_dev,
+ "PM: %s: ro_sram_data copy to sram failed\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int am33xx_do_sram_idle(u32 wfi_flags)
+{
+ if (!m3_ipc || !pm_ops)
+ return 0;
+
+ if (wfi_flags & WFI_FLAG_WAKE_M3)
+ m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_IDLE);
+
+ return pm_ops->cpu_suspend(am33xx_do_wfi_sram, wfi_flags);
+}
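+
+/*
+ * Illustrative call (a sketch; this callback is handed to the SoC PM
+ * core via pm_ops->init() in probe below): a deep idle state would
+ * invoke it as e.g.
+ *
+ *	am33xx_do_sram_idle(WFI_FLAG_SELF_REFRESH | WFI_FLAG_WAKE_M3);
+ *
+ * which first asks the wkup_m3 to prepare for idle and then enters the
+ * SRAM-resident WFI routine through the SoC cpu_suspend hook.
+ */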
+
+static int __init am43xx_map_gic(void)
+{
+ gic_dist_base = ioremap(AM43XX_GIC_DIST_BASE, SZ_4K);
+
+ if (!gic_dist_base)
+ return -ENOMEM;
+
+ return 0;
+}
+
+#ifdef CONFIG_SUSPEND
+static struct wkup_m3_wakeup_src rtc_wake_src(void)
+{
+ u32 i;
+
+ i = __raw_readl(rtc_base_virt + 0x44) & 0x40;
+
+ if (i) {
+ retrigger_irq = rtc_alarm_wakeup.irq_nr;
+ return rtc_alarm_wakeup;
+ }
+
+ retrigger_irq = rtc_ext_wakeup.irq_nr;
+
+ return rtc_ext_wakeup;
+}
+
+static int am33xx_rtc_only_idle(unsigned long wfi_flags)
+{
+ omap_rtc_power_off_program(&omap_rtc->dev);
+ am33xx_do_wfi_sram(wfi_flags);
+ return 0;
+}
+
+/*
+ * Note that the RTC module clock must be re-enabled only for rtc+ddr suspend.
+ * The module appears to work fine staying in the SYSC_IDLE_SMART_WKUP mode
+ * configured by the interconnect code for both rtc+ddr suspend and retention
+ * suspend.
+ */
+static int am33xx_pm_suspend(suspend_state_t suspend_state)
+{
+ int i, ret = 0;
+
+ if (suspend_state == PM_SUSPEND_MEM &&
+ pm_ops->check_off_mode_enable()) {
+ ret = clk_prepare_enable(rtc_fck);
+ if (ret) {
+ dev_err(pm33xx_dev, "Failed to enable clock: %i\n", ret);
+ return ret;
+ }
+
+ pm_ops->save_context();
+ suspend_wfi_flags |= WFI_FLAG_RTC_ONLY;
+ clk_save_context();
+ ret = pm_ops->soc_suspend(suspend_state, am33xx_rtc_only_idle,
+ suspend_wfi_flags);
+
+ suspend_wfi_flags &= ~WFI_FLAG_RTC_ONLY;
+ dev_info(pm33xx_dev, "Entering RTC Only mode with DDR in self-refresh\n");
+
+ if (!ret) {
+ clk_restore_context();
+ pm_ops->restore_context();
+ m3_ipc->ops->set_rtc_only(m3_ipc);
+ am33xx_push_sram_idle();
+ }
+ } else {
+ ret = pm_ops->soc_suspend(suspend_state, am33xx_do_wfi_sram,
+ suspend_wfi_flags);
+ }
+
+ if (ret) {
+ dev_err(pm33xx_dev, "PM: Kernel suspend failure\n");
+ } else {
+ i = m3_ipc->ops->request_pm_status(m3_ipc);
+
+ switch (i) {
+ case 0:
+ dev_info(pm33xx_dev,
+ "PM: Successfully put all powerdomains to target state\n");
+ break;
+ case 1:
+ dev_err(pm33xx_dev,
+ "PM: Could not transition all powerdomains to target state\n");
+ ret = -1;
+ break;
+ default:
+ dev_err(pm33xx_dev,
+ "PM: CM3 returned unknown result = %d\n", i);
+ ret = -1;
+ }
+
+ /* print the wakeup reason */
+ if (rtc_only_idle) {
+ wakeup_src = rtc_wake_src();
+ pr_info("PM: Wakeup source %s\n", wakeup_src.src);
+ } else {
+ pr_info("PM: Wakeup source %s\n",
+ m3_ipc->ops->request_wake_src(m3_ipc));
+ }
+ }
+
+ if (suspend_state == PM_SUSPEND_MEM && pm_ops->check_off_mode_enable())
+ clk_disable_unprepare(rtc_fck);
+
+ return ret;
+}
+
+static int am33xx_pm_enter(suspend_state_t suspend_state)
+{
+ int ret = 0;
+
+ switch (suspend_state) {
+ case PM_SUSPEND_MEM:
+ case PM_SUSPEND_STANDBY:
+ ret = am33xx_pm_suspend(suspend_state);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int am33xx_pm_begin(suspend_state_t state)
+{
+ int ret = -EINVAL;
+ struct nvmem_device *nvmem;
+
+ if (state == PM_SUSPEND_MEM && pm_ops->check_off_mode_enable()) {
+ nvmem = devm_nvmem_device_get(&omap_rtc->dev,
+ "omap_rtc_scratch0");
+ if (!IS_ERR(nvmem))
+ nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4,
+ (void *)&rtc_magic_val);
+ rtc_only_idle = 1;
+ } else {
+ rtc_only_idle = 0;
+ }
+
+ pm_ops->begin_suspend();
+
+ switch (state) {
+ case PM_SUSPEND_MEM:
+ ret = m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_DEEPSLEEP);
+ break;
+ case PM_SUSPEND_STANDBY:
+ ret = m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_STANDBY);
+ break;
+ }
+
+ return ret;
+}
+
+static void am33xx_pm_end(void)
+{
+ u32 val = 0;
+ struct nvmem_device *nvmem;
+
+ nvmem = devm_nvmem_device_get(&omap_rtc->dev, "omap_rtc_scratch0");
+ if (IS_ERR(nvmem))
+ return;
+
+ m3_ipc->ops->finish_low_power(m3_ipc);
+ if (rtc_only_idle) {
+ if (retrigger_irq) {
+			/*
+			 * Each 32-bit GIC Interrupt Set-Pending register
+			 * covers 32 interrupts: compute the register offset
+			 * by dividing the interrupt number by 32 and
+			 * multiplying by 4, then set the bit for that
+			 * interrupt within the register. E.g. for the RTC
+			 * alarm (irq 108): offset 108 / 32 * 4 = 12,
+			 * bit 108 & 31 = 12.
+			 */
+ writel_relaxed(1 << (retrigger_irq & 31),
+ gic_dist_base + GIC_INT_SET_PENDING_BASE
+ + retrigger_irq / 32 * 4);
+ }
+
+ nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4,
+ (void *)&val);
+ }
+
+ rtc_only_idle = 0;
+
+ pm_ops->finish_suspend();
+}
+
+static int am33xx_pm_valid(suspend_state_t state)
+{
+ switch (state) {
+ case PM_SUSPEND_STANDBY:
+ case PM_SUSPEND_MEM:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static const struct platform_suspend_ops am33xx_pm_ops = {
+ .begin = am33xx_pm_begin,
+ .end = am33xx_pm_end,
+ .enter = am33xx_pm_enter,
+ .valid = am33xx_pm_valid,
+};
+#endif /* CONFIG_SUSPEND */
+
+static void am33xx_pm_set_ipc_ops(void)
+{
+ u32 resume_address;
+ int temp;
+
+ temp = ti_emif_get_mem_type();
+ if (temp < 0) {
+ dev_err(pm33xx_dev, "PM: Cannot determine memory type, no PM available\n");
+ return;
+ }
+ m3_ipc->ops->set_mem_type(m3_ipc, temp);
+
+ /* Physical resume address to be used by ROM code */
+ resume_address = am33xx_do_wfi_sram_phys +
+ *pm_sram->resume_offset + 0x4;
+
+ m3_ipc->ops->set_resume_address(m3_ipc, (void *)resume_address);
+}
+
+static void am33xx_pm_free_sram(void)
+{
+ gen_pool_free(sram_pool, ocmcram_location, *pm_sram->do_wfi_sz);
+ gen_pool_free(sram_pool_data, ocmcram_location_data,
+ sizeof(struct am33xx_pm_ro_sram_data));
+}
+
+/*
+ * Allocate the SRAM into which the minimal suspend-resume code will be
+ * pushed
+ */
+static int am33xx_pm_alloc_sram(void)
+{
+ struct device_node *np;
+ int ret = 0;
+
+ np = of_find_compatible_node(NULL, NULL, "ti,omap3-mpu");
+ if (!np) {
+ np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
+ if (!np) {
+ dev_err(pm33xx_dev, "PM: %s: Unable to find device node for mpu\n",
+ __func__);
+ return -ENODEV;
+ }
+ }
+
+ sram_pool = of_gen_pool_get(np, "pm-sram", 0);
+ if (!sram_pool) {
+ dev_err(pm33xx_dev, "PM: %s: Unable to get sram pool for ocmcram\n",
+ __func__);
+ ret = -ENODEV;
+ goto mpu_put_node;
+ }
+
+ sram_pool_data = of_gen_pool_get(np, "pm-sram", 1);
+ if (!sram_pool_data) {
+ dev_err(pm33xx_dev, "PM: %s: Unable to get sram data pool for ocmcram\n",
+ __func__);
+ ret = -ENODEV;
+ goto mpu_put_node;
+ }
+
+ ocmcram_location = gen_pool_alloc(sram_pool, *pm_sram->do_wfi_sz);
+ if (!ocmcram_location) {
+ dev_err(pm33xx_dev, "PM: %s: Unable to allocate memory from ocmcram\n",
+ __func__);
+ ret = -ENOMEM;
+ goto mpu_put_node;
+ }
+
+ ocmcram_location_data = gen_pool_alloc(sram_pool_data,
+ sizeof(struct emif_regs_amx3));
+ if (!ocmcram_location_data) {
+ dev_err(pm33xx_dev, "PM: Unable to allocate memory from ocmcram\n");
+ gen_pool_free(sram_pool, ocmcram_location, *pm_sram->do_wfi_sz);
+ ret = -ENOMEM;
+ }
+
+mpu_put_node:
+ of_node_put(np);
+ return ret;
+}
+
+static int am33xx_pm_rtc_setup(void)
+{
+ struct device_node *np;
+ unsigned long val = 0;
+ struct nvmem_device *nvmem;
+ int error;
+
+ np = of_find_node_by_name(NULL, "rtc");
+
+ if (of_device_is_available(np)) {
+ /* RTC interconnect target module clock */
+ rtc_fck = of_clk_get_by_name(np->parent, "fck");
+ if (IS_ERR(rtc_fck))
+ return PTR_ERR(rtc_fck);
+
+ rtc_base_virt = of_iomap(np, 0);
+ if (!rtc_base_virt) {
+			pr_warn("PM: could not iomap rtc\n");
+ error = -ENODEV;
+ goto err_clk_put;
+ }
+
+ omap_rtc = rtc_class_open("rtc0");
+ if (!omap_rtc) {
+			pr_warn("PM: rtc0 not available\n");
+ error = -EPROBE_DEFER;
+ goto err_iounmap;
+ }
+
+ nvmem = devm_nvmem_device_get(&omap_rtc->dev,
+ "omap_rtc_scratch0");
+ if (!IS_ERR(nvmem)) {
+ nvmem_device_read(nvmem, RTC_SCRATCH_MAGIC_REG * 4,
+ 4, (void *)&rtc_magic_val);
+ if ((rtc_magic_val & 0xffff) != RTC_REG_BOOT_MAGIC)
+ pr_warn("PM: bootloader does not support rtc-only!\n");
+
+ nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4,
+ 4, (void *)&val);
+ val = pm_sram->resume_address;
+ nvmem_device_write(nvmem, RTC_SCRATCH_RESUME_REG * 4,
+ 4, (void *)&val);
+ }
+ } else {
+		pr_warn("PM: no RTC available, rtc-only mode disabled.\n");
+ }
+
+ return 0;
+
+err_iounmap:
+ iounmap(rtc_base_virt);
+err_clk_put:
+ clk_put(rtc_fck);
+
+ return error;
+}
+
+static int am33xx_pm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ if (!of_machine_is_compatible("ti,am33xx") &&
+ !of_machine_is_compatible("ti,am43"))
+ return -ENODEV;
+
+ pm_ops = dev->platform_data;
+ if (!pm_ops) {
+ dev_err(dev, "PM: Cannot get core PM ops!\n");
+ return -ENODEV;
+ }
+
+ ret = am43xx_map_gic();
+ if (ret) {
+ pr_err("PM: Could not ioremap GIC base\n");
+ return ret;
+ }
+
+ pm_sram = pm_ops->get_sram_addrs();
+ if (!pm_sram) {
+ dev_err(dev, "PM: Cannot get PM asm function addresses!!\n");
+ return -ENODEV;
+ }
+
+ m3_ipc = wkup_m3_ipc_get();
+ if (!m3_ipc) {
+ pr_err("PM: Cannot get wkup_m3_ipc handle\n");
+ return -EPROBE_DEFER;
+ }
+
+ pm33xx_dev = dev;
+
+ ret = am33xx_pm_alloc_sram();
+ if (ret)
+ goto err_wkup_m3_ipc_put;
+
+ ret = am33xx_pm_rtc_setup();
+ if (ret)
+ goto err_free_sram;
+
+ ret = am33xx_push_sram_idle();
+ if (ret)
+ goto err_unsetup_rtc;
+
+ am33xx_pm_set_ipc_ops();
+
+#ifdef CONFIG_SUSPEND
+ suspend_set_ops(&am33xx_pm_ops);
+
+ /*
+ * For a system suspend we must flush the caches, we want
+ * the DDR in self-refresh, we want to save the context
+ * of the EMIF, and we want the wkup_m3 to handle low-power
+ * transition.
+ */
+ suspend_wfi_flags |= WFI_FLAG_FLUSH_CACHE;
+ suspend_wfi_flags |= WFI_FLAG_SELF_REFRESH;
+ suspend_wfi_flags |= WFI_FLAG_SAVE_EMIF;
+ suspend_wfi_flags |= WFI_FLAG_WAKE_M3;
+#endif /* CONFIG_SUSPEND */
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ goto err_pm_runtime_disable;
+
+ ret = pm_ops->init(am33xx_do_sram_idle);
+ if (ret) {
+ dev_err(dev, "Unable to call core pm init!\n");
+ ret = -ENODEV;
+ goto err_pm_runtime_put;
+ }
+
+ return 0;
+
+err_pm_runtime_put:
+ pm_runtime_put_sync(dev);
+err_pm_runtime_disable:
+ pm_runtime_disable(dev);
+err_unsetup_rtc:
+ iounmap(rtc_base_virt);
+ clk_put(rtc_fck);
+err_free_sram:
+ am33xx_pm_free_sram();
+ pm33xx_dev = NULL;
+err_wkup_m3_ipc_put:
+ wkup_m3_ipc_put(m3_ipc);
+ return ret;
+}
+
+static int am33xx_pm_remove(struct platform_device *pdev)
+{
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ if (pm_ops->deinit)
+ pm_ops->deinit();
+ suspend_set_ops(NULL);
+ wkup_m3_ipc_put(m3_ipc);
+ am33xx_pm_free_sram();
+ iounmap(rtc_base_virt);
+ clk_put(rtc_fck);
+ return 0;
+}
+
+static struct platform_driver am33xx_pm_driver = {
+ .driver = {
+ .name = "pm33xx",
+ },
+ .probe = am33xx_pm_probe,
+ .remove = am33xx_pm_remove,
+};
+module_platform_driver(am33xx_pm_driver);
+
+MODULE_ALIAS("platform:pm33xx");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("am33xx power management driver");
diff --git a/drivers/soc/ti/pruss.c b/drivers/soc/ti/pruss.c
new file mode 100644
index 000000000..6882c86b3
--- /dev/null
+++ b/drivers/soc/ti/pruss.c
@@ -0,0 +1,358 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PRU-ICSS platform driver for various TI SoCs
+ *
+ * Copyright (C) 2014-2020 Texas Instruments Incorporated - http://www.ti.com/
+ * Author(s):
+ * Suman Anna <s-anna@ti.com>
+ * Andrew F. Davis <afd@ti.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/pruss_driver.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+/**
+ * struct pruss_private_data - PRUSS driver private data
+ * @has_no_sharedram: flag to indicate the absence of PRUSS Shared Data RAM
+ * @has_core_mux_clock: flag to indicate the presence of PRUSS core clock
+ */
+struct pruss_private_data {
+ bool has_no_sharedram;
+ bool has_core_mux_clock;
+};
+
+static void pruss_of_free_clk_provider(void *data)
+{
+ struct device_node *clk_mux_np = data;
+
+ of_clk_del_provider(clk_mux_np);
+ of_node_put(clk_mux_np);
+}
+
+static int pruss_clk_mux_setup(struct pruss *pruss, struct clk *clk_mux,
+ char *mux_name, struct device_node *clks_np)
+{
+ struct device_node *clk_mux_np;
+ struct device *dev = pruss->dev;
+ char *clk_mux_name;
+ unsigned int num_parents;
+ const char **parent_names;
+ void __iomem *reg;
+ u32 reg_offset;
+ int ret;
+
+ clk_mux_np = of_get_child_by_name(clks_np, mux_name);
+ if (!clk_mux_np) {
+ dev_err(dev, "%pOF is missing its '%s' node\n", clks_np,
+ mux_name);
+ return -ENODEV;
+ }
+
+ num_parents = of_clk_get_parent_count(clk_mux_np);
+ if (num_parents < 1) {
+ dev_err(dev, "mux-clock %pOF must have parents\n", clk_mux_np);
+ ret = -EINVAL;
+ goto put_clk_mux_np;
+ }
+
+	parent_names = devm_kcalloc(dev, num_parents, sizeof(*parent_names),
+				    GFP_KERNEL);
+ if (!parent_names) {
+ ret = -ENOMEM;
+ goto put_clk_mux_np;
+ }
+
+ of_clk_parent_fill(clk_mux_np, parent_names, num_parents);
+
+ clk_mux_name = devm_kasprintf(dev, GFP_KERNEL, "%s.%pOFn",
+ dev_name(dev), clk_mux_np);
+ if (!clk_mux_name) {
+ ret = -ENOMEM;
+ goto put_clk_mux_np;
+ }
+
+ ret = of_property_read_u32(clk_mux_np, "reg", &reg_offset);
+ if (ret)
+ goto put_clk_mux_np;
+
+ reg = pruss->cfg_base + reg_offset;
+
+ clk_mux = clk_register_mux(NULL, clk_mux_name, parent_names,
+ num_parents, 0, reg, 0, 1, 0, NULL);
+ if (IS_ERR(clk_mux)) {
+ ret = PTR_ERR(clk_mux);
+ goto put_clk_mux_np;
+ }
+
+ ret = devm_add_action_or_reset(dev, (void(*)(void *))clk_unregister_mux,
+ clk_mux);
+ if (ret) {
+ dev_err(dev, "failed to add clkmux unregister action %d", ret);
+ goto put_clk_mux_np;
+ }
+
+ ret = of_clk_add_provider(clk_mux_np, of_clk_src_simple_get, clk_mux);
+ if (ret)
+ goto put_clk_mux_np;
+
+ ret = devm_add_action_or_reset(dev, pruss_of_free_clk_provider,
+ clk_mux_np);
+ if (ret) {
+ dev_err(dev, "failed to add clkmux free action %d", ret);
+ goto put_clk_mux_np;
+ }
+
+ return 0;
+
+put_clk_mux_np:
+ of_node_put(clk_mux_np);
+ return ret;
+}
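+
+/*
+ * Illustrative effect of the mux registered above (the register layout
+ * is an assumption for illustration; consult the SoC TRM): with shift 0
+ * and width 1, clk_set_parent() on the returned mux toggles a single
+ * select bit at pruss->cfg_base + reg_offset to choose between the two
+ * parents filled in from the DT "clocks" child node.
+ */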
+
+static int pruss_clk_init(struct pruss *pruss, struct device_node *cfg_node)
+{
+ const struct pruss_private_data *data;
+ struct device_node *clks_np;
+ struct device *dev = pruss->dev;
+ int ret = 0;
+
+ data = of_device_get_match_data(dev);
+
+ clks_np = of_get_child_by_name(cfg_node, "clocks");
+ if (!clks_np) {
+ dev_err(dev, "%pOF is missing its 'clocks' node\n", cfg_node);
+ return -ENODEV;
+ }
+
+ if (data && data->has_core_mux_clock) {
+ ret = pruss_clk_mux_setup(pruss, pruss->core_clk_mux,
+ "coreclk-mux", clks_np);
+ if (ret) {
+ dev_err(dev, "failed to setup coreclk-mux\n");
+ goto put_clks_node;
+ }
+ }
+
+ ret = pruss_clk_mux_setup(pruss, pruss->iep_clk_mux, "iepclk-mux",
+ clks_np);
+ if (ret) {
+ dev_err(dev, "failed to setup iepclk-mux\n");
+ goto put_clks_node;
+ }
+
+put_clks_node:
+ of_node_put(clks_np);
+
+ return ret;
+}
+
+static struct regmap_config regmap_conf = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+static int pruss_cfg_of_init(struct device *dev, struct pruss *pruss)
+{
+ struct device_node *np = dev_of_node(dev);
+ struct device_node *child;
+ struct resource res;
+ int ret;
+
+ child = of_get_child_by_name(np, "cfg");
+ if (!child) {
+		dev_err(dev, "%pOF is missing its 'cfg' node\n", np);
+ return -ENODEV;
+ }
+
+ if (of_address_to_resource(child, 0, &res)) {
+ ret = -ENOMEM;
+ goto node_put;
+ }
+
+ pruss->cfg_base = devm_ioremap(dev, res.start, resource_size(&res));
+ if (!pruss->cfg_base) {
+ ret = -ENOMEM;
+ goto node_put;
+ }
+
+ regmap_conf.name = kasprintf(GFP_KERNEL, "%pOFn@%llx", child,
+ (u64)res.start);
+ regmap_conf.max_register = resource_size(&res) - 4;
+
+ pruss->cfg_regmap = devm_regmap_init_mmio(dev, pruss->cfg_base,
+ &regmap_conf);
+ kfree(regmap_conf.name);
+ if (IS_ERR(pruss->cfg_regmap)) {
+ dev_err(dev, "regmap_init_mmio failed for cfg, ret = %ld\n",
+ PTR_ERR(pruss->cfg_regmap));
+ ret = PTR_ERR(pruss->cfg_regmap);
+ goto node_put;
+ }
+
+ ret = pruss_clk_init(pruss, child);
+ if (ret)
+ dev_err(dev, "pruss_clk_init failed, ret = %d\n", ret);
+
+node_put:
+ of_node_put(child);
+ return ret;
+}
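+
+/*
+ * Illustrative client usage (the offset and mask are hypothetical, not
+ * part of this driver): sub-module drivers access the CFG space through
+ * the regmap created above, e.g.
+ *
+ *	regmap_update_bits(pruss->cfg_regmap, 0x08, BIT(0), BIT(0));
+ *
+ * which performs a read-modify-write of a single bit in one CFG
+ * register.
+ */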
+
+static int pruss_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev_of_node(dev);
+ struct device_node *child;
+ struct pruss *pruss;
+ struct resource res;
+ int ret, i, index;
+ const struct pruss_private_data *data;
+ const char *mem_names[PRUSS_MEM_MAX] = { "dram0", "dram1", "shrdram2" };
+
+ data = of_device_get_match_data(&pdev->dev);
+
+ ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "failed to set the DMA coherent mask");
+ return ret;
+ }
+
+ pruss = devm_kzalloc(dev, sizeof(*pruss), GFP_KERNEL);
+ if (!pruss)
+ return -ENOMEM;
+
+ pruss->dev = dev;
+
+ child = of_get_child_by_name(np, "memories");
+ if (!child) {
+		dev_err(dev, "%pOF is missing its 'memories' node\n", np);
+ return -ENODEV;
+ }
+
+ for (i = 0; i < PRUSS_MEM_MAX; i++) {
+		/*
+		 * On AM437x, one of the two PRUSS units does not contain
+		 * Shared RAM; skip it
+		 */
+ if (data && data->has_no_sharedram && i == PRUSS_MEM_SHRD_RAM2)
+ continue;
+
+ index = of_property_match_string(child, "reg-names",
+ mem_names[i]);
+ if (index < 0) {
+ of_node_put(child);
+ return index;
+ }
+
+ if (of_address_to_resource(child, index, &res)) {
+ of_node_put(child);
+ return -EINVAL;
+ }
+
+ pruss->mem_regions[i].va = devm_ioremap(dev, res.start,
+ resource_size(&res));
+ if (!pruss->mem_regions[i].va) {
+ dev_err(dev, "failed to parse and map memory resource %d %s\n",
+ i, mem_names[i]);
+ of_node_put(child);
+ return -ENOMEM;
+ }
+ pruss->mem_regions[i].pa = res.start;
+ pruss->mem_regions[i].size = resource_size(&res);
+
+ dev_dbg(dev, "memory %8s: pa %pa size 0x%zx va %pK\n",
+ mem_names[i], &pruss->mem_regions[i].pa,
+ pruss->mem_regions[i].size, pruss->mem_regions[i].va);
+ }
+ of_node_put(child);
+
+ platform_set_drvdata(pdev, pruss);
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ dev_err(dev, "couldn't enable module\n");
+ goto rpm_disable;
+ }
+
+ ret = pruss_cfg_of_init(dev, pruss);
+ if (ret < 0)
+ goto rpm_put;
+
+ ret = devm_of_platform_populate(dev);
+ if (ret) {
+ dev_err(dev, "failed to register child devices\n");
+ goto rpm_put;
+ }
+
+ return 0;
+
+rpm_put:
+ pm_runtime_put_sync(dev);
+rpm_disable:
+ pm_runtime_disable(dev);
+ return ret;
+}
+
+static int pruss_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ devm_of_platform_depopulate(dev);
+
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+
+ return 0;
+}
+
+/* instance-specific driver private data */
+static const struct pruss_private_data am437x_pruss1_data = {
+ .has_no_sharedram = false,
+};
+
+static const struct pruss_private_data am437x_pruss0_data = {
+ .has_no_sharedram = true,
+};
+
+static const struct pruss_private_data am65x_j721e_pruss_data = {
+ .has_core_mux_clock = true,
+};
+
+static const struct of_device_id pruss_of_match[] = {
+ { .compatible = "ti,am3356-pruss" },
+ { .compatible = "ti,am4376-pruss0", .data = &am437x_pruss0_data, },
+ { .compatible = "ti,am4376-pruss1", .data = &am437x_pruss1_data, },
+ { .compatible = "ti,am5728-pruss" },
+ { .compatible = "ti,k2g-pruss" },
+ { .compatible = "ti,am654-icssg", .data = &am65x_j721e_pruss_data, },
+ { .compatible = "ti,j721e-icssg", .data = &am65x_j721e_pruss_data, },
+ { .compatible = "ti,am642-icssg", .data = &am65x_j721e_pruss_data, },
+ { .compatible = "ti,am625-pruss", .data = &am65x_j721e_pruss_data, },
+ {},
+};
+MODULE_DEVICE_TABLE(of, pruss_of_match);
+
+static struct platform_driver pruss_driver = {
+ .driver = {
+ .name = "pruss",
+ .of_match_table = pruss_of_match,
+ },
+ .probe = pruss_probe,
+ .remove = pruss_remove,
+};
+module_platform_driver(pruss_driver);
+
+MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");
+MODULE_DESCRIPTION("PRU-ICSS Subsystem Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/ti/smartreflex.c b/drivers/soc/ti/smartreflex.c
new file mode 100644
index 000000000..6a389a644
--- /dev/null
+++ b/drivers/soc/ti/smartreflex.c
@@ -0,0 +1,1037 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * OMAP SmartReflex Voltage Control
+ *
+ * Author: Thara Gopinath <thara@ti.com>
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ * Thara Gopinath <thara@ti.com>
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Kalle Jokiniemi
+ *
+ * Copyright (C) 2007 Texas Instruments, Inc.
+ * Lesly A M <x0080970@ti.com>
+ */
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/power/smartreflex.h>
+
+#define DRIVER_NAME "smartreflex"
+#define SMARTREFLEX_NAME_LEN 32
+#define NVALUE_NAME_LEN 40
+#define SR_DISABLE_TIMEOUT 200
+
+/* sr_list contains all the instances of smartreflex module */
+static LIST_HEAD(sr_list);
+
+static struct omap_sr_class_data *sr_class;
+static struct dentry *sr_dbg_dir;
+
+static inline void sr_write_reg(struct omap_sr *sr, unsigned offset, u32 value)
+{
+ __raw_writel(value, (sr->base + offset));
+}
+
+static inline void sr_modify_reg(struct omap_sr *sr, unsigned offset, u32 mask,
+ u32 value)
+{
+ u32 reg_val;
+
+	/*
+	 * The SmartReflex error config register is special: some of its
+	 * status bits are cleared by writing a 1 to them. To make sure no
+	 * accidental write of 1 reaches those status bits, clear them in
+	 * the value read back. This means this API does not rewrite these
+	 * bits if they are currently set, but it still allows the caller
+	 * to write them explicitly.
+	 */
+ if (sr->ip_type == SR_TYPE_V1 && offset == ERRCONFIG_V1)
+ mask |= ERRCONFIG_STATUS_V1_MASK;
+ else if (sr->ip_type == SR_TYPE_V2 && offset == ERRCONFIG_V2)
+ mask |= ERRCONFIG_VPBOUNDINTST_V2;
+
+ reg_val = __raw_readl(sr->base + offset);
+ reg_val &= ~mask;
+
+ value &= mask;
+
+ reg_val |= value;
+
+ __raw_writel(reg_val, (sr->base + offset));
+}
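+
+/*
+ * Example of the read-modify-write semantics, taken from the callers
+ * below: setting only the SRENABLE bit while leaving the rest of
+ * SRCONFIG untouched, and clearing it again:
+ *
+ *	sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, SRCONFIG_SRENABLE);
+ *	sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, 0x0);
+ */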
+
+static inline u32 sr_read_reg(struct omap_sr *sr, unsigned offset)
+{
+ return __raw_readl(sr->base + offset);
+}
+
+static struct omap_sr *_sr_lookup(struct voltagedomain *voltdm)
+{
+ struct omap_sr *sr_info;
+
+ if (!voltdm) {
+ pr_err("%s: Null voltage domain passed!\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ list_for_each_entry(sr_info, &sr_list, node) {
+ if (voltdm == sr_info->voltdm)
+ return sr_info;
+ }
+
+ return ERR_PTR(-ENODATA);
+}
+
+static irqreturn_t sr_interrupt(int irq, void *data)
+{
+ struct omap_sr *sr_info = data;
+ u32 status = 0;
+
+ switch (sr_info->ip_type) {
+ case SR_TYPE_V1:
+ /* Read the status bits */
+ status = sr_read_reg(sr_info, ERRCONFIG_V1);
+
+ /* Clear them by writing back */
+ sr_write_reg(sr_info, ERRCONFIG_V1, status);
+ break;
+ case SR_TYPE_V2:
+ /* Read the status bits */
+ status = sr_read_reg(sr_info, IRQSTATUS);
+
+ /* Clear them by writing back */
+ sr_write_reg(sr_info, IRQSTATUS, status);
+ break;
+ default:
+ dev_err(&sr_info->pdev->dev, "UNKNOWN IP type %d\n",
+ sr_info->ip_type);
+ return IRQ_NONE;
+ }
+
+ if (sr_class->notify)
+ sr_class->notify(sr_info, status);
+
+ return IRQ_HANDLED;
+}
+
+static void sr_set_clk_length(struct omap_sr *sr)
+{
+ u32 fclk_speed;
+
+ /* Try interconnect target module fck first if it already exists */
+ if (IS_ERR(sr->fck))
+ return;
+
+ fclk_speed = clk_get_rate(sr->fck);
+
+ switch (fclk_speed) {
+ case 12000000:
+ sr->clk_length = SRCLKLENGTH_12MHZ_SYSCLK;
+ break;
+ case 13000000:
+ sr->clk_length = SRCLKLENGTH_13MHZ_SYSCLK;
+ break;
+ case 19200000:
+ sr->clk_length = SRCLKLENGTH_19MHZ_SYSCLK;
+ break;
+ case 26000000:
+ sr->clk_length = SRCLKLENGTH_26MHZ_SYSCLK;
+ break;
+ case 38400000:
+ sr->clk_length = SRCLKLENGTH_38MHZ_SYSCLK;
+ break;
+ default:
+ dev_err(&sr->pdev->dev, "%s: Invalid fclk rate: %d\n",
+ __func__, fclk_speed);
+ break;
+ }
+}
+
+static void sr_start_vddautocomp(struct omap_sr *sr)
+{
+ if (!sr_class || !(sr_class->enable) || !(sr_class->configure)) {
+ dev_warn(&sr->pdev->dev,
+ "%s: smartreflex class driver not registered\n",
+ __func__);
+ return;
+ }
+
+ if (!sr_class->enable(sr))
+ sr->autocomp_active = true;
+}
+
+static void sr_stop_vddautocomp(struct omap_sr *sr)
+{
+ if (!sr_class || !(sr_class->disable)) {
+ dev_warn(&sr->pdev->dev,
+ "%s: smartreflex class driver not registered\n",
+ __func__);
+ return;
+ }
+
+ if (sr->autocomp_active) {
+ sr_class->disable(sr, 1);
+ sr->autocomp_active = false;
+ }
+}
+
+/*
+ * This function handles the initializations which have to be done
+ * only once both the SR device registration and the class driver
+ * registration have completed. It is attempted from both the SR class
+ * driver registration and the SR device initialization APIs; only one
+ * call will ultimately succeed.
+ *
+ * Currently this function registers an interrupt handler for a
+ * particular SR if the smartreflex class driver is already registered,
+ * has requested interrupts, and the SR interrupt line is present.
+ */
+static int sr_late_init(struct omap_sr *sr_info)
+{
+ struct omap_sr_data *pdata = sr_info->pdev->dev.platform_data;
+ int ret = 0;
+
+ if (sr_class->notify && sr_class->notify_flags && sr_info->irq) {
+ ret = devm_request_irq(&sr_info->pdev->dev, sr_info->irq,
+ sr_interrupt, 0, sr_info->name, sr_info);
+ if (ret)
+ goto error;
+ disable_irq(sr_info->irq);
+ }
+
+ if (pdata && pdata->enable_on_init)
+ sr_start_vddautocomp(sr_info);
+
+ return ret;
+
+error:
+ list_del(&sr_info->node);
+ dev_err(&sr_info->pdev->dev, "%s: ERROR in registering interrupt handler. Smartreflex will not function as desired\n",
+ __func__);
+
+ return ret;
+}
+
+static void sr_v1_disable(struct omap_sr *sr)
+{
+ int timeout = 0;
+ int errconf_val = ERRCONFIG_MCUACCUMINTST | ERRCONFIG_MCUVALIDINTST |
+ ERRCONFIG_MCUBOUNDINTST;
+
+ /* Enable MCUDisableAcknowledge interrupt */
+ sr_modify_reg(sr, ERRCONFIG_V1,
+ ERRCONFIG_MCUDISACKINTEN, ERRCONFIG_MCUDISACKINTEN);
+
+ /* SRCONFIG - disable SR */
+ sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, 0x0);
+
+ /* Disable all other SR interrupts and clear the status as needed */
+ if (sr_read_reg(sr, ERRCONFIG_V1) & ERRCONFIG_VPBOUNDINTST_V1)
+ errconf_val |= ERRCONFIG_VPBOUNDINTST_V1;
+ sr_modify_reg(sr, ERRCONFIG_V1,
+ (ERRCONFIG_MCUACCUMINTEN | ERRCONFIG_MCUVALIDINTEN |
+ ERRCONFIG_MCUBOUNDINTEN | ERRCONFIG_VPBOUNDINTEN_V1),
+ errconf_val);
+
+ /*
+ * Wait for SR to be disabled.
+ * wait until ERRCONFIG.MCUDISACKINTST = 1. Typical latency is 1us.
+ */
+ sr_test_cond_timeout((sr_read_reg(sr, ERRCONFIG_V1) &
+ ERRCONFIG_MCUDISACKINTST), SR_DISABLE_TIMEOUT,
+ timeout);
+
+ if (timeout >= SR_DISABLE_TIMEOUT)
+		dev_warn(&sr->pdev->dev, "%s: Smartreflex disable timed out\n",
+ __func__);
+
+ /* Disable MCUDisableAcknowledge interrupt & clear pending interrupt */
+ sr_modify_reg(sr, ERRCONFIG_V1, ERRCONFIG_MCUDISACKINTEN,
+ ERRCONFIG_MCUDISACKINTST);
+}
+
+static void sr_v2_disable(struct omap_sr *sr)
+{
+ int timeout = 0;
+
+ /* Enable MCUDisableAcknowledge interrupt */
+ sr_write_reg(sr, IRQENABLE_SET, IRQENABLE_MCUDISABLEACKINT);
+
+ /* SRCONFIG - disable SR */
+ sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, 0x0);
+
+ /*
+ * Disable all other SR interrupts and clear the status
+ * write to status register ONLY on need basis - only if status
+ * is set.
+ */
+ if (sr_read_reg(sr, ERRCONFIG_V2) & ERRCONFIG_VPBOUNDINTST_V2)
+ sr_modify_reg(sr, ERRCONFIG_V2, ERRCONFIG_VPBOUNDINTEN_V2,
+ ERRCONFIG_VPBOUNDINTST_V2);
+ else
+ sr_modify_reg(sr, ERRCONFIG_V2, ERRCONFIG_VPBOUNDINTEN_V2,
+ 0x0);
+ sr_write_reg(sr, IRQENABLE_CLR, (IRQENABLE_MCUACCUMINT |
+ IRQENABLE_MCUVALIDINT |
+ IRQENABLE_MCUBOUNDSINT));
+ sr_write_reg(sr, IRQSTATUS, (IRQSTATUS_MCUACCUMINT |
+ IRQSTATUS_MCVALIDINT |
+ IRQSTATUS_MCBOUNDSINT));
+
+ /*
+ * Wait for SR to be disabled.
+ * wait until IRQSTATUS.MCUDISACKINTST = 1. Typical latency is 1us.
+ */
+ sr_test_cond_timeout((sr_read_reg(sr, IRQSTATUS) &
+ IRQSTATUS_MCUDISABLEACKINT), SR_DISABLE_TIMEOUT,
+ timeout);
+
+ if (timeout >= SR_DISABLE_TIMEOUT)
+		dev_warn(&sr->pdev->dev, "%s: Smartreflex disable timed out\n",
+ __func__);
+
+ /* Disable MCUDisableAcknowledge interrupt & clear pending interrupt */
+ sr_write_reg(sr, IRQENABLE_CLR, IRQENABLE_MCUDISABLEACKINT);
+ sr_write_reg(sr, IRQSTATUS, IRQSTATUS_MCUDISABLEACKINT);
+}
+
+static struct omap_sr_nvalue_table *sr_retrieve_nvalue_row(
+ struct omap_sr *sr, u32 efuse_offs)
+{
+ int i;
+
+ if (!sr->nvalue_table) {
+ dev_warn(&sr->pdev->dev, "%s: Missing ntarget value table\n",
+ __func__);
+ return NULL;
+ }
+
+ for (i = 0; i < sr->nvalue_count; i++) {
+ if (sr->nvalue_table[i].efuse_offs == efuse_offs)
+ return &sr->nvalue_table[i];
+ }
+
+ return NULL;
+}
+
+/* Public Functions */
+
+/**
+ * sr_configure_errgen() - Configures the SmartReflex to perform AVS using the
+ * error generator module.
+ * @sr: SR module to be configured.
+ *
+ * This API is to be called from the smartreflex class driver to
+ * configure the error generator module inside the smartreflex module.
+ * These are the SR settings used when the ERROR module inside
+ * SmartReflex is in use. SR CLASS 3 by default uses only the ERROR
+ * module, whereas SR CLASS 2 can choose between the ERROR module and
+ * the MINMAXAVG module. Returns 0 on success and an error value in
+ * case of failure.
+ */
+int sr_configure_errgen(struct omap_sr *sr)
+{
+ u32 sr_config, sr_errconfig, errconfig_offs;
+ u32 vpboundint_en, vpboundint_st;
+ u32 senp_en = 0, senn_en = 0;
+ u8 senp_shift, senn_shift;
+
+ if (!sr) {
+ pr_warn("%s: NULL omap_sr from %pS\n",
+ __func__, (void *)_RET_IP_);
+ return -EINVAL;
+ }
+
+ if (!sr->clk_length)
+ sr_set_clk_length(sr);
+
+ senp_en = sr->senp_mod;
+ senn_en = sr->senn_mod;
+
+ sr_config = (sr->clk_length << SRCONFIG_SRCLKLENGTH_SHIFT) |
+ SRCONFIG_SENENABLE | SRCONFIG_ERRGEN_EN;
+
+ switch (sr->ip_type) {
+ case SR_TYPE_V1:
+ sr_config |= SRCONFIG_DELAYCTRL;
+ senn_shift = SRCONFIG_SENNENABLE_V1_SHIFT;
+ senp_shift = SRCONFIG_SENPENABLE_V1_SHIFT;
+ errconfig_offs = ERRCONFIG_V1;
+ vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V1;
+ vpboundint_st = ERRCONFIG_VPBOUNDINTST_V1;
+ break;
+ case SR_TYPE_V2:
+ senn_shift = SRCONFIG_SENNENABLE_V2_SHIFT;
+ senp_shift = SRCONFIG_SENPENABLE_V2_SHIFT;
+ errconfig_offs = ERRCONFIG_V2;
+ vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V2;
+ vpboundint_st = ERRCONFIG_VPBOUNDINTST_V2;
+ break;
+ default:
+		dev_err(&sr->pdev->dev, "%s: Trying to configure smartreflex module without specifying the IP type\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ sr_config |= ((senn_en << senn_shift) | (senp_en << senp_shift));
+ sr_write_reg(sr, SRCONFIG, sr_config);
+ sr_errconfig = (sr->err_weight << ERRCONFIG_ERRWEIGHT_SHIFT) |
+ (sr->err_maxlimit << ERRCONFIG_ERRMAXLIMIT_SHIFT) |
+ (sr->err_minlimit << ERRCONFIG_ERRMINLIMIT_SHIFT);
+ sr_modify_reg(sr, errconfig_offs, (SR_ERRWEIGHT_MASK |
+ SR_ERRMAXLIMIT_MASK | SR_ERRMINLIMIT_MASK),
+ sr_errconfig);
+
+ /* Enabling the interrupts if the ERROR module is used */
+ sr_modify_reg(sr, errconfig_offs, (vpboundint_en | vpboundint_st),
+ vpboundint_en);
+
+ return 0;
+}
+
+/**
+ * sr_disable_errgen() - Disables SmartReflex AVS module's errgen component
+ * @sr: SR module to be configured.
+ *
+ * This API is to be called from the smartreflex class driver to
+ * disable the error generator module inside the smartreflex module.
+ *
+ * Returns 0 on success and error value in case of failure.
+ */
+int sr_disable_errgen(struct omap_sr *sr)
+{
+ u32 errconfig_offs;
+ u32 vpboundint_en, vpboundint_st;
+
+ if (!sr) {
+ pr_warn("%s: NULL omap_sr from %pS\n",
+ __func__, (void *)_RET_IP_);
+ return -EINVAL;
+ }
+
+ switch (sr->ip_type) {
+ case SR_TYPE_V1:
+ errconfig_offs = ERRCONFIG_V1;
+ vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V1;
+ vpboundint_st = ERRCONFIG_VPBOUNDINTST_V1;
+ break;
+ case SR_TYPE_V2:
+ errconfig_offs = ERRCONFIG_V2;
+ vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V2;
+ vpboundint_st = ERRCONFIG_VPBOUNDINTST_V2;
+ break;
+ default:
+		dev_err(&sr->pdev->dev, "%s: Trying to configure smartreflex module without specifying the IP type\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Disable the Sensor and errorgen */
+ sr_modify_reg(sr, SRCONFIG, SRCONFIG_SENENABLE | SRCONFIG_ERRGEN_EN, 0);
+
+	/*
+	 * Disable the interrupts of the ERROR module.
+	 * NOTE: sr_modify_reg() is a read-modify-write, so the implicit
+	 * OCP barrier required here is present - sequencing is critical
+	 * at this point (vpboundint must be disabled after errgen is
+	 * disabled)
+	 */
+ sr_modify_reg(sr, errconfig_offs, vpboundint_en | vpboundint_st, 0);
+
+ return 0;
+}
+
+/**
+ * sr_configure_minmax() - Configures the SmartReflex to perform AVS using the
+ * minmaxavg module.
+ * @sr: SR module to be configured.
+ *
+ * This API is to be called from the smartreflex class driver to
+ * configure the minmaxavg module inside the smartreflex module.
+ * SR CLASS 3 by default uses only the ERROR module, whereas SR CLASS 2
+ * can choose between the ERROR module and the MINMAXAVG module.
+ * Returns 0 on success and an error value in case of failure.
+ */
+int sr_configure_minmax(struct omap_sr *sr)
+{
+ u32 sr_config, sr_avgwt;
+ u32 senp_en = 0, senn_en = 0;
+ u8 senp_shift, senn_shift;
+
+ if (!sr) {
+ pr_warn("%s: NULL omap_sr from %pS\n",
+ __func__, (void *)_RET_IP_);
+ return -EINVAL;
+ }
+
+ if (!sr->clk_length)
+ sr_set_clk_length(sr);
+
+ senp_en = sr->senp_mod;
+ senn_en = sr->senn_mod;
+
+ sr_config = (sr->clk_length << SRCONFIG_SRCLKLENGTH_SHIFT) |
+ SRCONFIG_SENENABLE |
+ (sr->accum_data << SRCONFIG_ACCUMDATA_SHIFT);
+
+ switch (sr->ip_type) {
+ case SR_TYPE_V1:
+ sr_config |= SRCONFIG_DELAYCTRL;
+ senn_shift = SRCONFIG_SENNENABLE_V1_SHIFT;
+ senp_shift = SRCONFIG_SENPENABLE_V1_SHIFT;
+ break;
+ case SR_TYPE_V2:
+ senn_shift = SRCONFIG_SENNENABLE_V2_SHIFT;
+ senp_shift = SRCONFIG_SENPENABLE_V2_SHIFT;
+ break;
+ default:
+		dev_err(&sr->pdev->dev, "%s: Trying to configure smartreflex module without specifying the IP type\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ sr_config |= ((senn_en << senn_shift) | (senp_en << senp_shift));
+ sr_write_reg(sr, SRCONFIG, sr_config);
+ sr_avgwt = (sr->senp_avgweight << AVGWEIGHT_SENPAVGWEIGHT_SHIFT) |
+ (sr->senn_avgweight << AVGWEIGHT_SENNAVGWEIGHT_SHIFT);
+ sr_write_reg(sr, AVGWEIGHT, sr_avgwt);
+
+ /*
+ * Enabling the interrupts if MINMAXAVG module is used.
+ * TODO: check if all the interrupts are mandatory
+ */
+ switch (sr->ip_type) {
+ case SR_TYPE_V1:
+ sr_modify_reg(sr, ERRCONFIG_V1,
+ (ERRCONFIG_MCUACCUMINTEN | ERRCONFIG_MCUVALIDINTEN |
+ ERRCONFIG_MCUBOUNDINTEN),
+ (ERRCONFIG_MCUACCUMINTEN | ERRCONFIG_MCUACCUMINTST |
+ ERRCONFIG_MCUVALIDINTEN | ERRCONFIG_MCUVALIDINTST |
+ ERRCONFIG_MCUBOUNDINTEN | ERRCONFIG_MCUBOUNDINTST));
+ break;
+ case SR_TYPE_V2:
+ sr_write_reg(sr, IRQSTATUS,
+ IRQSTATUS_MCUACCUMINT | IRQSTATUS_MCVALIDINT |
+ IRQSTATUS_MCBOUNDSINT | IRQSTATUS_MCUDISABLEACKINT);
+ sr_write_reg(sr, IRQENABLE_SET,
+ IRQENABLE_MCUACCUMINT | IRQENABLE_MCUVALIDINT |
+ IRQENABLE_MCUBOUNDSINT | IRQENABLE_MCUDISABLEACKINT);
+ break;
+ default:
+		dev_err(&sr->pdev->dev, "%s: Trying to configure smartreflex module without specifying the IP type\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * sr_enable() - Enables the smartreflex module.
+ * @sr: the SR module to be enabled.
+ * @volt: the voltage at which the voltage domain associated with
+ * the smartreflex module is operating.
+ * This is required only to program the correct Ntarget value.
+ *
+ * This API is to be called from the smartreflex class driver to
+ * enable a smartreflex module. Returns 0 on success. Returns error
+ * value if the voltage passed is wrong or if ntarget value is wrong.
+ */
+int sr_enable(struct omap_sr *sr, unsigned long volt)
+{
+ struct omap_volt_data *volt_data;
+ struct omap_sr_nvalue_table *nvalue_row;
+ int ret;
+
+ if (!sr) {
+ pr_warn("%s: NULL omap_sr from %pS\n",
+ __func__, (void *)_RET_IP_);
+ return -EINVAL;
+ }
+
+ volt_data = omap_voltage_get_voltdata(sr->voltdm, volt);
+
+ if (IS_ERR(volt_data)) {
+ dev_warn(&sr->pdev->dev, "%s: Unable to get voltage table for nominal voltage %ld\n",
+ __func__, volt);
+ return PTR_ERR(volt_data);
+ }
+
+ nvalue_row = sr_retrieve_nvalue_row(sr, volt_data->sr_efuse_offs);
+
+ if (!nvalue_row) {
+ dev_warn(&sr->pdev->dev, "%s: failure getting SR data for this voltage %ld\n",
+ __func__, volt);
+ return -ENODATA;
+ }
+
+ /* errminlimit is opp dependent and hence linked to voltage */
+ sr->err_minlimit = nvalue_row->errminlimit;
+
+ clk_enable(sr->fck);
+
+ /* Check if SR is already enabled. If yes do nothing */
+ if (sr_read_reg(sr, SRCONFIG) & SRCONFIG_SRENABLE)
+ goto out_enabled;
+
+ /* Configure SR */
+ ret = sr_class->configure(sr);
+ if (ret)
+ goto out_enabled;
+
+ sr_write_reg(sr, NVALUERECIPROCAL, nvalue_row->nvalue);
+
+ /* SRCONFIG - enable SR */
+ sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, SRCONFIG_SRENABLE);
+
+out_enabled:
+ sr->enabled = 1;
+
+ return 0;
+}
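+
+/*
+ * Illustrative class-driver usage (a sketch, assuming the class driver
+ * tracks the nominal voltage, e.g. via voltdm_get_voltage()): a
+ * SmartReflex class 3 ->enable() hook would typically do
+ *
+ *	ret = sr_enable(sr, voltdm_get_voltage(sr->voltdm));
+ */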
+
+/**
+ * sr_disable() - Disables the smartreflex module.
+ * @sr: the SR module to be disabled.
+ *
+ * This API is to be called from the smartreflex class driver to
+ * disable a smartreflex module.
+ */
+void sr_disable(struct omap_sr *sr)
+{
+ if (!sr) {
+ pr_warn("%s: NULL omap_sr from %pS\n",
+ __func__, (void *)_RET_IP_);
+ return;
+ }
+
+ /* Check if SR clocks are already disabled. If yes do nothing */
+ if (!sr->enabled)
+ return;
+
+	/*
+	 * Disable SR only if it is indeed enabled. Else just
+	 * disable the clocks.
+	 */
+ if (sr_read_reg(sr, SRCONFIG) & SRCONFIG_SRENABLE) {
+ switch (sr->ip_type) {
+ case SR_TYPE_V1:
+ sr_v1_disable(sr);
+ break;
+ case SR_TYPE_V2:
+ sr_v2_disable(sr);
+ break;
+ default:
+ dev_err(&sr->pdev->dev, "UNKNOWN IP type %d\n",
+ sr->ip_type);
+ }
+ }
+
+ clk_disable(sr->fck);
+ sr->enabled = 0;
+}
+
+/**
+ * sr_register_class() - API to register a smartreflex class parameters.
+ * @class_data: The structure containing various sr class specific data.
+ *
+ * This API is to be called by the smartreflex class driver to register itself
+ * with the smartreflex driver during init. Returns 0 on success else the
+ * error value.
+ */
+int sr_register_class(struct omap_sr_class_data *class_data)
+{
+ struct omap_sr *sr_info;
+
+ if (!class_data) {
+		pr_warn("%s: Smartreflex class data passed is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (sr_class) {
+ pr_warn("%s: Smartreflex class driver already registered\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ sr_class = class_data;
+
+ /*
+ * Call into late init to do initializations that require
+	 * both sr driver and sr class driver to be initialized.
+ */
+ list_for_each_entry(sr_info, &sr_list, node)
+ sr_late_init(sr_info);
+
+ return 0;
+}
+
+/**
+ * omap_sr_enable() - API to enable SR clocks and to call into the
+ * registered smartreflex class enable API.
+ * @voltdm: voltage domain whose SR module is to be enabled.
+ *
+ * This API is to be called from the kernel in order to enable
+ * a particular smartreflex module. This API will do the initial
+ * configurations to turn on the smartreflex module and in turn call
+ * into the registered smartreflex class enable API.
+ */
+void omap_sr_enable(struct voltagedomain *voltdm)
+{
+ struct omap_sr *sr = _sr_lookup(voltdm);
+
+ if (IS_ERR(sr)) {
+ pr_warn("%s: omap_sr struct for voltdm not found\n", __func__);
+ return;
+ }
+
+ if (!sr->autocomp_active)
+ return;
+
+ if (!sr_class || !(sr_class->enable) || !(sr_class->configure)) {
+ dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not registered\n",
+ __func__);
+ return;
+ }
+
+ sr_class->enable(sr);
+}
+
+/**
+ * omap_sr_disable() - API to disable SR without resetting the voltage
+ * processor voltage
+ * @voltdm: voltage domain whose SR module is to be disabled.
+ *
+ * This API is to be called from the kernel in order to disable
+ * a particular smartreflex module. This API will in turn call
+ * into the registered smartreflex class disable API. This API will tell
+ * the smartreflex class disable not to reset the VP voltage after
+ * disabling smartreflex.
+ */
+void omap_sr_disable(struct voltagedomain *voltdm)
+{
+ struct omap_sr *sr = _sr_lookup(voltdm);
+
+ if (IS_ERR(sr)) {
+ pr_warn("%s: omap_sr struct for voltdm not found\n", __func__);
+ return;
+ }
+
+ if (!sr->autocomp_active)
+ return;
+
+ if (!sr_class || !(sr_class->disable)) {
+ dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not registered\n",
+ __func__);
+ return;
+ }
+
+ sr_class->disable(sr, 0);
+}
+
+/**
+ * omap_sr_disable_reset_volt() - API to disable SR and reset the
+ * voltage processor voltage
+ * @voltdm: voltage domain whose SR module is to be disabled.
+ *
+ * This API is to be called from the kernel in order to disable
+ * a particular smartreflex module. This API will in turn call
+ * into the registered smartreflex class disable API. This API will tell
+ * the smartreflex class disable to reset the VP voltage after
+ * disabling smartreflex.
+ */
+void omap_sr_disable_reset_volt(struct voltagedomain *voltdm)
+{
+ struct omap_sr *sr = _sr_lookup(voltdm);
+
+ if (IS_ERR(sr)) {
+ pr_warn("%s: omap_sr struct for voltdm not found\n", __func__);
+ return;
+ }
+
+ if (!sr->autocomp_active)
+ return;
+
+ if (!sr_class || !(sr_class->disable)) {
+ dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not registered\n",
+ __func__);
+ return;
+ }
+
+ sr_class->disable(sr, 1);
+}
+
+/* PM Debug FS entries to enable and disable smartreflex. */
+static int omap_sr_autocomp_show(void *data, u64 *val)
+{
+ struct omap_sr *sr_info = data;
+
+ if (!sr_info) {
+ pr_warn("%s: omap_sr struct not found\n", __func__);
+ return -EINVAL;
+ }
+
+ *val = sr_info->autocomp_active;
+
+ return 0;
+}
+
+static int omap_sr_autocomp_store(void *data, u64 val)
+{
+ struct omap_sr *sr_info = data;
+
+ if (!sr_info) {
+ pr_warn("%s: omap_sr struct not found\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Sanity check */
+ if (val > 1) {
+ pr_warn("%s: Invalid argument %lld\n", __func__, val);
+ return -EINVAL;
+ }
+
+ /* control enable/disable only if there is a delta in value */
+ if (sr_info->autocomp_active != val) {
+ if (!val)
+ sr_stop_vddautocomp(sr_info);
+ else
+ sr_start_vddautocomp(sr_info);
+ }
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(pm_sr_fops, omap_sr_autocomp_show,
+ omap_sr_autocomp_store, "%llu\n");
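+
+/*
+ * Illustrative use from userspace (assuming debugfs is mounted at
+ * /sys/kernel/debug): per-instance control files are created under the
+ * "smartreflex" directory by omap_sr_probe() below, so
+ *
+ *	echo 1 > /sys/kernel/debug/smartreflex/<sr-name>/autocomp
+ *
+ * lands in omap_sr_autocomp_store() and starts voltage
+ * autocompensation.
+ */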
+
+static int omap_sr_probe(struct platform_device *pdev)
+{
+ struct omap_sr *sr_info;
+ struct omap_sr_data *pdata = pdev->dev.platform_data;
+ struct resource *mem;
+ struct dentry *nvalue_dir;
+ int i, ret = 0;
+
+ sr_info = devm_kzalloc(&pdev->dev, sizeof(struct omap_sr), GFP_KERNEL);
+ if (!sr_info)
+ return -ENOMEM;
+
+ sr_info->name = devm_kzalloc(&pdev->dev,
+ SMARTREFLEX_NAME_LEN, GFP_KERNEL);
+ if (!sr_info->name)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, sr_info);
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "%s: platform data missing\n", __func__);
+ return -EINVAL;
+ }
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sr_info->base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(sr_info->base))
+ return PTR_ERR(sr_info->base);
+
+ ret = platform_get_irq_optional(pdev, 0);
+ if (ret < 0 && ret != -ENXIO)
+ return dev_err_probe(&pdev->dev, ret, "failed to get IRQ resource\n");
+ if (ret > 0)
+ sr_info->irq = ret;
+
+ sr_info->fck = devm_clk_get(pdev->dev.parent, "fck");
+ if (IS_ERR(sr_info->fck))
+ return PTR_ERR(sr_info->fck);
+ clk_prepare(sr_info->fck);
+
+ pm_runtime_enable(&pdev->dev);
+
+ snprintf(sr_info->name, SMARTREFLEX_NAME_LEN, "%s", pdata->name);
+
+ sr_info->pdev = pdev;
+ sr_info->srid = pdev->id;
+ sr_info->voltdm = pdata->voltdm;
+ sr_info->nvalue_table = pdata->nvalue_table;
+ sr_info->nvalue_count = pdata->nvalue_count;
+ sr_info->senn_mod = pdata->senn_mod;
+ sr_info->senp_mod = pdata->senp_mod;
+ sr_info->err_weight = pdata->err_weight;
+ sr_info->err_maxlimit = pdata->err_maxlimit;
+ sr_info->accum_data = pdata->accum_data;
+ sr_info->senn_avgweight = pdata->senn_avgweight;
+ sr_info->senp_avgweight = pdata->senp_avgweight;
+ sr_info->autocomp_active = false;
+ sr_info->ip_type = pdata->ip_type;
+
+ sr_set_clk_length(sr_info);
+
+ list_add(&sr_info->node, &sr_list);
+
+ /*
+ * Call into late init to do initializations that require
+	 * both sr driver and sr class driver to be initialized.
+ */
+ if (sr_class) {
+ ret = sr_late_init(sr_info);
+ if (ret) {
+ pr_warn("%s: Error in SR late init\n", __func__);
+ goto err_list_del;
+ }
+ }
+
+ dev_info(&pdev->dev, "%s: SmartReflex driver initialized\n", __func__);
+ if (!sr_dbg_dir)
+ sr_dbg_dir = debugfs_create_dir("smartreflex", NULL);
+
+ sr_info->dbg_dir = debugfs_create_dir(sr_info->name, sr_dbg_dir);
+
+ debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, sr_info->dbg_dir,
+ sr_info, &pm_sr_fops);
+ debugfs_create_x32("errweight", S_IRUGO, sr_info->dbg_dir,
+ &sr_info->err_weight);
+ debugfs_create_x32("errmaxlimit", S_IRUGO, sr_info->dbg_dir,
+ &sr_info->err_maxlimit);
+
+ nvalue_dir = debugfs_create_dir("nvalue", sr_info->dbg_dir);
+
+ if (sr_info->nvalue_count == 0 || !sr_info->nvalue_table) {
+ dev_warn(&pdev->dev, "%s: %s: No Voltage table for the corresponding vdd. Cannot create debugfs entries for n-values\n",
+ __func__, sr_info->name);
+
+ ret = -ENODATA;
+ goto err_debugfs;
+ }
+
+ for (i = 0; i < sr_info->nvalue_count; i++) {
+ char name[NVALUE_NAME_LEN + 1];
+
+ snprintf(name, sizeof(name), "volt_%lu",
+ sr_info->nvalue_table[i].volt_nominal);
+ debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir,
+ &(sr_info->nvalue_table[i].nvalue));
+ snprintf(name, sizeof(name), "errminlimit_%lu",
+ sr_info->nvalue_table[i].volt_nominal);
+ debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir,
+				  &(sr_info->nvalue_table[i].errminlimit));
+	}
+
+ return 0;
+
+err_debugfs:
+ debugfs_remove_recursive(sr_info->dbg_dir);
+err_list_del:
+ pm_runtime_disable(&pdev->dev);
+ list_del(&sr_info->node);
+ clk_unprepare(sr_info->fck);
+
+ return ret;
+}
+
+static int omap_sr_remove(struct platform_device *pdev)
+{
+ struct omap_sr_data *pdata = pdev->dev.platform_data;
+ struct device *dev = &pdev->dev;
+ struct omap_sr *sr_info;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "%s: platform data missing\n", __func__);
+ return -EINVAL;
+ }
+
+ sr_info = _sr_lookup(pdata->voltdm);
+ if (IS_ERR(sr_info)) {
+ dev_warn(&pdev->dev, "%s: omap_sr struct not found\n",
+ __func__);
+ return PTR_ERR(sr_info);
+ }
+
+ if (sr_info->autocomp_active)
+ sr_stop_vddautocomp(sr_info);
+ debugfs_remove_recursive(sr_info->dbg_dir);
+
+ pm_runtime_disable(dev);
+ clk_unprepare(sr_info->fck);
+ list_del(&sr_info->node);
+ return 0;
+}
+
+static void omap_sr_shutdown(struct platform_device *pdev)
+{
+ struct omap_sr_data *pdata = pdev->dev.platform_data;
+ struct omap_sr *sr_info;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "%s: platform data missing\n", __func__);
+ return;
+ }
+
+ sr_info = _sr_lookup(pdata->voltdm);
+ if (IS_ERR(sr_info)) {
+ dev_warn(&pdev->dev, "%s: omap_sr struct not found\n",
+ __func__);
+ return;
+ }
+
+ if (sr_info->autocomp_active)
+ sr_stop_vddautocomp(sr_info);
+}
+
+static const struct of_device_id omap_sr_match[] = {
+ { .compatible = "ti,omap3-smartreflex-core", },
+ { .compatible = "ti,omap3-smartreflex-mpu-iva", },
+ { .compatible = "ti,omap4-smartreflex-core", },
+ { .compatible = "ti,omap4-smartreflex-mpu", },
+ { .compatible = "ti,omap4-smartreflex-iva", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, omap_sr_match);
+
+static struct platform_driver smartreflex_driver = {
+ .probe = omap_sr_probe,
+ .remove = omap_sr_remove,
+ .shutdown = omap_sr_shutdown,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = omap_sr_match,
+ },
+};
+
+static int __init sr_init(void)
+{
+ int ret = 0;
+
+ ret = platform_driver_register(&smartreflex_driver);
+ if (ret) {
+ pr_err("%s: platform driver register failed for SR\n",
+ __func__);
+ return ret;
+ }
+
+ return 0;
+}
+late_initcall(sr_init);
+
+static void __exit sr_exit(void)
+{
+ platform_driver_unregister(&smartreflex_driver);
+}
+module_exit(sr_exit);
+
+MODULE_DESCRIPTION("OMAP Smartreflex Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_AUTHOR("Texas Instruments Inc");
diff --git a/drivers/soc/ti/ti_sci_inta_msi.c b/drivers/soc/ti/ti_sci_inta_msi.c
new file mode 100644
index 000000000..991c78b34
--- /dev/null
+++ b/drivers/soc/ti/ti_sci_inta_msi.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments' K3 Interrupt Aggregator MSI bus
+ *
+ * Copyright (C) 2018-2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Lokesh Vutla <lokeshvutla@ti.com>
+ */
+
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/soc/ti/ti_sci_inta_msi.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+
+static void ti_sci_inta_msi_write_msg(struct irq_data *data,
+ struct msi_msg *msg)
+{
+ /* Nothing to do */
+}
+
+static void ti_sci_inta_msi_compose_msi_msg(struct irq_data *data,
+ struct msi_msg *msg)
+{
+ /* Nothing to do */
+}
+
+static void ti_sci_inta_msi_update_chip_ops(struct msi_domain_info *info)
+{
+ struct irq_chip *chip = info->chip;
+
+ if (WARN_ON(!chip))
+ return;
+
+ chip->irq_request_resources = irq_chip_request_resources_parent;
+ chip->irq_release_resources = irq_chip_release_resources_parent;
+ chip->irq_compose_msi_msg = ti_sci_inta_msi_compose_msi_msg;
+ chip->irq_write_msi_msg = ti_sci_inta_msi_write_msg;
+ chip->irq_set_type = irq_chip_set_type_parent;
+ chip->irq_unmask = irq_chip_unmask_parent;
+ chip->irq_mask = irq_chip_mask_parent;
+ chip->irq_ack = irq_chip_ack_parent;
+}
+
+struct irq_domain *ti_sci_inta_msi_create_irq_domain(struct fwnode_handle *fwnode,
+ struct msi_domain_info *info,
+ struct irq_domain *parent)
+{
+ struct irq_domain *domain;
+
+ ti_sci_inta_msi_update_chip_ops(info);
+ info->flags |= MSI_FLAG_FREE_MSI_DESCS;
+
+ domain = msi_create_irq_domain(fwnode, info, parent);
+ if (domain)
+ irq_domain_update_bus_token(domain, DOMAIN_BUS_TI_SCI_INTA_MSI);
+
+ return domain;
+}
+EXPORT_SYMBOL_GPL(ti_sci_inta_msi_create_irq_domain);
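+
+/*
+ * Illustrative use only (a sketch, not lifted from a specific caller):
+ * an interrupt aggregator driver would typically create the domain as
+ *
+ *   struct msi_domain_info info = {
+ *           .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
+ *           .chip = &inta_irq_chip,
+ *   };
+ *   domain = ti_sci_inta_msi_create_irq_domain(fwnode, &info, parent);
+ *
+ * where inta_irq_chip is the caller's own irq_chip (a hypothetical name).
+ */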
+
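+/*
+ * Materialize one MSI descriptor per TI-SCI resource index. Each resource
+ * set may carry a primary (start/num) and a secondary (start_sec/num_sec)
+ * range; msi_index is the global event index handed out by the system
+ * controller.
+ */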
+static int ti_sci_inta_msi_alloc_descs(struct device *dev,
+ struct ti_sci_resource *res)
+{
+ struct msi_desc msi_desc;
+ int set, i, count = 0;
+
+ memset(&msi_desc, 0, sizeof(msi_desc));
+ msi_desc.nvec_used = 1;
+
+ for (set = 0; set < res->sets; set++) {
+ for (i = 0; i < res->desc[set].num; i++, count++) {
+ msi_desc.msi_index = res->desc[set].start + i;
+ if (msi_add_msi_desc(dev, &msi_desc))
+ goto fail;
+ }
+
+ for (i = 0; i < res->desc[set].num_sec; i++, count++) {
+ msi_desc.msi_index = res->desc[set].start_sec + i;
+ if (msi_add_msi_desc(dev, &msi_desc))
+ goto fail;
+ }
+ }
+ return count;
+fail:
+ msi_free_msi_descs(dev);
+ return -ENOMEM;
+}
+
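+/*
+ * Expected calling sequence for ti_sci_inta_msi_domain_alloc_irqs(),
+ * sketched under the assumption of a client (e.g. a ring accelerator or
+ * DMA driver) that owns a TI-SCI resource range:
+ *
+ *   dev_set_msi_domain(dev, inta_msi_domain);
+ *   ret = ti_sci_inta_msi_domain_alloc_irqs(dev, res);
+ *   if (ret < 0)
+ *           return ret;
+ *   virq = msi_get_virq(dev, hw_index);
+ *
+ * where hw_index is one of the msi_index values allocated from res.
+ */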
+int ti_sci_inta_msi_domain_alloc_irqs(struct device *dev,
+ struct ti_sci_resource *res)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct irq_domain *msi_domain;
+ int ret, nvec;
+
+ msi_domain = dev_get_msi_domain(dev);
+ if (!msi_domain)
+ return -EINVAL;
+
+ if (pdev->id < 0)
+ return -ENODEV;
+
+ ret = msi_setup_device_data(dev);
+ if (ret)
+ return ret;
+
+ msi_lock_descs(dev);
+ nvec = ti_sci_inta_msi_alloc_descs(dev, res);
+ if (nvec <= 0) {
+ ret = nvec;
+ goto unlock;
+ }
+
+ ret = msi_domain_alloc_irqs_descs_locked(msi_domain, dev, nvec);
+ if (ret)
+ dev_err(dev, "Failed to allocate IRQs %d\n", ret);
+unlock:
+ msi_unlock_descs(dev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ti_sci_inta_msi_domain_alloc_irqs);
diff --git a/drivers/soc/ti/ti_sci_pm_domains.c b/drivers/soc/ti/ti_sci_pm_domains.c
new file mode 100644
index 000000000..a33ec7eaf
--- /dev/null
+++ b/drivers/soc/ti/ti_sci_pm_domains.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI SCI Generic Power Domain Driver
+ *
+ * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com/
+ * J Keerthy <j-keerthy@ti.com>
+ * Dave Gerlach <d-gerlach@ti.com>
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+#include <dt-bindings/soc/ti,sci_pm_domain.h>
+
+/**
+ * struct ti_sci_genpd_provider - holds common TI SCI genpd provider data
+ * @ti_sci: handle to TI SCI protocol driver that provides ops to
+ * communicate with system control processor.
+ * @dev: pointer to dev for the driver for devm allocs
+ * @pd_list: list of all the power domains on the device
+ * @data: onecell data for genpd core
+ */
+struct ti_sci_genpd_provider {
+ const struct ti_sci_handle *ti_sci;
+ struct device *dev;
+ struct list_head pd_list;
+ struct genpd_onecell_data data;
+};
+
+/**
+ * struct ti_sci_pm_domain - TI specific data needed for power domain
+ * @idx: index of the device that identifies it with the system
+ * control processor.
+ * @exclusive: Permissions for exclusive request or shared request of the
+ * device.
+ * @pd: generic_pm_domain for use with the genpd framework
+ * @node: link for the genpd list
+ * @parent: link to the parent TI SCI genpd provider
+ */
+struct ti_sci_pm_domain {
+ int idx;
+ u8 exclusive;
+ struct generic_pm_domain pd;
+ struct list_head node;
+ struct ti_sci_genpd_provider *parent;
+};
+
+#define genpd_to_ti_sci_pd(gpd) container_of(gpd, struct ti_sci_pm_domain, pd)
+
+/*
+ * ti_sci_pd_power_off(): genpd power down hook
+ * @domain: pointer to the powerdomain to power off
+ */
+static int ti_sci_pd_power_off(struct generic_pm_domain *domain)
+{
+ struct ti_sci_pm_domain *pd = genpd_to_ti_sci_pd(domain);
+ const struct ti_sci_handle *ti_sci = pd->parent->ti_sci;
+
+ return ti_sci->ops.dev_ops.put_device(ti_sci, pd->idx);
+}
+
+/*
+ * ti_sci_pd_power_on(): genpd power up hook
+ * @domain: pointer to the powerdomain to power on
+ */
+static int ti_sci_pd_power_on(struct generic_pm_domain *domain)
+{
+ struct ti_sci_pm_domain *pd = genpd_to_ti_sci_pd(domain);
+ const struct ti_sci_handle *ti_sci = pd->parent->ti_sci;
+
+ if (pd->exclusive)
+ return ti_sci->ops.dev_ops.get_device_exclusive(ti_sci,
+ pd->idx);
+ else
+ return ti_sci->ops.dev_ops.get_device(ti_sci, pd->idx);
+}
+
+/*
+ * ti_sci_pd_xlate(): translation service for TI SCI genpds
+ * @genpdspec: DT identification data for the genpd
+ * @data: genpd core data for all the powerdomains on the device
+ */
+static struct generic_pm_domain *ti_sci_pd_xlate(
+ struct of_phandle_args *genpdspec,
+ void *data)
+{
+ struct genpd_onecell_data *genpd_data = data;
+ unsigned int idx = genpdspec->args[0];
+
+ if (genpdspec->args_count != 1 && genpdspec->args_count != 2)
+ return ERR_PTR(-EINVAL);
+
+ if (idx >= genpd_data->num_domains) {
+ pr_err("%s: invalid domain index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!genpd_data->domains[idx])
+ return ERR_PTR(-ENOENT);
+
+ genpd_to_ti_sci_pd(genpd_data->domains[idx])->exclusive =
+ genpdspec->args[1];
+
+ return genpd_data->domains[idx];
+}
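+
+/*
+ * A consumer references the provider with one or two cells; the optional
+ * second cell selects exclusive vs. shared access. An illustrative
+ * device tree snippet (node name and device ID are made up):
+ *
+ *   power-domains = <&k3_pds 47 TI_SCI_PD_EXCLUSIVE>;
+ */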
+
+static const struct of_device_id ti_sci_pm_domain_matches[] = {
+ { .compatible = "ti,sci-pm-domain", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ti_sci_pm_domain_matches);
+
+static int ti_sci_pm_domain_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ti_sci_genpd_provider *pd_provider;
+ struct ti_sci_pm_domain *pd;
+ struct device_node *np = NULL;
+ struct of_phandle_args args;
+ int ret;
+ u32 max_id = 0;
+ int index;
+
+ pd_provider = devm_kzalloc(dev, sizeof(*pd_provider), GFP_KERNEL);
+ if (!pd_provider)
+ return -ENOMEM;
+
+ pd_provider->ti_sci = devm_ti_sci_get_handle(dev);
+ if (IS_ERR(pd_provider->ti_sci))
+ return PTR_ERR(pd_provider->ti_sci);
+
+ pd_provider->dev = dev;
+
+ INIT_LIST_HEAD(&pd_provider->pd_list);
+
+ /* Find highest device ID used for power domains */
+ while (1) {
+ np = of_find_node_with_property(np, "power-domains");
+ if (!np)
+ break;
+
+ index = 0;
+
+ while (1) {
+ ret = of_parse_phandle_with_args(np, "power-domains",
+ "#power-domain-cells",
+ index, &args);
+ if (ret)
+ break;
+
+ if (args.args_count >= 1 && args.np == dev->of_node) {
+ if (args.args[0] > max_id)
+ max_id = args.args[0];
+
+ pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return -ENOMEM;
+
+ pd->pd.name = devm_kasprintf(dev, GFP_KERNEL,
+ "pd:%d",
+ args.args[0]);
+ if (!pd->pd.name)
+ return -ENOMEM;
+
+ pd->pd.power_off = ti_sci_pd_power_off;
+ pd->pd.power_on = ti_sci_pd_power_on;
+ pd->idx = args.args[0];
+ pd->parent = pd_provider;
+
+ pm_genpd_init(&pd->pd, NULL, true);
+
+ list_add(&pd->node, &pd_provider->pd_list);
+ }
+ index++;
+ }
+ }
+
+ pd_provider->data.domains =
+ devm_kcalloc(dev, max_id + 1,
+ sizeof(*pd_provider->data.domains),
+ GFP_KERNEL);
+ if (!pd_provider->data.domains)
+ return -ENOMEM;
+
+ pd_provider->data.num_domains = max_id + 1;
+ pd_provider->data.xlate = ti_sci_pd_xlate;
+
+ list_for_each_entry(pd, &pd_provider->pd_list, node)
+ pd_provider->data.domains[pd->idx] = &pd->pd;
+
+ return of_genpd_add_provider_onecell(dev->of_node, &pd_provider->data);
+}
+
+static struct platform_driver ti_sci_pm_domains_driver = {
+ .probe = ti_sci_pm_domain_probe,
+ .driver = {
+ .name = "ti_sci_pm_domains",
+ .of_match_table = ti_sci_pm_domain_matches,
+ },
+};
+module_platform_driver(ti_sci_pm_domains_driver);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI System Control Interface (SCI) Power Domain driver");
+MODULE_AUTHOR("Dave Gerlach");
diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c
new file mode 100644
index 000000000..343c58ed5
--- /dev/null
+++ b/drivers/soc/ti/wkup_m3_ipc.c
@@ -0,0 +1,777 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMx3 Wkup M3 IPC driver
+ *
+ * Copyright (C) 2015 Texas Instruments, Inc.
+ *
+ * Dave Gerlach <d-gerlach@ti.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/omap-mailbox.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/suspend.h>
+#include <linux/wkup_m3_ipc.h>
+
+#define AM33XX_CTRL_IPC_REG_COUNT 0x8
+#define AM33XX_CTRL_IPC_REG_OFFSET(m) (0x4 + 4 * (m))
+
+/* AM33XX M3_TXEV_EOI register */
+#define AM33XX_CONTROL_M3_TXEV_EOI 0x00
+
+#define AM33XX_M3_TXEV_ACK (0x1 << 0)
+#define AM33XX_M3_TXEV_ENABLE (0x0 << 0)
+
+#define IPC_CMD_DS0 0x4
+#define IPC_CMD_STANDBY 0xc
+#define IPC_CMD_IDLE 0x10
+#define IPC_CMD_RESET 0xe
+#define DS_IPC_DEFAULT 0xffffffff
+#define M3_VERSION_UNKNOWN 0x0000ffff
+#define M3_BASELINE_VERSION 0x191
+#define M3_STATUS_RESP_MASK (0xffff << 16)
+#define M3_FW_VERSION_MASK 0xffff
+#define M3_WAKE_SRC_MASK 0xff
+
+#define IPC_MEM_TYPE_SHIFT (0x0)
+#define IPC_MEM_TYPE_MASK (0x7 << 0)
+#define IPC_VTT_STAT_SHIFT (0x3)
+#define IPC_VTT_STAT_MASK (0x1 << 3)
+#define IPC_VTT_GPIO_PIN_SHIFT (0x4)
+#define IPC_VTT_GPIO_PIN_MASK (0x3f << 4)
+#define IPC_IO_ISOLATION_STAT_SHIFT (10)
+#define IPC_IO_ISOLATION_STAT_MASK (0x1 << 10)
+
+#define IPC_DBG_HALT_SHIFT (11)
+#define IPC_DBG_HALT_MASK (0x1 << 11)
+
+#define M3_STATE_UNKNOWN 0
+#define M3_STATE_RESET 1
+#define M3_STATE_INITED 2
+#define M3_STATE_MSG_FOR_LP 3
+#define M3_STATE_MSG_FOR_RESET 4
+
+#define WKUP_M3_SD_FW_MAGIC 0x570C
+
+#define WKUP_M3_DMEM_START 0x80000
+#define WKUP_M3_AUXDATA_OFFSET 0x1000
+#define WKUP_M3_AUXDATA_SIZE 0xFF
+
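+/*
+ * There is a single wkup_m3 instance per SoC, so one module-level handle
+ * is kept for the wkup_m3_ipc_get()/wkup_m3_ipc_put() API.
+ */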
+static struct wkup_m3_ipc *m3_ipc_state;
+
+static const struct wkup_m3_wakeup_src wakeups[] = {
+ {.irq_nr = 16, .src = "PRCM"},
+ {.irq_nr = 35, .src = "USB0_PHY"},
+ {.irq_nr = 36, .src = "USB1_PHY"},
+ {.irq_nr = 40, .src = "I2C0"},
+ {.irq_nr = 41, .src = "RTC Timer"},
+ {.irq_nr = 42, .src = "RTC Alarm"},
+ {.irq_nr = 43, .src = "Timer0"},
+ {.irq_nr = 44, .src = "Timer1"},
+ {.irq_nr = 45, .src = "UART"},
+ {.irq_nr = 46, .src = "GPIO0"},
+ {.irq_nr = 48, .src = "MPU_WAKE"},
+ {.irq_nr = 49, .src = "WDT0"},
+ {.irq_nr = 50, .src = "WDT1"},
+ {.irq_nr = 51, .src = "ADC_TSC"},
+ {.irq_nr = 0, .src = "Unknown"},
+};
+
+/**
+ * wkup_m3_copy_aux_data - Copy auxiliary data to special region of m3 dmem
+ * @m3_ipc: Pointer to wkup_m3_ipc context
+ * @data: pointer to the data to copy
+ * @sz: size of the data to copy (limited to WKUP_M3_AUXDATA_SIZE bytes)
+ *
+ * Copies any additional blob of data to the wkup_m3 dmem to be used by the
+ * firmware. Returns the offset of the aux data area within m3 dmem.
+ */
+static unsigned long wkup_m3_copy_aux_data(struct wkup_m3_ipc *m3_ipc,
+ const void *data, int sz)
+{
+ unsigned long aux_data_dev_addr;
+ void *aux_data_addr;
+
+ aux_data_dev_addr = WKUP_M3_DMEM_START + WKUP_M3_AUXDATA_OFFSET;
+ aux_data_addr = rproc_da_to_va(m3_ipc->rproc,
+ aux_data_dev_addr,
+ WKUP_M3_AUXDATA_SIZE,
+ NULL);
+ memcpy(aux_data_addr, data, sz);
+
+ return WKUP_M3_AUXDATA_OFFSET;
+}
+
+static void wkup_m3_scale_data_fw_cb(const struct firmware *fw, void *context)
+{
+ unsigned long val, aux_base;
+ struct wkup_m3_scale_data_header hdr;
+ struct wkup_m3_ipc *m3_ipc = context;
+ struct device *dev = m3_ipc->dev;
+
+ if (!fw) {
+ dev_err(dev, "Voltage scale fw name given but file missing.\n");
+ return;
+ }
+
+ memcpy(&hdr, fw->data, sizeof(hdr));
+
+ if (hdr.magic != WKUP_M3_SD_FW_MAGIC) {
+ dev_err(dev, "PM: Voltage Scale Data binary does not appear valid.\n");
+ goto release_sd_fw;
+ }
+
+ aux_base = wkup_m3_copy_aux_data(m3_ipc, fw->data + sizeof(hdr),
+ fw->size - sizeof(hdr));
+
+ val = (aux_base + hdr.sleep_offset);
+ val |= ((aux_base + hdr.wake_offset) << 16);
+
+ m3_ipc->volt_scale_offsets = val;
+
+release_sd_fw:
+ release_firmware(fw);
+}
+
+static int wkup_m3_init_scale_data(struct wkup_m3_ipc *m3_ipc,
+ struct device *dev)
+{
+ int ret;
+
+ /*
+ * If no firmware name was provided, the user has already been warned
+ * at probe time; PM still works without the scale data, so return
+ * success.
+ */
+ if (!m3_ipc->sd_fw_name)
+ return 0;
+
+ ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
+ m3_ipc->sd_fw_name, dev, GFP_ATOMIC,
+ m3_ipc, wkup_m3_scale_data_fw_cb);
+
+ return ret;
+}
+
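+/*
+ * debugfs knob: writing a nonzero value to "enable_late_halt" sets the
+ * IPC_DBG_HALT bit, which is passed to the firmware through IPC register
+ * 4 in wkup_m3_prepare_low_power(), asking the CM3 to halt late in the
+ * suspend path as a firmware debugging aid.
+ */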
+#ifdef CONFIG_DEBUG_FS
+static void wkup_m3_set_halt_late(bool enabled)
+{
+ if (enabled)
+ m3_ipc_state->halt = (1 << IPC_DBG_HALT_SHIFT);
+ else
+ m3_ipc_state->halt = 0;
+}
+
+static int option_get(void *data, u64 *val)
+{
+ u32 *option = data;
+
+ *val = *option;
+
+ return 0;
+}
+
+static int option_set(void *data, u64 val)
+{
+ u32 *option = data;
+
+ *option = val;
+
+ if (option == &m3_ipc_state->halt) {
+ if (val)
+ wkup_m3_set_halt_late(true);
+ else
+ wkup_m3_set_halt_late(false);
+ }
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(wkup_m3_ipc_option_fops, option_get, option_set,
+ "%llu\n");
+
+static int wkup_m3_ipc_dbg_init(struct wkup_m3_ipc *m3_ipc)
+{
+ m3_ipc->dbg_path = debugfs_create_dir("wkup_m3_ipc", NULL);
+ if (IS_ERR(m3_ipc->dbg_path))
+ return PTR_ERR(m3_ipc->dbg_path);
+
+ (void)debugfs_create_file("enable_late_halt", 0644,
+ m3_ipc->dbg_path,
+ &m3_ipc->halt,
+ &wkup_m3_ipc_option_fops);
+
+ return 0;
+}
+
+static inline void wkup_m3_ipc_dbg_destroy(struct wkup_m3_ipc *m3_ipc)
+{
+ debugfs_remove_recursive(m3_ipc->dbg_path);
+}
+#else
+static inline int wkup_m3_ipc_dbg_init(struct wkup_m3_ipc *m3_ipc)
+{
+ return 0;
+}
+
+static inline void wkup_m3_ipc_dbg_destroy(struct wkup_m3_ipc *m3_ipc)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static void am33xx_txev_eoi(struct wkup_m3_ipc *m3_ipc)
+{
+ writel(AM33XX_M3_TXEV_ACK,
+ m3_ipc->ipc_mem_base + AM33XX_CONTROL_M3_TXEV_EOI);
+}
+
+static void am33xx_txev_enable(struct wkup_m3_ipc *m3_ipc)
+{
+ writel(AM33XX_M3_TXEV_ENABLE,
+ m3_ipc->ipc_mem_base + AM33XX_CONTROL_M3_TXEV_EOI);
+}
+
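+/*
+ * The control module exposes a small window of IPC registers shared by
+ * the MPU and the CM3; all PM commands and responses travel through it.
+ * Accesses are bounds-checked since the register offset is computed.
+ */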
+static void wkup_m3_ctrl_ipc_write(struct wkup_m3_ipc *m3_ipc,
+ u32 val, int ipc_reg_num)
+{
+ if (WARN(ipc_reg_num < 0 || ipc_reg_num >= AM33XX_CTRL_IPC_REG_COUNT,
+ "ipc register operation out of range"))
+ return;
+
+ writel(val, m3_ipc->ipc_mem_base +
+ AM33XX_CTRL_IPC_REG_OFFSET(ipc_reg_num));
+}
+
+static unsigned int wkup_m3_ctrl_ipc_read(struct wkup_m3_ipc *m3_ipc,
+ int ipc_reg_num)
+{
+ if (WARN(ipc_reg_num < 0 || ipc_reg_num >= AM33XX_CTRL_IPC_REG_COUNT,
+ "ipc register operation out of range"))
+ return 0;
+
+ return readl(m3_ipc->ipc_mem_base +
+ AM33XX_CTRL_IPC_REG_OFFSET(ipc_reg_num));
+}
+
+static int wkup_m3_fw_version_read(struct wkup_m3_ipc *m3_ipc)
+{
+ int val;
+
+ val = wkup_m3_ctrl_ipc_read(m3_ipc, 2);
+
+ return val & M3_FW_VERSION_MASK;
+}
+
+static irqreturn_t wkup_m3_txev_handler(int irq, void *ipc_data)
+{
+ struct wkup_m3_ipc *m3_ipc = ipc_data;
+ struct device *dev = m3_ipc->dev;
+ int ver = 0;
+
+ am33xx_txev_eoi(m3_ipc);
+
+ switch (m3_ipc->state) {
+ case M3_STATE_RESET:
+ ver = wkup_m3_fw_version_read(m3_ipc);
+
+ if (ver == M3_VERSION_UNKNOWN ||
+ ver < M3_BASELINE_VERSION) {
+ dev_warn(dev, "CM3 Firmware Version %x not supported\n",
+ ver);
+ } else {
+ dev_info(dev, "CM3 Firmware Version = 0x%x\n", ver);
+ }
+
+ m3_ipc->state = M3_STATE_INITED;
+ wkup_m3_init_scale_data(m3_ipc, dev);
+ complete(&m3_ipc->sync_complete);
+ break;
+ case M3_STATE_MSG_FOR_RESET:
+ m3_ipc->state = M3_STATE_INITED;
+ complete(&m3_ipc->sync_complete);
+ break;
+ case M3_STATE_MSG_FOR_LP:
+ complete(&m3_ipc->sync_complete);
+ break;
+ case M3_STATE_UNKNOWN:
+ dev_warn(dev, "Unknown CM3 State\n");
+ }
+
+ am33xx_txev_enable(m3_ipc);
+
+ return IRQ_HANDLED;
+}
+
+static int wkup_m3_ping(struct wkup_m3_ipc *m3_ipc)
+{
+ struct device *dev = m3_ipc->dev;
+ mbox_msg_t dummy_msg = 0;
+ int ret;
+
+ if (!m3_ipc->mbox) {
+ dev_err(dev,
+ "No IPC channel to communicate with wkup_m3!\n");
+ return -EIO;
+ }
+
+ /*
+ * Write a dummy message to the mailbox in order to trigger the RX
+ * interrupt to alert the M3 that data is available in the IPC
+ * registers. We must enable the IRQ here and disable it after in
+ * the RX callback to avoid multiple interrupts being received
+ * by the CM3.
+ */
+ ret = mbox_send_message(m3_ipc->mbox, &dummy_msg);
+ if (ret < 0) {
+ dev_err(dev, "%s: mbox_send_message() failed: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&m3_ipc->sync_complete,
+ msecs_to_jiffies(500));
+ if (!ret) {
+ dev_err(dev, "MPU<->CM3 sync failure\n");
+ m3_ipc->state = M3_STATE_UNKNOWN;
+ return -EIO;
+ }
+
+ mbox_client_txdone(m3_ipc->mbox, 0);
+ return 0;
+}
+
+static int wkup_m3_ping_noirq(struct wkup_m3_ipc *m3_ipc)
+{
+ struct device *dev = m3_ipc->dev;
+ mbox_msg_t dummy_msg = 0;
+ int ret;
+
+ if (!m3_ipc->mbox) {
+ dev_err(dev,
+ "No IPC channel to communicate with wkup_m3!\n");
+ return -EIO;
+ }
+
+ ret = mbox_send_message(m3_ipc->mbox, &dummy_msg);
+ if (ret < 0) {
+ dev_err(dev, "%s: mbox_send_message() failed: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ mbox_client_txdone(m3_ipc->mbox, 0);
+ return 0;
+}
+
+static int wkup_m3_is_available(struct wkup_m3_ipc *m3_ipc)
+{
+ return ((m3_ipc->state != M3_STATE_RESET) &&
+ (m3_ipc->state != M3_STATE_UNKNOWN));
+}
+
+static void wkup_m3_set_vtt_gpio(struct wkup_m3_ipc *m3_ipc, int gpio)
+{
+ m3_ipc->vtt_conf = (1 << IPC_VTT_STAT_SHIFT) |
+ (gpio << IPC_VTT_GPIO_PIN_SHIFT);
+}
+
+static void wkup_m3_set_io_isolation(struct wkup_m3_ipc *m3_ipc)
+{
+ m3_ipc->isolation_conf = (1 << IPC_IO_ISOLATION_STAT_SHIFT);
+}
+
+/* Public functions */
+/**
+ * wkup_m3_set_mem_type - Pass wkup_m3 which type of memory is in use
+ * @m3_ipc: Pointer to wkup_m3_ipc context
+ * @mem_type: memory type value read directly from emif
+ *
+ * wkup_m3 must know what memory type is in use to properly suspend
+ * and resume.
+ */
+static void wkup_m3_set_mem_type(struct wkup_m3_ipc *m3_ipc, int mem_type)
+{
+ m3_ipc->mem_type = mem_type;
+}
+
+/**
+ * wkup_m3_set_resume_address - Pass wkup_m3 resume address
+ * @m3_ipc: Pointer to wkup_m3_ipc context
+ * @addr: Physical address from which resume code should execute
+ */
+static void wkup_m3_set_resume_address(struct wkup_m3_ipc *m3_ipc, void *addr)
+{
+ m3_ipc->resume_addr = (unsigned long)addr;
+}
+
+/**
+ * wkup_m3_request_pm_status - Retrieve wkup_m3 status code after suspend
+ * @m3_ipc: Pointer to wkup_m3_ipc context
+ *
+ * Returns code representing the status of a low power mode transition.
+ * 0 - Successful transition
+ * 1 - Failure to transition to low power state
+ */
+static int wkup_m3_request_pm_status(struct wkup_m3_ipc *m3_ipc)
+{
+ unsigned int i;
+ int val;
+
+ val = wkup_m3_ctrl_ipc_read(m3_ipc, 1);
+
+ i = M3_STATUS_RESP_MASK & val;
+ i >>= __ffs(M3_STATUS_RESP_MASK);
+
+ return i;
+}
+
+/**
+ * wkup_m3_prepare_low_power - Request preparation for transition to
+ * low power state
+ * @m3_ipc: Pointer to wkup_m3_ipc context
+ * @state: A kernel suspend state to enter, either MEM or STANDBY
+ *
+ * Returns 0 if preparation was successful, otherwise returns error code
+ */
+static int wkup_m3_prepare_low_power(struct wkup_m3_ipc *m3_ipc, int state)
+{
+ struct device *dev = m3_ipc->dev;
+ int m3_power_state;
+ int ret = 0;
+
+ if (!wkup_m3_is_available(m3_ipc))
+ return -ENODEV;
+
+ switch (state) {
+ case WKUP_M3_DEEPSLEEP:
+ m3_power_state = IPC_CMD_DS0;
+ wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->volt_scale_offsets, 5);
+ break;
+ case WKUP_M3_STANDBY:
+ m3_power_state = IPC_CMD_STANDBY;
+ wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 5);
+ break;
+ case WKUP_M3_IDLE:
+ m3_power_state = IPC_CMD_IDLE;
+ wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 5);
+ break;
+ default:
+ return 1;
+ }
+
+ /* Program each required IPC register then write defaults to others */
+ wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->resume_addr, 0);
+ wkup_m3_ctrl_ipc_write(m3_ipc, m3_power_state, 1);
+ wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->mem_type |
+ m3_ipc->vtt_conf |
+ m3_ipc->isolation_conf |
+ m3_ipc->halt, 4);
+
+ wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 2);
+ wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 3);
+ wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 6);
+ wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 7);
+
+ m3_ipc->state = M3_STATE_MSG_FOR_LP;
+
+ if (state == WKUP_M3_IDLE)
+ ret = wkup_m3_ping_noirq(m3_ipc);
+ else
+ ret = wkup_m3_ping(m3_ipc);
+
+ if (ret) {
+ dev_err(dev, "Unable to ping CM3\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * wkup_m3_finish_low_power - Return m3 to reset state
+ * @m3_ipc: Pointer to wkup_m3_ipc context
+ *
+ * Returns 0 if reset was successful, otherwise returns error code
+ */
+static int wkup_m3_finish_low_power(struct wkup_m3_ipc *m3_ipc)
+{
+ struct device *dev = m3_ipc->dev;
+ int ret = 0;
+
+ if (!wkup_m3_is_available(m3_ipc))
+ return -ENODEV;
+
+ wkup_m3_ctrl_ipc_write(m3_ipc, IPC_CMD_RESET, 1);
+ wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 2);
+
+ m3_ipc->state = M3_STATE_MSG_FOR_RESET;
+
+ ret = wkup_m3_ping(m3_ipc);
+ if (ret) {
+ dev_err(dev, "Unable to ping CM3\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * wkup_m3_request_wake_src - Get the wakeup source info passed from wkup_m3
+ * @m3_ipc: Pointer to wkup_m3_ipc context
+ */
+static const char *wkup_m3_request_wake_src(struct wkup_m3_ipc *m3_ipc)
+{
+ unsigned int wakeup_src_idx;
+ int j, val;
+
+ val = wkup_m3_ctrl_ipc_read(m3_ipc, 6);
+
+ wakeup_src_idx = val & M3_WAKE_SRC_MASK;
+
+ for (j = 0; j < ARRAY_SIZE(wakeups) - 1; j++) {
+ if (wakeups[j].irq_nr == wakeup_src_idx)
+ return wakeups[j].src;
+ }
+ return wakeups[j].src;
+}
+
+/**
+ * wkup_m3_set_rtc_only - Set the rtc_only flag
+ * @m3_ipc: Pointer to wkup_m3_ipc context
+ */
+static void wkup_m3_set_rtc_only(struct wkup_m3_ipc *m3_ipc)
+{
+ if (m3_ipc_state)
+ m3_ipc_state->is_rtc_only = true;
+}
+
+static struct wkup_m3_ipc_ops ipc_ops = {
+ .set_mem_type = wkup_m3_set_mem_type,
+ .set_resume_address = wkup_m3_set_resume_address,
+ .prepare_low_power = wkup_m3_prepare_low_power,
+ .finish_low_power = wkup_m3_finish_low_power,
+ .request_pm_status = wkup_m3_request_pm_status,
+ .request_wake_src = wkup_m3_request_wake_src,
+ .set_rtc_only = wkup_m3_set_rtc_only,
+};
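+
+/*
+ * Consumers reach the ops above through wkup_m3_ipc_get()->ops. A minimal
+ * suspend sequence, sketched on the assumption of a pm33xx-like client:
+ *
+ *   m3_ipc->ops->set_mem_type(m3_ipc, mem_type);
+ *   m3_ipc->ops->set_resume_address(m3_ipc, (void *)resume_pa);
+ *   m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_DEEPSLEEP);
+ *   ... system enters and leaves the low power state ...
+ *   status = m3_ipc->ops->request_pm_status(m3_ipc);
+ *   m3_ipc->ops->finish_low_power(m3_ipc);
+ */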
+
+/**
+ * wkup_m3_ipc_get - Return handle to wkup_m3_ipc
+ *
+ * Returns NULL if the wkup_m3 is not yet available, otherwise returns
+ * pointer to wkup_m3_ipc struct.
+ */
+struct wkup_m3_ipc *wkup_m3_ipc_get(void)
+{
+ if (m3_ipc_state)
+ get_device(m3_ipc_state->dev);
+ else
+ return NULL;
+
+ return m3_ipc_state;
+}
+EXPORT_SYMBOL_GPL(wkup_m3_ipc_get);
+
+/**
+ * wkup_m3_ipc_put - Free handle to wkup_m3_ipc returned from wkup_m3_ipc_get
+ * @m3_ipc: A pointer to wkup_m3_ipc struct returned by wkup_m3_ipc_get
+ */
+void wkup_m3_ipc_put(struct wkup_m3_ipc *m3_ipc)
+{
+ if (m3_ipc_state)
+ put_device(m3_ipc_state->dev);
+}
+EXPORT_SYMBOL_GPL(wkup_m3_ipc_put);
+
+static int wkup_m3_rproc_boot_thread(void *arg)
+{
+ struct wkup_m3_ipc *m3_ipc = arg;
+ struct device *dev = m3_ipc->dev;
+ int ret;
+
+ init_completion(&m3_ipc->sync_complete);
+
+ ret = rproc_boot(m3_ipc->rproc);
+ if (ret)
+ dev_err(dev, "rproc_boot failed\n");
+ else
+ m3_ipc_state = m3_ipc;
+
+ return 0;
+}
+
+static int wkup_m3_ipc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int irq, ret, temp;
+ phandle rproc_phandle;
+ struct rproc *m3_rproc;
+ struct resource *res;
+ struct task_struct *task;
+ struct wkup_m3_ipc *m3_ipc;
+ struct device_node *np = dev->of_node;
+
+ m3_ipc = devm_kzalloc(dev, sizeof(*m3_ipc), GFP_KERNEL);
+ if (!m3_ipc)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ m3_ipc->ipc_mem_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(m3_ipc->ipc_mem_base))
+ return PTR_ERR(m3_ipc->ipc_mem_base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, wkup_m3_txev_handler,
+ 0, "wkup_m3_txev", m3_ipc);
+ if (ret) {
+ dev_err(dev, "request_irq failed\n");
+ return ret;
+ }
+
+ m3_ipc->mbox_client.dev = dev;
+ m3_ipc->mbox_client.tx_done = NULL;
+ m3_ipc->mbox_client.tx_prepare = NULL;
+ m3_ipc->mbox_client.rx_callback = NULL;
+ m3_ipc->mbox_client.tx_block = false;
+ m3_ipc->mbox_client.knows_txdone = false;
+
+ m3_ipc->mbox = mbox_request_channel(&m3_ipc->mbox_client, 0);
+
+ if (IS_ERR(m3_ipc->mbox)) {
+ dev_err(dev, "IPC Request for A8->M3 Channel failed! %ld\n",
+ PTR_ERR(m3_ipc->mbox));
+ return PTR_ERR(m3_ipc->mbox);
+ }
+
+ if (of_property_read_u32(dev->of_node, "ti,rproc", &rproc_phandle)) {
+ dev_err(&pdev->dev, "could not get rproc phandle\n");
+ ret = -ENODEV;
+ goto err_free_mbox;
+ }
+
+ m3_rproc = rproc_get_by_phandle(rproc_phandle);
+ if (!m3_rproc) {
+ dev_err(&pdev->dev, "could not get rproc handle\n");
+ ret = -EPROBE_DEFER;
+ goto err_free_mbox;
+ }
+
+ m3_ipc->rproc = m3_rproc;
+ m3_ipc->dev = dev;
+ m3_ipc->state = M3_STATE_RESET;
+
+ m3_ipc->ops = &ipc_ops;
+
+ if (!of_property_read_u32(np, "ti,vtt-gpio-pin", &temp)) {
+ if (temp >= 0 && temp <= 31)
+ wkup_m3_set_vtt_gpio(m3_ipc, temp);
+ else
+ dev_warn(dev, "Invalid VTT GPIO(%d) pin\n", temp);
+ }
+
+ if (of_find_property(np, "ti,set-io-isolation", NULL))
+ wkup_m3_set_io_isolation(m3_ipc);
+
+ ret = of_property_read_string(np, "firmware-name",
+ &m3_ipc->sd_fw_name);
+ if (ret)
+ dev_dbg(dev, "Voltage scaling data blob not provided from DT.\n");
+
+ /*
+ * Wait for firmware loading completion in a thread so we
+ * can boot the wkup_m3 as soon as it's ready without holding
+ * up kernel boot
+ */
+ task = kthread_run(wkup_m3_rproc_boot_thread, m3_ipc,
+ "wkup_m3_rproc_loader");
+
+ if (IS_ERR(task)) {
+ dev_err(dev, "can't create rproc_boot thread\n");
+ ret = PTR_ERR(task);
+ goto err_put_rproc;
+ }
+
+ wkup_m3_ipc_dbg_init(m3_ipc);
+
+ return 0;
+
+err_put_rproc:
+ rproc_put(m3_rproc);
+err_free_mbox:
+ mbox_free_channel(m3_ipc->mbox);
+ return ret;
+}
+
+static int wkup_m3_ipc_remove(struct platform_device *pdev)
+{
+ wkup_m3_ipc_dbg_destroy(m3_ipc_state);
+
+ mbox_free_channel(m3_ipc_state->mbox);
+
+ rproc_shutdown(m3_ipc_state->rproc);
+ rproc_put(m3_ipc_state->rproc);
+
+ m3_ipc_state = NULL;
+
+ return 0;
+}
+
+static int __maybe_unused wkup_m3_ipc_suspend(struct device *dev)
+{
+ /*
+ * Nothing needs to be done on suspend even with rtc_only flag set
+ */
+ return 0;
+}
+
+static int __maybe_unused wkup_m3_ipc_resume(struct device *dev)
+{
+ if (m3_ipc_state->is_rtc_only) {
+ rproc_shutdown(m3_ipc_state->rproc);
+ rproc_boot(m3_ipc_state->rproc);
+ }
+
+ m3_ipc_state->is_rtc_only = false;
+
+ return 0;
+}
+
+static const struct dev_pm_ops wkup_m3_ipc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(wkup_m3_ipc_suspend, wkup_m3_ipc_resume)
+};
+
+static const struct of_device_id wkup_m3_ipc_of_match[] = {
+ { .compatible = "ti,am3352-wkup-m3-ipc", },
+ { .compatible = "ti,am4372-wkup-m3-ipc", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, wkup_m3_ipc_of_match);
+
+static struct platform_driver wkup_m3_ipc_driver = {
+ .probe = wkup_m3_ipc_probe,
+ .remove = wkup_m3_ipc_remove,
+ .driver = {
+ .name = "wkup_m3_ipc",
+ .of_match_table = wkup_m3_ipc_of_match,
+ .pm = &wkup_m3_ipc_pm_ops,
+ },
+};
+
+module_platform_driver(wkup_m3_ipc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("wkup m3 remote processor ipc driver");
+MODULE_AUTHOR("Dave Gerlach <d-gerlach@ti.com>");