path: root/drivers/soc

Diffstat
-rw-r--r--  drivers/soc/Kconfig  22
-rw-r--r--  drivers/soc/Makefile  27
-rw-r--r--  drivers/soc/actions/Kconfig  16
-rw-r--r--  drivers/soc/actions/Makefile  2
-rw-r--r--  drivers/soc/actions/owl-sps-helper.c  51
-rw-r--r--  drivers/soc/actions/owl-sps.c  266
-rw-r--r--  drivers/soc/amlogic/Kconfig  33
-rw-r--r--  drivers/soc/amlogic/Makefile  3
-rw-r--r--  drivers/soc/amlogic/meson-gx-pwrc-vpu.c  248
-rw-r--r--  drivers/soc/amlogic/meson-gx-socinfo.c  189
-rw-r--r--  drivers/soc/amlogic/meson-mx-socinfo.c  175
-rw-r--r--  drivers/soc/atmel/Kconfig  6
-rw-r--r--  drivers/soc/atmel/Makefile  1
-rw-r--r--  drivers/soc/atmel/soc.c  276
-rw-r--r--  drivers/soc/atmel/soc.h  121
-rw-r--r--  drivers/soc/bcm/Kconfig  25
-rw-r--r--  drivers/soc/bcm/Makefile  2
-rw-r--r--  drivers/soc/bcm/brcmstb/Kconfig  10
-rw-r--r--  drivers/soc/bcm/brcmstb/Makefile  2
-rw-r--r--  drivers/soc/bcm/brcmstb/biuctrl.c  265
-rw-r--r--  drivers/soc/bcm/brcmstb/common.c  143
-rw-r--r--  drivers/soc/bcm/brcmstb/pm/Makefile  3
-rw-r--r--  drivers/soc/bcm/brcmstb/pm/aon_defs.h  113
-rw-r--r--  drivers/soc/bcm/brcmstb/pm/pm-arm.c  838
-rw-r--r--  drivers/soc/bcm/brcmstb/pm/pm-mips.c  461
-rw-r--r--  drivers/soc/bcm/brcmstb/pm/pm.h  89
-rw-r--r--  drivers/soc/bcm/brcmstb/pm/s2-arm.S  76
-rw-r--r--  drivers/soc/bcm/brcmstb/pm/s2-mips.S  200
-rw-r--r--  drivers/soc/bcm/brcmstb/pm/s3-mips.S  146
-rw-r--r--  drivers/soc/bcm/raspberrypi-power.c  249
-rw-r--r--  drivers/soc/dove/Makefile  1
-rw-r--r--  drivers/soc/dove/pmu.c  455
-rw-r--r--  drivers/soc/fsl/Kconfig  31
-rw-r--r--  drivers/soc/fsl/Makefile  9
-rw-r--r--  drivers/soc/fsl/dpio/Makefile  8
-rw-r--r--  drivers/soc/fsl/dpio/dpio-cmd.h  49
-rw-r--r--  drivers/soc/fsl/dpio/dpio-driver.c  278
-rw-r--r--  drivers/soc/fsl/dpio/dpio-service.c  545
-rw-r--r--  drivers/soc/fsl/dpio/dpio.c  198
-rw-r--r--  drivers/soc/fsl/dpio/dpio.h  83
-rw-r--r--  drivers/soc/fsl/dpio/qbman-portal.c  1005
-rw-r--r--  drivers/soc/fsl/dpio/qbman-portal.h  444
-rw-r--r--  drivers/soc/fsl/guts.c  248
-rw-r--r--  drivers/soc/fsl/qbman/Kconfig  67
-rw-r--r--  drivers/soc/fsl/qbman/Makefile  13
-rw-r--r--  drivers/soc/fsl/qbman/bman.c  821
-rw-r--r--  drivers/soc/fsl/qbman/bman_ccsr.c  288
-rw-r--r--  drivers/soc/fsl/qbman/bman_portal.c  215
-rw-r--r--  drivers/soc/fsl/qbman/bman_priv.h  78
-rw-r--r--  drivers/soc/fsl/qbman/bman_test.c  53
-rw-r--r--  drivers/soc/fsl/qbman/bman_test.h  35
-rw-r--r--  drivers/soc/fsl/qbman/bman_test_api.c  151
-rw-r--r--  drivers/soc/fsl/qbman/dpaa_sys.c  78
-rw-r--r--  drivers/soc/fsl/qbman/dpaa_sys.h  114
-rw-r--r--  drivers/soc/fsl/qbman/qman.c  2889
-rw-r--r--  drivers/soc/fsl/qbman/qman_ccsr.c  861
-rw-r--r--  drivers/soc/fsl/qbman/qman_portal.c  361
-rw-r--r--  drivers/soc/fsl/qbman/qman_priv.h  267
-rw-r--r--  drivers/soc/fsl/qbman/qman_test.c  62
-rw-r--r--  drivers/soc/fsl/qbman/qman_test.h  34
-rw-r--r--  drivers/soc/fsl/qbman/qman_test_api.c  245
-rw-r--r--  drivers/soc/fsl/qbman/qman_test_stash.c  627
-rw-r--r--  drivers/soc/fsl/qe/Kconfig  42
-rw-r--r--  drivers/soc/fsl/qe/Makefile  12
-rw-r--r--  drivers/soc/fsl/qe/gpio.c  344
-rw-r--r--  drivers/soc/fsl/qe/qe.c  736
-rw-r--r--  drivers/soc/fsl/qe/qe_common.c  243
-rw-r--r--  drivers/soc/fsl/qe/qe_ic.c  512
-rw-r--r--  drivers/soc/fsl/qe/qe_ic.h  103
-rw-r--r--  drivers/soc/fsl/qe/qe_io.c  194
-rw-r--r--  drivers/soc/fsl/qe/qe_tdm.c  278
-rw-r--r--  drivers/soc/fsl/qe/ucc.c  662
-rw-r--r--  drivers/soc/fsl/qe/ucc_fast.c  399
-rw-r--r--  drivers/soc/fsl/qe/ucc_slow.c  374
-rw-r--r--  drivers/soc/fsl/qe/usb.c  56
-rw-r--r--  drivers/soc/gemini/Makefile  2
-rw-r--r--  drivers/soc/gemini/soc-gemini.c  71
-rw-r--r--  drivers/soc/imx/Kconfig  10
-rw-r--r--  drivers/soc/imx/Makefile  2
-rw-r--r--  drivers/soc/imx/gpc.c  549
-rw-r--r--  drivers/soc/imx/gpcv2.c  377
-rw-r--r--  drivers/soc/lantiq/Makefile  2
-rw-r--r--  drivers/soc/lantiq/fpi-bus.c  87
-rw-r--r--  drivers/soc/lantiq/gphy.c  224
-rw-r--r--  drivers/soc/mediatek/Kconfig  34
-rw-r--r--  drivers/soc/mediatek/Makefile  3
-rw-r--r--  drivers/soc/mediatek/mtk-infracfg.c  87
-rw-r--r--  drivers/soc/mediatek/mtk-pmic-wrap.c  1684
-rw-r--r--  drivers/soc/mediatek/mtk-scpsys.c  1077
-rw-r--r--  drivers/soc/qcom/Kconfig  166
-rw-r--r--  drivers/soc/qcom/Makefile  23
-rw-r--r--  drivers/soc/qcom/apr.c  378
-rw-r--r--  drivers/soc/qcom/cmd-db.c  317
-rw-r--r--  drivers/soc/qcom/glink_ssr.c  164
-rw-r--r--  drivers/soc/qcom/llcc-sdm845.c  94
-rw-r--r--  drivers/soc/qcom/llcc-slice.c  338
-rw-r--r--  drivers/soc/qcom/mdt_loader.c  262
-rw-r--r--  drivers/soc/qcom/qcom-geni-se.c  768
-rw-r--r--  drivers/soc/qcom/qcom_gsbi.c  259
-rw-r--r--  drivers/soc/qcom/qmi_encdec.c  816
-rw-r--r--  drivers/soc/qcom/qmi_interface.c  848
-rw-r--r--  drivers/soc/qcom/rmtfs_mem.c  311
-rw-r--r--  drivers/soc/qcom/rpmh-internal.h  114
-rw-r--r--  drivers/soc/qcom/rpmh-rsc.c  726
-rw-r--r--  drivers/soc/qcom/rpmh.c  519
-rw-r--r--  drivers/soc/qcom/smd-rpm.c  258
-rw-r--r--  drivers/soc/qcom/smem.c  1023
-rw-r--r--  drivers/soc/qcom/smem_state.c  201
-rw-r--r--  drivers/soc/qcom/smp2p.c  608
-rw-r--r--  drivers/soc/qcom/smsm.c  635
-rw-r--r--  drivers/soc/qcom/spm.c  383
-rw-r--r--  drivers/soc/qcom/trace-rpmh.h  82
-rw-r--r--  drivers/soc/qcom/wcnss_ctrl.c  366
-rw-r--r--  drivers/soc/renesas/Kconfig  95
-rw-r--r--  drivers/soc/renesas/Makefile  27
-rw-r--r--  drivers/soc/renesas/r8a7743-sysc.c  32
-rw-r--r--  drivers/soc/renesas/r8a7745-sysc.c  32
-rw-r--r--  drivers/soc/renesas/r8a77470-sysc.c  29
-rw-r--r--  drivers/soc/renesas/r8a7779-sysc.c  34
-rw-r--r--  drivers/soc/renesas/r8a7790-sysc.c  48
-rw-r--r--  drivers/soc/renesas/r8a7791-sysc.c  33
-rw-r--r--  drivers/soc/renesas/r8a7792-sysc.c  34
-rw-r--r--  drivers/soc/renesas/r8a7794-sysc.c  33
-rw-r--r--  drivers/soc/renesas/r8a7795-sysc.c  78
-rw-r--r--  drivers/soc/renesas/r8a7796-sysc.c  48
-rw-r--r--  drivers/soc/renesas/r8a77965-sysc.c  37
-rw-r--r--  drivers/soc/renesas/r8a77970-sysc.c  39
-rw-r--r--  drivers/soc/renesas/r8a77980-sysc.c  52
-rw-r--r--  drivers/soc/renesas/r8a77990-sysc.c  53
-rw-r--r--  drivers/soc/renesas/r8a77995-sysc.c  30
-rw-r--r--  drivers/soc/renesas/r9a06g032-smp.c  96
-rw-r--r--  drivers/soc/renesas/rcar-rst.c  121
-rw-r--r--  drivers/soc/renesas/rcar-sysc.c  492
-rw-r--r--  drivers/soc/renesas/rcar-sysc.h  76
-rw-r--r--  drivers/soc/renesas/renesas-soc.c  327
-rw-r--r--  drivers/soc/rockchip/Kconfig  28
-rw-r--r--  drivers/soc/rockchip/Makefile  5
-rw-r--r--  drivers/soc/rockchip/grf.c  180
-rw-r--r--  drivers/soc/rockchip/pm_domains.c  1000
-rw-r--r--  drivers/soc/samsung/Kconfig  24
-rw-r--r--  drivers/soc/samsung/Makefile  6
-rw-r--r--  drivers/soc/samsung/exynos-pmu.c  153
-rw-r--r--  drivers/soc/samsung/exynos-pmu.h  42
-rw-r--r--  drivers/soc/samsung/exynos3250-pmu.c  171
-rw-r--r--  drivers/soc/samsung/exynos4-pmu.c  209
-rw-r--r--  drivers/soc/samsung/exynos5250-pmu.c  191
-rw-r--r--  drivers/soc/samsung/exynos5420-pmu.c  276
-rw-r--r--  drivers/soc/samsung/pm_domains.c  168
-rw-r--r--  drivers/soc/sunxi/Kconfig  11
-rw-r--r--  drivers/soc/sunxi/Makefile  1
-rw-r--r--  drivers/soc/sunxi/sunxi_sram.c  407
-rw-r--r--  drivers/soc/tegra/Kconfig  134
-rw-r--r--  drivers/soc/tegra/Makefile  7
-rw-r--r--  drivers/soc/tegra/common.c  36
-rw-r--r--  drivers/soc/tegra/flowctrl.c  224
-rw-r--r--  drivers/soc/tegra/fuse/Makefile  11
-rw-r--r--  drivers/soc/tegra/fuse/fuse-tegra.c  378
-rw-r--r--  drivers/soc/tegra/fuse/fuse-tegra20.c  178
-rw-r--r--  drivers/soc/tegra/fuse/fuse-tegra30.c  182
-rw-r--r--  drivers/soc/tegra/fuse/fuse.h  112
-rw-r--r--  drivers/soc/tegra/fuse/speedo-tegra114.c  110
-rw-r--r--  drivers/soc/tegra/fuse/speedo-tegra124.c  168
-rw-r--r--  drivers/soc/tegra/fuse/speedo-tegra20.c  110
-rw-r--r--  drivers/soc/tegra/fuse/speedo-tegra210.c  184
-rw-r--r--  drivers/soc/tegra/fuse/speedo-tegra30.c  288
-rw-r--r--  drivers/soc/tegra/fuse/tegra-apbmisc.c  183
-rw-r--r--  drivers/soc/tegra/pmc.c  2005
-rw-r--r--  drivers/soc/tegra/powergate-bpmp.c  370
-rw-r--r--  drivers/soc/ti/Kconfig  76
-rw-r--r--  drivers/soc/ti/Makefile  10
-rw-r--r--  drivers/soc/ti/knav_dma.c  831
-rw-r--r--  drivers/soc/ti/knav_qmss.h  395
-rw-r--r--  drivers/soc/ti/knav_qmss_acc.c  592
-rw-r--r--  drivers/soc/ti/knav_qmss_queue.c  1922
-rw-r--r--  drivers/soc/ti/pm33xx.c  363
-rw-r--r--  drivers/soc/ti/ti_sci_pm_domains.c  204
-rw-r--r--  drivers/soc/ti/wkup_m3_ipc.c  583
-rw-r--r--  drivers/soc/ux500/Kconfig  7
-rw-r--r--  drivers/soc/ux500/Makefile  1
-rw-r--r--  drivers/soc/ux500/ux500-soc-id.c  222
-rw-r--r--  drivers/soc/versatile/Kconfig  19
-rw-r--r--  drivers/soc/versatile/Makefile  2
-rw-r--r--  drivers/soc/versatile/soc-integrator.c  155
-rw-r--r--  drivers/soc/versatile/soc-realview.c  138
-rw-r--r--  drivers/soc/xilinx/Kconfig  20
-rw-r--r--  drivers/soc/xilinx/Makefile  2
-rw-r--r--  drivers/soc/xilinx/xlnx_vcu.c  630
-rw-r--r--  drivers/soc/zte/Kconfig  14
-rw-r--r--  drivers/soc/zte/Makefile  5
-rw-r--r--  drivers/soc/zte/zx296718_pm_domains.c  181
-rw-r--r--  drivers/soc/zte/zx2967_pm_domains.c  141
-rw-r--r--  drivers/soc/zte/zx2967_pm_domains.h  44
192 files changed, 48449 insertions, 0 deletions
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
new file mode 100644
index 000000000..c07b4a852
--- /dev/null
+++ b/drivers/soc/Kconfig
@@ -0,0 +1,22 @@
+menu "SOC (System On Chip) specific Drivers"
+
+source "drivers/soc/actions/Kconfig"
+source "drivers/soc/amlogic/Kconfig"
+source "drivers/soc/atmel/Kconfig"
+source "drivers/soc/bcm/Kconfig"
+source "drivers/soc/fsl/Kconfig"
+source "drivers/soc/imx/Kconfig"
+source "drivers/soc/mediatek/Kconfig"
+source "drivers/soc/qcom/Kconfig"
+source "drivers/soc/renesas/Kconfig"
+source "drivers/soc/rockchip/Kconfig"
+source "drivers/soc/samsung/Kconfig"
+source "drivers/soc/sunxi/Kconfig"
+source "drivers/soc/tegra/Kconfig"
+source "drivers/soc/ti/Kconfig"
+source "drivers/soc/ux500/Kconfig"
+source "drivers/soc/versatile/Kconfig"
+source "drivers/soc/xilinx/Kconfig"
+source "drivers/soc/zte/Kconfig"
+
+endmenu
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
new file mode 100644
index 000000000..f0d46b16e
--- /dev/null
+++ b/drivers/soc/Makefile
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Linux Kernel SOC specific device drivers.
+#
+
+obj-$(CONFIG_ARCH_ACTIONS) += actions/
+obj-$(CONFIG_ARCH_AT91) += atmel/
+obj-y += bcm/
+obj-$(CONFIG_ARCH_DOVE) += dove/
+obj-$(CONFIG_MACH_DOVE) += dove/
+obj-y += fsl/
+obj-$(CONFIG_ARCH_GEMINI) += gemini/
+obj-$(CONFIG_ARCH_MXC) += imx/
+obj-$(CONFIG_SOC_XWAY) += lantiq/
+obj-y += mediatek/
+obj-y += amlogic/
+obj-y += qcom/
+obj-y += renesas/
+obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
+obj-$(CONFIG_SOC_SAMSUNG) += samsung/
+obj-$(CONFIG_ARCH_SUNXI) += sunxi/
+obj-$(CONFIG_ARCH_TEGRA) += tegra/
+obj-$(CONFIG_SOC_TI) += ti/
+obj-$(CONFIG_ARCH_U8500) += ux500/
+obj-$(CONFIG_PLAT_VERSATILE) += versatile/
+obj-y += xilinx/
+obj-$(CONFIG_ARCH_ZX) += zte/
diff --git a/drivers/soc/actions/Kconfig b/drivers/soc/actions/Kconfig
new file mode 100644
index 000000000..9d68b5a77
--- /dev/null
+++ b/drivers/soc/actions/Kconfig
@@ -0,0 +1,16 @@
+if ARCH_ACTIONS || COMPILE_TEST
+
+config OWL_PM_DOMAINS_HELPER
+ bool
+
+config OWL_PM_DOMAINS
+ bool "Actions Semi SPS power domains"
+ depends on PM
+ select OWL_PM_DOMAINS_HELPER
+ select PM_GENERIC_DOMAINS
+ help
+ Say 'y' here to enable support for Smart Power System (SPS)
+ power-gating on the Actions Semiconductor S500 SoC.
+ If unsure, say 'n'.
+
+endif
diff --git a/drivers/soc/actions/Makefile b/drivers/soc/actions/Makefile
new file mode 100644
index 000000000..1e101b06b
--- /dev/null
+++ b/drivers/soc/actions/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_OWL_PM_DOMAINS_HELPER) += owl-sps-helper.o
+obj-$(CONFIG_OWL_PM_DOMAINS) += owl-sps.o
diff --git a/drivers/soc/actions/owl-sps-helper.c b/drivers/soc/actions/owl-sps-helper.c
new file mode 100644
index 000000000..9d7a2c2b4
--- /dev/null
+++ b/drivers/soc/actions/owl-sps-helper.c
@@ -0,0 +1,51 @@
+/*
+ * Actions Semi Owl Smart Power System (SPS) shared helpers
+ *
+ * Copyright 2012 Actions Semi Inc.
+ * Author: Actions Semi, Inc.
+ *
+ * Copyright (c) 2017 Andreas Färber
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#define OWL_SPS_PG_CTL 0x0
+
+int owl_sps_set_pg(void __iomem *base, u32 pwr_mask, u32 ack_mask, bool enable)
+{
+ u32 val;
+ bool ack;
+ int timeout;
+
+ val = readl(base + OWL_SPS_PG_CTL);
+ ack = val & ack_mask;
+ if (ack == enable)
+ return 0;
+
+ if (enable)
+ val |= pwr_mask;
+ else
+ val &= ~pwr_mask;
+
+ writel(val, base + OWL_SPS_PG_CTL);
+
+ for (timeout = 5000; timeout > 0; timeout -= 50) {
+ val = readl(base + OWL_SPS_PG_CTL);
+ if ((val & ack_mask) == (enable ? ack_mask : 0))
+ break;
+ udelay(50);
+ }
+ if (timeout <= 0)
+ return -ETIMEDOUT;
+
+ udelay(10);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(owl_sps_set_pg);
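
The helper is write-then-poll: it flips the requested power-gate bit, then busy-waits up to 5 ms (in 50 us steps) for the matching ack bit, returning -ETIMEDOUT if it never arrives. A minimal caller sketch, not part of this commit; the base pointer is an assumption, and the bit positions are borrowed from the S500 VDE domain added later in this patch:

    /* Hypothetical caller: power on the domain behind pwr bit 0,
     * whose ack is reported on bit 16 (the S500 VDE layout).
     */
    #include <linux/bits.h>
    #include <linux/soc/actions/owl-sps.h>

    static int example_enable_vde(void __iomem *sps_base)
    {
            return owl_sps_set_pg(sps_base, BIT(0), BIT(16), true);
    }
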
diff --git a/drivers/soc/actions/owl-sps.c b/drivers/soc/actions/owl-sps.c
new file mode 100644
index 000000000..032921d8d
--- /dev/null
+++ b/drivers/soc/actions/owl-sps.c
@@ -0,0 +1,266 @@
+/*
+ * Actions Semi Owl Smart Power System (SPS)
+ *
+ * Copyright 2012 Actions Semi Inc.
+ * Author: Actions Semi, Inc.
+ *
+ * Copyright (c) 2017 Andreas Färber
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/pm_domain.h>
+#include <linux/soc/actions/owl-sps.h>
+#include <dt-bindings/power/owl-s500-powergate.h>
+#include <dt-bindings/power/owl-s700-powergate.h>
+
+struct owl_sps_domain_info {
+ const char *name;
+ int pwr_bit;
+ int ack_bit;
+ unsigned int genpd_flags;
+};
+
+struct owl_sps_info {
+ unsigned num_domains;
+ const struct owl_sps_domain_info *domains;
+};
+
+struct owl_sps {
+ struct device *dev;
+ const struct owl_sps_info *info;
+ void __iomem *base;
+ struct genpd_onecell_data genpd_data;
+ struct generic_pm_domain *domains[];
+};
+
+#define to_owl_pd(gpd) container_of(gpd, struct owl_sps_domain, genpd)
+
+struct owl_sps_domain {
+ struct generic_pm_domain genpd;
+ const struct owl_sps_domain_info *info;
+ struct owl_sps *sps;
+};
+
+static int owl_sps_set_power(struct owl_sps_domain *pd, bool enable)
+{
+ u32 pwr_mask, ack_mask;
+
+ ack_mask = BIT(pd->info->ack_bit);
+ pwr_mask = BIT(pd->info->pwr_bit);
+
+ return owl_sps_set_pg(pd->sps->base, pwr_mask, ack_mask, enable);
+}
+
+static int owl_sps_power_on(struct generic_pm_domain *domain)
+{
+ struct owl_sps_domain *pd = to_owl_pd(domain);
+
+ dev_dbg(pd->sps->dev, "%s power on", pd->info->name);
+
+ return owl_sps_set_power(pd, true);
+}
+
+static int owl_sps_power_off(struct generic_pm_domain *domain)
+{
+ struct owl_sps_domain *pd = to_owl_pd(domain);
+
+ dev_dbg(pd->sps->dev, "%s power off", pd->info->name);
+
+ return owl_sps_set_power(pd, false);
+}
+
+static int owl_sps_init_domain(struct owl_sps *sps, int index)
+{
+ struct owl_sps_domain *pd;
+
+ pd = devm_kzalloc(sps->dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return -ENOMEM;
+
+ pd->info = &sps->info->domains[index];
+ pd->sps = sps;
+
+ pd->genpd.name = pd->info->name;
+ pd->genpd.power_on = owl_sps_power_on;
+ pd->genpd.power_off = owl_sps_power_off;
+ pd->genpd.flags = pd->info->genpd_flags;
+ pm_genpd_init(&pd->genpd, NULL, false);
+
+ sps->genpd_data.domains[index] = &pd->genpd;
+
+ return 0;
+}
+
+static int owl_sps_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ const struct owl_sps_info *sps_info;
+ struct owl_sps *sps;
+ int i, ret;
+
+ if (!pdev->dev.of_node) {
+ dev_err(&pdev->dev, "no device node\n");
+ return -ENODEV;
+ }
+
+ match = of_match_device(pdev->dev.driver->of_match_table, &pdev->dev);
+ if (!match || !match->data) {
+ dev_err(&pdev->dev, "unknown compatible or missing data\n");
+ return -EINVAL;
+ }
+
+ sps_info = match->data;
+
+ sps = devm_kzalloc(&pdev->dev,
+ struct_size(sps, domains, sps_info->num_domains),
+ GFP_KERNEL);
+ if (!sps)
+ return -ENOMEM;
+
+ sps->base = of_io_request_and_map(pdev->dev.of_node, 0, "owl-sps");
+ if (IS_ERR(sps->base)) {
+ dev_err(&pdev->dev, "failed to map sps registers\n");
+ return PTR_ERR(sps->base);
+ }
+
+ sps->dev = &pdev->dev;
+ sps->info = sps_info;
+ sps->genpd_data.domains = sps->domains;
+ sps->genpd_data.num_domains = sps_info->num_domains;
+
+ for (i = 0; i < sps_info->num_domains; i++) {
+ ret = owl_sps_init_domain(sps, i);
+ if (ret)
+ return ret;
+ }
+
+ ret = of_genpd_add_provider_onecell(pdev->dev.of_node, &sps->genpd_data);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add provider (%d)", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct owl_sps_domain_info s500_sps_domains[] = {
+ [S500_PD_VDE] = {
+ .name = "VDE",
+ .pwr_bit = 0,
+ .ack_bit = 16,
+ },
+ [S500_PD_VCE_SI] = {
+ .name = "VCE_SI",
+ .pwr_bit = 1,
+ .ack_bit = 17,
+ },
+ [S500_PD_USB2_1] = {
+ .name = "USB2_1",
+ .pwr_bit = 2,
+ .ack_bit = 18,
+ },
+ [S500_PD_CPU2] = {
+ .name = "CPU2",
+ .pwr_bit = 5,
+ .ack_bit = 21,
+ .genpd_flags = GENPD_FLAG_ALWAYS_ON,
+ },
+ [S500_PD_CPU3] = {
+ .name = "CPU3",
+ .pwr_bit = 6,
+ .ack_bit = 22,
+ .genpd_flags = GENPD_FLAG_ALWAYS_ON,
+ },
+ [S500_PD_DMA] = {
+ .name = "DMA",
+ .pwr_bit = 8,
+ .ack_bit = 12,
+ },
+ [S500_PD_DS] = {
+ .name = "DS",
+ .pwr_bit = 9,
+ .ack_bit = 13,
+ },
+ [S500_PD_USB3] = {
+ .name = "USB3",
+ .pwr_bit = 10,
+ .ack_bit = 14,
+ },
+ [S500_PD_USB2_0] = {
+ .name = "USB2_0",
+ .pwr_bit = 11,
+ .ack_bit = 15,
+ },
+};
+
+static const struct owl_sps_info s500_sps_info = {
+ .num_domains = ARRAY_SIZE(s500_sps_domains),
+ .domains = s500_sps_domains,
+};
+
+static const struct owl_sps_domain_info s700_sps_domains[] = {
+ [S700_PD_VDE] = {
+ .name = "VDE",
+ .pwr_bit = 0,
+ },
+ [S700_PD_VCE_SI] = {
+ .name = "VCE_SI",
+ .pwr_bit = 1,
+ },
+ [S700_PD_USB2_1] = {
+ .name = "USB2_1",
+ .pwr_bit = 2,
+ },
+ [S700_PD_HDE] = {
+ .name = "HDE",
+ .pwr_bit = 7,
+ },
+ [S700_PD_DMA] = {
+ .name = "DMA",
+ .pwr_bit = 8,
+ },
+ [S700_PD_DS] = {
+ .name = "DS",
+ .pwr_bit = 9,
+ },
+ [S700_PD_USB3] = {
+ .name = "USB3",
+ .pwr_bit = 10,
+ },
+ [S700_PD_USB2_0] = {
+ .name = "USB2_0",
+ .pwr_bit = 11,
+ },
+};
+
+static const struct owl_sps_info s700_sps_info = {
+ .num_domains = ARRAY_SIZE(s700_sps_domains),
+ .domains = s700_sps_domains,
+};
+
+static const struct of_device_id owl_sps_of_matches[] = {
+ { .compatible = "actions,s500-sps", .data = &s500_sps_info },
+ { .compatible = "actions,s700-sps", .data = &s700_sps_info },
+ { }
+};
+
+static struct platform_driver owl_sps_platform_driver = {
+ .probe = owl_sps_probe,
+ .driver = {
+ .name = "owl-sps",
+ .of_match_table = owl_sps_of_matches,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init owl_sps_init(void)
+{
+ return platform_driver_register(&owl_sps_platform_driver);
+}
+postcore_initcall(owl_sps_init);
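
Because the provider is registered with of_genpd_add_provider_onecell(), a consumer names its domain by index (#power-domain-cells = 1) and is attached to the matching generic_pm_domain before its probe runs; the domain is then driven through runtime PM. A hedged consumer-side sketch (the driver and its device are assumptions for illustration):

    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>

    static int example_consumer_probe(struct platform_device *pdev)
    {
            int ret;

            /* The genpd was attached from the DT power-domains
             * property; this get powers the SPS domain on via
             * owl_sps_power_on().
             */
            pm_runtime_enable(&pdev->dev);
            ret = pm_runtime_get_sync(&pdev->dev);
            if (ret < 0) {
                    pm_runtime_put_noidle(&pdev->dev);
                    pm_runtime_disable(&pdev->dev);
                    return ret;
            }

            return 0;
    }
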
diff --git a/drivers/soc/amlogic/Kconfig b/drivers/soc/amlogic/Kconfig
new file mode 100644
index 000000000..b04f6e4ae
--- /dev/null
+++ b/drivers/soc/amlogic/Kconfig
@@ -0,0 +1,33 @@
+menu "Amlogic SoC drivers"
+
+config MESON_GX_SOCINFO
+ bool "Amlogic Meson GX SoC Information driver"
+ depends on ARCH_MESON || COMPILE_TEST
+ default ARCH_MESON
+ select SOC_BUS
+ help
+ Say yes to support decoding of Amlogic Meson GX SoC family
+ information about the type, package and version.
+
+config MESON_GX_PM_DOMAINS
+ bool "Amlogic Meson GX Power Domains driver"
+ depends on ARCH_MESON || COMPILE_TEST
+ depends on PM && OF
+ default ARCH_MESON
+ select PM_GENERIC_DOMAINS
+ select PM_GENERIC_DOMAINS_OF
+ help
+ Say yes to expose Amlogic Meson GX Power Domains as
+ Generic Power Domains.
+
+config MESON_MX_SOCINFO
+ bool "Amlogic Meson MX SoC Information driver"
+ depends on ARCH_MESON || COMPILE_TEST
+ default ARCH_MESON
+ select SOC_BUS
+ help
+ Say yes to support decoding of Amlogic Meson6, Meson8,
+ Meson8b and Meson8m2 SoC family information about the type
+ and version.
+
+endmenu
diff --git a/drivers/soc/amlogic/Makefile b/drivers/soc/amlogic/Makefile
new file mode 100644
index 000000000..8fa321893
--- /dev/null
+++ b/drivers/soc/amlogic/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_MESON_GX_SOCINFO) += meson-gx-socinfo.o
+obj-$(CONFIG_MESON_GX_PM_DOMAINS) += meson-gx-pwrc-vpu.o
+obj-$(CONFIG_MESON_MX_SOCINFO) += meson-mx-socinfo.o
diff --git a/drivers/soc/amlogic/meson-gx-pwrc-vpu.c b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
new file mode 100644
index 000000000..05421d029
--- /dev/null
+++ b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
@@ -0,0 +1,248 @@
+/*
+ * Copyright (c) 2017 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/bitfield.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/reset.h>
+#include <linux/clk.h>
+
+/* AO Offsets */
+
+#define AO_RTI_GEN_PWR_SLEEP0 (0x3a << 2)
+
+#define GEN_PWR_VPU_HDMI BIT(8)
+#define GEN_PWR_VPU_HDMI_ISO BIT(9)
+
+/* HHI Offsets */
+
+#define HHI_MEM_PD_REG0 (0x40 << 2)
+#define HHI_VPU_MEM_PD_REG0 (0x41 << 2)
+#define HHI_VPU_MEM_PD_REG1 (0x42 << 2)
+
+struct meson_gx_pwrc_vpu {
+ struct generic_pm_domain genpd;
+ struct regmap *regmap_ao;
+ struct regmap *regmap_hhi;
+ struct reset_control *rstc;
+ struct clk *vpu_clk;
+ struct clk *vapb_clk;
+};
+
+static inline
+struct meson_gx_pwrc_vpu *genpd_to_pd(struct generic_pm_domain *d)
+{
+ return container_of(d, struct meson_gx_pwrc_vpu, genpd);
+}
+
+static int meson_gx_pwrc_vpu_power_off(struct generic_pm_domain *genpd)
+{
+ struct meson_gx_pwrc_vpu *pd = genpd_to_pd(genpd);
+ int i;
+
+ regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VPU_HDMI_ISO, GEN_PWR_VPU_HDMI_ISO);
+ udelay(20);
+
+ /* Power Down Memories */
+ for (i = 0; i < 32; i += 2) {
+ regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0,
+ 0x3 << i, 0x3 << i);
+ udelay(5);
+ }
+ for (i = 0; i < 32; i += 2) {
+ regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1,
+ 0x3 << i, 0x3 << i);
+ udelay(5);
+ }
+ for (i = 8; i < 16; i++) {
+ regmap_update_bits(pd->regmap_hhi, HHI_MEM_PD_REG0,
+ BIT(i), BIT(i));
+ udelay(5);
+ }
+ udelay(20);
+
+ regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VPU_HDMI, GEN_PWR_VPU_HDMI);
+
+ msleep(20);
+
+ clk_disable_unprepare(pd->vpu_clk);
+ clk_disable_unprepare(pd->vapb_clk);
+
+ return 0;
+}
+
+static int meson_gx_pwrc_vpu_setup_clk(struct meson_gx_pwrc_vpu *pd)
+{
+ int ret;
+
+ ret = clk_prepare_enable(pd->vpu_clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(pd->vapb_clk);
+ if (ret)
+ clk_disable_unprepare(pd->vpu_clk);
+
+ return ret;
+}
+
+static int meson_gx_pwrc_vpu_power_on(struct generic_pm_domain *genpd)
+{
+ struct meson_gx_pwrc_vpu *pd = genpd_to_pd(genpd);
+ int ret;
+ int i;
+
+ regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VPU_HDMI, 0);
+ udelay(20);
+
+ /* Power Up Memories */
+ for (i = 0; i < 32; i += 2) {
+ regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0,
+ 0x3 << i, 0);
+ udelay(5);
+ }
+
+ for (i = 0; i < 32; i += 2) {
+ regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1,
+ 0x3 << i, 0);
+ udelay(5);
+ }
+
+ for (i = 8; i < 16; i++) {
+ regmap_update_bits(pd->regmap_hhi, HHI_MEM_PD_REG0,
+ BIT(i), 0);
+ udelay(5);
+ }
+ udelay(20);
+
+ ret = reset_control_assert(pd->rstc);
+ if (ret)
+ return ret;
+
+ regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VPU_HDMI_ISO, 0);
+
+ ret = reset_control_deassert(pd->rstc);
+ if (ret)
+ return ret;
+
+ ret = meson_gx_pwrc_vpu_setup_clk(pd);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static bool meson_gx_pwrc_vpu_get_power(struct meson_gx_pwrc_vpu *pd)
+{
+ u32 reg;
+
+ regmap_read(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, &reg);
+
+ return (reg & GEN_PWR_VPU_HDMI);
+}
+
+static struct meson_gx_pwrc_vpu vpu_hdmi_pd = {
+ .genpd = {
+ .name = "vpu_hdmi",
+ .power_off = meson_gx_pwrc_vpu_power_off,
+ .power_on = meson_gx_pwrc_vpu_power_on,
+ },
+};
+
+static int meson_gx_pwrc_vpu_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap_ao, *regmap_hhi;
+ struct reset_control *rstc;
+ struct clk *vpu_clk;
+ struct clk *vapb_clk;
+ bool powered_off;
+ int ret;
+
+ regmap_ao = syscon_node_to_regmap(of_get_parent(pdev->dev.of_node));
+ if (IS_ERR(regmap_ao)) {
+ dev_err(&pdev->dev, "failed to get regmap\n");
+ return PTR_ERR(regmap_ao);
+ }
+
+ regmap_hhi = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "amlogic,hhi-sysctrl");
+ if (IS_ERR(regmap_hhi)) {
+ dev_err(&pdev->dev, "failed to get HHI regmap\n");
+ return PTR_ERR(regmap_hhi);
+ }
+
+ rstc = devm_reset_control_array_get(&pdev->dev, false, false);
+ if (IS_ERR(rstc)) {
+ if (PTR_ERR(rstc) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to get reset lines\n");
+ return PTR_ERR(rstc);
+ }
+
+ vpu_clk = devm_clk_get(&pdev->dev, "vpu");
+ if (IS_ERR(vpu_clk)) {
+ dev_err(&pdev->dev, "vpu clock request failed\n");
+ return PTR_ERR(vpu_clk);
+ }
+
+ vapb_clk = devm_clk_get(&pdev->dev, "vapb");
+ if (IS_ERR(vapb_clk)) {
+ dev_err(&pdev->dev, "vapb clock request failed\n");
+ return PTR_ERR(vapb_clk);
+ }
+
+ vpu_hdmi_pd.regmap_ao = regmap_ao;
+ vpu_hdmi_pd.regmap_hhi = regmap_hhi;
+ vpu_hdmi_pd.rstc = rstc;
+ vpu_hdmi_pd.vpu_clk = vpu_clk;
+ vpu_hdmi_pd.vapb_clk = vapb_clk;
+
+ powered_off = meson_gx_pwrc_vpu_get_power(&vpu_hdmi_pd);
+
+ /* If already powered, sync the clock states */
+ if (!powered_off) {
+ ret = meson_gx_pwrc_vpu_setup_clk(&vpu_hdmi_pd);
+ if (ret)
+ return ret;
+ }
+
+ pm_genpd_init(&vpu_hdmi_pd.genpd, &pm_domain_always_on_gov,
+ powered_off);
+
+ return of_genpd_add_provider_simple(pdev->dev.of_node,
+ &vpu_hdmi_pd.genpd);
+}
+
+static void meson_gx_pwrc_vpu_shutdown(struct platform_device *pdev)
+{
+ bool powered_off;
+
+ powered_off = meson_gx_pwrc_vpu_get_power(&vpu_hdmi_pd);
+ if (!powered_off)
+ meson_gx_pwrc_vpu_power_off(&vpu_hdmi_pd.genpd);
+}
+
+static const struct of_device_id meson_gx_pwrc_vpu_match_table[] = {
+ { .compatible = "amlogic,meson-gx-pwrc-vpu" },
+ { /* sentinel */ }
+};
+
+static struct platform_driver meson_gx_pwrc_vpu_driver = {
+ .probe = meson_gx_pwrc_vpu_probe,
+ .shutdown = meson_gx_pwrc_vpu_shutdown,
+ .driver = {
+ .name = "meson_gx_pwrc_vpu",
+ .of_match_table = meson_gx_pwrc_vpu_match_table,
+ },
+};
+builtin_platform_driver(meson_gx_pwrc_vpu_driver);
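
Every step in the power sequences above is a masked read-modify-write through regmap: regmap_update_bits(map, reg, mask, val) rewrites only the bits covered by mask. A standalone sketch of the idiom, reusing the register and bit names from this file (the regmap handle is an assumption):

    #include <linux/regmap.h>

    static void example_toggle_vpu_isolation(struct regmap *regmap_ao)
    {
            /* Set the isolation bit: mask and value are both BIT(9). */
            regmap_update_bits(regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
                               GEN_PWR_VPU_HDMI_ISO, GEN_PWR_VPU_HDMI_ISO);

            /* Clear it again: same mask, value 0. */
            regmap_update_bits(regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
                               GEN_PWR_VPU_HDMI_ISO, 0);
    }
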
diff --git a/drivers/soc/amlogic/meson-gx-socinfo.c b/drivers/soc/amlogic/meson-gx-socinfo.c
new file mode 100644
index 000000000..1ae339f5e
--- /dev/null
+++ b/drivers/soc/amlogic/meson-gx-socinfo.c
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2017 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+#include <linux/bitfield.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+#define AO_SEC_SD_CFG8 0xe0
+#define AO_SEC_SOCINFO_OFFSET AO_SEC_SD_CFG8
+
+#define SOCINFO_MAJOR GENMASK(31, 24)
+#define SOCINFO_PACK GENMASK(23, 16)
+#define SOCINFO_MINOR GENMASK(15, 8)
+#define SOCINFO_MISC GENMASK(7, 0)
+
+static const struct meson_gx_soc_id {
+ const char *name;
+ unsigned int id;
+} soc_ids[] = {
+ { "GXBB", 0x1f },
+ { "GXTVBB", 0x20 },
+ { "GXL", 0x21 },
+ { "GXM", 0x22 },
+ { "TXL", 0x23 },
+ { "TXLX", 0x24 },
+ { "AXG", 0x25 },
+ { "GXLX", 0x26 },
+ { "TXHD", 0x27 },
+};
+
+static const struct meson_gx_package_id {
+ const char *name;
+ unsigned int major_id;
+ unsigned int pack_id;
+ unsigned int pack_mask;
+} soc_packages[] = {
+ { "S905", 0x1f, 0, 0x20 }, /* pack_id != 0x20 */
+ { "S905H", 0x1f, 0x3, 0xf }, /* pack_id & 0xf == 0x3 */
+ { "S905M", 0x1f, 0x20, 0xf0 }, /* pack_id == 0x20 */
+ { "S905D", 0x21, 0, 0xf0 },
+ { "S905X", 0x21, 0x80, 0xf0 },
+ { "S905W", 0x21, 0xa0, 0xf0 },
+ { "S905L", 0x21, 0xc0, 0xf0 },
+ { "S905M2", 0x21, 0xe0, 0xf0 },
+ { "S912", 0x22, 0, 0x0 }, /* Only S912 is known for GXM */
+ { "962X", 0x24, 0x10, 0xf0 },
+ { "962E", 0x24, 0x20, 0xf0 },
+ { "A113X", 0x25, 0x37, 0xff },
+ { "A113D", 0x25, 0x22, 0xff },
+};
+
+static inline unsigned int socinfo_to_major(u32 socinfo)
+{
+ return FIELD_GET(SOCINFO_MAJOR, socinfo);
+}
+
+static inline unsigned int socinfo_to_minor(u32 socinfo)
+{
+ return FIELD_GET(SOCINFO_MINOR, socinfo);
+}
+
+static inline unsigned int socinfo_to_pack(u32 socinfo)
+{
+ return FIELD_GET(SOCINFO_PACK, socinfo);
+}
+
+static inline unsigned int socinfo_to_misc(u32 socinfo)
+{
+ return FIELD_GET(SOCINFO_MISC, socinfo);
+}
+
+static const char *socinfo_to_package_id(u32 socinfo)
+{
+ unsigned int pack = socinfo_to_pack(socinfo);
+ unsigned int major = socinfo_to_major(socinfo);
+ int i;
+
+ for (i = 0 ; i < ARRAY_SIZE(soc_packages) ; ++i) {
+ if (soc_packages[i].major_id == major &&
+ soc_packages[i].pack_id ==
+ (pack & soc_packages[i].pack_mask))
+ return soc_packages[i].name;
+ }
+
+ return "Unknown";
+}
+
+static const char *socinfo_to_soc_id(u32 socinfo)
+{
+ unsigned int id = socinfo_to_major(socinfo);
+ int i;
+
+ for (i = 0 ; i < ARRAY_SIZE(soc_ids) ; ++i) {
+ if (soc_ids[i].id == id)
+ return soc_ids[i].name;
+ }
+
+ return "Unknown";
+}
+
+static int __init meson_gx_socinfo_init(void)
+{
+ struct soc_device_attribute *soc_dev_attr;
+ struct soc_device *soc_dev;
+ struct device_node *np;
+ struct regmap *regmap;
+ unsigned int socinfo;
+ struct device *dev;
+ int ret;
+
+ /* look up the chipid node */
+ np = of_find_compatible_node(NULL, NULL, "amlogic,meson-gx-ao-secure");
+ if (!np)
+ return -ENODEV;
+
+ /* check if interface is enabled */
+ if (!of_device_is_available(np))
+ return -ENODEV;
+
+ /* check if chip-id is available */
+ if (!of_property_read_bool(np, "amlogic,has-chip-id"))
+ return -ENODEV;
+
+ /* node should be a syscon */
+ regmap = syscon_node_to_regmap(np);
+ of_node_put(np);
+ if (IS_ERR(regmap)) {
+ pr_err("%s: failed to get regmap\n", __func__);
+ return -ENODEV;
+ }
+
+ ret = regmap_read(regmap, AO_SEC_SOCINFO_OFFSET, &socinfo);
+ if (ret < 0)
+ return ret;
+
+ if (!socinfo) {
+ pr_err("%s: invalid chipid value\n", __func__);
+ return -EINVAL;
+ }
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENODEV;
+
+ soc_dev_attr->family = "Amlogic Meson";
+
+ np = of_find_node_by_path("/");
+ of_property_read_string(np, "model", &soc_dev_attr->machine);
+ of_node_put(np);
+
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%x:%x - %x:%x",
+ socinfo_to_major(socinfo),
+ socinfo_to_minor(socinfo),
+ socinfo_to_pack(socinfo),
+ socinfo_to_misc(socinfo));
+ soc_dev_attr->soc_id = kasprintf(GFP_KERNEL, "%s (%s)",
+ socinfo_to_soc_id(socinfo),
+ socinfo_to_package_id(socinfo));
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ kfree(soc_dev_attr->revision);
+ kfree_const(soc_dev_attr->soc_id);
+ kfree(soc_dev_attr);
+ return PTR_ERR(soc_dev);
+ }
+ dev = soc_device_to_device(soc_dev);
+
+ dev_info(dev, "Amlogic Meson %s Revision %x:%x (%x:%x) Detected\n",
+ soc_dev_attr->soc_id,
+ socinfo_to_major(socinfo),
+ socinfo_to_minor(socinfo),
+ socinfo_to_pack(socinfo),
+ socinfo_to_misc(socinfo));
+
+ return 0;
+}
+device_initcall(meson_gx_socinfo_init);
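
The 32-bit socinfo word packs four byte-wide fields (major, package, minor, misc) that the helpers above pull apart with FIELD_GET. A worked decode, reusing this file's helpers on a hypothetical sample value:

    static void example_decode_socinfo(void)
    {
            u32 socinfo = 0x21800b02;       /* hypothetical sample */

            pr_info("major 0x%x\n", socinfo_to_major(socinfo)); /* 0x21 -> "GXL"   */
            pr_info("pack  0x%x\n", socinfo_to_pack(socinfo));  /* 0x80 -> "S905X" */
            pr_info("minor 0x%x\n", socinfo_to_minor(socinfo)); /* 0x0b */
            pr_info("misc  0x%x\n", socinfo_to_misc(socinfo));  /* 0x02 */
    }
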
diff --git a/drivers/soc/amlogic/meson-mx-socinfo.c b/drivers/soc/amlogic/meson-mx-socinfo.c
new file mode 100644
index 000000000..78f0f1aec
--- /dev/null
+++ b/drivers/soc/amlogic/meson-mx-socinfo.c
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2017 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+#include <linux/bitfield.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+#define MESON_SOCINFO_MAJOR_VER_MESON6 0x16
+#define MESON_SOCINFO_MAJOR_VER_MESON8 0x19
+#define MESON_SOCINFO_MAJOR_VER_MESON8B 0x1b
+
+#define MESON_MX_ASSIST_HW_REV 0x14c
+
+#define MESON_MX_ANALOG_TOP_METAL_REVISION 0x0
+
+#define MESON_MX_BOOTROM_MISC_VER 0x4
+
+static const char *meson_mx_socinfo_revision(unsigned int major_ver,
+ unsigned int misc_ver,
+ unsigned int metal_rev)
+{
+ unsigned int minor_ver;
+
+ switch (major_ver) {
+ case MESON_SOCINFO_MAJOR_VER_MESON6:
+ minor_ver = 0xa;
+ break;
+
+ case MESON_SOCINFO_MAJOR_VER_MESON8:
+ if (metal_rev == 0x11111112)
+ major_ver = 0x1d;
+
+ if (metal_rev == 0x11111111 || metal_rev == 0x11111112)
+ minor_ver = 0xa;
+ else if (metal_rev == 0x11111113)
+ minor_ver = 0xb;
+ else if (metal_rev == 0x11111133)
+ minor_ver = 0xc;
+ else
+ minor_ver = 0xd;
+
+ break;
+
+ case MESON_SOCINFO_MAJOR_VER_MESON8B:
+ if (metal_rev == 0x11111111)
+ minor_ver = 0xa;
+ else
+ minor_ver = 0xb;
+
+ break;
+
+ default:
+ minor_ver = 0x0;
+ break;
+ }
+
+ return kasprintf(GFP_KERNEL, "Rev%X (%x - 0:%X)", minor_ver, major_ver,
+ misc_ver);
+}
+
+static const char *meson_mx_socinfo_soc_id(unsigned int major_ver,
+ unsigned int metal_rev)
+{
+ const char *soc_id;
+
+ switch (major_ver) {
+ case MESON_SOCINFO_MAJOR_VER_MESON6:
+ soc_id = "Meson6 (AML8726-MX)";
+ break;
+
+ case MESON_SOCINFO_MAJOR_VER_MESON8:
+ if (metal_rev == 0x11111112)
+ soc_id = "Meson8m2 (S812)";
+ else
+ soc_id = "Meson8 (S802)";
+
+ break;
+
+ case MESON_SOCINFO_MAJOR_VER_MESON8B:
+ soc_id = "Meson8b (S805)";
+ break;
+
+ default:
+ soc_id = "Unknown";
+ break;
+ }
+
+ return kstrdup_const(soc_id, GFP_KERNEL);
+}
+
+static const struct of_device_id meson_mx_socinfo_analog_top_ids[] = {
+ { .compatible = "amlogic,meson8-analog-top", },
+ { .compatible = "amlogic,meson8b-analog-top", },
+ { /* sentinel */ }
+};
+
+static int __init meson_mx_socinfo_init(void)
+{
+ struct soc_device_attribute *soc_dev_attr;
+ struct soc_device *soc_dev;
+ struct device_node *np;
+ struct regmap *assist_regmap, *bootrom_regmap, *analog_top_regmap;
+ unsigned int major_ver, misc_ver, metal_rev = 0;
+ int ret;
+
+ assist_regmap =
+ syscon_regmap_lookup_by_compatible("amlogic,meson-mx-assist");
+ if (IS_ERR(assist_regmap))
+ return PTR_ERR(assist_regmap);
+
+ bootrom_regmap =
+ syscon_regmap_lookup_by_compatible("amlogic,meson-mx-bootrom");
+ if (IS_ERR(bootrom_regmap))
+ return PTR_ERR(bootrom_regmap);
+
+ np = of_find_matching_node(NULL, meson_mx_socinfo_analog_top_ids);
+ if (np) {
+ analog_top_regmap = syscon_node_to_regmap(np);
+ if (IS_ERR(analog_top_regmap))
+ return PTR_ERR(analog_top_regmap);
+
+ ret = regmap_read(analog_top_regmap,
+ MESON_MX_ANALOG_TOP_METAL_REVISION,
+ &metal_rev);
+ if (ret)
+ return ret;
+ }
+
+ ret = regmap_read(assist_regmap, MESON_MX_ASSIST_HW_REV, &major_ver);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(bootrom_regmap, MESON_MX_BOOTROM_MISC_VER,
+ &misc_ver);
+ if (ret < 0)
+ return ret;
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENODEV;
+
+ soc_dev_attr->family = "Amlogic Meson";
+
+ np = of_find_node_by_path("/");
+ of_property_read_string(np, "model", &soc_dev_attr->machine);
+ of_node_put(np);
+
+ soc_dev_attr->revision = meson_mx_socinfo_revision(major_ver, misc_ver,
+ metal_rev);
+ soc_dev_attr->soc_id = meson_mx_socinfo_soc_id(major_ver, metal_rev);
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ kfree_const(soc_dev_attr->revision);
+ kfree_const(soc_dev_attr->soc_id);
+ kfree(soc_dev_attr);
+ return PTR_ERR(soc_dev);
+ }
+
+ dev_info(soc_device_to_device(soc_dev), "Amlogic %s %s detected\n",
+ soc_dev_attr->soc_id, soc_dev_attr->revision);
+
+ return 0;
+}
+device_initcall(meson_mx_socinfo_init);
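
The metal revision is what disambiguates dies sharing a major version. A worked sketch: a chip reading major 0x19 (Meson8) with metal_rev 0x11111112 is really a Meson8m2, so the major is rewritten to 0x1d and the minor resolves to 0xa. The misc value below is an assumption, and the sketch leaks the allocated strings, which a real caller would keep or free:

    static void example_identify_meson8m2(void)
    {
            unsigned int major_ver = 0x19, misc_ver = 0x0;  /* assumed reads */
            unsigned int metal_rev = 0x11111112;

            pr_info("%s %s\n",
                    meson_mx_socinfo_soc_id(major_ver, metal_rev),
                    meson_mx_socinfo_revision(major_ver, misc_ver,
                                              metal_rev));
            /* prints: Meson8m2 (S812) RevA (1d - 0:0) */
    }
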
diff --git a/drivers/soc/atmel/Kconfig b/drivers/soc/atmel/Kconfig
new file mode 100644
index 000000000..6242ebb41
--- /dev/null
+++ b/drivers/soc/atmel/Kconfig
@@ -0,0 +1,6 @@
+config AT91_SOC_ID
+ bool "SoC bus for Atmel ARM SoCs"
+ depends on ARCH_AT91 || COMPILE_TEST
+ default ARCH_AT91
+ help
+ Include support for the SoC bus on the Atmel ARM SoCs.
diff --git a/drivers/soc/atmel/Makefile b/drivers/soc/atmel/Makefile
new file mode 100644
index 000000000..2d92f32e4
--- /dev/null
+++ b/drivers/soc/atmel/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_AT91_SOC_ID) += soc.o
diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c
new file mode 100644
index 000000000..76117405d
--- /dev/null
+++ b/drivers/soc/atmel/soc.c
@@ -0,0 +1,276 @@
+/*
+ * Copyright (C) 2015 Atmel
+ *
+ * Alexandre Belloni <alexandre.belloni@free-electrons.com>
+ * Boris Brezillon <boris.brezillon@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ */
+
+#define pr_fmt(fmt) "AT91: " fmt
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+
+#include "soc.h"
+
+#define AT91_DBGU_CIDR 0x40
+#define AT91_DBGU_EXID 0x44
+#define AT91_CHIPID_CIDR 0x00
+#define AT91_CHIPID_EXID 0x04
+#define AT91_CIDR_VERSION(x) ((x) & 0x1f)
+#define AT91_CIDR_EXT BIT(31)
+#define AT91_CIDR_MATCH_MASK 0x7fffffe0
+
+static const struct at91_soc __initconst socs[] = {
+#ifdef CONFIG_SOC_AT91RM9200
+ AT91_SOC(AT91RM9200_CIDR_MATCH, 0, "at91rm9200 BGA", "at91rm9200"),
+#endif
+#ifdef CONFIG_SOC_AT91SAM9
+ AT91_SOC(AT91SAM9260_CIDR_MATCH, 0, "at91sam9260", NULL),
+ AT91_SOC(AT91SAM9261_CIDR_MATCH, 0, "at91sam9261", NULL),
+ AT91_SOC(AT91SAM9263_CIDR_MATCH, 0, "at91sam9263", NULL),
+ AT91_SOC(AT91SAM9G20_CIDR_MATCH, 0, "at91sam9g20", NULL),
+ AT91_SOC(AT91SAM9RL64_CIDR_MATCH, 0, "at91sam9rl64", NULL),
+ AT91_SOC(AT91SAM9G45_CIDR_MATCH, AT91SAM9M11_EXID_MATCH,
+ "at91sam9m11", "at91sam9g45"),
+ AT91_SOC(AT91SAM9G45_CIDR_MATCH, AT91SAM9M10_EXID_MATCH,
+ "at91sam9m10", "at91sam9g45"),
+ AT91_SOC(AT91SAM9G45_CIDR_MATCH, AT91SAM9G46_EXID_MATCH,
+ "at91sam9g46", "at91sam9g45"),
+ AT91_SOC(AT91SAM9G45_CIDR_MATCH, AT91SAM9G45_EXID_MATCH,
+ "at91sam9g45", "at91sam9g45"),
+ AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91SAM9G15_EXID_MATCH,
+ "at91sam9g15", "at91sam9x5"),
+ AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91SAM9G35_EXID_MATCH,
+ "at91sam9g35", "at91sam9x5"),
+ AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91SAM9X35_EXID_MATCH,
+ "at91sam9x35", "at91sam9x5"),
+ AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91SAM9G25_EXID_MATCH,
+ "at91sam9g25", "at91sam9x5"),
+ AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91SAM9X25_EXID_MATCH,
+ "at91sam9x25", "at91sam9x5"),
+ AT91_SOC(AT91SAM9N12_CIDR_MATCH, AT91SAM9CN12_EXID_MATCH,
+ "at91sam9cn12", "at91sam9n12"),
+ AT91_SOC(AT91SAM9N12_CIDR_MATCH, AT91SAM9N12_EXID_MATCH,
+ "at91sam9n12", "at91sam9n12"),
+ AT91_SOC(AT91SAM9N12_CIDR_MATCH, AT91SAM9CN11_EXID_MATCH,
+ "at91sam9cn11", "at91sam9n12"),
+ AT91_SOC(AT91SAM9XE128_CIDR_MATCH, 0, "at91sam9xe128", "at91sam9xe128"),
+ AT91_SOC(AT91SAM9XE256_CIDR_MATCH, 0, "at91sam9xe256", "at91sam9xe256"),
+ AT91_SOC(AT91SAM9XE512_CIDR_MATCH, 0, "at91sam9xe512", "at91sam9xe512"),
+#endif
+#ifdef CONFIG_SOC_SAMA5
+ AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D21CU_EXID_MATCH,
+ "sama5d21", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D22CU_EXID_MATCH,
+ "sama5d22", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D225C_D1M_EXID_MATCH,
+ "sama5d225c 16MiB SiP", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D23CU_EXID_MATCH,
+ "sama5d23", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D24CX_EXID_MATCH,
+ "sama5d24", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D24CU_EXID_MATCH,
+ "sama5d24", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D26CU_EXID_MATCH,
+ "sama5d26", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D27CU_EXID_MATCH,
+ "sama5d27", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D27CN_EXID_MATCH,
+ "sama5d27", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D27C_D1G_EXID_MATCH,
+ "sama5d27c 128MiB SiP", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D27C_D5M_EXID_MATCH,
+ "sama5d27c 64MiB SiP", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D28CU_EXID_MATCH,
+ "sama5d28", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D28CN_EXID_MATCH,
+ "sama5d28", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D28C_D1G_EXID_MATCH,
+ "sama5d28c 128MiB SiP", "sama5d2"),
+ AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D31_EXID_MATCH,
+ "sama5d31", "sama5d3"),
+ AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D33_EXID_MATCH,
+ "sama5d33", "sama5d3"),
+ AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D34_EXID_MATCH,
+ "sama5d34", "sama5d3"),
+ AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D35_EXID_MATCH,
+ "sama5d35", "sama5d3"),
+ AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D36_EXID_MATCH,
+ "sama5d36", "sama5d3"),
+ AT91_SOC(SAMA5D4_CIDR_MATCH, SAMA5D41_EXID_MATCH,
+ "sama5d41", "sama5d4"),
+ AT91_SOC(SAMA5D4_CIDR_MATCH, SAMA5D42_EXID_MATCH,
+ "sama5d42", "sama5d4"),
+ AT91_SOC(SAMA5D4_CIDR_MATCH, SAMA5D43_EXID_MATCH,
+ "sama5d43", "sama5d4"),
+ AT91_SOC(SAMA5D4_CIDR_MATCH, SAMA5D44_EXID_MATCH,
+ "sama5d44", "sama5d4"),
+#endif
+#ifdef CONFIG_SOC_SAMV7
+ AT91_SOC(SAME70Q21_CIDR_MATCH, SAME70Q21_EXID_MATCH,
+ "same70q21", "same7"),
+ AT91_SOC(SAME70Q20_CIDR_MATCH, SAME70Q20_EXID_MATCH,
+ "same70q20", "same7"),
+ AT91_SOC(SAME70Q19_CIDR_MATCH, SAME70Q19_EXID_MATCH,
+ "same70q19", "same7"),
+ AT91_SOC(SAMS70Q21_CIDR_MATCH, SAMS70Q21_EXID_MATCH,
+ "sams70q21", "sams7"),
+ AT91_SOC(SAMS70Q20_CIDR_MATCH, SAMS70Q20_EXID_MATCH,
+ "sams70q20", "sams7"),
+ AT91_SOC(SAMS70Q19_CIDR_MATCH, SAMS70Q19_EXID_MATCH,
+ "sams70q19", "sams7"),
+ AT91_SOC(SAMV71Q21_CIDR_MATCH, SAMV71Q21_EXID_MATCH,
+ "samv71q21", "samv7"),
+ AT91_SOC(SAMV71Q20_CIDR_MATCH, SAMV71Q20_EXID_MATCH,
+ "samv71q20", "samv7"),
+ AT91_SOC(SAMV71Q19_CIDR_MATCH, SAMV71Q19_EXID_MATCH,
+ "samv71q19", "samv7"),
+ AT91_SOC(SAMV70Q20_CIDR_MATCH, SAMV70Q20_EXID_MATCH,
+ "samv70q20", "samv7"),
+ AT91_SOC(SAMV70Q19_CIDR_MATCH, SAMV70Q19_EXID_MATCH,
+ "samv70q19", "samv7"),
+#endif
+ { /* sentinel */ },
+};
+
+static int __init at91_get_cidr_exid_from_dbgu(u32 *cidr, u32 *exid)
+{
+ struct device_node *np;
+ void __iomem *regs;
+
+ np = of_find_compatible_node(NULL, NULL, "atmel,at91rm9200-dbgu");
+ if (!np)
+ np = of_find_compatible_node(NULL, NULL,
+ "atmel,at91sam9260-dbgu");
+ if (!np)
+ return -ENODEV;
+
+ regs = of_iomap(np, 0);
+ of_node_put(np);
+
+ if (!regs) {
+ pr_warn("Could not map DBGU iomem range");
+ return -ENXIO;
+ }
+
+ *cidr = readl(regs + AT91_DBGU_CIDR);
+ *exid = readl(regs + AT91_DBGU_EXID);
+
+ iounmap(regs);
+
+ return 0;
+}
+
+static int __init at91_get_cidr_exid_from_chipid(u32 *cidr, u32 *exid)
+{
+ struct device_node *np;
+ void __iomem *regs;
+
+ np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-chipid");
+ if (!np)
+ return -ENODEV;
+
+ regs = of_iomap(np, 0);
+ of_node_put(np);
+
+ if (!regs) {
+ pr_warn("Could not map DBGU iomem range");
+ return -ENXIO;
+ }
+
+ *cidr = readl(regs + AT91_CHIPID_CIDR);
+ *exid = readl(regs + AT91_CHIPID_EXID);
+
+ iounmap(regs);
+
+ return 0;
+}
+
+struct soc_device * __init at91_soc_init(const struct at91_soc *socs)
+{
+ struct soc_device_attribute *soc_dev_attr;
+ const struct at91_soc *soc;
+ struct soc_device *soc_dev;
+ u32 cidr, exid;
+ int ret;
+
+ /*
+ * With SAMA5D2 and later SoCs, the CIDR and EXID registers are no
+ * longer in the dbgu device but in the chipid device, whose only
+ * purpose is to expose these two registers.
+ */
+ ret = at91_get_cidr_exid_from_dbgu(&cidr, &exid);
+ if (ret)
+ ret = at91_get_cidr_exid_from_chipid(&cidr, &exid);
+ if (ret) {
+ if (ret == -ENODEV)
+ pr_warn("Could not find identification node");
+ return NULL;
+ }
+
+ for (soc = socs; soc->name; soc++) {
+ if (soc->cidr_match != (cidr & AT91_CIDR_MATCH_MASK))
+ continue;
+
+ if (!(cidr & AT91_CIDR_EXT) || soc->exid_match == exid)
+ break;
+ }
+
+ if (!soc->name) {
+ pr_warn("Could not find matching SoC description\n");
+ return NULL;
+ }
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return NULL;
+
+ soc_dev_attr->family = soc->family;
+ soc_dev_attr->soc_id = soc->name;
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%X",
+ AT91_CIDR_VERSION(cidr));
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ kfree(soc_dev_attr->revision);
+ kfree(soc_dev_attr);
+ pr_warn("Could not register SoC device\n");
+ return NULL;
+ }
+
+ if (soc->family)
+ pr_info("Detected SoC family: %s\n", soc->family);
+ pr_info("Detected SoC: %s, revision %X\n", soc->name,
+ AT91_CIDR_VERSION(cidr));
+
+ return soc_dev;
+}
+
+static const struct of_device_id at91_soc_allowed_list[] __initconst = {
+ { .compatible = "atmel,at91rm9200", },
+ { .compatible = "atmel,at91sam9", },
+ { .compatible = "atmel,sama5", },
+ { .compatible = "atmel,samv7", },
+ { }
+};
+
+static int __init atmel_soc_device_init(void)
+{
+ struct device_node *np = of_find_node_by_path("/");
+
+ if (!of_match_node(at91_soc_allowed_list, np))
+ return 0;
+
+ at91_soc_init(socs);
+
+ return 0;
+}
+subsys_initcall(atmel_soc_device_init);
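
Matching masks off the 5-bit version field (and the EXT flag) before comparing CIDRs, so every silicon revision of a part lands on the same table entry; the EXID is consulted only when the EXT bit is set. A worked sketch against an assumed raw CIDR read:

    static bool example_is_sama5d2(void)
    {
            u32 cidr = 0x8a5c08c3;  /* assumed raw CIDR: EXT set, version 3 */

            /* 0x8a5c08c3 & 0x7fffffe0 == 0x0a5c08c0 == SAMA5D2_CIDR_MATCH;
             * AT91_CIDR_VERSION(cidr) == 0x3. Since AT91_CIDR_EXT is set,
             * the EXID would also have to match to pick a sama5d2x entry.
             */
            return (cidr & AT91_CIDR_MATCH_MASK) == SAMA5D2_CIDR_MATCH;
    }
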
diff --git a/drivers/soc/atmel/soc.h b/drivers/soc/atmel/soc.h
new file mode 100644
index 000000000..94cd5d1ab
--- /dev/null
+++ b/drivers/soc/atmel/soc.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2015 Atmel
+ *
+ * Boris Brezillon <boris.brezillon@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ */
+
+#ifndef __AT91_SOC_H
+#define __AT91_SOC_H
+
+#include <linux/sys_soc.h>
+
+struct at91_soc {
+ u32 cidr_match;
+ u32 exid_match;
+ const char *name;
+ const char *family;
+};
+
+#define AT91_SOC(__cidr, __exid, __name, __family) \
+ { \
+ .cidr_match = (__cidr), \
+ .exid_match = (__exid), \
+ .name = (__name), \
+ .family = (__family), \
+ }
+
+struct soc_device * __init
+at91_soc_init(const struct at91_soc *socs);
+
+#define AT91RM9200_CIDR_MATCH 0x09290780
+
+#define AT91SAM9260_CIDR_MATCH 0x019803a0
+#define AT91SAM9261_CIDR_MATCH 0x019703a0
+#define AT91SAM9263_CIDR_MATCH 0x019607a0
+#define AT91SAM9G20_CIDR_MATCH 0x019905a0
+#define AT91SAM9RL64_CIDR_MATCH 0x019b03a0
+#define AT91SAM9G45_CIDR_MATCH 0x019b05a0
+#define AT91SAM9X5_CIDR_MATCH 0x019a05a0
+#define AT91SAM9N12_CIDR_MATCH 0x019a07a0
+
+#define AT91SAM9M11_EXID_MATCH 0x00000001
+#define AT91SAM9M10_EXID_MATCH 0x00000002
+#define AT91SAM9G46_EXID_MATCH 0x00000003
+#define AT91SAM9G45_EXID_MATCH 0x00000004
+
+#define AT91SAM9G15_EXID_MATCH 0x00000000
+#define AT91SAM9G35_EXID_MATCH 0x00000001
+#define AT91SAM9X35_EXID_MATCH 0x00000002
+#define AT91SAM9G25_EXID_MATCH 0x00000003
+#define AT91SAM9X25_EXID_MATCH 0x00000004
+
+#define AT91SAM9CN12_EXID_MATCH 0x00000005
+#define AT91SAM9N12_EXID_MATCH 0x00000006
+#define AT91SAM9CN11_EXID_MATCH 0x00000009
+
+#define AT91SAM9XE128_CIDR_MATCH 0x329973a0
+#define AT91SAM9XE256_CIDR_MATCH 0x329a93a0
+#define AT91SAM9XE512_CIDR_MATCH 0x329aa3a0
+
+#define SAMA5D2_CIDR_MATCH 0x0a5c08c0
+#define SAMA5D21CU_EXID_MATCH 0x0000005a
+#define SAMA5D225C_D1M_EXID_MATCH 0x00000053
+#define SAMA5D22CU_EXID_MATCH 0x00000059
+#define SAMA5D22CN_EXID_MATCH 0x00000069
+#define SAMA5D23CU_EXID_MATCH 0x00000058
+#define SAMA5D24CX_EXID_MATCH 0x00000004
+#define SAMA5D24CU_EXID_MATCH 0x00000014
+#define SAMA5D26CU_EXID_MATCH 0x00000012
+#define SAMA5D27C_D1G_EXID_MATCH 0x00000033
+#define SAMA5D27C_D5M_EXID_MATCH 0x00000032
+#define SAMA5D27CU_EXID_MATCH 0x00000011
+#define SAMA5D27CN_EXID_MATCH 0x00000021
+#define SAMA5D28C_D1G_EXID_MATCH 0x00000013
+#define SAMA5D28CU_EXID_MATCH 0x00000010
+#define SAMA5D28CN_EXID_MATCH 0x00000020
+
+#define SAMA5D3_CIDR_MATCH 0x0a5c07c0
+#define SAMA5D31_EXID_MATCH 0x00444300
+#define SAMA5D33_EXID_MATCH 0x00414300
+#define SAMA5D34_EXID_MATCH 0x00414301
+#define SAMA5D35_EXID_MATCH 0x00584300
+#define SAMA5D36_EXID_MATCH 0x00004301
+
+#define SAMA5D4_CIDR_MATCH 0x0a5c07c0
+#define SAMA5D41_EXID_MATCH 0x00000001
+#define SAMA5D42_EXID_MATCH 0x00000002
+#define SAMA5D43_EXID_MATCH 0x00000003
+#define SAMA5D44_EXID_MATCH 0x00000004
+
+#define SAME70Q21_CIDR_MATCH 0x21020e00
+#define SAME70Q21_EXID_MATCH 0x00000002
+#define SAME70Q20_CIDR_MATCH 0x21020c00
+#define SAME70Q20_EXID_MATCH 0x00000002
+#define SAME70Q19_CIDR_MATCH 0x210d0a00
+#define SAME70Q19_EXID_MATCH 0x00000002
+
+#define SAMS70Q21_CIDR_MATCH 0x21120e00
+#define SAMS70Q21_EXID_MATCH 0x00000002
+#define SAMS70Q20_CIDR_MATCH 0x21120c00
+#define SAMS70Q20_EXID_MATCH 0x00000002
+#define SAMS70Q19_CIDR_MATCH 0x211d0a00
+#define SAMS70Q19_EXID_MATCH 0x00000002
+
+#define SAMV71Q21_CIDR_MATCH 0x21220e00
+#define SAMV71Q21_EXID_MATCH 0x00000002
+#define SAMV71Q20_CIDR_MATCH 0x21220c00
+#define SAMV71Q20_EXID_MATCH 0x00000002
+#define SAMV71Q19_CIDR_MATCH 0x212d0a00
+#define SAMV71Q19_EXID_MATCH 0x00000002
+
+#define SAMV70Q20_CIDR_MATCH 0x21320c00
+#define SAMV70Q20_EXID_MATCH 0x00000002
+#define SAMV70Q19_CIDR_MATCH 0x213d0a00
+#define SAMV70Q19_EXID_MATCH 0x00000002
+
+#endif /* __AT91_SOC_H */
diff --git a/drivers/soc/bcm/Kconfig b/drivers/soc/bcm/Kconfig
new file mode 100644
index 000000000..055a845ed
--- /dev/null
+++ b/drivers/soc/bcm/Kconfig
@@ -0,0 +1,25 @@
+menu "Broadcom SoC drivers"
+
+config RASPBERRYPI_POWER
+ bool "Raspberry Pi power domain driver"
+ depends on ARCH_BCM2835 || (COMPILE_TEST && OF)
+ depends on RASPBERRYPI_FIRMWARE=y
+ select PM_GENERIC_DOMAINS if PM
+ help
+ This enables support for the RPi power domains which can be enabled
+ or disabled via the RPi firmware.
+
+config SOC_BRCMSTB
+ bool "Broadcom STB SoC drivers"
+ depends on ARM || ARM64 || BMIPS_GENERIC || COMPILE_TEST
+ select SOC_BUS
+ help
+ Enables drivers for the Broadcom Set-Top Box (STB) series of chips.
+ This option alone enables only some support code, while the drivers
+ can be enabled individually within this menu.
+
+ If unsure, say N.
+
+source "drivers/soc/bcm/brcmstb/Kconfig"
+
+endmenu
diff --git a/drivers/soc/bcm/Makefile b/drivers/soc/bcm/Makefile
new file mode 100644
index 000000000..dc4fced72
--- /dev/null
+++ b/drivers/soc/bcm/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_RASPBERRYPI_POWER) += raspberrypi-power.o
+obj-$(CONFIG_SOC_BRCMSTB) += brcmstb/
diff --git a/drivers/soc/bcm/brcmstb/Kconfig b/drivers/soc/bcm/brcmstb/Kconfig
new file mode 100644
index 000000000..d36f6e03c
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/Kconfig
@@ -0,0 +1,10 @@
+if SOC_BRCMSTB
+
+config BRCMSTB_PM
+ bool "Support suspend/resume for STB platforms"
+ default y
+ depends on PM
+ depends on ARCH_BRCMSTB || BMIPS_GENERIC
+ select ARM_CPU_SUSPEND if ARM
+
+endif # SOC_BRCMSTB
diff --git a/drivers/soc/bcm/brcmstb/Makefile b/drivers/soc/bcm/brcmstb/Makefile
new file mode 100644
index 000000000..01687c265
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/Makefile
@@ -0,0 +1,2 @@
+obj-y += common.o biuctrl.o
+obj-$(CONFIG_BRCMSTB_PM) += pm/
diff --git a/drivers/soc/bcm/brcmstb/biuctrl.c b/drivers/soc/bcm/brcmstb/biuctrl.c
new file mode 100644
index 000000000..20b63bee5
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/biuctrl.c
@@ -0,0 +1,265 @@
+/*
+ * Broadcom STB SoCs Bus Unit Interface controls
+ *
+ * Copyright (C) 2015, Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "brcmstb: " KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/syscore_ops.h>
+#include <linux/soc/brcmstb/brcmstb.h>
+
+#define CPU_CREDIT_REG_MCPx_WR_PAIRING_EN_MASK 0x70000000
+#define CPU_CREDIT_REG_MCPx_READ_CRED_MASK 0xf
+#define CPU_CREDIT_REG_MCPx_WRITE_CRED_MASK 0xf
+#define CPU_CREDIT_REG_MCPx_READ_CRED_SHIFT(x) ((x) * 8)
+#define CPU_CREDIT_REG_MCPx_WRITE_CRED_SHIFT(x) (((x) * 8) + 4)
+
+#define CPU_MCP_FLOW_REG_MCPx_RDBUFF_CRED_SHIFT(x) ((x) * 8)
+#define CPU_MCP_FLOW_REG_MCPx_RDBUFF_CRED_MASK 0xff
+
+#define CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_THRESHOLD_MASK 0xf
+#define CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_TIMEOUT_MASK 0xf
+#define CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_TIMEOUT_SHIFT 4
+#define CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_ENABLE BIT(8)
+
+static void __iomem *cpubiuctrl_base;
+static bool mcp_wr_pairing_en;
+static const int *cpubiuctrl_regs;
+
+static inline u32 cbc_readl(int reg)
+{
+ int offset = cpubiuctrl_regs[reg];
+
+ if (offset == -1)
+ return (u32)-1;
+
+ return readl_relaxed(cpubiuctrl_base + offset);
+}
+
+static inline void cbc_writel(u32 val, int reg)
+{
+ int offset = cpubiuctrl_regs[reg];
+
+ if (offset == -1)
+ return;
+
+ writel(val, cpubiuctrl_base + offset);
+}
+
+enum cpubiuctrl_regs {
+ CPU_CREDIT_REG = 0,
+ CPU_MCP_FLOW_REG,
+ CPU_WRITEBACK_CTRL_REG
+};
+
+static const int b15_cpubiuctrl_regs[] = {
+ [CPU_CREDIT_REG] = 0x184,
+ [CPU_MCP_FLOW_REG] = -1,
+ [CPU_WRITEBACK_CTRL_REG] = -1,
+};
+
+/* Odd cases, e.g.: 7260 */
+static const int b53_cpubiuctrl_no_wb_regs[] = {
+ [CPU_CREDIT_REG] = 0x0b0,
+ [CPU_MCP_FLOW_REG] = 0x0b4,
+ [CPU_WRITEBACK_CTRL_REG] = -1,
+};
+
+static const int b53_cpubiuctrl_regs[] = {
+ [CPU_CREDIT_REG] = 0x0b0,
+ [CPU_MCP_FLOW_REG] = 0x0b4,
+ [CPU_WRITEBACK_CTRL_REG] = 0x22c,
+};
+
+#define NUM_CPU_BIUCTRL_REGS 3
+
+static int __init mcp_write_pairing_set(void)
+{
+ u32 creds = 0;
+
+ if (!cpubiuctrl_base)
+ return -1;
+
+ creds = cbc_readl(CPU_CREDIT_REG);
+ if (mcp_wr_pairing_en) {
+ pr_info("MCP: Enabling write pairing\n");
+ cbc_writel(creds | CPU_CREDIT_REG_MCPx_WR_PAIRING_EN_MASK,
+ CPU_CREDIT_REG);
+ } else if (creds & CPU_CREDIT_REG_MCPx_WR_PAIRING_EN_MASK) {
+ pr_info("MCP: Disabling write pairing\n");
+ cbc_writel(creds & ~CPU_CREDIT_REG_MCPx_WR_PAIRING_EN_MASK,
+ CPU_CREDIT_REG);
+ } else {
+ pr_info("MCP: Write pairing already disabled\n");
+ }
+
+ return 0;
+}
+
+static const u32 b53_mach_compat[] = {
+ 0x7268,
+ 0x7271,
+ 0x7278,
+};
+
+static void __init mcp_b53_set(void)
+{
+ unsigned int i;
+ u32 reg;
+
+ reg = brcmstb_get_family_id();
+
+ for (i = 0; i < ARRAY_SIZE(b53_mach_compat); i++) {
+ if (BRCM_ID(reg) == b53_mach_compat[i])
+ break;
+ }
+
+ if (i == ARRAY_SIZE(b53_mach_compat))
+ return;
+
+ /* Set all 3 MCP interfaces to 8 credits */
+ reg = cbc_readl(CPU_CREDIT_REG);
+ for (i = 0; i < 3; i++) {
+ reg &= ~(CPU_CREDIT_REG_MCPx_WRITE_CRED_MASK <<
+ CPU_CREDIT_REG_MCPx_WRITE_CRED_SHIFT(i));
+ reg &= ~(CPU_CREDIT_REG_MCPx_READ_CRED_MASK <<
+ CPU_CREDIT_REG_MCPx_READ_CRED_SHIFT(i));
+ reg |= 8 << CPU_CREDIT_REG_MCPx_WRITE_CRED_SHIFT(i);
+ reg |= 8 << CPU_CREDIT_REG_MCPx_READ_CRED_SHIFT(i);
+ }
+ cbc_writel(reg, CPU_CREDIT_REG);
+
+ /* Max out the number of in-flight Jwords reads on the MCP interface */
+ reg = cbc_readl(CPU_MCP_FLOW_REG);
+ for (i = 0; i < 3; i++)
+ reg |= CPU_MCP_FLOW_REG_MCPx_RDBUFF_CRED_MASK <<
+ CPU_MCP_FLOW_REG_MCPx_RDBUFF_CRED_SHIFT(i);
+ cbc_writel(reg, CPU_MCP_FLOW_REG);
+
+ /* Enable writeback throttling, set timeout to 128 cycles, 256 cycles
+ * threshold
+ */
+ reg = cbc_readl(CPU_WRITEBACK_CTRL_REG);
+ reg |= CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_ENABLE;
+ reg &= ~CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_THRESHOLD_MASK;
+ reg &= ~(CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_TIMEOUT_MASK <<
+ CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_TIMEOUT_SHIFT);
+ reg |= 8;
+ reg |= 7 << CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_TIMEOUT_SHIFT;
+ cbc_writel(reg, CPU_WRITEBACK_CTRL_REG);
+}
+
+static int __init setup_hifcpubiuctrl_regs(struct device_node *np)
+{
+ struct device_node *cpu_dn;
+ int ret = 0;
+
+ cpubiuctrl_base = of_iomap(np, 0);
+ if (!cpubiuctrl_base) {
+ pr_err("failed to remap BIU control base\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mcp_wr_pairing_en = of_property_read_bool(np, "brcm,write-pairing");
+
+ cpu_dn = of_get_cpu_node(0, NULL);
+ if (!cpu_dn) {
+ pr_err("failed to obtain CPU device node\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (of_device_is_compatible(cpu_dn, "brcm,brahma-b15"))
+ cpubiuctrl_regs = b15_cpubiuctrl_regs;
+ else if (of_device_is_compatible(cpu_dn, "brcm,brahma-b53"))
+ cpubiuctrl_regs = b53_cpubiuctrl_regs;
+ else {
+ pr_err("unsupported CPU\n");
+ ret = -EINVAL;
+ }
+ of_node_put(cpu_dn);
+
+ if (BRCM_ID(brcmstb_get_family_id()) == 0x7260)
+ cpubiuctrl_regs = b53_cpubiuctrl_no_wb_regs;
+out:
+ of_node_put(np);
+ return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static u32 cpubiuctrl_reg_save[NUM_CPU_BIUCTRL_REGS];
+
+static int brcmstb_cpu_credit_reg_suspend(void)
+{
+ unsigned int i;
+
+ if (!cpubiuctrl_base)
+ return 0;
+
+ for (i = 0; i < NUM_CPU_BIUCTRL_REGS; i++)
+ cpubiuctrl_reg_save[i] = cbc_readl(i);
+
+ return 0;
+}
+
+static void brcmstb_cpu_credit_reg_resume(void)
+{
+ unsigned int i;
+
+ if (!cpubiuctrl_base)
+ return;
+
+ for (i = 0; i < NUM_CPU_BIUCTRL_REGS; i++)
+ cbc_writel(cpubiuctrl_reg_save[i], i);
+}
+
+static struct syscore_ops brcmstb_cpu_credit_syscore_ops = {
+ .suspend = brcmstb_cpu_credit_reg_suspend,
+ .resume = brcmstb_cpu_credit_reg_resume,
+};
+#endif
+
+static int __init brcmstb_biuctrl_init(void)
+{
+ struct device_node *np;
+ int ret;
+
+ /* We might be running on a multi-platform kernel; don't make this a
+ * fatal error, just bail out early
+ */
+ np = of_find_compatible_node(NULL, NULL, "brcm,brcmstb-cpu-biu-ctrl");
+ if (!np)
+ return 0;
+
+ ret = setup_hifcpubiuctrl_regs(np);
+ if (ret)
+ return ret;
+
+ ret = mcp_write_pairing_set();
+ if (ret) {
+ pr_err("MCP: Unable to disable write pairing!\n");
+ return ret;
+ }
+
+ mcp_b53_set();
+#ifdef CONFIG_PM_SLEEP
+ register_syscore_ops(&brcmstb_cpu_credit_syscore_ops);
+#endif
+ return 0;
+}
+early_initcall(brcmstb_biuctrl_init);
diff --git a/drivers/soc/bcm/brcmstb/common.c b/drivers/soc/bcm/brcmstb/common.c
new file mode 100644
index 000000000..bf9123f72
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/common.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright © 2014 NVIDIA Corporation
+ * Copyright © 2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/soc/brcmstb/brcmstb.h>
+#include <linux/sys_soc.h>
+
+#include <soc/brcmstb/common.h>
+
+static u32 family_id;
+static u32 product_id;
+
+static const struct of_device_id brcmstb_machine_match[] = {
+ { .compatible = "brcm,brcmstb", },
+ { }
+};
+
+bool soc_is_brcmstb(void)
+{
+ const struct of_device_id *match;
+ struct device_node *root;
+
+ root = of_find_node_by_path("/");
+ if (!root)
+ return false;
+
+ match = of_match_node(brcmstb_machine_match, root);
+ of_node_put(root);
+
+ return match != NULL;
+}
+
+u32 brcmstb_get_family_id(void)
+{
+ return family_id;
+}
+EXPORT_SYMBOL(brcmstb_get_family_id);
+
+u32 brcmstb_get_product_id(void)
+{
+ return product_id;
+}
+EXPORT_SYMBOL(brcmstb_get_product_id);
+
+static const struct of_device_id sun_top_ctrl_match[] = {
+ { .compatible = "brcm,bcm7125-sun-top-ctrl", },
+ { .compatible = "brcm,bcm7346-sun-top-ctrl", },
+ { .compatible = "brcm,bcm7358-sun-top-ctrl", },
+ { .compatible = "brcm,bcm7360-sun-top-ctrl", },
+ { .compatible = "brcm,bcm7362-sun-top-ctrl", },
+ { .compatible = "brcm,bcm7420-sun-top-ctrl", },
+ { .compatible = "brcm,bcm7425-sun-top-ctrl", },
+ { .compatible = "brcm,bcm7429-sun-top-ctrl", },
+ { .compatible = "brcm,bcm7435-sun-top-ctrl", },
+ { .compatible = "brcm,brcmstb-sun-top-ctrl", },
+ { }
+};
+
+static int __init brcmstb_soc_device_early_init(void)
+{
+ struct device_node *sun_top_ctrl;
+ void __iomem *sun_top_ctrl_base;
+ int ret = 0;
+
+ /* We could be on a multi-platform kernel; don't make this fatal, but
+ * bail out early
+ */
+ sun_top_ctrl = of_find_matching_node(NULL, sun_top_ctrl_match);
+ if (!sun_top_ctrl)
+ return ret;
+
+ sun_top_ctrl_base = of_iomap(sun_top_ctrl, 0);
+ if (!sun_top_ctrl_base) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ family_id = readl(sun_top_ctrl_base);
+ product_id = readl(sun_top_ctrl_base + 0x4);
+ iounmap(sun_top_ctrl_base);
+out:
+ of_node_put(sun_top_ctrl);
+ return ret;
+}
+early_initcall(brcmstb_soc_device_early_init);
+
+static int __init brcmstb_soc_device_init(void)
+{
+ struct soc_device_attribute *soc_dev_attr;
+ struct device_node *sun_top_ctrl;
+ struct soc_device *soc_dev;
+ int ret = 0;
+
+ /* We could be on a multi-platform kernel; don't make this fatal, but
+ * bail out early
+ */
+ sun_top_ctrl = of_find_matching_node(NULL, sun_top_ctrl_match);
+ if (!sun_top_ctrl)
+ return ret;
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
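+ /*
+ * Chip IDs come in two formats: 4-digit parts (e.g. 0x7445_00xx)
+ * have a non-zero top nibble and keep the ID in the upper 16 bits,
+ * while 5-digit parts (e.g. 0x0743_71xx) keep it in the upper 24
+ * bits.
+ */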
+ soc_dev_attr->family = kasprintf(GFP_KERNEL, "%x",
+ family_id >> 28 ?
+ family_id >> 16 : family_id >> 8);
+ soc_dev_attr->soc_id = kasprintf(GFP_KERNEL, "%x",
+ product_id >> 28 ?
+ product_id >> 16 : product_id >> 8);
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%c%d",
+ ((product_id & 0xf0) >> 4) + 'A',
+ product_id & 0xf);
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ kfree(soc_dev_attr->family);
+ kfree(soc_dev_attr->soc_id);
+ kfree(soc_dev_attr->revision);
+ kfree(soc_dev_attr);
+ ret = PTR_ERR(soc_dev);
+ }
+out:
+ of_node_put(sun_top_ctrl);
+ return ret;
+}
+arch_initcall(brcmstb_soc_device_init);
diff --git a/drivers/soc/bcm/brcmstb/pm/Makefile b/drivers/soc/bcm/brcmstb/pm/Makefile
new file mode 100644
index 000000000..08bbd244e
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/pm/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_ARM) += s2-arm.o pm-arm.o
+AFLAGS_s2-arm.o := -march=armv7-a
+obj-$(CONFIG_BMIPS_GENERIC) += s2-mips.o s3-mips.o pm-mips.o
diff --git a/drivers/soc/bcm/brcmstb/pm/aon_defs.h b/drivers/soc/bcm/brcmstb/pm/aon_defs.h
new file mode 100644
index 000000000..fb936abd8
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/pm/aon_defs.h
@@ -0,0 +1,113 @@
+/*
+ * Always ON (AON) register interface between bootloader and Linux
+ *
+ * Copyright © 2014-2017 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __BRCMSTB_AON_DEFS_H__
+#define __BRCMSTB_AON_DEFS_H__
+
+#include <linux/compiler.h>
+
+/* Magic number in upper 16-bits */
+#define BRCMSTB_S3_MAGIC_MASK 0xffff0000
+#define BRCMSTB_S3_MAGIC_SHORT 0x5AFE0000
+
+enum {
+ /* Restore random key for AES memory verification (off = fixed key) */
+ S3_FLAG_LOAD_RANDKEY = (1 << 0),
+
+ /* Scratch buffer page table is present */
+ S3_FLAG_SCRATCH_BUFFER_TABLE = (1 << 1),
+
+ /* Skip all memory verification */
+ S3_FLAG_NO_MEM_VERIFY = (1 << 2),
+
+ /*
+ * Modification of this bit reserved for bootloader only.
+ * 1=PSCI started Linux, 0=Direct jump to Linux.
+ */
+ S3_FLAG_PSCI_BOOT = (1 << 3),
+
+ /*
+ * Modification of this bit reserved for bootloader only.
+ * 1=64 bit boot, 0=32 bit boot.
+ */
+ S3_FLAG_BOOTED64 = (1 << 4),
+};
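+
+/*
+ * Illustrative helper, not part of the original interface: the
+ * bootloader is expected to treat the AON_REG_MAGIC_FLAGS word as
+ * valid only when its upper 16 bits carry the magic, with the
+ * S3_FLAG_* bits in the low half. (As with the structures below,
+ * uint32_t is assumed to be provided by the includer.)
+ */
+static inline int brcmstb_s3_magic_flags_valid(uint32_t val)
+{
+ return (val & BRCMSTB_S3_MAGIC_MASK) == BRCMSTB_S3_MAGIC_SHORT;
+}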
+
+#define BRCMSTB_HASH_LEN (128 / 8) /* 128-bit hash */
+
+#define AON_REG_MAGIC_FLAGS 0x00
+#define AON_REG_CONTROL_LOW 0x04
+#define AON_REG_CONTROL_HIGH 0x08
+#define AON_REG_S3_HASH 0x0c /* hash of S3 params */
+#define AON_REG_CONTROL_HASH_LEN 0x1c
+#define AON_REG_PANIC 0x20
+
+#define BRCMSTB_S3_MAGIC 0x5AFEB007
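+/*
+ * Written to AON_REG_PANIC by the panic notifier in pm-arm.c,
+ * presumably so the bootloader can tell a panic reboot from a clean
+ * one.
+ */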
+#define BRCMSTB_PANIC_MAGIC 0x512E115E
+#define BOOTLOADER_SCRATCH_SIZE 64
+#define BRCMSTB_DTU_STATE_MAP_ENTRIES (8*1024)
+#define BRCMSTB_DTU_CONFIG_ENTRIES (512)
+#define BRCMSTB_DTU_COUNT (2)
+
+#define IMAGE_DESCRIPTORS_BUFSIZE (2 * 1024)
+#define S3_BOOTLOADER_RESERVED (S3_FLAG_PSCI_BOOT | S3_FLAG_BOOTED64)
+
+struct brcmstb_bootloader_dtu_table {
+ uint32_t dtu_state_map[BRCMSTB_DTU_STATE_MAP_ENTRIES];
+ uint32_t dtu_config[BRCMSTB_DTU_CONFIG_ENTRIES];
+};
+
+/*
+ * Bootloader utilizes a custom parameter block left in DRAM for handling S3
+ * warm resume
+ */
+struct brcmstb_s3_params {
+ /* scratch memory for bootloader */
+ uint8_t scratch[BOOTLOADER_SCRATCH_SIZE];
+
+ uint32_t magic; /* BRCMSTB_S3_MAGIC */
+ uint64_t reentry; /* PA */
+
+ /* descriptors */
+ uint32_t hash[BRCMSTB_HASH_LEN / 4];
+
+ /*
+ * If 0, then ignore this parameter (there is only one set of
+ * descriptors)
+ *
+ * If non-0, then a second set of descriptors is stored at:
+ *
+ * descriptors + desc_offset_2
+ *
+ * The MAC result of both descriptors is XOR'd and stored in @hash
+ */
+ uint32_t desc_offset_2;
+
+ /*
+ * (Physical) address of a brcmstb_bootloader_scratch_table, for
+ * providing a large DRAM buffer to the bootloader
+ */
+ uint64_t buffer_table;
+
+ uint32_t spare[70];
+
+ uint8_t descriptors[IMAGE_DESCRIPTORS_BUFSIZE];
+ /*
+ * Must be last member of struct. See brcmstb_pm_s3_finish() for reason.
+ */
+ struct brcmstb_bootloader_dtu_table dtu[BRCMSTB_DTU_COUNT];
+} __packed;
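+
+/*
+ * Illustrative, not part of the original patch: the "must be last"
+ * requirement on dtu can be asserted at compile time from any C file
+ * that includes this header, e.g.:
+ *
+ *  BUILD_BUG_ON(offsetof(struct brcmstb_s3_params, dtu) +
+ *               sizeof(((struct brcmstb_s3_params *)0)->dtu) !=
+ *               sizeof(struct brcmstb_s3_params));
+ *
+ * brcmstb_pm_s3_finish() depends on this layout when it clears
+ * everything except the DTU area.
+ */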
+
+#endif /* __BRCMSTB_AON_DEFS_H__ */
diff --git a/drivers/soc/bcm/brcmstb/pm/pm-arm.c b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
new file mode 100644
index 000000000..8ee063474
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
@@ -0,0 +1,838 @@
+/*
+ * ARM-specific support for Broadcom STB S2/S3/S5 power management
+ *
+ * S2: clock gate CPUs and as many peripherals as possible
+ * S3: power off all of the chip except the Always ON (AON) island; keep DDR in
+ * self-refresh
+ * S5: (a.k.a. S3 cold boot) much like S3, except DDR is powered down, so we
+ * treat this mode like a soft power-off, with wakeup allowed from AON
+ *
+ * Copyright © 2014-2017 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "brcmstb-pm: " fmt
+
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kconfig.h>
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/printk.h>
+#include <linux/proc_fs.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/suspend.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/soc/brcmstb/brcmstb.h>
+
+#include <asm/fncpy.h>
+#include <asm/setup.h>
+#include <asm/suspend.h>
+
+#include "pm.h"
+#include "aon_defs.h"
+
+#define SHIMPHY_DDR_PAD_CNTRL 0x8c
+
+/* Method #0 */
+#define SHIMPHY_PAD_PLL_SEQUENCE BIT(8)
+#define SHIMPHY_PAD_GATE_PLL_S3 BIT(9)
+
+/* Method #1 */
+#define PWRDWN_SEQ_NO_SEQUENCING 0
+#define PWRDWN_SEQ_HOLD_CHANNEL 1
+#define PWRDWN_SEQ_RESET_PLL 2
+#define PWRDWN_SEQ_POWERDOWN_PLL 3
+
+#define SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK 0x00f00000
+#define SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT 20
+
+#define DDR_FORCE_CKE_RST_N BIT(3)
+#define DDR_PHY_RST_N BIT(2)
+#define DDR_PHY_CKE BIT(1)
+
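+/*
+ * Stored in phy_b_standby_ctrl_offs when a PHY has no B channel; see
+ * s5entry_method1().
+ */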
+#define DDR_PHY_NO_CHANNEL 0xffffffff
+
+#define MAX_NUM_MEMC 3
+
+struct brcmstb_memc {
+ void __iomem *ddr_phy_base;
+ void __iomem *ddr_shimphy_base;
+ void __iomem *ddr_ctrl;
+};
+
+struct brcmstb_pm_control {
+ void __iomem *aon_ctrl_base;
+ void __iomem *aon_sram;
+ struct brcmstb_memc memcs[MAX_NUM_MEMC];
+
+ void __iomem *boot_sram;
+ size_t boot_sram_len;
+
+ bool support_warm_boot;
+ size_t pll_status_offset;
+ int num_memc;
+
+ struct brcmstb_s3_params *s3_params;
+ dma_addr_t s3_params_pa;
+ int s3entry_method;
+ u32 warm_boot_offset;
+ u32 phy_a_standby_ctrl_offs;
+ u32 phy_b_standby_ctrl_offs;
+ bool needs_ddr_pad;
+ struct platform_device *pdev;
+};
+
+enum bsp_initiate_command {
+ BSP_CLOCK_STOP = 0x00,
+ BSP_GEN_RANDOM_KEY = 0x4A,
+ BSP_RESTORE_RANDOM_KEY = 0x55,
+ BSP_GEN_FIXED_KEY = 0x63,
+};
+
+#define PM_INITIATE 0x01
+#define PM_INITIATE_SUCCESS 0x00
+#define PM_INITIATE_FAIL 0xfe
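+
+/*
+ * A BSP command is issued as (cmd << 1) | PM_INITIATE; the BSP clears
+ * PM_INITIATE once it has taken the command and leaves its status in
+ * the low byte of the same register (see do_bsp_initiate_command()).
+ */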
+
+static struct brcmstb_pm_control ctrl;
+
+static int (*brcmstb_pm_do_s2_sram)(void __iomem *aon_ctrl_base,
+ void __iomem *ddr_phy_pll_status);
+
+static int brcmstb_init_sram(struct device_node *dn)
+{
+ void __iomem *sram;
+ struct resource res;
+ int ret;
+
+ ret = of_address_to_resource(dn, 0, &res);
+ if (ret)
+ return ret;
+
+ /* Uncached, executable remapping of SRAM */
+ sram = __arm_ioremap_exec(res.start, resource_size(&res), false);
+ if (!sram)
+ return -ENOMEM;
+
+ ctrl.boot_sram = sram;
+ ctrl.boot_sram_len = resource_size(&res);
+
+ return 0;
+}
+
+static const struct of_device_id sram_dt_ids[] = {
+ { .compatible = "mmio-sram" },
+ { /* sentinel */ }
+};
+
+static int do_bsp_initiate_command(enum bsp_initiate_command cmd)
+{
+ void __iomem *base = ctrl.aon_ctrl_base;
+ int ret;
+ int timeo = 1000 * 1000; /* 1 second */
+
+ writel_relaxed(0, base + AON_CTRL_PM_INITIATE);
+ (void)readl_relaxed(base + AON_CTRL_PM_INITIATE);
+
+ /* Go! */
+ writel_relaxed((cmd << 1) | PM_INITIATE, base + AON_CTRL_PM_INITIATE);
+
+ /*
+ * If firmware doesn't support the 'ack', then just assume it's done
+ * after 10ms. Note that this only works for command 0, BSP_CLOCK_STOP
+ */
+ if (of_machine_is_compatible("brcm,bcm74371a0")) {
+ (void)readl_relaxed(base + AON_CTRL_PM_INITIATE);
+ mdelay(10);
+ return 0;
+ }
+
+ for (;;) {
+ ret = readl_relaxed(base + AON_CTRL_PM_INITIATE);
+ if (!(ret & PM_INITIATE))
+ break;
+ if (timeo <= 0) {
+ pr_err("error: timeout waiting for BSP (%x)\n", ret);
+ break;
+ }
+ timeo -= 50;
+ udelay(50);
+ }
+
+ return (ret & 0xff) != PM_INITIATE_SUCCESS;
+}
+
+static int brcmstb_pm_handshake(void)
+{
+ void __iomem *base = ctrl.aon_ctrl_base;
+ u32 tmp;
+ int ret;
+
+ /* BSP power handshake, v1 */
+ tmp = readl_relaxed(base + AON_CTRL_HOST_MISC_CMDS);
+ tmp &= ~1UL;
+ writel_relaxed(tmp, base + AON_CTRL_HOST_MISC_CMDS);
+ (void)readl_relaxed(base + AON_CTRL_HOST_MISC_CMDS);
+
+ ret = do_bsp_initiate_command(BSP_CLOCK_STOP);
+ if (ret)
+ pr_err("BSP handshake failed\n");
+
+ /*
+ * HACK: BSP may have internal race on the CLOCK_STOP command.
+ * Avoid touching the BSP for a few milliseconds.
+ */
+ mdelay(3);
+
+ return ret;
+}
+
+static inline void shimphy_set(u32 value, u32 mask)
+{
+ int i;
+
+ if (!ctrl.needs_ddr_pad)
+ return;
+
+ for (i = 0; i < ctrl.num_memc; i++) {
+ u32 tmp;
+
+ tmp = readl_relaxed(ctrl.memcs[i].ddr_shimphy_base +
+ SHIMPHY_DDR_PAD_CNTRL);
+ tmp = value | (tmp & mask);
+ writel_relaxed(tmp, ctrl.memcs[i].ddr_shimphy_base +
+ SHIMPHY_DDR_PAD_CNTRL);
+ }
+ wmb(); /* Complete sequence in order. */
+}
+
+static inline void ddr_ctrl_set(bool warmboot)
+{
+ int i;
+
+ for (i = 0; i < ctrl.num_memc; i++) {
+ u32 tmp;
+
+ tmp = readl_relaxed(ctrl.memcs[i].ddr_ctrl +
+ ctrl.warm_boot_offset);
+ if (warmboot)
+ tmp |= 1;
+ else
+ tmp &= ~1; /* Cold boot */
+ writel_relaxed(tmp, ctrl.memcs[i].ddr_ctrl +
+ ctrl.warm_boot_offset);
+ }
+ /* Complete sequence in order */
+ wmb();
+}
+
+static inline void s3entry_method0(void)
+{
+ shimphy_set(SHIMPHY_PAD_GATE_PLL_S3 | SHIMPHY_PAD_PLL_SEQUENCE,
+ 0xffffffff);
+}
+
+static inline void s3entry_method1(void)
+{
+ /*
+ * S3 Entry Sequence
+ * -----------------
+ * Step 1: SHIMPHY_ADDR_CNTL_0_DDR_PAD_CNTRL [ S3_PWRDWN_SEQ ] = 3
+ * Step 2: MEMC_DDR_0_WARM_BOOT [ WARM_BOOT ] = 1
+ */
+ shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
+ SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
+ ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
+
+ ddr_ctrl_set(true);
+}
+
+static inline void s5entry_method1(void)
+{
+ int i;
+
+ /*
+ * S5 Entry Sequence
+ * -----------------
+ * Step 1: SHIMPHY_ADDR_CNTL_0_DDR_PAD_CNTRL [ S3_PWRDWN_SEQ ] = 3
+ * Step 2: MEMC_DDR_0_WARM_BOOT [ WARM_BOOT ] = 0
+ * Step 3: DDR_PHY_CONTROL_REGS_[AB]_0_STANDBY_CONTROL[ CKE ] = 0
+ * DDR_PHY_CONTROL_REGS_[AB]_0_STANDBY_CONTROL[ RST_N ] = 0
+ */
+ shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
+ SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
+ ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
+
+ ddr_ctrl_set(false);
+
+ for (i = 0; i < ctrl.num_memc; i++) {
+ u32 tmp;
+
+ /* Step 3: Channel A (RST_N = CKE = 0) */
+ tmp = readl_relaxed(ctrl.memcs[i].ddr_phy_base +
+ ctrl.phy_a_standby_ctrl_offs);
+ tmp &= ~(DDR_PHY_RST_N | DDR_PHY_CKE);
+ writel_relaxed(tmp, ctrl.memcs[i].ddr_phy_base +
+ ctrl.phy_a_standby_ctrl_offs);
+
+ /* Step 3: Channel B, if present */
+ if (ctrl.phy_b_standby_ctrl_offs != DDR_PHY_NO_CHANNEL) {
+ tmp = readl_relaxed(ctrl.memcs[i].ddr_phy_base +
+ ctrl.phy_b_standby_ctrl_offs);
+ tmp &= ~(DDR_PHY_RST_N | DDR_PHY_CKE);
+ writel_relaxed(tmp, ctrl.memcs[i].ddr_phy_base +
+ ctrl.phy_b_standby_ctrl_offs);
+ }
+ }
+ /* Must complete */
+ wmb();
+}
+
+/*
+ * Run a Power Management State Machine (PMSM) shutdown command and put the CPU
+ * into a low-power mode
+ */
+static void brcmstb_do_pmsm_power_down(unsigned long base_cmd, bool onewrite)
+{
+ void __iomem *base = ctrl.aon_ctrl_base;
+
+ if ((ctrl.s3entry_method == 1) && (base_cmd == PM_COLD_CONFIG))
+ s5entry_method1();
+
+ /* pm_start_pwrdn transition 0->1 */
+ writel_relaxed(base_cmd, base + AON_CTRL_PM_CTRL);
+
+ if (!onewrite) {
+ (void)readl_relaxed(base + AON_CTRL_PM_CTRL);
+
+ writel_relaxed(base_cmd | PM_PWR_DOWN, base + AON_CTRL_PM_CTRL);
+ (void)readl_relaxed(base + AON_CTRL_PM_CTRL);
+ }
+ wfi();
+}
+
+/* Support S5 cold boot out of "poweroff" */
+static void brcmstb_pm_poweroff(void)
+{
+ brcmstb_pm_handshake();
+
+ /* Clear magic S3 warm-boot value */
+ writel_relaxed(0, ctrl.aon_sram + AON_REG_MAGIC_FLAGS);
+ (void)readl_relaxed(ctrl.aon_sram + AON_REG_MAGIC_FLAGS);
+
+ /* Skip wait-for-interrupt signal; just use a countdown */
+ writel_relaxed(0x10, ctrl.aon_ctrl_base + AON_CTRL_PM_CPU_WAIT_COUNT);
+ (void)readl_relaxed(ctrl.aon_ctrl_base + AON_CTRL_PM_CPU_WAIT_COUNT);
+
+ if (ctrl.s3entry_method == 1) {
+ shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
+ SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
+ ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
+ ddr_ctrl_set(false);
+ brcmstb_do_pmsm_power_down(M1_PM_COLD_CONFIG, true);
+ return; /* We should never actually get here */
+ }
+
+ brcmstb_do_pmsm_power_down(PM_COLD_CONFIG, false);
+}
+
+static void *brcmstb_pm_copy_to_sram(void *fn, size_t len)
+{
+ unsigned int size = ALIGN(len, FNCPY_ALIGN);
+
+ if (ctrl.boot_sram_len < size) {
+ pr_err("standby code will not fit in SRAM\n");
+ return NULL;
+ }
+
+ return fncpy(ctrl.boot_sram, fn, size);
+}
+
+/*
+ * S2 suspend/resume picks up where we left off, so we must execute carefully
+ * from SRAM, in order to allow DDR to come back up safely before we continue.
+ */
+static int brcmstb_pm_s2(void)
+{
+ /*
+ * A previous S3 may have left settings that are hazardous to S2, so
+ * reset them here.
+ */
+ if (ctrl.s3entry_method == 1) {
+ shimphy_set((PWRDWN_SEQ_NO_SEQUENCING <<
+ SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
+ ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
+ ddr_ctrl_set(false);
+ }
+
+ brcmstb_pm_do_s2_sram = brcmstb_pm_copy_to_sram(&brcmstb_pm_do_s2,
+ brcmstb_pm_do_s2_sz);
+ if (!brcmstb_pm_do_s2_sram)
+ return -EINVAL;
+
+ return brcmstb_pm_do_s2_sram(ctrl.aon_ctrl_base,
+ ctrl.memcs[0].ddr_phy_base +
+ ctrl.pll_status_offset);
+}
+
+/*
+ * This function is called on a new stack, so don't allow inlining (which will
+ * generate stack references on the old stack). It cannot be made static because
+ * it is referenced by name from the inline assembly in brcmstb_pm_do_s3().
+ */
+noinline int brcmstb_pm_s3_finish(void)
+{
+ struct brcmstb_s3_params *params = ctrl.s3_params;
+ dma_addr_t params_pa = ctrl.s3_params_pa;
+ phys_addr_t reentry = virt_to_phys(&cpu_resume_arm);
+ enum bsp_initiate_command cmd;
+ u32 flags;
+
+ /*
+ * Clear parameter structure, but not DTU area, which has already been
+ * filled in. We know the DTU is at the end, so we can just subtract its
+ * size.
+ */
+ memset(params, 0, sizeof(*params) - sizeof(params->dtu));
+
+ flags = readl_relaxed(ctrl.aon_sram + AON_REG_MAGIC_FLAGS);
+
+ flags &= S3_BOOTLOADER_RESERVED;
+ flags |= S3_FLAG_NO_MEM_VERIFY;
+ flags |= S3_FLAG_LOAD_RANDKEY;
+
+ /* Load random / fixed key */
+ if (flags & S3_FLAG_LOAD_RANDKEY)
+ cmd = BSP_GEN_RANDOM_KEY;
+ else
+ cmd = BSP_GEN_FIXED_KEY;
+ if (do_bsp_initiate_command(cmd)) {
+ pr_info("key loading failed\n");
+ return -EIO;
+ }
+
+ params->magic = BRCMSTB_S3_MAGIC;
+ params->reentry = reentry;
+
+ /* No more writes to DRAM */
+ flush_cache_all();
+
+ flags |= BRCMSTB_S3_MAGIC_SHORT;
+
+ writel_relaxed(flags, ctrl.aon_sram + AON_REG_MAGIC_FLAGS);
+ writel_relaxed(lower_32_bits(params_pa),
+ ctrl.aon_sram + AON_REG_CONTROL_LOW);
+ writel_relaxed(upper_32_bits(params_pa),
+ ctrl.aon_sram + AON_REG_CONTROL_HIGH);
+
+ switch (ctrl.s3entry_method) {
+ case 0:
+ s3entry_method0();
+ brcmstb_do_pmsm_power_down(PM_WARM_CONFIG, false);
+ break;
+ case 1:
+ s3entry_method1();
+ brcmstb_do_pmsm_power_down(M1_PM_WARM_CONFIG, true);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Must have been interrupted from wfi()? */
+ return -EINTR;
+}
+
+static int brcmstb_pm_do_s3(unsigned long sp)
+{
+ unsigned long save_sp;
+ int ret;
+
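+ /*
+ * Call brcmstb_pm_s3_finish() on the SRAM stack passed in by
+ * brcmstb_pm_s3(), so that its stack accesses land in SRAM rather
+ * than DRAM once DRAM writes must stop; the original stack pointer
+ * is saved and restored around the call.
+ */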
+ asm volatile (
+ "mov %[save], sp\n"
+ "mov sp, %[new]\n"
+ "bl brcmstb_pm_s3_finish\n"
+ "mov %[ret], r0\n"
+ "mov %[new], sp\n"
+ "mov sp, %[save]\n"
+ : [save] "=&r" (save_sp), [ret] "=&r" (ret)
+ : [new] "r" (sp)
+ );
+
+ return ret;
+}
+
+static int brcmstb_pm_s3(void)
+{
+ void __iomem *sp = ctrl.boot_sram + ctrl.boot_sram_len;
+
+ return cpu_suspend((unsigned long)sp, brcmstb_pm_do_s3);
+}
+
+static int brcmstb_pm_standby(bool deep_standby)
+{
+ int ret;
+
+ if (brcmstb_pm_handshake())
+ return -EIO;
+
+ if (deep_standby)
+ ret = brcmstb_pm_s3();
+ else
+ ret = brcmstb_pm_s2();
+ if (ret)
+ pr_err("%s: standby failed\n", __func__);
+
+ return ret;
+}
+
+static int brcmstb_pm_enter(suspend_state_t state)
+{
+ int ret = -EINVAL;
+
+ switch (state) {
+ case PM_SUSPEND_STANDBY:
+ ret = brcmstb_pm_standby(false);
+ break;
+ case PM_SUSPEND_MEM:
+ ret = brcmstb_pm_standby(true);
+ break;
+ }
+
+ return ret;
+}
+
+static int brcmstb_pm_valid(suspend_state_t state)
+{
+ switch (state) {
+ case PM_SUSPEND_STANDBY:
+ return true;
+ case PM_SUSPEND_MEM:
+ return ctrl.support_warm_boot;
+ default:
+ return false;
+ }
+}
+
+static const struct platform_suspend_ops brcmstb_pm_ops = {
+ .enter = brcmstb_pm_enter,
+ .valid = brcmstb_pm_valid,
+};
+
+static const struct of_device_id aon_ctrl_dt_ids[] = {
+ { .compatible = "brcm,brcmstb-aon-ctrl" },
+ {}
+};
+
+struct ddr_phy_ofdata {
+ bool supports_warm_boot;
+ size_t pll_status_offset;
+ int s3entry_method;
+ u32 warm_boot_offset;
+ u32 phy_a_standby_ctrl_offs;
+ u32 phy_b_standby_ctrl_offs;
+};
+
+static struct ddr_phy_ofdata ddr_phy_71_1 = {
+ .supports_warm_boot = true,
+ .pll_status_offset = 0x0c,
+ .s3entry_method = 1,
+ .warm_boot_offset = 0x2c,
+ .phy_a_standby_ctrl_offs = 0x198,
+ .phy_b_standby_ctrl_offs = DDR_PHY_NO_CHANNEL
+};
+
+static struct ddr_phy_ofdata ddr_phy_72_0 = {
+ .supports_warm_boot = true,
+ .pll_status_offset = 0x10,
+ .s3entry_method = 1,
+ .warm_boot_offset = 0x40,
+ .phy_a_standby_ctrl_offs = 0x2a4,
+ .phy_b_standby_ctrl_offs = 0x8a4
+};
+
+static struct ddr_phy_ofdata ddr_phy_225_1 = {
+ .supports_warm_boot = false,
+ .pll_status_offset = 0x4,
+ .s3entry_method = 0
+};
+
+static struct ddr_phy_ofdata ddr_phy_240_1 = {
+ .supports_warm_boot = true,
+ .pll_status_offset = 0x4,
+ .s3entry_method = 0
+};
+
+static const struct of_device_id ddr_phy_dt_ids[] = {
+ {
+ .compatible = "brcm,brcmstb-ddr-phy-v71.1",
+ .data = &ddr_phy_71_1,
+ },
+ {
+ .compatible = "brcm,brcmstb-ddr-phy-v72.0",
+ .data = &ddr_phy_72_0,
+ },
+ {
+ .compatible = "brcm,brcmstb-ddr-phy-v225.1",
+ .data = &ddr_phy_225_1,
+ },
+ {
+ .compatible = "brcm,brcmstb-ddr-phy-v240.1",
+ .data = &ddr_phy_240_1,
+ },
+ {
+ /* Same as v240.1, for the registers we care about */
+ .compatible = "brcm,brcmstb-ddr-phy-v240.2",
+ .data = &ddr_phy_240_1,
+ },
+ {}
+};
+
+struct ddr_seq_ofdata {
+ bool needs_ddr_pad;
+ u32 warm_boot_offset;
+};
+
+static const struct ddr_seq_ofdata ddr_seq_b22 = {
+ .needs_ddr_pad = false,
+ .warm_boot_offset = 0x2c,
+};
+
+static const struct ddr_seq_ofdata ddr_seq = {
+ .needs_ddr_pad = true,
+};
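+
+/*
+ * A zero warm_boot_offset here means "keep the offset derived from the
+ * DDR PHY version"; see the probe code below.
+ */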
+
+static const struct of_device_id ddr_shimphy_dt_ids[] = {
+ { .compatible = "brcm,brcmstb-ddr-shimphy-v1.0" },
+ {}
+};
+
+static const struct of_device_id brcmstb_memc_of_match[] = {
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.2.1",
+ .data = &ddr_seq,
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.2.2",
+ .data = &ddr_seq_b22,
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.2.3",
+ .data = &ddr_seq_b22,
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.3.0",
+ .data = &ddr_seq_b22,
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.3.1",
+ .data = &ddr_seq_b22,
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr",
+ .data = &ddr_seq,
+ },
+ {},
+};
+
+static void __iomem *brcmstb_ioremap_match(const struct of_device_id *matches,
+ int index, const void **ofdata)
+{
+ struct device_node *dn;
+ const struct of_device_id *match;
+
+ dn = of_find_matching_node_and_match(NULL, matches, &match);
+ if (!dn)
+ return ERR_PTR(-EINVAL);
+
+ if (ofdata)
+ *ofdata = match->data;
+
+ return of_io_request_and_map(dn, index, dn->full_name);
+}
+
+static int brcmstb_pm_panic_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ writel_relaxed(BRCMSTB_PANIC_MAGIC, ctrl.aon_sram + AON_REG_PANIC);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block brcmstb_pm_panic_nb = {
+ .notifier_call = brcmstb_pm_panic_notify,
+};
+
+static int brcmstb_pm_probe(struct platform_device *pdev)
+{
+ const struct ddr_phy_ofdata *ddr_phy_data;
+ const struct ddr_seq_ofdata *ddr_seq_data;
+ const struct of_device_id *of_id = NULL;
+ struct device_node *dn;
+ void __iomem *base;
+ int ret, i;
+
+ /* AON ctrl registers */
+ base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 0, NULL);
+ if (IS_ERR(base)) {
+ pr_err("error mapping AON_CTRL\n");
+ return PTR_ERR(base);
+ }
+ ctrl.aon_ctrl_base = base;
+
+ /* AON SRAM registers */
+ base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 1, NULL);
+ if (IS_ERR(base)) {
+ /* Assume standard offset */
+ ctrl.aon_sram = ctrl.aon_ctrl_base +
+ AON_CTRL_SYSTEM_DATA_RAM_OFS;
+ } else {
+ ctrl.aon_sram = base;
+ }
+
+ writel_relaxed(0, ctrl.aon_sram + AON_REG_PANIC);
+
+ /* DDR PHY registers */
+ base = brcmstb_ioremap_match(ddr_phy_dt_ids, 0,
+ (const void **)&ddr_phy_data);
+ if (IS_ERR(base)) {
+ pr_err("error mapping DDR PHY\n");
+ return PTR_ERR(base);
+ }
+ ctrl.support_warm_boot = ddr_phy_data->supports_warm_boot;
+ ctrl.pll_status_offset = ddr_phy_data->pll_status_offset;
+ /* Only need DDR PHY 0 for now? */
+ ctrl.memcs[0].ddr_phy_base = base;
+ ctrl.s3entry_method = ddr_phy_data->s3entry_method;
+ ctrl.phy_a_standby_ctrl_offs = ddr_phy_data->phy_a_standby_ctrl_offs;
+ ctrl.phy_b_standby_ctrl_offs = ddr_phy_data->phy_b_standby_ctrl_offs;
+ /*
+ * Slightly gross to use the PHY version to derive a MEMC offset,
+ * but that is the only versioned thing we can test against so far.
+ */
+ ctrl.warm_boot_offset = ddr_phy_data->warm_boot_offset;
+
+ /* DDR SHIM-PHY registers */
+ for_each_matching_node(dn, ddr_shimphy_dt_ids) {
+ i = ctrl.num_memc;
+ if (i >= MAX_NUM_MEMC) {
+ pr_warn("too many MEMCs (max %d)\n", MAX_NUM_MEMC);
+ break;
+ }
+
+ base = of_io_request_and_map(dn, 0, dn->full_name);
+ if (IS_ERR(base)) {
+ if (!ctrl.support_warm_boot)
+ break;
+
+ pr_err("error mapping DDR SHIMPHY %d\n", i);
+ return PTR_ERR(base);
+ }
+ ctrl.memcs[i].ddr_shimphy_base = base;
+ ctrl.num_memc++;
+ }
+
+ /* Sequencer DRAM Param and Control Registers */
+ i = 0;
+ for_each_matching_node(dn, brcmstb_memc_of_match) {
+ base = of_iomap(dn, 0);
+ if (!base) {
+ pr_err("error mapping DDR Sequencer %d\n", i);
+ return -ENOMEM;
+ }
+
+ of_id = of_match_node(brcmstb_memc_of_match, dn);
+ if (!of_id) {
+ iounmap(base);
+ return -EINVAL;
+ }
+
+ ddr_seq_data = of_id->data;
+ ctrl.needs_ddr_pad = ddr_seq_data->needs_ddr_pad;
+ /* Adjust warm boot offset based on the DDR sequencer */
+ if (ddr_seq_data->warm_boot_offset)
+ ctrl.warm_boot_offset = ddr_seq_data->warm_boot_offset;
+
+ ctrl.memcs[i].ddr_ctrl = base;
+ i++;
+ }
+
+ pr_debug("PM: supports warm boot:%d, method:%d, wboffs:%x\n",
+ ctrl.support_warm_boot, ctrl.s3entry_method,
+ ctrl.warm_boot_offset);
+
+ dn = of_find_matching_node(NULL, sram_dt_ids);
+ if (!dn) {
+ pr_err("SRAM not found\n");
+ return -EINVAL;
+ }
+
+ ret = brcmstb_init_sram(dn);
+ if (ret) {
+ pr_err("error setting up SRAM for PM\n");
+ return ret;
+ }
+
+ ctrl.pdev = pdev;
+
+ ctrl.s3_params = kmalloc(sizeof(*ctrl.s3_params), GFP_KERNEL);
+ if (!ctrl.s3_params)
+ return -ENOMEM;
+ ctrl.s3_params_pa = dma_map_single(&pdev->dev, ctrl.s3_params,
+ sizeof(*ctrl.s3_params),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, ctrl.s3_params_pa)) {
+ pr_err("error mapping DMA memory\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &brcmstb_pm_panic_nb);
+
+ pm_power_off = brcmstb_pm_poweroff;
+ suspend_set_ops(&brcmstb_pm_ops);
+
+ return 0;
+
+out:
+ kfree(ctrl.s3_params);
+
+ pr_warn("PM: initialization failed with code %d\n", ret);
+
+ return ret;
+}
+
+static struct platform_driver brcmstb_pm_driver = {
+ .driver = {
+ .name = "brcmstb-pm",
+ .of_match_table = aon_ctrl_dt_ids,
+ },
+};
+
+static int __init brcmstb_pm_init(void)
+{
+ return platform_driver_probe(&brcmstb_pm_driver,
+ brcmstb_pm_probe);
+}
+module_init(brcmstb_pm_init);
diff --git a/drivers/soc/bcm/brcmstb/pm/pm-mips.c b/drivers/soc/bcm/brcmstb/pm/pm-mips.c
new file mode 100644
index 000000000..9300b5f62
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/pm/pm-mips.c
@@ -0,0 +1,461 @@
+/*
+ * MIPS-specific support for Broadcom STB S2/S3/S5 power management
+ *
+ * Copyright (C) 2016-2017 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+#include <linux/suspend.h>
+#include <asm/bmips.h>
+#include <asm/tlbflush.h>
+
+#include "pm.h"
+
+#define S2_NUM_PARAMS 6
+#define MAX_NUM_MEMC 3
+
+/* S3 constants */
+#define MAX_GP_REGS 16
+#define MAX_CP0_REGS 32
+#define NUM_MEMC_CLIENTS 128
+#define AON_CTRL_RAM_SIZE 128
+#define BRCMSTB_S3_MAGIC 0x5AFEB007
+
+#define CLEAR_RESET_MASK 0x01
+
+/* Index each CP0 register that needs to be saved */
+#define CONTEXT 0
+#define USER_LOCAL 1
+#define PGMK 2
+#define HWRENA 3
+#define COMPARE 4
+#define STATUS 5
+#define CONFIG 6
+#define MODE 7
+#define EDSP 8
+#define BOOT_VEC 9
+#define EBASE 10
+
+struct brcmstb_memc {
+ void __iomem *ddr_phy_base;
+ void __iomem *arb_base;
+};
+
+struct brcmstb_pm_control {
+ void __iomem *aon_ctrl_base;
+ void __iomem *aon_sram_base;
+ void __iomem *timers_base;
+ struct brcmstb_memc memcs[MAX_NUM_MEMC];
+ int num_memc;
+};
+
+struct brcm_pm_s3_context {
+ u32 cp0_regs[MAX_CP0_REGS];
+ u32 memc0_rts[NUM_MEMC_CLIENTS];
+ u32 sc_boot_vec;
+};
+
+struct brcmstb_mem_transfer {
+ struct brcmstb_mem_transfer *next;
+ void *src;
+ void *dst;
+ dma_addr_t pa_src;
+ dma_addr_t pa_dst;
+ u32 len;
+ u8 key;
+ u8 mode;
+ u8 src_remapped;
+ u8 dst_remapped;
+ u8 src_dst_remapped;
+};
+
+#define AON_SAVE_SRAM(base, idx, val) \
+ __raw_writel((val), (base) + ((idx) << 2))
+
+/* Used for saving registers in asm */
+u32 gp_regs[MAX_GP_REGS];
+
+#define BSP_CLOCK_STOP 0x00
+#define PM_INITIATE 0x01
+
+static struct brcmstb_pm_control ctrl;
+
+static void brcm_pm_save_cp0_context(struct brcm_pm_s3_context *ctx)
+{
+ /* Generic MIPS */
+ ctx->cp0_regs[CONTEXT] = read_c0_context();
+ ctx->cp0_regs[USER_LOCAL] = read_c0_userlocal();
+ ctx->cp0_regs[PGMK] = read_c0_pagemask();
+ ctx->cp0_regs[HWRENA] = read_c0_cache();
+ ctx->cp0_regs[COMPARE] = read_c0_compare();
+ ctx->cp0_regs[STATUS] = read_c0_status();
+
+ /* Broadcom specific */
+ ctx->cp0_regs[CONFIG] = read_c0_brcm_config();
+ ctx->cp0_regs[MODE] = read_c0_brcm_mode();
+ ctx->cp0_regs[EDSP] = read_c0_brcm_edsp();
+ ctx->cp0_regs[BOOT_VEC] = read_c0_brcm_bootvec();
+ ctx->cp0_regs[EBASE] = read_c0_ebase();
+
+ ctx->sc_boot_vec = bmips_read_zscm_reg(0xa0);
+}
+
+static void brcm_pm_restore_cp0_context(struct brcm_pm_s3_context *ctx)
+{
+ /* Restore cp0 state */
+ bmips_write_zscm_reg(0xa0, ctx->sc_boot_vec);
+
+ /* Generic MIPS */
+ write_c0_context(ctx->cp0_regs[CONTEXT]);
+ write_c0_userlocal(ctx->cp0_regs[USER_LOCAL]);
+ write_c0_pagemask(ctx->cp0_regs[PGMK]);
+ write_c0_cache(ctx->cp0_regs[HWRENA]);
+ write_c0_compare(ctx->cp0_regs[COMPARE]);
+ write_c0_status(ctx->cp0_regs[STATUS]);
+
+ /* Broadcom specific */
+ write_c0_brcm_config(ctx->cp0_regs[CONFIG]);
+ write_c0_brcm_mode(ctx->cp0_regs[MODE]);
+ write_c0_brcm_edsp(ctx->cp0_regs[EDSP]);
+ write_c0_brcm_bootvec(ctx->cp0_regs[BOOT_VEC]);
+ write_c0_ebase(ctx->cp0_regs[EBASE]);
+}
+
+static void brcmstb_pm_handshake(void)
+{
+ void __iomem *base = ctrl.aon_ctrl_base;
+ u32 tmp;
+
+ /* BSP power handshake, v1 */
+ tmp = __raw_readl(base + AON_CTRL_HOST_MISC_CMDS);
+ tmp &= ~1UL;
+ __raw_writel(tmp, base + AON_CTRL_HOST_MISC_CMDS);
+ (void)__raw_readl(base + AON_CTRL_HOST_MISC_CMDS);
+
+ __raw_writel(0, base + AON_CTRL_PM_INITIATE);
+ (void)__raw_readl(base + AON_CTRL_PM_INITIATE);
+ __raw_writel(BSP_CLOCK_STOP | PM_INITIATE,
+ base + AON_CTRL_PM_INITIATE);
+ /*
+ * HACK: BSP may have internal race on the CLOCK_STOP command.
+ * Avoid touching the BSP for a few milliseconds.
+ */
+ mdelay(3);
+}
+
+static void brcmstb_pm_s5(void)
+{
+ void __iomem *base = ctrl.aon_ctrl_base;
+
+ brcmstb_pm_handshake();
+
+ /* Clear magic s3 warm-boot value */
+ AON_SAVE_SRAM(ctrl.aon_sram_base, 0, 0);
+
+ /* Set the countdown */
+ __raw_writel(0x10, base + AON_CTRL_PM_CPU_WAIT_COUNT);
+ (void)__raw_readl(base + AON_CTRL_PM_CPU_WAIT_COUNT);
+
+ /* Prepare to S5 cold boot */
+ __raw_writel(PM_COLD_CONFIG, base + AON_CTRL_PM_CTRL);
+ (void)__raw_readl(base + AON_CTRL_PM_CTRL);
+
+ __raw_writel((PM_COLD_CONFIG | PM_PWR_DOWN), base +
+ AON_CTRL_PM_CTRL);
+ (void)__raw_readl(base + AON_CTRL_PM_CTRL);
+
+ __asm__ __volatile__(
+ " wait\n"
+ : : : "memory");
+}
+
+static int brcmstb_pm_s3(void)
+{
+ struct brcm_pm_s3_context s3_context;
+ void __iomem *memc_arb_base;
+ unsigned long flags;
+ u32 tmp;
+ int i;
+
+ /* Prepare for s3 */
+ AON_SAVE_SRAM(ctrl.aon_sram_base, 0, BRCMSTB_S3_MAGIC);
+ AON_SAVE_SRAM(ctrl.aon_sram_base, 1, (u32)&s3_reentry);
+ AON_SAVE_SRAM(ctrl.aon_sram_base, 2, 0);
+
+ /* Clear RESET_HISTORY */
+ tmp = __raw_readl(ctrl.aon_ctrl_base + AON_CTRL_RESET_CTRL);
+ tmp &= ~CLEAR_RESET_MASK;
+ __raw_writel(tmp, ctrl.aon_ctrl_base + AON_CTRL_RESET_CTRL);
+
+ local_irq_save(flags);
+
+ /* Inhibit DDR_RSTb pulse for both MEMCs */
+ for (i = 0; i < ctrl.num_memc; i++) {
+ tmp = __raw_readl(ctrl.memcs[i].ddr_phy_base +
+ DDR40_PHY_CONTROL_REGS_0_STANDBY_CTRL);
+
+ tmp &= ~0x0f;
+ __raw_writel(tmp, ctrl.memcs[i].ddr_phy_base +
+ DDR40_PHY_CONTROL_REGS_0_STANDBY_CTRL);
+ tmp |= (0x05 | BIT(5));
+ __raw_writel(tmp, ctrl.memcs[i].ddr_phy_base +
+ DDR40_PHY_CONTROL_REGS_0_STANDBY_CTRL);
+ }
+
+ /* Save CP0 context */
+ brcm_pm_save_cp0_context(&s3_context);
+
+ /* Save RTS (skip debug register) */
+ memc_arb_base = ctrl.memcs[0].arb_base + 4;
+ for (i = 0; i < NUM_MEMC_CLIENTS; i++) {
+ s3_context.memc0_rts[i] = __raw_readl(memc_arb_base);
+ memc_arb_base += 4;
+ }
+
+ /* Save I/O context */
+ local_flush_tlb_all();
+ _dma_cache_wback_inv(0, ~0);
+
+ brcm_pm_do_s3(ctrl.aon_ctrl_base, current_cpu_data.dcache.linesz);
+
+ /* CPU reconfiguration */
+ local_flush_tlb_all();
+ bmips_cpu_setup();
+ cpumask_clear(&bmips_booted_mask);
+
+ /* Restore RTS (skip debug register) */
+ memc_arb_base = ctrl.memcs[0].arb_base + 4;
+ for (i = 0; i < NUM_MEMC_CLIENTS; i++) {
+ __raw_writel(s3_context.memc0_rts[i], memc_arb_base);
+ memc_arb_base += 4;
+ }
+
+ /* restore CP0 context */
+ brcm_pm_restore_cp0_context(&s3_context);
+
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+static int brcmstb_pm_s2(void)
+{
+ /*
+ * We need to pass 6 arguments to an assembly function. Let's avoid
+ * the stack and pass them in an explicit array of 4-byte entries.
+ * The assembly code assumes all arguments are 4 bytes wide and
+ * ordered like so:
+ *
+ * 0: AON_CTRL base register
+ * 1: DDR_PHY base register
+ * 2: TIMERS base register
+ * 3: I-Cache line size
+ * 4: Restart vector address
+ * 5: Restart vector size
+ */
+ u32 s2_params[6];
+
+ /* Prepare s2 parameters */
+ s2_params[0] = (u32)ctrl.aon_ctrl_base;
+ s2_params[1] = (u32)ctrl.memcs[0].ddr_phy_base;
+ s2_params[2] = (u32)ctrl.timers_base;
+ s2_params[3] = (u32)current_cpu_data.icache.linesz;
+ s2_params[4] = (u32)BMIPS_WARM_RESTART_VEC;
+ s2_params[5] = (u32)(bmips_smp_int_vec_end -
+ bmips_smp_int_vec);
+
+ /* Drop to standby */
+ brcm_pm_do_s2(s2_params);
+
+ return 0;
+}
+
+static int brcmstb_pm_standby(bool deep_standby)
+{
+ brcmstb_pm_handshake();
+
+ /* Send IRQs to BMIPS_WARM_RESTART_VEC */
+ clear_c0_cause(CAUSEF_IV);
+ irq_disable_hazard();
+ set_c0_status(ST0_BEV);
+ irq_disable_hazard();
+
+ if (deep_standby)
+ brcmstb_pm_s3();
+ else
+ brcmstb_pm_s2();
+
+ /* Send IRQs to normal runtime vectors */
+ clear_c0_status(ST0_BEV);
+ irq_disable_hazard();
+ set_c0_cause(CAUSEF_IV);
+ irq_disable_hazard();
+
+ return 0;
+}
+
+static int brcmstb_pm_enter(suspend_state_t state)
+{
+ int ret = -EINVAL;
+
+ switch (state) {
+ case PM_SUSPEND_STANDBY:
+ ret = brcmstb_pm_standby(false);
+ break;
+ case PM_SUSPEND_MEM:
+ ret = brcmstb_pm_standby(true);
+ break;
+ }
+
+ return ret;
+}
+
+static int brcmstb_pm_valid(suspend_state_t state)
+{
+ switch (state) {
+ case PM_SUSPEND_STANDBY:
+ return true;
+ case PM_SUSPEND_MEM:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct platform_suspend_ops brcmstb_pm_ops = {
+ .enter = brcmstb_pm_enter,
+ .valid = brcmstb_pm_valid,
+};
+
+static const struct of_device_id aon_ctrl_dt_ids[] = {
+ { .compatible = "brcm,brcmstb-aon-ctrl" },
+ { /* sentinel */ }
+};
+
+static const struct of_device_id ddr_phy_dt_ids[] = {
+ { .compatible = "brcm,brcmstb-ddr-phy" },
+ { /* sentinel */ }
+};
+
+static const struct of_device_id arb_dt_ids[] = {
+ { .compatible = "brcm,brcmstb-memc-arb" },
+ { /* sentinel */ }
+};
+
+static const struct of_device_id timers_ids[] = {
+ { .compatible = "brcm,brcmstb-timers" },
+ { /* sentinel */ }
+};
+
+static inline void __iomem *brcmstb_ioremap_node(struct device_node *dn,
+ int index)
+{
+ return of_io_request_and_map(dn, index, dn->full_name);
+}
+
+static void __iomem *brcmstb_ioremap_match(const struct of_device_id *matches,
+ int index, const void **ofdata)
+{
+ struct device_node *dn;
+ const struct of_device_id *match;
+
+ dn = of_find_matching_node_and_match(NULL, matches, &match);
+ if (!dn)
+ return ERR_PTR(-EINVAL);
+
+ if (ofdata)
+ *ofdata = match->data;
+
+ return brcmstb_ioremap_node(dn, index);
+}
+
+static int brcmstb_pm_init(void)
+{
+ struct device_node *dn;
+ void __iomem *base;
+ int i;
+
+ /* AON ctrl registers */
+ base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 0, NULL);
+ if (IS_ERR(base)) {
+ pr_err("error mapping AON_CTRL\n");
+ goto aon_err;
+ }
+ ctrl.aon_ctrl_base = base;
+
+ /* AON SRAM registers */
+ base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 1, NULL);
+ if (IS_ERR(base)) {
+ pr_err("error mapping AON_SRAM\n");
+ goto sram_err;
+ }
+ ctrl.aon_sram_base = base;
+
+ ctrl.num_memc = 0;
+ /* Map MEMC DDR PHY registers */
+ for_each_matching_node(dn, ddr_phy_dt_ids) {
+ i = ctrl.num_memc;
+ if (i >= MAX_NUM_MEMC) {
+ pr_warn("Too many MEMCs (max %d)\n", MAX_NUM_MEMC);
+ break;
+ }
+ base = brcmstb_ioremap_node(dn, 0);
+ if (IS_ERR(base))
+ goto ddr_err;
+
+ ctrl.memcs[i].ddr_phy_base = base;
+ ctrl.num_memc++;
+ }
+
+ /* MEMC ARB registers */
+ base = brcmstb_ioremap_match(arb_dt_ids, 0, NULL);
+ if (IS_ERR(base)) {
+ pr_err("error mapping MEMC ARB\n");
+ goto ddr_err;
+ }
+ ctrl.memcs[0].arb_base = base;
+
+ /* Timer registers */
+ base = brcmstb_ioremap_match(timers_ids, 0, NULL);
+ if (IS_ERR(base)) {
+ pr_err("error mapping timers\n");
+ goto tmr_err;
+ }
+ ctrl.timers_base = base;
+
+ /* s3 cold boot aka s5 */
+ pm_power_off = brcmstb_pm_s5;
+
+ suspend_set_ops(&brcmstb_pm_ops);
+
+ return 0;
+
+tmr_err:
+ iounmap(ctrl.memcs[0].arb_base);
+ddr_err:
+ for (i = 0; i < ctrl.num_memc; i++)
+ iounmap(ctrl.memcs[i].ddr_phy_base);
+
+ iounmap(ctrl.aon_sram_base);
+sram_err:
+ iounmap(ctrl.aon_ctrl_base);
+aon_err:
+ return PTR_ERR(base);
+}
+arch_initcall(brcmstb_pm_init);
diff --git a/drivers/soc/bcm/brcmstb/pm/pm.h b/drivers/soc/bcm/brcmstb/pm/pm.h
new file mode 100644
index 000000000..b7d35ac70
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/pm/pm.h
@@ -0,0 +1,89 @@
+/*
+ * Definitions for Broadcom STB power management / Always ON (AON) block
+ *
+ * Copyright © 2016-2017 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __BRCMSTB_PM_H__
+#define __BRCMSTB_PM_H__
+
+#define AON_CTRL_RESET_CTRL 0x00
+#define AON_CTRL_PM_CTRL 0x04
+#define AON_CTRL_PM_STATUS 0x08
+#define AON_CTRL_PM_CPU_WAIT_COUNT 0x10
+#define AON_CTRL_PM_INITIATE 0x88
+#define AON_CTRL_HOST_MISC_CMDS 0x8c
+#define AON_CTRL_SYSTEM_DATA_RAM_OFS 0x200
+
+/* MIPS PM constants */
+/* MEMC0 offsets */
+#define DDR40_PHY_CONTROL_REGS_0_PLL_STATUS 0x10
+#define DDR40_PHY_CONTROL_REGS_0_STANDBY_CTRL 0xa4
+
+/* TIMER offsets */
+#define TIMER_TIMER1_CTRL 0x0c
+#define TIMER_TIMER1_STAT 0x1c
+
+/* TIMER defines */
+#define RESET_TIMER 0x0
+#define START_TIMER 0xbfffffff
+#define TIMER_MASK 0x3fffffff
+
+/* PM_CTRL bitfield (Method #0) */
+#define PM_FAST_PWRDOWN (1 << 6)
+#define PM_WARM_BOOT (1 << 5)
+#define PM_DEEP_STANDBY (1 << 4)
+#define PM_CPU_PWR (1 << 3)
+#define PM_USE_CPU_RDY (1 << 2)
+#define PM_PLL_PWRDOWN (1 << 1)
+#define PM_PWR_DOWN (1 << 0)
+
+/* PM_CTRL bitfield (Method #1) */
+#define PM_DPHY_STANDBY_CLEAR (1 << 20)
+#define PM_MIN_S3_WIDTH_TIMER_BYPASS (1 << 7)
+
+#define PM_S2_COMMAND (PM_PLL_PWRDOWN | PM_USE_CPU_RDY | PM_PWR_DOWN)
+
+/* Method 0 bitmasks */
+#define PM_COLD_CONFIG (PM_PLL_PWRDOWN | PM_DEEP_STANDBY)
+#define PM_WARM_CONFIG (PM_COLD_CONFIG | PM_USE_CPU_RDY | PM_WARM_BOOT)
+
+/* Method 1 bitmask */
+#define M1_PM_WARM_CONFIG (PM_DPHY_STANDBY_CLEAR | \
+ PM_MIN_S3_WIDTH_TIMER_BYPASS | \
+ PM_WARM_BOOT | PM_DEEP_STANDBY | \
+ PM_PLL_PWRDOWN | PM_PWR_DOWN)
+
+#define M1_PM_COLD_CONFIG (PM_DPHY_STANDBY_CLEAR | \
+ PM_MIN_S3_WIDTH_TIMER_BYPASS | \
+ PM_DEEP_STANDBY | \
+ PM_PLL_PWRDOWN | PM_PWR_DOWN)
+
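+/*
+ * On ARM the entry method is chosen per chip from the DDR PHY
+ * compatible string (see ddr_phy_dt_ids in pm-arm.c); the MIPS code
+ * uses the Method #0 masks.
+ */
+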
+#ifndef __ASSEMBLY__
+
+#ifndef CONFIG_MIPS
+extern const unsigned long brcmstb_pm_do_s2_sz;
+extern asmlinkage int brcmstb_pm_do_s2(void __iomem *aon_ctrl_base,
+ void __iomem *ddr_phy_pll_status);
+#else
+/* s2 asm */
+extern asmlinkage int brcm_pm_do_s2(u32 *s2_params);
+
+/* s3 asm */
+extern asmlinkage int brcm_pm_do_s3(void __iomem *aon_ctrl_base,
+ int dcache_linesz);
+extern int s3_reentry;
+#endif /* CONFIG_MIPS */
+
+#endif
+
+#endif /* __BRCMSTB_PM_H__ */
diff --git a/drivers/soc/bcm/brcmstb/pm/s2-arm.S b/drivers/soc/bcm/brcmstb/pm/s2-arm.S
new file mode 100644
index 000000000..1d472d564
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/pm/s2-arm.S
@@ -0,0 +1,76 @@
+/*
+ * Copyright © 2014-2017 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+#include "pm.h"
+
+ .text
+ .align 3
+
+#define AON_CTRL_REG r10
+#define DDR_PHY_STATUS_REG r11
+
+/*
+ * r0: AON_CTRL base address
+ * r1: DDR PHY PLL status register address
+ */
+ENTRY(brcmstb_pm_do_s2)
+ stmfd sp!, {r4-r11, lr}
+ mov AON_CTRL_REG, r0
+ mov DDR_PHY_STATUS_REG, r1
+
+ /* Flush memory transactions */
+ dsb
+
+ /* Cache DDR_PHY_STATUS_REG translation */
+ ldr r0, [DDR_PHY_STATUS_REG]
+
+ /* power down request */
+ ldr r0, =PM_S2_COMMAND
+ ldr r1, =0
+ str r1, [AON_CTRL_REG, #AON_CTRL_PM_CTRL]
+ ldr r1, [AON_CTRL_REG, #AON_CTRL_PM_CTRL]
+ str r0, [AON_CTRL_REG, #AON_CTRL_PM_CTRL]
+ ldr r0, [AON_CTRL_REG, #AON_CTRL_PM_CTRL]
+
+ /* Wait for interrupt */
+ wfi
+ nop
+
+ /* Bring MEMC back up */
+1: ldr r0, [DDR_PHY_STATUS_REG]
+ ands r0, #1
+ beq 1b
+
+ /* Power-up handshake */
+ ldr r0, =1
+ str r0, [AON_CTRL_REG, #AON_CTRL_HOST_MISC_CMDS]
+ ldr r0, [AON_CTRL_REG, #AON_CTRL_HOST_MISC_CMDS]
+
+ ldr r0, =0
+ str r0, [AON_CTRL_REG, #AON_CTRL_PM_CTRL]
+ ldr r0, [AON_CTRL_REG, #AON_CTRL_PM_CTRL]
+
+ /* Return to caller */
+ ldr r0, =0
+ ldmfd sp!, {r4-r11, pc}
+
+ ENDPROC(brcmstb_pm_do_s2)
+
+ /* Place literal pool here */
+ .ltorg
+
+ENTRY(brcmstb_pm_do_s2_sz)
+ .word . - brcmstb_pm_do_s2
diff --git a/drivers/soc/bcm/brcmstb/pm/s2-mips.S b/drivers/soc/bcm/brcmstb/pm/s2-mips.S
new file mode 100644
index 000000000..27a14bc46
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/pm/s2-mips.S
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2016 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/asm.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+
+#include "pm.h"
+
+ .text
+ .set noreorder
+ .align 5
+
+/*
+ * a0: u32 params array
+ */
+LEAF(brcm_pm_do_s2)
+
+ subu sp, 64
+ sw ra, 0(sp)
+ sw s0, 4(sp)
+ sw s1, 8(sp)
+ sw s2, 12(sp)
+ sw s3, 16(sp)
+ sw s4, 20(sp)
+ sw s5, 24(sp)
+ sw s6, 28(sp)
+ sw s7, 32(sp)
+
+ /*
+ * Dereference the params array
+ * s0: AON_CTRL base register
+ * s1: DDR_PHY base register
+ * s2: TIMERS base register
+ * s3: I-Cache line size
+ * s4: Restart vector address
+ * s5: Restart vector size
+ */
+ move t0, a0
+
+ lw s0, 0(t0)
+ lw s1, 4(t0)
+ lw s2, 8(t0)
+ lw s3, 12(t0)
+ lw s4, 16(t0)
+ lw s5, 20(t0)
+
+ /* Lock this asm section into the I-cache */
+ addiu t1, s3, -1
+ not t1
+
+ la t0, brcm_pm_do_s2
+ and t0, t1
+
+ la t2, asm_end
+ and t2, t1
+
+1: cache 0x1c, 0(t0)
+ bne t0, t2, 1b
+ addu t0, s3
+
+ /* Lock the interrupt vector into the I-cache */
+ move t0, zero
+
+2: move t1, s4
+ cache 0x1c, 0(t1)
+ addu t1, s3
+ addu t0, s3
+ ble t0, s5, 2b
+ nop
+
+ sync
+
+ /* Power down request */
+ li t0, PM_S2_COMMAND
+ sw zero, AON_CTRL_PM_CTRL(s0)
+ lw zero, AON_CTRL_PM_CTRL(s0)
+ sw t0, AON_CTRL_PM_CTRL(s0)
+ lw t0, AON_CTRL_PM_CTRL(s0)
+
+ /* Enable CP0 interrupt 2 and wait for interrupt */
+ mfc0 t0, CP0_STATUS
+ /* Save cp0 sr for restoring later */
+ move s6, t0
+
+ li t1, ~(ST0_IM | ST0_IE)
+ and t0, t1
+ ori t0, STATUSF_IP2
+ mtc0 t0, CP0_STATUS
+ nop
+ nop
+ nop
+ ori t0, ST0_IE
+ mtc0 t0, CP0_STATUS
+
+ /* Wait for interrupt */
+ wait
+ nop
+
+ /* Wait for memc0 */
+1: lw t0, DDR40_PHY_CONTROL_REGS_0_PLL_STATUS(s1)
+ andi t0, 1
+ beqz t0, 1b
+ nop
+
+ /* 1ms delay needed for stable recovery */
+ /* Use TIMER1 to count 1 ms */
+ li t0, RESET_TIMER
+ sw t0, TIMER_TIMER1_CTRL(s2)
+ lw t0, TIMER_TIMER1_CTRL(s2)
+
+ li t0, START_TIMER
+ sw t0, TIMER_TIMER1_CTRL(s2)
+ lw t0, TIMER_TIMER1_CTRL(s2)
+
+ /* Prepare delay */
+ li t0, TIMER_MASK
+ lw t1, TIMER_TIMER1_STAT(s2)
+ and t1, t0
+ /* 1 ms == 27000 ticks of the 27 MHz timer */
+ addi t1, 27000
+
+ /* Wait for the timer value to exceed t1 */
+1: lw t0, TIMER_TIMER1_STAT(s2)
+ sgtu t2, t1, t0
+ bnez t2, 1b
+ nop
+
+ /* Power back up */
+ li t1, 1
+ sw t1, AON_CTRL_HOST_MISC_CMDS(s0)
+ lw t1, AON_CTRL_HOST_MISC_CMDS(s0)
+
+ sw zero, AON_CTRL_PM_CTRL(s0)
+ lw zero, AON_CTRL_PM_CTRL(s0)
+
+ /* Unlock I-cache */
+ addiu t1, s3, -1
+ not t1
+
+ la t0, brcm_pm_do_s2
+ and t0, t1
+
+ la t2, asm_end
+ and t2, t1
+
+1: cache 0x00, 0(t0)
+ bne t0, t2, 1b
+ addu t0, s3
+
+ /* Unlock interrupt vector */
+ move t0, zero
+
+2: move t1, s4
+ cache 0x00, 0(t1)
+ addu t1, s3
+ addu t0, s3
+ ble t0, s5, 2b
+ nop
+
+ /* Restore cp0 sr */
+ sync
+ nop
+ mtc0 s6, CP0_STATUS
+ nop
+
+ /* Set return value to success */
+ li v0, 0
+
+ /* Return to caller */
+ lw s7, 32(sp)
+ lw s6, 28(sp)
+ lw s5, 24(sp)
+ lw s4, 20(sp)
+ lw s3, 16(sp)
+ lw s2, 12(sp)
+ lw s1, 8(sp)
+ lw s0, 4(sp)
+ lw ra, 0(sp)
+ addiu sp, 64
+
+ jr ra
+ nop
+END(brcm_pm_do_s2)
+
+ .globl asm_end
+asm_end:
+ nop
+
diff --git a/drivers/soc/bcm/brcmstb/pm/s3-mips.S b/drivers/soc/bcm/brcmstb/pm/s3-mips.S
new file mode 100644
index 000000000..1242308a8
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/pm/s3-mips.S
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2016 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/asm.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/bmips.h>
+
+#include "pm.h"
+
+ .text
+ .set noreorder
+ .align 5
+ .global s3_reentry
+
+/*
+ * a0: AON_CTRL base register
+ * a1: D-Cache line size
+ */
+LEAF(brcm_pm_do_s3)
+
+ /* Save the general purpose registers into gp_regs */
+ la t0, gp_regs
+ sw ra, 0(t0)
+ sw s0, 4(t0)
+ sw s1, 8(t0)
+ sw s2, 12(t0)
+ sw s3, 16(t0)
+ sw s4, 20(t0)
+ sw s5, 24(t0)
+ sw s6, 28(t0)
+ sw s7, 32(t0)
+ sw gp, 36(t0)
+ sw sp, 40(t0)
+ sw fp, 44(t0)
+
+ /* Save CP0 Status */
+ mfc0 t1, CP0_STATUS
+ sw t1, 48(t0)
+
+ /* Write-back gp registers - cache will be gone */
+ addiu t1, a1, -1
+ not t1
+ and t0, t1
+
+ /* Flush at least 64 bytes */
+ addiu t2, t0, 64
+ and t2, t1
+
+1: cache 0x17, 0(t0)
+ bne t0, t2, 1b
+ addu t0, a1
+
+ /* Drop to deep standby */
+ li t1, PM_WARM_CONFIG
+ sw zero, AON_CTRL_PM_CTRL(a0)
+ lw zero, AON_CTRL_PM_CTRL(a0)
+ sw t1, AON_CTRL_PM_CTRL(a0)
+ lw t1, AON_CTRL_PM_CTRL(a0)
+
+ li t1, (PM_WARM_CONFIG | PM_PWR_DOWN)
+ sw t1, AON_CTRL_PM_CTRL(a0)
+ lw t1, AON_CTRL_PM_CTRL(a0)
+
+ /* Enable CP0 interrupt 2 and wait for interrupt */
+ mfc0 t0, CP0_STATUS
+
+ li t1, ~(ST0_IM | ST0_IE)
+ and t0, t1
+ ori t0, STATUSF_IP2
+ mtc0 t0, CP0_STATUS
+ nop
+ nop
+ nop
+ ori t0, ST0_IE
+ mtc0 t0, CP0_STATUS
+
+ /* Wait for interrupt */
+ wait
+ nop
+
+s3_reentry:
+
+ /* Clear call/return stack */
+ li t0, (0x06 << 16)
+ mtc0 t0, $22, 2
+ ssnop
+ ssnop
+ ssnop
+
+ /* Clear jump target buffer */
+ li t0, (0x04 << 16)
+ mtc0 t0, $22, 2
+ ssnop
+ ssnop
+ ssnop
+
+ sync
+ nop
+
+ /* Setup mmu defaults */
+ mtc0 zero, CP0_WIRED
+ mtc0 zero, CP0_ENTRYHI
+ li k0, PM_DEFAULT_MASK
+ mtc0 k0, CP0_PAGEMASK
+
+ li sp, BMIPS_WARM_RESTART_VEC
+ la k0, plat_wired_tlb_setup
+ jalr k0
+ nop
+
+ /* Restore general purpose registers */
+ la t0, gp_regs
+ lw fp, 44(t0)
+ lw sp, 40(t0)
+ lw gp, 36(t0)
+ lw s7, 32(t0)
+ lw s6, 28(t0)
+ lw s5, 24(t0)
+ lw s4, 20(t0)
+ lw s3, 16(t0)
+ lw s2, 12(t0)
+ lw s1, 8(t0)
+ lw s0, 4(t0)
+ lw ra, 0(t0)
+
+ /* Restore CP0 status */
+ lw t1, 48(t0)
+ mtc0 t1, CP0_STATUS
+
+ /* Return to caller */
+ li v0, 0
+ jr ra
+ nop
+
+END(brcm_pm_do_s3)
diff --git a/drivers/soc/bcm/raspberrypi-power.c b/drivers/soc/bcm/raspberrypi-power.c
new file mode 100644
index 000000000..a78dfe0a2
--- /dev/null
+++ b/drivers/soc/bcm/raspberrypi-power.c
@@ -0,0 +1,249 @@
+/* (C) 2015 Pengutronix, Alexander Aring <aar@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Authors:
+ * Alexander Aring <aar@pengutronix.de>
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <dt-bindings/power/raspberrypi-power.h>
+#include <soc/bcm2835/raspberrypi-firmware.h>
+
+/*
+ * Firmware indices for the old power domains interface. Only a few
+ * of them were actually implemented.
+ */
+#define RPI_OLD_POWER_DOMAIN_USB 3
+#define RPI_OLD_POWER_DOMAIN_V3D 10
+
+struct rpi_power_domain {
+ u32 domain;
+ bool enabled;
+ bool old_interface;
+ struct generic_pm_domain base;
+ struct rpi_firmware *fw;
+};
+
+struct rpi_power_domains {
+ bool has_new_interface;
+ struct genpd_onecell_data xlate;
+ struct rpi_firmware *fw;
+ struct rpi_power_domain domains[RPI_POWER_DOMAIN_COUNT];
+};
+
+/*
+ * Packet definition used by RPI_FIRMWARE_SET_POWER_STATE and
+ * RPI_FIRMWARE_SET_DOMAIN_STATE
+ */
+struct rpi_power_domain_packet {
+ u32 domain;
+ u32 on;
+};
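+
+/*
+ * For RPI_FIRMWARE_GET_DOMAIN_STATE the same buffer is used in both
+ * directions: the firmware overwrites @on with the current state (see
+ * rpi_has_new_domain_support()).
+ */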
+
+/*
+ * Asks the firmware to enable or disable power on a specific power
+ * domain.
+ */
+static int rpi_firmware_set_power(struct rpi_power_domain *rpi_domain, bool on)
+{
+ struct rpi_power_domain_packet packet;
+
+ packet.domain = rpi_domain->domain;
+ packet.on = on;
+ return rpi_firmware_property(rpi_domain->fw,
+ rpi_domain->old_interface ?
+ RPI_FIRMWARE_SET_POWER_STATE :
+ RPI_FIRMWARE_SET_DOMAIN_STATE,
+ &packet, sizeof(packet));
+}
+
+static int rpi_domain_off(struct generic_pm_domain *domain)
+{
+ struct rpi_power_domain *rpi_domain =
+ container_of(domain, struct rpi_power_domain, base);
+
+ return rpi_firmware_set_power(rpi_domain, false);
+}
+
+static int rpi_domain_on(struct generic_pm_domain *domain)
+{
+ struct rpi_power_domain *rpi_domain =
+ container_of(domain, struct rpi_power_domain, base);
+
+ return rpi_firmware_set_power(rpi_domain, true);
+}
+
+static void rpi_common_init_power_domain(struct rpi_power_domains *rpi_domains,
+ int xlate_index, const char *name)
+{
+ struct rpi_power_domain *dom = &rpi_domains->domains[xlate_index];
+
+ dom->fw = rpi_domains->fw;
+
+ dom->base.name = name;
+ dom->base.power_on = rpi_domain_on;
+ dom->base.power_off = rpi_domain_off;
+
+ /*
+ * Treat all power domains as off at boot.
+ *
+ * The firmware itself may be keeping some domains on, but
+ * from Linux's perspective all we control is the refcounts
+ * that we give to the firmware, and we can't ask the firmware
+ * to turn off something that we haven't ourselves turned on.
+ */
+ pm_genpd_init(&dom->base, NULL, true);
+
+ rpi_domains->xlate.domains[xlate_index] = &dom->base;
+}
+
+static void rpi_init_power_domain(struct rpi_power_domains *rpi_domains,
+ int xlate_index, const char *name)
+{
+ struct rpi_power_domain *dom = &rpi_domains->domains[xlate_index];
+
+ if (!rpi_domains->has_new_interface)
+ return;
+
+ /* The DT binding index is the firmware's domain index minus one. */
+ dom->domain = xlate_index + 1;
+
+ rpi_common_init_power_domain(rpi_domains, xlate_index, name);
+}
+
+static void rpi_init_old_power_domain(struct rpi_power_domains *rpi_domains,
+ int xlate_index, int domain,
+ const char *name)
+{
+ struct rpi_power_domain *dom = &rpi_domains->domains[xlate_index];
+
+ dom->old_interface = true;
+ dom->domain = domain;
+
+ rpi_common_init_power_domain(rpi_domains, xlate_index, name);
+}
+
+/*
+ * Detects whether the firmware supports the new power domains interface.
+ *
+ * The firmware doesn't actually return an error on an unknown tag,
+ * and just skips over it, so we do the detection by putting an
+ * unexpected value in the return field and checking if it was
+ * unchanged.
+ */
+static bool
+rpi_has_new_domain_support(struct rpi_power_domains *rpi_domains)
+{
+ struct rpi_power_domain_packet packet;
+ int ret;
+
+ packet.domain = RPI_POWER_DOMAIN_ARM;
+ packet.on = ~0;
+
+ ret = rpi_firmware_property(rpi_domains->fw,
+ RPI_FIRMWARE_GET_DOMAIN_STATE,
+ &packet, sizeof(packet));
+
+ return ret == 0 && packet.on != ~0;
+}
+
+static int rpi_power_probe(struct platform_device *pdev)
+{
+ struct device_node *fw_np;
+ struct device *dev = &pdev->dev;
+ struct rpi_power_domains *rpi_domains;
+
+ rpi_domains = devm_kzalloc(dev, sizeof(*rpi_domains), GFP_KERNEL);
+ if (!rpi_domains)
+ return -ENOMEM;
+
+ rpi_domains->xlate.domains =
+ devm_kcalloc(dev,
+ RPI_POWER_DOMAIN_COUNT,
+ sizeof(*rpi_domains->xlate.domains),
+ GFP_KERNEL);
+ if (!rpi_domains->xlate.domains)
+ return -ENOMEM;
+
+ rpi_domains->xlate.num_domains = RPI_POWER_DOMAIN_COUNT;
+
+ fw_np = of_parse_phandle(pdev->dev.of_node, "firmware", 0);
+ if (!fw_np) {
+ dev_err(&pdev->dev, "no firmware node\n");
+ return -ENODEV;
+ }
+
+ rpi_domains->fw = rpi_firmware_get(fw_np);
+ of_node_put(fw_np);
+ if (!rpi_domains->fw)
+ return -EPROBE_DEFER;
+
+ rpi_domains->has_new_interface =
+ rpi_has_new_domain_support(rpi_domains);
+
+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_I2C0, "I2C0");
+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_I2C1, "I2C1");
+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_I2C2, "I2C2");
+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_VIDEO_SCALER,
+ "VIDEO_SCALER");
+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_VPU1, "VPU1");
+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_HDMI, "HDMI");
+
+ /*
+ * Use the old firmware interface for USB power, so that we
+ * can turn it on even if the firmware hasn't been updated.
+ */
+ rpi_init_old_power_domain(rpi_domains, RPI_POWER_DOMAIN_USB,
+ RPI_OLD_POWER_DOMAIN_USB, "USB");
+
+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_VEC, "VEC");
+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_JPEG, "JPEG");
+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_H264, "H264");
+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_V3D, "V3D");
+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_ISP, "ISP");
+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_UNICAM0, "UNICAM0");
+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_UNICAM1, "UNICAM1");
+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_CCP2RX, "CCP2RX");
+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_CSI2, "CSI2");
+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_CPI, "CPI");
+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_DSI0, "DSI0");
+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_DSI1, "DSI1");
+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_TRANSPOSER,
+ "TRANSPOSER");
+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_CCP2TX, "CCP2TX");
+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_CDP, "CDP");
+ rpi_init_power_domain(rpi_domains, RPI_POWER_DOMAIN_ARM, "ARM");
+
+ of_genpd_add_provider_onecell(dev->of_node, &rpi_domains->xlate);
+
+ platform_set_drvdata(pdev, rpi_domains);
+
+ return 0;
+}
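+
+/*
+ * Illustrative devicetree usage (a sketch, not taken from a real DTS;
+ * node names and unit addresses are assumptions). The provider exposes
+ * one cell, so consumers reference a domain by its RPI_POWER_DOMAIN_*
+ * index:
+ *
+ * power: power {
+ * compatible = "raspberrypi,bcm2835-power";
+ * firmware = <&firmware>;
+ * #power-domain-cells = <1>;
+ * };
+ *
+ * usb: usb@7e980000 {
+ * ...
+ * power-domains = <&power RPI_POWER_DOMAIN_USB>;
+ * };
+ */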
+
+static const struct of_device_id rpi_power_of_match[] = {
+ { .compatible = "raspberrypi,bcm2835-power", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rpi_power_of_match);
+
+static struct platform_driver rpi_power_driver = {
+ .driver = {
+ .name = "raspberrypi-power",
+ .of_match_table = rpi_power_of_match,
+ },
+ .probe = rpi_power_probe,
+};
+builtin_platform_driver(rpi_power_driver);
+
+MODULE_AUTHOR("Alexander Aring <aar@pengutronix.de>");
+MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
+MODULE_DESCRIPTION("Raspberry Pi power domain driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/dove/Makefile b/drivers/soc/dove/Makefile
new file mode 100644
index 000000000..2db8e6551
--- /dev/null
+++ b/drivers/soc/dove/Makefile
@@ -0,0 +1 @@
+obj-y += pmu.o
diff --git a/drivers/soc/dove/pmu.c b/drivers/soc/dove/pmu.c
new file mode 100644
index 000000000..5abb08ffb
--- /dev/null
+++ b/drivers/soc/dove/pmu.c
@@ -0,0 +1,455 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Marvell Dove PMU support
+ */
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/reset.h>
+#include <linux/reset-controller.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/soc/dove/pmu.h>
+#include <linux/spinlock.h>
+
+#define NR_PMU_IRQS 7
+
+#define PMC_SW_RST 0x30
+#define PMC_IRQ_CAUSE 0x50
+#define PMC_IRQ_MASK 0x54
+
+#define PMU_PWR 0x10
+#define PMU_ISO 0x58
+
+struct pmu_data {
+ spinlock_t lock;
+ struct device_node *of_node;
+ void __iomem *pmc_base;
+ void __iomem *pmu_base;
+ struct irq_chip_generic *irq_gc;
+ struct irq_domain *irq_domain;
+#ifdef CONFIG_RESET_CONTROLLER
+ struct reset_controller_dev reset;
+#endif
+};
+
+/*
+ * The PMU contains a register to reset various subsystems within the
+ * SoC. Export this as a reset controller.
+ */
+#ifdef CONFIG_RESET_CONTROLLER
+#define rcdev_to_pmu(rcdev) container_of(rcdev, struct pmu_data, reset)
+
+static int pmu_reset_reset(struct reset_controller_dev *rc, unsigned long id)
+{
+ struct pmu_data *pmu = rcdev_to_pmu(rc);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&pmu->lock, flags);
+ val = readl_relaxed(pmu->pmc_base + PMC_SW_RST);
+ writel_relaxed(val & ~BIT(id), pmu->pmc_base + PMC_SW_RST);
+ writel_relaxed(val | BIT(id), pmu->pmc_base + PMC_SW_RST);
+ spin_unlock_irqrestore(&pmu->lock, flags);
+
+ return 0;
+}
+
+static int pmu_reset_assert(struct reset_controller_dev *rc, unsigned long id)
+{
+ struct pmu_data *pmu = rcdev_to_pmu(rc);
+ unsigned long flags;
+ u32 val = ~BIT(id);
+
+ spin_lock_irqsave(&pmu->lock, flags);
+ val &= readl_relaxed(pmu->pmc_base + PMC_SW_RST);
+ writel_relaxed(val, pmu->pmc_base + PMC_SW_RST);
+ spin_unlock_irqrestore(&pmu->lock, flags);
+
+ return 0;
+}
+
+static int pmu_reset_deassert(struct reset_controller_dev *rc, unsigned long id)
+{
+ struct pmu_data *pmu = rcdev_to_pmu(rc);
+ unsigned long flags;
+ u32 val = BIT(id);
+
+ spin_lock_irqsave(&pmu->lock, flags);
+ val |= readl_relaxed(pmu->pmc_base + PMC_SW_RST);
+ writel_relaxed(val, pmu->pmc_base + PMC_SW_RST);
+ spin_unlock_irqrestore(&pmu->lock, flags);
+
+ return 0;
+}
+
+static const struct reset_control_ops pmu_reset_ops = {
+ .reset = pmu_reset_reset,
+ .assert = pmu_reset_assert,
+ .deassert = pmu_reset_deassert,
+};
+
+static struct reset_controller_dev pmu_reset __initdata = {
+ .ops = &pmu_reset_ops,
+ .owner = THIS_MODULE,
+ .nr_resets = 32,
+};
+
+static void __init pmu_reset_init(struct pmu_data *pmu)
+{
+ int ret;
+
+ pmu->reset = pmu_reset;
+ pmu->reset.of_node = pmu->of_node;
+
+ ret = reset_controller_register(&pmu->reset);
+ if (ret)
+ pr_err("pmu: %s failed: %d\n", "reset_controller_register", ret);
+}
+#else
+static void __init pmu_reset_init(struct pmu_data *pmu)
+{
+}
+#endif
+
+struct pmu_domain {
+ struct pmu_data *pmu;
+ u32 pwr_mask;
+ u32 rst_mask;
+ u32 iso_mask;
+ struct generic_pm_domain base;
+};
+
+#define to_pmu_domain(dom) container_of(dom, struct pmu_domain, base)
+
+/*
+ * This deals with the "old" Marvell sequence of bringing a power domain
+ * down/up, which is: apply power, release reset, disable isolators.
+ *
+ * Later devices apparently use a different sequence: power up, disable
+ * isolators, assert repair signal, enable SRMA clock, enable AXI clock,
+ * enable module clock, deassert reset.
+ *
+ * Note: reading the assembly, it seems that the IO accessors have an
+ * unfortunate side-effect - they cause memory already read into registers
+ * for the if () to be re-read for the bit-set or bit-clear operation.
+ * The code is written to avoid this.
+ */
+static int pmu_domain_power_off(struct generic_pm_domain *domain)
+{
+ struct pmu_domain *pmu_dom = to_pmu_domain(domain);
+ struct pmu_data *pmu = pmu_dom->pmu;
+ unsigned long flags;
+ unsigned int val;
+ void __iomem *pmu_base = pmu->pmu_base;
+ void __iomem *pmc_base = pmu->pmc_base;
+
+ spin_lock_irqsave(&pmu->lock, flags);
+
+ /* Enable isolators */
+ if (pmu_dom->iso_mask) {
+ val = ~pmu_dom->iso_mask;
+ val &= readl_relaxed(pmu_base + PMU_ISO);
+ writel_relaxed(val, pmu_base + PMU_ISO);
+ }
+
+ /* Reset unit */
+ if (pmu_dom->rst_mask) {
+ val = ~pmu_dom->rst_mask;
+ val &= readl_relaxed(pmc_base + PMC_SW_RST);
+ writel_relaxed(val, pmc_base + PMC_SW_RST);
+ }
+
+ /* Power down */
+ val = readl_relaxed(pmu_base + PMU_PWR) | pmu_dom->pwr_mask;
+ writel_relaxed(val, pmu_base + PMU_PWR);
+
+ spin_unlock_irqrestore(&pmu->lock, flags);
+
+ return 0;
+}
+
+static int pmu_domain_power_on(struct generic_pm_domain *domain)
+{
+ struct pmu_domain *pmu_dom = to_pmu_domain(domain);
+ struct pmu_data *pmu = pmu_dom->pmu;
+ unsigned long flags;
+ unsigned int val;
+ void __iomem *pmu_base = pmu->pmu_base;
+ void __iomem *pmc_base = pmu->pmc_base;
+
+ spin_lock_irqsave(&pmu->lock, flags);
+
+ /* Power on */
+ val = ~pmu_dom->pwr_mask & readl_relaxed(pmu_base + PMU_PWR);
+ writel_relaxed(val, pmu_base + PMU_PWR);
+
+ /* Release reset */
+ if (pmu_dom->rst_mask) {
+ val = pmu_dom->rst_mask;
+ val |= readl_relaxed(pmc_base + PMC_SW_RST);
+ writel_relaxed(val, pmc_base + PMC_SW_RST);
+ }
+
+ /* Disable isolators */
+ if (pmu_dom->iso_mask) {
+ val = pmu_dom->iso_mask;
+ val |= readl_relaxed(pmu_base + PMU_ISO);
+ writel_relaxed(val, pmu_base + PMU_ISO);
+ }
+
+ spin_unlock_irqrestore(&pmu->lock, flags);
+
+ return 0;
+}
+
+static void __pmu_domain_register(struct pmu_domain *domain,
+ struct device_node *np)
+{
+ unsigned int val = readl_relaxed(domain->pmu->pmu_base + PMU_PWR);
+
+ domain->base.power_off = pmu_domain_power_off;
+ domain->base.power_on = pmu_domain_power_on;
+
+ pm_genpd_init(&domain->base, NULL, !(val & domain->pwr_mask));
+
+ if (np)
+ of_genpd_add_provider_simple(np, &domain->base);
+}
+
+/* PMU IRQ controller */
+static void pmu_irq_handler(struct irq_desc *desc)
+{
+ struct pmu_data *pmu = irq_desc_get_handler_data(desc);
+ struct irq_chip_generic *gc = pmu->irq_gc;
+ struct irq_domain *domain = pmu->irq_domain;
+ void __iomem *base = gc->reg_base;
+ u32 stat = readl_relaxed(base + PMC_IRQ_CAUSE) & gc->mask_cache;
+ u32 done = ~0;
+
+ if (stat == 0) {
+ handle_bad_irq(desc);
+ return;
+ }
+
+ while (stat) {
+ u32 hwirq = fls(stat) - 1;
+
+ stat &= ~(1 << hwirq);
+ done &= ~(1 << hwirq);
+
+ generic_handle_irq(irq_find_mapping(domain, hwirq));
+ }
+
+ /*
+ * The PMU mask register is not RW0C: it is RW. This means that
+ * the bits take whatever value is written to them; if you write
+ * a '1', you will set the interrupt.
+ *
+ * Unfortunately this means there is NO race free way to clear
+ * these interrupts.
+ *
+ * So, let's structure the code so that the window is as small as
+ * possible.
+ */
+ irq_gc_lock(gc);
+ done &= readl_relaxed(base + PMC_IRQ_CAUSE);
+ writel_relaxed(done, base + PMC_IRQ_CAUSE);
+ irq_gc_unlock(gc);
+}
+
+static int __init dove_init_pmu_irq(struct pmu_data *pmu, int irq)
+{
+ const char *name = "pmu_irq";
+ struct irq_chip_generic *gc;
+ struct irq_domain *domain;
+ int ret;
+
+ /* mask and clear all interrupts */
+ writel(0, pmu->pmc_base + PMC_IRQ_MASK);
+ writel(0, pmu->pmc_base + PMC_IRQ_CAUSE);
+
+ domain = irq_domain_add_linear(pmu->of_node, NR_PMU_IRQS,
+ &irq_generic_chip_ops, NULL);
+ if (!domain) {
+ pr_err("%s: unable to add irq domain\n", name);
+ return -ENOMEM;
+ }
+
+ ret = irq_alloc_domain_generic_chips(domain, NR_PMU_IRQS, 1, name,
+ handle_level_irq,
+ IRQ_NOREQUEST | IRQ_NOPROBE, 0,
+ IRQ_GC_INIT_MASK_CACHE);
+ if (ret) {
+ pr_err("%s: unable to alloc irq domain gc: %d\n", name, ret);
+ irq_domain_remove(domain);
+ return ret;
+ }
+
+ gc = irq_get_domain_generic_chip(domain, 0);
+ gc->reg_base = pmu->pmc_base;
+ gc->chip_types[0].regs.mask = PMC_IRQ_MASK;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
+
+ pmu->irq_domain = domain;
+ pmu->irq_gc = gc;
+
+ irq_set_handler_data(irq, pmu);
+ irq_set_chained_handler(irq, pmu_irq_handler);
+
+ return 0;
+}
+
+int __init dove_init_pmu_legacy(const struct dove_pmu_initdata *initdata)
+{
+ const struct dove_pmu_domain_initdata *domain_initdata;
+ struct pmu_data *pmu;
+ int ret;
+
+ pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
+ if (!pmu)
+ return -ENOMEM;
+
+ spin_lock_init(&pmu->lock);
+ pmu->pmc_base = initdata->pmc_base;
+ pmu->pmu_base = initdata->pmu_base;
+
+ pmu_reset_init(pmu);
+ for (domain_initdata = initdata->domains; domain_initdata->name;
+ domain_initdata++) {
+ struct pmu_domain *domain;
+
+ domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+ if (domain) {
+ domain->pmu = pmu;
+ domain->pwr_mask = domain_initdata->pwr_mask;
+ domain->rst_mask = domain_initdata->rst_mask;
+ domain->iso_mask = domain_initdata->iso_mask;
+ domain->base.name = domain_initdata->name;
+
+ __pmu_domain_register(domain, NULL);
+ }
+ }
+
+ ret = dove_init_pmu_irq(pmu, initdata->irq);
+ if (ret)
+ pr_err("dove_init_pmu_irq() failed: %d\n", ret);
+
+ if (pmu->irq_domain)
+ irq_domain_associate_many(pmu->irq_domain,
+ initdata->irq_domain_start,
+ 0, NR_PMU_IRQS);
+
+ return 0;
+}
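+
+/*
+ * A minimal legacy-platform sketch (hypothetical board code; the field
+ * names follow the accesses above, and the masks mirror the DT example
+ * below). A NULL 'name' terminates the domain array:
+ *
+ * static const struct dove_pmu_domain_initdata pmu_domains[] = {
+ * { .name = "vpu", .pwr_mask = 0x8, .iso_mask = 0x1, .rst_mask = BIT(16) },
+ * { .name = "gpu", .pwr_mask = 0x4, .iso_mask = 0x2, .rst_mask = BIT(18) },
+ * { },
+ * };
+ *
+ * static const struct dove_pmu_initdata pmu_initdata = {
+ * .pmc_base = DOVE_PMC_VIRT_BASE, // assumed mapping/IRQ constants
+ * .pmu_base = DOVE_PMU_VIRT_BASE,
+ * .irq = IRQ_DOVE_PMU,
+ * .irq_domain_start = IRQ_DOVE_PMU_START,
+ * .domains = pmu_domains,
+ * };
+ *
+ * dove_init_pmu_legacy(&pmu_initdata);
+ */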
+
+/*
+ * pmu: power-manager@d0000 {
+ * compatible = "marvell,dove-pmu";
+ * reg = <0xd0000 0x8000> <0xd8000 0x8000>;
+ * interrupts = <33>;
+ * interrupt-controller;
+ * #reset-cells = 1;
+ * vpu_domain: vpu-domain {
+ * #power-domain-cells = <0>;
+ * marvell,pmu_pwr_mask = <0x00000008>;
+ * marvell,pmu_iso_mask = <0x00000001>;
+ * resets = <&pmu 16>;
+ * };
+ * gpu_domain: gpu-domain {
+ * #power-domain-cells = <0>;
+ * marvell,pmu_pwr_mask = <0x00000004>;
+ * marvell,pmu_iso_mask = <0x00000002>;
+ * resets = <&pmu 18>;
+ * };
+ * };
+ */
+int __init dove_init_pmu(void)
+{
+ struct device_node *np_pmu, *domains_node, *np;
+ struct pmu_data *pmu;
+ int ret, parent_irq;
+
+ /* Lookup the PMU node */
+ np_pmu = of_find_compatible_node(NULL, NULL, "marvell,dove-pmu");
+ if (!np_pmu)
+ return 0;
+
+ domains_node = of_get_child_by_name(np_pmu, "domains");
+ if (!domains_node) {
+ pr_err("%s: failed to find domains sub-node\n", np_pmu->name);
+ return 0;
+ }
+
+ pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
+ if (!pmu)
+ return -ENOMEM;
+
+ spin_lock_init(&pmu->lock);
+ pmu->of_node = np_pmu;
+ pmu->pmc_base = of_iomap(pmu->of_node, 0);
+ pmu->pmu_base = of_iomap(pmu->of_node, 1);
+ if (!pmu->pmc_base || !pmu->pmu_base) {
+ pr_err("%s: failed to map PMU\n", np_pmu->name);
+ iounmap(pmu->pmu_base);
+ iounmap(pmu->pmc_base);
+ kfree(pmu);
+ return -ENOMEM;
+ }
+
+ pmu_reset_init(pmu);
+
+ for_each_available_child_of_node(domains_node, np) {
+ struct of_phandle_args args;
+ struct pmu_domain *domain;
+
+ domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+ if (!domain)
+ break;
+
+ domain->pmu = pmu;
+ domain->base.name = kstrdup(np->name, GFP_KERNEL);
+ if (!domain->base.name) {
+ kfree(domain);
+ break;
+ }
+
+ of_property_read_u32(np, "marvell,pmu_pwr_mask",
+ &domain->pwr_mask);
+ of_property_read_u32(np, "marvell,pmu_iso_mask",
+ &domain->iso_mask);
+
+ /*
+ * We parse the reset controller property directly here
+ * to ensure that we can operate when the reset controller
+ * support is not configured into the kernel.
+ */
+ ret = of_parse_phandle_with_args(np, "resets", "#reset-cells",
+ 0, &args);
+ if (ret == 0) {
+ if (args.np == pmu->of_node)
+ domain->rst_mask = BIT(args.args[0]);
+ of_node_put(args.np);
+ }
+
+ __pmu_domain_register(domain, np);
+ }
+
+ /* Loss of the interrupt controller is not a fatal error. */
+ parent_irq = irq_of_parse_and_map(pmu->of_node, 0);
+ if (!parent_irq) {
+ pr_err("%s: no interrupt specified\n", np_pmu->name);
+ } else {
+ ret = dove_init_pmu_irq(pmu, parent_irq);
+ if (ret)
+ pr_err("dove_init_pmu_irq() failed: %d\n", ret);
+ }
+
+ return 0;
+}
diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig
new file mode 100644
index 000000000..8f80e8bbf
--- /dev/null
+++ b/drivers/soc/fsl/Kconfig
@@ -0,0 +1,31 @@
+#
+# NXP/Freescale QorIQ series SOC drivers
+#
+
+menu "NXP/Freescale QorIQ SoC drivers"
+
+source "drivers/soc/fsl/qbman/Kconfig"
+source "drivers/soc/fsl/qe/Kconfig"
+
+config FSL_GUTS
+ bool
+ select SOC_BUS
+ help
+ The global utilities block controls power management, I/O device
+ enabling, power-on reset (POR) configuration monitoring, alternate
+ function selection for multiplexed signals, and clock control.
+ This driver manages and provides access to the global utilities block.
+ Initially only reading the SVR and registering the SoC device are
+ supported. Other guts accesses, such as reading RCW, should eventually
+ be moved into this driver as well.
+
+config FSL_MC_DPIO
+ tristate "QorIQ DPAA2 DPIO driver"
+ depends on FSL_MC_BUS
+ help
+ Driver for the DPAA2 DPIO object. A DPIO provides queue and
+ buffer management facilities for software to interact with
+ other DPAA2 objects. This driver does not expose the DPIO
+ objects individually, but groups them under a service layer
+ API.
+endmenu
diff --git a/drivers/soc/fsl/Makefile b/drivers/soc/fsl/Makefile
new file mode 100644
index 000000000..803ef1bfb
--- /dev/null
+++ b/drivers/soc/fsl/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the Linux Kernel SOC fsl specific device drivers
+#
+
+obj-$(CONFIG_FSL_DPAA) += qbman/
+obj-$(CONFIG_QUICC_ENGINE) += qe/
+obj-$(CONFIG_CPM) += qe/
+obj-$(CONFIG_FSL_GUTS) += guts.o
+obj-$(CONFIG_FSL_MC_DPIO) += dpio/
diff --git a/drivers/soc/fsl/dpio/Makefile b/drivers/soc/fsl/dpio/Makefile
new file mode 100644
index 000000000..b9ff24c76
--- /dev/null
+++ b/drivers/soc/fsl/dpio/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# QorIQ DPAA2 DPIO driver
+#
+
+obj-$(CONFIG_FSL_MC_DPIO) += fsl-mc-dpio.o
+
+fsl-mc-dpio-objs := dpio.o qbman-portal.o dpio-service.o dpio-driver.o
diff --git a/drivers/soc/fsl/dpio/dpio-cmd.h b/drivers/soc/fsl/dpio/dpio-cmd.h
new file mode 100644
index 000000000..ab8f82ee7
--- /dev/null
+++ b/drivers/soc/fsl/dpio/dpio-cmd.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+#ifndef _FSL_DPIO_CMD_H
+#define _FSL_DPIO_CMD_H
+
+/* DPIO Version */
+#define DPIO_VER_MAJOR 4
+#define DPIO_VER_MINOR 2
+
+/* Command Versioning */
+
+#define DPIO_CMD_ID_OFFSET 4
+#define DPIO_CMD_BASE_VERSION 1
+
+#define DPIO_CMD(id) (((id) << DPIO_CMD_ID_OFFSET) | DPIO_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPIO_CMDID_CLOSE DPIO_CMD(0x800)
+#define DPIO_CMDID_OPEN DPIO_CMD(0x803)
+#define DPIO_CMDID_GET_API_VERSION DPIO_CMD(0xa03)
+#define DPIO_CMDID_ENABLE DPIO_CMD(0x002)
+#define DPIO_CMDID_DISABLE DPIO_CMD(0x003)
+#define DPIO_CMDID_GET_ATTR DPIO_CMD(0x004)
+
+struct dpio_cmd_open {
+ __le32 dpio_id;
+};
+
+#define DPIO_CHANNEL_MODE_MASK 0x3
+
+struct dpio_rsp_get_attr {
+ /* cmd word 0 */
+ __le32 id;
+ __le16 qbman_portal_id;
+ u8 num_priorities;
+ u8 channel_mode;
+ /* cmd word 1 */
+ __le64 qbman_portal_ce_addr;
+ /* cmd word 2 */
+ __le64 qbman_portal_ci_addr;
+ /* cmd word 3 */
+ __le32 qbman_version;
+};
+
+#endif /* _FSL_DPIO_CMD_H */
diff --git a/drivers/soc/fsl/dpio/dpio-driver.c b/drivers/soc/fsl/dpio/dpio-driver.c
new file mode 100644
index 000000000..ea6f8904c
--- /dev/null
+++ b/drivers/soc/fsl/dpio/dpio-driver.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright NXP 2016
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/msi.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <linux/fsl/mc.h>
+#include <soc/fsl/dpaa2-io.h>
+
+#include "qbman-portal.h"
+#include "dpio.h"
+#include "dpio-cmd.h"
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Freescale Semiconductor, Inc");
+MODULE_DESCRIPTION("DPIO Driver");
+
+struct dpio_priv {
+ struct dpaa2_io *io;
+};
+
+static irqreturn_t dpio_irq_handler(int irq_num, void *arg)
+{
+ struct device *dev = (struct device *)arg;
+ struct dpio_priv *priv = dev_get_drvdata(dev);
+
+ return dpaa2_io_irq(priv->io);
+}
+
+static void unregister_dpio_irq_handlers(struct fsl_mc_device *dpio_dev)
+{
+ struct fsl_mc_device_irq *irq;
+
+ irq = dpio_dev->irqs[0];
+
+ /* clear the affinity hint */
+ irq_set_affinity_hint(irq->msi_desc->irq, NULL);
+}
+
+static int register_dpio_irq_handlers(struct fsl_mc_device *dpio_dev, int cpu)
+{
+ struct dpio_priv *priv;
+ int error;
+ struct fsl_mc_device_irq *irq;
+
+ priv = dev_get_drvdata(&dpio_dev->dev);
+
+ irq = dpio_dev->irqs[0];
+ error = devm_request_irq(&dpio_dev->dev,
+ irq->msi_desc->irq,
+ dpio_irq_handler,
+ 0,
+ dev_name(&dpio_dev->dev),
+ &dpio_dev->dev);
+ if (error < 0) {
+ dev_err(&dpio_dev->dev,
+ "devm_request_irq() failed: %d\n",
+ error);
+ return error;
+ }
+
+ /* set the affinity hint */
+ if (irq_set_affinity_hint(irq->msi_desc->irq, cpumask_of(cpu)))
+ dev_err(&dpio_dev->dev,
+ "irq_set_affinity failed irq %d cpu %d\n",
+ irq->msi_desc->irq, cpu);
+
+ return 0;
+}
+
+static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
+{
+ struct dpio_attr dpio_attrs;
+ struct dpaa2_io_desc desc;
+ struct dpio_priv *priv;
+ int err = -ENOMEM;
+ struct device *dev = &dpio_dev->dev;
+ static int next_cpu = -1;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ goto err_priv_alloc;
+
+ dev_set_drvdata(dev, priv);
+
+ err = fsl_mc_portal_allocate(dpio_dev, 0, &dpio_dev->mc_io);
+ if (err) {
+ dev_dbg(dev, "MC portal allocation failed\n");
+ err = -EPROBE_DEFER;
+ goto err_priv_alloc;
+ }
+
+ err = dpio_open(dpio_dev->mc_io, 0, dpio_dev->obj_desc.id,
+ &dpio_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpio_open() failed\n");
+ goto err_open;
+ }
+
+ err = dpio_get_attributes(dpio_dev->mc_io, 0, dpio_dev->mc_handle,
+ &dpio_attrs);
+ if (err) {
+ dev_err(dev, "dpio_get_attributes() failed %d\n", err);
+ goto err_get_attr;
+ }
+ desc.qman_version = dpio_attrs.qbman_version;
+
+ err = dpio_enable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpio_enable() failed %d\n", err);
+ goto err_get_attr;
+ }
+
+ /* initialize DPIO descriptor */
+ desc.receives_notifications = dpio_attrs.num_priorities ? 1 : 0;
+ desc.has_8prio = dpio_attrs.num_priorities == 8 ? 1 : 0;
+ desc.dpio_id = dpio_dev->obj_desc.id;
+
+ /* get the cpu to use for the affinity hint */
+ if (next_cpu == -1)
+ next_cpu = cpumask_first(cpu_online_mask);
+ else
+ next_cpu = cpumask_next(next_cpu, cpu_online_mask);
+
+ if (!cpu_possible(next_cpu)) {
+ dev_err(dev, "probe failed. Number of DPIOs exceeds NR_CPUS.\n");
+ err = -ERANGE;
+ goto err_allocate_irqs;
+ }
+ desc.cpu = next_cpu;
+
+ /*
+ * Set the CENA regs to be the cache inhibited area of the portal to
+ * avoid coherency issues if a user migrates to another core.
+ */
+ desc.regs_cena = devm_memremap(dev, dpio_dev->regions[1].start,
+ resource_size(&dpio_dev->regions[1]),
+ MEMREMAP_WC);
+ if (IS_ERR(desc.regs_cena)) {
+ dev_err(dev, "devm_memremap failed\n");
+ err = PTR_ERR(desc.regs_cena);
+ goto err_allocate_irqs;
+ }
+
+ desc.regs_cinh = devm_ioremap(dev, dpio_dev->regions[1].start,
+ resource_size(&dpio_dev->regions[1]));
+ if (!desc.regs_cinh) {
+ err = -ENOMEM;
+ dev_err(dev, "devm_ioremap failed\n");
+ goto err_allocate_irqs;
+ }
+
+ err = fsl_mc_allocate_irqs(dpio_dev);
+ if (err) {
+ dev_err(dev, "fsl_mc_allocate_irqs failed. err=%d\n", err);
+ goto err_allocate_irqs;
+ }
+
+ err = register_dpio_irq_handlers(dpio_dev, desc.cpu);
+ if (err)
+ goto err_register_dpio_irq;
+
+ priv->io = dpaa2_io_create(&desc);
+ if (!priv->io) {
+ dev_err(dev, "dpaa2_io_create failed\n");
+ err = -ENOMEM;
+ goto err_dpaa2_io_create;
+ }
+
+ dev_info(dev, "probed\n");
+ dev_dbg(dev, " receives_notifications = %d\n",
+ desc.receives_notifications);
+ dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
+ fsl_mc_portal_free(dpio_dev->mc_io);
+
+ return 0;
+
+err_dpaa2_io_create:
+ unregister_dpio_irq_handlers(dpio_dev);
+err_register_dpio_irq:
+ fsl_mc_free_irqs(dpio_dev);
+err_allocate_irqs:
+ dpio_disable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
+err_get_attr:
+ dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
+err_open:
+ fsl_mc_portal_free(dpio_dev->mc_io);
+err_priv_alloc:
+ return err;
+}
+
+/* Tear down interrupts for a given DPIO object */
+static void dpio_teardown_irqs(struct fsl_mc_device *dpio_dev)
+{
+ unregister_dpio_irq_handlers(dpio_dev);
+ fsl_mc_free_irqs(dpio_dev);
+}
+
+static int dpaa2_dpio_remove(struct fsl_mc_device *dpio_dev)
+{
+ struct device *dev;
+ struct dpio_priv *priv;
+ int err;
+
+ dev = &dpio_dev->dev;
+ priv = dev_get_drvdata(dev);
+
+ dpaa2_io_down(priv->io);
+
+ dpio_teardown_irqs(dpio_dev);
+
+ err = fsl_mc_portal_allocate(dpio_dev, 0, &dpio_dev->mc_io);
+ if (err) {
+ dev_err(dev, "MC portal allocation failed\n");
+ goto err_mcportal;
+ }
+
+ err = dpio_open(dpio_dev->mc_io, 0, dpio_dev->obj_desc.id,
+ &dpio_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpio_open() failed\n");
+ goto err_open;
+ }
+
+ dpio_disable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
+
+ dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
+
+ fsl_mc_portal_free(dpio_dev->mc_io);
+
+ return 0;
+
+err_open:
+ fsl_mc_portal_free(dpio_dev->mc_io);
+err_mcportal:
+ return err;
+}
+
+static const struct fsl_mc_device_id dpaa2_dpio_match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpio",
+ },
+ { .vendor = 0x0 }
+};
+
+static struct fsl_mc_driver dpaa2_dpio_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = dpaa2_dpio_probe,
+ .remove = dpaa2_dpio_remove,
+ .match_id_table = dpaa2_dpio_match_id_table
+};
+
+static int dpio_driver_init(void)
+{
+ return fsl_mc_driver_register(&dpaa2_dpio_driver);
+}
+
+static void dpio_driver_exit(void)
+{
+ fsl_mc_driver_unregister(&dpaa2_dpio_driver);
+}
+module_init(dpio_driver_init);
+module_exit(dpio_driver_exit);
diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c
new file mode 100644
index 000000000..9b17f7234
--- /dev/null
+++ b/drivers/soc/fsl/dpio/dpio-service.c
@@ -0,0 +1,545 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+#include <linux/types.h>
+#include <linux/fsl/mc.h>
+#include <soc/fsl/dpaa2-io.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+#include "dpio.h"
+#include "qbman-portal.h"
+
+struct dpaa2_io {
+ struct dpaa2_io_desc dpio_desc;
+ struct qbman_swp_desc swp_desc;
+ struct qbman_swp *swp;
+ struct list_head node;
+ /* protect against multiple management commands */
+ spinlock_t lock_mgmt_cmd;
+ /* protect notifications list */
+ spinlock_t lock_notifications;
+ struct list_head notifications;
+};
+
+struct dpaa2_io_store {
+ unsigned int max;
+ dma_addr_t paddr;
+ struct dpaa2_dq *vaddr;
+ void *alloced_addr; /* unaligned value from kmalloc() */
+ unsigned int idx; /* position of the next-to-be-returned entry */
+ struct qbman_swp *swp; /* portal used to issue VDQCR */
+ struct device *dev; /* device used for DMA mapping */
+};
+
+/* keep a per cpu array of DPIOs for fast access */
+static struct dpaa2_io *dpio_by_cpu[NR_CPUS];
+static struct list_head dpio_list = LIST_HEAD_INIT(dpio_list);
+static DEFINE_SPINLOCK(dpio_list_lock);
+
+static inline struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d,
+ int cpu)
+{
+ if (d)
+ return d;
+
+ if (cpu != DPAA2_IO_ANY_CPU && cpu >= num_possible_cpus())
+ return NULL;
+
+ /*
+ * If cpu == -1, choose the current cpu, with no guarantee that the
+ * caller is not migrated to another cpu afterwards.
+ */
+ if (unlikely(cpu < 0))
+ cpu = smp_processor_id();
+
+ /* If a specific cpu was requested, pick it up immediately */
+ return dpio_by_cpu[cpu];
+}
+
+static inline struct dpaa2_io *service_select(struct dpaa2_io *d)
+{
+ if (d)
+ return d;
+
+ spin_lock(&dpio_list_lock);
+ d = list_entry(dpio_list.next, struct dpaa2_io, node);
+ list_del(&d->node);
+ list_add_tail(&d->node, &dpio_list);
+ spin_unlock(&dpio_list_lock);
+
+ return d;
+}
+
+/**
+ * dpaa2_io_service_select() - return a dpaa2_io service affined to this cpu
+ * @cpu: the cpu id
+ *
+ * Return the affine dpaa2_io service, or NULL if there is no service affined
+ * to the specified cpu. If DPAA2_IO_ANY_CPU is used, return the next available
+ * service.
+ */
+struct dpaa2_io *dpaa2_io_service_select(int cpu)
+{
+ if (cpu == DPAA2_IO_ANY_CPU)
+ return service_select(NULL);
+
+ return service_select_by_cpu(NULL, cpu);
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_service_select);
+
+/**
+ * dpaa2_io_create() - create a dpaa2_io object.
+ * @desc: the dpaa2_io descriptor
+ *
+ * Activates a "struct dpaa2_io" corresponding to the given config of an actual
+ * DPIO object.
+ *
+ * Return a valid dpaa2_io object for success, or NULL for failure.
+ */
+struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc)
+{
+ struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
+
+ if (!obj)
+ return NULL;
+
+ /* check if CPU is out of range (-1 means any cpu) */
+ if (desc->cpu != DPAA2_IO_ANY_CPU && desc->cpu >= num_possible_cpus()) {
+ kfree(obj);
+ return NULL;
+ }
+
+ obj->dpio_desc = *desc;
+ obj->swp_desc.cena_bar = obj->dpio_desc.regs_cena;
+ obj->swp_desc.cinh_bar = obj->dpio_desc.regs_cinh;
+ obj->swp_desc.qman_version = obj->dpio_desc.qman_version;
+ obj->swp = qbman_swp_init(&obj->swp_desc);
+
+ if (!obj->swp) {
+ kfree(obj);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&obj->node);
+ spin_lock_init(&obj->lock_mgmt_cmd);
+ spin_lock_init(&obj->lock_notifications);
+ INIT_LIST_HEAD(&obj->notifications);
+
+ /* For now only enable DQRR interrupts */
+ qbman_swp_interrupt_set_trigger(obj->swp,
+ QBMAN_SWP_INTERRUPT_DQRI);
+ qbman_swp_interrupt_clear_status(obj->swp, 0xffffffff);
+ if (obj->dpio_desc.receives_notifications)
+ qbman_swp_push_set(obj->swp, 0, 1);
+
+ spin_lock(&dpio_list_lock);
+ list_add_tail(&obj->node, &dpio_list);
+ if (desc->cpu >= 0 && !dpio_by_cpu[desc->cpu])
+ dpio_by_cpu[desc->cpu] = obj;
+ spin_unlock(&dpio_list_lock);
+
+ return obj;
+}
+
+/**
+ * dpaa2_io_down() - release the dpaa2_io object.
+ * @d: the dpaa2_io object to be released.
+ *
+ * The "struct dpaa2_io" type can represent an individual DPIO object (as
+ * described by "struct dpaa2_io_desc") or an instance of a "DPIO service",
+ * which can be used to group/encapsulate multiple DPIO objects. In all cases,
+ * each handle obtained should be released using this function.
+ */
+void dpaa2_io_down(struct dpaa2_io *d)
+{
+ kfree(d);
+}
+
+#define DPAA_POLL_MAX 32
+
+/**
+ * dpaa2_io_irq() - ISR for DPIO interrupts
+ *
+ * @obj: the given DPIO object.
+ *
+ * Return IRQ_HANDLED for success or IRQ_NONE if there
+ * were no pending interrupts.
+ */
+irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj)
+{
+ const struct dpaa2_dq *dq;
+ int max = 0;
+ struct qbman_swp *swp;
+ u32 status;
+
+ swp = obj->swp;
+ status = qbman_swp_interrupt_read_status(swp);
+ if (!status)
+ return IRQ_NONE;
+
+ dq = qbman_swp_dqrr_next(swp);
+ while (dq) {
+ if (qbman_result_is_SCN(dq)) {
+ struct dpaa2_io_notification_ctx *ctx;
+ u64 q64;
+
+ q64 = qbman_result_SCN_ctx(dq);
+ ctx = (void *)(uintptr_t)q64;
+ ctx->cb(ctx);
+ } else {
+ pr_crit("fsl-mc-dpio: Unrecognised/ignored DQRR entry\n");
+ }
+ qbman_swp_dqrr_consume(swp, dq);
+ ++max;
+ if (max > DPAA_POLL_MAX)
+ goto done;
+ dq = qbman_swp_dqrr_next(swp);
+ }
+done:
+ qbman_swp_interrupt_clear_status(swp, status);
+ qbman_swp_interrupt_set_inhibit(swp, 0);
+ return IRQ_HANDLED;
+}
+
+/**
+ * dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN
+ * notifications on the given DPIO service.
+ * @d: the given DPIO service.
+ * @ctx: the notification context.
+ *
+ * The caller should make the MC command to attach a DPAA2 object to
+ * a DPIO after this function completes successfully. In that way:
+ * (a) The DPIO service is "ready" to handle a notification arrival
+ * (which might happen before the "attach" command to MC has
+ * returned control of execution back to the caller)
+ * (b) The DPIO service can provide back to the caller the 'dpio_id' and
+ * 'qman64' parameters that it should pass along in the MC command
+ * in order for the object to be configured to produce the right
+ * notification fields to the DPIO service.
+ *
+ * Return 0 for success, or -ENODEV for failure.
+ */
+int dpaa2_io_service_register(struct dpaa2_io *d,
+ struct dpaa2_io_notification_ctx *ctx)
+{
+ unsigned long irqflags;
+
+ d = service_select_by_cpu(d, ctx->desired_cpu);
+ if (!d)
+ return -ENODEV;
+
+ ctx->dpio_id = d->dpio_desc.dpio_id;
+ ctx->qman64 = (u64)(uintptr_t)ctx;
+ ctx->dpio_private = d;
+ spin_lock_irqsave(&d->lock_notifications, irqflags);
+ list_add(&ctx->node, &d->notifications);
+ spin_unlock_irqrestore(&d->lock_notifications, irqflags);
+
+ /* Enable the generation of CDAN notifications */
+ if (ctx->is_cdan)
+ return qbman_swp_CDAN_set_context_enable(d->swp,
+ (u16)ctx->id,
+ ctx->qman64);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_service_register);
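+
+/*
+ * Hypothetical registration sketch (caller-side code, not part of this
+ * service): fill a notification context for a CDAN on channel 'ch_id',
+ * register it, then issue the MC "attach" command using the dpio_id and
+ * qman64 values filled in above (the MC call itself is not shown):
+ *
+ * static void my_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
+ * {
+ * // e.g. schedule NAPI or a workqueue from here
+ * }
+ *
+ * static int example_attach(struct dpaa2_io *io,
+ * struct dpaa2_io_notification_ctx *nctx,
+ * u32 ch_id, int cpu)
+ * {
+ * nctx->cb = my_cdan_cb;
+ * nctx->is_cdan = 1;
+ * nctx->id = ch_id;
+ * nctx->desired_cpu = cpu;
+ * return dpaa2_io_service_register(io, nctx);
+ * }
+ */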
+
+/**
+ * dpaa2_io_service_deregister - Undo a previous dpaa2_io_service_register().
+ * @service: the given DPIO service.
+ * @ctx: the notification context.
+ *
+ * This function should be called only after sending the MC command to
+ * detach the notification-producing device from the DPIO.
+ */
+void dpaa2_io_service_deregister(struct dpaa2_io *service,
+ struct dpaa2_io_notification_ctx *ctx)
+{
+ struct dpaa2_io *d = ctx->dpio_private;
+ unsigned long irqflags;
+
+ if (ctx->is_cdan)
+ qbman_swp_CDAN_disable(d->swp, (u16)ctx->id);
+
+ spin_lock_irqsave(&d->lock_notifications, irqflags);
+ list_del(&ctx->node);
+ spin_unlock_irqrestore(&d->lock_notifications, irqflags);
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_service_deregister);
+
+/**
+ * dpaa2_io_service_rearm() - Rearm the notification for the given DPIO service.
+ * @d: the given DPIO service.
+ * @ctx: the notification context.
+ *
+ * Once a FQDAN/CDAN has been produced, the corresponding FQ/channel is
+ * considered "disarmed", i.e. the user can issue pull dequeue operations on
+ * that traffic source for as long as it likes. Eventually it may wish to
+ * "rearm" that source to allow it to produce another FQDAN/CDAN; that is
+ * what this function achieves.
+ *
+ * Return 0 for success.
+ */
+int dpaa2_io_service_rearm(struct dpaa2_io *d,
+ struct dpaa2_io_notification_ctx *ctx)
+{
+ unsigned long irqflags;
+ int err;
+
+ d = service_select_by_cpu(d, ctx->desired_cpu);
+ if (unlikely(!d))
+ return -ENODEV;
+
+ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
+ if (ctx->is_cdan)
+ err = qbman_swp_CDAN_enable(d->swp, (u16)ctx->id);
+ else
+ err = qbman_swp_fq_schedule(d->swp, ctx->id);
+ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_service_rearm);
+
+/**
+ * dpaa2_io_service_pull_channel() - pull dequeue functions from a channel.
+ * @d: the given DPIO service.
+ * @channelid: the given channel id.
+ * @s: the dpaa2_io_store object for the result.
+ *
+ * Return 0 for success, or error code for failure.
+ */
+int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
+ struct dpaa2_io_store *s)
+{
+ struct qbman_pull_desc pd;
+ int err;
+
+ qbman_pull_desc_clear(&pd);
+ qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
+ qbman_pull_desc_set_numframes(&pd, (u8)s->max);
+ qbman_pull_desc_set_channel(&pd, channelid, qbman_pull_type_prio);
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+
+ s->swp = d->swp;
+ err = qbman_swp_pull(d->swp, &pd);
+ if (err)
+ s->swp = NULL;
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_service_pull_channel);
+
+/**
+ * dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD.
+ * @d: the given DPIO service.
+ * @qdid: the given queuing destination id.
+ * @prio: the given queuing priority.
+ * @qdbin: the given queuing destination bin.
+ * @fd: the frame descriptor which is enqueued.
+ *
+ * Return 0 for successful enqueue, or -EBUSY if the enqueue ring is not ready,
+ * or -ENODEV if there is no dpio service.
+ */
+int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d,
+ u32 qdid, u8 prio, u16 qdbin,
+ const struct dpaa2_fd *fd)
+{
+ struct qbman_eq_desc ed;
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+
+ qbman_eq_desc_clear(&ed);
+ qbman_eq_desc_set_no_orp(&ed, 0);
+ qbman_eq_desc_set_qd(&ed, qdid, qdbin, prio);
+
+ return qbman_swp_enqueue(d->swp, &ed, fd);
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_service_enqueue_qd);
+
+/**
+ * dpaa2_io_service_release() - Release buffers to a buffer pool.
+ * @d: the given DPIO object.
+ * @bpid: the buffer pool id.
+ * @buffers: the buffers to be released.
+ * @num_buffers: the number of the buffers to be released.
+ *
+ * Return 0 for success, and negative error code for failure.
+ */
+int dpaa2_io_service_release(struct dpaa2_io *d,
+ u32 bpid,
+ const u64 *buffers,
+ unsigned int num_buffers)
+{
+ struct qbman_release_desc rd;
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+
+ qbman_release_desc_clear(&rd);
+ qbman_release_desc_set_bpid(&rd, bpid);
+
+ return qbman_swp_release(d->swp, &rd, buffers, num_buffers);
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_service_release);
+
+/**
+ * dpaa2_io_service_acquire() - Acquire buffers from a buffer pool.
+ * @d: the given DPIO object.
+ * @bpid: the buffer pool id.
+ * @buffers: the buffer addresses for acquired buffers.
+ * @num_buffers: the expected number of the buffers to acquire.
+ *
+ * Return a negative error code if the command failed, otherwise it returns
+ * the number of buffers acquired, which may be less than the number requested.
+ * Eg. if the buffer pool is empty, this will return zero.
+ */
+int dpaa2_io_service_acquire(struct dpaa2_io *d,
+ u32 bpid,
+ u64 *buffers,
+ unsigned int num_buffers)
+{
+ unsigned long irqflags;
+ int err;
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+
+ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
+ err = qbman_swp_acquire(d->swp, bpid, buffers, num_buffers);
+ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_service_acquire);
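+
+/*
+ * Illustrative buffer-pool round trip (a sketch; 'bpid' is assumed to
+ * name an already-configured buffer pool and 'addr' a DMA address it may
+ * hand out). Note that acquire may legitimately return fewer buffers
+ * than requested, e.g. if another consumer drained the pool:
+ *
+ * static int example_bp_roundtrip(struct dpaa2_io *d, u32 bpid, u64 addr)
+ * {
+ * u64 buf = addr;
+ * int err, n;
+ *
+ * err = dpaa2_io_service_release(d, bpid, &buf, 1);
+ * if (err)
+ * return err;
+ * n = dpaa2_io_service_acquire(d, bpid, &buf, 1);
+ * if (n < 0)
+ * return n;
+ * return n == 1 ? 0 : -ENOSPC;
+ * }
+ */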
+
+/*
+ * 'Stores' are reusable memory blocks for holding dequeue results, and to
+ * assist with parsing those results.
+ */
+
+/**
+ * dpaa2_io_store_create() - Create the dma memory storage for dequeue result.
+ * @max_frames: the maximum number of dequeue results for frames; must be <= 16.
+ * @dev: the device to allow mapping/unmapping the DMAable region.
+ *
+ * The size of the storage is "max_frames*sizeof(struct dpaa2_dq)".
+ * The 'dpaa2_io_store' returned is a DPIO service managed object.
+ *
+ * Return pointer to dpaa2_io_store struct for successfully created storage
+ * memory, or NULL on error.
+ */
+struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
+ struct device *dev)
+{
+ struct dpaa2_io_store *ret;
+ size_t size;
+
+ if (!max_frames || (max_frames > 16))
+ return NULL;
+
+ ret = kmalloc(sizeof(*ret), GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->max = max_frames;
+ size = max_frames * sizeof(struct dpaa2_dq) + 64;
+ ret->alloced_addr = kzalloc(size, GFP_KERNEL);
+ if (!ret->alloced_addr) {
+ kfree(ret);
+ return NULL;
+ }
+
+ ret->vaddr = PTR_ALIGN(ret->alloced_addr, 64);
+ ret->paddr = dma_map_single(dev, ret->vaddr,
+ sizeof(struct dpaa2_dq) * max_frames,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, ret->paddr)) {
+ kfree(ret->alloced_addr);
+ kfree(ret);
+ return NULL;
+ }
+
+ ret->idx = 0;
+ ret->dev = dev;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_store_create);
+
+/**
+ * dpaa2_io_store_destroy() - Frees the dma memory storage for dequeue
+ * result.
+ * @s: the storage memory to be destroyed.
+ */
+void dpaa2_io_store_destroy(struct dpaa2_io_store *s)
+{
+ dma_unmap_single(s->dev, s->paddr, sizeof(struct dpaa2_dq) * s->max,
+ DMA_FROM_DEVICE);
+ kfree(s->alloced_addr);
+ kfree(s);
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_store_destroy);
+
+/**
+ * dpaa2_io_store_next() - Determine when the next dequeue result is available.
+ * @s: the dpaa2_io_store object.
+ * @is_last: indicate whether this is the last frame in the pull command.
+ *
+ * When an object driver performs dequeues to a dpaa2_io_store, this function
+ * can be used to determine when the next frame result is available. Once
+ * this function returns non-NULL, a subsequent call to it will try to find
+ * the next dequeue result.
+ *
+ * Note that if a pull-dequeue has a NULL result because the target FQ/channel
+ * was empty, then this function will also return NULL (rather than expecting
+ * the caller to always check for this). As such, "is_last" can be used to
+ * differentiate between "end-of-empty-dequeue" and "still-waiting".
+ *
+ * Return dequeue result for a valid dequeue result, or NULL for empty dequeue.
+ */
+struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last)
+{
+ int match;
+ struct dpaa2_dq *ret = &s->vaddr[s->idx];
+
+ match = qbman_result_has_new_result(s->swp, ret);
+ if (!match) {
+ *is_last = 0;
+ return NULL;
+ }
+
+ s->idx++;
+
+ if (dpaa2_dq_is_pull_complete(ret)) {
+ *is_last = 1;
+ s->idx = 0;
+ /*
+ * If we get an empty dequeue result to terminate a zero-results
+ * vdqcr, return NULL to the caller rather than expecting the
+ * caller to check non-NULL results every time.
+ */
+ if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME))
+ ret = NULL;
+ } else {
+ *is_last = 0;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_store_next);
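+
+/*
+ * Putting the store API together (hypothetical consumer code): create a
+ * store, issue a pull on a channel, and walk the results until the pull
+ * command completes. Busy-waiting on dpaa2_io_store_next() is shown only
+ * to keep the sketch short, and process_fd() is an assumed helper:
+ *
+ * static void example_pull(struct dpaa2_io *io, u32 ch_id, struct device *dev)
+ * {
+ * struct dpaa2_io_store *s = dpaa2_io_store_create(16, dev);
+ * struct dpaa2_dq *dq;
+ * int is_last = 0;
+ *
+ * if (!s || dpaa2_io_service_pull_channel(io, ch_id, s))
+ * goto out;
+ * do {
+ * dq = dpaa2_io_store_next(s, &is_last);
+ * if (dq)
+ * process_fd(dpaa2_dq_fd(dq));
+ * } while (!is_last);
+ * out:
+ * if (s)
+ * dpaa2_io_store_destroy(s);
+ * }
+ */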
diff --git a/drivers/soc/fsl/dpio/dpio.c b/drivers/soc/fsl/dpio/dpio.c
new file mode 100644
index 000000000..ff37c80e1
--- /dev/null
+++ b/drivers/soc/fsl/dpio/dpio.c
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/fsl/mc.h>
+
+#include "dpio.h"
+#include "dpio-cmd.h"
+
+/*
+ * Data Path I/O Portal API
+ * Contains initialization APIs and runtime control APIs for DPIO
+ */
+
+/**
+ * dpio_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpio_id: DPIO unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpio_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpio_id,
+ u16 *token)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpio_cmd_open *dpio_cmd;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ dpio_cmd = (struct dpio_cmd_open *)cmd.params;
+ dpio_cmd->dpio_id = cpu_to_le32(dpio_id);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpio_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CLOSE,
+ cmd_flags,
+ token);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpio_enable() - Enable the DPIO, allow I/O portal operations.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpio_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_ENABLE,
+ cmd_flags,
+ token);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpio_disable() - Disable the DPIO, stop any I/O portal operation.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpio_disable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpio_get_attributes() - Retrieve DPIO attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpio_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpio_attr *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpio_rsp_get_attr *dpio_rsp;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ dpio_rsp = (struct dpio_rsp_get_attr *)cmd.params;
+ attr->id = le32_to_cpu(dpio_rsp->id);
+ attr->qbman_portal_id = le16_to_cpu(dpio_rsp->qbman_portal_id);
+ attr->num_priorities = dpio_rsp->num_priorities;
+ attr->channel_mode = dpio_rsp->channel_mode & DPIO_CHANNEL_MODE_MASK;
+ attr->qbman_portal_ce_offset =
+ le64_to_cpu(dpio_rsp->qbman_portal_ce_addr);
+ attr->qbman_portal_ci_offset =
+ le64_to_cpu(dpio_rsp->qbman_portal_ci_addr);
+ attr->qbman_version = le32_to_cpu(dpio_rsp->qbman_version);
+
+ return 0;
+}
+
+/**
+ * dpio_get_api_version - Get Data Path I/O API version
+ * @mc_io: Pointer to MC portal's DPIO object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of DPIO API
+ * @minor_ver: Minor version of DPIO API
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpio_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver)
+{
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_API_VERSION,
+ cmd_flags, 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
+
+ return 0;
+}
diff --git a/drivers/soc/fsl/dpio/dpio.h b/drivers/soc/fsl/dpio/dpio.h
new file mode 100644
index 000000000..49194c8e4
--- /dev/null
+++ b/drivers/soc/fsl/dpio/dpio.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+#ifndef __FSL_DPIO_H
+#define __FSL_DPIO_H
+
+struct fsl_mc_io;
+
+int dpio_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpio_id,
+ u16 *token);
+
+int dpio_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/**
+ * enum dpio_channel_mode - DPIO notification channel mode
+ * @DPIO_NO_CHANNEL: No support for notification channel
+ * @DPIO_LOCAL_CHANNEL: Notifications on data availability can be received by a
+ * dedicated channel in the DPIO; user should point the queue's
+ * destination in the relevant interface to this DPIO
+ */
+enum dpio_channel_mode {
+ DPIO_NO_CHANNEL = 0,
+ DPIO_LOCAL_CHANNEL = 1,
+};
+
+/**
+ * struct dpio_cfg - Structure representing DPIO configuration
+ * @channel_mode: Notification channel mode
+ * @num_priorities: Number of priorities for the notification channel (1-8);
+ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL'
+ */
+struct dpio_cfg {
+ enum dpio_channel_mode channel_mode;
+ u8 num_priorities;
+};
+
+int dpio_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpio_disable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/**
+ * struct dpio_attr - Structure representing DPIO attributes
+ * @id: DPIO object ID
+ * @qbman_portal_ce_offset: offset of the software portal cache-enabled area
+ * @qbman_portal_ci_offset: offset of the software portal cache-inhibited area
+ * @qbman_portal_id: Software portal ID
+ * @channel_mode: Notification channel mode
+ * @num_priorities: Number of priorities for the notification channel (1-8);
+ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL'
+ * @qbman_version: QBMAN version
+ */
+struct dpio_attr {
+ int id;
+ u64 qbman_portal_ce_offset;
+ u64 qbman_portal_ci_offset;
+ u16 qbman_portal_id;
+ enum dpio_channel_mode channel_mode;
+ u8 num_priorities;
+ u32 qbman_version;
+};
+
+int dpio_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpio_attr *attr);
+
+int dpio_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver);
+
+#endif /* __FSL_DPIO_H */
diff --git a/drivers/soc/fsl/dpio/qbman-portal.c b/drivers/soc/fsl/dpio/qbman-portal.c
new file mode 100644
index 000000000..cf1d448ea
--- /dev/null
+++ b/drivers/soc/fsl/dpio/qbman-portal.c
@@ -0,0 +1,1005 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Copyright 2016 NXP
+ *
+ */
+
+#include <asm/cacheflush.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <soc/fsl/dpaa2-global.h>
+
+#include "qbman-portal.h"
+
+#define QMAN_REV_4000 0x04000000
+#define QMAN_REV_4100 0x04010000
+#define QMAN_REV_4101 0x04010001
+#define QMAN_REV_MASK 0xffff0000
+
+/* All QBMan command and result structures use this "valid bit" encoding */
+#define QB_VALID_BIT ((u32)0x80)
+
+/* QBMan portal management command codes */
+#define QBMAN_MC_ACQUIRE 0x30
+#define QBMAN_WQCHAN_CONFIGURE 0x46
+
+/* CINH register offsets */
+#define QBMAN_CINH_SWP_EQAR 0x8c0
+#define QBMAN_CINH_SWP_DQPI 0xa00
+#define QBMAN_CINH_SWP_DCAP 0xac0
+#define QBMAN_CINH_SWP_SDQCR 0xb00
+#define QBMAN_CINH_SWP_RAR 0xcc0
+#define QBMAN_CINH_SWP_ISR 0xe00
+#define QBMAN_CINH_SWP_IER 0xe40
+#define QBMAN_CINH_SWP_ISDR 0xe80
+#define QBMAN_CINH_SWP_IIR 0xec0
+
+/* CENA register offsets */
+#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6))
+#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((u32)(n) << 6))
+#define QBMAN_CENA_SWP_RCR(n) (0x400 + ((u32)(n) << 6))
+#define QBMAN_CENA_SWP_CR 0x600
+#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((u32)(vb) >> 1))
+#define QBMAN_CENA_SWP_VDQCR 0x780
+
+/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
+#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)
+
+/* Define token used to determine if response written to memory is valid */
+#define QMAN_DQ_TOKEN_VALID 1
+
+/* SDQCR attribute codes */
+#define QB_SDQCR_FC_SHIFT 29
+#define QB_SDQCR_FC_MASK 0x1
+#define QB_SDQCR_DCT_SHIFT 24
+#define QB_SDQCR_DCT_MASK 0x3
+#define QB_SDQCR_TOK_SHIFT 16
+#define QB_SDQCR_TOK_MASK 0xff
+#define QB_SDQCR_SRC_SHIFT 0
+#define QB_SDQCR_SRC_MASK 0xffff
+
+/* opaque token for static dequeues */
+#define QMAN_SDQCR_TOKEN 0xbb
+
+enum qbman_sdqcr_dct {
+ qbman_sdqcr_dct_null = 0,
+ qbman_sdqcr_dct_prio_ics,
+ qbman_sdqcr_dct_active_ics,
+ qbman_sdqcr_dct_active
+};
+
+enum qbman_sdqcr_fc {
+ qbman_sdqcr_fc_one = 0,
+ qbman_sdqcr_fc_up_to_3 = 1
+};
+
+/* Portal Access */
+
+static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset)
+{
+ return readl_relaxed(p->addr_cinh + offset);
+}
+
+static inline void qbman_write_register(struct qbman_swp *p, u32 offset,
+ u32 value)
+{
+ writel_relaxed(value, p->addr_cinh + offset);
+}
+
+static inline void *qbman_get_cmd(struct qbman_swp *p, u32 offset)
+{
+ return p->addr_cena + offset;
+}
+
+#define QBMAN_CINH_SWP_CFG 0xd00
+
+#define SWP_CFG_DQRR_MF_SHIFT 20
+#define SWP_CFG_EST_SHIFT 16
+#define SWP_CFG_WN_SHIFT 14
+#define SWP_CFG_RPM_SHIFT 12
+#define SWP_CFG_DCM_SHIFT 10
+#define SWP_CFG_EPM_SHIFT 8
+#define SWP_CFG_SD_SHIFT 5
+#define SWP_CFG_SP_SHIFT 4
+#define SWP_CFG_SE_SHIFT 3
+#define SWP_CFG_DP_SHIFT 2
+#define SWP_CFG_DE_SHIFT 1
+#define SWP_CFG_EP_SHIFT 0
+
+static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,
+ u8 epm, int sd, int sp, int se,
+ int dp, int de, int ep)
+{
+ return (max_fill << SWP_CFG_DQRR_MF_SHIFT |
+ est << SWP_CFG_EST_SHIFT |
+ wn << SWP_CFG_WN_SHIFT |
+ rpm << SWP_CFG_RPM_SHIFT |
+ dcm << SWP_CFG_DCM_SHIFT |
+ epm << SWP_CFG_EPM_SHIFT |
+ sd << SWP_CFG_SD_SHIFT |
+ sp << SWP_CFG_SP_SHIFT |
+ se << SWP_CFG_SE_SHIFT |
+ dp << SWP_CFG_DP_SHIFT |
+ de << SWP_CFG_DE_SHIFT |
+ ep << SWP_CFG_EP_SHIFT);
+}
+
+/**
+ * qbman_swp_init() - Create a functional object representing the given
+ * QBMan portal descriptor.
+ * @d: the given qbman swp descriptor
+ *
+ * Return qbman_swp portal for success, NULL if the object cannot
+ * be created.
+ */
+struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
+{
+ struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL);
+ u32 reg;
+
+ if (!p)
+ return NULL;
+ p->desc = d;
+ p->mc.valid_bit = QB_VALID_BIT;
+ p->sdq = 0;
+ p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
+ p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
+ p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
+
+ atomic_set(&p->vdq.available, 1);
+ p->vdq.valid_bit = QB_VALID_BIT;
+ p->dqrr.next_idx = 0;
+ p->dqrr.valid_bit = QB_VALID_BIT;
+
+ if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
+ p->dqrr.dqrr_size = 4;
+ p->dqrr.reset_bug = 1;
+ } else {
+ p->dqrr.dqrr_size = 8;
+ p->dqrr.reset_bug = 0;
+ }
+
+ p->addr_cena = d->cena_bar;
+ p->addr_cinh = d->cinh_bar;
+
+ reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
+ 1, /* Writes Non-cacheable */
+ 0, /* EQCR_CI stashing threshold */
+ 3, /* RPM: Valid bit mode, RCR in array mode */
+ 2, /* DCM: Discrete consumption ack mode */
+ 3, /* EPM: Valid bit mode, EQCR in array mode */
+ 0, /* mem stashing drop enable == FALSE */
+ 1, /* mem stashing priority == TRUE */
+ 0, /* mem stashing enable == FALSE */
+ 1, /* dequeue stashing priority == TRUE */
+ 0, /* dequeue stashing enable == FALSE */
+ 0); /* EQCR_CI stashing priority == FALSE */
+
+ qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
+ reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
+ if (!reg) {
+ pr_err("qbman: the portal is not enabled!\n");
+ kfree(p);
+ return NULL;
+ }
+
+ /*
+ * SDQCR needs to be initialized to 0 when no channels are
+ * being dequeued from or else the QMan HW will indicate an
+ * error. The values that were calculated above will be
+ * applied when dequeues from a specific channel are enabled.
+ */
+ qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0);
+ return p;
+}
+
+/**
+ * qbman_swp_finish() - Destroy the functional object representing the
+ * given QBMan portal descriptor.
+ * @p: the qbman_swp object to be destroyed
+ */
+void qbman_swp_finish(struct qbman_swp *p)
+{
+ kfree(p);
+}
+
+/**
+ * qbman_swp_interrupt_read_status() - read the interrupt status register
+ * @p: the given software portal
+ *
+ * Return the value in the SWP_ISR register.
+ */
+u32 qbman_swp_interrupt_read_status(struct qbman_swp *p)
+{
+ return qbman_read_register(p, QBMAN_CINH_SWP_ISR);
+}
+
+/**
+ * qbman_swp_interrupt_clear_status() - clear bits in the interrupt status register
+ * @p: the given software portal
+ * @mask: The mask to clear in SWP_ISR register
+ */
+void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask)
+{
+ qbman_write_register(p, QBMAN_CINH_SWP_ISR, mask);
+}
+
+/**
+ * qbman_swp_interrupt_get_trigger() - read interrupt enable register
+ * @p: the given software portal
+ *
+ * Return the value in the SWP_IER register.
+ */
+u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
+{
+ return qbman_read_register(p, QBMAN_CINH_SWP_IER);
+}
+
+/**
+ * qbman_swp_interrupt_set_trigger() - enable interrupts for a swp
+ * @p: the given software portal
+ * @mask: The mask of bits to enable in SWP_IER
+ */
+void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask)
+{
+ qbman_write_register(p, QBMAN_CINH_SWP_IER, mask);
+}
+
+/**
+ * qbman_swp_interrupt_get_inhibit() - read interrupt mask register
+ * @p: the given software portal object
+ *
+ * Return the value in the SWP_IIR register.
+ */
+int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
+{
+ return qbman_read_register(p, QBMAN_CINH_SWP_IIR);
+}
+
+/**
+ * qbman_swp_interrupt_set_inhibit() - write interrupt mask register
+ * @p: the given software portal object
+ * @inhibit: whether to inhibit the IRQs
+ */
+void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
+{
+ qbman_write_register(p, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
+}
+
+/*
+ * Different management commands all use this common base layer of code to issue
+ * commands and poll for results.
+ */
+
+/*
+ * Returns a pointer to where the caller should fill in their management command
+ * (caller should ignore the verb byte)
+ */
+void *qbman_swp_mc_start(struct qbman_swp *p)
+{
+ return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
+}
+
+/*
+ * Merges in the caller-supplied command verb (which should not include
+ * the valid-bit) and submits the command to hardware.
+ */
+void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb)
+{
+ u8 *v = cmd;
+
+ dma_wmb();
+ *v = cmd_verb | p->mc.valid_bit;
+}
+
+/*
+ * Checks for a completed response (returns non-NULL only if the response
+ * is complete).
+ */
+void *qbman_swp_mc_result(struct qbman_swp *p)
+{
+ u32 *ret, verb;
+
+ ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
+
+ /* Remove the valid-bit - command completed if the rest is non-zero */
+ verb = ret[0] & ~QB_VALID_BIT;
+ if (!verb)
+ return NULL;
+ p->mc.valid_bit ^= QB_VALID_BIT;
+ return ret;
+}
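+
+/*
+ * A minimal sketch of the flow these three helpers implement, as used by
+ * the buffer-acquire command later in this file ('swp' and 'bpid' are
+ * assumed to come from the caller):
+ *
+ *   struct qbman_acquire_desc *cmd;
+ *   struct qbman_acquire_rslt *rslt;
+ *
+ *   cmd = qbman_swp_mc_start(swp);
+ *   if (!cmd)
+ *           return -EBUSY;
+ *   cmd->bpid = cpu_to_le16(bpid);
+ *   cmd->num = 1;
+ *   rslt = qbman_swp_mc_complete(swp, cmd, QBMAN_MC_ACQUIRE);
+ *   if (!rslt || rslt->rslt != QBMAN_MC_RSLT_OK)
+ *           return -EIO;
+ */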
+
+#define QB_ENQUEUE_CMD_OPTIONS_SHIFT 0
+enum qb_enqueue_commands {
+ enqueue_empty = 0,
+ enqueue_response_always = 1,
+ enqueue_rejects_to_fq = 2
+};
+
+#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT 2
+#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
+#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT 4
+
+/**
+ * qbman_eq_desc_clear() - Clear the contents of a descriptor to
+ * default/starting state.
+ * @d: the enqueue descriptor to be cleared
+ */
+void qbman_eq_desc_clear(struct qbman_eq_desc *d)
+{
+ memset(d, 0, sizeof(*d));
+}
+
+/**
+ * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
+ * @d: the enqueue descriptor.
+ * @respond_success: 1 = enqueue with response always; 0 = enqueue with
+ * rejections returned on a FQ.
+ */
+void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
+{
+ d->verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
+ if (respond_success)
+ d->verb |= enqueue_response_always;
+ else
+ d->verb |= enqueue_rejects_to_fq;
+}
+
+/*
+ * Exactly one of the following descriptor "targets" should be set. (Calling any
+ * one of these will replace the effect of any prior call to one of these.)
+ * - enqueue to a frame queue
+ * - enqueue to a queuing destination
+ */
+
+/**
+ * qbman_eq_desc_set_fq() - set the FQ for the enqueue command
+ * @d: the enqueue descriptor
+ * @fqid: the id of the frame queue to be enqueued
+ */
+void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid)
+{
+ d->verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
+ d->tgtid = cpu_to_le32(fqid);
+}
+
+/**
+ * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command
+ * @d: the enqueue descriptor
+ * @qdid: the id of the queuing destination to be enqueued
+ * @qd_bin: the queuing destination bin
+ * @qd_prio: the queuing destination priority
+ */
+void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
+ u32 qd_bin, u32 qd_prio)
+{
+ d->verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
+ d->tgtid = cpu_to_le32(qdid);
+ d->qdbin = cpu_to_le16(qd_bin);
+ d->qpri = qd_prio;
+}
+
+#define EQAR_IDX(eqar) ((eqar) & 0x7)
+#define EQAR_VB(eqar) ((eqar) & 0x80)
+#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
+
+/**
+ * qbman_swp_enqueue() - Issue an enqueue command
+ * @s: the software portal used for enqueue
+ * @d: the enqueue descriptor
+ * @fd: the frame descriptor to be enqueued
+ *
+ * Please note that 'fd' should only be NULL if the "action" of the
+ * descriptor is "orp_hole" or "orp_nesn".
+ *
+ * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
+ */
+int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd)
+{
+ struct qbman_eq_desc *p;
+ u32 eqar = qbman_read_register(s, QBMAN_CINH_SWP_EQAR);
+
+ if (!EQAR_SUCCESS(eqar))
+ return -EBUSY;
+
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
+ memcpy(&p->dca, &d->dca, 31);
+ memcpy(&p->fd, fd, sizeof(*fd));
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ dma_wmb();
+ p->verb = d->verb | EQAR_VB(eqar);
+
+ return 0;
+}
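+
+/*
+ * A minimal enqueue sketch ('swp', 'fqid' and 'fd' are assumed to be
+ * supplied by the caller; -EBUSY just means the EQCR slot was not free,
+ * so the enqueue can simply be retried):
+ *
+ *   struct qbman_eq_desc ed;
+ *   int ret;
+ *
+ *   qbman_eq_desc_clear(&ed);
+ *   qbman_eq_desc_set_no_orp(&ed, 0);
+ *   qbman_eq_desc_set_fq(&ed, fqid);
+ *   do {
+ *           ret = qbman_swp_enqueue(swp, &ed, fd);
+ *   } while (ret == -EBUSY);
+ */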
+
+/* Static (push) dequeue */
+
+/**
+ * qbman_swp_push_get() - Get the push dequeue setup
+ * @s: the software portal object
+ * @channel_idx: the channel index to query
+ * @enabled: returned boolean to show whether the push dequeue is enabled
+ * for the given channel
+ */
+void qbman_swp_push_get(struct qbman_swp *s, u8 channel_idx, int *enabled)
+{
+ u16 src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
+
+ WARN_ON(channel_idx > 15);
+ *enabled = !!(src & (1 << channel_idx));
+}
+
+/**
+ * qbman_swp_push_set() - Enable or disable push dequeue
+ * @s: the software portal object
+ * @channel_idx: the channel index (0 to 15)
+ * @enable: enable or disable push dequeue
+ */
+void qbman_swp_push_set(struct qbman_swp *s, u8 channel_idx, int enable)
+{
+ u16 dqsrc;
+
+ WARN_ON(channel_idx > 15);
+ if (enable)
+ s->sdq |= 1 << channel_idx;
+ else
+ s->sdq &= ~(1 << channel_idx);
+
+ /* Remake the complete SDQCR source map. If no channels are
+ * enabled, the SDQCR must be 0 or else QMan will assert errors
+ */
+ dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
+ if (dqsrc != 0)
+ qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, s->sdq);
+ else
+ qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, 0);
+}
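+
+/*
+ * Sketch of enabling push dequeue for one channel (channel 0 here is
+ * just an example index); once enabled, entries for the channel arrive
+ * through the DQRR and are collected with qbman_swp_dqrr_next():
+ *
+ *   qbman_swp_push_set(swp, 0, 1);
+ *   ... consume DQRR entries ...
+ *   qbman_swp_push_set(swp, 0, 0);
+ */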
+
+#define QB_VDQCR_VERB_DCT_SHIFT 0
+#define QB_VDQCR_VERB_DT_SHIFT 2
+#define QB_VDQCR_VERB_RLS_SHIFT 4
+#define QB_VDQCR_VERB_WAE_SHIFT 5
+
+enum qb_pull_dt_e {
+ qb_pull_dt_channel,
+ qb_pull_dt_workqueue,
+ qb_pull_dt_framequeue
+};
+
+/**
+ * qbman_pull_desc_clear() - Clear the contents of a descriptor to
+ * default/starting state
+ * @d: the pull dequeue descriptor to be cleared
+ */
+void qbman_pull_desc_clear(struct qbman_pull_desc *d)
+{
+ memset(d, 0, sizeof(*d));
+}
+
+/**
+ * qbman_pull_desc_set_storage()- Set the pull dequeue storage
+ * @d: the pull dequeue descriptor to be set
+ * @storage: the pointer of the memory to store the dequeue result
+ * @storage_phys: the physical address of the storage memory
+ * @stash: to indicate whether write allocate is enabled
+ *
+ * If not called, or if called with 'storage' as NULL, the resulting pull
+ * dequeues
+ * will produce results to DQRR. If 'storage' is non-NULL, then results are
+ * produced to the given memory location (using the DMA address which
+ * the caller provides in 'storage_phys'), and 'stash' controls whether or not
+ * those writes to main-memory express a cache-warming attribute.
+ */
+void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
+ struct dpaa2_dq *storage,
+ dma_addr_t storage_phys,
+ int stash)
+{
+ /* save the virtual address */
+ d->rsp_addr_virt = (u64)(uintptr_t)storage;
+
+ if (!storage) {
+ d->verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
+ return;
+ }
+ d->verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
+ if (stash)
+ d->verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
+ else
+ d->verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);
+
+ d->rsp_addr = cpu_to_le64(storage_phys);
+}
+
+/**
+ * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued
+ * @d: the pull dequeue descriptor to be set
+ * @numframes: number of frames to be set, must be between 1 and 16, inclusive
+ */
+void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes)
+{
+ d->numf = numframes - 1;
+}
+
+/*
+ * Exactly one of the following descriptor "actions" should be set. (Calling any
+ * one of these will replace the effect of any prior call to one of these.)
+ * - pull dequeue from the given frame queue (FQ)
+ * - pull dequeue from any FQ in the given work queue (WQ)
+ * - pull dequeue from any FQ in any WQ in the given channel
+ */
+
+/**
+ * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues
+ * @d: the pull dequeue descriptor to be set
+ * @fqid: the frame queue index of the given FQ
+ */
+void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid)
+{
+ d->verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
+ d->verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
+ d->dq_src = cpu_to_le32(fqid);
+}
+
+/**
+ * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues
+ * @d: the pull dequeue descriptor to be set
+ * @wqid: composed of channel id and wqid within the channel
+ * @dct: the dequeue command type
+ */
+void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
+ enum qbman_pull_type_e dct)
+{
+ d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
+ d->verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
+ d->dq_src = cpu_to_le32(wqid);
+}
+
+/**
+ * qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
+ * dequeues
+ * @d: the pull dequeue descriptor to be set
+ * @chid: the channel id to be dequeued
+ * @dct: the dequeue command type
+ */
+void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
+ enum qbman_pull_type_e dct)
+{
+ d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
+ d->verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
+ d->dq_src = cpu_to_le32(chid);
+}
+
+/**
+ * qbman_swp_pull() - Issue the pull dequeue command
+ * @s: the software portal object
+ * @d: the software portal descriptor which has been configured with
+ * the set of qbman_pull_desc_set_*() calls
+ *
+ * Return 0 for success, and -EBUSY if the software portal is not ready
+ * to do pull dequeue.
+ */
+int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
+{
+ struct qbman_pull_desc *p;
+
+ if (!atomic_dec_and_test(&s->vdq.available)) {
+ atomic_inc(&s->vdq.available);
+ return -EBUSY;
+ }
+ s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
+ p->numf = d->numf;
+ p->tok = QMAN_DQ_TOKEN_VALID;
+ p->dq_src = d->dq_src;
+ p->rsp_addr = d->rsp_addr;
+ p->rsp_addr_virt = d->rsp_addr_virt;
+ dma_wmb();
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ p->verb = d->verb | s->vdq.valid_bit;
+ s->vdq.valid_bit ^= QB_VALID_BIT;
+
+ return 0;
+}
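+
+/*
+ * Sketch of issuing a volatile (pull) dequeue to memory; 'storage' and
+ * 'storage_phys' are assumed to be a caller-owned DMA-coherent buffer,
+ * and completions are later detected with qbman_result_has_new_result():
+ *
+ *   struct qbman_pull_desc pd;
+ *
+ *   qbman_pull_desc_clear(&pd);
+ *   qbman_pull_desc_set_numframes(&pd, 4);
+ *   qbman_pull_desc_set_fq(&pd, fqid);
+ *   qbman_pull_desc_set_storage(&pd, storage, storage_phys, 1);
+ *   if (qbman_swp_pull(swp, &pd))
+ *           ... a volatile dequeue is already in flight, try again later
+ */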
+
+#define QMAN_DQRR_PI_MASK 0xf
+
+/**
+ * qbman_swp_dqrr_next() - Get a valid DQRR entry
+ * @s: the software portal object
+ *
+ * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
+ * only once, so repeated calls can return a sequence of DQRR entries, without
+ * requiring they be consumed immediately or in any particular order.
+ */
+const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
+{
+ u32 verb;
+ u32 response_verb;
+ u32 flags;
+ struct dpaa2_dq *p;
+
+ /* Before using valid-bit to detect if something is there, we have to
+ * handle the case of the DQRR reset bug...
+ */
+ if (unlikely(s->dqrr.reset_bug)) {
+ /*
+ * We pick up new entries by cache-inhibited producer index,
+ * which means that a non-coherent mapping would require us to
+ * invalidate and read *only* once that PI has indicated that
+ * there's an entry here. The first trip around the DQRR ring
+ * will be much less efficient than all subsequent trips around
+ * it...
+ */
+ u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
+ QMAN_DQRR_PI_MASK;
+
+ /* there are new entries if pi != next_idx */
+ if (pi == s->dqrr.next_idx)
+ return NULL;
+
+ /*
+ * if next_idx is/was the last ring index, and 'pi' is
+ * different, we can disable the workaround as all the ring
+ * entries have now been DMA'd to so valid-bit checking is
+ * repaired. Note: this logic needs to be based on next_idx
+ * (which increments one at a time), rather than on pi (which
+ * can burst and wrap-around between our snapshots of it).
+ */
+ if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
+ pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
+ s->dqrr.next_idx, pi);
+ s->dqrr.reset_bug = 0;
+ }
+ prefetch(qbman_get_cmd(s,
+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+ }
+
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
+ verb = p->dq.verb;
+
+ /*
+ * If the valid-bit isn't of the expected polarity, nothing there. Note,
+ * in the DQRR reset bug workaround, we shouldn't need to skip this
+ * check, because we've already determined that a new entry is available
+ * and we've invalidated the cacheline before reading it, so the
+ * valid-bit behaviour is repaired and should tell us what we already
+ * knew from reading PI.
+ */
+ if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
+ prefetch(qbman_get_cmd(s,
+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+ return NULL;
+ }
+ /*
+ * There's something there. Move "next_idx" attention to the next ring
+ * entry (and prefetch it) before returning what we found.
+ */
+ s->dqrr.next_idx++;
+ s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
+ if (!s->dqrr.next_idx)
+ s->dqrr.valid_bit ^= QB_VALID_BIT;
+
+ /*
+ * If this is the final response to a volatile dequeue command
+ * indicate that the vdq is available
+ */
+ flags = p->dq.stat;
+ response_verb = verb & QBMAN_RESULT_MASK;
+ if ((response_verb == QBMAN_RESULT_DQ) &&
+ (flags & DPAA2_DQ_STAT_VOLATILE) &&
+ (flags & DPAA2_DQ_STAT_EXPIRED))
+ atomic_inc(&s->vdq.available);
+
+ prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+
+ return p;
+}
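+
+/*
+ * Typical DQRR polling loop built on the two functions above (a sketch;
+ * frame processing is elided):
+ *
+ *   const struct dpaa2_dq *dq;
+ *
+ *   while ((dq = qbman_swp_dqrr_next(swp))) {
+ *           if (qbman_result_is_DQ(dq))
+ *                   ... process the frame carried by this entry ...
+ *           qbman_swp_dqrr_consume(swp, dq);
+ *   }
+ */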
+
+/**
+ * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from
+ * qbman_swp_dqrr_next().
+ * @s: the software portal object
+ * @dq: the DQRR entry to be consumed
+ */
+void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq)
+{
+ qbman_write_register(s, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
+}
+
+/**
+ * qbman_result_has_new_result() - Check and get the dequeue response from the
+ * dq storage memory set in pull dequeue command
+ * @s: the software portal object
+ * @dq: the dequeue result read from the memory
+ *
+ * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
+ * dequeue result.
+ *
+ * Only used for user-provided storage of dequeue results, not DQRR. For
+ * efficiency purposes, the driver will perform any required endianness
+ * conversion to ensure that the user's dequeue result storage is in host-endian
+ * format. As such, once the user has called qbman_result_has_new_result() and
+ * been returned a valid dequeue result, they should not call it again on
+ * the same memory location (except of course if another dequeue command has
+ * been executed to produce a new result to that location).
+ */
+int qbman_result_has_new_result(struct qbman_swp *s, const struct dpaa2_dq *dq)
+{
+ if (dq->dq.tok != QMAN_DQ_TOKEN_VALID)
+ return 0;
+
+ /*
+ * Set token to be 0 so we will detect change back to 1
+ * next time the looping is traversed. Const is cast away here
+ * as we want users to treat the dequeue responses as read only.
+ */
+ ((struct dpaa2_dq *)dq)->dq.tok = 0;
+
+ /*
+ * Determine whether VDQCR is available based on whether the
+ * current result is sitting in the first storage location of
+ * the busy command.
+ */
+ if (s->vdq.storage == dq) {
+ s->vdq.storage = NULL;
+ atomic_inc(&s->vdq.available);
+ }
+
+ return 1;
+}
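+
+/*
+ * Sketch of draining a memory-backed pull dequeue: 'storage' is the
+ * array passed to qbman_pull_desc_set_storage() and 'num' the frame
+ * count requested. A real caller would bound the spin and stop early
+ * when an entry carries DPAA2_DQ_STAT_EXPIRED:
+ *
+ *   int i = 0;
+ *
+ *   while (i < num) {
+ *           if (!qbman_result_has_new_result(swp, &storage[i]))
+ *                   continue;
+ *           ... storage[i] is now valid and host-endian ...
+ *           i++;
+ *   }
+ */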
+
+/**
+ * qbman_release_desc_clear() - Clear the contents of a descriptor to
+ * default/starting state.
+ * @d: the release descriptor to be cleared
+ */
+void qbman_release_desc_clear(struct qbman_release_desc *d)
+{
+ memset(d, 0, sizeof(*d));
+ d->verb = 1 << 5; /* Release Command Valid */
+}
+
+/**
+ * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
+ * @d: the release descriptor
+ * @bpid: the ID of the buffer pool
+ */
+void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid)
+{
+ d->bpid = cpu_to_le16(bpid);
+}
+
+/**
+ * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
+ * interrupt source should be asserted after the release command is completed.
+ * @d: the release descriptor
+ * @enable: non-zero to assert RCDI on completion, zero not to
+ */
+void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
+{
+ if (enable)
+ d->verb |= 1 << 6;
+ else
+ d->verb &= ~(1 << 6);
+}
+
+#define RAR_IDX(rar) ((rar) & 0x7)
+#define RAR_VB(rar) ((rar) & 0x80)
+#define RAR_SUCCESS(rar) ((rar) & 0x100)
+
+/**
+ * qbman_swp_release() - Issue a buffer release command
+ * @s: the software portal object
+ * @d: the release descriptor
+ * @buffers: an array of the buffer addresses to be released
+ * @num_buffers: number of buffers to be released, must be between 1 and 7,
+ * inclusive
+ *
+ * Return 0 for success, -EBUSY if the release command ring is not ready.
+ */
+int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
+ const u64 *buffers, unsigned int num_buffers)
+{
+ int i;
+ struct qbman_release_desc *p;
+ u32 rar;
+
+ if (!num_buffers || (num_buffers > 7))
+ return -EINVAL;
+
+ rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
+ if (!RAR_SUCCESS(rar))
+ return -EBUSY;
+
+ /* Start the release command */
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
+ /* Copy the caller's buffer pointers to the command */
+ for (i = 0; i < num_buffers; i++)
+ p->buf[i] = cpu_to_le64(buffers[i]);
+ p->bpid = d->bpid;
+
+ /*
+ * Set the verb byte, have to substitute in the valid-bit and the number
+ * of buffers.
+ */
+ dma_wmb();
+ p->verb = d->verb | RAR_VB(rar) | num_buffers;
+
+ return 0;
+}
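+
+/*
+ * Sketch of seeding a pool with two buffers ('bpid' and the buffer
+ * addresses are assumed to be meaningful to the pool's users):
+ *
+ *   struct qbman_release_desc rd;
+ *   u64 bufs[2] = { addr0, addr1 };
+ *
+ *   qbman_release_desc_clear(&rd);
+ *   qbman_release_desc_set_bpid(&rd, bpid);
+ *   while (qbman_swp_release(swp, &rd, bufs, 2) == -EBUSY)
+ *           ;
+ */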
+
+struct qbman_acquire_desc {
+ u8 verb;
+ u8 reserved;
+ __le16 bpid;
+ u8 num;
+ u8 reserved2[59];
+};
+
+struct qbman_acquire_rslt {
+ u8 verb;
+ u8 rslt;
+ __le16 reserved;
+ u8 num;
+ u8 reserved2[3];
+ __le64 buf[7];
+};
+
+/**
+ * qbman_swp_acquire() - Issue a buffer acquire command
+ * @s: the software portal object
+ * @bpid: the buffer pool index
+ * @buffers: an array into which the acquired buffer addresses are written
+ * @num_buffers: number of buffers to be acquired, must be between 1 and 7,
+ * inclusive
+ *
+ * Return 0 for success, or negative error code if the acquire command
+ * fails.
+ */
+int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
+ unsigned int num_buffers)
+{
+ struct qbman_acquire_desc *p;
+ struct qbman_acquire_rslt *r;
+ int i;
+
+ if (!num_buffers || (num_buffers > 7))
+ return -EINVAL;
+
+ /* Start the management command */
+ p = qbman_swp_mc_start(s);
+
+ if (!p)
+ return -EBUSY;
+
+ /* Encode the caller-provided attributes */
+ p->bpid = cpu_to_le16(bpid);
+ p->num = num_buffers;
+
+ /* Complete the management command */
+ r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
+ if (unlikely(!r)) {
+ pr_err("qbman: acquire from BPID %d failed, no response\n",
+ bpid);
+ return -EIO;
+ }
+
+ /* Decode the outcome */
+ WARN_ON((r->verb & 0x7f) != QBMAN_MC_ACQUIRE);
+
+ /* Determine success or failure */
+ if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
+ pr_err("qbman: acquire from BPID 0x%x failed, code=0x%02x\n",
+ bpid, r->rslt);
+ return -EIO;
+ }
+
+ WARN_ON(r->num > num_buffers);
+
+ /* Copy the acquired buffers to the caller's array */
+ for (i = 0; i < r->num; i++)
+ buffers[i] = le64_to_cpu(r->buf[i]);
+
+ return (int)r->num;
+}
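+
+/*
+ * Sketch of the matching acquire path; the return value is the number
+ * of buffers actually obtained, which may be fewer than requested:
+ *
+ *   u64 bufs[7];
+ *   int n;
+ *
+ *   n = qbman_swp_acquire(swp, bpid, bufs, 7);
+ *   if (n < 0)
+ *           ... the command failed (e.g. the pool could supply nothing) ...
+ */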
+
+struct qbman_alt_fq_state_desc {
+ u8 verb;
+ u8 reserved[3];
+ __le32 fqid;
+ u8 reserved2[56];
+};
+
+struct qbman_alt_fq_state_rslt {
+ u8 verb;
+ u8 rslt;
+ u8 reserved[62];
+};
+
+#define ALT_FQ_FQID_MASK 0x00FFFFFF
+
+int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
+ u8 alt_fq_verb)
+{
+ struct qbman_alt_fq_state_desc *p;
+ struct qbman_alt_fq_state_rslt *r;
+
+ /* Start the management command */
+ p = qbman_swp_mc_start(s);
+ if (!p)
+ return -EBUSY;
+
+ p->fqid = cpu_to_le32(fqid & ALT_FQ_FQID_MASK);
+
+ /* Complete the management command */
+ r = qbman_swp_mc_complete(s, p, alt_fq_verb);
+ if (unlikely(!r)) {
+ pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
+ alt_fq_verb);
+ return -EIO;
+ }
+
+ /* Decode the outcome */
+ WARN_ON((r->verb & QBMAN_RESULT_MASK) != alt_fq_verb);
+
+ /* Determine success or failure */
+ if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
+ pr_err("qbman: ALT FQID %d failed: verb = 0x%08x code = 0x%02x\n",
+ fqid, r->verb, r->rslt);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+struct qbman_cdan_ctrl_desc {
+ u8 verb;
+ u8 reserved;
+ __le16 ch;
+ u8 we;
+ u8 ctrl;
+ __le16 reserved2;
+ __le64 cdan_ctx;
+ u8 reserved3[48];
+};
+
+struct qbman_cdan_ctrl_rslt {
+ u8 verb;
+ u8 rslt;
+ __le16 ch;
+ u8 reserved[60];
+};
+
+int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
+ u8 we_mask, u8 cdan_en,
+ u64 ctx)
+{
+ struct qbman_cdan_ctrl_desc *p = NULL;
+ struct qbman_cdan_ctrl_rslt *r = NULL;
+
+ /* Start the management command */
+ p = qbman_swp_mc_start(s);
+ if (!p)
+ return -EBUSY;
+
+ /* Encode the caller-provided attributes */
+ p->ch = cpu_to_le16(channelid);
+ p->we = we_mask;
+ if (cdan_en)
+ p->ctrl = 1;
+ else
+ p->ctrl = 0;
+ p->cdan_ctx = cpu_to_le64(ctx);
+
+ /* Complete the management command */
+ r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
+ if (unlikely(!r)) {
+ pr_err("qbman: wqchan config failed, no response\n");
+ return -EIO;
+ }
+
+ WARN_ON((r->verb & 0x7f) != QBMAN_WQCHAN_CONFIGURE);
+
+ /* Determine success or failure */
+ if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
+ pr_err("qbman: CDAN cQID %d failed: code = 0x%02x\n",
+ channelid, r->rslt);
+ return -EIO;
+ }
+
+ return 0;
+}
diff --git a/drivers/soc/fsl/dpio/qbman-portal.h b/drivers/soc/fsl/dpio/qbman-portal.h
new file mode 100644
index 000000000..89d1dd996
--- /dev/null
+++ b/drivers/soc/fsl/dpio/qbman-portal.h
@@ -0,0 +1,444 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Copyright 2016 NXP
+ *
+ */
+#ifndef __FSL_QBMAN_PORTAL_H
+#define __FSL_QBMAN_PORTAL_H
+
+#include <soc/fsl/dpaa2-fd.h>
+
+struct dpaa2_dq;
+struct qbman_swp;
+
+/* qbman software portal descriptor structure */
+struct qbman_swp_desc {
+ void *cena_bar; /* Cache-enabled portal base address */
+ void __iomem *cinh_bar; /* Cache-inhibited portal base address */
+ u32 qman_version;
+};
+
+#define QBMAN_SWP_INTERRUPT_EQRI 0x01
+#define QBMAN_SWP_INTERRUPT_EQDI 0x02
+#define QBMAN_SWP_INTERRUPT_DQRI 0x04
+#define QBMAN_SWP_INTERRUPT_RCRI 0x08
+#define QBMAN_SWP_INTERRUPT_RCDI 0x10
+#define QBMAN_SWP_INTERRUPT_VDCI 0x20
+
+/* the structure for pull dequeue descriptor */
+struct qbman_pull_desc {
+ u8 verb;
+ u8 numf;
+ u8 tok;
+ u8 reserved;
+ __le32 dq_src;
+ __le64 rsp_addr;
+ u64 rsp_addr_virt;
+ u8 padding[40];
+};
+
+enum qbman_pull_type_e {
+ /* dequeue with priority precedence, respect intra-class scheduling */
+ qbman_pull_type_prio = 1,
+ /* dequeue with active FQ precedence, respect ICS */
+ qbman_pull_type_active,
+ /* dequeue with active FQ precedence, no ICS */
+ qbman_pull_type_active_noics
+};
+
+/* Definitions for parsing dequeue entries */
+#define QBMAN_RESULT_MASK 0x7f
+#define QBMAN_RESULT_DQ 0x60
+#define QBMAN_RESULT_FQRN 0x21
+#define QBMAN_RESULT_FQRNI 0x22
+#define QBMAN_RESULT_FQPN 0x24
+#define QBMAN_RESULT_FQDAN 0x25
+#define QBMAN_RESULT_CDAN 0x26
+#define QBMAN_RESULT_CSCN_MEM 0x27
+#define QBMAN_RESULT_CGCU 0x28
+#define QBMAN_RESULT_BPSCN 0x29
+#define QBMAN_RESULT_CSCN_WQ 0x2a
+
+/* QBMan FQ management command codes */
+#define QBMAN_FQ_SCHEDULE 0x48
+#define QBMAN_FQ_FORCE 0x49
+#define QBMAN_FQ_XON 0x4d
+#define QBMAN_FQ_XOFF 0x4e
+
+/* structure of enqueue descriptor */
+struct qbman_eq_desc {
+ u8 verb;
+ u8 dca;
+ __le16 seqnum;
+ __le16 orpid;
+ __le16 reserved1;
+ __le32 tgtid;
+ __le32 tag;
+ __le16 qdbin;
+ u8 qpri;
+ u8 reserved[3];
+ u8 wae;
+ u8 rspid;
+ __le64 rsp_addr;
+ u8 fd[32];
+};
+
+/* buffer release descriptor */
+struct qbman_release_desc {
+ u8 verb;
+ u8 reserved;
+ __le16 bpid;
+ __le32 reserved2;
+ __le64 buf[7];
+};
+
+/* Management command result codes */
+#define QBMAN_MC_RSLT_OK 0xf0
+
+#define CODE_CDAN_WE_EN 0x1
+#define CODE_CDAN_WE_CTX 0x4
+
+/* portal data structure */
+struct qbman_swp {
+ const struct qbman_swp_desc *desc;
+ void *addr_cena;
+ void __iomem *addr_cinh;
+
+ /* Management commands */
+ struct {
+ u32 valid_bit; /* 0x00 or 0x80 */
+ } mc;
+
+ /* Push dequeues */
+ u32 sdq;
+
+ /* Volatile dequeues */
+ struct {
+ atomic_t available; /* indicates if a command can be sent */
+ u32 valid_bit; /* 0x00 or 0x80 */
+ struct dpaa2_dq *storage; /* NULL if DQRR */
+ } vdq;
+
+ /* DQRR */
+ struct {
+ u32 next_idx;
+ u32 valid_bit;
+ u8 dqrr_size;
+ int reset_bug; /* indicates dqrr reset workaround is needed */
+ } dqrr;
+};
+
+struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
+void qbman_swp_finish(struct qbman_swp *p);
+u32 qbman_swp_interrupt_read_status(struct qbman_swp *p);
+void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask);
+u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p);
+void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask);
+int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);
+void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);
+
+void qbman_swp_push_get(struct qbman_swp *p, u8 channel_idx, int *enabled);
+void qbman_swp_push_set(struct qbman_swp *p, u8 channel_idx, int enable);
+
+void qbman_pull_desc_clear(struct qbman_pull_desc *d);
+void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
+ struct dpaa2_dq *storage,
+ dma_addr_t storage_phys,
+ int stash);
+void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes);
+void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid);
+void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
+ enum qbman_pull_type_e dct);
+void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
+ enum qbman_pull_type_e dct);
+
+int qbman_swp_pull(struct qbman_swp *p, struct qbman_pull_desc *d);
+
+const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s);
+void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq);
+
+int qbman_result_has_new_result(struct qbman_swp *p, const struct dpaa2_dq *dq);
+
+void qbman_eq_desc_clear(struct qbman_eq_desc *d);
+void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
+void qbman_eq_desc_set_token(struct qbman_eq_desc *d, u8 token);
+void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid);
+void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
+ u32 qd_bin, u32 qd_prio);
+
+int qbman_swp_enqueue(struct qbman_swp *p, const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd);
+
+void qbman_release_desc_clear(struct qbman_release_desc *d);
+void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid);
+void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);
+
+int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
+ const u64 *buffers, unsigned int num_buffers);
+int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
+ unsigned int num_buffers);
+int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
+ u8 alt_fq_verb);
+int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
+ u8 we_mask, u8 cdan_en,
+ u64 ctx);
+
+void *qbman_swp_mc_start(struct qbman_swp *p);
+void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb);
+void *qbman_swp_mc_result(struct qbman_swp *p);
+
+/**
+ * qbman_result_is_DQ() - check if the dequeue result is a dequeue response
+ * @dq: the dequeue result to be checked
+ *
+ * DQRR entries may contain non-dequeue results, i.e. notifications
+ */
+static inline int qbman_result_is_DQ(const struct dpaa2_dq *dq)
+{
+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_DQ);
+}
+
+/**
+ * qbman_result_is_SCN() - Check whether the dequeue result is a notification
+ * @dq: the dequeue result to be checked
+ */
+static inline int qbman_result_is_SCN(const struct dpaa2_dq *dq)
+{
+ return !qbman_result_is_DQ(dq);
+}
+
+/* FQ Data Availability */
+static inline int qbman_result_is_FQDAN(const struct dpaa2_dq *dq)
+{
+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQDAN);
+}
+
+/* Channel Data Availability */
+static inline int qbman_result_is_CDAN(const struct dpaa2_dq *dq)
+{
+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CDAN);
+}
+
+/* Congestion State Change */
+static inline int qbman_result_is_CSCN(const struct dpaa2_dq *dq)
+{
+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CSCN_WQ);
+}
+
+/* Buffer Pool State Change */
+static inline int qbman_result_is_BPSCN(const struct dpaa2_dq *dq)
+{
+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_BPSCN);
+}
+
+/* Congestion Group Count Update */
+static inline int qbman_result_is_CGCU(const struct dpaa2_dq *dq)
+{
+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CGCU);
+}
+
+/* Retirement */
+static inline int qbman_result_is_FQRN(const struct dpaa2_dq *dq)
+{
+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRN);
+}
+
+/* Retirement Immediate */
+static inline int qbman_result_is_FQRNI(const struct dpaa2_dq *dq)
+{
+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRNI);
+}
+
+ /* Park */
+static inline int qbman_result_is_FQPN(const struct dpaa2_dq *dq)
+{
+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQPN);
+}
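+
+/*
+ * The predicates above are typically used to dispatch on the entry
+ * type, e.g. (a sketch):
+ *
+ *   if (qbman_result_is_DQ(dq))
+ *           ... a frame dequeue response ...
+ *   else if (qbman_result_is_FQDAN(dq) || qbman_result_is_CDAN(dq))
+ *           ... a data-availability notification ...
+ *   else if (qbman_result_is_SCN(dq))
+ *           ... some other state-change notification ...
+ */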
+
+/**
+ * qbman_result_SCN_state() - Get the state field in State-change notification
+ * @scn: the state change notification to parse
+ */
+static inline u8 qbman_result_SCN_state(const struct dpaa2_dq *scn)
+{
+ return scn->scn.state;
+}
+
+#define SCN_RID_MASK 0x00FFFFFF
+
+/**
+ * qbman_result_SCN_rid() - Get the resource id in State-change notification
+ * @scn: the state change notification to parse
+ */
+static inline u32 qbman_result_SCN_rid(const struct dpaa2_dq *scn)
+{
+ return le32_to_cpu(scn->scn.rid_tok) & SCN_RID_MASK;
+}
+
+/**
+ * qbman_result_SCN_ctx() - Get the context data in State-change notification
+ * @scn: the state change notification to parse
+ */
+static inline u64 qbman_result_SCN_ctx(const struct dpaa2_dq *scn)
+{
+ return le64_to_cpu(scn->scn.ctx);
+}
+
+/**
+ * qbman_swp_fq_schedule() - Move the fq to the scheduled state
+ * @s: the software portal object
+ * @fqid: the index of frame queue to be scheduled
+ *
+ * There are a couple of different ways that a FQ can end up in the parked
+ * state; this schedules it.
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_fq_schedule(struct qbman_swp *s, u32 fqid)
+{
+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
+}
+
+/**
+ * qbman_swp_fq_force() - Force the FQ to fully scheduled state
+ * @s: the software portal object
+ * @fqid: the index of frame queue to be forced
+ *
+ * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
+ * and thus be available for selection by any channel-dequeuing behaviour (push
+ * or pull). If the FQ is subsequently "dequeued" from the channel and is still
+ * empty at the time this happens, the resulting dq_entry will have no FD.
+ * (qbman_result_DQ_fd() will return NULL.)
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_fq_force(struct qbman_swp *s, u32 fqid)
+{
+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
+}
+
+/**
+ * qbman_swp_fq_xon() - sets FQ flow-control to XON
+ * @s: the software portal object
+ * @fqid: the index of frame queue
+ *
+ * This setting doesn't affect enqueues to the FQ, just dequeues.
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_fq_xon(struct qbman_swp *s, u32 fqid)
+{
+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
+}
+
+/**
+ * qbman_swp_fq_xoff() - sets FQ flow-control to XOFF
+ * @s: the software portal object
+ * @fqid: the index of frame queue
+ *
+ * This setting doesn't affect enqueues to the FQ, just dequeues.
+ * XOFF FQs will remain in the tentatively-scheduled state, even when
+ * non-empty, meaning they won't be selected for scheduled dequeuing.
+ * If a FQ is changed to XOFF after it had already become truly-scheduled
+ * to a channel, and a pull dequeue of that channel occurs that selects
+ * that FQ for dequeuing, then the resulting dq_entry will have no FD.
+ * (qbman_result_DQ_fd() will return NULL.)
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_fq_xoff(struct qbman_swp *s, u32 fqid)
+{
+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
+}
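+
+/*
+ * Sketch of using the XOFF/XON pair to pause and resume scheduled
+ * dequeuing from one FQ (enqueues continue to land throughout):
+ *
+ *   qbman_swp_fq_xoff(swp, fqid);
+ *   ... drain or reconfigure the consumer ...
+ *   qbman_swp_fq_xon(swp, fqid);
+ */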
+
+/* If the user has been allocated a channel object that is going to generate
+ * CDANs to another channel, then the qbman_swp_CDAN* functions will be
+ * necessary.
+ *
+ * CDAN-enabled channels only generate a single CDAN notification, after which
+ * they need to be reenabled before they'll generate another. The idea is
+ * that pull dequeuing will occur in reaction to the CDAN, followed by a
+ * reenable step. Each function generates a distinct command to hardware, so a
+ * combination function is provided if the user wishes to modify the "context"
+ * (which shows up in each CDAN message) each time they reenable, as a single
+ * command to hardware.
+ */
+
+/**
+ * qbman_swp_CDAN_set_context() - Set CDAN context
+ * @s: the software portal object
+ * @channelid: the channel index
+ * @ctx: the context to be set in CDAN
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_CDAN_set_context(struct qbman_swp *s, u16 channelid,
+ u64 ctx)
+{
+ return qbman_swp_CDAN_set(s, channelid,
+ CODE_CDAN_WE_CTX,
+ 0, ctx);
+}
+
+/**
+ * qbman_swp_CDAN_enable() - Enable CDAN for the channel
+ * @s: the software portal object
+ * @channelid: the index of the channel to generate CDAN
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_CDAN_enable(struct qbman_swp *s, u16 channelid)
+{
+ return qbman_swp_CDAN_set(s, channelid,
+ CODE_CDAN_WE_EN,
+ 1, 0);
+}
+
+/**
+ * qbman_swp_CDAN_disable() - disable CDAN for the channel
+ * @s: the software portal object
+ * @channelid: the index of the channel to generate CDAN
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_CDAN_disable(struct qbman_swp *s, u16 channelid)
+{
+ return qbman_swp_CDAN_set(s, channelid,
+ CODE_CDAN_WE_EN,
+ 0, 0);
+}
+
+/**
+ * qbman_swp_CDAN_set_context_enable() - Set CDAN context and enable CDAN
+ * @s: the software portal object
+ * @channelid: the index of the channel to generate CDAN
+ * @ctx: the context to be set in CDAN
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s,
+ u16 channelid,
+ u64 ctx)
+{
+ return qbman_swp_CDAN_set(s, channelid,
+ CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
+ 1, ctx);
+}
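+
+/*
+ * Sketch of the CDAN cycle described above: after a CDAN is consumed the
+ * channel must be rearmed before it will notify again, and the combined
+ * helper performs the context update and the reenable in one command
+ * ('channelid' and 'ctx' are assumptions of the example):
+ *
+ *   if (qbman_result_is_CDAN(dq)) {
+ *           ... pull-dequeue the channel, e.g. via qbman_swp_pull() ...
+ *           qbman_swp_CDAN_set_context_enable(swp, channelid, ctx);
+ *   }
+ */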
+
+/* Wraps up submit + poll-for-result */
+static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
+ u8 cmd_verb)
+{
+ int loopvar = 1000;
+
+ qbman_swp_mc_submit(swp, cmd, cmd_verb);
+
+ do {
+ cmd = qbman_swp_mc_result(swp);
+ } while (!cmd && loopvar--);
+
+ WARN_ON(!loopvar);
+
+ return cmd;
+}
+
+#endif /* __FSL_QBMAN_PORTAL_H */
diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c
new file mode 100644
index 000000000..302e0c8d6
--- /dev/null
+++ b/drivers/soc/fsl/guts.c
@@ -0,0 +1,248 @@
+/*
+ * Freescale QorIQ Platforms GUTS Driver
+ *
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/of_fdt.h>
+#include <linux/sys_soc.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/fsl/guts.h>
+
+struct guts {
+ struct ccsr_guts __iomem *regs;
+ bool little_endian;
+};
+
+struct fsl_soc_die_attr {
+ char *die;
+ u32 svr;
+ u32 mask;
+};
+
+static struct guts *guts;
+static struct soc_device_attribute soc_dev_attr;
+static struct soc_device *soc_dev;
+
+
+/* SoC die attribute definition for QorIQ platform */
+static const struct fsl_soc_die_attr fsl_soc_die[] = {
+ /*
+ * Power Architecture-based SoCs T Series
+ */
+
+ /* Die: T4240, SoC: T4240/T4160/T4080 */
+ { .die = "T4240",
+ .svr = 0x82400000,
+ .mask = 0xfff00000,
+ },
+ /* Die: T1040, SoC: T1040/T1020/T1042/T1022 */
+ { .die = "T1040",
+ .svr = 0x85200000,
+ .mask = 0xfff00000,
+ },
+ /* Die: T2080, SoC: T2080/T2081 */
+ { .die = "T2080",
+ .svr = 0x85300000,
+ .mask = 0xfff00000,
+ },
+ /* Die: T1024, SoC: T1024/T1014/T1023/T1013 */
+ { .die = "T1024",
+ .svr = 0x85400000,
+ .mask = 0xfff00000,
+ },
+
+ /*
+ * ARM-based SoCs LS Series
+ */
+
+ /* Die: LS1043A, SoC: LS1043A/LS1023A */
+ { .die = "LS1043A",
+ .svr = 0x87920000,
+ .mask = 0xffff0000,
+ },
+ /* Die: LS2080A, SoC: LS2080A/LS2040A/LS2085A */
+ { .die = "LS2080A",
+ .svr = 0x87010000,
+ .mask = 0xff3f0000,
+ },
+ /* Die: LS1088A, SoC: LS1088A/LS1048A/LS1084A/LS1044A */
+ { .die = "LS1088A",
+ .svr = 0x87030000,
+ .mask = 0xff3f0000,
+ },
+ /* Die: LS1012A, SoC: LS1012A */
+ { .die = "LS1012A",
+ .svr = 0x87040000,
+ .mask = 0xffff0000,
+ },
+ /* Die: LS1046A, SoC: LS1046A/LS1026A */
+ { .die = "LS1046A",
+ .svr = 0x87070000,
+ .mask = 0xffff0000,
+ },
+ /* Die: LS2088A, SoC: LS2088A/LS2048A/LS2084A/LS2044A */
+ { .die = "LS2088A",
+ .svr = 0x87090000,
+ .mask = 0xff3f0000,
+ },
+ /* Die: LS1021A, SoC: LS1021A/LS1020A/LS1022A */
+ { .die = "LS1021A",
+ .svr = 0x87000000,
+ .mask = 0xfff70000,
+ },
+ { },
+};
+
+static const struct fsl_soc_die_attr *fsl_soc_die_match(
+ u32 svr, const struct fsl_soc_die_attr *matches)
+{
+ while (matches->svr) {
+ if (matches->svr == (svr & matches->mask))
+ return matches;
+ matches++;
+ }
+ return NULL;
+}
+
+u32 fsl_guts_get_svr(void)
+{
+ u32 svr = 0;
+
+ if (!guts || !guts->regs)
+ return svr;
+
+ if (guts->little_endian)
+ svr = ioread32(&guts->regs->svr);
+ else
+ svr = ioread32be(&guts->regs->svr);
+
+ return svr;
+}
+EXPORT_SYMBOL(fsl_guts_get_svr);
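+
+/*
+ * Sketch of consuming the SVR: the upper bits identify the die (see
+ * fsl_soc_die[] above) and the low byte carries the major/minor
+ * revision, as decoded by the probe routine below:
+ *
+ *   u32 svr = fsl_guts_get_svr();
+ *   u8 major = (svr >> 4) & 0xf;
+ *   u8 minor = svr & 0xf;
+ */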
+
+static int fsl_guts_probe(struct platform_device *pdev)
+{
+ struct device_node *root, *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ const struct fsl_soc_die_attr *soc_die;
+ const char *machine = NULL;
+ u32 svr;
+
+ /* Initialize guts */
+ guts = devm_kzalloc(dev, sizeof(*guts), GFP_KERNEL);
+ if (!guts)
+ return -ENOMEM;
+
+ guts->little_endian = of_property_read_bool(np, "little-endian");
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ guts->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(guts->regs))
+ return PTR_ERR(guts->regs);
+
+ /* Register soc device */
+ root = of_find_node_by_path("/");
+ if (of_property_read_string(root, "model", &machine))
+ of_property_read_string_index(root, "compatible", 0, &machine);
+ of_node_put(root);
+ if (machine)
+ soc_dev_attr.machine = devm_kstrdup(dev, machine, GFP_KERNEL);
+
+ svr = fsl_guts_get_svr();
+ soc_die = fsl_soc_die_match(svr, fsl_soc_die);
+ if (soc_die) {
+ soc_dev_attr.family = devm_kasprintf(dev, GFP_KERNEL,
+ "QorIQ %s", soc_die->die);
+ } else {
+ soc_dev_attr.family = devm_kasprintf(dev, GFP_KERNEL, "QorIQ");
+ }
+ if (!soc_dev_attr.family)
+ return -ENOMEM;
+ soc_dev_attr.soc_id = devm_kasprintf(dev, GFP_KERNEL,
+ "svr:0x%08x", svr);
+ if (!soc_dev_attr.soc_id)
+ return -ENOMEM;
+ soc_dev_attr.revision = devm_kasprintf(dev, GFP_KERNEL, "%d.%d",
+ (svr >> 4) & 0xf, svr & 0xf);
+ if (!soc_dev_attr.revision)
+ return -ENOMEM;
+
+ soc_dev = soc_device_register(&soc_dev_attr);
+ if (IS_ERR(soc_dev))
+ return PTR_ERR(soc_dev);
+
+ pr_info("Machine: %s\n", soc_dev_attr.machine);
+ pr_info("SoC family: %s\n", soc_dev_attr.family);
+ pr_info("SoC ID: %s, Revision: %s\n",
+ soc_dev_attr.soc_id, soc_dev_attr.revision);
+ return 0;
+}
+
+static int fsl_guts_remove(struct platform_device *dev)
+{
+ soc_device_unregister(soc_dev);
+ return 0;
+}
+
+/*
+ * Table of compatible strings to match the device tree 'guts' node
+ * on Freescale QorIQ SoCs.
+ */
+static const struct of_device_id fsl_guts_of_match[] = {
+ { .compatible = "fsl,qoriq-device-config-1.0", },
+ { .compatible = "fsl,qoriq-device-config-2.0", },
+ { .compatible = "fsl,p1010-guts", },
+ { .compatible = "fsl,p1020-guts", },
+ { .compatible = "fsl,p1021-guts", },
+ { .compatible = "fsl,p1022-guts", },
+ { .compatible = "fsl,p1023-guts", },
+ { .compatible = "fsl,p2020-guts", },
+ { .compatible = "fsl,bsc9131-guts", },
+ { .compatible = "fsl,bsc9132-guts", },
+ { .compatible = "fsl,mpc8536-guts", },
+ { .compatible = "fsl,mpc8544-guts", },
+ { .compatible = "fsl,mpc8548-guts", },
+ { .compatible = "fsl,mpc8568-guts", },
+ { .compatible = "fsl,mpc8569-guts", },
+ { .compatible = "fsl,mpc8572-guts", },
+ { .compatible = "fsl,ls1021a-dcfg", },
+ { .compatible = "fsl,ls1043a-dcfg", },
+ { .compatible = "fsl,ls2080a-dcfg", },
+ { .compatible = "fsl,ls1088a-dcfg", },
+ { .compatible = "fsl,ls1012a-dcfg", },
+ { .compatible = "fsl,ls1046a-dcfg", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, fsl_guts_of_match);
+
+static struct platform_driver fsl_guts_driver = {
+ .driver = {
+ .name = "fsl-guts",
+ .of_match_table = fsl_guts_of_match,
+ },
+ .probe = fsl_guts_probe,
+ .remove = fsl_guts_remove,
+};
+
+static int __init fsl_guts_init(void)
+{
+ return platform_driver_register(&fsl_guts_driver);
+}
+core_initcall(fsl_guts_init);
+
+static void __exit fsl_guts_exit(void)
+{
+ platform_driver_unregister(&fsl_guts_driver);
+}
+module_exit(fsl_guts_exit);
diff --git a/drivers/soc/fsl/qbman/Kconfig b/drivers/soc/fsl/qbman/Kconfig
new file mode 100644
index 000000000..d570cb5fd
--- /dev/null
+++ b/drivers/soc/fsl/qbman/Kconfig
@@ -0,0 +1,67 @@
+menuconfig FSL_DPAA
+ bool "QorIQ DPAA1 framework support"
+ depends on (FSL_SOC_BOOKE || ARCH_LAYERSCAPE)
+ select GENERIC_ALLOCATOR
+ help
+ The Freescale Data Path Acceleration Architecture (DPAA) is a set of
+ hardware components on specific QorIQ multicore processors.
+ This architecture provides the infrastructure to support simplified
+ sharing of networking interfaces and accelerators by multiple CPUs.
+ The major h/w blocks composing DPAA are BMan and QMan.
+
+ The Buffer Manager (BMan) is a hardware buffer pool management block
+ that allows software and accelerators on the datapath to acquire and
+ release buffers in order to build frames.
+
+ The Queue Manager (QMan) is a hardware queue management block
+ that allows software and accelerators on the datapath to enqueue and
+ dequeue frames in order to communicate.
+
+if FSL_DPAA
+
+config FSL_DPAA_CHECKING
+ bool "Additional driver checking"
+ help
+ Compiles in additional checks, to sanity-check the drivers and
+ any use of the exported API. Not recommended for performance.
+
+config FSL_BMAN_TEST
+ tristate "BMan self-tests"
+ help
+ Compile the BMan self-test code. These tests will
+ exercise the BMan APIs to confirm functionality
+ of both the software drivers and hardware device.
+
+config FSL_BMAN_TEST_API
+ bool "High-level API self-test"
+ depends on FSL_BMAN_TEST
+ default y
+ help
+ This requires the presence of cpu-affine portals, and performs
+ high-level API testing with them (whichever portal(s) are affine
+ to the cpu(s) the test executes on).
+
+config FSL_QMAN_TEST
+ tristate "QMan self-tests"
+ help
+ Compile self-test code for QMan.
+
+config FSL_QMAN_TEST_API
+ bool "QMan high-level self-test"
+ depends on FSL_QMAN_TEST
+ default y
+ help
+ This requires the presence of cpu-affine portals, and performs
+ high-level API testing with them (whichever portal(s) are affine to
+ the cpu(s) the test executes on).
+
+config FSL_QMAN_TEST_STASH
+ bool "QMan 'hot potato' data-stashing self-test"
+ depends on FSL_QMAN_TEST
+ default y
+ help
+ This performs a "hot potato" style test enqueuing/dequeuing a frame
+ across a series of FQs scheduled to different portals (and cpus), with
+ DQRR, data and context stashing always on.
+
+endif # FSL_DPAA
diff --git a/drivers/soc/fsl/qbman/Makefile b/drivers/soc/fsl/qbman/Makefile
new file mode 100644
index 000000000..811312ad5
--- /dev/null
+++ b/drivers/soc/fsl/qbman/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_FSL_DPAA) += bman_ccsr.o qman_ccsr.o \
+ bman_portal.o qman_portal.o \
+ bman.o qman.o dpaa_sys.o
+
+obj-$(CONFIG_FSL_BMAN_TEST) += bman-test.o
+bman-test-y = bman_test.o
+bman-test-$(CONFIG_FSL_BMAN_TEST_API) += bman_test_api.o
+
+obj-$(CONFIG_FSL_QMAN_TEST) += qman-test.o
+qman-test-y = qman_test.o
+qman-test-$(CONFIG_FSL_QMAN_TEST_API) += qman_test_api.o
+qman-test-$(CONFIG_FSL_QMAN_TEST_STASH) += qman_test_stash.o
diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c
new file mode 100644
index 000000000..f9485cedc
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman.c
@@ -0,0 +1,821 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+#define IRQNAME "BMan portal %d"
+#define MAX_IRQNAME 16 /* big enough for "BMan portal %d" */
+
+/* Portal register assists */
+
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+/* Cache-inhibited register offsets */
+#define BM_REG_RCR_PI_CINH 0x3000
+#define BM_REG_RCR_CI_CINH 0x3100
+#define BM_REG_RCR_ITR 0x3200
+#define BM_REG_CFG 0x3300
+#define BM_REG_SCN(n) (0x3400 + ((n) << 6))
+#define BM_REG_ISR 0x3e00
+#define BM_REG_IER 0x3e40
+#define BM_REG_ISDR 0x3e80
+#define BM_REG_IIR 0x3ec0
+
+/* Cache-enabled register offsets */
+#define BM_CL_CR 0x0000
+#define BM_CL_RR0 0x0100
+#define BM_CL_RR1 0x0140
+#define BM_CL_RCR 0x1000
+#define BM_CL_RCR_PI_CENA 0x3000
+#define BM_CL_RCR_CI_CENA 0x3100
+
+#else
+/* Cache-inhibited register offsets */
+#define BM_REG_RCR_PI_CINH 0x0000
+#define BM_REG_RCR_CI_CINH 0x0004
+#define BM_REG_RCR_ITR 0x0008
+#define BM_REG_CFG 0x0100
+#define BM_REG_SCN(n) (0x0200 + ((n) << 2))
+#define BM_REG_ISR 0x0e00
+#define BM_REG_IER 0x0e04
+#define BM_REG_ISDR 0x0e08
+#define BM_REG_IIR 0x0e0c
+
+/* Cache-enabled register offsets */
+#define BM_CL_CR 0x0000
+#define BM_CL_RR0 0x0100
+#define BM_CL_RR1 0x0140
+#define BM_CL_RCR 0x1000
+#define BM_CL_RCR_PI_CENA 0x3000
+#define BM_CL_RCR_CI_CENA 0x3100
+#endif
+
+/*
+ * Portal modes.
+ * Enum types;
+ * pmode == production mode
+ * cmode == consumption mode,
+ * Enum values use 3 letter codes. First letter matches the portal mode,
+ * remaining two letters indicate;
+ * ci == cache-inhibited portal register
+ * ce == cache-enabled portal register
+ * vb == in-band valid-bit (cache-enabled)
+ */
+enum bm_rcr_pmode { /* matches BCSP_CFG::RPM */
+ bm_rcr_pci = 0, /* PI index, cache-inhibited */
+ bm_rcr_pce = 1, /* PI index, cache-enabled */
+ bm_rcr_pvb = 2 /* valid-bit */
+};
+enum bm_rcr_cmode { /* s/w-only */
+ bm_rcr_cci, /* CI index, cache-inhibited */
+ bm_rcr_cce /* CI index, cache-enabled */
+};
+
+
+/* --- Portal structures --- */
+
+#define BM_RCR_SIZE 8
+
+/* Release Command */
+struct bm_rcr_entry {
+ union {
+ struct {
+ u8 _ncw_verb; /* writes to this are non-coherent */
+ u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
+ u8 __reserved1[62];
+ };
+ struct bm_buffer bufs[8];
+ };
+};
+#define BM_RCR_VERB_VBIT 0x80
+#define BM_RCR_VERB_CMD_MASK 0x70 /* one of two values; */
+#define BM_RCR_VERB_CMD_BPID_SINGLE 0x20
+#define BM_RCR_VERB_CMD_BPID_MULTI 0x30
+#define BM_RCR_VERB_BUFCOUNT_MASK 0x0f /* values 1..8 */
+
+struct bm_rcr {
+ struct bm_rcr_entry *ring, *cursor;
+ u8 ci, available, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ u32 busy;
+ enum bm_rcr_pmode pmode;
+ enum bm_rcr_cmode cmode;
+#endif
+};
+
+/* MC (Management Command) command */
+struct bm_mc_command {
+ u8 _ncw_verb; /* writes to this are non-coherent */
+ u8 bpid; /* used by acquire command */
+ u8 __reserved[62];
+};
+#define BM_MCC_VERB_VBIT 0x80
+#define BM_MCC_VERB_CMD_MASK 0x70 /* where the verb contains; */
+#define BM_MCC_VERB_CMD_ACQUIRE 0x10
+#define BM_MCC_VERB_CMD_QUERY 0x40
+#define BM_MCC_VERB_ACQUIRE_BUFCOUNT 0x0f /* values 1..8 go here */
+
+/* MC result, Acquire and Query Response */
+union bm_mc_result {
+ struct {
+ u8 verb;
+ u8 bpid;
+ u8 __reserved[62];
+ };
+ struct bm_buffer bufs[8];
+};
+#define BM_MCR_VERB_VBIT 0x80
+#define BM_MCR_VERB_CMD_MASK BM_MCC_VERB_CMD_MASK
+#define BM_MCR_VERB_CMD_ACQUIRE BM_MCC_VERB_CMD_ACQUIRE
+#define BM_MCR_VERB_CMD_QUERY BM_MCC_VERB_CMD_QUERY
+#define BM_MCR_VERB_CMD_ERR_INVALID 0x60
+#define BM_MCR_VERB_CMD_ERR_ECC 0x70
+#define BM_MCR_VERB_ACQUIRE_BUFCOUNT BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
+#define BM_MCR_TIMEOUT 10000 /* us */
+
+struct bm_mc {
+ struct bm_mc_command *cr;
+ union bm_mc_result *rr;
+ u8 rridx, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ enum {
+ /* Can only be _mc_start()ed */
+ mc_idle,
+ /* Can only be _mc_commit()ed or _mc_abort()ed */
+ mc_user,
+ /* Can only be _mc_retry()ed */
+ mc_hw
+ } state;
+#endif
+};
+
+struct bm_addr {
+ void *ce; /* cache-enabled */
+ __be32 *ce_be; /* Same as above but for direct access */
+ void __iomem *ci; /* cache-inhibited */
+};
+
+struct bm_portal {
+ struct bm_addr addr;
+ struct bm_rcr rcr;
+ struct bm_mc mc;
+} ____cacheline_aligned;
+
+/* Cache-inhibited register access. */
+static inline u32 bm_in(struct bm_portal *p, u32 offset)
+{
+ return ioread32be(p->addr.ci + offset);
+}
+
+static inline void bm_out(struct bm_portal *p, u32 offset, u32 val)
+{
+ iowrite32be(val, p->addr.ci + offset);
+}
+
+/* Cache Enabled Portal Access */
+static inline void bm_cl_invalidate(struct bm_portal *p, u32 offset)
+{
+ dpaa_invalidate(p->addr.ce + offset);
+}
+
+static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset)
+{
+ dpaa_touch_ro(p->addr.ce + offset);
+}
+
+static inline u32 bm_ce_in(struct bm_portal *p, u32 offset)
+{
+ return be32_to_cpu(*(p->addr.ce_be + (offset/4)));
+}
+
+struct bman_portal {
+ struct bm_portal p;
+ /* interrupt sources processed by portal_isr(), configurable */
+ unsigned long irq_sources;
+ /* probing time config params for cpu-affine portals */
+ const struct bm_portal_config *config;
+ char irqname[MAX_IRQNAME];
+};
+
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal);
+
+static inline struct bman_portal *get_affine_portal(void)
+{
+ return &get_cpu_var(bman_affine_portal);
+}
+
+static inline void put_affine_portal(void)
+{
+ put_cpu_var(bman_affine_portal);
+}
+
+/*
+ * This object type refers to a pool; it isn't *the* pool. There may be
+ * more than one such object per BMan buffer pool, eg. if different users of the
+ * pool are operating via different portals.
+ */
+struct bman_pool {
+ /* index of the buffer pool to encapsulate (0-63) */
+ u32 bpid;
+ /* Used for hash-table admin when using depletion notifications. */
+ struct bman_portal *portal;
+ struct bman_pool *next;
+};
+
+static u32 poll_portal_slow(struct bman_portal *p, u32 is);
+
+static irqreturn_t portal_isr(int irq, void *ptr)
+{
+ struct bman_portal *p = ptr;
+ struct bm_portal *portal = &p->p;
+ u32 clear = p->irq_sources;
+ u32 is = bm_in(portal, BM_REG_ISR) & p->irq_sources;
+
+ if (unlikely(!is))
+ return IRQ_NONE;
+
+ clear |= poll_portal_slow(p, is);
+ bm_out(portal, BM_REG_ISR, clear);
+ return IRQ_HANDLED;
+}
+
+/* --- RCR API --- */
+
+#define RCR_SHIFT ilog2(sizeof(struct bm_rcr_entry))
+#define RCR_CARRY (uintptr_t)(BM_RCR_SIZE << RCR_SHIFT)
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+static struct bm_rcr_entry *rcr_carryclear(struct bm_rcr_entry *p)
+{
+ uintptr_t addr = (uintptr_t)p;
+
+ addr &= ~RCR_CARRY;
+
+ return (struct bm_rcr_entry *)addr;
+}
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static int rcr_ptr2idx(struct bm_rcr_entry *e)
+{
+ return ((uintptr_t)e >> RCR_SHIFT) & (BM_RCR_SIZE - 1);
+}
+#endif
+
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void rcr_inc(struct bm_rcr *rcr)
+{
+ /* increment to the next RCR pointer and handle overflow and 'vbit' */
+ struct bm_rcr_entry *partial = rcr->cursor + 1;
+
+ rcr->cursor = rcr_carryclear(partial);
+ if (partial != rcr->cursor)
+ rcr->vbit ^= BM_RCR_VERB_VBIT;
+}
+
+static int bm_rcr_get_avail(struct bm_portal *portal)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+
+ return rcr->available;
+}
+
+static int bm_rcr_get_fill(struct bm_portal *portal)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+
+ return BM_RCR_SIZE - 1 - rcr->available;
+}
+
+static void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+
+ rcr->ithresh = ithresh;
+ bm_out(portal, BM_REG_RCR_ITR, ithresh);
+}
+
+static void bm_rcr_cce_prefetch(struct bm_portal *portal)
+{
+ __maybe_unused struct bm_rcr *rcr = &portal->rcr;
+
+ DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
+ bm_cl_touch_ro(portal, BM_CL_RCR_CI_CENA);
+}
+
+static u8 bm_rcr_cce_update(struct bm_portal *portal)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+ u8 diff, old_ci = rcr->ci;
+
+ DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
+ rcr->ci = bm_ce_in(portal, BM_CL_RCR_CI_CENA) & (BM_RCR_SIZE - 1);
+ bm_cl_invalidate(portal, BM_CL_RCR_CI_CENA);
+ diff = dpaa_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
+ rcr->available += diff;
+ return diff;
+}
+
+static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+
+ DPAA_ASSERT(!rcr->busy);
+ if (!rcr->available)
+ return NULL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ rcr->busy = 1;
+#endif
+ dpaa_zero(rcr->cursor);
+ return rcr->cursor;
+}
+
+static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+ struct bm_rcr_entry *rcursor;
+
+ DPAA_ASSERT(rcr->busy);
+ DPAA_ASSERT(rcr->pmode == bm_rcr_pvb);
+ DPAA_ASSERT(rcr->available >= 1);
+ dma_wmb();
+ rcursor = rcr->cursor;
+ rcursor->_ncw_verb = myverb | rcr->vbit;
+ dpaa_flush(rcursor);
+ rcr_inc(rcr);
+ rcr->available--;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ rcr->busy = 0;
+#endif
+}
+
+static int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
+ enum bm_rcr_cmode cmode)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+ u32 cfg;
+ u8 pi;
+
+ rcr->ring = portal->addr.ce + BM_CL_RCR;
+ rcr->ci = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+ pi = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+ rcr->cursor = rcr->ring + pi;
+ rcr->vbit = (bm_in(portal, BM_REG_RCR_PI_CINH) & BM_RCR_SIZE) ?
+ BM_RCR_VERB_VBIT : 0;
+ rcr->available = BM_RCR_SIZE - 1
+ - dpaa_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
+ rcr->ithresh = bm_in(portal, BM_REG_RCR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ rcr->busy = 0;
+ rcr->pmode = pmode;
+ rcr->cmode = cmode;
+#endif
+ cfg = (bm_in(portal, BM_REG_CFG) & 0xffffffe0)
+ | (pmode & 0x3); /* BCSP_CFG::RPM */
+ bm_out(portal, BM_REG_CFG, cfg);
+ return 0;
+}
+
+static void bm_rcr_finish(struct bm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ struct bm_rcr *rcr = &portal->rcr;
+ int i;
+
+ DPAA_ASSERT(!rcr->busy);
+
+ i = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+ if (i != rcr_ptr2idx(rcr->cursor))
+ pr_crit("losing uncommitted RCR entries\n");
+
+ i = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+ if (i != rcr->ci)
+ pr_crit("missing existing RCR completions\n");
+ if (rcr->ci != rcr_ptr2idx(rcr->cursor))
+ pr_crit("RCR destroyed unquiesced\n");
+#endif
+}
+
+/* --- Management command API --- */
+static int bm_mc_init(struct bm_portal *portal)
+{
+ struct bm_mc *mc = &portal->mc;
+
+ mc->cr = portal->addr.ce + BM_CL_CR;
+ mc->rr = portal->addr.ce + BM_CL_RR0;
+ mc->rridx = (mc->cr->_ncw_verb & BM_MCC_VERB_VBIT) ?
+ 0 : 1;
+ mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = mc_idle;
+#endif
+ return 0;
+}
+
+static void bm_mc_finish(struct bm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ struct bm_mc *mc = &portal->mc;
+
+ DPAA_ASSERT(mc->state == mc_idle);
+ if (mc->state != mc_idle)
+ pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
+{
+ struct bm_mc *mc = &portal->mc;
+
+ DPAA_ASSERT(mc->state == mc_idle);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = mc_user;
+#endif
+ dpaa_zero(mc->cr);
+ return mc->cr;
+}
+
+static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
+{
+ struct bm_mc *mc = &portal->mc;
+ union bm_mc_result *rr = mc->rr + mc->rridx;
+
+ DPAA_ASSERT(mc->state == mc_user);
+ dma_wmb();
+ mc->cr->_ncw_verb = myverb | mc->vbit;
+ dpaa_flush(mc->cr);
+ dpaa_invalidate_touch_ro(rr);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = mc_hw;
+#endif
+}
+
+static inline union bm_mc_result *bm_mc_result(struct bm_portal *portal)
+{
+ struct bm_mc *mc = &portal->mc;
+ union bm_mc_result *rr = mc->rr + mc->rridx;
+
+ DPAA_ASSERT(mc->state == mc_hw);
+ /*
+ * The inactive response register's verb byte always returns zero until
+ * its command is submitted and completed. This includes the valid-bit,
+ * in case you were wondering...
+ */
+ if (!rr->verb) {
+ dpaa_invalidate_touch_ro(rr);
+ return NULL;
+ }
+ mc->rridx ^= 1;
+ mc->vbit ^= BM_MCC_VERB_VBIT;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = mc_idle;
+#endif
+ return rr;
+}
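+
+/*
+ * For orientation (not normative): results alternate between the two
+ * response registers starting at BM_CL_RR0 (each one bm_mc_result in
+ * size). 'rridx' selects which one to poll and is toggled on every
+ * completion, and 'vbit' toggles in step so the next command's verb
+ * carries the value the hardware expects.
+ */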
+
+static inline int bm_mc_result_timeout(struct bm_portal *portal,
+ union bm_mc_result **mcr)
+{
+ int timeout = BM_MCR_TIMEOUT;
+
+ do {
+ *mcr = bm_mc_result(portal);
+ if (*mcr)
+ break;
+ udelay(1);
+ } while (--timeout);
+
+ return timeout;
+}
+
+/* Disable all BSCN interrupts for the portal */
+static void bm_isr_bscn_disable(struct bm_portal *portal)
+{
+ bm_out(portal, BM_REG_SCN(0), 0);
+ bm_out(portal, BM_REG_SCN(1), 0);
+}
+
+static int bman_create_portal(struct bman_portal *portal,
+ const struct bm_portal_config *c)
+{
+ struct bm_portal *p;
+ int ret;
+
+ p = &portal->p;
+ /*
+ * Prep the low-level portal struct with the mapped addresses from the
+ * config; everything that follows depends on it, and "config" is kept
+ * mostly for (de)reference.
+ */
+ p->addr.ce = c->addr_virt_ce;
+ p->addr.ce_be = c->addr_virt_ce;
+ p->addr.ci = c->addr_virt_ci;
+ if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) {
+ dev_err(c->dev, "RCR initialisation failed\n");
+ goto fail_rcr;
+ }
+ if (bm_mc_init(p)) {
+ dev_err(c->dev, "MC initialisation failed\n");
+ goto fail_mc;
+ }
+ /*
+ * Default to all BPIDs disabled; we enable them as required at
+ * run-time.
+ */
+ bm_isr_bscn_disable(p);
+
+ /* Write-to-clear any stale interrupt status bits */
+ bm_out(p, BM_REG_ISDR, 0xffffffff);
+ portal->irq_sources = 0;
+ bm_out(p, BM_REG_IER, 0);
+ bm_out(p, BM_REG_ISR, 0xffffffff);
+ snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
+ if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
+ dev_err(c->dev, "request_irq() failed\n");
+ goto fail_irq;
+ }
+ if (c->cpu != -1 && irq_can_set_affinity(c->irq) &&
+ irq_set_affinity(c->irq, cpumask_of(c->cpu))) {
+ dev_err(c->dev, "irq_set_affinity() failed\n");
+ goto fail_affinity;
+ }
+
+ /* Need RCR to be empty before continuing */
+ ret = bm_rcr_get_fill(p);
+ if (ret) {
+ dev_err(c->dev, "RCR unclean\n");
+ goto fail_rcr_empty;
+ }
+ /* Success */
+ portal->config = c;
+
+ bm_out(p, BM_REG_ISDR, 0);
+ bm_out(p, BM_REG_IIR, 0);
+
+ return 0;
+
+fail_rcr_empty:
+fail_affinity:
+ free_irq(c->irq, portal);
+fail_irq:
+ bm_mc_finish(p);
+fail_mc:
+ bm_rcr_finish(p);
+fail_rcr:
+ return -EIO;
+}
+
+struct bman_portal *bman_create_affine_portal(const struct bm_portal_config *c)
+{
+ struct bman_portal *portal;
+ int err;
+
+ portal = &per_cpu(bman_affine_portal, c->cpu);
+ err = bman_create_portal(portal, c);
+ if (err)
+ return NULL;
+
+ spin_lock(&affine_mask_lock);
+ cpumask_set_cpu(c->cpu, &affine_mask);
+ spin_unlock(&affine_mask_lock);
+
+ return portal;
+}
+
+static u32 poll_portal_slow(struct bman_portal *p, u32 is)
+{
+ u32 ret = is;
+
+ if (is & BM_PIRQ_RCRI) {
+ bm_rcr_cce_update(&p->p);
+ bm_rcr_set_ithresh(&p->p, 0);
+ bm_out(&p->p, BM_REG_ISR, BM_PIRQ_RCRI);
+ is &= ~BM_PIRQ_RCRI;
+ }
+
+ /* There should be no status register bits left undefined */
+ DPAA_ASSERT(!is);
+ return ret;
+}
+
+int bman_p_irqsource_add(struct bman_portal *p, u32 bits)
+{
+ unsigned long irqflags;
+
+ local_irq_save(irqflags);
+ p->irq_sources |= bits & BM_PIRQ_VISIBLE;
+ bm_out(&p->p, BM_REG_IER, p->irq_sources);
+ local_irq_restore(irqflags);
+ return 0;
+}
+
+static int bm_shutdown_pool(u32 bpid)
+{
+ struct bm_mc_command *bm_cmd;
+ union bm_mc_result *bm_res;
+
+ while (1) {
+ struct bman_portal *p = get_affine_portal();
+ /* Acquire buffers until empty */
+ bm_cmd = bm_mc_start(&p->p);
+ bm_cmd->bpid = bpid;
+ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | 1);
+ if (!bm_mc_result_timeout(&p->p, &bm_res)) {
+ put_affine_portal();
+ pr_crit("BMan Acquire Command timedout\n");
+ return -ETIMEDOUT;
+ }
+ if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
+ put_affine_portal();
+ /* Pool is empty */
+ return 0;
+ }
+ put_affine_portal();
+ }
+
+ return 0;
+}
+
+struct gen_pool *bm_bpalloc;
+
+static int bm_alloc_bpid_range(u32 *result, u32 count)
+{
+ unsigned long addr;
+
+ addr = gen_pool_alloc(bm_bpalloc, count);
+ if (!addr)
+ return -ENOMEM;
+
+ *result = addr & ~DPAA_GENALLOC_OFF;
+
+ return 0;
+}
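+
+/*
+ * Illustrative only: gen_pool_alloc() returns 0 on failure, so BPID 0 could
+ * not be represented directly. The pool is therefore seeded at
+ * DPAA_GENALLOC_OFF (see fsl_bman_probe()); e.g. an allocation returning
+ * 0x80000003 is unmasked here to BPID 3.
+ */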
+
+static int bm_release_bpid(u32 bpid)
+{
+ int ret;
+
+ ret = bm_shutdown_pool(bpid);
+ if (ret) {
+ pr_debug("BPID %d leaked\n", bpid);
+ return ret;
+ }
+
+ gen_pool_free(bm_bpalloc, bpid | DPAA_GENALLOC_OFF, 1);
+ return 0;
+}
+
+struct bman_pool *bman_new_pool(void)
+{
+ struct bman_pool *pool = NULL;
+ u32 bpid;
+
+ if (bm_alloc_bpid_range(&bpid, 1))
+ return NULL;
+
+ pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ goto err;
+
+ pool->bpid = bpid;
+
+ return pool;
+err:
+ bm_release_bpid(bpid);
+ kfree(pool);
+ return NULL;
+}
+EXPORT_SYMBOL(bman_new_pool);
+
+void bman_free_pool(struct bman_pool *pool)
+{
+ bm_release_bpid(pool->bpid);
+
+ kfree(pool);
+}
+EXPORT_SYMBOL(bman_free_pool);
+
+int bman_get_bpid(const struct bman_pool *pool)
+{
+ return pool->bpid;
+}
+EXPORT_SYMBOL(bman_get_bpid);
+
+static void update_rcr_ci(struct bman_portal *p, int avail)
+{
+ if (avail)
+ bm_rcr_cce_prefetch(&p->p);
+ else
+ bm_rcr_cce_update(&p->p);
+}
+
+int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num)
+{
+ struct bman_portal *p;
+ struct bm_rcr_entry *r;
+ unsigned long irqflags;
+ int avail, timeout = 1000; /* 1ms */
+ int i = num - 1;
+
+ DPAA_ASSERT(num > 0 && num <= 8);
+
+ do {
+ p = get_affine_portal();
+ local_irq_save(irqflags);
+ avail = bm_rcr_get_avail(&p->p);
+ if (avail < 2)
+ update_rcr_ci(p, avail);
+ r = bm_rcr_start(&p->p);
+ local_irq_restore(irqflags);
+ put_affine_portal();
+ if (likely(r))
+ break;
+
+ udelay(1);
+ } while (--timeout);
+
+ if (unlikely(!timeout))
+ return -ETIMEDOUT;
+
+ p = get_affine_portal();
+ local_irq_save(irqflags);
+ /*
+ * we can copy all but the first entry, as copying the first one could
+ * trigger badness with the valid-bit
+ */
+ bm_buffer_set64(r->bufs, bm_buffer_get64(bufs));
+ bm_buffer_set_bpid(r->bufs, pool->bpid);
+ if (i)
+ memcpy(&r->bufs[1], &bufs[1], i * sizeof(bufs[0]));
+
+ bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
+ (num & BM_RCR_VERB_BUFCOUNT_MASK));
+
+ local_irq_restore(irqflags);
+ put_affine_portal();
+ return 0;
+}
+EXPORT_SYMBOL(bman_release);
+
+int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num)
+{
+ struct bman_portal *p = get_affine_portal();
+ struct bm_mc_command *mcc;
+ union bm_mc_result *mcr;
+ int ret;
+
+ DPAA_ASSERT(num > 0 && num <= 8);
+
+ mcc = bm_mc_start(&p->p);
+ mcc->bpid = pool->bpid;
+ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
+ (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
+ if (!bm_mc_result_timeout(&p->p, &mcr)) {
+ put_affine_portal();
+ pr_crit("BMan Acquire Timeout\n");
+ return -ETIMEDOUT;
+ }
+ ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
+ if (bufs)
+ memcpy(&bufs[0], &mcr->bufs[0], num * sizeof(bufs[0]));
+
+ put_affine_portal();
+ if (ret != num)
+ ret = -ENOMEM;
+ return ret;
+}
+EXPORT_SYMBOL(bman_acquire);
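+
+/*
+ * Minimal usage sketch, assuming a caller that already holds a DMA-mapped
+ * buffer address 'dma_addr' and a (hypothetical) use_buffer() consumer;
+ * illustrative only, not part of this driver. Release seeds the pool,
+ * acquire takes the buffer back:
+ *
+ * struct bman_pool *pool = bman_new_pool();
+ * struct bm_buffer buf;
+ *
+ * if (!pool)
+ * return -ENODEV;
+ * bm_buffer_set64(&buf, dma_addr);
+ * if (bman_release(pool, &buf, 1))
+ * goto err;
+ * if (bman_acquire(pool, &buf, 1) == 1)
+ * use_buffer(&buf);
+ * bman_free_pool(pool);
+ */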
+
+const struct bm_portal_config *
+bman_get_bm_portal_config(const struct bman_portal *portal)
+{
+ return portal->config;
+}
diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c
new file mode 100644
index 000000000..7c3cc9680
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_ccsr.c
@@ -0,0 +1,288 @@
+/* Copyright (c) 2009 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+u16 bman_ip_rev;
+EXPORT_SYMBOL(bman_ip_rev);
+
+/* Register offsets */
+#define REG_FBPR_FPC 0x0800
+#define REG_ECSR 0x0a00
+#define REG_ECIR 0x0a04
+#define REG_EADR 0x0a08
+#define REG_EDATA(n) (0x0a10 + ((n) * 0x04))
+#define REG_SBEC(n) (0x0a80 + ((n) * 0x04))
+#define REG_IP_REV_1 0x0bf8
+#define REG_IP_REV_2 0x0bfc
+#define REG_FBPR_BARE 0x0c00
+#define REG_FBPR_BAR 0x0c04
+#define REG_FBPR_AR 0x0c10
+#define REG_SRCIDR 0x0d04
+#define REG_LIODNR 0x0d08
+#define REG_ERR_ISR 0x0e00
+#define REG_ERR_IER 0x0e04
+#define REG_ERR_ISDR 0x0e08
+
+/* Used by all error interrupt registers except 'inhibit' */
+#define BM_EIRQ_IVCI 0x00000010 /* Invalid Command Verb */
+#define BM_EIRQ_FLWI 0x00000008 /* FBPR Low Watermark */
+#define BM_EIRQ_MBEI 0x00000004 /* Multi-bit ECC Error */
+#define BM_EIRQ_SBEI 0x00000002 /* Single-bit ECC Error */
+#define BM_EIRQ_BSCN 0x00000001 /* Pool State Change Notification */
+
+struct bman_hwerr_txt {
+ u32 mask;
+ const char *txt;
+};
+
+static const struct bman_hwerr_txt bman_hwerr_txts[] = {
+ { BM_EIRQ_IVCI, "Invalid Command Verb" },
+ { BM_EIRQ_FLWI, "FBPR Low Watermark" },
+ { BM_EIRQ_MBEI, "Multi-bit ECC Error" },
+ { BM_EIRQ_SBEI, "Single-bit ECC Error" },
+ { BM_EIRQ_BSCN, "Pool State Change Notification" },
+};
+
+/* Only trigger the low water mark interrupt once */
+#define BMAN_ERRS_TO_DISABLE BM_EIRQ_FLWI
+
+/* Pointer to the start of the BMan's CCSR space */
+static u32 __iomem *bm_ccsr_start;
+
+static inline u32 bm_ccsr_in(u32 offset)
+{
+ return ioread32be(bm_ccsr_start + offset/4);
+}
+static inline void bm_ccsr_out(u32 offset, u32 val)
+{
+ iowrite32be(val, bm_ccsr_start + offset/4);
+}
+
+static void bm_get_version(u16 *id, u8 *major, u8 *minor)
+{
+ u32 v = bm_ccsr_in(REG_IP_REV_1);
+ *id = (v >> 16);
+ *major = (v >> 8) & 0xff;
+ *minor = v & 0xff;
+}
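+
+/*
+ * Worked example (value illustrative): REG_IP_REV_1 reading 0x0a800201
+ * decodes as id == 0x0a80, major == 2 and minor == 1, which
+ * fsl_bman_probe() below maps to BMAN_REV21.
+ */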
+
+/* signal transactions for FBPRs with higher priority */
+#define FBPR_AR_RPRIO_HI BIT(30)
+
+static void bm_set_memory(u64 ba, u32 size)
+{
+ u32 exp = ilog2(size);
+ /* choke if size isn't within range */
+ DPAA_ASSERT(size >= 4096 && size <= 1024*1024*1024 &&
+ is_power_of_2(size));
+ /* choke if '[e]ba' has lower-alignment than 'size' */
+ DPAA_ASSERT(!(ba & (size - 1)));
+ bm_ccsr_out(REG_FBPR_BARE, upper_32_bits(ba));
+ bm_ccsr_out(REG_FBPR_BAR, lower_32_bits(ba));
+ bm_ccsr_out(REG_FBPR_AR, exp - 1);
+}
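+
+/*
+ * Worked example: a 1 MiB FBPR area gives exp == ilog2(0x100000) == 20, so
+ * REG_FBPR_AR's size field is programmed with 19, i.e. the size exponent
+ * minus one, matching the write above.
+ */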
+
+/*
+ * Location and size of BMan private memory
+ *
+ * Ideally we would use the DMA API to turn rmem->base into a DMA address
+ * (especially if iommu translations ever get involved). Unfortunately, the
+ * DMA API currently does not allow mapping anything that is not backed with
+ * a struct page.
+ */
+static dma_addr_t fbpr_a;
+static size_t fbpr_sz;
+static int __bman_probed;
+
+static int bman_fbpr(struct reserved_mem *rmem)
+{
+ fbpr_a = rmem->base;
+ fbpr_sz = rmem->size;
+
+ WARN_ON(!(fbpr_a && fbpr_sz));
+
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", bman_fbpr);
+
+static irqreturn_t bman_isr(int irq, void *ptr)
+{
+ u32 isr_val, ier_val, ecsr_val, isr_mask, i;
+ struct device *dev = ptr;
+
+ ier_val = bm_ccsr_in(REG_ERR_IER);
+ isr_val = bm_ccsr_in(REG_ERR_ISR);
+ ecsr_val = bm_ccsr_in(REG_ECSR);
+ isr_mask = isr_val & ier_val;
+
+ if (!isr_mask)
+ return IRQ_NONE;
+
+ for (i = 0; i < ARRAY_SIZE(bman_hwerr_txts); i++) {
+ if (bman_hwerr_txts[i].mask & isr_mask) {
+ dev_err_ratelimited(dev, "ErrInt: %s\n",
+ bman_hwerr_txts[i].txt);
+ if (bman_hwerr_txts[i].mask & ecsr_val) {
+ /* Re-arm error capture registers */
+ bm_ccsr_out(REG_ECSR, ecsr_val);
+ }
+ if (bman_hwerr_txts[i].mask & BMAN_ERRS_TO_DISABLE) {
+ dev_dbg(dev, "Disabling error 0x%x\n",
+ bman_hwerr_txts[i].mask);
+ ier_val &= ~bman_hwerr_txts[i].mask;
+ bm_ccsr_out(REG_ERR_IER, ier_val);
+ }
+ }
+ }
+ bm_ccsr_out(REG_ERR_ISR, isr_val);
+
+ return IRQ_HANDLED;
+}
+
+int bman_is_probed(void)
+{
+ return __bman_probed;
+}
+EXPORT_SYMBOL_GPL(bman_is_probed);
+
+static int fsl_bman_probe(struct platform_device *pdev)
+{
+ int ret, err_irq;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct resource *res;
+ u16 id, bm_pool_cnt;
+ u8 major, minor;
+
+ __bman_probed = -1;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "Can't get %pOF property 'IORESOURCE_MEM'\n",
+ node);
+ return -ENXIO;
+ }
+ bm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res));
+ if (!bm_ccsr_start)
+ return -ENXIO;
+
+ bm_get_version(&id, &major, &minor);
+ if (major == 1 && minor == 0) {
+ bman_ip_rev = BMAN_REV10;
+ bm_pool_cnt = BM_POOL_MAX;
+ } else if (major == 2 && minor == 0) {
+ bman_ip_rev = BMAN_REV20;
+ bm_pool_cnt = 8;
+ } else if (major == 2 && minor == 1) {
+ bman_ip_rev = BMAN_REV21;
+ bm_pool_cnt = BM_POOL_MAX;
+ } else {
+ dev_err(dev, "Unknown Bman version:%04x,%02x,%02x\n",
+ id, major, minor);
+ return -ENODEV;
+ }
+
+ /*
+ * If FBPR memory wasn't defined using the qbman compatible string,
+ * try using the of_reserved_mem_device method
+ */
+ if (!fbpr_a) {
+ ret = qbman_init_private_mem(dev, 0, &fbpr_a, &fbpr_sz);
+ if (ret) {
+ dev_err(dev, "qbman_init_private_mem() failed 0x%x\n",
+ ret);
+ return -ENODEV;
+ }
+ }
+
+ dev_dbg(dev, "Allocated FBPR 0x%llx 0x%zx\n", fbpr_a, fbpr_sz);
+
+ bm_set_memory(fbpr_a, fbpr_sz);
+
+ err_irq = platform_get_irq(pdev, 0);
+ if (err_irq <= 0) {
+ dev_info(dev, "Can't get %pOF IRQ\n", node);
+ return -ENODEV;
+ }
+ ret = devm_request_irq(dev, err_irq, bman_isr, IRQF_SHARED, "bman-err",
+ dev);
+ if (ret) {
+ dev_err(dev, "devm_request_irq() failed %d for '%pOF'\n",
+ ret, node);
+ return ret;
+ }
+ /* Disable Buffer Pool State Change */
+ bm_ccsr_out(REG_ERR_ISDR, BM_EIRQ_BSCN);
+ /*
+ * Write-to-clear any stale bits (e.g. starvation being asserted prior
+ * to resource allocation during driver init).
+ */
+ bm_ccsr_out(REG_ERR_ISR, 0xffffffff);
+ /* Enable Error Interrupts */
+ bm_ccsr_out(REG_ERR_IER, 0xffffffff);
+
+ bm_bpalloc = devm_gen_pool_create(dev, 0, -1, "bman-bpalloc");
+ if (IS_ERR(bm_bpalloc)) {
+ ret = PTR_ERR(bm_bpalloc);
+ dev_err(dev, "bman-bpalloc pool init failed (%d)\n", ret);
+ return ret;
+ }
+
+ /* seed BMan resource pool */
+ ret = gen_pool_add(bm_bpalloc, DPAA_GENALLOC_OFF, bm_pool_cnt, -1);
+ if (ret) {
+ dev_err(dev, "Failed to seed BPID range [%d..%d] (%d)\n",
+ 0, bm_pool_cnt - 1, ret);
+ return ret;
+ }
+
+ __bman_probed = 1;
+
+ return 0;
+}
+
+static const struct of_device_id fsl_bman_ids[] = {
+ {
+ .compatible = "fsl,bman",
+ },
+ {}
+};
+
+static struct platform_driver fsl_bman_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = fsl_bman_ids,
+ .suppress_bind_attrs = true,
+ },
+ .probe = fsl_bman_probe,
+};
+
+builtin_platform_driver(fsl_bman_driver);
diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c
new file mode 100644
index 000000000..f9edd2889
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_portal.c
@@ -0,0 +1,215 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+static struct bman_portal *affine_bportals[NR_CPUS];
+static struct cpumask portal_cpus;
+/* protect bman global registers and global data shared among portals */
+static DEFINE_SPINLOCK(bman_lock);
+
+static struct bman_portal *init_pcfg(struct bm_portal_config *pcfg)
+{
+ struct bman_portal *p = bman_create_affine_portal(pcfg);
+
+ if (!p) {
+ dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n",
+ __func__, pcfg->cpu);
+ return NULL;
+ }
+
+ bman_p_irqsource_add(p, BM_PIRQ_RCRI);
+ affine_bportals[pcfg->cpu] = p;
+
+ dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
+
+ return p;
+}
+
+static int bman_offline_cpu(unsigned int cpu)
+{
+ struct bman_portal *p = affine_bportals[cpu];
+ const struct bm_portal_config *pcfg;
+
+ if (!p)
+ return 0;
+
+ pcfg = bman_get_bm_portal_config(p);
+ if (!pcfg)
+ return 0;
+
+ irq_set_affinity(pcfg->irq, cpumask_of(0));
+ return 0;
+}
+
+static int bman_online_cpu(unsigned int cpu)
+{
+ struct bman_portal *p = affine_bportals[cpu];
+ const struct bm_portal_config *pcfg;
+
+ if (!p)
+ return 0;
+
+ pcfg = bman_get_bm_portal_config(p);
+ if (!pcfg)
+ return 0;
+
+ irq_set_affinity(pcfg->irq, cpumask_of(cpu));
+ return 0;
+}
+
+static int bman_portal_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct bm_portal_config *pcfg;
+ struct resource *addr_phys[2];
+ int irq, cpu, err;
+
+ err = bman_is_probed();
+ if (!err)
+ return -EPROBE_DEFER;
+ if (err < 0) {
+ dev_err(&pdev->dev, "failing probe due to bman probe error\n");
+ return -ENODEV;
+ }
+
+ pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
+ if (!pcfg)
+ return -ENOMEM;
+
+ pcfg->dev = dev;
+
+ addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM,
+ DPAA_PORTAL_CE);
+ if (!addr_phys[0]) {
+ dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node);
+ return -ENXIO;
+ }
+
+ addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
+ DPAA_PORTAL_CI);
+ if (!addr_phys[1]) {
+ dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node);
+ return -ENXIO;
+ }
+
+ pcfg->cpu = -1;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(dev, "Can't get %pOF IRQ'\n", node);
+ return -ENXIO;
+ }
+ pcfg->irq = irq;
+
+ pcfg->addr_virt_ce = memremap(addr_phys[0]->start,
+ resource_size(addr_phys[0]),
+ QBMAN_MEMREMAP_ATTR);
+ if (!pcfg->addr_virt_ce) {
+ dev_err(dev, "memremap::CE failed\n");
+ goto err_ioremap1;
+ }
+
+ pcfg->addr_virt_ci = ioremap(addr_phys[1]->start,
+ resource_size(addr_phys[1]));
+ if (!pcfg->addr_virt_ci) {
+ dev_err(dev, "ioremap::CI failed\n");
+ goto err_ioremap2;
+ }
+
+ spin_lock(&bman_lock);
+ cpu = cpumask_next_zero(-1, &portal_cpus);
+ if (cpu >= nr_cpu_ids) {
+ /* unassigned portal, skip init */
+ spin_unlock(&bman_lock);
+ return 0;
+ }
+
+ cpumask_set_cpu(cpu, &portal_cpus);
+ spin_unlock(&bman_lock);
+ pcfg->cpu = cpu;
+
+ if (!init_pcfg(pcfg)) {
+ dev_err(dev, "portal init failed\n");
+ goto err_portal_init;
+ }
+
+ /* clear irq affinity if assigned cpu is offline */
+ if (!cpu_online(cpu))
+ bman_offline_cpu(cpu);
+
+ return 0;
+
+err_portal_init:
+ iounmap(pcfg->addr_virt_ci);
+err_ioremap2:
+ memunmap(pcfg->addr_virt_ce);
+err_ioremap1:
+ return -ENXIO;
+}
+
+static const struct of_device_id bman_portal_ids[] = {
+ {
+ .compatible = "fsl,bman-portal",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, bman_portal_ids);
+
+static struct platform_driver bman_portal_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = bman_portal_ids,
+ },
+ .probe = bman_portal_probe,
+};
+
+static int __init bman_portal_driver_register(struct platform_driver *drv)
+{
+ int ret;
+
+ ret = platform_driver_register(drv);
+ if (ret < 0)
+ return ret;
+
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "soc/qbman_portal:online",
+ bman_online_cpu, bman_offline_cpu);
+ if (ret < 0) {
+ pr_err("bman: failed to register hotplug callbacks.\n");
+ platform_driver_unregister(drv);
+ return ret;
+ }
+ return 0;
+}
+
+module_driver(bman_portal_driver,
+ bman_portal_driver_register, platform_driver_unregister);
diff --git a/drivers/soc/fsl/qbman/bman_priv.h b/drivers/soc/fsl/qbman/bman_priv.h
new file mode 100644
index 000000000..751ce9038
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_priv.h
@@ -0,0 +1,78 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "dpaa_sys.h"
+
+#include <soc/fsl/bman.h>
+
+/* Portal processing (interrupt) sources */
+#define BM_PIRQ_RCRI 0x00000002 /* RCR Ring (below threshold) */
+
+/* Revision info (for errata and feature handling) */
+#define BMAN_REV10 0x0100
+#define BMAN_REV20 0x0200
+#define BMAN_REV21 0x0201
+extern u16 bman_ip_rev; /* 0 if uninitialised, otherwise BMAN_REVx */
+
+extern struct gen_pool *bm_bpalloc;
+
+struct bm_portal_config {
+ /* Portal addresses */
+ void *addr_virt_ce;
+ void __iomem *addr_virt_ci;
+ /* Allow these to be joined in lists */
+ struct list_head list;
+ struct device *dev;
+ /* User-visible portal configuration settings */
+ /* portal is affined to this cpu */
+ int cpu;
+ /* portal interrupt line */
+ int irq;
+};
+
+struct bman_portal *bman_create_affine_portal(
+ const struct bm_portal_config *config);
+/*
+ * The below bman_p_***() variant might be called in a situation where the
+ * cpu to which the portal is affine is not online yet.
+ * @bman_portal specifies which portal the API will use.
+ */
+int bman_p_irqsource_add(struct bman_portal *p, u32 bits);
+
+/*
+ * Used by all portal interrupt registers except 'inhibit'
+ * This mask contains all the "irqsource" bits visible to API users
+ */
+#define BM_PIRQ_VISIBLE BM_PIRQ_RCRI
+
+const struct bm_portal_config *
+bman_get_bm_portal_config(const struct bman_portal *portal);
diff --git a/drivers/soc/fsl/qbman/bman_test.c b/drivers/soc/fsl/qbman/bman_test.c
new file mode 100644
index 000000000..09b1c960b
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_test.c
@@ -0,0 +1,53 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_test.h"
+
+MODULE_AUTHOR("Geoff Thorpe");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("BMan testing");
+
+static int test_init(void)
+{
+#ifdef CONFIG_FSL_BMAN_TEST_API
+ int loop = 1;
+
+ while (loop--)
+ bman_test_api();
+#endif
+ return 0;
+}
+
+static void test_exit(void)
+{
+}
+
+module_init(test_init);
+module_exit(test_exit);
diff --git a/drivers/soc/fsl/qbman/bman_test.h b/drivers/soc/fsl/qbman/bman_test.h
new file mode 100644
index 000000000..037ed342a
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_test.h
@@ -0,0 +1,35 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+void bman_test_api(void);
diff --git a/drivers/soc/fsl/qbman/bman_test_api.c b/drivers/soc/fsl/qbman/bman_test_api.c
new file mode 100644
index 000000000..6f6bdd154
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_test_api.c
@@ -0,0 +1,151 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_test.h"
+
+#define NUM_BUFS 93
+#define LOOPS 3
+#define BMAN_TOKEN_MASK 0x00FFFFFFFFFFLLU
+
+static struct bman_pool *pool;
+static struct bm_buffer bufs_in[NUM_BUFS] ____cacheline_aligned;
+static struct bm_buffer bufs_out[NUM_BUFS] ____cacheline_aligned;
+static int bufs_received;
+
+static void bufs_init(void)
+{
+ int i;
+
+ for (i = 0; i < NUM_BUFS; i++)
+ bm_buffer_set64(&bufs_in[i], 0xfedc01234567LLU * i);
+ bufs_received = 0;
+}
+
+static inline int bufs_cmp(const struct bm_buffer *a, const struct bm_buffer *b)
+{
+ if (bman_ip_rev == BMAN_REV20 || bman_ip_rev == BMAN_REV21) {
+ /*
+ * On SoCs with BMan revision 2.0 or 2.1, BMan only respects the 40
+ * LS-bits of buffer addresses, masking off the upper 8-bits on
+ * release commands. The API provides for 48-bit addresses
+ * because some SoCs support all 48-bits. When generating
+ * garbage addresses for testing, we either need to zero the
+ * upper 8-bits when releasing to BMan (otherwise we'll be
+ * disappointed when the buffers we acquire back from BMan
+ * don't match), or we need to mask the upper 8-bits off when
+ * comparing. We do the latter.
+ */
+ if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) <
+ (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
+ return -1;
+ if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) >
+ (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
+ return 1;
+ } else {
+ if (bm_buffer_get64(a) < bm_buffer_get64(b))
+ return -1;
+ if (bm_buffer_get64(a) > bm_buffer_get64(b))
+ return 1;
+ }
+
+ return 0;
+}
+
+static void bufs_confirm(void)
+{
+ int i, j;
+
+ for (i = 0; i < NUM_BUFS; i++) {
+ int matches = 0;
+
+ for (j = 0; j < NUM_BUFS; j++)
+ if (!bufs_cmp(&bufs_in[i], &bufs_out[j]))
+ matches++;
+ WARN_ON(matches != 1);
+ }
+}
+
+/* Release/acquire API round-trip test */
+void bman_test_api(void)
+{
+ int i, loops = LOOPS;
+
+ bufs_init();
+
+ pr_info("%s(): Starting\n", __func__);
+
+ pool = bman_new_pool();
+ if (!pool) {
+ pr_crit("bman_new_pool() failed\n");
+ goto failed;
+ }
+
+ /* Release buffers */
+do_loop:
+ i = 0;
+ while (i < NUM_BUFS) {
+ int num = 8;
+
+ if (i + num > NUM_BUFS)
+ num = NUM_BUFS - i;
+ if (bman_release(pool, bufs_in + i, num)) {
+ pr_crit("bman_release() failed\n");
+ goto failed;
+ }
+ i += num;
+ }
+
+ /* Acquire buffers */
+ while (i > 0) {
+ int tmp, num = 8;
+
+ if (num > i)
+ num = i;
+ tmp = bman_acquire(pool, bufs_out + i - num, num);
+ WARN_ON(tmp != num);
+ i -= num;
+ }
+ i = bman_acquire(pool, NULL, 1);
+ WARN_ON(i > 0);
+
+ bufs_confirm();
+
+ if (--loops)
+ goto do_loop;
+
+ /* Clean up */
+ bman_free_pool(pool);
+ pr_info("%s(): Finished\n", __func__);
+ return;
+
+failed:
+ WARN_ON(1);
+}
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.c b/drivers/soc/fsl/qbman/dpaa_sys.c
new file mode 100644
index 000000000..9436aa83f
--- /dev/null
+++ b/drivers/soc/fsl/qbman/dpaa_sys.c
@@ -0,0 +1,78 @@
+/* Copyright 2017 NXP Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of NXP Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NXP Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL NXP Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/dma-mapping.h>
+#include "dpaa_sys.h"
+
+/*
+ * Initialize a device's private memory region
+ */
+int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
+ size_t *size)
+{
+ int ret;
+ struct device_node *mem_node;
+ u64 size64;
+
+ ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, idx);
+ if (ret) {
+ dev_err(dev,
+ "of_reserved_mem_device_init_by_idx(%d) failed 0x%x\n",
+ idx, ret);
+ return -ENODEV;
+ }
+ mem_node = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (mem_node) {
+ ret = of_property_read_u64(mem_node, "size", &size64);
+ if (ret) {
+ dev_err(dev, "of_address_to_resource fails 0x%x\n",
+ ret);
+ return -ENODEV;
+ }
+ *size = size64;
+ } else {
+ dev_err(dev, "No memory-region found for index %d\n", idx);
+ return -ENODEV;
+ }
+
+ if (!dma_zalloc_coherent(dev, *size, addr, 0)) {
+ dev_err(dev, "DMA Alloc memory failed\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Disassociate the reserved memory area from the device
+ * because a device can only have one DMA memory area. This
+ * should be fine since the memory is allocated and initialized
+ * and only ever accessed by the QBMan device from now on
+ */
+ of_reserved_mem_device_release(dev);
+ return 0;
+}
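+
+/*
+ * Illustrative caller pattern (cf. fsl_bman_probe(), which falls back to
+ * this helper when no "fsl,bman-fbpr" reserved region was declared):
+ *
+ * dma_addr_t addr;
+ * size_t sz;
+ *
+ * if (qbman_init_private_mem(dev, 0, &addr, &sz))
+ * return -ENODEV;
+ */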
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.h b/drivers/soc/fsl/qbman/dpaa_sys.h
new file mode 100644
index 000000000..9f379000d
--- /dev/null
+++ b/drivers/soc/fsl/qbman/dpaa_sys.h
@@ -0,0 +1,114 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA_SYS_H
+#define __DPAA_SYS_H
+
+#include <linux/cpu.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/sched/signal.h>
+#include <linux/vmalloc.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/prefetch.h>
+#include <linux/genalloc.h>
+#include <asm/cacheflush.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+
+/* For 2-element tables related to cache-inhibited and cache-enabled mappings */
+#define DPAA_PORTAL_CE 0
+#define DPAA_PORTAL_CI 1
+
+static inline void dpaa_flush(void *p)
+{
+ /*
+ * Only PPC needs to flush the cache currently - on ARM the mapping
+ * is non-cacheable
+ */
+#ifdef CONFIG_PPC
+ flush_dcache_range((unsigned long)p, (unsigned long)p+64);
+#endif
+}
+
+#define dpaa_invalidate(p) dpaa_flush(p)
+
+#define dpaa_zero(p) memset(p, 0, 64)
+
+static inline void dpaa_touch_ro(void *p)
+{
+#if (L1_CACHE_BYTES == 32)
+ prefetch(p+32);
+#endif
+ prefetch(p);
+}
+
+/* Commonly used combo */
+static inline void dpaa_invalidate_touch_ro(void *p)
+{
+ dpaa_invalidate(p);
+ dpaa_touch_ro(p);
+}
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+#define DPAA_ASSERT(x) WARN_ON(!(x))
+#else
+#define DPAA_ASSERT(x)
+#endif
+
+/* cyclic helper for rings */
+static inline u8 dpaa_cyc_diff(u8 ringsize, u8 first, u8 last)
+{
+ /* 'first' is included, 'last' is excluded */
+ if (first <= last)
+ return last - first;
+ return ringsize + last - first;
+}
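+
+/*
+ * Worked example: on an 8-entry ring, dpaa_cyc_diff(8, 6, 1) == 8 + 1 - 6
+ * == 3 (the wrap case) and dpaa_cyc_diff(8, 2, 5) == 3 directly. Because
+ * 'first' is included and 'last' excluded, a completely full ring is
+ * indistinguishable from an empty one, which is why the ring producers in
+ * this driver keep one slot spare (SIZE - 1).
+ */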
+
+/* Offset applied to genalloc pools due to zero being an error return */
+#define DPAA_GENALLOC_OFF 0x80000000
+
+/* Initialize the device's private memory region */
+int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
+ size_t *size);
+
+/* memremap() attributes for different platforms */
+#ifdef CONFIG_PPC
+#define QBMAN_MEMREMAP_ATTR MEMREMAP_WB
+#else
+#define QBMAN_MEMREMAP_ATTR MEMREMAP_WC
+#endif
+
+#endif /* __DPAA_SYS_H */
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
new file mode 100644
index 000000000..d7bf456fd
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -0,0 +1,2889 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+#define DQRR_MAXFILL 15
+#define EQCR_ITHRESH 4 /* if EQCR congests, interrupt threshold */
+#define IRQNAME "QMan portal %d"
+#define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */
+#define QMAN_POLL_LIMIT 32
+#define QMAN_PIRQ_DQRR_ITHRESH 12
+#define QMAN_PIRQ_MR_ITHRESH 4
+#define QMAN_PIRQ_IPERIOD 100
+
+/* Portal register assists */
+
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+/* Cache-inhibited register offsets */
+#define QM_REG_EQCR_PI_CINH 0x3000
+#define QM_REG_EQCR_CI_CINH 0x3040
+#define QM_REG_EQCR_ITR 0x3080
+#define QM_REG_DQRR_PI_CINH 0x3100
+#define QM_REG_DQRR_CI_CINH 0x3140
+#define QM_REG_DQRR_ITR 0x3180
+#define QM_REG_DQRR_DCAP 0x31C0
+#define QM_REG_DQRR_SDQCR 0x3200
+#define QM_REG_DQRR_VDQCR 0x3240
+#define QM_REG_DQRR_PDQCR 0x3280
+#define QM_REG_MR_PI_CINH 0x3300
+#define QM_REG_MR_CI_CINH 0x3340
+#define QM_REG_MR_ITR 0x3380
+#define QM_REG_CFG 0x3500
+#define QM_REG_ISR 0x3600
+#define QM_REG_IER 0x3640
+#define QM_REG_ISDR 0x3680
+#define QM_REG_IIR 0x36C0
+#define QM_REG_ITPR 0x3740
+
+/* Cache-enabled register offsets */
+#define QM_CL_EQCR 0x0000
+#define QM_CL_DQRR 0x1000
+#define QM_CL_MR 0x2000
+#define QM_CL_EQCR_PI_CENA 0x3000
+#define QM_CL_EQCR_CI_CENA 0x3040
+#define QM_CL_DQRR_PI_CENA 0x3100
+#define QM_CL_DQRR_CI_CENA 0x3140
+#define QM_CL_MR_PI_CENA 0x3300
+#define QM_CL_MR_CI_CENA 0x3340
+#define QM_CL_CR 0x3800
+#define QM_CL_RR0 0x3900
+#define QM_CL_RR1 0x3940
+
+#else
+/* Cache-inhibited register offsets */
+#define QM_REG_EQCR_PI_CINH 0x0000
+#define QM_REG_EQCR_CI_CINH 0x0004
+#define QM_REG_EQCR_ITR 0x0008
+#define QM_REG_DQRR_PI_CINH 0x0040
+#define QM_REG_DQRR_CI_CINH 0x0044
+#define QM_REG_DQRR_ITR 0x0048
+#define QM_REG_DQRR_DCAP 0x0050
+#define QM_REG_DQRR_SDQCR 0x0054
+#define QM_REG_DQRR_VDQCR 0x0058
+#define QM_REG_DQRR_PDQCR 0x005c
+#define QM_REG_MR_PI_CINH 0x0080
+#define QM_REG_MR_CI_CINH 0x0084
+#define QM_REG_MR_ITR 0x0088
+#define QM_REG_CFG 0x0100
+#define QM_REG_ISR 0x0e00
+#define QM_REG_IER 0x0e04
+#define QM_REG_ISDR 0x0e08
+#define QM_REG_IIR 0x0e0c
+#define QM_REG_ITPR 0x0e14
+
+/* Cache-enabled register offsets */
+#define QM_CL_EQCR 0x0000
+#define QM_CL_DQRR 0x1000
+#define QM_CL_MR 0x2000
+#define QM_CL_EQCR_PI_CENA 0x3000
+#define QM_CL_EQCR_CI_CENA 0x3100
+#define QM_CL_DQRR_PI_CENA 0x3200
+#define QM_CL_DQRR_CI_CENA 0x3300
+#define QM_CL_MR_PI_CENA 0x3400
+#define QM_CL_MR_CI_CENA 0x3500
+#define QM_CL_CR 0x3800
+#define QM_CL_RR0 0x3900
+#define QM_CL_RR1 0x3940
+#endif
+
+/*
+ * BTW, the drivers (and h/w programming model) already obtain the required
+ * synchronisation for portal accesses and data-dependencies. Use of barrier()s
+ * or other order-preserving primitives simply degrade performance. Hence the
+ * use of the __raw_*() interfaces, which simply ensure that the compiler treats
+ * the portal registers as volatile
+ */
+
+/* Cache-enabled ring access */
+#define qm_cl(base, idx) ((void *)base + ((idx) << 6))
+
+/*
+ * Portal modes.
+ * Enum types:
+ * pmode == production mode
+ * cmode == consumption mode
+ * dmode == h/w dequeue mode
+ * Enum values use 3 letter codes. First letter matches the portal mode,
+ * remaining two letters indicate:
+ * ci == cache-inhibited portal register
+ * ce == cache-enabled portal register
+ * vb == in-band valid-bit (cache-enabled)
+ * dc == DCA (Discrete Consumption Acknowledgment), DQRR-only
+ * As for "enum qm_dqrr_dmode", it should be self-explanatory.
+ */
+enum qm_eqcr_pmode { /* matches QCSP_CFG::EPM */
+ qm_eqcr_pci = 0, /* PI index, cache-inhibited */
+ qm_eqcr_pce = 1, /* PI index, cache-enabled */
+ qm_eqcr_pvb = 2 /* valid-bit */
+};
+enum qm_dqrr_dmode { /* matches QCSP_CFG::DP */
+ qm_dqrr_dpush = 0, /* SDQCR + VDQCR */
+ qm_dqrr_dpull = 1 /* PDQCR */
+};
+enum qm_dqrr_pmode { /* s/w-only */
+ qm_dqrr_pci, /* reads DQRR_PI_CINH */
+ qm_dqrr_pce, /* reads DQRR_PI_CENA */
+ qm_dqrr_pvb /* reads valid-bit */
+};
+enum qm_dqrr_cmode { /* matches QCSP_CFG::DCM */
+ qm_dqrr_cci = 0, /* CI index, cache-inhibited */
+ qm_dqrr_cce = 1, /* CI index, cache-enabled */
+ qm_dqrr_cdc = 2 /* Discrete Consumption Acknowledgment */
+};
+enum qm_mr_pmode { /* s/w-only */
+ qm_mr_pci, /* reads MR_PI_CINH */
+ qm_mr_pce, /* reads MR_PI_CENA */
+ qm_mr_pvb /* reads valid-bit */
+};
+enum qm_mr_cmode { /* matches QCSP_CFG::MM */
+ qm_mr_cci = 0, /* CI index, cache-inhibited */
+ qm_mr_cce = 1 /* CI index, cache-enabled */
+};
+
+/* --- Portal structures --- */
+
+#define QM_EQCR_SIZE 8
+#define QM_DQRR_SIZE 16
+#define QM_MR_SIZE 8
+
+/* "Enqueue Command" */
+struct qm_eqcr_entry {
+ u8 _ncw_verb; /* writes to this are non-coherent */
+ u8 dca;
+ __be16 seqnum;
+ u8 __reserved[4];
+ __be32 fqid; /* 24-bit */
+ __be32 tag;
+ struct qm_fd fd;
+ u8 __reserved3[32];
+} __packed __aligned(8);
+#define QM_EQCR_VERB_VBIT 0x80
+#define QM_EQCR_VERB_CMD_MASK 0x61 /* but only one value; */
+#define QM_EQCR_VERB_CMD_ENQUEUE 0x01
+#define QM_EQCR_SEQNUM_NESN 0x8000 /* Advance NESN */
+#define QM_EQCR_SEQNUM_NLIS 0x4000 /* More fragments to come */
+#define QM_EQCR_SEQNUM_SEQMASK 0x3fff /* sequence number goes here */
+
+struct qm_eqcr {
+ struct qm_eqcr_entry *ring, *cursor;
+ u8 ci, available, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ u32 busy;
+ enum qm_eqcr_pmode pmode;
+#endif
+};
+
+struct qm_dqrr {
+ const struct qm_dqrr_entry *ring, *cursor;
+ u8 pi, ci, fill, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ enum qm_dqrr_dmode dmode;
+ enum qm_dqrr_pmode pmode;
+ enum qm_dqrr_cmode cmode;
+#endif
+};
+
+struct qm_mr {
+ union qm_mr_entry *ring, *cursor;
+ u8 pi, ci, fill, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ enum qm_mr_pmode pmode;
+ enum qm_mr_cmode cmode;
+#endif
+};
+
+/* MC (Management Command) command */
+/* "FQ" command layout */
+struct qm_mcc_fq {
+ u8 _ncw_verb;
+ u8 __reserved1[3];
+ __be32 fqid; /* 24-bit */
+ u8 __reserved2[56];
+} __packed;
+
+/* "CGR" command layout */
+struct qm_mcc_cgr {
+ u8 _ncw_verb;
+ u8 __reserved1[30];
+ u8 cgid;
+ u8 __reserved2[32];
+};
+
+#define QM_MCC_VERB_VBIT 0x80
+#define QM_MCC_VERB_MASK 0x7f /* where the verb contains; */
+#define QM_MCC_VERB_INITFQ_PARKED 0x40
+#define QM_MCC_VERB_INITFQ_SCHED 0x41
+#define QM_MCC_VERB_QUERYFQ 0x44
+#define QM_MCC_VERB_QUERYFQ_NP 0x45 /* "non-programmable" fields */
+#define QM_MCC_VERB_QUERYWQ 0x46
+#define QM_MCC_VERB_QUERYWQ_DEDICATED 0x47
+#define QM_MCC_VERB_ALTER_SCHED 0x48 /* Schedule FQ */
+#define QM_MCC_VERB_ALTER_FE 0x49 /* Force Eligible FQ */
+#define QM_MCC_VERB_ALTER_RETIRE 0x4a /* Retire FQ */
+#define QM_MCC_VERB_ALTER_OOS 0x4b /* Take FQ out of service */
+#define QM_MCC_VERB_ALTER_FQXON 0x4d /* FQ XON */
+#define QM_MCC_VERB_ALTER_FQXOFF 0x4e /* FQ XOFF */
+#define QM_MCC_VERB_INITCGR 0x50
+#define QM_MCC_VERB_MODIFYCGR 0x51
+#define QM_MCC_VERB_CGRTESTWRITE 0x52
+#define QM_MCC_VERB_QUERYCGR 0x58
+#define QM_MCC_VERB_QUERYCONGESTION 0x59
+union qm_mc_command {
+ struct {
+ u8 _ncw_verb; /* writes to this are non-coherent */
+ u8 __reserved[63];
+ };
+ struct qm_mcc_initfq initfq;
+ struct qm_mcc_initcgr initcgr;
+ struct qm_mcc_fq fq;
+ struct qm_mcc_cgr cgr;
+};
+
+/* MC (Management Command) result */
+/* "Query FQ" */
+struct qm_mcr_queryfq {
+ u8 verb;
+ u8 result;
+ u8 __reserved1[8];
+ struct qm_fqd fqd; /* the FQD fields are here */
+ u8 __reserved2[30];
+} __packed;
+
+/* "Alter FQ State Commands" */
+struct qm_mcr_alterfq {
+ u8 verb;
+ u8 result;
+ u8 fqs; /* Frame Queue Status */
+ u8 __reserved1[61];
+};
+#define QM_MCR_VERB_RRID 0x80
+#define QM_MCR_VERB_MASK QM_MCC_VERB_MASK
+#define QM_MCR_VERB_INITFQ_PARKED QM_MCC_VERB_INITFQ_PARKED
+#define QM_MCR_VERB_INITFQ_SCHED QM_MCC_VERB_INITFQ_SCHED
+#define QM_MCR_VERB_QUERYFQ QM_MCC_VERB_QUERYFQ
+#define QM_MCR_VERB_QUERYFQ_NP QM_MCC_VERB_QUERYFQ_NP
+#define QM_MCR_VERB_QUERYWQ QM_MCC_VERB_QUERYWQ
+#define QM_MCR_VERB_QUERYWQ_DEDICATED QM_MCC_VERB_QUERYWQ_DEDICATED
+#define QM_MCR_VERB_ALTER_SCHED QM_MCC_VERB_ALTER_SCHED
+#define QM_MCR_VERB_ALTER_FE QM_MCC_VERB_ALTER_FE
+#define QM_MCR_VERB_ALTER_RETIRE QM_MCC_VERB_ALTER_RETIRE
+#define QM_MCR_VERB_ALTER_OOS QM_MCC_VERB_ALTER_OOS
+#define QM_MCR_RESULT_NULL 0x00
+#define QM_MCR_RESULT_OK 0xf0
+#define QM_MCR_RESULT_ERR_FQID 0xf1
+#define QM_MCR_RESULT_ERR_FQSTATE 0xf2
+#define QM_MCR_RESULT_ERR_NOTEMPTY 0xf3 /* OOS fails if FQ is !empty */
+#define QM_MCR_RESULT_ERR_BADCHANNEL 0xf4
+#define QM_MCR_RESULT_PENDING 0xf8
+#define QM_MCR_RESULT_ERR_BADCOMMAND 0xff
+#define QM_MCR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
+#define QM_MCR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
+#define QM_MCR_TIMEOUT 10000 /* us */
+union qm_mc_result {
+ struct {
+ u8 verb;
+ u8 result;
+ u8 __reserved1[62];
+ };
+ struct qm_mcr_queryfq queryfq;
+ struct qm_mcr_alterfq alterfq;
+ struct qm_mcr_querycgr querycgr;
+ struct qm_mcr_querycongestion querycongestion;
+ struct qm_mcr_querywq querywq;
+ struct qm_mcr_queryfq_np queryfq_np;
+};
+
+struct qm_mc {
+ union qm_mc_command *cr;
+ union qm_mc_result *rr;
+ u8 rridx, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ enum {
+ /* Can be _mc_start()ed */
+ qman_mc_idle,
+ /* Can be _mc_commit()ed or _mc_abort()ed */
+ qman_mc_user,
+ /* Can only be _mc_retry()ed */
+ qman_mc_hw
+ } state;
+#endif
+};
+
+struct qm_addr {
+ void *ce; /* cache-enabled */
+ __be32 *ce_be; /* same value as above but for direct access */
+ void __iomem *ci; /* cache-inhibited */
+};
+
+struct qm_portal {
+ /*
+ * In the non-CONFIG_FSL_DPAA_CHECKING case, the following stuff up to
+ * and including 'mc' fits within a cacheline (yay!). The 'config' part
+ * is setup-only, so isn't a cause for concern. In other words, don't
+ * rearrange this structure on a whim, there be dragons ...
+ */
+ struct qm_addr addr;
+ struct qm_eqcr eqcr;
+ struct qm_dqrr dqrr;
+ struct qm_mr mr;
+ struct qm_mc mc;
+} ____cacheline_aligned;
+
+/* Cache-inhibited register access. */
+static inline u32 qm_in(struct qm_portal *p, u32 offset)
+{
+ return ioread32be(p->addr.ci + offset);
+}
+
+static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
+{
+ iowrite32be(val, p->addr.ci + offset);
+}
+
+/* Cache Enabled Portal Access */
+static inline void qm_cl_invalidate(struct qm_portal *p, u32 offset)
+{
+ dpaa_invalidate(p->addr.ce + offset);
+}
+
+static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
+{
+ dpaa_touch_ro(p->addr.ce + offset);
+}
+
+static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
+{
+ return be32_to_cpu(*(p->addr.ce_be + (offset/4)));
+}
+
+/* --- EQCR API --- */
+
+#define EQCR_SHIFT ilog2(sizeof(struct qm_eqcr_entry))
+#define EQCR_CARRY (uintptr_t)(QM_EQCR_SIZE << EQCR_SHIFT)
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+static struct qm_eqcr_entry *eqcr_carryclear(struct qm_eqcr_entry *p)
+{
+ uintptr_t addr = (uintptr_t)p;
+
+ addr &= ~EQCR_CARRY;
+
+ return (struct qm_eqcr_entry *)addr;
+}
+
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static int eqcr_ptr2idx(struct qm_eqcr_entry *e)
+{
+ return ((uintptr_t)e >> EQCR_SHIFT) & (QM_EQCR_SIZE - 1);
+}
+
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void eqcr_inc(struct qm_eqcr *eqcr)
+{
+ /* increment to the next EQCR pointer and handle overflow and 'vbit' */
+ struct qm_eqcr_entry *partial = eqcr->cursor + 1;
+
+ eqcr->cursor = eqcr_carryclear(partial);
+ if (partial != eqcr->cursor)
+ eqcr->vbit ^= QM_EQCR_VERB_VBIT;
+}
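+
+/*
+ * Illustrative only: with QM_EQCR_SIZE == 8 and the 64-byte qm_eqcr_entry,
+ * the ring spans 512 bytes and EQCR_CARRY == 0x200. Incrementing a cursor
+ * sitting at index 7 sets that carry bit; eqcr_carryclear() clears it so
+ * the pointer lands back on index 0, and the 'partial != eqcr->cursor'
+ * comparison detects the wrap and flips 'vbit'.
+ */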
+
+static inline int qm_eqcr_init(struct qm_portal *portal,
+ enum qm_eqcr_pmode pmode,
+ unsigned int eq_stash_thresh,
+ int eq_stash_prio)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+ u32 cfg;
+ u8 pi;
+
+ eqcr->ring = portal->addr.ce + QM_CL_EQCR;
+ eqcr->ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+ qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
+ pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+ eqcr->cursor = eqcr->ring + pi;
+ eqcr->vbit = (qm_in(portal, QM_REG_EQCR_PI_CINH) & QM_EQCR_SIZE) ?
+ QM_EQCR_VERB_VBIT : 0;
+ eqcr->available = QM_EQCR_SIZE - 1 -
+ dpaa_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
+ eqcr->ithresh = qm_in(portal, QM_REG_EQCR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ eqcr->busy = 0;
+ eqcr->pmode = pmode;
+#endif
+ cfg = (qm_in(portal, QM_REG_CFG) & 0x00ffffff) |
+ (eq_stash_thresh << 28) | /* QCSP_CFG: EST */
+ (eq_stash_prio << 26) | /* QCSP_CFG: EP */
+ ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */
+ qm_out(portal, QM_REG_CFG, cfg);
+ return 0;
+}
+
+static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal)
+{
+ return (qm_in(portal, QM_REG_CFG) >> 28) & 0x7;
+}
+
+static inline void qm_eqcr_finish(struct qm_portal *portal)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+ u8 ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+
+ DPAA_ASSERT(!eqcr->busy);
+ if (pi != eqcr_ptr2idx(eqcr->cursor))
+ pr_crit("losing uncommitted EQCR entries\n");
+ if (ci != eqcr->ci)
+ pr_crit("missing existing EQCR completions\n");
+ if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor))
+ pr_crit("EQCR destroyed unquiesced\n");
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
+ *portal)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+
+ DPAA_ASSERT(!eqcr->busy);
+ if (!eqcr->available)
+ return NULL;
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ eqcr->busy = 1;
+#endif
+ dpaa_zero(eqcr->cursor);
+ return eqcr->cursor;
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
+ *portal)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 diff, old_ci;
+
+ DPAA_ASSERT(!eqcr->busy);
+ if (!eqcr->available) {
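+ /* out of slots: re-read CI from the cache-enabled copy to find more */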
+ old_ci = eqcr->ci;
+ eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) &
+ (QM_EQCR_SIZE - 1);
+ diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ if (!diff)
+ return NULL;
+ }
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ eqcr->busy = 1;
+#endif
+ dpaa_zero(eqcr->cursor);
+ return eqcr->cursor;
+}
+
+static inline void eqcr_commit_checks(struct qm_eqcr *eqcr)
+{
+ DPAA_ASSERT(eqcr->busy);
+ DPAA_ASSERT(!(be32_to_cpu(eqcr->cursor->fqid) & ~QM_FQID_MASK));
+ DPAA_ASSERT(eqcr->available >= 1);
+}
+
+static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+ struct qm_eqcr_entry *eqcursor;
+
+ eqcr_commit_checks(eqcr);
+ DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
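+ /* publish the caller-filled entry body before the verb that h/w polls */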
+ dma_wmb();
+ eqcursor = eqcr->cursor;
+ eqcursor->_ncw_verb = myverb | eqcr->vbit;
+ dpaa_flush(eqcursor);
+ eqcr_inc(eqcr);
+ eqcr->available--;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ eqcr->busy = 0;
+#endif
+}
+
+static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
+{
+ qm_cl_touch_ro(portal, QM_CL_EQCR_CI_CENA);
+}
+
+static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 diff, old_ci = eqcr->ci;
+
+ eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) & (QM_EQCR_SIZE - 1);
+ qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
+ diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ return diff;
+}
+
+static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+
+ eqcr->ithresh = ithresh;
+ qm_out(portal, QM_REG_EQCR_ITR, ithresh);
+}
+
+static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+
+ return eqcr->available;
+}
+
+static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+
+ return QM_EQCR_SIZE - 1 - eqcr->available;
+}
+
+/* --- DQRR API --- */
+
+#define DQRR_SHIFT ilog2(sizeof(struct qm_dqrr_entry))
+#define DQRR_CARRY (uintptr_t)(QM_DQRR_SIZE << DQRR_SHIFT)
+
+static const struct qm_dqrr_entry *dqrr_carryclear(
+ const struct qm_dqrr_entry *p)
+{
+ uintptr_t addr = (uintptr_t)p;
+
+ addr &= ~DQRR_CARRY;
+
+ return (const struct qm_dqrr_entry *)addr;
+}
+
+static inline int dqrr_ptr2idx(const struct qm_dqrr_entry *e)
+{
+ return ((uintptr_t)e >> DQRR_SHIFT) & (QM_DQRR_SIZE - 1);
+}
+
+static const struct qm_dqrr_entry *dqrr_inc(const struct qm_dqrr_entry *e)
+{
+ return dqrr_carryclear(e + 1);
+}
+
+static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
+{
+ qm_out(portal, QM_REG_CFG, (qm_in(portal, QM_REG_CFG) & 0xff0fffff) |
+ ((mf & (QM_DQRR_SIZE - 1)) << 20));
+}
+
+static inline int qm_dqrr_init(struct qm_portal *portal,
+ const struct qm_portal_config *config,
+ enum qm_dqrr_dmode dmode,
+ enum qm_dqrr_pmode pmode,
+ enum qm_dqrr_cmode cmode, u8 max_fill)
+{
+ struct qm_dqrr *dqrr = &portal->dqrr;
+ u32 cfg;
+
+ /* Make sure the DQRR will be idle when we enable */
+ qm_out(portal, QM_REG_DQRR_SDQCR, 0);
+ qm_out(portal, QM_REG_DQRR_VDQCR, 0);
+ qm_out(portal, QM_REG_DQRR_PDQCR, 0);
+ dqrr->ring = portal->addr.ce + QM_CL_DQRR;
+ dqrr->pi = qm_in(portal, QM_REG_DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
+ dqrr->ci = qm_in(portal, QM_REG_DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
+ dqrr->cursor = dqrr->ring + dqrr->ci;
+ dqrr->fill = dpaa_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
+ dqrr->vbit = (qm_in(portal, QM_REG_DQRR_PI_CINH) & QM_DQRR_SIZE) ?
+ QM_DQRR_VERB_VBIT : 0;
+ dqrr->ithresh = qm_in(portal, QM_REG_DQRR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ dqrr->dmode = dmode;
+ dqrr->pmode = pmode;
+ dqrr->cmode = cmode;
+#endif
+ /* Invalidate every ring entry before beginning */
+ for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
+ dpaa_invalidate(qm_cl(dqrr->ring, cfg));
+ cfg = (qm_in(portal, QM_REG_CFG) & 0xff000f00) |
+ ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
+ ((dmode & 1) << 18) | /* DP */
+ ((cmode & 3) << 16) | /* DCM */
+ 0xa0 | /* RE+SE */
+ (0 ? 0x40 : 0) | /* Ignore RP */
+ (0 ? 0x10 : 0); /* Ignore SP */
+ qm_out(portal, QM_REG_CFG, cfg);
+ qm_dqrr_set_maxfill(portal, max_fill);
+ return 0;
+}
+
+static inline void qm_dqrr_finish(struct qm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ struct qm_dqrr *dqrr = &portal->dqrr;
+
+ if (dqrr->cmode != qm_dqrr_cdc &&
+ dqrr->ci != dqrr_ptr2idx(dqrr->cursor))
+ pr_crit("Ignoring completed DQRR entries\n");
+#endif
+}
+
+static inline const struct qm_dqrr_entry *qm_dqrr_current(
+ struct qm_portal *portal)
+{
+ struct qm_dqrr *dqrr = &portal->dqrr;
+
+ if (!dqrr->fill)
+ return NULL;
+ return dqrr->cursor;
+}
+
+static inline u8 qm_dqrr_next(struct qm_portal *portal)
+{
+ struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPAA_ASSERT(dqrr->fill);
+ dqrr->cursor = dqrr_inc(dqrr->cursor);
+ return --dqrr->fill;
+}
+
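+/*
+ * Producer-side valid-bit polling: an entry at the s/w producer index whose
+ * verb valid-bit matches the expected 'vbit' was newly written by h/w;
+ * account for it and flip the expectation on ring wrap.
+ */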
+static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
+{
+ struct qm_dqrr *dqrr = &portal->dqrr;
+ struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);
+
+ DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
+#ifndef CONFIG_FSL_PAMU
+ /*
+ * If PAMU is not available we need to invalidate the cache.
+ * When PAMU is available the cache is updated by stash
+ */
+ dpaa_invalidate_touch_ro(res);
+#endif
+ if ((res->verb & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
+ dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
+ if (!dqrr->pi)
+ dqrr->vbit ^= QM_DQRR_VERB_VBIT;
+ dqrr->fill++;
+ }
+}
+
+static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
+ const struct qm_dqrr_entry *dq,
+ int park)
+{
+ __maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
+ int idx = dqrr_ptr2idx(dq);
+
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+ DPAA_ASSERT((dqrr->ring + idx) == dq);
+ DPAA_ASSERT(idx < QM_DQRR_SIZE);
+ qm_out(portal, QM_REG_DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */
+ ((park ? 1 : 0) << 6) | /* DQRR_DCAP::PK */
+ idx); /* DQRR_DCAP::DCAP_CI */
+}
+
+static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u32 bitmask)
+{
+ __maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+ qm_out(portal, QM_REG_DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */
+ (bitmask << 16)); /* DQRR_DCAP::DCAP_CI */
+}
+
+static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
+{
+ qm_out(portal, QM_REG_DQRR_SDQCR, sdqcr);
+}
+
+static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
+{
+ qm_out(portal, QM_REG_DQRR_VDQCR, vdqcr);
+}
+
+static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+ qm_out(portal, QM_REG_DQRR_ITR, ithresh);
+}
+
+/* --- MR API --- */
+
+#define MR_SHIFT ilog2(sizeof(union qm_mr_entry))
+#define MR_CARRY (uintptr_t)(QM_MR_SIZE << MR_SHIFT)
+
+static union qm_mr_entry *mr_carryclear(union qm_mr_entry *p)
+{
+ uintptr_t addr = (uintptr_t)p;
+
+ addr &= ~MR_CARRY;
+
+ return (union qm_mr_entry *)addr;
+}
+
+static inline int mr_ptr2idx(const union qm_mr_entry *e)
+{
+ return ((uintptr_t)e >> MR_SHIFT) & (QM_MR_SIZE - 1);
+}
+
+static inline union qm_mr_entry *mr_inc(union qm_mr_entry *e)
+{
+ return mr_carryclear(e + 1);
+}
+
+static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode,
+ enum qm_mr_cmode cmode)
+{
+ struct qm_mr *mr = &portal->mr;
+ u32 cfg;
+
+ mr->ring = portal->addr.ce + QM_CL_MR;
+ mr->pi = qm_in(portal, QM_REG_MR_PI_CINH) & (QM_MR_SIZE - 1);
+ mr->ci = qm_in(portal, QM_REG_MR_CI_CINH) & (QM_MR_SIZE - 1);
+ mr->cursor = mr->ring + mr->ci;
+ mr->fill = dpaa_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
+ mr->vbit = (qm_in(portal, QM_REG_MR_PI_CINH) & QM_MR_SIZE)
+ ? QM_MR_VERB_VBIT : 0;
+ mr->ithresh = qm_in(portal, QM_REG_MR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mr->pmode = pmode;
+ mr->cmode = cmode;
+#endif
+ cfg = (qm_in(portal, QM_REG_CFG) & 0xfffff0ff) |
+ ((cmode & 1) << 8); /* QCSP_CFG:MM */
+ qm_out(portal, QM_REG_CFG, cfg);
+ return 0;
+}
+
+static inline void qm_mr_finish(struct qm_portal *portal)
+{
+ struct qm_mr *mr = &portal->mr;
+
+ if (mr->ci != mr_ptr2idx(mr->cursor))
+ pr_crit("Ignoring completed MR entries\n");
+}
+
+static inline const union qm_mr_entry *qm_mr_current(struct qm_portal *portal)
+{
+ struct qm_mr *mr = &portal->mr;
+
+ if (!mr->fill)
+ return NULL;
+ return mr->cursor;
+}
+
+static inline int qm_mr_next(struct qm_portal *portal)
+{
+ struct qm_mr *mr = &portal->mr;
+
+ DPAA_ASSERT(mr->fill);
+ mr->cursor = mr_inc(mr->cursor);
+ return --mr->fill;
+}
+
+static inline void qm_mr_pvb_update(struct qm_portal *portal)
+{
+ struct qm_mr *mr = &portal->mr;
+ union qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
+
+ DPAA_ASSERT(mr->pmode == qm_mr_pvb);
+
+ if ((res->verb & QM_MR_VERB_VBIT) == mr->vbit) {
+ mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
+ if (!mr->pi)
+ mr->vbit ^= QM_MR_VERB_VBIT;
+ mr->fill++;
+ res = mr_inc(res);
+ }
+ dpaa_invalidate_touch_ro(res);
+}
+
+static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
+{
+ struct qm_mr *mr = &portal->mr;
+
+ DPAA_ASSERT(mr->cmode == qm_mr_cci);
+ mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
+ qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
+}
+
+static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
+{
+ struct qm_mr *mr = &portal->mr;
+
+ DPAA_ASSERT(mr->cmode == qm_mr_cci);
+ mr->ci = mr_ptr2idx(mr->cursor);
+ qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
+}
+
+static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+ qm_out(portal, QM_REG_MR_ITR, ithresh);
+}
+
+/* --- Management command API --- */
+
+static inline int qm_mc_init(struct qm_portal *portal)
+{
+ struct qm_mc *mc = &portal->mc;
+
+ mc->cr = portal->addr.ce + QM_CL_CR;
+ mc->rr = portal->addr.ce + QM_CL_RR0;
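+ /*
+ * Responses alternate between two registers (RR0/RR1); infer which one
+ * answers next from the valid-bit of the last command written.
+ */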
+ mc->rridx = (mc->cr->_ncw_verb & QM_MCC_VERB_VBIT)
+ ? 0 : 1;
+ mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = qman_mc_idle;
+#endif
+ return 0;
+}
+
+static inline void qm_mc_finish(struct qm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ struct qm_mc *mc = &portal->mc;
+
+ DPAA_ASSERT(mc->state == qman_mc_idle);
+ if (mc->state != qman_mc_idle)
+ pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline union qm_mc_command *qm_mc_start(struct qm_portal *portal)
+{
+ struct qm_mc *mc = &portal->mc;
+
+ DPAA_ASSERT(mc->state == qman_mc_idle);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = qman_mc_user;
+#endif
+ dpaa_zero(mc->cr);
+ return mc->cr;
+}
+
+static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
+{
+ struct qm_mc *mc = &portal->mc;
+ union qm_mc_result *rr = mc->rr + mc->rridx;
+
+ DPAA_ASSERT(mc->state == qman_mc_user);
+ dma_wmb();
+ mc->cr->_ncw_verb = myverb | mc->vbit;
+ dpaa_flush(mc->cr);
+ dpaa_invalidate_touch_ro(rr);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = qman_mc_hw;
+#endif
+}
+
+static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal)
+{
+ struct qm_mc *mc = &portal->mc;
+ union qm_mc_result *rr = mc->rr + mc->rridx;
+
+ DPAA_ASSERT(mc->state == qman_mc_hw);
+ /*
+ * The inactive response register's verb byte always returns zero until
+ * its command is submitted and completed. This includes the valid-bit,
+ * in case you were wondering...
+ */
+ if (!rr->verb) {
+ dpaa_invalidate_touch_ro(rr);
+ return NULL;
+ }
+ mc->rridx ^= 1;
+ mc->vbit ^= QM_MCC_VERB_VBIT;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = qman_mc_idle;
+#endif
+ return rr;
+}
+
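+/*
+ * Busy-poll for a management-command response, giving up after roughly
+ * QM_MCR_TIMEOUT microseconds; returns zero on timeout, non-zero once *mcr
+ * points at the response.
+ */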
+static inline int qm_mc_result_timeout(struct qm_portal *portal,
+ union qm_mc_result **mcr)
+{
+ int timeout = QM_MCR_TIMEOUT;
+
+ do {
+ *mcr = qm_mc_result(portal);
+ if (*mcr)
+ break;
+ udelay(1);
+ } while (--timeout);
+
+ return timeout;
+}
+
+static inline void fq_set(struct qman_fq *fq, u32 mask)
+{
+ fq->flags |= mask;
+}
+
+static inline void fq_clear(struct qman_fq *fq, u32 mask)
+{
+ fq->flags &= ~mask;
+}
+
+static inline int fq_isset(struct qman_fq *fq, u32 mask)
+{
+ return fq->flags & mask;
+}
+
+static inline int fq_isclear(struct qman_fq *fq, u32 mask)
+{
+ return !(fq->flags & mask);
+}
+
+struct qman_portal {
+ struct qm_portal p;
+ /* PORTAL_BITS_*** - dynamic, strictly internal */
+ unsigned long bits;
+ /* interrupt sources processed by portal_isr(), configurable */
+ unsigned long irq_sources;
+ u32 use_eqcr_ci_stashing;
+ /* only 1 volatile dequeue at a time */
+ struct qman_fq *vdqcr_owned;
+ u32 sdqcr;
+ /* probing time config params for cpu-affine portals */
+ const struct qm_portal_config *config;
+ /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
+ struct qman_cgrs *cgrs;
+ /* linked-list of CSCN handlers. */
+ struct list_head cgr_cbs;
+ /* list lock */
+ spinlock_t cgr_lock;
+ struct work_struct congestion_work;
+ struct work_struct mr_work;
+ char irqname[MAX_IRQNAME];
+};
+
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+static u16 affine_channels[NR_CPUS];
+static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
+struct qman_portal *affine_portals[NR_CPUS];
+
+static inline struct qman_portal *get_affine_portal(void)
+{
+ return &get_cpu_var(qman_affine_portal);
+}
+
+static inline void put_affine_portal(void)
+{
+ put_cpu_var(qman_affine_portal);
+}
+
+static struct workqueue_struct *qm_portal_wq;
+
+int qman_wq_alloc(void)
+{
+ qm_portal_wq = alloc_workqueue("qman_portal_wq", 0, 1);
+ if (!qm_portal_wq)
+ return -ENOMEM;
+ return 0;
+}
+
+/*
+ * This is what everything can wait on, even if it migrates to a different cpu
+ * from the one whose affine portal it is waiting on.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
+
+static struct qman_fq **fq_table;
+static u32 num_fqids;
+
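+/*
+ * Each FQID owns two lookup slots: [fqid * 2] for the full-service FQ object
+ * and [fqid * 2 + 1] for an enqueue-only (QMAN_FQ_FLAG_NO_MODIFY) reference,
+ * hence the factor of 2 in the table sizing below.
+ */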
+int qman_alloc_fq_table(u32 _num_fqids)
+{
+ num_fqids = _num_fqids;
+
+ fq_table = vzalloc(array3_size(sizeof(struct qman_fq *),
+ num_fqids, 2));
+ if (!fq_table)
+ return -ENOMEM;
+
+ pr_debug("Allocated fq lookup table at %p, entry count %u\n",
+ fq_table, num_fqids * 2);
+ return 0;
+}
+
+static struct qman_fq *idx_to_fq(u32 idx)
+{
+ struct qman_fq *fq;
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ if (WARN_ON(idx >= num_fqids * 2))
+ return NULL;
+#endif
+ fq = fq_table[idx];
+ DPAA_ASSERT(!fq || idx == fq->idx);
+
+ return fq;
+}
+
+/*
+ * Only returns full-service fq objects, not enqueue-only
+ * references (QMAN_FQ_FLAG_NO_MODIFY).
+ */
+static struct qman_fq *fqid_to_fq(u32 fqid)
+{
+ return idx_to_fq(fqid * 2);
+}
+
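+/*
+ * The 32-bit tag/context_b field can't hold a pointer on 64-bit kernels, so
+ * the table index is stored there instead; 32-bit kernels stash the
+ * qman_fq pointer directly.
+ */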
+static struct qman_fq *tag_to_fq(u32 tag)
+{
+#if BITS_PER_LONG == 64
+ return idx_to_fq(tag);
+#else
+ return (struct qman_fq *)tag;
+#endif
+}
+
+static u32 fq_to_tag(struct qman_fq *fq)
+{
+#if BITS_PER_LONG == 64
+ return fq->idx;
+#else
+ return (u32)fq;
+#endif
+}
+
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+ unsigned int poll_limit);
+static void qm_congestion_task(struct work_struct *work);
+static void qm_mr_process_task(struct work_struct *work);
+
+static irqreturn_t portal_isr(int irq, void *ptr)
+{
+ struct qman_portal *p = ptr;
+ u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;
+ u32 clear = 0;
+
+ if (unlikely(!is))
+ return IRQ_NONE;
+
+ /* DQRR-handling if it's interrupt-driven */
+ if (is & QM_PIRQ_DQRI) {
+ __poll_portal_fast(p, QMAN_POLL_LIMIT);
+ clear = QM_DQAVAIL_MASK | QM_PIRQ_DQRI;
+ }
+ /* Handling of anything else that's interrupt-driven */
+ clear |= __poll_portal_slow(p, is) & QM_PIRQ_SLOW;
+ qm_out(&p->p, QM_REG_ISR, clear);
+ return IRQ_HANDLED;
+}
+
+static int drain_mr_fqrni(struct qm_portal *p)
+{
+ const union qm_mr_entry *msg;
+loop:
+ msg = qm_mr_current(p);
+ if (!msg) {
+ /*
+ * If MR was full and h/w had other FQRNI entries to produce, we
+ * need to allow it time to produce those entries once the
+ * existing entries are consumed. A worst-case situation
+ * (fully-loaded system) means h/w sequencers may have to do 3-4
+ * other things before servicing the portal's MR pump, each of
+ * which (if slow) may take ~50 qman cycles (which is ~200
+ * processor cycles). So rounding up and then multiplying this
+ * worst-case estimate by a factor of 10, just to be
+ * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
+ * one entry at a time, so h/w has an opportunity to produce new
+ * entries well before the ring has been fully consumed, so
+ * we're being *really* paranoid here.
+ */
+ msleep(1);
+ msg = qm_mr_current(p);
+ if (!msg)
+ return 0;
+ }
+ if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
+ /* We aren't draining anything but FQRNIs */
+ pr_err("Found verb 0x%x in MR\n", msg->verb);
+ return -1;
+ }
+ qm_mr_next(p);
+ qm_mr_cci_consume(p, 1);
+ goto loop;
+}
+
+static int qman_create_portal(struct qman_portal *portal,
+ const struct qm_portal_config *c,
+ const struct qman_cgrs *cgrs)
+{
+ struct qm_portal *p;
+ int ret;
+ u32 isdr;
+
+ p = &portal->p;
+
+#ifdef CONFIG_FSL_PAMU
+ /* PAMU is required for stashing */
+ portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
+#else
+ portal->use_eqcr_ci_stashing = 0;
+#endif
+ /*
+ * prep the low-level portal struct with the mapped addresses from the
+ * config, everything that follows depends on it and "config" is more
+ * for (de)reference
+ */
+ p->addr.ce = c->addr_virt_ce;
+ p->addr.ce_be = c->addr_virt_ce;
+ p->addr.ci = c->addr_virt_ci;
+ /*
+ * If CI-stashing is used, the current defaults use a threshold of 3,
+ * and stash with higher-than-DQRR priority.
+ */
+ if (qm_eqcr_init(p, qm_eqcr_pvb,
+ portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
+ dev_err(c->dev, "EQCR initialisation failed\n");
+ goto fail_eqcr;
+ }
+ if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
+ qm_dqrr_cdc, DQRR_MAXFILL)) {
+ dev_err(c->dev, "DQRR initialisation failed\n");
+ goto fail_dqrr;
+ }
+ if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
+ dev_err(c->dev, "MR initialisation failed\n");
+ goto fail_mr;
+ }
+ if (qm_mc_init(p)) {
+ dev_err(c->dev, "MC initialisation failed\n");
+ goto fail_mc;
+ }
+ /* static interrupt-gating controls */
+ qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH);
+ qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH);
+ qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD);
+ portal->cgrs = kmalloc_array(2, sizeof(*cgrs), GFP_KERNEL);
+ if (!portal->cgrs)
+ goto fail_cgrs;
+ /* initial snapshot is no-depletion */
+ qman_cgrs_init(&portal->cgrs[1]);
+ if (cgrs)
+ portal->cgrs[0] = *cgrs;
+ else
+ /* if the given mask is NULL, assume all CGRs can be seen */
+ qman_cgrs_fill(&portal->cgrs[0]);
+ INIT_LIST_HEAD(&portal->cgr_cbs);
+ spin_lock_init(&portal->cgr_lock);
+ INIT_WORK(&portal->congestion_work, qm_congestion_task);
+ INIT_WORK(&portal->mr_work, qm_mr_process_task);
+ portal->bits = 0;
+ portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
+ QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
+ QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
+ isdr = 0xffffffff;
+ qm_out(p, QM_REG_ISDR, isdr);
+ portal->irq_sources = 0;
+ qm_out(p, QM_REG_IER, 0);
+ qm_out(p, QM_REG_ISR, 0xffffffff);
+ snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
+ if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
+ dev_err(c->dev, "request_irq() failed\n");
+ goto fail_irq;
+ }
+ if (c->cpu != -1 && irq_can_set_affinity(c->irq) &&
+ irq_set_affinity(c->irq, cpumask_of(c->cpu))) {
+ dev_err(c->dev, "irq_set_affinity() failed\n");
+ goto fail_affinity;
+ }
+
+ /* Need EQCR to be empty before continuing */
+ isdr &= ~QM_PIRQ_EQCI;
+ qm_out(p, QM_REG_ISDR, isdr);
+ ret = qm_eqcr_get_fill(p);
+ if (ret) {
+ dev_err(c->dev, "EQCR unclean\n");
+ goto fail_eqcr_empty;
+ }
+ isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
+ qm_out(p, QM_REG_ISDR, isdr);
+ if (qm_dqrr_current(p)) {
+ dev_err(c->dev, "DQRR unclean\n");
+ qm_dqrr_cdc_consume_n(p, 0xffff);
+ }
+ if (qm_mr_current(p) && drain_mr_fqrni(p)) {
+ /* special handling, drain just in case it's a few FQRNIs */
+ const union qm_mr_entry *e = qm_mr_current(p);
+
+ dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%llx\n",
+ e->verb, e->ern.rc, qm_fd_addr_get64(&e->ern.fd));
+ goto fail_dqrr_mr_empty;
+ }
+ /* Success */
+ portal->config = c;
+ qm_out(p, QM_REG_ISDR, 0);
+ qm_out(p, QM_REG_IIR, 0);
+ /* Write a sane SDQCR */
+ qm_dqrr_sdqcr_set(p, portal->sdqcr);
+ return 0;
+
+fail_dqrr_mr_empty:
+fail_eqcr_empty:
+fail_affinity:
+ free_irq(c->irq, portal);
+fail_irq:
+ kfree(portal->cgrs);
+fail_cgrs:
+ qm_mc_finish(p);
+fail_mc:
+ qm_mr_finish(p);
+fail_mr:
+ qm_dqrr_finish(p);
+fail_dqrr:
+ qm_eqcr_finish(p);
+fail_eqcr:
+ return -EIO;
+}
+
+struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
+ const struct qman_cgrs *cgrs)
+{
+ struct qman_portal *portal;
+ int err;
+
+ portal = &per_cpu(qman_affine_portal, c->cpu);
+ err = qman_create_portal(portal, c, cgrs);
+ if (err)
+ return NULL;
+
+ spin_lock(&affine_mask_lock);
+ cpumask_set_cpu(c->cpu, &affine_mask);
+ affine_channels[c->cpu] = c->channel;
+ affine_portals[c->cpu] = portal;
+ spin_unlock(&affine_mask_lock);
+
+ return portal;
+}
+
+static void qman_destroy_portal(struct qman_portal *qm)
+{
+ const struct qm_portal_config *pcfg;
+
+ /* Stop dequeues on the portal */
+ qm_dqrr_sdqcr_set(&qm->p, 0);
+
+ /*
+ * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
+ * something related to QM_PIRQ_EQCI, this may need fixing.
+ * Also, due to the prefetching model used for CI updates in the enqueue
+ * path, this update will only invalidate the CI cacheline *after*
+ * working on it, so we need to call this twice to ensure a full update
+ * irrespective of where the enqueue processing was at when the teardown
+ * began.
+ */
+ qm_eqcr_cce_update(&qm->p);
+ qm_eqcr_cce_update(&qm->p);
+ pcfg = qm->config;
+
+ free_irq(pcfg->irq, qm);
+
+ kfree(qm->cgrs);
+ qm_mc_finish(&qm->p);
+ qm_mr_finish(&qm->p);
+ qm_dqrr_finish(&qm->p);
+ qm_eqcr_finish(&qm->p);
+
+ qm->config = NULL;
+}
+
+const struct qm_portal_config *qman_destroy_affine_portal(void)
+{
+ struct qman_portal *qm = get_affine_portal();
+ const struct qm_portal_config *pcfg;
+ int cpu;
+
+ pcfg = qm->config;
+ cpu = pcfg->cpu;
+
+ qman_destroy_portal(qm);
+
+ spin_lock(&affine_mask_lock);
+ cpumask_clear_cpu(cpu, &affine_mask);
+ spin_unlock(&affine_mask_lock);
+ put_affine_portal();
+ return pcfg;
+}
+
+/* Inline helper to reduce nesting in __poll_portal_slow() */
+static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
+ const union qm_mr_entry *msg, u8 verb)
+{
+ switch (verb) {
+ case QM_MR_VERB_FQRL:
+ DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
+ fq_clear(fq, QMAN_FQ_STATE_ORL);
+ break;
+ case QM_MR_VERB_FQRN:
+ DPAA_ASSERT(fq->state == qman_fq_state_parked ||
+ fq->state == qman_fq_state_sched);
+ DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
+ fq_clear(fq, QMAN_FQ_STATE_CHANGING);
+ if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
+ fq_set(fq, QMAN_FQ_STATE_NE);
+ if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
+ fq_set(fq, QMAN_FQ_STATE_ORL);
+ fq->state = qman_fq_state_retired;
+ break;
+ case QM_MR_VERB_FQPN:
+ DPAA_ASSERT(fq->state == qman_fq_state_sched);
+ DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
+ fq->state = qman_fq_state_parked;
+ }
+}
+
+static void qm_congestion_task(struct work_struct *work)
+{
+ struct qman_portal *p = container_of(work, struct qman_portal,
+ congestion_work);
+ struct qman_cgrs rr, c;
+ union qm_mc_result *mcr;
+ struct qman_cgr *cgr;
+
+ spin_lock(&p->cgr_lock);
+ qm_mc_start(&p->p);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ spin_unlock(&p->cgr_lock);
+ dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
+ qman_p_irqsource_add(p, QM_PIRQ_CSCI);
+ return;
+ }
+ /* mask out the ones I'm not interested in */
+ qman_cgrs_and(&rr, (struct qman_cgrs *)&mcr->querycongestion.state,
+ &p->cgrs[0]);
+ /* check previous snapshot for delta, enter/exit congestion */
+ qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
+ /* update snapshot */
+ qman_cgrs_cp(&p->cgrs[1], &rr);
+ /* Invoke callback */
+ list_for_each_entry(cgr, &p->cgr_cbs, node)
+ if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
+ cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
+ spin_unlock(&p->cgr_lock);
+ qman_p_irqsource_add(p, QM_PIRQ_CSCI);
+}
+
+static void qm_mr_process_task(struct work_struct *work)
+{
+ struct qman_portal *p = container_of(work, struct qman_portal,
+ mr_work);
+ const union qm_mr_entry *msg;
+ struct qman_fq *fq;
+ u8 verb, num = 0;
+
+ preempt_disable();
+
+ while (1) {
+ qm_mr_pvb_update(&p->p);
+ msg = qm_mr_current(&p->p);
+ if (!msg)
+ break;
+
+ verb = msg->verb & QM_MR_VERB_TYPE_MASK;
+ /* The message is a software ERN iff the 0x20 bit is clear */
+ if (verb & 0x20) {
+ switch (verb) {
+ case QM_MR_VERB_FQRNI:
+ /* nada, we drop FQRNIs on the floor */
+ break;
+ case QM_MR_VERB_FQRN:
+ case QM_MR_VERB_FQRL:
+ /* Lookup in the retirement table */
+ fq = fqid_to_fq(qm_fqid_get(&msg->fq));
+ if (WARN_ON(!fq))
+ break;
+ fq_state_change(p, fq, msg, verb);
+ if (fq->cb.fqs)
+ fq->cb.fqs(p, fq, msg);
+ break;
+ case QM_MR_VERB_FQPN:
+ /* Parked */
+ fq = tag_to_fq(be32_to_cpu(msg->fq.context_b));
+ fq_state_change(p, fq, msg, verb);
+ if (fq->cb.fqs)
+ fq->cb.fqs(p, fq, msg);
+ break;
+ case QM_MR_VERB_DC_ERN:
+ /* DCP ERN */
+ pr_crit_once("Leaking DCP ERNs!\n");
+ break;
+ default:
+ pr_crit("Invalid MR verb 0x%02x\n", verb);
+ }
+ } else {
+ /* It's a software ERN */
+ fq = tag_to_fq(be32_to_cpu(msg->ern.tag));
+ fq->cb.ern(p, fq, msg);
+ }
+ num++;
+ qm_mr_next(&p->p);
+ }
+
+ qm_mr_cci_consume(&p->p, num);
+ qman_p_irqsource_add(p, QM_PIRQ_MRI);
+ preempt_enable();
+}
+
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
+{
+ if (is & QM_PIRQ_CSCI) {
+ qman_p_irqsource_remove(p, QM_PIRQ_CSCI);
+ queue_work_on(smp_processor_id(), qm_portal_wq,
+ &p->congestion_work);
+ }
+
+ if (is & QM_PIRQ_EQRI) {
+ qm_eqcr_cce_update(&p->p);
+ qm_eqcr_set_ithresh(&p->p, 0);
+ wake_up(&affine_queue);
+ }
+
+ if (is & QM_PIRQ_MRI) {
+ qman_p_irqsource_remove(p, QM_PIRQ_MRI);
+ queue_work_on(smp_processor_id(), qm_portal_wq,
+ &p->mr_work);
+ }
+
+ return is;
+}
+
+/*
+ * remove some slowish-path stuff from the "fast path" and make sure it isn't
+ * inlined.
+ */
+static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
+{
+ p->vdqcr_owned = NULL;
+ fq_clear(fq, QMAN_FQ_STATE_VDQCR);
+ wake_up(&affine_queue);
+}
+
+/*
+ * The only states that would conflict with other things if they ran at the
+ * same time on the same cpu are:
+ *
+ * (i) setting/clearing vdqcr_owned, and
+ * (ii) clearing the NE (Not Empty) flag.
+ *
+ * Both are safe, because:
+ *
+ * (i) this clearing can only occur after qman_volatile_dequeue() has set the
+ * vdqcr_owned field (which it does before setting VDQCR), and
+ * qman_volatile_dequeue() blocks interrupts and preemption while this is
+ * done so that we can't interfere.
+ * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
+ * with (i) that API prevents us from interfering until it's safe.
+ *
+ * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far
+ * less frequently (i.e. per-FQ) than __poll_portal_fast() does, so the net
+ * advantage comes from this function not having to "lock" anything at all.
+ *
+ * Note also that the callbacks are invoked at points which are safe against the
+ * above potential conflicts, but that this function itself is not re-entrant
+ * (this is because the function tracks one end of each FIFO in the portal and
+ * we do *not* want to lock that). So the consequence is that it is safe for
+ * user callbacks to call into any QMan API.
+ */
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+ unsigned int poll_limit)
+{
+ const struct qm_dqrr_entry *dq;
+ struct qman_fq *fq;
+ enum qman_cb_dqrr_result res;
+ unsigned int limit = 0;
+
+ do {
+ qm_dqrr_pvb_update(&p->p);
+ dq = qm_dqrr_current(&p->p);
+ if (!dq)
+ break;
+
+ if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
+ /*
+ * VDQCR: don't trust context_b as the FQ may have
+ * been configured for h/w consumption and we're
+ * draining it post-retirement.
+ */
+ fq = p->vdqcr_owned;
+ /*
+ * We only set QMAN_FQ_STATE_NE when retiring, so we
+ * only need to check for clearing it when doing
+ * volatile dequeues. It's one less thing to check
+ * in the critical path (SDQCR).
+ */
+ if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
+ fq_clear(fq, QMAN_FQ_STATE_NE);
+ /*
+ * This is duplicated from the SDQCR code, but we
+ * have stuff to do before *and* after this callback,
+ * and we don't want multiple if()s in the critical
+ * path (SDQCR).
+ */
+ res = fq->cb.dqrr(p, fq, dq);
+ if (res == qman_cb_dqrr_stop)
+ break;
+ /* Check for VDQCR completion */
+ if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
+ clear_vdqcr(p, fq);
+ } else {
+ /* SDQCR: context_b points to the FQ */
+ fq = tag_to_fq(be32_to_cpu(dq->context_b));
+ /* Now let the callback do its stuff */
+ res = fq->cb.dqrr(p, fq, dq);
+ /*
+ * The callback can request that we exit without
+ * consuming this entry or advancing.
+ */
+ if (res == qman_cb_dqrr_stop)
+ break;
+ }
+ /* Interpret 'dq' from a driver perspective. */
+ /*
+ * Parking isn't possible unless HELDACTIVE was set. NB,
+ * FORCEELIGIBLE implies HELDACTIVE, so we only need to
+ * check for HELDACTIVE to cover both.
+ */
+ DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
+ (res != qman_cb_dqrr_park));
+ /* just means "skip it, I'll consume it myself later on" */
+ if (res != qman_cb_dqrr_defer)
+ qm_dqrr_cdc_consume_1ptr(&p->p, dq,
+ res == qman_cb_dqrr_park);
+ /* Move forward */
+ qm_dqrr_next(&p->p);
+ /*
+ * Entry processed and consumed, increment our counter. The
+ * callback can request that we exit after consuming the
+ * entry, and we also exit if we reach our processing limit,
+ * so loop back only if neither of these conditions is met.
+ */
+ } while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);
+
+ return limit;
+}
+
+void qman_p_irqsource_add(struct qman_portal *p, u32 bits)
+{
+ unsigned long irqflags;
+
+ local_irq_save(irqflags);
+ p->irq_sources |= bits & QM_PIRQ_VISIBLE;
+ qm_out(&p->p, QM_REG_IER, p->irq_sources);
+ local_irq_restore(irqflags);
+}
+EXPORT_SYMBOL(qman_p_irqsource_add);
+
+void qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
+{
+ unsigned long irqflags;
+ u32 ier;
+
+ /*
+ * Our interrupt handler only processes and clears status register bits that
+ * are in p->irq_sources. As we're trimming that mask, if one of them
+ * were to assert in the status register just before we remove it from
+ * the enable register, there would be an interrupt-storm when we
+ * release the IRQ lock. So we wait for the enable register update to
+ * take effect in h/w (by reading it back) and then clear all other bits
+ * in the status register. I.e. we clear them from ISR once it's certain
+ * IER won't allow them to reassert.
+ */
+ local_irq_save(irqflags);
+ bits &= QM_PIRQ_VISIBLE;
+ p->irq_sources &= ~bits;
+ qm_out(&p->p, QM_REG_IER, p->irq_sources);
+ ier = qm_in(&p->p, QM_REG_IER);
+ /*
+ * Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
+ * data-dependency, ie. to protect against re-ordering.
+ */
+ qm_out(&p->p, QM_REG_ISR, ~ier);
+ local_irq_restore(irqflags);
+}
+EXPORT_SYMBOL(qman_p_irqsource_remove);
+
+const cpumask_t *qman_affine_cpus(void)
+{
+ return &affine_mask;
+}
+EXPORT_SYMBOL(qman_affine_cpus);
+
+u16 qman_affine_channel(int cpu)
+{
+ if (cpu < 0) {
+ struct qman_portal *portal = get_affine_portal();
+
+ cpu = portal->config->cpu;
+ put_affine_portal();
+ }
+ WARN_ON(!cpumask_test_cpu(cpu, &affine_mask));
+ return affine_channels[cpu];
+}
+EXPORT_SYMBOL(qman_affine_channel);
+
+struct qman_portal *qman_get_affine_portal(int cpu)
+{
+ return affine_portals[cpu];
+}
+EXPORT_SYMBOL(qman_get_affine_portal);
+
+int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
+{
+ return __poll_portal_fast(p, limit);
+}
+EXPORT_SYMBOL(qman_p_poll_dqrr);
+
+void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
+{
+ unsigned long irqflags;
+
+ local_irq_save(irqflags);
+ pools &= p->config->pools;
+ p->sdqcr |= pools;
+ qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
+ local_irq_restore(irqflags);
+}
+EXPORT_SYMBOL(qman_p_static_dequeue_add);
+
+/* Frame queue API */
+
+static const char *mcr_result_str(u8 result)
+{
+ switch (result) {
+ case QM_MCR_RESULT_NULL:
+ return "QM_MCR_RESULT_NULL";
+ case QM_MCR_RESULT_OK:
+ return "QM_MCR_RESULT_OK";
+ case QM_MCR_RESULT_ERR_FQID:
+ return "QM_MCR_RESULT_ERR_FQID";
+ case QM_MCR_RESULT_ERR_FQSTATE:
+ return "QM_MCR_RESULT_ERR_FQSTATE";
+ case QM_MCR_RESULT_ERR_NOTEMPTY:
+ return "QM_MCR_RESULT_ERR_NOTEMPTY";
+ case QM_MCR_RESULT_PENDING:
+ return "QM_MCR_RESULT_PENDING";
+ case QM_MCR_RESULT_ERR_BADCOMMAND:
+ return "QM_MCR_RESULT_ERR_BADCOMMAND";
+ }
+ return "<unknown MCR result>";
+}
+
+int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
+{
+ if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
+ int ret = qman_alloc_fqid(&fqid);
+
+ if (ret)
+ return ret;
+ }
+ fq->fqid = fqid;
+ fq->flags = flags;
+ fq->state = qman_fq_state_oos;
+ fq->cgr_groupid = 0;
+
+ /* A context_b of 0 is allegedly special, so don't use that fqid */
+ if (fqid == 0 || fqid >= num_fqids) {
+ WARN(1, "bad fqid %d\n", fqid);
+ return -EINVAL;
+ }
+
+ fq->idx = fqid * 2;
+ if (flags & QMAN_FQ_FLAG_NO_MODIFY)
+ fq->idx++;
+
+ WARN_ON(fq_table[fq->idx]);
+ fq_table[fq->idx] = fq;
+
+ return 0;
+}
+EXPORT_SYMBOL(qman_create_fq);
+
+void qman_destroy_fq(struct qman_fq *fq)
+{
+ /*
+ * We don't need to lock the FQ as it is a pre-condition that the FQ be
+ * quiesced. Instead, run some checks.
+ */
+ switch (fq->state) {
+ case qman_fq_state_parked:
+ case qman_fq_state_oos:
+ if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
+ qman_release_fqid(fq->fqid);
+
+ DPAA_ASSERT(fq_table[fq->idx]);
+ fq_table[fq->idx] = NULL;
+ return;
+ default:
+ break;
+ }
+ DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
+}
+EXPORT_SYMBOL(qman_destroy_fq);
+
+u32 qman_fq_fqid(struct qman_fq *fq)
+{
+ return fq->fqid;
+}
+EXPORT_SYMBOL(qman_fq_fqid);
+
+int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p;
+ u8 res, myverb;
+ int ret = 0;
+
+ myverb = (flags & QMAN_INITFQ_FLAG_SCHED)
+ ? QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;
+
+ if (fq->state != qman_fq_state_oos &&
+ fq->state != qman_fq_state_parked)
+ return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+ return -EINVAL;
+#endif
+ if (opts && (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_OAC)) {
+ /* And can't be set at the same time as TDTHRESH */
+ if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_TDTHRESH)
+ return -EINVAL;
+ }
+ /* Issue an INITFQ_[PARKED|SCHED] management command */
+ p = get_affine_portal();
+ if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
+ (fq->state != qman_fq_state_oos &&
+ fq->state != qman_fq_state_parked)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ if (opts)
+ mcc->initfq = *opts;
+ qm_fqid_set(&mcc->fq, fq->fqid);
+ mcc->initfq.count = 0;
+ /*
+ * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a
+ * demux pointer. Otherwise, the caller-provided value is allowed to
+ * stand, don't overwrite it.
+ */
+ if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
+ dma_addr_t phys_fq;
+
+ mcc->initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTB);
+ mcc->initfq.fqd.context_b = cpu_to_be32(fq_to_tag(fq));
+ /*
+ * and the physical address - NB, if the user wasn't trying to
+ * set CONTEXTA, clear the stashing settings.
+ */
+ if (!(be16_to_cpu(mcc->initfq.we_mask) &
+ QM_INITFQ_WE_CONTEXTA)) {
+ mcc->initfq.we_mask |=
+ cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
+ memset(&mcc->initfq.fqd.context_a, 0,
+ sizeof(mcc->initfq.fqd.context_a));
+ } else {
+ struct qman_portal *p = qman_dma_portal;
+
+ phys_fq = dma_map_single(p->config->dev, fq,
+ sizeof(*fq), DMA_TO_DEVICE);
+ if (dma_mapping_error(p->config->dev, phys_fq)) {
+ dev_err(p->config->dev, "dma_mapping failed\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
+ }
+ }
+ if (flags & QMAN_INITFQ_FLAG_LOCAL) {
+ int wq = 0;
+
+ if (!(be16_to_cpu(mcc->initfq.we_mask) &
+ QM_INITFQ_WE_DESTWQ)) {
+ mcc->initfq.we_mask |=
+ cpu_to_be16(QM_INITFQ_WE_DESTWQ);
+ wq = 4;
+ }
+ qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq);
+ }
+ qm_mc_commit(&p->p, myverb);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_err(p->config->dev, "MCR timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
+ res = mcr->result;
+ if (res != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+ if (opts) {
+ if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_FQCTRL) {
+ if (be16_to_cpu(opts->fqd.fq_ctrl) & QM_FQCTRL_CGE)
+ fq_set(fq, QMAN_FQ_STATE_CGR_EN);
+ else
+ fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
+ }
+ if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_CGID)
+ fq->cgr_groupid = opts->fqd.cgid;
+ }
+ fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
+ qman_fq_state_sched : qman_fq_state_parked;
+
+out:
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_init_fq);
+
+int qman_schedule_fq(struct qman_fq *fq)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p;
+ int ret = 0;
+
+ if (fq->state != qman_fq_state_parked)
+ return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+ return -EINVAL;
+#endif
+ /* Issue an ALTERFQ_SCHED management command */
+ p = get_affine_portal();
+ if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
+ fq->state != qman_fq_state_parked) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ qm_fqid_set(&mcc->fq, fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_err(p->config->dev, "ALTER_SCHED timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+ fq->state = qman_fq_state_sched;
+out:
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_schedule_fq);
+
+int qman_retire_fq(struct qman_fq *fq, u32 *flags)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p;
+ int ret;
+ u8 res;
+
+ if (fq->state != qman_fq_state_parked &&
+ fq->state != qman_fq_state_sched)
+ return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+ return -EINVAL;
+#endif
+ p = get_affine_portal();
+ if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
+ fq->state == qman_fq_state_retired ||
+ fq->state == qman_fq_state_oos) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ qm_fqid_set(&mcc->fq, fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_crit(p->config->dev, "ALTER_RETIRE timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
+ res = mcr->result;
+ /*
+ * "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
+ * and defer the flags until FQRNI or FQRN (respectively) show up. But
+ * "Friendly" is to process OK immediately, and not set CHANGING. We do
+ * friendly, otherwise the caller doesn't necessarily have a fully
+ * "retired" FQ on return even if the retirement was immediate. However
+ * this does mean some code duplication between here and
+ * fq_state_change().
+ */
+ if (res == QM_MCR_RESULT_OK) {
+ ret = 0;
+ /* Process 'fq' right away, we'll ignore FQRNI */
+ if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
+ fq_set(fq, QMAN_FQ_STATE_NE);
+ if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
+ fq_set(fq, QMAN_FQ_STATE_ORL);
+ if (flags)
+ *flags = fq->flags;
+ fq->state = qman_fq_state_retired;
+ if (fq->cb.fqs) {
+ /*
+ * Another issue with supporting "immediate" retirement
+ * is that we're forced to drop FQRNIs, because by the
+ * time they're seen it may already be "too late" (the
+ * fq may have been OOS'd and free()'d already). But if
+ * the upper layer wants a callback whether it's
+ * immediate or not, we have to fake a "MR" entry to
+ * look like an FQRNI...
+ */
+ union qm_mr_entry msg;
+
+ msg.verb = QM_MR_VERB_FQRNI;
+ msg.fq.fqs = mcr->alterfq.fqs;
+ qm_fqid_set(&msg.fq, fq->fqid);
+ msg.fq.context_b = cpu_to_be32(fq_to_tag(fq));
+ fq->cb.fqs(p, fq, &msg);
+ }
+ } else if (res == QM_MCR_RESULT_PENDING) {
+ ret = 1;
+ fq_set(fq, QMAN_FQ_STATE_CHANGING);
+ } else {
+ ret = -EIO;
+ }
+out:
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_retire_fq);
+
+int qman_oos_fq(struct qman_fq *fq)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p;
+ int ret = 0;
+
+ if (fq->state != qman_fq_state_retired)
+ return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+ return -EINVAL;
+#endif
+ p = get_affine_portal();
+ if (fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS) ||
+ fq->state != qman_fq_state_retired) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ qm_fqid_set(&mcc->fq, fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+ fq->state = qman_fq_state_oos;
+out:
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_oos_fq);
+
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ int ret = 0;
+
+ mcc = qm_mc_start(&p->p);
+ qm_fqid_set(&mcc->fq, fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+ if (mcr->result == QM_MCR_RESULT_OK)
+ *fqd = mcr->queryfq.fqd;
+ else
+ ret = -EIO;
+out:
+ put_affine_portal();
+ return ret;
+}
+
+int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ int ret = 0;
+
+ mcc = qm_mc_start(&p->p);
+ qm_fqid_set(&mcc->fq, fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+ if (mcr->result == QM_MCR_RESULT_OK)
+ *np = mcr->queryfq_np;
+ else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
+ ret = -ERANGE;
+ else
+ ret = -EIO;
+out:
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_query_fq_np);
+
+static int qman_query_cgr(struct qman_cgr *cgr,
+ struct qm_mcr_querycgr *cgrd)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ int ret = 0;
+
+ mcc = qm_mc_start(&p->p);
+ mcc->cgr.cgid = cgr->cgrid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
+ if (mcr->result == QM_MCR_RESULT_OK)
+ *cgrd = mcr->querycgr;
+ else {
+ dev_err(p->config->dev, "QUERY_CGR failed: %s\n",
+ mcr_result_str(mcr->result));
+ ret = -EIO;
+ }
+out:
+ put_affine_portal();
+ return ret;
+}
+
+int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result)
+{
+ struct qm_mcr_querycgr query_cgr;
+ int err;
+
+ err = qman_query_cgr(cgr, &query_cgr);
+ if (err)
+ return err;
+
+ *result = !!query_cgr.cgr.cs;
+ return 0;
+}
+EXPORT_SYMBOL(qman_query_cgr_congested);
+
+/* internal function used as a wait_event() expression */
+static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
+{
+ unsigned long irqflags;
+ int ret = -EBUSY;
+
+ local_irq_save(irqflags);
+ if (p->vdqcr_owned)
+ goto out;
+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+ goto out;
+
+ fq_set(fq, QMAN_FQ_STATE_VDQCR);
+ p->vdqcr_owned = fq;
+ qm_dqrr_vdqcr_set(&p->p, vdqcr);
+ ret = 0;
+out:
+ local_irq_restore(irqflags);
+ return ret;
+}
+
+static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
+{
+ int ret;
+
+ *p = get_affine_portal();
+ ret = set_p_vdqcr(*p, fq, vdqcr);
+ put_affine_portal();
+ return ret;
+}
+
+static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
+ u32 vdqcr, u32 flags)
+{
+ int ret = 0;
+
+ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+ ret = wait_event_interruptible(affine_queue,
+ !set_vdqcr(p, fq, vdqcr));
+ else
+ wait_event(affine_queue, !set_vdqcr(p, fq, vdqcr));
+ return ret;
+}
+
+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr)
+{
+ struct qman_portal *p;
+ int ret;
+
+ if (fq->state != qman_fq_state_parked &&
+ fq->state != qman_fq_state_retired)
+ return -EINVAL;
+ if (vdqcr & QM_VDQCR_FQID_MASK)
+ return -EINVAL;
+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+ return -EBUSY;
+ vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
+ if (flags & QMAN_VOLATILE_FLAG_WAIT)
+ ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
+ else
+ ret = set_vdqcr(&p, fq, vdqcr);
+ if (ret)
+ return ret;
+ /* VDQCR is set */
+ if (flags & QMAN_VOLATILE_FLAG_FINISH) {
+ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+ /*
+ * NB: don't propagate any error - the caller wouldn't
+ * know whether the VDQCR was issued or not. A signal
+ * could arrive after returning anyway, so the caller
+ * can check signal_pending() if that's an issue.
+ */
+ wait_event_interruptible(affine_queue,
+ !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+ else
+ wait_event(affine_queue,
+ !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+ }
+ return 0;
+}
+EXPORT_SYMBOL(qman_volatile_dequeue);
+
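+/*
+ * Refresh the EQCR consumer index: while slots remain, a read-only prefetch
+ * of the CI cacheline is enough; otherwise force a real update to discover
+ * entries h/w has meanwhile consumed.
+ */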
+static void update_eqcr_ci(struct qman_portal *p, u8 avail)
+{
+ if (avail)
+ qm_eqcr_cce_prefetch(&p->p);
+ else
+ qm_eqcr_cce_update(&p->p);
+}
+
+int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd)
+{
+ struct qman_portal *p;
+ struct qm_eqcr_entry *eq;
+ unsigned long irqflags;
+ u8 avail;
+
+ p = get_affine_portal();
+ local_irq_save(irqflags);
+
+ if (p->use_eqcr_ci_stashing) {
+ /*
+ * The stashing case is easy, only update if we need to in
+ * order to try to liberate ring entries.
+ */
+ eq = qm_eqcr_start_stash(&p->p);
+ } else {
+ /*
+ * The non-stashing case is harder, need to prefetch ahead of
+ * time.
+ */
+ avail = qm_eqcr_get_avail(&p->p);
+ if (avail < 2)
+ update_eqcr_ci(p, avail);
+ eq = qm_eqcr_start_no_stash(&p->p);
+ }
+
+ if (unlikely(!eq))
+ goto out;
+
+ qm_fqid_set(eq, fq->fqid);
+ eq->tag = cpu_to_be32(fq_to_tag(fq));
+ eq->fd = *fd;
+
+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE);
+out:
+ local_irq_restore(irqflags);
+ put_affine_portal();
+ return 0;
+}
+EXPORT_SYMBOL(qman_enqueue);
+
+static int qm_modify_cgr(struct qman_cgr *cgr, u32 flags,
+ struct qm_mcc_initcgr *opts)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ u8 verb = QM_MCC_VERB_MODIFYCGR;
+ int ret = 0;
+
+ mcc = qm_mc_start(&p->p);
+ if (opts)
+ mcc->initcgr = *opts;
+ mcc->initcgr.cgid = cgr->cgrid;
+ if (flags & QMAN_CGR_FLAG_USE_INIT)
+ verb = QM_MCC_VERB_INITCGR;
+ qm_mc_commit(&p->p, verb);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
+ if (mcr->result != QM_MCR_RESULT_OK)
+ ret = -EIO;
+
+out:
+ put_affine_portal();
+ return ret;
+}
+
+#define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0)
+
+/* congestion state change notification target update control */
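+/*
+ * On QMan rev >= 3.0 the portal index goes through a write-enable update
+ * register; earlier revisions keep a plain bitmask of target portals in
+ * cscn_targ, which the callers read-modify-write via 'val'.
+ */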
+static void qm_cgr_cscn_targ_set(struct __qm_mc_cgr *cgr, int pi, u32 val)
+{
+ if (qman_ip_rev >= QMAN_REV30)
+ cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi |
+ QM_CGR_TARG_UDP_CTRL_WRITE_BIT);
+ else
+ cgr->cscn_targ = cpu_to_be32(val | QM_CGR_TARG_PORTAL(pi));
+}
+
+static void qm_cgr_cscn_targ_clear(struct __qm_mc_cgr *cgr, int pi, u32 val)
+{
+ if (qman_ip_rev >= QMAN_REV30)
+ cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi);
+ else
+ cgr->cscn_targ = cpu_to_be32(val & ~QM_CGR_TARG_PORTAL(pi));
+}
+
+static u8 qman_cgr_cpus[CGR_NUM];
+
+void qman_init_cgr_all(void)
+{
+ struct qman_cgr cgr;
+ int err_cnt = 0;
+
+ for (cgr.cgrid = 0; cgr.cgrid < CGR_NUM; cgr.cgrid++) {
+ if (qm_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL))
+ err_cnt++;
+ }
+
+ if (err_cnt)
+ pr_err("Warning: %d error%s while initialising CGR h/w\n",
+ err_cnt, (err_cnt > 1) ? "s" : "");
+}
+
+int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+ struct qm_mcc_initcgr *opts)
+{
+ struct qm_mcr_querycgr cgr_state;
+ int ret;
+ struct qman_portal *p;
+
+ /*
+ * We have to check that the provided CGRID is within the limits of the
+ * data-structures, for obvious reasons. However we'll let h/w take
+ * care of determining whether it's within the limits of what exists on
+ * the SoC.
+ */
+ if (cgr->cgrid >= CGR_NUM)
+ return -EINVAL;
+
+ preempt_disable();
+ p = get_affine_portal();
+ qman_cgr_cpus[cgr->cgrid] = smp_processor_id();
+ preempt_enable();
+
+ cgr->chan = p->config->channel;
+ spin_lock(&p->cgr_lock);
+
+ if (opts) {
+ struct qm_mcc_initcgr local_opts = *opts;
+
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret)
+ goto out;
+
+ qm_cgr_cscn_targ_set(&local_opts.cgr, PORTAL_IDX(p),
+ be32_to_cpu(cgr_state.cgr.cscn_targ));
+ local_opts.we_mask |= cpu_to_be16(QM_CGR_WE_CSCN_TARG);
+
+ /* send init if flags indicate so */
+ if (flags & QMAN_CGR_FLAG_USE_INIT)
+ ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
+ &local_opts);
+ else
+ ret = qm_modify_cgr(cgr, 0, &local_opts);
+ if (ret)
+ goto out;
+ }
+
+ list_add(&cgr->node, &p->cgr_cbs);
+
+ /* Determine if newly added object requires its callback to be called */
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret) {
+ /* we can't go back, so proceed and return success */
+ dev_err(p->config->dev, "CGR HW state partially modified\n");
+ ret = 0;
+ goto out;
+ }
+ if (cgr->cb && cgr_state.cgr.cscn_en &&
+ qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
+ cgr->cb(p, cgr, 1);
+out:
+ spin_unlock(&p->cgr_lock);
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_create_cgr);
+
+int qman_delete_cgr(struct qman_cgr *cgr)
+{
+ unsigned long irqflags;
+ struct qm_mcr_querycgr cgr_state;
+ struct qm_mcc_initcgr local_opts;
+ int ret = 0;
+ struct qman_cgr *i;
+ struct qman_portal *p = get_affine_portal();
+
+ if (cgr->chan != p->config->channel) {
+ /* attempt to delete from other portal than creator */
+ dev_err(p->config->dev, "CGR not owned by current portal");
+ dev_dbg(p->config->dev, " create 0x%x, delete 0x%x\n",
+ cgr->chan, p->config->channel);
+
+ ret = -EINVAL;
+ goto put_portal;
+ }
+ memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+ spin_lock_irqsave(&p->cgr_lock, irqflags);
+ list_del(&cgr->node);
+ /*
+ * If there are no other CGR objects for this CGRID in the list,
+ * update CSCN_TARG accordingly
+ */
+ list_for_each_entry(i, &p->cgr_cbs, node)
+ if (i->cgrid == cgr->cgrid && i->cb)
+ goto release_lock;
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret) {
+ /* add back to the list */
+ list_add(&cgr->node, &p->cgr_cbs);
+ goto release_lock;
+ }
+
+ local_opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_TARG);
+ qm_cgr_cscn_targ_clear(&local_opts.cgr, PORTAL_IDX(p),
+ be32_to_cpu(cgr_state.cgr.cscn_targ));
+
+ ret = qm_modify_cgr(cgr, 0, &local_opts);
+ if (ret)
+ /* add back to the list */
+ list_add(&cgr->node, &p->cgr_cbs);
+release_lock:
+ spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+put_portal:
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_delete_cgr);
+
+struct cgr_comp {
+ struct qman_cgr *cgr;
+ struct completion completion;
+};
+
+static void qman_delete_cgr_smp_call(void *p)
+{
+ qman_delete_cgr((struct qman_cgr *)p);
+}
+
+void qman_delete_cgr_safe(struct qman_cgr *cgr)
+{
+ preempt_disable();
+ if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
+ smp_call_function_single(qman_cgr_cpus[cgr->cgrid],
+ qman_delete_cgr_smp_call, cgr, true);
+ preempt_enable();
+ return;
+ }
+
+ qman_delete_cgr(cgr);
+ preempt_enable();
+}
+EXPORT_SYMBOL(qman_delete_cgr_safe);
+
+/* Cleanup FQs */
+
+static int _qm_mr_consume_and_match_verb(struct qm_portal *p, int v)
+{
+ const union qm_mr_entry *msg;
+ int found = 0;
+
+ qm_mr_pvb_update(p);
+ msg = qm_mr_current(p);
+ while (msg) {
+ if ((msg->verb & QM_MR_VERB_TYPE_MASK) == v)
+ found = 1;
+ qm_mr_next(p);
+ qm_mr_cci_consume_to_current(p);
+ qm_mr_pvb_update(p);
+ msg = qm_mr_current(p);
+ }
+ return found;
+}
+
+static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
+ bool wait)
+{
+ const struct qm_dqrr_entry *dqrr;
+ int found = 0;
+
+ do {
+ qm_dqrr_pvb_update(p);
+ dqrr = qm_dqrr_current(p);
+ if (!dqrr)
+ cpu_relax();
+ } while (wait && !dqrr);
+
+ while (dqrr) {
+ if (qm_fqid_get(dqrr) == fqid && (dqrr->stat & s))
+ found = 1;
+ qm_dqrr_cdc_consume_1ptr(p, dqrr, 0);
+ qm_dqrr_pvb_update(p);
+ qm_dqrr_next(p);
+ dqrr = qm_dqrr_current(p);
+ }
+ return found;
+}
+
+#define qm_mr_drain(p, V) \
+ _qm_mr_consume_and_match_verb(p, QM_MR_VERB_##V)
+
+#define qm_dqrr_drain(p, f, S) \
+ _qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, false)
+
+#define qm_dqrr_drain_wait(p, f, S) \
+ _qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, true)
+
+#define qm_dqrr_drain_nomatch(p) \
+ _qm_dqrr_consume_and_match(p, 0, 0, false)
+
+static int qman_shutdown_fq(u32 fqid)
+{
+ struct qman_portal *p;
+ struct device *dev;
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ int orl_empty, drain = 0, ret = 0;
+ u32 channel, wq, res;
+ u8 state;
+
+ p = get_affine_portal();
+ dev = p->config->dev;
+ /* Determine the state of the FQID */
+ mcc = qm_mc_start(&p->p);
+ qm_fqid_set(&mcc->fq, fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_err(dev, "QUERYFQ_NP timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+ state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
+ if (state == QM_MCR_NP_STATE_OOS)
+ goto out; /* Already OOS, no need to do any more checks */
+
+ /* Query which channel the FQ is using */
+ mcc = qm_mc_start(&p->p);
+ qm_fqid_set(&mcc->fq, fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_err(dev, "QUERYFQ timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+ /* Need to store these since the MCR gets reused */
+ channel = qm_fqd_get_chan(&mcr->queryfq.fqd);
+ wq = qm_fqd_get_wq(&mcr->queryfq.fqd);
+
+ switch (state) {
+ case QM_MCR_NP_STATE_TEN_SCHED:
+ case QM_MCR_NP_STATE_TRU_SCHED:
+ case QM_MCR_NP_STATE_ACTIVE:
+ case QM_MCR_NP_STATE_PARKED:
+ orl_empty = 0;
+ mcc = qm_mc_start(&p->p);
+ qm_fqid_set(&mcc->fq, fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_err(dev, "QUERYFQ_NP timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_RETIRE);
+ res = mcr->result; /* Make a copy as we reuse MCR below */
+
+ if (res == QM_MCR_RESULT_PENDING) {
+ /*
+ * Need to wait for the FQRN in the message ring, which
+ * will only occur once the FQ has been drained. In
+ * order for the FQ to drain, the portal needs to be set
+ * to dequeue from the channel the FQ is scheduled on.
+ */
+ int found_fqrn = 0;
+ u16 dequeue_wq = 0;
+
+ /* Flag that we need to drain FQ */
+ drain = 1;
+
+ if (channel >= qm_channel_pool1 &&
+ channel < qm_channel_pool1 + 15) {
+ /* Pool channel, enable the bit in the portal */
+ dequeue_wq = (channel -
+ qm_channel_pool1 + 1)<<4 | wq;
+ } else if (channel < qm_channel_pool1) {
+ /* Dedicated channel */
+ dequeue_wq = wq;
+ } else {
+ dev_err(dev, "Can't recover FQ 0x%x, ch: 0x%x",
+ fqid, channel);
+ ret = -EBUSY;
+ goto out;
+ }
+ /* Set the sdqcr to drain this channel */
+ if (channel < qm_channel_pool1)
+ qm_dqrr_sdqcr_set(&p->p,
+ QM_SDQCR_TYPE_ACTIVE |
+ QM_SDQCR_CHANNELS_DEDICATED);
+ else
+ qm_dqrr_sdqcr_set(&p->p,
+ QM_SDQCR_TYPE_ACTIVE |
+ QM_SDQCR_CHANNELS_POOL_CONV
+ (channel));
+ do {
+ /* Keep draining DQRR while checking the MR */
+ qm_dqrr_drain_nomatch(&p->p);
+ /* Process message ring too */
+ found_fqrn = qm_mr_drain(&p->p, FQRN);
+ cpu_relax();
+ } while (!found_fqrn);
+
+ }
+ if (res != QM_MCR_RESULT_OK &&
+ res != QM_MCR_RESULT_PENDING) {
+ dev_err(dev, "retire_fq failed: FQ 0x%x, res=0x%x\n",
+ fqid, res);
+ ret = -EIO;
+ goto out;
+ }
+ if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
+ /*
+ * ORL had no entries, no need to wait until the
+ * ERNs come in
+ */
+ orl_empty = 1;
+ }
+ /*
+ * Retirement succeeded, check to see if FQ needs
+ * to be drained
+ */
+ if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
+ /* FQ is Not Empty, drain using volatile DQ commands */
+ do {
+ u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);
+
+ qm_dqrr_vdqcr_set(&p->p, vdqcr);
+ /*
+ * Wait for a dequeue and process the dequeues,
+ * making sure to empty the ring completely
+ */
+ } while (qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY));
+ }
+ qm_dqrr_sdqcr_set(&p->p, 0);
+
+ while (!orl_empty) {
+ /* Wait for the ORL to have been completely drained */
+ orl_empty = qm_mr_drain(&p->p, FQRL);
+ cpu_relax();
+ }
+ mcc = qm_mc_start(&p->p);
+ qm_fqid_set(&mcc->fq, fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_OOS);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ dev_err(dev, "OOS after drain fail: FQ 0x%x (0x%x)\n",
+ fqid, mcr->result);
+ ret = -EIO;
+ goto out;
+ }
+ break;
+
+ case QM_MCR_NP_STATE_RETIRED:
+ /* Send OOS Command */
+ mcc = qm_mc_start(&p->p);
+ qm_fqid_set(&mcc->fq, fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_OOS);
+ if (mcr->result) {
+ dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n",
+ fqid, mcr->result);
+ ret = -EIO;
+ goto out;
+ }
+ break;
+
+ case QM_MCR_NP_STATE_OOS:
+ /* Done */
+ break;
+
+ default:
+ ret = -EIO;
+ }
+
+out:
+ put_affine_portal();
+ return ret;
+}
+
+const struct qm_portal_config *qman_get_qm_portal_config(
+ struct qman_portal *portal)
+{
+ return portal->config;
+}
+EXPORT_SYMBOL(qman_get_qm_portal_config);
+
+struct gen_pool *qm_fqalloc; /* FQID allocator */
+struct gen_pool *qm_qpalloc; /* pool-channel allocator */
+struct gen_pool *qm_cgralloc; /* CGR ID allocator */
+
+static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
+{
+ unsigned long addr;
+
+ if (!p)
+ return -ENODEV;
+
+ addr = gen_pool_alloc(p, cnt);
+ if (!addr)
+ return -ENOMEM;
+
+ *result = addr & ~DPAA_GENALLOC_OFF;
+
+ return 0;
+}
+
+int qman_alloc_fqid_range(u32 *result, u32 count)
+{
+ return qman_alloc_range(qm_fqalloc, result, count);
+}
+EXPORT_SYMBOL(qman_alloc_fqid_range);
+
+int qman_alloc_pool_range(u32 *result, u32 count)
+{
+ return qman_alloc_range(qm_qpalloc, result, count);
+}
+EXPORT_SYMBOL(qman_alloc_pool_range);
+
+int qman_alloc_cgrid_range(u32 *result, u32 count)
+{
+ return qman_alloc_range(qm_cgralloc, result, count);
+}
+EXPORT_SYMBOL(qman_alloc_cgrid_range);
+
+int qman_release_fqid(u32 fqid)
+{
+ int ret = qman_shutdown_fq(fqid);
+
+ if (ret) {
+ pr_debug("FQID %d leaked\n", fqid);
+ return ret;
+ }
+
+ gen_pool_free(qm_fqalloc, fqid | DPAA_GENALLOC_OFF, 1);
+ return 0;
+}
+EXPORT_SYMBOL(qman_release_fqid);
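+
+/*
+ * Minimal usage sketch for the FQID allocator (illustrative only; the FQ
+ * setup between allocation and release is elided):
+ *
+ *	u32 fqid;
+ *
+ *	if (qman_alloc_fqid_range(&fqid, 1))
+ *		return -ENOMEM;
+ *	... create and use a frame queue on fqid ...
+ *	qman_release_fqid(fqid);  (shuts the FQ down before freeing the ID)
+ */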
+
+static int qpool_cleanup(u32 qp)
+{
+ /*
+ * We query all FQDs starting from FQID 1 until we get an "invalid
+ * FQID" error, looking for non-OOS FQDs whose destination channel
+ * is the pool-channel being released. When a non-OOS FQD is found,
+ * we attempt to clean it up.
+ */
+ struct qman_fq fq = {
+ .fqid = QM_FQID_RANGE_START
+ };
+ int err;
+
+ do {
+ struct qm_mcr_queryfq_np np;
+
+ err = qman_query_fq_np(&fq, &np);
+ if (err == -ERANGE)
+ /* FQID range exceeded, found no problems */
+ return 0;
+ else if (WARN_ON(err))
+ return err;
+
+ if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
+ struct qm_fqd fqd;
+
+ err = qman_query_fq(&fq, &fqd);
+ if (WARN_ON(err))
+ return err;
+ if (qm_fqd_get_chan(&fqd) == qp) {
+ /* The channel is the FQ's target, clean it */
+ err = qman_shutdown_fq(fq.fqid);
+ if (err)
+ /*
+ * Couldn't shut down the FQ
+ * so the pool must be leaked
+ */
+ return err;
+ }
+ }
+ /* Move to the next FQID */
+ fq.fqid++;
+ } while (1);
+}
+
+int qman_release_pool(u32 qp)
+{
+ int ret;
+
+ ret = qpool_cleanup(qp);
+ if (ret) {
+ pr_debug("CHID %d leaked\n", qp);
+ return ret;
+ }
+
+ gen_pool_free(qm_qpalloc, qp | DPAA_GENALLOC_OFF, 1);
+ return 0;
+}
+EXPORT_SYMBOL(qman_release_pool);
+
+static int cgr_cleanup(u32 cgrid)
+{
+ /*
+ * Query all FQDs starting from FQID 1 until we get an "invalid FQID"
+ * error, looking for non-OOS FQDs whose CGR is the CGR being released.
+ */
+ struct qman_fq fq = {
+ .fqid = QM_FQID_RANGE_START
+ };
+ int err;
+
+ do {
+ struct qm_mcr_queryfq_np np;
+
+ err = qman_query_fq_np(&fq, &np);
+ if (err == -ERANGE)
+ /* FQID range exceeded, found no problems */
+ return 0;
+ else if (WARN_ON(err))
+ return err;
+
+ if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
+ struct qm_fqd fqd;
+
+ err = qman_query_fq(&fq, &fqd);
+ if (WARN_ON(err))
+ return err;
+ if (be16_to_cpu(fqd.fq_ctrl) & QM_FQCTRL_CGE &&
+ fqd.cgid == cgrid) {
+ pr_err("CRGID 0x%x is being used by FQID 0x%x, CGR will be leaked\n",
+ cgrid, fq.fqid);
+ return -EIO;
+ }
+ }
+ /* Move to the next FQID */
+ fq.fqid++;
+ } while (1);
+}
+
+int qman_release_cgrid(u32 cgrid)
+{
+ int ret;
+
+ ret = cgr_cleanup(cgrid);
+ if (ret) {
+ pr_debug("CGRID %d leaked\n", cgrid);
+ return ret;
+ }
+
+ gen_pool_free(qm_cgralloc, cgrid | DPAA_GENALLOC_OFF, 1);
+ return 0;
+}
+EXPORT_SYMBOL(qman_release_cgrid);
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
new file mode 100644
index 000000000..6fd5fef5f
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -0,0 +1,861 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+u16 qman_ip_rev;
+EXPORT_SYMBOL(qman_ip_rev);
+u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
+EXPORT_SYMBOL(qm_channel_pool1);
+u16 qm_channel_caam = QMAN_CHANNEL_CAAM;
+EXPORT_SYMBOL(qm_channel_caam);
+
+/* Register offsets */
+#define REG_QCSP_LIO_CFG(n) (0x0000 + ((n) * 0x10))
+#define REG_QCSP_IO_CFG(n) (0x0004 + ((n) * 0x10))
+#define REG_QCSP_DD_CFG(n) (0x000c + ((n) * 0x10))
+#define REG_DD_CFG 0x0200
+#define REG_DCP_CFG(n) (0x0300 + ((n) * 0x10))
+#define REG_DCP_DD_CFG(n) (0x0304 + ((n) * 0x10))
+#define REG_DCP_DLM_AVG(n) (0x030c + ((n) * 0x10))
+#define REG_PFDR_FPC 0x0400
+#define REG_PFDR_FP_HEAD 0x0404
+#define REG_PFDR_FP_TAIL 0x0408
+#define REG_PFDR_FP_LWIT 0x0410
+#define REG_PFDR_CFG 0x0414
+#define REG_SFDR_CFG 0x0500
+#define REG_SFDR_IN_USE 0x0504
+#define REG_WQ_CS_CFG(n) (0x0600 + ((n) * 0x04))
+#define REG_WQ_DEF_ENC_WQID 0x0630
+#define REG_WQ_SC_DD_CFG(n) (0x640 + ((n) * 0x04))
+#define REG_WQ_PC_DD_CFG(n) (0x680 + ((n) * 0x04))
+#define REG_WQ_DC0_DD_CFG(n) (0x6c0 + ((n) * 0x04))
+#define REG_WQ_DC1_DD_CFG(n) (0x700 + ((n) * 0x04))
+#define REG_WQ_DCn_DD_CFG(n) (0x6c0 + ((n) * 0x40)) /* n=2,3 */
+#define REG_CM_CFG 0x0800
+#define REG_ECSR 0x0a00
+#define REG_ECIR 0x0a04
+#define REG_EADR 0x0a08
+#define REG_ECIR2 0x0a0c
+#define REG_EDATA(n) (0x0a10 + ((n) * 0x04))
+#define REG_SBEC(n) (0x0a80 + ((n) * 0x04))
+#define REG_MCR 0x0b00
+#define REG_MCP(n) (0x0b04 + ((n) * 0x04))
+#define REG_MISC_CFG 0x0be0
+#define REG_HID_CFG 0x0bf0
+#define REG_IDLE_STAT 0x0bf4
+#define REG_IP_REV_1 0x0bf8
+#define REG_IP_REV_2 0x0bfc
+#define REG_FQD_BARE 0x0c00
+#define REG_PFDR_BARE 0x0c20
+#define REG_offset_BAR 0x0004 /* relative to REG_[FQD|PFDR]_BARE */
+#define REG_offset_AR 0x0010 /* relative to REG_[FQD|PFDR]_BARE */
+#define REG_QCSP_BARE 0x0c80
+#define REG_QCSP_BAR 0x0c84
+#define REG_CI_SCHED_CFG 0x0d00
+#define REG_SRCIDR 0x0d04
+#define REG_LIODNR 0x0d08
+#define REG_CI_RLM_AVG 0x0d14
+#define REG_ERR_ISR 0x0e00
+#define REG_ERR_IER 0x0e04
+#define REG_REV3_QCSP_LIO_CFG(n) (0x1000 + ((n) * 0x10))
+#define REG_REV3_QCSP_IO_CFG(n) (0x1004 + ((n) * 0x10))
+#define REG_REV3_QCSP_DD_CFG(n) (0x100c + ((n) * 0x10))
+
+/* Assists for QMAN_MCR */
+#define MCR_INIT_PFDR 0x01000000
+#define MCR_get_rslt(v) (u8)((v) >> 24)
+#define MCR_rslt_idle(r) (!(r) || ((r) >= 0xf0))
+#define MCR_rslt_ok(r) ((r) == 0xf0)
+#define MCR_rslt_eaccess(r) ((r) == 0xf8)
+#define MCR_rslt_inval(r) ((r) == 0xff)
+
+/*
+ * Corenet initiator settings. Stash request queues are 4-deep to match the
+ * cores' ability to snarf. Stash priority is 3, other priorities are 2.
+ */
+#define QM_CI_SCHED_CFG_SRCCIV 4
+#define QM_CI_SCHED_CFG_SRQ_W 3
+#define QM_CI_SCHED_CFG_RW_W 2
+#define QM_CI_SCHED_CFG_BMAN_W 2
+/* write SRCCIV enable */
+#define QM_CI_SCHED_CFG_SRCCIV_EN BIT(31)
+
+/* Follows WQ_CS_CFG0-5 */
+enum qm_wq_class {
+ qm_wq_portal = 0,
+ qm_wq_pool = 1,
+ qm_wq_fman0 = 2,
+ qm_wq_fman1 = 3,
+ qm_wq_caam = 4,
+ qm_wq_pme = 5,
+ qm_wq_first = qm_wq_portal,
+ qm_wq_last = qm_wq_pme
+};
+
+/* Follows FQD_[BARE|BAR|AR] and PFDR_[BARE|BAR|AR] */
+enum qm_memory {
+ qm_memory_fqd,
+ qm_memory_pfdr
+};
+
+/* Used by all error interrupt registers except 'inhibit' */
+#define QM_EIRQ_CIDE 0x20000000 /* Corenet Initiator Data Error */
+#define QM_EIRQ_CTDE 0x10000000 /* Corenet Target Data Error */
+#define QM_EIRQ_CITT 0x08000000 /* Corenet Invalid Target Transaction */
+#define QM_EIRQ_PLWI 0x04000000 /* PFDR Low Watermark */
+#define QM_EIRQ_MBEI 0x02000000 /* Multi-bit ECC Error */
+#define QM_EIRQ_SBEI 0x01000000 /* Single-bit ECC Error */
+#define QM_EIRQ_PEBI 0x00800000 /* PFDR Enqueues Blocked Interrupt */
+#define QM_EIRQ_IFSI 0x00020000 /* Invalid FQ Flow Control State */
+#define QM_EIRQ_ICVI 0x00010000 /* Invalid Command Verb */
+#define QM_EIRQ_IDDI 0x00000800 /* Invalid Dequeue (Direct-connect) */
+#define QM_EIRQ_IDFI 0x00000400 /* Invalid Dequeue FQ */
+#define QM_EIRQ_IDSI 0x00000200 /* Invalid Dequeue Source */
+#define QM_EIRQ_IDQI 0x00000100 /* Invalid Dequeue Queue */
+#define QM_EIRQ_IECE 0x00000010 /* Invalid Enqueue Configuration */
+#define QM_EIRQ_IEOI 0x00000008 /* Invalid Enqueue Overflow */
+#define QM_EIRQ_IESI 0x00000004 /* Invalid Enqueue State */
+#define QM_EIRQ_IECI 0x00000002 /* Invalid Enqueue Channel */
+#define QM_EIRQ_IEQI 0x00000001 /* Invalid Enqueue Queue */
+
+/* QMAN_ECIR valid error bit */
+#define PORTAL_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IESI | QM_EIRQ_IEOI | \
+ QM_EIRQ_IDQI | QM_EIRQ_IDSI | QM_EIRQ_IDFI | \
+ QM_EIRQ_IDDI | QM_EIRQ_ICVI | QM_EIRQ_IFSI)
+#define FQID_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IECI | QM_EIRQ_IESI | \
+ QM_EIRQ_IEOI | QM_EIRQ_IDQI | QM_EIRQ_IDFI | \
+ QM_EIRQ_IFSI)
+
+struct qm_ecir {
+ u32 info; /* res[30-31], ptyp[29], pnum[24-28], fqid[0-23] */
+};
+
+static bool qm_ecir_is_dcp(const struct qm_ecir *p)
+{
+ return p->info & BIT(29);
+}
+
+static int qm_ecir_get_pnum(const struct qm_ecir *p)
+{
+ return (p->info >> 24) & 0x1f;
+}
+
+static int qm_ecir_get_fqid(const struct qm_ecir *p)
+{
+ return p->info & (BIT(24) - 1);
+}
+
+struct qm_ecir2 {
+ u32 info; /* ptyp[31], res[10-30], pnum[0-9] */
+};
+
+static bool qm_ecir2_is_dcp(const struct qm_ecir2 *p)
+{
+ return p->info & BIT(31);
+}
+
+static int qm_ecir2_get_pnum(const struct qm_ecir2 *p)
+{
+ return p->info & (BIT(10) - 1);
+}
+
+struct qm_eadr {
+ u32 info; /* memid[24-27], eadr[0-11] */
+ /* v3: memid[24-28], eadr[0-15] */
+};
+
+static int qm_eadr_get_memid(const struct qm_eadr *p)
+{
+ return (p->info >> 24) & 0xf;
+}
+
+static int qm_eadr_get_eadr(const struct qm_eadr *p)
+{
+ return p->info & (BIT(12) - 1);
+}
+
+static int qm_eadr_v3_get_memid(const struct qm_eadr *p)
+{
+ return (p->info >> 24) & 0x1f;
+}
+
+static int qm_eadr_v3_get_eadr(const struct qm_eadr *p)
+{
+ return p->info & (BIT(16) - 1);
+}
+
+struct qman_hwerr_txt {
+ u32 mask;
+ const char *txt;
+};
+
+static const struct qman_hwerr_txt qman_hwerr_txts[] = {
+ { QM_EIRQ_CIDE, "Corenet Initiator Data Error" },
+ { QM_EIRQ_CTDE, "Corenet Target Data Error" },
+ { QM_EIRQ_CITT, "Corenet Invalid Target Transaction" },
+ { QM_EIRQ_PLWI, "PFDR Low Watermark" },
+ { QM_EIRQ_MBEI, "Multi-bit ECC Error" },
+ { QM_EIRQ_SBEI, "Single-bit ECC Error" },
+ { QM_EIRQ_PEBI, "PFDR Enqueues Blocked Interrupt" },
+ { QM_EIRQ_ICVI, "Invalid Command Verb" },
+ { QM_EIRQ_IFSI, "Invalid Flow Control State" },
+ { QM_EIRQ_IDDI, "Invalid Dequeue (Direct-connect)" },
+ { QM_EIRQ_IDFI, "Invalid Dequeue FQ" },
+ { QM_EIRQ_IDSI, "Invalid Dequeue Source" },
+ { QM_EIRQ_IDQI, "Invalid Dequeue Queue" },
+ { QM_EIRQ_IECE, "Invalid Enqueue Configuration" },
+ { QM_EIRQ_IEOI, "Invalid Enqueue Overflow" },
+ { QM_EIRQ_IESI, "Invalid Enqueue State" },
+ { QM_EIRQ_IECI, "Invalid Enqueue Channel" },
+ { QM_EIRQ_IEQI, "Invalid Enqueue Queue" },
+};
+
+struct qman_error_info_mdata {
+ u16 addr_mask;
+ u16 bits;
+ const char *txt;
+};
+
+static const struct qman_error_info_mdata error_mdata[] = {
+ { 0x01FF, 24, "FQD cache tag memory 0" },
+ { 0x01FF, 24, "FQD cache tag memory 1" },
+ { 0x01FF, 24, "FQD cache tag memory 2" },
+ { 0x01FF, 24, "FQD cache tag memory 3" },
+ { 0x0FFF, 512, "FQD cache memory" },
+ { 0x07FF, 128, "SFDR memory" },
+ { 0x01FF, 72, "WQ context memory" },
+ { 0x00FF, 240, "CGR memory" },
+ { 0x00FF, 302, "Internal Order Restoration List memory" },
+ { 0x01FF, 256, "SW portal ring memory" },
+};
+
+#define QMAN_ERRS_TO_DISABLE (QM_EIRQ_PLWI | QM_EIRQ_PEBI)
+
+/*
+ * TODO: unimplemented registers
+ *
+ * A list of QMan registers not yet covered by this driver:
+ * QCSP_DD_IHRSR, QCSP_DD_IHRFR, QCSP_DD_HASR,
+ * DCP_DD_IHRSR, DCP_DD_IHRFR, DCP_DD_HASR, CM_CFG,
+ * QMAN_EECC, QMAN_SBET, QMAN_EINJ, QMAN_SBEC0-12
+ */
+
+/* Pointer to the start of the QMan's CCSR space */
+static u32 __iomem *qm_ccsr_start;
+/* An SDQCR mask comprising all the available/visible pool channels */
+static u32 qm_pools_sdqcr;
+static int __qman_probed;
+
+static inline u32 qm_ccsr_in(u32 offset)
+{
+ return ioread32be(qm_ccsr_start + offset/4);
+}
+
+static inline void qm_ccsr_out(u32 offset, u32 val)
+{
+ iowrite32be(val, qm_ccsr_start + offset/4);
+}
+
+u32 qm_get_pools_sdqcr(void)
+{
+ return qm_pools_sdqcr;
+}
+
+enum qm_dc_portal {
+ qm_dc_portal_fman0 = 0,
+ qm_dc_portal_fman1 = 1
+};
+
+static void qm_set_dc(enum qm_dc_portal portal, int ed, u8 sernd)
+{
+ DPAA_ASSERT(!ed || portal == qm_dc_portal_fman0 ||
+ portal == qm_dc_portal_fman1);
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ qm_ccsr_out(REG_DCP_CFG(portal),
+ (ed ? 0x1000 : 0) | (sernd & 0x3ff));
+ else
+ qm_ccsr_out(REG_DCP_CFG(portal),
+ (ed ? 0x100 : 0) | (sernd & 0x1f));
+}
+
+static void qm_set_wq_scheduling(enum qm_wq_class wq_class,
+ u8 cs_elev, u8 csw2, u8 csw3, u8 csw4,
+ u8 csw5, u8 csw6, u8 csw7)
+{
+ qm_ccsr_out(REG_WQ_CS_CFG(wq_class), ((cs_elev & 0xff) << 24) |
+ ((csw2 & 0x7) << 20) | ((csw3 & 0x7) << 16) |
+ ((csw4 & 0x7) << 12) | ((csw5 & 0x7) << 8) |
+ ((csw6 & 0x7) << 4) | (csw7 & 0x7));
+}
+
+static void qm_set_hid(void)
+{
+ qm_ccsr_out(REG_HID_CFG, 0);
+}
+
+static void qm_set_corenet_initiator(void)
+{
+ qm_ccsr_out(REG_CI_SCHED_CFG, QM_CI_SCHED_CFG_SRCCIV_EN |
+ (QM_CI_SCHED_CFG_SRCCIV << 24) |
+ (QM_CI_SCHED_CFG_SRQ_W << 8) |
+ (QM_CI_SCHED_CFG_RW_W << 4) |
+ QM_CI_SCHED_CFG_BMAN_W);
+}
+
+static void qm_get_version(u16 *id, u8 *major, u8 *minor)
+{
+ u32 v = qm_ccsr_in(REG_IP_REV_1);
+ *id = (v >> 16);
+ *major = (v >> 8) & 0xff;
+ *minor = v & 0xff;
+}
+
+#define PFDR_AR_EN BIT(31)
+static void qm_set_memory(enum qm_memory memory, u64 ba, u32 size)
+{
+ u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE;
+ u32 exp = ilog2(size);
+
+ /* choke if size isn't within range */
+ DPAA_ASSERT((size >= 4096) && (size <= 1024*1024*1024) &&
+ is_power_of_2(size));
+ /* choke if 'ba' has lower-alignment than 'size' */
+ DPAA_ASSERT(!(ba & (size - 1)));
+ qm_ccsr_out(offset, upper_32_bits(ba));
+ qm_ccsr_out(offset + REG_offset_BAR, lower_32_bits(ba));
+ qm_ccsr_out(offset + REG_offset_AR, PFDR_AR_EN | (exp - 1));
+}
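+
+/*
+ * Worked example: for a 1 MB FQD area at a 1 MB-aligned base address,
+ * exp = ilog2(0x100000) = 20 and the AR field is written as
+ * PFDR_AR_EN | 19, i.e. the hardware decodes the size as 2^(field + 1).
+ */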
+
+static void qm_set_pfdr_threshold(u32 th, u8 k)
+{
+ qm_ccsr_out(REG_PFDR_FP_LWIT, th & 0xffffff);
+ qm_ccsr_out(REG_PFDR_CFG, k);
+}
+
+static void qm_set_sfdr_threshold(u16 th)
+{
+ qm_ccsr_out(REG_SFDR_CFG, th & 0x3ff);
+}
+
+static int qm_init_pfdr(struct device *dev, u32 pfdr_start, u32 num)
+{
+ u8 rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR));
+
+ DPAA_ASSERT(pfdr_start && !(pfdr_start & 7) && !(num & 7) && num);
+ /* Make sure the command interface is 'idle' */
+ if (!MCR_rslt_idle(rslt)) {
+ dev_crit(dev, "QMAN_MCR isn't idle");
+ WARN_ON(1);
+ }
+
+ /* Write the MCR command params then the verb */
+ qm_ccsr_out(REG_MCP(0), pfdr_start);
+ /*
+ * TODO: remove this - it's a workaround for a model bug that is
+ * corrected in more recent versions. We use the workaround until
+ * everyone has upgraded.
+ */
+ qm_ccsr_out(REG_MCP(1), pfdr_start + num - 16);
+ dma_wmb();
+ qm_ccsr_out(REG_MCR, MCR_INIT_PFDR);
+ /* Poll for the result */
+ do {
+ rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR));
+ } while (!MCR_rslt_idle(rslt));
+ if (MCR_rslt_ok(rslt))
+ return 0;
+ if (MCR_rslt_eaccess(rslt))
+ return -EACCES;
+ if (MCR_rslt_inval(rslt))
+ return -EINVAL;
+ dev_crit(dev, "Unexpected result from MCR_INIT_PFDR: %02x\n", rslt);
+ return -ENODEV;
+}
+
+/*
+ * QMan needs two global memory areas initialized at boot time:
+ * 1) FQD: Frame Queue Descriptors used to manage frame queues
+ * 2) PFDR: Packed Frame Queue Descriptor Records used to store frames
+ * Both areas are reserved using the device tree reserved memory framework
+ * and the addresses and sizes are initialized when the QMan device is probed
+ */
+static dma_addr_t fqd_a, pfdr_a;
+static size_t fqd_sz, pfdr_sz;
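+
+/*
+ * Illustrative reserved-memory fragment for the two areas (sizes and node
+ * names are examples only; the compatibles match the PPC handlers below):
+ *
+ *	qman_fqd: qman-fqd {
+ *		compatible = "fsl,qman-fqd";
+ *		size = <0 0x400000>;
+ *		alignment = <0 0x400000>;
+ *		no-map;
+ *	};
+ *	qman_pfdr: qman-pfdr {
+ *		compatible = "fsl,qman-pfdr";
+ *		size = <0 0x2000000>;
+ *		alignment = <0 0x2000000>;
+ *		no-map;
+ *	};
+ */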
+
+#ifdef CONFIG_PPC
+/*
+ * Support for PPC Device Tree backward compatibility when the compatible
+ * strings are set to "fsl,qman-fqd" and "fsl,qman-pfdr"
+ */
+static int zero_priv_mem(phys_addr_t addr, size_t sz)
+{
+ /* map as cacheable, non-guarded */
+ void __iomem *tmpp = ioremap_prot(addr, sz, 0);
+
+ if (!tmpp)
+ return -ENOMEM;
+
+ memset_io(tmpp, 0, sz);
+ flush_dcache_range((unsigned long)tmpp,
+ (unsigned long)tmpp + sz);
+ iounmap(tmpp);
+
+ return 0;
+}
+
+static int qman_fqd(struct reserved_mem *rmem)
+{
+ fqd_a = rmem->base;
+ fqd_sz = rmem->size;
+
+ WARN_ON(!(fqd_a && fqd_sz));
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);
+
+static int qman_pfdr(struct reserved_mem *rmem)
+{
+ pfdr_a = rmem->base;
+ pfdr_sz = rmem->size;
+
+ WARN_ON(!(pfdr_a && pfdr_sz));
+
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr);
+
+#endif
+
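+/* each FQD is 64 bytes, so the FQD area provides fqd_sz / 64 frame queues */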
+static unsigned int qm_get_fqid_maxcnt(void)
+{
+ return fqd_sz / 64;
+}
+
+static void log_edata_bits(struct device *dev, u32 bit_count)
+{
+ u32 i, j, mask = 0xffffffff;
+
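+ /*
+ * The captured data is right-justified across EDATA0..15: only the
+ * last ceil(bit_count / 32) registers are valid, and the first of
+ * those may be a partial word, hence the mask below.
+ */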
+ dev_warn(dev, "ErrInt, EDATA:\n");
+ i = bit_count / 32;
+ if (bit_count % 32) {
+ i++;
+ mask = ~(mask << bit_count % 32);
+ }
+ j = 16 - i;
+ dev_warn(dev, " 0x%08x\n", qm_ccsr_in(REG_EDATA(j)) & mask);
+ j++;
+ for (; j < 16; j++)
+ dev_warn(dev, " 0x%08x\n", qm_ccsr_in(REG_EDATA(j)));
+}
+
+static void log_additional_error_info(struct device *dev, u32 isr_val,
+ u32 ecsr_val)
+{
+ struct qm_ecir ecir_val;
+ struct qm_eadr eadr_val;
+ int memid;
+
+ ecir_val.info = qm_ccsr_in(REG_ECIR);
+ /* Is portal info valid */
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
+ struct qm_ecir2 ecir2_val;
+
+ ecir2_val.info = qm_ccsr_in(REG_ECIR2);
+ if (ecsr_val & PORTAL_ECSR_ERR) {
+ dev_warn(dev, "ErrInt: %s id %d\n",
+ qm_ecir2_is_dcp(&ecir2_val) ? "DCP" : "SWP",
+ qm_ecir2_get_pnum(&ecir2_val));
+ }
+ if (ecsr_val & (FQID_ECSR_ERR | QM_EIRQ_IECE))
+ dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n",
+ qm_ecir_get_fqid(&ecir_val));
+
+ if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
+ eadr_val.info = qm_ccsr_in(REG_EADR);
+ memid = qm_eadr_v3_get_memid(&eadr_val);
+ dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n",
+ error_mdata[memid].txt,
+ error_mdata[memid].addr_mask
+ & qm_eadr_v3_get_eadr(&eadr_val));
+ log_edata_bits(dev, error_mdata[memid].bits);
+ }
+ } else {
+ if (ecsr_val & PORTAL_ECSR_ERR) {
+ dev_warn(dev, "ErrInt: %s id %d\n",
+ qm_ecir_is_dcp(&ecir_val) ? "DCP" : "SWP",
+ qm_ecir_get_pnum(&ecir_val));
+ }
+ if (ecsr_val & FQID_ECSR_ERR)
+ dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n",
+ qm_ecir_get_fqid(&ecir_val));
+
+ if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
+ eadr_val.info = qm_ccsr_in(REG_EADR);
+ memid = qm_eadr_get_memid(&eadr_val);
+ dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n",
+ error_mdata[memid].txt,
+ error_mdata[memid].addr_mask
+ & qm_eadr_get_eadr(&eadr_val));
+ log_edata_bits(dev, error_mdata[memid].bits);
+ }
+ }
+}
+
+static irqreturn_t qman_isr(int irq, void *ptr)
+{
+ u32 isr_val, ier_val, ecsr_val, isr_mask, i;
+ struct device *dev = ptr;
+
+ ier_val = qm_ccsr_in(REG_ERR_IER);
+ isr_val = qm_ccsr_in(REG_ERR_ISR);
+ ecsr_val = qm_ccsr_in(REG_ECSR);
+ isr_mask = isr_val & ier_val;
+
+ if (!isr_mask)
+ return IRQ_NONE;
+
+ for (i = 0; i < ARRAY_SIZE(qman_hwerr_txts); i++) {
+ if (qman_hwerr_txts[i].mask & isr_mask) {
+ dev_err_ratelimited(dev, "ErrInt: %s\n",
+ qman_hwerr_txts[i].txt);
+ if (qman_hwerr_txts[i].mask & ecsr_val) {
+ log_additional_error_info(dev, isr_mask,
+ ecsr_val);
+ /* Re-arm error capture registers */
+ qm_ccsr_out(REG_ECSR, ecsr_val);
+ }
+ if (qman_hwerr_txts[i].mask & QMAN_ERRS_TO_DISABLE) {
+ dev_dbg(dev, "Disabling error 0x%x\n",
+ qman_hwerr_txts[i].mask);
+ ier_val &= ~qman_hwerr_txts[i].mask;
+ qm_ccsr_out(REG_ERR_IER, ier_val);
+ }
+ }
+ }
+ qm_ccsr_out(REG_ERR_ISR, isr_val);
+
+ return IRQ_HANDLED;
+}
+
+static int qman_init_ccsr(struct device *dev)
+{
+ int i, err;
+
+ /* FQD memory */
+ qm_set_memory(qm_memory_fqd, fqd_a, fqd_sz);
+ /* PFDR memory */
+ qm_set_memory(qm_memory_pfdr, pfdr_a, pfdr_sz);
+ err = qm_init_pfdr(dev, 8, pfdr_sz / 64 - 8);
+ if (err)
+ return err;
+ /* thresholds */
+ qm_set_pfdr_threshold(512, 64);
+ qm_set_sfdr_threshold(128);
+ /* clear stale PEBI bit from interrupt status register */
+ qm_ccsr_out(REG_ERR_ISR, QM_EIRQ_PEBI);
+ /* corenet initiator settings */
+ qm_set_corenet_initiator();
+ /* HID settings */
+ qm_set_hid();
+ /* Set scheduling weights to defaults */
+ for (i = qm_wq_first; i <= qm_wq_last; i++)
+ qm_set_wq_scheduling(i, 0, 0, 0, 0, 0, 0, 0);
+ /* We are not prepared to accept ERNs for hardware enqueues */
+ qm_set_dc(qm_dc_portal_fman0, 1, 0);
+ qm_set_dc(qm_dc_portal_fman1, 1, 0);
+ return 0;
+}
+
+#define LIO_CFG_LIODN_MASK 0x0fff0000
+void qman_liodn_fixup(u16 channel)
+{
+ static int done;
+ static u32 liodn_offset;
+ u32 before, after;
+ int idx = channel - QM_CHANNEL_SWPORTAL0;
+
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ before = qm_ccsr_in(REG_REV3_QCSP_LIO_CFG(idx));
+ else
+ before = qm_ccsr_in(REG_QCSP_LIO_CFG(idx));
+ if (!done) {
+ liodn_offset = before & LIO_CFG_LIODN_MASK;
+ done = 1;
+ return;
+ }
+ after = (before & (~LIO_CFG_LIODN_MASK)) | liodn_offset;
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ qm_ccsr_out(REG_REV3_QCSP_LIO_CFG(idx), after);
+ else
+ qm_ccsr_out(REG_QCSP_LIO_CFG(idx), after);
+}
+
+#define IO_CFG_SDEST_MASK 0x00ff0000
+void qman_set_sdest(u16 channel, unsigned int cpu_idx)
+{
+ int idx = channel - QM_CHANNEL_SWPORTAL0;
+ u32 before, after;
+
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
+ before = qm_ccsr_in(REG_REV3_QCSP_IO_CFG(idx));
+ /* Each pair of vCPUs shares the same SRQ (SDEST) */
+ cpu_idx /= 2;
+ after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
+ qm_ccsr_out(REG_REV3_QCSP_IO_CFG(idx), after);
+ } else {
+ before = qm_ccsr_in(REG_QCSP_IO_CFG(idx));
+ after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
+ qm_ccsr_out(REG_QCSP_IO_CFG(idx), after);
+ }
+}
+
+static int qman_resource_init(struct device *dev)
+{
+ int pool_chan_num, cgrid_num;
+ int ret, i;
+
+ switch (qman_ip_rev >> 8) {
+ case 1:
+ pool_chan_num = 15;
+ cgrid_num = 256;
+ break;
+ case 2:
+ pool_chan_num = 3;
+ cgrid_num = 64;
+ break;
+ case 3:
+ pool_chan_num = 15;
+ cgrid_num = 256;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ ret = gen_pool_add(qm_qpalloc, qm_channel_pool1 | DPAA_GENALLOC_OFF,
+ pool_chan_num, -1);
+ if (ret) {
+ dev_err(dev, "Failed to seed pool channels (%d)\n", ret);
+ return ret;
+ }
+
+ ret = gen_pool_add(qm_cgralloc, DPAA_GENALLOC_OFF, cgrid_num, -1);
+ if (ret) {
+ dev_err(dev, "Failed to seed CGRID range (%d)\n", ret);
+ return ret;
+ }
+
+ /* parse pool channels into the SDQCR mask */
+ for (i = 0; i < cgrid_num; i++)
+ qm_pools_sdqcr |= QM_SDQCR_CHANNELS_POOL_CONV(i);
+
+ ret = gen_pool_add(qm_fqalloc, QM_FQID_RANGE_START | DPAA_GENALLOC_OFF,
+ qm_get_fqid_maxcnt() - QM_FQID_RANGE_START, -1);
+ if (ret) {
+ dev_err(dev, "Failed to seed FQID range (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
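+/*
+ * Tri-state probe status: 1 once fsl_qman_probe() has succeeded, -1 if it
+ * ran and failed, 0 if it has not run yet (consumers such as the portal
+ * driver treat 0 as -EPROBE_DEFER).
+ */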
+int qman_is_probed(void)
+{
+ return __qman_probed;
+}
+EXPORT_SYMBOL_GPL(qman_is_probed);
+
+static int fsl_qman_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct resource *res;
+ int ret, err_irq;
+ u16 id;
+ u8 major, minor;
+
+ __qman_probed = -1;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "Can't get %pOF property 'IORESOURCE_MEM'\n",
+ node);
+ return -ENXIO;
+ }
+ qm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res));
+ if (!qm_ccsr_start)
+ return -ENXIO;
+
+ qm_get_version(&id, &major, &minor);
+ if (major == 1 && minor == 0) {
+ dev_err(dev, "Rev1.0 on P4080 rev1 is not supported!\n");
+ return -ENODEV;
+ } else if (major == 1 && minor == 1)
+ qman_ip_rev = QMAN_REV11;
+ else if (major == 1 && minor == 2)
+ qman_ip_rev = QMAN_REV12;
+ else if (major == 2 && minor == 0)
+ qman_ip_rev = QMAN_REV20;
+ else if (major == 3 && minor == 0)
+ qman_ip_rev = QMAN_REV30;
+ else if (major == 3 && minor == 1)
+ qman_ip_rev = QMAN_REV31;
+ else if (major == 3 && minor == 2)
+ qman_ip_rev = QMAN_REV32;
+ else {
+ dev_err(dev, "Unknown QMan version\n");
+ return -ENODEV;
+ }
+
+ if ((qman_ip_rev & 0xff00) >= QMAN_REV30) {
+ qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
+ qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
+ }
+
+ if (fqd_a) {
+#ifdef CONFIG_PPC
+ /*
+ * For PPC backward DT compatibility,
+ * FQD memory MUST be zeroed by software
+ */
+ zero_priv_mem(fqd_a, fqd_sz);
+#else
+ WARN(1, "Unexpected architecture using non shared-dma-mem reservations");
+#endif
+ } else {
+ /*
+ * The order of the memory regions is assumed to be FQD
+ * followed by PFDR; to ensure allocations come from the
+ * correct regions, the driver initializes and then
+ * allocates each piece in order.
+ */
+ ret = qbman_init_private_mem(dev, 0, &fqd_a, &fqd_sz);
+ if (ret) {
+ dev_err(dev, "qbman_init_private_mem() for FQD failed 0x%x\n",
+ ret);
+ return -ENODEV;
+ }
+ }
+ dev_dbg(dev, "Allocated FQD 0x%llx 0x%zx\n", fqd_a, fqd_sz);
+
+ if (!pfdr_a) {
+ /* Setup PFDR memory */
+ ret = qbman_init_private_mem(dev, 1, &pfdr_a, &pfdr_sz);
+ if (ret) {
+ dev_err(dev, "qbman_init_private_mem() for PFDR failed 0x%x\n",
+ ret);
+ return -ENODEV;
+ }
+ }
+ dev_dbg(dev, "Allocated PFDR 0x%llx 0x%zx\n", pfdr_a, pfdr_sz);
+
+ ret = qman_init_ccsr(dev);
+ if (ret) {
+ dev_err(dev, "CCSR setup failed\n");
+ return ret;
+ }
+
+ err_irq = platform_get_irq(pdev, 0);
+ if (err_irq <= 0) {
+ dev_info(dev, "Can't get %pOF property 'interrupts'\n",
+ node);
+ return -ENODEV;
+ }
+ ret = devm_request_irq(dev, err_irq, qman_isr, IRQF_SHARED, "qman-err",
+ dev);
+ if (ret) {
+ dev_err(dev, "devm_request_irq() failed %d for '%pOF'\n",
+ ret, node);
+ return ret;
+ }
+
+ /*
+ * Write-to-clear any stale bits (e.g. starvation being asserted prior
+ * to resource allocation during driver init).
+ */
+ qm_ccsr_out(REG_ERR_ISR, 0xffffffff);
+ /* Enable Error Interrupts */
+ qm_ccsr_out(REG_ERR_IER, 0xffffffff);
+
+ qm_fqalloc = devm_gen_pool_create(dev, 0, -1, "qman-fqalloc");
+ if (IS_ERR(qm_fqalloc)) {
+ ret = PTR_ERR(qm_fqalloc);
+ dev_err(dev, "qman-fqalloc pool init failed (%d)\n", ret);
+ return ret;
+ }
+
+ qm_qpalloc = devm_gen_pool_create(dev, 0, -1, "qman-qpalloc");
+ if (IS_ERR(qm_qpalloc)) {
+ ret = PTR_ERR(qm_qpalloc);
+ dev_err(dev, "qman-qpalloc pool init failed (%d)\n", ret);
+ return ret;
+ }
+
+ qm_cgralloc = devm_gen_pool_create(dev, 0, -1, "qman-cgralloc");
+ if (IS_ERR(qm_cgralloc)) {
+ ret = PTR_ERR(qm_cgralloc);
+ dev_err(dev, "qman-cgralloc pool init failed (%d)\n", ret);
+ return ret;
+ }
+
+ ret = qman_resource_init(dev);
+ if (ret)
+ return ret;
+
+ ret = qman_alloc_fq_table(qm_get_fqid_maxcnt());
+ if (ret)
+ return ret;
+
+ ret = qman_wq_alloc();
+ if (ret)
+ return ret;
+
+ __qman_probed = 1;
+
+ return 0;
+}
+
+static const struct of_device_id fsl_qman_ids[] = {
+ {
+ .compatible = "fsl,qman",
+ },
+ {}
+};
+
+static struct platform_driver fsl_qman_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = fsl_qman_ids,
+ .suppress_bind_attrs = true,
+ },
+ .probe = fsl_qman_probe,
+};
+
+builtin_platform_driver(fsl_qman_driver);
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
new file mode 100644
index 000000000..3e9391d11
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -0,0 +1,361 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+struct qman_portal *qman_dma_portal;
+EXPORT_SYMBOL(qman_dma_portal);
+
+/* Enable portal interrupts (as opposed to polling mode) */
+#define CONFIG_FSL_DPA_PIRQ_SLOW 1
+#define CONFIG_FSL_DPA_PIRQ_FAST 1
+
+static struct cpumask portal_cpus;
+/* protect qman global registers and global data shared among portals */
+static DEFINE_SPINLOCK(qman_lock);
+
+static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
+{
+#ifdef CONFIG_FSL_PAMU
+ struct device *dev = pcfg->dev;
+ int window_count = 1;
+ struct iommu_domain_geometry geom_attr;
+ struct pamu_stash_attribute stash_attr;
+ int ret;
+
+ pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
+ if (!pcfg->iommu_domain) {
+ dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__);
+ goto no_iommu;
+ }
+ geom_attr.aperture_start = 0;
+ geom_attr.aperture_end =
+ ((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1;
+ geom_attr.force_aperture = true;
+ ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY,
+ &geom_attr);
+ if (ret < 0) {
+ dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
+ ret);
+ goto out_domain_free;
+ }
+ ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS,
+ &window_count);
+ if (ret < 0) {
+ dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
+ ret);
+ goto out_domain_free;
+ }
+ stash_attr.cpu = cpu;
+ stash_attr.cache = PAMU_ATTR_CACHE_L1;
+ ret = iommu_domain_set_attr(pcfg->iommu_domain,
+ DOMAIN_ATTR_FSL_PAMU_STASH,
+ &stash_attr);
+ if (ret < 0) {
+ dev_err(dev, "%s(): iommu_domain_set_attr() = %d",
+ __func__, ret);
+ goto out_domain_free;
+ }
+ ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36,
+ IOMMU_READ | IOMMU_WRITE);
+ if (ret < 0) {
+ dev_err(dev, "%s(): iommu_domain_window_enable() = %d",
+ __func__, ret);
+ goto out_domain_free;
+ }
+ ret = iommu_attach_device(pcfg->iommu_domain, dev);
+ if (ret < 0) {
+ dev_err(dev, "%s(): iommu_device_attach() = %d", __func__,
+ ret);
+ goto out_domain_free;
+ }
+ ret = iommu_domain_set_attr(pcfg->iommu_domain,
+ DOMAIN_ATTR_FSL_PAMU_ENABLE,
+ &window_count);
+ if (ret < 0) {
+ dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
+ ret);
+ goto out_detach_device;
+ }
+
+no_iommu:
+#endif
+ qman_set_sdest(pcfg->channel, cpu);
+
+ return;
+
+#ifdef CONFIG_FSL_PAMU
+out_detach_device:
+ iommu_detach_device(pcfg->iommu_domain, NULL);
+out_domain_free:
+ iommu_domain_free(pcfg->iommu_domain);
+ pcfg->iommu_domain = NULL;
+#endif
+}
+
+static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
+{
+ struct qman_portal *p;
+ u32 irq_sources = 0;
+
+ /* We need the same LIODN offset for all portals */
+ qman_liodn_fixup(pcfg->channel);
+
+ pcfg->iommu_domain = NULL;
+ portal_set_cpu(pcfg, pcfg->cpu);
+
+ p = qman_create_affine_portal(pcfg, NULL);
+ if (!p) {
+ dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n",
+ __func__, pcfg->cpu);
+ return NULL;
+ }
+
+ /* Determine what should be interrupt-vs-poll driven */
+#ifdef CONFIG_FSL_DPA_PIRQ_SLOW
+ irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI |
+ QM_PIRQ_CSCI;
+#endif
+#ifdef CONFIG_FSL_DPA_PIRQ_FAST
+ irq_sources |= QM_PIRQ_DQRI;
+#endif
+ qman_p_irqsource_add(p, irq_sources);
+
+ spin_lock(&qman_lock);
+ if (cpumask_equal(&portal_cpus, cpu_possible_mask)) {
+ /* all assigned portals are initialized now */
+ qman_init_cgr_all();
+ }
+
+ if (!qman_dma_portal)
+ qman_dma_portal = p;
+
+ spin_unlock(&qman_lock);
+
+ dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
+
+ return p;
+}
+
+static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
+ unsigned int cpu)
+{
+#ifdef CONFIG_FSL_PAMU /* TODO */
+ struct pamu_stash_attribute stash_attr;
+ int ret;
+
+ if (pcfg->iommu_domain) {
+ stash_attr.cpu = cpu;
+ stash_attr.cache = PAMU_ATTR_CACHE_L1;
+ ret = iommu_domain_set_attr(pcfg->iommu_domain,
+ DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr);
+ if (ret < 0) {
+ dev_err(pcfg->dev,
+ "Failed to update pamu stash setting\n");
+ return;
+ }
+ }
+#endif
+ qman_set_sdest(pcfg->channel, cpu);
+}
+
+static int qman_offline_cpu(unsigned int cpu)
+{
+ struct qman_portal *p;
+ const struct qm_portal_config *pcfg;
+
+ p = affine_portals[cpu];
+ if (p) {
+ pcfg = qman_get_qm_portal_config(p);
+ if (pcfg) {
+ irq_set_affinity(pcfg->irq, cpumask_of(0));
+ qman_portal_update_sdest(pcfg, 0);
+ }
+ }
+ return 0;
+}
+
+static int qman_online_cpu(unsigned int cpu)
+{
+ struct qman_portal *p;
+ const struct qm_portal_config *pcfg;
+
+ p = affine_portals[cpu];
+ if (p) {
+ pcfg = qman_get_qm_portal_config(p);
+ if (pcfg) {
+ irq_set_affinity(pcfg->irq, cpumask_of(cpu));
+ qman_portal_update_sdest(pcfg, cpu);
+ }
+ }
+ return 0;
+}
+
+static int qman_portal_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct qm_portal_config *pcfg;
+ struct resource *addr_phys[2];
+ int irq, cpu, err;
+ u32 val;
+
+ err = qman_is_probed();
+ if (!err)
+ return -EPROBE_DEFER;
+ if (err < 0) {
+ dev_err(&pdev->dev, "failing probe due to qman probe error\n");
+ return -ENODEV;
+ }
+
+ pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
+ if (!pcfg)
+ return -ENOMEM;
+
+ pcfg->dev = dev;
+
+ addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM,
+ DPAA_PORTAL_CE);
+ if (!addr_phys[0]) {
+ dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node);
+ return -ENXIO;
+ }
+
+ addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
+ DPAA_PORTAL_CI);
+ if (!addr_phys[1]) {
+ dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node);
+ return -ENXIO;
+ }
+
+ err = of_property_read_u32(node, "cell-index", &val);
+ if (err) {
+ dev_err(dev, "Can't get %pOF property 'cell-index'\n", node);
+ return err;
+ }
+ pcfg->channel = val;
+ pcfg->cpu = -1;
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(dev, "Can't get %pOF IRQ\n", node);
+ return -ENXIO;
+ }
+ pcfg->irq = irq;
+
+ pcfg->addr_virt_ce = memremap(addr_phys[0]->start,
+ resource_size(addr_phys[0]),
+ QBMAN_MEMREMAP_ATTR);
+ if (!pcfg->addr_virt_ce) {
+ dev_err(dev, "memremap::CE failed\n");
+ goto err_ioremap1;
+ }
+
+ pcfg->addr_virt_ci = ioremap(addr_phys[1]->start,
+ resource_size(addr_phys[1]));
+ if (!pcfg->addr_virt_ci) {
+ dev_err(dev, "ioremap::CI failed\n");
+ goto err_ioremap2;
+ }
+
+ pcfg->pools = qm_get_pools_sdqcr();
+
+ spin_lock(&qman_lock);
+ cpu = cpumask_next_zero(-1, &portal_cpus);
+ if (cpu >= nr_cpu_ids) {
+ /* unassigned portal, skip init */
+ spin_unlock(&qman_lock);
+ return 0;
+ }
+
+ cpumask_set_cpu(cpu, &portal_cpus);
+ spin_unlock(&qman_lock);
+ pcfg->cpu = cpu;
+
+ if (dma_set_mask(dev, DMA_BIT_MASK(40))) {
+ dev_err(dev, "dma_set_mask() failed\n");
+ goto err_portal_init;
+ }
+
+ if (!init_pcfg(pcfg)) {
+ dev_err(dev, "portal init failed\n");
+ goto err_portal_init;
+ }
+
+ /* clear irq affinity if assigned cpu is offline */
+ if (!cpu_online(cpu))
+ qman_offline_cpu(cpu);
+
+ return 0;
+
+err_portal_init:
+ iounmap(pcfg->addr_virt_ci);
+err_ioremap2:
+ memunmap(pcfg->addr_virt_ce);
+err_ioremap1:
+ return -ENXIO;
+}
+
+static const struct of_device_id qman_portal_ids[] = {
+ {
+ .compatible = "fsl,qman-portal",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qman_portal_ids);
+
+static struct platform_driver qman_portal_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = qman_portal_ids,
+ },
+ .probe = qman_portal_probe,
+};
+
+static int __init qman_portal_driver_register(struct platform_driver *drv)
+{
+ int ret;
+
+ ret = platform_driver_register(drv);
+ if (ret < 0)
+ return ret;
+
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "soc/qman_portal:online",
+ qman_online_cpu, qman_offline_cpu);
+ if (ret < 0) {
+ pr_err("qman: failed to register hotplug callbacks.\n");
+ platform_driver_unregister(drv);
+ return ret;
+ }
+ return 0;
+}
+
+module_driver(qman_portal_driver,
+ qman_portal_driver_register, platform_driver_unregister);
diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h
new file mode 100644
index 000000000..75a8f905f
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_priv.h
@@ -0,0 +1,267 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "dpaa_sys.h"
+
+#include <soc/fsl/qman.h>
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+
+#if defined(CONFIG_FSL_PAMU)
+#include <asm/fsl_pamu_stash.h>
+#endif
+
+struct qm_mcr_querywq {
+ u8 verb;
+ u8 result;
+ u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */
+ u8 __reserved[28];
+ u32 wq_len[8];
+} __packed;
+
+static inline u16 qm_mcr_querywq_get_chan(const struct qm_mcr_querywq *wq)
+{
+ return wq->channel_wq >> 3;
+}
+
+struct __qm_mcr_querycongestion {
+ u32 state[8];
+};
+
+/* "Query Congestion Group State" */
+struct qm_mcr_querycongestion {
+ u8 verb;
+ u8 result;
+ u8 __reserved[30];
+ /* Access this struct using qman_cgrs_get() */
+ struct __qm_mcr_querycongestion state;
+} __packed;
+
+/* "Query CGR" */
+struct qm_mcr_querycgr {
+ u8 verb;
+ u8 result;
+ u16 __reserved1;
+ struct __qm_mc_cgr cgr; /* CGR fields */
+ u8 __reserved2[6];
+ u8 i_bcnt_hi; /* high 8-bits of 40-bit "Instant" */
+ __be32 i_bcnt_lo; /* low 32-bits of 40-bit */
+ u8 __reserved3[3];
+ u8 a_bcnt_hi; /* high 8-bits of 40-bit "Average" */
+ __be32 a_bcnt_lo; /* low 32-bits of 40-bit */
+ __be32 cscn_targ_swp[4];
+} __packed;
+
+static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q)
+{
+ return ((u64)q->i_bcnt_hi << 32) | be32_to_cpu(q->i_bcnt_lo);
+}
+static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
+{
+ return ((u64)q->a_bcnt_hi << 32) | be32_to_cpu(q->a_bcnt_lo);
+}
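+
+/*
+ * e.g. with i_bcnt_hi = 0x01 and i_bcnt_lo = 0x23456789, the instantaneous
+ * byte count reconstructs to 0x0123456789 (a 40-bit value split 8/32).
+ */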
+
+/* Congestion Groups */
+
+/*
+ * This wrapper represents a bit-array for the state of the 256 QMan congestion
+ * groups. It is also used as a *mask* for congestion groups, e.g. so we can
+ * ignore those that don't concern us. We harness the structure and accessor
+ * details already used in the management command to query congestion groups.
+ */
+#define CGR_BITS_PER_WORD 5
+#define CGR_WORD(x) ((x) >> CGR_BITS_PER_WORD)
+#define CGR_BIT(x) (BIT(31) >> ((x) & 0x1f))
+#define CGR_NUM (sizeof(struct __qm_mcr_querycongestion) << 3)
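+
+/*
+ * Worked example: congestion group 40 lives in state word CGR_WORD(40) = 1
+ * under mask CGR_BIT(40) = BIT(31) >> 8 = 0x00800000, i.e. bits are
+ * numbered MSB-first within each 32-bit word.
+ */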
+
+struct qman_cgrs {
+ struct __qm_mcr_querycongestion q;
+};
+
+static inline void qman_cgrs_init(struct qman_cgrs *c)
+{
+ memset(c, 0, sizeof(*c));
+}
+
+static inline void qman_cgrs_fill(struct qman_cgrs *c)
+{
+ memset(c, 0xff, sizeof(*c));
+}
+
+static inline int qman_cgrs_get(struct qman_cgrs *c, u8 cgr)
+{
+ return c->q.state[CGR_WORD(cgr)] & CGR_BIT(cgr);
+}
+
+static inline void qman_cgrs_cp(struct qman_cgrs *dest,
+ const struct qman_cgrs *src)
+{
+ *dest = *src;
+}
+
+static inline void qman_cgrs_and(struct qman_cgrs *dest,
+ const struct qman_cgrs *a, const struct qman_cgrs *b)
+{
+ int i;
+ u32 *_d = dest->q.state;
+ const u32 *_a = a->q.state;
+ const u32 *_b = b->q.state;
+
+ for (ret = 0; ret < 8; ret++)
+ *_d++ = *_a++ & *_b++;
+}
+
+static inline void qman_cgrs_xor(struct qman_cgrs *dest,
+ const struct qman_cgrs *a, const struct qman_cgrs *b)
+{
+ int i;
+ u32 *_d = dest->q.state;
+ const u32 *_a = a->q.state;
+ const u32 *_b = b->q.state;
+
+ for (ret = 0; ret < 8; ret++)
+ *_d++ = *_a++ ^ *_b++;
+}
+
+void qman_init_cgr_all(void);
+
+struct qm_portal_config {
+ /* Portal addresses */
+ void *addr_virt_ce;
+ void __iomem *addr_virt_ci;
+ struct device *dev;
+ struct iommu_domain *iommu_domain;
+ /* Allow these to be joined in lists */
+ struct list_head list;
+ /* User-visible portal configuration settings */
+ /* portal is affined to this cpu */
+ int cpu;
+ /* portal interrupt line */
+ int irq;
+ /*
+ * the portal's dedicated channel id, used when initialising
+ * frame queues to target this portal when scheduled
+ */
+ u16 channel;
+ /*
+ * mask of pool channels this portal has dequeue access to
+ * (using QM_SDQCR_CHANNELS_POOL(n) for the bitmask)
+ */
+ u32 pools;
+};
+
+/* Revision info (for errata and feature handling) */
+#define QMAN_REV11 0x0101
+#define QMAN_REV12 0x0102
+#define QMAN_REV20 0x0200
+#define QMAN_REV30 0x0300
+#define QMAN_REV31 0x0301
+#define QMAN_REV32 0x0302
+extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
+
+#define QM_FQID_RANGE_START 1 /* FQID 0 reserved for internal use */
+extern struct gen_pool *qm_fqalloc; /* FQID allocator */
+extern struct gen_pool *qm_qpalloc; /* pool-channel allocator */
+extern struct gen_pool *qm_cgralloc; /* CGR ID allocator */
+u32 qm_get_pools_sdqcr(void);
+
+int qman_wq_alloc(void);
+void qman_liodn_fixup(u16 channel);
+void qman_set_sdest(u16 channel, unsigned int cpu_idx);
+
+struct qman_portal *qman_create_affine_portal(
+ const struct qm_portal_config *config,
+ const struct qman_cgrs *cgrs);
+const struct qm_portal_config *qman_destroy_affine_portal(void);
+
+/*
+ * qman_query_fq - Queries FQD fields (via h/w query command)
+ * @fq: the frame queue object to be queried
+ * @fqd: storage for the queried FQD fields
+ */
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
+
+int qman_alloc_fq_table(u32 num_fqids);
+
+/* QMan s/w corenet portal, low-level i/face */
+
+/*
+ * For qm_dqrr_sdqcr_set(); Choose one SOURCE. Choose one COUNT. Choose one
+ * dequeue TYPE. Choose TOKEN (8-bit).
+ * If SOURCE == CHANNELS,
+ * Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n).
+ * You can choose DEDICATED_PRECEDENCE if the portal channel should have
+ * priority.
+ * If SOURCE == SPECIFICWQ,
+ * Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
+ * channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
+ * work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
+ * same value.
+ */
+#define QM_SDQCR_SOURCE_CHANNELS 0x0
+#define QM_SDQCR_SOURCE_SPECIFICWQ 0x40000000
+#define QM_SDQCR_COUNT_EXACT1 0x0
+#define QM_SDQCR_COUNT_UPTO3 0x20000000
+#define QM_SDQCR_DEDICATED_PRECEDENCE 0x10000000
+#define QM_SDQCR_TYPE_MASK 0x03000000
+#define QM_SDQCR_TYPE_NULL 0x0
+#define QM_SDQCR_TYPE_PRIO_QOS 0x01000000
+#define QM_SDQCR_TYPE_ACTIVE_QOS 0x02000000
+#define QM_SDQCR_TYPE_ACTIVE 0x03000000
+#define QM_SDQCR_TOKEN_MASK 0x00ff0000
+#define QM_SDQCR_TOKEN_SET(v) (((v) & 0xff) << 16)
+#define QM_SDQCR_TOKEN_GET(v) (((v) >> 16) & 0xff)
+#define QM_SDQCR_CHANNELS_DEDICATED 0x00008000
+#define QM_SDQCR_SPECIFICWQ_MASK 0x000000f7
+#define QM_SDQCR_SPECIFICWQ_DEDICATED 0x00000000
+#define QM_SDQCR_SPECIFICWQ_POOL(n) ((n) << 4)
+#define QM_SDQCR_SPECIFICWQ_WQ(n) (n)
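+
+/*
+ * Illustrative composition (it mirrors PORTAL_SDQCR in qman_test_api.c):
+ * dequeue from the dedicated channel and pool channel 2 with priority/QoS
+ * precedence, tagging the resulting DQRR entries with token 0x98:
+ *
+ *	QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_TYPE_PRIO_QOS |
+ *	QM_SDQCR_TOKEN_SET(0x98) | QM_SDQCR_CHANNELS_DEDICATED |
+ *	QM_SDQCR_CHANNELS_POOL(2)
+ */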
+
+/* For qm_dqrr_vdqcr_set(): use FQID(n) to fill in the frame queue ID */
+#define QM_VDQCR_FQID_MASK 0x00ffffff
+#define QM_VDQCR_FQID(n) ((n) & QM_VDQCR_FQID_MASK)
+
+/*
+ * Used by all portal interrupt registers except 'inhibit'
+ * Channels with frame availability
+ */
+#define QM_PIRQ_DQAVAIL 0x0000ffff
+
+/* The DQAVAIL interrupt fields break down into these bits; */
+#define QM_DQAVAIL_PORTAL 0x8000 /* Portal channel */
+#define QM_DQAVAIL_POOL(n) (0x8000 >> (n)) /* Pool channel, n==[1..15] */
+#define QM_DQAVAIL_MASK 0xffff
+/* This mask contains all the "irqsource" bits visible to API users */
+#define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI)
+
+extern struct qman_portal *affine_portals[NR_CPUS];
+extern struct qman_portal *qman_dma_portal;
+const struct qm_portal_config *qman_get_qm_portal_config(
+ struct qman_portal *portal);
diff --git a/drivers/soc/fsl/qbman/qman_test.c b/drivers/soc/fsl/qbman/qman_test.c
new file mode 100644
index 000000000..18f7f0202
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_test.c
@@ -0,0 +1,62 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_test.h"
+
+MODULE_AUTHOR("Geoff Thorpe");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("QMan testing");
+
+static int test_init(void)
+{
+ int loop = 1;
+ int err = 0;
+
+ while (loop--) {
+#ifdef CONFIG_FSL_QMAN_TEST_STASH
+ err = qman_test_stash();
+ if (err)
+ break;
+#endif
+#ifdef CONFIG_FSL_QMAN_TEST_API
+ err = qman_test_api();
+ if (err)
+ break;
+#endif
+ }
+ return err;
+}
+
+static void test_exit(void)
+{
+}
+
+module_init(test_init);
+module_exit(test_exit);
diff --git a/drivers/soc/fsl/qbman/qman_test.h b/drivers/soc/fsl/qbman/qman_test.h
new file mode 100644
index 000000000..41bdbc48c
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_test.h
@@ -0,0 +1,34 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+int qman_test_stash(void);
+int qman_test_api(void);
diff --git a/drivers/soc/fsl/qbman/qman_test_api.c b/drivers/soc/fsl/qbman/qman_test_api.c
new file mode 100644
index 000000000..2895d062c
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_test_api.c
@@ -0,0 +1,245 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_test.h"
+
+#define CGR_ID 27
+#define POOL_ID 2
+#define FQ_FLAGS QMAN_FQ_FLAG_DYNAMIC_FQID
+#define NUM_ENQUEUES 10
+#define NUM_PARTIAL 4
+#define PORTAL_SDQCR (QM_SDQCR_SOURCE_CHANNELS | \
+ QM_SDQCR_TYPE_PRIO_QOS | \
+ QM_SDQCR_TOKEN_SET(0x98) | \
+ QM_SDQCR_CHANNELS_DEDICATED | \
+ QM_SDQCR_CHANNELS_POOL(POOL_ID))
+#define PORTAL_OPAQUE ((void *)0xf00dbeef)
+#define VDQCR_FLAGS (QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH)
+
+static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *,
+ struct qman_fq *,
+ const struct qm_dqrr_entry *);
+static void cb_ern(struct qman_portal *, struct qman_fq *,
+ const union qm_mr_entry *);
+static void cb_fqs(struct qman_portal *, struct qman_fq *,
+ const union qm_mr_entry *);
+
+static struct qm_fd fd, fd_dq;
+static struct qman_fq fq_base = {
+ .cb.dqrr = cb_dqrr,
+ .cb.ern = cb_ern,
+ .cb.fqs = cb_fqs
+};
+static DECLARE_WAIT_QUEUE_HEAD(waitqueue);
+static int retire_complete, sdqcr_complete;
+
+/* Helpers for initialising and "incrementing" a frame descriptor */
+static void fd_init(struct qm_fd *fd)
+{
+ qm_fd_addr_set64(fd, 0xabdeadbeefLLU);
+ qm_fd_set_contig_big(fd, 0x0000ffff);
+ fd->cmd = cpu_to_be32(0xfeedf00d);
+}
+
+static void fd_inc(struct qm_fd *fd)
+{
+ u64 t = qm_fd_addr_get64(fd);
+ int z = t >> 40;
+ unsigned int len, off;
+ enum qm_fd_format fmt;
+
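+	/* shift the address left one bit; overflow above bit 39 feeds back into bit 0 */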
+ t <<= 1;
+ if (z)
+ t |= 1;
+ qm_fd_addr_set64(fd, t);
+
+ fmt = qm_fd_get_format(fd);
+ off = qm_fd_get_offset(fd);
+ len = qm_fd_get_length(fd);
+ len--;
+ qm_fd_set_param(fd, fmt, off, len);
+
+ fd->cmd = cpu_to_be32(be32_to_cpu(fd->cmd) + 1);
+}
+
+/* The only part of the 'fd' we can't memcmp() is the ppid */
+static bool fd_neq(const struct qm_fd *a, const struct qm_fd *b)
+{
+ bool neq = qm_fd_addr_get64(a) != qm_fd_addr_get64(b);
+
+ neq |= qm_fd_get_format(a) != qm_fd_get_format(b);
+ neq |= a->cfg != b->cfg;
+ neq |= a->cmd != b->cmd;
+
+ return neq;
+}
+
+/* test */
+static int do_enqueues(struct qman_fq *fq)
+{
+ unsigned int loop;
+ int err = 0;
+
+ for (loop = 0; loop < NUM_ENQUEUES; loop++) {
+ if (qman_enqueue(fq, &fd)) {
+ pr_crit("qman_enqueue() failed\n");
+ err = -EIO;
+ }
+ fd_inc(&fd);
+ }
+
+ return err;
+}
+
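+/*
+ * Exercise the full FQ lifecycle: create a parked FQ, enqueue and
+ * volatile-dequeue (till-empty, then in two partial batches), switch to
+ * scheduled dequeue, then retire, OOS and destroy the FQ.
+ */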
+int qman_test_api(void)
+{
+ unsigned int flags, frmcnt;
+ int err;
+ struct qman_fq *fq = &fq_base;
+
+ pr_info("%s(): Starting\n", __func__);
+ fd_init(&fd);
+ fd_init(&fd_dq);
+
+ /* Initialise (parked) FQ */
+ err = qman_create_fq(0, FQ_FLAGS, fq);
+ if (err) {
+ pr_crit("qman_create_fq() failed\n");
+ goto failed;
+ }
+ err = qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL);
+ if (err) {
+ pr_crit("qman_init_fq() failed\n");
+ goto failed;
+ }
+ /* Do enqueues + VDQCR, twice. (Parked FQ) */
+ err = do_enqueues(fq);
+ if (err)
+ goto failed;
+ pr_info("VDQCR (till-empty);\n");
+ frmcnt = QM_VDQCR_NUMFRAMES_TILLEMPTY;
+ err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
+ if (err) {
+ pr_crit("qman_volatile_dequeue() failed\n");
+ goto failed;
+ }
+ err = do_enqueues(fq);
+ if (err)
+ goto failed;
+ pr_info("VDQCR (%d of %d);\n", NUM_PARTIAL, NUM_ENQUEUES);
+ frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_PARTIAL);
+ err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
+ if (err) {
+ pr_crit("qman_volatile_dequeue() failed\n");
+ goto failed;
+ }
+ pr_info("VDQCR (%d of %d);\n", NUM_ENQUEUES - NUM_PARTIAL,
+ NUM_ENQUEUES);
+ frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_ENQUEUES - NUM_PARTIAL);
+ err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
+ if (err) {
+ pr_err("qman_volatile_dequeue() failed\n");
+ goto failed;
+ }
+
+ err = do_enqueues(fq);
+ if (err)
+ goto failed;
+ pr_info("scheduled dequeue (till-empty)\n");
+ err = qman_schedule_fq(fq);
+ if (err) {
+ pr_crit("qman_schedule_fq() failed\n");
+ goto failed;
+ }
+ wait_event(waitqueue, sdqcr_complete);
+
+ /* Retire and OOS the FQ */
+ err = qman_retire_fq(fq, &flags);
+ if (err < 0) {
+ pr_crit("qman_retire_fq() failed\n");
+ goto failed;
+ }
+ wait_event(waitqueue, retire_complete);
+ if (flags & QMAN_FQ_STATE_BLOCKOOS) {
+ err = -EIO;
+ pr_crit("leaking frames\n");
+ goto failed;
+ }
+ err = qman_oos_fq(fq);
+ if (err) {
+ pr_crit("qman_oos_fq() failed\n");
+ goto failed;
+ }
+ qman_destroy_fq(fq);
+ pr_info("%s(): Finished\n", __func__);
+ return 0;
+
+failed:
+ WARN_ON(1);
+ return err;
+}
+
+static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ if (WARN_ON(fd_neq(&fd_dq, &dq->fd))) {
+ pr_err("BADNESS: dequeued frame doesn't match;\n");
+ return qman_cb_dqrr_consume;
+ }
+ fd_inc(&fd_dq);
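+	/* In the scheduled-dequeue phase, wake the test once fd_dq has caught up with fd */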
+ if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_neq(&fd_dq, &fd)) {
+ sdqcr_complete = 1;
+ wake_up(&waitqueue);
+ }
+ return qman_cb_dqrr_consume;
+}
+
+static void cb_ern(struct qman_portal *p, struct qman_fq *fq,
+ const union qm_mr_entry *msg)
+{
+ pr_crit("cb_ern() unimplemented");
+ WARN_ON(1);
+}
+
+static void cb_fqs(struct qman_portal *p, struct qman_fq *fq,
+ const union qm_mr_entry *msg)
+{
+ u8 verb = (msg->verb & QM_MR_VERB_TYPE_MASK);
+
+ if ((verb != QM_MR_VERB_FQRN) && (verb != QM_MR_VERB_FQRNI)) {
+ pr_crit("unexpected FQS message");
+ WARN_ON(1);
+ return;
+ }
+ pr_info("Retirement message received\n");
+ retire_complete = 1;
+ wake_up(&waitqueue);
+}
diff --git a/drivers/soc/fsl/qbman/qman_test_stash.c b/drivers/soc/fsl/qbman/qman_test_stash.c
new file mode 100644
index 000000000..e87b65403
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_test_stash.c
@@ -0,0 +1,627 @@
+/* Copyright 2009 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_test.h"
+
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+
+/*
+ * Algorithm:
+ *
+ * Each cpu will have HP_PER_CPU "handlers" set up, each of which incorporates
+ * an rx/tx pair of FQ objects (both of which are stashed on dequeue). The
+ * organisation of FQIDs is such that the HP_PER_CPU*NUM_CPUS handlers will
+ * shuttle a "hot potato" frame around them such that every forwarding action
+ * moves it from one cpu to another. (The use of more than one handler per cpu
+ * is to allow enough handlers/FQs to truly test the significance of caching -
+ * ie. when cache-expiries are occurring.)
+ *
+ * The "hot potato" frame content will be HP_NUM_WORDS*4 bytes in size, and the
+ * first and last words of the frame data will undergo a transformation step on
+ * each forwarding action. To achieve this, each handler will be assigned a
+ * 32-bit "mixer", that is produced using a 32-bit LFSR. When a frame is
+ * received by a handler, the mixer of the expected sender is XOR'd into all
+ * words of the entire frame, which is then validated against the original
+ * values. Then, before forwarding, the entire frame is XOR'd with the mixer of
+ * the current handler. Apart from validating that the frame is taking the
+ * expected path, this also provides some quasi-realistic overheads to each
+ * forwarding action - dereferencing *all* the frame data, computation, and
+ * conditional branching. There is a "special" handler designated to act as the
+ * instigator of the test by creating and enqueuing the "hot potato" frame, and
+ * to determine when the test has completed by counting HP_LOOPS iterations.
+ *
+ * Init phases:
+ *
+ * 1. prepare each cpu's 'hp_cpu' struct using on_each_cpu(,,1) and link them
+ * into 'hp_cpu_list'. Specifically, set processor_id, allocate HP_PER_CPU
+ * handlers and link-list them (but do no other handler setup).
+ *
+ * 2. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
+ * hp_cpu's 'iterator' to point to its first handler. With each loop,
+ * allocate rx/tx FQIDs and mixer values to the hp_cpu's iterator handler
+ * and advance the iterator for the next loop. This includes a final fixup,
+ * which connects the last handler to the first (and which is why phase 2
+ * and 3 are separate).
+ *
+ * 3. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
+ * hp_cpu's 'iterator' to point to its first handler. With each loop,
+ * initialise FQ objects and advance the iterator for the next loop.
+ * Moreover, do this initialisation on the cpu it applies to so that Rx FQ
+ * initialisation targets the correct cpu.
+ */
+
+/*
+ * helper to run something on all cpus (can't use on_each_cpu(), as that invokes
+ * the fn from irq context, which is too restrictive).
+ */
+struct bstrap {
+ int (*fn)(void);
+ atomic_t started;
+};
+static int bstrap_fn(void *bs)
+{
+ struct bstrap *bstrap = bs;
+ int err;
+
+ atomic_inc(&bstrap->started);
+ err = bstrap->fn();
+ if (err)
+ return err;
+ while (!kthread_should_stop())
+ msleep(20);
+ return 0;
+}
+static int on_all_cpus(int (*fn)(void))
+{
+ int cpu;
+
+ for_each_cpu(cpu, cpu_online_mask) {
+ struct bstrap bstrap = {
+ .fn = fn,
+ .started = ATOMIC_INIT(0)
+ };
+ struct task_struct *k = kthread_create(bstrap_fn, &bstrap,
+ "hotpotato%d", cpu);
+ int ret;
+
+ if (IS_ERR(k))
+ return -ENOMEM;
+ kthread_bind(k, cpu);
+ wake_up_process(k);
+ /*
+ * If we call kthread_stop() before the "wake up" has had an
+ * effect, then the thread may exit with -EINTR without ever
+ * running the function. So poll until it's started before
+ * requesting it to stop.
+ */
+ while (!atomic_read(&bstrap.started))
+ msleep(20);
+ ret = kthread_stop(k);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+struct hp_handler {
+
+ /* The following data is stashed when 'rx' is dequeued; */
+ /* -------------- */
+ /* The Rx FQ, dequeues of which will stash the entire hp_handler */
+ struct qman_fq rx;
+ /* The Tx FQ we should forward to */
+ struct qman_fq tx;
+ /* The value we XOR post-dequeue, prior to validating */
+ u32 rx_mixer;
+ /* The value we XOR pre-enqueue, after validating */
+ u32 tx_mixer;
+ /* what the hotpotato address should be on dequeue */
+ dma_addr_t addr;
+ u32 *frame_ptr;
+
+ /* The following data isn't (necessarily) stashed on dequeue; */
+ /* -------------- */
+ u32 fqid_rx, fqid_tx;
+ /* list node for linking us into 'hp_cpu' */
+ struct list_head node;
+ /* Just to check ... */
+ unsigned int processor_id;
+} ____cacheline_aligned;
+
+struct hp_cpu {
+ /* identify the cpu we run on; */
+ unsigned int processor_id;
+ /* root node for the per-cpu list of handlers */
+ struct list_head handlers;
+ /* list node for linking us into 'hp_cpu_list' */
+ struct list_head node;
+ /*
+ * when repeatedly scanning 'hp_list', each time linking the n'th
+ * handlers together, this is used as per-cpu iterator state
+ */
+ struct hp_handler *iterator;
+};
+
+/* Each cpu has one of these */
+static DEFINE_PER_CPU(struct hp_cpu, hp_cpus);
+
+/* links together the hp_cpu structs, in first-come, first-served order. */
+static LIST_HEAD(hp_cpu_list);
+static DEFINE_SPINLOCK(hp_lock);
+
+static unsigned int hp_cpu_list_length;
+
+/* the "special" handler, that starts and terminates the test. */
+static struct hp_handler *special_handler;
+static int loop_counter;
+
+/* handlers are allocated out of this, so they're properly aligned. */
+static struct kmem_cache *hp_handler_slab;
+
+/* this is the frame data */
+static void *__frame_ptr;
+static u32 *frame_ptr;
+static dma_addr_t frame_dma;
+
+/* needed for dma_map*() */
+static const struct qm_portal_config *pcfg;
+
+/* the main function waits on this */
+static DECLARE_WAIT_QUEUE_HEAD(queue);
+
+#define HP_PER_CPU 2
+#define HP_LOOPS 8
+/* HP_NUM_WORDS * 4 = 320 bytes of frame data, spanning several cachelines */
+#define HP_NUM_WORDS 80
+/* First word of the LFSR-based frame data */
+#define HP_FIRST_WORD 0xabbaf00d
+
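+/*
+ * One step of a 32-bit Galois LFSR: shift right and, when the bit that
+ * falls out is set, XOR in the tap mask 0xd0000001.
+ */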
+static inline u32 do_lfsr(u32 prev)
+{
+ return (prev >> 1) ^ (-(prev & 1u) & 0xd0000001u);
+}
+
+static int allocate_frame_data(void)
+{
+ u32 lfsr = HP_FIRST_WORD;
+ int loop;
+
+ if (!qman_dma_portal) {
+ pr_crit("portal not available\n");
+ return -EIO;
+ }
+
+ pcfg = qman_get_qm_portal_config(qman_dma_portal);
+
+ __frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL);
+ if (!__frame_ptr)
+ return -ENOMEM;
+
+ frame_ptr = PTR_ALIGN(__frame_ptr, 64);
+ for (loop = 0; loop < HP_NUM_WORDS; loop++) {
+ frame_ptr[loop] = lfsr;
+ lfsr = do_lfsr(lfsr);
+ }
+
+ frame_dma = dma_map_single(pcfg->dev, frame_ptr, 4 * HP_NUM_WORDS,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(pcfg->dev, frame_dma)) {
+ pr_crit("dma mapping failure\n");
+ kfree(__frame_ptr);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void deallocate_frame_data(void)
+{
+ dma_unmap_single(pcfg->dev, frame_dma, 4 * HP_NUM_WORDS,
+ DMA_BIDIRECTIONAL);
+ kfree(__frame_ptr);
+}
+
+static inline int process_frame_data(struct hp_handler *handler,
+ const struct qm_fd *fd)
+{
+ u32 *p = handler->frame_ptr;
+ u32 lfsr = HP_FIRST_WORD;
+ int loop;
+
+ if (qm_fd_addr_get64(fd) != handler->addr) {
+ pr_crit("bad frame address, [%llX != %llX]\n",
+ qm_fd_addr_get64(fd), handler->addr);
+ return -EIO;
+ }
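+	/*
+	 * Undo the sender's mixer, check each word against the LFSR
+	 * reference sequence, then apply our own tx mixer for the next hop.
+	 */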
+ for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
+ *p ^= handler->rx_mixer;
+ if (*p != lfsr) {
+ pr_crit("corrupt frame data");
+ return -EIO;
+ }
+ *p ^= handler->tx_mixer;
+ lfsr = do_lfsr(lfsr);
+ }
+ return 0;
+}
+
+static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr)
+{
+ struct hp_handler *handler = (struct hp_handler *)fq;
+
+ if (process_frame_data(handler, &dqrr->fd)) {
+ WARN_ON(1);
+ goto skip;
+ }
+ if (qman_enqueue(&handler->tx, &dqrr->fd)) {
+ pr_crit("qman_enqueue() failed");
+ WARN_ON(1);
+ }
+skip:
+ return qman_cb_dqrr_consume;
+}
+
+static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr)
+{
+ struct hp_handler *handler = (struct hp_handler *)fq;
+
+ process_frame_data(handler, &dqrr->fd);
+ if (++loop_counter < HP_LOOPS) {
+ if (qman_enqueue(&handler->tx, &dqrr->fd)) {
+ pr_crit("qman_enqueue() failed");
+ WARN_ON(1);
+ goto skip;
+ }
+ } else {
+ pr_info("Received final (%dth) frame\n", loop_counter);
+ wake_up(&queue);
+ }
+skip:
+ return qman_cb_dqrr_consume;
+}
+
+static int create_per_cpu_handlers(void)
+{
+ struct hp_handler *handler;
+ int loop;
+ struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);
+
+ hp_cpu->processor_id = smp_processor_id();
+ spin_lock(&hp_lock);
+ list_add_tail(&hp_cpu->node, &hp_cpu_list);
+ hp_cpu_list_length++;
+ spin_unlock(&hp_lock);
+ INIT_LIST_HEAD(&hp_cpu->handlers);
+ for (loop = 0; loop < HP_PER_CPU; loop++) {
+ handler = kmem_cache_alloc(hp_handler_slab, GFP_KERNEL);
+ if (!handler) {
+ pr_crit("kmem_cache_alloc() failed");
+ WARN_ON(1);
+ return -EIO;
+ }
+ handler->processor_id = hp_cpu->processor_id;
+ handler->addr = frame_dma;
+ handler->frame_ptr = frame_ptr;
+ list_add_tail(&handler->node, &hp_cpu->handlers);
+ }
+ return 0;
+}
+
+static int destroy_per_cpu_handlers(void)
+{
+ struct list_head *loop, *tmp;
+ struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);
+
+ spin_lock(&hp_lock);
+ list_del(&hp_cpu->node);
+ spin_unlock(&hp_lock);
+ list_for_each_safe(loop, tmp, &hp_cpu->handlers) {
+ u32 flags = 0;
+ struct hp_handler *handler = list_entry(loop, struct hp_handler,
+ node);
+ if (qman_retire_fq(&handler->rx, &flags) ||
+ (flags & QMAN_FQ_STATE_BLOCKOOS)) {
+ pr_crit("qman_retire_fq(rx) failed, flags: %x", flags);
+ WARN_ON(1);
+ return -EIO;
+ }
+ if (qman_oos_fq(&handler->rx)) {
+ pr_crit("qman_oos_fq(rx) failed");
+ WARN_ON(1);
+ return -EIO;
+ }
+ qman_destroy_fq(&handler->rx);
+ qman_destroy_fq(&handler->tx);
+ qman_release_fqid(handler->fqid_rx);
+ list_del(&handler->node);
+ kmem_cache_free(hp_handler_slab, handler);
+ }
+ return 0;
+}
+
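+/*
+ * Convert a byte count into a number of stashing cachelines; the FQD
+ * stashing fields are only two bits wide, so clamp the count at 3.
+ */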
+static inline u8 num_cachelines(u32 offset)
+{
+ u8 res = (offset + (L1_CACHE_BYTES - 1))
+ / (L1_CACHE_BYTES);
+ if (res > 3)
+ return 3;
+ return res;
+}
+#define STASH_DATA_CL \
+ num_cachelines(HP_NUM_WORDS * 4)
+#define STASH_CTX_CL \
+ num_cachelines(offsetof(struct hp_handler, fqid_rx))
+
+static int init_handler(void *h)
+{
+ struct qm_mcc_initfq opts;
+ struct hp_handler *handler = h;
+ int err;
+
+ if (handler->processor_id != smp_processor_id()) {
+ err = -EIO;
+ goto failed;
+ }
+ /* Set up rx */
+ memset(&handler->rx, 0, sizeof(handler->rx));
+ if (handler == special_handler)
+ handler->rx.cb.dqrr = special_dqrr;
+ else
+ handler->rx.cb.dqrr = normal_dqrr;
+ err = qman_create_fq(handler->fqid_rx, 0, &handler->rx);
+ if (err) {
+ pr_crit("qman_create_fq(rx) failed");
+ goto failed;
+ }
+ memset(&opts, 0, sizeof(opts));
+ opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL |
+ QM_INITFQ_WE_CONTEXTA);
+ opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING);
+ qm_fqd_set_stashing(&opts.fqd, 0, STASH_DATA_CL, STASH_CTX_CL);
+ err = qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED |
+ QMAN_INITFQ_FLAG_LOCAL, &opts);
+ if (err) {
+ pr_crit("qman_init_fq(rx) failed");
+ goto failed;
+ }
+ /* Set up tx */
+ memset(&handler->tx, 0, sizeof(handler->tx));
+ err = qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY,
+ &handler->tx);
+ if (err) {
+ pr_crit("qman_create_fq(tx) failed");
+ goto failed;
+ }
+
+ return 0;
+failed:
+ return err;
+}
+
+static void init_handler_cb(void *h)
+{
+ if (init_handler(h))
+ WARN_ON(1);
+}
+
+static int init_phase2(void)
+{
+ int loop;
+ u32 fqid = 0;
+ u32 lfsr = 0xdeadbeef;
+ struct hp_cpu *hp_cpu;
+ struct hp_handler *handler;
+
+ for (loop = 0; loop < HP_PER_CPU; loop++) {
+ list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
+ int err;
+
+ if (!loop)
+ hp_cpu->iterator = list_first_entry(
+ &hp_cpu->handlers,
+ struct hp_handler, node);
+ else
+ hp_cpu->iterator = list_entry(
+ hp_cpu->iterator->node.next,
+ struct hp_handler, node);
+ /* Rx FQID is the previous handler's Tx FQID */
+ hp_cpu->iterator->fqid_rx = fqid;
+ /* Allocate new FQID for Tx */
+ err = qman_alloc_fqid(&fqid);
+ if (err) {
+ pr_crit("qman_alloc_fqid() failed");
+ return err;
+ }
+ hp_cpu->iterator->fqid_tx = fqid;
+ /* Rx mixer is the previous handler's Tx mixer */
+ hp_cpu->iterator->rx_mixer = lfsr;
+ /* Get new mixer for Tx */
+ lfsr = do_lfsr(lfsr);
+ hp_cpu->iterator->tx_mixer = lfsr;
+ }
+ }
+ /* Fix up the first handler (fqid_rx==0, rx_mixer=0xdeadbeef) */
+ hp_cpu = list_first_entry(&hp_cpu_list, struct hp_cpu, node);
+ handler = list_first_entry(&hp_cpu->handlers, struct hp_handler, node);
+ if (handler->fqid_rx != 0 || handler->rx_mixer != 0xdeadbeef)
+ return 1;
+ handler->fqid_rx = fqid;
+ handler->rx_mixer = lfsr;
+ /* and tag it as our "special" handler */
+ special_handler = handler;
+ return 0;
+}
+
+static int init_phase3(void)
+{
+ int loop, err;
+ struct hp_cpu *hp_cpu;
+
+ for (loop = 0; loop < HP_PER_CPU; loop++) {
+ list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
+ if (!loop)
+ hp_cpu->iterator = list_first_entry(
+ &hp_cpu->handlers,
+ struct hp_handler, node);
+ else
+ hp_cpu->iterator = list_entry(
+ hp_cpu->iterator->node.next,
+ struct hp_handler, node);
+ preempt_disable();
+ if (hp_cpu->processor_id == smp_processor_id()) {
+ err = init_handler(hp_cpu->iterator);
+ if (err)
+ return err;
+ } else {
+ smp_call_function_single(hp_cpu->processor_id,
+ init_handler_cb, hp_cpu->iterator, 1);
+ }
+ preempt_enable();
+ }
+ }
+ return 0;
+}
+
+static int send_first_frame(void *ignore)
+{
+ u32 *p = special_handler->frame_ptr;
+ u32 lfsr = HP_FIRST_WORD;
+ int loop, err;
+ struct qm_fd fd;
+
+ if (special_handler->processor_id != smp_processor_id()) {
+ err = -EIO;
+ goto failed;
+ }
+ memset(&fd, 0, sizeof(fd));
+ qm_fd_addr_set64(&fd, special_handler->addr);
+ qm_fd_set_contig_big(&fd, HP_NUM_WORDS * 4);
+ for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
+ if (*p != lfsr) {
+ err = -EIO;
+ pr_crit("corrupt frame data");
+ goto failed;
+ }
+ *p ^= special_handler->tx_mixer;
+ lfsr = do_lfsr(lfsr);
+ }
+ pr_info("Sending first frame\n");
+ err = qman_enqueue(&special_handler->tx, &fd);
+ if (err) {
+ pr_crit("qman_enqueue() failed");
+ goto failed;
+ }
+
+ return 0;
+failed:
+ return err;
+}
+
+static void send_first_frame_cb(void *ignore)
+{
+ if (send_first_frame(NULL))
+ WARN_ON(1);
+}
+
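+/*
+ * Test entry point: build the per-cpu handler rings (init phases 1-3),
+ * inject the first frame from the special handler's cpu, wait for
+ * HP_LOOPS complete laps, then tear everything down.
+ */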
+int qman_test_stash(void)
+{
+ int err;
+
+ if (cpumask_weight(cpu_online_mask) < 2) {
+ pr_info("%s(): skip - only 1 CPU\n", __func__);
+ return 0;
+ }
+
+ pr_info("%s(): Starting\n", __func__);
+
+ hp_cpu_list_length = 0;
+ loop_counter = 0;
+ hp_handler_slab = kmem_cache_create("hp_handler_slab",
+ sizeof(struct hp_handler), L1_CACHE_BYTES,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!hp_handler_slab) {
+ err = -EIO;
+ pr_crit("kmem_cache_create() failed");
+ goto failed;
+ }
+
+ err = allocate_frame_data();
+ if (err)
+ goto failed;
+
+ /* Init phase 1 */
+ pr_info("Creating %d handlers per cpu...\n", HP_PER_CPU);
+ if (on_all_cpus(create_per_cpu_handlers)) {
+ err = -EIO;
+ pr_crit("on_each_cpu() failed");
+ goto failed;
+ }
+ pr_info("Number of cpus: %d, total of %d handlers\n",
+ hp_cpu_list_length, hp_cpu_list_length * HP_PER_CPU);
+
+ err = init_phase2();
+ if (err)
+ goto failed;
+
+ err = init_phase3();
+ if (err)
+ goto failed;
+
+ preempt_disable();
+ if (special_handler->processor_id == smp_processor_id()) {
+ err = send_first_frame(NULL);
+ if (err)
+ goto failed;
+ } else {
+ smp_call_function_single(special_handler->processor_id,
+ send_first_frame_cb, NULL, 1);
+ }
+ preempt_enable();
+
+ wait_event(queue, loop_counter == HP_LOOPS);
+ deallocate_frame_data();
+ if (on_all_cpus(destroy_per_cpu_handlers)) {
+ err = -EIO;
+ pr_crit("on_each_cpu() failed");
+ goto failed;
+ }
+ kmem_cache_destroy(hp_handler_slab);
+ pr_info("%s(): Finished\n", __func__);
+
+ return 0;
+failed:
+ WARN_ON(1);
+ return err;
+}
diff --git a/drivers/soc/fsl/qe/Kconfig b/drivers/soc/fsl/qe/Kconfig
new file mode 100644
index 000000000..fabba17e9
--- /dev/null
+++ b/drivers/soc/fsl/qe/Kconfig
@@ -0,0 +1,42 @@
+#
+# QE Communication options
+#
+
+config QUICC_ENGINE
+ bool "QUICC Engine (QE) framework support"
+ depends on FSL_SOC && PPC32
+ select GENERIC_ALLOCATOR
+ select CRC32
+ help
+ The QUICC Engine (QE) is a new generation of communications
+ coprocessors on Freescale embedded CPUs (akin to CPM in older chips).
+ Selecting this option means that you wish to build a kernel
+ for a machine with a QE coprocessor.
+
+config UCC_SLOW
+ bool
+ default y if SERIAL_QE
+ help
+ This option provides qe_lib support to UCC slow
+ protocols: UART, BISYNC, QMC
+
+config UCC_FAST
+ bool
+ default y if UCC_GETH || QE_TDM
+ help
+ This option provides qe_lib support to UCC fast
+ protocols: HDLC, Ethernet, ATM, transparent
+
+config UCC
+ bool
+ default y if UCC_FAST || UCC_SLOW
+
+config QE_TDM
+ bool
+ default y if FSL_UCC_HDLC
+
+config QE_USB
+ bool
+ default y if USB_FSL_QE
+ help
+ QE USB Controller support
diff --git a/drivers/soc/fsl/qe/Makefile b/drivers/soc/fsl/qe/Makefile
new file mode 100644
index 000000000..55a555304
--- /dev/null
+++ b/drivers/soc/fsl/qe/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the linux ppc-specific parts of QE
+#
+obj-$(CONFIG_QUICC_ENGINE) += qe.o qe_common.o qe_ic.o qe_io.o
+obj-$(CONFIG_CPM) += qe_common.o
+obj-$(CONFIG_UCC) += ucc.o
+obj-$(CONFIG_UCC_SLOW) += ucc_slow.o
+obj-$(CONFIG_UCC_FAST) += ucc_fast.o
+obj-$(CONFIG_QE_TDM) += qe_tdm.o
+obj-$(CONFIG_QE_USB) += usb.o
+obj-$(CONFIG_QE_GPIO) += gpio.o
diff --git a/drivers/soc/fsl/qe/gpio.c b/drivers/soc/fsl/qe/gpio.c
new file mode 100644
index 000000000..51b3a47b5
--- /dev/null
+++ b/drivers/soc/fsl/qe/gpio.c
@@ -0,0 +1,344 @@
+/*
+ * QUICC Engine GPIOs
+ *
+ * Copyright (c) MontaVista Software, Inc. 2008.
+ *
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio/driver.h>
+/* FIXME: needed for gpio_to_chip(); get rid of this */
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <soc/fsl/qe/qe.h>
+
+struct qe_gpio_chip {
+ struct of_mm_gpio_chip mm_gc;
+ spinlock_t lock;
+
+ unsigned long pin_flags[QE_PIO_PINS];
+#define QE_PIN_REQUESTED 0
+
+ /* shadowed data register to clear/set bits safely */
+ u32 cpdata;
+
+ /* saved_regs used to restore dedicated functions */
+ struct qe_pio_regs saved_regs;
+};
+
+static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
+{
+ struct qe_gpio_chip *qe_gc =
+ container_of(mm_gc, struct qe_gpio_chip, mm_gc);
+ struct qe_pio_regs __iomem *regs = mm_gc->regs;
+
+ qe_gc->cpdata = in_be32(&regs->cpdata);
+ qe_gc->saved_regs.cpdata = qe_gc->cpdata;
+ qe_gc->saved_regs.cpdir1 = in_be32(&regs->cpdir1);
+ qe_gc->saved_regs.cpdir2 = in_be32(&regs->cpdir2);
+ qe_gc->saved_regs.cppar1 = in_be32(&regs->cppar1);
+ qe_gc->saved_regs.cppar2 = in_be32(&regs->cppar2);
+ qe_gc->saved_regs.cpodr = in_be32(&regs->cpodr);
+}
+
+static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct qe_pio_regs __iomem *regs = mm_gc->regs;
+ u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio);
+
+ return !!(in_be32(&regs->cpdata) & pin_mask);
+}
+
+static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
+ struct qe_pio_regs __iomem *regs = mm_gc->regs;
+ unsigned long flags;
+ u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio);
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+
+ if (val)
+ qe_gc->cpdata |= pin_mask;
+ else
+ qe_gc->cpdata &= ~pin_mask;
+
+ out_be32(&regs->cpdata, qe_gc->cpdata);
+
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+}
+
+static void qe_gpio_set_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
+ struct qe_pio_regs __iomem *regs = mm_gc->regs;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+
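+	/* QE numbers pins MSB-first: GPIO i maps to register bit QE_PIO_PINS - 1 - i */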
+ for (i = 0; i < gc->ngpio; i++) {
+ if (*mask == 0)
+ break;
+ if (__test_and_clear_bit(i, mask)) {
+ if (test_bit(i, bits))
+ qe_gc->cpdata |= (1U << (QE_PIO_PINS - 1 - i));
+ else
+ qe_gc->cpdata &= ~(1U << (QE_PIO_PINS - 1 - i));
+ }
+ }
+
+ out_be32(&regs->cpdata, qe_gc->cpdata);
+
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+}
+
+static int qe_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+
+ __par_io_config_pin(mm_gc->regs, gpio, QE_PIO_DIR_IN, 0, 0, 0);
+
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+
+ return 0;
+}
+
+static int qe_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
+ unsigned long flags;
+
+ qe_gpio_set(gc, gpio, val);
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+
+ __par_io_config_pin(mm_gc->regs, gpio, QE_PIO_DIR_OUT, 0, 0, 0);
+
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+
+ return 0;
+}
+
+struct qe_pin {
+ /*
+	 * The qe_gpio_chip name is unfortunate; we should change that to
+ * something like qe_pio_controller. Someday.
+ */
+ struct qe_gpio_chip *controller;
+ int num;
+};
+
+/**
+ * qe_pin_request - Request a QE pin
+ * @np: device node to get a pin from
+ * @index: index of a pin in the device tree
+ * Context: non-atomic
+ *
+ * This function returns a qe_pin so that you can use it with the rest of
+ * the QE Pin Multiplexing API.
+ */
+struct qe_pin *qe_pin_request(struct device_node *np, int index)
+{
+ struct qe_pin *qe_pin;
+ struct gpio_chip *gc;
+ struct of_mm_gpio_chip *mm_gc;
+ struct qe_gpio_chip *qe_gc;
+ int err;
+ unsigned long flags;
+
+ qe_pin = kzalloc(sizeof(*qe_pin), GFP_KERNEL);
+ if (!qe_pin) {
+ pr_debug("%s: can't allocate memory\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ err = of_get_gpio(np, index);
+ if (err < 0)
+ goto err0;
+ gc = gpio_to_chip(err);
+ if (WARN_ON(!gc)) {
+ err = -ENODEV;
+ goto err0;
+ }
+
+ if (!of_device_is_compatible(gc->of_node, "fsl,mpc8323-qe-pario-bank")) {
+ pr_debug("%s: tried to get a non-qe pin\n", __func__);
+ err = -EINVAL;
+ goto err0;
+ }
+
+ mm_gc = to_of_mm_gpio_chip(gc);
+ qe_gc = gpiochip_get_data(gc);
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+
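+	/* convert the global GPIO number into an offset on this chip */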
+ err -= gc->base;
+ if (test_and_set_bit(QE_PIN_REQUESTED, &qe_gc->pin_flags[err]) == 0) {
+ qe_pin->controller = qe_gc;
+ qe_pin->num = err;
+ err = 0;
+ } else {
+ err = -EBUSY;
+ }
+
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+
+ if (!err)
+ return qe_pin;
+err0:
+ kfree(qe_pin);
+ pr_debug("%s failed with status %d\n", __func__, err);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(qe_pin_request);
+
+/**
+ * qe_pin_free - Free a pin
+ * @qe_pin: pointer to the qe_pin structure
+ * Context: any
+ *
+ * This function frees the qe_pin structure and makes a pin available
+ * for further qe_pin_request() calls.
+ */
+void qe_pin_free(struct qe_pin *qe_pin)
+{
+ struct qe_gpio_chip *qe_gc = qe_pin->controller;
+ unsigned long flags;
+ const int pin = qe_pin->num;
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+ test_and_clear_bit(QE_PIN_REQUESTED, &qe_gc->pin_flags[pin]);
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+
+ kfree(qe_pin);
+}
+EXPORT_SYMBOL(qe_pin_free);
+
+/**
+ * qe_pin_set_dedicated - Revert a pin to a dedicated peripheral function mode
+ * @qe_pin: pointer to the qe_pin structure
+ * Context: any
+ *
+ * This function resets a pin to a dedicated peripheral function that
+ * has been set up by the firmware.
+ */
+void qe_pin_set_dedicated(struct qe_pin *qe_pin)
+{
+ struct qe_gpio_chip *qe_gc = qe_pin->controller;
+ struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs;
+ struct qe_pio_regs *sregs = &qe_gc->saved_regs;
+ int pin = qe_pin->num;
+ u32 mask1 = 1 << (QE_PIO_PINS - (pin + 1));
+ u32 mask2 = 0x3 << (QE_PIO_PINS - (pin % (QE_PIO_PINS / 2) + 1) * 2);
+ bool second_reg = pin > (QE_PIO_PINS / 2) - 1;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+
+ if (second_reg) {
+ clrsetbits_be32(&regs->cpdir2, mask2, sregs->cpdir2 & mask2);
+ clrsetbits_be32(&regs->cppar2, mask2, sregs->cppar2 & mask2);
+ } else {
+ clrsetbits_be32(&regs->cpdir1, mask2, sregs->cpdir1 & mask2);
+ clrsetbits_be32(&regs->cppar1, mask2, sregs->cppar1 & mask2);
+ }
+
+ if (sregs->cpdata & mask1)
+ qe_gc->cpdata |= mask1;
+ else
+ qe_gc->cpdata &= ~mask1;
+
+ out_be32(&regs->cpdata, qe_gc->cpdata);
+ clrsetbits_be32(&regs->cpodr, mask1, sregs->cpodr & mask1);
+
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+}
+EXPORT_SYMBOL(qe_pin_set_dedicated);
+
+/**
+ * qe_pin_set_gpio - Set a pin to the GPIO mode
+ * @qe_pin: pointer to the qe_pin structure
+ * Context: any
+ *
+ * This function sets a pin to the GPIO mode.
+ */
+void qe_pin_set_gpio(struct qe_pin *qe_pin)
+{
+ struct qe_gpio_chip *qe_gc = qe_pin->controller;
+ struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+
+	/* Make it an input by default; the GPIO API can change that. */
+ __par_io_config_pin(regs, qe_pin->num, QE_PIO_DIR_IN, 0, 0, 0);
+
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+}
+EXPORT_SYMBOL(qe_pin_set_gpio);
+
+static int __init qe_add_gpiochips(void)
+{
+ struct device_node *np;
+
+ for_each_compatible_node(np, NULL, "fsl,mpc8323-qe-pario-bank") {
+ int ret;
+ struct qe_gpio_chip *qe_gc;
+ struct of_mm_gpio_chip *mm_gc;
+ struct gpio_chip *gc;
+
+ qe_gc = kzalloc(sizeof(*qe_gc), GFP_KERNEL);
+ if (!qe_gc) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ spin_lock_init(&qe_gc->lock);
+
+ mm_gc = &qe_gc->mm_gc;
+ gc = &mm_gc->gc;
+
+ mm_gc->save_regs = qe_gpio_save_regs;
+ gc->ngpio = QE_PIO_PINS;
+ gc->direction_input = qe_gpio_dir_in;
+ gc->direction_output = qe_gpio_dir_out;
+ gc->get = qe_gpio_get;
+ gc->set = qe_gpio_set;
+ gc->set_multiple = qe_gpio_set_multiple;
+
+ ret = of_mm_gpiochip_add_data(np, mm_gc, qe_gc);
+ if (ret)
+ goto err;
+ continue;
+err:
+ pr_err("%pOF: registration failed with status %d\n",
+ np, ret);
+ kfree(qe_gc);
+ /* try others anyway */
+ }
+ return 0;
+}
+arch_initcall(qe_add_gpiochips);
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c
new file mode 100644
index 000000000..2ef6fc648
--- /dev/null
+++ b/drivers/soc/fsl/qe/qe.c
@@ -0,0 +1,736 @@
+/*
+ * Copyright (C) 2006-2010 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors: Shlomi Gridish <gridish@freescale.com>
+ * Li Yang <leoli@freescale.com>
+ * Based on cpm2_common.c from Dan Malek (dmalek@jlc.net)
+ *
+ * Description:
+ * General Purpose functions for the global management of the
+ * QUICC Engine (QE).
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/crc32.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_platform.h>
+#include <asm/irq.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+#include <asm/prom.h>
+#include <asm/rheap.h>
+
+static void qe_snums_init(void);
+static int qe_sdma_init(void);
+
+static DEFINE_SPINLOCK(qe_lock);
+DEFINE_SPINLOCK(cmxgcr_lock);
+EXPORT_SYMBOL(cmxgcr_lock);
+
+/* QE snum state */
+enum qe_snum_state {
+ QE_SNUM_STATE_USED,
+ QE_SNUM_STATE_FREE
+};
+
+/* QE snum */
+struct qe_snum {
+ u8 num;
+ enum qe_snum_state state;
+};
+
+/* We allocate this here because it is used almost exclusively for
+ * the communication processor devices.
+ */
+struct qe_immap __iomem *qe_immr;
+EXPORT_SYMBOL(qe_immr);
+
+static struct qe_snum snums[QE_NUM_OF_SNUM]; /* Dynamically allocated SNUMs */
+static unsigned int qe_num_of_snum;
+
+static phys_addr_t qebase = -1;
+
+static phys_addr_t get_qe_base(void)
+{
+ struct device_node *qe;
+ int ret;
+ struct resource res;
+
+ if (qebase != -1)
+ return qebase;
+
+ qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
+ if (!qe) {
+ qe = of_find_node_by_type(NULL, "qe");
+ if (!qe)
+ return qebase;
+ }
+
+ ret = of_address_to_resource(qe, 0, &res);
+ if (!ret)
+ qebase = res.start;
+ of_node_put(qe);
+
+ return qebase;
+}
+
+void qe_reset(void)
+{
+ if (qe_immr == NULL)
+ qe_immr = ioremap(get_qe_base(), QE_IMMAP_SIZE);
+
+ qe_snums_init();
+
+ qe_issue_cmd(QE_RESET, QE_CR_SUBBLOCK_INVALID,
+ QE_CR_PROTOCOL_UNSPECIFIED, 0);
+
+ /* Reclaim the MURAM memory for our use. */
+ qe_muram_init();
+
+ if (qe_sdma_init())
+ panic("sdma init failed!");
+}
+
+int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
+{
+ unsigned long flags;
+ u8 mcn_shift = 0, dev_shift = 0;
+ u32 ret;
+
+ spin_lock_irqsave(&qe_lock, flags);
+ if (cmd == QE_RESET) {
+ out_be32(&qe_immr->cp.cecr, (u32) (cmd | QE_CR_FLG));
+ } else {
+ if (cmd == QE_ASSIGN_PAGE) {
+ /* Here device is the SNUM, not sub-block */
+ dev_shift = QE_CR_SNUM_SHIFT;
+ } else if (cmd == QE_ASSIGN_RISC) {
+ /* Here device is the SNUM, and mcnProtocol is
+ * e_QeCmdRiscAssignment value */
+ dev_shift = QE_CR_SNUM_SHIFT;
+ mcn_shift = QE_CR_MCN_RISC_ASSIGN_SHIFT;
+ } else {
+ if (device == QE_CR_SUBBLOCK_USB)
+ mcn_shift = QE_CR_MCN_USB_SHIFT;
+ else
+ mcn_shift = QE_CR_MCN_NORMAL_SHIFT;
+ }
+
+ out_be32(&qe_immr->cp.cecdr, cmd_input);
+ out_be32(&qe_immr->cp.cecr,
+ (cmd | QE_CR_FLG | ((u32) device << dev_shift) | (u32)
+ mcn_protocol << mcn_shift));
+ }
+
+ /* wait for the QE_CR_FLG to clear */
+ ret = spin_event_timeout((in_be32(&qe_immr->cp.cecr) & QE_CR_FLG) == 0,
+ 100, 0);
+ /* On timeout (e.g. failure), the expression will be false (ret == 0),
+ otherwise it will be true (ret == 1). */
+ spin_unlock_irqrestore(&qe_lock, flags);
+
+ return ret == 1;
+}
+EXPORT_SYMBOL(qe_issue_cmd);
+
+/* Set a baud rate generator. This needs lots of work. There are
+ * 16 BRGs, which can be connected to the QE channels or output
+ * as clocks. The BRGs are in two different blocks of internal
+ * memory-mapped space.
+ * The BRG clock is the QE clock divided by 2. It was set up long
+ * ago during the initial boot phase and is given to us.
+ * Baud rate clocks are zero-based in the driver code (as that maps
+ * to port numbers). Documentation uses 1-based numbering.
+ */
+static unsigned int brg_clk = 0;
+
+#define CLK_GRAN (1000)
+#define CLK_GRAN_LIMIT (5)
+
+unsigned int qe_get_brg_clk(void)
+{
+ struct device_node *qe;
+ int size;
+ const u32 *prop;
+ unsigned int mod;
+
+ if (brg_clk)
+ return brg_clk;
+
+ qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
+ if (!qe) {
+ qe = of_find_node_by_type(NULL, "qe");
+ if (!qe)
+ return brg_clk;
+ }
+
+ prop = of_get_property(qe, "brg-frequency", &size);
+ if (prop && size == sizeof(*prop))
+ brg_clk = *prop;
+
+ of_node_put(qe);
+
+	/* round this if it is near a multiple of CLK_GRAN */
+ mod = brg_clk % CLK_GRAN;
+ if (mod) {
+ if (mod < CLK_GRAN_LIMIT)
+ brg_clk -= mod;
+ else if (mod > (CLK_GRAN - CLK_GRAN_LIMIT))
+ brg_clk += CLK_GRAN - mod;
+ }
+
+ return brg_clk;
+}
+EXPORT_SYMBOL(qe_get_brg_clk);
+
+#define PVR_VER_836x 0x8083
+#define PVR_VER_832x 0x8084
+
+/* Program the BRG to the given sampling rate and multiplier
+ *
+ * @brg: the BRG, QE_BRG1 - QE_BRG16
+ * @rate: the desired sampling rate
+ * @multiplier: corresponds to the value programmed in GUMR_L[RDCR] or
+ * GUMR_L[TDCR]. E.g., if this BRG is the RX clock, and GUMR_L[RDCR]=01,
+ * then 'multiplier' should be 8.
+ */
+int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier)
+{
+ u32 divisor, tempval;
+ u32 div16 = 0;
+
+ if ((brg < QE_BRG1) || (brg > QE_BRG16))
+ return -EINVAL;
+
+ divisor = qe_get_brg_clk() / (rate * multiplier);
+
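+	/*
+	 * For example, a 100 MHz BRG clock with rate 115200 and multiplier
+	 * 16 gives divisor = 54 (illustrative numbers, not from a specific
+	 * board).
+	 */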
+ if (divisor > QE_BRGC_DIVISOR_MAX + 1) {
+ div16 = QE_BRGC_DIV16;
+ divisor /= 16;
+ }
+
+ /* Errata QE_General4, which affects some MPC832x and MPC836x SOCs, says
+ that the BRG divisor must be even if you're not using divide-by-16
+ mode. */
+ if (pvr_version_is(PVR_VER_836x) || pvr_version_is(PVR_VER_832x))
+ if (!div16 && (divisor & 1) && (divisor > 3))
+ divisor++;
+
+ tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) |
+ QE_BRGC_ENABLE | div16;
+
+ out_be32(&qe_immr->brg.brgc[brg - QE_BRG1], tempval);
+
+ return 0;
+}
+EXPORT_SYMBOL(qe_setbrg);
+
+/* Convert a string to a QE clock source enum
+ *
+ * This function takes a string, typically from a property in the device
+ * tree, and returns the corresponding "enum qe_clock" value.
+ */
+enum qe_clock qe_clock_source(const char *source)
+{
+ unsigned int i;
+
+ if (strcasecmp(source, "none") == 0)
+ return QE_CLK_NONE;
+
+ if (strcmp(source, "tsync_pin") == 0)
+ return QE_TSYNC_PIN;
+
+ if (strcmp(source, "rsync_pin") == 0)
+ return QE_RSYNC_PIN;
+
+ if (strncasecmp(source, "brg", 3) == 0) {
+ i = simple_strtoul(source + 3, NULL, 10);
+ if ((i >= 1) && (i <= 16))
+ return (QE_BRG1 - 1) + i;
+ else
+ return QE_CLK_DUMMY;
+ }
+
+ if (strncasecmp(source, "clk", 3) == 0) {
+ i = simple_strtoul(source + 3, NULL, 10);
+ if ((i >= 1) && (i <= 24))
+ return (QE_CLK1 - 1) + i;
+ else
+ return QE_CLK_DUMMY;
+ }
+
+ return QE_CLK_DUMMY;
+}
+EXPORT_SYMBOL(qe_clock_source);
+
+/* Initialize SNUMs (thread serial numbers) according to
+ * QE Module Control chapter, SNUM table
+ */
+static void qe_snums_init(void)
+{
+ int i;
+ static const u8 snum_init_76[] = {
+ 0x04, 0x05, 0x0C, 0x0D, 0x14, 0x15, 0x1C, 0x1D,
+ 0x24, 0x25, 0x2C, 0x2D, 0x34, 0x35, 0x88, 0x89,
+ 0x98, 0x99, 0xA8, 0xA9, 0xB8, 0xB9, 0xC8, 0xC9,
+ 0xD8, 0xD9, 0xE8, 0xE9, 0x44, 0x45, 0x4C, 0x4D,
+ 0x54, 0x55, 0x5C, 0x5D, 0x64, 0x65, 0x6C, 0x6D,
+ 0x74, 0x75, 0x7C, 0x7D, 0x84, 0x85, 0x8C, 0x8D,
+ 0x94, 0x95, 0x9C, 0x9D, 0xA4, 0xA5, 0xAC, 0xAD,
+ 0xB4, 0xB5, 0xBC, 0xBD, 0xC4, 0xC5, 0xCC, 0xCD,
+ 0xD4, 0xD5, 0xDC, 0xDD, 0xE4, 0xE5, 0xEC, 0xED,
+ 0xF4, 0xF5, 0xFC, 0xFD,
+ };
+ static const u8 snum_init_46[] = {
+ 0x04, 0x05, 0x0C, 0x0D, 0x14, 0x15, 0x1C, 0x1D,
+ 0x24, 0x25, 0x2C, 0x2D, 0x34, 0x35, 0x88, 0x89,
+ 0x98, 0x99, 0xA8, 0xA9, 0xB8, 0xB9, 0xC8, 0xC9,
+ 0xD8, 0xD9, 0xE8, 0xE9, 0x08, 0x09, 0x18, 0x19,
+ 0x28, 0x29, 0x38, 0x39, 0x48, 0x49, 0x58, 0x59,
+ 0x68, 0x69, 0x78, 0x79, 0x80, 0x81,
+ };
+ static const u8 *snum_init;
+
+ qe_num_of_snum = qe_get_num_of_snums();
+
+ if (qe_num_of_snum == 76)
+ snum_init = snum_init_76;
+ else
+ snum_init = snum_init_46;
+
+ for (i = 0; i < qe_num_of_snum; i++) {
+ snums[i].num = snum_init[i];
+ snums[i].state = QE_SNUM_STATE_FREE;
+ }
+}
+
+int qe_get_snum(void)
+{
+ unsigned long flags;
+ int snum = -EBUSY;
+ int i;
+
+ spin_lock_irqsave(&qe_lock, flags);
+ for (i = 0; i < qe_num_of_snum; i++) {
+ if (snums[i].state == QE_SNUM_STATE_FREE) {
+ snums[i].state = QE_SNUM_STATE_USED;
+ snum = snums[i].num;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&qe_lock, flags);
+
+ return snum;
+}
+EXPORT_SYMBOL(qe_get_snum);
+
+void qe_put_snum(u8 snum)
+{
+ int i;
+
+ for (i = 0; i < qe_num_of_snum; i++) {
+ if (snums[i].num == snum) {
+ snums[i].state = QE_SNUM_STATE_FREE;
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL(qe_put_snum);
+
+static int qe_sdma_init(void)
+{
+ struct sdma __iomem *sdma = &qe_immr->sdma;
+ static unsigned long sdma_buf_offset = (unsigned long)-ENOMEM;
+
+ if (!sdma)
+ return -ENODEV;
+
+	/* allocate 2 internal temporary buffers (512 bytes each) for
+ * the SDMA */
+ if (IS_ERR_VALUE(sdma_buf_offset)) {
+ sdma_buf_offset = qe_muram_alloc(512 * 2, 4096);
+ if (IS_ERR_VALUE(sdma_buf_offset))
+ return -ENOMEM;
+ }
+
+ out_be32(&sdma->sdebcr, (u32) sdma_buf_offset & QE_SDEBCR_BA_MASK);
+ out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK |
+ (0x1 << QE_SDMR_CEN_SHIFT)));
+
+ return 0;
+}
+
+/* The maximum number of RISCs we support */
+#define MAX_QE_RISC 4
+
+/* Firmware information stored here for qe_get_firmware_info() */
+static struct qe_firmware_info qe_firmware_info;
+
+/*
+ * Set to 1 if QE firmware has been uploaded, and therefore
+ * qe_firmware_info contains valid data.
+ */
+static int qe_firmware_uploaded;
+
+/*
+ * Upload a QE microcode
+ *
+ * This function is a worker function for qe_upload_firmware(). It does
+ * the actual uploading of the microcode.
+ */
+static void qe_upload_microcode(const void *base,
+ const struct qe_microcode *ucode)
+{
+ const __be32 *code = base + be32_to_cpu(ucode->code_offset);
+ unsigned int i;
+
+ if (ucode->major || ucode->minor || ucode->revision)
+ printk(KERN_INFO "qe-firmware: "
+ "uploading microcode '%s' version %u.%u.%u\n",
+ ucode->id, ucode->major, ucode->minor, ucode->revision);
+ else
+ printk(KERN_INFO "qe-firmware: "
+ "uploading microcode '%s'\n", ucode->id);
+
+ /* Use auto-increment */
+ out_be32(&qe_immr->iram.iadd, be32_to_cpu(ucode->iram_offset) |
+ QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR);
+
+ for (i = 0; i < be32_to_cpu(ucode->count); i++)
+ out_be32(&qe_immr->iram.idata, be32_to_cpu(code[i]));
+
+ /* Set I-RAM Ready Register */
+ out_be32(&qe_immr->iram.iready, be32_to_cpu(QE_IRAM_READY));
+}
+
+/*
+ * Upload a microcode to the I-RAM at a specific address.
+ *
+ * See Documentation/powerpc/qe_firmware.txt for information on QE microcode
+ * uploading.
+ *
+ * Currently, only version 1 is supported, so the 'version' field must be
+ * set to 1.
+ *
+ * The SOC model and revision are not validated, they are only displayed for
+ * informational purposes.
+ *
+ * 'calc_size' is the calculated size, in bytes, of the firmware structure and
+ * all of the microcode structures, minus the CRC.
+ *
+ * 'length' is the size that the structure says it is, including the CRC.
+ */
+int qe_upload_firmware(const struct qe_firmware *firmware)
+{
+ unsigned int i;
+ unsigned int j;
+ u32 crc;
+ size_t calc_size = sizeof(struct qe_firmware);
+ size_t length;
+ const struct qe_header *hdr;
+
+ if (!firmware) {
+ printk(KERN_ERR "qe-firmware: invalid pointer\n");
+ return -EINVAL;
+ }
+
+ hdr = &firmware->header;
+ length = be32_to_cpu(hdr->length);
+
+ /* Check the magic */
+ if ((hdr->magic[0] != 'Q') || (hdr->magic[1] != 'E') ||
+ (hdr->magic[2] != 'F')) {
+ printk(KERN_ERR "qe-firmware: not a microcode\n");
+ return -EPERM;
+ }
+
+ /* Check the version */
+ if (hdr->version != 1) {
+ printk(KERN_ERR "qe-firmware: unsupported version\n");
+ return -EPERM;
+ }
+
+ /* Validate some of the fields */
+ if ((firmware->count < 1) || (firmware->count > MAX_QE_RISC)) {
+ printk(KERN_ERR "qe-firmware: invalid data\n");
+ return -EINVAL;
+ }
+
+ /* Validate the length and check if there's a CRC */
+ calc_size += (firmware->count - 1) * sizeof(struct qe_microcode);
+
+ for (i = 0; i < firmware->count; i++)
+ /*
+ * For situations where the second RISC uses the same microcode
+ * as the first, the 'code_offset' and 'count' fields will be
+ * zero, so it's okay to add those.
+ */
+ calc_size += sizeof(__be32) *
+ be32_to_cpu(firmware->microcode[i].count);
+
+ /* Validate the length */
+ if (length != calc_size + sizeof(__be32)) {
+ printk(KERN_ERR "qe-firmware: invalid length\n");
+ return -EPERM;
+ }
+
+ /* Validate the CRC */
+ crc = be32_to_cpu(*(__be32 *)((void *)firmware + calc_size));
+ if (crc != crc32(0, firmware, calc_size)) {
+ printk(KERN_ERR "qe-firmware: firmware CRC is invalid\n");
+ return -EIO;
+ }
+
+ /*
+ * If the microcode calls for it, split the I-RAM.
+ */
+ if (!firmware->split)
+ setbits16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR);
+
+ if (firmware->soc.model)
+ printk(KERN_INFO
+ "qe-firmware: firmware '%s' for %u V%u.%u\n",
+ firmware->id, be16_to_cpu(firmware->soc.model),
+ firmware->soc.major, firmware->soc.minor);
+ else
+ printk(KERN_INFO "qe-firmware: firmware '%s'\n",
+ firmware->id);
+
+ /*
+ * The QE only supports one microcode per RISC, so clear out all the
+ * saved microcode information and put in the new.
+ */
+ memset(&qe_firmware_info, 0, sizeof(qe_firmware_info));
+ strlcpy(qe_firmware_info.id, firmware->id, sizeof(qe_firmware_info.id));
+ qe_firmware_info.extended_modes = firmware->extended_modes;
+ memcpy(qe_firmware_info.vtraps, firmware->vtraps,
+ sizeof(firmware->vtraps));
+
+ /* Loop through each microcode. */
+ for (i = 0; i < firmware->count; i++) {
+ const struct qe_microcode *ucode = &firmware->microcode[i];
+
+ /* Upload a microcode if it's present */
+ if (ucode->code_offset)
+ qe_upload_microcode(firmware, ucode);
+
+ /* Program the traps for this processor */
+ for (j = 0; j < 16; j++) {
+ u32 trap = be32_to_cpu(ucode->traps[j]);
+
+ if (trap)
+ out_be32(&qe_immr->rsp[i].tibcr[j], trap);
+ }
+
+ /* Enable traps */
+ out_be32(&qe_immr->rsp[i].eccr, be32_to_cpu(ucode->eccr));
+ }
+
+ qe_firmware_uploaded = 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(qe_upload_firmware);
+
+/*
+ * Get info on the currently-loaded firmware
+ *
+ * This function also checks the device tree to see if the boot loader has
+ * uploaded a firmware already.
+ */
+struct qe_firmware_info *qe_get_firmware_info(void)
+{
+ static int initialized;
+ struct property *prop;
+ struct device_node *qe;
+ struct device_node *fw = NULL;
+ const char *sprop;
+ unsigned int i;
+
+ /*
+ * If we haven't checked yet, and a driver hasn't uploaded a firmware
+ * yet, then check the device tree for information.
+ */
+ if (qe_firmware_uploaded)
+ return &qe_firmware_info;
+
+ if (initialized)
+ return NULL;
+
+ initialized = 1;
+
+ /*
+ * Newer device trees have an "fsl,qe" compatible property for the QE
+ * node, but we still need to support older device trees.
+ */
+ qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
+ if (!qe) {
+ qe = of_find_node_by_type(NULL, "qe");
+ if (!qe)
+ return NULL;
+ }
+
+ /* Find the 'firmware' child node */
+ for_each_child_of_node(qe, fw) {
+ if (strcmp(fw->name, "firmware") == 0)
+ break;
+ }
+
+ of_node_put(qe);
+
+ /* Did we find the 'firmware' node? */
+ if (!fw)
+ return NULL;
+
+ qe_firmware_uploaded = 1;
+
+ /* Copy the data into qe_firmware_info*/
+ sprop = of_get_property(fw, "id", NULL);
+ if (sprop)
+ strlcpy(qe_firmware_info.id, sprop,
+ sizeof(qe_firmware_info.id));
+
+ prop = of_find_property(fw, "extended-modes", NULL);
+ if (prop && (prop->length == sizeof(u64))) {
+ const u64 *iprop = prop->value;
+
+ qe_firmware_info.extended_modes = *iprop;
+ }
+
+ prop = of_find_property(fw, "virtual-traps", NULL);
+ if (prop && (prop->length == 32)) {
+ const u32 *iprop = prop->value;
+
+ for (i = 0; i < ARRAY_SIZE(qe_firmware_info.vtraps); i++)
+ qe_firmware_info.vtraps[i] = iprop[i];
+ }
+
+ of_node_put(fw);
+
+ return &qe_firmware_info;
+}
+EXPORT_SYMBOL(qe_get_firmware_info);
+
+unsigned int qe_get_num_of_risc(void)
+{
+ struct device_node *qe;
+ int size;
+ unsigned int num_of_risc = 0;
+ const u32 *prop;
+
+ qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
+ if (!qe) {
+		/* Older device trees did not have an "fsl,qe"
+ * compatible property, so we need to look for
+ * the QE node by name.
+ */
+ qe = of_find_node_by_type(NULL, "qe");
+ if (!qe)
+ return num_of_risc;
+ }
+
+ prop = of_get_property(qe, "fsl,qe-num-riscs", &size);
+ if (prop && size == sizeof(*prop))
+ num_of_risc = *prop;
+
+ of_node_put(qe);
+
+ return num_of_risc;
+}
+EXPORT_SYMBOL(qe_get_num_of_risc);
+
+unsigned int qe_get_num_of_snums(void)
+{
+ struct device_node *qe;
+ int size;
+ unsigned int num_of_snums;
+ const u32 *prop;
+
+ num_of_snums = 28; /* The default number of snum for threads is 28 */
+ qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
+ if (!qe) {
+		/* Older device trees did not have an "fsl,qe"
+ * compatible property, so we need to look for
+ * the QE node by name.
+ */
+ qe = of_find_node_by_type(NULL, "qe");
+ if (!qe)
+ return num_of_snums;
+ }
+
+ prop = of_get_property(qe, "fsl,qe-num-snums", &size);
+ if (prop && size == sizeof(*prop)) {
+ num_of_snums = *prop;
+ if ((num_of_snums < 28) || (num_of_snums > QE_NUM_OF_SNUM)) {
+ /* No QE ever has fewer than 28 SNUMs */
+ pr_err("QE: number of snum is invalid\n");
+ of_node_put(qe);
+ return -EINVAL;
+ }
+ }
+
+ of_node_put(qe);
+
+ return num_of_snums;
+}
+EXPORT_SYMBOL(qe_get_num_of_snums);
+
+static int __init qe_init(void)
+{
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,qe");
+ if (!np)
+ return -ENODEV;
+ qe_reset();
+ of_node_put(np);
+ return 0;
+}
+subsys_initcall(qe_init);
+
+#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx)
+static int qe_resume(struct platform_device *ofdev)
+{
+ if (!qe_alive_during_sleep())
+ qe_reset();
+ return 0;
+}
+
+static int qe_probe(struct platform_device *ofdev)
+{
+ return 0;
+}
+
+static const struct of_device_id qe_ids[] = {
+ { .compatible = "fsl,qe", },
+ { },
+};
+
+static struct platform_driver qe_driver = {
+ .driver = {
+ .name = "fsl-qe",
+ .of_match_table = qe_ids,
+ },
+ .probe = qe_probe,
+ .resume = qe_resume,
+};
+
+builtin_platform_driver(qe_driver);
+#endif /* defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx) */
diff --git a/drivers/soc/fsl/qe/qe_common.c b/drivers/soc/fsl/qe/qe_common.c
new file mode 100644
index 000000000..104e68d9b
--- /dev/null
+++ b/drivers/soc/fsl/qe/qe_common.c
@@ -0,0 +1,243 @@
+/*
+ * Common CPM code
+ *
+ * Author: Scott Wood <scottwood@freescale.com>
+ *
+ * Copyright 2007-2008,2010 Freescale Semiconductor, Inc.
+ *
+ * Some parts derived from commproc.c/cpm2_common.c, which is:
+ * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
+ * Copyright (c) 1999-2001 Dan Malek <dan@embeddedalley.com>
+ * Copyright (c) 2000 MontaVista Software, Inc (source@mvista.com)
+ * 2006 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+#include <linux/genalloc.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/of_device.h>
+#include <linux/spinlock.h>
+#include <linux/export.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <soc/fsl/qe/qe.h>
+
+static struct gen_pool *muram_pool;
+static spinlock_t cpm_muram_lock;
+static u8 __iomem *muram_vbase;
+static phys_addr_t muram_pbase;
+
+struct muram_block {
+ struct list_head head;
+ unsigned long start;
+ int size;
+};
+
+static LIST_HEAD(muram_block_list);
+
+/* max address size we deal with */
+#define OF_MAX_ADDR_CELLS 4
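+/*
+ * gen_pool_alloc() returns 0 on failure, but 0 is a valid muram offset,
+ * so bias every pool address by GENPOOL_OFFSET and subtract it again
+ * when handing offsets back to callers.
+ */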
+#define GENPOOL_OFFSET (4096 * 8)
+
+int cpm_muram_init(void)
+{
+ struct device_node *np;
+ struct resource r;
+ u32 zero[OF_MAX_ADDR_CELLS] = {};
+ resource_size_t max = 0;
+ int i = 0;
+ int ret = 0;
+
+ if (muram_pbase)
+ return 0;
+
+ spin_lock_init(&cpm_muram_lock);
+ np = of_find_compatible_node(NULL, NULL, "fsl,cpm-muram-data");
+ if (!np) {
+ /* try legacy bindings */
+ np = of_find_node_by_name(NULL, "data-only");
+ if (!np) {
+ pr_err("Cannot find CPM muram data node");
+ ret = -ENODEV;
+ goto out_muram;
+ }
+ }
+
+ muram_pool = gen_pool_create(0, -1);
+ if (!muram_pool) {
+ pr_err("Cannot allocate memory pool for CPM/QE muram");
+ ret = -ENOMEM;
+ goto out_muram;
+ }
+ muram_pbase = of_translate_address(np, zero);
+ if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) {
+ pr_err("Cannot translate zero through CPM muram node");
+ ret = -ENODEV;
+ goto out_pool;
+ }
+
+ while (of_address_to_resource(np, i++, &r) == 0) {
+ if (r.end > max)
+ max = r.end;
+ ret = gen_pool_add(muram_pool, r.start - muram_pbase +
+ GENPOOL_OFFSET, resource_size(&r), -1);
+ if (ret) {
+ pr_err("QE: couldn't add muram to pool!\n");
+ goto out_pool;
+ }
+ }
+
+ muram_vbase = ioremap(muram_pbase, max - muram_pbase + 1);
+ if (!muram_vbase) {
+ pr_err("Cannot map QE muram");
+ ret = -ENOMEM;
+ goto out_pool;
+ }
+ goto out_muram;
+out_pool:
+ gen_pool_destroy(muram_pool);
+out_muram:
+ of_node_put(np);
+ return ret;
+}
+
+/*
+ * cpm_muram_alloc_common - cpm_muram_alloc common code
+ * @size: number of bytes to allocate
+ * @algo: algorithm for alloc.
+ * @data: data for genalloc's algorithm.
+ *
+ * This function returns an offset into the muram area.
+ */
+static unsigned long cpm_muram_alloc_common(unsigned long size,
+ genpool_algo_t algo, void *data)
+{
+ struct muram_block *entry;
+ unsigned long start;
+
+ if (!muram_pool && cpm_muram_init())
+ goto out2;
+
+ start = gen_pool_alloc_algo(muram_pool, size, algo, data);
+ if (!start)
+ goto out2;
+ start = start - GENPOOL_OFFSET;
+ memset_io(cpm_muram_addr(start), 0, size);
+ entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!entry)
+ goto out1;
+ entry->start = start;
+ entry->size = size;
+ list_add(&entry->head, &muram_block_list);
+
+ return start;
+out1:
+ gen_pool_free(muram_pool, start, size);
+out2:
+ return (unsigned long)-ENOMEM;
+}
+
+/*
+ * cpm_muram_alloc - allocate the requested size worth of multi-user ram
+ * @size: number of bytes to allocate
+ * @align: requested alignment, in bytes
+ *
+ * This function returns an offset into the muram area.
+ * Use cpm_dpram_addr() to get the virtual address of the area.
+ * Use cpm_muram_free() to free the allocation.
+ */
+unsigned long cpm_muram_alloc(unsigned long size, unsigned long align)
+{
+ unsigned long start;
+ unsigned long flags;
+ struct genpool_data_align muram_pool_data;
+
+ spin_lock_irqsave(&cpm_muram_lock, flags);
+ muram_pool_data.align = align;
+ start = cpm_muram_alloc_common(size, gen_pool_first_fit_align,
+ &muram_pool_data);
+ spin_unlock_irqrestore(&cpm_muram_lock, flags);
+ return start;
+}
+EXPORT_SYMBOL(cpm_muram_alloc);
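+
+/*
+ * Illustrative usage sketch (not part of this file; sizes and alignment
+ * are arbitrary example values):
+ *
+ * unsigned long off = cpm_muram_alloc(64, 8);
+ *
+ * if (!IS_ERR_VALUE(off)) {
+ * void __iomem *va = cpm_muram_addr(off);
+ * dma_addr_t da = cpm_muram_dma(va);
+ * ... hand 'da' to the QE, access the area through 'va' ...
+ * cpm_muram_free(off);
+ * }
+ */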
+
+/**
+ * cpm_muram_free - free a chunk of multi-user ram
+ * @offset: The beginning of the chunk as returned by cpm_muram_alloc().
+ */
+int cpm_muram_free(unsigned long offset)
+{
+ unsigned long flags;
+ int size;
+ struct muram_block *tmp;
+
+ size = 0;
+ spin_lock_irqsave(&cpm_muram_lock, flags);
+ list_for_each_entry(tmp, &muram_block_list, head) {
+ if (tmp->start == offset) {
+ size = tmp->size;
+ list_del(&tmp->head);
+ kfree(tmp);
+ break;
+ }
+ }
+ gen_pool_free(muram_pool, offset + GENPOOL_OFFSET, size);
+ spin_unlock_irqrestore(&cpm_muram_lock, flags);
+ return size;
+}
+EXPORT_SYMBOL(cpm_muram_free);
+
+/*
+ * cpm_muram_alloc_fixed - reserve a specific region of multi-user ram
+ * @offset: offset of allocation start address
+ * @size: number of bytes to allocate
+ *
+ * This function returns an offset into the muram area.
+ * Use cpm_dpram_addr() to get the virtual address of the area.
+ * Use cpm_muram_free() to free the allocation.
+ */
+unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size)
+{
+ unsigned long start;
+ unsigned long flags;
+ struct genpool_data_fixed muram_pool_data_fixed;
+
+ spin_lock_irqsave(&cpm_muram_lock, flags);
+ muram_pool_data_fixed.offset = offset + GENPOOL_OFFSET;
+ start = cpm_muram_alloc_common(size, gen_pool_fixed_alloc,
+ &muram_pool_data_fixed);
+ spin_unlock_irqrestore(&cpm_muram_lock, flags);
+ return start;
+}
+EXPORT_SYMBOL(cpm_muram_alloc_fixed);
+
+/**
+ * cpm_muram_addr - turn a muram offset into a virtual address
+ * @offset: muram offset to convert
+ */
+void __iomem *cpm_muram_addr(unsigned long offset)
+{
+ return muram_vbase + offset;
+}
+EXPORT_SYMBOL(cpm_muram_addr);
+
+unsigned long cpm_muram_offset(void __iomem *addr)
+{
+ return addr - (void __iomem *)muram_vbase;
+}
+EXPORT_SYMBOL(cpm_muram_offset);
+
+/**
+ * cpm_muram_dma - turn a muram virtual address into a DMA address
+ * @addr: virtual address from cpm_muram_addr() to convert
+ */
+dma_addr_t cpm_muram_dma(void __iomem *addr)
+{
+ return muram_pbase + ((u8 __iomem *)addr - muram_vbase);
+}
+EXPORT_SYMBOL(cpm_muram_dma);
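+
+/*
+ * Note (illustrative): for any offset returned by cpm_muram_alloc(),
+ * cpm_muram_dma(cpm_muram_addr(offset)) == muram_pbase + offset; the DMA
+ * address is simply the physical base plus the muram offset.
+ */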
diff --git a/drivers/soc/fsl/qe/qe_ic.c b/drivers/soc/fsl/qe/qe_ic.c
new file mode 100644
index 000000000..ec2ca864b
--- /dev/null
+++ b/drivers/soc/fsl/qe/qe_ic.c
@@ -0,0 +1,512 @@
+/*
+ * arch/powerpc/sysdev/qe_lib/qe_ic.c
+ *
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: Li Yang <leoli@freescale.com>
+ * Based on code from Shlomi Gridish <gridish@freescale.com>
+ *
+ * QUICC ENGINE Interrupt Controller
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <soc/fsl/qe/qe_ic.h>
+
+#include "qe_ic.h"
+
+static DEFINE_RAW_SPINLOCK(qe_ic_lock);
+
+static struct qe_ic_info qe_ic_info[] = {
+ [1] = {
+ .mask = 0x00008000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 0,
+ .pri_reg = QEIC_CIPWCC,
+ },
+ [2] = {
+ .mask = 0x00004000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 1,
+ .pri_reg = QEIC_CIPWCC,
+ },
+ [3] = {
+ .mask = 0x00002000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 2,
+ .pri_reg = QEIC_CIPWCC,
+ },
+ [10] = {
+ .mask = 0x00000040,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 1,
+ .pri_reg = QEIC_CIPZCC,
+ },
+ [11] = {
+ .mask = 0x00000020,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 2,
+ .pri_reg = QEIC_CIPZCC,
+ },
+ [12] = {
+ .mask = 0x00000010,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 3,
+ .pri_reg = QEIC_CIPZCC,
+ },
+ [13] = {
+ .mask = 0x00000008,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 4,
+ .pri_reg = QEIC_CIPZCC,
+ },
+ [14] = {
+ .mask = 0x00000004,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 5,
+ .pri_reg = QEIC_CIPZCC,
+ },
+ [15] = {
+ .mask = 0x00000002,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 6,
+ .pri_reg = QEIC_CIPZCC,
+ },
+ [20] = {
+ .mask = 0x10000000,
+ .mask_reg = QEIC_CRIMR,
+ .pri_code = 3,
+ .pri_reg = QEIC_CIPRTA,
+ },
+ [25] = {
+ .mask = 0x00800000,
+ .mask_reg = QEIC_CRIMR,
+ .pri_code = 0,
+ .pri_reg = QEIC_CIPRTB,
+ },
+ [26] = {
+ .mask = 0x00400000,
+ .mask_reg = QEIC_CRIMR,
+ .pri_code = 1,
+ .pri_reg = QEIC_CIPRTB,
+ },
+ [27] = {
+ .mask = 0x00200000,
+ .mask_reg = QEIC_CRIMR,
+ .pri_code = 2,
+ .pri_reg = QEIC_CIPRTB,
+ },
+ [28] = {
+ .mask = 0x00100000,
+ .mask_reg = QEIC_CRIMR,
+ .pri_code = 3,
+ .pri_reg = QEIC_CIPRTB,
+ },
+ [32] = {
+ .mask = 0x80000000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 0,
+ .pri_reg = QEIC_CIPXCC,
+ },
+ [33] = {
+ .mask = 0x40000000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 1,
+ .pri_reg = QEIC_CIPXCC,
+ },
+ [34] = {
+ .mask = 0x20000000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 2,
+ .pri_reg = QEIC_CIPXCC,
+ },
+ [35] = {
+ .mask = 0x10000000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 3,
+ .pri_reg = QEIC_CIPXCC,
+ },
+ [36] = {
+ .mask = 0x08000000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 4,
+ .pri_reg = QEIC_CIPXCC,
+ },
+ [40] = {
+ .mask = 0x00800000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 0,
+ .pri_reg = QEIC_CIPYCC,
+ },
+ [41] = {
+ .mask = 0x00400000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 1,
+ .pri_reg = QEIC_CIPYCC,
+ },
+ [42] = {
+ .mask = 0x00200000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 2,
+ .pri_reg = QEIC_CIPYCC,
+ },
+ [43] = {
+ .mask = 0x00100000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 3,
+ .pri_reg = QEIC_CIPYCC,
+ },
+};
+
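+/*
+ * Gaps in qe_ic_info (entries with a zero mask) are reserved sources;
+ * qe_ic_host_map() below refuses to map them. Register offsets are byte
+ * offsets into the QEIC block, while the mapped base is a __be32 pointer,
+ * hence the 'reg >> 2' in the accessors.
+ */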
+static inline u32 qe_ic_read(volatile __be32 __iomem *base, unsigned int reg)
+{
+ return in_be32(base + (reg >> 2));
+}
+
+static inline void qe_ic_write(volatile __be32 __iomem *base, unsigned int reg,
+ u32 value)
+{
+ out_be32(base + (reg >> 2), value);
+}
+
+static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
+{
+ return irq_get_chip_data(virq);
+}
+
+static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d)
+{
+ return irq_data_get_irq_chip_data(d);
+}
+
+static void qe_ic_unmask_irq(struct irq_data *d)
+{
+ struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
+ unsigned int src = irqd_to_hwirq(d);
+ unsigned long flags;
+ u32 temp;
+
+ raw_spin_lock_irqsave(&qe_ic_lock, flags);
+
+ temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
+ qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
+ temp | qe_ic_info[src].mask);
+
+ raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
+}
+
+static void qe_ic_mask_irq(struct irq_data *d)
+{
+ struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
+ unsigned int src = irqd_to_hwirq(d);
+ unsigned long flags;
+ u32 temp;
+
+ raw_spin_lock_irqsave(&qe_ic_lock, flags);
+
+ temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
+ qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
+ temp & ~qe_ic_info[src].mask);
+
+ /* Flush the above write before enabling interrupts; otherwise,
+ * spurious interrupts will sometimes happen. To be 100% sure
+ * that the write has reached the device before interrupts are
+ * enabled, the mask register would have to be read back; however,
+ * this is not required for correctness, only to avoid wasting
+ * time on a large number of spurious interrupts. In testing,
+ * a sync reduced the observed spurious interrupts to zero.
+ */
+ mb();
+
+ raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
+}
+
+static struct irq_chip qe_ic_irq_chip = {
+ .name = "QEIC",
+ .irq_unmask = qe_ic_unmask_irq,
+ .irq_mask = qe_ic_mask_irq,
+ .irq_mask_ack = qe_ic_mask_irq,
+};
+
+static int qe_ic_host_match(struct irq_domain *h, struct device_node *node,
+ enum irq_domain_bus_token bus_token)
+{
+ /* Exact match, unless qe_ic node is NULL */
+ struct device_node *of_node = irq_domain_get_of_node(h);
+
+ return of_node == NULL || of_node == node;
+}
+
+static int qe_ic_host_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct qe_ic *qe_ic = h->host_data;
+ struct irq_chip *chip;
+
+ if (hw >= ARRAY_SIZE(qe_ic_info)) {
+ pr_err("%s: Invalid hw irq number for QEIC\n", __func__);
+ return -EINVAL;
+ }
+
+ if (qe_ic_info[hw].mask == 0) {
+ printk(KERN_ERR "Can't map reserved IRQ\n");
+ return -EINVAL;
+ }
+ /* Default chip */
+ chip = &qe_ic->hc_irq;
+
+ irq_set_chip_data(virq, qe_ic);
+ irq_set_status_flags(virq, IRQ_LEVEL);
+
+ irq_set_chip_and_handler(virq, chip, handle_level_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops qe_ic_host_ops = {
+ .match = qe_ic_host_match,
+ .map = qe_ic_host_map,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
+unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
+{
+ int irq;
+
+ BUG_ON(qe_ic == NULL);
+
+ /* get the interrupt source vector. */
+ irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26;
+
+ if (irq == 0)
+ return NO_IRQ;
+
+ return irq_linear_revmap(qe_ic->irqhost, irq);
+}
+
+/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
+unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
+{
+ int irq;
+
+ BUG_ON(qe_ic == NULL);
+
+ /* get the interrupt source vector. */
+ irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26;
+
+ if (irq == 0)
+ return NO_IRQ;
+
+ return irq_linear_revmap(qe_ic->irqhost, irq);
+}
+
+void __init qe_ic_init(struct device_node *node, unsigned int flags,
+ void (*low_handler)(struct irq_desc *desc),
+ void (*high_handler)(struct irq_desc *desc))
+{
+ struct qe_ic *qe_ic;
+ struct resource res;
+ u32 temp = 0, ret;
+
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret)
+ return;
+
+ qe_ic = kzalloc(sizeof(*qe_ic), GFP_KERNEL);
+ if (qe_ic == NULL)
+ return;
+
+ qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
+ &qe_ic_host_ops, qe_ic);
+ if (qe_ic->irqhost == NULL) {
+ kfree(qe_ic);
+ return;
+ }
+
+ qe_ic->regs = ioremap(res.start, resource_size(&res));
+
+ qe_ic->hc_irq = qe_ic_irq_chip;
+
+ qe_ic->virq_high = irq_of_parse_and_map(node, 0);
+ qe_ic->virq_low = irq_of_parse_and_map(node, 1);
+
+ if (qe_ic->virq_low == NO_IRQ) {
+ printk(KERN_ERR "Failed to map QE_IC low IRQ\n");
+ kfree(qe_ic);
+ return;
+ }
+
+ /*
+ * The default priority scheme is grouped. If spread mode is
+ * required, configure the CICR accordingly.
+ */
+ if (flags & QE_IC_SPREADMODE_GRP_W)
+ temp |= CICR_GWCC;
+ if (flags & QE_IC_SPREADMODE_GRP_X)
+ temp |= CICR_GXCC;
+ if (flags & QE_IC_SPREADMODE_GRP_Y)
+ temp |= CICR_GYCC;
+ if (flags & QE_IC_SPREADMODE_GRP_Z)
+ temp |= CICR_GZCC;
+ if (flags & QE_IC_SPREADMODE_GRP_RISCA)
+ temp |= CICR_GRTA;
+ if (flags & QE_IC_SPREADMODE_GRP_RISCB)
+ temp |= CICR_GRTB;
+
+ /* choose destination signal for highest priority interrupt */
+ if (flags & QE_IC_HIGH_SIGNAL) {
+ temp |= (SIGNAL_HIGH << CICR_HPIT_SHIFT);
+ high_active = 1;
+ }
+
+ qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
+
+ irq_set_handler_data(qe_ic->virq_low, qe_ic);
+ irq_set_chained_handler(qe_ic->virq_low, low_handler);
+
+ if (qe_ic->virq_high != NO_IRQ &&
+ qe_ic->virq_high != qe_ic->virq_low) {
+ irq_set_handler_data(qe_ic->virq_high, qe_ic);
+ irq_set_chained_handler(qe_ic->virq_high, high_handler);
+ }
+}
+
+void qe_ic_set_highest_priority(unsigned int virq, int high)
+{
+ struct qe_ic *qe_ic = qe_ic_from_irq(virq);
+ unsigned int src = virq_to_hw(virq);
+ u32 temp = 0;
+
+ temp = qe_ic_read(qe_ic->regs, QEIC_CICR);
+
+ temp &= ~CICR_HP_MASK;
+ temp |= src << CICR_HP_SHIFT;
+
+ temp &= ~CICR_HPIT_MASK;
+ temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << CICR_HPIT_SHIFT;
+
+ qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
+}
+
+/* Set Priority level within its group, from 1 to 8 */
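+/*
+ * Worked example of the field arithmetic below (illustrative): priorities
+ * 1-3 occupy three-bit fields at shifts 29, 26 and 23 of the group's
+ * CIPxCC register (32 - priority * 3), while priorities 4-8 use shifts
+ * 12, 9, 6, 3 and 0 (24 - priority * 3). For priority 1, pri_code is
+ * therefore written to bits 31:29.
+ */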
+int qe_ic_set_priority(unsigned int virq, unsigned int priority)
+{
+ struct qe_ic *qe_ic = qe_ic_from_irq(virq);
+ unsigned int src = virq_to_hw(virq);
+ u32 temp;
+
+ if (priority > 8 || priority == 0)
+ return -EINVAL;
+ if (WARN_ONCE(src >= ARRAY_SIZE(qe_ic_info),
+ "%s: Invalid hw irq number for QEIC\n", __func__))
+ return -EINVAL;
+ if (qe_ic_info[src].pri_reg == 0)
+ return -EINVAL;
+
+ temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].pri_reg);
+
+ if (priority < 4) {
+ temp &= ~(0x7 << (32 - priority * 3));
+ temp |= qe_ic_info[src].pri_code << (32 - priority * 3);
+ } else {
+ temp &= ~(0x7 << (24 - priority * 3));
+ temp |= qe_ic_info[src].pri_code << (24 - priority * 3);
+ }
+
+ qe_ic_write(qe_ic->regs, qe_ic_info[src].pri_reg, temp);
+
+ return 0;
+}
+
+/* Set a QE priority to use high irq, only priority 1~2 can use high irq */
+int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high)
+{
+ struct qe_ic *qe_ic = qe_ic_from_irq(virq);
+ unsigned int src = virq_to_hw(virq);
+ u32 temp, control_reg = QEIC_CICNR, shift = 0;
+
+ if (priority > 2 || priority == 0)
+ return -EINVAL;
+ if (WARN_ONCE(src >= ARRAY_SIZE(qe_ic_info),
+ "%s: Invalid hw irq number for QEIC\n", __func__))
+ return -EINVAL;
+
+ switch (qe_ic_info[src].pri_reg) {
+ case QEIC_CIPZCC:
+ shift = CICNR_ZCC1T_SHIFT;
+ break;
+ case QEIC_CIPWCC:
+ shift = CICNR_WCC1T_SHIFT;
+ break;
+ case QEIC_CIPYCC:
+ shift = CICNR_YCC1T_SHIFT;
+ break;
+ case QEIC_CIPXCC:
+ shift = CICNR_XCC1T_SHIFT;
+ break;
+ case QEIC_CIPRTA:
+ shift = CRICR_RTA1T_SHIFT;
+ control_reg = QEIC_CRICR;
+ break;
+ case QEIC_CIPRTB:
+ shift = CRICR_RTB1T_SHIFT;
+ control_reg = QEIC_CRICR;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ shift += (2 - priority) * 2;
+ temp = qe_ic_read(qe_ic->regs, control_reg);
+ temp &= ~(SIGNAL_MASK << shift);
+ temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << shift;
+ qe_ic_write(qe_ic->regs, control_reg, temp);
+
+ return 0;
+}
+
+static struct bus_type qe_ic_subsys = {
+ .name = "qe_ic",
+ .dev_name = "qe_ic",
+};
+
+static struct device device_qe_ic = {
+ .id = 0,
+ .bus = &qe_ic_subsys,
+};
+
+static int __init init_qe_ic_sysfs(void)
+{
+ int rc;
+
+ printk(KERN_DEBUG "Registering qe_ic with sysfs...\n");
+
+ rc = subsys_system_register(&qe_ic_subsys, NULL);
+ if (rc) {
+ printk(KERN_ERR "Failed registering qe_ic sys class\n");
+ return -ENODEV;
+ }
+ rc = device_register(&device_qe_ic);
+ if (rc) {
+ printk(KERN_ERR "Failed registering qe_ic sys device\n");
+ return -ENODEV;
+ }
+ return 0;
+}
+
+subsys_initcall(init_qe_ic_sysfs);
diff --git a/drivers/soc/fsl/qe/qe_ic.h b/drivers/soc/fsl/qe/qe_ic.h
new file mode 100644
index 000000000..926a2ed42
--- /dev/null
+++ b/drivers/soc/fsl/qe/qe_ic.h
@@ -0,0 +1,103 @@
+/*
+ * drivers/soc/fsl/qe/qe_ic.h
+ *
+ * QUICC ENGINE Interrupt Controller Header
+ *
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: Li Yang <leoli@freescale.com>
+ * Based on code from Shlomi Gridish <gridish@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#ifndef _POWERPC_SYSDEV_QE_IC_H
+#define _POWERPC_SYSDEV_QE_IC_H
+
+#include <soc/fsl/qe/qe_ic.h>
+
+#define NR_QE_IC_INTS 64
+
+/* QE IC registers offset */
+#define QEIC_CICR 0x00
+#define QEIC_CIVEC 0x04
+#define QEIC_CRIPNR 0x08
+#define QEIC_CIPNR 0x0c
+#define QEIC_CIPXCC 0x10
+#define QEIC_CIPYCC 0x14
+#define QEIC_CIPWCC 0x18
+#define QEIC_CIPZCC 0x1c
+#define QEIC_CIMR 0x20
+#define QEIC_CRIMR 0x24
+#define QEIC_CICNR 0x28
+#define QEIC_CIPRTA 0x30
+#define QEIC_CIPRTB 0x34
+#define QEIC_CRICR 0x3c
+#define QEIC_CHIVEC 0x60
+
+/* Interrupt priority registers */
+#define CIPCC_SHIFT_PRI0 29
+#define CIPCC_SHIFT_PRI1 26
+#define CIPCC_SHIFT_PRI2 23
+#define CIPCC_SHIFT_PRI3 20
+#define CIPCC_SHIFT_PRI4 13
+#define CIPCC_SHIFT_PRI5 10
+#define CIPCC_SHIFT_PRI6 7
+#define CIPCC_SHIFT_PRI7 4
+
+/* CICR priority modes */
+#define CICR_GWCC 0x00040000
+#define CICR_GXCC 0x00020000
+#define CICR_GYCC 0x00010000
+#define CICR_GZCC 0x00080000
+#define CICR_GRTA 0x00200000
+#define CICR_GRTB 0x00400000
+#define CICR_HPIT_SHIFT 8
+#define CICR_HPIT_MASK 0x00000300
+#define CICR_HP_SHIFT 24
+#define CICR_HP_MASK 0x3f000000
+
+/* CICNR */
+#define CICNR_WCC1T_SHIFT 20
+#define CICNR_ZCC1T_SHIFT 28
+#define CICNR_YCC1T_SHIFT 12
+#define CICNR_XCC1T_SHIFT 4
+
+/* CRICR */
+#define CRICR_RTA1T_SHIFT 20
+#define CRICR_RTB1T_SHIFT 28
+
+/* Signal indicator */
+#define SIGNAL_MASK 3
+#define SIGNAL_HIGH 2
+#define SIGNAL_LOW 0
+
+struct qe_ic {
+ /* Mapped control registers base */
+ volatile u32 __iomem *regs;
+
+ /* The remapper for this QEIC */
+ struct irq_domain *irqhost;
+
+ /* The "linux" controller struct */
+ struct irq_chip hc_irq;
+
+ /* VIRQ numbers of QE high/low irqs */
+ unsigned int virq_high;
+ unsigned int virq_low;
+};
+
+/*
+ * QE interrupt controller internal structure
+ */
+struct qe_ic_info {
+ u32 mask; /* location of this source in its mask register */
+ u32 mask_reg; /* Mask register offset */
+ u8 pri_code; /* for grouped interrupt sources - the interrupt
+ code as it appears in the group priority register */
+ u32 pri_reg; /* Group priority register offset */
+};
+
+#endif /* _POWERPC_SYSDEV_QE_IC_H */
diff --git a/drivers/soc/fsl/qe/qe_io.c b/drivers/soc/fsl/qe/qe_io.c
new file mode 100644
index 000000000..127a4a836
--- /dev/null
+++ b/drivers/soc/fsl/qe/qe_io.c
@@ -0,0 +1,194 @@
+/*
+ * arch/powerpc/sysdev/qe_lib/qe_io.c
+ *
+ * QE Parallel I/O ports configuration routines
+ *
+ * Copyright 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: Li Yang <LeoLi@freescale.com>
+ * Based on code from Shlomi Gridish <gridish@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+
+#include <asm/io.h>
+#include <soc/fsl/qe/qe.h>
+#include <asm/prom.h>
+#include <sysdev/fsl_soc.h>
+
+#undef DEBUG
+
+static struct qe_pio_regs __iomem *par_io;
+static int num_par_io_ports;
+
+int par_io_init(struct device_node *np)
+{
+ struct resource res;
+ int ret;
+ const u32 *num_ports;
+
+ /* Map Parallel I/O ports registers */
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret)
+ return ret;
+ par_io = ioremap(res.start, resource_size(&res));
+ if (!par_io)
+ return -ENOMEM;
+
+ num_ports = of_get_property(np, "num-ports", NULL);
+ if (num_ports)
+ num_par_io_ports = *num_ports;
+
+ return 0;
+}
+
+void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, int dir,
+ int open_drain, int assignment, int has_irq)
+{
+ u32 pin_mask1bit;
+ u32 pin_mask2bits;
+ u32 new_mask2bits;
+ u32 tmp_val;
+
+ /* calculate pin location for the one-bit and two-bit fields */
+ pin_mask1bit = (u32) (1 << (QE_PIO_PINS - (pin + 1)));
+
+ /* Set open drain, if required */
+ tmp_val = in_be32(&par_io->cpodr);
+ if (open_drain)
+ out_be32(&par_io->cpodr, pin_mask1bit | tmp_val);
+ else
+ out_be32(&par_io->cpodr, ~pin_mask1bit & tmp_val);
+
+ /* define direction */
+ tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
+ in_be32(&par_io->cpdir2) :
+ in_be32(&par_io->cpdir1);
+
+ /* get all bits mask for 2 bit per port */
+ pin_mask2bits = (u32) (0x3 << (QE_PIO_PINS -
+ (pin % (QE_PIO_PINS / 2) + 1) * 2));
+
+ /* compute the new two-bit value for the direction field */
+ new_mask2bits = (u32) (dir << (QE_PIO_PINS -
+ (pin % (QE_PIO_PINS / 2) + 1) * 2));
+
+ /* clear and set 2 bits mask */
+ if (pin > (QE_PIO_PINS / 2) - 1) {
+ out_be32(&par_io->cpdir2,
+ ~pin_mask2bits & tmp_val);
+ tmp_val &= ~pin_mask2bits;
+ out_be32(&par_io->cpdir2, new_mask2bits | tmp_val);
+ } else {
+ out_be32(&par_io->cpdir1,
+ ~pin_mask2bits & tmp_val);
+ tmp_val &= ~pin_mask2bits;
+ out_be32(&par_io->cpdir1, new_mask2bits | tmp_val);
+ }
+ /* define pin assignment */
+ tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
+ in_be32(&par_io->cppar2) :
+ in_be32(&par_io->cppar1);
+
+ new_mask2bits = (u32) (assignment << (QE_PIO_PINS -
+ (pin % (QE_PIO_PINS / 2) + 1) * 2));
+ /* clear and set 2 bits mask */
+ if (pin > (QE_PIO_PINS / 2) - 1) {
+ out_be32(&par_io->cppar2,
+ ~pin_mask2bits & tmp_val);
+ tmp_val &= ~pin_mask2bits;
+ out_be32(&par_io->cppar2, new_mask2bits | tmp_val);
+ } else {
+ out_be32(&par_io->cppar1,
+ ~pin_mask2bits & tmp_val);
+ tmp_val &= ~pin_mask2bits;
+ out_be32(&par_io->cppar1, new_mask2bits | tmp_val);
+ }
+}
+EXPORT_SYMBOL(__par_io_config_pin);
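+
+/*
+ * Worked example (illustrative, assuming QE_PIO_PINS == 32): for pin 3,
+ * the one-bit mask used in CPODR is 1 << 28, and the two-bit direction
+ * and assignment fields sit at bits 25:24, since
+ * 32 - (3 % 16 + 1) * 2 == 24. Pins 16-31 use the second register of each
+ * pair (cpdir2/cppar2).
+ */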
+
+int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain,
+ int assignment, int has_irq)
+{
+ if (!par_io || port >= num_par_io_ports)
+ return -EINVAL;
+
+ __par_io_config_pin(&par_io[port], pin, dir, open_drain, assignment,
+ has_irq);
+ return 0;
+}
+EXPORT_SYMBOL(par_io_config_pin);
+
+int par_io_data_set(u8 port, u8 pin, u8 val)
+{
+ u32 pin_mask, tmp_val;
+
+ if (port >= num_par_io_ports)
+ return -EINVAL;
+ if (pin >= QE_PIO_PINS)
+ return -EINVAL;
+ /* calculate pin location */
+ pin_mask = (u32) (1 << (QE_PIO_PINS - 1 - pin));
+
+ tmp_val = in_be32(&par_io[port].cpdata);
+
+ if (val == 0) /* clear */
+ out_be32(&par_io[port].cpdata, ~pin_mask & tmp_val);
+ else /* set */
+ out_be32(&par_io[port].cpdata, pin_mask | tmp_val);
+
+ return 0;
+}
+EXPORT_SYMBOL(par_io_data_set);
+
+int par_io_of_config(struct device_node *np)
+{
+ struct device_node *pio;
+ const phandle *ph;
+ int pio_map_len;
+ const unsigned int *pio_map;
+
+ if (par_io == NULL) {
+ printk(KERN_ERR "par_io not initialized\n");
+ return -1;
+ }
+
+ ph = of_get_property(np, "pio-handle", NULL);
+ if (ph == NULL) {
+ printk(KERN_ERR "pio-handle not available\n");
+ return -1;
+ }
+
+ pio = of_find_node_by_phandle(*ph);
+ if (pio == NULL) {
+ printk(KERN_ERR "pio-handle does not point to a node\n");
+ return -1;
+ }
+
+ pio_map = of_get_property(pio, "pio-map", &pio_map_len);
+ if (pio_map == NULL) {
+ printk(KERN_ERR "pio-map is not set!\n");
+ of_node_put(pio);
+ return -1;
+ }
+ pio_map_len /= sizeof(unsigned int);
+ if ((pio_map_len % 6) != 0) {
+ printk(KERN_ERR "pio-map format wrong!\n");
+ of_node_put(pio);
+ return -1;
+ }
+
+ while (pio_map_len > 0) {
+ par_io_config_pin((u8) pio_map[0], (u8) pio_map[1],
+ (int) pio_map[2], (int) pio_map[3],
+ (int) pio_map[4], (int) pio_map[5]);
+ pio_map += 6;
+ pio_map_len -= 6;
+ }
+ of_node_put(pio);
+ return 0;
+}
+EXPORT_SYMBOL(par_io_of_config);
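+
+/*
+ * Illustrative device-tree fragment (hypothetical values): each pio-map
+ * entry is six cells, consumed above as
+ * <port pin dir open_drain assignment has_irq>, e.g.
+ *
+ * pio-handle = <&pio_ucc1>;
+ *
+ * pio_ucc1: ucc-pin@0 {
+ * pio-map = <0 3 1 0 1 0>;
+ * };
+ */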
diff --git a/drivers/soc/fsl/qe/qe_tdm.c b/drivers/soc/fsl/qe/qe_tdm.c
new file mode 100644
index 000000000..f744c214f
--- /dev/null
+++ b/drivers/soc/fsl/qe/qe_tdm.c
@@ -0,0 +1,278 @@
+/*
+ * Copyright (C) 2015 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors: Zhao Qiang <qiang.zhao@nxp.com>
+ *
+ * Description:
+ * QE TDM API Set - TDM specific routines implementations.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <soc/fsl/qe/qe_tdm.h>
+
+static int set_tdm_framer(const char *tdm_framer_type)
+{
+ if (strcmp(tdm_framer_type, "e1") == 0)
+ return TDM_FRAMER_E1;
+ else if (strcmp(tdm_framer_type, "t1") == 0)
+ return TDM_FRAMER_T1;
+ else
+ return -EINVAL;
+}
+
+static void set_si_param(struct ucc_tdm *utdm, struct ucc_tdm_info *ut_info)
+{
+ struct si_mode_info *si_info = &ut_info->si_info;
+
+ if (utdm->tdm_mode == TDM_INTERNAL_LOOPBACK) {
+ si_info->simr_crt = 1;
+ si_info->simr_rfsd = 0;
+ }
+}
+
+int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
+ struct ucc_tdm_info *ut_info)
+{
+ const char *sprop;
+ int ret = 0;
+ u32 val;
+ struct resource *res;
+ struct device_node *np2;
+ static int siram_init_flag;
+ struct platform_device *pdev, *pdev2;
+
+ sprop = of_get_property(np, "fsl,rx-sync-clock", NULL);
+ if (sprop) {
+ ut_info->uf_info.rx_sync = qe_clock_source(sprop);
+ if ((ut_info->uf_info.rx_sync < QE_CLK_NONE) ||
+ (ut_info->uf_info.rx_sync > QE_RSYNC_PIN)) {
+ pr_err("QE-TDM: Invalid rx-sync-clock property\n");
+ return -EINVAL;
+ }
+ } else {
+ pr_err("QE-TDM: Invalid rx-sync-clock property\n");
+ return -EINVAL;
+ }
+
+ sprop = of_get_property(np, "fsl,tx-sync-clock", NULL);
+ if (sprop) {
+ ut_info->uf_info.tx_sync = qe_clock_source(sprop);
+ if ((ut_info->uf_info.tx_sync < QE_CLK_NONE) ||
+ (ut_info->uf_info.tx_sync > QE_TSYNC_PIN)) {
+ pr_err("QE-TDM: Invalid tx-sync-clock property\n");
+ return -EINVAL;
+ }
+ } else {
+ pr_err("QE-TDM: Invalid tx-sync-clock property\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_index(np, "fsl,tx-timeslot-mask", 0, &val);
+ if (ret) {
+ pr_err("QE-TDM: Invalid tx-timeslot-mask property\n");
+ return -EINVAL;
+ }
+ utdm->tx_ts_mask = val;
+
+ ret = of_property_read_u32_index(np, "fsl,rx-timeslot-mask", 0, &val);
+ if (ret) {
+ ret = -EINVAL;
+ pr_err("QE-TDM: Invalid rx-timeslot-mask property\n");
+ return ret;
+ }
+ utdm->rx_ts_mask = val;
+
+ ret = of_property_read_u32_index(np, "fsl,tdm-id", 0, &val);
+ if (ret) {
+ ret = -EINVAL;
+ pr_err("QE-TDM: No fsl,tdm-id property for this UCC\n");
+ return ret;
+ }
+ utdm->tdm_port = val;
+ ut_info->uf_info.tdm_num = utdm->tdm_port;
+
+ if (of_property_read_bool(np, "fsl,tdm-internal-loopback"))
+ utdm->tdm_mode = TDM_INTERNAL_LOOPBACK;
+ else
+ utdm->tdm_mode = TDM_NORMAL;
+
+ sprop = of_get_property(np, "fsl,tdm-framer-type", NULL);
+ if (!sprop) {
+ ret = -EINVAL;
+ pr_err("QE-TDM: No tdm-framer-type property for UCC\n");
+ return ret;
+ }
+ ret = set_tdm_framer(sprop);
+ if (ret < 0)
+ return -EINVAL;
+ utdm->tdm_framer_type = ret;
+
+ ret = of_property_read_u32_index(np, "fsl,siram-entry-id", 0, &val);
+ if (ret) {
+ ret = -EINVAL;
+ pr_err("QE-TDM: No siram entry id for UCC\n");
+ return ret;
+ }
+ utdm->siram_entry_id = val;
+
+ set_si_param(utdm, ut_info);
+
+ np2 = of_find_compatible_node(NULL, NULL, "fsl,t1040-qe-si");
+ if (!np2)
+ return -EINVAL;
+
+ pdev = of_find_device_by_node(np2);
+ if (!pdev) {
+ pr_err("%s: failed to lookup pdev\n", np2->name);
+ of_node_put(np2);
+ return -EINVAL;
+ }
+
+ of_node_put(np2);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ utdm->si_regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(utdm->si_regs)) {
+ ret = PTR_ERR(utdm->si_regs);
+ goto err_miss_siram_property;
+ }
+
+ np2 = of_find_compatible_node(NULL, NULL, "fsl,t1040-qe-siram");
+ if (!np2) {
+ ret = -EINVAL;
+ goto err_miss_siram_property;
+ }
+
+ pdev2 = of_find_device_by_node(np2);
+ if (!pdev2) {
+ ret = -EINVAL;
+ pr_err("%s: failed to lookup pdev\n", np2->name);
+ of_node_put(np2);
+ goto err_miss_siram_property;
+ }
+
+ of_node_put(np2);
+ res = platform_get_resource(pdev2, IORESOURCE_MEM, 0);
+ utdm->siram = devm_ioremap_resource(&pdev2->dev, res);
+ if (IS_ERR(utdm->siram)) {
+ ret = PTR_ERR(utdm->siram);
+ goto err_miss_siram_property;
+ }
+
+ if (siram_init_flag == 0) {
+ memset_io(utdm->siram, 0, resource_size(res));
+ siram_init_flag = 1;
+ }
+
+ return ret;
+
+err_miss_siram_property:
+ if (!IS_ERR_OR_NULL(utdm->si_regs))
+ devm_iounmap(&pdev->dev, utdm->si_regs);
+ return ret;
+}
+EXPORT_SYMBOL(ucc_of_parse_tdm);
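+
+/*
+ * Illustrative device-tree fragment for the parser above (hypothetical
+ * values; only the properties read by ucc_of_parse_tdm() are shown):
+ *
+ * fsl,rx-sync-clock = "rsync_pin";
+ * fsl,tx-sync-clock = "tsync_pin";
+ * fsl,rx-timeslot-mask = <0xfffffffe>;
+ * fsl,tx-timeslot-mask = <0xfffffffe>;
+ * fsl,tdm-id = <0>;
+ * fsl,tdm-framer-type = "e1";
+ * fsl,siram-entry-id = <0>;
+ */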
+
+void ucc_tdm_init(struct ucc_tdm *utdm, struct ucc_tdm_info *ut_info)
+{
+ struct si1 __iomem *si_regs;
+ u16 __iomem *siram;
+ u16 siram_entry_valid;
+ u16 siram_entry_closed;
+ u16 ucc_num;
+ u8 csel;
+ u16 sixmr;
+ u16 tdm_port;
+ u32 siram_entry_id;
+ u32 mask;
+ int i;
+
+ si_regs = utdm->si_regs;
+ siram = utdm->siram;
+ ucc_num = ut_info->uf_info.ucc_num;
+ tdm_port = utdm->tdm_port;
+ siram_entry_id = utdm->siram_entry_id;
+
+ if (utdm->tdm_framer_type == TDM_FRAMER_T1)
+ utdm->num_of_ts = 24;
+ if (utdm->tdm_framer_type == TDM_FRAMER_E1)
+ utdm->num_of_ts = 32;
+
+ /* set siram table */
+ csel = (ucc_num < 4) ? ucc_num + 9 : ucc_num - 3;
+
+ siram_entry_valid = SIR_CSEL(csel) | SIR_BYTE | SIR_CNT(0);
+ siram_entry_closed = SIR_IDLE | SIR_BYTE | SIR_CNT(0);
+
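+ /*
+ * TX entries occupy SI RAM words siram_entry_id * 32 ..
+ * siram_entry_id * 32 + num_of_ts - 1; the matching RX entries
+ * live 0x200 words above, as indexed below.
+ */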
+ for (i = 0; i < utdm->num_of_ts; i++) {
+ mask = 0x01 << i;
+
+ if (utdm->tx_ts_mask & mask)
+ iowrite16be(siram_entry_valid,
+ &siram[siram_entry_id * 32 + i]);
+ else
+ iowrite16be(siram_entry_closed,
+ &siram[siram_entry_id * 32 + i]);
+
+ if (utdm->rx_ts_mask & mask)
+ iowrite16be(siram_entry_valid,
+ &siram[siram_entry_id * 32 + 0x200 + i]);
+ else
+ iowrite16be(siram_entry_closed,
+ &siram[siram_entry_id * 32 + 0x200 + i]);
+ }
+
+ setbits16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)],
+ SIR_LAST);
+ setbits16(&siram[(siram_entry_id * 32) + 0x200 + (utdm->num_of_ts - 1)],
+ SIR_LAST);
+
+ /* Set SIxMR register */
+ sixmr = SIMR_SAD(siram_entry_id);
+
+ sixmr &= ~SIMR_SDM_MASK;
+
+ if (utdm->tdm_mode == TDM_INTERNAL_LOOPBACK)
+ sixmr |= SIMR_SDM_INTERNAL_LOOPBACK;
+ else
+ sixmr |= SIMR_SDM_NORMAL;
+
+ sixmr |= SIMR_RFSD(ut_info->si_info.simr_rfsd) |
+ SIMR_TFSD(ut_info->si_info.simr_tfsd);
+
+ if (ut_info->si_info.simr_crt)
+ sixmr |= SIMR_CRT;
+ if (ut_info->si_info.simr_sl)
+ sixmr |= SIMR_SL;
+ if (ut_info->si_info.simr_ce)
+ sixmr |= SIMR_CE;
+ if (ut_info->si_info.simr_fe)
+ sixmr |= SIMR_FE;
+ if (ut_info->si_info.simr_gm)
+ sixmr |= SIMR_GM;
+
+ if (tdm_port < 4)
+ iowrite16be(sixmr, &si_regs->sixmr1[tdm_port]);
+ else
+ pr_err("QE-TDM: cannot find the TDM SIxMR register\n");
+}
+EXPORT_SYMBOL(ucc_tdm_init);
diff --git a/drivers/soc/fsl/qe/ucc.c b/drivers/soc/fsl/qe/ucc.c
new file mode 100644
index 000000000..681f7d4b7
--- /dev/null
+++ b/drivers/soc/fsl/qe/ucc.c
@@ -0,0 +1,662 @@
+/*
+ * arch/powerpc/sysdev/qe_lib/ucc.c
+ *
+ * QE UCC API Set - UCC specific routines implementations.
+ *
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors: Shlomi Gridish <gridish@freescale.com>
+ * Li Yang <leoli@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/stddef.h>
+#include <linux/spinlock.h>
+#include <linux/export.h>
+
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+#include <soc/fsl/qe/ucc.h>
+
+#define UCC_TDM_NUM 8
+#define RX_SYNC_SHIFT_BASE 30
+#define TX_SYNC_SHIFT_BASE 14
+#define RX_CLK_SHIFT_BASE 28
+#define TX_CLK_SHIFT_BASE 12
+
+int ucc_set_qe_mux_mii_mng(unsigned int ucc_num)
+{
+ unsigned long flags;
+
+ if (ucc_num > UCC_MAX_NUM - 1)
+ return -EINVAL;
+
+ spin_lock_irqsave(&cmxgcr_lock, flags);
+ clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
+ ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT);
+ spin_unlock_irqrestore(&cmxgcr_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(ucc_set_qe_mux_mii_mng);
+
+/* Configure the UCC to either Slow or Fast.
+ *
+ * A given UCC can be configured to support either "slow" devices (e.g. UART)
+ * or "fast" devices (e.g. Ethernet).
+ *
+ * 'ucc_num' is the UCC number, from 0 - 7.
+ *
+ * This function also sets the UCC_GUEMR_SET_RESERVED3 bit because that bit
+ * must always be set to 1.
+ */
+int ucc_set_type(unsigned int ucc_num, enum ucc_speed_type speed)
+{
+ u8 __iomem *guemr;
+
+ /*
+ * The GUEMR register is at the same location for both slow and
+ * fast devices, so we just use uccX.slow.guemr.
+ */
+ switch (ucc_num) {
+ case 0: guemr = &qe_immr->ucc1.slow.guemr;
+ break;
+ case 1: guemr = &qe_immr->ucc2.slow.guemr;
+ break;
+ case 2: guemr = &qe_immr->ucc3.slow.guemr;
+ break;
+ case 3: guemr = &qe_immr->ucc4.slow.guemr;
+ break;
+ case 4: guemr = &qe_immr->ucc5.slow.guemr;
+ break;
+ case 5: guemr = &qe_immr->ucc6.slow.guemr;
+ break;
+ case 6: guemr = &qe_immr->ucc7.slow.guemr;
+ break;
+ case 7: guemr = &qe_immr->ucc8.slow.guemr;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK,
+ UCC_GUEMR_SET_RESERVED3 | speed);
+
+ return 0;
+}
+
+static void get_cmxucr_reg(unsigned int ucc_num, __be32 __iomem **cmxucr,
+ unsigned int *reg_num, unsigned int *shift)
+{
+ unsigned int cmx = ((ucc_num & 1) << 1) + (ucc_num > 3);
+
+ *reg_num = cmx + 1;
+ *cmxucr = &qe_immr->qmx.cmxucr[cmx];
+ *shift = 16 - 8 * (ucc_num & 2);
+}
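+
+/*
+ * Worked example (illustrative): the arithmetic above pairs UCC1/UCC3
+ * (ucc_num 0/2) in CMXUCR1, UCC5/UCC7 in CMXUCR2, UCC2/UCC4 in CMXUCR3
+ * and UCC6/UCC8 in CMXUCR4; the first UCC of each pair is configured in
+ * the upper half-word (shift 16), the second in the lower (shift 0).
+ */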
+
+int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask)
+{
+ __be32 __iomem *cmxucr;
+ unsigned int reg_num;
+ unsigned int shift;
+
+ /* check if the UCC number is in range. */
+ if (ucc_num > UCC_MAX_NUM - 1)
+ return -EINVAL;
+
+ get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
+
+ if (set)
+ setbits32(cmxucr, mask << shift);
+ else
+ clrbits32(cmxucr, mask << shift);
+
+ return 0;
+}
+
+int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock,
+ enum comm_dir mode)
+{
+ __be32 __iomem *cmxucr;
+ unsigned int reg_num;
+ unsigned int shift;
+ u32 clock_bits = 0;
+
+ /* check if the UCC number is in range. */
+ if (ucc_num > UCC_MAX_NUM - 1)
+ return -EINVAL;
+
+ /* The communications direction must be RX or TX */
+ if (mode != COMM_DIR_RX && mode != COMM_DIR_TX)
+ return -EINVAL;
+
+ get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
+
+ switch (reg_num) {
+ case 1:
+ switch (clock) {
+ case QE_BRG1: clock_bits = 1; break;
+ case QE_BRG2: clock_bits = 2; break;
+ case QE_BRG7: clock_bits = 3; break;
+ case QE_BRG8: clock_bits = 4; break;
+ case QE_CLK9: clock_bits = 5; break;
+ case QE_CLK10: clock_bits = 6; break;
+ case QE_CLK11: clock_bits = 7; break;
+ case QE_CLK12: clock_bits = 8; break;
+ case QE_CLK15: clock_bits = 9; break;
+ case QE_CLK16: clock_bits = 10; break;
+ default: break;
+ }
+ break;
+ case 2:
+ switch (clock) {
+ case QE_BRG5: clock_bits = 1; break;
+ case QE_BRG6: clock_bits = 2; break;
+ case QE_BRG7: clock_bits = 3; break;
+ case QE_BRG8: clock_bits = 4; break;
+ case QE_CLK13: clock_bits = 5; break;
+ case QE_CLK14: clock_bits = 6; break;
+ case QE_CLK19: clock_bits = 7; break;
+ case QE_CLK20: clock_bits = 8; break;
+ case QE_CLK15: clock_bits = 9; break;
+ case QE_CLK16: clock_bits = 10; break;
+ default: break;
+ }
+ break;
+ case 3:
+ switch (clock) {
+ case QE_BRG9: clock_bits = 1; break;
+ case QE_BRG10: clock_bits = 2; break;
+ case QE_BRG15: clock_bits = 3; break;
+ case QE_BRG16: clock_bits = 4; break;
+ case QE_CLK3: clock_bits = 5; break;
+ case QE_CLK4: clock_bits = 6; break;
+ case QE_CLK17: clock_bits = 7; break;
+ case QE_CLK18: clock_bits = 8; break;
+ case QE_CLK7: clock_bits = 9; break;
+ case QE_CLK8: clock_bits = 10; break;
+ case QE_CLK16: clock_bits = 11; break;
+ default: break;
+ }
+ break;
+ case 4:
+ switch (clock) {
+ case QE_BRG13: clock_bits = 1; break;
+ case QE_BRG14: clock_bits = 2; break;
+ case QE_BRG15: clock_bits = 3; break;
+ case QE_BRG16: clock_bits = 4; break;
+ case QE_CLK5: clock_bits = 5; break;
+ case QE_CLK6: clock_bits = 6; break;
+ case QE_CLK21: clock_bits = 7; break;
+ case QE_CLK22: clock_bits = 8; break;
+ case QE_CLK7: clock_bits = 9; break;
+ case QE_CLK8: clock_bits = 10; break;
+ case QE_CLK16: clock_bits = 11; break;
+ default: break;
+ }
+ break;
+ default: break;
+ }
+
+ /* Check for invalid combination of clock and UCC number */
+ if (!clock_bits)
+ return -ENOENT;
+
+ if (mode == COMM_DIR_RX)
+ shift += 4;
+
+ clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+ clock_bits << shift);
+
+ return 0;
+}
+
+static int ucc_get_tdm_common_clk(u32 tdm_num, enum qe_clock clock)
+{
+ int clock_bits = -EINVAL;
+
+ /*
+ * for TDM[0, 1, 2, 3], TX and RX use common
+ * clock source BRG3,4 and CLK1,2
+ * for TDM[4, 5, 6, 7], TX and RX use common
+ * clock source BRG12,13 and CLK23,24
+ */
+ switch (tdm_num) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ switch (clock) {
+ case QE_BRG3:
+ clock_bits = 1;
+ break;
+ case QE_BRG4:
+ clock_bits = 2;
+ break;
+ case QE_CLK1:
+ clock_bits = 4;
+ break;
+ case QE_CLK2:
+ clock_bits = 5;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ switch (clock) {
+ case QE_BRG12:
+ clock_bits = 1;
+ break;
+ case QE_BRG13:
+ clock_bits = 2;
+ break;
+ case QE_CLK23:
+ clock_bits = 4;
+ break;
+ case QE_CLK24:
+ clock_bits = 5;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return clock_bits;
+}
+
+static int ucc_get_tdm_rx_clk(u32 tdm_num, enum qe_clock clock)
+{
+ int clock_bits = -EINVAL;
+
+ switch (tdm_num) {
+ case 0:
+ switch (clock) {
+ case QE_CLK3:
+ clock_bits = 6;
+ break;
+ case QE_CLK8:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 1:
+ switch (clock) {
+ case QE_CLK5:
+ clock_bits = 6;
+ break;
+ case QE_CLK10:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 2:
+ switch (clock) {
+ case QE_CLK7:
+ clock_bits = 6;
+ break;
+ case QE_CLK12:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 3:
+ switch (clock) {
+ case QE_CLK9:
+ clock_bits = 6;
+ break;
+ case QE_CLK14:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 4:
+ switch (clock) {
+ case QE_CLK11:
+ clock_bits = 6;
+ break;
+ case QE_CLK16:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 5:
+ switch (clock) {
+ case QE_CLK13:
+ clock_bits = 6;
+ break;
+ case QE_CLK18:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 6:
+ switch (clock) {
+ case QE_CLK15:
+ clock_bits = 6;
+ break;
+ case QE_CLK20:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 7:
+ switch (clock) {
+ case QE_CLK17:
+ clock_bits = 6;
+ break;
+ case QE_CLK22:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+
+ return clock_bits;
+}
+
+static int ucc_get_tdm_tx_clk(u32 tdm_num, enum qe_clock clock)
+{
+ int clock_bits = -EINVAL;
+
+ switch (tdm_num) {
+ case 0:
+ switch (clock) {
+ case QE_CLK4:
+ clock_bits = 6;
+ break;
+ case QE_CLK9:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 1:
+ switch (clock) {
+ case QE_CLK6:
+ clock_bits = 6;
+ break;
+ case QE_CLK11:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 2:
+ switch (clock) {
+ case QE_CLK8:
+ clock_bits = 6;
+ break;
+ case QE_CLK13:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 3:
+ switch (clock) {
+ case QE_CLK10:
+ clock_bits = 6;
+ break;
+ case QE_CLK15:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 4:
+ switch (clock) {
+ case QE_CLK12:
+ clock_bits = 6;
+ break;
+ case QE_CLK17:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 5:
+ switch (clock) {
+ case QE_CLK14:
+ clock_bits = 6;
+ break;
+ case QE_CLK19:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 6:
+ switch (clock) {
+ case QE_CLK16:
+ clock_bits = 6;
+ break;
+ case QE_CLK21:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 7:
+ switch (clock) {
+ case QE_CLK18:
+ clock_bits = 6;
+ break;
+ case QE_CLK3:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+
+ return clock_bits;
+}
+
+/* tdm_num: TDM port number; ports A-H map to 0-7 */
+static int ucc_get_tdm_rxtx_clk(enum comm_dir mode, u32 tdm_num,
+ enum qe_clock clock)
+{
+ int clock_bits;
+
+ clock_bits = ucc_get_tdm_common_clk(tdm_num, clock);
+ if (clock_bits > 0)
+ return clock_bits;
+ if (mode == COMM_DIR_RX)
+ clock_bits = ucc_get_tdm_rx_clk(tdm_num, clock);
+ if (mode == COMM_DIR_TX)
+ clock_bits = ucc_get_tdm_tx_clk(tdm_num, clock);
+ return clock_bits;
+}
+
+static u32 ucc_get_tdm_clk_shift(enum comm_dir mode, u32 tdm_num)
+{
+ u32 shift;
+
+ shift = (mode == COMM_DIR_RX) ? RX_CLK_SHIFT_BASE : TX_CLK_SHIFT_BASE;
+ if (tdm_num < 4)
+ shift -= tdm_num * 4;
+ else
+ shift -= (tdm_num - 4) * 4;
+
+ return shift;
+}
+
+int ucc_set_tdm_rxtx_clk(u32 tdm_num, enum qe_clock clock,
+ enum comm_dir mode)
+{
+ int clock_bits;
+ u32 shift;
+ struct qe_mux __iomem *qe_mux_reg;
+ __be32 __iomem *cmxs1cr;
+
+ qe_mux_reg = &qe_immr->qmx;
+
+ if (tdm_num > 7 || tdm_num < 0)
+ return -EINVAL;
+
+ /* The communications direction must be RX or TX */
+ if (mode != COMM_DIR_RX && mode != COMM_DIR_TX)
+ return -EINVAL;
+
+ clock_bits = ucc_get_tdm_rxtx_clk(mode, tdm_num, clock);
+ if (clock_bits < 0)
+ return -EINVAL;
+
+ shift = ucc_get_tdm_clk_shift(mode, tdm_num);
+
+ cmxs1cr = (tdm_num < 4) ? &qe_mux_reg->cmxsi1cr_l :
+ &qe_mux_reg->cmxsi1cr_h;
+
+ qe_clrsetbits32(cmxs1cr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+ clock_bits << shift);
+
+ return 0;
+}
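+
+/*
+ * Illustrative call (hypothetical values): ucc_set_tdm_rxtx_clk(0,
+ * QE_CLK3, COMM_DIR_RX) selects CLK3 (clock_bits 6) as the TDMA receive
+ * clock by writing the selector field at shift 28 of CMXSI1CR_L.
+ */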
+
+static int ucc_get_tdm_sync_source(u32 tdm_num, enum qe_clock clock,
+ enum comm_dir mode)
+{
+ int source = -EINVAL;
+
+ if (mode == COMM_DIR_RX && clock == QE_RSYNC_PIN) {
+ source = 0;
+ return source;
+ }
+ if (mode == COMM_DIR_TX && clock == QE_TSYNC_PIN) {
+ source = 0;
+ return source;
+ }
+
+ switch (tdm_num) {
+ case 0:
+ case 1:
+ switch (clock) {
+ case QE_BRG9:
+ source = 1;
+ break;
+ case QE_BRG10:
+ source = 2;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 2:
+ case 3:
+ switch (clock) {
+ case QE_BRG9:
+ source = 1;
+ break;
+ case QE_BRG11:
+ source = 2;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 4:
+ case 5:
+ switch (clock) {
+ case QE_BRG13:
+ source = 1;
+ break;
+ case QE_BRG14:
+ source = 2;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 6:
+ case 7:
+ switch (clock) {
+ case QE_BRG13:
+ source = 1;
+ break;
+ case QE_BRG15:
+ source = 2;
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+
+ return source;
+}
+
+static u32 ucc_get_tdm_sync_shift(enum comm_dir mode, u32 tdm_num)
+{
+ u32 shift;
+
+ shift = (mode == COMM_DIR_RX) ? RX_SYNC_SHIFT_BASE : TX_SYNC_SHIFT_BASE;
+ shift -= tdm_num * 2;
+
+ return shift;
+}
+
+int ucc_set_tdm_rxtx_sync(u32 tdm_num, enum qe_clock clock,
+ enum comm_dir mode)
+{
+ int source;
+ u32 shift;
+ struct qe_mux *qe_mux_reg;
+
+ qe_mux_reg = &qe_immr->qmx;
+
+ if (tdm_num >= UCC_TDM_NUM)
+ return -EINVAL;
+
+ /* The communications direction must be RX or TX */
+ if (mode != COMM_DIR_RX && mode != COMM_DIR_TX)
+ return -EINVAL;
+
+ source = ucc_get_tdm_sync_source(tdm_num, clock, mode);
+ if (source < 0)
+ return -EINVAL;
+
+ shift = ucc_get_tdm_sync_shift(mode, tdm_num);
+
+ qe_clrsetbits32(&qe_mux_reg->cmxsi1syr,
+ QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+ source << shift);
+
+ return 0;
+}
diff --git a/drivers/soc/fsl/qe/ucc_fast.c b/drivers/soc/fsl/qe/ucc_fast.c
new file mode 100644
index 000000000..83d8d16e3
--- /dev/null
+++ b/drivers/soc/fsl/qe/ucc_fast.c
@@ -0,0 +1,399 @@
+/*
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors: Shlomi Gridish <gridish@freescale.com>
+ * Li Yang <leoli@freescale.com>
+ *
+ * Description:
+ * QE UCC Fast API Set - UCC Fast specific routines implementations.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/export.h>
+
+#include <asm/io.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+
+#include <soc/fsl/qe/ucc.h>
+#include <soc/fsl/qe/ucc_fast.h>
+
+void ucc_fast_dump_regs(struct ucc_fast_private *uccf)
+{
+ printk(KERN_INFO "UCC%u Fast registers:\n", uccf->uf_info->ucc_num);
+ printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs);
+
+ printk(KERN_INFO "gumr : addr=0x%p, val=0x%08x\n",
+ &uccf->uf_regs->gumr, in_be32(&uccf->uf_regs->gumr));
+ printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n",
+ &uccf->uf_regs->upsmr, in_be32(&uccf->uf_regs->upsmr));
+ printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->utodr, in_be16(&uccf->uf_regs->utodr));
+ printk(KERN_INFO "udsr : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->udsr, in_be16(&uccf->uf_regs->udsr));
+ printk(KERN_INFO "ucce : addr=0x%p, val=0x%08x\n",
+ &uccf->uf_regs->ucce, in_be32(&uccf->uf_regs->ucce));
+ printk(KERN_INFO "uccm : addr=0x%p, val=0x%08x\n",
+ &uccf->uf_regs->uccm, in_be32(&uccf->uf_regs->uccm));
+ printk(KERN_INFO "uccs : addr=0x%p, val=0x%02x\n",
+ &uccf->uf_regs->uccs, in_8(&uccf->uf_regs->uccs));
+ printk(KERN_INFO "urfb : addr=0x%p, val=0x%08x\n",
+ &uccf->uf_regs->urfb, in_be32(&uccf->uf_regs->urfb));
+ printk(KERN_INFO "urfs : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->urfs, in_be16(&uccf->uf_regs->urfs));
+ printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->urfet, in_be16(&uccf->uf_regs->urfet));
+ printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->urfset, in_be16(&uccf->uf_regs->urfset));
+ printk(KERN_INFO "utfb : addr=0x%p, val=0x%08x\n",
+ &uccf->uf_regs->utfb, in_be32(&uccf->uf_regs->utfb));
+ printk(KERN_INFO "utfs : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->utfs, in_be16(&uccf->uf_regs->utfs));
+ printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->utfet, in_be16(&uccf->uf_regs->utfet));
+ printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->utftt, in_be16(&uccf->uf_regs->utftt));
+ printk(KERN_INFO "utpt : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->utpt, in_be16(&uccf->uf_regs->utpt));
+ printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n",
+ &uccf->uf_regs->urtry, in_be32(&uccf->uf_regs->urtry));
+ printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n",
+ &uccf->uf_regs->guemr, in_8(&uccf->uf_regs->guemr));
+}
+EXPORT_SYMBOL(ucc_fast_dump_regs);
+
+u32 ucc_fast_get_qe_cr_subblock(int uccf_num)
+{
+ switch (uccf_num) {
+ case 0: return QE_CR_SUBBLOCK_UCCFAST1;
+ case 1: return QE_CR_SUBBLOCK_UCCFAST2;
+ case 2: return QE_CR_SUBBLOCK_UCCFAST3;
+ case 3: return QE_CR_SUBBLOCK_UCCFAST4;
+ case 4: return QE_CR_SUBBLOCK_UCCFAST5;
+ case 5: return QE_CR_SUBBLOCK_UCCFAST6;
+ case 6: return QE_CR_SUBBLOCK_UCCFAST7;
+ case 7: return QE_CR_SUBBLOCK_UCCFAST8;
+ default: return QE_CR_SUBBLOCK_INVALID;
+ }
+}
+EXPORT_SYMBOL(ucc_fast_get_qe_cr_subblock);
+
+void ucc_fast_transmit_on_demand(struct ucc_fast_private *uccf)
+{
+ out_be16(&uccf->uf_regs->utodr, UCC_FAST_TOD);
+}
+EXPORT_SYMBOL(ucc_fast_transmit_on_demand);
+
+void ucc_fast_enable(struct ucc_fast_private *uccf, enum comm_dir mode)
+{
+ struct ucc_fast __iomem *uf_regs;
+ u32 gumr;
+
+ uf_regs = uccf->uf_regs;
+
+ /* Enable reception and/or transmission on this UCC. */
+ gumr = in_be32(&uf_regs->gumr);
+ if (mode & COMM_DIR_TX) {
+ gumr |= UCC_FAST_GUMR_ENT;
+ uccf->enabled_tx = 1;
+ }
+ if (mode & COMM_DIR_RX) {
+ gumr |= UCC_FAST_GUMR_ENR;
+ uccf->enabled_rx = 1;
+ }
+ out_be32(&uf_regs->gumr, gumr);
+}
+EXPORT_SYMBOL(ucc_fast_enable);
+
+void ucc_fast_disable(struct ucc_fast_private *uccf, enum comm_dir mode)
+{
+ struct ucc_fast __iomem *uf_regs;
+ u32 gumr;
+
+ uf_regs = uccf->uf_regs;
+
+ /* Disable reception and/or transmission on this UCC. */
+ gumr = in_be32(&uf_regs->gumr);
+ if (mode & COMM_DIR_TX) {
+ gumr &= ~UCC_FAST_GUMR_ENT;
+ uccf->enabled_tx = 0;
+ }
+ if (mode & COMM_DIR_RX) {
+ gumr &= ~UCC_FAST_GUMR_ENR;
+ uccf->enabled_rx = 0;
+ }
+ out_be32(&uf_regs->gumr, gumr);
+}
+EXPORT_SYMBOL(ucc_fast_disable);
+
+int ucc_fast_init(struct ucc_fast_info *uf_info, struct ucc_fast_private **uccf_ret)
+{
+ struct ucc_fast_private *uccf;
+ struct ucc_fast __iomem *uf_regs;
+ u32 gumr;
+ int ret;
+
+ if (!uf_info)
+ return -EINVAL;
+
+ /* check if the UCC port number is in range. */
+ if ((uf_info->ucc_num < 0) || (uf_info->ucc_num > UCC_MAX_NUM - 1)) {
+ printk(KERN_ERR "%s: illegal UCC number\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Check that 'max_rx_buf_length' is properly aligned (4). */
+ if (uf_info->max_rx_buf_length & (UCC_FAST_MRBLR_ALIGNMENT - 1)) {
+ printk(KERN_ERR "%s: max_rx_buf_length not aligned\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Validate Virtual Fifo register values */
+ if (uf_info->urfs < UCC_FAST_URFS_MIN_VAL) {
+ printk(KERN_ERR "%s: urfs is too small\n", __func__);
+ return -EINVAL;
+ }
+
+ if (uf_info->urfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
+ printk(KERN_ERR "%s: urfs is not aligned\n", __func__);
+ return -EINVAL;
+ }
+
+ if (uf_info->urfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
+ printk(KERN_ERR "%s: urfet is not aligned.\n", __func__);
+ return -EINVAL;
+ }
+
+ if (uf_info->urfset & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
+ printk(KERN_ERR "%s: urfset is not aligned\n", __func__);
+ return -EINVAL;
+ }
+
+ if (uf_info->utfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
+ printk(KERN_ERR "%s: utfs is not aligned\n", __func__);
+ return -EINVAL;
+ }
+
+ if (uf_info->utfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
+ printk(KERN_ERR "%s: utfet is not aligned\n", __func__);
+ return -EINVAL;
+ }
+
+ if (uf_info->utftt & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
+ printk(KERN_ERR "%s: utftt is not aligned\n", __func__);
+ return -EINVAL;
+ }
+
+ uccf = kzalloc(sizeof(struct ucc_fast_private), GFP_KERNEL);
+ if (!uccf) {
+ printk(KERN_ERR "%s: Cannot allocate private data\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ /* Fill fast UCC structure */
+ uccf->uf_info = uf_info;
+ /* Map the UCC register block */
+ uccf->uf_regs = ioremap(uf_info->regs, sizeof(struct ucc_fast));
+ if (uccf->uf_regs == NULL) {
+ printk(KERN_ERR "%s: Cannot map UCC registers\n", __func__);
+ kfree(uccf);
+ return -ENOMEM;
+ }
+
+ uccf->enabled_tx = 0;
+ uccf->enabled_rx = 0;
+ uccf->stopped_tx = 0;
+ uccf->stopped_rx = 0;
+ uf_regs = uccf->uf_regs;
+ uccf->p_ucce = &uf_regs->ucce;
+ uccf->p_uccm = &uf_regs->uccm;
+#ifdef CONFIG_UGETH_TX_ON_DEMAND
+ uccf->p_utodr = &uf_regs->utodr;
+#endif
+#ifdef STATISTICS
+ uccf->tx_frames = 0;
+ uccf->rx_frames = 0;
+ uccf->rx_discarded = 0;
+#endif /* STATISTICS */
+
+ /* Set UCC to fast type */
+ ret = ucc_set_type(uf_info->ucc_num, UCC_SPEED_TYPE_FAST);
+ if (ret) {
+ printk(KERN_ERR "%s: cannot set UCC type\n", __func__);
+ ucc_fast_free(uccf);
+ return ret;
+ }
+
+ uccf->mrblr = uf_info->max_rx_buf_length;
+
+ /* Set GUMR */
+ /* For more details see the hardware spec. */
+ gumr = uf_info->ttx_trx;
+ if (uf_info->tci)
+ gumr |= UCC_FAST_GUMR_TCI;
+ if (uf_info->cdp)
+ gumr |= UCC_FAST_GUMR_CDP;
+ if (uf_info->ctsp)
+ gumr |= UCC_FAST_GUMR_CTSP;
+ if (uf_info->cds)
+ gumr |= UCC_FAST_GUMR_CDS;
+ if (uf_info->ctss)
+ gumr |= UCC_FAST_GUMR_CTSS;
+ if (uf_info->txsy)
+ gumr |= UCC_FAST_GUMR_TXSY;
+ if (uf_info->rsyn)
+ gumr |= UCC_FAST_GUMR_RSYN;
+ gumr |= uf_info->synl;
+ if (uf_info->rtsm)
+ gumr |= UCC_FAST_GUMR_RTSM;
+ gumr |= uf_info->renc;
+ if (uf_info->revd)
+ gumr |= UCC_FAST_GUMR_REVD;
+ gumr |= uf_info->tenc;
+ gumr |= uf_info->tcrc;
+ gumr |= uf_info->mode;
+ out_be32(&uf_regs->gumr, gumr);
+
+ /* Allocate memory for Tx Virtual Fifo */
+ uccf->ucc_fast_tx_virtual_fifo_base_offset =
+ qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
+ if (IS_ERR_VALUE(uccf->ucc_fast_tx_virtual_fifo_base_offset)) {
+ printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO\n",
+ __func__);
+ uccf->ucc_fast_tx_virtual_fifo_base_offset = 0;
+ ucc_fast_free(uccf);
+ return -ENOMEM;
+ }
+
+ /* Allocate memory for Rx Virtual Fifo */
+ uccf->ucc_fast_rx_virtual_fifo_base_offset =
+ qe_muram_alloc(uf_info->urfs +
+ UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR,
+ UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
+ if (IS_ERR_VALUE(uccf->ucc_fast_rx_virtual_fifo_base_offset)) {
+ printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO\n",
+ __func__);
+ uccf->ucc_fast_rx_virtual_fifo_base_offset = 0;
+ ucc_fast_free(uccf);
+ return -ENOMEM;
+ }
+
+ /* Set Virtual Fifo registers */
+ out_be16(&uf_regs->urfs, uf_info->urfs);
+ out_be16(&uf_regs->urfet, uf_info->urfet);
+ out_be16(&uf_regs->urfset, uf_info->urfset);
+ out_be16(&uf_regs->utfs, uf_info->utfs);
+ out_be16(&uf_regs->utfet, uf_info->utfet);
+ out_be16(&uf_regs->utftt, uf_info->utftt);
+ /* utfb, urfb are offsets from MURAM base */
+ out_be32(&uf_regs->utfb, uccf->ucc_fast_tx_virtual_fifo_base_offset);
+ out_be32(&uf_regs->urfb, uccf->ucc_fast_rx_virtual_fifo_base_offset);
+
+ /* Mux clocking */
+ /* Grant Support */
+ ucc_set_qe_mux_grant(uf_info->ucc_num, uf_info->grant_support);
+ /* Breakpoint Support */
+ ucc_set_qe_mux_bkpt(uf_info->ucc_num, uf_info->brkpt_support);
+ /* Set Tsa or NMSI mode. */
+ ucc_set_qe_mux_tsa(uf_info->ucc_num, uf_info->tsa);
+ /* If NMSI (not Tsa), set Tx and Rx clock. */
+ if (!uf_info->tsa) {
+ /* Rx clock routing */
+ if ((uf_info->rx_clock != QE_CLK_NONE) &&
+ ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->rx_clock,
+ COMM_DIR_RX)) {
+ printk(KERN_ERR "%s: illegal value for RX clock\n",
+ __func__);
+ ucc_fast_free(uccf);
+ return -EINVAL;
+ }
+ /* Tx clock routing */
+ if ((uf_info->tx_clock != QE_CLK_NONE) &&
+ ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->tx_clock,
+ COMM_DIR_TX)) {
+ printk(KERN_ERR "%s: illegal value for TX clock\n",
+ __func__);
+ ucc_fast_free(uccf);
+ return -EINVAL;
+ }
+ } else {
+ /* tdm Rx clock routing */
+ if ((uf_info->rx_clock != QE_CLK_NONE) &&
+ ucc_set_tdm_rxtx_clk(uf_info->tdm_num, uf_info->rx_clock,
+ COMM_DIR_RX)) {
+ pr_err("%s: illegal value for RX clock", __func__);
+ ucc_fast_free(uccf);
+ return -EINVAL;
+ }
+
+ /* tdm Tx clock routing */
+ if ((uf_info->tx_clock != QE_CLK_NONE) &&
+ ucc_set_tdm_rxtx_clk(uf_info->tdm_num, uf_info->tx_clock,
+ COMM_DIR_TX)) {
+ pr_err("%s: illegal value for TX clock", __func__);
+ ucc_fast_free(uccf);
+ return -EINVAL;
+ }
+
+ /* tdm Rx sync clock routing */
+ if ((uf_info->rx_sync != QE_CLK_NONE) &&
+ ucc_set_tdm_rxtx_sync(uf_info->tdm_num, uf_info->rx_sync,
+ COMM_DIR_RX)) {
+ pr_err("%s: illegal value for RX clock", __func__);
+ ucc_fast_free(uccf);
+ return -EINVAL;
+ }
+
+ /* tdm Tx sync clock routing */
+ if ((uf_info->tx_sync != QE_CLK_NONE) &&
+ ucc_set_tdm_rxtx_sync(uf_info->tdm_num, uf_info->tx_sync,
+ COMM_DIR_TX)) {
+ pr_err("%s: illegal value for TX clock", __func__);
+ ucc_fast_free(uccf);
+ return -EINVAL;
+ }
+ }
+
+ /* Set interrupt mask register at UCC level. */
+ out_be32(&uf_regs->uccm, uf_info->uccm_mask);
+
+ /*
+ * First, clear anything pending at UCC level; otherwise, old
+ * garbage may come through as soon as the dam is opened.
+ */
+
+ /* Writing '1' clears */
+ out_be32(&uf_regs->ucce, 0xffffffff);
+
+ *uccf_ret = uccf;
+ return 0;
+}
+EXPORT_SYMBOL(ucc_fast_init);
+
+void ucc_fast_free(struct ucc_fast_private *uccf)
+{
+ if (!uccf)
+ return;
+
+ if (uccf->ucc_fast_tx_virtual_fifo_base_offset)
+ qe_muram_free(uccf->ucc_fast_tx_virtual_fifo_base_offset);
+
+ if (uccf->ucc_fast_rx_virtual_fifo_base_offset)
+ qe_muram_free(uccf->ucc_fast_rx_virtual_fifo_base_offset);
+
+ if (uccf->uf_regs)
+ iounmap(uccf->uf_regs);
+
+ kfree(uccf);
+}
+EXPORT_SYMBOL(ucc_fast_free);
diff --git a/drivers/soc/fsl/qe/ucc_slow.c b/drivers/soc/fsl/qe/ucc_slow.c
new file mode 100644
index 000000000..9334bdbd9
--- /dev/null
+++ b/drivers/soc/fsl/qe/ucc_slow.c
@@ -0,0 +1,374 @@
+/*
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors: Shlomi Gridish <gridish@freescale.com>
+ * Li Yang <leoli@freescale.com>
+ *
+ * Description:
+ * QE UCC Slow API Set - UCC Slow specific routines implementations.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/export.h>
+
+#include <asm/io.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+
+#include <soc/fsl/qe/ucc.h>
+#include <soc/fsl/qe/ucc_slow.h>
+
+u32 ucc_slow_get_qe_cr_subblock(int uccs_num)
+{
+ switch (uccs_num) {
+ case 0: return QE_CR_SUBBLOCK_UCCSLOW1;
+ case 1: return QE_CR_SUBBLOCK_UCCSLOW2;
+ case 2: return QE_CR_SUBBLOCK_UCCSLOW3;
+ case 3: return QE_CR_SUBBLOCK_UCCSLOW4;
+ case 4: return QE_CR_SUBBLOCK_UCCSLOW5;
+ case 5: return QE_CR_SUBBLOCK_UCCSLOW6;
+ case 6: return QE_CR_SUBBLOCK_UCCSLOW7;
+ case 7: return QE_CR_SUBBLOCK_UCCSLOW8;
+ default: return QE_CR_SUBBLOCK_INVALID;
+ }
+}
+EXPORT_SYMBOL(ucc_slow_get_qe_cr_subblock);
+
+void ucc_slow_graceful_stop_tx(struct ucc_slow_private *uccs)
+{
+ struct ucc_slow_info *us_info = uccs->us_info;
+ u32 id;
+
+ id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
+ qe_issue_cmd(QE_GRACEFUL_STOP_TX, id,
+ QE_CR_PROTOCOL_UNSPECIFIED, 0);
+}
+EXPORT_SYMBOL(ucc_slow_graceful_stop_tx);
+
+void ucc_slow_stop_tx(struct ucc_slow_private *uccs)
+{
+ struct ucc_slow_info *us_info = uccs->us_info;
+ u32 id;
+
+ id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
+ qe_issue_cmd(QE_STOP_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
+}
+EXPORT_SYMBOL(ucc_slow_stop_tx);
+
+void ucc_slow_restart_tx(struct ucc_slow_private *uccs)
+{
+ struct ucc_slow_info *us_info = uccs->us_info;
+ u32 id;
+
+ id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
+ qe_issue_cmd(QE_RESTART_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
+}
+EXPORT_SYMBOL(ucc_slow_restart_tx);
+
+void ucc_slow_enable(struct ucc_slow_private *uccs, enum comm_dir mode)
+{
+ struct ucc_slow *us_regs;
+ u32 gumr_l;
+
+ us_regs = uccs->us_regs;
+
+ /* Enable reception and/or transmission on this UCC. */
+ gumr_l = in_be32(&us_regs->gumr_l);
+ if (mode & COMM_DIR_TX) {
+ gumr_l |= UCC_SLOW_GUMR_L_ENT;
+ uccs->enabled_tx = 1;
+ }
+ if (mode & COMM_DIR_RX) {
+ gumr_l |= UCC_SLOW_GUMR_L_ENR;
+ uccs->enabled_rx = 1;
+ }
+ out_be32(&us_regs->gumr_l, gumr_l);
+}
+EXPORT_SYMBOL(ucc_slow_enable);
+
+void ucc_slow_disable(struct ucc_slow_private *uccs, enum comm_dir mode)
+{
+ struct ucc_slow *us_regs;
+ u32 gumr_l;
+
+ us_regs = uccs->us_regs;
+
+ /* Disable reception and/or transmission on this UCC. */
+ gumr_l = in_be32(&us_regs->gumr_l);
+ if (mode & COMM_DIR_TX) {
+ gumr_l &= ~UCC_SLOW_GUMR_L_ENT;
+ uccs->enabled_tx = 0;
+ }
+ if (mode & COMM_DIR_RX) {
+ gumr_l &= ~UCC_SLOW_GUMR_L_ENR;
+ uccs->enabled_rx = 0;
+ }
+ out_be32(&us_regs->gumr_l, gumr_l);
+}
+EXPORT_SYMBOL(ucc_slow_disable);
+
+/* Initialize the UCC for slow operations.
+ *
+ * The caller is expected to have filled in the relevant fields of
+ * us_info before calling this routine.
+ */
+int ucc_slow_init(struct ucc_slow_info *us_info, struct ucc_slow_private **uccs_ret)
+{
+ struct ucc_slow_private *uccs;
+ u32 i;
+ struct ucc_slow __iomem *us_regs;
+ u32 gumr;
+ struct qe_bd *bd;
+ u32 id;
+ u32 command;
+ int ret = 0;
+
+ if (!us_info)
+ return -EINVAL;
+
+ /* check if the UCC port number is in range. */
+ if ((us_info->ucc_num < 0) || (us_info->ucc_num > UCC_MAX_NUM - 1)) {
+ printk(KERN_ERR "%s: illegal UCC number\n", __func__);
+ return -EINVAL;
+ }
+
+	/*
+	 * Set mrblr.
+	 * Check that 'max_rx_buf_length' is properly aligned (to 4), unless
+	 * rfw is 1, meaning that the QE accepts one byte at a time, unlike
+	 * the normal case when the QE accepts 32 bits at a time.
+	 */
+ if ((!us_info->rfw) &&
+ (us_info->max_rx_buf_length & (UCC_SLOW_MRBLR_ALIGNMENT - 1))) {
+ printk(KERN_ERR "max_rx_buf_length not aligned.\n");
+ return -EINVAL;
+ }
+
+ uccs = kzalloc(sizeof(struct ucc_slow_private), GFP_KERNEL);
+ if (!uccs) {
+ printk(KERN_ERR "%s: Cannot allocate private data\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ /* Fill slow UCC structure */
+ uccs->us_info = us_info;
+	/* Map the UCC Slow register block */
+ uccs->us_regs = ioremap(us_info->regs, sizeof(struct ucc_slow));
+ if (uccs->us_regs == NULL) {
+ printk(KERN_ERR "%s: Cannot map UCC registers\n", __func__);
+ kfree(uccs);
+ return -ENOMEM;
+ }
+
+ uccs->saved_uccm = 0;
+ uccs->p_rx_frame = 0;
+ us_regs = uccs->us_regs;
+	uccs->p_ucce = (u16 *)&us_regs->ucce;
+	uccs->p_uccm = (u16 *)&us_regs->uccm;
+#ifdef STATISTICS
+ uccs->rx_frames = 0;
+ uccs->tx_frames = 0;
+ uccs->rx_discarded = 0;
+#endif /* STATISTICS */
+
+ /* Get PRAM base */
+ uccs->us_pram_offset =
+ qe_muram_alloc(UCC_SLOW_PRAM_SIZE, ALIGNMENT_OF_UCC_SLOW_PRAM);
+ if (IS_ERR_VALUE(uccs->us_pram_offset)) {
+ printk(KERN_ERR "%s: cannot allocate MURAM for PRAM", __func__);
+ ucc_slow_free(uccs);
+ return -ENOMEM;
+ }
+ id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
+ qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, id, us_info->protocol,
+ uccs->us_pram_offset);
+
+ uccs->us_pram = qe_muram_addr(uccs->us_pram_offset);
+
+ /* Set UCC to slow type */
+ ret = ucc_set_type(us_info->ucc_num, UCC_SPEED_TYPE_SLOW);
+ if (ret) {
+ printk(KERN_ERR "%s: cannot set UCC type", __func__);
+ ucc_slow_free(uccs);
+ return ret;
+ }
+
+ out_be16(&uccs->us_pram->mrblr, us_info->max_rx_buf_length);
+
+ INIT_LIST_HEAD(&uccs->confQ);
+
+ /* Allocate BDs. */
+ uccs->rx_base_offset =
+ qe_muram_alloc(us_info->rx_bd_ring_len * sizeof(struct qe_bd),
+ QE_ALIGNMENT_OF_BD);
+ if (IS_ERR_VALUE(uccs->rx_base_offset)) {
+ printk(KERN_ERR "%s: cannot allocate %u RX BDs\n", __func__,
+ us_info->rx_bd_ring_len);
+ uccs->rx_base_offset = 0;
+ ucc_slow_free(uccs);
+ return -ENOMEM;
+ }
+
+ uccs->tx_base_offset =
+ qe_muram_alloc(us_info->tx_bd_ring_len * sizeof(struct qe_bd),
+ QE_ALIGNMENT_OF_BD);
+ if (IS_ERR_VALUE(uccs->tx_base_offset)) {
+ printk(KERN_ERR "%s: cannot allocate TX BDs", __func__);
+ uccs->tx_base_offset = 0;
+ ucc_slow_free(uccs);
+ return -ENOMEM;
+ }
+
+	/* Init Tx BDs */
+	bd = uccs->confBd = uccs->tx_bd = qe_muram_addr(uccs->tx_base_offset);
+	for (i = 0; i < us_info->tx_bd_ring_len - 1; i++) {
+		/* clear bd buffer */
+		out_be32(&bd->buf, 0);
+		/* set bd status and length */
+		out_be32((u32 *)bd, 0);
+		bd++;
+	}
+	/* for last BD set Wrap bit */
+	out_be32(&bd->buf, 0);
+	out_be32((u32 *)bd, cpu_to_be32(T_W));
+
+	/* Init Rx BDs */
+	bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset);
+	for (i = 0; i < us_info->rx_bd_ring_len - 1; i++) {
+		/* set bd status and length */
+		out_be32((u32 *)bd, 0);
+		/* clear bd buffer */
+		out_be32(&bd->buf, 0);
+		bd++;
+	}
+	/* for last BD set Wrap bit */
+	out_be32((u32 *)bd, cpu_to_be32(R_W));
+	out_be32(&bd->buf, 0);
+
+	/* Set GUMR (for more details, see the hardware spec). */
+ /* gumr_h */
+ gumr = us_info->tcrc;
+ if (us_info->cdp)
+ gumr |= UCC_SLOW_GUMR_H_CDP;
+ if (us_info->ctsp)
+ gumr |= UCC_SLOW_GUMR_H_CTSP;
+ if (us_info->cds)
+ gumr |= UCC_SLOW_GUMR_H_CDS;
+ if (us_info->ctss)
+ gumr |= UCC_SLOW_GUMR_H_CTSS;
+ if (us_info->tfl)
+ gumr |= UCC_SLOW_GUMR_H_TFL;
+ if (us_info->rfw)
+ gumr |= UCC_SLOW_GUMR_H_RFW;
+ if (us_info->txsy)
+ gumr |= UCC_SLOW_GUMR_H_TXSY;
+ if (us_info->rtsm)
+ gumr |= UCC_SLOW_GUMR_H_RTSM;
+ out_be32(&us_regs->gumr_h, gumr);
+
+ /* gumr_l */
+ gumr = us_info->tdcr | us_info->rdcr | us_info->tenc | us_info->renc |
+ us_info->diag | us_info->mode;
+ if (us_info->tci)
+ gumr |= UCC_SLOW_GUMR_L_TCI;
+ if (us_info->rinv)
+ gumr |= UCC_SLOW_GUMR_L_RINV;
+ if (us_info->tinv)
+ gumr |= UCC_SLOW_GUMR_L_TINV;
+ if (us_info->tend)
+ gumr |= UCC_SLOW_GUMR_L_TEND;
+ out_be32(&us_regs->gumr_l, gumr);
+
+ /* Function code registers */
+
+	/*
+	 * If the data is in cacheable memory, the 'global' bit in the
+	 * function code should be set.
+	 */
+ uccs->us_pram->tbmr = UCC_BMR_BO_BE;
+ uccs->us_pram->rbmr = UCC_BMR_BO_BE;
+
+ /* rbase, tbase are offsets from MURAM base */
+ out_be16(&uccs->us_pram->rbase, uccs->rx_base_offset);
+ out_be16(&uccs->us_pram->tbase, uccs->tx_base_offset);
+
+ /* Mux clocking */
+ /* Grant Support */
+ ucc_set_qe_mux_grant(us_info->ucc_num, us_info->grant_support);
+ /* Breakpoint Support */
+ ucc_set_qe_mux_bkpt(us_info->ucc_num, us_info->brkpt_support);
+	/* Set TSA or NMSI mode. */
+	ucc_set_qe_mux_tsa(us_info->ucc_num, us_info->tsa);
+	/* If NMSI (not TSA), set Tx and Rx clock. */
+ if (!us_info->tsa) {
+ /* Rx clock routing */
+ if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->rx_clock,
+ COMM_DIR_RX)) {
+ printk(KERN_ERR "%s: illegal value for RX clock\n",
+ __func__);
+ ucc_slow_free(uccs);
+ return -EINVAL;
+ }
+ /* Tx clock routing */
+ if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->tx_clock,
+ COMM_DIR_TX)) {
+ printk(KERN_ERR "%s: illegal value for TX clock\n",
+ __func__);
+ ucc_slow_free(uccs);
+ return -EINVAL;
+ }
+ }
+
+ /* Set interrupt mask register at UCC level. */
+ out_be16(&us_regs->uccm, us_info->uccm_mask);
+
+	/*
+	 * First, clear anything pending at UCC level; otherwise old garbage
+	 * may come through as soon as the dam is opened. Writing '1' clears.
+	 */
+ out_be16(&us_regs->ucce, 0xffff);
+
+ /* Issue QE Init command */
+ if (us_info->init_tx && us_info->init_rx)
+ command = QE_INIT_TX_RX;
+ else if (us_info->init_tx)
+ command = QE_INIT_TX;
+ else
+ command = QE_INIT_RX; /* We know at least one is TRUE */
+
+ qe_issue_cmd(command, id, us_info->protocol, 0);
+
+ *uccs_ret = uccs;
+ return 0;
+}
+EXPORT_SYMBOL(ucc_slow_init);
+
+void ucc_slow_free(struct ucc_slow_private *uccs)
+{
+ if (!uccs)
+ return;
+
+ if (uccs->rx_base_offset)
+ qe_muram_free(uccs->rx_base_offset);
+
+ if (uccs->tx_base_offset)
+ qe_muram_free(uccs->tx_base_offset);
+
+ if (uccs->us_pram)
+ qe_muram_free(uccs->us_pram_offset);
+
+ if (uccs->us_regs)
+ iounmap(uccs->us_regs);
+
+ kfree(uccs);
+}
+EXPORT_SYMBOL(ucc_slow_free);
+
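A matching sketch for the slow API, using only the routines defined above
(population of us_info is elided):

	struct ucc_slow_private *uccs;

	if (ucc_slow_init(us_info, &uccs))
		return -ENODEV;
	ucc_slow_enable(uccs, COMM_DIR_RX | COMM_DIR_TX);
	/* ... */
	ucc_slow_graceful_stop_tx(uccs);	/* drain the transmitter first */
	ucc_slow_disable(uccs, COMM_DIR_RX | COMM_DIR_TX);
	ucc_slow_free(uccs);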
diff --git a/drivers/soc/fsl/qe/usb.c b/drivers/soc/fsl/qe/usb.c
new file mode 100644
index 000000000..111f7ab80
--- /dev/null
+++ b/drivers/soc/fsl/qe/usb.c
@@ -0,0 +1,56 @@
+/*
+ * QE USB routines
+ *
+ * Copyright 2006 Freescale Semiconductor, Inc.
+ * Shlomi Gridish <gridish@freescale.com>
+ * Jerry Huang <Chang-Ming.Huang@freescale.com>
+ * Copyright (c) MontaVista Software, Inc. 2008.
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+
+int qe_usb_clock_set(enum qe_clock clk, int rate)
+{
+ struct qe_mux __iomem *mux = &qe_immr->qmx;
+ unsigned long flags;
+ u32 val;
+
+ switch (clk) {
+ case QE_CLK3: val = QE_CMXGCR_USBCS_CLK3; break;
+ case QE_CLK5: val = QE_CMXGCR_USBCS_CLK5; break;
+ case QE_CLK7: val = QE_CMXGCR_USBCS_CLK7; break;
+ case QE_CLK9: val = QE_CMXGCR_USBCS_CLK9; break;
+ case QE_CLK13: val = QE_CMXGCR_USBCS_CLK13; break;
+ case QE_CLK17: val = QE_CMXGCR_USBCS_CLK17; break;
+ case QE_CLK19: val = QE_CMXGCR_USBCS_CLK19; break;
+ case QE_CLK21: val = QE_CMXGCR_USBCS_CLK21; break;
+ case QE_BRG9: val = QE_CMXGCR_USBCS_BRG9; break;
+ case QE_BRG10: val = QE_CMXGCR_USBCS_BRG10; break;
+ default:
+ pr_err("%s: requested unknown clock %d\n", __func__, clk);
+ return -EINVAL;
+ }
+
+ if (qe_clock_is_brg(clk))
+ qe_setbrg(clk, rate, 1);
+
+ spin_lock_irqsave(&cmxgcr_lock, flags);
+
+ clrsetbits_be32(&mux->cmxgcr, QE_CMXGCR_USBCS, val);
+
+ spin_unlock_irqrestore(&cmxgcr_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(qe_usb_clock_set);
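A minimal usage sketch; the clock source and rate here are illustrative only
(the rate argument matters just for BRG sources, where it programs the
baud-rate generator via qe_setbrg()):

	int err = qe_usb_clock_set(QE_BRG9, 48000000);

	if (err)
		pr_warn("USB clock mux setup failed: %d\n", err);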
diff --git a/drivers/soc/gemini/Makefile b/drivers/soc/gemini/Makefile
new file mode 100644
index 000000000..8cbd1e45d
--- /dev/null
+++ b/drivers/soc/gemini/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-y += soc-gemini.o
diff --git a/drivers/soc/gemini/soc-gemini.c b/drivers/soc/gemini/soc-gemini.c
new file mode 100644
index 000000000..642b96c91
--- /dev/null
+++ b/drivers/soc/gemini/soc-gemini.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/of.h>
+
+#define GLOBAL_WORD_ID 0x00
+#define GEMINI_GLOBAL_ARB1_CTRL 0x2c
+#define GEMINI_ARB1_BURST_MASK GENMASK(21, 16)
+#define GEMINI_ARB1_BURST_SHIFT 16
+/* These all define the priority on the BUS2 backplane */
+#define GEMINI_ARB1_PRIO_MASK GENMASK(9, 0)
+#define GEMINI_ARB1_DMAC_HIGH_PRIO BIT(0)
+#define GEMINI_ARB1_IDE_HIGH_PRIO BIT(1)
+#define GEMINI_ARB1_RAID_HIGH_PRIO BIT(2)
+#define GEMINI_ARB1_SECURITY_HIGH_PRIO BIT(3)
+#define GEMINI_ARB1_GMAC0_HIGH_PRIO BIT(4)
+#define GEMINI_ARB1_GMAC1_HIGH_PRIO BIT(5)
+#define GEMINI_ARB1_USB0_HIGH_PRIO BIT(6)
+#define GEMINI_ARB1_USB1_HIGH_PRIO BIT(7)
+#define GEMINI_ARB1_PCI_HIGH_PRIO BIT(8)
+#define GEMINI_ARB1_TVE_HIGH_PRIO BIT(9)
+
+#define GEMINI_DEFAULT_BURST_SIZE 0x20
+#define GEMINI_DEFAULT_PRIO (GEMINI_ARB1_GMAC0_HIGH_PRIO | \
+ GEMINI_ARB1_GMAC1_HIGH_PRIO)
+
+static int __init gemini_soc_init(void)
+{
+ struct regmap *map;
+ u32 rev;
+ u32 val;
+ int ret;
+
+ /* Multiplatform guard, only proceed on Gemini */
+ if (!of_machine_is_compatible("cortina,gemini"))
+ return 0;
+
+ map = syscon_regmap_lookup_by_compatible("cortina,gemini-syscon");
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+ ret = regmap_read(map, GLOBAL_WORD_ID, &rev);
+ if (ret)
+ return ret;
+
+ val = (GEMINI_DEFAULT_BURST_SIZE << GEMINI_ARB1_BURST_SHIFT) |
+ GEMINI_DEFAULT_PRIO;
+
+ /* Set up system arbitration */
+ regmap_update_bits(map,
+ GEMINI_GLOBAL_ARB1_CTRL,
+ GEMINI_ARB1_BURST_MASK | GEMINI_ARB1_PRIO_MASK,
+ val);
+
+ pr_info("Gemini SoC %04x revision %02x, set arbitration %08x\n",
+ rev >> 8, rev & 0xff, val);
+
+ return 0;
+}
+subsys_initcall(gemini_soc_init);
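The arbitration word is simply the burst size shifted into place OR'ed with
the high-priority bits. For illustration only, a hypothetical variant that
additionally grants USB0 high priority would compose it as:

	u32 val = (GEMINI_DEFAULT_BURST_SIZE << GEMINI_ARB1_BURST_SHIFT) |
		  GEMINI_DEFAULT_PRIO | GEMINI_ARB1_USB0_HIGH_PRIO;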
diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig
new file mode 100644
index 000000000..a5b86a28f
--- /dev/null
+++ b/drivers/soc/imx/Kconfig
@@ -0,0 +1,10 @@
+menu "i.MX SoC drivers"
+
+config IMX7_PM_DOMAINS
+ bool "i.MX7 PM domains"
+ depends on SOC_IMX7D || (COMPILE_TEST && OF)
+ depends on PM
+ select PM_GENERIC_DOMAINS
+ default y if SOC_IMX7D
+
+endmenu
diff --git a/drivers/soc/imx/Makefile b/drivers/soc/imx/Makefile
new file mode 100644
index 000000000..aab41a5cc
--- /dev/null
+++ b/drivers/soc/imx/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o
+obj-$(CONFIG_IMX7_PM_DOMAINS) += gpcv2.o
diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c
new file mode 100644
index 000000000..56c019ec7
--- /dev/null
+++ b/drivers/soc/imx/gpc.c
@@ -0,0 +1,549 @@
+/*
+ * Copyright 2015-2017 Pengutronix, Lucas Stach <kernel@pengutronix.de>
+ * Copyright 2011-2013 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#define GPC_CNTR 0x000
+
+#define GPC_PGC_CTRL_OFFS 0x0
+#define GPC_PGC_PUPSCR_OFFS 0x4
+#define GPC_PGC_PDNSCR_OFFS 0x8
+#define GPC_PGC_SW2ISO_SHIFT 0x8
+#define GPC_PGC_SW_SHIFT 0x0
+
+#define GPC_PGC_PCI_PDN 0x200
+#define GPC_PGC_PCI_SR 0x20c
+
+#define GPC_PGC_GPU_PDN 0x260
+#define GPC_PGC_GPU_PUPSCR 0x264
+#define GPC_PGC_GPU_PDNSCR 0x268
+#define GPC_PGC_GPU_SR 0x26c
+
+#define GPC_PGC_DISP_PDN 0x240
+#define GPC_PGC_DISP_SR 0x24c
+
+#define GPU_VPU_PUP_REQ BIT(1)
+#define GPU_VPU_PDN_REQ BIT(0)
+
+#define GPC_CLK_MAX 6
+
+#define PGC_DOMAIN_FLAG_NO_PD BIT(0)
+
+struct imx_pm_domain {
+ struct generic_pm_domain base;
+ struct regmap *regmap;
+ struct regulator *supply;
+ struct clk *clk[GPC_CLK_MAX];
+ int num_clks;
+ unsigned int reg_offs;
+ signed char cntr_pdn_bit;
+ unsigned int ipg_rate_mhz;
+};
+
+static inline struct imx_pm_domain *
+to_imx_pm_domain(struct generic_pm_domain *genpd)
+{
+ return container_of(genpd, struct imx_pm_domain, base);
+}
+
+static int imx6_pm_domain_power_off(struct generic_pm_domain *genpd)
+{
+ struct imx_pm_domain *pd = to_imx_pm_domain(genpd);
+ int iso, iso2sw;
+ u32 val;
+
+ /* Read ISO and ISO2SW power down delays */
+ regmap_read(pd->regmap, pd->reg_offs + GPC_PGC_PDNSCR_OFFS, &val);
+ iso = val & 0x3f;
+ iso2sw = (val >> 8) & 0x3f;
+
+ /* Gate off domain when powered down */
+ regmap_update_bits(pd->regmap, pd->reg_offs + GPC_PGC_CTRL_OFFS,
+ 0x1, 0x1);
+
+ /* Request GPC to power down domain */
+ val = BIT(pd->cntr_pdn_bit);
+ regmap_update_bits(pd->regmap, GPC_CNTR, val, val);
+
+ /* Wait ISO + ISO2SW IPG clock cycles */
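+	/* (the PDNSCR delays are counted in IPG clock cycles; dividing by
+	 * the IPG rate in MHz converts them to microseconds) */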
+ udelay(DIV_ROUND_UP(iso + iso2sw, pd->ipg_rate_mhz));
+
+ if (pd->supply)
+ regulator_disable(pd->supply);
+
+ return 0;
+}
+
+static int imx6_pm_domain_power_on(struct generic_pm_domain *genpd)
+{
+ struct imx_pm_domain *pd = to_imx_pm_domain(genpd);
+ int i, ret;
+ u32 val, req;
+
+ if (pd->supply) {
+ ret = regulator_enable(pd->supply);
+ if (ret) {
+ pr_err("%s: failed to enable regulator: %d\n",
+ __func__, ret);
+ return ret;
+ }
+ }
+
+ /* Enable reset clocks for all devices in the domain */
+ for (i = 0; i < pd->num_clks; i++)
+ clk_prepare_enable(pd->clk[i]);
+
+ /* Gate off domain when powered down */
+ regmap_update_bits(pd->regmap, pd->reg_offs + GPC_PGC_CTRL_OFFS,
+ 0x1, 0x1);
+
+ /* Request GPC to power up domain */
+ req = BIT(pd->cntr_pdn_bit + 1);
+ regmap_update_bits(pd->regmap, GPC_CNTR, req, req);
+
+ /* Wait for the PGC to handle the request */
+ ret = regmap_read_poll_timeout(pd->regmap, GPC_CNTR, val, !(val & req),
+ 1, 50);
+ if (ret)
+ pr_err("powerup request on domain %s timed out\n", genpd->name);
+
+ /* Wait for reset to propagate through peripherals */
+ usleep_range(5, 10);
+
+ /* Disable reset clocks for all devices in the domain */
+ for (i = 0; i < pd->num_clks; i++)
+ clk_disable_unprepare(pd->clk[i]);
+
+ return 0;
+}
+
+static int imx_pgc_get_clocks(struct device *dev, struct imx_pm_domain *domain)
+{
+ int i, ret;
+
+ for (i = 0; ; i++) {
+ struct clk *clk = of_clk_get(dev->of_node, i);
+ if (IS_ERR(clk))
+ break;
+		if (i >= GPC_CLK_MAX) {
+			dev_err(dev, "more than %d clocks\n", GPC_CLK_MAX);
+			clk_put(clk);
+			ret = -EINVAL;
+			goto clk_err;
+		}
+ domain->clk[i] = clk;
+ }
+ domain->num_clks = i;
+
+ return 0;
+
+clk_err:
+ while (i--)
+ clk_put(domain->clk[i]);
+
+ return ret;
+}
+
+static void imx_pgc_put_clocks(struct imx_pm_domain *domain)
+{
+ int i;
+
+ for (i = domain->num_clks - 1; i >= 0; i--)
+ clk_put(domain->clk[i]);
+}
+
+static int imx_pgc_parse_dt(struct device *dev, struct imx_pm_domain *domain)
+{
+ /* try to get the domain supply regulator */
+ domain->supply = devm_regulator_get_optional(dev, "power");
+ if (IS_ERR(domain->supply)) {
+ if (PTR_ERR(domain->supply) == -ENODEV)
+ domain->supply = NULL;
+ else
+ return PTR_ERR(domain->supply);
+ }
+
+ /* try to get all clocks needed for reset propagation */
+ return imx_pgc_get_clocks(dev, domain);
+}
+
+static int imx_pgc_power_domain_probe(struct platform_device *pdev)
+{
+ struct imx_pm_domain *domain = pdev->dev.platform_data;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ /* if this PD is associated with a DT node try to parse it */
+ if (dev->of_node) {
+ ret = imx_pgc_parse_dt(dev, domain);
+ if (ret)
+ return ret;
+ }
+
+ /* initially power on the domain */
+ if (domain->base.power_on)
+ domain->base.power_on(&domain->base);
+
+ if (IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) {
+ pm_genpd_init(&domain->base, NULL, false);
+ ret = of_genpd_add_provider_simple(dev->of_node, &domain->base);
+ if (ret)
+ goto genpd_err;
+ }
+
+ device_link_add(dev, dev->parent, DL_FLAG_AUTOREMOVE_CONSUMER);
+
+ return 0;
+
+genpd_err:
+ pm_genpd_remove(&domain->base);
+ imx_pgc_put_clocks(domain);
+
+ return ret;
+}
+
+static int imx_pgc_power_domain_remove(struct platform_device *pdev)
+{
+ struct imx_pm_domain *domain = pdev->dev.platform_data;
+
+ if (IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) {
+ of_genpd_del_provider(pdev->dev.of_node);
+ pm_genpd_remove(&domain->base);
+ imx_pgc_put_clocks(domain);
+ }
+
+ return 0;
+}
+
+static const struct platform_device_id imx_pgc_power_domain_id[] = {
+ { "imx-pgc-power-domain"},
+ { },
+};
+
+static struct platform_driver imx_pgc_power_domain_driver = {
+ .driver = {
+ .name = "imx-pgc-pd",
+ },
+ .probe = imx_pgc_power_domain_probe,
+ .remove = imx_pgc_power_domain_remove,
+ .id_table = imx_pgc_power_domain_id,
+};
+builtin_platform_driver(imx_pgc_power_domain_driver);
+
+#define GPC_PGC_DOMAIN_ARM 0
+#define GPC_PGC_DOMAIN_PU 1
+#define GPC_PGC_DOMAIN_DISPLAY 2
+
+static struct genpd_power_state imx6_pm_domain_pu_state = {
+ .power_off_latency_ns = 25000,
+ .power_on_latency_ns = 2000000,
+};
+
+static struct imx_pm_domain imx_gpc_domains[] = {
+ {
+ .base = {
+ .name = "ARM",
+ .flags = GENPD_FLAG_ALWAYS_ON,
+ },
+ }, {
+ .base = {
+ .name = "PU",
+ .power_off = imx6_pm_domain_power_off,
+ .power_on = imx6_pm_domain_power_on,
+ .states = &imx6_pm_domain_pu_state,
+ .state_count = 1,
+ },
+ .reg_offs = 0x260,
+ .cntr_pdn_bit = 0,
+ }, {
+ .base = {
+ .name = "DISPLAY",
+ .power_off = imx6_pm_domain_power_off,
+ .power_on = imx6_pm_domain_power_on,
+ },
+ .reg_offs = 0x240,
+ .cntr_pdn_bit = 4,
+ }, {
+ .base = {
+ .name = "PCI",
+ .power_off = imx6_pm_domain_power_off,
+ .power_on = imx6_pm_domain_power_on,
+ },
+ .reg_offs = 0x200,
+ .cntr_pdn_bit = 6,
+ },
+};
+
+struct imx_gpc_dt_data {
+ int num_domains;
+ bool err009619_present;
+ bool err006287_present;
+};
+
+static const struct imx_gpc_dt_data imx6q_dt_data = {
+ .num_domains = 2,
+ .err009619_present = false,
+ .err006287_present = false,
+};
+
+static const struct imx_gpc_dt_data imx6qp_dt_data = {
+ .num_domains = 2,
+ .err009619_present = true,
+ .err006287_present = false,
+};
+
+static const struct imx_gpc_dt_data imx6sl_dt_data = {
+ .num_domains = 3,
+ .err009619_present = false,
+ .err006287_present = true,
+};
+
+static const struct imx_gpc_dt_data imx6sx_dt_data = {
+ .num_domains = 4,
+ .err009619_present = false,
+ .err006287_present = false,
+};
+
+static const struct of_device_id imx_gpc_dt_ids[] = {
+ { .compatible = "fsl,imx6q-gpc", .data = &imx6q_dt_data },
+ { .compatible = "fsl,imx6qp-gpc", .data = &imx6qp_dt_data },
+ { .compatible = "fsl,imx6sl-gpc", .data = &imx6sl_dt_data },
+ { .compatible = "fsl,imx6sx-gpc", .data = &imx6sx_dt_data },
+ { }
+};
+
+static const struct regmap_range yes_ranges[] = {
+ regmap_reg_range(GPC_CNTR, GPC_CNTR),
+ regmap_reg_range(GPC_PGC_PCI_PDN, GPC_PGC_PCI_SR),
+ regmap_reg_range(GPC_PGC_GPU_PDN, GPC_PGC_GPU_SR),
+ regmap_reg_range(GPC_PGC_DISP_PDN, GPC_PGC_DISP_SR),
+};
+
+static const struct regmap_access_table access_table = {
+ .yes_ranges = yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(yes_ranges),
+};
+
+static const struct regmap_config imx_gpc_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .rd_table = &access_table,
+ .wr_table = &access_table,
+ .max_register = 0x2ac,
+ .fast_io = true,
+};
+
+static struct generic_pm_domain *imx_gpc_onecell_domains[] = {
+ &imx_gpc_domains[0].base,
+ &imx_gpc_domains[1].base,
+};
+
+static struct genpd_onecell_data imx_gpc_onecell_data = {
+ .domains = imx_gpc_onecell_domains,
+ .num_domains = 2,
+};
+
+static int imx_gpc_old_dt_init(struct device *dev, struct regmap *regmap,
+ unsigned int num_domains)
+{
+ struct imx_pm_domain *domain;
+ int i, ret;
+
+ for (i = 0; i < num_domains; i++) {
+ domain = &imx_gpc_domains[i];
+ domain->regmap = regmap;
+ domain->ipg_rate_mhz = 66;
+
+ if (i == 1) {
+ domain->supply = devm_regulator_get(dev, "pu");
+ if (IS_ERR(domain->supply))
+ return PTR_ERR(domain->supply);
+
+ ret = imx_pgc_get_clocks(dev, domain);
+ if (ret)
+ goto clk_err;
+
+ domain->base.power_on(&domain->base);
+ }
+ }
+
+ for (i = 0; i < num_domains; i++)
+ pm_genpd_init(&imx_gpc_domains[i].base, NULL, false);
+
+ if (IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) {
+ ret = of_genpd_add_provider_onecell(dev->of_node,
+ &imx_gpc_onecell_data);
+ if (ret)
+ goto genpd_err;
+ }
+
+ return 0;
+
+genpd_err:
+ for (i = 0; i < num_domains; i++)
+ pm_genpd_remove(&imx_gpc_domains[i].base);
+ imx_pgc_put_clocks(&imx_gpc_domains[GPC_PGC_DOMAIN_PU]);
+clk_err:
+ return ret;
+}
+
+static int imx_gpc_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id =
+ of_match_device(imx_gpc_dt_ids, &pdev->dev);
+ const struct imx_gpc_dt_data *of_id_data = of_id->data;
+ struct device_node *pgc_node;
+ struct regmap *regmap;
+ struct resource *res;
+ void __iomem *base;
+ int ret;
+
+ pgc_node = of_get_child_by_name(pdev->dev.of_node, "pgc");
+
+	/* bail out if the DT is too old and doesn't provide the necessary info */
+ if (!of_property_read_bool(pdev->dev.of_node, "#power-domain-cells") &&
+ !pgc_node)
+ return 0;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio_clk(&pdev->dev, NULL, base,
+ &imx_gpc_regmap_config);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ dev_err(&pdev->dev, "failed to init regmap: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* Disable PU power down in normal operation if ERR009619 is present */
+ if (of_id_data->err009619_present)
+ imx_gpc_domains[GPC_PGC_DOMAIN_PU].base.flags |=
+ GENPD_FLAG_ALWAYS_ON;
+
+ /* Keep DISP always on if ERR006287 is present */
+ if (of_id_data->err006287_present)
+ imx_gpc_domains[GPC_PGC_DOMAIN_DISPLAY].base.flags |=
+ GENPD_FLAG_ALWAYS_ON;
+
+ if (!pgc_node) {
+ ret = imx_gpc_old_dt_init(&pdev->dev, regmap,
+ of_id_data->num_domains);
+ if (ret)
+ return ret;
+ } else {
+ struct imx_pm_domain *domain;
+ struct platform_device *pd_pdev;
+ struct device_node *np;
+ struct clk *ipg_clk;
+ unsigned int ipg_rate_mhz;
+ int domain_index;
+
+ ipg_clk = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(ipg_clk))
+ return PTR_ERR(ipg_clk);
+ ipg_rate_mhz = clk_get_rate(ipg_clk) / 1000000;
+
+ for_each_child_of_node(pgc_node, np) {
+ ret = of_property_read_u32(np, "reg", &domain_index);
+ if (ret) {
+ of_node_put(np);
+ return ret;
+ }
+ if (domain_index >= of_id_data->num_domains)
+ continue;
+
+ pd_pdev = platform_device_alloc("imx-pgc-power-domain",
+ domain_index);
+ if (!pd_pdev) {
+ of_node_put(np);
+ return -ENOMEM;
+ }
+
+ ret = platform_device_add_data(pd_pdev,
+ &imx_gpc_domains[domain_index],
+ sizeof(imx_gpc_domains[domain_index]));
+ if (ret) {
+ platform_device_put(pd_pdev);
+ of_node_put(np);
+ return ret;
+ }
+ domain = pd_pdev->dev.platform_data;
+ domain->regmap = regmap;
+ domain->ipg_rate_mhz = ipg_rate_mhz;
+
+ pd_pdev->dev.parent = &pdev->dev;
+ pd_pdev->dev.of_node = np;
+
+ ret = platform_device_add(pd_pdev);
+ if (ret) {
+ platform_device_put(pd_pdev);
+ of_node_put(np);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int imx_gpc_remove(struct platform_device *pdev)
+{
+ struct device_node *pgc_node;
+ int ret;
+
+ pgc_node = of_get_child_by_name(pdev->dev.of_node, "pgc");
+
+	/* bail out if the DT is too old and doesn't provide the necessary info */
+ if (!of_property_read_bool(pdev->dev.of_node, "#power-domain-cells") &&
+ !pgc_node)
+ return 0;
+
+	/*
+	 * If the old DT binding is used, the toplevel driver needs to
+	 * deregister the power domains.
+	 */
+ if (!pgc_node) {
+ of_genpd_del_provider(pdev->dev.of_node);
+
+ ret = pm_genpd_remove(&imx_gpc_domains[GPC_PGC_DOMAIN_PU].base);
+ if (ret)
+ return ret;
+ imx_pgc_put_clocks(&imx_gpc_domains[GPC_PGC_DOMAIN_PU]);
+
+ ret = pm_genpd_remove(&imx_gpc_domains[GPC_PGC_DOMAIN_ARM].base);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct platform_driver imx_gpc_driver = {
+ .driver = {
+ .name = "imx-gpc",
+ .of_match_table = imx_gpc_dt_ids,
+ },
+ .probe = imx_gpc_probe,
+ .remove = imx_gpc_remove,
+};
+builtin_platform_driver(imx_gpc_driver);
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
new file mode 100644
index 000000000..6ef18cf8f
--- /dev/null
+++ b/drivers/soc/imx/gpcv2.c
@@ -0,0 +1,377 @@
+/*
+ * Copyright 2017 Impinj, Inc
+ * Author: Andrey Smirnov <andrew.smirnov@gmail.com>
+ *
+ * Based on the code of the analogous driver:
+ *
+ * Copyright 2015-2017 Pengutronix, Lucas Stach <kernel@pengutronix.de>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <dt-bindings/power/imx7-power.h>
+
+#define GPC_LPCR_A7_BSC 0x000
+
+#define GPC_PGC_CPU_MAPPING 0x0ec
+#define USB_HSIC_PHY_A7_DOMAIN BIT(6)
+#define USB_OTG2_PHY_A7_DOMAIN BIT(5)
+#define USB_OTG1_PHY_A7_DOMAIN BIT(4)
+#define PCIE_PHY_A7_DOMAIN BIT(3)
+#define MIPI_PHY_A7_DOMAIN BIT(2)
+
+#define GPC_PU_PGC_SW_PUP_REQ 0x0f8
+#define GPC_PU_PGC_SW_PDN_REQ 0x104
+#define USB_HSIC_PHY_SW_Pxx_REQ BIT(4)
+#define USB_OTG2_PHY_SW_Pxx_REQ BIT(3)
+#define USB_OTG1_PHY_SW_Pxx_REQ BIT(2)
+#define PCIE_PHY_SW_Pxx_REQ BIT(1)
+#define MIPI_PHY_SW_Pxx_REQ BIT(0)
+
+#define GPC_M4_PU_PDN_FLG 0x1bc
+
+/*
+ * The PGC offset values in the Reference Manual (Rev. 1, 01/2018 and
+ * older) GPC chapter's GPC_PGC memory map are incorrect; the offset
+ * values below are taken from the design RTL.
+ */
+#define PGC_MIPI 16
+#define PGC_PCIE 17
+#define PGC_USB_HSIC 20
+#define GPC_PGC_CTRL(n) (0x800 + (n) * 0x40)
+#define GPC_PGC_SR(n) (GPC_PGC_CTRL(n) + 0xc)
+
+#define GPC_PGC_CTRL_PCR BIT(0)
+
+struct imx7_pgc_domain {
+ struct generic_pm_domain genpd;
+ struct regmap *regmap;
+ struct regulator *regulator;
+
+ unsigned int pgc;
+
+ const struct {
+ u32 pxx;
+ u32 map;
+ } bits;
+
+ const int voltage;
+ struct device *dev;
+};
+
+static int imx7_gpc_pu_pgc_sw_pxx_req(struct generic_pm_domain *genpd,
+ bool on)
+{
+ struct imx7_pgc_domain *domain = container_of(genpd,
+ struct imx7_pgc_domain,
+ genpd);
+ unsigned int offset = on ?
+ GPC_PU_PGC_SW_PUP_REQ : GPC_PU_PGC_SW_PDN_REQ;
+ const bool enable_power_control = !on;
+ const bool has_regulator = !IS_ERR(domain->regulator);
+ unsigned long deadline;
+ int ret = 0;
+
+ regmap_update_bits(domain->regmap, GPC_PGC_CPU_MAPPING,
+ domain->bits.map, domain->bits.map);
+
+ if (has_regulator && on) {
+ ret = regulator_enable(domain->regulator);
+ if (ret) {
+ dev_err(domain->dev, "failed to enable regulator\n");
+ goto unmap;
+ }
+ }
+
+ if (enable_power_control)
+ regmap_update_bits(domain->regmap, GPC_PGC_CTRL(domain->pgc),
+ GPC_PGC_CTRL_PCR, GPC_PGC_CTRL_PCR);
+
+ regmap_update_bits(domain->regmap, offset,
+ domain->bits.pxx, domain->bits.pxx);
+
+	/*
+	 * As per "5.5.9.4 Example Code 4" in IMX7DRM.pdf, wait for the
+	 * PUP_REQ/PDN_REQ bit to be cleared.
+	 */
+ deadline = jiffies + msecs_to_jiffies(1);
+ while (true) {
+ u32 pxx_req;
+
+ regmap_read(domain->regmap, offset, &pxx_req);
+
+ if (!(pxx_req & domain->bits.pxx))
+ break;
+
+ if (time_after(jiffies, deadline)) {
+ dev_err(domain->dev, "falied to command PGC\n");
+ ret = -ETIMEDOUT;
+			/*
+			 * If we were in the process of enabling the domain
+			 * and failed, we might as well disable the regulator
+			 * we just enabled. In the opposite situation, where
+			 * we failed to power down, keep the regulator on.
+			 */
+ on = !on;
+ break;
+ }
+
+ cpu_relax();
+ }
+
+ if (enable_power_control)
+ regmap_update_bits(domain->regmap, GPC_PGC_CTRL(domain->pgc),
+ GPC_PGC_CTRL_PCR, 0);
+
+ if (has_regulator && !on) {
+ int err;
+
+ err = regulator_disable(domain->regulator);
+ if (err)
+ dev_err(domain->dev,
+ "failed to disable regulator: %d\n", ret);
+ /* Preserve earlier error code */
+ ret = ret ?: err;
+ }
+unmap:
+ regmap_update_bits(domain->regmap, GPC_PGC_CPU_MAPPING,
+ domain->bits.map, 0);
+ return ret;
+}
+
+static int imx7_gpc_pu_pgc_sw_pup_req(struct generic_pm_domain *genpd)
+{
+ return imx7_gpc_pu_pgc_sw_pxx_req(genpd, true);
+}
+
+static int imx7_gpc_pu_pgc_sw_pdn_req(struct generic_pm_domain *genpd)
+{
+ return imx7_gpc_pu_pgc_sw_pxx_req(genpd, false);
+}
+
+static const struct imx7_pgc_domain imx7_pgc_domains[] = {
+ [IMX7_POWER_DOMAIN_MIPI_PHY] = {
+ .genpd = {
+ .name = "mipi-phy",
+ },
+ .bits = {
+ .pxx = MIPI_PHY_SW_Pxx_REQ,
+ .map = MIPI_PHY_A7_DOMAIN,
+ },
+ .voltage = 1000000,
+ .pgc = PGC_MIPI,
+ },
+
+ [IMX7_POWER_DOMAIN_PCIE_PHY] = {
+ .genpd = {
+ .name = "pcie-phy",
+ },
+ .bits = {
+ .pxx = PCIE_PHY_SW_Pxx_REQ,
+ .map = PCIE_PHY_A7_DOMAIN,
+ },
+ .voltage = 1000000,
+ .pgc = PGC_PCIE,
+ },
+
+ [IMX7_POWER_DOMAIN_USB_HSIC_PHY] = {
+ .genpd = {
+ .name = "usb-hsic-phy",
+ },
+ .bits = {
+ .pxx = USB_HSIC_PHY_SW_Pxx_REQ,
+ .map = USB_HSIC_PHY_A7_DOMAIN,
+ },
+ .voltage = 1200000,
+ .pgc = PGC_USB_HSIC,
+ },
+};
+
+static int imx7_pgc_domain_probe(struct platform_device *pdev)
+{
+ struct imx7_pgc_domain *domain = pdev->dev.platform_data;
+ int ret;
+
+ domain->dev = &pdev->dev;
+
+ domain->regulator = devm_regulator_get_optional(domain->dev, "power");
+ if (IS_ERR(domain->regulator)) {
+ if (PTR_ERR(domain->regulator) != -ENODEV) {
+ if (PTR_ERR(domain->regulator) != -EPROBE_DEFER)
+ dev_err(domain->dev, "Failed to get domain's regulator\n");
+ return PTR_ERR(domain->regulator);
+ }
+ } else {
+ regulator_set_voltage(domain->regulator,
+ domain->voltage, domain->voltage);
+ }
+
+ ret = pm_genpd_init(&domain->genpd, NULL, true);
+ if (ret) {
+ dev_err(domain->dev, "Failed to init power domain\n");
+ return ret;
+ }
+
+ ret = of_genpd_add_provider_simple(domain->dev->of_node,
+ &domain->genpd);
+ if (ret) {
+ dev_err(domain->dev, "Failed to add genpd provider\n");
+ pm_genpd_remove(&domain->genpd);
+ }
+
+ return ret;
+}
+
+static int imx7_pgc_domain_remove(struct platform_device *pdev)
+{
+ struct imx7_pgc_domain *domain = pdev->dev.platform_data;
+
+ of_genpd_del_provider(domain->dev->of_node);
+ pm_genpd_remove(&domain->genpd);
+
+ return 0;
+}
+
+static const struct platform_device_id imx7_pgc_domain_id[] = {
+ { "imx7-pgc-domain", },
+ { },
+};
+
+static struct platform_driver imx7_pgc_domain_driver = {
+ .driver = {
+ .name = "imx7-pgc",
+ },
+ .probe = imx7_pgc_domain_probe,
+ .remove = imx7_pgc_domain_remove,
+ .id_table = imx7_pgc_domain_id,
+};
+builtin_platform_driver(imx7_pgc_domain_driver);
+
+static int imx_gpcv2_probe(struct platform_device *pdev)
+{
+ static const struct regmap_range yes_ranges[] = {
+ regmap_reg_range(GPC_LPCR_A7_BSC,
+ GPC_M4_PU_PDN_FLG),
+ regmap_reg_range(GPC_PGC_CTRL(PGC_MIPI),
+ GPC_PGC_SR(PGC_MIPI)),
+ regmap_reg_range(GPC_PGC_CTRL(PGC_PCIE),
+ GPC_PGC_SR(PGC_PCIE)),
+ regmap_reg_range(GPC_PGC_CTRL(PGC_USB_HSIC),
+ GPC_PGC_SR(PGC_USB_HSIC)),
+ };
+ static const struct regmap_access_table access_table = {
+ .yes_ranges = yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(yes_ranges),
+ };
+ static const struct regmap_config regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .rd_table = &access_table,
+ .wr_table = &access_table,
+ .max_register = SZ_4K,
+ };
+ struct device *dev = &pdev->dev;
+ struct device_node *pgc_np, *np;
+ struct regmap *regmap;
+ struct resource *res;
+ void __iomem *base;
+ int ret;
+
+ pgc_np = of_get_child_by_name(dev->of_node, "pgc");
+ if (!pgc_np) {
+ dev_err(dev, "No power domains specified in DT\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio(dev, base, &regmap_config);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ dev_err(dev, "failed to init regmap (%d)\n", ret);
+ return ret;
+ }
+
+ for_each_child_of_node(pgc_np, np) {
+ struct platform_device *pd_pdev;
+ struct imx7_pgc_domain *domain;
+ u32 domain_index;
+
+ ret = of_property_read_u32(np, "reg", &domain_index);
+ if (ret) {
+ dev_err(dev, "Failed to read 'reg' property\n");
+ of_node_put(np);
+ return ret;
+ }
+
+ if (domain_index >= ARRAY_SIZE(imx7_pgc_domains)) {
+ dev_warn(dev,
+ "Domain index %d is out of bounds\n",
+ domain_index);
+ continue;
+ }
+
+ pd_pdev = platform_device_alloc("imx7-pgc-domain",
+ domain_index);
+ if (!pd_pdev) {
+ dev_err(dev, "Failed to allocate platform device\n");
+ of_node_put(np);
+ return -ENOMEM;
+ }
+
+ ret = platform_device_add_data(pd_pdev,
+ &imx7_pgc_domains[domain_index],
+ sizeof(imx7_pgc_domains[domain_index]));
+ if (ret) {
+ platform_device_put(pd_pdev);
+ of_node_put(np);
+ return ret;
+ }
+
+ domain = pd_pdev->dev.platform_data;
+ domain->regmap = regmap;
+ domain->genpd.power_on = imx7_gpc_pu_pgc_sw_pup_req;
+ domain->genpd.power_off = imx7_gpc_pu_pgc_sw_pdn_req;
+
+ pd_pdev->dev.parent = dev;
+ pd_pdev->dev.of_node = np;
+
+ ret = platform_device_add(pd_pdev);
+ if (ret) {
+ platform_device_put(pd_pdev);
+ of_node_put(np);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct of_device_id imx_gpcv2_dt_ids[] = {
+ { .compatible = "fsl,imx7d-gpc" },
+ { }
+};
+
+static struct platform_driver imx_gpc_driver = {
+ .driver = {
+ .name = "imx-gpcv2",
+ .of_match_table = imx_gpcv2_dt_ids,
+ },
+ .probe = imx_gpcv2_probe,
+};
+builtin_platform_driver(imx_gpc_driver);
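For context, a hedged sketch of how a consumer device ends up using one of
these domains: once its DT node references the provider (for example,
power-domains = <&pcie_phy_pd>, a hypothetical label), the genpd core
attaches the device, and plain runtime PM drives the power_on/power_off
callbacks registered above (error handling abbreviated):

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);	/* ends up in imx7_gpc_pu_pgc_sw_pup_req() */
	if (ret < 0)
		return ret;
	/* ... use the peripheral ... */
	pm_runtime_put_sync(dev);	/* may trigger the ..._pdn_req() path */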
diff --git a/drivers/soc/lantiq/Makefile b/drivers/soc/lantiq/Makefile
new file mode 100644
index 000000000..be9e866d5
--- /dev/null
+++ b/drivers/soc/lantiq/Makefile
@@ -0,0 +1,2 @@
+obj-y += fpi-bus.o
+obj-$(CONFIG_XRX200_PHY_FW) += gphy.o
diff --git a/drivers/soc/lantiq/fpi-bus.c b/drivers/soc/lantiq/fpi-bus.c
new file mode 100644
index 000000000..a671c9984
--- /dev/null
+++ b/drivers/soc/lantiq/fpi-bus.c
@@ -0,0 +1,87 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2011-2015 John Crispin <blogic@phrozen.org>
+ * Copyright (C) 2015 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ * Copyright (C) 2017 Hauke Mehrtens <hauke@hauke-m.de>
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+#include <lantiq_soc.h>
+
+#define XBAR_ALWAYS_LAST 0x430
+#define XBAR_FPI_BURST_EN BIT(1)
+#define XBAR_AHB_BURST_EN BIT(2)
+
+#define RCU_VR9_BE_AHB1S 0x00000008
+
+static int ltq_fpi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct resource *res_xbar;
+ struct regmap *rcu_regmap;
+ void __iomem *xbar_membase;
+ u32 rcu_ahb_endianness_reg_offset;
+ int ret;
+
+ res_xbar = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xbar_membase = devm_ioremap_resource(dev, res_xbar);
+ if (IS_ERR(xbar_membase))
+ return PTR_ERR(xbar_membase);
+
+	/* the RCU regmap is needed to configure the AHB endianness */
+ rcu_regmap = syscon_regmap_lookup_by_phandle(np, "lantiq,rcu");
+ if (IS_ERR(rcu_regmap))
+ return PTR_ERR(rcu_regmap);
+
+ ret = device_property_read_u32(dev, "lantiq,offset-endianness",
+ &rcu_ahb_endianness_reg_offset);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to get RCU reg offset\n");
+ return ret;
+ }
+
+ ret = regmap_update_bits(rcu_regmap, rcu_ahb_endianness_reg_offset,
+ RCU_VR9_BE_AHB1S, RCU_VR9_BE_AHB1S);
+ if (ret) {
+ dev_warn(&pdev->dev,
+ "Failed to configure RCU AHB endianness\n");
+ return ret;
+ }
+
+ /* disable fpi burst */
+ ltq_w32_mask(XBAR_FPI_BURST_EN, 0, xbar_membase + XBAR_ALWAYS_LAST);
+
+ return of_platform_populate(dev->of_node, NULL, NULL, dev);
+}
+
+static const struct of_device_id ltq_fpi_match[] = {
+ { .compatible = "lantiq,xrx200-fpi" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ltq_fpi_match);
+
+static struct platform_driver ltq_fpi_driver = {
+ .probe = ltq_fpi_probe,
+ .driver = {
+ .name = "fpi-xway",
+ .of_match_table = ltq_fpi_match,
+ },
+};
+
+module_platform_driver(ltq_fpi_driver);
+
+MODULE_DESCRIPTION("Lantiq FPI bus driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/lantiq/gphy.c b/drivers/soc/lantiq/gphy.c
new file mode 100644
index 000000000..feeb17ceb
--- /dev/null
+++ b/drivers/soc/lantiq/gphy.c
@@ -0,0 +1,224 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2012 John Crispin <blogic@phrozen.org>
+ * Copyright (C) 2016 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ * Copyright (C) 2017 Hauke Mehrtens <hauke@hauke-m.de>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/property.h>
+#include <dt-bindings/mips/lantiq_rcu_gphy.h>
+
+#include <lantiq_soc.h>
+
+#define XRX200_GPHY_FW_ALIGN (16 * 1024)
+
+struct xway_gphy_priv {
+ struct clk *gphy_clk_gate;
+ struct reset_control *gphy_reset;
+ struct reset_control *gphy_reset2;
+ void __iomem *membase;
+ char *fw_name;
+};
+
+struct xway_gphy_match_data {
+ char *fe_firmware_name;
+ char *ge_firmware_name;
+};
+
+static const struct xway_gphy_match_data xrx200a1x_gphy_data = {
+ .fe_firmware_name = "lantiq/xrx200_phy22f_a14.bin",
+ .ge_firmware_name = "lantiq/xrx200_phy11g_a14.bin",
+};
+
+static const struct xway_gphy_match_data xrx200a2x_gphy_data = {
+ .fe_firmware_name = "lantiq/xrx200_phy22f_a22.bin",
+ .ge_firmware_name = "lantiq/xrx200_phy11g_a22.bin",
+};
+
+static const struct xway_gphy_match_data xrx300_gphy_data = {
+ .fe_firmware_name = "lantiq/xrx300_phy22f_a21.bin",
+ .ge_firmware_name = "lantiq/xrx300_phy11g_a21.bin",
+};
+
+static const struct of_device_id xway_gphy_match[] = {
+ { .compatible = "lantiq,xrx200a1x-gphy", .data = &xrx200a1x_gphy_data },
+ { .compatible = "lantiq,xrx200a2x-gphy", .data = &xrx200a2x_gphy_data },
+ { .compatible = "lantiq,xrx300-gphy", .data = &xrx300_gphy_data },
+ { .compatible = "lantiq,xrx330-gphy", .data = &xrx300_gphy_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xway_gphy_match);
+
+static int xway_gphy_load(struct device *dev, struct xway_gphy_priv *priv,
+ dma_addr_t *dev_addr)
+{
+ const struct firmware *fw;
+ void *fw_addr;
+ dma_addr_t dma_addr;
+ size_t size;
+ int ret;
+
+ ret = request_firmware(&fw, priv->fw_name, dev);
+ if (ret) {
+ dev_err(dev, "failed to load firmware: %s, error: %i\n",
+ priv->fw_name, ret);
+ return ret;
+ }
+
+ /*
+	 * GPHY cores need the firmware code in a persistent and contiguous
+	 * memory area, with a start address aligned to a 16 kB boundary.
+ */
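+	/*
+	 * Over-allocating by one alignment unit lets the start address be
+	 * rounded up to the boundary within the allocated buffer.
+	 */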
+ size = fw->size + XRX200_GPHY_FW_ALIGN;
+
+ fw_addr = dmam_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL);
+ if (fw_addr) {
+ fw_addr = PTR_ALIGN(fw_addr, XRX200_GPHY_FW_ALIGN);
+ *dev_addr = ALIGN(dma_addr, XRX200_GPHY_FW_ALIGN);
+ memcpy(fw_addr, fw->data, fw->size);
+ } else {
+ dev_err(dev, "failed to alloc firmware memory\n");
+ ret = -ENOMEM;
+ }
+
+ release_firmware(fw);
+
+ return ret;
+}
+
+static int xway_gphy_of_probe(struct platform_device *pdev,
+ struct xway_gphy_priv *priv)
+{
+ struct device *dev = &pdev->dev;
+ const struct xway_gphy_match_data *gphy_fw_name_cfg;
+ u32 gphy_mode;
+ int ret;
+ struct resource *res_gphy;
+
+ gphy_fw_name_cfg = of_device_get_match_data(dev);
+
+ priv->gphy_clk_gate = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->gphy_clk_gate)) {
+ dev_err(dev, "Failed to lookup gate clock\n");
+ return PTR_ERR(priv->gphy_clk_gate);
+ }
+
+ res_gphy = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->membase = devm_ioremap_resource(dev, res_gphy);
+ if (IS_ERR(priv->membase))
+ return PTR_ERR(priv->membase);
+
+ priv->gphy_reset = devm_reset_control_get(dev, "gphy");
+ if (IS_ERR(priv->gphy_reset)) {
+ if (PTR_ERR(priv->gphy_reset) != -EPROBE_DEFER)
+ dev_err(dev, "Failed to lookup gphy reset\n");
+ return PTR_ERR(priv->gphy_reset);
+ }
+
+ priv->gphy_reset2 = devm_reset_control_get_optional(dev, "gphy2");
+ if (IS_ERR(priv->gphy_reset2))
+ return PTR_ERR(priv->gphy_reset2);
+
+ ret = device_property_read_u32(dev, "lantiq,gphy-mode", &gphy_mode);
+ /* Default to GE mode */
+ if (ret)
+ gphy_mode = GPHY_MODE_GE;
+
+ switch (gphy_mode) {
+ case GPHY_MODE_FE:
+ priv->fw_name = gphy_fw_name_cfg->fe_firmware_name;
+ break;
+ case GPHY_MODE_GE:
+ priv->fw_name = gphy_fw_name_cfg->ge_firmware_name;
+ break;
+ default:
+ dev_err(dev, "Unknown GPHY mode %d\n", gphy_mode);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int xway_gphy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct xway_gphy_priv *priv;
+ dma_addr_t fw_addr = 0;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ret = xway_gphy_of_probe(pdev, priv);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(priv->gphy_clk_gate);
+ if (ret)
+ return ret;
+
+ ret = xway_gphy_load(dev, priv, &fw_addr);
+ if (ret) {
+ clk_disable_unprepare(priv->gphy_clk_gate);
+ return ret;
+ }
+
+ reset_control_assert(priv->gphy_reset);
+ reset_control_assert(priv->gphy_reset2);
+
+ iowrite32be(fw_addr, priv->membase);
+
+ reset_control_deassert(priv->gphy_reset);
+ reset_control_deassert(priv->gphy_reset2);
+
+ platform_set_drvdata(pdev, priv);
+
+ return ret;
+}
+
+static int xway_gphy_remove(struct platform_device *pdev)
+{
+ struct xway_gphy_priv *priv = platform_get_drvdata(pdev);
+
+ iowrite32be(0, priv->membase);
+
+ clk_disable_unprepare(priv->gphy_clk_gate);
+
+ return 0;
+}
+
+static struct platform_driver xway_gphy_driver = {
+ .probe = xway_gphy_probe,
+ .remove = xway_gphy_remove,
+ .driver = {
+ .name = "xway-rcu-gphy",
+ .of_match_table = xway_gphy_match,
+ },
+};
+
+module_platform_driver(xway_gphy_driver);
+
+MODULE_FIRMWARE("lantiq/xrx300_phy11g_a21.bin");
+MODULE_FIRMWARE("lantiq/xrx300_phy22f_a21.bin");
+MODULE_FIRMWARE("lantiq/xrx200_phy11g_a14.bin");
+MODULE_FIRMWARE("lantiq/xrx200_phy11g_a22.bin");
+MODULE_FIRMWARE("lantiq/xrx200_phy22f_a14.bin");
+MODULE_FIRMWARE("lantiq/xrx200_phy22f_a22.bin");
+MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
+MODULE_DESCRIPTION("Lantiq XWAY GPHY Firmware Loader");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
new file mode 100644
index 000000000..a7d066733
--- /dev/null
+++ b/drivers/soc/mediatek/Kconfig
@@ -0,0 +1,34 @@
+#
+# MediaTek SoC drivers
+#
+menu "MediaTek SoC drivers"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+
+config MTK_INFRACFG
+ bool "MediaTek INFRACFG Support"
+ select REGMAP
+ help
+	  Say yes here to add support for the MediaTek INFRACFG controller. The
+	  INFRACFG controller contains various infrastructure registers not
+	  directly associated with any device.
+
+config MTK_PMIC_WRAP
+ tristate "MediaTek PMIC Wrapper Support"
+ depends on RESET_CONTROLLER
+ select REGMAP
+ help
+	  Say yes here to add support for the MediaTek PMIC wrapper found
+	  on different MediaTek SoCs. The PMIC wrapper is proprietary
+	  hardware used to connect the PMIC.
+
+config MTK_SCPSYS
+ bool "MediaTek SCPSYS Support"
+ default ARCH_MEDIATEK
+ select REGMAP
+ select MTK_INFRACFG
+ select PM_GENERIC_DOMAINS if PM
+ help
+ Say yes here to add support for the MediaTek SCPSYS power domain
+ driver.
+
+endmenu
diff --git a/drivers/soc/mediatek/Makefile b/drivers/soc/mediatek/Makefile
new file mode 100644
index 000000000..12998b088
--- /dev/null
+++ b/drivers/soc/mediatek/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_MTK_INFRACFG) += mtk-infracfg.o
+obj-$(CONFIG_MTK_PMIC_WRAP) += mtk-pmic-wrap.o
+obj-$(CONFIG_MTK_SCPSYS) += mtk-scpsys.o
diff --git a/drivers/soc/mediatek/mtk-infracfg.c b/drivers/soc/mediatek/mtk-infracfg.c
new file mode 100644
index 000000000..958861c9e
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-infracfg.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2015 Pengutronix, Sascha Hauer <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/jiffies.h>
+#include <linux/regmap.h>
+#include <linux/soc/mediatek/infracfg.h>
+#include <asm/processor.h>
+
+#define MTK_POLL_DELAY_US 10
+#define MTK_POLL_TIMEOUT (jiffies_to_usecs(HZ))
+
+#define INFRA_TOPAXI_PROTECTEN 0x0220
+#define INFRA_TOPAXI_PROTECTSTA1 0x0228
+#define INFRA_TOPAXI_PROTECTEN_SET 0x0260
+#define INFRA_TOPAXI_PROTECTEN_CLR 0x0264
+
+/**
+ * mtk_infracfg_set_bus_protection - enable bus protection
+ * @infracfg: The infracfg regmap
+ * @mask: The mask containing the protection bits to be enabled.
+ * @reg_update: Boolean flag selecting whether the protection bits are set
+ *              via regmap_update_bits() on the enable register (PROTECTEN)
+ *              or via regmap_write() on the set register (PROTECTEN_SET).
+ *
+ * This function enables the bus protection bits for disabled power
+ * domains so that the system does not hang when some unit accesses the
+ * bus while in power down.
+ */
+int mtk_infracfg_set_bus_protection(struct regmap *infracfg, u32 mask,
+ bool reg_update)
+{
+ u32 val;
+ int ret;
+
+ if (reg_update)
+ regmap_update_bits(infracfg, INFRA_TOPAXI_PROTECTEN, mask,
+ mask);
+ else
+ regmap_write(infracfg, INFRA_TOPAXI_PROTECTEN_SET, mask);
+
+ ret = regmap_read_poll_timeout(infracfg, INFRA_TOPAXI_PROTECTSTA1,
+ val, (val & mask) == mask,
+ MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
+
+ return ret;
+}
+
+/**
+ * mtk_infracfg_clear_bus_protection - disable bus protection
+ * @infracfg: The infracfg regmap
+ * @mask: The mask containing the protection bits to be disabled.
+ * @reg_update: Boolean flag selecting whether the protection bits are
+ *              cleared via regmap_update_bits() on the enable register
+ *              (PROTECTEN) or via regmap_write() on the clear register
+ *              (PROTECTEN_CLR).
+ *
+ * This function disables the bus protection bits previously enabled with
+ * mtk_infracfg_set_bus_protection().
+ */
+int mtk_infracfg_clear_bus_protection(struct regmap *infracfg, u32 mask,
+ bool reg_update)
+{
+ int ret;
+ u32 val;
+
+ if (reg_update)
+ regmap_update_bits(infracfg, INFRA_TOPAXI_PROTECTEN, mask, 0);
+ else
+ regmap_write(infracfg, INFRA_TOPAXI_PROTECTEN_CLR, mask);
+
+ ret = regmap_read_poll_timeout(infracfg, INFRA_TOPAXI_PROTECTSTA1,
+ val, !(val & mask),
+ MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
+
+ return ret;
+}
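A minimal sketch of the intended call sequence around a power-domain
transition; the infracfg regmap lookup and the MY_PROT_MASK value are
assumptions for illustration:

	ret = mtk_infracfg_set_bus_protection(infracfg, MY_PROT_MASK, false);
	if (ret)
		return ret;
	/* ... power the domain down and back up ... */
	ret = mtk_infracfg_clear_bus_protection(infracfg, MY_PROT_MASK, false);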
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
new file mode 100644
index 000000000..011a40b5f
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -0,0 +1,1684 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Flora Fu, MediaTek
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#define PWRAP_MT8135_BRIDGE_IORD_ARB_EN 0x4
+#define PWRAP_MT8135_BRIDGE_WACS3_EN 0x10
+#define PWRAP_MT8135_BRIDGE_INIT_DONE3 0x14
+#define PWRAP_MT8135_BRIDGE_WACS4_EN 0x24
+#define PWRAP_MT8135_BRIDGE_INIT_DONE4 0x28
+#define PWRAP_MT8135_BRIDGE_INT_EN 0x38
+#define PWRAP_MT8135_BRIDGE_TIMER_EN 0x48
+#define PWRAP_MT8135_BRIDGE_WDT_UNIT 0x50
+#define PWRAP_MT8135_BRIDGE_WDT_SRC_EN 0x54
+
+/* macros for wrapper status */
+#define PWRAP_GET_WACS_RDATA(x) (((x) >> 0) & 0x0000ffff)
+#define PWRAP_GET_WACS_FSM(x) (((x) >> 16) & 0x00000007)
+#define PWRAP_GET_WACS_REQ(x) (((x) >> 19) & 0x00000001)
+#define PWRAP_STATE_SYNC_IDLE0 (1 << 20)
+#define PWRAP_STATE_INIT_DONE0 (1 << 21)
+
+/* macros for the WACS FSM */
+#define PWRAP_WACS_FSM_IDLE 0x00
+#define PWRAP_WACS_FSM_REQ 0x02
+#define PWRAP_WACS_FSM_WFDLE 0x04
+#define PWRAP_WACS_FSM_WFVLDCLR 0x06
+#define PWRAP_WACS_INIT_DONE 0x01
+#define PWRAP_WACS_WACS_SYNC_IDLE 0x01
+#define PWRAP_WACS_SYNC_BUSY 0x00
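+/* e.g. PWRAP_GET_WACS_FSM(0x00060000) == PWRAP_WACS_FSM_WFVLDCLR (illustrative) */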
+
+/* macros for device wrapper default values */
+#define PWRAP_DEW_READ_TEST_VAL 0x5aa5
+#define PWRAP_DEW_WRITE_TEST_VAL 0xa55a
+
+/* macros for the manual command */
+#define PWRAP_MAN_CMD_SPI_WRITE_NEW (1 << 14)
+#define PWRAP_MAN_CMD_SPI_WRITE (1 << 13)
+#define PWRAP_MAN_CMD_OP_CSH (0x0 << 8)
+#define PWRAP_MAN_CMD_OP_CSL (0x1 << 8)
+#define PWRAP_MAN_CMD_OP_CK (0x2 << 8)
+#define PWRAP_MAN_CMD_OP_OUTS (0x8 << 8)
+#define PWRAP_MAN_CMD_OP_OUTD (0x9 << 8)
+#define PWRAP_MAN_CMD_OP_OUTQ (0xa << 8)
+
+/* macros for the watchdog timer source */
+#define PWRAP_WDT_SRC_EN_STAUPD_TRIG (1 << 25)
+#define PWRAP_WDT_SRC_EN_HARB_STAUPD_DLE (1 << 20)
+#define PWRAP_WDT_SRC_EN_HARB_STAUPD_ALE (1 << 6)
+#define PWRAP_WDT_SRC_MASK_ALL 0xffffffff
+#define PWRAP_WDT_SRC_MASK_NO_STAUPD	(~(PWRAP_WDT_SRC_EN_STAUPD_TRIG | \
+					   PWRAP_WDT_SRC_EN_HARB_STAUPD_DLE | \
+					   PWRAP_WDT_SRC_EN_HARB_STAUPD_ALE))
+
+/* Group of bits used to indicate slave capabilities */
+#define PWRAP_SLV_CAP_SPI BIT(0)
+#define PWRAP_SLV_CAP_DUALIO BIT(1)
+#define PWRAP_SLV_CAP_SECURITY BIT(2)
+#define HAS_CAP(_c, _x) (((_c) & (_x)) == (_x))
+
+/* defines for slave device wrapper registers */
+enum dew_regs {
+ PWRAP_DEW_BASE,
+ PWRAP_DEW_DIO_EN,
+ PWRAP_DEW_READ_TEST,
+ PWRAP_DEW_WRITE_TEST,
+ PWRAP_DEW_CRC_EN,
+ PWRAP_DEW_CRC_VAL,
+ PWRAP_DEW_MON_GRP_SEL,
+ PWRAP_DEW_CIPHER_KEY_SEL,
+ PWRAP_DEW_CIPHER_IV_SEL,
+ PWRAP_DEW_CIPHER_RDY,
+ PWRAP_DEW_CIPHER_MODE,
+ PWRAP_DEW_CIPHER_SWRST,
+
+ /* MT6397 only regs */
+ PWRAP_DEW_EVENT_OUT_EN,
+ PWRAP_DEW_EVENT_SRC_EN,
+ PWRAP_DEW_EVENT_SRC,
+ PWRAP_DEW_EVENT_FLAG,
+ PWRAP_DEW_MON_FLAG_SEL,
+ PWRAP_DEW_EVENT_TEST,
+ PWRAP_DEW_CIPHER_LOAD,
+ PWRAP_DEW_CIPHER_START,
+
+ /* MT6323 only regs */
+ PWRAP_DEW_CIPHER_EN,
+ PWRAP_DEW_RDDMY_NO,
+};
+
+static const u32 mt6323_regs[] = {
+ [PWRAP_DEW_BASE] = 0x0000,
+ [PWRAP_DEW_DIO_EN] = 0x018a,
+ [PWRAP_DEW_READ_TEST] = 0x018c,
+ [PWRAP_DEW_WRITE_TEST] = 0x018e,
+ [PWRAP_DEW_CRC_EN] = 0x0192,
+ [PWRAP_DEW_CRC_VAL] = 0x0194,
+ [PWRAP_DEW_MON_GRP_SEL] = 0x0196,
+ [PWRAP_DEW_CIPHER_KEY_SEL] = 0x0198,
+ [PWRAP_DEW_CIPHER_IV_SEL] = 0x019a,
+ [PWRAP_DEW_CIPHER_EN] = 0x019c,
+ [PWRAP_DEW_CIPHER_RDY] = 0x019e,
+ [PWRAP_DEW_CIPHER_MODE] = 0x01a0,
+ [PWRAP_DEW_CIPHER_SWRST] = 0x01a2,
+ [PWRAP_DEW_RDDMY_NO] = 0x01a4,
+};
+
+static const u32 mt6397_regs[] = {
+ [PWRAP_DEW_BASE] = 0xbc00,
+ [PWRAP_DEW_EVENT_OUT_EN] = 0xbc00,
+ [PWRAP_DEW_DIO_EN] = 0xbc02,
+ [PWRAP_DEW_EVENT_SRC_EN] = 0xbc04,
+ [PWRAP_DEW_EVENT_SRC] = 0xbc06,
+ [PWRAP_DEW_EVENT_FLAG] = 0xbc08,
+ [PWRAP_DEW_READ_TEST] = 0xbc0a,
+ [PWRAP_DEW_WRITE_TEST] = 0xbc0c,
+ [PWRAP_DEW_CRC_EN] = 0xbc0e,
+ [PWRAP_DEW_CRC_VAL] = 0xbc10,
+ [PWRAP_DEW_MON_GRP_SEL] = 0xbc12,
+ [PWRAP_DEW_MON_FLAG_SEL] = 0xbc14,
+ [PWRAP_DEW_EVENT_TEST] = 0xbc16,
+ [PWRAP_DEW_CIPHER_KEY_SEL] = 0xbc18,
+ [PWRAP_DEW_CIPHER_IV_SEL] = 0xbc1a,
+ [PWRAP_DEW_CIPHER_LOAD] = 0xbc1c,
+ [PWRAP_DEW_CIPHER_START] = 0xbc1e,
+ [PWRAP_DEW_CIPHER_RDY] = 0xbc20,
+ [PWRAP_DEW_CIPHER_MODE] = 0xbc22,
+ [PWRAP_DEW_CIPHER_SWRST] = 0xbc24,
+};
+
+static const u32 mt6351_regs[] = {
+	[PWRAP_DEW_DIO_EN] =		0x02f2,
+	[PWRAP_DEW_READ_TEST] =		0x02f4,
+	[PWRAP_DEW_WRITE_TEST] =	0x02f6,
+	[PWRAP_DEW_CRC_EN] =		0x02fa,
+	[PWRAP_DEW_CRC_VAL] =		0x02fc,
+	[PWRAP_DEW_CIPHER_KEY_SEL] =	0x0300,
+	[PWRAP_DEW_CIPHER_IV_SEL] =	0x0302,
+	[PWRAP_DEW_CIPHER_EN] =		0x0304,
+	[PWRAP_DEW_CIPHER_RDY] =	0x0306,
+	[PWRAP_DEW_CIPHER_MODE] =	0x0308,
+	[PWRAP_DEW_CIPHER_SWRST] =	0x030a,
+	[PWRAP_DEW_RDDMY_NO] =		0x030c,
+};
+
+enum pwrap_regs {
+ PWRAP_MUX_SEL,
+ PWRAP_WRAP_EN,
+ PWRAP_DIO_EN,
+ PWRAP_SIDLY,
+ PWRAP_CSHEXT_WRITE,
+ PWRAP_CSHEXT_READ,
+ PWRAP_CSLEXT_START,
+ PWRAP_CSLEXT_END,
+ PWRAP_STAUPD_PRD,
+ PWRAP_STAUPD_GRPEN,
+ PWRAP_STAUPD_MAN_TRIG,
+ PWRAP_STAUPD_STA,
+ PWRAP_WRAP_STA,
+ PWRAP_HARB_INIT,
+ PWRAP_HARB_HPRIO,
+ PWRAP_HIPRIO_ARB_EN,
+ PWRAP_HARB_STA0,
+ PWRAP_HARB_STA1,
+ PWRAP_MAN_EN,
+ PWRAP_MAN_CMD,
+ PWRAP_MAN_RDATA,
+ PWRAP_MAN_VLDCLR,
+ PWRAP_WACS0_EN,
+ PWRAP_INIT_DONE0,
+ PWRAP_WACS0_CMD,
+ PWRAP_WACS0_RDATA,
+ PWRAP_WACS0_VLDCLR,
+ PWRAP_WACS1_EN,
+ PWRAP_INIT_DONE1,
+ PWRAP_WACS1_CMD,
+ PWRAP_WACS1_RDATA,
+ PWRAP_WACS1_VLDCLR,
+ PWRAP_WACS2_EN,
+ PWRAP_INIT_DONE2,
+ PWRAP_WACS2_CMD,
+ PWRAP_WACS2_RDATA,
+ PWRAP_WACS2_VLDCLR,
+ PWRAP_INT_EN,
+ PWRAP_INT_FLG_RAW,
+ PWRAP_INT_FLG,
+ PWRAP_INT_CLR,
+ PWRAP_SIG_ADR,
+ PWRAP_SIG_MODE,
+ PWRAP_SIG_VALUE,
+ PWRAP_SIG_ERRVAL,
+ PWRAP_CRC_EN,
+ PWRAP_TIMER_EN,
+ PWRAP_TIMER_STA,
+ PWRAP_WDT_UNIT,
+ PWRAP_WDT_SRC_EN,
+ PWRAP_WDT_FLG,
+ PWRAP_DEBUG_INT_SEL,
+ PWRAP_CIPHER_KEY_SEL,
+ PWRAP_CIPHER_IV_SEL,
+ PWRAP_CIPHER_RDY,
+ PWRAP_CIPHER_MODE,
+ PWRAP_CIPHER_SWRST,
+ PWRAP_DCM_EN,
+ PWRAP_DCM_DBC_PRD,
+
+ /* MT2701 only regs */
+ PWRAP_ADC_CMD_ADDR,
+ PWRAP_PWRAP_ADC_CMD,
+ PWRAP_ADC_RDY_ADDR,
+ PWRAP_ADC_RDATA_ADDR1,
+ PWRAP_ADC_RDATA_ADDR2,
+
+ /* MT7622 only regs */
+ PWRAP_EINT_STA0_ADR,
+ PWRAP_EINT_STA1_ADR,
+ PWRAP_STA,
+ PWRAP_CLR,
+ PWRAP_DVFS_ADR8,
+ PWRAP_DVFS_WDATA8,
+ PWRAP_DVFS_ADR9,
+ PWRAP_DVFS_WDATA9,
+ PWRAP_DVFS_ADR10,
+ PWRAP_DVFS_WDATA10,
+ PWRAP_DVFS_ADR11,
+ PWRAP_DVFS_WDATA11,
+ PWRAP_DVFS_ADR12,
+ PWRAP_DVFS_WDATA12,
+ PWRAP_DVFS_ADR13,
+ PWRAP_DVFS_WDATA13,
+ PWRAP_DVFS_ADR14,
+ PWRAP_DVFS_WDATA14,
+ PWRAP_DVFS_ADR15,
+ PWRAP_DVFS_WDATA15,
+ PWRAP_EXT_CK,
+ PWRAP_ADC_RDATA_ADDR,
+ PWRAP_GPS_STA,
+ PWRAP_SW_RST,
+ PWRAP_DVFS_STEP_CTRL0,
+ PWRAP_DVFS_STEP_CTRL1,
+ PWRAP_DVFS_STEP_CTRL2,
+ PWRAP_SPI2_CTRL,
+
+ /* MT8135 only regs */
+ PWRAP_CSHEXT,
+ PWRAP_EVENT_IN_EN,
+ PWRAP_EVENT_DST_EN,
+ PWRAP_RRARB_INIT,
+ PWRAP_RRARB_EN,
+ PWRAP_RRARB_STA0,
+ PWRAP_RRARB_STA1,
+ PWRAP_EVENT_STA,
+ PWRAP_EVENT_STACLR,
+ PWRAP_CIPHER_LOAD,
+ PWRAP_CIPHER_START,
+
+ /* MT8173 only regs */
+ PWRAP_RDDMY,
+ PWRAP_SI_CK_CON,
+ PWRAP_DVFS_ADR0,
+ PWRAP_DVFS_WDATA0,
+ PWRAP_DVFS_ADR1,
+ PWRAP_DVFS_WDATA1,
+ PWRAP_DVFS_ADR2,
+ PWRAP_DVFS_WDATA2,
+ PWRAP_DVFS_ADR3,
+ PWRAP_DVFS_WDATA3,
+ PWRAP_DVFS_ADR4,
+ PWRAP_DVFS_WDATA4,
+ PWRAP_DVFS_ADR5,
+ PWRAP_DVFS_WDATA5,
+ PWRAP_DVFS_ADR6,
+ PWRAP_DVFS_WDATA6,
+ PWRAP_DVFS_ADR7,
+ PWRAP_DVFS_WDATA7,
+ PWRAP_SPMINF_STA,
+ PWRAP_CIPHER_EN,
+};
+
+static int mt2701_regs[] = {
+ [PWRAP_MUX_SEL] = 0x0,
+ [PWRAP_WRAP_EN] = 0x4,
+ [PWRAP_DIO_EN] = 0x8,
+ [PWRAP_SIDLY] = 0xc,
+ [PWRAP_RDDMY] = 0x18,
+ [PWRAP_SI_CK_CON] = 0x1c,
+ [PWRAP_CSHEXT_WRITE] = 0x20,
+ [PWRAP_CSHEXT_READ] = 0x24,
+ [PWRAP_CSLEXT_START] = 0x28,
+ [PWRAP_CSLEXT_END] = 0x2c,
+ [PWRAP_STAUPD_PRD] = 0x30,
+ [PWRAP_STAUPD_GRPEN] = 0x34,
+ [PWRAP_STAUPD_MAN_TRIG] = 0x38,
+ [PWRAP_STAUPD_STA] = 0x3c,
+ [PWRAP_WRAP_STA] = 0x44,
+ [PWRAP_HARB_INIT] = 0x48,
+ [PWRAP_HARB_HPRIO] = 0x4c,
+ [PWRAP_HIPRIO_ARB_EN] = 0x50,
+ [PWRAP_HARB_STA0] = 0x54,
+ [PWRAP_HARB_STA1] = 0x58,
+ [PWRAP_MAN_EN] = 0x5c,
+ [PWRAP_MAN_CMD] = 0x60,
+ [PWRAP_MAN_RDATA] = 0x64,
+ [PWRAP_MAN_VLDCLR] = 0x68,
+ [PWRAP_WACS0_EN] = 0x6c,
+ [PWRAP_INIT_DONE0] = 0x70,
+ [PWRAP_WACS0_CMD] = 0x74,
+ [PWRAP_WACS0_RDATA] = 0x78,
+ [PWRAP_WACS0_VLDCLR] = 0x7c,
+ [PWRAP_WACS1_EN] = 0x80,
+ [PWRAP_INIT_DONE1] = 0x84,
+ [PWRAP_WACS1_CMD] = 0x88,
+ [PWRAP_WACS1_RDATA] = 0x8c,
+ [PWRAP_WACS1_VLDCLR] = 0x90,
+ [PWRAP_WACS2_EN] = 0x94,
+ [PWRAP_INIT_DONE2] = 0x98,
+ [PWRAP_WACS2_CMD] = 0x9c,
+ [PWRAP_WACS2_RDATA] = 0xa0,
+ [PWRAP_WACS2_VLDCLR] = 0xa4,
+ [PWRAP_INT_EN] = 0xa8,
+ [PWRAP_INT_FLG_RAW] = 0xac,
+ [PWRAP_INT_FLG] = 0xb0,
+ [PWRAP_INT_CLR] = 0xb4,
+ [PWRAP_SIG_ADR] = 0xb8,
+ [PWRAP_SIG_MODE] = 0xbc,
+ [PWRAP_SIG_VALUE] = 0xc0,
+ [PWRAP_SIG_ERRVAL] = 0xc4,
+ [PWRAP_CRC_EN] = 0xc8,
+ [PWRAP_TIMER_EN] = 0xcc,
+ [PWRAP_TIMER_STA] = 0xd0,
+ [PWRAP_WDT_UNIT] = 0xd4,
+ [PWRAP_WDT_SRC_EN] = 0xd8,
+ [PWRAP_WDT_FLG] = 0xdc,
+ [PWRAP_DEBUG_INT_SEL] = 0xe0,
+ [PWRAP_DVFS_ADR0] = 0xe4,
+ [PWRAP_DVFS_WDATA0] = 0xe8,
+ [PWRAP_DVFS_ADR1] = 0xec,
+ [PWRAP_DVFS_WDATA1] = 0xf0,
+ [PWRAP_DVFS_ADR2] = 0xf4,
+ [PWRAP_DVFS_WDATA2] = 0xf8,
+ [PWRAP_DVFS_ADR3] = 0xfc,
+ [PWRAP_DVFS_WDATA3] = 0x100,
+ [PWRAP_DVFS_ADR4] = 0x104,
+ [PWRAP_DVFS_WDATA4] = 0x108,
+ [PWRAP_DVFS_ADR5] = 0x10c,
+ [PWRAP_DVFS_WDATA5] = 0x110,
+ [PWRAP_DVFS_ADR6] = 0x114,
+ [PWRAP_DVFS_WDATA6] = 0x118,
+ [PWRAP_DVFS_ADR7] = 0x11c,
+ [PWRAP_DVFS_WDATA7] = 0x120,
+ [PWRAP_CIPHER_KEY_SEL] = 0x124,
+ [PWRAP_CIPHER_IV_SEL] = 0x128,
+ [PWRAP_CIPHER_EN] = 0x12c,
+ [PWRAP_CIPHER_RDY] = 0x130,
+ [PWRAP_CIPHER_MODE] = 0x134,
+ [PWRAP_CIPHER_SWRST] = 0x138,
+ [PWRAP_DCM_EN] = 0x13c,
+ [PWRAP_DCM_DBC_PRD] = 0x140,
+ [PWRAP_ADC_CMD_ADDR] = 0x144,
+ [PWRAP_PWRAP_ADC_CMD] = 0x148,
+ [PWRAP_ADC_RDY_ADDR] = 0x14c,
+ [PWRAP_ADC_RDATA_ADDR1] = 0x150,
+ [PWRAP_ADC_RDATA_ADDR2] = 0x154,
+};
+
+static int mt6797_regs[] = {
+ [PWRAP_MUX_SEL] = 0x0,
+ [PWRAP_WRAP_EN] = 0x4,
+ [PWRAP_DIO_EN] = 0x8,
+ [PWRAP_SIDLY] = 0xC,
+ [PWRAP_RDDMY] = 0x10,
+ [PWRAP_CSHEXT_WRITE] = 0x18,
+ [PWRAP_CSHEXT_READ] = 0x1C,
+ [PWRAP_CSLEXT_START] = 0x20,
+ [PWRAP_CSLEXT_END] = 0x24,
+ [PWRAP_STAUPD_PRD] = 0x28,
+ [PWRAP_HARB_HPRIO] = 0x50,
+ [PWRAP_HIPRIO_ARB_EN] = 0x54,
+ [PWRAP_MAN_EN] = 0x60,
+ [PWRAP_MAN_CMD] = 0x64,
+ [PWRAP_WACS0_EN] = 0x70,
+ [PWRAP_WACS1_EN] = 0x84,
+ [PWRAP_WACS2_EN] = 0x98,
+ [PWRAP_INIT_DONE2] = 0x9C,
+ [PWRAP_WACS2_CMD] = 0xA0,
+ [PWRAP_WACS2_RDATA] = 0xA4,
+ [PWRAP_WACS2_VLDCLR] = 0xA8,
+ [PWRAP_INT_EN] = 0xC0,
+ [PWRAP_INT_FLG_RAW] = 0xC4,
+ [PWRAP_INT_FLG] = 0xC8,
+ [PWRAP_INT_CLR] = 0xCC,
+ [PWRAP_TIMER_EN] = 0xF4,
+ [PWRAP_WDT_UNIT] = 0xFC,
+ [PWRAP_WDT_SRC_EN] = 0x100,
+ [PWRAP_DCM_EN] = 0x1CC,
+ [PWRAP_DCM_DBC_PRD] = 0x1D4,
+};
+
+static int mt7622_regs[] = {
+ [PWRAP_MUX_SEL] = 0x0,
+ [PWRAP_WRAP_EN] = 0x4,
+ [PWRAP_DIO_EN] = 0x8,
+ [PWRAP_SIDLY] = 0xC,
+ [PWRAP_RDDMY] = 0x10,
+ [PWRAP_SI_CK_CON] = 0x14,
+ [PWRAP_CSHEXT_WRITE] = 0x18,
+ [PWRAP_CSHEXT_READ] = 0x1C,
+ [PWRAP_CSLEXT_START] = 0x20,
+ [PWRAP_CSLEXT_END] = 0x24,
+ [PWRAP_STAUPD_PRD] = 0x28,
+ [PWRAP_STAUPD_GRPEN] = 0x2C,
+ [PWRAP_EINT_STA0_ADR] = 0x30,
+ [PWRAP_EINT_STA1_ADR] = 0x34,
+ [PWRAP_STA] = 0x38,
+ [PWRAP_CLR] = 0x3C,
+ [PWRAP_STAUPD_MAN_TRIG] = 0x40,
+ [PWRAP_STAUPD_STA] = 0x44,
+ [PWRAP_WRAP_STA] = 0x48,
+ [PWRAP_HARB_INIT] = 0x4C,
+ [PWRAP_HARB_HPRIO] = 0x50,
+ [PWRAP_HIPRIO_ARB_EN] = 0x54,
+ [PWRAP_HARB_STA0] = 0x58,
+ [PWRAP_HARB_STA1] = 0x5C,
+ [PWRAP_MAN_EN] = 0x60,
+ [PWRAP_MAN_CMD] = 0x64,
+ [PWRAP_MAN_RDATA] = 0x68,
+ [PWRAP_MAN_VLDCLR] = 0x6C,
+ [PWRAP_WACS0_EN] = 0x70,
+ [PWRAP_INIT_DONE0] = 0x74,
+ [PWRAP_WACS0_CMD] = 0x78,
+ [PWRAP_WACS0_RDATA] = 0x7C,
+ [PWRAP_WACS0_VLDCLR] = 0x80,
+ [PWRAP_WACS1_EN] = 0x84,
+ [PWRAP_INIT_DONE1] = 0x88,
+ [PWRAP_WACS1_CMD] = 0x8C,
+ [PWRAP_WACS1_RDATA] = 0x90,
+ [PWRAP_WACS1_VLDCLR] = 0x94,
+ [PWRAP_WACS2_EN] = 0x98,
+ [PWRAP_INIT_DONE2] = 0x9C,
+ [PWRAP_WACS2_CMD] = 0xA0,
+ [PWRAP_WACS2_RDATA] = 0xA4,
+ [PWRAP_WACS2_VLDCLR] = 0xA8,
+ [PWRAP_INT_EN] = 0xAC,
+ [PWRAP_INT_FLG_RAW] = 0xB0,
+ [PWRAP_INT_FLG] = 0xB4,
+ [PWRAP_INT_CLR] = 0xB8,
+ [PWRAP_SIG_ADR] = 0xBC,
+ [PWRAP_SIG_MODE] = 0xC0,
+ [PWRAP_SIG_VALUE] = 0xC4,
+ [PWRAP_SIG_ERRVAL] = 0xC8,
+ [PWRAP_CRC_EN] = 0xCC,
+ [PWRAP_TIMER_EN] = 0xD0,
+ [PWRAP_TIMER_STA] = 0xD4,
+ [PWRAP_WDT_UNIT] = 0xD8,
+ [PWRAP_WDT_SRC_EN] = 0xDC,
+ [PWRAP_WDT_FLG] = 0xE0,
+ [PWRAP_DEBUG_INT_SEL] = 0xE4,
+ [PWRAP_DVFS_ADR0] = 0xE8,
+ [PWRAP_DVFS_WDATA0] = 0xEC,
+ [PWRAP_DVFS_ADR1] = 0xF0,
+ [PWRAP_DVFS_WDATA1] = 0xF4,
+ [PWRAP_DVFS_ADR2] = 0xF8,
+ [PWRAP_DVFS_WDATA2] = 0xFC,
+ [PWRAP_DVFS_ADR3] = 0x100,
+ [PWRAP_DVFS_WDATA3] = 0x104,
+ [PWRAP_DVFS_ADR4] = 0x108,
+ [PWRAP_DVFS_WDATA4] = 0x10C,
+ [PWRAP_DVFS_ADR5] = 0x110,
+ [PWRAP_DVFS_WDATA5] = 0x114,
+ [PWRAP_DVFS_ADR6] = 0x118,
+ [PWRAP_DVFS_WDATA6] = 0x11C,
+ [PWRAP_DVFS_ADR7] = 0x120,
+ [PWRAP_DVFS_WDATA7] = 0x124,
+ [PWRAP_DVFS_ADR8] = 0x128,
+ [PWRAP_DVFS_WDATA8] = 0x12C,
+ [PWRAP_DVFS_ADR9] = 0x130,
+ [PWRAP_DVFS_WDATA9] = 0x134,
+ [PWRAP_DVFS_ADR10] = 0x138,
+ [PWRAP_DVFS_WDATA10] = 0x13C,
+ [PWRAP_DVFS_ADR11] = 0x140,
+ [PWRAP_DVFS_WDATA11] = 0x144,
+ [PWRAP_DVFS_ADR12] = 0x148,
+ [PWRAP_DVFS_WDATA12] = 0x14C,
+ [PWRAP_DVFS_ADR13] = 0x150,
+ [PWRAP_DVFS_WDATA13] = 0x154,
+ [PWRAP_DVFS_ADR14] = 0x158,
+ [PWRAP_DVFS_WDATA14] = 0x15C,
+ [PWRAP_DVFS_ADR15] = 0x160,
+ [PWRAP_DVFS_WDATA15] = 0x164,
+ [PWRAP_SPMINF_STA] = 0x168,
+ [PWRAP_CIPHER_KEY_SEL] = 0x16C,
+ [PWRAP_CIPHER_IV_SEL] = 0x170,
+ [PWRAP_CIPHER_EN] = 0x174,
+ [PWRAP_CIPHER_RDY] = 0x178,
+ [PWRAP_CIPHER_MODE] = 0x17C,
+ [PWRAP_CIPHER_SWRST] = 0x180,
+ [PWRAP_DCM_EN] = 0x184,
+ [PWRAP_DCM_DBC_PRD] = 0x188,
+ [PWRAP_EXT_CK] = 0x18C,
+ [PWRAP_ADC_CMD_ADDR] = 0x190,
+ [PWRAP_PWRAP_ADC_CMD] = 0x194,
+ [PWRAP_ADC_RDATA_ADDR] = 0x198,
+ [PWRAP_GPS_STA] = 0x19C,
+ [PWRAP_SW_RST] = 0x1A0,
+ [PWRAP_DVFS_STEP_CTRL0] = 0x238,
+ [PWRAP_DVFS_STEP_CTRL1] = 0x23C,
+ [PWRAP_DVFS_STEP_CTRL2] = 0x240,
+ [PWRAP_SPI2_CTRL] = 0x244,
+};
+
+static int mt8173_regs[] = {
+ [PWRAP_MUX_SEL] = 0x0,
+ [PWRAP_WRAP_EN] = 0x4,
+ [PWRAP_DIO_EN] = 0x8,
+ [PWRAP_SIDLY] = 0xc,
+ [PWRAP_RDDMY] = 0x10,
+ [PWRAP_SI_CK_CON] = 0x14,
+ [PWRAP_CSHEXT_WRITE] = 0x18,
+ [PWRAP_CSHEXT_READ] = 0x1c,
+ [PWRAP_CSLEXT_START] = 0x20,
+ [PWRAP_CSLEXT_END] = 0x24,
+ [PWRAP_STAUPD_PRD] = 0x28,
+ [PWRAP_STAUPD_GRPEN] = 0x2c,
+ [PWRAP_STAUPD_MAN_TRIG] = 0x40,
+ [PWRAP_STAUPD_STA] = 0x44,
+ [PWRAP_WRAP_STA] = 0x48,
+ [PWRAP_HARB_INIT] = 0x4c,
+ [PWRAP_HARB_HPRIO] = 0x50,
+ [PWRAP_HIPRIO_ARB_EN] = 0x54,
+ [PWRAP_HARB_STA0] = 0x58,
+ [PWRAP_HARB_STA1] = 0x5c,
+ [PWRAP_MAN_EN] = 0x60,
+ [PWRAP_MAN_CMD] = 0x64,
+ [PWRAP_MAN_RDATA] = 0x68,
+ [PWRAP_MAN_VLDCLR] = 0x6c,
+ [PWRAP_WACS0_EN] = 0x70,
+ [PWRAP_INIT_DONE0] = 0x74,
+ [PWRAP_WACS0_CMD] = 0x78,
+ [PWRAP_WACS0_RDATA] = 0x7c,
+ [PWRAP_WACS0_VLDCLR] = 0x80,
+ [PWRAP_WACS1_EN] = 0x84,
+ [PWRAP_INIT_DONE1] = 0x88,
+ [PWRAP_WACS1_CMD] = 0x8c,
+ [PWRAP_WACS1_RDATA] = 0x90,
+ [PWRAP_WACS1_VLDCLR] = 0x94,
+ [PWRAP_WACS2_EN] = 0x98,
+ [PWRAP_INIT_DONE2] = 0x9c,
+ [PWRAP_WACS2_CMD] = 0xa0,
+ [PWRAP_WACS2_RDATA] = 0xa4,
+ [PWRAP_WACS2_VLDCLR] = 0xa8,
+ [PWRAP_INT_EN] = 0xac,
+ [PWRAP_INT_FLG_RAW] = 0xb0,
+ [PWRAP_INT_FLG] = 0xb4,
+ [PWRAP_INT_CLR] = 0xb8,
+ [PWRAP_SIG_ADR] = 0xbc,
+ [PWRAP_SIG_MODE] = 0xc0,
+ [PWRAP_SIG_VALUE] = 0xc4,
+ [PWRAP_SIG_ERRVAL] = 0xc8,
+ [PWRAP_CRC_EN] = 0xcc,
+ [PWRAP_TIMER_EN] = 0xd0,
+ [PWRAP_TIMER_STA] = 0xd4,
+ [PWRAP_WDT_UNIT] = 0xd8,
+ [PWRAP_WDT_SRC_EN] = 0xdc,
+ [PWRAP_WDT_FLG] = 0xe0,
+ [PWRAP_DEBUG_INT_SEL] = 0xe4,
+ [PWRAP_DVFS_ADR0] = 0xe8,
+ [PWRAP_DVFS_WDATA0] = 0xec,
+ [PWRAP_DVFS_ADR1] = 0xf0,
+ [PWRAP_DVFS_WDATA1] = 0xf4,
+ [PWRAP_DVFS_ADR2] = 0xf8,
+ [PWRAP_DVFS_WDATA2] = 0xfc,
+ [PWRAP_DVFS_ADR3] = 0x100,
+ [PWRAP_DVFS_WDATA3] = 0x104,
+ [PWRAP_DVFS_ADR4] = 0x108,
+ [PWRAP_DVFS_WDATA4] = 0x10c,
+ [PWRAP_DVFS_ADR5] = 0x110,
+ [PWRAP_DVFS_WDATA5] = 0x114,
+ [PWRAP_DVFS_ADR6] = 0x118,
+ [PWRAP_DVFS_WDATA6] = 0x11c,
+ [PWRAP_DVFS_ADR7] = 0x120,
+ [PWRAP_DVFS_WDATA7] = 0x124,
+ [PWRAP_SPMINF_STA] = 0x128,
+ [PWRAP_CIPHER_KEY_SEL] = 0x12c,
+ [PWRAP_CIPHER_IV_SEL] = 0x130,
+ [PWRAP_CIPHER_EN] = 0x134,
+ [PWRAP_CIPHER_RDY] = 0x138,
+ [PWRAP_CIPHER_MODE] = 0x13c,
+ [PWRAP_CIPHER_SWRST] = 0x140,
+ [PWRAP_DCM_EN] = 0x144,
+ [PWRAP_DCM_DBC_PRD] = 0x148,
+};
+
+static int mt8135_regs[] = {
+ [PWRAP_MUX_SEL] = 0x0,
+ [PWRAP_WRAP_EN] = 0x4,
+ [PWRAP_DIO_EN] = 0x8,
+ [PWRAP_SIDLY] = 0xc,
+ [PWRAP_CSHEXT] = 0x10,
+ [PWRAP_CSHEXT_WRITE] = 0x14,
+ [PWRAP_CSHEXT_READ] = 0x18,
+ [PWRAP_CSLEXT_START] = 0x1c,
+ [PWRAP_CSLEXT_END] = 0x20,
+ [PWRAP_STAUPD_PRD] = 0x24,
+ [PWRAP_STAUPD_GRPEN] = 0x28,
+ [PWRAP_STAUPD_MAN_TRIG] = 0x2c,
+ [PWRAP_STAUPD_STA] = 0x30,
+ [PWRAP_EVENT_IN_EN] = 0x34,
+ [PWRAP_EVENT_DST_EN] = 0x38,
+ [PWRAP_WRAP_STA] = 0x3c,
+ [PWRAP_RRARB_INIT] = 0x40,
+ [PWRAP_RRARB_EN] = 0x44,
+ [PWRAP_RRARB_STA0] = 0x48,
+ [PWRAP_RRARB_STA1] = 0x4c,
+ [PWRAP_HARB_INIT] = 0x50,
+ [PWRAP_HARB_HPRIO] = 0x54,
+ [PWRAP_HIPRIO_ARB_EN] = 0x58,
+ [PWRAP_HARB_STA0] = 0x5c,
+ [PWRAP_HARB_STA1] = 0x60,
+ [PWRAP_MAN_EN] = 0x64,
+ [PWRAP_MAN_CMD] = 0x68,
+ [PWRAP_MAN_RDATA] = 0x6c,
+ [PWRAP_MAN_VLDCLR] = 0x70,
+ [PWRAP_WACS0_EN] = 0x74,
+ [PWRAP_INIT_DONE0] = 0x78,
+ [PWRAP_WACS0_CMD] = 0x7c,
+ [PWRAP_WACS0_RDATA] = 0x80,
+ [PWRAP_WACS0_VLDCLR] = 0x84,
+ [PWRAP_WACS1_EN] = 0x88,
+ [PWRAP_INIT_DONE1] = 0x8c,
+ [PWRAP_WACS1_CMD] = 0x90,
+ [PWRAP_WACS1_RDATA] = 0x94,
+ [PWRAP_WACS1_VLDCLR] = 0x98,
+ [PWRAP_WACS2_EN] = 0x9c,
+ [PWRAP_INIT_DONE2] = 0xa0,
+ [PWRAP_WACS2_CMD] = 0xa4,
+ [PWRAP_WACS2_RDATA] = 0xa8,
+ [PWRAP_WACS2_VLDCLR] = 0xac,
+ [PWRAP_INT_EN] = 0xb0,
+ [PWRAP_INT_FLG_RAW] = 0xb4,
+ [PWRAP_INT_FLG] = 0xb8,
+ [PWRAP_INT_CLR] = 0xbc,
+ [PWRAP_SIG_ADR] = 0xc0,
+ [PWRAP_SIG_MODE] = 0xc4,
+ [PWRAP_SIG_VALUE] = 0xc8,
+ [PWRAP_SIG_ERRVAL] = 0xcc,
+ [PWRAP_CRC_EN] = 0xd0,
+ [PWRAP_EVENT_STA] = 0xd4,
+ [PWRAP_EVENT_STACLR] = 0xd8,
+ [PWRAP_TIMER_EN] = 0xdc,
+ [PWRAP_TIMER_STA] = 0xe0,
+ [PWRAP_WDT_UNIT] = 0xe4,
+ [PWRAP_WDT_SRC_EN] = 0xe8,
+ [PWRAP_WDT_FLG] = 0xec,
+ [PWRAP_DEBUG_INT_SEL] = 0xf0,
+ [PWRAP_CIPHER_KEY_SEL] = 0x134,
+ [PWRAP_CIPHER_IV_SEL] = 0x138,
+ [PWRAP_CIPHER_LOAD] = 0x13c,
+ [PWRAP_CIPHER_START] = 0x140,
+ [PWRAP_CIPHER_RDY] = 0x144,
+ [PWRAP_CIPHER_MODE] = 0x148,
+ [PWRAP_CIPHER_SWRST] = 0x14c,
+ [PWRAP_DCM_EN] = 0x15c,
+ [PWRAP_DCM_DBC_PRD] = 0x160,
+};
+
+enum pmic_type {
+ PMIC_MT6323,
+ PMIC_MT6351,
+ PMIC_MT6380,
+ PMIC_MT6397,
+};
+
+enum pwrap_type {
+ PWRAP_MT2701,
+ PWRAP_MT6797,
+ PWRAP_MT7622,
+ PWRAP_MT8135,
+ PWRAP_MT8173,
+};
+
+struct pmic_wrapper;
+struct pwrap_slv_type {
+ const u32 *dew_regs;
+ enum pmic_type type;
+ const struct regmap_config *regmap;
+ /* Flags indicating the capabilities of the target slave */
+ u32 caps;
+ /*
+ * pwrap operations depend heavily on the PMIC type, so these
+ * function pointers let the driver select the implementation
+ * matching the slave type detected through the device tree.
+ */
+ int (*pwrap_read)(struct pmic_wrapper *wrp, u32 adr, u32 *rdata);
+ int (*pwrap_write)(struct pmic_wrapper *wrp, u32 adr, u32 wdata);
+};
+
+struct pmic_wrapper {
+ struct device *dev;
+ void __iomem *base;
+ struct regmap *regmap;
+ const struct pmic_wrapper_type *master;
+ const struct pwrap_slv_type *slave;
+ struct clk *clk_spi;
+ struct clk *clk_wrap;
+ struct reset_control *rstc;
+
+ struct reset_control *rstc_bridge;
+ void __iomem *bridge_base;
+};
+
+struct pmic_wrapper_type {
+ int *regs;
+ enum pwrap_type type;
+ u32 arb_en_all;
+ u32 int_en_all;
+ u32 spi_w;
+ u32 wdt_src;
+ unsigned int has_bridge:1;
+ int (*init_reg_clock)(struct pmic_wrapper *wrp);
+ int (*init_soc_specific)(struct pmic_wrapper *wrp);
+};
+
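+/*
+ * All MMIO accesses go through the per-SoC offset table in
+ * wrp->master->regs[], so one enum pwrap_regs index resolves to the
+ * correct register offset on every supported SoC.
+ */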
+static u32 pwrap_readl(struct pmic_wrapper *wrp, enum pwrap_regs reg)
+{
+ return readl(wrp->base + wrp->master->regs[reg]);
+}
+
+static void pwrap_writel(struct pmic_wrapper *wrp, u32 val, enum pwrap_regs reg)
+{
+ writel(val, wrp->base + wrp->master->regs[reg]);
+}
+
+static bool pwrap_is_fsm_idle(struct pmic_wrapper *wrp)
+{
+ u32 val = pwrap_readl(wrp, PWRAP_WACS2_RDATA);
+
+ return PWRAP_GET_WACS_FSM(val) == PWRAP_WACS_FSM_IDLE;
+}
+
+static bool pwrap_is_fsm_vldclr(struct pmic_wrapper *wrp)
+{
+ u32 val = pwrap_readl(wrp, PWRAP_WACS2_RDATA);
+
+ return PWRAP_GET_WACS_FSM(val) == PWRAP_WACS_FSM_WFVLDCLR;
+}
+
+/*
+ * A timeout is sometimes caused by the previous read command failing:
+ * if the PMIC wrapper could not reach FSM_VLDCLR in time after
+ * finishing WACS2_CMD, the state machine stays in FSM_VLDCLR and the
+ * next access times out as well. Check the FSM state and clear the
+ * valid flag to recover from this error.
+ */
+static inline void pwrap_leave_fsm_vldclr(struct pmic_wrapper *wrp)
+{
+ if (pwrap_is_fsm_vldclr(wrp))
+ pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR);
+}
+
+static bool pwrap_is_sync_idle(struct pmic_wrapper *wrp)
+{
+ return pwrap_readl(wrp, PWRAP_WACS2_RDATA) & PWRAP_STATE_SYNC_IDLE0;
+}
+
+static bool pwrap_is_fsm_idle_and_sync_idle(struct pmic_wrapper *wrp)
+{
+ u32 val = pwrap_readl(wrp, PWRAP_WACS2_RDATA);
+
+ return (PWRAP_GET_WACS_FSM(val) == PWRAP_WACS_FSM_IDLE) &&
+ (val & PWRAP_STATE_SYNC_IDLE0);
+}
+
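+/*
+ * Busy-poll until fp() reports the desired state or roughly 10ms have
+ * elapsed. Note that fp() is checked once more after the deadline
+ * passes, so a long preemption between iterations does not cause a
+ * spurious -ETIMEDOUT.
+ */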
+static int pwrap_wait_for_state(struct pmic_wrapper *wrp,
+ bool (*fp)(struct pmic_wrapper *))
+{
+ unsigned long timeout;
+
+ timeout = jiffies + usecs_to_jiffies(10000);
+
+ do {
+ if (time_after(jiffies, timeout))
+ return fp(wrp) ? 0 : -ETIMEDOUT;
+ if (fp(wrp))
+ return 0;
+ } while (1);
+}
+
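+/*
+ * WACS2 command layout for 16-bit slaves, as encoded by the helpers
+ * below: bit 31 is the write flag, bits 30:16 carry the register
+ * address shifted right by one, and bits 15:0 carry the write data.
+ */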
+static int pwrap_read16(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
+{
+ int ret;
+
+ ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle);
+ if (ret) {
+ pwrap_leave_fsm_vldclr(wrp);
+ return ret;
+ }
+
+ pwrap_writel(wrp, (adr >> 1) << 16, PWRAP_WACS2_CMD);
+
+ ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_vldclr);
+ if (ret)
+ return ret;
+
+ *rdata = PWRAP_GET_WACS_RDATA(pwrap_readl(wrp, PWRAP_WACS2_RDATA));
+
+ pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR);
+
+ return 0;
+}
+
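+/*
+ * 32-bit slaves are accessed as two 16-bit halves: bit 30 of the WACS2
+ * command selects the half, and pwrap_read32()/pwrap_write32() below
+ * assemble or split the full 32-bit value accordingly.
+ */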
+static int pwrap_read32(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
+{
+ int ret, msb;
+
+ *rdata = 0;
+ for (msb = 0; msb < 2; msb++) {
+ ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle);
+ if (ret) {
+ pwrap_leave_fsm_vldclr(wrp);
+ return ret;
+ }
+
+ pwrap_writel(wrp, ((msb << 30) | (adr << 16)),
+ PWRAP_WACS2_CMD);
+
+ ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_vldclr);
+ if (ret)
+ return ret;
+
+ *rdata += (PWRAP_GET_WACS_RDATA(pwrap_readl(wrp,
+ PWRAP_WACS2_RDATA)) << (16 * msb));
+
+ pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR);
+ }
+
+ return 0;
+}
+
+static int pwrap_read(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
+{
+ return wrp->slave->pwrap_read(wrp, adr, rdata);
+}
+
+static int pwrap_write16(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
+{
+ int ret;
+
+ ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle);
+ if (ret) {
+ pwrap_leave_fsm_vldclr(wrp);
+ return ret;
+ }
+
+ pwrap_writel(wrp, (1 << 31) | ((adr >> 1) << 16) | wdata,
+ PWRAP_WACS2_CMD);
+
+ return 0;
+}
+
+static int pwrap_write32(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
+{
+ int ret, msb, rdata;
+
+ for (msb = 0; msb < 2; msb++) {
+ ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle);
+ if (ret) {
+ pwrap_leave_fsm_vldclr(wrp);
+ return ret;
+ }
+
+ pwrap_writel(wrp, (1 << 31) | (msb << 30) | (adr << 16) |
+ ((wdata >> (msb * 16)) & 0xffff),
+ PWRAP_WACS2_CMD);
+
+ /*
+ * The hardware requires a pwrap_read operation to
+ * synchronize the two successive 16-bit writes that
+ * compose one 32-bit bus write; without it, the write
+ * of the lower 16 bits fails.
+ */
+ if (!msb)
+ pwrap_read(wrp, adr, &rdata);
+ }
+
+ return 0;
+}
+
+static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
+{
+ return wrp->slave->pwrap_write(wrp, adr, wdata);
+}
+
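+/*
+ * regmap glue: the regmap core invokes these callbacks with the context
+ * pointer passed to devm_regmap_init() in pwrap_probe(), i.e. the
+ * struct pmic_wrapper itself. A consumer can then reach the PMIC with,
+ * for example, regmap_read(wrp->regmap, reg, &val).
+ */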
+static int pwrap_regmap_read(void *context, u32 adr, u32 *rdata)
+{
+ return pwrap_read(context, adr, rdata);
+}
+
+static int pwrap_regmap_write(void *context, u32 adr, u32 wdata)
+{
+ return pwrap_write(context, adr, wdata);
+}
+
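+/*
+ * Reset the SPI slave by switching the wrapper to manual mode and
+ * issuing a CSL/OUTS/CSH sequence followed by four dummy OUTS commands,
+ * then hand the bus back to normal (non-manual) operation.
+ */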
+static int pwrap_reset_spislave(struct pmic_wrapper *wrp)
+{
+ int ret, i;
+
+ pwrap_writel(wrp, 0, PWRAP_HIPRIO_ARB_EN);
+ pwrap_writel(wrp, 0, PWRAP_WRAP_EN);
+ pwrap_writel(wrp, 1, PWRAP_MUX_SEL);
+ pwrap_writel(wrp, 1, PWRAP_MAN_EN);
+ pwrap_writel(wrp, 0, PWRAP_DIO_EN);
+
+ pwrap_writel(wrp, wrp->master->spi_w | PWRAP_MAN_CMD_OP_CSL,
+ PWRAP_MAN_CMD);
+ pwrap_writel(wrp, wrp->master->spi_w | PWRAP_MAN_CMD_OP_OUTS,
+ PWRAP_MAN_CMD);
+ pwrap_writel(wrp, wrp->master->spi_w | PWRAP_MAN_CMD_OP_CSH,
+ PWRAP_MAN_CMD);
+
+ for (i = 0; i < 4; i++)
+ pwrap_writel(wrp, wrp->master->spi_w | PWRAP_MAN_CMD_OP_OUTS,
+ PWRAP_MAN_CMD);
+
+ ret = pwrap_wait_for_state(wrp, pwrap_is_sync_idle);
+ if (ret) {
+ dev_err(wrp->dev, "%s fail, ret=%d\n", __func__, ret);
+ return ret;
+ }
+
+ pwrap_writel(wrp, 0, PWRAP_MAN_EN);
+ pwrap_writel(wrp, 0, PWRAP_MUX_SEL);
+
+ return 0;
+}
+
+/*
+ * pwrap_init_sidly - configure serial input delay
+ *
+ * This configures the serial input delay, which can be 0, 2, 4 or 6ns.
+ * Do a read test with all possible values and choose the best delay.
+ */
+static int pwrap_init_sidly(struct pmic_wrapper *wrp)
+{
+ u32 rdata;
+ u32 i;
+ u32 pass = 0;
+ signed char dly[16] = {
+ -1, 0, 1, 0, 2, -1, 1, 1, 3, -1, -1, -1, 3, -1, 2, 1
+ };
+
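+ /*
+ * The loop below builds 'pass' as a 4-bit mask of the SIDLY values
+ * whose read test succeeded; dly[] then appears to map each mask to
+ * a delay near the centre of the passing window, with -1 marking
+ * masks where nothing passed or the window is not contiguous.
+ */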
+ for (i = 0; i < 4; i++) {
+ pwrap_writel(wrp, i, PWRAP_SIDLY);
+ pwrap_read(wrp, wrp->slave->dew_regs[PWRAP_DEW_READ_TEST],
+ &rdata);
+ if (rdata == PWRAP_DEW_READ_TEST_VAL) {
+ dev_dbg(wrp->dev, "[Read Test] pass, SIDLY=%x\n", i);
+ pass |= 1 << i;
+ }
+ }
+
+ if (dly[pass] < 0) {
+ dev_err(wrp->dev, "sidly pass range 0x%x not continuous\n",
+ pass);
+ return -EIO;
+ }
+
+ pwrap_writel(wrp, dly[pass], PWRAP_SIDLY);
+
+ return 0;
+}
+
+static int pwrap_init_dual_io(struct pmic_wrapper *wrp)
+{
+ int ret;
+ u32 rdata;
+
+ /* Enable dual IO mode */
+ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_DIO_EN], 1);
+
+ /* Check IDLE & INIT_DONE in advance */
+ ret = pwrap_wait_for_state(wrp,
+ pwrap_is_fsm_idle_and_sync_idle);
+ if (ret) {
+ dev_err(wrp->dev, "%s fail, ret=%d\n", __func__, ret);
+ return ret;
+ }
+
+ pwrap_writel(wrp, 1, PWRAP_DIO_EN);
+
+ /* Read Test */
+ pwrap_read(wrp,
+ wrp->slave->dew_regs[PWRAP_DEW_READ_TEST], &rdata);
+ if (rdata != PWRAP_DEW_READ_TEST_VAL) {
+ dev_err(wrp->dev,
+ "Read failed on DIO mode: 0x%04x!=0x%04x\n",
+ PWRAP_DEW_READ_TEST_VAL, rdata);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/*
+ * pwrap_init_chip_select_ext is used to configure CS extension time for each
+ * phase during data transactions on the pwrap bus.
+ */
+static void pwrap_init_chip_select_ext(struct pmic_wrapper *wrp, u8 hext_write,
+ u8 hext_read, u8 lext_start,
+ u8 lext_end)
+{
+ /*
+ * After finishing a write or read transaction, extend the CS high
+ * time to at least the number of BUS CLK cycles specified by
+ * hext_write and hext_read respectively.
+ */
+ pwrap_writel(wrp, hext_write, PWRAP_CSHEXT_WRITE);
+ pwrap_writel(wrp, hext_read, PWRAP_CSHEXT_READ);
+
+ /*
+ * Extend the CS low time after the CSL command and before the CSH
+ * command to at least the number of BUS CLK cycles specified by
+ * lext_start and lext_end respectively.
+ */
+ pwrap_writel(wrp, lext_start, PWRAP_CSLEXT_START);
+ pwrap_writel(wrp, lext_end, PWRAP_CSLEXT_END);
+}
+
+static int pwrap_common_init_reg_clock(struct pmic_wrapper *wrp)
+{
+ switch (wrp->master->type) {
+ case PWRAP_MT8173:
+ pwrap_init_chip_select_ext(wrp, 0, 4, 2, 2);
+ break;
+ case PWRAP_MT8135:
+ pwrap_writel(wrp, 0x4, PWRAP_CSHEXT);
+ pwrap_init_chip_select_ext(wrp, 0, 4, 0, 0);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int pwrap_mt2701_init_reg_clock(struct pmic_wrapper *wrp)
+{
+ switch (wrp->slave->type) {
+ case PMIC_MT6397:
+ pwrap_writel(wrp, 0xc, PWRAP_RDDMY);
+ pwrap_init_chip_select_ext(wrp, 4, 0, 2, 2);
+ break;
+
+ case PMIC_MT6323:
+ pwrap_writel(wrp, 0x8, PWRAP_RDDMY);
+ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_RDDMY_NO],
+ 0x8);
+ pwrap_init_chip_select_ext(wrp, 5, 0, 2, 2);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static bool pwrap_is_cipher_ready(struct pmic_wrapper *wrp)
+{
+ return pwrap_readl(wrp, PWRAP_CIPHER_RDY) & 1;
+}
+
+static bool pwrap_is_pmic_cipher_ready(struct pmic_wrapper *wrp)
+{
+ u32 rdata;
+ int ret;
+
+ ret = pwrap_read(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_RDY],
+ &rdata);
+ if (ret)
+ return 0;
+
+ return rdata == 1;
+}
+
+static int pwrap_init_cipher(struct pmic_wrapper *wrp)
+{
+ int ret;
+ u32 rdata = 0;
+
+ pwrap_writel(wrp, 0x1, PWRAP_CIPHER_SWRST);
+ pwrap_writel(wrp, 0x0, PWRAP_CIPHER_SWRST);
+ pwrap_writel(wrp, 0x1, PWRAP_CIPHER_KEY_SEL);
+ pwrap_writel(wrp, 0x2, PWRAP_CIPHER_IV_SEL);
+
+ switch (wrp->master->type) {
+ case PWRAP_MT8135:
+ pwrap_writel(wrp, 1, PWRAP_CIPHER_LOAD);
+ pwrap_writel(wrp, 1, PWRAP_CIPHER_START);
+ break;
+ case PWRAP_MT2701:
+ case PWRAP_MT6797:
+ case PWRAP_MT8173:
+ pwrap_writel(wrp, 1, PWRAP_CIPHER_EN);
+ break;
+ case PWRAP_MT7622:
+ pwrap_writel(wrp, 0, PWRAP_CIPHER_EN);
+ break;
+ }
+
+ /* Config cipher mode @PMIC */
+ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_SWRST], 0x1);
+ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_SWRST], 0x0);
+ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_KEY_SEL], 0x1);
+ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_IV_SEL], 0x2);
+
+ switch (wrp->slave->type) {
+ case PMIC_MT6397:
+ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_LOAD],
+ 0x1);
+ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_START],
+ 0x1);
+ break;
+ case PMIC_MT6323:
+ case PMIC_MT6351:
+ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_EN],
+ 0x1);
+ break;
+ default:
+ break;
+ }
+
+ /* wait for cipher data ready@AP */
+ ret = pwrap_wait_for_state(wrp, pwrap_is_cipher_ready);
+ if (ret) {
+ dev_err(wrp->dev, "cipher data ready@AP fail, ret=%d\n", ret);
+ return ret;
+ }
+
+ /* wait for cipher data ready@PMIC */
+ ret = pwrap_wait_for_state(wrp, pwrap_is_pmic_cipher_ready);
+ if (ret) {
+ dev_err(wrp->dev,
+ "timeout waiting for cipher data ready@PMIC\n");
+ return ret;
+ }
+
+ /* wait for cipher mode idle */
+ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_MODE], 0x1);
+ ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle_and_sync_idle);
+ if (ret) {
+ dev_err(wrp->dev, "cipher mode idle fail, ret=%d\n", ret);
+ return ret;
+ }
+
+ pwrap_writel(wrp, 1, PWRAP_CIPHER_MODE);
+
+ /* Write Test */
+ if (pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_WRITE_TEST],
+ PWRAP_DEW_WRITE_TEST_VAL) ||
+ pwrap_read(wrp, wrp->slave->dew_regs[PWRAP_DEW_WRITE_TEST],
+ &rdata) ||
+ (rdata != PWRAP_DEW_WRITE_TEST_VAL)) {
+ dev_err(wrp->dev, "rdata=0x%04X\n", rdata);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int pwrap_init_security(struct pmic_wrapper *wrp)
+{
+ int ret;
+
+ /* Enable encryption */
+ ret = pwrap_init_cipher(wrp);
+ if (ret)
+ return ret;
+
+ /* Signature checking - using CRC */
+ if (pwrap_write(wrp,
+ wrp->slave->dew_regs[PWRAP_DEW_CRC_EN], 0x1))
+ return -EFAULT;
+
+ pwrap_writel(wrp, 0x1, PWRAP_CRC_EN);
+ pwrap_writel(wrp, 0x0, PWRAP_SIG_MODE);
+ pwrap_writel(wrp, wrp->slave->dew_regs[PWRAP_DEW_CRC_VAL],
+ PWRAP_SIG_ADR);
+ pwrap_writel(wrp,
+ wrp->master->arb_en_all, PWRAP_HIPRIO_ARB_EN);
+
+ return 0;
+}
+
+static int pwrap_mt8135_init_soc_specific(struct pmic_wrapper *wrp)
+{
+ /* enable pwrap events and the pwrap bridge on the AP side */
+ pwrap_writel(wrp, 0x1, PWRAP_EVENT_IN_EN);
+ pwrap_writel(wrp, 0xffff, PWRAP_EVENT_DST_EN);
+ writel(0x7f, wrp->bridge_base + PWRAP_MT8135_BRIDGE_IORD_ARB_EN);
+ writel(0x1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_WACS3_EN);
+ writel(0x1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_WACS4_EN);
+ writel(0x1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_WDT_UNIT);
+ writel(0xffff, wrp->bridge_base + PWRAP_MT8135_BRIDGE_WDT_SRC_EN);
+ writel(0x1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_TIMER_EN);
+ writel(0x7ff, wrp->bridge_base + PWRAP_MT8135_BRIDGE_INT_EN);
+
+ /* enable PMIC event out and sources */
+ if (pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_EVENT_OUT_EN],
+ 0x1) ||
+ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_EVENT_SRC_EN],
+ 0xffff)) {
+ dev_err(wrp->dev, "enable dewrap fail\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int pwrap_mt8173_init_soc_specific(struct pmic_wrapper *wrp)
+{
+ /* PMIC_DEWRAP enables */
+ if (pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_EVENT_OUT_EN],
+ 0x1) ||
+ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_EVENT_SRC_EN],
+ 0xffff)) {
+ dev_err(wrp->dev, "enable dewrap fail\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int pwrap_mt2701_init_soc_specific(struct pmic_wrapper *wrp)
+{
+ /* GPS_INTF initialization */
+ switch (wrp->slave->type) {
+ case PMIC_MT6323:
+ pwrap_writel(wrp, 0x076c, PWRAP_ADC_CMD_ADDR);
+ pwrap_writel(wrp, 0x8000, PWRAP_PWRAP_ADC_CMD);
+ pwrap_writel(wrp, 0x072c, PWRAP_ADC_RDY_ADDR);
+ pwrap_writel(wrp, 0x072e, PWRAP_ADC_RDATA_ADDR1);
+ pwrap_writel(wrp, 0x0730, PWRAP_ADC_RDATA_ADDR2);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int pwrap_mt7622_init_soc_specific(struct pmic_wrapper *wrp)
+{
+ pwrap_writel(wrp, 0, PWRAP_STAUPD_PRD);
+ /* enable 2-wire SPI master */
+ pwrap_writel(wrp, 0x8000000, PWRAP_SPI2_CTRL);
+
+ return 0;
+}
+
+static int pwrap_init(struct pmic_wrapper *wrp)
+{
+ int ret;
+
+ reset_control_reset(wrp->rstc);
+ if (wrp->rstc_bridge)
+ reset_control_reset(wrp->rstc_bridge);
+
+ if (wrp->master->type == PWRAP_MT8173) {
+ /* Enable DCM */
+ pwrap_writel(wrp, 3, PWRAP_DCM_EN);
+ pwrap_writel(wrp, 0, PWRAP_DCM_DBC_PRD);
+ }
+
+ if (HAS_CAP(wrp->slave->caps, PWRAP_SLV_CAP_SPI)) {
+ /* Reset SPI slave */
+ ret = pwrap_reset_spislave(wrp);
+ if (ret)
+ return ret;
+ }
+
+ pwrap_writel(wrp, 1, PWRAP_WRAP_EN);
+
+ pwrap_writel(wrp, wrp->master->arb_en_all, PWRAP_HIPRIO_ARB_EN);
+
+ pwrap_writel(wrp, 1, PWRAP_WACS2_EN);
+
+ ret = wrp->master->init_reg_clock(wrp);
+ if (ret)
+ return ret;
+
+ if (HAS_CAP(wrp->slave->caps, PWRAP_SLV_CAP_SPI)) {
+ /* Set up the serial input delay */
+ ret = pwrap_init_sidly(wrp);
+ if (ret)
+ return ret;
+ }
+
+ if (HAS_CAP(wrp->slave->caps, PWRAP_SLV_CAP_DUALIO)) {
+ /* Enable dual I/O mode */
+ ret = pwrap_init_dual_io(wrp);
+ if (ret)
+ return ret;
+ }
+
+ if (HAS_CAP(wrp->slave->caps, PWRAP_SLV_CAP_SECURITY)) {
+ /* Enable security on bus */
+ ret = pwrap_init_security(wrp);
+ if (ret)
+ return ret;
+ }
+
+ if (wrp->master->type == PWRAP_MT8135)
+ pwrap_writel(wrp, 0x7, PWRAP_RRARB_EN);
+
+ pwrap_writel(wrp, 0x1, PWRAP_WACS0_EN);
+ pwrap_writel(wrp, 0x1, PWRAP_WACS1_EN);
+ pwrap_writel(wrp, 0x1, PWRAP_WACS2_EN);
+ pwrap_writel(wrp, 0x5, PWRAP_STAUPD_PRD);
+ pwrap_writel(wrp, 0xff, PWRAP_STAUPD_GRPEN);
+
+ if (wrp->master->init_soc_specific) {
+ ret = wrp->master->init_soc_specific(wrp);
+ if (ret)
+ return ret;
+ }
+
+ /* Set up the init done registers */
+ pwrap_writel(wrp, 1, PWRAP_INIT_DONE2);
+ pwrap_writel(wrp, 1, PWRAP_INIT_DONE0);
+ pwrap_writel(wrp, 1, PWRAP_INIT_DONE1);
+
+ if (wrp->master->has_bridge) {
+ writel(1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_INIT_DONE3);
+ writel(1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_INIT_DONE4);
+ }
+
+ return 0;
+}
+
+static irqreturn_t pwrap_interrupt(int irqno, void *dev_id)
+{
+ u32 rdata;
+ struct pmic_wrapper *wrp = dev_id;
+
+ rdata = pwrap_readl(wrp, PWRAP_INT_FLG);
+
+ dev_err(wrp->dev, "unexpected interrupt int=0x%x\n", rdata);
+
+ pwrap_writel(wrp, 0xffffffff, PWRAP_INT_CLR);
+
+ return IRQ_HANDLED;
+}
+
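+/*
+ * Two regmap flavours: 16-bit registers and values for most PMICs and
+ * 32-bit ones for the MT6380, matching the pwrap_read16/32 and
+ * pwrap_write16/32 accessors selected per slave below.
+ */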
+static const struct regmap_config pwrap_regmap_config16 = {
+ .reg_bits = 16,
+ .val_bits = 16,
+ .reg_stride = 2,
+ .reg_read = pwrap_regmap_read,
+ .reg_write = pwrap_regmap_write,
+ .max_register = 0xffff,
+};
+
+static const struct regmap_config pwrap_regmap_config32 = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .reg_read = pwrap_regmap_read,
+ .reg_write = pwrap_regmap_write,
+ .max_register = 0xffff,
+};
+
+static const struct pwrap_slv_type pmic_mt6323 = {
+ .dew_regs = mt6323_regs,
+ .type = PMIC_MT6323,
+ .regmap = &pwrap_regmap_config16,
+ .caps = PWRAP_SLV_CAP_SPI | PWRAP_SLV_CAP_DUALIO |
+ PWRAP_SLV_CAP_SECURITY,
+ .pwrap_read = pwrap_read16,
+ .pwrap_write = pwrap_write16,
+};
+
+static const struct pwrap_slv_type pmic_mt6380 = {
+ .dew_regs = NULL,
+ .type = PMIC_MT6380,
+ .regmap = &pwrap_regmap_config32,
+ .caps = 0,
+ .pwrap_read = pwrap_read32,
+ .pwrap_write = pwrap_write32,
+};
+
+static const struct pwrap_slv_type pmic_mt6397 = {
+ .dew_regs = mt6397_regs,
+ .type = PMIC_MT6397,
+ .regmap = &pwrap_regmap_config16,
+ .caps = PWRAP_SLV_CAP_SPI | PWRAP_SLV_CAP_DUALIO |
+ PWRAP_SLV_CAP_SECURITY,
+ .pwrap_read = pwrap_read16,
+ .pwrap_write = pwrap_write16,
+};
+
+static const struct pwrap_slv_type pmic_mt6351 = {
+ .dew_regs = mt6351_regs,
+ .type = PMIC_MT6351,
+ .regmap = &pwrap_regmap_config16,
+ .caps = 0,
+ .pwrap_read = pwrap_read16,
+ .pwrap_write = pwrap_write16,
+};
+
+static const struct of_device_id of_slave_match_tbl[] = {
+ {
+ .compatible = "mediatek,mt6323",
+ .data = &pmic_mt6323,
+ }, {
+ /* The MT6380 PMIC only implements a regulator, so we bind it
+ * directly instead of using an MFD.
+ */
+ .compatible = "mediatek,mt6380-regulator",
+ .data = &pmic_mt6380,
+ }, {
+ .compatible = "mediatek,mt6397",
+ .data = &pmic_mt6397,
+ }, {
+ .compatible = "mediatek,mt6351",
+ .data = &pmic_mt6351,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, of_slave_match_tbl);
+
+static const struct pmic_wrapper_type pwrap_mt2701 = {
+ .regs = mt2701_regs,
+ .type = PWRAP_MT2701,
+ .arb_en_all = 0x3f,
+ .int_en_all = ~(u32)(BIT(31) | BIT(2)),
+ .spi_w = PWRAP_MAN_CMD_SPI_WRITE_NEW,
+ .wdt_src = PWRAP_WDT_SRC_MASK_ALL,
+ .has_bridge = 0,
+ .init_reg_clock = pwrap_mt2701_init_reg_clock,
+ .init_soc_specific = pwrap_mt2701_init_soc_specific,
+};
+
+static const struct pmic_wrapper_type pwrap_mt6797 = {
+ .regs = mt6797_regs,
+ .type = PWRAP_MT6797,
+ .arb_en_all = 0x01fff,
+ .int_en_all = 0xffffffc6,
+ .spi_w = PWRAP_MAN_CMD_SPI_WRITE,
+ .wdt_src = PWRAP_WDT_SRC_MASK_ALL,
+ .has_bridge = 0,
+ .init_reg_clock = pwrap_common_init_reg_clock,
+ .init_soc_specific = NULL,
+};
+
+static const struct pmic_wrapper_type pwrap_mt7622 = {
+ .regs = mt7622_regs,
+ .type = PWRAP_MT7622,
+ .arb_en_all = 0xff,
+ .int_en_all = ~(u32)BIT(31),
+ .spi_w = PWRAP_MAN_CMD_SPI_WRITE,
+ .wdt_src = PWRAP_WDT_SRC_MASK_ALL,
+ .has_bridge = 0,
+ .init_reg_clock = pwrap_common_init_reg_clock,
+ .init_soc_specific = pwrap_mt7622_init_soc_specific,
+};
+
+static const struct pmic_wrapper_type pwrap_mt8135 = {
+ .regs = mt8135_regs,
+ .type = PWRAP_MT8135,
+ .arb_en_all = 0x1ff,
+ .int_en_all = ~(u32)(BIT(31) | BIT(1)),
+ .spi_w = PWRAP_MAN_CMD_SPI_WRITE,
+ .wdt_src = PWRAP_WDT_SRC_MASK_ALL,
+ .has_bridge = 1,
+ .init_reg_clock = pwrap_common_init_reg_clock,
+ .init_soc_specific = pwrap_mt8135_init_soc_specific,
+};
+
+static const struct pmic_wrapper_type pwrap_mt8173 = {
+ .regs = mt8173_regs,
+ .type = PWRAP_MT8173,
+ .arb_en_all = 0x3f,
+ .int_en_all = ~(u32)(BIT(31) | BIT(1)),
+ .spi_w = PWRAP_MAN_CMD_SPI_WRITE,
+ .wdt_src = PWRAP_WDT_SRC_MASK_NO_STAUPD,
+ .has_bridge = 0,
+ .init_reg_clock = pwrap_common_init_reg_clock,
+ .init_soc_specific = pwrap_mt8173_init_soc_specific,
+};
+
+static const struct of_device_id of_pwrap_match_tbl[] = {
+ {
+ .compatible = "mediatek,mt2701-pwrap",
+ .data = &pwrap_mt2701,
+ }, {
+ .compatible = "mediatek,mt6797-pwrap",
+ .data = &pwrap_mt6797,
+ }, {
+ .compatible = "mediatek,mt7622-pwrap",
+ .data = &pwrap_mt7622,
+ }, {
+ .compatible = "mediatek,mt8135-pwrap",
+ .data = &pwrap_mt8135,
+ }, {
+ .compatible = "mediatek,mt8173-pwrap",
+ .data = &pwrap_mt8173,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, of_pwrap_match_tbl);
+
+static int pwrap_probe(struct platform_device *pdev)
+{
+ int ret, irq;
+ struct pmic_wrapper *wrp;
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *of_slave_id = NULL;
+ struct resource *res;
+
+ if (np->child)
+ of_slave_id = of_match_node(of_slave_match_tbl, np->child);
+
+ if (!of_slave_id) {
+ dev_dbg(&pdev->dev, "slave pmic should be defined in dts\n");
+ return -EINVAL;
+ }
+
+ wrp = devm_kzalloc(&pdev->dev, sizeof(*wrp), GFP_KERNEL);
+ if (!wrp)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, wrp);
+
+ wrp->master = of_device_get_match_data(&pdev->dev);
+ wrp->slave = of_slave_id->data;
+ wrp->dev = &pdev->dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwrap");
+ wrp->base = devm_ioremap_resource(wrp->dev, res);
+ if (IS_ERR(wrp->base))
+ return PTR_ERR(wrp->base);
+
+ wrp->rstc = devm_reset_control_get(wrp->dev, "pwrap");
+ if (IS_ERR(wrp->rstc)) {
+ ret = PTR_ERR(wrp->rstc);
+ dev_dbg(wrp->dev, "cannot get pwrap reset: %d\n", ret);
+ return ret;
+ }
+
+ if (wrp->master->has_bridge) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "pwrap-bridge");
+ wrp->bridge_base = devm_ioremap_resource(wrp->dev, res);
+ if (IS_ERR(wrp->bridge_base))
+ return PTR_ERR(wrp->bridge_base);
+
+ wrp->rstc_bridge = devm_reset_control_get(wrp->dev,
+ "pwrap-bridge");
+ if (IS_ERR(wrp->rstc_bridge)) {
+ ret = PTR_ERR(wrp->rstc_bridge);
+ dev_dbg(wrp->dev,
+ "cannot get pwrap-bridge reset: %d\n", ret);
+ return ret;
+ }
+ }
+
+ wrp->clk_spi = devm_clk_get(wrp->dev, "spi");
+ if (IS_ERR(wrp->clk_spi)) {
+ dev_dbg(wrp->dev, "failed to get clock: %ld\n",
+ PTR_ERR(wrp->clk_spi));
+ return PTR_ERR(wrp->clk_spi);
+ }
+
+ wrp->clk_wrap = devm_clk_get(wrp->dev, "wrap");
+ if (IS_ERR(wrp->clk_wrap)) {
+ dev_dbg(wrp->dev, "failed to get clock: %ld\n",
+ PTR_ERR(wrp->clk_wrap));
+ return PTR_ERR(wrp->clk_wrap);
+ }
+
+ ret = clk_prepare_enable(wrp->clk_spi);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(wrp->clk_wrap);
+ if (ret)
+ goto err_out1;
+
+ /* Enable internal dynamic clock */
+ pwrap_writel(wrp, 1, PWRAP_DCM_EN);
+ pwrap_writel(wrp, 0, PWRAP_DCM_DBC_PRD);
+
+ /*
+ * The PMIC could already be initialized by the bootloader.
+ * Skip initialization here in this case.
+ */
+ if (!pwrap_readl(wrp, PWRAP_INIT_DONE2)) {
+ ret = pwrap_init(wrp);
+ if (ret) {
+ dev_dbg(wrp->dev, "init failed with %d\n", ret);
+ goto err_out2;
+ }
+ }
+
+ if (!(pwrap_readl(wrp, PWRAP_WACS2_RDATA) & PWRAP_STATE_INIT_DONE0)) {
+ dev_dbg(wrp->dev, "initialization isn't finished\n");
+ ret = -ENODEV;
+ goto err_out2;
+ }
+
+ /* Initialize watchdog, may not be done by the bootloader */
+ pwrap_writel(wrp, 0xf, PWRAP_WDT_UNIT);
+ /*
+ * Since STAUPD is not used on the mt8173 platform,
+ * the STAUPD bits of WDT_SRC should be turned off.
+ */
+ pwrap_writel(wrp, wrp->master->wdt_src, PWRAP_WDT_SRC_EN);
+ pwrap_writel(wrp, 0x1, PWRAP_TIMER_EN);
+ pwrap_writel(wrp, wrp->master->int_en_all, PWRAP_INT_EN);
+
+ irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(wrp->dev, irq, pwrap_interrupt,
+ IRQF_TRIGGER_HIGH,
+ "mt-pmic-pwrap", wrp);
+ if (ret)
+ goto err_out2;
+
+ wrp->regmap = devm_regmap_init(wrp->dev, NULL, wrp, wrp->slave->regmap);
+ if (IS_ERR(wrp->regmap)) {
+ ret = PTR_ERR(wrp->regmap);
+ goto err_out2;
+ }
+
+ ret = of_platform_populate(np, NULL, NULL, wrp->dev);
+ if (ret) {
+ dev_dbg(wrp->dev, "failed to create child devices at %pOF\n",
+ np);
+ goto err_out2;
+ }
+
+ return 0;
+
+err_out2:
+ clk_disable_unprepare(wrp->clk_wrap);
+err_out1:
+ clk_disable_unprepare(wrp->clk_spi);
+
+ return ret;
+}
+
+static struct platform_driver pwrap_drv = {
+ .driver = {
+ .name = "mt-pmic-pwrap",
+ .of_match_table = of_match_ptr(of_pwrap_match_tbl),
+ },
+ .probe = pwrap_probe,
+};
+
+module_platform_driver(pwrap_drv);
+
+MODULE_AUTHOR("Flora Fu, MediaTek");
+MODULE_DESCRIPTION("MediaTek MT8135 PMIC Wrapper Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c
new file mode 100644
index 000000000..ef54f1638
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-scpsys.c
@@ -0,0 +1,1077 @@
+/*
+ * Copyright (c) 2015 Pengutronix, Sascha Hauer <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/clk.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/regulator/consumer.h>
+#include <linux/soc/mediatek/infracfg.h>
+
+#include <dt-bindings/power/mt2701-power.h>
+#include <dt-bindings/power/mt2712-power.h>
+#include <dt-bindings/power/mt6797-power.h>
+#include <dt-bindings/power/mt7622-power.h>
+#include <dt-bindings/power/mt7623a-power.h>
+#include <dt-bindings/power/mt8173-power.h>
+
+#define MTK_POLL_DELAY_US 10
+#define MTK_POLL_TIMEOUT (jiffies_to_usecs(HZ))
+
+#define MTK_SCPD_ACTIVE_WAKEUP BIT(0)
+#define MTK_SCPD_FWAIT_SRAM BIT(1)
+#define MTK_SCPD_CAPS(_scpd, _x) ((_scpd)->data->caps & (_x))
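+/*
+ * MTK_SCPD_CAPS() tests a domain's capability flags, e.g.
+ * MTK_SCPD_CAPS(scpd, MTK_SCPD_FWAIT_SRAM) selects the fixed-delay SRAM
+ * power-up wait in scpsys_power_on().
+ */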
+
+#define SPM_VDE_PWR_CON 0x0210
+#define SPM_MFG_PWR_CON 0x0214
+#define SPM_VEN_PWR_CON 0x0230
+#define SPM_ISP_PWR_CON 0x0238
+#define SPM_DIS_PWR_CON 0x023c
+#define SPM_CONN_PWR_CON 0x0280
+#define SPM_VEN2_PWR_CON 0x0298
+#define SPM_AUDIO_PWR_CON 0x029c /* MT8173, MT2712 */
+#define SPM_BDP_PWR_CON 0x029c /* MT2701 */
+#define SPM_ETH_PWR_CON 0x02a0
+#define SPM_HIF_PWR_CON 0x02a4
+#define SPM_IFR_MSC_PWR_CON 0x02a8
+#define SPM_MFG_2D_PWR_CON 0x02c0
+#define SPM_MFG_ASYNC_PWR_CON 0x02c4
+#define SPM_USB_PWR_CON 0x02cc
+#define SPM_USB2_PWR_CON 0x02d4 /* MT2712 */
+#define SPM_ETHSYS_PWR_CON 0x02e0 /* MT7622 */
+#define SPM_HIF0_PWR_CON 0x02e4 /* MT7622 */
+#define SPM_HIF1_PWR_CON 0x02e8 /* MT7622 */
+#define SPM_WB_PWR_CON 0x02ec /* MT7622 */
+
+#define SPM_PWR_STATUS 0x060c
+#define SPM_PWR_STATUS_2ND 0x0610
+
+#define PWR_RST_B_BIT BIT(0)
+#define PWR_ISO_BIT BIT(1)
+#define PWR_ON_BIT BIT(2)
+#define PWR_ON_2ND_BIT BIT(3)
+#define PWR_CLK_DIS_BIT BIT(4)
+
+#define PWR_STATUS_CONN BIT(1)
+#define PWR_STATUS_DISP BIT(3)
+#define PWR_STATUS_MFG BIT(4)
+#define PWR_STATUS_ISP BIT(5)
+#define PWR_STATUS_VDEC BIT(7)
+#define PWR_STATUS_BDP BIT(14)
+#define PWR_STATUS_ETH BIT(15)
+#define PWR_STATUS_HIF BIT(16)
+#define PWR_STATUS_IFR_MSC BIT(17)
+#define PWR_STATUS_USB2 BIT(19) /* MT2712 */
+#define PWR_STATUS_VENC_LT BIT(20)
+#define PWR_STATUS_VENC BIT(21)
+#define PWR_STATUS_MFG_2D BIT(22) /* MT8173 */
+#define PWR_STATUS_MFG_ASYNC BIT(23) /* MT8173 */
+#define PWR_STATUS_AUDIO BIT(24) /* MT8173, MT2712 */
+#define PWR_STATUS_USB BIT(25) /* MT8173, MT2712 */
+#define PWR_STATUS_ETHSYS BIT(24) /* MT7622 */
+#define PWR_STATUS_HIF0 BIT(25) /* MT7622 */
+#define PWR_STATUS_HIF1 BIT(26) /* MT7622 */
+#define PWR_STATUS_WB BIT(27) /* MT7622 */
+
+enum clk_id {
+ CLK_NONE,
+ CLK_MM,
+ CLK_MFG,
+ CLK_VENC,
+ CLK_VENC_LT,
+ CLK_ETHIF,
+ CLK_VDEC,
+ CLK_HIFSEL,
+ CLK_JPGDEC,
+ CLK_AUDIO,
+ CLK_MAX,
+};
+
+static const char * const clk_names[] = {
+ NULL,
+ "mm",
+ "mfg",
+ "venc",
+ "venc_lt",
+ "ethif",
+ "vdec",
+ "hif_sel",
+ "jpgdec",
+ "audio",
+ NULL,
+};
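+/*
+ * clk_names[] is indexed by enum clk_id above; the NULL entries sit at
+ * CLK_NONE and CLK_MAX, which init_clks() never looks up.
+ */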
+
+#define MAX_CLKS 3
+
+struct scp_domain_data {
+ const char *name;
+ u32 sta_mask;
+ int ctl_offs;
+ u32 sram_pdn_bits;
+ u32 sram_pdn_ack_bits;
+ u32 bus_prot_mask;
+ enum clk_id clk_id[MAX_CLKS];
+ u8 caps;
+};
+
+struct scp;
+
+struct scp_domain {
+ struct generic_pm_domain genpd;
+ struct scp *scp;
+ struct clk *clk[MAX_CLKS];
+ const struct scp_domain_data *data;
+ struct regulator *supply;
+};
+
+struct scp_ctrl_reg {
+ int pwr_sta_offs;
+ int pwr_sta2nd_offs;
+};
+
+struct scp {
+ struct scp_domain *domains;
+ struct genpd_onecell_data pd_data;
+ struct device *dev;
+ void __iomem *base;
+ struct regmap *infracfg;
+ struct scp_ctrl_reg ctrl_reg;
+ bool bus_prot_reg_update;
+};
+
+struct scp_subdomain {
+ int origin;
+ int subdomain;
+};
+
+struct scp_soc_data {
+ const struct scp_domain_data *domains;
+ int num_domains;
+ const struct scp_subdomain *subdomains;
+ int num_subdomains;
+ const struct scp_ctrl_reg regs;
+ bool bus_prot_reg_update;
+};
+
+static int scpsys_domain_is_on(struct scp_domain *scpd)
+{
+ struct scp *scp = scpd->scp;
+
+ u32 status = readl(scp->base + scp->ctrl_reg.pwr_sta_offs) &
+ scpd->data->sta_mask;
+ u32 status2 = readl(scp->base + scp->ctrl_reg.pwr_sta2nd_offs) &
+ scpd->data->sta_mask;
+
+ /*
+ * A domain is on when both status bits are set. If only one is set,
+ * return an error; this happens while a domain is powering up.
+ */
+
+ if (status && status2)
+ return true;
+ if (!status && !status2)
+ return false;
+
+ return -EINVAL;
+}
+
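+/*
+ * Power-on sequence (mirrored in reverse by scpsys_power_off() below):
+ * assert the PWR_ON bits, wait for the acks, remove clock gating and
+ * isolation, release the reset, power up the SRAM and finally lift the
+ * bus protection.
+ */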
+static int scpsys_power_on(struct generic_pm_domain *genpd)
+{
+ struct scp_domain *scpd = container_of(genpd, struct scp_domain, genpd);
+ struct scp *scp = scpd->scp;
+ void __iomem *ctl_addr = scp->base + scpd->data->ctl_offs;
+ u32 pdn_ack = scpd->data->sram_pdn_ack_bits;
+ u32 val;
+ int ret, tmp;
+ int i;
+
+ if (scpd->supply) {
+ ret = regulator_enable(scpd->supply);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < MAX_CLKS && scpd->clk[i]; i++) {
+ ret = clk_prepare_enable(scpd->clk[i]);
+ if (ret) {
+ for (--i; i >= 0; i--)
+ clk_disable_unprepare(scpd->clk[i]);
+
+ goto err_clk;
+ }
+ }
+
+ val = readl(ctl_addr);
+ val |= PWR_ON_BIT;
+ writel(val, ctl_addr);
+ val |= PWR_ON_2ND_BIT;
+ writel(val, ctl_addr);
+
+ /* wait until PWR_ACK = 1 */
+ ret = readx_poll_timeout(scpsys_domain_is_on, scpd, tmp, tmp > 0,
+ MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
+ if (ret < 0)
+ goto err_pwr_ack;
+
+ val &= ~PWR_CLK_DIS_BIT;
+ writel(val, ctl_addr);
+
+ val &= ~PWR_ISO_BIT;
+ writel(val, ctl_addr);
+
+ val |= PWR_RST_B_BIT;
+ writel(val, ctl_addr);
+
+ val &= ~scpd->data->sram_pdn_bits;
+ writel(val, ctl_addr);
+
+ /* Either wait until SRAM_PDN_ACK is all 0 or apply a forced wait */
+ if (MTK_SCPD_CAPS(scpd, MTK_SCPD_FWAIT_SRAM)) {
+ /*
+ * Currently, MTK_SCPD_FWAIT_SRAM is necessary only for
+ * MT7622_POWER_DOMAIN_WB, so just a trivial fixed delay is
+ * applied here.
+ */
+ usleep_range(12000, 12100);
+
+ } else {
+ ret = readl_poll_timeout(ctl_addr, tmp, (tmp & pdn_ack) == 0,
+ MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
+ if (ret < 0)
+ goto err_pwr_ack;
+ }
+
+ if (scpd->data->bus_prot_mask) {
+ ret = mtk_infracfg_clear_bus_protection(scp->infracfg,
+ scpd->data->bus_prot_mask,
+ scp->bus_prot_reg_update);
+ if (ret)
+ goto err_pwr_ack;
+ }
+
+ return 0;
+
+err_pwr_ack:
+ for (i = MAX_CLKS - 1; i >= 0; i--) {
+ if (scpd->clk[i])
+ clk_disable_unprepare(scpd->clk[i]);
+ }
+err_clk:
+ if (scpd->supply)
+ regulator_disable(scpd->supply);
+
+ dev_err(scp->dev, "Failed to power on domain %s\n", genpd->name);
+
+ return ret;
+}
+
+static int scpsys_power_off(struct generic_pm_domain *genpd)
+{
+ struct scp_domain *scpd = container_of(genpd, struct scp_domain, genpd);
+ struct scp *scp = scpd->scp;
+ void __iomem *ctl_addr = scp->base + scpd->data->ctl_offs;
+ u32 pdn_ack = scpd->data->sram_pdn_ack_bits;
+ u32 val;
+ int ret, tmp;
+ int i;
+
+ if (scpd->data->bus_prot_mask) {
+ ret = mtk_infracfg_set_bus_protection(scp->infracfg,
+ scpd->data->bus_prot_mask,
+ scp->bus_prot_reg_update);
+ if (ret)
+ goto out;
+ }
+
+ val = readl(ctl_addr);
+ val |= scpd->data->sram_pdn_bits;
+ writel(val, ctl_addr);
+
+ /* wait until SRAM_PDN_ACK all 1 */
+ ret = readl_poll_timeout(ctl_addr, tmp, (tmp & pdn_ack) == pdn_ack,
+ MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
+ if (ret < 0)
+ goto out;
+
+ val |= PWR_ISO_BIT;
+ writel(val, ctl_addr);
+
+ val &= ~PWR_RST_B_BIT;
+ writel(val, ctl_addr);
+
+ val |= PWR_CLK_DIS_BIT;
+ writel(val, ctl_addr);
+
+ val &= ~PWR_ON_BIT;
+ writel(val, ctl_addr);
+
+ val &= ~PWR_ON_2ND_BIT;
+ writel(val, ctl_addr);
+
+ /* wait until PWR_ACK = 0 */
+ ret = readx_poll_timeout(scpsys_domain_is_on, scpd, tmp, tmp == 0,
+ MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
+ if (ret < 0)
+ goto out;
+
+ for (i = 0; i < MAX_CLKS && scpd->clk[i]; i++)
+ clk_disable_unprepare(scpd->clk[i]);
+
+ if (scpd->supply)
+ regulator_disable(scpd->supply);
+
+ return 0;
+
+out:
+ dev_err(scp->dev, "Failed to power off domain %s\n", genpd->name);
+
+ return ret;
+}
+
+static void init_clks(struct platform_device *pdev, struct clk **clk)
+{
+ int i;
+
+ for (i = CLK_NONE + 1; i < CLK_MAX; i++)
+ clk[i] = devm_clk_get(&pdev->dev, clk_names[i]);
+}
+
+static struct scp *init_scp(struct platform_device *pdev,
+ const struct scp_domain_data *scp_domain_data, int num,
+ const struct scp_ctrl_reg *scp_ctrl_reg,
+ bool bus_prot_reg_update)
+{
+ struct genpd_onecell_data *pd_data;
+ struct resource *res;
+ int i, j;
+ struct scp *scp;
+ struct clk *clk[CLK_MAX];
+
+ scp = devm_kzalloc(&pdev->dev, sizeof(*scp), GFP_KERNEL);
+ if (!scp)
+ return ERR_PTR(-ENOMEM);
+
+ scp->ctrl_reg.pwr_sta_offs = scp_ctrl_reg->pwr_sta_offs;
+ scp->ctrl_reg.pwr_sta2nd_offs = scp_ctrl_reg->pwr_sta2nd_offs;
+
+ scp->bus_prot_reg_update = bus_prot_reg_update;
+
+ scp->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ scp->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(scp->base))
+ return ERR_CAST(scp->base);
+
+ scp->domains = devm_kcalloc(&pdev->dev,
+ num, sizeof(*scp->domains), GFP_KERNEL);
+ if (!scp->domains)
+ return ERR_PTR(-ENOMEM);
+
+ pd_data = &scp->pd_data;
+
+ pd_data->domains = devm_kcalloc(&pdev->dev,
+ num, sizeof(*pd_data->domains), GFP_KERNEL);
+ if (!pd_data->domains)
+ return ERR_PTR(-ENOMEM);
+
+ scp->infracfg = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "infracfg");
+ if (IS_ERR(scp->infracfg)) {
+ dev_err(&pdev->dev, "Cannot find infracfg controller: %ld\n",
+ PTR_ERR(scp->infracfg));
+ return ERR_CAST(scp->infracfg);
+ }
+
+ for (i = 0; i < num; i++) {
+ struct scp_domain *scpd = &scp->domains[i];
+ const struct scp_domain_data *data = &scp_domain_data[i];
+
+ scpd->supply = devm_regulator_get_optional(&pdev->dev, data->name);
+ if (IS_ERR(scpd->supply)) {
+ if (PTR_ERR(scpd->supply) == -ENODEV)
+ scpd->supply = NULL;
+ else
+ return ERR_CAST(scpd->supply);
+ }
+ }
+
+ pd_data->num_domains = num;
+
+ init_clks(pdev, clk);
+
+ for (i = 0; i < num; i++) {
+ struct scp_domain *scpd = &scp->domains[i];
+ struct generic_pm_domain *genpd = &scpd->genpd;
+ const struct scp_domain_data *data = &scp_domain_data[i];
+
+ pd_data->domains[i] = genpd;
+ scpd->scp = scp;
+
+ scpd->data = data;
+
+ for (j = 0; j < MAX_CLKS && data->clk_id[j]; j++) {
+ struct clk *c = clk[data->clk_id[j]];
+
+ if (IS_ERR(c)) {
+ dev_err(&pdev->dev, "%s: clk unavailable\n",
+ data->name);
+ return ERR_CAST(c);
+ }
+
+ scpd->clk[j] = c;
+ }
+
+ genpd->name = data->name;
+ genpd->power_off = scpsys_power_off;
+ genpd->power_on = scpsys_power_on;
+ if (MTK_SCPD_CAPS(scpd, MTK_SCPD_ACTIVE_WAKEUP))
+ genpd->flags |= GENPD_FLAG_ACTIVE_WAKEUP;
+ }
+
+ return scp;
+}
+
+static void mtk_register_power_domains(struct platform_device *pdev,
+ struct scp *scp, int num)
+{
+ struct genpd_onecell_data *pd_data;
+ int i, ret;
+
+ for (i = 0; i < num; i++) {
+ struct scp_domain *scpd = &scp->domains[i];
+ struct generic_pm_domain *genpd = &scpd->genpd;
+ bool on;
+
+ /*
+ * Initially turn on all domains to make them usable with
+ * !CONFIG_PM and to get the hardware in sync with the
+ * software. The unused domains will be switched off at
+ * late init.
+ */
+ on = !WARN_ON(genpd->power_on(genpd) < 0);
+
+ pm_genpd_init(genpd, NULL, !on);
+ }
+
+ /*
+ * We are not allowed to fail here since there is no way to unregister
+ * a power domain. Once registered above we have to keep the domains
+ * valid.
+ */
+
+ pd_data = &scp->pd_data;
+
+ ret = of_genpd_add_provider_onecell(pdev->dev.of_node, pd_data);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to add OF provider: %d\n", ret);
+}
+
+/*
+ * MT2701 power domain support
+ */
+
+static const struct scp_domain_data scp_domain_data_mt2701[] = {
+ [MT2701_POWER_DOMAIN_CONN] = {
+ .name = "conn",
+ .sta_mask = PWR_STATUS_CONN,
+ .ctl_offs = SPM_CONN_PWR_CON,
+ .bus_prot_mask = MT2701_TOP_AXI_PROT_EN_CONN_M |
+ MT2701_TOP_AXI_PROT_EN_CONN_S,
+ .clk_id = {CLK_NONE},
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT2701_POWER_DOMAIN_DISP] = {
+ .name = "disp",
+ .sta_mask = PWR_STATUS_DISP,
+ .ctl_offs = SPM_DIS_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .clk_id = {CLK_MM},
+ .bus_prot_mask = MT2701_TOP_AXI_PROT_EN_MM_M0,
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT2701_POWER_DOMAIN_MFG] = {
+ .name = "mfg",
+ .sta_mask = PWR_STATUS_MFG,
+ .ctl_offs = SPM_MFG_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .clk_id = {CLK_MFG},
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT2701_POWER_DOMAIN_VDEC] = {
+ .name = "vdec",
+ .sta_mask = PWR_STATUS_VDEC,
+ .ctl_offs = SPM_VDE_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .clk_id = {CLK_MM},
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT2701_POWER_DOMAIN_ISP] = {
+ .name = "isp",
+ .sta_mask = PWR_STATUS_ISP,
+ .ctl_offs = SPM_ISP_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ .clk_id = {CLK_MM},
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT2701_POWER_DOMAIN_BDP] = {
+ .name = "bdp",
+ .sta_mask = PWR_STATUS_BDP,
+ .ctl_offs = SPM_BDP_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .clk_id = {CLK_NONE},
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT2701_POWER_DOMAIN_ETH] = {
+ .name = "eth",
+ .sta_mask = PWR_STATUS_ETH,
+ .ctl_offs = SPM_ETH_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_ETHIF},
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT2701_POWER_DOMAIN_HIF] = {
+ .name = "hif",
+ .sta_mask = PWR_STATUS_HIF,
+ .ctl_offs = SPM_HIF_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_ETHIF},
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT2701_POWER_DOMAIN_IFR_MSC] = {
+ .name = "ifr_msc",
+ .sta_mask = PWR_STATUS_IFR_MSC,
+ .ctl_offs = SPM_IFR_MSC_PWR_CON,
+ .clk_id = {CLK_NONE},
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+};
+
+/*
+ * MT2712 power domain support
+ */
+static const struct scp_domain_data scp_domain_data_mt2712[] = {
+ [MT2712_POWER_DOMAIN_MM] = {
+ .name = "mm",
+ .sta_mask = PWR_STATUS_DISP,
+ .ctl_offs = SPM_DIS_PWR_CON,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .clk_id = {CLK_MM},
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT2712_POWER_DOMAIN_VDEC] = {
+ .name = "vdec",
+ .sta_mask = PWR_STATUS_VDEC,
+ .ctl_offs = SPM_VDE_PWR_CON,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .clk_id = {CLK_MM, CLK_VDEC},
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT2712_POWER_DOMAIN_VENC] = {
+ .name = "venc",
+ .sta_mask = PWR_STATUS_VENC,
+ .ctl_offs = SPM_VEN_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_MM, CLK_VENC, CLK_JPGDEC},
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT2712_POWER_DOMAIN_ISP] = {
+ .name = "isp",
+ .sta_mask = PWR_STATUS_ISP,
+ .ctl_offs = SPM_ISP_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ .clk_id = {CLK_MM},
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT2712_POWER_DOMAIN_AUDIO] = {
+ .name = "audio",
+ .sta_mask = PWR_STATUS_AUDIO,
+ .ctl_offs = SPM_AUDIO_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_AUDIO},
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT2712_POWER_DOMAIN_USB] = {
+ .name = "usb",
+ .sta_mask = PWR_STATUS_USB,
+ .ctl_offs = SPM_USB_PWR_CON,
+ .sram_pdn_bits = GENMASK(10, 8),
+ .sram_pdn_ack_bits = GENMASK(14, 12),
+ .clk_id = {CLK_NONE},
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT2712_POWER_DOMAIN_USB2] = {
+ .name = "usb2",
+ .sta_mask = PWR_STATUS_USB2,
+ .ctl_offs = SPM_USB2_PWR_CON,
+ .sram_pdn_bits = GENMASK(10, 8),
+ .sram_pdn_ack_bits = GENMASK(14, 12),
+ .clk_id = {CLK_NONE},
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT2712_POWER_DOMAIN_MFG] = {
+ .name = "mfg",
+ .sta_mask = PWR_STATUS_MFG,
+ .ctl_offs = SPM_MFG_PWR_CON,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(16, 16),
+ .clk_id = {CLK_MFG},
+ .bus_prot_mask = BIT(14) | BIT(21) | BIT(23),
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT2712_POWER_DOMAIN_MFG_SC1] = {
+ .name = "mfg_sc1",
+ .sta_mask = BIT(22),
+ .ctl_offs = 0x02c0,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(16, 16),
+ .clk_id = {CLK_NONE},
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT2712_POWER_DOMAIN_MFG_SC2] = {
+ .name = "mfg_sc2",
+ .sta_mask = BIT(23),
+ .ctl_offs = 0x02c4,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(16, 16),
+ .clk_id = {CLK_NONE},
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT2712_POWER_DOMAIN_MFG_SC3] = {
+ .name = "mfg_sc3",
+ .sta_mask = BIT(30),
+ .ctl_offs = 0x01f8,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(16, 16),
+ .clk_id = {CLK_NONE},
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+};
+
+static const struct scp_subdomain scp_subdomain_mt2712[] = {
+ {MT2712_POWER_DOMAIN_MM, MT2712_POWER_DOMAIN_VDEC},
+ {MT2712_POWER_DOMAIN_MM, MT2712_POWER_DOMAIN_VENC},
+ {MT2712_POWER_DOMAIN_MM, MT2712_POWER_DOMAIN_ISP},
+ {MT2712_POWER_DOMAIN_MFG, MT2712_POWER_DOMAIN_MFG_SC1},
+ {MT2712_POWER_DOMAIN_MFG_SC1, MT2712_POWER_DOMAIN_MFG_SC2},
+ {MT2712_POWER_DOMAIN_MFG_SC2, MT2712_POWER_DOMAIN_MFG_SC3},
+};
+
+/*
+ * MT6797 power domain support
+ */
+
+static const struct scp_domain_data scp_domain_data_mt6797[] = {
+ [MT6797_POWER_DOMAIN_VDEC] = {
+ .name = "vdec",
+ .sta_mask = BIT(7),
+ .ctl_offs = 0x300,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .clk_id = {CLK_VDEC},
+ },
+ [MT6797_POWER_DOMAIN_VENC] = {
+ .name = "venc",
+ .sta_mask = BIT(21),
+ .ctl_offs = 0x304,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_NONE},
+ },
+ [MT6797_POWER_DOMAIN_ISP] = {
+ .name = "isp",
+ .sta_mask = BIT(5),
+ .ctl_offs = 0x308,
+ .sram_pdn_bits = GENMASK(9, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ .clk_id = {CLK_NONE},
+ },
+ [MT6797_POWER_DOMAIN_MM] = {
+ .name = "mm",
+ .sta_mask = BIT(3),
+ .ctl_offs = 0x30C,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .clk_id = {CLK_MM},
+ .bus_prot_mask = (BIT(1) | BIT(2)),
+ },
+ [MT6797_POWER_DOMAIN_AUDIO] = {
+ .name = "audio",
+ .sta_mask = BIT(24),
+ .ctl_offs = 0x314,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_NONE},
+ },
+ [MT6797_POWER_DOMAIN_MFG_ASYNC] = {
+ .name = "mfg_async",
+ .sta_mask = BIT(13),
+ .ctl_offs = 0x334,
+ .sram_pdn_bits = 0,
+ .sram_pdn_ack_bits = 0,
+ .clk_id = {CLK_MFG},
+ },
+ [MT6797_POWER_DOMAIN_MJC] = {
+ .name = "mjc",
+ .sta_mask = BIT(20),
+ .ctl_offs = 0x310,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .clk_id = {CLK_NONE},
+ },
+};
+
+#define SPM_PWR_STATUS_MT6797 0x0180
+#define SPM_PWR_STATUS_2ND_MT6797 0x0184
+
+static const struct scp_subdomain scp_subdomain_mt6797[] = {
+ {MT6797_POWER_DOMAIN_MM, MT6797_POWER_DOMAIN_VDEC},
+ {MT6797_POWER_DOMAIN_MM, MT6797_POWER_DOMAIN_ISP},
+ {MT6797_POWER_DOMAIN_MM, MT6797_POWER_DOMAIN_VENC},
+ {MT6797_POWER_DOMAIN_MM, MT6797_POWER_DOMAIN_MJC},
+};
+
+/*
+ * MT7622 power domain support
+ */
+
+static const struct scp_domain_data scp_domain_data_mt7622[] = {
+ [MT7622_POWER_DOMAIN_ETHSYS] = {
+ .name = "ethsys",
+ .sta_mask = PWR_STATUS_ETHSYS,
+ .ctl_offs = SPM_ETHSYS_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_NONE},
+ .bus_prot_mask = MT7622_TOP_AXI_PROT_EN_ETHSYS,
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT7622_POWER_DOMAIN_HIF0] = {
+ .name = "hif0",
+ .sta_mask = PWR_STATUS_HIF0,
+ .ctl_offs = SPM_HIF0_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_HIFSEL},
+ .bus_prot_mask = MT7622_TOP_AXI_PROT_EN_HIF0,
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT7622_POWER_DOMAIN_HIF1] = {
+ .name = "hif1",
+ .sta_mask = PWR_STATUS_HIF1,
+ .ctl_offs = SPM_HIF1_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_HIFSEL},
+ .bus_prot_mask = MT7622_TOP_AXI_PROT_EN_HIF1,
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT7622_POWER_DOMAIN_WB] = {
+ .name = "wb",
+ .sta_mask = PWR_STATUS_WB,
+ .ctl_offs = SPM_WB_PWR_CON,
+ .sram_pdn_bits = 0,
+ .sram_pdn_ack_bits = 0,
+ .clk_id = {CLK_NONE},
+ .bus_prot_mask = MT7622_TOP_AXI_PROT_EN_WB,
+ .caps = MTK_SCPD_ACTIVE_WAKEUP | MTK_SCPD_FWAIT_SRAM,
+ },
+};
+
+/*
+ * MT7623A power domain support
+ */
+
+static const struct scp_domain_data scp_domain_data_mt7623a[] = {
+ [MT7623A_POWER_DOMAIN_CONN] = {
+ .name = "conn",
+ .sta_mask = PWR_STATUS_CONN,
+ .ctl_offs = SPM_CONN_PWR_CON,
+ .bus_prot_mask = MT2701_TOP_AXI_PROT_EN_CONN_M |
+ MT2701_TOP_AXI_PROT_EN_CONN_S,
+ .clk_id = {CLK_NONE},
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT7623A_POWER_DOMAIN_ETH] = {
+ .name = "eth",
+ .sta_mask = PWR_STATUS_ETH,
+ .ctl_offs = SPM_ETH_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_ETHIF},
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT7623A_POWER_DOMAIN_HIF] = {
+ .name = "hif",
+ .sta_mask = PWR_STATUS_HIF,
+ .ctl_offs = SPM_HIF_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_ETHIF},
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT7623A_POWER_DOMAIN_IFR_MSC] = {
+ .name = "ifr_msc",
+ .sta_mask = PWR_STATUS_IFR_MSC,
+ .ctl_offs = SPM_IFR_MSC_PWR_CON,
+ .clk_id = {CLK_NONE},
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+};
+
+/*
+ * MT8173 power domain support
+ */
+
+static const struct scp_domain_data scp_domain_data_mt8173[] = {
+ [MT8173_POWER_DOMAIN_VDEC] = {
+ .name = "vdec",
+ .sta_mask = PWR_STATUS_VDEC,
+ .ctl_offs = SPM_VDE_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .clk_id = {CLK_MM},
+ },
+ [MT8173_POWER_DOMAIN_VENC] = {
+ .name = "venc",
+ .sta_mask = PWR_STATUS_VENC,
+ .ctl_offs = SPM_VEN_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_MM, CLK_VENC},
+ },
+ [MT8173_POWER_DOMAIN_ISP] = {
+ .name = "isp",
+ .sta_mask = PWR_STATUS_ISP,
+ .ctl_offs = SPM_ISP_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ .clk_id = {CLK_MM},
+ },
+ [MT8173_POWER_DOMAIN_MM] = {
+ .name = "mm",
+ .sta_mask = PWR_STATUS_DISP,
+ .ctl_offs = SPM_DIS_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .clk_id = {CLK_MM},
+ .bus_prot_mask = MT8173_TOP_AXI_PROT_EN_MM_M0 |
+ MT8173_TOP_AXI_PROT_EN_MM_M1,
+ },
+ [MT8173_POWER_DOMAIN_VENC_LT] = {
+ .name = "venc_lt",
+ .sta_mask = PWR_STATUS_VENC_LT,
+ .ctl_offs = SPM_VEN2_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_MM, CLK_VENC_LT},
+ },
+ [MT8173_POWER_DOMAIN_AUDIO] = {
+ .name = "audio",
+ .sta_mask = PWR_STATUS_AUDIO,
+ .ctl_offs = SPM_AUDIO_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_NONE},
+ },
+ [MT8173_POWER_DOMAIN_USB] = {
+ .name = "usb",
+ .sta_mask = PWR_STATUS_USB,
+ .ctl_offs = SPM_USB_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_NONE},
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT8173_POWER_DOMAIN_MFG_ASYNC] = {
+ .name = "mfg_async",
+ .sta_mask = PWR_STATUS_MFG_ASYNC,
+ .ctl_offs = SPM_MFG_ASYNC_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = 0,
+ .clk_id = {CLK_MFG},
+ },
+ [MT8173_POWER_DOMAIN_MFG_2D] = {
+ .name = "mfg_2d",
+ .sta_mask = PWR_STATUS_MFG_2D,
+ .ctl_offs = SPM_MFG_2D_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ .clk_id = {CLK_NONE},
+ },
+ [MT8173_POWER_DOMAIN_MFG] = {
+ .name = "mfg",
+ .sta_mask = PWR_STATUS_MFG,
+ .ctl_offs = SPM_MFG_PWR_CON,
+ .sram_pdn_bits = GENMASK(13, 8),
+ .sram_pdn_ack_bits = GENMASK(21, 16),
+ .clk_id = {CLK_NONE},
+ .bus_prot_mask = MT8173_TOP_AXI_PROT_EN_MFG_S |
+ MT8173_TOP_AXI_PROT_EN_MFG_M0 |
+ MT8173_TOP_AXI_PROT_EN_MFG_M1 |
+ MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT,
+ },
+};
+
+static const struct scp_subdomain scp_subdomain_mt8173[] = {
+ {MT8173_POWER_DOMAIN_MFG_ASYNC, MT8173_POWER_DOMAIN_MFG_2D},
+ {MT8173_POWER_DOMAIN_MFG_2D, MT8173_POWER_DOMAIN_MFG},
+};
+
+static const struct scp_soc_data mt2701_data = {
+ .domains = scp_domain_data_mt2701,
+ .num_domains = ARRAY_SIZE(scp_domain_data_mt2701),
+ .regs = {
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND
+ },
+ .bus_prot_reg_update = true,
+};
+
+static const struct scp_soc_data mt2712_data = {
+ .domains = scp_domain_data_mt2712,
+ .num_domains = ARRAY_SIZE(scp_domain_data_mt2712),
+ .subdomains = scp_subdomain_mt2712,
+ .num_subdomains = ARRAY_SIZE(scp_subdomain_mt2712),
+ .regs = {
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND
+ },
+ .bus_prot_reg_update = false,
+};
+
+static const struct scp_soc_data mt6797_data = {
+ .domains = scp_domain_data_mt6797,
+ .num_domains = ARRAY_SIZE(scp_domain_data_mt6797),
+ .subdomains = scp_subdomain_mt6797,
+ .num_subdomains = ARRAY_SIZE(scp_subdomain_mt6797),
+ .regs = {
+ .pwr_sta_offs = SPM_PWR_STATUS_MT6797,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND_MT6797
+ },
+ .bus_prot_reg_update = true,
+};
+
+static const struct scp_soc_data mt7622_data = {
+ .domains = scp_domain_data_mt7622,
+ .num_domains = ARRAY_SIZE(scp_domain_data_mt7622),
+ .regs = {
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND
+ },
+ .bus_prot_reg_update = true,
+};
+
+static const struct scp_soc_data mt7623a_data = {
+ .domains = scp_domain_data_mt7623a,
+ .num_domains = ARRAY_SIZE(scp_domain_data_mt7623a),
+ .regs = {
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND
+ },
+ .bus_prot_reg_update = true,
+};
+
+static const struct scp_soc_data mt8173_data = {
+ .domains = scp_domain_data_mt8173,
+ .num_domains = ARRAY_SIZE(scp_domain_data_mt8173),
+ .subdomains = scp_subdomain_mt8173,
+ .num_subdomains = ARRAY_SIZE(scp_subdomain_mt8173),
+ .regs = {
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND
+ },
+ .bus_prot_reg_update = true,
+};
+
+/*
+ * scpsys driver init
+ */
+
+static const struct of_device_id of_scpsys_match_tbl[] = {
+ {
+ .compatible = "mediatek,mt2701-scpsys",
+ .data = &mt2701_data,
+ }, {
+ .compatible = "mediatek,mt2712-scpsys",
+ .data = &mt2712_data,
+ }, {
+ .compatible = "mediatek,mt6797-scpsys",
+ .data = &mt6797_data,
+ }, {
+ .compatible = "mediatek,mt7622-scpsys",
+ .data = &mt7622_data,
+ }, {
+ .compatible = "mediatek,mt7623a-scpsys",
+ .data = &mt7623a_data,
+ }, {
+ .compatible = "mediatek,mt8173-scpsys",
+ .data = &mt8173_data,
+ }, {
+ /* sentinel */
+ }
+};
+
+static int scpsys_probe(struct platform_device *pdev)
+{
+ const struct scp_subdomain *sd;
+ const struct scp_soc_data *soc;
+ struct scp *scp;
+ struct genpd_onecell_data *pd_data;
+ int i, ret;
+
+ soc = of_device_get_match_data(&pdev->dev);
+
+ scp = init_scp(pdev, soc->domains, soc->num_domains, &soc->regs,
+ soc->bus_prot_reg_update);
+ if (IS_ERR(scp))
+ return PTR_ERR(scp);
+
+ mtk_register_power_domains(pdev, scp, soc->num_domains);
+
+ pd_data = &scp->pd_data;
+
+ for (i = 0, sd = soc->subdomains; i < soc->num_subdomains; i++, sd++) {
+ ret = pm_genpd_add_subdomain(pd_data->domains[sd->origin],
+ pd_data->domains[sd->subdomain]);
+ if (ret && IS_ENABLED(CONFIG_PM))
+ dev_err(&pdev->dev, "Failed to add subdomain: %d\n",
+ ret);
+ }
+
+ return 0;
+}
+
+static struct platform_driver scpsys_drv = {
+ .probe = scpsys_probe,
+ .driver = {
+ .name = "mtk-scpsys",
+ .suppress_bind_attrs = true,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(of_scpsys_match_tbl),
+ },
+};
+builtin_platform_driver(scpsys_drv);
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
new file mode 100644
index 000000000..ba79b609a
--- /dev/null
+++ b/drivers/soc/qcom/Kconfig
@@ -0,0 +1,166 @@
+#
+# QCOM SoC drivers
+#
+menu "Qualcomm SoC drivers"
+
+config QCOM_COMMAND_DB
+ bool "Qualcomm Command DB"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on OF_RESERVED_MEM
+ help
+ Command DB queries shared memory by key string for shared system
+ resources. Platform drivers that need to set the state of a shared
+ resource on an RPM-hardened platform must use this database to get
+ the SoC-specific identifier and information for the shared resources.
+
+config QCOM_GENI_SE
+ tristate "QCOM GENI Serial Engine Driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ help
+ This driver manages the Generic Interface (GENI) firmware-based
+ Qualcomm Technologies, Inc. Universal Peripheral (QUP) wrapper, as
+ well as the common aspects of the multiple serial engines present
+ in the QUP.
+
+config QCOM_GLINK_SSR
+ tristate "Qualcomm Glink SSR driver"
+ depends on RPMSG
+ depends on QCOM_RPROC_COMMON
+ help
+ Say y here to enable GLINK SSR support. The GLINK SSR driver
+ implements the SSR protocol for notifying the remote processor about
+ neighboring subsystems going up or down.
+
+config QCOM_GSBI
+ tristate "QCOM General Serial Bus Interface"
+ depends on ARCH_QCOM
+ select MFD_SYSCON
+ help
+ Say y here to enable GSBI support. The GSBI provides control
+ functions for connecting the underlying serial UART, SPI, and I2C
+ devices to the output pins.
+
+config QCOM_LLCC
+ tristate "Qualcomm Technologies, Inc. LLCC driver"
+ depends on ARCH_QCOM
+ help
+ Qualcomm Technologies, Inc. platform-specific Last Level Cache
+ Controller (LLCC) driver. This provides interfaces to clients that
+ use the LLCC. Say yes here to enable the LLCC slice driver.
+
+config QCOM_SDM845_LLCC
+ tristate "Qualcomm Technologies, Inc. SDM845 LLCC driver"
+ depends on QCOM_LLCC
+ help
+ Say yes here to enable the LLCC driver for SDM845. This provides
+ data required to configure LLCC so that clients can start using the
+ LLCC slices.
+
+config QCOM_MDT_LOADER
+ tristate
+ select QCOM_SCM
+
+config QCOM_PM
+ bool "Qualcomm Power Management"
+ depends on ARCH_QCOM && !ARM64
+ select ARM_CPU_SUSPEND
+ select QCOM_SCM
+ help
+ QCOM platform-specific power driver to manage cores and L2 low power
+ modes. It interfaces with various system drivers to put the cores in
+ low power modes.
+
+config QCOM_QMI_HELPERS
+ tristate
+ depends on ARCH_QCOM && NET
+ help
+ Helper library for handling QMI encoded messages. QMI encoded
+ messages are used in communication between the majority of QRTR
+ clients, and these helpers provide the common functionality needed
+ for doing this from a kernel driver.
+
+config QCOM_RMTFS_MEM
+ tristate "Qualcomm Remote Filesystem memory driver"
+ depends on ARCH_QCOM
+ select QCOM_SCM
+ help
+ The Qualcomm remote filesystem memory driver is used for allocating
+ and exposing regions of shared memory with remote processors for the
+ purpose of exchanging sector-data between the remote filesystem
+ service and its clients.
+
+ Say y here if you intend to boot the modem remoteproc.
+
+config QCOM_RPMH
+ bool "Qualcomm RPM-Hardened (RPMH) Communication"
+ depends on ARCH_QCOM && ARM64 && OF || COMPILE_TEST
+ help
+ Support for communication with the hardened-RPM blocks in
+ Qualcomm Technologies Inc (QTI) SoCs. RPMH communication uses an
+ internal bus to transmit state requests for shared resources. A set
+ of hardware components aggregate requests for these resources and
+ help apply the aggregated state on the resource.
+
+config QCOM_SMEM
+ tristate "Qualcomm Shared Memory Manager (SMEM)"
+ depends on ARCH_QCOM
+ depends on HWSPINLOCK
+ help
+ Say y here to enable support for the Qualcomm Shared Memory Manager.
+ The driver provides an interface to items in a heap shared among all
+ processors in a Qualcomm platform.
+
+config QCOM_SMD_RPM
+ tristate "Qualcomm Resource Power Manager (RPM) over SMD"
+ depends on ARCH_QCOM
+ depends on RPMSG && OF
+ help
+ If you say yes to this option, support will be included for the
+ Resource Power Manager system found in the Qualcomm 8974 based
+ devices.
+
+ This is required to access many regulators, clocks and bus
+ frequencies controlled by the RPM on these devices.
+
+ Say M here if you want to include support for the Qualcomm RPM as a
+ module. This will build a module called "qcom-smd-rpm".
+
+config QCOM_SMEM_STATE
+ bool
+
+config QCOM_SMP2P
+ tristate "Qualcomm Shared Memory Point to Point support"
+ depends on MAILBOX
+ depends on QCOM_SMEM
+ select QCOM_SMEM_STATE
+ help
+ Say yes here to support the Qualcomm Shared Memory Point to Point
+ protocol.
+
+config QCOM_SMSM
+ tristate "Qualcomm Shared Memory State Machine"
+ depends on QCOM_SMEM
+ select QCOM_SMEM_STATE
+ help
+ Say yes here to support the Qualcomm Shared Memory State Machine.
+ The state machine is represented by bits in shared memory.
+
+config QCOM_WCNSS_CTRL
+ tristate "Qualcomm WCNSS control driver"
+ depends on ARCH_QCOM
+ depends on RPMSG
+ help
+ Client driver for the WCNSS_CTRL SMD channel, used to download nv
+ firmware to a newly booted WCNSS chip.
+
+config QCOM_APR
+ tristate "Qualcomm APR Bus (Asynchronous Packet Router)"
+ depends on ARCH_QCOM
+ depends on RPMSG
+ help
+ Enable APR IPC protocol support between the application processor
+ and the QDSP6. APR is used by the audio driver to configure the
+ QDSP6 ASM, ADM and AFE modules.
+endmenu
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
new file mode 100644
index 000000000..f25b54cd6
--- /dev/null
+++ b/drivers/soc/qcom/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+CFLAGS_rpmh-rsc.o := -I$(src)
+obj-$(CONFIG_QCOM_GENI_SE) += qcom-geni-se.o
+obj-$(CONFIG_QCOM_COMMAND_DB) += cmd-db.o
+obj-$(CONFIG_QCOM_GLINK_SSR) += glink_ssr.o
+obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
+obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o
+obj-$(CONFIG_QCOM_PM) += spm.o
+obj-$(CONFIG_QCOM_QMI_HELPERS) += qmi_helpers.o
+qmi_helpers-y += qmi_encdec.o qmi_interface.o
+obj-$(CONFIG_QCOM_RMTFS_MEM) += rmtfs_mem.o
+obj-$(CONFIG_QCOM_RPMH) += qcom_rpmh.o
+qcom_rpmh-y += rpmh-rsc.o
+qcom_rpmh-y += rpmh.o
+obj-$(CONFIG_QCOM_SMD_RPM) += smd-rpm.o
+obj-$(CONFIG_QCOM_SMEM) += smem.o
+obj-$(CONFIG_QCOM_SMEM_STATE) += smem_state.o
+obj-$(CONFIG_QCOM_SMP2P) += smp2p.o
+obj-$(CONFIG_QCOM_SMSM) += smsm.o
+obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o
+obj-$(CONFIG_QCOM_APR) += apr.o
+obj-$(CONFIG_QCOM_LLCC) += llcc-slice.o
+obj-$(CONFIG_QCOM_SDM845_LLCC) += llcc-sdm845.o
diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c
new file mode 100644
index 000000000..ee9197f5a
--- /dev/null
+++ b/drivers/soc/qcom/apr.c
@@ -0,0 +1,378 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+// Copyright (c) 2018, Linaro Limited
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/idr.h>
+#include <linux/slab.h>
+#include <linux/of_device.h>
+#include <linux/soc/qcom/apr.h>
+#include <linux/rpmsg.h>
+#include <linux/of.h>
+
+struct apr {
+ struct rpmsg_endpoint *ch;
+ struct device *dev;
+ spinlock_t svcs_lock;
+ struct idr svcs_idr;
+ int dest_domain_id;
+};
+
+/**
+ * apr_send_pkt() - Send an APR message from an APR device
+ *
+ * @adev: Pointer to previously registered apr device.
+ * @pkt: Pointer to apr packet to send
+ *
+ * Return: packet size on success, a negative error code otherwise.
+ */
+int apr_send_pkt(struct apr_device *adev, struct apr_pkt *pkt)
+{
+ struct apr *apr = dev_get_drvdata(adev->dev.parent);
+ struct apr_hdr *hdr;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&adev->lock, flags);
+
+ hdr = &pkt->hdr;
+ hdr->src_domain = APR_DOMAIN_APPS;
+ hdr->src_svc = adev->svc_id;
+ hdr->dest_domain = adev->domain_id;
+ hdr->dest_svc = adev->svc_id;
+
+ ret = rpmsg_trysend(apr->ch, pkt, hdr->pkt_size);
+ spin_unlock_irqrestore(&adev->lock, flags);
+
+ return ret ? ret : hdr->pkt_size;
+}
+EXPORT_SYMBOL_GPL(apr_send_pkt);
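+
+/*
+ * A minimal usage sketch from a hypothetical APR client driver. The
+ * APR_MY_CMD opcode is made up for illustration, and the header helpers
+ * (APR_HDR_FIELD, APR_HDR_LEN, APR_HDR_SIZE, APR_PKT_VER) are assumed to
+ * come from include/linux/soc/qcom/apr.h:
+ *
+ *     struct apr_pkt pkt;
+ *     int ret;
+ *
+ *     pkt.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ *                                       APR_HDR_LEN(APR_HDR_SIZE),
+ *                                       APR_PKT_VER);
+ *     pkt.hdr.pkt_size = APR_HDR_SIZE;
+ *     pkt.hdr.opcode = APR_MY_CMD;
+ *
+ *     ret = apr_send_pkt(adev, &pkt);
+ *     if (ret < 0)
+ *             dev_err(&adev->dev, "failed to send pkt: %d\n", ret);
+ *
+ * apr_send_pkt() fills in the source and destination domain/service
+ * fields from @adev before handing the packet to the rpmsg endpoint.
+ */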
+
+static void apr_dev_release(struct device *dev)
+{
+ struct apr_device *adev = to_apr_device(dev);
+
+ kfree(adev);
+}
+
+static int apr_callback(struct rpmsg_device *rpdev, void *buf,
+ int len, void *priv, u32 addr)
+{
+ struct apr *apr = dev_get_drvdata(&rpdev->dev);
+ uint16_t hdr_size, msg_type, ver, svc_id;
+ struct apr_device *svc = NULL;
+ struct apr_driver *adrv = NULL;
+ struct apr_resp_pkt resp;
+ struct apr_hdr *hdr;
+ unsigned long flags;
+
+ if (len <= APR_HDR_SIZE) {
+ dev_err(apr->dev, "APR: Improper apr pkt received:%p %d\n",
+ buf, len);
+ return -EINVAL;
+ }
+
+ hdr = buf;
+ ver = APR_HDR_FIELD_VER(hdr->hdr_field);
+ if (ver > APR_PKT_VER + 1)
+ return -EINVAL;
+
+ hdr_size = APR_HDR_FIELD_SIZE_BYTES(hdr->hdr_field);
+ if (hdr_size < APR_HDR_SIZE) {
+ dev_err(apr->dev, "APR: Wrong hdr size:%d\n", hdr_size);
+ return -EINVAL;
+ }
+
+ if (hdr->pkt_size < APR_HDR_SIZE || hdr->pkt_size != len) {
+ dev_err(apr->dev, "APR: Wrong paket size\n");
+ return -EINVAL;
+ }
+
+ msg_type = APR_HDR_FIELD_MT(hdr->hdr_field);
+ if (msg_type >= APR_MSG_TYPE_MAX) {
+ dev_err(apr->dev, "APR: Wrong message type: %d\n", msg_type);
+ return -EINVAL;
+ }
+
+ if (hdr->src_domain >= APR_DOMAIN_MAX ||
+ hdr->dest_domain >= APR_DOMAIN_MAX ||
+ hdr->src_svc >= APR_SVC_MAX ||
+ hdr->dest_svc >= APR_SVC_MAX) {
+ dev_err(apr->dev, "APR: Wrong APR header\n");
+ return -EINVAL;
+ }
+
+ svc_id = hdr->dest_svc;
+ spin_lock_irqsave(&apr->svcs_lock, flags);
+ svc = idr_find(&apr->svcs_idr, svc_id);
+ if (svc && svc->dev.driver)
+ adrv = to_apr_driver(svc->dev.driver);
+ spin_unlock_irqrestore(&apr->svcs_lock, flags);
+
+ if (!adrv) {
+ dev_err(apr->dev, "APR: service is not registered\n");
+ return -EINVAL;
+ }
+
+ resp.hdr = *hdr;
+ resp.payload_size = hdr->pkt_size - hdr_size;
+
+ /*
+ * NOTE: hdr_size is not the same as APR_HDR_SIZE, as the remote can
+ * include optional headers in apr_hdr which should be ignored
+ */
+ if (resp.payload_size > 0)
+ resp.payload = buf + hdr_size;
+
+ adrv->callback(svc, &resp);
+
+ return 0;
+}
+
+static int apr_device_match(struct device *dev, struct device_driver *drv)
+{
+ struct apr_device *adev = to_apr_device(dev);
+ struct apr_driver *adrv = to_apr_driver(drv);
+ const struct apr_device_id *id = adrv->id_table;
+
+ /* Attempt an OF style match first */
+ if (of_driver_match_device(dev, drv))
+ return 1;
+
+ if (!id)
+ return 0;
+
+ while (id->domain_id != 0 || id->svc_id != 0) {
+ if (id->domain_id == adev->domain_id &&
+ id->svc_id == adev->svc_id)
+ return 1;
+ id++;
+ }
+
+ return 0;
+}
+
+static int apr_device_probe(struct device *dev)
+{
+ struct apr_device *adev = to_apr_device(dev);
+ struct apr_driver *adrv = to_apr_driver(dev->driver);
+
+ return adrv->probe(adev);
+}
+
+static int apr_device_remove(struct device *dev)
+{
+ struct apr_device *adev = to_apr_device(dev);
+ struct apr_driver *adrv;
+ struct apr *apr = dev_get_drvdata(adev->dev.parent);
+
+ if (dev->driver) {
+ adrv = to_apr_driver(dev->driver);
+ if (adrv->remove)
+ adrv->remove(adev);
+ spin_lock(&apr->svcs_lock);
+ idr_remove(&apr->svcs_idr, adev->svc_id);
+ spin_unlock(&apr->svcs_lock);
+ }
+
+ return 0;
+}
+
+static int apr_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct apr_device *adev = to_apr_device(dev);
+ int ret;
+
+ ret = of_device_uevent_modalias(dev, env);
+ if (ret != -ENODEV)
+ return ret;
+
+ return add_uevent_var(env, "MODALIAS=apr:%s", adev->name);
+}
+
+struct bus_type aprbus = {
+ .name = "aprbus",
+ .match = apr_device_match,
+ .probe = apr_device_probe,
+ .uevent = apr_uevent,
+ .remove = apr_device_remove,
+};
+EXPORT_SYMBOL_GPL(aprbus);
+
+static int apr_add_device(struct device *dev, struct device_node *np,
+ const struct apr_device_id *id)
+{
+ struct apr *apr = dev_get_drvdata(dev);
+ struct apr_device *adev = NULL;
+ int ret;
+
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return -ENOMEM;
+
+ spin_lock_init(&adev->lock);
+
+ adev->svc_id = id->svc_id;
+ adev->domain_id = id->domain_id;
+ adev->version = id->svc_version;
+ if (np)
+ strscpy(adev->name, np->name, APR_NAME_SIZE);
+ else
+ strscpy(adev->name, id->name, APR_NAME_SIZE);
+
+ dev_set_name(&adev->dev, "aprsvc:%s:%x:%x", adev->name,
+ id->domain_id, id->svc_id);
+
+ adev->dev.bus = &aprbus;
+ adev->dev.parent = dev;
+ adev->dev.of_node = np;
+ adev->dev.release = apr_dev_release;
+ adev->dev.driver = NULL;
+
+ spin_lock(&apr->svcs_lock);
+ idr_alloc(&apr->svcs_idr, adev, id->svc_id,
+ id->svc_id + 1, GFP_ATOMIC);
+ spin_unlock(&apr->svcs_lock);
+
+ dev_info(dev, "Adding APR dev: %s\n", dev_name(&adev->dev));
+
+ ret = device_register(&adev->dev);
+ if (ret) {
+ dev_err(dev, "device_register failed: %d\n", ret);
+ put_device(&adev->dev);
+ }
+
+ return ret;
+}
+
+static void of_register_apr_devices(struct device *dev)
+{
+ struct apr *apr = dev_get_drvdata(dev);
+ struct device_node *node;
+
+ for_each_child_of_node(dev->of_node, node) {
+ struct apr_device_id id = { {0} };
+
+ if (of_property_read_u32(node, "reg", &id.svc_id))
+ continue;
+
+ id.domain_id = apr->dest_domain_id;
+
+ if (apr_add_device(dev, node, &id))
+ dev_err(dev, "Failed to add apr %d svc\n", id.svc_id);
+ }
+}
+
+static int apr_probe(struct rpmsg_device *rpdev)
+{
+ struct device *dev = &rpdev->dev;
+ struct apr *apr;
+ int ret;
+
+ apr = devm_kzalloc(dev, sizeof(*apr), GFP_KERNEL);
+ if (!apr)
+ return -ENOMEM;
+
+ ret = of_property_read_u32(dev->of_node, "reg", &apr->dest_domain_id);
+ if (ret) {
+ dev_err(dev, "APR Domain ID not specified in DT\n");
+ return ret;
+ }
+
+ dev_set_drvdata(dev, apr);
+ apr->ch = rpdev->ept;
+ apr->dev = dev;
+ spin_lock_init(&apr->svcs_lock);
+ idr_init(&apr->svcs_idr);
+ of_register_apr_devices(dev);
+
+ return 0;
+}
+
+static int apr_remove_device(struct device *dev, void *null)
+{
+ struct apr_device *adev = to_apr_device(dev);
+
+ device_unregister(&adev->dev);
+
+ return 0;
+}
+
+static void apr_remove(struct rpmsg_device *rpdev)
+{
+ device_for_each_child(&rpdev->dev, NULL, apr_remove_device);
+}
+
+/**
+ * __apr_driver_register() - Client driver registration with aprbus
+ *
+ * @drv: Client driver to be associated with client-device.
+ * @owner: owning module/driver
+ *
+ * This API registers the client driver with the aprbus. It is called
+ * from the driver's module-init function.
+ */
+int __apr_driver_register(struct apr_driver *drv, struct module *owner)
+{
+ drv->driver.bus = &aprbus;
+ drv->driver.owner = owner;
+
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(__apr_driver_register);
+
+/**
+ * apr_driver_unregister() - Undo the effect of apr_driver_register
+ *
+ * @drv: Client driver to be unregistered
+ */
+void apr_driver_unregister(struct apr_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(apr_driver_unregister);
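+
+/*
+ * A minimal sketch of a client driver registering with aprbus. The
+ * service name, svc_id and callbacks are hypothetical, and the
+ * module_apr_driver() convenience macro is assumed to be provided by
+ * include/linux/soc/qcom/apr.h:
+ *
+ *     static const struct apr_device_id my_id_table[] = {
+ *             { "MY_SVC", APR_DOMAIN_ADSP, 0x4, 0 },
+ *             { },
+ *     };
+ *
+ *     static struct apr_driver my_apr_driver = {
+ *             .probe = my_probe,
+ *             .remove = my_remove,
+ *             .callback = my_callback,
+ *             .id_table = my_id_table,
+ *             .driver = {
+ *                     .name = "my-apr-driver",
+ *             },
+ *     };
+ *     module_apr_driver(my_apr_driver);
+ */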
+
+static const struct of_device_id apr_of_match[] = {
+ { .compatible = "qcom,apr"},
+ { .compatible = "qcom,apr-v2"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, apr_of_match);
+
+static struct rpmsg_driver apr_driver = {
+ .probe = apr_probe,
+ .remove = apr_remove,
+ .callback = apr_callback,
+ .drv = {
+ .name = "qcom,apr",
+ .of_match_table = apr_of_match,
+ },
+};
+
+static int __init apr_init(void)
+{
+ int ret;
+
+ ret = bus_register(&aprbus);
+ if (ret)
+ return ret;
+
+ ret = register_rpmsg_driver(&apr_driver);
+ if (ret)
+ bus_unregister(&aprbus);
+
+ return ret;
+}
+
+static void __exit apr_exit(void)
+{
+ unregister_rpmsg_driver(&apr_driver);
+ bus_unregister(&aprbus);
+}
+
+subsys_initcall(apr_init);
+module_exit(apr_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm APR Bus");
diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c
new file mode 100644
index 000000000..78d73ec58
--- /dev/null
+++ b/drivers/soc/qcom/cmd-db.c
@@ -0,0 +1,317 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. */
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include <soc/qcom/cmd-db.h>
+
+#define NUM_PRIORITY 2
+#define MAX_SLV_ID 8
+#define SLAVE_ID_MASK 0x7
+#define SLAVE_ID_SHIFT 16
+
+/**
+ * struct entry_header: header for each entry in cmddb
+ *
+ * @id: resource's identifier
+ * @priority: unused
+ * @addr: the address of the resource
+ * @len: length of the data
+ * @offset: offset from @data_offset to the start of the data
+ */
+struct entry_header {
+ u8 id[8];
+ __le32 priority[NUM_PRIORITY];
+ __le32 addr;
+ __le16 len;
+ __le16 offset;
+};
+
+/**
+ * struct rsc_hdr: resource header information
+ *
+ * @slv_id: id for the resource
+ * @header_offset: entry's header at offset from the end of the cmd_db_header
+ * @data_offset: entry's data at offset from the end of the cmd_db_header
+ * @cnt: number of entries for HW type
+ * @version: MSB is major, LSB is minor
+ * @reserved: reserved for future use.
+ */
+struct rsc_hdr {
+ __le16 slv_id;
+ __le16 header_offset;
+ __le16 data_offset;
+ __le16 cnt;
+ __le16 version;
+ __le16 reserved[3];
+};
+
+/**
+ * struct cmd_db_header: The DB header information
+ *
+ * @version: The cmd db version
+ * @magic: constant expected in the database
+ * @header: array of resources
+ * @checksum: checksum for the header. Unused.
+ * @reserved: reserved memory
+ * @data: driver specific data
+ */
+struct cmd_db_header {
+ __le32 version;
+ u8 magic[4];
+ struct rsc_hdr header[MAX_SLV_ID];
+ __le32 checksum;
+ __le32 reserved;
+ u8 data[];
+};
+
+/**
+ * DOC: Description of the Command DB database.
+ *
+ * At the start of the command DB memory is the cmd_db_header structure.
+ * The cmd_db_header holds the version, checksum and magic key, as well as
+ * an array of headers, one for each slave (depicted by the rsc_hdr). Each
+ * h/w based accelerator is a 'slave' (shared resource) and has a slave id
+ * indicating the type of accelerator. The rsc_hdr is the header for the
+ * individual slaves of a given type. The entries for each of these slaves
+ * begin at rsc_hdr.header_offset. In addition, each slave could have
+ * auxiliary data that may be needed by the driver. The data for the slave
+ * starts at entry_header.offset from the location pointed to by
+ * rsc_hdr.data_offset.
+ *
+ * Drivers have a stringified key to a slave/resource. They can query the slave
+ * information and get the slave id and the auxiliary data and the length of the
+ * data. Using this information, they can format the request to be sent to the
+ * h/w accelerator and request a resource state.
+ */
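+
+/*
+ * A minimal usage sketch from a hypothetical client (the "lnbb" key is
+ * made up; real keys are platform specific and error handling is
+ * abbreviated):
+ *
+ *     ret = cmd_db_ready();
+ *     if (ret)
+ *             return ret;
+ *
+ * cmd_db_ready() returns -EPROBE_DEFER until the reserved memory region
+ * has been mapped, so callers typically propagate its return value from
+ * their probe function. Once the DB is ready:
+ *
+ *     addr = cmd_db_read_addr("lnbb");
+ *     len = cmd_db_read_aux_data_len("lnbb");
+ *     if (len)
+ *             ret = cmd_db_read_aux_data("lnbb", buf, len);
+ */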
+
+static const u8 CMD_DB_MAGIC[] = { 0xdb, 0x30, 0x03, 0x0c };
+
+static bool cmd_db_magic_matches(const struct cmd_db_header *header)
+{
+ const u8 *magic = header->magic;
+
+ return memcmp(magic, CMD_DB_MAGIC, ARRAY_SIZE(CMD_DB_MAGIC)) == 0;
+}
+
+static struct cmd_db_header *cmd_db_header;
+
+
+static inline void *rsc_to_entry_header(struct rsc_hdr *hdr)
+{
+ u16 offset = le16_to_cpu(hdr->header_offset);
+
+ return cmd_db_header->data + offset;
+}
+
+static inline void *
+rsc_offset(struct rsc_hdr *hdr, struct entry_header *ent)
+{
+ u16 offset = le16_to_cpu(hdr->data_offset);
+ u16 loffset = le16_to_cpu(ent->offset);
+
+ return cmd_db_header->data + offset + loffset;
+}
+
+/**
+ * cmd_db_ready - Indicates if command DB is available
+ *
+ * Return: 0 on success, errno otherwise
+ */
+int cmd_db_ready(void)
+{
+ if (cmd_db_header == NULL)
+ return -EPROBE_DEFER;
+ else if (!cmd_db_magic_matches(cmd_db_header))
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(cmd_db_ready);
+
+static int cmd_db_get_header(const char *id, struct entry_header *eh,
+ struct rsc_hdr *rh)
+{
+ struct rsc_hdr *rsc_hdr;
+ struct entry_header *ent;
+ int ret, i, j;
+ u8 query[8];
+
+ ret = cmd_db_ready();
+ if (ret)
+ return ret;
+
+ if (!eh || !rh)
+ return -EINVAL;
+
+ /* Pad out query string to same length as in DB */
+ strncpy(query, id, sizeof(query));
+
+ for (i = 0; i < MAX_SLV_ID; i++) {
+ rsc_hdr = &cmd_db_header->header[i];
+ if (!rsc_hdr->slv_id)
+ break;
+
+ ent = rsc_to_entry_header(rsc_hdr);
+ for (j = 0; j < le16_to_cpu(rsc_hdr->cnt); j++, ent++) {
+ if (memcmp(ent->id, query, sizeof(ent->id)) == 0)
+ break;
+ }
+
+ if (j < le16_to_cpu(rsc_hdr->cnt)) {
+ memcpy(eh, ent, sizeof(*ent));
+ memcpy(rh, rsc_hdr, sizeof(*rh));
+ return 0;
+ }
+ }
+
+ return -ENODEV;
+}
+
+/**
+ * cmd_db_read_addr() - Query command db for resource id address.
+ *
+ * @id: resource id to query for address
+ *
+ * Return: resource address on success, 0 on error
+ *
+ * This is used to retrieve resource address based on resource
+ * id.
+ */
+u32 cmd_db_read_addr(const char *id)
+{
+ int ret;
+ struct entry_header ent;
+ struct rsc_hdr rsc_hdr;
+
+ ret = cmd_db_get_header(id, &ent, &rsc_hdr);
+
+ return ret < 0 ? 0 : le32_to_cpu(ent.addr);
+}
+EXPORT_SYMBOL(cmd_db_read_addr);
+
+/**
+ * cmd_db_read_aux_data() - Query command db for aux data.
+ *
+ * @id: Resource to retrieve AUX Data on.
+ * @data: Data buffer to copy the returned aux data to; must not be NULL.
+ * @len: Caller provides size of data buffer passed in.
+ *
+ * Return: size of data on success, errno otherwise
+ */
+int cmd_db_read_aux_data(const char *id, u8 *data, size_t len)
+{
+ int ret;
+ struct entry_header ent;
+ struct rsc_hdr rsc_hdr;
+ u16 ent_len;
+
+ if (!data)
+ return -EINVAL;
+
+ ret = cmd_db_get_header(id, &ent, &rsc_hdr);
+ if (ret)
+ return ret;
+
+ ent_len = le16_to_cpu(ent.len);
+ if (len < ent_len)
+ return -EINVAL;
+
+ len = min_t(u16, ent_len, len);
+ memcpy(data, rsc_offset(&rsc_hdr, &ent), len);
+
+ return len;
+}
+EXPORT_SYMBOL(cmd_db_read_aux_data);
+
+/**
+ * cmd_db_read_aux_data_len - Get the length of the auxiliary data stored in DB.
+ *
+ * @id: Resource to retrieve AUX Data.
+ *
+ * Return: size on success, 0 on error
+ */
+size_t cmd_db_read_aux_data_len(const char *id)
+{
+ int ret;
+ struct entry_header ent;
+ struct rsc_hdr rsc_hdr;
+
+ ret = cmd_db_get_header(id, &ent, &rsc_hdr);
+
+ return ret < 0 ? 0 : le16_to_cpu(ent.len);
+}
+EXPORT_SYMBOL(cmd_db_read_aux_data_len);
+
+/**
+ * cmd_db_read_slave_id - Get the slave ID for a given resource address
+ *
+ * @id: Resource id to query the DB for the slave ID
+ *
+ * Return: cmd_db_hw_type enum on success, CMD_DB_HW_INVALID on error
+ */
+enum cmd_db_hw_type cmd_db_read_slave_id(const char *id)
+{
+ int ret;
+ struct entry_header ent;
+ struct rsc_hdr rsc_hdr;
+ u32 addr;
+
+ ret = cmd_db_get_header(id, &ent, &rsc_hdr);
+ if (ret < 0)
+ return CMD_DB_HW_INVALID;
+
+ addr = le32_to_cpu(ent.addr);
+ return (addr >> SLAVE_ID_SHIFT) & SLAVE_ID_MASK;
+}
+EXPORT_SYMBOL(cmd_db_read_slave_id);
+
+static int cmd_db_dev_probe(struct platform_device *pdev)
+{
+ struct reserved_mem *rmem;
+ int ret = 0;
+
+ rmem = of_reserved_mem_lookup(pdev->dev.of_node);
+ if (!rmem) {
+ dev_err(&pdev->dev, "failed to acquire memory region\n");
+ return -EINVAL;
+ }
+
+ cmd_db_header = memremap(rmem->base, rmem->size, MEMREMAP_WB);
+ if (!cmd_db_header) {
+ ret = -ENOMEM;
+ cmd_db_header = NULL;
+ return ret;
+ }
+
+ if (!cmd_db_magic_matches(cmd_db_header)) {
+ dev_err(&pdev->dev, "Invalid Command DB Magic\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id cmd_db_match_table[] = {
+ { .compatible = "qcom,cmd-db" },
+ { },
+};
+
+static struct platform_driver cmd_db_dev_driver = {
+ .probe = cmd_db_dev_probe,
+ .driver = {
+ .name = "cmd-db",
+ .of_match_table = cmd_db_match_table,
+ },
+};
+
+static int __init cmd_db_device_init(void)
+{
+ return platform_driver_register(&cmd_db_dev_driver);
+}
+arch_initcall(cmd_db_device_init);
diff --git a/drivers/soc/qcom/glink_ssr.c b/drivers/soc/qcom/glink_ssr.c
new file mode 100644
index 000000000..19c7399ed
--- /dev/null
+++ b/drivers/soc/qcom/glink_ssr.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017, Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/completion.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/rpmsg.h>
+#include <linux/remoteproc/qcom_rproc.h>
+
+/**
+ * struct do_cleanup_msg - The data structure for an SSR do_cleanup message
+ * @version: The G-Link SSR protocol version
+ * @command: The G-Link SSR command - do_cleanup
+ * @seq_num: Sequence number
+ * @name_len: Length of the name of the subsystem being restarted
+ * @name: G-Link edge name of the subsystem being restarted
+ */
+struct do_cleanup_msg {
+ __le32 version;
+ __le32 command;
+ __le32 seq_num;
+ __le32 name_len;
+ char name[32];
+};
+
+/**
+ * struct cleanup_done_msg - The data structure for an SSR cleanup_done message
+ * @version: The G-Link SSR protocol version
+ * @response: The G-Link SSR response to a do_cleanup command, cleanup_done
+ * @seq_num: Sequence number
+ */
+struct cleanup_done_msg {
+ __le32 version;
+ __le32 response;
+ __le32 seq_num;
+};
+
+/* G-Link SSR protocol commands */
+#define GLINK_SSR_DO_CLEANUP 0
+#define GLINK_SSR_CLEANUP_DONE 1
+
+struct glink_ssr {
+ struct device *dev;
+ struct rpmsg_endpoint *ept;
+
+ struct notifier_block nb;
+
+ u32 seq_num;
+ struct completion completion;
+};
+
+static int qcom_glink_ssr_callback(struct rpmsg_device *rpdev,
+ void *data, int len, void *priv, u32 addr)
+{
+ struct cleanup_done_msg *msg = data;
+ struct glink_ssr *ssr = dev_get_drvdata(&rpdev->dev);
+
+ if (len < sizeof(*msg)) {
+ dev_err(ssr->dev, "message too short\n");
+ return -EINVAL;
+ }
+
+ if (le32_to_cpu(msg->version) != 0)
+ return -EINVAL;
+
+ if (le32_to_cpu(msg->response) != GLINK_SSR_CLEANUP_DONE)
+ return 0;
+
+ if (le32_to_cpu(msg->seq_num) != ssr->seq_num) {
+ dev_err(ssr->dev, "invalid sequence number of response\n");
+ return -EINVAL;
+ }
+
+ complete(&ssr->completion);
+
+ return 0;
+}
+
+static int qcom_glink_ssr_notify(struct notifier_block *nb, unsigned long event,
+ void *data)
+{
+ struct glink_ssr *ssr = container_of(nb, struct glink_ssr, nb);
+ struct do_cleanup_msg msg;
+ char *ssr_name = data;
+ int ret;
+
+ ssr->seq_num++;
+ reinit_completion(&ssr->completion);
+
+ memset(&msg, 0, sizeof(msg));
+ msg.command = cpu_to_le32(GLINK_SSR_DO_CLEANUP);
+ msg.seq_num = cpu_to_le32(ssr->seq_num);
+ msg.name_len = cpu_to_le32(strlen(ssr_name));
+ strlcpy(msg.name, ssr_name, sizeof(msg.name));
+
+ ret = rpmsg_send(ssr->ept, &msg, sizeof(msg));
+ if (ret < 0)
+ dev_err(ssr->dev, "failed to send cleanup message\n");
+
+ ret = wait_for_completion_timeout(&ssr->completion, HZ);
+ if (!ret)
+ dev_err(ssr->dev, "timeout waiting for cleanup done message\n");
+
+ return NOTIFY_DONE;
+}
+
+static int qcom_glink_ssr_probe(struct rpmsg_device *rpdev)
+{
+ struct glink_ssr *ssr;
+
+ ssr = devm_kzalloc(&rpdev->dev, sizeof(*ssr), GFP_KERNEL);
+ if (!ssr)
+ return -ENOMEM;
+
+ init_completion(&ssr->completion);
+
+ ssr->dev = &rpdev->dev;
+ ssr->ept = rpdev->ept;
+ ssr->nb.notifier_call = qcom_glink_ssr_notify;
+
+ dev_set_drvdata(&rpdev->dev, ssr);
+
+ return qcom_register_ssr_notifier(&ssr->nb);
+}
+
+static void qcom_glink_ssr_remove(struct rpmsg_device *rpdev)
+{
+ struct glink_ssr *ssr = dev_get_drvdata(&rpdev->dev);
+
+ qcom_unregister_ssr_notifier(&ssr->nb);
+}
+
+static const struct rpmsg_device_id qcom_glink_ssr_match[] = {
+ { "glink_ssr" },
+ {}
+};
+
+static struct rpmsg_driver qcom_glink_ssr_driver = {
+ .probe = qcom_glink_ssr_probe,
+ .remove = qcom_glink_ssr_remove,
+ .callback = qcom_glink_ssr_callback,
+ .id_table = qcom_glink_ssr_match,
+ .drv = {
+ .name = "qcom_glink_ssr",
+ },
+};
+module_rpmsg_driver(qcom_glink_ssr_driver);
+
+MODULE_ALIAS("rpmsg:glink_ssr");
+MODULE_DESCRIPTION("Qualcomm GLINK SSR notifier");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/llcc-sdm845.c b/drivers/soc/qcom/llcc-sdm845.c
new file mode 100644
index 000000000..2e1e4f0a5
--- /dev/null
+++ b/drivers/soc/qcom/llcc-sdm845.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/soc/qcom/llcc-qcom.h>
+
+/*
+ * An SCT (System Cache Table) entry consists of the following members:
+ * usecase_id: Unique id for the client's use case
+ * slice_id: llcc slice id for each client
+ * max_cap: The maximum capacity of the cache slice provided in KB
+ * priority: Priority of the client used to select victim line for replacement
+ * fixed_size: Boolean indicating if the slice has a fixed capacity
+ * bonus_ways: Bonus ways are additional ways to be used for any slice,
+ * if a client ends up using more than its reserved cache ways. Bonus
+ * ways are allocated only if they are not reserved for some
+ * other client.
+ * res_ways: Reserved ways for the cache slice; the reserved ways cannot
+ * be used by any client other than the one it's assigned to.
+ * cache_mode: Each slice operates as a cache; this controls the mode of
+ * the slice: normal or TCM (Tightly Coupled Memory)
+ * probe_target_ways: Determines what ways to probe for access hit. When
+ * configured to 1 only bonus and reserved ways are probed.
+ * When configured to 0 all ways in llcc are probed.
+ * dis_cap_alloc: Disable capacity based allocation for a client
+ * retain_on_pc: If this bit is set and the client has maintained an active
+ * vote, then the ways assigned to this client are not flushed on power
+ * collapse.
+ * activate_on_init: Activate the slice immediately after the SCT is programmed
+ */
+#define SCT_ENTRY(uid, sid, mc, p, fs, bway, rway, cmod, ptw, dca, rp, a) \
+ { \
+ .usecase_id = uid, \
+ .slice_id = sid, \
+ .max_cap = mc, \
+ .priority = p, \
+ .fixed_size = fs, \
+ .bonus_ways = bway, \
+ .res_ways = rway, \
+ .cache_mode = cmod, \
+ .probe_target_ways = ptw, \
+ .dis_cap_alloc = dca, \
+ .retain_on_pc = rp, \
+ .activate_on_init = a, \
+ }
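+
+/*
+ * As a worked example, the first row of the table below,
+ * SCT_ENTRY(LLCC_CPUSS, 1, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 1),
+ * describes slice 1 for the CPU subsystem: up to 2816 KB, priority 1,
+ * not fixed size, bonus ways 0xffc, reserved ways 0x2, normal cache
+ * mode, all ways probed, capacity based allocation disabled, ways
+ * retained on power collapse (given an active vote), and the slice
+ * activated as soon as the SCT is programmed.
+ */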
+
+static struct llcc_slice_config sdm845_data[] = {
+ SCT_ENTRY(LLCC_CPUSS, 1, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 1),
+ SCT_ENTRY(LLCC_VIDSC0, 2, 512, 2, 1, 0x0, 0x0f0, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_VIDSC1, 3, 512, 2, 1, 0x0, 0x0f0, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_ROTATOR, 4, 563, 2, 1, 0x0, 0x00e, 2, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_VOICE, 5, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_AUDIO, 6, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_MDMHPGRW, 7, 1024, 2, 0, 0xfc, 0xf00, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_MDM, 8, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_CMPT, 10, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_GPUHTW, 11, 512, 1, 1, 0xc, 0x0, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_GPU, 12, 2304, 1, 0, 0xff0, 0x2, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_MMUHWT, 13, 256, 2, 0, 0x0, 0x1, 0, 0, 1, 0, 1),
+ SCT_ENTRY(LLCC_CMPTDMA, 15, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_DISP, 16, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_VIDFW, 17, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_MDMHPFX, 20, 1024, 2, 1, 0x0, 0xf00, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_MDMPNG, 21, 1024, 0, 1, 0x1e, 0x0, 0, 0, 1, 1, 0),
+ SCT_ENTRY(LLCC_AUDHW, 22, 1024, 1, 1, 0xffc, 0x2, 0, 0, 1, 1, 0),
+};
+
+static int sdm845_qcom_llcc_probe(struct platform_device *pdev)
+{
+ return qcom_llcc_probe(pdev, sdm845_data, ARRAY_SIZE(sdm845_data));
+}
+
+static const struct of_device_id sdm845_qcom_llcc_of_match[] = {
+ { .compatible = "qcom,sdm845-llcc", },
+ { }
+};
+
+static struct platform_driver sdm845_qcom_llcc_driver = {
+ .driver = {
+ .name = "sdm845-llcc",
+ .of_match_table = sdm845_qcom_llcc_of_match,
+ },
+ .probe = sdm845_qcom_llcc_probe,
+};
+module_platform_driver(sdm845_qcom_llcc_driver);
+
+MODULE_DESCRIPTION("QCOM sdm845 LLCC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/llcc-slice.c b/drivers/soc/qcom/llcc-slice.c
new file mode 100644
index 000000000..54063a311
--- /dev/null
+++ b/drivers/soc/qcom/llcc-slice.c
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/llcc-qcom.h>
+
+#define ACTIVATE BIT(0)
+#define DEACTIVATE BIT(1)
+#define ACT_CTRL_OPCODE_ACTIVATE BIT(0)
+#define ACT_CTRL_OPCODE_DEACTIVATE BIT(1)
+#define ACT_CTRL_ACT_TRIG BIT(0)
+#define ACT_CTRL_OPCODE_SHIFT 0x01
+#define ATTR1_PROBE_TARGET_WAYS_SHIFT 0x02
+#define ATTR1_FIXED_SIZE_SHIFT 0x03
+#define ATTR1_PRIORITY_SHIFT 0x04
+#define ATTR1_MAX_CAP_SHIFT 0x10
+#define ATTR0_RES_WAYS_MASK GENMASK(11, 0)
+#define ATTR0_BONUS_WAYS_MASK GENMASK(27, 16)
+#define ATTR0_BONUS_WAYS_SHIFT 0x10
+#define LLCC_STATUS_READ_DELAY 100
+
+#define CACHE_LINE_SIZE_SHIFT 6
+
+#define LLCC_COMMON_STATUS0 0x0003000c
+#define LLCC_LB_CNT_MASK GENMASK(31, 28)
+#define LLCC_LB_CNT_SHIFT 28
+
+#define MAX_CAP_TO_BYTES(n) (n * SZ_1K)
+#define LLCC_TRP_ACT_CTRLn(n) (n * SZ_4K)
+#define LLCC_TRP_STATUSn(n) (4 + n * SZ_4K)
+#define LLCC_TRP_ATTR0_CFGn(n) (0x21000 + SZ_8 * n)
+#define LLCC_TRP_ATTR1_CFGn(n) (0x21004 + SZ_8 * n)
+
+#define BANK_OFFSET_STRIDE 0x80000
+
+static struct llcc_drv_data *drv_data;
+
+static const struct regmap_config llcc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+};
+
+/**
+ * llcc_slice_getd - get llcc slice descriptor
+ * @uid: usecase_id for the client
+ *
+ * A pointer to the llcc slice descriptor is returned on success and
+ * an error pointer on failure.
+ */
+struct llcc_slice_desc *llcc_slice_getd(u32 uid)
+{
+ const struct llcc_slice_config *cfg;
+ struct llcc_slice_desc *desc;
+ u32 sz, count;
+
+ cfg = drv_data->cfg;
+ sz = drv_data->cfg_size;
+
+ for (count = 0; cfg && count < sz; count++, cfg++)
+ if (cfg->usecase_id == uid)
+ break;
+
+ if (count == sz || !cfg)
+ return ERR_PTR(-ENODEV);
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return ERR_PTR(-ENOMEM);
+
+ desc->slice_id = cfg->slice_id;
+ desc->slice_size = cfg->max_cap;
+
+ return desc;
+}
+EXPORT_SYMBOL_GPL(llcc_slice_getd);
+
+/**
+ * llcc_slice_putd - put the llcc slice descriptor
+ * @desc: Pointer to llcc slice descriptor
+ */
+void llcc_slice_putd(struct llcc_slice_desc *desc)
+{
+ kfree(desc);
+}
+EXPORT_SYMBOL_GPL(llcc_slice_putd);
+
+static int llcc_update_act_ctrl(u32 sid,
+ u32 act_ctrl_reg_val, u32 status)
+{
+ u32 act_ctrl_reg;
+ u32 status_reg;
+ u32 slice_status;
+ int ret;
+
+ act_ctrl_reg = drv_data->bcast_off + LLCC_TRP_ACT_CTRLn(sid);
+ status_reg = drv_data->bcast_off + LLCC_TRP_STATUSn(sid);
+
+ /* Set the ACTIVE trigger */
+ act_ctrl_reg_val |= ACT_CTRL_ACT_TRIG;
+ ret = regmap_write(drv_data->regmap, act_ctrl_reg, act_ctrl_reg_val);
+ if (ret)
+ return ret;
+
+ /* Clear the ACTIVE trigger */
+ act_ctrl_reg_val &= ~ACT_CTRL_ACT_TRIG;
+ ret = regmap_write(drv_data->regmap, act_ctrl_reg, act_ctrl_reg_val);
+ if (ret)
+ return ret;
+
+ ret = regmap_read_poll_timeout(drv_data->regmap, status_reg,
+ slice_status, !(slice_status & status),
+ 0, LLCC_STATUS_READ_DELAY);
+ return ret;
+}
+
+/**
+ * llcc_slice_activate - Activate the llcc slice
+ * @desc: Pointer to llcc slice descriptor
+ *
+ * A value of zero will be returned on success and a negative errno will
+ * be returned in error cases
+ */
+int llcc_slice_activate(struct llcc_slice_desc *desc)
+{
+ int ret;
+ u32 act_ctrl_val;
+
+ mutex_lock(&drv_data->lock);
+ if (test_bit(desc->slice_id, drv_data->bitmap)) {
+ mutex_unlock(&drv_data->lock);
+ return 0;
+ }
+
+ act_ctrl_val = ACT_CTRL_OPCODE_ACTIVATE << ACT_CTRL_OPCODE_SHIFT;
+
+ ret = llcc_update_act_ctrl(desc->slice_id, act_ctrl_val,
+ DEACTIVATE);
+ if (ret) {
+ mutex_unlock(&drv_data->lock);
+ return ret;
+ }
+
+ __set_bit(desc->slice_id, drv_data->bitmap);
+ mutex_unlock(&drv_data->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(llcc_slice_activate);
+
+/**
+ * llcc_slice_deactivate - Deactivate the llcc slice
+ * @desc: Pointer to llcc slice descriptor
+ *
+ * A value of zero will be returned on success and a negative errno will
+ * be returned in error cases
+ */
+int llcc_slice_deactivate(struct llcc_slice_desc *desc)
+{
+ u32 act_ctrl_val;
+ int ret;
+
+ mutex_lock(&drv_data->lock);
+ if (!test_bit(desc->slice_id, drv_data->bitmap)) {
+ mutex_unlock(&drv_data->lock);
+ return 0;
+ }
+ act_ctrl_val = ACT_CTRL_OPCODE_DEACTIVATE << ACT_CTRL_OPCODE_SHIFT;
+
+ ret = llcc_update_act_ctrl(desc->slice_id, act_ctrl_val,
+ ACTIVATE);
+ if (ret) {
+ mutex_unlock(&drv_data->lock);
+ return ret;
+ }
+
+ __clear_bit(desc->slice_id, drv_data->bitmap);
+ mutex_unlock(&drv_data->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(llcc_slice_deactivate);
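+
+/*
+ * A minimal sketch of the slice life cycle from a hypothetical LLCC
+ * client (LLCC_GPU is one of the use-case ids from
+ * include/linux/soc/qcom/llcc-qcom.h; error handling abbreviated):
+ *
+ *     struct llcc_slice_desc *desc;
+ *     int ret;
+ *
+ *     desc = llcc_slice_getd(LLCC_GPU);
+ *     if (IS_ERR(desc))
+ *             return PTR_ERR(desc);
+ *
+ *     ret = llcc_slice_activate(desc);
+ *     ... tag transactions with llcc_get_slice_id(desc) ...
+ *     ret = llcc_slice_deactivate(desc);
+ *     llcc_slice_putd(desc);
+ */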
+
+/**
+ * llcc_get_slice_id - return the slice id
+ * @desc: Pointer to llcc slice descriptor
+ */
+int llcc_get_slice_id(struct llcc_slice_desc *desc)
+{
+ return desc->slice_id;
+}
+EXPORT_SYMBOL_GPL(llcc_get_slice_id);
+
+/**
+ * llcc_get_slice_size - return the slice size
+ * @desc: Pointer to llcc slice descriptor
+ */
+size_t llcc_get_slice_size(struct llcc_slice_desc *desc)
+{
+ return desc->slice_size;
+}
+EXPORT_SYMBOL_GPL(llcc_get_slice_size);
+
+static int qcom_llcc_cfg_program(struct platform_device *pdev)
+{
+ int i;
+ u32 attr1_cfg;
+ u32 attr0_cfg;
+ u32 attr1_val;
+ u32 attr0_val;
+ u32 max_cap_cacheline;
+ u32 sz;
+ int ret;
+ const struct llcc_slice_config *llcc_table;
+ struct llcc_slice_desc desc;
+ u32 bcast_off = drv_data->bcast_off;
+
+ sz = drv_data->cfg_size;
+ llcc_table = drv_data->cfg;
+
+ for (i = 0; i < sz; i++) {
+ attr1_cfg = bcast_off +
+ LLCC_TRP_ATTR1_CFGn(llcc_table[i].slice_id);
+ attr0_cfg = bcast_off +
+ LLCC_TRP_ATTR0_CFGn(llcc_table[i].slice_id);
+
+ attr1_val = llcc_table[i].cache_mode;
+ attr1_val |= llcc_table[i].probe_target_ways <<
+ ATTR1_PROBE_TARGET_WAYS_SHIFT;
+ attr1_val |= llcc_table[i].fixed_size <<
+ ATTR1_FIXED_SIZE_SHIFT;
+ attr1_val |= llcc_table[i].priority <<
+ ATTR1_PRIORITY_SHIFT;
+
+ max_cap_cacheline = MAX_CAP_TO_BYTES(llcc_table[i].max_cap);
+
+ /*
+ * The number of LLCC instances can vary per target. The SW writes
+ * to a broadcast register which gets propagated to each llcc
+ * instance (llcc0,.. llccN). Since the size of the memory is
+ * divided equally amongst the llcc instances, we need to configure
+ * the max cap accordingly.
+ */
+ max_cap_cacheline = max_cap_cacheline / drv_data->num_banks;
+ max_cap_cacheline >>= CACHE_LINE_SIZE_SHIFT;
+ attr1_val |= max_cap_cacheline << ATTR1_MAX_CAP_SHIFT;
+
+ attr0_val = llcc_table[i].res_ways & ATTR0_RES_WAYS_MASK;
+ attr0_val |= llcc_table[i].bonus_ways << ATTR0_BONUS_WAYS_SHIFT;
+
+ ret = regmap_write(drv_data->regmap, attr1_cfg, attr1_val);
+ if (ret)
+ return ret;
+ ret = regmap_write(drv_data->regmap, attr0_cfg, attr0_val);
+ if (ret)
+ return ret;
+ if (llcc_table[i].activate_on_init) {
+ desc.slice_id = llcc_table[i].slice_id;
+ ret = llcc_slice_activate(&desc);
+ }
+ }
+ return ret;
+}
+
+int qcom_llcc_probe(struct platform_device *pdev,
+ const struct llcc_slice_config *llcc_cfg, u32 sz)
+{
+ u32 num_banks;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *base;
+ int ret, i;
+
+ drv_data = devm_kzalloc(dev, sizeof(*drv_data), GFP_KERNEL);
+ if (!drv_data)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ drv_data->regmap = devm_regmap_init_mmio(dev, base,
+ &llcc_regmap_config);
+ if (IS_ERR(drv_data->regmap))
+ return PTR_ERR(drv_data->regmap);
+
+ ret = regmap_read(drv_data->regmap, LLCC_COMMON_STATUS0,
+ &num_banks);
+ if (ret)
+ return ret;
+
+ num_banks &= LLCC_LB_CNT_MASK;
+ num_banks >>= LLCC_LB_CNT_SHIFT;
+ drv_data->num_banks = num_banks;
+
+ for (i = 0; i < sz; i++)
+ if (llcc_cfg[i].slice_id > drv_data->max_slices)
+ drv_data->max_slices = llcc_cfg[i].slice_id;
+
+ drv_data->offsets = devm_kcalloc(dev, num_banks, sizeof(u32),
+ GFP_KERNEL);
+ if (!drv_data->offsets)
+ return -ENOMEM;
+
+ for (i = 0; i < num_banks; i++)
+ drv_data->offsets[i] = i * BANK_OFFSET_STRIDE;
+
+ drv_data->bcast_off = num_banks * BANK_OFFSET_STRIDE;
+
+ drv_data->bitmap = devm_kcalloc(dev,
+ BITS_TO_LONGS(drv_data->max_slices), sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!drv_data->bitmap)
+ return -ENOMEM;
+
+ drv_data->cfg = llcc_cfg;
+ drv_data->cfg_size = sz;
+ mutex_init(&drv_data->lock);
+ platform_set_drvdata(pdev, drv_data);
+
+ return qcom_llcc_cfg_program(pdev);
+}
+EXPORT_SYMBOL_GPL(qcom_llcc_probe);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c
new file mode 100644
index 000000000..47dffe773
--- /dev/null
+++ b/drivers/soc/qcom/mdt_loader.c
@@ -0,0 +1,262 @@
+/*
+ * Qualcomm Peripheral Image Loader
+ *
+ * Copyright (C) 2016 Linaro Ltd
+ * Copyright (C) 2015 Sony Mobile Communications Inc
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/elf.h>
+#include <linux/firmware.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/qcom_scm.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/mdt_loader.h>
+
+static bool mdt_phdr_valid(const struct elf32_phdr *phdr)
+{
+ if (phdr->p_type != PT_LOAD)
+ return false;
+
+ if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
+ return false;
+
+ if (!phdr->p_memsz)
+ return false;
+
+ return true;
+}
+
+/**
+ * qcom_mdt_get_size() - acquire size of the memory region needed to load mdt
+ * @fw: firmware object for the mdt file
+ *
+ * Returns size of the loaded firmware blob, or -EINVAL on failure.
+ */
+ssize_t qcom_mdt_get_size(const struct firmware *fw)
+{
+ const struct elf32_phdr *phdrs;
+ const struct elf32_phdr *phdr;
+ const struct elf32_hdr *ehdr;
+ phys_addr_t min_addr = PHYS_ADDR_MAX;
+ phys_addr_t max_addr = 0;
+ int i;
+
+ ehdr = (struct elf32_hdr *)fw->data;
+ phdrs = (struct elf32_phdr *)(ehdr + 1);
+
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ phdr = &phdrs[i];
+
+ if (!mdt_phdr_valid(phdr))
+ continue;
+
+ if (phdr->p_paddr < min_addr)
+ min_addr = phdr->p_paddr;
+
+ if (phdr->p_paddr + phdr->p_memsz > max_addr)
+ max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
+ }
+
+ return min_addr < max_addr ? max_addr - min_addr : -EINVAL;
+}
+EXPORT_SYMBOL_GPL(qcom_mdt_get_size);
+
+static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
+ const char *firmware, int pas_id, void *mem_region,
+ phys_addr_t mem_phys, size_t mem_size,
+ phys_addr_t *reloc_base, bool pas_init)
+{
+ const struct elf32_phdr *phdrs;
+ const struct elf32_phdr *phdr;
+ const struct elf32_hdr *ehdr;
+ const struct firmware *seg_fw;
+ phys_addr_t mem_reloc;
+ phys_addr_t min_addr = PHYS_ADDR_MAX;
+ phys_addr_t max_addr = 0;
+ size_t fw_name_len;
+ ssize_t offset;
+ char *fw_name;
+ bool relocate = false;
+ void *ptr;
+ int ret;
+ int i;
+
+ if (!fw || !mem_region || !mem_phys || !mem_size)
+ return -EINVAL;
+
+ ehdr = (struct elf32_hdr *)fw->data;
+ phdrs = (struct elf32_phdr *)(ehdr + 1);
+
+ fw_name_len = strlen(firmware);
+ if (fw_name_len <= 4)
+ return -EINVAL;
+
+ fw_name = kstrdup(firmware, GFP_KERNEL);
+ if (!fw_name)
+ return -ENOMEM;
+
+ if (pas_init) {
+ ret = qcom_scm_pas_init_image(pas_id, fw->data, fw->size);
+ if (ret) {
+ dev_err(dev, "invalid firmware metadata\n");
+ goto out;
+ }
+ }
+
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ phdr = &phdrs[i];
+
+ if (!mdt_phdr_valid(phdr))
+ continue;
+
+ if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
+ relocate = true;
+
+ if (phdr->p_paddr < min_addr)
+ min_addr = phdr->p_paddr;
+
+ if (phdr->p_paddr + phdr->p_memsz > max_addr)
+ max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
+ }
+
+ if (relocate) {
+ if (pas_init) {
+ ret = qcom_scm_pas_mem_setup(pas_id, mem_phys,
+ max_addr - min_addr);
+ if (ret) {
+ dev_err(dev, "unable to setup relocation\n");
+ goto out;
+ }
+ }
+
+ /*
+ * The image is relocatable, so offset each segment based on
+ * the lowest segment address.
+ */
+ mem_reloc = min_addr;
+ } else {
+ /*
+ * Image is not relocatable, so offset each segment based on
+ * the allocated physical chunk of memory.
+ */
+ mem_reloc = mem_phys;
+ }
+
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ phdr = &phdrs[i];
+
+ if (!mdt_phdr_valid(phdr))
+ continue;
+
+ offset = phdr->p_paddr - mem_reloc;
+ if (offset < 0 || offset + phdr->p_memsz > mem_size) {
+ dev_err(dev, "segment outside memory range\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ if (phdr->p_filesz > phdr->p_memsz) {
+ dev_err(dev,
+ "refusing to load segment %d with p_filesz > p_memsz\n",
+ i);
+ ret = -EINVAL;
+ break;
+ }
+
+ ptr = mem_region + offset;
+
+ if (phdr->p_filesz) {
+ sprintf(fw_name + fw_name_len - 3, "b%02d", i);
+ ret = request_firmware_into_buf(&seg_fw, fw_name, dev,
+ ptr, phdr->p_filesz);
+ if (ret) {
+ dev_err(dev, "failed to load %s\n", fw_name);
+ break;
+ }
+
+ if (seg_fw->size != phdr->p_filesz) {
+ dev_err(dev,
+ "failed to load segment %d from truncated file %s\n",
+ i, fw_name);
+ release_firmware(seg_fw);
+ ret = -EINVAL;
+ break;
+ }
+
+ release_firmware(seg_fw);
+ }
+
+ if (phdr->p_memsz > phdr->p_filesz)
+ memset(ptr + phdr->p_filesz, 0, phdr->p_memsz - phdr->p_filesz);
+ }
+
+ if (reloc_base)
+ *reloc_base = mem_reloc;
+
+out:
+ kfree(fw_name);
+
+ return ret;
+}
+
+/**
+ * qcom_mdt_load() - load the firmware whose header is loaded as fw
+ * @dev: device handle to associate resources with
+ * @fw: firmware object for the mdt file
+ * @firmware: name of the firmware, for construction of segment file names
+ * @pas_id: PAS identifier
+ * @mem_region: allocated memory region to load firmware into
+ * @mem_phys: physical address of allocated memory region
+ * @mem_size: size of the allocated memory region
+ * @reloc_base: adjusted physical address after relocation
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+int qcom_mdt_load(struct device *dev, const struct firmware *fw,
+ const char *firmware, int pas_id, void *mem_region,
+ phys_addr_t mem_phys, size_t mem_size,
+ phys_addr_t *reloc_base)
+{
+ return __qcom_mdt_load(dev, fw, firmware, pas_id, mem_region, mem_phys,
+ mem_size, reloc_base, true);
+}
+EXPORT_SYMBOL_GPL(qcom_mdt_load);
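+
+/*
+ * A minimal sketch of loading an image from a remoteproc-style driver
+ * (the "modem.mdt" name, pas_id and the carveout variables are
+ * hypothetical; error handling abbreviated):
+ *
+ *     const struct firmware *fw;
+ *     phys_addr_t reloc_base;
+ *     ssize_t size;
+ *
+ *     ret = request_firmware(&fw, "modem.mdt", dev);
+ *     if (ret)
+ *             return ret;
+ *
+ *     size = qcom_mdt_get_size(fw);
+ *     if (size > 0 && size <= mem_size)
+ *             ret = qcom_mdt_load(dev, fw, "modem.mdt", pas_id,
+ *                                 mem_region, mem_phys, mem_size,
+ *                                 &reloc_base);
+ *     release_firmware(fw);
+ */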
+
+/**
+ * qcom_mdt_load_no_init() - load the firmware whose header is loaded as fw
+ * @dev: device handle to associate resources with
+ * @fw: firmware object for the mdt file
+ * @firmware: name of the firmware, for construction of segment file names
+ * @pas_id: PAS identifier
+ * @mem_region: allocated memory region to load firmware into
+ * @mem_phys: physical address of allocated memory region
+ * @mem_size: size of the allocated memory region
+ * @reloc_base: adjusted physical address after relocation
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw,
+ const char *firmware, int pas_id,
+ void *mem_region, phys_addr_t mem_phys,
+ size_t mem_size, phys_addr_t *reloc_base)
+{
+ return __qcom_mdt_load(dev, fw, firmware, pas_id, mem_region, mem_phys,
+ mem_size, reloc_base, false);
+}
+EXPORT_SYMBOL_GPL(qcom_mdt_load_no_init);
+
+MODULE_DESCRIPTION("Firmware parser for Qualcomm MDT format");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
new file mode 100644
index 000000000..7369b0619
--- /dev/null
+++ b/drivers/soc/qcom/qcom-geni-se.c
@@ -0,0 +1,768 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/qcom-geni-se.h>
+
+/**
+ * DOC: Overview
+ *
+ * The Generic Interface (GENI) Serial Engine (SE) wrapper driver manages the
+ * GENI-firmware-based Qualcomm Universal Peripheral (QUP) wrapper controller.
+ * The QUP wrapper is designed to support various serial bus protocols such
+ * as UART, SPI, I2C and I3C.
+ */
+
+/**
+ * DOC: Hardware description
+ *
+ * GENI-based QUP is a highly flexible and programmable module that supports
+ * a wide range of serial interfaces such as UART, SPI, I2C and I3C. A single
+ * QUP module can provide up to 8 serial interfaces, using its internal
+ * serial engines. The actual configuration is determined by the target
+ * platform configuration. The protocol supported by each interface is
+ * determined by the firmware loaded to the serial engine. Each SE consists
+ * of a DMA Engine and GENI sub modules which enable serial engines to
+ * support FIFO and DMA modes of operation.
+ *
+ *
+ * +-----------------------------------------+
+ * |QUP Wrapper |
+ * | +----------------------------+ |
+ * --QUP & SE Clocks--> | Serial Engine N | +-IO------>
+ * | | ... | | Interface
+ * <---Clock Perf.----+ +----+-----------------------+ | |
+ * State Interface | | Serial Engine 1 | | |
+ * | | | | |
+ * | | | | |
+ * <--------AHB-------> | | | |
+ * | | +----+ |
+ * | | | |
+ * | | | |
+ * <------SE IRQ------+ +----------------------------+ |
+ * | |
+ * +-----------------------------------------+
+ *
+ * Figure 1: GENI based QUP Wrapper
+ *
+ * The GENI submodules include primary and secondary sequencers, which are
+ * used to drive TX & RX operations. On serial interfaces that operate in a
+ * master-slave model, the primary sequencer drives both TX & RX operations.
+ * On serial interfaces that operate in a peer-to-peer model, the primary
+ * sequencer drives the TX operation and the secondary sequencer drives the
+ * RX operation.
+ */
+
+/**
+ * DOC: Software description
+ *
+ * The GENI SE wrapper driver is structured into two parts:
+ *
+ * geni_wrapper represents the QUP wrapper controller. This part of the
+ * driver manages QUP wrapper information, such as the hardware version and
+ * the clock performance table, that is common to all the internal serial
+ * engines.
+ *
+ * geni_se represents a serial engine. This part of the driver manages
+ * serial engine information, such as its clocks and the containing QUP
+ * wrapper. It also supports operations (e.g. initializing the concerned
+ * serial engine, selecting between FIFO and DMA modes of operation) that
+ * are common to all the serial engines and independent of the serial
+ * interfaces.
+ */
+
+#define MAX_CLK_PERF_LEVEL 32
+#define NUM_AHB_CLKS 2
+
+/**
+ * struct geni_wrapper - Data structure to represent the QUP Wrapper Core
+ * @dev: Device pointer of the QUP wrapper core
+ * @base: Base address of this instance of QUP wrapper core
+ * @ahb_clks: Handle to the primary & secondary AHB clocks
+ */
+struct geni_wrapper {
+ struct device *dev;
+ void __iomem *base;
+ struct clk_bulk_data ahb_clks[NUM_AHB_CLKS];
+};
+
+#define QUP_HW_VER_REG 0x4
+
+/* Common SE registers */
+#define GENI_INIT_CFG_REVISION 0x0
+#define GENI_S_INIT_CFG_REVISION 0x4
+#define GENI_OUTPUT_CTRL 0x24
+#define GENI_CGC_CTRL 0x28
+#define GENI_CLK_CTRL_RO 0x60
+#define GENI_IF_DISABLE_RO 0x64
+#define GENI_FW_S_REVISION_RO 0x6c
+#define SE_GENI_BYTE_GRAN 0x254
+#define SE_GENI_TX_PACKING_CFG0 0x260
+#define SE_GENI_TX_PACKING_CFG1 0x264
+#define SE_GENI_RX_PACKING_CFG0 0x284
+#define SE_GENI_RX_PACKING_CFG1 0x288
+#define SE_GENI_M_GP_LENGTH 0x910
+#define SE_GENI_S_GP_LENGTH 0x914
+#define SE_DMA_TX_PTR_L 0xc30
+#define SE_DMA_TX_PTR_H 0xc34
+#define SE_DMA_TX_ATTR 0xc38
+#define SE_DMA_TX_LEN 0xc3c
+#define SE_DMA_TX_IRQ_EN 0xc48
+#define SE_DMA_TX_IRQ_EN_SET 0xc4c
+#define SE_DMA_TX_IRQ_EN_CLR 0xc50
+#define SE_DMA_TX_LEN_IN 0xc54
+#define SE_DMA_TX_MAX_BURST 0xc5c
+#define SE_DMA_RX_PTR_L 0xd30
+#define SE_DMA_RX_PTR_H 0xd34
+#define SE_DMA_RX_ATTR 0xd38
+#define SE_DMA_RX_LEN 0xd3c
+#define SE_DMA_RX_IRQ_EN 0xd48
+#define SE_DMA_RX_IRQ_EN_SET 0xd4c
+#define SE_DMA_RX_IRQ_EN_CLR 0xd50
+#define SE_DMA_RX_LEN_IN 0xd54
+#define SE_DMA_RX_MAX_BURST 0xd5c
+#define SE_DMA_RX_FLUSH 0xd60
+#define SE_GSI_EVENT_EN 0xe18
+#define SE_IRQ_EN 0xe1c
+#define SE_DMA_GENERAL_CFG 0xe30
+
+/* GENI_OUTPUT_CTRL fields */
+#define DEFAULT_IO_OUTPUT_CTRL_MSK GENMASK(6, 0)
+
+/* GENI_CGC_CTRL fields */
+#define CFG_AHB_CLK_CGC_ON BIT(0)
+#define CFG_AHB_WR_ACLK_CGC_ON BIT(1)
+#define DATA_AHB_CLK_CGC_ON BIT(2)
+#define SCLK_CGC_ON BIT(3)
+#define TX_CLK_CGC_ON BIT(4)
+#define RX_CLK_CGC_ON BIT(5)
+#define EXT_CLK_CGC_ON BIT(6)
+#define PROG_RAM_HCLK_OFF BIT(8)
+#define PROG_RAM_SCLK_OFF BIT(9)
+#define DEFAULT_CGC_EN GENMASK(6, 0)
+
+/* SE_GSI_EVENT_EN fields */
+#define DMA_RX_EVENT_EN BIT(0)
+#define DMA_TX_EVENT_EN BIT(1)
+#define GENI_M_EVENT_EN BIT(2)
+#define GENI_S_EVENT_EN BIT(3)
+
+/* SE_IRQ_EN fields */
+#define DMA_RX_IRQ_EN BIT(0)
+#define DMA_TX_IRQ_EN BIT(1)
+#define GENI_M_IRQ_EN BIT(2)
+#define GENI_S_IRQ_EN BIT(3)
+
+/* SE_DMA_GENERAL_CFG */
+#define DMA_RX_CLK_CGC_ON BIT(0)
+#define DMA_TX_CLK_CGC_ON BIT(1)
+#define DMA_AHB_SLV_CFG_ON BIT(2)
+#define AHB_SEC_SLV_CLK_CGC_ON BIT(3)
+#define DUMMY_RX_NON_BUFFERABLE BIT(4)
+#define RX_DMA_ZERO_PADDING_EN BIT(5)
+#define RX_DMA_IRQ_DELAY_MSK GENMASK(8, 6)
+#define RX_DMA_IRQ_DELAY_SHFT 6
+
+/**
+ * geni_se_get_qup_hw_version() - Read the QUP wrapper Hardware version
+ * @se: Pointer to the corresponding serial engine.
+ *
+ * Return: Hardware Version of the wrapper.
+ */
+u32 geni_se_get_qup_hw_version(struct geni_se *se)
+{
+ struct geni_wrapper *wrapper = se->wrapper;
+
+ return readl_relaxed(wrapper->base + QUP_HW_VER_REG);
+}
+EXPORT_SYMBOL(geni_se_get_qup_hw_version);
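+
+/*
+ * Example (illustrative sketch): a protocol driver can use the version to
+ * cope with hardware revision differences, assuming the GENI_SE_VERSION_*
+ * field helpers declared in <linux/qcom-geni-se.h>:
+ *
+ *	u32 ver = geni_se_get_qup_hw_version(se);
+ *	unsigned int major = GENI_SE_VERSION_MAJOR(ver);
+ *	unsigned int minor = GENI_SE_VERSION_MINOR(ver);
+ *
+ *	if (major == 1 && minor == 0)
+ *		apply_rev_specific_quirk();	(hypothetical helper)
+ */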
+
+static void geni_se_io_set_mode(void __iomem *base)
+{
+ u32 val;
+
+ val = readl_relaxed(base + SE_IRQ_EN);
+ val |= GENI_M_IRQ_EN | GENI_S_IRQ_EN;
+ val |= DMA_TX_IRQ_EN | DMA_RX_IRQ_EN;
+ writel_relaxed(val, base + SE_IRQ_EN);
+
+ val = readl_relaxed(base + SE_GENI_DMA_MODE_EN);
+ val &= ~GENI_DMA_MODE_EN;
+ writel_relaxed(val, base + SE_GENI_DMA_MODE_EN);
+
+ writel_relaxed(0, base + SE_GSI_EVENT_EN);
+}
+
+static void geni_se_io_init(void __iomem *base)
+{
+ u32 val;
+
+ val = readl_relaxed(base + GENI_CGC_CTRL);
+ val |= DEFAULT_CGC_EN;
+ writel_relaxed(val, base + GENI_CGC_CTRL);
+
+ val = readl_relaxed(base + SE_DMA_GENERAL_CFG);
+ val |= AHB_SEC_SLV_CLK_CGC_ON | DMA_AHB_SLV_CFG_ON;
+ val |= DMA_TX_CLK_CGC_ON | DMA_RX_CLK_CGC_ON;
+ writel_relaxed(val, base + SE_DMA_GENERAL_CFG);
+
+ writel_relaxed(DEFAULT_IO_OUTPUT_CTRL_MSK, base + GENI_OUTPUT_CTRL);
+ writel_relaxed(FORCE_DEFAULT, base + GENI_FORCE_DEFAULT_REG);
+}
+
+/**
+ * geni_se_init() - Initialize the GENI serial engine
+ * @se: Pointer to the concerned serial engine.
+ * @rx_wm: Receive watermark, in units of FIFO words.
+ * @rx_rfr: Ready-for-receive watermark, in units of FIFO words.
+ *
+ * This function is used to initialize the GENI serial engine and configure
+ * the receive and ready-for-receive watermarks.
+ */
+void geni_se_init(struct geni_se *se, u32 rx_wm, u32 rx_rfr)
+{
+ u32 val;
+
+ geni_se_io_init(se->base);
+ geni_se_io_set_mode(se->base);
+
+ writel_relaxed(rx_wm, se->base + SE_GENI_RX_WATERMARK_REG);
+ writel_relaxed(rx_rfr, se->base + SE_GENI_RX_RFR_WATERMARK_REG);
+
+ val = readl_relaxed(se->base + SE_GENI_M_IRQ_EN);
+ val |= M_COMMON_GENI_M_IRQ_EN;
+ writel_relaxed(val, se->base + SE_GENI_M_IRQ_EN);
+
+ val = readl_relaxed(se->base + SE_GENI_S_IRQ_EN);
+ val |= S_COMMON_GENI_S_IRQ_EN;
+ writel_relaxed(val, se->base + SE_GENI_S_IRQ_EN);
+}
+EXPORT_SYMBOL(geni_se_init);
+
+static void geni_se_select_fifo_mode(struct geni_se *se)
+{
+ u32 proto = geni_se_read_proto(se);
+ u32 val;
+
+ writel_relaxed(0, se->base + SE_GSI_EVENT_EN);
+ writel_relaxed(0xffffffff, se->base + SE_GENI_M_IRQ_CLEAR);
+ writel_relaxed(0xffffffff, se->base + SE_GENI_S_IRQ_CLEAR);
+ writel_relaxed(0xffffffff, se->base + SE_DMA_TX_IRQ_CLR);
+ writel_relaxed(0xffffffff, se->base + SE_DMA_RX_IRQ_CLR);
+ writel_relaxed(0xffffffff, se->base + SE_IRQ_EN);
+
+ val = readl_relaxed(se->base + SE_GENI_M_IRQ_EN);
+ if (proto != GENI_SE_UART) {
+ val |= M_CMD_DONE_EN | M_TX_FIFO_WATERMARK_EN;
+ val |= M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN;
+ }
+ writel_relaxed(val, se->base + SE_GENI_M_IRQ_EN);
+
+ val = readl_relaxed(se->base + SE_GENI_S_IRQ_EN);
+ if (proto != GENI_SE_UART)
+ val |= S_CMD_DONE_EN;
+ writel_relaxed(val, se->base + SE_GENI_S_IRQ_EN);
+
+ val = readl_relaxed(se->base + SE_GENI_DMA_MODE_EN);
+ val &= ~GENI_DMA_MODE_EN;
+ writel_relaxed(val, se->base + SE_GENI_DMA_MODE_EN);
+}
+
+static void geni_se_select_dma_mode(struct geni_se *se)
+{
+ u32 proto = geni_se_read_proto(se);
+ u32 val;
+
+ writel_relaxed(0, se->base + SE_GSI_EVENT_EN);
+ writel_relaxed(0xffffffff, se->base + SE_GENI_M_IRQ_CLEAR);
+ writel_relaxed(0xffffffff, se->base + SE_GENI_S_IRQ_CLEAR);
+ writel_relaxed(0xffffffff, se->base + SE_DMA_TX_IRQ_CLR);
+ writel_relaxed(0xffffffff, se->base + SE_DMA_RX_IRQ_CLR);
+ writel_relaxed(0xffffffff, se->base + SE_IRQ_EN);
+
+ val = readl_relaxed(se->base + SE_GENI_M_IRQ_EN);
+ if (proto != GENI_SE_UART) {
+ val &= ~(M_CMD_DONE_EN | M_TX_FIFO_WATERMARK_EN);
+ val &= ~(M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN);
+ }
+ writel_relaxed(val, se->base + SE_GENI_M_IRQ_EN);
+
+ val = readl_relaxed(se->base + SE_GENI_S_IRQ_EN);
+ if (proto != GENI_SE_UART)
+ val &= ~S_CMD_DONE_EN;
+ writel_relaxed(val, se->base + SE_GENI_S_IRQ_EN);
+
+ val = readl_relaxed(se->base + SE_GENI_DMA_MODE_EN);
+ val |= GENI_DMA_MODE_EN;
+ writel_relaxed(val, se->base + SE_GENI_DMA_MODE_EN);
+}
+
+/**
+ * geni_se_select_mode() - Select the serial engine transfer mode
+ * @se: Pointer to the concerned serial engine.
+ * @mode: Transfer mode to be selected.
+ */
+void geni_se_select_mode(struct geni_se *se, enum geni_se_xfer_mode mode)
+{
+ WARN_ON(mode != GENI_SE_FIFO && mode != GENI_SE_DMA);
+
+ switch (mode) {
+ case GENI_SE_FIFO:
+ geni_se_select_fifo_mode(se);
+ break;
+ case GENI_SE_DMA:
+ geni_se_select_dma_mode(se);
+ break;
+ case GENI_SE_INVALID:
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL(geni_se_select_mode);
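+
+/*
+ * Example (illustrative only): a typical serial engine bring-up sequence in
+ * a protocol driver, assuming @se was already populated from the wrapper.
+ * The watermark values are hypothetical:
+ *
+ *	ret = geni_se_resources_on(se);
+ *	if (ret)
+ *		return ret;
+ *	geni_se_init(se, 1, 16);	(rx_wm = 1, rx_rfr = 16)
+ *	geni_se_select_mode(se, GENI_SE_FIFO);
+ */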
+
+/**
+ * DOC: FIFO packing configuration
+ *
+ * GENI FIFO packing is highly configurable. TX/RX packing/unpacking consists
+ * of up to 4 operations, each described by a 10-bit configuration vector. The
+ * four vectors are programmed in GENI_TX_PACKING_CFG0 and GENI_TX_PACKING_CFG1
+ * for the TX FIFO and in GENI_RX_PACKING_CFG0 and GENI_RX_PACKING_CFG1 for
+ * the RX FIFO. Refer to the examples below for a detailed bit-field
+ * description.
+ *
+ * Example 1: word_size = 7, packing_mode = 4 x 8, msb_to_lsb = 1
+ *
+ * +-----------+-------+-------+-------+-------+
+ * | | vec_0 | vec_1 | vec_2 | vec_3 |
+ * +-----------+-------+-------+-------+-------+
+ * | start | 0x6 | 0xe | 0x16 | 0x1e |
+ * | direction | 1 | 1 | 1 | 1 |
+ * | length | 6 | 6 | 6 | 6 |
+ * | stop | 0 | 0 | 0 | 1 |
+ * +-----------+-------+-------+-------+-------+
+ *
+ * Example 2: word_size = 15, packing_mode = 2 x 16, msb_to_lsb = 0
+ *
+ * +-----------+-------+-------+-------+-------+
+ * | | vec_0 | vec_1 | vec_2 | vec_3 |
+ * +-----------+-------+-------+-------+-------+
+ * | start | 0x0 | 0x8 | 0x10 | 0x18 |
+ * | direction | 0 | 0 | 0 | 0 |
+ * | length | 7 | 6 | 7 | 6 |
+ * | stop | 0 | 0 | 0 | 1 |
+ * +-----------+-------+-------+-------+-------+
+ *
+ * Example 3: word_size = 23, packing_mode = 1 x 32, msb_to_lsb = 1
+ *
+ * +-----------+-------+-------+-------+-------+
+ * | | vec_0 | vec_1 | vec_2 | vec_3 |
+ * +-----------+-------+-------+-------+-------+
+ * | start | 0x16 | 0xe | 0x6 | 0x0 |
+ * | direction | 1 | 1 | 1 | 1 |
+ * | length | 7 | 7 | 6 | 0 |
+ * | stop | 0 | 0 | 1 | 0 |
+ * +-----------+-------+-------+-------+-------+
+ *
+ */
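+
+/*
+ * Each 10-bit configuration vector is laid out as start[9:5], direction[4],
+ * length[3:1] and stop[0], matching the PACKING_* definitions below. Working
+ * through Example 1 above: with word_size = 7 and msb_to_lsb = 1, vec_0 has
+ * start = 0x6 (the index of the word's most significant bit), direction = 1
+ * (MSB to LSB), length = 6 (word_size - 1) and stop = 0, so it encodes as
+ * (0x6 << 5) | (1 << 4) | (6 << 1) = 0xdc.
+ */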
+
+#define NUM_PACKING_VECTORS 4
+#define PACKING_START_SHIFT 5
+#define PACKING_DIR_SHIFT 4
+#define PACKING_LEN_SHIFT 1
+#define PACKING_STOP_BIT BIT(0)
+#define PACKING_VECTOR_SHIFT 10
+/**
+ * geni_se_config_packing() - Packing configuration of the serial engine
+ * @se: Pointer to the concerned serial engine
+ * @bpw: Bits of data per transfer word.
+ * @pack_words: Number of words per FIFO element.
+ * @msb_to_lsb: Transfer from MSB to LSB or vice-versa.
+ * @tx_cfg: Flag to configure the TX Packing.
+ * @rx_cfg: Flag to configure the RX Packing.
+ *
+ * This function is used to configure the packing rules for the current
+ * transfer.
+ */
+void geni_se_config_packing(struct geni_se *se, int bpw, int pack_words,
+ bool msb_to_lsb, bool tx_cfg, bool rx_cfg)
+{
+ u32 cfg0, cfg1, cfg[NUM_PACKING_VECTORS] = {0};
+ int len;
+ int temp_bpw = bpw;
+ int idx_start = msb_to_lsb ? bpw - 1 : 0;
+ int idx = idx_start;
+ int idx_delta = msb_to_lsb ? -BITS_PER_BYTE : BITS_PER_BYTE;
+ int ceil_bpw = ALIGN(bpw, BITS_PER_BYTE);
+ int iter = (ceil_bpw * pack_words) / BITS_PER_BYTE;
+ int i;
+
+ if (iter <= 0 || iter > NUM_PACKING_VECTORS)
+ return;
+
+ for (i = 0; i < iter; i++) {
+ len = min_t(int, temp_bpw, BITS_PER_BYTE) - 1;
+ cfg[i] = idx << PACKING_START_SHIFT;
+ cfg[i] |= msb_to_lsb << PACKING_DIR_SHIFT;
+ cfg[i] |= len << PACKING_LEN_SHIFT;
+
+ if (temp_bpw <= BITS_PER_BYTE) {
+ idx = ((i + 1) * BITS_PER_BYTE) + idx_start;
+ temp_bpw = bpw;
+ } else {
+ idx = idx + idx_delta;
+ temp_bpw = temp_bpw - BITS_PER_BYTE;
+ }
+ }
+ cfg[iter - 1] |= PACKING_STOP_BIT;
+ cfg0 = cfg[0] | (cfg[1] << PACKING_VECTOR_SHIFT);
+ cfg1 = cfg[2] | (cfg[3] << PACKING_VECTOR_SHIFT);
+
+ if (tx_cfg) {
+ writel_relaxed(cfg0, se->base + SE_GENI_TX_PACKING_CFG0);
+ writel_relaxed(cfg1, se->base + SE_GENI_TX_PACKING_CFG1);
+ }
+ if (rx_cfg) {
+ writel_relaxed(cfg0, se->base + SE_GENI_RX_PACKING_CFG0);
+ writel_relaxed(cfg1, se->base + SE_GENI_RX_PACKING_CFG1);
+ }
+
+ /*
+ * Number of protocol words in each FIFO entry
+ * 0 - 4x8, four words in each entry, max word size of 8 bits
+ * 1 - 2x16, two words in each entry, max word size of 16 bits
+ * 2 - 1x32, one word in each entry, max word size of 32 bits
+ * 3 - undefined
+ */
+ if (pack_words || bpw == 32)
+ writel_relaxed(bpw / 16, se->base + SE_GENI_BYTE_GRAN);
+}
+EXPORT_SYMBOL(geni_se_config_packing);
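+
+/*
+ * Example (illustrative only): an SPI protocol driver transferring 8-bit
+ * words, packed four to a FIFO entry, MSB first, in both directions:
+ *
+ *	geni_se_config_packing(se, BITS_PER_BYTE, 4, true, true, true);
+ *
+ * This corresponds to the 4 x 8 packing mode described above.
+ */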
+
+static void geni_se_clks_off(struct geni_se *se)
+{
+ struct geni_wrapper *wrapper = se->wrapper;
+
+ clk_disable_unprepare(se->clk);
+ clk_bulk_disable_unprepare(ARRAY_SIZE(wrapper->ahb_clks),
+ wrapper->ahb_clks);
+}
+
+/**
+ * geni_se_resources_off() - Turn off resources associated with the serial
+ * engine
+ * @se: Pointer to the concerned serial engine.
+ *
+ * Return: 0 on success, standard Linux error codes on failure/error.
+ */
+int geni_se_resources_off(struct geni_se *se)
+{
+ int ret;
+
+ ret = pinctrl_pm_select_sleep_state(se->dev);
+ if (ret)
+ return ret;
+
+ geni_se_clks_off(se);
+ return 0;
+}
+EXPORT_SYMBOL(geni_se_resources_off);
+
+static int geni_se_clks_on(struct geni_se *se)
+{
+ int ret;
+ struct geni_wrapper *wrapper = se->wrapper;
+
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(wrapper->ahb_clks),
+ wrapper->ahb_clks);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(se->clk);
+ if (ret)
+ clk_bulk_disable_unprepare(ARRAY_SIZE(wrapper->ahb_clks),
+ wrapper->ahb_clks);
+ return ret;
+}
+
+/**
+ * geni_se_resources_on() - Turn on resources associated with the serial
+ * engine
+ * @se: Pointer to the concerned serial engine.
+ *
+ * Return: 0 on success, standard Linux error codes on failure/error.
+ */
+int geni_se_resources_on(struct geni_se *se)
+{
+ int ret;
+
+ ret = geni_se_clks_on(se);
+ if (ret)
+ return ret;
+
+ ret = pinctrl_pm_select_default_state(se->dev);
+ if (ret)
+ geni_se_clks_off(se);
+
+ return ret;
+}
+EXPORT_SYMBOL(geni_se_resources_on);
+
+/**
+ * geni_se_clk_tbl_get() - Get the clock table to program DFS
+ * @se: Pointer to the concerned serial engine.
+ * @tbl: Table in which the output is returned.
+ *
+ * This function is called by the protocol drivers to determine the different
+ * clock frequencies supported by the serial engine core clock. The protocol
+ * drivers use the output to determine the clock frequency index to be
+ * programmed into DFS.
+ *
+ * Return: number of valid performance levels in the table on success,
+ * standard Linux error codes on failure.
+ */
+int geni_se_clk_tbl_get(struct geni_se *se, unsigned long **tbl)
+{
+ long freq = 0;
+ int i;
+
+ if (se->clk_perf_tbl) {
+ *tbl = se->clk_perf_tbl;
+ return se->num_clk_levels;
+ }
+
+ se->clk_perf_tbl = devm_kcalloc(se->dev, MAX_CLK_PERF_LEVEL,
+ sizeof(*se->clk_perf_tbl),
+ GFP_KERNEL);
+ if (!se->clk_perf_tbl)
+ return -ENOMEM;
+
+ for (i = 0; i < MAX_CLK_PERF_LEVEL; i++) {
+ freq = clk_round_rate(se->clk, freq + 1);
+		/* Stop on error or when the rounded rate stops increasing */
+		if (freq <= 0 || (i > 0 && freq == se->clk_perf_tbl[i - 1]))
+ break;
+ se->clk_perf_tbl[i] = freq;
+ }
+ se->num_clk_levels = i;
+ *tbl = se->clk_perf_tbl;
+ return se->num_clk_levels;
+}
+EXPORT_SYMBOL(geni_se_clk_tbl_get);
+
+/**
+ * geni_se_clk_freq_match() - Get the matching or closest SE clock frequency
+ * @se: Pointer to the concerned serial engine.
+ * @req_freq: Requested clock frequency.
+ * @index: Index of the resultant frequency in the table.
+ * @res_freq: Resultant frequency of the source clock.
+ * @exact: Flag to indicate exact multiple requirement of the requested
+ * frequency.
+ *
+ * This function is called by the protocol drivers to determine the best match
+ * of the requested frequency as provided by the serial engine clock in order
+ * to meet the performance requirements.
+ *
+ * If we return success:
+ * - if @exact is true then @res_freq / <an_integer> == @req_freq
+ * - if @exact is false then @res_freq / <an_integer> <= @req_freq
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int geni_se_clk_freq_match(struct geni_se *se, unsigned long req_freq,
+ unsigned int *index, unsigned long *res_freq,
+ bool exact)
+{
+ unsigned long *tbl;
+ int num_clk_levels;
+ int i;
+ unsigned long best_delta;
+ unsigned long new_delta;
+ unsigned int divider;
+
+ num_clk_levels = geni_se_clk_tbl_get(se, &tbl);
+ if (num_clk_levels < 0)
+ return num_clk_levels;
+
+ if (num_clk_levels == 0)
+ return -EINVAL;
+
+ best_delta = ULONG_MAX;
+ for (i = 0; i < num_clk_levels; i++) {
+ divider = DIV_ROUND_UP(tbl[i], req_freq);
+ new_delta = req_freq - tbl[i] / divider;
+ if (new_delta < best_delta) {
+ /* We have a new best! */
+ *index = i;
+ *res_freq = tbl[i];
+
+ /* If the new best is exact then we're done */
+ if (new_delta == 0)
+ return 0;
+
+ /* Record how close we got */
+ best_delta = new_delta;
+ }
+ }
+
+ if (exact)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(geni_se_clk_freq_match);
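+
+/*
+ * Example (illustrative sketch): a protocol driver that needs a 19.2 MHz
+ * serial clock can look up the closest supported source frequency and use
+ * the returned index for its DFS programming (the DFS register itself is
+ * protocol specific and not shown):
+ *
+ *	unsigned int idx;
+ *	unsigned long sclk;
+ *
+ *	ret = geni_se_clk_freq_match(se, 19200000, &idx, &sclk, false);
+ *	if (ret)
+ *		return ret;
+ *	div = DIV_ROUND_UP(sclk, 19200000);	(internal clock divider)
+ */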
+
+#define GENI_SE_DMA_DONE_EN BIT(0)
+#define GENI_SE_DMA_EOT_EN BIT(1)
+#define GENI_SE_DMA_AHB_ERR_EN BIT(2)
+#define GENI_SE_DMA_EOT_BUF BIT(0)
+/**
+ * geni_se_tx_dma_prep() - Prepare the serial engine for TX DMA transfer
+ * @se: Pointer to the concerned serial engine.
+ * @buf: Pointer to the TX buffer.
+ * @len: Length of the TX buffer.
+ * @iova: Pointer to store the mapped DMA address.
+ *
+ * This function is used to prepare the buffers for DMA TX.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int geni_se_tx_dma_prep(struct geni_se *se, void *buf, size_t len,
+ dma_addr_t *iova)
+{
+ struct geni_wrapper *wrapper = se->wrapper;
+ u32 val;
+
+ *iova = dma_map_single(wrapper->dev, buf, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(wrapper->dev, *iova))
+ return -EIO;
+
+ val = GENI_SE_DMA_DONE_EN;
+ val |= GENI_SE_DMA_EOT_EN;
+ val |= GENI_SE_DMA_AHB_ERR_EN;
+ writel_relaxed(val, se->base + SE_DMA_TX_IRQ_EN_SET);
+ writel_relaxed(lower_32_bits(*iova), se->base + SE_DMA_TX_PTR_L);
+ writel_relaxed(upper_32_bits(*iova), se->base + SE_DMA_TX_PTR_H);
+ writel_relaxed(GENI_SE_DMA_EOT_BUF, se->base + SE_DMA_TX_ATTR);
+ writel(len, se->base + SE_DMA_TX_LEN);
+ return 0;
+}
+EXPORT_SYMBOL(geni_se_tx_dma_prep);
+
+/**
+ * geni_se_rx_dma_prep() - Prepare the serial engine for RX DMA transfer
+ * @se: Pointer to the concerned serial engine.
+ * @buf: Pointer to the RX buffer.
+ * @len: Length of the RX buffer.
+ * @iova: Pointer to store the mapped DMA address.
+ *
+ * This function is used to prepare the buffers for DMA RX.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int geni_se_rx_dma_prep(struct geni_se *se, void *buf, size_t len,
+ dma_addr_t *iova)
+{
+ struct geni_wrapper *wrapper = se->wrapper;
+ u32 val;
+
+ *iova = dma_map_single(wrapper->dev, buf, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(wrapper->dev, *iova))
+ return -EIO;
+
+ val = GENI_SE_DMA_DONE_EN;
+ val |= GENI_SE_DMA_EOT_EN;
+ val |= GENI_SE_DMA_AHB_ERR_EN;
+ writel_relaxed(val, se->base + SE_DMA_RX_IRQ_EN_SET);
+ writel_relaxed(lower_32_bits(*iova), se->base + SE_DMA_RX_PTR_L);
+ writel_relaxed(upper_32_bits(*iova), se->base + SE_DMA_RX_PTR_H);
+ /* RX does not have EOT buffer type bit. So just reset RX_ATTR */
+ writel_relaxed(0, se->base + SE_DMA_RX_ATTR);
+ writel(len, se->base + SE_DMA_RX_LEN);
+ return 0;
+}
+EXPORT_SYMBOL(geni_se_rx_dma_prep);
+
+/**
+ * geni_se_tx_dma_unprep() - Unprepare the serial engine after TX DMA transfer
+ * @se: Pointer to the concerned serial engine.
+ * @iova: DMA address of the TX buffer.
+ * @len: Length of the TX buffer.
+ *
+ * This function is used to unprepare the DMA buffers after DMA TX.
+ */
+void geni_se_tx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len)
+{
+ struct geni_wrapper *wrapper = se->wrapper;
+
+ if (iova && !dma_mapping_error(wrapper->dev, iova))
+ dma_unmap_single(wrapper->dev, iova, len, DMA_TO_DEVICE);
+}
+EXPORT_SYMBOL(geni_se_tx_dma_unprep);
+
+/**
+ * geni_se_rx_dma_unprep() - Unprepare the serial engine after RX DMA transfer
+ * @se: Pointer to the concerned serial engine.
+ * @iova: DMA address of the RX buffer.
+ * @len: Length of the RX buffer.
+ *
+ * This function is used to unprepare the DMA buffers after DMA RX.
+ */
+void geni_se_rx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len)
+{
+ struct geni_wrapper *wrapper = se->wrapper;
+
+ if (iova && !dma_mapping_error(wrapper->dev, iova))
+ dma_unmap_single(wrapper->dev, iova, len, DMA_FROM_DEVICE);
+}
+EXPORT_SYMBOL(geni_se_rx_dma_unprep);
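+
+/*
+ * Example (illustrative only): the DMA prep and unprep helpers are used as
+ * a pair around a transfer. A TX path would look roughly like:
+ *
+ *	dma_addr_t iova;
+ *
+ *	ret = geni_se_tx_dma_prep(se, buf, len, &iova);
+ *	if (ret)
+ *		return ret;
+ *	...wait for the DMA done interrupt...
+ *	geni_se_tx_dma_unprep(se, iova, len);
+ */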
+
+static int geni_se_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct geni_wrapper *wrapper;
+ int ret;
+
+ wrapper = devm_kzalloc(dev, sizeof(*wrapper), GFP_KERNEL);
+ if (!wrapper)
+ return -ENOMEM;
+
+ wrapper->dev = dev;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ wrapper->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(wrapper->base))
+ return PTR_ERR(wrapper->base);
+
+ wrapper->ahb_clks[0].id = "m-ahb";
+ wrapper->ahb_clks[1].id = "s-ahb";
+ ret = devm_clk_bulk_get(dev, NUM_AHB_CLKS, wrapper->ahb_clks);
+ if (ret) {
+ dev_err(dev, "Err getting AHB clks %d\n", ret);
+ return ret;
+ }
+
+ dev_set_drvdata(dev, wrapper);
+ dev_dbg(dev, "GENI SE Driver probed\n");
+ return devm_of_platform_populate(dev);
+}
+
+static const struct of_device_id geni_se_dt_match[] = {
+ { .compatible = "qcom,geni-se-qup", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, geni_se_dt_match);
+
+static struct platform_driver geni_se_driver = {
+ .driver = {
+ .name = "geni_se_qup",
+ .of_match_table = geni_se_dt_match,
+ },
+ .probe = geni_se_probe,
+};
+module_platform_driver(geni_se_driver);
+
+MODULE_DESCRIPTION("GENI Serial Engine Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/qcom_gsbi.c b/drivers/soc/qcom/qcom_gsbi.c
new file mode 100644
index 000000000..038abc377
--- /dev/null
+++ b/drivers/soc/qcom/qcom_gsbi.c
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <dt-bindings/soc/qcom,gsbi.h>
+
+#define GSBI_CTRL_REG 0x0000
+#define GSBI_PROTOCOL_SHIFT 4
+#define MAX_GSBI 12
+
+#define TCSR_ADM_CRCI_BASE 0x70
+
+struct crci_config {
+ u32 num_rows;
+ const u32 (*array)[MAX_GSBI];
+};
+
+static const u32 crci_ipq8064[][MAX_GSBI] = {
+ {
+ 0x000003, 0x00000c, 0x000030, 0x0000c0,
+ 0x000300, 0x000c00, 0x003000, 0x00c000,
+ 0x030000, 0x0c0000, 0x300000, 0xc00000
+ },
+ {
+ 0x000003, 0x00000c, 0x000030, 0x0000c0,
+ 0x000300, 0x000c00, 0x003000, 0x00c000,
+ 0x030000, 0x0c0000, 0x300000, 0xc00000
+ },
+};
+
+static const struct crci_config config_ipq8064 = {
+ .num_rows = ARRAY_SIZE(crci_ipq8064),
+ .array = crci_ipq8064,
+};
+
+static const unsigned int crci_apq8064[][MAX_GSBI] = {
+ {
+ 0x001800, 0x006000, 0x000030, 0x0000c0,
+ 0x000300, 0x000400, 0x000000, 0x000000,
+ 0x000000, 0x000000, 0x000000, 0x000000
+ },
+ {
+ 0x000000, 0x000000, 0x000000, 0x000000,
+ 0x000000, 0x000020, 0x0000c0, 0x000000,
+ 0x000000, 0x000000, 0x000000, 0x000000
+ },
+};
+
+static const struct crci_config config_apq8064 = {
+ .num_rows = ARRAY_SIZE(crci_apq8064),
+ .array = crci_apq8064,
+};
+
+static const unsigned int crci_msm8960[][MAX_GSBI] = {
+ {
+ 0x000003, 0x00000c, 0x000030, 0x0000c0,
+ 0x000300, 0x000400, 0x000000, 0x000000,
+ 0x000000, 0x000000, 0x000000, 0x000000
+ },
+ {
+ 0x000000, 0x000000, 0x000000, 0x000000,
+ 0x000000, 0x000020, 0x0000c0, 0x000300,
+ 0x001800, 0x006000, 0x000000, 0x000000
+ },
+};
+
+static const struct crci_config config_msm8960 = {
+ .num_rows = ARRAY_SIZE(crci_msm8960),
+ .array = crci_msm8960,
+};
+
+static const unsigned int crci_msm8660[][MAX_GSBI] = {
+	{ /* ADM 0 - A */
+ 0x000003, 0x00000c, 0x000030, 0x0000c0,
+ 0x000300, 0x000c00, 0x003000, 0x00c000,
+ 0x030000, 0x0c0000, 0x300000, 0xc00000
+ },
+ { /* ADM 0 - B */
+ 0x000003, 0x00000c, 0x000030, 0x0000c0,
+ 0x000300, 0x000c00, 0x003000, 0x00c000,
+ 0x030000, 0x0c0000, 0x300000, 0xc00000
+ },
+ { /* ADM 1 - A */
+ 0x000003, 0x00000c, 0x000030, 0x0000c0,
+ 0x000300, 0x000c00, 0x003000, 0x00c000,
+ 0x030000, 0x0c0000, 0x300000, 0xc00000
+ },
+ { /* ADM 1 - B */
+ 0x000003, 0x00000c, 0x000030, 0x0000c0,
+ 0x000300, 0x000c00, 0x003000, 0x00c000,
+ 0x030000, 0x0c0000, 0x300000, 0xc00000
+ },
+};
+
+static const struct crci_config config_msm8660 = {
+ .num_rows = ARRAY_SIZE(crci_msm8660),
+ .array = crci_msm8660,
+};
+
+struct gsbi_info {
+ struct clk *hclk;
+ u32 mode;
+ u32 crci;
+ struct regmap *tcsr;
+};
+
+static const struct of_device_id tcsr_dt_match[] = {
+ { .compatible = "qcom,tcsr-ipq8064", .data = &config_ipq8064},
+ { .compatible = "qcom,tcsr-apq8064", .data = &config_apq8064},
+ { .compatible = "qcom,tcsr-msm8960", .data = &config_msm8960},
+ { .compatible = "qcom,tcsr-msm8660", .data = &config_msm8660},
+ { },
+};
+
+static int gsbi_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *tcsr_node;
+ const struct of_device_id *match;
+ struct resource *res;
+ void __iomem *base;
+ struct gsbi_info *gsbi;
+ int i, ret;
+ u32 mask, gsbi_num;
+ const struct crci_config *config = NULL;
+
+ gsbi = devm_kzalloc(&pdev->dev, sizeof(*gsbi), GFP_KERNEL);
+
+ if (!gsbi)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ /* get the tcsr node and setup the config and regmap */
+ gsbi->tcsr = syscon_regmap_lookup_by_phandle(node, "syscon-tcsr");
+
+ if (!IS_ERR(gsbi->tcsr)) {
+ tcsr_node = of_parse_phandle(node, "syscon-tcsr", 0);
+ if (tcsr_node) {
+ match = of_match_node(tcsr_dt_match, tcsr_node);
+ if (match)
+ config = match->data;
+ else
+ dev_warn(&pdev->dev, "no matching TCSR\n");
+
+ of_node_put(tcsr_node);
+ }
+ }
+
+ if (of_property_read_u32(node, "cell-index", &gsbi_num)) {
+ dev_err(&pdev->dev, "missing cell-index\n");
+ return -EINVAL;
+ }
+
+ if (gsbi_num < 1 || gsbi_num > MAX_GSBI) {
+ dev_err(&pdev->dev, "invalid cell-index\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32(node, "qcom,mode", &gsbi->mode)) {
+ dev_err(&pdev->dev, "missing mode configuration\n");
+ return -EINVAL;
+ }
+
+ /* not required, so default to 0 if not present */
+ of_property_read_u32(node, "qcom,crci", &gsbi->crci);
+
+ dev_info(&pdev->dev, "GSBI port protocol: %d crci: %d\n",
+ gsbi->mode, gsbi->crci);
+ gsbi->hclk = devm_clk_get(&pdev->dev, "iface");
+ if (IS_ERR(gsbi->hclk))
+ return PTR_ERR(gsbi->hclk);
+
+	ret = clk_prepare_enable(gsbi->hclk);
+	if (ret)
+		return ret;
+
+ writel_relaxed((gsbi->mode << GSBI_PROTOCOL_SHIFT) | gsbi->crci,
+ base + GSBI_CTRL_REG);
+
+ /*
+	 * Modify the TCSR to reflect the protocol mode and the ADM CRCI mux.
+	 * Each GSBI contributes a pair of bits, one for RX and one for TX.
+	 * SPI mode requires both bits cleared; otherwise they are set.
+ */
+ if (config) {
+ for (i = 0; i < config->num_rows; i++) {
+ mask = config->array[i][gsbi_num - 1];
+
+ if (gsbi->mode == GSBI_PROT_SPI)
+ regmap_update_bits(gsbi->tcsr,
+ TCSR_ADM_CRCI_BASE + 4 * i, mask, 0);
+ else
+ regmap_update_bits(gsbi->tcsr,
+ TCSR_ADM_CRCI_BASE + 4 * i, mask, mask);
+
+ }
+ }
+
+ /* make sure the gsbi control write is not reordered */
+ wmb();
+
+ platform_set_drvdata(pdev, gsbi);
+
+ ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
+ if (ret)
+ clk_disable_unprepare(gsbi->hclk);
+ return ret;
+}
+
+static int gsbi_remove(struct platform_device *pdev)
+{
+ struct gsbi_info *gsbi = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(gsbi->hclk);
+
+ return 0;
+}
+
+static const struct of_device_id gsbi_dt_match[] = {
+ { .compatible = "qcom,gsbi-v1.0.0", },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, gsbi_dt_match);
+
+static struct platform_driver gsbi_driver = {
+ .driver = {
+ .name = "gsbi",
+ .of_match_table = gsbi_dt_match,
+ },
+ .probe = gsbi_probe,
+ .remove = gsbi_remove,
+};
+
+module_platform_driver(gsbi_driver);
+
+MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
+MODULE_DESCRIPTION("QCOM GSBI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/qmi_encdec.c b/drivers/soc/qcom/qmi_encdec.c
new file mode 100644
index 000000000..3aaab71d1
--- /dev/null
+++ b/drivers/soc/qcom/qmi_encdec.c
@@ -0,0 +1,816 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ */
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/soc/qcom/qmi.h>
+
+#define QMI_ENCDEC_ENCODE_TLV(type, length, p_dst) do { \
+ *p_dst++ = type; \
+ *p_dst++ = ((u8)((length) & 0xFF)); \
+ *p_dst++ = ((u8)(((length) >> 8) & 0xFF)); \
+} while (0)
+
+#define QMI_ENCDEC_DECODE_TLV(p_type, p_length, p_src) do { \
+ *p_type = (u8)*p_src++; \
+ *p_length = (u8)*p_src++; \
+ *p_length |= ((u8)*p_src) << 8; \
+} while (0)
+
+#define QMI_ENCDEC_ENCODE_N_BYTES(p_dst, p_src, size) \
+do { \
+ memcpy(p_dst, p_src, size); \
+ p_dst = (u8 *)p_dst + size; \
+ p_src = (u8 *)p_src + size; \
+} while (0)
+
+#define QMI_ENCDEC_DECODE_N_BYTES(p_dst, p_src, size) \
+do { \
+ memcpy(p_dst, p_src, size); \
+ p_dst = (u8 *)p_dst + size; \
+ p_src = (u8 *)p_src + size; \
+} while (0)
+
+#define UPDATE_ENCODE_VARIABLES(temp_si, buf_dst, \
+ encoded_bytes, tlv_len, encode_tlv, rc) \
+do { \
+ buf_dst = (u8 *)buf_dst + rc; \
+ encoded_bytes += rc; \
+ tlv_len += rc; \
+ temp_si = temp_si + 1; \
+ encode_tlv = 1; \
+} while (0)
+
+#define UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc) \
+do { \
+ buf_src = (u8 *)buf_src + rc; \
+ decoded_bytes += rc; \
+} while (0)
+
+#define TLV_LEN_SIZE sizeof(u16)
+#define TLV_TYPE_SIZE sizeof(u8)
+#define OPTIONAL_TLV_TYPE_START 0x10
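+
+/*
+ * On the wire, each top-level element is a TLV: a 1-byte type, a 2-byte
+ * little-endian length, then the value. As an illustrative example, a
+ * QMI_UNSIGNED_2_BYTE element with tlv_type 0x01 and value 0x1234 would
+ * encode as the byte sequence:
+ *
+ *	01 02 00 34 12
+ *
+ * i.e. type 0x01, length 0x0002, then the value bytes (the basic-element
+ * copy is a raw memcpy, so the example assumes a little-endian host, which
+ * matches the QMI wire format).
+ */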
+
+static int qmi_encode(struct qmi_elem_info *ei_array, void *out_buf,
+ const void *in_c_struct, u32 out_buf_len,
+ int enc_level);
+
+static int qmi_decode(struct qmi_elem_info *ei_array, void *out_c_struct,
+ const void *in_buf, u32 in_buf_len, int dec_level);
+
+/**
+ * skip_to_next_elem() - Skip to next element in the structure to be encoded
+ * @ei_array: Struct info describing the element to be skipped.
+ * @level: Depth level of encoding/decoding to identify nested structures.
+ *
+ * This function is used while encoding optional elements. If the flag
+ * corresponding to an optional element is not set, then encoding the
+ * optional element can be skipped. This function can be used to perform
+ * that operation.
+ *
+ * Return: struct info of the next element that can be encoded.
+ */
+static struct qmi_elem_info *skip_to_next_elem(struct qmi_elem_info *ei_array,
+ int level)
+{
+ struct qmi_elem_info *temp_ei = ei_array;
+ u8 tlv_type;
+
+ if (level > 1) {
+ temp_ei = temp_ei + 1;
+ } else {
+ do {
+ tlv_type = temp_ei->tlv_type;
+ temp_ei = temp_ei + 1;
+ } while (tlv_type == temp_ei->tlv_type);
+ }
+
+ return temp_ei;
+}
+
+/**
+ * qmi_calc_min_msg_len() - Calculate the minimum length of a QMI message
+ * @ei_array: Struct info array describing the structure.
+ * @level: Level to identify the depth of the nested structures.
+ *
+ * Return: Expected minimum length of the QMI message or 0 on error.
+ */
+static int qmi_calc_min_msg_len(struct qmi_elem_info *ei_array,
+ int level)
+{
+ int min_msg_len = 0;
+ struct qmi_elem_info *temp_ei = ei_array;
+
+ if (!ei_array)
+ return min_msg_len;
+
+ while (temp_ei->data_type != QMI_EOTI) {
+ /* Optional elements do not count in minimum length */
+ if (temp_ei->data_type == QMI_OPT_FLAG) {
+ temp_ei = skip_to_next_elem(temp_ei, level);
+ continue;
+ }
+
+ if (temp_ei->data_type == QMI_DATA_LEN) {
+ min_msg_len += (temp_ei->elem_size == sizeof(u8) ?
+ sizeof(u8) : sizeof(u16));
+ temp_ei++;
+ continue;
+ } else if (temp_ei->data_type == QMI_STRUCT) {
+ min_msg_len += qmi_calc_min_msg_len(temp_ei->ei_array,
+ (level + 1));
+ temp_ei++;
+ } else if (temp_ei->data_type == QMI_STRING) {
+ if (level > 1)
+ min_msg_len += temp_ei->elem_len <= U8_MAX ?
+ sizeof(u8) : sizeof(u16);
+ min_msg_len += temp_ei->elem_len * temp_ei->elem_size;
+ temp_ei++;
+ } else {
+ min_msg_len += (temp_ei->elem_len * temp_ei->elem_size);
+ temp_ei++;
+ }
+
+ /*
+ * Type & Length info. not prepended for elements in the
+ * nested structure.
+ */
+ if (level == 1)
+ min_msg_len += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
+ }
+
+ return min_msg_len;
+}
+
+/**
+ * qmi_encode_basic_elem() - Encodes elements of basic/primary data type
+ * @buf_dst: Buffer to store the encoded information.
+ * @buf_src: Buffer containing the elements to be encoded.
+ * @elem_len: Number of elements, in the buf_src, to be encoded.
+ * @elem_size: Size of a single instance of the element to be encoded.
+ *
+ * This function encodes the "elem_len" number of data elements, each of
+ * size "elem_size" bytes from the source buffer "buf_src" and stores the
+ * encoded information in the destination buffer "buf_dst". The elements are
+ * of a primary data type, such as u8 through u64. This function returns
+ * the number of bytes of encoded information.
+ *
+ * Return: The number of bytes of encoded information.
+ */
+static int qmi_encode_basic_elem(void *buf_dst, const void *buf_src,
+ u32 elem_len, u32 elem_size)
+{
+ u32 i, rc = 0;
+
+ for (i = 0; i < elem_len; i++) {
+ QMI_ENCDEC_ENCODE_N_BYTES(buf_dst, buf_src, elem_size);
+ rc += elem_size;
+ }
+
+ return rc;
+}
+
+/**
+ * qmi_encode_struct_elem() - Encodes elements of struct data type
+ * @ei_array: Struct info array describing the struct element.
+ * @buf_dst: Buffer to store the encoded information.
+ * @buf_src: Buffer containing the elements to be encoded.
+ * @elem_len: Number of elements, in the buf_src, to be encoded.
+ * @out_buf_len: Available space in the encode buffer.
+ * @enc_level: Depth of the nested structure from the main structure.
+ *
+ * This function encodes the "elem_len" number of struct elements, each of
+ * size "ei_array->elem_size" bytes from the source buffer "buf_src" and
+ * stores the encoded information in the destination buffer "buf_dst". The
+ * elements are of struct data type which includes any C structure. This
+ * function returns the number of bytes of encoded information.
+ *
+ * Return: The number of bytes of encoded information on success or negative
+ * errno on error.
+ */
+static int qmi_encode_struct_elem(struct qmi_elem_info *ei_array,
+ void *buf_dst, const void *buf_src,
+ u32 elem_len, u32 out_buf_len,
+ int enc_level)
+{
+ int i, rc, encoded_bytes = 0;
+ struct qmi_elem_info *temp_ei = ei_array;
+
+ for (i = 0; i < elem_len; i++) {
+ rc = qmi_encode(temp_ei->ei_array, buf_dst, buf_src,
+ out_buf_len - encoded_bytes, enc_level);
+ if (rc < 0) {
+ pr_err("%s: STRUCT Encode failure\n", __func__);
+ return rc;
+ }
+ buf_dst = buf_dst + rc;
+ buf_src = buf_src + temp_ei->elem_size;
+ encoded_bytes += rc;
+ }
+
+ return encoded_bytes;
+}
+
+/**
+ * qmi_encode_string_elem() - Encodes elements of string data type
+ * @ei_array: Struct info array describing the string element.
+ * @buf_dst: Buffer to store the encoded information.
+ * @buf_src: Buffer containing the elements to be encoded.
+ * @out_buf_len: Available space in the encode buffer.
+ * @enc_level: Depth of the string element from the main structure.
+ *
+ * This function encodes a string element of maximum length "ei_array->elem_len"
+ * bytes from the source buffer "buf_src" and stores the encoded information in
+ * the destination buffer "buf_dst". This function returns the number of bytes
+ * of encoded information.
+ *
+ * Return: The number of bytes of encoded information on success or negative
+ * errno on error.
+ */
+static int qmi_encode_string_elem(struct qmi_elem_info *ei_array,
+ void *buf_dst, const void *buf_src,
+ u32 out_buf_len, int enc_level)
+{
+ int rc;
+ int encoded_bytes = 0;
+ struct qmi_elem_info *temp_ei = ei_array;
+ u32 string_len = 0;
+ u32 string_len_sz = 0;
+
+ string_len = strlen(buf_src);
+ string_len_sz = temp_ei->elem_len <= U8_MAX ?
+ sizeof(u8) : sizeof(u16);
+ if (string_len > temp_ei->elem_len) {
+ pr_err("%s: String to be encoded is longer - %d > %d\n",
+ __func__, string_len, temp_ei->elem_len);
+ return -EINVAL;
+ }
+
+ if (enc_level == 1) {
+ if (string_len + TLV_LEN_SIZE + TLV_TYPE_SIZE >
+ out_buf_len) {
+ pr_err("%s: Output len %d > Out Buf len %d\n",
+ __func__, string_len, out_buf_len);
+ return -ETOOSMALL;
+ }
+ } else {
+ if (string_len + string_len_sz > out_buf_len) {
+ pr_err("%s: Output len %d > Out Buf len %d\n",
+ __func__, string_len, out_buf_len);
+ return -ETOOSMALL;
+ }
+ rc = qmi_encode_basic_elem(buf_dst, &string_len,
+ 1, string_len_sz);
+ encoded_bytes += rc;
+ }
+
+ rc = qmi_encode_basic_elem(buf_dst + encoded_bytes, buf_src,
+ string_len, temp_ei->elem_size);
+ encoded_bytes += rc;
+
+ return encoded_bytes;
+}
+
+/**
+ * qmi_encode() - Core Encode Function
+ * @ei_array: Struct info array describing the structure to be encoded.
+ * @out_buf: Buffer to hold the encoded QMI message.
+ * @in_c_struct: Pointer to the C structure to be encoded.
+ * @out_buf_len: Available space in the encode buffer.
+ * @enc_level: Encode level to indicate the depth of the nested structure,
+ * within the main structure, being encoded.
+ *
+ * Return: The number of bytes of encoded information on success or negative
+ * errno on error.
+ */
+static int qmi_encode(struct qmi_elem_info *ei_array, void *out_buf,
+ const void *in_c_struct, u32 out_buf_len,
+ int enc_level)
+{
+ struct qmi_elem_info *temp_ei = ei_array;
+ u8 opt_flag_value = 0;
+ u32 data_len_value = 0, data_len_sz;
+ u8 *buf_dst = (u8 *)out_buf;
+ u8 *tlv_pointer;
+ u32 tlv_len;
+ u8 tlv_type;
+ u32 encoded_bytes = 0;
+ const void *buf_src;
+ int encode_tlv = 0;
+ int rc;
+
+ if (!ei_array)
+ return 0;
+
+ tlv_pointer = buf_dst;
+ tlv_len = 0;
+ if (enc_level == 1)
+ buf_dst = buf_dst + (TLV_LEN_SIZE + TLV_TYPE_SIZE);
+
+ while (temp_ei->data_type != QMI_EOTI) {
+ buf_src = in_c_struct + temp_ei->offset;
+ tlv_type = temp_ei->tlv_type;
+
+ if (temp_ei->array_type == NO_ARRAY) {
+ data_len_value = 1;
+ } else if (temp_ei->array_type == STATIC_ARRAY) {
+ data_len_value = temp_ei->elem_len;
+ } else if (data_len_value <= 0 ||
+ temp_ei->elem_len < data_len_value) {
+ pr_err("%s: Invalid data length\n", __func__);
+ return -EINVAL;
+ }
+
+ switch (temp_ei->data_type) {
+ case QMI_OPT_FLAG:
+ rc = qmi_encode_basic_elem(&opt_flag_value, buf_src,
+ 1, sizeof(u8));
+ if (opt_flag_value)
+ temp_ei = temp_ei + 1;
+ else
+ temp_ei = skip_to_next_elem(temp_ei, enc_level);
+ break;
+
+ case QMI_DATA_LEN:
+ memcpy(&data_len_value, buf_src, temp_ei->elem_size);
+ data_len_sz = temp_ei->elem_size == sizeof(u8) ?
+ sizeof(u8) : sizeof(u16);
+ /* Check to avoid out of range buffer access */
+ if ((data_len_sz + encoded_bytes + TLV_LEN_SIZE +
+ TLV_TYPE_SIZE) > out_buf_len) {
+ pr_err("%s: Too Small Buffer @DATA_LEN\n",
+ __func__);
+ return -ETOOSMALL;
+ }
+ rc = qmi_encode_basic_elem(buf_dst, &data_len_value,
+ 1, data_len_sz);
+ UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst,
+ encoded_bytes, tlv_len,
+ encode_tlv, rc);
+ if (!data_len_value)
+ temp_ei = skip_to_next_elem(temp_ei, enc_level);
+ else
+ encode_tlv = 0;
+ break;
+
+ case QMI_UNSIGNED_1_BYTE:
+ case QMI_UNSIGNED_2_BYTE:
+ case QMI_UNSIGNED_4_BYTE:
+ case QMI_UNSIGNED_8_BYTE:
+ case QMI_SIGNED_2_BYTE_ENUM:
+ case QMI_SIGNED_4_BYTE_ENUM:
+ /* Check to avoid out of range buffer access */
+ if (((data_len_value * temp_ei->elem_size) +
+ encoded_bytes + TLV_LEN_SIZE + TLV_TYPE_SIZE) >
+ out_buf_len) {
+ pr_err("%s: Too Small Buffer @data_type:%d\n",
+ __func__, temp_ei->data_type);
+ return -ETOOSMALL;
+ }
+ rc = qmi_encode_basic_elem(buf_dst, buf_src,
+ data_len_value,
+ temp_ei->elem_size);
+ UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst,
+ encoded_bytes, tlv_len,
+ encode_tlv, rc);
+ break;
+
+ case QMI_STRUCT:
+ rc = qmi_encode_struct_elem(temp_ei, buf_dst, buf_src,
+ data_len_value,
+ out_buf_len - encoded_bytes,
+ enc_level + 1);
+ if (rc < 0)
+ return rc;
+ UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst,
+ encoded_bytes, tlv_len,
+ encode_tlv, rc);
+ break;
+
+ case QMI_STRING:
+ rc = qmi_encode_string_elem(temp_ei, buf_dst, buf_src,
+ out_buf_len - encoded_bytes,
+ enc_level);
+ if (rc < 0)
+ return rc;
+ UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst,
+ encoded_bytes, tlv_len,
+ encode_tlv, rc);
+ break;
+ default:
+ pr_err("%s: Unrecognized data type\n", __func__);
+ return -EINVAL;
+ }
+
+ if (encode_tlv && enc_level == 1) {
+ QMI_ENCDEC_ENCODE_TLV(tlv_type, tlv_len, tlv_pointer);
+ encoded_bytes += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
+ tlv_pointer = buf_dst;
+ tlv_len = 0;
+ buf_dst = buf_dst + TLV_LEN_SIZE + TLV_TYPE_SIZE;
+ encode_tlv = 0;
+ }
+ }
+
+ return encoded_bytes;
+}
+
+/**
+ * qmi_decode_basic_elem() - Decodes elements of basic/primary data type
+ * @buf_dst: Buffer to store the decoded element.
+ * @buf_src: Buffer containing the elements in QMI wire format.
+ * @elem_len: Number of elements to be decoded.
+ * @elem_size: Size of a single instance of the element to be decoded.
+ *
+ * This function decodes the "elem_len" number of elements in QMI wire format,
+ * each of size "elem_size" bytes from the source buffer "buf_src" and stores
+ * the decoded elements in the destination buffer "buf_dst". The elements are
+ * of a primary data type, such as u8 through u64. This function returns
+ * the number of bytes of decoded information.
+ *
+ * Return: The total size of the decoded data elements, in bytes.
+ */
+static int qmi_decode_basic_elem(void *buf_dst, const void *buf_src,
+ u32 elem_len, u32 elem_size)
+{
+ u32 i, rc = 0;
+
+ for (i = 0; i < elem_len; i++) {
+ QMI_ENCDEC_DECODE_N_BYTES(buf_dst, buf_src, elem_size);
+ rc += elem_size;
+ }
+
+ return rc;
+}
+
+/**
+ * qmi_decode_struct_elem() - Decodes elements of struct data type
+ * @ei_array: Struct info array describing the struct element.
+ * @buf_dst: Buffer to store the decoded element.
+ * @buf_src: Buffer containing the elements in QMI wire format.
+ * @elem_len: Number of elements to be decoded.
+ * @tlv_len: Total size of the encoded information corresponding to
+ * this struct element.
+ * @dec_level: Depth of the nested structure from the main structure.
+ *
+ * This function decodes the "elem_len" number of elements in QMI wire format,
+ * each of size "(tlv_len/elem_len)" bytes from the source buffer "buf_src"
+ * and stores the decoded elements in the destination buffer "buf_dst". The
+ * elements are of struct data type which includes any C structure. This
+ * function returns the number of bytes of decoded information.
+ *
+ * Return: The total size of the decoded data elements on success, negative
+ * errno on error.
+ */
+static int qmi_decode_struct_elem(struct qmi_elem_info *ei_array,
+ void *buf_dst, const void *buf_src,
+ u32 elem_len, u32 tlv_len,
+ int dec_level)
+{
+ int i, rc, decoded_bytes = 0;
+ struct qmi_elem_info *temp_ei = ei_array;
+
+ for (i = 0; i < elem_len && decoded_bytes < tlv_len; i++) {
+ rc = qmi_decode(temp_ei->ei_array, buf_dst, buf_src,
+ tlv_len - decoded_bytes, dec_level);
+ if (rc < 0)
+ return rc;
+ buf_src = buf_src + rc;
+ buf_dst = buf_dst + temp_ei->elem_size;
+ decoded_bytes += rc;
+ }
+
+ if ((dec_level <= 2 && decoded_bytes != tlv_len) ||
+ (dec_level > 2 && (i < elem_len || decoded_bytes > tlv_len))) {
+ pr_err("%s: Fault in decoding: dl(%d), db(%d), tl(%d), i(%d), el(%d)\n",
+ __func__, dec_level, decoded_bytes, tlv_len,
+ i, elem_len);
+ return -EFAULT;
+ }
+
+ return decoded_bytes;
+}
+
+/**
+ * qmi_decode_string_elem() - Decodes elements of string data type
+ * @ei_array: Struct info array describing the string element.
+ * @buf_dst: Buffer to store the decoded element.
+ * @buf_src: Buffer containing the elements in QMI wire format.
+ * @tlv_len: Total size of the encoded information corresponding to
+ * this string element.
+ * @dec_level: Depth of the string element from the main structure.
+ *
+ * This function decodes the string element of maximum length
+ * "ei_array->elem_len" from the source buffer "buf_src" and puts it into
+ * the destination buffer "buf_dst". This function returns number of bytes
+ * decoded from the input buffer.
+ *
+ * Return: The total size of the decoded data elements on success, negative
+ * errno on error.
+ */
+static int qmi_decode_string_elem(struct qmi_elem_info *ei_array,
+ void *buf_dst, const void *buf_src,
+ u32 tlv_len, int dec_level)
+{
+ int rc;
+ int decoded_bytes = 0;
+ u32 string_len = 0;
+ u32 string_len_sz = 0;
+ struct qmi_elem_info *temp_ei = ei_array;
+
+ if (dec_level == 1) {
+ string_len = tlv_len;
+ } else {
+ string_len_sz = temp_ei->elem_len <= U8_MAX ?
+ sizeof(u8) : sizeof(u16);
+ rc = qmi_decode_basic_elem(&string_len, buf_src,
+ 1, string_len_sz);
+ decoded_bytes += rc;
+ }
+
+ if (string_len > temp_ei->elem_len) {
+ pr_err("%s: String len %d > Max Len %d\n",
+ __func__, string_len, temp_ei->elem_len);
+ return -ETOOSMALL;
+ } else if (string_len > tlv_len) {
+ pr_err("%s: String len %d > Input Buffer Len %d\n",
+ __func__, string_len, tlv_len);
+ return -EFAULT;
+ }
+
+ rc = qmi_decode_basic_elem(buf_dst, buf_src + decoded_bytes,
+ string_len, temp_ei->elem_size);
+ *((char *)buf_dst + string_len) = '\0';
+ decoded_bytes += rc;
+
+ return decoded_bytes;
+}
+
+/**
+ * find_ei() - Find element info corresponding to TLV Type
+ * @ei_array: Struct info array of the message being decoded.
+ * @type: TLV Type of the element being searched.
+ *
+ * Every element that got encoded in the QMI message will have a type
+ * information associated with it. While decoding the QMI message,
+ * this function is used to find the struct info regarding the element
+ * that corresponds to the type being decoded.
+ *
+ * Return: Pointer to struct info if found, NULL otherwise.
+ */
+static struct qmi_elem_info *find_ei(struct qmi_elem_info *ei_array,
+ u32 type)
+{
+ struct qmi_elem_info *temp_ei = ei_array;
+
+ while (temp_ei->data_type != QMI_EOTI) {
+ if (temp_ei->tlv_type == (u8)type)
+ return temp_ei;
+ temp_ei = temp_ei + 1;
+ }
+
+ return NULL;
+}
+
+/**
+ * qmi_decode() - Core Decode Function
+ * @ei_array: Struct info array describing the structure to be decoded.
+ * @out_c_struct: Buffer to hold the decoded C struct
+ * @in_buf: Buffer containing the QMI message to be decoded
+ * @in_buf_len: Length of the QMI message to be decoded
+ * @dec_level: Decode level to indicate the depth of the nested structure,
+ * within the main structure, being decoded
+ *
+ * Return: The number of bytes of decoded information on success, negative
+ * errno on error.
+ */
+static int qmi_decode(struct qmi_elem_info *ei_array, void *out_c_struct,
+ const void *in_buf, u32 in_buf_len,
+ int dec_level)
+{
+ struct qmi_elem_info *temp_ei = ei_array;
+ u8 opt_flag_value = 1;
+ u32 data_len_value = 0, data_len_sz = 0;
+ u8 *buf_dst = out_c_struct;
+ const u8 *tlv_pointer;
+ u32 tlv_len = 0;
+ u32 tlv_type;
+ u32 decoded_bytes = 0;
+ const void *buf_src = in_buf;
+ int rc;
+
+ while (decoded_bytes < in_buf_len) {
+ if (dec_level >= 2 && temp_ei->data_type == QMI_EOTI)
+ return decoded_bytes;
+
+ if (dec_level == 1) {
+ tlv_pointer = buf_src;
+ QMI_ENCDEC_DECODE_TLV(&tlv_type,
+ &tlv_len, tlv_pointer);
+ buf_src += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
+ decoded_bytes += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
+ temp_ei = find_ei(ei_array, tlv_type);
+ if (!temp_ei && tlv_type < OPTIONAL_TLV_TYPE_START) {
+ pr_err("%s: Inval element info\n", __func__);
+ return -EINVAL;
+ } else if (!temp_ei) {
+ UPDATE_DECODE_VARIABLES(buf_src,
+ decoded_bytes, tlv_len);
+ continue;
+ }
+ } else {
+ /*
+ * No length information for elements in nested
+ * structures. So use remaining decodable buffer space.
+ */
+ tlv_len = in_buf_len - decoded_bytes;
+ }
+
+ buf_dst = out_c_struct + temp_ei->offset;
+ if (temp_ei->data_type == QMI_OPT_FLAG) {
+ memcpy(buf_dst, &opt_flag_value, sizeof(u8));
+ temp_ei = temp_ei + 1;
+ buf_dst = out_c_struct + temp_ei->offset;
+ }
+
+ if (temp_ei->data_type == QMI_DATA_LEN) {
+ data_len_sz = temp_ei->elem_size == sizeof(u8) ?
+ sizeof(u8) : sizeof(u16);
+ rc = qmi_decode_basic_elem(&data_len_value, buf_src,
+ 1, data_len_sz);
+ memcpy(buf_dst, &data_len_value, sizeof(u32));
+ temp_ei = temp_ei + 1;
+ buf_dst = out_c_struct + temp_ei->offset;
+ tlv_len -= data_len_sz;
+ UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc);
+ }
+
+ if (temp_ei->array_type == NO_ARRAY) {
+ data_len_value = 1;
+ } else if (temp_ei->array_type == STATIC_ARRAY) {
+ data_len_value = temp_ei->elem_len;
+ } else if (data_len_value > temp_ei->elem_len) {
+ pr_err("%s: Data len %d > max spec %d\n",
+ __func__, data_len_value, temp_ei->elem_len);
+ return -ETOOSMALL;
+ }
+
+ switch (temp_ei->data_type) {
+ case QMI_UNSIGNED_1_BYTE:
+ case QMI_UNSIGNED_2_BYTE:
+ case QMI_UNSIGNED_4_BYTE:
+ case QMI_UNSIGNED_8_BYTE:
+ case QMI_SIGNED_2_BYTE_ENUM:
+ case QMI_SIGNED_4_BYTE_ENUM:
+ rc = qmi_decode_basic_elem(buf_dst, buf_src,
+ data_len_value,
+ temp_ei->elem_size);
+ UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc);
+ break;
+
+ case QMI_STRUCT:
+ rc = qmi_decode_struct_elem(temp_ei, buf_dst, buf_src,
+ data_len_value, tlv_len,
+ dec_level + 1);
+ if (rc < 0)
+ return rc;
+ UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc);
+ break;
+
+ case QMI_STRING:
+ rc = qmi_decode_string_elem(temp_ei, buf_dst, buf_src,
+ tlv_len, dec_level);
+ if (rc < 0)
+ return rc;
+ UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc);
+ break;
+
+ default:
+ pr_err("%s: Unrecognized data type\n", __func__);
+ return -EINVAL;
+ }
+ temp_ei = temp_ei + 1;
+ }
+
+ return decoded_bytes;
+}
+
+/**
+ * qmi_encode_message() - Encode C structure as QMI encoded message
+ * @type: Type of QMI message
+ * @msg_id: Message ID of the message
+ * @len: Passed as max length of the message, updated to actual size
+ * @txn_id: Transaction ID
+ * @ei: QMI message descriptor
+ * @c_struct: Reference to structure to encode
+ *
+ * Return: Buffer with encoded message, or negative ERR_PTR() on error
+ */
+void *qmi_encode_message(int type, unsigned int msg_id, size_t *len,
+ unsigned int txn_id, struct qmi_elem_info *ei,
+ const void *c_struct)
+{
+ struct qmi_header *hdr;
+ ssize_t msglen = 0;
+ void *msg;
+ int ret;
+
+ /* Check the possibility of a zero length QMI message */
+ if (!c_struct) {
+ ret = qmi_calc_min_msg_len(ei, 1);
+ if (ret) {
+ pr_err("%s: Calc. len %d != 0, but NULL c_struct\n",
+ __func__, ret);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ msg = kzalloc(sizeof(*hdr) + *len, GFP_KERNEL);
+ if (!msg)
+ return ERR_PTR(-ENOMEM);
+
+ /* Encode message, if we have a message */
+ if (c_struct) {
+ msglen = qmi_encode(ei, msg + sizeof(*hdr), c_struct, *len, 1);
+ if (msglen < 0) {
+ kfree(msg);
+ return ERR_PTR(msglen);
+ }
+ }
+
+ hdr = msg;
+ hdr->type = type;
+ hdr->txn_id = txn_id;
+ hdr->msg_id = msg_id;
+ hdr->msg_len = msglen;
+
+ *len = sizeof(*hdr) + msglen;
+
+ return msg;
+}
+EXPORT_SYMBOL(qmi_encode_message);
+
+/**
+ * qmi_decode_message() - Decode QMI encoded message to C structure
+ * @buf: Buffer with encoded message
+ * @len: Amount of data in @buf
+ * @ei: QMI message descriptor
+ * @c_struct: Reference to structure to decode into
+ *
+ * Return: The number of bytes of decoded information on success, negative
+ * errno on error.
+ */
+int qmi_decode_message(const void *buf, size_t len,
+ struct qmi_elem_info *ei, void *c_struct)
+{
+ if (!ei)
+ return -EINVAL;
+
+ if (!c_struct || !buf || !len)
+ return -EINVAL;
+
+ return qmi_decode(ei, c_struct, buf + sizeof(struct qmi_header),
+ len - sizeof(struct qmi_header), 1);
+}
+EXPORT_SYMBOL(qmi_decode_message);
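+
+/*
+ * Example (illustrative sketch, using hypothetical my_req_ei/my_resp_ei
+ * descriptors and my_req/my_resp structures): encoding a request and
+ * decoding a received buffer:
+ *
+ *	size_t len = MY_REQ_MAX_MSG_LEN;	(hypothetical upper bound)
+ *	void *msg;
+ *
+ *	msg = qmi_encode_message(QMI_REQUEST, MY_MSG_ID, &len, txn_id,
+ *				 my_req_ei, &req);
+ *	if (IS_ERR(msg))
+ *		return PTR_ERR(msg);
+ *	...send the len bytes at msg, then kfree(msg)...
+ *
+ *	ret = qmi_decode_message(buf, buf_len, my_resp_ei, &resp);
+ */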
+
+/* Common header in all QMI responses */
+struct qmi_elem_info qmi_response_type_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_2_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct qmi_response_type_v01, result),
+ .ei_array = NULL,
+ },
+ {
+ .data_type = QMI_SIGNED_2_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct qmi_response_type_v01, error),
+ .ei_array = NULL,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .elem_len = 0,
+ .elem_size = 0,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = 0,
+ .ei_array = NULL,
+ },
+};
+EXPORT_SYMBOL(qmi_response_type_v01_ei);
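+
+/*
+ * Example (illustrative only): a response message descriptor typically
+ * embeds this common header as a QMI_STRUCT element. The surrounding
+ * structure and the tlv_type value below are hypothetical:
+ *
+ *	struct my_resp {
+ *		struct qmi_response_type_v01 resp;
+ *	};
+ *
+ *	struct qmi_elem_info my_resp_ei[] = {
+ *		{
+ *			.data_type  = QMI_STRUCT,
+ *			.elem_len   = 1,
+ *			.elem_size  = sizeof(struct qmi_response_type_v01),
+ *			.array_type = NO_ARRAY,
+ *			.tlv_type   = 0x02,
+ *			.offset     = offsetof(struct my_resp, resp),
+ *			.ei_array   = qmi_response_type_v01_ei,
+ *		},
+ *		{ .data_type = QMI_EOTI, .array_type = NO_ARRAY },
+ *	};
+ */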
+
+MODULE_DESCRIPTION("QMI encoder/decoder helper");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/qmi_interface.c b/drivers/soc/qcom/qmi_interface.c
new file mode 100644
index 000000000..938ca41c5
--- /dev/null
+++ b/drivers/soc/qcom/qmi_interface.c
@@ -0,0 +1,848 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Linaro Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/qrtr.h>
+#include <linux/net.h>
+#include <linux/completion.h>
+#include <linux/idr.h>
+#include <linux/string.h>
+#include <net/sock.h>
+#include <linux/workqueue.h>
+#include <linux/soc/qcom/qmi.h>
+
+static struct socket *qmi_sock_create(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq);
+
+/**
+ * qmi_recv_new_server() - handler of NEW_SERVER control message
+ * @qmi: qmi handle
+ * @service: service id of the new server
+ * @instance: instance id of the new server
+ * @node: node of the new server
+ * @port: port of the new server
+ *
+ * Calls the new_server callback to inform the client about a newly registered
+ * server matching the currently registered service lookup.
+ */
+static void qmi_recv_new_server(struct qmi_handle *qmi,
+ unsigned int service, unsigned int instance,
+ unsigned int node, unsigned int port)
+{
+ struct qmi_ops *ops = &qmi->ops;
+ struct qmi_service *svc;
+ int ret;
+
+ if (!ops->new_server)
+ return;
+
+ /* Ignore EOF marker */
+ if (!node && !port)
+ return;
+
+ svc = kzalloc(sizeof(*svc), GFP_KERNEL);
+ if (!svc)
+ return;
+
+ svc->service = service;
+ svc->version = instance & 0xff;
+ svc->instance = instance >> 8;
+ svc->node = node;
+ svc->port = port;
+
+ ret = ops->new_server(qmi, svc);
+ if (ret < 0)
+ kfree(svc);
+ else
+ list_add(&svc->list_node, &qmi->lookup_results);
+}
+
+/**
+ * qmi_recv_del_server() - handler of DEL_SERVER control message
+ * @qmi: qmi handle
+ * @node: node of the dying server, a value of -1 matches all nodes
+ * @port: port of the dying server, a value of -1 matches all ports
+ *
+ * Calls the del_server callback for each previously seen server, allowing the
+ * client to react to the disappearing server.
+ */
+static void qmi_recv_del_server(struct qmi_handle *qmi,
+ unsigned int node, unsigned int port)
+{
+ struct qmi_ops *ops = &qmi->ops;
+ struct qmi_service *svc;
+ struct qmi_service *tmp;
+
+ list_for_each_entry_safe(svc, tmp, &qmi->lookup_results, list_node) {
+ if (node != -1 && svc->node != node)
+ continue;
+ if (port != -1 && svc->port != port)
+ continue;
+
+ if (ops->del_server)
+ ops->del_server(qmi, svc);
+
+ list_del(&svc->list_node);
+ kfree(svc);
+ }
+}
+
+/**
+ * qmi_recv_bye() - handler of BYE control message
+ * @qmi: qmi handle
+ * @node: id of the dying node
+ *
+ * Signals the client that all previously registered services on this node are
+ * now gone and then calls the bye callback to allow the client to further
+ * clean up resources associated with this remote.
+ */
+static void qmi_recv_bye(struct qmi_handle *qmi,
+ unsigned int node)
+{
+ struct qmi_ops *ops = &qmi->ops;
+
+ qmi_recv_del_server(qmi, node, -1);
+
+ if (ops->bye)
+ ops->bye(qmi, node);
+}
+
+/**
+ * qmi_recv_del_client() - handler of DEL_CLIENT control message
+ * @qmi: qmi handle
+ * @node: node of the dying client
+ * @port: port of the dying client
+ *
+ * Signals the client about a dying client, by calling the del_client callback.
+ */
+static void qmi_recv_del_client(struct qmi_handle *qmi,
+ unsigned int node, unsigned int port)
+{
+ struct qmi_ops *ops = &qmi->ops;
+
+ if (ops->del_client)
+ ops->del_client(qmi, node, port);
+}
+
+static void qmi_recv_ctrl_pkt(struct qmi_handle *qmi,
+ const void *buf, size_t len)
+{
+ const struct qrtr_ctrl_pkt *pkt = buf;
+
+ if (len < sizeof(struct qrtr_ctrl_pkt)) {
+ pr_debug("ignoring short control packet\n");
+ return;
+ }
+
+ switch (le32_to_cpu(pkt->cmd)) {
+ case QRTR_TYPE_BYE:
+ qmi_recv_bye(qmi, le32_to_cpu(pkt->client.node));
+ break;
+ case QRTR_TYPE_NEW_SERVER:
+ qmi_recv_new_server(qmi,
+ le32_to_cpu(pkt->server.service),
+ le32_to_cpu(pkt->server.instance),
+ le32_to_cpu(pkt->server.node),
+ le32_to_cpu(pkt->server.port));
+ break;
+ case QRTR_TYPE_DEL_SERVER:
+ qmi_recv_del_server(qmi,
+ le32_to_cpu(pkt->server.node),
+ le32_to_cpu(pkt->server.port));
+ break;
+ case QRTR_TYPE_DEL_CLIENT:
+ qmi_recv_del_client(qmi,
+ le32_to_cpu(pkt->client.node),
+ le32_to_cpu(pkt->client.port));
+ break;
+ }
+}
+
+static void qmi_send_new_lookup(struct qmi_handle *qmi, struct qmi_service *svc)
+{
+ struct qrtr_ctrl_pkt pkt;
+ struct sockaddr_qrtr sq;
+ struct msghdr msg = { };
+ struct kvec iv = { &pkt, sizeof(pkt) };
+ int ret;
+
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.cmd = cpu_to_le32(QRTR_TYPE_NEW_LOOKUP);
+ pkt.server.service = cpu_to_le32(svc->service);
+ pkt.server.instance = cpu_to_le32(svc->version | svc->instance << 8);
+
+ sq.sq_family = qmi->sq.sq_family;
+ sq.sq_node = qmi->sq.sq_node;
+ sq.sq_port = QRTR_PORT_CTRL;
+
+ msg.msg_name = &sq;
+ msg.msg_namelen = sizeof(sq);
+
+ mutex_lock(&qmi->sock_lock);
+ if (qmi->sock) {
+ ret = kernel_sendmsg(qmi->sock, &msg, &iv, 1, sizeof(pkt));
+ if (ret < 0)
+ pr_err("failed to send lookup registration: %d\n", ret);
+ }
+ mutex_unlock(&qmi->sock_lock);
+}
+
+/**
+ * qmi_add_lookup() - register a new lookup with the name service
+ * @qmi: qmi handle
+ * @service: service id of the request
+ * @version: version number of the request
+ * @instance: instance id of the request
+ *
+ * Registering a lookup query with the name server will cause the name server
+ * to send NEW_SERVER and DEL_SERVER control messages to this socket as
+ * matching services are registered.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int qmi_add_lookup(struct qmi_handle *qmi, unsigned int service,
+ unsigned int version, unsigned int instance)
+{
+ struct qmi_service *svc;
+
+ svc = kzalloc(sizeof(*svc), GFP_KERNEL);
+ if (!svc)
+ return -ENOMEM;
+
+ svc->service = service;
+ svc->version = version;
+ svc->instance = instance;
+
+ list_add(&svc->list_node, &qmi->lookups);
+
+ qmi_send_new_lookup(qmi, svc);
+
+ return 0;
+}
+EXPORT_SYMBOL(qmi_add_lookup);
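
A sketch of a client watching for a service, assuming a hypothetical service
id 15, version 1; the callback and ops object are illustrative:

    static int my_new_server(struct qmi_handle *qmi, struct qmi_service *svc)
    {
    	pr_info("service found on node %u, port %u\n", svc->node, svc->port);
    	return 0;	/* keep @svc on the lookup_results list */
    }

    static const struct qmi_ops my_ops = {
    	.new_server = my_new_server,
    };

    /* after qmi_handle_init(&handle, ..., &my_ops, NULL): */
    ret = qmi_add_lookup(&handle, 15, 1, 0);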
+
+static void qmi_send_new_server(struct qmi_handle *qmi, struct qmi_service *svc)
+{
+ struct qrtr_ctrl_pkt pkt;
+ struct sockaddr_qrtr sq;
+ struct msghdr msg = { };
+ struct kvec iv = { &pkt, sizeof(pkt) };
+ int ret;
+
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.cmd = cpu_to_le32(QRTR_TYPE_NEW_SERVER);
+ pkt.server.service = cpu_to_le32(svc->service);
+ pkt.server.instance = cpu_to_le32(svc->version | svc->instance << 8);
+ pkt.server.node = cpu_to_le32(qmi->sq.sq_node);
+ pkt.server.port = cpu_to_le32(qmi->sq.sq_port);
+
+ sq.sq_family = qmi->sq.sq_family;
+ sq.sq_node = qmi->sq.sq_node;
+ sq.sq_port = QRTR_PORT_CTRL;
+
+ msg.msg_name = &sq;
+ msg.msg_namelen = sizeof(sq);
+
+ mutex_lock(&qmi->sock_lock);
+ if (qmi->sock) {
+ ret = kernel_sendmsg(qmi->sock, &msg, &iv, 1, sizeof(pkt));
+ if (ret < 0)
+ pr_err("send service registration failed: %d\n", ret);
+ }
+ mutex_unlock(&qmi->sock_lock);
+}
+
+/**
+ * qmi_add_server() - register a service with the name service
+ * @qmi: qmi handle
+ * @service: type of the service
+ * @version: version of the service
+ * @instance: instance of the service
+ *
+ * Register a new service with the name service. This allows clients to find
+ * and start sending messages to the service associated with @qmi.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int qmi_add_server(struct qmi_handle *qmi, unsigned int service,
+ unsigned int version, unsigned int instance)
+{
+ struct qmi_service *svc;
+
+ svc = kzalloc(sizeof(*svc), GFP_KERNEL);
+ if (!svc)
+ return -ENOMEM;
+
+ svc->service = service;
+ svc->version = version;
+ svc->instance = instance;
+
+ list_add(&svc->list_node, &qmi->services);
+
+ qmi_send_new_server(qmi, svc);
+
+ return 0;
+}
+EXPORT_SYMBOL(qmi_add_server);
+
+/**
+ * qmi_txn_init() - allocate transaction id within the given QMI handle
+ * @qmi: QMI handle
+ * @txn: transaction context
+ * @ei: description of how to decode a matching response (optional)
+ * @c_struct: pointer to the object to decode the response into (optional)
+ *
+ * This allocates a transaction id within the QMI handle. If @ei and @c_struct
+ * are specified any responses to this transaction will be decoded as described
+ * by @ei into @c_struct.
+ *
+ * A client calling qmi_txn_init() must call either qmi_txn_wait() or
+ * qmi_txn_cancel() to free up the allocated resources.
+ *
+ * Return: Transaction id on success, negative errno on failure.
+ */
+int qmi_txn_init(struct qmi_handle *qmi, struct qmi_txn *txn,
+ struct qmi_elem_info *ei, void *c_struct)
+{
+ int ret;
+
+ memset(txn, 0, sizeof(*txn));
+
+ mutex_init(&txn->lock);
+ init_completion(&txn->completion);
+ txn->qmi = qmi;
+ txn->ei = ei;
+ txn->dest = c_struct;
+
+ mutex_lock(&qmi->txn_lock);
+ ret = idr_alloc_cyclic(&qmi->txns, txn, 0, INT_MAX, GFP_KERNEL);
+ if (ret < 0)
+ pr_err("failed to allocate transaction id\n");
+
+ txn->id = ret;
+ mutex_unlock(&qmi->txn_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(qmi_txn_init);
+
+/**
+ * qmi_txn_wait() - wait for a response on a transaction
+ * @txn: transaction handle
+ * @timeout: timeout, in jiffies
+ *
+ * If the transaction is decoded by means of the @ei and @c_struct passed to
+ * qmi_txn_init(), the return value will be the return value of
+ * qmi_decode_message(); otherwise it's up to the specified message handler
+ * to fill out the result.
+ *
+ * Return: the transaction response on success, negative errno on failure.
+ */
+int qmi_txn_wait(struct qmi_txn *txn, unsigned long timeout)
+{
+ struct qmi_handle *qmi = txn->qmi;
+ int ret;
+
+ ret = wait_for_completion_interruptible_timeout(&txn->completion,
+ timeout);
+
+ mutex_lock(&qmi->txn_lock);
+ mutex_lock(&txn->lock);
+ idr_remove(&qmi->txns, txn->id);
+ mutex_unlock(&txn->lock);
+ mutex_unlock(&qmi->txn_lock);
+
+ if (ret < 0)
+ return ret;
+ else if (ret == 0)
+ return -ETIMEDOUT;
+ else
+ return txn->result;
+}
+EXPORT_SYMBOL(qmi_txn_wait);
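
Together with qmi_send_request() (defined below in qmi_interface.c), these
helpers form the blocking request/response pattern. A sketch reusing the
hypothetical ping message from above:

    struct qmi_txn txn;
    struct ping_resp resp;		/* hypothetical, decoded in-place */
    int ret;

    ret = qmi_txn_init(&handle, &txn, ping_resp_ei, &resp);
    if (ret < 0)
    	return ret;

    ret = qmi_send_request(&handle, NULL, &txn, 0x20, 16,
    		       ping_req_ei, &req);
    if (ret < 0) {
    	qmi_txn_cancel(&txn);
    	return ret;
    }

    ret = qmi_txn_wait(&txn, 5 * HZ);	/* 'resp' is valid when ret >= 0 */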
+
+/**
+ * qmi_txn_cancel() - cancel an ongoing transaction
+ * @txn: transaction context
+ */
+void qmi_txn_cancel(struct qmi_txn *txn)
+{
+ struct qmi_handle *qmi = txn->qmi;
+
+ mutex_lock(&qmi->txn_lock);
+ mutex_lock(&txn->lock);
+ idr_remove(&qmi->txns, txn->id);
+ mutex_unlock(&txn->lock);
+ mutex_unlock(&qmi->txn_lock);
+}
+EXPORT_SYMBOL(qmi_txn_cancel);
+
+/**
+ * qmi_invoke_handler() - find and invoke a handler for a message
+ * @qmi: qmi handle
+ * @sq: sockaddr of the sender
+ * @txn: transaction object for the message
+ * @buf: buffer containing the message
+ * @len: length of @buf
+ *
+ * Find handler and invoke handler for the incoming message.
+ */
+static void qmi_invoke_handler(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, const void *buf, size_t len)
+{
+ const struct qmi_msg_handler *handler;
+ const struct qmi_header *hdr = buf;
+ void *dest;
+ int ret;
+
+ if (!qmi->handlers)
+ return;
+
+ for (handler = qmi->handlers; handler->fn; handler++) {
+ if (handler->type == hdr->type &&
+ handler->msg_id == hdr->msg_id)
+ break;
+ }
+
+ if (!handler->fn)
+ return;
+
+ dest = kzalloc(handler->decoded_size, GFP_KERNEL);
+ if (!dest)
+ return;
+
+ ret = qmi_decode_message(buf, len, handler->ei, dest);
+ if (ret < 0)
+ pr_err("failed to decode incoming message\n");
+ else
+ handler->fn(qmi, sq, txn, dest);
+
+ kfree(dest);
+}
+
+/**
+ * qmi_handle_net_reset() - invoked to handle ENETRESET on a QMI handle
+ * @qmi: the QMI context
+ *
+ * As a result of registering a name service with the QRTR all open sockets are
+ * flagged with ENETRESET and this function will be called. The typical case is
+ * the initial boot, where this signals that the local node id has been
+ * configured and as such any bound sockets need to be rebound. So close the
+ * socket, inform the client and re-initialize the socket.
+ *
+ * For clients it's generally sufficient to react to the del_server callbacks,
+ * but server code is expected to treat the net_reset callback as a "bye" from
+ * all nodes.
+ *
+ * Finally the QMI handle will send out registration requests for any lookups
+ * and services.
+ */
+static void qmi_handle_net_reset(struct qmi_handle *qmi)
+{
+ struct sockaddr_qrtr sq;
+ struct qmi_service *svc;
+ struct socket *sock;
+
+ sock = qmi_sock_create(qmi, &sq);
+ if (IS_ERR(sock))
+ return;
+
+ mutex_lock(&qmi->sock_lock);
+ sock_release(qmi->sock);
+ qmi->sock = NULL;
+ mutex_unlock(&qmi->sock_lock);
+
+ qmi_recv_del_server(qmi, -1, -1);
+
+ if (qmi->ops.net_reset)
+ qmi->ops.net_reset(qmi);
+
+ mutex_lock(&qmi->sock_lock);
+ qmi->sock = sock;
+ qmi->sq = sq;
+ mutex_unlock(&qmi->sock_lock);
+
+ list_for_each_entry(svc, &qmi->lookups, list_node)
+ qmi_send_new_lookup(qmi, svc);
+
+ list_for_each_entry(svc, &qmi->services, list_node)
+ qmi_send_new_server(qmi, svc);
+}
+
+static void qmi_handle_message(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq,
+ const void *buf, size_t len)
+{
+ const struct qmi_header *hdr;
+ struct qmi_txn tmp_txn;
+ struct qmi_txn *txn = NULL;
+ int ret;
+
+ if (len < sizeof(*hdr)) {
+ pr_err("ignoring short QMI packet\n");
+ return;
+ }
+
+ hdr = buf;
+
+ /* If this is a response, find the matching transaction handle */
+ if (hdr->type == QMI_RESPONSE) {
+ mutex_lock(&qmi->txn_lock);
+ txn = idr_find(&qmi->txns, hdr->txn_id);
+
+ /* Ignore unexpected responses */
+ if (!txn) {
+ mutex_unlock(&qmi->txn_lock);
+ return;
+ }
+
+ mutex_lock(&txn->lock);
+ mutex_unlock(&qmi->txn_lock);
+
+ if (txn->dest && txn->ei) {
+ ret = qmi_decode_message(buf, len, txn->ei, txn->dest);
+ if (ret < 0)
+ pr_err("failed to decode incoming message\n");
+
+ txn->result = ret;
+ complete(&txn->completion);
+ } else {
+ qmi_invoke_handler(qmi, sq, txn, buf, len);
+ }
+
+ mutex_unlock(&txn->lock);
+ } else {
+ /* Create a txn based on the txn_id of the incoming message */
+ memset(&tmp_txn, 0, sizeof(tmp_txn));
+ tmp_txn.id = hdr->txn_id;
+
+ qmi_invoke_handler(qmi, sq, &tmp_txn, buf, len);
+ }
+}
+
+static void qmi_data_ready_work(struct work_struct *work)
+{
+ struct qmi_handle *qmi = container_of(work, struct qmi_handle, work);
+ struct qmi_ops *ops = &qmi->ops;
+ struct sockaddr_qrtr sq;
+ struct msghdr msg = { .msg_name = &sq, .msg_namelen = sizeof(sq) };
+ struct kvec iv;
+ ssize_t msglen;
+
+ for (;;) {
+ iv.iov_base = qmi->recv_buf;
+ iv.iov_len = qmi->recv_buf_size;
+
+ mutex_lock(&qmi->sock_lock);
+ if (qmi->sock)
+ msglen = kernel_recvmsg(qmi->sock, &msg, &iv, 1,
+ iv.iov_len, MSG_DONTWAIT);
+ else
+ msglen = -EPIPE;
+ mutex_unlock(&qmi->sock_lock);
+ if (msglen == -EAGAIN)
+ break;
+
+ if (msglen == -ENETRESET) {
+ qmi_handle_net_reset(qmi);
+
+ /* The old qmi->sock is gone, our work is done */
+ break;
+ }
+
+ if (msglen < 0) {
+ pr_err("qmi recvmsg failed: %zd\n", msglen);
+ break;
+ }
+
+ if (sq.sq_node == qmi->sq.sq_node &&
+ sq.sq_port == QRTR_PORT_CTRL) {
+ qmi_recv_ctrl_pkt(qmi, qmi->recv_buf, msglen);
+ } else if (ops->msg_handler) {
+ ops->msg_handler(qmi, &sq, qmi->recv_buf, msglen);
+ } else {
+ qmi_handle_message(qmi, &sq, qmi->recv_buf, msglen);
+ }
+ }
+}
+
+static void qmi_data_ready(struct sock *sk)
+{
+ struct qmi_handle *qmi = sk->sk_user_data;
+
+ /*
+ * This will be NULL if we receive data while being in
+ * qmi_handle_release()
+ */
+ if (!qmi)
+ return;
+
+ queue_work(qmi->wq, &qmi->work);
+}
+
+static struct socket *qmi_sock_create(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq)
+{
+ struct socket *sock;
+ int ret;
+
+ ret = sock_create_kern(&init_net, AF_QIPCRTR, SOCK_DGRAM,
+ PF_QIPCRTR, &sock);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ ret = kernel_getsockname(sock, (struct sockaddr *)sq);
+ if (ret < 0) {
+ sock_release(sock);
+ return ERR_PTR(ret);
+ }
+
+ sock->sk->sk_user_data = qmi;
+ sock->sk->sk_data_ready = qmi_data_ready;
+ sock->sk->sk_error_report = qmi_data_ready;
+
+ return sock;
+}
+
+/**
+ * qmi_handle_init() - initialize a QMI client handle
+ * @qmi: QMI handle to initialize
+ * @recv_buf_size: maximum size of incoming message
+ * @ops: reference to callbacks for QRTR notifications
+ * @handlers: NULL-terminated list of QMI message handlers
+ *
+ * This initializes the QMI client handle to allow sending and receiving QMI
+ * messages. As messages are received the appropriate handler will be invoked.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int qmi_handle_init(struct qmi_handle *qmi, size_t recv_buf_size,
+ const struct qmi_ops *ops,
+ const struct qmi_msg_handler *handlers)
+{
+ int ret;
+
+ mutex_init(&qmi->txn_lock);
+ mutex_init(&qmi->sock_lock);
+
+ idr_init(&qmi->txns);
+
+ INIT_LIST_HEAD(&qmi->lookups);
+ INIT_LIST_HEAD(&qmi->lookup_results);
+ INIT_LIST_HEAD(&qmi->services);
+
+ INIT_WORK(&qmi->work, qmi_data_ready_work);
+
+ qmi->handlers = handlers;
+ if (ops)
+ qmi->ops = *ops;
+
+ /* Make room for the header */
+ recv_buf_size += sizeof(struct qmi_header);
+ /* Must also be sufficient to hold a control packet */
+ if (recv_buf_size < sizeof(struct qrtr_ctrl_pkt))
+ recv_buf_size = sizeof(struct qrtr_ctrl_pkt);
+
+ qmi->recv_buf_size = recv_buf_size;
+ qmi->recv_buf = kzalloc(recv_buf_size, GFP_KERNEL);
+ if (!qmi->recv_buf)
+ return -ENOMEM;
+
+ qmi->wq = alloc_workqueue("qmi_msg_handler", WQ_UNBOUND, 1);
+ if (!qmi->wq) {
+ ret = -ENOMEM;
+ goto err_free_recv_buf;
+ }
+
+ qmi->sock = qmi_sock_create(qmi, &qmi->sq);
+ if (IS_ERR(qmi->sock)) {
+ pr_err("failed to create QMI socket\n");
+ ret = PTR_ERR(qmi->sock);
+ goto err_destroy_wq;
+ }
+
+ return 0;
+
+err_destroy_wq:
+ destroy_workqueue(qmi->wq);
+err_free_recv_buf:
+ kfree(qmi->recv_buf);
+
+ return ret;
+}
+EXPORT_SYMBOL(qmi_handle_init);
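
A sketch of initializing a handle that also decodes an unsolicited indication,
assuming a hypothetical indication struct/ei table and message id 0x21:

    static void ping_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
    			struct qmi_txn *txn, const void *decoded)
    {
    	pr_info("ping indication received\n");
    }

    static const struct qmi_msg_handler my_handlers[] = {
    	{
    		.type         = QMI_INDICATION,
    		.msg_id       = 0x21,
    		.ei           = ping_ind_ei,	/* hypothetical */
    		.decoded_size = sizeof(struct ping_ind),
    		.fn           = ping_ind_cb,
    	},
    	{}
    };

    ret = qmi_handle_init(&handle, 64, &my_ops, my_handlers);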
+
+/**
+ * qmi_handle_release() - release the QMI client handle
+ * @qmi: QMI client handle
+ *
+ * This closes the underlying socket and stops any handling of QMI messages.
+ */
+void qmi_handle_release(struct qmi_handle *qmi)
+{
+ struct socket *sock = qmi->sock;
+ struct qmi_service *svc, *tmp;
+
+ sock->sk->sk_user_data = NULL;
+ cancel_work_sync(&qmi->work);
+
+ qmi_recv_del_server(qmi, -1, -1);
+
+ mutex_lock(&qmi->sock_lock);
+ sock_release(sock);
+ qmi->sock = NULL;
+ mutex_unlock(&qmi->sock_lock);
+
+ destroy_workqueue(qmi->wq);
+
+ idr_destroy(&qmi->txns);
+
+ kfree(qmi->recv_buf);
+
+ /* Free registered lookup requests */
+ list_for_each_entry_safe(svc, tmp, &qmi->lookups, list_node) {
+ list_del(&svc->list_node);
+ kfree(svc);
+ }
+
+ /* Free registered service information */
+ list_for_each_entry_safe(svc, tmp, &qmi->services, list_node) {
+ list_del(&svc->list_node);
+ kfree(svc);
+ }
+}
+EXPORT_SYMBOL(qmi_handle_release);
+
+/**
+ * qmi_send_message() - send a QMI message
+ * @qmi: QMI client handle
+ * @sq: destination sockaddr
+ * @txn: transaction object to use for the message
+ * @type: type of message to send
+ * @msg_id: message id
+ * @len: max length of the QMI message
+ * @ei: QMI message description
+ * @c_struct: object to be encoded
+ *
+ * This function encodes @c_struct using @ei into a message of type @type,
+ * carrying @msg_id and the transaction id from @txn, in a buffer of maximum
+ * size @len, and sends this to @sq.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static ssize_t qmi_send_message(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq, struct qmi_txn *txn,
+ int type, int msg_id, size_t len,
+ struct qmi_elem_info *ei, const void *c_struct)
+{
+ struct msghdr msghdr = {};
+ struct kvec iv;
+ void *msg;
+ int ret;
+
+ msg = qmi_encode_message(type,
+ msg_id, &len,
+ txn->id, ei,
+ c_struct);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ iv.iov_base = msg;
+ iv.iov_len = len;
+
+ if (sq) {
+ msghdr.msg_name = sq;
+ msghdr.msg_namelen = sizeof(*sq);
+ }
+
+ mutex_lock(&qmi->sock_lock);
+ if (qmi->sock) {
+ ret = kernel_sendmsg(qmi->sock, &msghdr, &iv, 1, len);
+ if (ret < 0)
+ pr_err("failed to send QMI message\n");
+ } else {
+ ret = -EPIPE;
+ }
+ mutex_unlock(&qmi->sock_lock);
+
+ kfree(msg);
+
+ return ret < 0 ? ret : 0;
+}
+
+/**
+ * qmi_send_request() - send a request QMI message
+ * @qmi: QMI client handle
+ * @sq: destination sockaddr
+ * @txn: transaction object to use for the message
+ * @msg_id: message id
+ * @len: max length of the QMI message
+ * @ei: QMI message description
+ * @c_struct: object to be encoded
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+ssize_t qmi_send_request(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, int msg_id, size_t len,
+ struct qmi_elem_info *ei, const void *c_struct)
+{
+ return qmi_send_message(qmi, sq, txn, QMI_REQUEST, msg_id, len, ei,
+ c_struct);
+}
+EXPORT_SYMBOL(qmi_send_request);
+
+/**
+ * qmi_send_response() - send a response QMI message
+ * @qmi: QMI client handle
+ * @sq: destination sockaddr
+ * @txn: transaction object to use for the message
+ * @msg_id: message id
+ * @len: max length of the QMI message
+ * @ei: QMI message description
+ * @c_struct: object to be encoded
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+ssize_t qmi_send_response(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, int msg_id, size_t len,
+ struct qmi_elem_info *ei, const void *c_struct)
+{
+ return qmi_send_message(qmi, sq, txn, QMI_RESPONSE, msg_id, len, ei,
+ c_struct);
+}
+EXPORT_SYMBOL(qmi_send_response);
+
+/**
+ * qmi_send_indication() - send an indication QMI message
+ * @qmi: QMI client handle
+ * @sq: destination sockaddr
+ * @msg_id: message id
+ * @len: max length of the QMI message
+ * @ei: QMI message description
+ * @c_struct: object to be encoded
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+ssize_t qmi_send_indication(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
+ int msg_id, size_t len, struct qmi_elem_info *ei,
+ const void *c_struct)
+{
+ struct qmi_txn txn;
+ ssize_t rval;
+ int ret;
+
+ ret = qmi_txn_init(qmi, &txn, NULL, NULL);
+ if (ret < 0)
+ return ret;
+
+ rval = qmi_send_message(qmi, sq, &txn, QMI_INDICATION, msg_id, len, ei,
+ c_struct);
+
+ /* We don't care about future messages on this txn */
+ qmi_txn_cancel(&txn);
+
+ return rval;
+}
+EXPORT_SYMBOL(qmi_send_indication);
diff --git a/drivers/soc/qcom/rmtfs_mem.c b/drivers/soc/qcom/rmtfs_mem.c
new file mode 100644
index 000000000..97bb5989a
--- /dev/null
+++ b/drivers/soc/qcom/rmtfs_mem.c
@@ -0,0 +1,311 @@
+/*
+ * Copyright (c) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/cdev.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/qcom_scm.h>
+
+#define QCOM_RMTFS_MEM_DEV_MAX (MINORMASK + 1)
+
+static dev_t qcom_rmtfs_mem_major;
+
+struct qcom_rmtfs_mem {
+ struct device dev;
+ struct cdev cdev;
+
+ void *base;
+ phys_addr_t addr;
+ phys_addr_t size;
+
+ unsigned int client_id;
+
+ unsigned int perms;
+};
+
+static ssize_t qcom_rmtfs_mem_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+static DEVICE_ATTR(phys_addr, 0400, qcom_rmtfs_mem_show, NULL);
+static DEVICE_ATTR(size, 0400, qcom_rmtfs_mem_show, NULL);
+static DEVICE_ATTR(client_id, 0400, qcom_rmtfs_mem_show, NULL);
+
+static ssize_t qcom_rmtfs_mem_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = container_of(dev,
+ struct qcom_rmtfs_mem,
+ dev);
+
+ if (attr == &dev_attr_phys_addr)
+ return sprintf(buf, "%pa\n", &rmtfs_mem->addr);
+ if (attr == &dev_attr_size)
+ return sprintf(buf, "%pa\n", &rmtfs_mem->size);
+ if (attr == &dev_attr_client_id)
+ return sprintf(buf, "%d\n", rmtfs_mem->client_id);
+
+ return -EINVAL;
+}
+
+static struct attribute *qcom_rmtfs_mem_attrs[] = {
+ &dev_attr_phys_addr.attr,
+ &dev_attr_size.attr,
+ &dev_attr_client_id.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(qcom_rmtfs_mem);
+
+static int qcom_rmtfs_mem_open(struct inode *inode, struct file *filp)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = container_of(inode->i_cdev,
+ struct qcom_rmtfs_mem,
+ cdev);
+
+ get_device(&rmtfs_mem->dev);
+ filp->private_data = rmtfs_mem;
+
+ return 0;
+}
+static ssize_t qcom_rmtfs_mem_read(struct file *filp,
+ char __user *buf, size_t count, loff_t *f_pos)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = filp->private_data;
+
+ if (*f_pos >= rmtfs_mem->size)
+ return 0;
+
+ if (*f_pos + count >= rmtfs_mem->size)
+ count = rmtfs_mem->size - *f_pos;
+
+ if (copy_to_user(buf, rmtfs_mem->base + *f_pos, count))
+ return -EFAULT;
+
+ *f_pos += count;
+ return count;
+}
+
+static ssize_t qcom_rmtfs_mem_write(struct file *filp,
+ const char __user *buf, size_t count,
+ loff_t *f_pos)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = filp->private_data;
+
+ if (*f_pos >= rmtfs_mem->size)
+ return 0;
+
+ if (*f_pos + count >= rmtfs_mem->size)
+ count = rmtfs_mem->size - *f_pos;
+
+ if (copy_from_user(rmtfs_mem->base + *f_pos, buf, count))
+ return -EFAULT;
+
+ *f_pos += count;
+ return count;
+}
+
+static int qcom_rmtfs_mem_release(struct inode *inode, struct file *filp)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = filp->private_data;
+
+ put_device(&rmtfs_mem->dev);
+
+ return 0;
+}
+
+static const struct file_operations qcom_rmtfs_mem_fops = {
+ .owner = THIS_MODULE,
+ .open = qcom_rmtfs_mem_open,
+ .read = qcom_rmtfs_mem_read,
+ .write = qcom_rmtfs_mem_write,
+ .release = qcom_rmtfs_mem_release,
+ .llseek = default_llseek,
+};
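
The char device exposes the shared region to the userspace rmtfs daemon. A
userspace access sketch, assuming client id 1 (the device name follows the
probe code below):

    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
    	char buf[16];
    	int fd = open("/dev/qcom_rmtfs_mem1", O_RDWR);

    	if (fd < 0 || read(fd, buf, sizeof(buf)) < 0)
    		return 1;
    	close(fd);
    	return 0;
    }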
+
+static void qcom_rmtfs_mem_release_device(struct device *dev)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = container_of(dev,
+ struct qcom_rmtfs_mem,
+ dev);
+
+ kfree(rmtfs_mem);
+}
+
+static int qcom_rmtfs_mem_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct qcom_scm_vmperm perms[2];
+ struct reserved_mem *rmem;
+ struct qcom_rmtfs_mem *rmtfs_mem;
+ u32 client_id;
+ u32 vmid;
+ int ret;
+
+ rmem = of_reserved_mem_lookup(node);
+ if (!rmem) {
+ dev_err(&pdev->dev, "failed to acquire memory region\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "qcom,client-id", &client_id);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to parse \"qcom,client-id\"\n");
+ return ret;
+
+ }
+
+ rmtfs_mem = kzalloc(sizeof(*rmtfs_mem), GFP_KERNEL);
+ if (!rmtfs_mem)
+ return -ENOMEM;
+
+ rmtfs_mem->addr = rmem->base;
+ rmtfs_mem->client_id = client_id;
+ rmtfs_mem->size = rmem->size;
+
+ device_initialize(&rmtfs_mem->dev);
+ rmtfs_mem->dev.parent = &pdev->dev;
+ rmtfs_mem->dev.groups = qcom_rmtfs_mem_groups;
+ rmtfs_mem->dev.release = qcom_rmtfs_mem_release_device;
+
+ rmtfs_mem->base = devm_memremap(&rmtfs_mem->dev, rmtfs_mem->addr,
+ rmtfs_mem->size, MEMREMAP_WC);
+ if (IS_ERR(rmtfs_mem->base)) {
+ dev_err(&pdev->dev, "failed to remap rmtfs_mem region\n");
+ ret = PTR_ERR(rmtfs_mem->base);
+ goto put_device;
+ }
+
+ cdev_init(&rmtfs_mem->cdev, &qcom_rmtfs_mem_fops);
+ rmtfs_mem->cdev.owner = THIS_MODULE;
+
+ dev_set_name(&rmtfs_mem->dev, "qcom_rmtfs_mem%d", client_id);
+ rmtfs_mem->dev.id = client_id;
+ rmtfs_mem->dev.devt = MKDEV(MAJOR(qcom_rmtfs_mem_major), client_id);
+
+ ret = cdev_device_add(&rmtfs_mem->cdev, &rmtfs_mem->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add cdev: %d\n", ret);
+ goto put_device;
+ }
+
+ ret = of_property_read_u32(node, "qcom,vmid", &vmid);
+ if (ret < 0 && ret != -EINVAL) {
+ dev_err(&pdev->dev, "failed to parse qcom,vmid\n");
+ goto remove_cdev;
+ } else if (!ret) {
+ if (!qcom_scm_is_available()) {
+ ret = -EPROBE_DEFER;
+ goto remove_cdev;
+ }
+
+ perms[0].vmid = QCOM_SCM_VMID_HLOS;
+ perms[0].perm = QCOM_SCM_PERM_RW;
+ perms[1].vmid = vmid;
+ perms[1].perm = QCOM_SCM_PERM_RW;
+
+ rmtfs_mem->perms = BIT(QCOM_SCM_VMID_HLOS);
+ ret = qcom_scm_assign_mem(rmtfs_mem->addr, rmtfs_mem->size,
+ &rmtfs_mem->perms, perms, 2);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "assign memory failed\n");
+ goto remove_cdev;
+ }
+ }
+
+ dev_set_drvdata(&pdev->dev, rmtfs_mem);
+
+ return 0;
+
+remove_cdev:
+ cdev_device_del(&rmtfs_mem->cdev, &rmtfs_mem->dev);
+put_device:
+ put_device(&rmtfs_mem->dev);
+
+ return ret;
+}
+
+static int qcom_rmtfs_mem_remove(struct platform_device *pdev)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = dev_get_drvdata(&pdev->dev);
+ struct qcom_scm_vmperm perm;
+
+ if (rmtfs_mem->perms) {
+ perm.vmid = QCOM_SCM_VMID_HLOS;
+ perm.perm = QCOM_SCM_PERM_RW;
+
+ qcom_scm_assign_mem(rmtfs_mem->addr, rmtfs_mem->size,
+ &rmtfs_mem->perms, &perm, 1);
+ }
+
+ cdev_device_del(&rmtfs_mem->cdev, &rmtfs_mem->dev);
+ put_device(&rmtfs_mem->dev);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_rmtfs_mem_of_match[] = {
+ { .compatible = "qcom,rmtfs-mem" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_rmtfs_mem_of_match);
+
+static struct platform_driver qcom_rmtfs_mem_driver = {
+ .probe = qcom_rmtfs_mem_probe,
+ .remove = qcom_rmtfs_mem_remove,
+ .driver = {
+ .name = "qcom_rmtfs_mem",
+ .of_match_table = qcom_rmtfs_mem_of_match,
+ },
+};
+
+static int qcom_rmtfs_mem_init(void)
+{
+ int ret;
+
+ ret = alloc_chrdev_region(&qcom_rmtfs_mem_major, 0,
+ QCOM_RMTFS_MEM_DEV_MAX, "qcom_rmtfs_mem");
+ if (ret < 0) {
+ pr_err("qcom_rmtfs_mem: failed to allocate char dev region\n");
+ return ret;
+ }
+
+ ret = platform_driver_register(&qcom_rmtfs_mem_driver);
+ if (ret < 0) {
+ pr_err("qcom_rmtfs_mem: failed to register rmtfs_mem driver\n");
+ unregister_chrdev_region(qcom_rmtfs_mem_major,
+ QCOM_RMTFS_MEM_DEV_MAX);
+ }
+
+ return ret;
+}
+module_init(qcom_rmtfs_mem_init);
+
+static void qcom_rmtfs_mem_exit(void)
+{
+ platform_driver_unregister(&qcom_rmtfs_mem_driver);
+ unregister_chrdev_region(qcom_rmtfs_mem_major, QCOM_RMTFS_MEM_DEV_MAX);
+}
+module_exit(qcom_rmtfs_mem_exit);
+
+MODULE_AUTHOR("Linaro Ltd");
+MODULE_DESCRIPTION("Qualcomm Remote Filesystem memory driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/rpmh-internal.h b/drivers/soc/qcom/rpmh-internal.h
new file mode 100644
index 000000000..a7bbbb679
--- /dev/null
+++ b/drivers/soc/qcom/rpmh-internal.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+
+#ifndef __RPM_INTERNAL_H__
+#define __RPM_INTERNAL_H__
+
+#include <linux/bitmap.h>
+#include <soc/qcom/tcs.h>
+
+#define TCS_TYPE_NR 4
+#define MAX_CMDS_PER_TCS 16
+#define MAX_TCS_PER_TYPE 3
+#define MAX_TCS_NR (MAX_TCS_PER_TYPE * TCS_TYPE_NR)
+#define MAX_TCS_SLOTS (MAX_CMDS_PER_TCS * MAX_TCS_PER_TYPE)
+
+struct rsc_drv;
+
+/**
+ * struct tcs_group: group of Trigger Command Sets (TCS) to send state requests
+ * to the controller
+ *
+ * @drv: the controller
+ * @type: type of the TCS in this group - active, sleep, wake
+ * @mask: mask of the TCSes relative to all the TCSes in the RSC
+ * @offset: start of the TCS group relative to the TCSes in the RSC
+ * @num_tcs: number of TCSes in this type
+ * @ncpt: number of commands in each TCS
+ * @lock: lock for synchronizing writes to this TCS group
+ * @req: requests that are sent from the TCS
+ * @cmd_cache: flattened cache of cmds in sleep/wake TCS
+ * @slots: indicates which of @cmd_cache are occupied
+ */
+struct tcs_group {
+ struct rsc_drv *drv;
+ int type;
+ u32 mask;
+ u32 offset;
+ int num_tcs;
+ int ncpt;
+ spinlock_t lock;
+ const struct tcs_request *req[MAX_TCS_PER_TYPE];
+ u32 *cmd_cache;
+ DECLARE_BITMAP(slots, MAX_TCS_SLOTS);
+};
+
+/**
+ * struct rpmh_request: the message to be sent to rpmh-rsc
+ *
+ * @msg: the request
+ * @cmd: the payload that will be part of the @msg
+ * @completion: triggered when request is done
+ * @dev: the device making the request
+ * @err: error returned by the controller
+ * @needs_free: whether this request object was dynamically allocated and
+ *              must be freed once the request completes
+ */
+struct rpmh_request {
+ struct tcs_request msg;
+ struct tcs_cmd cmd[MAX_RPMH_PAYLOAD];
+ struct completion *completion;
+ const struct device *dev;
+ int err;
+ bool needs_free;
+};
+
+/**
+ * struct rpmh_ctrlr: our representation of the controller
+ *
+ * @cache: the list of cached requests
+ * @cache_lock: synchronize access to the cache data
+ * @dirty: was the cache updated since flush
+ * @batch_cache: Cache sleep and wake requests sent as batch
+ */
+struct rpmh_ctrlr {
+ struct list_head cache;
+ spinlock_t cache_lock;
+ bool dirty;
+ struct list_head batch_cache;
+};
+
+/**
+ * struct rsc_drv: the Direct Resource Voter (DRV) of the
+ * Resource State Coordinator controller (RSC)
+ *
+ * @name: controller identifier
+ * @tcs_base: start address of the TCS registers in this controller
+ * @id: instance id in the controller (Direct Resource Voter)
+ * @num_tcs: number of TCSes in this DRV
+ * @tcs: TCS groups
+ * @tcs_in_use: s/w state of the TCS
+ * @lock: synchronize state of the controller
+ * @client: handle to the DRV's client.
+ */
+struct rsc_drv {
+ const char *name;
+ void __iomem *tcs_base;
+ int id;
+ int num_tcs;
+ struct tcs_group tcs[TCS_TYPE_NR];
+ DECLARE_BITMAP(tcs_in_use, MAX_TCS_NR);
+ spinlock_t lock;
+ struct rpmh_ctrlr client;
+};
+
+int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg);
+int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv,
+ const struct tcs_request *msg);
+int rpmh_rsc_invalidate(struct rsc_drv *drv);
+
+void rpmh_tx_done(const struct tcs_request *msg, int r);
+
+#endif /* __RPM_INTERNAL_H__ */
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
new file mode 100644
index 000000000..519d19f57
--- /dev/null
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -0,0 +1,726 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME
+
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <soc/qcom/cmd-db.h>
+#include <soc/qcom/tcs.h>
+#include <dt-bindings/soc/qcom,rpmh-rsc.h>
+
+#include "rpmh-internal.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace-rpmh.h"
+
+#define RSC_DRV_TCS_OFFSET 672
+#define RSC_DRV_CMD_OFFSET 20
+
+/* DRV Configuration Information Register */
+#define DRV_PRNT_CHLD_CONFIG 0x0C
+#define DRV_NUM_TCS_MASK 0x3F
+#define DRV_NUM_TCS_SHIFT 6
+#define DRV_NCPT_MASK 0x1F
+#define DRV_NCPT_SHIFT 27
+
+/* Register offsets */
+#define RSC_DRV_IRQ_ENABLE 0x00
+#define RSC_DRV_IRQ_STATUS 0x04
+#define RSC_DRV_IRQ_CLEAR 0x08
+#define RSC_DRV_CMD_WAIT_FOR_CMPL 0x10
+#define RSC_DRV_CONTROL 0x14
+#define RSC_DRV_STATUS 0x18
+#define RSC_DRV_CMD_ENABLE 0x1C
+#define RSC_DRV_CMD_MSGID 0x30
+#define RSC_DRV_CMD_ADDR 0x34
+#define RSC_DRV_CMD_DATA 0x38
+#define RSC_DRV_CMD_STATUS 0x3C
+#define RSC_DRV_CMD_RESP_DATA 0x40
+
+#define TCS_AMC_MODE_ENABLE BIT(16)
+#define TCS_AMC_MODE_TRIGGER BIT(24)
+
+/* TCS CMD register bit mask */
+#define CMD_MSGID_LEN 8
+#define CMD_MSGID_RESP_REQ BIT(8)
+#define CMD_MSGID_WRITE BIT(16)
+#define CMD_STATUS_ISSUED BIT(8)
+#define CMD_STATUS_COMPL BIT(16)
+
+static u32 read_tcs_reg(struct rsc_drv *drv, int reg, int tcs_id, int cmd_id)
+{
+ return readl_relaxed(drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id +
+ RSC_DRV_CMD_OFFSET * cmd_id);
+}
+
+static void write_tcs_cmd(struct rsc_drv *drv, int reg, int tcs_id, int cmd_id,
+ u32 data)
+{
+ writel_relaxed(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id +
+ RSC_DRV_CMD_OFFSET * cmd_id);
+}
+
+static void write_tcs_reg(struct rsc_drv *drv, int reg, int tcs_id, u32 data)
+{
+ writel_relaxed(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id);
+}
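
These accessors linearize a (tcs_id, cmd_id) pair into a byte offset from
tcs_base using the stride constants above. A worked example with the values
from this patch:

    /* CMD_ADDR register of command 2 in TCS 1 */
    u32 off = RSC_DRV_CMD_ADDR + RSC_DRV_TCS_OFFSET * 1 + RSC_DRV_CMD_OFFSET * 2;
    /* = 0x34 + 672 + 40 = 0x2fc */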
+
+static void write_tcs_reg_sync(struct rsc_drv *drv, int reg, int tcs_id,
+ u32 data)
+{
+ writel(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id);
+ for (;;) {
+ if (data == readl(drv->tcs_base + reg +
+ RSC_DRV_TCS_OFFSET * tcs_id))
+ break;
+ udelay(1);
+ }
+}
+
+static bool tcs_is_free(struct rsc_drv *drv, int tcs_id)
+{
+ return !test_bit(tcs_id, drv->tcs_in_use) &&
+ read_tcs_reg(drv, RSC_DRV_STATUS, tcs_id, 0);
+}
+
+static struct tcs_group *get_tcs_of_type(struct rsc_drv *drv, int type)
+{
+ return &drv->tcs[type];
+}
+
+static int tcs_invalidate(struct rsc_drv *drv, int type)
+{
+ int m;
+ struct tcs_group *tcs;
+
+ tcs = get_tcs_of_type(drv, type);
+
+ spin_lock(&tcs->lock);
+ if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS)) {
+ spin_unlock(&tcs->lock);
+ return 0;
+ }
+
+ for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++) {
+ if (!tcs_is_free(drv, m)) {
+ spin_unlock(&tcs->lock);
+ return -EAGAIN;
+ }
+ write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, m, 0);
+ write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, m, 0);
+ }
+ bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
+ spin_unlock(&tcs->lock);
+
+ return 0;
+}
+
+/**
+ * rpmh_rsc_invalidate: Invalidate sleep and wake TCSes
+ *
+ * @drv: the RSC controller
+ */
+int rpmh_rsc_invalidate(struct rsc_drv *drv)
+{
+ int ret;
+
+ ret = tcs_invalidate(drv, SLEEP_TCS);
+ if (!ret)
+ ret = tcs_invalidate(drv, WAKE_TCS);
+
+ return ret;
+}
+
+static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv,
+ const struct tcs_request *msg)
+{
+ int type;
+ struct tcs_group *tcs;
+
+ switch (msg->state) {
+ case RPMH_ACTIVE_ONLY_STATE:
+ type = ACTIVE_TCS;
+ break;
+ case RPMH_WAKE_ONLY_STATE:
+ type = WAKE_TCS;
+ break;
+ case RPMH_SLEEP_STATE:
+ type = SLEEP_TCS;
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+ * If we are making an active request on an RSC that does not have a
+ * dedicated TCS for active state use, then re-purpose a wake TCS to
+ * send active votes.
+ */
+ tcs = get_tcs_of_type(drv, type);
+ if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs)
+ tcs = get_tcs_of_type(drv, WAKE_TCS);
+
+ return tcs;
+}
+
+static const struct tcs_request *get_req_from_tcs(struct rsc_drv *drv,
+ int tcs_id)
+{
+ struct tcs_group *tcs;
+ int i;
+
+ for (i = 0; i < TCS_TYPE_NR; i++) {
+ tcs = &drv->tcs[i];
+ if (tcs->mask & BIT(tcs_id))
+ return tcs->req[tcs_id - tcs->offset];
+ }
+
+ return NULL;
+}
+
+static void __tcs_set_trigger(struct rsc_drv *drv, int tcs_id, bool trigger)
+{
+ u32 enable;
+
+ /*
+ * HW req: Clear the DRV_CONTROL and enable TCS again
+ * While clearing ensure that the AMC mode trigger is cleared
+ * and then the mode enable is cleared.
+ */
+ enable = read_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id, 0);
+ enable &= ~TCS_AMC_MODE_TRIGGER;
+ write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
+ enable &= ~TCS_AMC_MODE_ENABLE;
+ write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
+
+ if (trigger) {
+ /* Enable the AMC mode on the TCS and then trigger the TCS */
+ enable = TCS_AMC_MODE_ENABLE;
+ write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
+ enable |= TCS_AMC_MODE_TRIGGER;
+ write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
+ }
+}
+
+static void enable_tcs_irq(struct rsc_drv *drv, int tcs_id, bool enable)
+{
+ u32 data;
+
+ data = read_tcs_reg(drv, RSC_DRV_IRQ_ENABLE, 0, 0);
+ if (enable)
+ data |= BIT(tcs_id);
+ else
+ data &= ~BIT(tcs_id);
+ write_tcs_reg(drv, RSC_DRV_IRQ_ENABLE, 0, data);
+}
+
+/**
+ * tcs_tx_done: TX Done interrupt handler
+ *
+ * @irq: the IRQ number (unused)
+ * @p: the RSC controller (struct rsc_drv)
+ */
+static irqreturn_t tcs_tx_done(int irq, void *p)
+{
+ struct rsc_drv *drv = p;
+ int i, j, err = 0;
+ unsigned long irq_status;
+ const struct tcs_request *req;
+ struct tcs_cmd *cmd;
+
+ irq_status = read_tcs_reg(drv, RSC_DRV_IRQ_STATUS, 0, 0);
+
+ for_each_set_bit(i, &irq_status, BITS_PER_LONG) {
+ req = get_req_from_tcs(drv, i);
+ if (!req) {
+ WARN_ON(1);
+ goto skip;
+ }
+
+ err = 0;
+ for (j = 0; j < req->num_cmds; j++) {
+ u32 sts;
+
+ cmd = &req->cmds[j];
+ sts = read_tcs_reg(drv, RSC_DRV_CMD_STATUS, i, j);
+ if (!(sts & CMD_STATUS_ISSUED) ||
+ ((req->wait_for_compl || cmd->wait) &&
+ !(sts & CMD_STATUS_COMPL))) {
+ pr_err("Incomplete request: %s: addr=%#x data=%#x",
+ drv->name, cmd->addr, cmd->data);
+ err = -EIO;
+ }
+ }
+
+ trace_rpmh_tx_done(drv, i, req, err);
+
+ /*
+ * If wake tcs was re-purposed for sending active
+ * votes, clear AMC trigger & enable modes and
+ * disable interrupt for this TCS
+ */
+ if (!drv->tcs[ACTIVE_TCS].num_tcs)
+ __tcs_set_trigger(drv, i, false);
+skip:
+ /* Reclaim the TCS */
+ write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, i, 0);
+ write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, i, 0);
+ write_tcs_reg(drv, RSC_DRV_IRQ_CLEAR, 0, BIT(i));
+ spin_lock(&drv->lock);
+ clear_bit(i, drv->tcs_in_use);
+ /*
+ * Disable interrupt for WAKE TCS to avoid being
+ * spammed with interrupts coming when the solver
+ * sends its wake votes.
+ */
+ if (!drv->tcs[ACTIVE_TCS].num_tcs)
+ enable_tcs_irq(drv, i, false);
+ spin_unlock(&drv->lock);
+ if (req)
+ rpmh_tx_done(req, err);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
+ const struct tcs_request *msg)
+{
+ u32 msgid, cmd_msgid;
+ u32 cmd_enable = 0;
+ u32 cmd_complete;
+ struct tcs_cmd *cmd;
+ int i, j;
+
+ cmd_msgid = CMD_MSGID_LEN;
+ cmd_msgid |= msg->wait_for_compl ? CMD_MSGID_RESP_REQ : 0;
+ cmd_msgid |= CMD_MSGID_WRITE;
+
+ cmd_complete = read_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0);
+
+ for (i = 0, j = cmd_id; i < msg->num_cmds; i++, j++) {
+ cmd = &msg->cmds[i];
+ cmd_enable |= BIT(j);
+ cmd_complete |= cmd->wait << j;
+ msgid = cmd_msgid;
+ msgid |= cmd->wait ? CMD_MSGID_RESP_REQ : 0;
+
+ write_tcs_cmd(drv, RSC_DRV_CMD_MSGID, tcs_id, j, msgid);
+ write_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j, cmd->addr);
+ write_tcs_cmd(drv, RSC_DRV_CMD_DATA, tcs_id, j, cmd->data);
+ trace_rpmh_send_msg(drv, tcs_id, j, msgid, cmd);
+ }
+
+ write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, cmd_complete);
+ cmd_enable |= read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
+ write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, cmd_enable);
+}
+
+static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
+ const struct tcs_request *msg)
+{
+ unsigned long curr_enabled;
+ u32 addr;
+ int i, j, k;
+ int tcs_id = tcs->offset;
+
+ for (i = 0; i < tcs->num_tcs; i++, tcs_id++) {
+ if (tcs_is_free(drv, tcs_id))
+ continue;
+
+ curr_enabled = read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
+
+ for_each_set_bit(j, &curr_enabled, MAX_CMDS_PER_TCS) {
+ addr = read_tcs_reg(drv, RSC_DRV_CMD_ADDR, tcs_id, j);
+ for (k = 0; k < msg->num_cmds; k++) {
+ if (addr == msg->cmds[k].addr)
+ return -EBUSY;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int find_free_tcs(struct tcs_group *tcs)
+{
+ int i;
+
+ for (i = 0; i < tcs->num_tcs; i++) {
+ if (tcs_is_free(tcs->drv, tcs->offset + i))
+ return tcs->offset + i;
+ }
+
+ return -EBUSY;
+}
+
+static int tcs_write(struct rsc_drv *drv, const struct tcs_request *msg)
+{
+ struct tcs_group *tcs;
+ int tcs_id;
+ unsigned long flags;
+ int ret;
+
+ tcs = get_tcs_for_msg(drv, msg);
+ if (IS_ERR(tcs))
+ return PTR_ERR(tcs);
+
+ spin_lock_irqsave(&tcs->lock, flags);
+ spin_lock(&drv->lock);
+ /*
+ * The h/w does not like it if we send a request to the same address
+ * when one is already in-flight or being processed.
+ */
+ ret = check_for_req_inflight(drv, tcs, msg);
+ if (ret) {
+ spin_unlock(&drv->lock);
+ goto done_write;
+ }
+
+ tcs_id = find_free_tcs(tcs);
+ if (tcs_id < 0) {
+ ret = tcs_id;
+ spin_unlock(&drv->lock);
+ goto done_write;
+ }
+
+ tcs->req[tcs_id - tcs->offset] = msg;
+ set_bit(tcs_id, drv->tcs_in_use);
+ if (msg->state == RPMH_ACTIVE_ONLY_STATE && tcs->type != ACTIVE_TCS) {
+ /*
+ * Clear previously programmed WAKE commands in selected
+ * repurposed TCS to avoid triggering them. tcs->slots will be
+ * cleaned from rpmh_flush() by invoking rpmh_rsc_invalidate()
+ */
+ write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
+ write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0);
+ enable_tcs_irq(drv, tcs_id, true);
+ }
+ spin_unlock(&drv->lock);
+
+ __tcs_buffer_write(drv, tcs_id, 0, msg);
+ __tcs_set_trigger(drv, tcs_id, true);
+
+done_write:
+ spin_unlock_irqrestore(&tcs->lock, flags);
+ return ret;
+}
+
+/**
+ * rpmh_rsc_send_data: Validate the incoming message and write to the
+ * appropriate TCS block.
+ *
+ * @drv: the controller
+ * @msg: the data to be sent
+ *
+ * Return: 0 on success, -EINVAL on error.
+ * Note: This call blocks until the request is accepted by a free TCS.
+ */
+int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
+{
+ int ret;
+
+ if (!msg || !msg->cmds || !msg->num_cmds ||
+ msg->num_cmds > MAX_RPMH_PAYLOAD) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ do {
+ ret = tcs_write(drv, msg);
+ if (ret == -EBUSY) {
+ pr_info_ratelimited("TCS Busy, retrying RPMH message send: addr=%#x\n",
+ msg->cmds[0].addr);
+ udelay(10);
+ }
+ } while (ret == -EBUSY);
+
+ return ret;
+}
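
A sketch of a request as built by the rpmh layer (rpmh.c, added below); the
resource address is illustrative and would normally come from cmd-db:

    struct tcs_cmd cmd = {
    	.addr = 0x30000,	/* hypothetical resource address */
    	.data = 0x1,
    	.wait = true,
    };
    struct tcs_request msg = {
    	.state = RPMH_ACTIVE_ONLY_STATE,
    	.wait_for_compl = true,
    	.num_cmds = 1,
    	.cmds = &cmd,
    };

    ret = rpmh_rsc_send_data(drv, &msg);	/* retries while TCSes are busy */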
+
+static int find_match(const struct tcs_group *tcs, const struct tcs_cmd *cmd,
+ int len)
+{
+ int i, j;
+
+ /* Check for already cached commands */
+ for_each_set_bit(i, tcs->slots, MAX_TCS_SLOTS) {
+ if (tcs->cmd_cache[i] != cmd[0].addr)
+ continue;
+ if (i + len >= tcs->num_tcs * tcs->ncpt)
+ goto seq_err;
+ for (j = 0; j < len; j++) {
+ if (tcs->cmd_cache[i + j] != cmd[j].addr)
+ goto seq_err;
+ }
+ return i;
+ }
+
+ return -ENODATA;
+
+seq_err:
+ WARN(1, "Message does not match previous sequence.\n");
+ return -EINVAL;
+}
+
+static int find_slots(struct tcs_group *tcs, const struct tcs_request *msg,
+ int *tcs_id, int *cmd_id)
+{
+ int slot, offset;
+ int i = 0;
+
+ /* Find if we already have the msg in our TCS */
+ slot = find_match(tcs, msg->cmds, msg->num_cmds);
+ if (slot >= 0)
+ goto copy_data;
+
+	/* Loop until the full payload fits within a single TCS */
+ do {
+ slot = bitmap_find_next_zero_area(tcs->slots, MAX_TCS_SLOTS,
+ i, msg->num_cmds, 0);
+ if (slot == tcs->num_tcs * tcs->ncpt)
+ return -ENOMEM;
+ i += tcs->ncpt;
+ } while (slot + msg->num_cmds - 1 >= i);
+
+copy_data:
+ bitmap_set(tcs->slots, slot, msg->num_cmds);
+ /* Copy the addresses of the resources over to the slots */
+ for (i = 0; i < msg->num_cmds; i++)
+ tcs->cmd_cache[slot + i] = msg->cmds[i].addr;
+
+ offset = slot / tcs->ncpt;
+ *tcs_id = offset + tcs->offset;
+ *cmd_id = slot % tcs->ncpt;
+
+ return 0;
+}
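
A worked example of the slot-to-TCS mapping, assuming ncpt = 16 and a free run
found at slot 18 (numbers illustrative):

    /* tcs_id = tcs->offset + 18 / 16 = tcs->offset + 1
     * cmd_id = 18 % 16 = 2
     */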
+
+static int tcs_ctrl_write(struct rsc_drv *drv, const struct tcs_request *msg)
+{
+ struct tcs_group *tcs;
+ int tcs_id = 0, cmd_id = 0;
+ unsigned long flags;
+ int ret;
+
+ tcs = get_tcs_for_msg(drv, msg);
+ if (IS_ERR(tcs))
+ return PTR_ERR(tcs);
+
+ spin_lock_irqsave(&tcs->lock, flags);
+ /* find the TCS id and the command in the TCS to write to */
+ ret = find_slots(tcs, msg, &tcs_id, &cmd_id);
+ if (!ret)
+ __tcs_buffer_write(drv, tcs_id, cmd_id, msg);
+ spin_unlock_irqrestore(&tcs->lock, flags);
+
+ return ret;
+}
+
+/**
+ * rpmh_rsc_write_ctrl_data: Write a sleep/wake request to the controller
+ *
+ * @drv: the controller
+ * @msg: the data to be written to the controller
+ *
+ * There is no response returned for writing the request to the controller.
+ */
+int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, const struct tcs_request *msg)
+{
+ if (!msg || !msg->cmds || !msg->num_cmds ||
+ msg->num_cmds > MAX_RPMH_PAYLOAD) {
+ pr_err("Payload error\n");
+ return -EINVAL;
+ }
+
+ /* Data sent to this API will not be sent immediately */
+ if (msg->state == RPMH_ACTIVE_ONLY_STATE)
+ return -EINVAL;
+
+ return tcs_ctrl_write(drv, msg);
+}
+
+static int rpmh_probe_tcs_config(struct platform_device *pdev,
+ struct rsc_drv *drv)
+{
+ struct tcs_type_config {
+ u32 type;
+ u32 n;
+ } tcs_cfg[TCS_TYPE_NR] = { { 0 } };
+ struct device_node *dn = pdev->dev.of_node;
+ u32 config, max_tcs, ncpt, offset;
+ int i, ret, n, st = 0;
+ struct tcs_group *tcs;
+ struct resource *res;
+ void __iomem *base;
+ char drv_id[10] = {0};
+
+ snprintf(drv_id, ARRAY_SIZE(drv_id), "drv-%d", drv->id);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, drv_id);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ ret = of_property_read_u32(dn, "qcom,tcs-offset", &offset);
+ if (ret)
+ return ret;
+ drv->tcs_base = base + offset;
+
+ config = readl_relaxed(base + DRV_PRNT_CHLD_CONFIG);
+
+ max_tcs = config;
+ max_tcs &= DRV_NUM_TCS_MASK << (DRV_NUM_TCS_SHIFT * drv->id);
+ max_tcs = max_tcs >> (DRV_NUM_TCS_SHIFT * drv->id);
+
+ ncpt = config & (DRV_NCPT_MASK << DRV_NCPT_SHIFT);
+ ncpt = ncpt >> DRV_NCPT_SHIFT;
+
+ n = of_property_count_u32_elems(dn, "qcom,tcs-config");
+ if (n != 2 * TCS_TYPE_NR)
+ return -EINVAL;
+
+ for (i = 0; i < TCS_TYPE_NR; i++) {
+ ret = of_property_read_u32_index(dn, "qcom,tcs-config",
+ i * 2, &tcs_cfg[i].type);
+ if (ret)
+ return ret;
+ if (tcs_cfg[i].type >= TCS_TYPE_NR)
+ return -EINVAL;
+
+ ret = of_property_read_u32_index(dn, "qcom,tcs-config",
+ i * 2 + 1, &tcs_cfg[i].n);
+ if (ret)
+ return ret;
+ if (tcs_cfg[i].n > MAX_TCS_PER_TYPE)
+ return -EINVAL;
+ }
+
+ for (i = 0; i < TCS_TYPE_NR; i++) {
+ tcs = &drv->tcs[tcs_cfg[i].type];
+ if (tcs->drv)
+ return -EINVAL;
+ tcs->drv = drv;
+ tcs->type = tcs_cfg[i].type;
+ tcs->num_tcs = tcs_cfg[i].n;
+ tcs->ncpt = ncpt;
+ spin_lock_init(&tcs->lock);
+
+ if (!tcs->num_tcs || tcs->type == CONTROL_TCS)
+ continue;
+
+ if (st + tcs->num_tcs > max_tcs ||
+ st + tcs->num_tcs >= BITS_PER_BYTE * sizeof(tcs->mask))
+ return -EINVAL;
+
+ tcs->mask = ((1 << tcs->num_tcs) - 1) << st;
+ tcs->offset = st;
+ st += tcs->num_tcs;
+
+ /*
+ * Allocate memory to cache sleep and wake requests to
+ * avoid reading TCS register memory.
+ */
+ if (tcs->type == ACTIVE_TCS)
+ continue;
+
+ tcs->cmd_cache = devm_kcalloc(&pdev->dev,
+ tcs->num_tcs * ncpt, sizeof(u32),
+ GFP_KERNEL);
+ if (!tcs->cmd_cache)
+ return -ENOMEM;
+ }
+
+ drv->num_tcs = st;
+
+ return 0;
+}
+
+static int rpmh_rsc_probe(struct platform_device *pdev)
+{
+ struct device_node *dn = pdev->dev.of_node;
+ struct rsc_drv *drv;
+ int ret, irq;
+
+ /*
+	 * Even though RPMH doesn't directly use cmd-db, all of its children
+ * do. To avoid adding this check to our children we'll do it now.
+ */
+ ret = cmd_db_ready();
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Command DB not available (%d)\n",
+ ret);
+ return ret;
+ }
+
+ drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
+ if (!drv)
+ return -ENOMEM;
+
+ ret = of_property_read_u32(dn, "qcom,drv-id", &drv->id);
+ if (ret)
+ return ret;
+
+ drv->name = of_get_property(dn, "label", NULL);
+ if (!drv->name)
+ drv->name = dev_name(&pdev->dev);
+
+ ret = rpmh_probe_tcs_config(pdev, drv);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&drv->lock);
+ bitmap_zero(drv->tcs_in_use, MAX_TCS_NR);
+
+ irq = platform_get_irq(pdev, drv->id);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(&pdev->dev, irq, tcs_tx_done,
+ IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND,
+ drv->name, drv);
+ if (ret)
+ return ret;
+
+ /* Enable the active TCS to send requests immediately */
+ write_tcs_reg(drv, RSC_DRV_IRQ_ENABLE, 0, drv->tcs[ACTIVE_TCS].mask);
+
+ spin_lock_init(&drv->client.cache_lock);
+ INIT_LIST_HEAD(&drv->client.cache);
+ INIT_LIST_HEAD(&drv->client.batch_cache);
+
+ dev_set_drvdata(&pdev->dev, drv);
+
+ return devm_of_platform_populate(&pdev->dev);
+}
+
+static const struct of_device_id rpmh_drv_match[] = {
+ { .compatible = "qcom,rpmh-rsc", },
+ { }
+};
+
+static struct platform_driver rpmh_driver = {
+ .probe = rpmh_rsc_probe,
+ .driver = {
+ .name = "rpmh",
+ .of_match_table = rpmh_drv_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init rpmh_driver_init(void)
+{
+ return platform_driver_register(&rpmh_driver);
+}
+arch_initcall(rpmh_driver_init);
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
new file mode 100644
index 000000000..8e19a8bdf
--- /dev/null
+++ b/drivers/soc/qcom/rpmh.c
@@ -0,0 +1,519 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include <soc/qcom/rpmh.h>
+
+#include "rpmh-internal.h"
+
+#define RPMH_TIMEOUT_MS msecs_to_jiffies(10000)
+
+#define DEFINE_RPMH_MSG_ONSTACK(dev, s, q, name) \
+ struct rpmh_request name = { \
+ .msg = { \
+ .state = s, \
+ .cmds = name.cmd, \
+ .num_cmds = 0, \
+ .wait_for_compl = true, \
+ }, \
+ .cmd = { { 0 } }, \
+ .completion = q, \
+ .dev = dev, \
+ .needs_free = false, \
+ }
+
+#define ctrlr_to_drv(ctrlr) container_of(ctrlr, struct rsc_drv, client)
+
+/**
+ * struct cache_req: the request object for caching
+ *
+ * @addr: the address of the resource
+ * @sleep_val: the sleep vote
+ * @wake_val: the wake vote
+ * @list: linked list obj
+ */
+struct cache_req {
+ u32 addr;
+ u32 sleep_val;
+ u32 wake_val;
+ struct list_head list;
+};
+
+/**
+ * struct batch_cache_req - An entry in our batch cache
+ *
+ * @list: linked list obj
+ * @count: number of messages
+ * @rpm_msgs: the messages
+ */
+struct batch_cache_req {
+ struct list_head list;
+ int count;
+ struct rpmh_request rpm_msgs[];
+};
+
+static struct rpmh_ctrlr *get_rpmh_ctrlr(const struct device *dev)
+{
+ struct rsc_drv *drv = dev_get_drvdata(dev->parent);
+
+ return &drv->client;
+}
+
+void rpmh_tx_done(const struct tcs_request *msg, int r)
+{
+ struct rpmh_request *rpm_msg = container_of(msg, struct rpmh_request,
+ msg);
+ struct completion *compl = rpm_msg->completion;
+ bool free = rpm_msg->needs_free;
+
+ rpm_msg->err = r;
+
+ if (r)
+ dev_err(rpm_msg->dev, "RPMH TX fail in msg addr=%#x, err=%d\n",
+ rpm_msg->msg.cmds[0].addr, r);
+
+ if (!compl)
+ goto exit;
+
+ /* Signal the blocking thread we are done */
+ complete(compl);
+
+exit:
+ if (free)
+ kfree(rpm_msg);
+}
+
+static struct cache_req *__find_req(struct rpmh_ctrlr *ctrlr, u32 addr)
+{
+ struct cache_req *p, *req = NULL;
+
+ list_for_each_entry(p, &ctrlr->cache, list) {
+ if (p->addr == addr) {
+ req = p;
+ break;
+ }
+ }
+
+ return req;
+}
+
+static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
+ enum rpmh_state state,
+ struct tcs_cmd *cmd)
+{
+ struct cache_req *req;
+ unsigned long flags;
+ u32 old_sleep_val, old_wake_val;
+
+ spin_lock_irqsave(&ctrlr->cache_lock, flags);
+ req = __find_req(ctrlr, cmd->addr);
+ if (req)
+ goto existing;
+
+ req = kzalloc(sizeof(*req), GFP_ATOMIC);
+ if (!req) {
+ req = ERR_PTR(-ENOMEM);
+ goto unlock;
+ }
+
+ req->addr = cmd->addr;
+ req->sleep_val = req->wake_val = UINT_MAX;
+ list_add_tail(&req->list, &ctrlr->cache);
+
+existing:
+ old_sleep_val = req->sleep_val;
+ old_wake_val = req->wake_val;
+
+ switch (state) {
+ case RPMH_ACTIVE_ONLY_STATE:
+ case RPMH_WAKE_ONLY_STATE:
+ req->wake_val = cmd->data;
+ break;
+ case RPMH_SLEEP_STATE:
+ req->sleep_val = cmd->data;
+ break;
+ }
+
+ ctrlr->dirty |= (req->sleep_val != old_sleep_val ||
+ req->wake_val != old_wake_val) &&
+ req->sleep_val != UINT_MAX &&
+ req->wake_val != UINT_MAX;
+
+unlock:
+ spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+
+ return req;
+}
+
+/**
+ * __rpmh_write: Cache and send the RPMH request
+ *
+ * @dev: The device making the request
+ * @state: Active/Sleep request type
+ * @rpm_msg: The data that needs to be sent (cmds).
+ *
+ * Cache the RPMH request and send if the state is ACTIVE_ONLY.
+ * SLEEP/WAKE_ONLY requests are not sent to the controller at
+ * this time. Use rpmh_flush() to send them to the controller.
+ */
+static int __rpmh_write(const struct device *dev, enum rpmh_state state,
+ struct rpmh_request *rpm_msg)
+{
+ struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+ int ret = -EINVAL;
+ struct cache_req *req;
+ int i;
+
+ rpm_msg->msg.state = state;
+
+ /* Cache the request in our store and link the payload */
+ for (i = 0; i < rpm_msg->msg.num_cmds; i++) {
+ req = cache_rpm_request(ctrlr, state, &rpm_msg->msg.cmds[i]);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ }
+
+ if (state == RPMH_ACTIVE_ONLY_STATE) {
+ WARN_ON(irqs_disabled());
+ ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
+ } else {
+ ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
+ &rpm_msg->msg);
+ /* Clean up our call by spoofing tx_done */
+ rpmh_tx_done(&rpm_msg->msg, ret);
+ }
+
+ return ret;
+}
+
+static int __fill_rpmh_msg(struct rpmh_request *req, enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 n)
+{
+ if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
+ return -EINVAL;
+
+ memcpy(req->cmd, cmd, n * sizeof(*cmd));
+
+ req->msg.state = state;
+ req->msg.cmds = req->cmd;
+ req->msg.num_cmds = n;
+
+ return 0;
+}
+
+/**
+ * rpmh_write_async: Write a set of RPMH commands
+ *
+ * @dev: The device making the request
+ * @state: Active/sleep set
+ * @cmd: The payload data
+ * @n: The number of elements in payload
+ *
+ * Write a set of RPMH commands, the order of commands is maintained
+ * and will be sent as a single shot.
+ */
+int rpmh_write_async(const struct device *dev, enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 n)
+{
+ struct rpmh_request *rpm_msg;
+ int ret;
+
+ rpm_msg = kzalloc(sizeof(*rpm_msg), GFP_ATOMIC);
+ if (!rpm_msg)
+ return -ENOMEM;
+ rpm_msg->needs_free = true;
+
+ ret = __fill_rpmh_msg(rpm_msg, state, cmd, n);
+ if (ret) {
+ kfree(rpm_msg);
+ return ret;
+ }
+
+ return __rpmh_write(dev, state, rpm_msg);
+}
+EXPORT_SYMBOL(rpmh_write_async);
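+
+/*
+ * Illustrative usage (not part of this patch): a client could vote a
+ * sleep-set value without blocking; the resource address and data are
+ * hypothetical placeholders.
+ *
+ *	struct tcs_cmd cmd = { .addr = 0x30010, .data = 0 };
+ *	int ret;
+ *
+ *	ret = rpmh_write_async(dev, RPMH_SLEEP_STATE, &cmd, 1);
+ */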
+
+/**
+ * rpmh_write: Write a set of RPMH commands and block until response
+ *
+ * @dev: The device making the request
+ * @state: Active/sleep set
+ * @cmd: The payload data
+ * @n: The number of elements in @cmd
+ *
+ * May sleep. Do not call from atomic contexts.
+ */
+int rpmh_write(const struct device *dev, enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 n)
+{
+ DECLARE_COMPLETION_ONSTACK(compl);
+ DEFINE_RPMH_MSG_ONSTACK(dev, state, &compl, rpm_msg);
+ int ret;
+
+ if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
+ return -EINVAL;
+
+ memcpy(rpm_msg.cmd, cmd, n * sizeof(*cmd));
+ rpm_msg.msg.num_cmds = n;
+
+ ret = __rpmh_write(dev, state, &rpm_msg);
+ if (ret)
+ return ret;
+
+ ret = wait_for_completion_timeout(&compl, RPMH_TIMEOUT_MS);
+ WARN_ON(!ret);
+ return (ret > 0) ? 0 : -ETIMEDOUT;
+}
+EXPORT_SYMBOL(rpmh_write);
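+
+/*
+ * Illustrative usage (not part of this patch): a blocking active-only
+ * vote from process context; address and data are hypothetical.
+ *
+ *	struct tcs_cmd cmd = { .addr = 0x30010, .data = 1 };
+ *
+ *	ret = rpmh_write(dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
+ *	if (ret)
+ *		dev_err(dev, "active vote failed: %d\n", ret);
+ */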
+
+static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrlr->cache_lock, flags);
+ list_add_tail(&req->list, &ctrlr->batch_cache);
+ ctrlr->dirty = true;
+ spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+}
+
+static int flush_batch(struct rpmh_ctrlr *ctrlr)
+{
+ struct batch_cache_req *req;
+ const struct rpmh_request *rpm_msg;
+ unsigned long flags;
+ int ret = 0;
+ int i;
+
+ /* Send Sleep/Wake requests to the controller, expect no response */
+ spin_lock_irqsave(&ctrlr->cache_lock, flags);
+ list_for_each_entry(req, &ctrlr->batch_cache, list) {
+ for (i = 0; i < req->count; i++) {
+ rpm_msg = req->rpm_msgs + i;
+ ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
+ &rpm_msg->msg);
+ if (ret)
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+
+ return ret;
+}
+
+/**
+ * rpmh_write_batch: Write multiple sets of RPMH commands and wait for the
+ * batch to finish.
+ *
+ * @dev: the device making the request
+ * @state: Active/sleep set
+ * @cmd: The payload data
+ * @n: Array of counts of elements in each batch, 0 terminated.
+ *
+ * Write a request to the RSC controller without caching. If the request
+ * state is ACTIVE, then the requests are treated as completion requests
+ * and sent to the controller immediately. The function waits until all the
+ * commands are complete. If the request state is SLEEP or WAKE_ONLY, then the
+ * request is sent as fire-and-forget and no ack is expected.
+ *
+ * May sleep. Do not call from atomic contexts for ACTIVE_ONLY requests.
+ */
+int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 *n)
+{
+ struct batch_cache_req *req;
+ struct rpmh_request *rpm_msgs;
+ struct completion *compls;
+ struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+ unsigned long time_left;
+ int count = 0;
+ int ret, i;
+ void *ptr;
+
+ if (!cmd || !n)
+ return -EINVAL;
+
+ while (n[count] > 0)
+ count++;
+ if (!count)
+ return -EINVAL;
+
+ ptr = kzalloc(sizeof(*req) +
+ count * (sizeof(req->rpm_msgs[0]) + sizeof(*compls)),
+ GFP_ATOMIC);
+ if (!ptr)
+ return -ENOMEM;
+
+ req = ptr;
+ compls = ptr + sizeof(*req) + count * sizeof(*rpm_msgs);
+
+ req->count = count;
+ rpm_msgs = req->rpm_msgs;
+
+ for (i = 0; i < count; i++) {
+ __fill_rpmh_msg(rpm_msgs + i, state, cmd, n[i]);
+ cmd += n[i];
+ }
+
+ if (state != RPMH_ACTIVE_ONLY_STATE) {
+ cache_batch(ctrlr, req);
+ return 0;
+ }
+
+ for (i = 0; i < count; i++) {
+ struct completion *compl = &compls[i];
+
+ init_completion(compl);
+ rpm_msgs[i].completion = compl;
+ ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
+ if (ret) {
+ pr_err("Error(%d) sending RPMH message addr=%#x\n",
+ ret, rpm_msgs[i].msg.cmds[0].addr);
+ break;
+ }
+ }
+
+ time_left = RPMH_TIMEOUT_MS;
+ while (i--) {
+ time_left = wait_for_completion_timeout(&compls[i], time_left);
+ if (!time_left) {
+ /*
+			 * A request that timed out here may still complete
+			 * later and signal the completion that we free once
+			 * this function returns, so warn loudly.
+ */
+ WARN_ON(1);
+ ret = -ETIMEDOUT;
+ goto exit;
+ }
+ }
+
+exit:
+ kfree(ptr);
+
+ return ret;
+}
+EXPORT_SYMBOL(rpmh_write_batch);
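+
+/*
+ * Illustrative usage (not part of this patch): two batches of one command
+ * each; note the zero-terminated count array. Values are hypothetical.
+ *
+ *	struct tcs_cmd cmds[] = {
+ *		{ .addr = 0x30010, .data = 1 },
+ *		{ .addr = 0x30020, .data = 2 },
+ *	};
+ *	u32 n[] = { 1, 1, 0 };
+ *
+ *	ret = rpmh_write_batch(dev, RPMH_ACTIVE_ONLY_STATE, cmds, n);
+ */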
+
+static int is_req_valid(struct cache_req *req)
+{
+ return (req->sleep_val != UINT_MAX &&
+ req->wake_val != UINT_MAX &&
+ req->sleep_val != req->wake_val);
+}
+
+static int send_single(const struct device *dev, enum rpmh_state state,
+ u32 addr, u32 data)
+{
+ DEFINE_RPMH_MSG_ONSTACK(dev, state, NULL, rpm_msg);
+ struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+
+ /* Wake sets are always complete and sleep sets are not */
+ rpm_msg.msg.wait_for_compl = (state == RPMH_WAKE_ONLY_STATE);
+ rpm_msg.cmd[0].addr = addr;
+ rpm_msg.cmd[0].data = data;
+ rpm_msg.msg.num_cmds = 1;
+
+ return rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr), &rpm_msg.msg);
+}
+
+/**
+ * rpmh_flush: Flushes the buffered sleep and wake sets to TCS
+ *
+ * @dev: The device making the request
+ *
+ * Return: -EBUSY if the controller is busy, probably waiting on a response
+ * to an RPMH request sent earlier.
+ *
+ * This function is always called from the sleep code from the last CPU
+ * that is powering down the entire system. Since no other RPMH API would be
+ * executing at this time, it is safe to run lockless.
+ */
+int rpmh_flush(const struct device *dev)
+{
+ struct cache_req *p;
+ struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+ int ret;
+
+ if (!ctrlr->dirty) {
+ pr_debug("Skipping flush, TCS has latest data.\n");
+ return 0;
+ }
+
+ /* Invalidate the TCSes first to avoid stale data */
+ do {
+ ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
+ } while (ret == -EAGAIN);
+ if (ret)
+ return ret;
+
+ /* First flush the cached batch requests */
+ ret = flush_batch(ctrlr);
+ if (ret)
+ return ret;
+
+ /*
+ * Nobody else should be calling this function other than system PM,
+ * hence we can run without locks.
+ */
+ list_for_each_entry(p, &ctrlr->cache, list) {
+ if (!is_req_valid(p)) {
+			pr_debug("%s: skipping RPMH req: a:%#x s:%#x w:%#x\n",
+ __func__, p->addr, p->sleep_val, p->wake_val);
+ continue;
+ }
+ ret = send_single(dev, RPMH_SLEEP_STATE, p->addr, p->sleep_val);
+ if (ret)
+ return ret;
+ ret = send_single(dev, RPMH_WAKE_ONLY_STATE,
+ p->addr, p->wake_val);
+ if (ret)
+ return ret;
+ }
+
+ ctrlr->dirty = false;
+
+ return 0;
+}
+EXPORT_SYMBOL(rpmh_flush);
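+
+/*
+ * Illustrative call site (not part of this patch): platform sleep code on
+ * the last CPU going down would flush the cached votes and abort the
+ * power-down attempt on failure.
+ *
+ *	ret = rpmh_flush(dev);
+ *	if (ret)
+ *		return ret;
+ */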
+
+/**
+ * rpmh_invalidate: Invalidate sleep and wake sets in batch_cache
+ *
+ * @dev: The device making the request
+ *
+ * Invalidate the sleep and wake values in batch_cache.
+ */
+int rpmh_invalidate(const struct device *dev)
+{
+ struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+ struct batch_cache_req *req, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrlr->cache_lock, flags);
+ list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
+ kfree(req);
+ INIT_LIST_HEAD(&ctrlr->batch_cache);
+ ctrlr->dirty = true;
+ spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(rpmh_invalidate);
diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c
new file mode 100644
index 000000000..93517ed83
--- /dev/null
+++ b/drivers/soc/qcom/smd-rpm.c
@@ -0,0 +1,258 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+
+#include <linux/rpmsg.h>
+#include <linux/soc/qcom/smd-rpm.h>
+
+#define RPM_REQUEST_TIMEOUT (5 * HZ)
+
+/**
+ * struct qcom_smd_rpm - state of the rpm device driver
+ * @rpm_channel: reference to the smd channel
+ * @ack: completion for acks
+ * @lock: mutual exclusion around the send/complete pair
+ * @ack_status: result of the rpm request
+ */
+struct qcom_smd_rpm {
+ struct rpmsg_endpoint *rpm_channel;
+ struct device *dev;
+
+ struct completion ack;
+ struct mutex lock;
+ int ack_status;
+};
+
+/**
+ * struct qcom_rpm_header - header for all rpm requests and responses
+ * @service_type: identifier of the service
+ * @length: length of the payload
+ */
+struct qcom_rpm_header {
+ __le32 service_type;
+ __le32 length;
+};
+
+/**
+ * struct qcom_rpm_request - request message to the rpm
+ * @msg_id: identifier of the outgoing message
+ * @flags: active/sleep state flags
+ * @type: resource type
+ * @id: resource id
+ * @data_len: length of the payload following this header
+ */
+struct qcom_rpm_request {
+ __le32 msg_id;
+ __le32 flags;
+ __le32 type;
+ __le32 id;
+ __le32 data_len;
+};
+
+/**
+ * struct qcom_rpm_message - response message from the rpm
+ * @msg_type: indicator of the type of message
+ * @length: the size of this message, including the message header
+ * @msg_id: message id
+ * @message: textual message from the rpm
+ *
+ * Multiple of these messages can be stacked in an rpm message.
+ */
+struct qcom_rpm_message {
+ __le32 msg_type;
+ __le32 length;
+ union {
+ __le32 msg_id;
+ u8 message[0];
+ };
+};
+
+#define RPM_SERVICE_TYPE_REQUEST 0x00716572 /* "req\0" */
+
+#define RPM_MSG_TYPE_ERR 0x00727265 /* "err\0" */
+#define RPM_MSG_TYPE_MSG_ID 0x2367736d /* "msg#" */
+
+/**
+ * qcom_rpm_smd_write - write @buf to @type:@id
+ * @rpm: rpm handle
+ * @state: active/sleep state flags
+ * @type: resource type
+ * @id: resource identifier
+ * @buf: the data to be written
+ * @count: number of bytes in @buf
+ */
+int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
+ int state,
+ u32 type, u32 id,
+ void *buf,
+ size_t count)
+{
+ static unsigned msg_id = 1;
+ int left;
+ int ret;
+ struct {
+ struct qcom_rpm_header hdr;
+ struct qcom_rpm_request req;
+ u8 payload[];
+ } *pkt;
+ size_t size = sizeof(*pkt) + count;
+
+ /* SMD packets to the RPM may not exceed 256 bytes */
+ if (WARN_ON(size >= 256))
+ return -EINVAL;
+
+ pkt = kmalloc(size, GFP_KERNEL);
+ if (!pkt)
+ return -ENOMEM;
+
+ mutex_lock(&rpm->lock);
+
+ pkt->hdr.service_type = cpu_to_le32(RPM_SERVICE_TYPE_REQUEST);
+ pkt->hdr.length = cpu_to_le32(sizeof(struct qcom_rpm_request) + count);
+
+ pkt->req.msg_id = cpu_to_le32(msg_id++);
+ pkt->req.flags = cpu_to_le32(state);
+ pkt->req.type = cpu_to_le32(type);
+ pkt->req.id = cpu_to_le32(id);
+ pkt->req.data_len = cpu_to_le32(count);
+ memcpy(pkt->payload, buf, count);
+
+ ret = rpmsg_send(rpm->rpm_channel, pkt, size);
+ if (ret)
+ goto out;
+
+ left = wait_for_completion_timeout(&rpm->ack, RPM_REQUEST_TIMEOUT);
+ if (!left)
+ ret = -ETIMEDOUT;
+ else
+ ret = rpm->ack_status;
+
+out:
+ kfree(pkt);
+ mutex_unlock(&rpm->lock);
+ return ret;
+}
+EXPORT_SYMBOL(qcom_rpm_smd_write);
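+
+/*
+ * Illustrative usage (not part of this patch): a client such as a
+ * regulator driver sends a key/value payload to a resource; @state,
+ * @type and @id come from the client's own definitions and the payload
+ * here is a hypothetical single word.
+ *
+ *	__le32 value = cpu_to_le32(1);
+ *
+ *	ret = qcom_rpm_smd_write(rpm, state, type, id, &value, sizeof(value));
+ */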
+
+static int qcom_smd_rpm_callback(struct rpmsg_device *rpdev,
+ void *data,
+ int count,
+ void *priv,
+ u32 addr)
+{
+ const struct qcom_rpm_header *hdr = data;
+ size_t hdr_length = le32_to_cpu(hdr->length);
+ const struct qcom_rpm_message *msg;
+ struct qcom_smd_rpm *rpm = dev_get_drvdata(&rpdev->dev);
+ const u8 *buf = data + sizeof(struct qcom_rpm_header);
+ const u8 *end = buf + hdr_length;
+ char msgbuf[32];
+ int status = 0;
+ u32 len, msg_length;
+
+ if (le32_to_cpu(hdr->service_type) != RPM_SERVICE_TYPE_REQUEST ||
+ hdr_length < sizeof(struct qcom_rpm_message)) {
+ dev_err(rpm->dev, "invalid request\n");
+ return 0;
+ }
+
+ while (buf < end) {
+ msg = (struct qcom_rpm_message *)buf;
+ msg_length = le32_to_cpu(msg->length);
+ switch (le32_to_cpu(msg->msg_type)) {
+ case RPM_MSG_TYPE_MSG_ID:
+ break;
+ case RPM_MSG_TYPE_ERR:
+ len = min_t(u32, ALIGN(msg_length, 4), sizeof(msgbuf));
+ memcpy_fromio(msgbuf, msg->message, len);
+ msgbuf[len - 1] = 0;
+
+ if (!strcmp(msgbuf, "resource does not exist"))
+ status = -ENXIO;
+ else
+ status = -EINVAL;
+ break;
+ }
+
+ buf = PTR_ALIGN(buf + 2 * sizeof(u32) + msg_length, 4);
+ }
+
+ rpm->ack_status = status;
+ complete(&rpm->ack);
+ return 0;
+}
+
+static int qcom_smd_rpm_probe(struct rpmsg_device *rpdev)
+{
+ struct qcom_smd_rpm *rpm;
+
+ rpm = devm_kzalloc(&rpdev->dev, sizeof(*rpm), GFP_KERNEL);
+ if (!rpm)
+ return -ENOMEM;
+
+ mutex_init(&rpm->lock);
+ init_completion(&rpm->ack);
+
+ rpm->dev = &rpdev->dev;
+ rpm->rpm_channel = rpdev->ept;
+ dev_set_drvdata(&rpdev->dev, rpm);
+
+ return of_platform_populate(rpdev->dev.of_node, NULL, NULL, &rpdev->dev);
+}
+
+static void qcom_smd_rpm_remove(struct rpmsg_device *rpdev)
+{
+ of_platform_depopulate(&rpdev->dev);
+}
+
+static const struct of_device_id qcom_smd_rpm_of_match[] = {
+ { .compatible = "qcom,rpm-apq8084" },
+ { .compatible = "qcom,rpm-msm8916" },
+ { .compatible = "qcom,rpm-msm8974" },
+ { .compatible = "qcom,rpm-msm8996" },
+ { .compatible = "qcom,rpm-msm8998" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_smd_rpm_of_match);
+
+static struct rpmsg_driver qcom_smd_rpm_driver = {
+ .probe = qcom_smd_rpm_probe,
+ .remove = qcom_smd_rpm_remove,
+ .callback = qcom_smd_rpm_callback,
+ .drv = {
+ .name = "qcom_smd_rpm",
+ .of_match_table = qcom_smd_rpm_of_match,
+ },
+};
+
+static int __init qcom_smd_rpm_init(void)
+{
+ return register_rpmsg_driver(&qcom_smd_rpm_driver);
+}
+arch_initcall(qcom_smd_rpm_init);
+
+static void __exit qcom_smd_rpm_exit(void)
+{
+ unregister_rpmsg_driver(&qcom_smd_rpm_driver);
+}
+module_exit(qcom_smd_rpm_exit);
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
+MODULE_DESCRIPTION("Qualcomm SMD backed RPM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
new file mode 100644
index 000000000..bf4bd71ab
--- /dev/null
+++ b/drivers/soc/qcom/smem.c
@@ -0,0 +1,1023 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/hwspinlock.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/smem.h>
+
+/*
+ * The Qualcomm shared memory system is an allocate-only heap structure that
+ * consists of one or more memory areas that can be accessed by the processors
+ * in the SoC.
+ *
+ * All systems contain a global heap, accessible by all processors in the SoC,
+ * with a table of contents data structure (@smem_header) at the beginning of
+ * the main shared memory block.
+ *
+ * The global header contains metadata for allocations as well as a fixed list
+ * of 512 entries (@smem_global_entry) that can be initialized to reference
+ * parts of the shared memory space.
+ *
+ * In addition to this global heap a set of "private" heaps can be set up at
+ * boot time with access restrictions so that only certain processor pairs can
+ * access the data.
+ *
+ * These partitions are referenced from an optional partition table
+ * (@smem_ptable), that is found 4kB from the end of the main smem region. The
+ * partition table entries (@smem_ptable_entry) lists the involved processors
+ * (or hosts) and their location in the main shared memory region.
+ *
+ * Each partition starts with a header (@smem_partition_header) that identifies
+ * the partition and holds properties for the two internal memory regions. The
+ * two regions are cached and non-cached memory respectively. Each region
+ * contains a linked list of allocation headers (@smem_private_entry) followed by
+ * their data.
+ *
+ * Items in the non-cached region are allocated from the start of the partition
+ * while items in the cached region are allocated from the end. The free area
+ * is hence the region between the cached and non-cached offsets. The header of
+ * cached items comes after the data.
+ *
+ * Version 12 (SMEM_GLOBAL_PART_VERSION) changes the item alloc/get procedure
+ * for the global heap. A new global partition is created from the global heap
+ * region with partition type (SMEM_GLOBAL_HOST) and the max smem item count is
+ * set by the bootloader.
+ *
+ * To synchronize allocations in the shared memory heaps a remote spinlock must
+ * be held - currently lock number 3 of the sfpb or tcsr is used for this on all
+ * platforms.
+ */
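+
+/*
+ * Rough sketch of the resulting layout (illustrative, not to scale):
+ *
+ *	+--------------------+  start of main smem region
+ *	| smem_header + TOC  |
+ *	| global heap items  |
+ *	| private partitions |  uncached items grow from the start of a
+ *	| ...                |  partition, cached items from its end
+ *	+--------------------+  end - 4kB
+ *	| smem_ptable        |
+ *	+--------------------+  end of main smem region
+ */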
+
+/*
+ * The version member of the smem header contains an array of versions for the
+ * various software components in the SoC. We verify that the boot loader
+ * version is a valid version as a sanity check.
+ */
+#define SMEM_MASTER_SBL_VERSION_INDEX 7
+#define SMEM_GLOBAL_HEAP_VERSION 11
+#define SMEM_GLOBAL_PART_VERSION 12
+
+/*
+ * The first 8 items are only to be allocated by the boot loader while
+ * initializing the heap.
+ */
+#define SMEM_ITEM_LAST_FIXED 8
+
+/* Highest accepted item number, for both global and private heaps */
+#define SMEM_ITEM_COUNT 512
+
+/* Processor/host identifier for the application processor */
+#define SMEM_HOST_APPS 0
+
+/* Processor/host identifier for the global partition */
+#define SMEM_GLOBAL_HOST 0xfffe
+
+/* Max number of processors/hosts in a system */
+#define SMEM_HOST_COUNT 10
+
+/**
+ * struct smem_proc_comm - proc_comm communication struct (legacy)
+ * @command: current command to be executed
+ * @status: status of the currently requested command
+ * @params: parameters to the command
+ */
+struct smem_proc_comm {
+ __le32 command;
+ __le32 status;
+ __le32 params[2];
+};
+
+/**
+ * struct smem_global_entry - entry to reference smem items on the heap
+ * @allocated: boolean to indicate if this entry is used
+ * @offset: offset to the allocated space
+ * @size: size of the allocated space, 8 byte aligned
+ * @aux_base: base address for the memory region used by this unit, or 0 for
+ * the default region. bits 0,1 are reserved
+ */
+struct smem_global_entry {
+ __le32 allocated;
+ __le32 offset;
+ __le32 size;
+ __le32 aux_base; /* bits 1:0 reserved */
+};
+#define AUX_BASE_MASK 0xfffffffc
+
+/**
+ * struct smem_header - header found in beginning of primary smem region
+ * @proc_comm: proc_comm communication interface (legacy)
+ * @version: array of versions for the various subsystems
+ * @initialized: boolean to indicate that smem is initialized
+ * @free_offset: index of the first unallocated byte in smem
+ * @available: number of bytes available for allocation
+ * @reserved: reserved field, must be 0
+ * @toc: array of references to items
+ */
+struct smem_header {
+ struct smem_proc_comm proc_comm[4];
+ __le32 version[32];
+ __le32 initialized;
+ __le32 free_offset;
+ __le32 available;
+ __le32 reserved;
+ struct smem_global_entry toc[SMEM_ITEM_COUNT];
+};
+
+/**
+ * struct smem_ptable_entry - one entry in the @smem_ptable list
+ * @offset: offset, within the main shared memory region, of the partition
+ * @size: size of the partition
+ * @flags: flags for the partition (currently unused)
+ * @host0: first processor/host with access to this partition
+ * @host1: second processor/host with access to this partition
+ * @cacheline: alignment for "cached" entries
+ * @reserved: reserved entries for later use
+ */
+struct smem_ptable_entry {
+ __le32 offset;
+ __le32 size;
+ __le32 flags;
+ __le16 host0;
+ __le16 host1;
+ __le32 cacheline;
+ __le32 reserved[7];
+};
+
+/**
+ * struct smem_ptable - partition table for the private partitions
+ * @magic: magic number, must be SMEM_PTABLE_MAGIC
+ * @version: version of the partition table
+ * @num_entries: number of partitions in the table
+ * @reserved: for now reserved entries
+ * @entry: list of @smem_ptable_entry for the @num_entries partitions
+ */
+struct smem_ptable {
+ u8 magic[4];
+ __le32 version;
+ __le32 num_entries;
+ __le32 reserved[5];
+ struct smem_ptable_entry entry[];
+};
+
+static const u8 SMEM_PTABLE_MAGIC[] = { 0x24, 0x54, 0x4f, 0x43 }; /* "$TOC" */
+
+/**
+ * struct smem_partition_header - header of the partitions
+ * @magic: magic number, must be SMEM_PART_MAGIC
+ * @host0: first processor/host with access to this partition
+ * @host1: second processor/host with access to this partition
+ * @size: size of the partition
+ * @offset_free_uncached: offset to the first free byte of uncached memory in
+ * this partition
+ * @offset_free_cached: offset to the first free byte of cached memory in this
+ * partition
+ * @reserved: for now reserved entries
+ */
+struct smem_partition_header {
+ u8 magic[4];
+ __le16 host0;
+ __le16 host1;
+ __le32 size;
+ __le32 offset_free_uncached;
+ __le32 offset_free_cached;
+ __le32 reserved[3];
+};
+
+static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 }; /* "$PRT" */
+
+/**
+ * struct smem_private_entry - header of each item in the private partition
+ * @canary: magic number, must be SMEM_PRIVATE_CANARY
+ * @item: identifying number of the smem item
+ * @size: size of the data, including padding bytes
+ * @padding_data: number of bytes of padding of data
+ * @padding_hdr: number of bytes of padding between the header and the data
+ * @reserved: for now reserved entry
+ */
+struct smem_private_entry {
+ u16 canary; /* bytes are the same so no swapping needed */
+ __le16 item;
+ __le32 size; /* includes padding bytes */
+ __le16 padding_data;
+ __le16 padding_hdr;
+ __le32 reserved;
+};
+#define SMEM_PRIVATE_CANARY 0xa5a5
+
+/**
+ * struct smem_info - smem region info located after the table of contents
+ * @magic: magic number, must be SMEM_INFO_MAGIC
+ * @size: size of the smem region
+ * @base_addr: base address of the smem region
+ * @reserved: for now reserved entry
+ * @num_items: highest accepted item number
+ */
+struct smem_info {
+ u8 magic[4];
+ __le32 size;
+ __le32 base_addr;
+ __le32 reserved;
+ __le16 num_items;
+};
+
+static const u8 SMEM_INFO_MAGIC[] = { 0x53, 0x49, 0x49, 0x49 }; /* SIII */
+
+/**
+ * struct smem_region - representation of a chunk of memory used for smem
+ * @aux_base: identifier of aux_mem base
+ * @virt_base: virtual base address of memory with this aux_mem identifier
+ * @size: size of the memory region
+ */
+struct smem_region {
+ u32 aux_base;
+ void __iomem *virt_base;
+ size_t size;
+};
+
+/**
+ * struct qcom_smem - device data for the smem device
+ * @dev: device pointer
+ * @hwlock: reference to a hwspinlock
+ * @global_partition: pointer to global partition when in use
+ * @global_cacheline: cacheline size for global partition
+ * @partitions: list of pointers to partitions affecting the current
+ * processor/host
+ * @cacheline: list of cacheline sizes for each host
+ * @item_count: max accepted item number
+ * @num_regions: number of @regions
+ * @regions: list of the memory regions defining the shared memory
+ */
+struct qcom_smem {
+ struct device *dev;
+
+ struct hwspinlock *hwlock;
+
+ struct smem_partition_header *global_partition;
+ size_t global_cacheline;
+ struct smem_partition_header *partitions[SMEM_HOST_COUNT];
+ size_t cacheline[SMEM_HOST_COUNT];
+ u32 item_count;
+
+ unsigned num_regions;
+	struct smem_region regions[];
+};
+
+static void *
+phdr_to_last_uncached_entry(struct smem_partition_header *phdr)
+{
+ void *p = phdr;
+
+ return p + le32_to_cpu(phdr->offset_free_uncached);
+}
+
+static struct smem_private_entry *
+phdr_to_first_cached_entry(struct smem_partition_header *phdr,
+ size_t cacheline)
+{
+ void *p = phdr;
+ struct smem_private_entry *e;
+
+ return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*e), cacheline);
+}
+
+static void *
+phdr_to_last_cached_entry(struct smem_partition_header *phdr)
+{
+ void *p = phdr;
+
+ return p + le32_to_cpu(phdr->offset_free_cached);
+}
+
+static struct smem_private_entry *
+phdr_to_first_uncached_entry(struct smem_partition_header *phdr)
+{
+ void *p = phdr;
+
+ return p + sizeof(*phdr);
+}
+
+static struct smem_private_entry *
+uncached_entry_next(struct smem_private_entry *e)
+{
+ void *p = e;
+
+ return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) +
+ le32_to_cpu(e->size);
+}
+
+static struct smem_private_entry *
+cached_entry_next(struct smem_private_entry *e, size_t cacheline)
+{
+ void *p = e;
+
+ return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline);
+}
+
+static void *uncached_entry_to_item(struct smem_private_entry *e)
+{
+ void *p = e;
+
+ return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
+}
+
+static void *cached_entry_to_item(struct smem_private_entry *e)
+{
+ void *p = e;
+
+ return p - le32_to_cpu(e->size);
+}
+
+/* Pointer to the one and only smem handle */
+static struct qcom_smem *__smem;
+
+/* Timeout (ms) for the trylock of remote spinlocks */
+#define HWSPINLOCK_TIMEOUT 1000
+
+static int qcom_smem_alloc_private(struct qcom_smem *smem,
+ struct smem_partition_header *phdr,
+ unsigned item,
+ size_t size)
+{
+ struct smem_private_entry *hdr, *end;
+ size_t alloc_size;
+ void *cached;
+
+ hdr = phdr_to_first_uncached_entry(phdr);
+ end = phdr_to_last_uncached_entry(phdr);
+ cached = phdr_to_last_cached_entry(phdr);
+
+ while (hdr < end) {
+ if (hdr->canary != SMEM_PRIVATE_CANARY)
+ goto bad_canary;
+ if (le16_to_cpu(hdr->item) == item)
+ return -EEXIST;
+
+ hdr = uncached_entry_next(hdr);
+ }
+
+ /* Check that we don't grow into the cached region */
+ alloc_size = sizeof(*hdr) + ALIGN(size, 8);
+ if ((void *)hdr + alloc_size > cached) {
+ dev_err(smem->dev, "Out of memory\n");
+ return -ENOSPC;
+ }
+
+ hdr->canary = SMEM_PRIVATE_CANARY;
+ hdr->item = cpu_to_le16(item);
+ hdr->size = cpu_to_le32(ALIGN(size, 8));
+ hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size);
+ hdr->padding_hdr = 0;
+
+ /*
+ * Ensure the header is written before we advance the free offset, so
+	 * that remote processors that do not take the remote spinlock still
+	 * get a consistent view of the linked list.
+ */
+ wmb();
+ le32_add_cpu(&phdr->offset_free_uncached, alloc_size);
+
+ return 0;
+bad_canary:
+ dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
+ le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));
+
+ return -EINVAL;
+}
+
+static int qcom_smem_alloc_global(struct qcom_smem *smem,
+ unsigned item,
+ size_t size)
+{
+ struct smem_global_entry *entry;
+ struct smem_header *header;
+
+ header = smem->regions[0].virt_base;
+ entry = &header->toc[item];
+ if (entry->allocated)
+ return -EEXIST;
+
+ size = ALIGN(size, 8);
+ if (WARN_ON(size > le32_to_cpu(header->available)))
+ return -ENOMEM;
+
+ entry->offset = header->free_offset;
+ entry->size = cpu_to_le32(size);
+
+ /*
+ * Ensure the header is consistent before we mark the item allocated,
+ * so that remote processors will get a consistent view of the item
+ * even though they do not take the spinlock on read.
+ */
+ wmb();
+ entry->allocated = cpu_to_le32(1);
+
+ le32_add_cpu(&header->free_offset, size);
+ le32_add_cpu(&header->available, -size);
+
+ return 0;
+}
+
+/**
+ * qcom_smem_alloc() - allocate space for a smem item
+ * @host: remote processor id, or -1
+ * @item: smem item handle
+ * @size: number of bytes to be allocated
+ *
+ * Allocate space for a given smem item of size @size, given that the item is
+ * not yet allocated.
+ */
+int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
+{
+ struct smem_partition_header *phdr;
+ unsigned long flags;
+ int ret;
+
+ if (!__smem)
+ return -EPROBE_DEFER;
+
+ if (item < SMEM_ITEM_LAST_FIXED) {
+ dev_err(__smem->dev,
+ "Rejecting allocation of static entry %d\n", item);
+ return -EINVAL;
+ }
+
+ if (WARN_ON(item >= __smem->item_count))
+ return -EINVAL;
+
+ ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
+ HWSPINLOCK_TIMEOUT,
+ &flags);
+ if (ret)
+ return ret;
+
+ if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
+ phdr = __smem->partitions[host];
+ ret = qcom_smem_alloc_private(__smem, phdr, item, size);
+ } else if (__smem->global_partition) {
+ phdr = __smem->global_partition;
+ ret = qcom_smem_alloc_private(__smem, phdr, item, size);
+ } else {
+ ret = qcom_smem_alloc_global(__smem, item, size);
+ }
+
+ hwspin_unlock_irqrestore(__smem->hwlock, &flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_smem_alloc);
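+
+/*
+ * Illustrative usage (not part of this patch): allocate an item shared
+ * with a remote host; host and item numbers are hypothetical.
+ *
+ *	ret = qcom_smem_alloc(remote_host, 42, sizeof(struct my_shared));
+ *	if (ret < 0 && ret != -EEXIST)
+ *		return ret;
+ */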
+
+static void *qcom_smem_get_global(struct qcom_smem *smem,
+ unsigned item,
+ size_t *size)
+{
+ struct smem_header *header;
+ struct smem_region *area;
+ struct smem_global_entry *entry;
+ u32 aux_base;
+ unsigned i;
+
+ header = smem->regions[0].virt_base;
+ entry = &header->toc[item];
+ if (!entry->allocated)
+ return ERR_PTR(-ENXIO);
+
+ aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK;
+
+ for (i = 0; i < smem->num_regions; i++) {
+ area = &smem->regions[i];
+
+ if (area->aux_base == aux_base || !aux_base) {
+ if (size != NULL)
+ *size = le32_to_cpu(entry->size);
+ return area->virt_base + le32_to_cpu(entry->offset);
+ }
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+
+static void *qcom_smem_get_private(struct qcom_smem *smem,
+ struct smem_partition_header *phdr,
+ size_t cacheline,
+ unsigned item,
+ size_t *size)
+{
+ struct smem_private_entry *e, *end;
+
+ e = phdr_to_first_uncached_entry(phdr);
+ end = phdr_to_last_uncached_entry(phdr);
+
+ while (e < end) {
+ if (e->canary != SMEM_PRIVATE_CANARY)
+ goto invalid_canary;
+
+ if (le16_to_cpu(e->item) == item) {
+ if (size != NULL)
+ *size = le32_to_cpu(e->size) -
+ le16_to_cpu(e->padding_data);
+
+ return uncached_entry_to_item(e);
+ }
+
+ e = uncached_entry_next(e);
+ }
+
+ /* Item was not found in the uncached list, search the cached list */
+
+ e = phdr_to_first_cached_entry(phdr, cacheline);
+ end = phdr_to_last_cached_entry(phdr);
+
+ while (e > end) {
+ if (e->canary != SMEM_PRIVATE_CANARY)
+ goto invalid_canary;
+
+ if (le16_to_cpu(e->item) == item) {
+ if (size != NULL)
+ *size = le32_to_cpu(e->size) -
+ le16_to_cpu(e->padding_data);
+
+ return cached_entry_to_item(e);
+ }
+
+ e = cached_entry_next(e, cacheline);
+ }
+
+ return ERR_PTR(-ENOENT);
+
+invalid_canary:
+ dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
+ le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));
+
+ return ERR_PTR(-EINVAL);
+}
+
+/**
+ * qcom_smem_get() - resolve pointer and size of a smem item
+ * @host: the remote processor, or -1
+ * @item: smem item handle
+ * @size: pointer to be filled out with size of the item
+ *
+ * Looks up smem item and returns pointer to it. Size of smem
+ * item is returned in @size.
+ */
+void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
+{
+ struct smem_partition_header *phdr;
+ unsigned long flags;
+ size_t cacheln;
+ int ret;
+ void *ptr = ERR_PTR(-EPROBE_DEFER);
+
+ if (!__smem)
+ return ptr;
+
+ if (WARN_ON(item >= __smem->item_count))
+ return ERR_PTR(-EINVAL);
+
+ ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
+ HWSPINLOCK_TIMEOUT,
+ &flags);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
+ phdr = __smem->partitions[host];
+ cacheln = __smem->cacheline[host];
+ ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
+ } else if (__smem->global_partition) {
+ phdr = __smem->global_partition;
+ cacheln = __smem->global_cacheline;
+ ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
+ } else {
+ ptr = qcom_smem_get_global(__smem, item, size);
+ }
+
+ hwspin_unlock_irqrestore(__smem->hwlock, &flags);
+
+	return ptr;
+}
+EXPORT_SYMBOL(qcom_smem_get);
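+
+/*
+ * Illustrative usage (not part of this patch), typically paired with the
+ * qcom_smem_alloc() call above; host and item numbers are hypothetical.
+ *
+ *	size_t size;
+ *	struct my_shared *p;
+ *
+ *	p = qcom_smem_get(remote_host, 42, &size);
+ *	if (IS_ERR(p))
+ *		return PTR_ERR(p);
+ */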
+
+/**
+ * qcom_smem_get_free_space() - retrieve amount of free space in a partition
+ * @host: the remote processor identifying a partition, or -1
+ *
+ * To be used by smem clients as a quick way to determine if any new
+ * allocations have been made.
+ */
+int qcom_smem_get_free_space(unsigned host)
+{
+ struct smem_partition_header *phdr;
+ struct smem_header *header;
+ unsigned ret;
+
+ if (!__smem)
+ return -EPROBE_DEFER;
+
+ if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
+ phdr = __smem->partitions[host];
+ ret = le32_to_cpu(phdr->offset_free_cached) -
+ le32_to_cpu(phdr->offset_free_uncached);
+ } else if (__smem->global_partition) {
+ phdr = __smem->global_partition;
+ ret = le32_to_cpu(phdr->offset_free_cached) -
+ le32_to_cpu(phdr->offset_free_uncached);
+ } else {
+ header = __smem->regions[0].virt_base;
+ ret = le32_to_cpu(header->available);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_smem_get_free_space);
+
+/**
+ * qcom_smem_virt_to_phys() - return the physical address associated
+ * with an smem item pointer (previously returned by qcom_smem_get())
+ * @p: the virtual address to convert
+ *
+ * Returns 0 if the pointer provided is not within any smem region.
+ */
+phys_addr_t qcom_smem_virt_to_phys(void *p)
+{
+ unsigned i;
+
+ for (i = 0; i < __smem->num_regions; i++) {
+ struct smem_region *region = &__smem->regions[i];
+
+ if (p < region->virt_base)
+ continue;
+ if (p < region->virt_base + region->size) {
+ u64 offset = p - region->virt_base;
+
+ return (phys_addr_t)region->aux_base + offset;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(qcom_smem_virt_to_phys);
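+
+/*
+ * Illustrative usage (not part of this patch): hand the physical address
+ * of an smem item to a hardware block that reads shared memory directly.
+ *
+ *	phys_addr_t phys = qcom_smem_virt_to_phys(p);
+ *
+ *	if (!phys)
+ *		return -EINVAL;
+ */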
+
+static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
+{
+ struct smem_header *header;
+ __le32 *versions;
+
+ header = smem->regions[0].virt_base;
+ versions = header->version;
+
+ return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
+}
+
+static struct smem_ptable *qcom_smem_get_ptable(struct qcom_smem *smem)
+{
+ struct smem_ptable *ptable;
+ u32 version;
+
+ ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
+ if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
+ return ERR_PTR(-ENOENT);
+
+ version = le32_to_cpu(ptable->version);
+ if (version != 1) {
+ dev_err(smem->dev,
+ "Unsupported partition header version %d\n", version);
+ return ERR_PTR(-EINVAL);
+ }
+ return ptable;
+}
+
+static u32 qcom_smem_get_item_count(struct qcom_smem *smem)
+{
+ struct smem_ptable *ptable;
+ struct smem_info *info;
+
+ ptable = qcom_smem_get_ptable(smem);
+ if (IS_ERR_OR_NULL(ptable))
+ return SMEM_ITEM_COUNT;
+
+ info = (struct smem_info *)&ptable->entry[ptable->num_entries];
+ if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic)))
+ return SMEM_ITEM_COUNT;
+
+ return le16_to_cpu(info->num_items);
+}
+
+static int qcom_smem_set_global_partition(struct qcom_smem *smem)
+{
+ struct smem_partition_header *header;
+ struct smem_ptable_entry *entry;
+ struct smem_ptable *ptable;
+ u32 host0, host1, size;
+ bool found = false;
+ int i;
+
+ if (smem->global_partition) {
+ dev_err(smem->dev, "Already found the global partition\n");
+ return -EINVAL;
+ }
+
+ ptable = qcom_smem_get_ptable(smem);
+ if (IS_ERR(ptable))
+ return PTR_ERR(ptable);
+
+ for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
+ entry = &ptable->entry[i];
+ host0 = le16_to_cpu(entry->host0);
+ host1 = le16_to_cpu(entry->host1);
+
+ if (host0 == SMEM_GLOBAL_HOST && host0 == host1) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ dev_err(smem->dev, "Missing entry for global partition\n");
+ return -EINVAL;
+ }
+
+ if (!le32_to_cpu(entry->offset) || !le32_to_cpu(entry->size)) {
+ dev_err(smem->dev, "Invalid entry for global partition\n");
+ return -EINVAL;
+ }
+
+ header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);
+ host0 = le16_to_cpu(header->host0);
+ host1 = le16_to_cpu(header->host1);
+
+ if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) {
+ dev_err(smem->dev, "Global partition has invalid magic\n");
+ return -EINVAL;
+ }
+
+ if (host0 != SMEM_GLOBAL_HOST && host1 != SMEM_GLOBAL_HOST) {
+ dev_err(smem->dev, "Global partition hosts are invalid\n");
+ return -EINVAL;
+ }
+
+ if (le32_to_cpu(header->size) != le32_to_cpu(entry->size)) {
+ dev_err(smem->dev, "Global partition has invalid size\n");
+ return -EINVAL;
+ }
+
+ size = le32_to_cpu(header->offset_free_uncached);
+ if (size > le32_to_cpu(header->size)) {
+ dev_err(smem->dev,
+ "Global partition has invalid free pointer\n");
+ return -EINVAL;
+ }
+
+ smem->global_partition = header;
+ smem->global_cacheline = le32_to_cpu(entry->cacheline);
+
+ return 0;
+}
+
+static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
+ unsigned int local_host)
+{
+ struct smem_partition_header *header;
+ struct smem_ptable_entry *entry;
+ struct smem_ptable *ptable;
+ unsigned int remote_host;
+ u32 host0, host1;
+ int i;
+
+ ptable = qcom_smem_get_ptable(smem);
+ if (IS_ERR(ptable))
+ return PTR_ERR(ptable);
+
+ for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
+ entry = &ptable->entry[i];
+ host0 = le16_to_cpu(entry->host0);
+ host1 = le16_to_cpu(entry->host1);
+
+ if (host0 != local_host && host1 != local_host)
+ continue;
+
+ if (!le32_to_cpu(entry->offset))
+ continue;
+
+ if (!le32_to_cpu(entry->size))
+ continue;
+
+ if (host0 == local_host)
+ remote_host = host1;
+ else
+ remote_host = host0;
+
+ if (remote_host >= SMEM_HOST_COUNT) {
+ dev_err(smem->dev,
+ "Invalid remote host %d\n",
+ remote_host);
+ return -EINVAL;
+ }
+
+ if (smem->partitions[remote_host]) {
+ dev_err(smem->dev,
+ "Already found a partition for host %d\n",
+ remote_host);
+ return -EINVAL;
+ }
+
+ header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);
+ host0 = le16_to_cpu(header->host0);
+ host1 = le16_to_cpu(header->host1);
+
+ if (memcmp(header->magic, SMEM_PART_MAGIC,
+ sizeof(header->magic))) {
+ dev_err(smem->dev,
+ "Partition %d has invalid magic\n", i);
+ return -EINVAL;
+ }
+
+ if (host0 != local_host && host1 != local_host) {
+ dev_err(smem->dev,
+ "Partition %d hosts are invalid\n", i);
+ return -EINVAL;
+ }
+
+ if (host0 != remote_host && host1 != remote_host) {
+ dev_err(smem->dev,
+ "Partition %d hosts are invalid\n", i);
+ return -EINVAL;
+ }
+
+ if (le32_to_cpu(header->size) != le32_to_cpu(entry->size)) {
+ dev_err(smem->dev,
+ "Partition %d has invalid size\n", i);
+ return -EINVAL;
+ }
+
+ if (le32_to_cpu(header->offset_free_uncached) > le32_to_cpu(header->size)) {
+ dev_err(smem->dev,
+ "Partition %d has invalid free pointer\n", i);
+ return -EINVAL;
+ }
+
+ smem->partitions[remote_host] = header;
+ smem->cacheline[remote_host] = le32_to_cpu(entry->cacheline);
+ }
+
+ return 0;
+}
+
+static int qcom_smem_map_memory(struct qcom_smem *smem, struct device *dev,
+ const char *name, int i)
+{
+ struct device_node *np;
+ struct resource r;
+ int ret;
+
+ np = of_parse_phandle(dev->of_node, name, 0);
+ if (!np) {
+ dev_err(dev, "No %s specified\n", name);
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(np, 0, &r);
+ of_node_put(np);
+ if (ret)
+ return ret;
+
+ smem->regions[i].aux_base = (u32)r.start;
+ smem->regions[i].size = resource_size(&r);
+ smem->regions[i].virt_base = devm_ioremap_wc(dev, r.start, resource_size(&r));
+ if (!smem->regions[i].virt_base)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int qcom_smem_probe(struct platform_device *pdev)
+{
+ struct smem_header *header;
+ struct qcom_smem *smem;
+ size_t array_size;
+ int num_regions;
+ int hwlock_id;
+ u32 version;
+ int ret;
+
+ num_regions = 1;
+ if (of_find_property(pdev->dev.of_node, "qcom,rpm-msg-ram", NULL))
+ num_regions++;
+
+ array_size = num_regions * sizeof(struct smem_region);
+ smem = devm_kzalloc(&pdev->dev, sizeof(*smem) + array_size, GFP_KERNEL);
+ if (!smem)
+ return -ENOMEM;
+
+ smem->dev = &pdev->dev;
+ smem->num_regions = num_regions;
+
+ ret = qcom_smem_map_memory(smem, &pdev->dev, "memory-region", 0);
+ if (ret)
+ return ret;
+
+	if (num_regions > 1) {
+		ret = qcom_smem_map_memory(smem, &pdev->dev,
+					   "qcom,rpm-msg-ram", 1);
+		if (ret)
+			return ret;
+	}
+
+ header = smem->regions[0].virt_base;
+ if (le32_to_cpu(header->initialized) != 1 ||
+ le32_to_cpu(header->reserved)) {
+ dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
+ return -EINVAL;
+ }
+
+ version = qcom_smem_get_sbl_version(smem);
+ switch (version >> 16) {
+ case SMEM_GLOBAL_PART_VERSION:
+ ret = qcom_smem_set_global_partition(smem);
+ if (ret < 0)
+ return ret;
+ smem->item_count = qcom_smem_get_item_count(smem);
+ break;
+ case SMEM_GLOBAL_HEAP_VERSION:
+ smem->item_count = SMEM_ITEM_COUNT;
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
+ return -EINVAL;
+ }
+
+ ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
+ if (ret < 0 && ret != -ENOENT)
+ return ret;
+
+ hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
+ if (hwlock_id < 0) {
+ if (hwlock_id != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to retrieve hwlock\n");
+ return hwlock_id;
+ }
+
+ smem->hwlock = hwspin_lock_request_specific(hwlock_id);
+ if (!smem->hwlock)
+ return -ENXIO;
+
+ __smem = smem;
+
+ return 0;
+}
+
+static int qcom_smem_remove(struct platform_device *pdev)
+{
+ hwspin_lock_free(__smem->hwlock);
+ __smem = NULL;
+
+ return 0;
+}
+
+static const struct of_device_id qcom_smem_of_match[] = {
+ { .compatible = "qcom,smem" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_smem_of_match);
+
+static struct platform_driver qcom_smem_driver = {
+ .probe = qcom_smem_probe,
+ .remove = qcom_smem_remove,
+ .driver = {
+ .name = "qcom-smem",
+ .of_match_table = qcom_smem_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init qcom_smem_init(void)
+{
+ return platform_driver_register(&qcom_smem_driver);
+}
+arch_initcall(qcom_smem_init);
+
+static void __exit qcom_smem_exit(void)
+{
+ platform_driver_unregister(&qcom_smem_driver);
+}
+module_exit(qcom_smem_exit);
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
+MODULE_DESCRIPTION("Qualcomm Shared Memory Manager");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/smem_state.c b/drivers/soc/qcom/smem_state.c
new file mode 100644
index 000000000..d5437ca76
--- /dev/null
+++ b/drivers/soc/qcom/smem_state.c
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications Inc.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/smem_state.h>
+
+static LIST_HEAD(smem_states);
+static DEFINE_MUTEX(list_lock);
+
+/**
+ * struct qcom_smem_state - state context
+ * @refcount: refcount for the state
+ * @orphan: boolean indicator that this state has been unregistered
+ * @list: entry in smem_states list
+ * @of_node: of_node to use for matching the state in DT
+ * @priv: implementation private data
+ * @ops: ops for the state
+ */
+struct qcom_smem_state {
+ struct kref refcount;
+ bool orphan;
+
+ struct list_head list;
+ struct device_node *of_node;
+
+ void *priv;
+
+ struct qcom_smem_state_ops ops;
+};
+
+/**
+ * qcom_smem_state_update_bits() - update the masked bits in state with value
+ * @state: state handle acquired by calling qcom_smem_state_get()
+ * @mask: bit mask for the change
+ * @value: new value for the masked bits
+ *
+ * Returns 0 on success, otherwise negative errno.
+ */
+int qcom_smem_state_update_bits(struct qcom_smem_state *state,
+ u32 mask,
+ u32 value)
+{
+ if (state->orphan)
+ return -ENXIO;
+
+ if (!state->ops.update_bits)
+ return -ENOTSUPP;
+
+ return state->ops.update_bits(state->priv, mask, value);
+}
+EXPORT_SYMBOL_GPL(qcom_smem_state_update_bits);
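+
+/*
+ * Illustrative usage (not part of this patch): assert a single state bit
+ * previously looked up with qcom_smem_state_get().
+ *
+ *	ret = qcom_smem_state_update_bits(state, BIT(bit), BIT(bit));
+ */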
+
+static struct qcom_smem_state *of_node_to_state(struct device_node *np)
+{
+ struct qcom_smem_state *state;
+
+ mutex_lock(&list_lock);
+
+ list_for_each_entry(state, &smem_states, list) {
+ if (state->of_node == np) {
+ kref_get(&state->refcount);
+ goto unlock;
+ }
+ }
+ state = ERR_PTR(-EPROBE_DEFER);
+
+unlock:
+ mutex_unlock(&list_lock);
+
+ return state;
+}
+
+/**
+ * qcom_smem_state_get() - acquire handle to a state
+ * @dev: client device pointer
+ * @con_id: name of the state to lookup
+ * @bit: flags from the state reference, indicating which bit is affected
+ *
+ * Returns handle to the state, or ERR_PTR(). qcom_smem_state_put() must be
+ * called to release the returned state handle.
+ */
+struct qcom_smem_state *qcom_smem_state_get(struct device *dev,
+ const char *con_id,
+ unsigned *bit)
+{
+ struct qcom_smem_state *state;
+ struct of_phandle_args args;
+ int index = 0;
+ int ret;
+
+ if (con_id) {
+ index = of_property_match_string(dev->of_node,
+ "qcom,smem-state-names",
+ con_id);
+ if (index < 0) {
+ dev_err(dev, "missing qcom,smem-state-names\n");
+ return ERR_PTR(index);
+ }
+ }
+
+ ret = of_parse_phandle_with_args(dev->of_node,
+ "qcom,smem-states",
+ "#qcom,smem-state-cells",
+ index,
+ &args);
+ if (ret) {
+ dev_err(dev, "failed to parse qcom,smem-states property\n");
+ return ERR_PTR(ret);
+ }
+
+ if (args.args_count != 1) {
+ dev_err(dev, "invalid #qcom,smem-state-cells\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ state = of_node_to_state(args.np);
+ if (IS_ERR(state))
+ goto put;
+
+ *bit = args.args[0];
+
+put:
+ of_node_put(args.np);
+ return state;
+}
+EXPORT_SYMBOL_GPL(qcom_smem_state_get);
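+
+/*
+ * Illustrative usage (not part of this patch): look up the state named
+ * "stop" in the client's DT node; the con_id is hypothetical.
+ *
+ *	unsigned int bit;
+ *	struct qcom_smem_state *state;
+ *
+ *	state = qcom_smem_state_get(&pdev->dev, "stop", &bit);
+ *	if (IS_ERR(state))
+ *		return PTR_ERR(state);
+ */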
+
+static void qcom_smem_state_release(struct kref *ref)
+{
+ struct qcom_smem_state *state = container_of(ref, struct qcom_smem_state, refcount);
+
+ list_del(&state->list);
+ kfree(state);
+}
+
+/**
+ * qcom_smem_state_put() - release state handle
+ * @state: state handle to be released
+ */
+void qcom_smem_state_put(struct qcom_smem_state *state)
+{
+ mutex_lock(&list_lock);
+ kref_put(&state->refcount, qcom_smem_state_release);
+ mutex_unlock(&list_lock);
+}
+EXPORT_SYMBOL_GPL(qcom_smem_state_put);
+
+/**
+ * qcom_smem_state_register() - register a new state
+ * @of_node: of_node used for matching client lookups
+ * @ops: implementation ops
+ * @priv: implementation specific private data
+ */
+struct qcom_smem_state *qcom_smem_state_register(struct device_node *of_node,
+ const struct qcom_smem_state_ops *ops,
+ void *priv)
+{
+ struct qcom_smem_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&state->refcount);
+
+ state->of_node = of_node;
+ state->ops = *ops;
+ state->priv = priv;
+
+ mutex_lock(&list_lock);
+ list_add(&state->list, &smem_states);
+ mutex_unlock(&list_lock);
+
+ return state;
+}
+EXPORT_SYMBOL_GPL(qcom_smem_state_register);
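+
+/*
+ * Illustrative provider side (not part of this patch): a driver exposing
+ * outbound state, as smp2p does below, registers an ops table and private
+ * data per entry.
+ *
+ *	static const struct qcom_smem_state_ops my_ops = {
+ *		.update_bits = my_update_bits,
+ *	};
+ *
+ *	state = qcom_smem_state_register(np, &my_ops, priv);
+ */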
+
+/**
+ * qcom_smem_state_unregister() - unregister a registered state
+ * @state: state handle to be unregistered
+ */
+void qcom_smem_state_unregister(struct qcom_smem_state *state)
+{
+ state->orphan = true;
+ qcom_smem_state_put(state);
+}
+EXPORT_SYMBOL_GPL(qcom_smem_state_unregister);
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
new file mode 100644
index 000000000..5721a353d
--- /dev/null
+++ b/drivers/soc/qcom/smp2p.c
@@ -0,0 +1,608 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mailbox_client.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/soc/qcom/smem_state.h>
+#include <linux/spinlock.h>
+
+/*
+ * The Shared Memory Point to Point (SMP2P) protocol facilitates communication
+ * of a single 32-bit value between two processors. Each value has a single
+ * writer (the local side) and a single reader (the remote side). Values are
+ * uniquely identified in the system by the directed edge (local processor ID
+ * to remote processor ID) and a string identifier.
+ *
+ * Each processor is responsible for creating the outgoing SMEM items and each
+ * item is writable by the local processor and readable by the remote
+ * processor. By using two separate SMEM items that are single-reader and
+ * single-writer, SMP2P does not require any remote locking mechanisms.
+ *
+ * The driver exposes a smem state handle for each outbound entry and a
+ * virtual interrupt controller for each inbound entry.
+ */
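+
+/*
+ * Rough sketch of one smp2p edge (illustrative): each side writes only its
+ * own outbound item and reads the other side's item as inbound.
+ *
+ *	local out item  ----(smem)---->  remote in
+ *	local in       <----(smem)-----  remote out item
+ */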
+
+#define SMP2P_MAX_ENTRY 16
+#define SMP2P_MAX_ENTRY_NAME 16
+
+#define SMP2P_FEATURE_SSR_ACK 0x1
+
+#define SMP2P_MAGIC 0x504d5324 /* "$SMP" */
+
+/**
+ * struct smp2p_smem_item - in memory communication structure
+ * @magic: magic number
+ * @version: version - must be 1
+ * @features: features flag - currently unused
+ * @local_pid: processor id of sending end
+ * @remote_pid: processor id of receiving end
+ * @total_entries: number of entries - always SMP2P_MAX_ENTRY
+ * @valid_entries: number of allocated entries
+ * @flags: flags - currently unused
+ * @entries: individual communication entries
+ * @name: name of the entry
+ * @value: content of the entry
+ */
+struct smp2p_smem_item {
+ u32 magic;
+ u8 version;
+ unsigned features:24;
+ u16 local_pid;
+ u16 remote_pid;
+ u16 total_entries;
+ u16 valid_entries;
+ u32 flags;
+
+ struct {
+ u8 name[SMP2P_MAX_ENTRY_NAME];
+ u32 value;
+ } entries[SMP2P_MAX_ENTRY];
+} __packed;
+
+/**
+ * struct smp2p_entry - driver context matching one entry
+ * @node: list entry to keep track of allocated entries
+ * @smp2p: reference to the device driver context
+ * @name: name of the entry, to match against smp2p_smem_item
+ * @value: pointer to smp2p_smem_item entry value
+ * @last_value: last handled value
+ * @domain: irq_domain for inbound entries
+ * @irq_enabled: bitmap to track enabled irq bits
+ * @irq_rising: bitmap to mark irq bits for rising detection
+ * @irq_falling: bitmap to mark irq bits for falling detection
+ * @state: smem state handle
+ * @lock: spinlock to protect read-modify-write of the value
+ */
+struct smp2p_entry {
+ struct list_head node;
+ struct qcom_smp2p *smp2p;
+
+ const char *name;
+ u32 *value;
+ u32 last_value;
+
+ struct irq_domain *domain;
+ DECLARE_BITMAP(irq_enabled, 32);
+ DECLARE_BITMAP(irq_rising, 32);
+ DECLARE_BITMAP(irq_falling, 32);
+
+ struct qcom_smem_state *state;
+
+ spinlock_t lock;
+};
+
+#define SMP2P_INBOUND 0
+#define SMP2P_OUTBOUND 1
+
+/**
+ * struct qcom_smp2p - device driver context
+ * @dev: device driver handle
+ * @in: pointer to the inbound smem item
+ * @out: pointer to the outbound smem item
+ * @smem_items: ids of the two smem items
+ * @valid_entries: already scanned inbound entries
+ * @local_pid: processor id of the inbound edge
+ * @remote_pid: processor id of the outbound edge
+ * @ipc_regmap: regmap for the outbound ipc
+ * @ipc_offset: offset within the regmap
+ * @ipc_bit: bit in regmap@offset to kick to signal remote processor
+ * @mbox_client: mailbox client handle
+ * @mbox_chan: apcs ipc mailbox channel handle
+ * @inbound: list of inbound entries
+ * @outbound: list of outbound entries
+ */
+struct qcom_smp2p {
+ struct device *dev;
+
+ struct smp2p_smem_item *in;
+ struct smp2p_smem_item *out;
+
+ unsigned smem_items[SMP2P_OUTBOUND + 1];
+
+ unsigned valid_entries;
+
+ unsigned local_pid;
+ unsigned remote_pid;
+
+ struct regmap *ipc_regmap;
+ int ipc_offset;
+ int ipc_bit;
+
+ struct mbox_client mbox_client;
+ struct mbox_chan *mbox_chan;
+
+ struct list_head inbound;
+ struct list_head outbound;
+};
+
+static void qcom_smp2p_kick(struct qcom_smp2p *smp2p)
+{
+ /* Make sure any updated data is written before the kick */
+ wmb();
+
+ if (smp2p->mbox_chan) {
+ mbox_send_message(smp2p->mbox_chan, NULL);
+ mbox_client_txdone(smp2p->mbox_chan, 0);
+ } else {
+ regmap_write(smp2p->ipc_regmap, smp2p->ipc_offset, BIT(smp2p->ipc_bit));
+ }
+}
+
+/**
+ * qcom_smp2p_intr() - interrupt handler for incoming notifications
+ * @irq: unused
+ * @data: smp2p driver context
+ *
+ * Handle notifications from the remote side to handle newly allocated entries
+ * or any changes to the state bits of existing entries.
+ */
+static irqreturn_t qcom_smp2p_intr(int irq, void *data)
+{
+ struct smp2p_smem_item *in;
+ struct smp2p_entry *entry;
+ struct qcom_smp2p *smp2p = data;
+ unsigned smem_id = smp2p->smem_items[SMP2P_INBOUND];
+ unsigned pid = smp2p->remote_pid;
+ size_t size;
+ int irq_pin;
+ u32 status;
+ char buf[SMP2P_MAX_ENTRY_NAME];
+ u32 val;
+ int i;
+
+ in = smp2p->in;
+
+ /* Acquire smem item, if not already found */
+ if (!in) {
+ in = qcom_smem_get(pid, smem_id, &size);
+ if (IS_ERR(in)) {
+ dev_err(smp2p->dev,
+ "Unable to acquire remote smp2p item\n");
+ return IRQ_HANDLED;
+ }
+
+ smp2p->in = in;
+ }
+
+ /* Match newly created entries */
+ for (i = smp2p->valid_entries; i < in->valid_entries; i++) {
+ list_for_each_entry(entry, &smp2p->inbound, node) {
+ memcpy(buf, in->entries[i].name, sizeof(buf));
+ if (!strcmp(buf, entry->name)) {
+ entry->value = &in->entries[i].value;
+ break;
+ }
+ }
+ }
+ smp2p->valid_entries = i;
+
+ /* Fire interrupts based on any value changes */
+ list_for_each_entry(entry, &smp2p->inbound, node) {
+ /* Ignore entries not yet allocated by the remote side */
+ if (!entry->value)
+ continue;
+
+ val = readl(entry->value);
+
+ status = val ^ entry->last_value;
+ entry->last_value = val;
+
+ /* No changes of this entry? */
+ if (!status)
+ continue;
+
+ for_each_set_bit(i, entry->irq_enabled, 32) {
+ if (!(status & BIT(i)))
+ continue;
+
+ if ((val & BIT(i) && test_bit(i, entry->irq_rising)) ||
+ (!(val & BIT(i)) && test_bit(i, entry->irq_falling))) {
+ irq_pin = irq_find_mapping(entry->domain, i);
+ handle_nested_irq(irq_pin);
+ }
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void smp2p_mask_irq(struct irq_data *irqd)
+{
+ struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
+
+ clear_bit(irq, entry->irq_enabled);
+}
+
+static void smp2p_unmask_irq(struct irq_data *irqd)
+{
+ struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
+
+ set_bit(irq, entry->irq_enabled);
+}
+
+static int smp2p_set_irq_type(struct irq_data *irqd, unsigned int type)
+{
+ struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
+
+ if (!(type & IRQ_TYPE_EDGE_BOTH))
+ return -EINVAL;
+
+ if (type & IRQ_TYPE_EDGE_RISING)
+ set_bit(irq, entry->irq_rising);
+ else
+ clear_bit(irq, entry->irq_rising);
+
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ set_bit(irq, entry->irq_falling);
+ else
+ clear_bit(irq, entry->irq_falling);
+
+ return 0;
+}
+
+static struct irq_chip smp2p_irq_chip = {
+ .name = "smp2p",
+ .irq_mask = smp2p_mask_irq,
+ .irq_unmask = smp2p_unmask_irq,
+ .irq_set_type = smp2p_set_irq_type,
+};
+
+static int smp2p_irq_map(struct irq_domain *d,
+ unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct smp2p_entry *entry = d->host_data;
+
+ irq_set_chip_and_handler(irq, &smp2p_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, entry);
+ irq_set_nested_thread(irq, 1);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops smp2p_irq_ops = {
+ .map = smp2p_irq_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static int qcom_smp2p_inbound_entry(struct qcom_smp2p *smp2p,
+ struct smp2p_entry *entry,
+ struct device_node *node)
+{
+ entry->domain = irq_domain_add_linear(node, 32, &smp2p_irq_ops, entry);
+ if (!entry->domain) {
+ dev_err(smp2p->dev, "failed to add irq_domain\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int smp2p_update_bits(void *data, u32 mask, u32 value)
+{
+ struct smp2p_entry *entry = data;
+ unsigned long flags;
+ u32 orig;
+ u32 val;
+
+ spin_lock_irqsave(&entry->lock, flags);
+ val = orig = readl(entry->value);
+ val &= ~mask;
+ val |= value;
+ writel(val, entry->value);
+ spin_unlock_irqrestore(&entry->lock, flags);
+
+ if (val != orig)
+ qcom_smp2p_kick(entry->smp2p);
+
+ return 0;
+}
+
+static const struct qcom_smem_state_ops smp2p_state_ops = {
+ .update_bits = smp2p_update_bits,
+};
+
+static int qcom_smp2p_outbound_entry(struct qcom_smp2p *smp2p,
+ struct smp2p_entry *entry,
+ struct device_node *node)
+{
+ struct smp2p_smem_item *out = smp2p->out;
+ char buf[SMP2P_MAX_ENTRY_NAME] = {};
+
+ /* Allocate an entry from the smem item */
+ strlcpy(buf, entry->name, SMP2P_MAX_ENTRY_NAME);
+ memcpy(out->entries[out->valid_entries].name, buf, SMP2P_MAX_ENTRY_NAME);
+
+ /* Make the logical entry reference the physical value */
+ entry->value = &out->entries[out->valid_entries].value;
+
+ out->valid_entries++;
+
+ entry->state = qcom_smem_state_register(node, &smp2p_state_ops, entry);
+ if (IS_ERR(entry->state)) {
+ dev_err(smp2p->dev, "failed to register qcom_smem_state\n");
+ return PTR_ERR(entry->state);
+ }
+
+ return 0;
+}
+
+static int qcom_smp2p_alloc_outbound_item(struct qcom_smp2p *smp2p)
+{
+ struct smp2p_smem_item *out;
+ unsigned smem_id = smp2p->smem_items[SMP2P_OUTBOUND];
+ unsigned pid = smp2p->remote_pid;
+ int ret;
+
+ ret = qcom_smem_alloc(pid, smem_id, sizeof(*out));
+ if (ret < 0 && ret != -EEXIST) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(smp2p->dev,
+ "unable to allocate local smp2p item\n");
+ return ret;
+ }
+
+ out = qcom_smem_get(pid, smem_id, NULL);
+ if (IS_ERR(out)) {
+ dev_err(smp2p->dev, "Unable to acquire local smp2p item\n");
+ return PTR_ERR(out);
+ }
+
+ memset(out, 0, sizeof(*out));
+ out->magic = SMP2P_MAGIC;
+ out->local_pid = smp2p->local_pid;
+ out->remote_pid = smp2p->remote_pid;
+ out->total_entries = SMP2P_MAX_ENTRY;
+ out->valid_entries = 0;
+
+ /*
+ * Make sure the rest of the header is written before we validate the
+ * item by writing a valid version number.
+ */
+ wmb();
+ out->version = 1;
+
+ qcom_smp2p_kick(smp2p);
+
+ smp2p->out = out;
+
+ return 0;
+}
+
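+/*
+ * The "qcom,ipc" property is a three-cell reference: a phandle to the
+ * mailbox syscon, the register offset and the bit to strobe, e.g.
+ * (illustrative device tree fragment, not part of this patch):
+ *
+ *	qcom,ipc = <&apcs 8 14>;
+ */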
+static int smp2p_parse_ipc(struct qcom_smp2p *smp2p)
+{
+ struct device_node *syscon;
+ struct device *dev = smp2p->dev;
+ const char *key;
+ int ret;
+
+ syscon = of_parse_phandle(dev->of_node, "qcom,ipc", 0);
+ if (!syscon) {
+ dev_err(dev, "no qcom,ipc node\n");
+ return -ENODEV;
+ }
+
+ smp2p->ipc_regmap = syscon_node_to_regmap(syscon);
+ of_node_put(syscon);
+ if (IS_ERR(smp2p->ipc_regmap))
+ return PTR_ERR(smp2p->ipc_regmap);
+
+ key = "qcom,ipc";
+ ret = of_property_read_u32_index(dev->of_node, key, 1, &smp2p->ipc_offset);
+ if (ret < 0) {
+ dev_err(dev, "no offset in %s\n", key);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_index(dev->of_node, key, 2, &smp2p->ipc_bit);
+ if (ret < 0) {
+ dev_err(dev, "no bit in %s\n", key);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qcom_smp2p_probe(struct platform_device *pdev)
+{
+ struct smp2p_entry *entry;
+ struct device_node *node;
+ struct qcom_smp2p *smp2p;
+ const char *key;
+ int irq;
+ int ret;
+
+ smp2p = devm_kzalloc(&pdev->dev, sizeof(*smp2p), GFP_KERNEL);
+ if (!smp2p)
+ return -ENOMEM;
+
+ smp2p->dev = &pdev->dev;
+ INIT_LIST_HEAD(&smp2p->inbound);
+ INIT_LIST_HEAD(&smp2p->outbound);
+
+ platform_set_drvdata(pdev, smp2p);
+
+ key = "qcom,smem";
+ ret = of_property_read_u32_array(pdev->dev.of_node, key,
+ smp2p->smem_items, 2);
+ if (ret)
+ return ret;
+
+ key = "qcom,local-pid";
+ ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->local_pid);
+ if (ret)
+ goto report_read_failure;
+
+ key = "qcom,remote-pid";
+ ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->remote_pid);
+ if (ret)
+ goto report_read_failure;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "unable to acquire smp2p interrupt\n");
+ return irq;
+ }
+
+ smp2p->mbox_client.dev = &pdev->dev;
+ smp2p->mbox_client.knows_txdone = true;
+ smp2p->mbox_chan = mbox_request_channel(&smp2p->mbox_client, 0);
+ if (IS_ERR(smp2p->mbox_chan)) {
+ if (PTR_ERR(smp2p->mbox_chan) != -ENODEV)
+ return PTR_ERR(smp2p->mbox_chan);
+
+ smp2p->mbox_chan = NULL;
+
+ ret = smp2p_parse_ipc(smp2p);
+ if (ret)
+ return ret;
+ }
+
+ ret = qcom_smp2p_alloc_outbound_item(smp2p);
+ if (ret < 0)
+ goto release_mbox;
+
+ for_each_available_child_of_node(pdev->dev.of_node, node) {
+ entry = devm_kzalloc(&pdev->dev, sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ ret = -ENOMEM;
+ goto unwind_interfaces;
+ }
+
+ entry->smp2p = smp2p;
+ spin_lock_init(&entry->lock);
+
+ ret = of_property_read_string(node, "qcom,entry-name", &entry->name);
+ if (ret < 0)
+ goto unwind_interfaces;
+
+ if (of_property_read_bool(node, "interrupt-controller")) {
+ ret = qcom_smp2p_inbound_entry(smp2p, entry, node);
+ if (ret < 0)
+ goto unwind_interfaces;
+
+ list_add(&entry->node, &smp2p->inbound);
+ } else {
+ ret = qcom_smp2p_outbound_entry(smp2p, entry, node);
+ if (ret < 0)
+ goto unwind_interfaces;
+
+ list_add(&entry->node, &smp2p->outbound);
+ }
+ }
+
+ /* Kick the outgoing edge after allocating entries */
+ qcom_smp2p_kick(smp2p);
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq,
+ NULL, qcom_smp2p_intr,
+ IRQF_ONESHOT,
+ "smp2p", (void *)smp2p);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request interrupt\n");
+ goto unwind_interfaces;
+ }
+
+ return 0;
+
+unwind_interfaces:
+ list_for_each_entry(entry, &smp2p->inbound, node)
+ irq_domain_remove(entry->domain);
+
+ list_for_each_entry(entry, &smp2p->outbound, node)
+ qcom_smem_state_unregister(entry->state);
+
+ smp2p->out->valid_entries = 0;
+
+release_mbox:
+ mbox_free_channel(smp2p->mbox_chan);
+
+ return ret;
+
+report_read_failure:
+ dev_err(&pdev->dev, "failed to read %s\n", key);
+ return -EINVAL;
+}
+
+static int qcom_smp2p_remove(struct platform_device *pdev)
+{
+ struct qcom_smp2p *smp2p = platform_get_drvdata(pdev);
+ struct smp2p_entry *entry;
+
+ list_for_each_entry(entry, &smp2p->inbound, node)
+ irq_domain_remove(entry->domain);
+
+ list_for_each_entry(entry, &smp2p->outbound, node)
+ qcom_smem_state_unregister(entry->state);
+
+ mbox_free_channel(smp2p->mbox_chan);
+
+ smp2p->out->valid_entries = 0;
+
+ return 0;
+}
+
+static const struct of_device_id qcom_smp2p_of_match[] = {
+ { .compatible = "qcom,smp2p" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_smp2p_of_match);
+
+static struct platform_driver qcom_smp2p_driver = {
+ .probe = qcom_smp2p_probe,
+ .remove = qcom_smp2p_remove,
+ .driver = {
+ .name = "qcom_smp2p",
+ .of_match_table = qcom_smp2p_of_match,
+ },
+};
+module_platform_driver(qcom_smp2p_driver);
+
+MODULE_DESCRIPTION("Qualcomm Shared Memory Point to Point driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c
new file mode 100644
index 000000000..5304529b4
--- /dev/null
+++ b/drivers/soc/qcom/smsm.c
@@ -0,0 +1,635 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications Inc.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/regmap.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/soc/qcom/smem_state.h>
+
+/*
+ * This driver implements the Qualcomm Shared Memory State Machine, a mechanism
+ * for communicating single bit state information to remote processors.
+ *
+ * The implementation is based on two sections of shared memory; the first
+ * holding the state bits and the second holding a matrix of subscription bits.
+ *
+ * The state bits are structured in entries of 32 bits, each belonging to one
+ * system in the SoC. The entry belonging to the local system is considered
+ * read-write, while the rest should be considered read-only.
+ *
+ * The subscription matrix consists of N bitmaps per entry, denoting interest
+ * in updates of the entry for each of the N hosts. Upon updating a state bit
+ * each host's subscription bitmap should be queried and the remote system
+ * should be interrupted if they request so.
+ *
+ * The subscription matrix is laid out in entry-major order:
+ * entry0: [host0 ... hostN]
+ * .
+ * .
+ * entryM: [host0 ... hostN]
+ *
+ * A third, optional, shared memory region might contain information regarding
+ * the number of entries in the state bitmap as well as number of columns in
+ * the subscription matrix.
+ */
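+
+/*
+ * For example, with the entry-major layout above, the subscription bitmap of
+ * host h for entry e sits at a fixed offset from the base of the matrix
+ * (illustrative sketch, not part of the original code):
+ *
+ *	u32 *sub = intr_mask + e * num_hosts + h;
+ *
+ * This is the same arithmetic used below when wiring up entry->subscription
+ * and smsm->subscription.
+ */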
+
+/*
+ * Shared memory identifiers, used to acquire handles to respective memory
+ * region.
+ */
+#define SMEM_SMSM_SHARED_STATE 85
+#define SMEM_SMSM_CPU_INTR_MASK 333
+#define SMEM_SMSM_SIZE_INFO 419
+
+/*
+ * Default sizes, in case SMEM_SMSM_SIZE_INFO is not found.
+ */
+#define SMSM_DEFAULT_NUM_ENTRIES 8
+#define SMSM_DEFAULT_NUM_HOSTS 3
+
+struct smsm_entry;
+struct smsm_host;
+
+/**
+ * struct qcom_smsm - smsm driver context
+ * @dev: smsm device pointer
+ * @local_host: column in the subscription matrix representing this system
+ * @num_hosts: number of columns in the subscription matrix
+ * @num_entries: number of entries in the state map and rows in the subscription
+ * matrix
+ * @local_state: pointer to the local processor's state bits
+ * @subscription: pointer to local processor's row in subscription matrix
+ * @state: smem state handle
+ * @lock: spinlock for read-modify-write of the outgoing state
+ * @entries: context for each of the entries
+ * @hosts: context for each of the hosts
+ */
+struct qcom_smsm {
+ struct device *dev;
+
+ u32 local_host;
+
+ u32 num_hosts;
+ u32 num_entries;
+
+ u32 *local_state;
+ u32 *subscription;
+ struct qcom_smem_state *state;
+
+ spinlock_t lock;
+
+ struct smsm_entry *entries;
+ struct smsm_host *hosts;
+};
+
+/**
+ * struct smsm_entry - per remote processor entry context
+ * @smsm: back-reference to driver context
+ * @domain: IRQ domain for this entry, if representing a remote system
+ * @irq_enabled: bitmap of which state bits IRQs are enabled
+ * @irq_rising: bitmap tracking if rising bits should be propagated
+ * @irq_falling: bitmap tracking if falling bits should be propagated
+ * @last_value: snapshot of state bits last time the interrupts were propagated
+ * @remote_state: pointer to this entry's state bits
+ * @subscription: pointer to a row in the subscription matrix representing this
+ * entry
+ */
+struct smsm_entry {
+ struct qcom_smsm *smsm;
+
+ struct irq_domain *domain;
+ DECLARE_BITMAP(irq_enabled, 32);
+ DECLARE_BITMAP(irq_rising, 32);
+ DECLARE_BITMAP(irq_falling, 32);
+ unsigned long last_value;
+
+ u32 *remote_state;
+ u32 *subscription;
+};
+
+/**
+ * struct smsm_host - representation of a remote host
+ * @ipc_regmap: regmap for outgoing interrupt
+ * @ipc_offset: offset in @ipc_regmap for outgoing interrupt
+ * @ipc_bit: bit in @ipc_regmap + @ipc_offset for outgoing interrupt
+ */
+struct smsm_host {
+ struct regmap *ipc_regmap;
+ int ipc_offset;
+ int ipc_bit;
+};
+
+/**
+ * smsm_update_bits() - change bit in outgoing entry and inform subscribers
+ * @data: smsm context pointer
+ * @offset: bit in the entry
+ * @value: new value
+ *
+ * Used to set and clear the bits in the outgoing/local entry and inform
+ * subscribers about the change.
+ */
+static int smsm_update_bits(void *data, u32 mask, u32 value)
+{
+ struct qcom_smsm *smsm = data;
+ struct smsm_host *hostp;
+ unsigned long flags;
+ u32 changes;
+ u32 host;
+ u32 orig;
+ u32 val;
+
+ spin_lock_irqsave(&smsm->lock, flags);
+
+ /* Update the entry */
+ val = orig = readl(smsm->local_state);
+ val &= ~mask;
+ val |= value;
+
+ /* Don't signal if we didn't change the value */
+ changes = val ^ orig;
+ if (!changes) {
+ spin_unlock_irqrestore(&smsm->lock, flags);
+ goto done;
+ }
+
+ /* Write out the new value */
+ writel(val, smsm->local_state);
+ spin_unlock_irqrestore(&smsm->lock, flags);
+
+ /* Make sure the value update is ordered before any kicks */
+ wmb();
+
+	/* Iterate over all hosts to check who wants a kick */
+ for (host = 0; host < smsm->num_hosts; host++) {
+ hostp = &smsm->hosts[host];
+
+ val = readl(smsm->subscription + host);
+ if (val & changes && hostp->ipc_regmap) {
+ regmap_write(hostp->ipc_regmap,
+ hostp->ipc_offset,
+ BIT(hostp->ipc_bit));
+ }
+ }
+
+done:
+ return 0;
+}
+
+static const struct qcom_smem_state_ops smsm_state_ops = {
+ .update_bits = smsm_update_bits,
+};
+
+/**
+ * smsm_intr() - cascading IRQ handler for SMSM
+ * @irq: unused
+ * @data: entry related to this IRQ
+ *
+ * This function cascades an incoming interrupt from a remote system, based on
+ * the state bits and configuration.
+ */
+static irqreturn_t smsm_intr(int irq, void *data)
+{
+ struct smsm_entry *entry = data;
+ unsigned i;
+ int irq_pin;
+ u32 changed;
+ u32 val;
+
+ val = readl(entry->remote_state);
+ changed = val ^ xchg(&entry->last_value, val);
+
+ for_each_set_bit(i, entry->irq_enabled, 32) {
+ if (!(changed & BIT(i)))
+ continue;
+
+ if (val & BIT(i)) {
+ if (test_bit(i, entry->irq_rising)) {
+ irq_pin = irq_find_mapping(entry->domain, i);
+ handle_nested_irq(irq_pin);
+ }
+ } else {
+ if (test_bit(i, entry->irq_falling)) {
+ irq_pin = irq_find_mapping(entry->domain, i);
+ handle_nested_irq(irq_pin);
+ }
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * smsm_mask_irq() - un-subscribe from cascades of IRQs of a certain status bit
+ * @irqd: IRQ handle to be masked
+ *
+ * This un-subscribes the local CPU from interrupts upon changes to the defined
+ * status bit. The bit is also cleared from cascading.
+ */
+static void smsm_mask_irq(struct irq_data *irqd)
+{
+ struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
+ struct qcom_smsm *smsm = entry->smsm;
+ u32 val;
+
+ if (entry->subscription) {
+ val = readl(entry->subscription + smsm->local_host);
+ val &= ~BIT(irq);
+ writel(val, entry->subscription + smsm->local_host);
+ }
+
+ clear_bit(irq, entry->irq_enabled);
+}
+
+/**
+ * smsm_unmask_irq() - subscribe to cascades of IRQs of a certain status bit
+ * @irqd: IRQ handle to be unmasked
+ *
+ * This subscribes the local CPU to interrupts upon changes to the defined
+ * status bit. The bit is also marked for cascading.
+ */
+static void smsm_unmask_irq(struct irq_data *irqd)
+{
+ struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
+ struct qcom_smsm *smsm = entry->smsm;
+ u32 val;
+
+ /* Make sure our last cached state is up-to-date */
+ if (readl(entry->remote_state) & BIT(irq))
+ set_bit(irq, &entry->last_value);
+ else
+ clear_bit(irq, &entry->last_value);
+
+ set_bit(irq, entry->irq_enabled);
+
+ if (entry->subscription) {
+ val = readl(entry->subscription + smsm->local_host);
+ val |= BIT(irq);
+ writel(val, entry->subscription + smsm->local_host);
+ }
+}
+
+/**
+ * smsm_set_irq_type() - updates the requested IRQ type for the cascading
+ * @irqd: consumer interrupt handle
+ * @type: requested flags
+ */
+static int smsm_set_irq_type(struct irq_data *irqd, unsigned int type)
+{
+ struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
+
+ if (!(type & IRQ_TYPE_EDGE_BOTH))
+ return -EINVAL;
+
+ if (type & IRQ_TYPE_EDGE_RISING)
+ set_bit(irq, entry->irq_rising);
+ else
+ clear_bit(irq, entry->irq_rising);
+
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ set_bit(irq, entry->irq_falling);
+ else
+ clear_bit(irq, entry->irq_falling);
+
+ return 0;
+}
+
+static struct irq_chip smsm_irq_chip = {
+ .name = "smsm",
+ .irq_mask = smsm_mask_irq,
+ .irq_unmask = smsm_unmask_irq,
+ .irq_set_type = smsm_set_irq_type,
+};
+
+/**
+ * smsm_irq_map() - sets up a mapping for a cascaded IRQ
+ * @d: IRQ domain representing an entry
+ * @irq: IRQ to set up
+ * @hw: unused
+ */
+static int smsm_irq_map(struct irq_domain *d,
+ unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct smsm_entry *entry = d->host_data;
+
+ irq_set_chip_and_handler(irq, &smsm_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, entry);
+ irq_set_nested_thread(irq, 1);
+
+ return 0;
+}
+
+static const struct irq_domain_ops smsm_irq_ops = {
+ .map = smsm_irq_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+/**
+ * smsm_parse_ipc() - parses a qcom,ipc-%d device tree property
+ * @smsm: smsm driver context
+ * @host_id: index of the remote host to be resolved
+ *
+ * Parses device tree to acquire the information needed for sending the
+ * outgoing interrupts to a remote host - identified by @host_id.
+ */
+static int smsm_parse_ipc(struct qcom_smsm *smsm, unsigned host_id)
+{
+ struct device_node *syscon;
+ struct device_node *node = smsm->dev->of_node;
+ struct smsm_host *host = &smsm->hosts[host_id];
+ char key[16];
+ int ret;
+
+ snprintf(key, sizeof(key), "qcom,ipc-%d", host_id);
+ syscon = of_parse_phandle(node, key, 0);
+ if (!syscon)
+ return 0;
+
+ host->ipc_regmap = syscon_node_to_regmap(syscon);
+ of_node_put(syscon);
+ if (IS_ERR(host->ipc_regmap))
+ return PTR_ERR(host->ipc_regmap);
+
+ ret = of_property_read_u32_index(node, key, 1, &host->ipc_offset);
+ if (ret < 0) {
+ dev_err(smsm->dev, "no offset in %s\n", key);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_index(node, key, 2, &host->ipc_bit);
+ if (ret < 0) {
+ dev_err(smsm->dev, "no bit in %s\n", key);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * smsm_inbound_entry() - parse DT and set up an entry representing a remote system
+ * @smsm: smsm driver context
+ * @entry: entry context to be set up
+ * @node: dt node containing the entry's properties
+ */
+static int smsm_inbound_entry(struct qcom_smsm *smsm,
+ struct smsm_entry *entry,
+ struct device_node *node)
+{
+ int ret;
+ int irq;
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (!irq) {
+ dev_err(smsm->dev, "failed to parse smsm interrupt\n");
+ return -EINVAL;
+ }
+
+ ret = devm_request_threaded_irq(smsm->dev, irq,
+ NULL, smsm_intr,
+ IRQF_ONESHOT,
+ "smsm", (void *)entry);
+ if (ret) {
+ dev_err(smsm->dev, "failed to request interrupt\n");
+ return ret;
+ }
+
+ entry->domain = irq_domain_add_linear(node, 32, &smsm_irq_ops, entry);
+ if (!entry->domain) {
+ dev_err(smsm->dev, "failed to add irq_domain\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * smsm_get_size_info() - parse the optional memory segment for sizes
+ * @smsm: smsm driver context
+ *
+ * Attempt to acquire the number of hosts and entries from the optional shared
+ * memory location. Not being able to find this segment should indicate that
+ * we're on a older system where these values was hard coded to
+ * SMSM_DEFAULT_NUM_ENTRIES and SMSM_DEFAULT_NUM_HOSTS.
+ *
+ * Returns 0 on success, negative errno on failure.
+ */
+static int smsm_get_size_info(struct qcom_smsm *smsm)
+{
+ size_t size;
+ struct {
+ u32 num_hosts;
+ u32 num_entries;
+ u32 reserved0;
+ u32 reserved1;
+ } *info;
+
+ info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SIZE_INFO, &size);
+ if (IS_ERR(info) && PTR_ERR(info) != -ENOENT) {
+ if (PTR_ERR(info) != -EPROBE_DEFER)
+ dev_err(smsm->dev, "unable to retrieve smsm size info\n");
+ return PTR_ERR(info);
+ } else if (IS_ERR(info) || size != sizeof(*info)) {
+ dev_warn(smsm->dev, "no smsm size info, using defaults\n");
+ smsm->num_entries = SMSM_DEFAULT_NUM_ENTRIES;
+ smsm->num_hosts = SMSM_DEFAULT_NUM_HOSTS;
+ return 0;
+ }
+
+ smsm->num_entries = info->num_entries;
+ smsm->num_hosts = info->num_hosts;
+
+ dev_dbg(smsm->dev,
+ "found custom size of smsm: %d entries %d hosts\n",
+ smsm->num_entries, smsm->num_hosts);
+
+ return 0;
+}
+
+static int qcom_smsm_probe(struct platform_device *pdev)
+{
+ struct device_node *local_node;
+ struct device_node *node;
+ struct smsm_entry *entry;
+ struct qcom_smsm *smsm;
+ u32 *intr_mask;
+ size_t size;
+ u32 *states;
+ u32 id;
+ int ret;
+
+ smsm = devm_kzalloc(&pdev->dev, sizeof(*smsm), GFP_KERNEL);
+ if (!smsm)
+ return -ENOMEM;
+ smsm->dev = &pdev->dev;
+ spin_lock_init(&smsm->lock);
+
+ ret = smsm_get_size_info(smsm);
+ if (ret)
+ return ret;
+
+ smsm->entries = devm_kcalloc(&pdev->dev,
+ smsm->num_entries,
+ sizeof(struct smsm_entry),
+ GFP_KERNEL);
+ if (!smsm->entries)
+ return -ENOMEM;
+
+ smsm->hosts = devm_kcalloc(&pdev->dev,
+ smsm->num_hosts,
+ sizeof(struct smsm_host),
+ GFP_KERNEL);
+ if (!smsm->hosts)
+ return -ENOMEM;
+
+ for_each_child_of_node(pdev->dev.of_node, local_node) {
+ if (of_find_property(local_node, "#qcom,smem-state-cells", NULL))
+ break;
+ }
+ if (!local_node) {
+ dev_err(&pdev->dev, "no state entry\n");
+ return -EINVAL;
+ }
+
+ of_property_read_u32(pdev->dev.of_node,
+ "qcom,local-host",
+ &smsm->local_host);
+
+ /* Parse the host properties */
+ for (id = 0; id < smsm->num_hosts; id++) {
+ ret = smsm_parse_ipc(smsm, id);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Acquire the main SMSM state vector */
+ ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SHARED_STATE,
+ smsm->num_entries * sizeof(u32));
+ if (ret < 0 && ret != -EEXIST) {
+ dev_err(&pdev->dev, "unable to allocate shared state entry\n");
+ return ret;
+ }
+
+ states = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SHARED_STATE, NULL);
+ if (IS_ERR(states)) {
+ dev_err(&pdev->dev, "Unable to acquire shared state entry\n");
+ return PTR_ERR(states);
+ }
+
+ /* Acquire the list of interrupt mask vectors */
+ size = smsm->num_entries * smsm->num_hosts * sizeof(u32);
+ ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, SMEM_SMSM_CPU_INTR_MASK, size);
+ if (ret < 0 && ret != -EEXIST) {
+ dev_err(&pdev->dev, "unable to allocate smsm interrupt mask\n");
+ return ret;
+ }
+
+ intr_mask = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_CPU_INTR_MASK, NULL);
+ if (IS_ERR(intr_mask)) {
+ dev_err(&pdev->dev, "unable to acquire shared memory interrupt mask\n");
+ return PTR_ERR(intr_mask);
+ }
+
+ /* Setup the reference to the local state bits */
+ smsm->local_state = states + smsm->local_host;
+ smsm->subscription = intr_mask + smsm->local_host * smsm->num_hosts;
+
+ /* Register the outgoing state */
+ smsm->state = qcom_smem_state_register(local_node, &smsm_state_ops, smsm);
+ if (IS_ERR(smsm->state)) {
+ dev_err(smsm->dev, "failed to register qcom_smem_state\n");
+ return PTR_ERR(smsm->state);
+ }
+
+ /* Register handlers for remote processor entries of interest. */
+ for_each_available_child_of_node(pdev->dev.of_node, node) {
+ if (!of_property_read_bool(node, "interrupt-controller"))
+ continue;
+
+ ret = of_property_read_u32(node, "reg", &id);
+ if (ret || id >= smsm->num_entries) {
+ dev_err(&pdev->dev, "invalid reg of entry\n");
+ if (!ret)
+ ret = -EINVAL;
+ goto unwind_interfaces;
+ }
+ entry = &smsm->entries[id];
+
+ entry->smsm = smsm;
+ entry->remote_state = states + id;
+
+		/* Set up subscription pointers and unsubscribe from any kicks */
+ entry->subscription = intr_mask + id * smsm->num_hosts;
+ writel(0, entry->subscription + smsm->local_host);
+
+ ret = smsm_inbound_entry(smsm, entry, node);
+ if (ret < 0)
+ goto unwind_interfaces;
+ }
+
+ platform_set_drvdata(pdev, smsm);
+
+ return 0;
+
+unwind_interfaces:
+ for (id = 0; id < smsm->num_entries; id++)
+ if (smsm->entries[id].domain)
+ irq_domain_remove(smsm->entries[id].domain);
+
+ qcom_smem_state_unregister(smsm->state);
+
+ return ret;
+}
+
+static int qcom_smsm_remove(struct platform_device *pdev)
+{
+ struct qcom_smsm *smsm = platform_get_drvdata(pdev);
+ unsigned id;
+
+ for (id = 0; id < smsm->num_entries; id++)
+ if (smsm->entries[id].domain)
+ irq_domain_remove(smsm->entries[id].domain);
+
+ qcom_smem_state_unregister(smsm->state);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_smsm_of_match[] = {
+ { .compatible = "qcom,smsm" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_smsm_of_match);
+
+static struct platform_driver qcom_smsm_driver = {
+ .probe = qcom_smsm_probe,
+ .remove = qcom_smsm_remove,
+ .driver = {
+ .name = "qcom-smsm",
+ .of_match_table = qcom_smsm_of_match,
+ },
+};
+module_platform_driver(qcom_smsm_driver);
+
+MODULE_DESCRIPTION("Qualcomm Shared Memory State Machine driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/spm.c b/drivers/soc/qcom/spm.c
new file mode 100644
index 000000000..f9d7a85b2
--- /dev/null
+++ b/drivers/soc/qcom/spm.c
@@ -0,0 +1,383 @@
+/*
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2015, Linaro Ltd.
+ *
+ * SAW power controller driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/cpuidle.h>
+#include <linux/cpu_pm.h>
+#include <linux/qcom_scm.h>
+
+#include <asm/cpuidle.h>
+#include <asm/proc-fns.h>
+#include <asm/suspend.h>
+
+#define MAX_PMIC_DATA 2
+#define MAX_SEQ_DATA 64
+#define SPM_CTL_INDEX 0x7f
+#define SPM_CTL_INDEX_SHIFT 4
+#define SPM_CTL_EN BIT(0)
+
+enum pm_sleep_mode {
+ PM_SLEEP_MODE_STBY,
+ PM_SLEEP_MODE_RET,
+ PM_SLEEP_MODE_SPC,
+ PM_SLEEP_MODE_PC,
+ PM_SLEEP_MODE_NR,
+};
+
+enum spm_reg {
+ SPM_REG_CFG,
+ SPM_REG_SPM_CTL,
+ SPM_REG_DLY,
+ SPM_REG_PMIC_DLY,
+ SPM_REG_PMIC_DATA_0,
+ SPM_REG_PMIC_DATA_1,
+ SPM_REG_VCTL,
+ SPM_REG_SEQ_ENTRY,
+ SPM_REG_SPM_STS,
+ SPM_REG_PMIC_STS,
+ SPM_REG_NR,
+};
+
+struct spm_reg_data {
+ const u8 *reg_offset;
+ u32 spm_cfg;
+ u32 spm_dly;
+ u32 pmic_dly;
+ u32 pmic_data[MAX_PMIC_DATA];
+ u8 seq[MAX_SEQ_DATA];
+ u8 start_index[PM_SLEEP_MODE_NR];
+};
+
+struct spm_driver_data {
+ void __iomem *reg_base;
+ const struct spm_reg_data *reg_data;
+};
+
+static const u8 spm_reg_offset_v2_1[SPM_REG_NR] = {
+ [SPM_REG_CFG] = 0x08,
+ [SPM_REG_SPM_CTL] = 0x30,
+ [SPM_REG_DLY] = 0x34,
+ [SPM_REG_SEQ_ENTRY] = 0x80,
+};
+
+/* SPM register data for 8974, 8084 */
+static const struct spm_reg_data spm_reg_8974_8084_cpu = {
+ .reg_offset = spm_reg_offset_v2_1,
+ .spm_cfg = 0x1,
+ .spm_dly = 0x3C102800,
+ .seq = { 0x03, 0x0B, 0x0F, 0x00, 0x20, 0x80, 0x10, 0xE8, 0x5B, 0x03,
+ 0x3B, 0xE8, 0x5B, 0x82, 0x10, 0x0B, 0x30, 0x06, 0x26, 0x30,
+ 0x0F },
+ .start_index[PM_SLEEP_MODE_STBY] = 0,
+ .start_index[PM_SLEEP_MODE_SPC] = 3,
+};
+
+static const u8 spm_reg_offset_v1_1[SPM_REG_NR] = {
+ [SPM_REG_CFG] = 0x08,
+ [SPM_REG_SPM_CTL] = 0x20,
+ [SPM_REG_PMIC_DLY] = 0x24,
+ [SPM_REG_PMIC_DATA_0] = 0x28,
+ [SPM_REG_PMIC_DATA_1] = 0x2C,
+ [SPM_REG_SEQ_ENTRY] = 0x80,
+};
+
+/* SPM register data for 8064 */
+static const struct spm_reg_data spm_reg_8064_cpu = {
+ .reg_offset = spm_reg_offset_v1_1,
+ .spm_cfg = 0x1F,
+ .pmic_dly = 0x02020004,
+ .pmic_data[0] = 0x0084009C,
+ .pmic_data[1] = 0x00A4001C,
+ .seq = { 0x03, 0x0F, 0x00, 0x24, 0x54, 0x10, 0x09, 0x03, 0x01,
+ 0x10, 0x54, 0x30, 0x0C, 0x24, 0x30, 0x0F },
+ .start_index[PM_SLEEP_MODE_STBY] = 0,
+ .start_index[PM_SLEEP_MODE_SPC] = 2,
+};
+
+static DEFINE_PER_CPU(struct spm_driver_data *, cpu_spm_drv);
+
+typedef int (*idle_fn)(void);
+static DEFINE_PER_CPU(idle_fn*, qcom_idle_ops);
+
+static inline void spm_register_write(struct spm_driver_data *drv,
+ enum spm_reg reg, u32 val)
+{
+ if (drv->reg_data->reg_offset[reg])
+ writel_relaxed(val, drv->reg_base +
+ drv->reg_data->reg_offset[reg]);
+}
+
+/* Ensure a guaranteed write before returning */
+static inline void spm_register_write_sync(struct spm_driver_data *drv,
+ enum spm_reg reg, u32 val)
+{
+ u32 ret;
+
+ if (!drv->reg_data->reg_offset[reg])
+ return;
+
+ do {
+ writel_relaxed(val, drv->reg_base +
+ drv->reg_data->reg_offset[reg]);
+ ret = readl_relaxed(drv->reg_base +
+ drv->reg_data->reg_offset[reg]);
+ if (ret == val)
+ break;
+ cpu_relax();
+ } while (1);
+}
+
+static inline u32 spm_register_read(struct spm_driver_data *drv,
+ enum spm_reg reg)
+{
+ return readl_relaxed(drv->reg_base + drv->reg_data->reg_offset[reg]);
+}
+
+static void spm_set_low_power_mode(struct spm_driver_data *drv,
+ enum pm_sleep_mode mode)
+{
+ u32 start_index;
+ u32 ctl_val;
+
+ start_index = drv->reg_data->start_index[mode];
+
+ ctl_val = spm_register_read(drv, SPM_REG_SPM_CTL);
+ ctl_val &= ~(SPM_CTL_INDEX << SPM_CTL_INDEX_SHIFT);
+ ctl_val |= start_index << SPM_CTL_INDEX_SHIFT;
+ ctl_val |= SPM_CTL_EN;
+ spm_register_write_sync(drv, SPM_REG_SPM_CTL, ctl_val);
+}
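+
+/*
+ * Worked example (illustrative): for the 8064 SPC sequence start_index is 2,
+ * so the index field becomes 2 << 4 and, with SPM_CTL_EN set, the written
+ * control value is 0x21 (plus whatever bits outside the index field were
+ * already set in the read-modify-write above).
+ */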
+
+static int qcom_pm_collapse(unsigned long int unused)
+{
+ qcom_scm_cpu_power_down(QCOM_SCM_CPU_PWR_DOWN_L2_ON);
+
+ /*
+ * Returns here only if there was a pending interrupt and we did not
+ * power down as a result.
+ */
+ return -1;
+}
+
+static int qcom_cpu_spc(void)
+{
+ int ret;
+ struct spm_driver_data *drv = __this_cpu_read(cpu_spm_drv);
+
+ spm_set_low_power_mode(drv, PM_SLEEP_MODE_SPC);
+ ret = cpu_suspend(0, qcom_pm_collapse);
+ /*
+ * ARM common code executes WFI without calling into our driver and
+ * if the SPM mode is not reset, then we may accidently power down the
+ * cpu when we intended only to gate the cpu clock.
+ * Ensure the state is set to standby before returning.
+ */
+ spm_set_low_power_mode(drv, PM_SLEEP_MODE_STBY);
+
+ return ret;
+}
+
+static int qcom_idle_enter(unsigned long index)
+{
+ return __this_cpu_read(qcom_idle_ops)[index]();
+}
+
+static const struct of_device_id qcom_idle_state_match[] __initconst = {
+ { .compatible = "qcom,idle-state-spc", .data = qcom_cpu_spc },
+ { },
+};
+
+static int __init qcom_cpuidle_init(struct device_node *cpu_node, int cpu)
+{
+ const struct of_device_id *match_id;
+ struct device_node *state_node;
+ int i;
+ int state_count = 1;
+ idle_fn idle_fns[CPUIDLE_STATE_MAX];
+ idle_fn *fns;
+ cpumask_t mask;
+ bool use_scm_power_down = false;
+
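+	/*
+	 * Added commentary: idle state 0 is expected to be plain WFI, handled
+	 * by the core ARM idle code, which is why state_count starts at 1 and
+	 * idle_fns[] is only populated from index 1 onwards.
+	 */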
+ for (i = 0; ; i++) {
+ state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
+ if (!state_node)
+ break;
+
+ if (!of_device_is_available(state_node))
+ continue;
+
+ if (i == CPUIDLE_STATE_MAX) {
+ pr_warn("%s: cpuidle states reached max possible\n",
+ __func__);
+ break;
+ }
+
+ match_id = of_match_node(qcom_idle_state_match, state_node);
+ if (!match_id)
+ return -ENODEV;
+
+ idle_fns[state_count] = match_id->data;
+
+ /* Check if any of the states allow power down */
+ if (match_id->data == qcom_cpu_spc)
+ use_scm_power_down = true;
+
+ state_count++;
+ }
+
+ if (state_count == 1)
+ goto check_spm;
+
+ fns = devm_kcalloc(get_cpu_device(cpu), state_count, sizeof(*fns),
+ GFP_KERNEL);
+ if (!fns)
+ return -ENOMEM;
+
+ for (i = 1; i < state_count; i++)
+ fns[i] = idle_fns[i];
+
+ if (use_scm_power_down) {
+		/* We have at least one power down mode */
+ cpumask_clear(&mask);
+ cpumask_set_cpu(cpu, &mask);
+ qcom_scm_set_warm_boot_addr(cpu_resume_arm, &mask);
+ }
+
+ per_cpu(qcom_idle_ops, cpu) = fns;
+
+ /*
+ * SPM probe for the cpu should have happened by now, if the
+ * SPM device does not exist, return -ENXIO to indicate that the
+ * cpu does not support idle states.
+ */
+check_spm:
+ return per_cpu(cpu_spm_drv, cpu) ? 0 : -ENXIO;
+}
+
+static const struct cpuidle_ops qcom_cpuidle_ops __initconst = {
+ .suspend = qcom_idle_enter,
+ .init = qcom_cpuidle_init,
+};
+
+CPUIDLE_METHOD_OF_DECLARE(qcom_idle_v1, "qcom,kpss-acc-v1", &qcom_cpuidle_ops);
+CPUIDLE_METHOD_OF_DECLARE(qcom_idle_v2, "qcom,kpss-acc-v2", &qcom_cpuidle_ops);
+
+static struct spm_driver_data *spm_get_drv(struct platform_device *pdev,
+ int *spm_cpu)
+{
+ struct spm_driver_data *drv = NULL;
+ struct device_node *cpu_node, *saw_node;
+ int cpu;
+	bool found = false;
+
+ for_each_possible_cpu(cpu) {
+ cpu_node = of_cpu_device_node_get(cpu);
+ if (!cpu_node)
+ continue;
+ saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0);
+ found = (saw_node == pdev->dev.of_node);
+ of_node_put(saw_node);
+ of_node_put(cpu_node);
+ if (found)
+ break;
+ }
+
+ if (found) {
+ drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
+ if (drv)
+ *spm_cpu = cpu;
+ }
+
+ return drv;
+}
+
+static const struct of_device_id spm_match_table[] = {
+ { .compatible = "qcom,msm8974-saw2-v2.1-cpu",
+ .data = &spm_reg_8974_8084_cpu },
+ { .compatible = "qcom,apq8084-saw2-v2.1-cpu",
+ .data = &spm_reg_8974_8084_cpu },
+ { .compatible = "qcom,apq8064-saw2-v1.1-cpu",
+ .data = &spm_reg_8064_cpu },
+ { },
+};
+
+static int spm_dev_probe(struct platform_device *pdev)
+{
+ struct spm_driver_data *drv;
+ struct resource *res;
+ const struct of_device_id *match_id;
+ void __iomem *addr;
+ int cpu;
+
+ drv = spm_get_drv(pdev, &cpu);
+ if (!drv)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ drv->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(drv->reg_base))
+ return PTR_ERR(drv->reg_base);
+
+ match_id = of_match_node(spm_match_table, pdev->dev.of_node);
+ if (!match_id)
+ return -ENODEV;
+
+ drv->reg_data = match_id->data;
+
+ /* Write the SPM sequences first.. */
+ addr = drv->reg_base + drv->reg_data->reg_offset[SPM_REG_SEQ_ENTRY];
+ __iowrite32_copy(addr, drv->reg_data->seq,
+ ARRAY_SIZE(drv->reg_data->seq) / 4);
+
+ /*
+ * ..and then the control registers.
+ * On some SoC if the control registers are written first and if the
+ * CPU was held in reset, the reset signal could trigger the SPM state
+ * machine, before the sequences are completely written.
+ */
+ spm_register_write(drv, SPM_REG_CFG, drv->reg_data->spm_cfg);
+ spm_register_write(drv, SPM_REG_DLY, drv->reg_data->spm_dly);
+ spm_register_write(drv, SPM_REG_PMIC_DLY, drv->reg_data->pmic_dly);
+ spm_register_write(drv, SPM_REG_PMIC_DATA_0,
+ drv->reg_data->pmic_data[0]);
+ spm_register_write(drv, SPM_REG_PMIC_DATA_1,
+ drv->reg_data->pmic_data[1]);
+
+ /* Set up Standby as the default low power mode */
+ spm_set_low_power_mode(drv, PM_SLEEP_MODE_STBY);
+
+ per_cpu(cpu_spm_drv, cpu) = drv;
+
+ return 0;
+}
+
+static struct platform_driver spm_driver = {
+ .probe = spm_dev_probe,
+ .driver = {
+ .name = "saw",
+ .of_match_table = spm_match_table,
+ },
+};
+
+builtin_platform_driver(spm_driver);
diff --git a/drivers/soc/qcom/trace-rpmh.h b/drivers/soc/qcom/trace-rpmh.h
new file mode 100644
index 000000000..feb0cb455
--- /dev/null
+++ b/drivers/soc/qcom/trace-rpmh.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#if !defined(_TRACE_RPMH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RPMH_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rpmh
+
+#include <linux/tracepoint.h>
+#include "rpmh-internal.h"
+
+TRACE_EVENT(rpmh_tx_done,
+
+ TP_PROTO(struct rsc_drv *d, int m, const struct tcs_request *r, int e),
+
+ TP_ARGS(d, m, r, e),
+
+ TP_STRUCT__entry(
+ __string(name, d->name)
+ __field(int, m)
+ __field(u32, addr)
+ __field(u32, data)
+ __field(int, err)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, d->name);
+ __entry->m = m;
+ __entry->addr = r->cmds[0].addr;
+ __entry->data = r->cmds[0].data;
+ __entry->err = e;
+ ),
+
+ TP_printk("%s: ack: tcs-m: %d addr: %#x data: %#x errno: %d",
+ __get_str(name), __entry->m, __entry->addr, __entry->data,
+ __entry->err)
+);
+
+TRACE_EVENT(rpmh_send_msg,
+
+ TP_PROTO(struct rsc_drv *d, int m, int n, u32 h,
+ const struct tcs_cmd *c),
+
+ TP_ARGS(d, m, n, h, c),
+
+ TP_STRUCT__entry(
+ __string(name, d->name)
+ __field(int, m)
+ __field(int, n)
+ __field(u32, hdr)
+ __field(u32, addr)
+ __field(u32, data)
+ __field(bool, wait)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, d->name);
+ __entry->m = m;
+ __entry->n = n;
+ __entry->hdr = h;
+ __entry->addr = c->addr;
+ __entry->data = c->data;
+ __entry->wait = c->wait;
+ ),
+
+ TP_printk("%s: send-msg: tcs(m): %d cmd(n): %d msgid: %#x addr: %#x data: %#x complete: %d",
+ __get_str(name), __entry->m, __entry->n, __entry->hdr,
+ __entry->addr, __entry->data, __entry->wait)
+);
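+
+/*
+ * TRACE_EVENT() expands each definition above into a trace_<name>() helper;
+ * a caller in the RPMH driver would emit the send event as, e.g.
+ * (illustrative):
+ *
+ *	trace_rpmh_send_msg(drv, tcs_id, i, msgid, &msg->cmds[i]);
+ */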
+
+#endif /* _TRACE_RPMH_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace-rpmh
+
+#include <trace/define_trace.h>
diff --git a/drivers/soc/qcom/wcnss_ctrl.c b/drivers/soc/qcom/wcnss_ctrl.c
new file mode 100644
index 000000000..373400dd8
--- /dev/null
+++ b/drivers/soc/qcom/wcnss_ctrl.c
@@ -0,0 +1,366 @@
+/*
+ * Copyright (c) 2016, Linaro Ltd.
+ * Copyright (c) 2015, Sony Mobile Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/rpmsg.h>
+#include <linux/soc/qcom/wcnss_ctrl.h>
+
+#define WCNSS_REQUEST_TIMEOUT (5 * HZ)
+#define WCNSS_CBC_TIMEOUT (10 * HZ)
+
+#define WCNSS_ACK_DONE_BOOTING 1
+#define WCNSS_ACK_COLD_BOOTING 2
+
+#define NV_FRAGMENT_SIZE 3072
+#define NVBIN_FILE "wlan/prima/WCNSS_qcom_wlan_nv.bin"
+
+/**
+ * struct wcnss_ctrl - driver context
+ * @dev: device handle
+ * @channel: SMD channel handle
+ * @ack: completion for outstanding requests
+ * @cbc: completion for cbc complete indication
+ * @ack_status: status of the outstanding request
+ * @probe_work: worker for uploading nv binary
+ */
+struct wcnss_ctrl {
+ struct device *dev;
+ struct rpmsg_endpoint *channel;
+
+ struct completion ack;
+ struct completion cbc;
+ int ack_status;
+
+ struct work_struct probe_work;
+};
+
+/* message types */
+enum {
+ WCNSS_VERSION_REQ = 0x01000000,
+ WCNSS_VERSION_RESP,
+ WCNSS_DOWNLOAD_NV_REQ,
+ WCNSS_DOWNLOAD_NV_RESP,
+ WCNSS_UPLOAD_CAL_REQ,
+ WCNSS_UPLOAD_CAL_RESP,
+ WCNSS_DOWNLOAD_CAL_REQ,
+ WCNSS_DOWNLOAD_CAL_RESP,
+ WCNSS_VBAT_LEVEL_IND,
+ WCNSS_BUILD_VERSION_REQ,
+ WCNSS_BUILD_VERSION_RESP,
+ WCNSS_PM_CONFIG_REQ,
+ WCNSS_CBC_COMPLETE_IND,
+};
+
+/**
+ * struct wcnss_msg_hdr - common packet header for requests and responses
+ * @type: packet message type
+ * @len: total length of the packet, including this header
+ */
+struct wcnss_msg_hdr {
+ u32 type;
+ u32 len;
+} __packed;
+
+/**
+ * struct wcnss_version_resp - version request response
+ * @hdr: common packet wcnss_msg_hdr header
+ */
+struct wcnss_version_resp {
+ struct wcnss_msg_hdr hdr;
+ u8 major;
+ u8 minor;
+ u8 version;
+ u8 revision;
+} __packed;
+
+/**
+ * struct wcnss_download_nv_req - firmware fragment request
+ * @hdr: common packet wcnss_msg_hdr header
+ * @seq: sequence number of this fragment
+ * @last: boolean indicator of this being the last fragment of the binary
+ * @frag_size: length of this fragment
+ * @fragment: fragment data
+ */
+struct wcnss_download_nv_req {
+ struct wcnss_msg_hdr hdr;
+ u16 seq;
+ u16 last;
+ u32 frag_size;
+ u8 fragment[];
+} __packed;
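+
+/*
+ * The nv binary is streamed in NV_FRAGMENT_SIZE chunks, so an image of
+ * fw->size bytes takes DIV_ROUND_UP(fw->size, NV_FRAGMENT_SIZE) requests;
+ * e.g. (illustrative arithmetic) a 10000 byte image is sent as four
+ * fragments, the last one carrying 784 bytes.
+ */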
+
+/**
+ * struct wcnss_download_nv_resp - firmware download response
+ * @hdr: common packet wcnss_msg_hdr header
+ * @status: boolean to indicate success of the download
+ */
+struct wcnss_download_nv_resp {
+ struct wcnss_msg_hdr hdr;
+ u8 status;
+} __packed;
+
+/**
+ * wcnss_ctrl_smd_callback() - handler for SMD responses
+ * @rpdev: rpmsg device the packet arrived on
+ * @data: pointer to the incoming data packet
+ * @count: size of the incoming data packet
+ * @priv: unused
+ * @addr: unused
+ *
+ * Handles any incoming packets from the remote WCNSS_CTRL service.
+ */
+static int wcnss_ctrl_smd_callback(struct rpmsg_device *rpdev,
+ void *data,
+ int count,
+ void *priv,
+ u32 addr)
+{
+ struct wcnss_ctrl *wcnss = dev_get_drvdata(&rpdev->dev);
+ const struct wcnss_download_nv_resp *nvresp;
+ const struct wcnss_version_resp *version;
+ const struct wcnss_msg_hdr *hdr = data;
+
+ switch (hdr->type) {
+ case WCNSS_VERSION_RESP:
+ if (count != sizeof(*version)) {
+ dev_err(wcnss->dev,
+ "invalid size of version response\n");
+ break;
+ }
+
+ version = data;
+ dev_info(wcnss->dev, "WCNSS Version %d.%d %d.%d\n",
+ version->major, version->minor,
+ version->version, version->revision);
+
+ complete(&wcnss->ack);
+ break;
+ case WCNSS_DOWNLOAD_NV_RESP:
+ if (count != sizeof(*nvresp)) {
+ dev_err(wcnss->dev,
+ "invalid size of download response\n");
+ break;
+ }
+
+ nvresp = data;
+ wcnss->ack_status = nvresp->status;
+ complete(&wcnss->ack);
+ break;
+ case WCNSS_CBC_COMPLETE_IND:
+ dev_dbg(wcnss->dev, "cold boot complete\n");
+ complete(&wcnss->cbc);
+ break;
+ default:
+ dev_info(wcnss->dev, "unknown message type %d\n", hdr->type);
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * wcnss_request_version() - send a version request to WCNSS
+ * @wcnss: wcnss ctrl driver context
+ */
+static int wcnss_request_version(struct wcnss_ctrl *wcnss)
+{
+ struct wcnss_msg_hdr msg;
+ int ret;
+
+ msg.type = WCNSS_VERSION_REQ;
+ msg.len = sizeof(msg);
+ ret = rpmsg_send(wcnss->channel, &msg, sizeof(msg));
+ if (ret < 0)
+ return ret;
+
+ ret = wait_for_completion_timeout(&wcnss->ack, WCNSS_CBC_TIMEOUT);
+ if (!ret) {
+ dev_err(wcnss->dev, "timeout waiting for version response\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/**
+ * wcnss_download_nv() - send nv binary to WCNSS
+ * @wcnss: wcnss_ctrl state handle
+ * @expect_cbc: indicator to the caller that a cbc event is expected
+ *
+ * Returns 0 on success. Negative errno on failure.
+ */
+static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc)
+{
+ struct wcnss_download_nv_req *req;
+ const struct firmware *fw;
+ const void *data;
+ ssize_t left;
+ int ret;
+
+ req = kzalloc(sizeof(*req) + NV_FRAGMENT_SIZE, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ ret = request_firmware(&fw, NVBIN_FILE, wcnss->dev);
+ if (ret < 0) {
+ dev_err(wcnss->dev, "Failed to load nv file %s: %d\n",
+ NVBIN_FILE, ret);
+ goto free_req;
+ }
+
+ data = fw->data;
+ left = fw->size;
+
+ req->hdr.type = WCNSS_DOWNLOAD_NV_REQ;
+ req->hdr.len = sizeof(*req) + NV_FRAGMENT_SIZE;
+
+ req->last = 0;
+ req->frag_size = NV_FRAGMENT_SIZE;
+
+ req->seq = 0;
+ do {
+ if (left <= NV_FRAGMENT_SIZE) {
+ req->last = 1;
+ req->frag_size = left;
+ req->hdr.len = sizeof(*req) + left;
+ }
+
+ memcpy(req->fragment, data, req->frag_size);
+
+ ret = rpmsg_send(wcnss->channel, req, req->hdr.len);
+ if (ret < 0) {
+ dev_err(wcnss->dev, "failed to send smd packet\n");
+ goto release_fw;
+ }
+
+ /* Increment for next fragment */
+ req->seq++;
+
+ data += NV_FRAGMENT_SIZE;
+ left -= NV_FRAGMENT_SIZE;
+ } while (left > 0);
+
+ ret = wait_for_completion_timeout(&wcnss->ack, WCNSS_REQUEST_TIMEOUT);
+ if (!ret) {
+ dev_err(wcnss->dev, "timeout waiting for nv upload ack\n");
+ ret = -ETIMEDOUT;
+ } else {
+ *expect_cbc = wcnss->ack_status == WCNSS_ACK_COLD_BOOTING;
+ ret = 0;
+ }
+
+release_fw:
+ release_firmware(fw);
+free_req:
+ kfree(req);
+
+ return ret;
+}
+
+/**
+ * qcom_wcnss_open_channel() - open additional SMD channel to WCNSS
+ * @wcnss: wcnss handle, retrieved from drvdata
+ * @name: SMD channel name
+ * @cb: callback to handle incoming data on the channel
+ */
+struct rpmsg_endpoint *qcom_wcnss_open_channel(void *wcnss, const char *name, rpmsg_rx_cb_t cb, void *priv)
+{
+ struct rpmsg_channel_info chinfo;
+ struct wcnss_ctrl *_wcnss = wcnss;
+
+ strscpy(chinfo.name, name, sizeof(chinfo.name));
+ chinfo.src = RPMSG_ADDR_ANY;
+ chinfo.dst = RPMSG_ADDR_ANY;
+
+ return rpmsg_create_ept(_wcnss->channel->rpdev, cb, priv, chinfo);
+}
+EXPORT_SYMBOL(qcom_wcnss_open_channel);
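+
+/*
+ * A child function driver would typically open its channel as, e.g.
+ * (illustrative sketch; the channel name is one used by the Bluetooth
+ * driver, and error handling is elided):
+ *
+ *	struct rpmsg_endpoint *ept;
+ *
+ *	ept = qcom_wcnss_open_channel(wcnss, "APPS_RIVA_BT_CMD", cb, priv);
+ */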
+
+static void wcnss_async_probe(struct work_struct *work)
+{
+ struct wcnss_ctrl *wcnss = container_of(work, struct wcnss_ctrl, probe_work);
+ bool expect_cbc;
+ int ret;
+
+ ret = wcnss_request_version(wcnss);
+ if (ret < 0)
+ return;
+
+ ret = wcnss_download_nv(wcnss, &expect_cbc);
+ if (ret < 0)
+ return;
+
+ /* Wait for pending cold boot completion if indicated by the nv downloader */
+ if (expect_cbc) {
+ ret = wait_for_completion_timeout(&wcnss->cbc, WCNSS_REQUEST_TIMEOUT);
+ if (!ret)
+ dev_err(wcnss->dev, "expected cold boot completion\n");
+ }
+
+ of_platform_populate(wcnss->dev->of_node, NULL, NULL, wcnss->dev);
+}
+
+static int wcnss_ctrl_probe(struct rpmsg_device *rpdev)
+{
+ struct wcnss_ctrl *wcnss;
+
+ wcnss = devm_kzalloc(&rpdev->dev, sizeof(*wcnss), GFP_KERNEL);
+ if (!wcnss)
+ return -ENOMEM;
+
+ wcnss->dev = &rpdev->dev;
+ wcnss->channel = rpdev->ept;
+
+ init_completion(&wcnss->ack);
+ init_completion(&wcnss->cbc);
+ INIT_WORK(&wcnss->probe_work, wcnss_async_probe);
+
+ dev_set_drvdata(&rpdev->dev, wcnss);
+
+ schedule_work(&wcnss->probe_work);
+
+ return 0;
+}
+
+static void wcnss_ctrl_remove(struct rpmsg_device *rpdev)
+{
+ struct wcnss_ctrl *wcnss = dev_get_drvdata(&rpdev->dev);
+
+ cancel_work_sync(&wcnss->probe_work);
+ of_platform_depopulate(&rpdev->dev);
+}
+
+static const struct of_device_id wcnss_ctrl_of_match[] = {
+ { .compatible = "qcom,wcnss", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, wcnss_ctrl_of_match);
+
+static struct rpmsg_driver wcnss_ctrl_driver = {
+ .probe = wcnss_ctrl_probe,
+ .remove = wcnss_ctrl_remove,
+ .callback = wcnss_ctrl_smd_callback,
+ .drv = {
+ .name = "qcom_wcnss_ctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = wcnss_ctrl_of_match,
+ },
+};
+
+module_rpmsg_driver(wcnss_ctrl_driver);
+
+MODULE_DESCRIPTION("Qualcomm WCNSS control driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
new file mode 100644
index 000000000..1d824cbd4
--- /dev/null
+++ b/drivers/soc/renesas/Kconfig
@@ -0,0 +1,95 @@
+config SOC_RENESAS
+ bool "Renesas SoC driver support" if COMPILE_TEST && !ARCH_RENESAS
+ default y if ARCH_RENESAS
+ select SOC_BUS
+ select RST_RCAR if ARCH_RCAR_GEN1 || ARCH_RCAR_GEN2 || \
+ ARCH_R8A7795 || ARCH_R8A7796 || ARCH_R8A77965 || \
+ ARCH_R8A77970 || ARCH_R8A77980 || ARCH_R8A77990 || \
+ ARCH_R8A77995
+ select SYSC_R8A7743 if ARCH_R8A7743
+ select SYSC_R8A7745 if ARCH_R8A7745
+ select SYSC_R8A77470 if ARCH_R8A77470
+ select SYSC_R8A7779 if ARCH_R8A7779
+ select SYSC_R8A7790 if ARCH_R8A7790
+ select SYSC_R8A7791 if ARCH_R8A7791 || ARCH_R8A7793
+ select SYSC_R8A7792 if ARCH_R8A7792
+ select SYSC_R8A7794 if ARCH_R8A7794
+ select SYSC_R8A7795 if ARCH_R8A7795
+ select SYSC_R8A7796 if ARCH_R8A7796
+ select SYSC_R8A77965 if ARCH_R8A77965
+ select SYSC_R8A77970 if ARCH_R8A77970
+ select SYSC_R8A77980 if ARCH_R8A77980
+ select SYSC_R8A77990 if ARCH_R8A77990
+ select SYSC_R8A77995 if ARCH_R8A77995
+
+if SOC_RENESAS
+
+# SoC
+config SYSC_R8A7743
+ bool "RZ/G1M System Controller support" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A7745
+ bool "RZ/G1E System Controller support" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A77470
+ bool "RZ/G1C System Controller support" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A7779
+ bool "R-Car H1 System Controller support" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A7790
+ bool "R-Car H2 System Controller support" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A7791
+ bool "R-Car M2-W/N System Controller support" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A7792
+ bool "R-Car V2H System Controller support" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A7794
+ bool "R-Car E2 System Controller support" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A7795
+ bool "R-Car H3 System Controller support" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A7796
+ bool "R-Car M3-W System Controller support" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A77965
+ bool "R-Car M3-N System Controller support" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A77970
+ bool "R-Car V3M System Controller support" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A77980
+ bool "R-Car V3H System Controller support" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A77990
+ bool "R-Car E3 System Controller support" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A77995
+ bool "R-Car D3 System Controller support" if COMPILE_TEST
+ select SYSC_RCAR
+
+# Family
+config RST_RCAR
+ bool "R-Car Reset Controller support" if COMPILE_TEST
+
+config SYSC_RCAR
+ bool "R-Car System Controller support" if COMPILE_TEST
+
+endif # SOC_RENESAS
diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile
new file mode 100644
index 000000000..c37b0803c
--- /dev/null
+++ b/drivers/soc/renesas/Makefile
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0
+# Generic, must be first because of soc_device_register()
+obj-$(CONFIG_SOC_RENESAS) += renesas-soc.o
+
+# SoC
+obj-$(CONFIG_SYSC_R8A7743) += r8a7743-sysc.o
+obj-$(CONFIG_SYSC_R8A7745) += r8a7745-sysc.o
+obj-$(CONFIG_SYSC_R8A77470) += r8a77470-sysc.o
+obj-$(CONFIG_SYSC_R8A7779) += r8a7779-sysc.o
+obj-$(CONFIG_SYSC_R8A7790) += r8a7790-sysc.o
+obj-$(CONFIG_SYSC_R8A7791) += r8a7791-sysc.o
+obj-$(CONFIG_SYSC_R8A7792) += r8a7792-sysc.o
+obj-$(CONFIG_SYSC_R8A7794) += r8a7794-sysc.o
+obj-$(CONFIG_SYSC_R8A7795) += r8a7795-sysc.o
+obj-$(CONFIG_SYSC_R8A7796) += r8a7796-sysc.o
+obj-$(CONFIG_SYSC_R8A77965) += r8a77965-sysc.o
+obj-$(CONFIG_SYSC_R8A77970) += r8a77970-sysc.o
+obj-$(CONFIG_SYSC_R8A77980) += r8a77980-sysc.o
+obj-$(CONFIG_SYSC_R8A77990) += r8a77990-sysc.o
+obj-$(CONFIG_SYSC_R8A77995) += r8a77995-sysc.o
+ifdef CONFIG_SMP
+obj-$(CONFIG_ARCH_R9A06G032) += r9a06g032-smp.o
+endif
+
+# Family
+obj-$(CONFIG_RST_RCAR) += rcar-rst.o
+obj-$(CONFIG_SYSC_RCAR) += rcar-sysc.o
diff --git a/drivers/soc/renesas/r8a7743-sysc.c b/drivers/soc/renesas/r8a7743-sysc.c
new file mode 100644
index 000000000..9583a327d
--- /dev/null
+++ b/drivers/soc/renesas/r8a7743-sysc.c
@@ -0,0 +1,32 @@
+/*
+ * Renesas RZ/G1M System Controller
+ *
+ * Copyright (C) 2016 Cogent Embedded Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a7743-sysc.h>
+
+#include "rcar-sysc.h"
+
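+/*
+ * Descriptive note: each entry lists, in order, the area name, channel
+ * offset, channel bit, interrupt bit, parent area and flags, matching the
+ * rcar_sysc_area fields declared in rcar-sysc.h.
+ */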
+static const struct rcar_sysc_area r8a7743_areas[] __initconst = {
+ { "always-on", 0, 0, R8A7743_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca15-scu", 0x180, 0, R8A7743_PD_CA15_SCU, R8A7743_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca15-cpu0", 0x40, 0, R8A7743_PD_CA15_CPU0, R8A7743_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "ca15-cpu1", 0x40, 1, R8A7743_PD_CA15_CPU1, R8A7743_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "sgx", 0xc0, 0, R8A7743_PD_SGX, R8A7743_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a7743_sysc_info __initconst = {
+ .areas = r8a7743_areas,
+ .num_areas = ARRAY_SIZE(r8a7743_areas),
+};
diff --git a/drivers/soc/renesas/r8a7745-sysc.c b/drivers/soc/renesas/r8a7745-sysc.c
new file mode 100644
index 000000000..d17887c08
--- /dev/null
+++ b/drivers/soc/renesas/r8a7745-sysc.c
@@ -0,0 +1,32 @@
+/*
+ * Renesas RZ/G1E System Controller
+ *
+ * Copyright (C) 2016 Cogent Embedded Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a7745-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a7745_areas[] __initconst = {
+ { "always-on", 0, 0, R8A7745_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca7-scu", 0x100, 0, R8A7745_PD_CA7_SCU, R8A7745_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca7-cpu0", 0x1c0, 0, R8A7745_PD_CA7_CPU0, R8A7745_PD_CA7_SCU,
+ PD_CPU_NOCR },
+ { "ca7-cpu1", 0x1c0, 1, R8A7745_PD_CA7_CPU1, R8A7745_PD_CA7_SCU,
+ PD_CPU_NOCR },
+ { "sgx", 0xc0, 0, R8A7745_PD_SGX, R8A7745_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a7745_sysc_info __initconst = {
+ .areas = r8a7745_areas,
+ .num_areas = ARRAY_SIZE(r8a7745_areas),
+};
diff --git a/drivers/soc/renesas/r8a77470-sysc.c b/drivers/soc/renesas/r8a77470-sysc.c
new file mode 100644
index 000000000..cfa015e20
--- /dev/null
+++ b/drivers/soc/renesas/r8a77470-sysc.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/G1C System Controller
+ *
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a77470-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a77470_areas[] __initconst = {
+ { "always-on", 0, 0, R8A77470_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca7-scu", 0x100, 0, R8A77470_PD_CA7_SCU, R8A77470_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca7-cpu0", 0x1c0, 0, R8A77470_PD_CA7_CPU0, R8A77470_PD_CA7_SCU,
+ PD_CPU_NOCR },
+ { "ca7-cpu1", 0x1c0, 1, R8A77470_PD_CA7_CPU1, R8A77470_PD_CA7_SCU,
+ PD_CPU_NOCR },
+ { "sgx", 0xc0, 0, R8A77470_PD_SGX, R8A77470_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a77470_sysc_info __initconst = {
+ .areas = r8a77470_areas,
+ .num_areas = ARRAY_SIZE(r8a77470_areas),
+};
diff --git a/drivers/soc/renesas/r8a7779-sysc.c b/drivers/soc/renesas/r8a7779-sysc.c
new file mode 100644
index 000000000..9e8e6b7fa
--- /dev/null
+++ b/drivers/soc/renesas/r8a7779-sysc.c
@@ -0,0 +1,34 @@
+/*
+ * Renesas R-Car H1 System Controller
+ *
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a7779-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a7779_areas[] __initconst = {
+ { "always-on", 0, 0, R8A7779_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "arm1", 0x40, 1, R8A7779_PD_ARM1, R8A7779_PD_ALWAYS_ON,
+ PD_CPU_CR },
+ { "arm2", 0x40, 2, R8A7779_PD_ARM2, R8A7779_PD_ALWAYS_ON,
+ PD_CPU_CR },
+ { "arm3", 0x40, 3, R8A7779_PD_ARM3, R8A7779_PD_ALWAYS_ON,
+ PD_CPU_CR },
+ { "sgx", 0xc0, 0, R8A7779_PD_SGX, R8A7779_PD_ALWAYS_ON },
+ { "vdp", 0x100, 0, R8A7779_PD_VDP, R8A7779_PD_ALWAYS_ON },
+ { "imp", 0x140, 0, R8A7779_PD_IMP, R8A7779_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a7779_sysc_info __initconst = {
+ .areas = r8a7779_areas,
+ .num_areas = ARRAY_SIZE(r8a7779_areas),
+};
diff --git a/drivers/soc/renesas/r8a7790-sysc.c b/drivers/soc/renesas/r8a7790-sysc.c
new file mode 100644
index 000000000..7a567ad0f
--- /dev/null
+++ b/drivers/soc/renesas/r8a7790-sysc.c
@@ -0,0 +1,48 @@
+/*
+ * Renesas R-Car H2 System Controller
+ *
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a7790-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a7790_areas[] __initconst = {
+ { "always-on", 0, 0, R8A7790_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca15-scu", 0x180, 0, R8A7790_PD_CA15_SCU, R8A7790_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca15-cpu0", 0x40, 0, R8A7790_PD_CA15_CPU0, R8A7790_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "ca15-cpu1", 0x40, 1, R8A7790_PD_CA15_CPU1, R8A7790_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "ca15-cpu2", 0x40, 2, R8A7790_PD_CA15_CPU2, R8A7790_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "ca15-cpu3", 0x40, 3, R8A7790_PD_CA15_CPU3, R8A7790_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "ca7-scu", 0x100, 0, R8A7790_PD_CA7_SCU, R8A7790_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca7-cpu0", 0x1c0, 0, R8A7790_PD_CA7_CPU0, R8A7790_PD_CA7_SCU,
+ PD_CPU_NOCR },
+ { "ca7-cpu1", 0x1c0, 1, R8A7790_PD_CA7_CPU1, R8A7790_PD_CA7_SCU,
+ PD_CPU_NOCR },
+ { "ca7-cpu2", 0x1c0, 2, R8A7790_PD_CA7_CPU2, R8A7790_PD_CA7_SCU,
+ PD_CPU_NOCR },
+ { "ca7-cpu3", 0x1c0, 3, R8A7790_PD_CA7_CPU3, R8A7790_PD_CA7_SCU,
+ PD_CPU_NOCR },
+ { "sh-4a", 0x80, 0, R8A7790_PD_SH_4A, R8A7790_PD_ALWAYS_ON },
+ { "rgx", 0xc0, 0, R8A7790_PD_RGX, R8A7790_PD_ALWAYS_ON },
+ { "imp", 0x140, 0, R8A7790_PD_IMP, R8A7790_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a7790_sysc_info __initconst = {
+ .areas = r8a7790_areas,
+ .num_areas = ARRAY_SIZE(r8a7790_areas),
+};
diff --git a/drivers/soc/renesas/r8a7791-sysc.c b/drivers/soc/renesas/r8a7791-sysc.c
new file mode 100644
index 000000000..03b9f41a3
--- /dev/null
+++ b/drivers/soc/renesas/r8a7791-sysc.c
@@ -0,0 +1,33 @@
+/*
+ * Renesas R-Car M2-W/N System Controller
+ *
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a7791-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a7791_areas[] __initconst = {
+ { "always-on", 0, 0, R8A7791_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca15-scu", 0x180, 0, R8A7791_PD_CA15_SCU, R8A7791_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca15-cpu0", 0x40, 0, R8A7791_PD_CA15_CPU0, R8A7791_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "ca15-cpu1", 0x40, 1, R8A7791_PD_CA15_CPU1, R8A7791_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "sh-4a", 0x80, 0, R8A7791_PD_SH_4A, R8A7791_PD_ALWAYS_ON },
+ { "sgx", 0xc0, 0, R8A7791_PD_SGX, R8A7791_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a7791_sysc_info __initconst = {
+ .areas = r8a7791_areas,
+ .num_areas = ARRAY_SIZE(r8a7791_areas),
+};
diff --git a/drivers/soc/renesas/r8a7792-sysc.c b/drivers/soc/renesas/r8a7792-sysc.c
new file mode 100644
index 000000000..ca7467d7b
--- /dev/null
+++ b/drivers/soc/renesas/r8a7792-sysc.c
@@ -0,0 +1,34 @@
+/*
+ * Renesas R-Car V2H (R8A7792) System Controller
+ *
+ * Copyright (C) 2016 Cogent Embedded Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bug.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a7792-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a7792_areas[] __initconst = {
+ { "always-on", 0, 0, R8A7792_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca15-scu", 0x180, 0, R8A7792_PD_CA15_SCU, R8A7792_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca15-cpu0", 0x40, 0, R8A7792_PD_CA15_CPU0, R8A7792_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "ca15-cpu1", 0x40, 1, R8A7792_PD_CA15_CPU1, R8A7792_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "sgx", 0xc0, 0, R8A7792_PD_SGX, R8A7792_PD_ALWAYS_ON },
+ { "imp", 0x140, 0, R8A7792_PD_IMP, R8A7792_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a7792_sysc_info __initconst = {
+ .areas = r8a7792_areas,
+ .num_areas = ARRAY_SIZE(r8a7792_areas),
+};
diff --git a/drivers/soc/renesas/r8a7794-sysc.c b/drivers/soc/renesas/r8a7794-sysc.c
new file mode 100644
index 000000000..c4da2941e
--- /dev/null
+++ b/drivers/soc/renesas/r8a7794-sysc.c
@@ -0,0 +1,33 @@
+/*
+ * Renesas R-Car E2 System Controller
+ *
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a7794-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a7794_areas[] __initconst = {
+ { "always-on", 0, 0, R8A7794_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca7-scu", 0x100, 0, R8A7794_PD_CA7_SCU, R8A7794_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca7-cpu0", 0x1c0, 0, R8A7794_PD_CA7_CPU0, R8A7794_PD_CA7_SCU,
+ PD_CPU_NOCR },
+ { "ca7-cpu1", 0x1c0, 1, R8A7794_PD_CA7_CPU1, R8A7794_PD_CA7_SCU,
+ PD_CPU_NOCR },
+ { "sh-4a", 0x80, 0, R8A7794_PD_SH_4A, R8A7794_PD_ALWAYS_ON },
+ { "sgx", 0xc0, 0, R8A7794_PD_SGX, R8A7794_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a7794_sysc_info __initconst = {
+ .areas = r8a7794_areas,
+ .num_areas = ARRAY_SIZE(r8a7794_areas),
+};
diff --git a/drivers/soc/renesas/r8a7795-sysc.c b/drivers/soc/renesas/r8a7795-sysc.c
new file mode 100644
index 000000000..741266618
--- /dev/null
+++ b/drivers/soc/renesas/r8a7795-sysc.c
@@ -0,0 +1,78 @@
+/*
+ * Renesas R-Car H3 System Controller
+ *
+ * Copyright (C) 2016-2017 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/sys_soc.h>
+
+#include <dt-bindings/power/r8a7795-sysc.h>
+
+#include "rcar-sysc.h"
+
+static struct rcar_sysc_area r8a7795_areas[] __initdata = {
+ { "always-on", 0, 0, R8A7795_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca57-scu", 0x1c0, 0, R8A7795_PD_CA57_SCU, R8A7795_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca57-cpu0", 0x80, 0, R8A7795_PD_CA57_CPU0, R8A7795_PD_CA57_SCU,
+ PD_CPU_NOCR },
+ { "ca57-cpu1", 0x80, 1, R8A7795_PD_CA57_CPU1, R8A7795_PD_CA57_SCU,
+ PD_CPU_NOCR },
+ { "ca57-cpu2", 0x80, 2, R8A7795_PD_CA57_CPU2, R8A7795_PD_CA57_SCU,
+ PD_CPU_NOCR },
+ { "ca57-cpu3", 0x80, 3, R8A7795_PD_CA57_CPU3, R8A7795_PD_CA57_SCU,
+ PD_CPU_NOCR },
+ { "ca53-scu", 0x140, 0, R8A7795_PD_CA53_SCU, R8A7795_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca53-cpu0", 0x200, 0, R8A7795_PD_CA53_CPU0, R8A7795_PD_CA53_SCU,
+ PD_CPU_NOCR },
+ { "ca53-cpu1", 0x200, 1, R8A7795_PD_CA53_CPU1, R8A7795_PD_CA53_SCU,
+ PD_CPU_NOCR },
+ { "ca53-cpu2", 0x200, 2, R8A7795_PD_CA53_CPU2, R8A7795_PD_CA53_SCU,
+ PD_CPU_NOCR },
+ { "ca53-cpu3", 0x200, 3, R8A7795_PD_CA53_CPU3, R8A7795_PD_CA53_SCU,
+ PD_CPU_NOCR },
+ { "a3vp", 0x340, 0, R8A7795_PD_A3VP, R8A7795_PD_ALWAYS_ON },
+ { "cr7", 0x240, 0, R8A7795_PD_CR7, R8A7795_PD_ALWAYS_ON },
+ { "a3vc", 0x380, 0, R8A7795_PD_A3VC, R8A7795_PD_ALWAYS_ON },
+ /* A2VC0 exists on ES1.x only */
+ { "a2vc0", 0x3c0, 0, R8A7795_PD_A2VC0, R8A7795_PD_A3VC },
+ { "a2vc1", 0x3c0, 1, R8A7795_PD_A2VC1, R8A7795_PD_A3VC },
+ { "3dg-a", 0x100, 0, R8A7795_PD_3DG_A, R8A7795_PD_ALWAYS_ON },
+ { "3dg-b", 0x100, 1, R8A7795_PD_3DG_B, R8A7795_PD_3DG_A },
+ { "3dg-c", 0x100, 2, R8A7795_PD_3DG_C, R8A7795_PD_3DG_B },
+ { "3dg-d", 0x100, 3, R8A7795_PD_3DG_D, R8A7795_PD_3DG_C },
+ { "3dg-e", 0x100, 4, R8A7795_PD_3DG_E, R8A7795_PD_3DG_D },
+ { "a3ir", 0x180, 0, R8A7795_PD_A3IR, R8A7795_PD_ALWAYS_ON },
+};
+
+/*
+ * Fixups for R-Car H3 revisions after ES1.x
+ */
+
+static const struct soc_device_attribute r8a7795es1[] __initconst = {
+ { .soc_id = "r8a7795", .revision = "ES1.*" },
+ { /* sentinel */ }
+};
+
+static int __init r8a7795_sysc_init(void)
+{
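+	/* A2VC0 exists on ES1.x only; remove it from the table on later cuts */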
+ if (!soc_device_match(r8a7795es1))
+ rcar_sysc_nullify(r8a7795_areas, ARRAY_SIZE(r8a7795_areas),
+ R8A7795_PD_A2VC0);
+
+ return 0;
+}
+
+const struct rcar_sysc_info r8a7795_sysc_info __initconst = {
+ .init = r8a7795_sysc_init,
+ .areas = r8a7795_areas,
+ .num_areas = ARRAY_SIZE(r8a7795_areas),
+};
diff --git a/drivers/soc/renesas/r8a7796-sysc.c b/drivers/soc/renesas/r8a7796-sysc.c
new file mode 100644
index 000000000..f700c842b
--- /dev/null
+++ b/drivers/soc/renesas/r8a7796-sysc.c
@@ -0,0 +1,48 @@
+/*
+ * Renesas R-Car M3-W System Controller
+ *
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a7796-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a7796_areas[] __initconst = {
+ { "always-on", 0, 0, R8A7796_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca57-scu", 0x1c0, 0, R8A7796_PD_CA57_SCU, R8A7796_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca57-cpu0", 0x80, 0, R8A7796_PD_CA57_CPU0, R8A7796_PD_CA57_SCU,
+ PD_CPU_NOCR },
+ { "ca57-cpu1", 0x80, 1, R8A7796_PD_CA57_CPU1, R8A7796_PD_CA57_SCU,
+ PD_CPU_NOCR },
+ { "ca53-scu", 0x140, 0, R8A7796_PD_CA53_SCU, R8A7796_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca53-cpu0", 0x200, 0, R8A7796_PD_CA53_CPU0, R8A7796_PD_CA53_SCU,
+ PD_CPU_NOCR },
+ { "ca53-cpu1", 0x200, 1, R8A7796_PD_CA53_CPU1, R8A7796_PD_CA53_SCU,
+ PD_CPU_NOCR },
+ { "ca53-cpu2", 0x200, 2, R8A7796_PD_CA53_CPU2, R8A7796_PD_CA53_SCU,
+ PD_CPU_NOCR },
+ { "ca53-cpu3", 0x200, 3, R8A7796_PD_CA53_CPU3, R8A7796_PD_CA53_SCU,
+ PD_CPU_NOCR },
+ { "cr7", 0x240, 0, R8A7796_PD_CR7, R8A7796_PD_ALWAYS_ON },
+ { "a3vc", 0x380, 0, R8A7796_PD_A3VC, R8A7796_PD_ALWAYS_ON },
+ { "a2vc0", 0x3c0, 0, R8A7796_PD_A2VC0, R8A7796_PD_A3VC },
+ { "a2vc1", 0x3c0, 1, R8A7796_PD_A2VC1, R8A7796_PD_A3VC },
+ { "3dg-a", 0x100, 0, R8A7796_PD_3DG_A, R8A7796_PD_ALWAYS_ON },
+ { "3dg-b", 0x100, 1, R8A7796_PD_3DG_B, R8A7796_PD_3DG_A },
+ { "a3ir", 0x180, 0, R8A7796_PD_A3IR, R8A7796_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a7796_sysc_info __initconst = {
+ .areas = r8a7796_areas,
+ .num_areas = ARRAY_SIZE(r8a7796_areas),
+};
diff --git a/drivers/soc/renesas/r8a77965-sysc.c b/drivers/soc/renesas/r8a77965-sysc.c
new file mode 100644
index 000000000..d7f7928e3
--- /dev/null
+++ b/drivers/soc/renesas/r8a77965-sysc.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas R-Car M3-N System Controller
+ * Copyright (C) 2018 Jacopo Mondi <jacopo+renesas@jmondi.org>
+ *
+ * Based on Renesas R-Car M3-W System Controller
+ * Copyright (C) 2016 Glider bvba
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a77965-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a77965_areas[] __initconst = {
+ { "always-on", 0, 0, R8A77965_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca57-scu", 0x1c0, 0, R8A77965_PD_CA57_SCU, R8A77965_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca57-cpu0", 0x80, 0, R8A77965_PD_CA57_CPU0, R8A77965_PD_CA57_SCU,
+ PD_CPU_NOCR },
+ { "ca57-cpu1", 0x80, 1, R8A77965_PD_CA57_CPU1, R8A77965_PD_CA57_SCU,
+ PD_CPU_NOCR },
+ { "cr7", 0x240, 0, R8A77965_PD_CR7, R8A77965_PD_ALWAYS_ON },
+ { "a3vc", 0x380, 0, R8A77965_PD_A3VC, R8A77965_PD_ALWAYS_ON },
+ { "a3vp", 0x340, 0, R8A77965_PD_A3VP, R8A77965_PD_ALWAYS_ON },
+ { "a2vc1", 0x3c0, 1, R8A77965_PD_A2VC1, R8A77965_PD_A3VC },
+ { "3dg-a", 0x100, 0, R8A77965_PD_3DG_A, R8A77965_PD_ALWAYS_ON },
+ { "3dg-b", 0x100, 1, R8A77965_PD_3DG_B, R8A77965_PD_3DG_A },
+ { "a3ir", 0x180, 0, R8A77965_PD_A3IR, R8A77965_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a77965_sysc_info __initconst = {
+ .areas = r8a77965_areas,
+ .num_areas = ARRAY_SIZE(r8a77965_areas),
+};
diff --git a/drivers/soc/renesas/r8a77970-sysc.c b/drivers/soc/renesas/r8a77970-sysc.c
new file mode 100644
index 000000000..77422baa7
--- /dev/null
+++ b/drivers/soc/renesas/r8a77970-sysc.c
@@ -0,0 +1,39 @@
+/*
+ * Renesas R-Car V3M System Controller
+ *
+ * Copyright (C) 2017 Cogent Embedded Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a77970-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a77970_areas[] __initconst = {
+ { "always-on", 0, 0, R8A77970_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca53-scu", 0x140, 0, R8A77970_PD_CA53_SCU, R8A77970_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca53-cpu0", 0x200, 0, R8A77970_PD_CA53_CPU0, R8A77970_PD_CA53_SCU,
+ PD_CPU_NOCR },
+ { "ca53-cpu1", 0x200, 1, R8A77970_PD_CA53_CPU1, R8A77970_PD_CA53_SCU,
+ PD_CPU_NOCR },
+ { "cr7", 0x240, 0, R8A77970_PD_CR7, R8A77970_PD_ALWAYS_ON },
+ { "a3ir", 0x180, 0, R8A77970_PD_A3IR, R8A77970_PD_ALWAYS_ON },
+ { "a2ir0", 0x400, 0, R8A77970_PD_A2IR0, R8A77970_PD_A3IR },
+ { "a2ir1", 0x400, 1, R8A77970_PD_A2IR1, R8A77970_PD_A3IR },
+ { "a2dp", 0x400, 2, R8A77970_PD_A2DP, R8A77970_PD_A3IR },
+ { "a2cn", 0x400, 3, R8A77970_PD_A2CN, R8A77970_PD_A3IR },
+ { "a2sc0", 0x400, 4, R8A77970_PD_A2SC0, R8A77970_PD_A3IR },
+ { "a2sc1", 0x400, 5, R8A77970_PD_A2SC1, R8A77970_PD_A3IR },
+};
+
+const struct rcar_sysc_info r8a77970_sysc_info __initconst = {
+ .areas = r8a77970_areas,
+ .num_areas = ARRAY_SIZE(r8a77970_areas),
+};
diff --git a/drivers/soc/renesas/r8a77980-sysc.c b/drivers/soc/renesas/r8a77980-sysc.c
new file mode 100644
index 000000000..a8dbe55e8
--- /dev/null
+++ b/drivers/soc/renesas/r8a77980-sysc.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas R-Car V3H System Controller
+ *
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ * Copyright (C) 2018 Cogent Embedded, Inc.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a77980-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a77980_areas[] __initconst = {
+ { "always-on", 0, 0, R8A77980_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca53-scu", 0x140, 0, R8A77980_PD_CA53_SCU, R8A77980_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca53-cpu0", 0x200, 0, R8A77980_PD_CA53_CPU0, R8A77980_PD_CA53_SCU,
+ PD_CPU_NOCR },
+ { "ca53-cpu1", 0x200, 1, R8A77980_PD_CA53_CPU1, R8A77980_PD_CA53_SCU,
+ PD_CPU_NOCR },
+ { "ca53-cpu2", 0x200, 2, R8A77980_PD_CA53_CPU2, R8A77980_PD_CA53_SCU,
+ PD_CPU_NOCR },
+ { "ca53-cpu3", 0x200, 3, R8A77980_PD_CA53_CPU3, R8A77980_PD_CA53_SCU,
+ PD_CPU_NOCR },
+ { "cr7", 0x240, 0, R8A77980_PD_CR7, R8A77980_PD_ALWAYS_ON },
+ { "a3ir", 0x180, 0, R8A77980_PD_A3IR, R8A77980_PD_ALWAYS_ON },
+ { "a2ir0", 0x400, 0, R8A77980_PD_A2IR0, R8A77980_PD_A3IR },
+ { "a2ir1", 0x400, 1, R8A77980_PD_A2IR1, R8A77980_PD_A3IR },
+ { "a2ir2", 0x400, 2, R8A77980_PD_A2IR2, R8A77980_PD_A3IR },
+ { "a2ir3", 0x400, 3, R8A77980_PD_A2IR3, R8A77980_PD_A3IR },
+ { "a2ir4", 0x400, 4, R8A77980_PD_A2IR4, R8A77980_PD_A3IR },
+ { "a2ir5", 0x400, 5, R8A77980_PD_A2IR5, R8A77980_PD_A3IR },
+ { "a2sc0", 0x400, 6, R8A77980_PD_A2SC0, R8A77980_PD_A3IR },
+ { "a2sc1", 0x400, 7, R8A77980_PD_A2SC1, R8A77980_PD_A3IR },
+ { "a2sc2", 0x400, 8, R8A77980_PD_A2SC2, R8A77980_PD_A3IR },
+ { "a2sc3", 0x400, 9, R8A77980_PD_A2SC3, R8A77980_PD_A3IR },
+ { "a2sc4", 0x400, 10, R8A77980_PD_A2SC4, R8A77980_PD_A3IR },
+ { "a2dp0", 0x400, 11, R8A77980_PD_A2DP0, R8A77980_PD_A3IR },
+ { "a2dp1", 0x400, 12, R8A77980_PD_A2DP1, R8A77980_PD_A3IR },
+ { "a2cn", 0x400, 13, R8A77980_PD_A2CN, R8A77980_PD_A3IR },
+ { "a3vip0", 0x2c0, 0, R8A77980_PD_A3VIP0, R8A77980_PD_ALWAYS_ON },
+ { "a3vip1", 0x300, 0, R8A77980_PD_A3VIP1, R8A77980_PD_ALWAYS_ON },
+ { "a3vip2", 0x280, 0, R8A77980_PD_A3VIP2, R8A77980_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a77980_sysc_info __initconst = {
+ .areas = r8a77980_areas,
+ .num_areas = ARRAY_SIZE(r8a77980_areas),
+};
diff --git a/drivers/soc/renesas/r8a77990-sysc.c b/drivers/soc/renesas/r8a77990-sysc.c
new file mode 100644
index 000000000..664b244eb
--- /dev/null
+++ b/drivers/soc/renesas/r8a77990-sysc.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas R-Car E3 System Controller
+ *
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/sys_soc.h>
+
+#include <dt-bindings/power/r8a77990-sysc.h>
+
+#include "rcar-sysc.h"
+
+static struct rcar_sysc_area r8a77990_areas[] __initdata = {
+ { "always-on", 0, 0, R8A77990_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca53-scu", 0x140, 0, R8A77990_PD_CA53_SCU, R8A77990_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca53-cpu0", 0x200, 0, R8A77990_PD_CA53_CPU0, R8A77990_PD_CA53_SCU,
+ PD_CPU_NOCR },
+ { "ca53-cpu1", 0x200, 1, R8A77990_PD_CA53_CPU1, R8A77990_PD_CA53_SCU,
+ PD_CPU_NOCR },
+ { "cr7", 0x240, 0, R8A77990_PD_CR7, R8A77990_PD_ALWAYS_ON },
+ { "a3vc", 0x380, 0, R8A77990_PD_A3VC, R8A77990_PD_ALWAYS_ON },
+ { "a2vc1", 0x3c0, 1, R8A77990_PD_A2VC1, R8A77990_PD_A3VC },
+ { "3dg-a", 0x100, 0, R8A77990_PD_3DG_A, R8A77990_PD_ALWAYS_ON },
+ { "3dg-b", 0x100, 1, R8A77990_PD_3DG_B, R8A77990_PD_3DG_A },
+};
+
+/* Fixups for R-Car E3 ES1.0 revision */
+static const struct soc_device_attribute r8a77990[] __initconst = {
+ { .soc_id = "r8a77990", .revision = "ES1.0" },
+ { /* sentinel */ }
+};
+
+static int __init r8a77990_sysc_init(void)
+{
+ if (soc_device_match(r8a77990)) {
+ /* Fix incorrect 3DG hierarchy */
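+		/* (on ES1.0, 3DG-B is the parent of 3DG-A, not vice versa) */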
+ swap(r8a77990_areas[7], r8a77990_areas[8]);
+ r8a77990_areas[7].parent = R8A77990_PD_ALWAYS_ON;
+ r8a77990_areas[8].parent = R8A77990_PD_3DG_B;
+ }
+
+ return 0;
+}
+
+const struct rcar_sysc_info r8a77990_sysc_info __initconst = {
+ .init = r8a77990_sysc_init,
+ .areas = r8a77990_areas,
+ .num_areas = ARRAY_SIZE(r8a77990_areas),
+};
diff --git a/drivers/soc/renesas/r8a77995-sysc.c b/drivers/soc/renesas/r8a77995-sysc.c
new file mode 100644
index 000000000..1b2ef415b
--- /dev/null
+++ b/drivers/soc/renesas/r8a77995-sysc.c
@@ -0,0 +1,30 @@
+/*
+ * Renesas R-Car D3 System Controller
+ *
+ * Copyright (C) 2017 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a77995-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a77995_areas[] __initconst = {
+ { "always-on", 0, 0, R8A77995_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca53-scu", 0x140, 0, R8A77995_PD_CA53_SCU, R8A77995_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca53-cpu0", 0x200, 0, R8A77995_PD_CA53_CPU0, R8A77995_PD_CA53_SCU,
+ PD_CPU_NOCR },
+};
+
+const struct rcar_sysc_info r8a77995_sysc_info __initconst = {
+ .areas = r8a77995_areas,
+ .num_areas = ARRAY_SIZE(r8a77995_areas),
+};
diff --git a/drivers/soc/renesas/r9a06g032-smp.c b/drivers/soc/renesas/r9a06g032-smp.c
new file mode 100644
index 000000000..a1926e8d7
--- /dev/null
+++ b/drivers/soc/renesas/r9a06g032-smp.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * R9A06G032 Second CA7 enabler.
+ *
+ * Copyright (C) 2018 Renesas Electronics Europe Limited
+ *
+ * Michel Pollet <michel.pollet@bp.renesas.com>, <buserror@gmail.com>
+ * Derived from actions,s500-smp
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/smp.h>
+
+/*
+ * The second CPU is parked in ROM at boot time. It is woken by writing an
+ * address into the BOOTADDR register of sysctrl and sending it a wakeup IPI.
+ *
+ * So the default value of the "cpu-release-addr" corresponds to BOOTADDR...
+ *
+ * *However* the BOOTADDR register is not available when the kernel
+ * starts in NONSEC mode.
+ *
+ * So for NONSEC mode, the bootloader re-parks the second CPU into a pen
+ * in SRAM, and changes the "cpu-release-addr" of linux's DT to a SRAM address,
+ * which is not restricted.
+ */
+
+static void __iomem *cpu_bootaddr;
+
+static DEFINE_SPINLOCK(cpu_lock);
+
+static int
+r9a06g032_smp_boot_secondary(unsigned int cpu,
+ struct task_struct *idle)
+{
+ if (!cpu_bootaddr)
+ return -ENODEV;
+
+ spin_lock(&cpu_lock);
+
+ writel(__pa_symbol(secondary_startup), cpu_bootaddr);
+ arch_send_wakeup_ipi_mask(cpumask_of(cpu));
+
+ spin_unlock(&cpu_lock);
+
+ return 0;
+}
+
+static void __init r9a06g032_smp_prepare_cpus(unsigned int max_cpus)
+{
+ struct device_node *dn;
+ int ret = -EINVAL, dns;
+ u32 bootaddr;
+
+ dn = of_get_cpu_node(1, NULL);
+ if (!dn) {
+ pr_err("CPU#1: missing device tree node\n");
+ return;
+ }
+ /*
+	 * Determine the address the secondary CPU is polling.
+	 * The bootloader *does* change this property.
+	 * Note: the property can be either 64 or 32 bits wide, so handle both.
+ */
+ if (of_find_property(dn, "cpu-release-addr", &dns)) {
+ if (dns == sizeof(u64)) {
+ u64 temp;
+
+ ret = of_property_read_u64(dn,
+ "cpu-release-addr", &temp);
+ bootaddr = temp;
+ } else {
+ ret = of_property_read_u32(dn,
+ "cpu-release-addr",
+ &bootaddr);
+ }
+ }
+ of_node_put(dn);
+ if (ret) {
+ pr_err("CPU#1: invalid cpu-release-addr property\n");
+ return;
+ }
+ pr_info("CPU#1: cpu-release-addr %08x\n", bootaddr);
+
+ cpu_bootaddr = ioremap(bootaddr, sizeof(bootaddr));
+}
+
+static const struct smp_operations r9a06g032_smp_ops __initconst = {
+ .smp_prepare_cpus = r9a06g032_smp_prepare_cpus,
+ .smp_boot_secondary = r9a06g032_smp_boot_secondary,
+};
+
+CPU_METHOD_OF_DECLARE(r9a06g032_smp,
+ "renesas,r9a06g032-smp", &r9a06g032_smp_ops);
diff --git a/drivers/soc/renesas/rcar-rst.c b/drivers/soc/renesas/rcar-rst.c
new file mode 100644
index 000000000..d9c1034e7
--- /dev/null
+++ b/drivers/soc/renesas/rcar-rst.c
@@ -0,0 +1,121 @@
+/*
+ * R-Car Gen1 RESET/WDT, R-Car Gen2, Gen3, and RZ/G RST Driver
+ *
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/soc/renesas/rcar-rst.h>
+
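+/*
+ * WDTRSTCR_RESET: the 0xA55A pattern in the upper half-word is the
+ * write-protection key for WDTRSTCR; the remaining bits enable reset
+ * requests from the (R)WDT watchdog. Used by rcar_rst_enable_wdt_reset()
+ * on R-Car Gen2 and RZ/G1 (hedged reading of the value, not from this file).
+ */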
+#define WDTRSTCR_RESET 0xA55A0002
+#define WDTRSTCR 0x0054
+
+static int rcar_rst_enable_wdt_reset(void __iomem *base)
+{
+ iowrite32(WDTRSTCR_RESET, base + WDTRSTCR);
+ return 0;
+}
+
+struct rst_config {
+ unsigned int modemr; /* Mode Monitoring Register Offset */
+	int (*configure)(void __iomem *base);	/* Platform specific configuration */
+};
+
+static const struct rst_config rcar_rst_gen1 __initconst = {
+ .modemr = 0x20,
+};
+
+static const struct rst_config rcar_rst_gen2 __initconst = {
+ .modemr = 0x60,
+ .configure = rcar_rst_enable_wdt_reset,
+};
+
+static const struct rst_config rcar_rst_gen3 __initconst = {
+ .modemr = 0x60,
+};
+
+static const struct of_device_id rcar_rst_matches[] __initconst = {
+ /* RZ/G is handled like R-Car Gen2 */
+ { .compatible = "renesas,r8a7743-rst", .data = &rcar_rst_gen2 },
+ { .compatible = "renesas,r8a7745-rst", .data = &rcar_rst_gen2 },
+ { .compatible = "renesas,r8a77470-rst", .data = &rcar_rst_gen2 },
+ /* R-Car Gen1 */
+ { .compatible = "renesas,r8a7778-reset-wdt", .data = &rcar_rst_gen1 },
+ { .compatible = "renesas,r8a7779-reset-wdt", .data = &rcar_rst_gen1 },
+ /* R-Car Gen2 */
+ { .compatible = "renesas,r8a7790-rst", .data = &rcar_rst_gen2 },
+ { .compatible = "renesas,r8a7791-rst", .data = &rcar_rst_gen2 },
+ { .compatible = "renesas,r8a7792-rst", .data = &rcar_rst_gen2 },
+ { .compatible = "renesas,r8a7793-rst", .data = &rcar_rst_gen2 },
+ { .compatible = "renesas,r8a7794-rst", .data = &rcar_rst_gen2 },
+ /* R-Car Gen3 */
+ { .compatible = "renesas,r8a7795-rst", .data = &rcar_rst_gen3 },
+ { .compatible = "renesas,r8a7796-rst", .data = &rcar_rst_gen3 },
+ { .compatible = "renesas,r8a77965-rst", .data = &rcar_rst_gen3 },
+ { .compatible = "renesas,r8a77970-rst", .data = &rcar_rst_gen3 },
+ { .compatible = "renesas,r8a77980-rst", .data = &rcar_rst_gen3 },
+ { .compatible = "renesas,r8a77990-rst", .data = &rcar_rst_gen3 },
+ { .compatible = "renesas,r8a77995-rst", .data = &rcar_rst_gen3 },
+ { /* sentinel */ }
+};
+
+static void __iomem *rcar_rst_base __initdata;
+static u32 saved_mode __initdata;
+
+static int __init rcar_rst_init(void)
+{
+ const struct of_device_id *match;
+ const struct rst_config *cfg;
+ struct device_node *np;
+ void __iomem *base;
+ int error = 0;
+
+ np = of_find_matching_node_and_match(NULL, rcar_rst_matches, &match);
+ if (!np)
+ return -ENODEV;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_warn("%pOF: Cannot map regs\n", np);
+ error = -ENOMEM;
+ goto out_put;
+ }
+
+ rcar_rst_base = base;
+ cfg = match->data;
+ saved_mode = ioread32(base + cfg->modemr);
+ if (cfg->configure) {
+ error = cfg->configure(base);
+ if (error) {
+			pr_warn("%pOF: Cannot run SoC-specific configuration\n",
+ np);
+ goto out_put;
+ }
+ }
+
+ pr_debug("%pOF: MODE = 0x%08x\n", np, saved_mode);
+
+out_put:
+ of_node_put(np);
+ return error;
+}
+
+int __init rcar_rst_read_mode_pins(u32 *mode)
+{
+ int error;
+
+ if (!rcar_rst_base) {
+ error = rcar_rst_init();
+ if (error)
+ return error;
+ }
+
+ *mode = saved_mode;
+ return 0;
+}
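+
+/*
+ * Typical callers are the R-Car clock drivers, which sample the MD mode
+ * pins latched at reset. A minimal sketch (hypothetical caller, "md14" is
+ * illustrative only):
+ *
+ *	u32 mode;
+ *
+ *	if (!rcar_rst_read_mode_pins(&mode))
+ *		md14 = (mode >> 14) & 1;	// one MD pin, for illustration
+ */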
diff --git a/drivers/soc/renesas/rcar-sysc.c b/drivers/soc/renesas/rcar-sysc.c
new file mode 100644
index 000000000..029188e8b
--- /dev/null
+++ b/drivers/soc/renesas/rcar-sysc.c
@@ -0,0 +1,492 @@
+/*
+ * R-Car SYSC Power management support
+ *
+ * Copyright (C) 2014 Magnus Damm
+ * Copyright (C) 2015-2017 Glider bvba
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/clk/renesas.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/of_address.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/soc/renesas/rcar-sysc.h>
+
+#include "rcar-sysc.h"
+
+/* SYSC Common */
+#define SYSCSR 0x00 /* SYSC Status Register */
+#define SYSCISR 0x04 /* Interrupt Status Register */
+#define SYSCISCR 0x08 /* Interrupt Status Clear Register */
+#define SYSCIER 0x0c /* Interrupt Enable Register */
+#define SYSCIMR 0x10 /* Interrupt Mask Register */
+
+/* SYSC Status Register */
+#define SYSCSR_PONENB 1 /* Ready for power resume requests */
+#define SYSCSR_POFFENB 0 /* Ready for power shutoff requests */
+
+/*
+ * Power control register offsets inside the register block for each domain.
+ * Note: the "CR" registers for ARM cores exist on H1 only;
+ * use WFI to power off and CPG/APMU to resume ARM cores on R-Car Gen2,
+ * and PSCI on R-Car Gen3.
+ */
+#define PWRSR_OFFS 0x00 /* Power Status Register */
+#define PWROFFCR_OFFS 0x04 /* Power Shutoff Control Register */
+#define PWROFFSR_OFFS 0x08 /* Power Shutoff Status Register */
+#define PWRONCR_OFFS 0x0c /* Power Resume Control Register */
+#define PWRONSR_OFFS 0x10 /* Power Resume Status Register */
+#define PWRER_OFFS 0x14 /* Power Shutoff/Resume Error */
+
+
+#define SYSCSR_RETRIES 100
+#define SYSCSR_DELAY_US 1
+
+#define PWRER_RETRIES 100
+#define PWRER_DELAY_US 1
+
+#define SYSCISR_RETRIES 1000
+#define SYSCISR_DELAY_US 1
+
+#define RCAR_PD_ALWAYS_ON 32 /* Always-on power area */
+
+struct rcar_sysc_ch {
+ u16 chan_offs;
+ u8 chan_bit;
+ u8 isr_bit;
+};
+
+static void __iomem *rcar_sysc_base;
+static DEFINE_SPINLOCK(rcar_sysc_lock); /* SMP CPUs + I/O devices */
+
+static int rcar_sysc_pwr_on_off(const struct rcar_sysc_ch *sysc_ch, bool on)
+{
+ unsigned int sr_bit, reg_offs;
+ int k;
+
+ if (on) {
+ sr_bit = SYSCSR_PONENB;
+ reg_offs = PWRONCR_OFFS;
+ } else {
+ sr_bit = SYSCSR_POFFENB;
+ reg_offs = PWROFFCR_OFFS;
+ }
+
+ /* Wait until SYSC is ready to accept a power request */
+ for (k = 0; k < SYSCSR_RETRIES; k++) {
+ if (ioread32(rcar_sysc_base + SYSCSR) & BIT(sr_bit))
+ break;
+ udelay(SYSCSR_DELAY_US);
+ }
+
+ if (k == SYSCSR_RETRIES)
+ return -EAGAIN;
+
+ /* Submit power shutoff or power resume request */
+ iowrite32(BIT(sysc_ch->chan_bit),
+ rcar_sysc_base + sysc_ch->chan_offs + reg_offs);
+
+ return 0;
+}
+
+static int rcar_sysc_power(const struct rcar_sysc_ch *sysc_ch, bool on)
+{
+ unsigned int isr_mask = BIT(sysc_ch->isr_bit);
+ unsigned int chan_mask = BIT(sysc_ch->chan_bit);
+ unsigned int status;
+ unsigned long flags;
+ int ret = 0;
+ int k;
+
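+	/*
+	 * Sequence: clear any stale interrupt status, resubmit the request
+	 * until PWRER no longer flags an error, then wait for the SYSC
+	 * interrupt status bit to signal completion before clearing it again.
+	 */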
+ spin_lock_irqsave(&rcar_sysc_lock, flags);
+
+ iowrite32(isr_mask, rcar_sysc_base + SYSCISCR);
+
+	/* Submit power shutoff or resume request until it is accepted */
+ for (k = 0; k < PWRER_RETRIES; k++) {
+ ret = rcar_sysc_pwr_on_off(sysc_ch, on);
+ if (ret)
+ goto out;
+
+ status = ioread32(rcar_sysc_base +
+ sysc_ch->chan_offs + PWRER_OFFS);
+ if (!(status & chan_mask))
+ break;
+
+ udelay(PWRER_DELAY_US);
+ }
+
+ if (k == PWRER_RETRIES) {
+ ret = -EIO;
+ goto out;
+ }
+
+	/* Wait until the power shutoff or resume request has completed */
+ for (k = 0; k < SYSCISR_RETRIES; k++) {
+ if (ioread32(rcar_sysc_base + SYSCISR) & isr_mask)
+ break;
+ udelay(SYSCISR_DELAY_US);
+ }
+
+ if (k == SYSCISR_RETRIES)
+ ret = -EIO;
+
+ iowrite32(isr_mask, rcar_sysc_base + SYSCISCR);
+
+ out:
+ spin_unlock_irqrestore(&rcar_sysc_lock, flags);
+
+ pr_debug("sysc power %s domain %d: %08x -> %d\n", on ? "on" : "off",
+ sysc_ch->isr_bit, ioread32(rcar_sysc_base + SYSCISR), ret);
+ return ret;
+}
+
+static int rcar_sysc_power_down(const struct rcar_sysc_ch *sysc_ch)
+{
+ return rcar_sysc_power(sysc_ch, false);
+}
+
+static int rcar_sysc_power_up(const struct rcar_sysc_ch *sysc_ch)
+{
+ return rcar_sysc_power(sysc_ch, true);
+}
+
+static bool rcar_sysc_power_is_off(const struct rcar_sysc_ch *sysc_ch)
+{
+ unsigned int st;
+
+ st = ioread32(rcar_sysc_base + sysc_ch->chan_offs + PWRSR_OFFS);
+ if (st & BIT(sysc_ch->chan_bit))
+ return true;
+
+ return false;
+}
+
+struct rcar_sysc_pd {
+ struct generic_pm_domain genpd;
+ struct rcar_sysc_ch ch;
+ unsigned int flags;
+	char name[];
+};
+
+static inline struct rcar_sysc_pd *to_rcar_pd(struct generic_pm_domain *d)
+{
+ return container_of(d, struct rcar_sysc_pd, genpd);
+}
+
+static int rcar_sysc_pd_power_off(struct generic_pm_domain *genpd)
+{
+ struct rcar_sysc_pd *pd = to_rcar_pd(genpd);
+
+ pr_debug("%s: %s\n", __func__, genpd->name);
+ return rcar_sysc_power_down(&pd->ch);
+}
+
+static int rcar_sysc_pd_power_on(struct generic_pm_domain *genpd)
+{
+ struct rcar_sysc_pd *pd = to_rcar_pd(genpd);
+
+ pr_debug("%s: %s\n", __func__, genpd->name);
+ return rcar_sysc_power_up(&pd->ch);
+}
+
+static bool has_cpg_mstp;
+
+static int __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd)
+{
+ struct generic_pm_domain *genpd = &pd->genpd;
+ const char *name = pd->genpd.name;
+ struct dev_power_governor *gov = &simple_qos_governor;
+ int error;
+
+ if (pd->flags & PD_CPU) {
+ /*
+ * This domain contains a CPU core and therefore it should
+ * only be turned off if the CPU is not in use.
+ */
+ pr_debug("PM domain %s contains %s\n", name, "CPU");
+ genpd->flags |= GENPD_FLAG_ALWAYS_ON;
+ } else if (pd->flags & PD_SCU) {
+ /*
+ * This domain contains an SCU and cache-controller, and
+ * therefore it should only be turned off if the CPU cores are
+ * not in use.
+ */
+ pr_debug("PM domain %s contains %s\n", name, "SCU");
+ genpd->flags |= GENPD_FLAG_ALWAYS_ON;
+ } else if (pd->flags & PD_NO_CR) {
+ /*
+ * This domain cannot be turned off.
+ */
+ genpd->flags |= GENPD_FLAG_ALWAYS_ON;
+ }
+
+ if (!(pd->flags & (PD_CPU | PD_SCU))) {
+ /* Enable Clock Domain for I/O devices */
+ genpd->flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
+ if (has_cpg_mstp) {
+ genpd->attach_dev = cpg_mstp_attach_dev;
+ genpd->detach_dev = cpg_mstp_detach_dev;
+ } else {
+ genpd->attach_dev = cpg_mssr_attach_dev;
+ genpd->detach_dev = cpg_mssr_detach_dev;
+ }
+ }
+
+ genpd->power_off = rcar_sysc_pd_power_off;
+ genpd->power_on = rcar_sysc_pd_power_on;
+
+ if (pd->flags & (PD_CPU | PD_NO_CR)) {
+ /* Skip CPUs (handled by SMP code) and areas without control */
+ pr_debug("%s: Not touching %s\n", __func__, genpd->name);
+ goto finalize;
+ }
+
+ if (!rcar_sysc_power_is_off(&pd->ch)) {
+ pr_debug("%s: %s is already powered\n", __func__, genpd->name);
+ goto finalize;
+ }
+
+ rcar_sysc_power_up(&pd->ch);
+
+finalize:
+ error = pm_genpd_init(genpd, gov, false);
+ if (error)
+ pr_err("Failed to init PM domain %s: %d\n", name, error);
+
+ return error;
+}
+
+static const struct of_device_id rcar_sysc_matches[] __initconst = {
+#ifdef CONFIG_SYSC_R8A7743
+ { .compatible = "renesas,r8a7743-sysc", .data = &r8a7743_sysc_info },
+#endif
+#ifdef CONFIG_SYSC_R8A7745
+ { .compatible = "renesas,r8a7745-sysc", .data = &r8a7745_sysc_info },
+#endif
+#ifdef CONFIG_SYSC_R8A77470
+ { .compatible = "renesas,r8a77470-sysc", .data = &r8a77470_sysc_info },
+#endif
+#ifdef CONFIG_SYSC_R8A7779
+ { .compatible = "renesas,r8a7779-sysc", .data = &r8a7779_sysc_info },
+#endif
+#ifdef CONFIG_SYSC_R8A7790
+ { .compatible = "renesas,r8a7790-sysc", .data = &r8a7790_sysc_info },
+#endif
+#ifdef CONFIG_SYSC_R8A7791
+ { .compatible = "renesas,r8a7791-sysc", .data = &r8a7791_sysc_info },
+ /* R-Car M2-N is identical to R-Car M2-W w.r.t. power domains. */
+ { .compatible = "renesas,r8a7793-sysc", .data = &r8a7791_sysc_info },
+#endif
+#ifdef CONFIG_SYSC_R8A7792
+ { .compatible = "renesas,r8a7792-sysc", .data = &r8a7792_sysc_info },
+#endif
+#ifdef CONFIG_SYSC_R8A7794
+ { .compatible = "renesas,r8a7794-sysc", .data = &r8a7794_sysc_info },
+#endif
+#ifdef CONFIG_SYSC_R8A7795
+ { .compatible = "renesas,r8a7795-sysc", .data = &r8a7795_sysc_info },
+#endif
+#ifdef CONFIG_SYSC_R8A7796
+ { .compatible = "renesas,r8a7796-sysc", .data = &r8a7796_sysc_info },
+#endif
+#ifdef CONFIG_SYSC_R8A77965
+ { .compatible = "renesas,r8a77965-sysc", .data = &r8a77965_sysc_info },
+#endif
+#ifdef CONFIG_SYSC_R8A77970
+ { .compatible = "renesas,r8a77970-sysc", .data = &r8a77970_sysc_info },
+#endif
+#ifdef CONFIG_SYSC_R8A77980
+ { .compatible = "renesas,r8a77980-sysc", .data = &r8a77980_sysc_info },
+#endif
+#ifdef CONFIG_SYSC_R8A77990
+ { .compatible = "renesas,r8a77990-sysc", .data = &r8a77990_sysc_info },
+#endif
+#ifdef CONFIG_SYSC_R8A77995
+ { .compatible = "renesas,r8a77995-sysc", .data = &r8a77995_sysc_info },
+#endif
+ { /* sentinel */ }
+};
+
+struct rcar_pm_domains {
+ struct genpd_onecell_data onecell_data;
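+	/* indexed by each area's isr_bit; RCAR_PD_ALWAYS_ON (32) is the highest */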
+ struct generic_pm_domain *domains[RCAR_PD_ALWAYS_ON + 1];
+};
+
+static struct genpd_onecell_data *rcar_sysc_onecell_data;
+
+static int __init rcar_sysc_pd_init(void)
+{
+ const struct rcar_sysc_info *info;
+ const struct of_device_id *match;
+ struct rcar_pm_domains *domains;
+ struct device_node *np;
+ u32 syscier, syscimr;
+ void __iomem *base;
+ unsigned int i;
+ int error;
+
+ np = of_find_matching_node_and_match(NULL, rcar_sysc_matches, &match);
+ if (!np)
+ return -ENODEV;
+
+ info = match->data;
+
+ if (info->init) {
+ error = info->init();
+ if (error)
+ return error;
+ }
+
+ has_cpg_mstp = of_find_compatible_node(NULL, NULL,
+ "renesas,cpg-mstp-clocks");
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_warn("%pOF: Cannot map regs\n", np);
+ error = -ENOMEM;
+ goto out_put;
+ }
+
+ rcar_sysc_base = base;
+
+ domains = kzalloc(sizeof(*domains), GFP_KERNEL);
+ if (!domains) {
+ error = -ENOMEM;
+ goto out_put;
+ }
+
+ domains->onecell_data.domains = domains->domains;
+ domains->onecell_data.num_domains = ARRAY_SIZE(domains->domains);
+ rcar_sysc_onecell_data = &domains->onecell_data;
+
+ for (i = 0, syscier = 0; i < info->num_areas; i++)
+ syscier |= BIT(info->areas[i].isr_bit);
+
+ /*
+ * Mask all interrupt sources to prevent the CPU from receiving them.
+ * Make sure not to clear reserved bits that were set before.
+ */
+ syscimr = ioread32(base + SYSCIMR);
+ syscimr |= syscier;
+ pr_debug("%pOF: syscimr = 0x%08x\n", np, syscimr);
+ iowrite32(syscimr, base + SYSCIMR);
+
+ /*
+ * SYSC needs all interrupt sources enabled to control power.
+ */
+ pr_debug("%pOF: syscier = 0x%08x\n", np, syscier);
+ iowrite32(syscier, base + SYSCIER);
+
+ /*
+ * First, create all PM domains
+ */
+ for (i = 0; i < info->num_areas; i++) {
+ const struct rcar_sysc_area *area = &info->areas[i];
+ struct rcar_sysc_pd *pd;
+
+ if (!area->name) {
+ /* Skip NULLified area */
+ continue;
+ }
+
+ pd = kzalloc(sizeof(*pd) + strlen(area->name) + 1, GFP_KERNEL);
+ if (!pd) {
+ error = -ENOMEM;
+ goto out_put;
+ }
+
+ strcpy(pd->name, area->name);
+ pd->genpd.name = pd->name;
+ pd->ch.chan_offs = area->chan_offs;
+ pd->ch.chan_bit = area->chan_bit;
+ pd->ch.isr_bit = area->isr_bit;
+ pd->flags = area->flags;
+
+ error = rcar_sysc_pd_setup(pd);
+ if (error)
+ goto out_put;
+
+ domains->domains[area->isr_bit] = &pd->genpd;
+ }
+
+ /*
+ * Second, link all PM domains to their parents
+ */
+ for (i = 0; i < info->num_areas; i++) {
+ const struct rcar_sysc_area *area = &info->areas[i];
+
+ if (!area->name || area->parent < 0)
+ continue;
+
+ error = pm_genpd_add_subdomain(domains->domains[area->parent],
+ domains->domains[area->isr_bit]);
+ if (error)
+ pr_warn("Failed to add PM subdomain %s to parent %u\n",
+ area->name, area->parent);
+ }
+
+ error = of_genpd_add_provider_onecell(np, &domains->onecell_data);
+
+out_put:
+ of_node_put(np);
+ return error;
+}
+early_initcall(rcar_sysc_pd_init);
+
+void __init rcar_sysc_nullify(struct rcar_sysc_area *areas,
+ unsigned int num_areas, u8 id)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_areas; i++)
+ if (areas[i].isr_bit == id) {
+ areas[i].name = NULL;
+ return;
+ }
+}
+
+#ifdef CONFIG_ARCH_R8A7779
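+/*
+ * On R-Car H1 the CPU cores are individual SYSC power areas (the
+ * "arm1".."arm3" entries in r8a7779-sysc.c); find the PD_CPU domain whose
+ * channel bit matches the CPU index and power it up or down accordingly.
+ */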
+static int rcar_sysc_power_cpu(unsigned int idx, bool on)
+{
+ struct generic_pm_domain *genpd;
+ struct rcar_sysc_pd *pd;
+ unsigned int i;
+
+ if (!rcar_sysc_onecell_data)
+ return -ENODEV;
+
+ for (i = 0; i < rcar_sysc_onecell_data->num_domains; i++) {
+ genpd = rcar_sysc_onecell_data->domains[i];
+ if (!genpd)
+ continue;
+
+ pd = to_rcar_pd(genpd);
+ if (!(pd->flags & PD_CPU) || pd->ch.chan_bit != idx)
+ continue;
+
+ return on ? rcar_sysc_power_up(&pd->ch)
+ : rcar_sysc_power_down(&pd->ch);
+ }
+
+ return -ENOENT;
+}
+
+int rcar_sysc_power_down_cpu(unsigned int cpu)
+{
+ return rcar_sysc_power_cpu(cpu, false);
+}
+
+int rcar_sysc_power_up_cpu(unsigned int cpu)
+{
+ return rcar_sysc_power_cpu(cpu, true);
+}
+#endif /* CONFIG_ARCH_R8A7779 */
diff --git a/drivers/soc/renesas/rcar-sysc.h b/drivers/soc/renesas/rcar-sysc.h
new file mode 100644
index 000000000..a22e7cf25
--- /dev/null
+++ b/drivers/soc/renesas/rcar-sysc.h
@@ -0,0 +1,76 @@
+/*
+ * Renesas R-Car System Controller
+ *
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+#ifndef __SOC_RENESAS_RCAR_SYSC_H__
+#define __SOC_RENESAS_RCAR_SYSC_H__
+
+#include <linux/types.h>
+
+
+/*
+ * Power Domain flags
+ */
+#define PD_CPU BIT(0) /* Area contains main CPU core */
+#define PD_SCU BIT(1) /* Area contains SCU and L2 cache */
+#define PD_NO_CR BIT(2) /* Area lacks PWR{ON,OFF}CR registers */
+
+#define PD_CPU_CR PD_CPU /* CPU area has CR (R-Car H1) */
+#define PD_CPU_NOCR	(PD_CPU | PD_NO_CR) /* CPU area lacks CR (R-Car Gen2/3) */
+#define PD_ALWAYS_ON PD_NO_CR /* Always-on area */
+
+
+/*
+ * Description of a Power Area
+ */
+
+struct rcar_sysc_area {
+ const char *name;
+ u16 chan_offs; /* Offset of PWRSR register for this area */
+ u8 chan_bit; /* Bit in PWR* (except for PWRUP in PWRSR) */
+ u8 isr_bit; /* Bit in SYSCI*R */
+ int parent; /* -1 if none */
+ unsigned int flags; /* See PD_* */
+};
+
+
+/*
+ * SoC-specific Power Area Description
+ */
+
+struct rcar_sysc_info {
+ int (*init)(void); /* Optional */
+ const struct rcar_sysc_area *areas;
+ unsigned int num_areas;
+};
+
+extern const struct rcar_sysc_info r8a7743_sysc_info;
+extern const struct rcar_sysc_info r8a7745_sysc_info;
+extern const struct rcar_sysc_info r8a77470_sysc_info;
+extern const struct rcar_sysc_info r8a7779_sysc_info;
+extern const struct rcar_sysc_info r8a7790_sysc_info;
+extern const struct rcar_sysc_info r8a7791_sysc_info;
+extern const struct rcar_sysc_info r8a7792_sysc_info;
+extern const struct rcar_sysc_info r8a7794_sysc_info;
+extern const struct rcar_sysc_info r8a7795_sysc_info;
+extern const struct rcar_sysc_info r8a7796_sysc_info;
+extern const struct rcar_sysc_info r8a77965_sysc_info;
+extern const struct rcar_sysc_info r8a77970_sysc_info;
+extern const struct rcar_sysc_info r8a77980_sysc_info;
+extern const struct rcar_sysc_info r8a77990_sysc_info;
+extern const struct rcar_sysc_info r8a77995_sysc_info;
+
+/*
+ * Helpers for fixing up power area tables depending on SoC revision
+ */
+
+extern void rcar_sysc_nullify(struct rcar_sysc_area *areas,
+ unsigned int num_areas, u8 id);
+
+#endif /* __SOC_RENESAS_RCAR_SYSC_H__ */
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
new file mode 100644
index 000000000..2a43d6e99
--- /dev/null
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -0,0 +1,327 @@
+/*
+ * Renesas SoC Identification
+ *
+ * Copyright (C) 2014-2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/sys_soc.h>
+
+
+struct renesas_family {
+ const char name[16];
+ u32 reg; /* CCCR or PRR, if not in DT */
+};
+
+static const struct renesas_family fam_rcar_gen1 __initconst __maybe_unused = {
+ .name = "R-Car Gen1",
+ .reg = 0xff000044, /* PRR (Product Register) */
+};
+
+static const struct renesas_family fam_rcar_gen2 __initconst __maybe_unused = {
+ .name = "R-Car Gen2",
+ .reg = 0xff000044, /* PRR (Product Register) */
+};
+
+static const struct renesas_family fam_rcar_gen3 __initconst __maybe_unused = {
+ .name = "R-Car Gen3",
+ .reg = 0xfff00044, /* PRR (Product Register) */
+};
+
+static const struct renesas_family fam_rmobile __initconst __maybe_unused = {
+ .name = "R-Mobile",
+ .reg = 0xe600101c, /* CCCR (Common Chip Code Register) */
+};
+
+static const struct renesas_family fam_rza __initconst __maybe_unused = {
+ .name = "RZ/A",
+};
+
+static const struct renesas_family fam_rzg __initconst __maybe_unused = {
+ .name = "RZ/G",
+ .reg = 0xff000044, /* PRR (Product Register) */
+};
+
+static const struct renesas_family fam_shmobile __initconst __maybe_unused = {
+ .name = "SH-Mobile",
+ .reg = 0xe600101c, /* CCCR (Common Chip Code Register) */
+};
+
+
+struct renesas_soc {
+ const struct renesas_family *family;
+ u8 id;
+};
+
+static const struct renesas_soc soc_rz_a1h __initconst __maybe_unused = {
+ .family = &fam_rza,
+};
+
+static const struct renesas_soc soc_rmobile_ape6 __initconst __maybe_unused = {
+ .family = &fam_rmobile,
+ .id = 0x3f,
+};
+
+static const struct renesas_soc soc_rmobile_a1 __initconst __maybe_unused = {
+ .family = &fam_rmobile,
+ .id = 0x40,
+};
+
+static const struct renesas_soc soc_rz_g1h __initconst __maybe_unused = {
+ .family = &fam_rzg,
+ .id = 0x45,
+};
+
+static const struct renesas_soc soc_rz_g1m __initconst __maybe_unused = {
+ .family = &fam_rzg,
+ .id = 0x47,
+};
+
+static const struct renesas_soc soc_rz_g1n __initconst __maybe_unused = {
+ .family = &fam_rzg,
+ .id = 0x4b,
+};
+
+static const struct renesas_soc soc_rz_g1e __initconst __maybe_unused = {
+ .family = &fam_rzg,
+ .id = 0x4c,
+};
+
+static const struct renesas_soc soc_rz_g1c __initconst __maybe_unused = {
+ .family = &fam_rzg,
+ .id = 0x53,
+};
+
+static const struct renesas_soc soc_rcar_m1a __initconst __maybe_unused = {
+ .family = &fam_rcar_gen1,
+};
+
+static const struct renesas_soc soc_rcar_h1 __initconst __maybe_unused = {
+ .family = &fam_rcar_gen1,
+ .id = 0x3b,
+};
+
+static const struct renesas_soc soc_rcar_h2 __initconst __maybe_unused = {
+ .family = &fam_rcar_gen2,
+ .id = 0x45,
+};
+
+static const struct renesas_soc soc_rcar_m2_w __initconst __maybe_unused = {
+ .family = &fam_rcar_gen2,
+ .id = 0x47,
+};
+
+static const struct renesas_soc soc_rcar_v2h __initconst __maybe_unused = {
+ .family = &fam_rcar_gen2,
+ .id = 0x4a,
+};
+
+static const struct renesas_soc soc_rcar_m2_n __initconst __maybe_unused = {
+ .family = &fam_rcar_gen2,
+ .id = 0x4b,
+};
+
+static const struct renesas_soc soc_rcar_e2 __initconst __maybe_unused = {
+ .family = &fam_rcar_gen2,
+ .id = 0x4c,
+};
+
+static const struct renesas_soc soc_rcar_h3 __initconst __maybe_unused = {
+ .family = &fam_rcar_gen3,
+ .id = 0x4f,
+};
+
+static const struct renesas_soc soc_rcar_m3_w __initconst __maybe_unused = {
+ .family = &fam_rcar_gen3,
+ .id = 0x52,
+};
+
+static const struct renesas_soc soc_rcar_m3_n __initconst __maybe_unused = {
+ .family = &fam_rcar_gen3,
+ .id = 0x55,
+};
+
+static const struct renesas_soc soc_rcar_v3m __initconst __maybe_unused = {
+ .family = &fam_rcar_gen3,
+ .id = 0x54,
+};
+
+static const struct renesas_soc soc_rcar_v3h __initconst __maybe_unused = {
+ .family = &fam_rcar_gen3,
+ .id = 0x56,
+};
+
+static const struct renesas_soc soc_rcar_e3 __initconst __maybe_unused = {
+ .family = &fam_rcar_gen3,
+ .id = 0x57,
+};
+
+static const struct renesas_soc soc_rcar_d3 __initconst __maybe_unused = {
+ .family = &fam_rcar_gen3,
+ .id = 0x58,
+};
+
+static const struct renesas_soc soc_shmobile_ag5 __initconst __maybe_unused = {
+ .family = &fam_shmobile,
+ .id = 0x37,
+};
+
+
+static const struct of_device_id renesas_socs[] __initconst = {
+#ifdef CONFIG_ARCH_R7S72100
+ { .compatible = "renesas,r7s72100", .data = &soc_rz_a1h },
+#endif
+#ifdef CONFIG_ARCH_R8A73A4
+ { .compatible = "renesas,r8a73a4", .data = &soc_rmobile_ape6 },
+#endif
+#ifdef CONFIG_ARCH_R8A7740
+ { .compatible = "renesas,r8a7740", .data = &soc_rmobile_a1 },
+#endif
+#ifdef CONFIG_ARCH_R8A7742
+ { .compatible = "renesas,r8a7742", .data = &soc_rz_g1h },
+#endif
+#ifdef CONFIG_ARCH_R8A7743
+ { .compatible = "renesas,r8a7743", .data = &soc_rz_g1m },
+#endif
+#ifdef CONFIG_ARCH_R8A7744
+ { .compatible = "renesas,r8a7744", .data = &soc_rz_g1n },
+#endif
+#ifdef CONFIG_ARCH_R8A7745
+ { .compatible = "renesas,r8a7745", .data = &soc_rz_g1e },
+#endif
+#ifdef CONFIG_ARCH_R8A77470
+ { .compatible = "renesas,r8a77470", .data = &soc_rz_g1c },
+#endif
+#ifdef CONFIG_ARCH_R8A7778
+ { .compatible = "renesas,r8a7778", .data = &soc_rcar_m1a },
+#endif
+#ifdef CONFIG_ARCH_R8A7779
+ { .compatible = "renesas,r8a7779", .data = &soc_rcar_h1 },
+#endif
+#ifdef CONFIG_ARCH_R8A7790
+ { .compatible = "renesas,r8a7790", .data = &soc_rcar_h2 },
+#endif
+#ifdef CONFIG_ARCH_R8A7791
+ { .compatible = "renesas,r8a7791", .data = &soc_rcar_m2_w },
+#endif
+#ifdef CONFIG_ARCH_R8A7792
+ { .compatible = "renesas,r8a7792", .data = &soc_rcar_v2h },
+#endif
+#ifdef CONFIG_ARCH_R8A7793
+ { .compatible = "renesas,r8a7793", .data = &soc_rcar_m2_n },
+#endif
+#ifdef CONFIG_ARCH_R8A7794
+ { .compatible = "renesas,r8a7794", .data = &soc_rcar_e2 },
+#endif
+#ifdef CONFIG_ARCH_R8A7795
+ { .compatible = "renesas,r8a7795", .data = &soc_rcar_h3 },
+#endif
+#ifdef CONFIG_ARCH_R8A7796
+ { .compatible = "renesas,r8a7796", .data = &soc_rcar_m3_w },
+#endif
+#ifdef CONFIG_ARCH_R8A77965
+ { .compatible = "renesas,r8a77965", .data = &soc_rcar_m3_n },
+#endif
+#ifdef CONFIG_ARCH_R8A77970
+ { .compatible = "renesas,r8a77970", .data = &soc_rcar_v3m },
+#endif
+#ifdef CONFIG_ARCH_R8A77980
+ { .compatible = "renesas,r8a77980", .data = &soc_rcar_v3h },
+#endif
+#ifdef CONFIG_ARCH_R8A77990
+ { .compatible = "renesas,r8a77990", .data = &soc_rcar_e3 },
+#endif
+#ifdef CONFIG_ARCH_R8A77995
+ { .compatible = "renesas,r8a77995", .data = &soc_rcar_d3 },
+#endif
+#ifdef CONFIG_ARCH_SH73A0
+ { .compatible = "renesas,sh73a0", .data = &soc_shmobile_ag5 },
+#endif
+ { /* sentinel */ }
+};
+
+static int __init renesas_soc_init(void)
+{
+ struct soc_device_attribute *soc_dev_attr;
+ const struct renesas_family *family;
+ const struct of_device_id *match;
+ const struct renesas_soc *soc;
+ void __iomem *chipid = NULL;
+ struct soc_device *soc_dev;
+ struct device_node *np;
+ unsigned int product;
+
+ match = of_match_node(renesas_socs, of_root);
+ if (!match)
+ return -ENODEV;
+
+ soc = match->data;
+ family = soc->family;
+
+ /* Try PRR first, then hardcoded fallback */
+ np = of_find_compatible_node(NULL, NULL, "renesas,prr");
+ if (np) {
+ chipid = of_iomap(np, 0);
+ of_node_put(np);
+ } else if (soc->id) {
+ chipid = ioremap(family->reg, 4);
+ }
+ if (chipid) {
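+		/*
+		 * PRR/CCCR layout as decoded below: bits 15:8 hold the
+		 * product ID, bits 7:4 the major revision minus one, and
+		 * bits 3:0 the minor revision, i.e. the "ESx.y" cut.
+		 */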
+ product = readl(chipid);
+ iounmap(chipid);
+ /* R-Car M3-W ES1.1 incorrectly identifies as ES2.0 */
+ if ((product & 0x7fff) == 0x5210)
+ product ^= 0x11;
+ /* R-Car M3-W ES1.3 incorrectly identifies as ES2.1 */
+ if ((product & 0x7fff) == 0x5211)
+ product ^= 0x12;
+ if (soc->id && ((product >> 8) & 0xff) != soc->id) {
+ pr_warn("SoC mismatch (product = 0x%x)\n", product);
+ return -ENODEV;
+ }
+ }
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ np = of_find_node_by_path("/");
+ of_property_read_string(np, "model", &soc_dev_attr->machine);
+ of_node_put(np);
+
+ soc_dev_attr->family = kstrdup_const(family->name, GFP_KERNEL);
+ soc_dev_attr->soc_id = kstrdup_const(strchr(match->compatible, ',') + 1,
+ GFP_KERNEL);
+ if (chipid)
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "ES%u.%u",
+ ((product >> 4) & 0x0f) + 1,
+ product & 0xf);
+
+ pr_info("Detected Renesas %s %s %s\n", soc_dev_attr->family,
+ soc_dev_attr->soc_id, soc_dev_attr->revision ?: "");
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ kfree(soc_dev_attr->revision);
+ kfree_const(soc_dev_attr->soc_id);
+ kfree_const(soc_dev_attr->family);
+ kfree(soc_dev_attr);
+ return PTR_ERR(soc_dev);
+ }
+
+ return 0;
+}
+early_initcall(renesas_soc_init);
diff --git a/drivers/soc/rockchip/Kconfig b/drivers/soc/rockchip/Kconfig
new file mode 100644
index 000000000..d483b0e29
--- /dev/null
+++ b/drivers/soc/rockchip/Kconfig
@@ -0,0 +1,28 @@
+if ARCH_ROCKCHIP || COMPILE_TEST
+
+#
+# Rockchip SoC drivers
+#
+
+config ROCKCHIP_GRF
+ bool "Rockchip General Register Files support" if COMPILE_TEST
+ default y if ARCH_ROCKCHIP
+ help
+	  The General Register Files are a central component providing
+	  special settings registers for many SoC components. In many
+	  cases, default values also need to be initialized there to make
+	  some of those components conform to the kernel's expectations.
+
+config ROCKCHIP_PM_DOMAINS
+ bool "Rockchip generic power domain"
+ depends on PM
+ select PM_GENERIC_DOMAINS
+ help
+ Say y here to enable power domain support.
+	  In order to meet high performance and low power requirements, a
+	  power management unit (PMU) is designed for saving power when the
+	  RK3288 is in low power mode. The RK3288 PMU is dedicated to
+	  managing the power of the whole chip.
+
+ If unsure, say N.
+
+endif
diff --git a/drivers/soc/rockchip/Makefile b/drivers/soc/rockchip/Makefile
new file mode 100644
index 000000000..c851fa005
--- /dev/null
+++ b/drivers/soc/rockchip/Makefile
@@ -0,0 +1,5 @@
+#
+# Rockchip SoC drivers
+#
+obj-$(CONFIG_ROCKCHIP_GRF) += grf.o
+obj-$(CONFIG_ROCKCHIP_PM_DOMAINS) += pm_domains.o
diff --git a/drivers/soc/rockchip/grf.c b/drivers/soc/rockchip/grf.c
new file mode 100644
index 000000000..3e7e999ee
--- /dev/null
+++ b/drivers/soc/rockchip/grf.c
@@ -0,0 +1,180 @@
+/*
+ * Rockchip Generic Register Files setup
+ *
+ * Copyright (c) 2016 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
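+/*
+ * GRF registers take the upper 16 bits of a write as a write-enable mask
+ * for the lower 16 bits, so fields can be updated without read-modify-write;
+ * e.g. HIWORD_UPDATE(0, 1, 11) clears bit 11 and leaves all others untouched.
+ */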
+#define HIWORD_UPDATE(val, mask, shift) \
+ ((val) << (shift) | (mask) << ((shift) + 16))
+
+struct rockchip_grf_value {
+ const char *desc;
+ u32 reg;
+ u32 val;
+};
+
+struct rockchip_grf_info {
+ const struct rockchip_grf_value *values;
+ int num_values;
+};
+
+#define RK3036_GRF_SOC_CON0 0x140
+
+static const struct rockchip_grf_value rk3036_defaults[] __initconst = {
+ /*
+	 * Disable automatic jtag/sdmmc switching, which causes issues with
+	 * the clock framework and the MMC controllers, making them unreliable.
+ */
+ { "jtag switching", RK3036_GRF_SOC_CON0, HIWORD_UPDATE(0, 1, 11) },
+};
+
+static const struct rockchip_grf_info rk3036_grf __initconst = {
+ .values = rk3036_defaults,
+ .num_values = ARRAY_SIZE(rk3036_defaults),
+};
+
+#define RK3128_GRF_SOC_CON0 0x140
+
+static const struct rockchip_grf_value rk3128_defaults[] __initconst = {
+ { "jtag switching", RK3128_GRF_SOC_CON0, HIWORD_UPDATE(0, 1, 8) },
+};
+
+static const struct rockchip_grf_info rk3128_grf __initconst = {
+ .values = rk3128_defaults,
+ .num_values = ARRAY_SIZE(rk3128_defaults),
+};
+
+#define RK3228_GRF_SOC_CON6 0x418
+
+static const struct rockchip_grf_value rk3228_defaults[] __initconst = {
+ { "jtag switching", RK3228_GRF_SOC_CON6, HIWORD_UPDATE(0, 1, 8) },
+};
+
+static const struct rockchip_grf_info rk3228_grf __initconst = {
+ .values = rk3228_defaults,
+ .num_values = ARRAY_SIZE(rk3228_defaults),
+};
+
+#define RK3288_GRF_SOC_CON0 0x244
+#define RK3288_GRF_SOC_CON2 0x24c
+
+static const struct rockchip_grf_value rk3288_defaults[] __initconst = {
+ { "jtag switching", RK3288_GRF_SOC_CON0, HIWORD_UPDATE(0, 1, 12) },
+ { "pwm select", RK3288_GRF_SOC_CON2, HIWORD_UPDATE(1, 1, 0) },
+};
+
+static const struct rockchip_grf_info rk3288_grf __initconst = {
+ .values = rk3288_defaults,
+ .num_values = ARRAY_SIZE(rk3288_defaults),
+};
+
+#define RK3328_GRF_SOC_CON4 0x410
+
+static const struct rockchip_grf_value rk3328_defaults[] __initconst = {
+ { "jtag switching", RK3328_GRF_SOC_CON4, HIWORD_UPDATE(0, 1, 12) },
+};
+
+static const struct rockchip_grf_info rk3328_grf __initconst = {
+ .values = rk3328_defaults,
+ .num_values = ARRAY_SIZE(rk3328_defaults),
+};
+
+#define RK3368_GRF_SOC_CON15 0x43c
+
+static const struct rockchip_grf_value rk3368_defaults[] __initconst = {
+ { "jtag switching", RK3368_GRF_SOC_CON15, HIWORD_UPDATE(0, 1, 13) },
+};
+
+static const struct rockchip_grf_info rk3368_grf __initconst = {
+ .values = rk3368_defaults,
+ .num_values = ARRAY_SIZE(rk3368_defaults),
+};
+
+#define RK3399_GRF_SOC_CON7 0xe21c
+
+static const struct rockchip_grf_value rk3399_defaults[] __initconst = {
+ { "jtag switching", RK3399_GRF_SOC_CON7, HIWORD_UPDATE(0, 1, 12) },
+};
+
+static const struct rockchip_grf_info rk3399_grf __initconst = {
+ .values = rk3399_defaults,
+ .num_values = ARRAY_SIZE(rk3399_defaults),
+};
+
+static const struct of_device_id rockchip_grf_dt_match[] __initconst = {
+ {
+ .compatible = "rockchip,rk3036-grf",
+ .data = (void *)&rk3036_grf,
+ }, {
+ .compatible = "rockchip,rk3128-grf",
+ .data = (void *)&rk3128_grf,
+ }, {
+ .compatible = "rockchip,rk3228-grf",
+ .data = (void *)&rk3228_grf,
+ }, {
+ .compatible = "rockchip,rk3288-grf",
+ .data = (void *)&rk3288_grf,
+ }, {
+ .compatible = "rockchip,rk3328-grf",
+ .data = (void *)&rk3328_grf,
+ }, {
+ .compatible = "rockchip,rk3368-grf",
+ .data = (void *)&rk3368_grf,
+ }, {
+ .compatible = "rockchip,rk3399-grf",
+ .data = (void *)&rk3399_grf,
+ },
+ { /* sentinel */ },
+};
+
+static int __init rockchip_grf_init(void)
+{
+ const struct rockchip_grf_info *grf_info;
+ const struct of_device_id *match;
+ struct device_node *np;
+ struct regmap *grf;
+ int ret, i;
+
+ np = of_find_matching_node_and_match(NULL, rockchip_grf_dt_match,
+ &match);
+ if (!np)
+ return -ENODEV;
+ if (!match || !match->data) {
+ pr_err("%s: missing grf data\n", __func__);
+ of_node_put(np);
+ return -EINVAL;
+ }
+
+ grf_info = match->data;
+
+ grf = syscon_node_to_regmap(np);
+ of_node_put(np);
+ if (IS_ERR(grf)) {
+ pr_err("%s: could not get grf syscon\n", __func__);
+ return PTR_ERR(grf);
+ }
+
+ for (i = 0; i < grf_info->num_values; i++) {
+ const struct rockchip_grf_value *val = &grf_info->values[i];
+
+ pr_debug("%s: adjusting %s in %#6x to %#10x\n", __func__,
+ val->desc, val->reg, val->val);
+ ret = regmap_write(grf, val->reg, val->val);
+ if (ret < 0)
+ pr_err("%s: write to %#6x failed with %d\n",
+ __func__, val->reg, ret);
+ }
+
+ return 0;
+}
+postcore_initcall(rockchip_grf_init);
diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
new file mode 100644
index 000000000..6dff86821
--- /dev/null
+++ b/drivers/soc/rockchip/pm_domains.c
@@ -0,0 +1,1000 @@
+/*
+ * Rockchip Generic power domain support.
+ *
+ * Copyright (c) 2015 ROCKCHIP, Co. Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/err.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_domain.h>
+#include <linux/of_address.h>
+#include <linux/of_clk.h>
+#include <linux/of_platform.h>
+#include <linux/clk.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <dt-bindings/power/px30-power.h>
+#include <dt-bindings/power/rk3036-power.h>
+#include <dt-bindings/power/rk3128-power.h>
+#include <dt-bindings/power/rk3228-power.h>
+#include <dt-bindings/power/rk3288-power.h>
+#include <dt-bindings/power/rk3328-power.h>
+#include <dt-bindings/power/rk3366-power.h>
+#include <dt-bindings/power/rk3368-power.h>
+#include <dt-bindings/power/rk3399-power.h>
+
+struct rockchip_domain_info {
+ int pwr_mask;
+ int status_mask;
+ int req_mask;
+ int idle_mask;
+ int ack_mask;
+ bool active_wakeup;
+ int pwr_w_mask;
+ int req_w_mask;
+};
+
+struct rockchip_pmu_info {
+ u32 pwr_offset;
+ u32 status_offset;
+ u32 req_offset;
+ u32 idle_offset;
+ u32 ack_offset;
+
+ u32 core_pwrcnt_offset;
+ u32 gpu_pwrcnt_offset;
+
+ unsigned int core_power_transition_time;
+ unsigned int gpu_power_transition_time;
+
+ int num_domains;
+ const struct rockchip_domain_info *domain_info;
+};
+
+#define MAX_QOS_REGS_NUM 5
+#define QOS_PRIORITY 0x08
+#define QOS_MODE 0x0c
+#define QOS_BANDWIDTH 0x10
+#define QOS_SATURATION 0x14
+#define QOS_EXTCONTROL 0x18
+
+struct rockchip_pm_domain {
+ struct generic_pm_domain genpd;
+ const struct rockchip_domain_info *info;
+ struct rockchip_pmu *pmu;
+ int num_qos;
+ struct regmap **qos_regmap;
+ u32 *qos_save_regs[MAX_QOS_REGS_NUM];
+ int num_clks;
+ struct clk_bulk_data *clks;
+};
+
+struct rockchip_pmu {
+ struct device *dev;
+ struct regmap *regmap;
+ const struct rockchip_pmu_info *info;
+ struct mutex mutex; /* mutex lock for pmu */
+ struct genpd_onecell_data genpd_data;
+ struct generic_pm_domain *domains[];
+};
+
+#define to_rockchip_pd(gpd) container_of(gpd, struct rockchip_pm_domain, genpd)
+
+#define DOMAIN(pwr, status, req, idle, ack, wakeup) \
+{ \
+ .pwr_mask = (pwr >= 0) ? BIT(pwr) : 0, \
+ .status_mask = (status >= 0) ? BIT(status) : 0, \
+ .req_mask = (req >= 0) ? BIT(req) : 0, \
+ .idle_mask = (idle >= 0) ? BIT(idle) : 0, \
+ .ack_mask = (ack >= 0) ? BIT(ack) : 0, \
+ .active_wakeup = wakeup, \
+}
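+
+/* a negative bit index marks a control/status bit the domain lacks */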
+
+#define DOMAIN_M(pwr, status, req, idle, ack, wakeup) \
+{ \
+ .pwr_w_mask = (pwr >= 0) ? BIT(pwr + 16) : 0, \
+ .pwr_mask = (pwr >= 0) ? BIT(pwr) : 0, \
+ .status_mask = (status >= 0) ? BIT(status) : 0, \
+ .req_w_mask = (req >= 0) ? BIT(req + 16) : 0, \
+ .req_mask = (req >= 0) ? BIT(req) : 0, \
+ .idle_mask = (idle >= 0) ? BIT(idle) : 0, \
+ .ack_mask = (ack >= 0) ? BIT(ack) : 0, \
+ .active_wakeup = wakeup, \
+}
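+
+/*
+ * The *_w_mask fields describe registers with write-enable bits: the
+ * upper 16 bits of a write select which of the lower 16 bits may
+ * change, so a single regmap_write() can update one bit without a
+ * read-modify-write cycle.
+ */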
+
+#define DOMAIN_RK3036(req, ack, idle, wakeup) \
+{ \
+ .req_mask = (req >= 0) ? BIT(req) : 0, \
+ .req_w_mask = (req >= 0) ? BIT(req + 16) : 0, \
+ .ack_mask = (ack >= 0) ? BIT(ack) : 0, \
+ .idle_mask = (idle >= 0) ? BIT(idle) : 0, \
+ .active_wakeup = wakeup, \
+}
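+
+/*
+ * rk3036-style domains carry no power/status bits: they are driven
+ * purely through the idle request handshake, and their on/off state
+ * is inferred from the idle status (see rockchip_pmu_domain_is_on()).
+ */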
+
+#define DOMAIN_PX30(pwr, status, req, wakeup) \
+ DOMAIN_M(pwr, status, req, (req) + 16, req, wakeup)
+
+#define DOMAIN_RK3288(pwr, status, req, wakeup) \
+ DOMAIN(pwr, status, req, req, (req) + 16, wakeup)
+
+#define DOMAIN_RK3328(pwr, status, req, wakeup) \
+ DOMAIN_M(pwr, pwr, req, (req) + 10, req, wakeup)
+
+#define DOMAIN_RK3368(pwr, status, req, wakeup) \
+ DOMAIN(pwr, status, req, (req) + 16, req, wakeup)
+
+#define DOMAIN_RK3399(pwr, status, req, wakeup) \
+ DOMAIN(pwr, status, req, req, req, wakeup)
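+
+/*
+ * Example: DOMAIN_RK3288(9, 9, 2, false) (the rk3288 GPU domain)
+ * expands to pwr_mask = BIT(9), status_mask = BIT(9),
+ * req_mask = idle_mask = BIT(2), ack_mask = BIT(18) and
+ * active_wakeup = false.
+ */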
+
+static bool rockchip_pmu_domain_is_idle(struct rockchip_pm_domain *pd)
+{
+ struct rockchip_pmu *pmu = pd->pmu;
+ const struct rockchip_domain_info *pd_info = pd->info;
+ unsigned int val;
+
+ regmap_read(pmu->regmap, pmu->info->idle_offset, &val);
+ return (val & pd_info->idle_mask) == pd_info->idle_mask;
+}
+
+static unsigned int rockchip_pmu_read_ack(struct rockchip_pmu *pmu)
+{
+ unsigned int val;
+
+ regmap_read(pmu->regmap, pmu->info->ack_offset, &val);
+ return val;
+}
+
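+/*
+ * Ask the interconnect (NIU) to idle or un-idle the domain, then poll
+ * the ack and idle status bits until they reflect the request.
+ */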
+static int rockchip_pmu_set_idle_request(struct rockchip_pm_domain *pd,
+ bool idle)
+{
+ const struct rockchip_domain_info *pd_info = pd->info;
+ struct generic_pm_domain *genpd = &pd->genpd;
+ struct rockchip_pmu *pmu = pd->pmu;
+ unsigned int target_ack;
+ unsigned int val;
+ bool is_idle;
+ int ret;
+
+ if (pd_info->req_mask == 0)
+ return 0;
+ else if (pd_info->req_w_mask)
+ regmap_write(pmu->regmap, pmu->info->req_offset,
+ idle ? (pd_info->req_mask | pd_info->req_w_mask) :
+ pd_info->req_w_mask);
+ else
+ regmap_update_bits(pmu->regmap, pmu->info->req_offset,
+ pd_info->req_mask, idle ? -1U : 0);
+
+ dsb(sy);
+
+	/* Wait until the ack bits match the requested idle state */
+ target_ack = idle ? pd_info->ack_mask : 0;
+ ret = readx_poll_timeout_atomic(rockchip_pmu_read_ack, pmu, val,
+ (val & pd_info->ack_mask) == target_ack,
+ 0, 10000);
+ if (ret) {
+ dev_err(pmu->dev,
+ "failed to get ack on domain '%s', val=0x%x\n",
+ genpd->name, val);
+ return ret;
+ }
+
+ ret = readx_poll_timeout_atomic(rockchip_pmu_domain_is_idle, pd,
+ is_idle, is_idle == idle, 0, 10000);
+ if (ret) {
+ dev_err(pmu->dev,
+ "failed to set idle on domain '%s', val=%d\n",
+ genpd->name, is_idle);
+ return ret;
+ }
+
+ return 0;
+}
+
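+/*
+ * A domain's QoS registers lose their contents across a power-down, so
+ * rockchip_pd_power() saves them before switching the domain off and
+ * restores them after switching it back on.
+ */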
+static int rockchip_pmu_save_qos(struct rockchip_pm_domain *pd)
+{
+ int i;
+
+ for (i = 0; i < pd->num_qos; i++) {
+ regmap_read(pd->qos_regmap[i],
+ QOS_PRIORITY,
+ &pd->qos_save_regs[0][i]);
+ regmap_read(pd->qos_regmap[i],
+ QOS_MODE,
+ &pd->qos_save_regs[1][i]);
+ regmap_read(pd->qos_regmap[i],
+ QOS_BANDWIDTH,
+ &pd->qos_save_regs[2][i]);
+ regmap_read(pd->qos_regmap[i],
+ QOS_SATURATION,
+ &pd->qos_save_regs[3][i]);
+ regmap_read(pd->qos_regmap[i],
+ QOS_EXTCONTROL,
+ &pd->qos_save_regs[4][i]);
+ }
+ return 0;
+}
+
+static int rockchip_pmu_restore_qos(struct rockchip_pm_domain *pd)
+{
+ int i;
+
+ for (i = 0; i < pd->num_qos; i++) {
+ regmap_write(pd->qos_regmap[i],
+ QOS_PRIORITY,
+ pd->qos_save_regs[0][i]);
+ regmap_write(pd->qos_regmap[i],
+ QOS_MODE,
+ pd->qos_save_regs[1][i]);
+ regmap_write(pd->qos_regmap[i],
+ QOS_BANDWIDTH,
+ pd->qos_save_regs[2][i]);
+ regmap_write(pd->qos_regmap[i],
+ QOS_SATURATION,
+ pd->qos_save_regs[3][i]);
+ regmap_write(pd->qos_regmap[i],
+ QOS_EXTCONTROL,
+ pd->qos_save_regs[4][i]);
+ }
+
+ return 0;
+}
+
+static bool rockchip_pmu_domain_is_on(struct rockchip_pm_domain *pd)
+{
+ struct rockchip_pmu *pmu = pd->pmu;
+ unsigned int val;
+
+ /* check idle status for idle-only domains */
+ if (pd->info->status_mask == 0)
+ return !rockchip_pmu_domain_is_idle(pd);
+
+ regmap_read(pmu->regmap, pmu->info->status_offset, &val);
+
+ /* 1'b0: power on, 1'b1: power off */
+ return !(val & pd->info->status_mask);
+}
+
+static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd,
+ bool on)
+{
+ struct rockchip_pmu *pmu = pd->pmu;
+ struct generic_pm_domain *genpd = &pd->genpd;
+ bool is_on;
+
+ if (pd->info->pwr_mask == 0)
+ return;
+ else if (pd->info->pwr_w_mask)
+ regmap_write(pmu->regmap, pmu->info->pwr_offset,
+ on ? pd->info->pwr_w_mask :
+ (pd->info->pwr_mask | pd->info->pwr_w_mask));
+ else
+ regmap_update_bits(pmu->regmap, pmu->info->pwr_offset,
+ pd->info->pwr_mask, on ? 0 : -1U);
+
+ dsb(sy);
+
+ if (readx_poll_timeout_atomic(rockchip_pmu_domain_is_on, pd, is_on,
+ is_on == on, 0, 10000)) {
+ dev_err(pmu->dev,
+ "failed to set domain '%s', val=%d\n",
+ genpd->name, is_on);
+ return;
+ }
+}
+
+static int rockchip_pd_power(struct rockchip_pm_domain *pd, bool power_on)
+{
+ struct rockchip_pmu *pmu = pd->pmu;
+ int ret;
+
+ mutex_lock(&pmu->mutex);
+
+ if (rockchip_pmu_domain_is_on(pd) != power_on) {
+ ret = clk_bulk_enable(pd->num_clks, pd->clks);
+ if (ret < 0) {
+ dev_err(pmu->dev, "failed to enable clocks\n");
+ mutex_unlock(&pmu->mutex);
+ return ret;
+ }
+
+ if (!power_on) {
+ rockchip_pmu_save_qos(pd);
+
+			/* if powering down, request bus idle from the NIU first */
+ rockchip_pmu_set_idle_request(pd, true);
+ }
+
+ rockchip_do_pmu_set_power_domain(pd, power_on);
+
+ if (power_on) {
+ /* if powering up, leave idle mode */
+ rockchip_pmu_set_idle_request(pd, false);
+
+ rockchip_pmu_restore_qos(pd);
+ }
+
+ clk_bulk_disable(pd->num_clks, pd->clks);
+ }
+
+ mutex_unlock(&pmu->mutex);
+ return 0;
+}
+
+static int rockchip_pd_power_on(struct generic_pm_domain *domain)
+{
+ struct rockchip_pm_domain *pd = to_rockchip_pd(domain);
+
+ return rockchip_pd_power(pd, true);
+}
+
+static int rockchip_pd_power_off(struct generic_pm_domain *domain)
+{
+ struct rockchip_pm_domain *pd = to_rockchip_pd(domain);
+
+ return rockchip_pd_power(pd, false);
+}
+
+static int rockchip_pd_attach_dev(struct generic_pm_domain *genpd,
+ struct device *dev)
+{
+ struct clk *clk;
+ int i;
+ int error;
+
+ dev_dbg(dev, "attaching to power domain '%s'\n", genpd->name);
+
+ error = pm_clk_create(dev);
+ if (error) {
+ dev_err(dev, "pm_clk_create failed %d\n", error);
+ return error;
+ }
+
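+	/* register every clock referenced by the device's DT node as a PM clock */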
+ i = 0;
+ while ((clk = of_clk_get(dev->of_node, i++)) && !IS_ERR(clk)) {
+ dev_dbg(dev, "adding clock '%pC' to list of PM clocks\n", clk);
+ error = pm_clk_add_clk(dev, clk);
+ if (error) {
+ dev_err(dev, "pm_clk_add_clk failed %d\n", error);
+ clk_put(clk);
+ pm_clk_destroy(dev);
+ return error;
+ }
+ }
+
+ return 0;
+}
+
+static void rockchip_pd_detach_dev(struct generic_pm_domain *genpd,
+ struct device *dev)
+{
+ dev_dbg(dev, "detaching from power domain '%s'\n", genpd->name);
+
+ pm_clk_destroy(dev);
+}
+
+static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
+ struct device_node *node)
+{
+ const struct rockchip_domain_info *pd_info;
+ struct rockchip_pm_domain *pd;
+ struct device_node *qos_node;
+ int i, j;
+ u32 id;
+ int error;
+
+ error = of_property_read_u32(node, "reg", &id);
+ if (error) {
+ dev_err(pmu->dev,
+ "%s: failed to retrieve domain id (reg): %d\n",
+ node->name, error);
+ return -EINVAL;
+ }
+
+ if (id >= pmu->info->num_domains) {
+ dev_err(pmu->dev, "%s: invalid domain id %d\n",
+ node->name, id);
+ return -EINVAL;
+ }
+
+ pd_info = &pmu->info->domain_info[id];
+ if (!pd_info) {
+ dev_err(pmu->dev, "%s: undefined domain id %d\n",
+ node->name, id);
+ return -EINVAL;
+ }
+
+ pd = devm_kzalloc(pmu->dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return -ENOMEM;
+
+ pd->info = pd_info;
+ pd->pmu = pmu;
+
+ pd->num_clks = of_clk_get_parent_count(node);
+ if (pd->num_clks > 0) {
+ pd->clks = devm_kcalloc(pmu->dev, pd->num_clks,
+ sizeof(*pd->clks), GFP_KERNEL);
+ if (!pd->clks)
+ return -ENOMEM;
+ } else {
+ dev_dbg(pmu->dev, "%s: doesn't have clocks: %d\n",
+ node->name, pd->num_clks);
+ pd->num_clks = 0;
+ }
+
+ for (i = 0; i < pd->num_clks; i++) {
+ pd->clks[i].clk = of_clk_get(node, i);
+ if (IS_ERR(pd->clks[i].clk)) {
+ error = PTR_ERR(pd->clks[i].clk);
+ dev_err(pmu->dev,
+ "%s: failed to get clk at index %d: %d\n",
+ node->name, i, error);
+ return error;
+ }
+ }
+
+ error = clk_bulk_prepare(pd->num_clks, pd->clks);
+ if (error)
+ goto err_put_clocks;
+
+ pd->num_qos = of_count_phandle_with_args(node, "pm_qos",
+ NULL);
+
+ if (pd->num_qos > 0) {
+ pd->qos_regmap = devm_kcalloc(pmu->dev, pd->num_qos,
+ sizeof(*pd->qos_regmap),
+ GFP_KERNEL);
+ if (!pd->qos_regmap) {
+ error = -ENOMEM;
+ goto err_unprepare_clocks;
+ }
+
+ for (j = 0; j < MAX_QOS_REGS_NUM; j++) {
+ pd->qos_save_regs[j] = devm_kcalloc(pmu->dev,
+ pd->num_qos,
+ sizeof(u32),
+ GFP_KERNEL);
+ if (!pd->qos_save_regs[j]) {
+ error = -ENOMEM;
+ goto err_unprepare_clocks;
+ }
+ }
+
+ for (j = 0; j < pd->num_qos; j++) {
+ qos_node = of_parse_phandle(node, "pm_qos", j);
+ if (!qos_node) {
+ error = -ENODEV;
+ goto err_unprepare_clocks;
+ }
+ pd->qos_regmap[j] = syscon_node_to_regmap(qos_node);
+ if (IS_ERR(pd->qos_regmap[j])) {
+ error = -ENODEV;
+ of_node_put(qos_node);
+ goto err_unprepare_clocks;
+ }
+ of_node_put(qos_node);
+ }
+ }
+
+ error = rockchip_pd_power(pd, true);
+ if (error) {
+ dev_err(pmu->dev,
+ "failed to power on domain '%s': %d\n",
+ node->name, error);
+ goto err_unprepare_clocks;
+ }
+
+ pd->genpd.name = node->name;
+ pd->genpd.power_off = rockchip_pd_power_off;
+ pd->genpd.power_on = rockchip_pd_power_on;
+ pd->genpd.attach_dev = rockchip_pd_attach_dev;
+ pd->genpd.detach_dev = rockchip_pd_detach_dev;
+ pd->genpd.flags = GENPD_FLAG_PM_CLK;
+ if (pd_info->active_wakeup)
+ pd->genpd.flags |= GENPD_FLAG_ACTIVE_WAKEUP;
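+	/* the domain was powered on above, so register it as initially on */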
+ pm_genpd_init(&pd->genpd, NULL, false);
+
+ pmu->genpd_data.domains[id] = &pd->genpd;
+ return 0;
+
+err_unprepare_clocks:
+ clk_bulk_unprepare(pd->num_clks, pd->clks);
+err_put_clocks:
+ clk_bulk_put(pd->num_clks, pd->clks);
+ return error;
+}
+
+static void rockchip_pm_remove_one_domain(struct rockchip_pm_domain *pd)
+{
+ int ret;
+
+ /*
+ * We're in the error cleanup already, so we only complain,
+ * but won't emit another error on top of the original one.
+ */
+ ret = pm_genpd_remove(&pd->genpd);
+ if (ret < 0)
+ dev_err(pd->pmu->dev, "failed to remove domain '%s' : %d - state may be inconsistent\n",
+ pd->genpd.name, ret);
+
+ clk_bulk_unprepare(pd->num_clks, pd->clks);
+ clk_bulk_put(pd->num_clks, pd->clks);
+
+	/* protect the zeroing of pd->num_clks */
+ mutex_lock(&pd->pmu->mutex);
+ pd->num_clks = 0;
+ mutex_unlock(&pd->pmu->mutex);
+
+ /* devm will free our memory */
+}
+
+static void rockchip_pm_domain_cleanup(struct rockchip_pmu *pmu)
+{
+ struct generic_pm_domain *genpd;
+ struct rockchip_pm_domain *pd;
+ int i;
+
+ for (i = 0; i < pmu->genpd_data.num_domains; i++) {
+ genpd = pmu->genpd_data.domains[i];
+ if (genpd) {
+ pd = to_rockchip_pd(genpd);
+ rockchip_pm_remove_one_domain(pd);
+ }
+ }
+
+ /* devm will free our memory */
+}
+
+static void rockchip_configure_pd_cnt(struct rockchip_pmu *pmu,
+ u32 domain_reg_offset,
+ unsigned int count)
+{
+ /* First configure domain power down transition count ... */
+ regmap_write(pmu->regmap, domain_reg_offset, count);
+ /* ... and then power up count. */
+ regmap_write(pmu->regmap, domain_reg_offset + 4, count);
+}
+
+static int rockchip_pm_add_subdomain(struct rockchip_pmu *pmu,
+ struct device_node *parent)
+{
+ struct device_node *np;
+ struct generic_pm_domain *child_domain, *parent_domain;
+ int error;
+
+ for_each_child_of_node(parent, np) {
+ u32 idx;
+
+ error = of_property_read_u32(parent, "reg", &idx);
+ if (error) {
+ dev_err(pmu->dev,
+ "%s: failed to retrieve domain id (reg): %d\n",
+ parent->name, error);
+ goto err_out;
+ }
+ parent_domain = pmu->genpd_data.domains[idx];
+
+ error = rockchip_pm_add_one_domain(pmu, np);
+ if (error) {
+ dev_err(pmu->dev, "failed to handle node %s: %d\n",
+ np->name, error);
+ goto err_out;
+ }
+
+ error = of_property_read_u32(np, "reg", &idx);
+ if (error) {
+ dev_err(pmu->dev,
+ "%s: failed to retrieve domain id (reg): %d\n",
+ np->name, error);
+ goto err_out;
+ }
+ child_domain = pmu->genpd_data.domains[idx];
+
+ error = pm_genpd_add_subdomain(parent_domain, child_domain);
+ if (error) {
+ dev_err(pmu->dev, "%s failed to add subdomain %s: %d\n",
+ parent_domain->name, child_domain->name, error);
+ goto err_out;
+ } else {
+ dev_dbg(pmu->dev, "%s add subdomain: %s\n",
+ parent_domain->name, child_domain->name);
+ }
+
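+		/* recurse to pick up grandchild power domains, if any */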
+ rockchip_pm_add_subdomain(pmu, np);
+ }
+
+ return 0;
+
+err_out:
+ of_node_put(np);
+ return error;
+}
+
+static int rockchip_pm_domain_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *node;
+ struct device *parent;
+ struct rockchip_pmu *pmu;
+ const struct of_device_id *match;
+ const struct rockchip_pmu_info *pmu_info;
+ int error;
+
+ if (!np) {
+ dev_err(dev, "device tree node not found\n");
+ return -ENODEV;
+ }
+
+ match = of_match_device(dev->driver->of_match_table, dev);
+ if (!match || !match->data) {
+ dev_err(dev, "missing pmu data\n");
+ return -EINVAL;
+ }
+
+ pmu_info = match->data;
+
+ pmu = devm_kzalloc(dev,
+ struct_size(pmu, domains, pmu_info->num_domains),
+ GFP_KERNEL);
+ if (!pmu)
+ return -ENOMEM;
+
+ pmu->dev = &pdev->dev;
+ mutex_init(&pmu->mutex);
+
+ pmu->info = pmu_info;
+
+ pmu->genpd_data.domains = pmu->domains;
+ pmu->genpd_data.num_domains = pmu_info->num_domains;
+
+ parent = dev->parent;
+ if (!parent) {
+ dev_err(dev, "no parent for syscon devices\n");
+ return -ENODEV;
+ }
+
+ pmu->regmap = syscon_node_to_regmap(parent->of_node);
+ if (IS_ERR(pmu->regmap)) {
+ dev_err(dev, "no regmap available\n");
+ return PTR_ERR(pmu->regmap);
+ }
+
+ /*
+ * Configure power up and down transition delays for CORE
+ * and GPU domains.
+ */
+ if (pmu_info->core_power_transition_time)
+ rockchip_configure_pd_cnt(pmu, pmu_info->core_pwrcnt_offset,
+ pmu_info->core_power_transition_time);
+ if (pmu_info->gpu_pwrcnt_offset)
+ rockchip_configure_pd_cnt(pmu, pmu_info->gpu_pwrcnt_offset,
+ pmu_info->gpu_power_transition_time);
+
+ error = -ENODEV;
+
+ for_each_available_child_of_node(np, node) {
+ error = rockchip_pm_add_one_domain(pmu, node);
+ if (error) {
+ dev_err(dev, "failed to handle node %s: %d\n",
+ node->name, error);
+ of_node_put(node);
+ goto err_out;
+ }
+
+ error = rockchip_pm_add_subdomain(pmu, node);
+ if (error < 0) {
+ dev_err(dev, "failed to handle subdomain node %s: %d\n",
+ node->name, error);
+ of_node_put(node);
+ goto err_out;
+ }
+ }
+
+ if (error) {
+ dev_dbg(dev, "no power domains defined\n");
+ goto err_out;
+ }
+
+ error = of_genpd_add_provider_onecell(np, &pmu->genpd_data);
+ if (error) {
+ dev_err(dev, "failed to add provider: %d\n", error);
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ rockchip_pm_domain_cleanup(pmu);
+ return error;
+}
+
+static const struct rockchip_domain_info px30_pm_domains[] = {
+ [PX30_PD_USB] = DOMAIN_PX30(5, 5, 10, false),
+ [PX30_PD_SDCARD] = DOMAIN_PX30(8, 8, 9, false),
+ [PX30_PD_GMAC] = DOMAIN_PX30(10, 10, 6, false),
+ [PX30_PD_MMC_NAND] = DOMAIN_PX30(11, 11, 5, false),
+ [PX30_PD_VPU] = DOMAIN_PX30(12, 12, 14, false),
+ [PX30_PD_VO] = DOMAIN_PX30(13, 13, 7, false),
+ [PX30_PD_VI] = DOMAIN_PX30(14, 14, 8, false),
+ [PX30_PD_GPU] = DOMAIN_PX30(15, 15, 2, false),
+};
+
+static const struct rockchip_domain_info rk3036_pm_domains[] = {
+ [RK3036_PD_MSCH] = DOMAIN_RK3036(14, 23, 30, true),
+ [RK3036_PD_CORE] = DOMAIN_RK3036(13, 17, 24, false),
+ [RK3036_PD_PERI] = DOMAIN_RK3036(12, 18, 25, false),
+ [RK3036_PD_VIO] = DOMAIN_RK3036(11, 19, 26, false),
+ [RK3036_PD_VPU] = DOMAIN_RK3036(10, 20, 27, false),
+ [RK3036_PD_GPU] = DOMAIN_RK3036(9, 21, 28, false),
+ [RK3036_PD_SYS] = DOMAIN_RK3036(8, 22, 29, false),
+};
+
+static const struct rockchip_domain_info rk3128_pm_domains[] = {
+ [RK3128_PD_CORE] = DOMAIN_RK3288(0, 0, 4, false),
+ [RK3128_PD_MSCH] = DOMAIN_RK3288(-1, -1, 6, true),
+ [RK3128_PD_VIO] = DOMAIN_RK3288(3, 3, 2, false),
+ [RK3128_PD_VIDEO] = DOMAIN_RK3288(2, 2, 1, false),
+ [RK3128_PD_GPU] = DOMAIN_RK3288(1, 1, 3, false),
+};
+
+static const struct rockchip_domain_info rk3228_pm_domains[] = {
+ [RK3228_PD_CORE] = DOMAIN_RK3036(0, 0, 16, true),
+ [RK3228_PD_MSCH] = DOMAIN_RK3036(1, 1, 17, true),
+ [RK3228_PD_BUS] = DOMAIN_RK3036(2, 2, 18, true),
+ [RK3228_PD_SYS] = DOMAIN_RK3036(3, 3, 19, true),
+ [RK3228_PD_VIO] = DOMAIN_RK3036(4, 4, 20, false),
+ [RK3228_PD_VOP] = DOMAIN_RK3036(5, 5, 21, false),
+ [RK3228_PD_VPU] = DOMAIN_RK3036(6, 6, 22, false),
+ [RK3228_PD_RKVDEC] = DOMAIN_RK3036(7, 7, 23, false),
+ [RK3228_PD_GPU] = DOMAIN_RK3036(8, 8, 24, false),
+ [RK3228_PD_PERI] = DOMAIN_RK3036(9, 9, 25, true),
+ [RK3228_PD_GMAC] = DOMAIN_RK3036(10, 10, 26, false),
+};
+
+static const struct rockchip_domain_info rk3288_pm_domains[] = {
+ [RK3288_PD_VIO] = DOMAIN_RK3288(7, 7, 4, false),
+ [RK3288_PD_HEVC] = DOMAIN_RK3288(14, 10, 9, false),
+ [RK3288_PD_VIDEO] = DOMAIN_RK3288(8, 8, 3, false),
+ [RK3288_PD_GPU] = DOMAIN_RK3288(9, 9, 2, false),
+};
+
+static const struct rockchip_domain_info rk3328_pm_domains[] = {
+ [RK3328_PD_CORE] = DOMAIN_RK3328(-1, 0, 0, false),
+ [RK3328_PD_GPU] = DOMAIN_RK3328(-1, 1, 1, false),
+ [RK3328_PD_BUS] = DOMAIN_RK3328(-1, 2, 2, true),
+ [RK3328_PD_MSCH] = DOMAIN_RK3328(-1, 3, 3, true),
+ [RK3328_PD_PERI] = DOMAIN_RK3328(-1, 4, 4, true),
+ [RK3328_PD_VIDEO] = DOMAIN_RK3328(-1, 5, 5, false),
+ [RK3328_PD_HEVC] = DOMAIN_RK3328(-1, 6, 6, false),
+ [RK3328_PD_VIO] = DOMAIN_RK3328(-1, 8, 8, false),
+ [RK3328_PD_VPU] = DOMAIN_RK3328(-1, 9, 9, false),
+};
+
+static const struct rockchip_domain_info rk3366_pm_domains[] = {
+ [RK3366_PD_PERI] = DOMAIN_RK3368(10, 10, 6, true),
+ [RK3366_PD_VIO] = DOMAIN_RK3368(14, 14, 8, false),
+ [RK3366_PD_VIDEO] = DOMAIN_RK3368(13, 13, 7, false),
+ [RK3366_PD_RKVDEC] = DOMAIN_RK3368(11, 11, 7, false),
+ [RK3366_PD_WIFIBT] = DOMAIN_RK3368(8, 8, 9, false),
+ [RK3366_PD_VPU] = DOMAIN_RK3368(12, 12, 7, false),
+ [RK3366_PD_GPU] = DOMAIN_RK3368(15, 15, 2, false),
+};
+
+static const struct rockchip_domain_info rk3368_pm_domains[] = {
+ [RK3368_PD_PERI] = DOMAIN_RK3368(13, 12, 6, true),
+ [RK3368_PD_VIO] = DOMAIN_RK3368(15, 14, 8, false),
+ [RK3368_PD_VIDEO] = DOMAIN_RK3368(14, 13, 7, false),
+ [RK3368_PD_GPU_0] = DOMAIN_RK3368(16, 15, 2, false),
+ [RK3368_PD_GPU_1] = DOMAIN_RK3368(17, 16, 2, false),
+};
+
+static const struct rockchip_domain_info rk3399_pm_domains[] = {
+ [RK3399_PD_TCPD0] = DOMAIN_RK3399(8, 8, -1, false),
+ [RK3399_PD_TCPD1] = DOMAIN_RK3399(9, 9, -1, false),
+ [RK3399_PD_CCI] = DOMAIN_RK3399(10, 10, -1, true),
+ [RK3399_PD_CCI0] = DOMAIN_RK3399(-1, -1, 15, true),
+ [RK3399_PD_CCI1] = DOMAIN_RK3399(-1, -1, 16, true),
+ [RK3399_PD_PERILP] = DOMAIN_RK3399(11, 11, 1, true),
+ [RK3399_PD_PERIHP] = DOMAIN_RK3399(12, 12, 2, true),
+ [RK3399_PD_CENTER] = DOMAIN_RK3399(13, 13, 14, true),
+ [RK3399_PD_VIO] = DOMAIN_RK3399(14, 14, 17, false),
+ [RK3399_PD_GPU] = DOMAIN_RK3399(15, 15, 0, false),
+ [RK3399_PD_VCODEC] = DOMAIN_RK3399(16, 16, 3, false),
+ [RK3399_PD_VDU] = DOMAIN_RK3399(17, 17, 4, false),
+ [RK3399_PD_RGA] = DOMAIN_RK3399(18, 18, 5, false),
+ [RK3399_PD_IEP] = DOMAIN_RK3399(19, 19, 6, false),
+ [RK3399_PD_VO] = DOMAIN_RK3399(20, 20, -1, false),
+ [RK3399_PD_VOPB] = DOMAIN_RK3399(-1, -1, 7, false),
+ [RK3399_PD_VOPL] = DOMAIN_RK3399(-1, -1, 8, false),
+ [RK3399_PD_ISP0] = DOMAIN_RK3399(22, 22, 9, false),
+ [RK3399_PD_ISP1] = DOMAIN_RK3399(23, 23, 10, false),
+ [RK3399_PD_HDCP] = DOMAIN_RK3399(24, 24, 11, false),
+ [RK3399_PD_GMAC] = DOMAIN_RK3399(25, 25, 23, true),
+ [RK3399_PD_EMMC] = DOMAIN_RK3399(26, 26, 24, true),
+ [RK3399_PD_USB3] = DOMAIN_RK3399(27, 27, 12, true),
+ [RK3399_PD_EDP] = DOMAIN_RK3399(28, 28, 22, false),
+ [RK3399_PD_GIC] = DOMAIN_RK3399(29, 29, 27, true),
+ [RK3399_PD_SD] = DOMAIN_RK3399(30, 30, 28, true),
+ [RK3399_PD_SDIOAUDIO] = DOMAIN_RK3399(31, 31, 29, true),
+};
+
+static const struct rockchip_pmu_info px30_pmu = {
+ .pwr_offset = 0x18,
+ .status_offset = 0x20,
+ .req_offset = 0x64,
+ .idle_offset = 0x6c,
+ .ack_offset = 0x6c,
+
+ .num_domains = ARRAY_SIZE(px30_pm_domains),
+ .domain_info = px30_pm_domains,
+};
+
+static const struct rockchip_pmu_info rk3036_pmu = {
+ .req_offset = 0x148,
+ .idle_offset = 0x14c,
+ .ack_offset = 0x14c,
+
+ .num_domains = ARRAY_SIZE(rk3036_pm_domains),
+ .domain_info = rk3036_pm_domains,
+};
+
+static const struct rockchip_pmu_info rk3128_pmu = {
+ .pwr_offset = 0x04,
+ .status_offset = 0x08,
+ .req_offset = 0x0c,
+ .idle_offset = 0x10,
+ .ack_offset = 0x10,
+
+ .num_domains = ARRAY_SIZE(rk3128_pm_domains),
+ .domain_info = rk3128_pm_domains,
+};
+
+static const struct rockchip_pmu_info rk3228_pmu = {
+ .req_offset = 0x40c,
+ .idle_offset = 0x488,
+ .ack_offset = 0x488,
+
+ .num_domains = ARRAY_SIZE(rk3228_pm_domains),
+ .domain_info = rk3228_pm_domains,
+};
+
+static const struct rockchip_pmu_info rk3288_pmu = {
+ .pwr_offset = 0x08,
+ .status_offset = 0x0c,
+ .req_offset = 0x10,
+ .idle_offset = 0x14,
+ .ack_offset = 0x14,
+
+ .core_pwrcnt_offset = 0x34,
+ .gpu_pwrcnt_offset = 0x3c,
+
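+	/* transition counts are in 24 MHz oscillator cycles, hence 24 == 1us */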
+ .core_power_transition_time = 24, /* 1us */
+ .gpu_power_transition_time = 24, /* 1us */
+
+ .num_domains = ARRAY_SIZE(rk3288_pm_domains),
+ .domain_info = rk3288_pm_domains,
+};
+
+static const struct rockchip_pmu_info rk3328_pmu = {
+ .req_offset = 0x414,
+ .idle_offset = 0x484,
+ .ack_offset = 0x484,
+
+ .num_domains = ARRAY_SIZE(rk3328_pm_domains),
+ .domain_info = rk3328_pm_domains,
+};
+
+static const struct rockchip_pmu_info rk3366_pmu = {
+ .pwr_offset = 0x0c,
+ .status_offset = 0x10,
+ .req_offset = 0x3c,
+ .idle_offset = 0x40,
+ .ack_offset = 0x40,
+
+ .core_pwrcnt_offset = 0x48,
+ .gpu_pwrcnt_offset = 0x50,
+
+ .core_power_transition_time = 24,
+ .gpu_power_transition_time = 24,
+
+ .num_domains = ARRAY_SIZE(rk3366_pm_domains),
+ .domain_info = rk3366_pm_domains,
+};
+
+static const struct rockchip_pmu_info rk3368_pmu = {
+ .pwr_offset = 0x0c,
+ .status_offset = 0x10,
+ .req_offset = 0x3c,
+ .idle_offset = 0x40,
+ .ack_offset = 0x40,
+
+ .core_pwrcnt_offset = 0x48,
+ .gpu_pwrcnt_offset = 0x50,
+
+ .core_power_transition_time = 24,
+ .gpu_power_transition_time = 24,
+
+ .num_domains = ARRAY_SIZE(rk3368_pm_domains),
+ .domain_info = rk3368_pm_domains,
+};
+
+static const struct rockchip_pmu_info rk3399_pmu = {
+ .pwr_offset = 0x14,
+ .status_offset = 0x18,
+ .req_offset = 0x60,
+ .idle_offset = 0x64,
+ .ack_offset = 0x68,
+
+ /* ARM Trusted Firmware manages power transition times */
+
+ .num_domains = ARRAY_SIZE(rk3399_pm_domains),
+ .domain_info = rk3399_pm_domains,
+};
+
+static const struct of_device_id rockchip_pm_domain_dt_match[] = {
+ {
+ .compatible = "rockchip,px30-power-controller",
+ .data = (void *)&px30_pmu,
+ },
+ {
+ .compatible = "rockchip,rk3036-power-controller",
+ .data = (void *)&rk3036_pmu,
+ },
+ {
+ .compatible = "rockchip,rk3128-power-controller",
+ .data = (void *)&rk3128_pmu,
+ },
+ {
+ .compatible = "rockchip,rk3228-power-controller",
+ .data = (void *)&rk3228_pmu,
+ },
+ {
+ .compatible = "rockchip,rk3288-power-controller",
+ .data = (void *)&rk3288_pmu,
+ },
+ {
+ .compatible = "rockchip,rk3328-power-controller",
+ .data = (void *)&rk3328_pmu,
+ },
+ {
+ .compatible = "rockchip,rk3366-power-controller",
+ .data = (void *)&rk3366_pmu,
+ },
+ {
+ .compatible = "rockchip,rk3368-power-controller",
+ .data = (void *)&rk3368_pmu,
+ },
+ {
+ .compatible = "rockchip,rk3399-power-controller",
+ .data = (void *)&rk3399_pmu,
+ },
+ { /* sentinel */ },
+};
+
+static struct platform_driver rockchip_pm_domain_driver = {
+ .probe = rockchip_pm_domain_probe,
+ .driver = {
+ .name = "rockchip-pm-domain",
+ .of_match_table = rockchip_pm_domain_dt_match,
+ /*
+		 * We can't forcibly eject devices from a power domain,
+		 * so we can't really remove power domains once they
+		 * have been added.
+ */
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init rockchip_pm_domain_drv_register(void)
+{
+ return platform_driver_register(&rockchip_pm_domain_driver);
+}
+postcore_initcall(rockchip_pm_domain_drv_register);
diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig
new file mode 100644
index 000000000..2186285fd
--- /dev/null
+++ b/drivers/soc/samsung/Kconfig
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# SAMSUNG SoC drivers
+#
+menuconfig SOC_SAMSUNG
+ bool "Samsung SoC driver support" if COMPILE_TEST
+
+if SOC_SAMSUNG
+
+config EXYNOS_PMU
+ bool "Exynos PMU controller driver" if COMPILE_TEST
+ depends on ARCH_EXYNOS || ((ARM || ARM64) && COMPILE_TEST)
+ select EXYNOS_PMU_ARM_DRIVERS if ARM && ARCH_EXYNOS
+
+# There is no need to enable these drivers for ARMv8
+config EXYNOS_PMU_ARM_DRIVERS
+ bool "Exynos PMU ARMv7-specific driver extensions" if COMPILE_TEST
+ depends on EXYNOS_PMU
+
+config EXYNOS_PM_DOMAINS
+ bool "Exynos PM domains" if COMPILE_TEST
+ depends on PM_GENERIC_DOMAINS || COMPILE_TEST
+
+endif
diff --git a/drivers/soc/samsung/Makefile b/drivers/soc/samsung/Makefile
new file mode 100644
index 000000000..29f294baa
--- /dev/null
+++ b/drivers/soc/samsung/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_EXYNOS_PMU) += exynos-pmu.o
+
+obj-$(CONFIG_EXYNOS_PMU_ARM_DRIVERS) += exynos3250-pmu.o exynos4-pmu.o \
+ exynos5250-pmu.o exynos5420-pmu.o
+obj-$(CONFIG_EXYNOS_PM_DOMAINS) += pm_domains.o
diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
new file mode 100644
index 000000000..d34ca201b
--- /dev/null
+++ b/drivers/soc/samsung/exynos-pmu.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2011-2014 Samsung Electronics Co., Ltd.
+// http://www.samsung.com/
+//
+// EXYNOS - CPU PMU (Power Management Unit) support
+
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+
+#include <linux/soc/samsung/exynos-regs-pmu.h>
+#include <linux/soc/samsung/exynos-pmu.h>
+
+#include "exynos-pmu.h"
+
+struct exynos_pmu_context {
+ struct device *dev;
+ const struct exynos_pmu_data *pmu_data;
+};
+
+void __iomem *pmu_base_addr;
+static struct exynos_pmu_context *pmu_context;
+
+void pmu_raw_writel(u32 val, u32 offset)
+{
+ writel_relaxed(val, pmu_base_addr + offset);
+}
+
+u32 pmu_raw_readl(u32 offset)
+{
+ return readl_relaxed(pmu_base_addr + offset);
+}
+
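+/*
+ * Program each PMU register listed in the SoC's pmu_config table with
+ * the value for the requested low-power mode; the table is terminated
+ * by a PMU_TABLE_END sentinel offset.
+ */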
+void exynos_sys_powerdown_conf(enum sys_powerdown mode)
+{
+ unsigned int i;
+ const struct exynos_pmu_data *pmu_data;
+
+ if (!pmu_context || !pmu_context->pmu_data)
+ return;
+
+ pmu_data = pmu_context->pmu_data;
+
+ if (pmu_data->powerdown_conf)
+ pmu_data->powerdown_conf(mode);
+
+ if (pmu_data->pmu_config) {
+ for (i = 0; (pmu_data->pmu_config[i].offset != PMU_TABLE_END); i++)
+ pmu_raw_writel(pmu_data->pmu_config[i].val[mode],
+ pmu_data->pmu_config[i].offset);
+ }
+
+ if (pmu_data->powerdown_conf_extra)
+ pmu_data->powerdown_conf_extra(mode);
+}
+
+/*
+ * Split out the ARMv7-specific data because it is relatively large
+ * and unused on other architectures.
+ */
+#ifdef CONFIG_EXYNOS_PMU_ARM_DRIVERS
+#define exynos_pmu_data_arm_ptr(data) (&data)
+#else
+#define exynos_pmu_data_arm_ptr(data) NULL
+#endif
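+
+/*
+ * With CONFIG_EXYNOS_PMU_ARM_DRIVERS disabled the .data pointers below
+ * evaluate to NULL, and exynos_sys_powerdown_conf() bails out early.
+ */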
+
+/*
+ * PMU platform driver and devicetree bindings.
+ */
+static const struct of_device_id exynos_pmu_of_device_ids[] = {
+ {
+ .compatible = "samsung,exynos3250-pmu",
+ .data = exynos_pmu_data_arm_ptr(exynos3250_pmu_data),
+ }, {
+ .compatible = "samsung,exynos4210-pmu",
+ .data = exynos_pmu_data_arm_ptr(exynos4210_pmu_data),
+ }, {
+ .compatible = "samsung,exynos4412-pmu",
+ .data = exynos_pmu_data_arm_ptr(exynos4412_pmu_data),
+ }, {
+ .compatible = "samsung,exynos5250-pmu",
+ .data = exynos_pmu_data_arm_ptr(exynos5250_pmu_data),
+ }, {
+ .compatible = "samsung,exynos5410-pmu",
+ }, {
+ .compatible = "samsung,exynos5420-pmu",
+ .data = exynos_pmu_data_arm_ptr(exynos5420_pmu_data),
+ }, {
+ .compatible = "samsung,exynos5433-pmu",
+ }, {
+ .compatible = "samsung,exynos7-pmu",
+ },
+	{ /* sentinel */ },
+};
+
+struct regmap *exynos_get_pmu_regmap(void)
+{
+ struct device_node *np = of_find_matching_node(NULL,
+ exynos_pmu_of_device_ids);
+ if (np)
+ return syscon_node_to_regmap(np);
+ return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap);
+
+static int exynos_pmu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pmu_base_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pmu_base_addr))
+ return PTR_ERR(pmu_base_addr);
+
+ pmu_context = devm_kzalloc(&pdev->dev,
+ sizeof(struct exynos_pmu_context),
+ GFP_KERNEL);
+ if (!pmu_context)
+ return -ENOMEM;
+ pmu_context->dev = dev;
+ pmu_context->pmu_data = of_device_get_match_data(dev);
+
+ if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_init)
+ pmu_context->pmu_data->pmu_init();
+
+ platform_set_drvdata(pdev, pmu_context);
+
+ if (devm_of_platform_populate(dev))
+ dev_err(dev, "Error populating children, reboot and poweroff might not work properly\n");
+
+ dev_dbg(dev, "Exynos PMU Driver probe done\n");
+ return 0;
+}
+
+static struct platform_driver exynos_pmu_driver = {
+ .driver = {
+ .name = "exynos-pmu",
+ .of_match_table = exynos_pmu_of_device_ids,
+ },
+ .probe = exynos_pmu_probe,
+};
+
+static int __init exynos_pmu_init(void)
+{
+ return platform_driver_register(&exynos_pmu_driver);
+}
+postcore_initcall(exynos_pmu_init);
diff --git a/drivers/soc/samsung/exynos-pmu.h b/drivers/soc/samsung/exynos-pmu.h
new file mode 100644
index 000000000..977e4daf5
--- /dev/null
+++ b/drivers/soc/samsung/exynos-pmu.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Header for EXYNOS PMU Driver support
+ */
+
+#ifndef __EXYNOS_PMU_H
+#define __EXYNOS_PMU_H
+
+#include <linux/io.h>
+
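+/* sentinel .offset value terminating an exynos_pmu_conf table */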
+#define PMU_TABLE_END (-1U)
+
+struct exynos_pmu_conf {
+ unsigned int offset;
+ u8 val[NUM_SYS_POWERDOWN];
+};
+
+struct exynos_pmu_data {
+ const struct exynos_pmu_conf *pmu_config;
+
+ void (*pmu_init)(void);
+ void (*powerdown_conf)(enum sys_powerdown);
+ void (*powerdown_conf_extra)(enum sys_powerdown);
+};
+
+extern void __iomem *pmu_base_addr;
+
+#ifdef CONFIG_EXYNOS_PMU_ARM_DRIVERS
+/* list of all exported SoC specific data */
+extern const struct exynos_pmu_data exynos3250_pmu_data;
+extern const struct exynos_pmu_data exynos4210_pmu_data;
+extern const struct exynos_pmu_data exynos4412_pmu_data;
+extern const struct exynos_pmu_data exynos5250_pmu_data;
+extern const struct exynos_pmu_data exynos5420_pmu_data;
+#endif
+
+extern void pmu_raw_writel(u32 val, u32 offset);
+extern u32 pmu_raw_readl(u32 offset);
+#endif /* __EXYNOS_PMU_H */
diff --git a/drivers/soc/samsung/exynos3250-pmu.c b/drivers/soc/samsung/exynos3250-pmu.c
new file mode 100644
index 000000000..275d348ed
--- /dev/null
+++ b/drivers/soc/samsung/exynos3250-pmu.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
+// http://www.samsung.com/
+//
+// EXYNOS3250 - CPU PMU (Power Management Unit) support
+
+#include <linux/soc/samsung/exynos-regs-pmu.h>
+#include <linux/soc/samsung/exynos-pmu.h>
+
+#include "exynos-pmu.h"
+
+static const struct exynos_pmu_conf exynos3250_pmu_config[] = {
+	/* { .offset = offset, .val = { AFTR, W-AFTR, SLEEP } } */
+ { EXYNOS3_ARM_CORE0_SYS_PWR_REG, { 0x0, 0x0, 0x2} },
+ { EXYNOS3_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS3_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS3_ARM_CORE1_SYS_PWR_REG, { 0x0, 0x0, 0x2} },
+ { EXYNOS3_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS3_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS3_ISP_ARM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS3_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS3_ARM_COMMON_SYS_PWR_REG, { 0x0, 0x0, 0x2} },
+ { EXYNOS3_ARM_L2_SYS_PWR_REG, { 0x0, 0x0, 0x3} },
+ { EXYNOS3_CMU_ACLKSTOP_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_CMU_SCLKSTOP_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_CMU_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_DRAM_FREQ_DOWN_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS3_DDRPHY_DLLOFF_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS3_LPDDR_PHY_DLL_LOCK_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS3_CMU_ACLKSTOP_COREBLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_SCLKSTOP_COREBLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_RESET_COREBLK_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_APLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_MPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_BPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_VPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_EPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_UPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS3_EPLLUSER_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_MPLLUSER_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_BPLLUSER_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_CLKSTOP_CAM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_CLKSTOP_MFC_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_CLKSTOP_G3D_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_CLKSTOP_LCD0_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_CLKSTOP_ISP_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_CLKSTOP_MAUDIO_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_RESET_CAM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_RESET_MFC_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_RESET_G3D_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_RESET_LCD0_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_RESET_ISP_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_RESET_MAUDIO_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_TOP_BUS_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS3_TOP_RETENTION_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS3_TOP_PWR_SYS_PWR_REG, { 0x3, 0x3, 0x3} },
+ { EXYNOS3_TOP_BUS_COREBLK_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS3_TOP_RETENTION_COREBLK_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS3_TOP_PWR_COREBLK_SYS_PWR_REG, { 0x3, 0x3, 0x3} },
+ { EXYNOS3_LOGIC_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_OSCCLK_GATE_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS3_LOGIC_RESET_COREBLK_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_OSCCLK_GATE_COREBLK_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS3_PAD_RETENTION_DRAM_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_PAD_RETENTION_MAUDIO_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_PAD_RETENTION_GPIO_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_PAD_RETENTION_UART_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_PAD_RETENTION_MMC0_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_PAD_RETENTION_MMC1_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_PAD_RETENTION_MMC2_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_PAD_RETENTION_SPI_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_PAD_RETENTION_EBIA_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_PAD_RETENTION_EBIB_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_PAD_RETENTION_JTAG_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_PAD_ISOLATION_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_PAD_ALV_SEL_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_XUSBXTI_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_XXTI_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_EXT_REGULATOR_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_EXT_REGULATOR_COREBLK_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_GPIO_MODE_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_GPIO_MODE_MAUDIO_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_TOP_ASB_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_TOP_ASB_ISOLATION_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_TOP_ASB_RESET_COREBLK_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_TOP_ASB_ISOLATION_COREBLK_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_CAM_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS3_MFC_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS3_G3D_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS3_LCD0_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS3_ISP_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS3_MAUDIO_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS3_CMU_SYSCLK_ISP_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { PMU_TABLE_END,},
+};
+
+static unsigned int const exynos3250_list_feed[] = {
+ EXYNOS3_ARM_CORE_OPTION(0),
+ EXYNOS3_ARM_CORE_OPTION(1),
+ EXYNOS3_ARM_CORE_OPTION(2),
+ EXYNOS3_ARM_CORE_OPTION(3),
+ EXYNOS3_ARM_COMMON_OPTION,
+ EXYNOS3_TOP_PWR_OPTION,
+ EXYNOS3_CORE_TOP_PWR_OPTION,
+ S5P_CAM_OPTION,
+ S5P_MFC_OPTION,
+ S5P_G3D_OPTION,
+ S5P_LCD0_OPTION,
+ S5P_ISP_OPTION,
+};
+
+static void exynos3250_powerdown_conf_extra(enum sys_powerdown mode)
+{
+ unsigned int i;
+ unsigned int tmp;
+
+ /* Enable only SC_FEEDBACK */
+ for (i = 0; i < ARRAY_SIZE(exynos3250_list_feed); i++) {
+ tmp = pmu_raw_readl(exynos3250_list_feed[i]);
+ tmp &= ~(EXYNOS3_OPTION_USE_SC_COUNTER);
+ tmp |= EXYNOS3_OPTION_USE_SC_FEEDBACK;
+ pmu_raw_writel(tmp, exynos3250_list_feed[i]);
+ }
+
+ if (mode != SYS_SLEEP)
+ return;
+
+ pmu_raw_writel(XUSBXTI_DURATION, EXYNOS3_XUSBXTI_DURATION);
+ pmu_raw_writel(XXTI_DURATION, EXYNOS3_XXTI_DURATION);
+ pmu_raw_writel(EXT_REGULATOR_DURATION, EXYNOS3_EXT_REGULATOR_DURATION);
+ pmu_raw_writel(EXT_REGULATOR_COREBLK_DURATION,
+ EXYNOS3_EXT_REGULATOR_COREBLK_DURATION);
+}
+
+static void exynos3250_pmu_init(void)
+{
+ unsigned int value;
+
+ /*
+	 * To keep the L2 memory system from issuing new bus requests
+	 * while a core is powered down, L2 power-down must be set to '1'.
+ */
+ value = pmu_raw_readl(EXYNOS3_ARM_COMMON_OPTION);
+ value |= EXYNOS3_OPTION_SKIP_DEACTIVATE_ACEACP_IN_PWDN;
+ pmu_raw_writel(value, EXYNOS3_ARM_COMMON_OPTION);
+
+ /* Enable USE_STANDBY_WFI for all CORE */
+ pmu_raw_writel(S5P_USE_STANDBY_WFI_ALL, S5P_CENTRAL_SEQ_OPTION);
+
+ /*
+	 * Drive the PSHOLD port output high
+ */
+ value = pmu_raw_readl(S5P_PS_HOLD_CONTROL);
+ value |= S5P_PS_HOLD_OUTPUT_HIGH;
+ pmu_raw_writel(value, S5P_PS_HOLD_CONTROL);
+
+ /*
+	 * Enable the PSHOLD port signal
+ */
+ value = pmu_raw_readl(S5P_PS_HOLD_CONTROL);
+ value |= S5P_PS_HOLD_EN;
+ pmu_raw_writel(value, S5P_PS_HOLD_CONTROL);
+}
+
+const struct exynos_pmu_data exynos3250_pmu_data = {
+ .pmu_config = exynos3250_pmu_config,
+ .pmu_init = exynos3250_pmu_init,
+ .powerdown_conf_extra = exynos3250_powerdown_conf_extra,
+};
diff --git a/drivers/soc/samsung/exynos4-pmu.c b/drivers/soc/samsung/exynos4-pmu.c
new file mode 100644
index 000000000..a7cdbf1aa
--- /dev/null
+++ b/drivers/soc/samsung/exynos4-pmu.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
+// http://www.samsung.com/
+//
+// EXYNOS4 - CPU PMU (Power Management Unit) support
+
+#include <linux/soc/samsung/exynos-regs-pmu.h>
+#include <linux/soc/samsung/exynos-pmu.h>
+
+#include "exynos-pmu.h"
+
+static const struct exynos_pmu_conf exynos4210_pmu_config[] = {
+	/* { .offset = offset, .val = { AFTR, LPA, SLEEP } } */
+ { S5P_ARM_CORE0_LOWPWR, { 0x0, 0x0, 0x2 } },
+ { S5P_DIS_IRQ_CORE0, { 0x0, 0x0, 0x0 } },
+ { S5P_DIS_IRQ_CENTRAL0, { 0x0, 0x0, 0x0 } },
+ { S5P_ARM_CORE1_LOWPWR, { 0x0, 0x0, 0x2 } },
+ { S5P_DIS_IRQ_CORE1, { 0x0, 0x0, 0x0 } },
+ { S5P_DIS_IRQ_CENTRAL1, { 0x0, 0x0, 0x0 } },
+ { S5P_ARM_COMMON_LOWPWR, { 0x0, 0x0, 0x2 } },
+ { S5P_L2_0_LOWPWR, { 0x2, 0x2, 0x3 } },
+ { S5P_L2_1_LOWPWR, { 0x2, 0x2, 0x3 } },
+ { S5P_CMU_ACLKSTOP_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_SCLKSTOP_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_RESET_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_APLL_SYSCLK_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_MPLL_SYSCLK_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_VPLL_SYSCLK_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_EPLL_SYSCLK_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_CLKSTOP_GPS_ALIVE_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_RESET_GPSALIVE_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_CLKSTOP_CAM_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_CLKSTOP_TV_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_CLKSTOP_MFC_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_CLKSTOP_G3D_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_CLKSTOP_LCD0_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_CLKSTOP_LCD1_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_CLKSTOP_MAUDIO_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_CLKSTOP_GPS_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_RESET_CAM_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_RESET_TV_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_RESET_MFC_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_RESET_G3D_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_RESET_LCD0_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_RESET_LCD1_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_RESET_MAUDIO_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_RESET_GPS_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_TOP_BUS_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_TOP_RETENTION_LOWPWR, { 0x1, 0x0, 0x1 } },
+ { S5P_TOP_PWR_LOWPWR, { 0x3, 0x0, 0x3 } },
+ { S5P_LOGIC_RESET_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_ONENAND_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_MODIMIF_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_G2D_ACP_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_USBOTG_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_HSMMC_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_CSSYS_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_SECSS_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_PCIE_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_SATA_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_DRAM_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_MAUDIO_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_PAD_RETENTION_GPIO_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_UART_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_MMCA_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_MMCB_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_EBIA_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_EBIB_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_ISOLATION_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_ALV_SEL_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_XUSBXTI_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_XXTI_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_EXT_REGULATOR_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_GPIO_MODE_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_GPIO_MODE_MAUDIO_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CAM_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_TV_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_MFC_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_G3D_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_LCD0_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_LCD1_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_MAUDIO_LOWPWR, { 0x7, 0x7, 0x0 } },
+ { S5P_GPS_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_GPS_ALIVE_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { PMU_TABLE_END,},
+};
+
+static const struct exynos_pmu_conf exynos4412_pmu_config[] = {
+ { S5P_ARM_CORE0_LOWPWR, { 0x0, 0x0, 0x2 } },
+ { S5P_DIS_IRQ_CORE0, { 0x0, 0x0, 0x0 } },
+ { S5P_DIS_IRQ_CENTRAL0, { 0x0, 0x0, 0x0 } },
+ { S5P_ARM_CORE1_LOWPWR, { 0x0, 0x0, 0x2 } },
+ { S5P_DIS_IRQ_CORE1, { 0x0, 0x0, 0x0 } },
+ { S5P_DIS_IRQ_CENTRAL1, { 0x0, 0x0, 0x0 } },
+ { S5P_ISP_ARM_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_DIS_IRQ_ISP_ARM_LOCAL_LOWPWR, { 0x0, 0x0, 0x0 } },
+ { S5P_DIS_IRQ_ISP_ARM_CENTRAL_LOWPWR, { 0x0, 0x0, 0x0 } },
+ { S5P_ARM_COMMON_LOWPWR, { 0x0, 0x0, 0x2 } },
+ { S5P_L2_0_LOWPWR, { 0x0, 0x0, 0x3 } },
+	/* the XXX_OPTION registers must be set with a different bit field */
+ { S5P_ARM_L2_0_OPTION, { 0x10, 0x10, 0x0 } },
+ { S5P_L2_1_LOWPWR, { 0x0, 0x0, 0x3 } },
+ { S5P_ARM_L2_1_OPTION, { 0x10, 0x10, 0x0 } },
+ { S5P_CMU_ACLKSTOP_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_SCLKSTOP_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_RESET_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_DRAM_FREQ_DOWN_LOWPWR, { 0x1, 0x1, 0x1 } },
+ { S5P_DDRPHY_DLLOFF_LOWPWR, { 0x1, 0x1, 0x1 } },
+ { S5P_LPDDR_PHY_DLL_LOCK_LOWPWR, { 0x1, 0x1, 0x1 } },
+ { S5P_CMU_ACLKSTOP_COREBLK_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_SCLKSTOP_COREBLK_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_RESET_COREBLK_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_APLL_SYSCLK_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_MPLL_SYSCLK_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_VPLL_SYSCLK_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_EPLL_SYSCLK_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_MPLLUSER_SYSCLK_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_CLKSTOP_GPS_ALIVE_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_RESET_GPSALIVE_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_CLKSTOP_CAM_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_CLKSTOP_TV_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_CLKSTOP_MFC_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_CLKSTOP_G3D_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_CLKSTOP_LCD0_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_CLKSTOP_ISP_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_CLKSTOP_MAUDIO_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_CLKSTOP_GPS_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_RESET_CAM_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_RESET_TV_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_RESET_MFC_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_RESET_G3D_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_RESET_LCD0_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_RESET_ISP_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_RESET_MAUDIO_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_RESET_GPS_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_TOP_BUS_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_TOP_RETENTION_LOWPWR, { 0x1, 0x0, 0x1 } },
+ { S5P_TOP_PWR_LOWPWR, { 0x3, 0x0, 0x3 } },
+ { S5P_TOP_BUS_COREBLK_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_TOP_RETENTION_COREBLK_LOWPWR, { 0x1, 0x0, 0x1 } },
+ { S5P_TOP_PWR_COREBLK_LOWPWR, { 0x3, 0x0, 0x3 } },
+ { S5P_LOGIC_RESET_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_OSCCLK_GATE_LOWPWR, { 0x1, 0x0, 0x1 } },
+ { S5P_LOGIC_RESET_COREBLK_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_OSCCLK_GATE_COREBLK_LOWPWR, { 0x1, 0x0, 0x1 } },
+ { S5P_ONENAND_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_ONENAND_MEM_OPTION, { 0x10, 0x10, 0x0 } },
+ { S5P_HSI_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_HSI_MEM_OPTION, { 0x10, 0x10, 0x0 } },
+ { S5P_G2D_ACP_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_G2D_ACP_MEM_OPTION, { 0x10, 0x10, 0x0 } },
+ { S5P_USBOTG_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_USBOTG_MEM_OPTION, { 0x10, 0x10, 0x0 } },
+ { S5P_HSMMC_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_HSMMC_MEM_OPTION, { 0x10, 0x10, 0x0 } },
+ { S5P_CSSYS_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_CSSYS_MEM_OPTION, { 0x10, 0x10, 0x0 } },
+ { S5P_SECSS_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_SECSS_MEM_OPTION, { 0x10, 0x10, 0x0 } },
+ { S5P_ROTATOR_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_ROTATOR_MEM_OPTION, { 0x10, 0x10, 0x0 } },
+ { S5P_PAD_RETENTION_DRAM_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_MAUDIO_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_PAD_RETENTION_GPIO_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_UART_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_MMCA_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_MMCB_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_EBIA_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_EBIB_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_GPIO_COREBLK_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_ISOLATION_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_ISOLATION_COREBLK_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_ALV_SEL_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_XUSBXTI_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_XXTI_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_EXT_REGULATOR_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_GPIO_MODE_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_GPIO_MODE_COREBLK_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_GPIO_MODE_MAUDIO_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_TOP_ASB_RESET_LOWPWR, { 0x1, 0x1, 0x1 } },
+ { S5P_TOP_ASB_ISOLATION_LOWPWR, { 0x1, 0x0, 0x1 } },
+ { S5P_CAM_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_TV_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_MFC_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_G3D_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_LCD0_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_ISP_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_MAUDIO_LOWPWR, { 0x7, 0x7, 0x0 } },
+ { S5P_GPS_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_GPS_ALIVE_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_CMU_SYSCLK_ISP_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_SYSCLK_GPS_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_ARM_CORE2_LOWPWR, { 0x0, 0x0, 0x2 } },
+ { S5P_DIS_IRQ_CORE2, { 0x0, 0x0, 0x0 } },
+ { S5P_DIS_IRQ_CENTRAL2, { 0x0, 0x0, 0x0 } },
+ { S5P_ARM_CORE3_LOWPWR, { 0x0, 0x0, 0x2 } },
+ { S5P_DIS_IRQ_CORE3, { 0x0, 0x0, 0x0 } },
+ { S5P_DIS_IRQ_CENTRAL3, { 0x0, 0x0, 0x0 } },
+ { PMU_TABLE_END,},
+};
+
+const struct exynos_pmu_data exynos4210_pmu_data = {
+ .pmu_config = exynos4210_pmu_config,
+};
+
+const struct exynos_pmu_data exynos4412_pmu_data = {
+ .pmu_config = exynos4412_pmu_config,
+};
diff --git a/drivers/soc/samsung/exynos5250-pmu.c b/drivers/soc/samsung/exynos5250-pmu.c
new file mode 100644
index 000000000..19b38e008
--- /dev/null
+++ b/drivers/soc/samsung/exynos5250-pmu.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
+// http://www.samsung.com/
+//
+// EXYNOS5250 - CPU PMU (Power Management Unit) support
+
+#include <linux/soc/samsung/exynos-regs-pmu.h>
+#include <linux/soc/samsung/exynos-pmu.h>
+
+#include "exynos-pmu.h"
+
+static const struct exynos_pmu_conf exynos5250_pmu_config[] = {
+	/* { .offset = offset, .val = { AFTR, LPA, SLEEP } } */
+ { EXYNOS5_ARM_CORE0_SYS_PWR_REG, { 0x0, 0x0, 0x2} },
+ { EXYNOS5_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_ARM_CORE1_SYS_PWR_REG, { 0x0, 0x0, 0x2} },
+ { EXYNOS5_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_FSYS_ARM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_FSYS_ARM_CENTRAL_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS5_ISP_ARM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_ARM_COMMON_SYS_PWR_REG, { 0x0, 0x0, 0x2} },
+ { EXYNOS5_ARM_L2_SYS_PWR_REG, { 0x3, 0x3, 0x3} },
+ { EXYNOS_L2_OPTION(0), { 0x10, 0x10, 0x0 } },
+ { EXYNOS5_CMU_ACLKSTOP_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_CMU_SCLKSTOP_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_CMU_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_CMU_ACLKSTOP_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_CMU_SCLKSTOP_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_CMU_RESET_SYSMEM_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_DRAM_FREQ_DOWN_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS5_DDRPHY_DLLOFF_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS5_DDRPHY_DLLLOCK_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS5_APLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_MPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_VPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_EPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_BPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_MPLLUSER_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_BPLLUSER_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_TOP_BUS_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_TOP_RETENTION_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_TOP_PWR_SYS_PWR_REG, { 0x3, 0x0, 0x3} },
+ { EXYNOS5_TOP_BUS_SYSMEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_TOP_RETENTION_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_TOP_PWR_SYSMEM_SYS_PWR_REG, { 0x3, 0x0, 0x3} },
+ { EXYNOS5_LOGIC_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_OSCCLK_GATE_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_LOGIC_RESET_SYSMEM_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_OSCCLK_GATE_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_USBOTG_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_G2D_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_USBDRD_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_SDMMC_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_CSSYS_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_SECSS_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_ROTATOR_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_INTRAM_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_INTROM_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_JPEG_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_JPEG_MEM_OPTION, { 0x10, 0x10, 0x0} },
+ { EXYNOS5_HSI_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_MCUIOP_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_SATA_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_DRAM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_MAU_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_PAD_RETENTION_GPIO_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_UART_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_MMCA_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_MMCB_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_EBIA_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_EBIB_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_SPI_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_GPIO_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_ISOLATION_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_ISOLATION_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_ALV_SEL_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_XUSBXTI_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS5_XXTI_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_EXT_REGULATOR_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_GPIO_MODE_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_GPIO_MODE_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_GPIO_MODE_MAU_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_TOP_ASB_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS5_TOP_ASB_ISOLATION_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_GSCL_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5_ISP_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5_MFC_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5_G3D_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5_DISP1_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5_MAU_SYS_PWR_REG, { 0x7, 0x7, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_GSCL_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_ISP_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_MFC_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_G3D_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_DISP1_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_MAU_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_GSCL_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_ISP_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_MFC_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_G3D_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_DISP1_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_MAU_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_CMU_RESET_GSCL_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_RESET_ISP_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_RESET_MFC_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_RESET_G3D_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_RESET_DISP1_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_RESET_MAU_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { PMU_TABLE_END,},
+};
+
+static unsigned int const exynos5_list_both_cnt_feed[] = {
+ EXYNOS5_ARM_CORE0_OPTION,
+ EXYNOS5_ARM_CORE1_OPTION,
+ EXYNOS5_ARM_COMMON_OPTION,
+ EXYNOS5_GSCL_OPTION,
+ EXYNOS5_ISP_OPTION,
+ EXYNOS5_MFC_OPTION,
+ EXYNOS5_G3D_OPTION,
+ EXYNOS5_DISP1_OPTION,
+ EXYNOS5_MAU_OPTION,
+ EXYNOS5_TOP_PWR_OPTION,
+ EXYNOS5_TOP_PWR_SYSMEM_OPTION,
+};
+
+static unsigned int const exynos5_list_disable_wfi_wfe[] = {
+ EXYNOS5_ARM_CORE1_OPTION,
+ EXYNOS5_FSYS_ARM_OPTION,
+ EXYNOS5_ISP_ARM_OPTION,
+};
+
+static void exynos5250_pmu_init(void)
+{
+ unsigned int value;
+ /*
+	 * When SYS_WDTRESET is set, the watchdog timer reset request
+	 * is ignored by the power management unit.
+ */
+ value = pmu_raw_readl(EXYNOS5_AUTO_WDTRESET_DISABLE);
+ value &= ~EXYNOS5_SYS_WDTRESET;
+ pmu_raw_writel(value, EXYNOS5_AUTO_WDTRESET_DISABLE);
+
+ value = pmu_raw_readl(EXYNOS5_MASK_WDTRESET_REQUEST);
+ value &= ~EXYNOS5_SYS_WDTRESET;
+ pmu_raw_writel(value, EXYNOS5_MASK_WDTRESET_REQUEST);
+}
+
+static void exynos5_powerdown_conf(enum sys_powerdown mode)
+{
+ unsigned int i;
+ unsigned int tmp;
+
+ /*
+ * Enable both SC_FEEDBACK and SC_COUNTER
+ */
+ for (i = 0; i < ARRAY_SIZE(exynos5_list_both_cnt_feed); i++) {
+ tmp = pmu_raw_readl(exynos5_list_both_cnt_feed[i]);
+ tmp |= (EXYNOS5_USE_SC_FEEDBACK |
+ EXYNOS5_USE_SC_COUNTER);
+ pmu_raw_writel(tmp, exynos5_list_both_cnt_feed[i]);
+ }
+
+ /*
+	 * Enable SKIP_DEACTIVATE_ACEACP_IN_PWDN
+ */
+ tmp = pmu_raw_readl(EXYNOS5_ARM_COMMON_OPTION);
+ tmp |= EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN;
+ pmu_raw_writel(tmp, EXYNOS5_ARM_COMMON_OPTION);
+
+ /*
+ * Disable WFI/WFE on XXX_OPTION
+ */
+ for (i = 0; i < ARRAY_SIZE(exynos5_list_disable_wfi_wfe); i++) {
+ tmp = pmu_raw_readl(exynos5_list_disable_wfi_wfe[i]);
+ tmp &= ~(EXYNOS5_OPTION_USE_STANDBYWFE |
+ EXYNOS5_OPTION_USE_STANDBYWFI);
+ pmu_raw_writel(tmp, exynos5_list_disable_wfi_wfe[i]);
+ }
+}
+
+const struct exynos_pmu_data exynos5250_pmu_data = {
+ .pmu_config = exynos5250_pmu_config,
+ .pmu_init = exynos5250_pmu_init,
+ .powerdown_conf = exynos5_powerdown_conf,
+};
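
Note on how these tables are used: the three columns give the value programmed
for each low-power mode (AFTR, LPA, SLEEP). A minimal sketch of the consuming
loop in the shared exynos-pmu core, assuming its pmu_context bookkeeping (only
pmu_raw_writel() and PMU_TABLE_END are visible in this hunk):

	static void exynos_sys_powerdown_conf(enum sys_powerdown mode)
	{
		const struct exynos_pmu_conf *conf =
			pmu_context->pmu_data->pmu_config;
		unsigned int i;

		/* write the selected column of every row to its register */
		for (i = 0; conf[i].offset != PMU_TABLE_END; i++)
			pmu_raw_writel(conf[i].val[mode], conf[i].offset);
	}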
diff --git a/drivers/soc/samsung/exynos5420-pmu.c b/drivers/soc/samsung/exynos5420-pmu.c
new file mode 100644
index 000000000..b236d3b47
--- /dev/null
+++ b/drivers/soc/samsung/exynos5420-pmu.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
+// http://www.samsung.com/
+//
+// EXYNOS5420 - CPU PMU (Power Management Unit) support
+
+#include <linux/pm.h>
+#include <linux/soc/samsung/exynos-regs-pmu.h>
+#include <linux/soc/samsung/exynos-pmu.h>
+
+#include <asm/cputype.h>
+
+#include "exynos-pmu.h"
+
+static const struct exynos_pmu_conf exynos5420_pmu_config[] = {
+	/* { .offset = offset, .val = { AFTR, LPA, SLEEP } } */
+ { EXYNOS5_ARM_CORE0_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_ARM_CORE1_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_ARM_CORE2_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_DIS_IRQ_ARM_CORE2_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_DIS_IRQ_ARM_CORE2_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_ARM_CORE3_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_DIS_IRQ_ARM_CORE3_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_DIS_IRQ_ARM_CORE3_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_KFC_CORE0_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_DIS_IRQ_KFC_CORE0_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_DIS_IRQ_KFC_CORE0_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_KFC_CORE1_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_DIS_IRQ_KFC_CORE1_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_DIS_IRQ_KFC_CORE1_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_KFC_CORE2_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_DIS_IRQ_KFC_CORE2_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_DIS_IRQ_KFC_CORE2_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_KFC_CORE3_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_DIS_IRQ_KFC_CORE3_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_DIS_IRQ_KFC_CORE3_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_ISP_ARM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_ARM_COMMON_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_KFC_COMMON_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_ARM_L2_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_KFC_L2_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_CMU_ACLKSTOP_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_SCLKSTOP_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_CMU_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_CMU_ACLKSTOP_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_SCLKSTOP_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_CMU_RESET_SYSMEM_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_DRAM_FREQ_DOWN_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_DDRPHY_DLLOFF_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS5_DDRPHY_DLLLOCK_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_APLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_MPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_VPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_EPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_BPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_DPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_IPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_KPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_MPLLUSER_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_BPLLUSER_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_RPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_SPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_TOP_BUS_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_TOP_RETENTION_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS5_TOP_PWR_SYS_PWR_REG, { 0x3, 0x3, 0x0} },
+ { EXYNOS5_TOP_BUS_SYSMEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_TOP_RETENTION_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_TOP_PWR_SYSMEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_LOGIC_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_OSCCLK_GATE_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_LOGIC_RESET_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_OSCCLK_GATE_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_INTRAM_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x3} },
+ { EXYNOS5420_INTROM_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x3} },
+ { EXYNOS5_PAD_RETENTION_DRAM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_MAU_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5420_PAD_RETENTION_JTAG_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5420_PAD_RETENTION_DRAM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_PAD_RETENTION_UART_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_PAD_RETENTION_MMC0_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_PAD_RETENTION_MMC1_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_PAD_RETENTION_MMC2_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_PAD_RETENTION_HSI_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_PAD_RETENTION_EBIA_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_PAD_RETENTION_EBIB_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_PAD_RETENTION_SPI_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_PAD_RETENTION_DRAM_COREBLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_ISOLATION_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_PAD_ISOLATION_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_ALV_SEL_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_XUSBXTI_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_XXTI_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_EXT_REGULATOR_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_GPIO_MODE_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_GPIO_MODE_SYSMEM_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_GPIO_MODE_MAU_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_TOP_ASB_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_TOP_ASB_ISOLATION_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_GSCL_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5_ISP_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5_MFC_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5_G3D_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5420_DISP1_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5420_MAU_SYS_PWR_REG, { 0x7, 0x7, 0x0} },
+ { EXYNOS5420_G2D_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5420_MSC_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5420_FSYS_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5420_FSYS2_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5420_PSGEN_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5420_PERIC_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5420_WCORE_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_GSCL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_ISP_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_MFC_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_G3D_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_CLKSTOP_DISP1_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_CLKSTOP_MAU_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_CLKSTOP_G2D_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_CLKSTOP_MSC_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_CLKSTOP_FSYS_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_CLKSTOP_PSGEN_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_CLKSTOP_PERIC_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_CLKSTOP_WCORE_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_GSCL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_ISP_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_MFC_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_G3D_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_SYSCLK_DISP1_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_SYSCLK_MAU_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_SYSCLK_G2D_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_SYSCLK_MSC_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_SYSCLK_FSYS_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_SYSCLK_FSYS2_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_SYSCLK_PSGEN_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_SYSCLK_PERIC_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_SYSCLK_WCORE_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_RESET_FSYS2_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_RESET_PSGEN_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_RESET_PERIC_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_RESET_WCORE_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_CMU_RESET_GSCL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_CMU_RESET_ISP_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_CMU_RESET_MFC_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_CMU_RESET_G3D_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_RESET_DISP1_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_RESET_MAU_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_RESET_G2D_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_RESET_MSC_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_RESET_FSYS_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { PMU_TABLE_END,},
+};
+
+static unsigned int const exynos5420_list_disable_pmu_reg[] = {
+ EXYNOS5_CMU_CLKSTOP_GSCL_SYS_PWR_REG,
+ EXYNOS5_CMU_CLKSTOP_ISP_SYS_PWR_REG,
+ EXYNOS5_CMU_CLKSTOP_G3D_SYS_PWR_REG,
+ EXYNOS5420_CMU_CLKSTOP_DISP1_SYS_PWR_REG,
+ EXYNOS5420_CMU_CLKSTOP_MAU_SYS_PWR_REG,
+ EXYNOS5420_CMU_CLKSTOP_G2D_SYS_PWR_REG,
+ EXYNOS5420_CMU_CLKSTOP_MSC_SYS_PWR_REG,
+ EXYNOS5420_CMU_CLKSTOP_FSYS_SYS_PWR_REG,
+ EXYNOS5420_CMU_CLKSTOP_PSGEN_SYS_PWR_REG,
+ EXYNOS5420_CMU_CLKSTOP_PERIC_SYS_PWR_REG,
+ EXYNOS5420_CMU_CLKSTOP_WCORE_SYS_PWR_REG,
+ EXYNOS5_CMU_SYSCLK_GSCL_SYS_PWR_REG,
+ EXYNOS5_CMU_SYSCLK_ISP_SYS_PWR_REG,
+ EXYNOS5_CMU_SYSCLK_G3D_SYS_PWR_REG,
+ EXYNOS5420_CMU_SYSCLK_DISP1_SYS_PWR_REG,
+ EXYNOS5420_CMU_SYSCLK_MAU_SYS_PWR_REG,
+ EXYNOS5420_CMU_SYSCLK_G2D_SYS_PWR_REG,
+ EXYNOS5420_CMU_SYSCLK_MSC_SYS_PWR_REG,
+ EXYNOS5420_CMU_SYSCLK_FSYS_SYS_PWR_REG,
+ EXYNOS5420_CMU_SYSCLK_FSYS2_SYS_PWR_REG,
+ EXYNOS5420_CMU_SYSCLK_PSGEN_SYS_PWR_REG,
+ EXYNOS5420_CMU_SYSCLK_PERIC_SYS_PWR_REG,
+ EXYNOS5420_CMU_SYSCLK_WCORE_SYS_PWR_REG,
+ EXYNOS5420_CMU_RESET_FSYS2_SYS_PWR_REG,
+ EXYNOS5420_CMU_RESET_PSGEN_SYS_PWR_REG,
+ EXYNOS5420_CMU_RESET_PERIC_SYS_PWR_REG,
+ EXYNOS5420_CMU_RESET_WCORE_SYS_PWR_REG,
+ EXYNOS5_CMU_RESET_GSCL_SYS_PWR_REG,
+ EXYNOS5_CMU_RESET_ISP_SYS_PWR_REG,
+ EXYNOS5_CMU_RESET_G3D_SYS_PWR_REG,
+ EXYNOS5420_CMU_RESET_DISP1_SYS_PWR_REG,
+ EXYNOS5420_CMU_RESET_MAU_SYS_PWR_REG,
+ EXYNOS5420_CMU_RESET_G2D_SYS_PWR_REG,
+ EXYNOS5420_CMU_RESET_MSC_SYS_PWR_REG,
+ EXYNOS5420_CMU_RESET_FSYS_SYS_PWR_REG,
+};
+
+static void exynos5420_powerdown_conf(enum sys_powerdown mode)
+{
+ u32 this_cluster;
+
+ this_cluster = MPIDR_AFFINITY_LEVEL(read_cpuid_mpidr(), 1);
+
+ /*
+	 * Set the cluster ID in the IROM register to ensure that we
+	 * wake up on the current cluster.
+ */
+ pmu_raw_writel(this_cluster, EXYNOS_IROM_DATA2);
+}
+
+static void exynos5420_pmu_init(void)
+{
+ unsigned int value;
+ int i;
+
+ /*
+ * Set the CMU_RESET, CMU_SYSCLK and CMU_CLKSTOP registers
+ * for local power blocks to Low initially as per Table 8-4:
+ * "System-Level Power-Down Configuration Registers".
+ */
+ for (i = 0; i < ARRAY_SIZE(exynos5420_list_disable_pmu_reg); i++)
+ pmu_raw_writel(0, exynos5420_list_disable_pmu_reg[i]);
+
+	/* Enable USE_STANDBY_WFI for all cores */
+ pmu_raw_writel(EXYNOS5420_USE_STANDBY_WFI_ALL, S5P_CENTRAL_SEQ_OPTION);
+
+ value = pmu_raw_readl(EXYNOS_L2_OPTION(0));
+ value &= ~EXYNOS_L2_USE_RETENTION;
+ pmu_raw_writel(value, EXYNOS_L2_OPTION(0));
+
+ value = pmu_raw_readl(EXYNOS_L2_OPTION(1));
+ value &= ~EXYNOS_L2_USE_RETENTION;
+ pmu_raw_writel(value, EXYNOS_L2_OPTION(1));
+
+ /*
+	 * If L2_COMMON is turned off, the clocks of the ATB async
+	 * bridge are gated. Thus, when the ISP power domain is gated,
+	 * LPI may get stuck.
+ */
+ value = pmu_raw_readl(EXYNOS5420_LPI_MASK);
+ value |= EXYNOS5420_ATB_ISP_ARM;
+ pmu_raw_writel(value, EXYNOS5420_LPI_MASK);
+
+ value = pmu_raw_readl(EXYNOS5420_LPI_MASK1);
+ value |= EXYNOS5420_ATB_KFC;
+ pmu_raw_writel(value, EXYNOS5420_LPI_MASK1);
+
+ /* Prevent issue of new bus request from L2 memory */
+ value = pmu_raw_readl(EXYNOS5420_ARM_COMMON_OPTION);
+ value |= EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN;
+ pmu_raw_writel(value, EXYNOS5420_ARM_COMMON_OPTION);
+
+ value = pmu_raw_readl(EXYNOS5420_KFC_COMMON_OPTION);
+ value |= EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN;
+ pmu_raw_writel(value, EXYNOS5420_KFC_COMMON_OPTION);
+
+ /* This setting is to reduce suspend/resume time */
+ pmu_raw_writel(DUR_WAIT_RESET, EXYNOS5420_LOGIC_RESET_DURATION3);
+
+ /* Serialized CPU wakeup of Eagle */
+ pmu_raw_writel(SPREAD_ENABLE, EXYNOS5420_ARM_INTR_SPREAD_ENABLE);
+
+ pmu_raw_writel(SPREAD_USE_STANDWFI,
+ EXYNOS5420_ARM_INTR_SPREAD_USE_STANDBYWFI);
+
+ pmu_raw_writel(0x1, EXYNOS5420_UP_SCHEDULER);
+
+ pr_info("EXYNOS5420 PMU initialized\n");
+}
+
+const struct exynos_pmu_data exynos5420_pmu_data = {
+ .pmu_config = exynos5420_pmu_config,
+ .pmu_init = exynos5420_pmu_init,
+ .powerdown_conf = exynos5420_powerdown_conf,
+};
diff --git a/drivers/soc/samsung/pm_domains.c b/drivers/soc/samsung/pm_domains.c
new file mode 100644
index 000000000..ab8582971
--- /dev/null
+++ b/drivers/soc/samsung/pm_domains.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Exynos Generic power domain support.
+//
+// Copyright (c) 2012 Samsung Electronics Co., Ltd.
+// http://www.samsung.com
+//
+// Implementation of Exynos specific power domain control which is used in
+// conjunction with runtime-pm. Both device-tree and non-device-tree based
+// power domains are supported.
+
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/pm_domain.h>
+#include <linux/delay.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/sched.h>
+
+struct exynos_pm_domain_config {
+ /* Value for LOCAL_PWR_CFG and STATUS fields for each domain */
+ u32 local_pwr_cfg;
+};
+
+/*
+ * Exynos specific wrapper around the generic power domain
+ */
+struct exynos_pm_domain {
+ void __iomem *base;
+ bool is_off;
+ struct generic_pm_domain pd;
+ u32 local_pwr_cfg;
+};
+
+static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
+{
+ struct exynos_pm_domain *pd;
+ void __iomem *base;
+ u32 timeout, pwr;
+ char *op;
+
+ pd = container_of(domain, struct exynos_pm_domain, pd);
+ base = pd->base;
+
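+	/* LOCAL_PWR_CFG is at the domain base; the matching STATUS field is at +0x4 */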
+ pwr = power_on ? pd->local_pwr_cfg : 0;
+ writel_relaxed(pwr, base);
+
+ /* Wait max 1ms */
+ timeout = 10;
+
+ while ((readl_relaxed(base + 0x4) & pd->local_pwr_cfg) != pwr) {
+ if (!timeout) {
+ op = (power_on) ? "enable" : "disable";
+ pr_err("Power domain %s %s failed\n", domain->name, op);
+ return -ETIMEDOUT;
+ }
+ timeout--;
+ cpu_relax();
+ usleep_range(80, 100);
+ }
+
+ return 0;
+}
+
+static int exynos_pd_power_on(struct generic_pm_domain *domain)
+{
+ return exynos_pd_power(domain, true);
+}
+
+static int exynos_pd_power_off(struct generic_pm_domain *domain)
+{
+ return exynos_pd_power(domain, false);
+}
+
+static const struct exynos_pm_domain_config exynos4210_cfg __initconst = {
+ .local_pwr_cfg = 0x7,
+};
+
+static const struct exynos_pm_domain_config exynos5433_cfg __initconst = {
+ .local_pwr_cfg = 0xf,
+};
+
+static const struct of_device_id exynos_pm_domain_of_match[] __initconst = {
+ {
+ .compatible = "samsung,exynos4210-pd",
+ .data = &exynos4210_cfg,
+ }, {
+ .compatible = "samsung,exynos5433-pd",
+ .data = &exynos5433_cfg,
+ },
+ { },
+};
+
+static __init const char *exynos_get_domain_name(struct device_node *node)
+{
+ const char *name;
+
+ if (of_property_read_string(node, "label", &name) < 0)
+ name = kbasename(node->full_name);
+ return kstrdup_const(name, GFP_KERNEL);
+}
+
+static __init int exynos4_pm_init_power_domain(void)
+{
+ struct device_node *np;
+ const struct of_device_id *match;
+
+ for_each_matching_node_and_match(np, exynos_pm_domain_of_match, &match) {
+ const struct exynos_pm_domain_config *pm_domain_cfg;
+ struct exynos_pm_domain *pd;
+ int on;
+
+ pm_domain_cfg = match->data;
+
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd) {
+ of_node_put(np);
+ return -ENOMEM;
+ }
+ pd->pd.name = exynos_get_domain_name(np);
+ if (!pd->pd.name) {
+ kfree(pd);
+ of_node_put(np);
+ return -ENOMEM;
+ }
+
+ pd->base = of_iomap(np, 0);
+ if (!pd->base) {
+ pr_warn("%s: failed to map memory\n", __func__);
+ kfree_const(pd->pd.name);
+ kfree(pd);
+ continue;
+ }
+
+ pd->pd.power_off = exynos_pd_power_off;
+ pd->pd.power_on = exynos_pd_power_on;
+ pd->local_pwr_cfg = pm_domain_cfg->local_pwr_cfg;
+
+ on = readl_relaxed(pd->base + 0x4) & pd->local_pwr_cfg;
+
+ pm_genpd_init(&pd->pd, NULL, !on);
+ of_genpd_add_provider_simple(np, &pd->pd);
+ }
+
+ /* Assign the child power domains to their parents */
+ for_each_matching_node(np, exynos_pm_domain_of_match) {
+ struct of_phandle_args child, parent;
+
+ child.np = np;
+ child.args_count = 0;
+
+ if (of_parse_phandle_with_args(np, "power-domains",
+ "#power-domain-cells", 0,
+ &parent) != 0)
+ continue;
+
+ if (of_genpd_add_subdomain(&parent, &child))
+ pr_warn("%pOF failed to add subdomain: %pOF\n",
+ parent.np, child.np);
+ else
+			pr_info("%pOF has child subdomain %pOF\n",
+ parent.np, child.np);
+ }
+
+ return 0;
+}
+core_initcall(exynos4_pm_init_power_domain);
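
After of_genpd_add_provider_simple(), any device whose node references the
domain through a power-domains phandle is attached to it automatically, and
plain runtime PM drives exynos_pd_power_on()/exynos_pd_power_off(). A hedged
consumer sketch (the driver and its probe are hypothetical):

	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>

	static int foo_probe(struct platform_device *pdev)
	{
		int ret;

		pm_runtime_enable(&pdev->dev);

		/* the first active reference powers the domain on */
		ret = pm_runtime_get_sync(&pdev->dev);
		if (ret < 0) {
			pm_runtime_put_noidle(&pdev->dev);
			pm_runtime_disable(&pdev->dev);
			return ret;
		}

		return 0;
	}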
diff --git a/drivers/soc/sunxi/Kconfig b/drivers/soc/sunxi/Kconfig
new file mode 100644
index 000000000..e84eb4e59
--- /dev/null
+++ b/drivers/soc/sunxi/Kconfig
@@ -0,0 +1,11 @@
+#
+# Allwinner sunXi SoC drivers
+#
+config SUNXI_SRAM
+ bool
+ default ARCH_SUNXI
+ select REGMAP_MMIO
+ help
+ Say y here to enable the SRAM controller support. This
+	  device is responsible for mapping the SRAM in the sunXi SoCs
+	  either to the CPU/DMA or to the devices.
diff --git a/drivers/soc/sunxi/Makefile b/drivers/soc/sunxi/Makefile
new file mode 100644
index 000000000..4cf9dbdf3
--- /dev/null
+++ b/drivers/soc/sunxi/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_SUNXI_SRAM) += sunxi_sram.o
diff --git a/drivers/soc/sunxi/sunxi_sram.c b/drivers/soc/sunxi/sunxi_sram.c
new file mode 100644
index 000000000..b4b0f3480
--- /dev/null
+++ b/drivers/soc/sunxi/sunxi_sram.c
@@ -0,0 +1,407 @@
+/*
+ * Allwinner SoCs SRAM Controller Driver
+ *
+ * Copyright (C) 2015 Maxime Ripard
+ *
+ * Author: Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <linux/soc/sunxi/sunxi_sram.h>
+
+struct sunxi_sram_func {
+ char *func;
+ u8 val;
+ u32 reg_val;
+};
+
+struct sunxi_sram_data {
+ char *name;
+ u8 reg;
+ u8 offset;
+ u8 width;
+ struct sunxi_sram_func *func;
+ struct list_head list;
+};
+
+struct sunxi_sram_desc {
+ struct sunxi_sram_data data;
+ bool claimed;
+};
+
+#define SUNXI_SRAM_MAP(_reg_val, _val, _func) \
+ { \
+ .func = _func, \
+ .val = _val, \
+ .reg_val = _reg_val, \
+ }
+
+#define SUNXI_SRAM_DATA(_name, _reg, _off, _width, ...) \
+ { \
+ .name = _name, \
+ .reg = _reg, \
+ .offset = _off, \
+ .width = _width, \
+ .func = (struct sunxi_sram_func[]){ \
+ __VA_ARGS__, { } }, \
+ }
+
+static struct sunxi_sram_desc sun4i_a10_sram_a3_a4 = {
+ .data = SUNXI_SRAM_DATA("A3-A4", 0x4, 0x4, 2,
+ SUNXI_SRAM_MAP(0, 0, "cpu"),
+ SUNXI_SRAM_MAP(1, 1, "emac")),
+};
+
+static struct sunxi_sram_desc sun4i_a10_sram_c1 = {
+ .data = SUNXI_SRAM_DATA("C1", 0x0, 0x0, 31,
+ SUNXI_SRAM_MAP(0, 0, "cpu"),
+ SUNXI_SRAM_MAP(0x7fffffff, 1, "ve")),
+};
+
+static struct sunxi_sram_desc sun4i_a10_sram_d = {
+ .data = SUNXI_SRAM_DATA("D", 0x4, 0x0, 1,
+ SUNXI_SRAM_MAP(0, 0, "cpu"),
+ SUNXI_SRAM_MAP(1, 1, "usb-otg")),
+};
+
+static struct sunxi_sram_desc sun50i_a64_sram_c = {
+ .data = SUNXI_SRAM_DATA("C", 0x4, 24, 1,
+ SUNXI_SRAM_MAP(0, 1, "cpu"),
+ SUNXI_SRAM_MAP(1, 0, "de2")),
+};
+
+static const struct of_device_id sunxi_sram_dt_ids[] = {
+ {
+ .compatible = "allwinner,sun4i-a10-sram-a3-a4",
+ .data = &sun4i_a10_sram_a3_a4.data,
+ },
+ {
+ .compatible = "allwinner,sun4i-a10-sram-c1",
+ .data = &sun4i_a10_sram_c1.data,
+ },
+ {
+ .compatible = "allwinner,sun4i-a10-sram-d",
+ .data = &sun4i_a10_sram_d.data,
+ },
+ {
+ .compatible = "allwinner,sun50i-a64-sram-c",
+ .data = &sun50i_a64_sram_c.data,
+ },
+ {}
+};
+
+static struct device *sram_dev;
+static LIST_HEAD(claimed_sram);
+static DEFINE_SPINLOCK(sram_lock);
+static void __iomem *base;
+
+static int sunxi_sram_show(struct seq_file *s, void *data)
+{
+ struct device_node *sram_node, *section_node;
+ const struct sunxi_sram_data *sram_data;
+ const struct of_device_id *match;
+ struct sunxi_sram_func *func;
+ const __be32 *sram_addr_p, *section_addr_p;
+ u32 val;
+
+ seq_puts(s, "Allwinner sunXi SRAM\n");
+ seq_puts(s, "--------------------\n\n");
+
+ for_each_child_of_node(sram_dev->of_node, sram_node) {
+ sram_addr_p = of_get_address(sram_node, 0, NULL, NULL);
+
+ seq_printf(s, "sram@%08x\n",
+ be32_to_cpu(*sram_addr_p));
+
+ for_each_child_of_node(sram_node, section_node) {
+ match = of_match_node(sunxi_sram_dt_ids, section_node);
+ if (!match)
+ continue;
+ sram_data = match->data;
+
+ section_addr_p = of_get_address(section_node, 0,
+ NULL, NULL);
+
+ seq_printf(s, "\tsection@%04x\t(%s)\n",
+ be32_to_cpu(*section_addr_p),
+ sram_data->name);
+
+ val = readl(base + sram_data->reg);
+ val >>= sram_data->offset;
+ val &= GENMASK(sram_data->width - 1, 0);
+
+ for (func = sram_data->func; func->func; func++) {
+ seq_printf(s, "\t\t%s%c\n", func->func,
+ func->reg_val == val ?
+ '*' : ' ');
+ }
+ }
+
+ seq_puts(s, "\n");
+ }
+
+ return 0;
+}
+
+static int sunxi_sram_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, sunxi_sram_show, inode->i_private);
+}
+
+static const struct file_operations sunxi_sram_fops = {
+ .open = sunxi_sram_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static inline struct sunxi_sram_desc *to_sram_desc(const struct sunxi_sram_data *data)
+{
+ return container_of(data, struct sunxi_sram_desc, data);
+}
+
+static const struct sunxi_sram_data *sunxi_sram_of_parse(struct device_node *node,
+ unsigned int *reg_value)
+{
+ const struct of_device_id *match;
+ const struct sunxi_sram_data *data;
+ struct sunxi_sram_func *func;
+ struct of_phandle_args args;
+ u8 val;
+ int ret;
+
+ ret = of_parse_phandle_with_fixed_args(node, "allwinner,sram", 1, 0,
+ &args);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (!of_device_is_available(args.np)) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ val = args.args[0];
+
+ match = of_match_node(sunxi_sram_dt_ids, args.np);
+ if (!match) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ data = match->data;
+ if (!data) {
+ ret = -EINVAL;
+ goto err;
+	}
+
+ for (func = data->func; func->func; func++) {
+ if (val == func->val) {
+ if (reg_value)
+ *reg_value = func->reg_val;
+
+ break;
+ }
+ }
+
+ if (!func->func) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ of_node_put(args.np);
+ return match->data;
+
+err:
+ of_node_put(args.np);
+ return ERR_PTR(ret);
+}
+
+int sunxi_sram_claim(struct device *dev)
+{
+ const struct sunxi_sram_data *sram_data;
+ struct sunxi_sram_desc *sram_desc;
+ unsigned int device;
+ u32 val, mask;
+
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ if (!base)
+ return -EPROBE_DEFER;
+
+ if (!dev || !dev->of_node)
+ return -EINVAL;
+
+ sram_data = sunxi_sram_of_parse(dev->of_node, &device);
+ if (IS_ERR(sram_data))
+ return PTR_ERR(sram_data);
+
+ sram_desc = to_sram_desc(sram_data);
+
+ spin_lock(&sram_lock);
+
+ if (sram_desc->claimed) {
+ spin_unlock(&sram_lock);
+ return -EBUSY;
+ }
+
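+	/* read-modify-write only this section's mux field; other sections keep their routing */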
+ mask = GENMASK(sram_data->offset + sram_data->width - 1,
+ sram_data->offset);
+ val = readl(base + sram_data->reg);
+ val &= ~mask;
+ writel(val | ((device << sram_data->offset) & mask),
+ base + sram_data->reg);
+
+ spin_unlock(&sram_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(sunxi_sram_claim);
+
+int sunxi_sram_release(struct device *dev)
+{
+ const struct sunxi_sram_data *sram_data;
+ struct sunxi_sram_desc *sram_desc;
+
+ if (!dev || !dev->of_node)
+ return -EINVAL;
+
+ sram_data = sunxi_sram_of_parse(dev->of_node, NULL);
+ if (IS_ERR(sram_data))
+ return -EINVAL;
+
+ sram_desc = to_sram_desc(sram_data);
+
+ spin_lock(&sram_lock);
+ sram_desc->claimed = false;
+ spin_unlock(&sram_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(sunxi_sram_release);
+
+struct sunxi_sramc_variant {
+ bool has_emac_clock;
+};
+
+static const struct sunxi_sramc_variant sun4i_a10_sramc_variant = {
+ /* Nothing special */
+};
+
+static const struct sunxi_sramc_variant sun50i_a64_sramc_variant = {
+ .has_emac_clock = true,
+};
+
+#define SUNXI_SRAM_EMAC_CLOCK_REG 0x30
+static bool sunxi_sram_regmap_accessible_reg(struct device *dev,
+ unsigned int reg)
+{
+	return reg == SUNXI_SRAM_EMAC_CLOCK_REG;
+}
+
+static struct regmap_config sunxi_sram_emac_clock_regmap = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ /* last defined register */
+ .max_register = SUNXI_SRAM_EMAC_CLOCK_REG,
+ /* other devices have no business accessing other registers */
+ .readable_reg = sunxi_sram_regmap_accessible_reg,
+ .writeable_reg = sunxi_sram_regmap_accessible_reg,
+};
+
+static int sunxi_sram_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct dentry *d;
+ struct regmap *emac_clock;
+ const struct sunxi_sramc_variant *variant;
+
+ sram_dev = &pdev->dev;
+
+ variant = of_device_get_match_data(&pdev->dev);
+ if (!variant)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+
+ d = debugfs_create_file("sram", S_IRUGO, NULL, NULL,
+ &sunxi_sram_fops);
+ if (!d)
+ return -ENOMEM;
+
+ if (variant->has_emac_clock) {
+ emac_clock = devm_regmap_init_mmio(&pdev->dev, base,
+ &sunxi_sram_emac_clock_regmap);
+
+ if (IS_ERR(emac_clock))
+ return PTR_ERR(emac_clock);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id sunxi_sram_dt_match[] = {
+ {
+ .compatible = "allwinner,sun4i-a10-sram-controller",
+ .data = &sun4i_a10_sramc_variant,
+ },
+ {
+ .compatible = "allwinner,sun4i-a10-system-control",
+ .data = &sun4i_a10_sramc_variant,
+ },
+ {
+ .compatible = "allwinner,sun5i-a13-system-control",
+ .data = &sun4i_a10_sramc_variant,
+ },
+ {
+ .compatible = "allwinner,sun8i-a23-system-control",
+ .data = &sun4i_a10_sramc_variant,
+ },
+ {
+ .compatible = "allwinner,sun8i-h3-system-control",
+ .data = &sun4i_a10_sramc_variant,
+ },
+ {
+ .compatible = "allwinner,sun50i-a64-sram-controller",
+ .data = &sun50i_a64_sramc_variant,
+ },
+ {
+ .compatible = "allwinner,sun50i-a64-system-control",
+ .data = &sun50i_a64_sramc_variant,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sunxi_sram_dt_match);
+
+static struct platform_driver sunxi_sram_driver = {
+ .driver = {
+ .name = "sunxi-sram",
+ .of_match_table = sunxi_sram_dt_match,
+ },
+ .probe = sunxi_sram_probe,
+};
+module_platform_driver(sunxi_sram_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner sunXi SRAM Controller Driver");
+MODULE_LICENSE("GPL");
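
A consumer names its section with an "allwinner,sram" phandle plus one cell
holding the function index (the .val column of the SUNXI_SRAM_MAP entries) and
claims it at probe time. A sketch, with the driver itself hypothetical:

	#include <linux/platform_device.h>
	#include <linux/soc/sunxi/sunxi_sram.h>

	static int foo_emac_probe(struct platform_device *pdev)
	{
		int ret;

		/*
		 * Routes the referenced section to this device; returns
		 * -EPROBE_DEFER until the controller has probed and
		 * -EBUSY if another device already holds the section.
		 */
		ret = sunxi_sram_claim(&pdev->dev);
		if (ret)
			return ret;

		/* ... device setup ... */
		return 0;
	}

	static int foo_emac_remove(struct platform_device *pdev)
	{
		sunxi_sram_release(&pdev->dev);
		return 0;
	}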
diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig
new file mode 100644
index 000000000..fe4481676
--- /dev/null
+++ b/drivers/soc/tegra/Kconfig
@@ -0,0 +1,134 @@
+if ARCH_TEGRA
+
+# 32-bit ARM SoCs
+if ARM
+
+config ARCH_TEGRA_2x_SOC
+ bool "Enable support for Tegra20 family"
+ select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP
+ select ARM_ERRATA_720789
+ select ARM_ERRATA_754327 if SMP
+ select ARM_ERRATA_764369 if SMP
+ select PINCTRL_TEGRA20
+ select PL310_ERRATA_727915 if CACHE_L2X0
+ select PL310_ERRATA_769419 if CACHE_L2X0
+ select SOC_TEGRA_FLOWCTRL
+ select SOC_TEGRA_PMC
+ select TEGRA_TIMER
+ help
+ Support for NVIDIA Tegra AP20 and T20 processors, based on the
+	  ARM Cortex-A9 MPCore CPU and the ARM PL310 L2 cache controller.
+
+config ARCH_TEGRA_3x_SOC
+ bool "Enable support for Tegra30 family"
+ select ARM_ERRATA_754322
+ select ARM_ERRATA_764369 if SMP
+ select PINCTRL_TEGRA30
+ select PL310_ERRATA_769419 if CACHE_L2X0
+ select SOC_TEGRA_FLOWCTRL
+ select SOC_TEGRA_PMC
+ select TEGRA_TIMER
+ help
+ Support for NVIDIA Tegra T30 processor family, based on the
+	  ARM Cortex-A9 MPCore CPU and the ARM PL310 L2 cache controller.
+
+config ARCH_TEGRA_114_SOC
+ bool "Enable support for Tegra114 family"
+ select ARM_ERRATA_798181 if SMP
+ select HAVE_ARM_ARCH_TIMER
+ select PINCTRL_TEGRA114
+ select SOC_TEGRA_FLOWCTRL
+ select SOC_TEGRA_PMC
+ select TEGRA_TIMER
+ help
+ Support for NVIDIA Tegra T114 processor family, based on the
+	  ARM Cortex-A15 MPCore CPU.
+
+config ARCH_TEGRA_124_SOC
+ bool "Enable support for Tegra124 family"
+ select HAVE_ARM_ARCH_TIMER
+ select PINCTRL_TEGRA124
+ select SOC_TEGRA_FLOWCTRL
+ select SOC_TEGRA_PMC
+ select TEGRA_TIMER
+ help
+ Support for NVIDIA Tegra T124 processor family, based on the
+	  ARM Cortex-A15 MPCore CPU.
+
+endif
+
+# 64-bit ARM SoCs
+if ARM64
+
+config ARCH_TEGRA_132_SOC
+ bool "NVIDIA Tegra132 SoC"
+ select PINCTRL_TEGRA124
+ select SOC_TEGRA_FLOWCTRL
+ select SOC_TEGRA_PMC
+ help
+ Enable support for NVIDIA Tegra132 SoC, based on the Denver
+ ARMv8 CPU. The Tegra132 SoC is similar to the Tegra124 SoC,
+ but contains an NVIDIA Denver CPU complex in place of
+ Tegra124's "4+1" Cortex-A15 CPU complex.
+
+config ARCH_TEGRA_210_SOC
+ bool "NVIDIA Tegra210 SoC"
+ select PINCTRL_TEGRA210
+ select SOC_TEGRA_FLOWCTRL
+ select SOC_TEGRA_PMC
+ help
+ Enable support for the NVIDIA Tegra210 SoC. Also known as Tegra X1,
+ the Tegra210 has four Cortex-A57 cores paired with four Cortex-A53
+ cores in a switched configuration. It features a GPU of the Maxwell
+ architecture with support for DX11, SM4, OpenGL 4.5, OpenGL ES 3.1
+	  and providing 256 CUDA cores. It supports hardware-accelerated
+	  encoding and decoding of various video standards, including
+	  H.265, H.264 and VP8 at 4K resolution and up to 60 fps.
+
+ Besides the multimedia features it also comes with a variety of I/O
+ controllers, such as GPIO, I2C, SPI, SDHCI, PCIe, SATA and XHCI, to
+ name only a few.
+
+config ARCH_TEGRA_186_SOC
+ bool "NVIDIA Tegra186 SoC"
+ select MAILBOX
+ select TEGRA_BPMP
+ select TEGRA_HSP_MBOX
+ select TEGRA_IVC
+ select SOC_TEGRA_PMC
+ help
+	  Enable support for the NVIDIA Tegra186 SoC. The Tegra186 features a
+ combination of Denver and Cortex-A57 CPU cores and a GPU based on
+ the Pascal architecture. It contains an ADSP with a Cortex-A9 CPU
+ used for audio processing, hardware video encoders/decoders with
+ multi-format support, ISP for image capture processing and BPMP for
+ power management.
+
+config ARCH_TEGRA_194_SOC
+ bool "NVIDIA Tegra194 SoC"
+ select MAILBOX
+ select TEGRA_BPMP
+ select TEGRA_HSP_MBOX
+ select TEGRA_IVC
+ select SOC_TEGRA_PMC
+ help
+ Enable support for the NVIDIA Tegra194 SoC.
+
+endif
+endif
+
+config SOC_TEGRA_FUSE
+ def_bool y
+ depends on ARCH_TEGRA
+ select SOC_BUS
+
+config SOC_TEGRA_FLOWCTRL
+ bool
+
+config SOC_TEGRA_PMC
+ bool
+
+config SOC_TEGRA_POWERGATE_BPMP
+ def_bool y
+ depends on PM_GENERIC_DOMAINS
+ depends on TEGRA_BPMP
diff --git a/drivers/soc/tegra/Makefile b/drivers/soc/tegra/Makefile
new file mode 100644
index 000000000..902759fe5
--- /dev/null
+++ b/drivers/soc/tegra/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-y += fuse/
+
+obj-y += common.o
+obj-$(CONFIG_SOC_TEGRA_FLOWCTRL) += flowctrl.o
+obj-$(CONFIG_SOC_TEGRA_PMC) += pmc.o
+obj-$(CONFIG_SOC_TEGRA_POWERGATE_BPMP) += powergate-bpmp.o
diff --git a/drivers/soc/tegra/common.c b/drivers/soc/tegra/common.c
new file mode 100644
index 000000000..7bfb154d6
--- /dev/null
+++ b/drivers/soc/tegra/common.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2014 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/of.h>
+
+#include <soc/tegra/common.h>
+
+static const struct of_device_id tegra_machine_match[] = {
+ { .compatible = "nvidia,tegra20", },
+ { .compatible = "nvidia,tegra30", },
+ { .compatible = "nvidia,tegra114", },
+ { .compatible = "nvidia,tegra124", },
+ { .compatible = "nvidia,tegra132", },
+ { .compatible = "nvidia,tegra210", },
+ { }
+};
+
+bool soc_is_tegra(void)
+{
+ const struct of_device_id *match;
+ struct device_node *root;
+
+ root = of_find_node_by_path("/");
+ if (!root)
+ return false;
+
+ match = of_match_node(tegra_machine_match, root);
+ of_node_put(root);
+
+ return match != NULL;
+}
diff --git a/drivers/soc/tegra/flowctrl.c b/drivers/soc/tegra/flowctrl.c
new file mode 100644
index 000000000..5433cc7a0
--- /dev/null
+++ b/drivers/soc/tegra/flowctrl.c
@@ -0,0 +1,224 @@
+/*
+ * drivers/soc/tegra/flowctrl.c
+ *
+ * Functions and macros to control the flowcontroller
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+#include <soc/tegra/common.h>
+#include <soc/tegra/flowctrl.h>
+#include <soc/tegra/fuse.h>
+
+static u8 flowctrl_offset_halt_cpu[] = {
+ FLOW_CTRL_HALT_CPU0_EVENTS,
+ FLOW_CTRL_HALT_CPU1_EVENTS,
+ FLOW_CTRL_HALT_CPU1_EVENTS + 8,
+ FLOW_CTRL_HALT_CPU1_EVENTS + 16,
+};
+
+static u8 flowctrl_offset_cpu_csr[] = {
+ FLOW_CTRL_CPU0_CSR,
+ FLOW_CTRL_CPU1_CSR,
+ FLOW_CTRL_CPU1_CSR + 8,
+ FLOW_CTRL_CPU1_CSR + 16,
+};
+
+static void __iomem *tegra_flowctrl_base;
+
+static void flowctrl_update(u8 offset, u32 value)
+{
+ if (WARN_ONCE(IS_ERR_OR_NULL(tegra_flowctrl_base),
+ "Tegra flowctrl not initialised!\n"))
+ return;
+
+ writel(value, tegra_flowctrl_base + offset);
+
+ /* ensure the update has reached the flow controller */
+ wmb();
+ readl_relaxed(tegra_flowctrl_base + offset);
+}
+
+u32 flowctrl_read_cpu_csr(unsigned int cpuid)
+{
+ u8 offset = flowctrl_offset_cpu_csr[cpuid];
+
+ if (WARN_ONCE(IS_ERR_OR_NULL(tegra_flowctrl_base),
+ "Tegra flowctrl not initialised!\n"))
+ return 0;
+
+ return readl(tegra_flowctrl_base + offset);
+}
+
+void flowctrl_write_cpu_csr(unsigned int cpuid, u32 value)
+{
+ return flowctrl_update(flowctrl_offset_cpu_csr[cpuid], value);
+}
+
+void flowctrl_write_cpu_halt(unsigned int cpuid, u32 value)
+{
+ return flowctrl_update(flowctrl_offset_halt_cpu[cpuid], value);
+}
+
+void flowctrl_cpu_suspend_enter(unsigned int cpuid)
+{
+ unsigned int reg;
+ int i;
+
+ reg = flowctrl_read_cpu_csr(cpuid);
+ switch (tegra_get_chip_id()) {
+ case TEGRA20:
+ /* clear wfe bitmap */
+ reg &= ~TEGRA20_FLOW_CTRL_CSR_WFE_BITMAP;
+ /* clear wfi bitmap */
+ reg &= ~TEGRA20_FLOW_CTRL_CSR_WFI_BITMAP;
+ /* pwr gating on wfe */
+ reg |= TEGRA20_FLOW_CTRL_CSR_WFE_CPU0 << cpuid;
+ break;
+ case TEGRA30:
+ case TEGRA114:
+ case TEGRA124:
+ /* clear wfe bitmap */
+ reg &= ~TEGRA30_FLOW_CTRL_CSR_WFE_BITMAP;
+ /* clear wfi bitmap */
+ reg &= ~TEGRA30_FLOW_CTRL_CSR_WFI_BITMAP;
+ /* pwr gating on wfi */
+ reg |= TEGRA30_FLOW_CTRL_CSR_WFI_CPU0 << cpuid;
+ break;
+ }
+ reg |= FLOW_CTRL_CSR_INTR_FLAG; /* clear intr flag */
+ reg |= FLOW_CTRL_CSR_EVENT_FLAG; /* clear event flag */
+ reg |= FLOW_CTRL_CSR_ENABLE; /* pwr gating */
+ flowctrl_write_cpu_csr(cpuid, reg);
+
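+	/* the EVENT/INTR flag bits are write-one-to-clear: scrub any stale flags on the other CPUs */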
+ for (i = 0; i < num_possible_cpus(); i++) {
+ if (i == cpuid)
+ continue;
+ reg = flowctrl_read_cpu_csr(i);
+ reg |= FLOW_CTRL_CSR_EVENT_FLAG;
+ reg |= FLOW_CTRL_CSR_INTR_FLAG;
+ flowctrl_write_cpu_csr(i, reg);
+ }
+}
+
+void flowctrl_cpu_suspend_exit(unsigned int cpuid)
+{
+ unsigned int reg;
+
+ /* Disable powergating via flow controller for CPU0 */
+ reg = flowctrl_read_cpu_csr(cpuid);
+ switch (tegra_get_chip_id()) {
+ case TEGRA20:
+ /* clear wfe bitmap */
+ reg &= ~TEGRA20_FLOW_CTRL_CSR_WFE_BITMAP;
+ /* clear wfi bitmap */
+ reg &= ~TEGRA20_FLOW_CTRL_CSR_WFI_BITMAP;
+ break;
+ case TEGRA30:
+ case TEGRA114:
+ case TEGRA124:
+ /* clear wfe bitmap */
+ reg &= ~TEGRA30_FLOW_CTRL_CSR_WFE_BITMAP;
+ /* clear wfi bitmap */
+ reg &= ~TEGRA30_FLOW_CTRL_CSR_WFI_BITMAP;
+ break;
+ }
+ reg &= ~FLOW_CTRL_CSR_ENABLE; /* clear enable */
+ reg |= FLOW_CTRL_CSR_INTR_FLAG; /* clear intr */
+ reg |= FLOW_CTRL_CSR_EVENT_FLAG; /* clear event */
+ flowctrl_write_cpu_csr(cpuid, reg);
+}
+
+static int tegra_flowctrl_probe(struct platform_device *pdev)
+{
+ void __iomem *base = tegra_flowctrl_base;
+ struct resource *res;
+
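+	/* take over the memory region from the early initialization (tegra_flowctrl_init below) */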
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ tegra_flowctrl_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(tegra_flowctrl_base))
+ return PTR_ERR(tegra_flowctrl_base);
+
+ iounmap(base);
+
+ return 0;
+}
+
+static const struct of_device_id tegra_flowctrl_match[] = {
+ { .compatible = "nvidia,tegra210-flowctrl" },
+ { .compatible = "nvidia,tegra124-flowctrl" },
+ { .compatible = "nvidia,tegra114-flowctrl" },
+ { .compatible = "nvidia,tegra30-flowctrl" },
+ { .compatible = "nvidia,tegra20-flowctrl" },
+ { }
+};
+
+static struct platform_driver tegra_flowctrl_driver = {
+ .driver = {
+ .name = "tegra-flowctrl",
+ .suppress_bind_attrs = true,
+ .of_match_table = tegra_flowctrl_match,
+ },
+ .probe = tegra_flowctrl_probe,
+};
+builtin_platform_driver(tegra_flowctrl_driver);
+
+static int __init tegra_flowctrl_init(void)
+{
+ struct resource res;
+ struct device_node *np;
+
+ if (!soc_is_tegra())
+ return 0;
+
+ np = of_find_matching_node(NULL, tegra_flowctrl_match);
+ if (np) {
+ if (of_address_to_resource(np, 0, &res) < 0) {
+ pr_err("failed to get flowctrl register\n");
+ return -ENXIO;
+ }
+ of_node_put(np);
+ } else if (IS_ENABLED(CONFIG_ARM)) {
+ /*
+ * Hardcoded fallback for 32-bit Tegra
+ * devices if device tree node is missing.
+ */
+ res.start = 0x60007000;
+ res.end = 0x60007fff;
+ res.flags = IORESOURCE_MEM;
+ } else {
+ /*
+		 * At this point we're running on a Tegra that doesn't
+		 * support the flow controller (e.g. Tegra186), so just
+		 * return.
+ */
+ return 0;
+ }
+
+ tegra_flowctrl_base = ioremap_nocache(res.start, resource_size(&res));
+ if (!tegra_flowctrl_base)
+ return -ENXIO;
+
+ return 0;
+}
+early_initcall(tegra_flowctrl_init);
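
These accessors are the base of the Tegra SMP/hotplug and cpuidle paths. A
sketch of CPU power-gating in the style of the 32-bit hotplug code, assuming
FLOW_CTRL_WAITEVENT from <soc/tegra/flowctrl.h> alongside the CSR bits used
above:

	static void tegra_cpu_die_sketch(unsigned int cpu)
	{
		/* scrub stale flags and arm power-gating in the CPU's CSR */
		flowctrl_write_cpu_csr(cpu, FLOW_CTRL_CSR_INTR_FLAG |
					    FLOW_CTRL_CSR_EVENT_FLAG |
					    FLOW_CTRL_CSR_ENABLE);

		/* park the CPU on an event so the flow controller gates it */
		flowctrl_write_cpu_halt(cpu, FLOW_CTRL_WAITEVENT);
	}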
diff --git a/drivers/soc/tegra/fuse/Makefile b/drivers/soc/tegra/fuse/Makefile
new file mode 100644
index 000000000..ea8332cc3
--- /dev/null
+++ b/drivers/soc/tegra/fuse/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-y += fuse-tegra.o
+obj-y += fuse-tegra30.o
+obj-y += tegra-apbmisc.o
+obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += fuse-tegra20.o
+obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += speedo-tegra20.o
+obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += speedo-tegra30.o
+obj-$(CONFIG_ARCH_TEGRA_114_SOC) += speedo-tegra114.o
+obj-$(CONFIG_ARCH_TEGRA_124_SOC) += speedo-tegra124.o
+obj-$(CONFIG_ARCH_TEGRA_132_SOC) += speedo-tegra124.o
+obj-$(CONFIG_ARCH_TEGRA_210_SOC) += speedo-tegra210.o
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
new file mode 100644
index 000000000..52130ec8c
--- /dev/null
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -0,0 +1,378 @@
+/*
+ * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/kobject.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+
+#include <soc/tegra/common.h>
+#include <soc/tegra/fuse.h>
+
+#include "fuse.h"
+
+struct tegra_sku_info tegra_sku_info;
+EXPORT_SYMBOL(tegra_sku_info);
+
+static const char *tegra_revision_name[TEGRA_REVISION_MAX] = {
+ [TEGRA_REVISION_UNKNOWN] = "unknown",
+ [TEGRA_REVISION_A01] = "A01",
+ [TEGRA_REVISION_A02] = "A02",
+ [TEGRA_REVISION_A03] = "A03",
+ [TEGRA_REVISION_A03p] = "A03 prime",
+ [TEGRA_REVISION_A04] = "A04",
+};
+
+static u8 fuse_readb(struct tegra_fuse *fuse, unsigned int offset)
+{
+ u32 val;
+
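+	/*
+	 * The fuse array is word-addressable only: read the aligned
+	 * 32-bit word, then pick out the byte. E.g. offset 6 reads
+	 * the word at offset 4 and keeps bits 23:16.
+	 */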
+ val = fuse->read(fuse, round_down(offset, 4));
+ val >>= (offset % 4) * 8;
+ val &= 0xff;
+
+ return val;
+}
+
+static ssize_t fuse_read(struct file *fd, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t pos, size_t size)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct tegra_fuse *fuse = dev_get_drvdata(dev);
+ int i;
+
+ if (pos < 0 || pos >= attr->size)
+ return 0;
+
+ if (size > attr->size - pos)
+ size = attr->size - pos;
+
+ for (i = 0; i < size; i++)
+ buf[i] = fuse_readb(fuse, pos + i);
+
+ return i;
+}
+
+static struct bin_attribute fuse_bin_attr = {
+ .attr = { .name = "fuse", .mode = S_IRUGO, },
+ .read = fuse_read,
+};
+
+static int tegra_fuse_create_sysfs(struct device *dev, unsigned int size,
+ const struct tegra_fuse_info *info)
+{
+ fuse_bin_attr.size = size;
+
+ return device_create_bin_file(dev, &fuse_bin_attr);
+}
+
+static const struct of_device_id car_match[] __initconst = {
+ { .compatible = "nvidia,tegra20-car", },
+ { .compatible = "nvidia,tegra30-car", },
+ { .compatible = "nvidia,tegra114-car", },
+ { .compatible = "nvidia,tegra124-car", },
+ { .compatible = "nvidia,tegra132-car", },
+ { .compatible = "nvidia,tegra210-car", },
+ {},
+};
+
+static struct tegra_fuse *fuse = &(struct tegra_fuse) {
+ .base = NULL,
+ .soc = NULL,
+};
+
+static const struct of_device_id tegra_fuse_match[] = {
+#ifdef CONFIG_ARCH_TEGRA_186_SOC
+ { .compatible = "nvidia,tegra186-efuse", .data = &tegra186_fuse_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_210_SOC
+ { .compatible = "nvidia,tegra210-efuse", .data = &tegra210_fuse_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_132_SOC
+ { .compatible = "nvidia,tegra132-efuse", .data = &tegra124_fuse_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_124_SOC
+ { .compatible = "nvidia,tegra124-efuse", .data = &tegra124_fuse_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_114_SOC
+ { .compatible = "nvidia,tegra114-efuse", .data = &tegra114_fuse_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+ { .compatible = "nvidia,tegra30-efuse", .data = &tegra30_fuse_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ { .compatible = "nvidia,tegra20-efuse", .data = &tegra20_fuse_soc },
+#endif
+ { /* sentinel */ }
+};
+
+static int tegra_fuse_probe(struct platform_device *pdev)
+{
+ void __iomem *base = fuse->base;
+ struct resource *res;
+ int err;
+
+ /* take over the memory region from the early initialization */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ fuse->phys = res->start;
+ fuse->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(fuse->base)) {
+ err = PTR_ERR(fuse->base);
+ fuse->base = base;
+ return err;
+ }
+
+ fuse->clk = devm_clk_get(&pdev->dev, "fuse");
+ if (IS_ERR(fuse->clk)) {
+ dev_err(&pdev->dev, "failed to get FUSE clock: %ld",
+ PTR_ERR(fuse->clk));
+ fuse->base = base;
+ return PTR_ERR(fuse->clk);
+ }
+
+ platform_set_drvdata(pdev, fuse);
+ fuse->dev = &pdev->dev;
+
+ if (fuse->soc->probe) {
+ err = fuse->soc->probe(fuse);
+ if (err < 0) {
+ fuse->base = base;
+ return err;
+ }
+ }
+
+ if (tegra_fuse_create_sysfs(&pdev->dev, fuse->soc->info->size,
+ fuse->soc->info))
+ return -ENODEV;
+
+ /* release the early I/O memory mapping */
+ iounmap(base);
+
+ return 0;
+}
+
+static struct platform_driver tegra_fuse_driver = {
+ .driver = {
+ .name = "tegra-fuse",
+ .of_match_table = tegra_fuse_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = tegra_fuse_probe,
+};
+builtin_platform_driver(tegra_fuse_driver);
+
+u32 __init tegra_fuse_read_spare(unsigned int spare)
+{
+ unsigned int offset = fuse->soc->info->spare + spare * 4;
+
+ return fuse->read_early(fuse, offset) & 1;
+}
+
+u32 __init tegra_fuse_read_early(unsigned int offset)
+{
+ return fuse->read_early(fuse, offset);
+}
+
+int tegra_fuse_readl(unsigned long offset, u32 *value)
+{
+ if (!fuse->read)
+ return -EPROBE_DEFER;
+
+ *value = fuse->read(fuse, offset);
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_fuse_readl);
+
+static void tegra_enable_fuse_clk(void __iomem *base)
+{
+ u32 reg;
+
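+	/* bit 28 here is assumed to be the CAR's fuse-visibility bit, exposing the fuse registers on the bus */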
+ reg = readl_relaxed(base + 0x48);
+ reg |= 1 << 28;
+ writel(reg, base + 0x48);
+
+ /*
+ * Enable FUSE clock. This needs to be hardcoded because the clock
+ * subsystem is not active during early boot.
+ */
+ reg = readl(base + 0x14);
+ reg |= 1 << 7;
+ writel(reg, base + 0x14);
+}
+
+struct device * __init tegra_soc_device_register(void)
+{
+ struct soc_device_attribute *attr;
+ struct soc_device *dev;
+
+ attr = kzalloc(sizeof(*attr), GFP_KERNEL);
+ if (!attr)
+ return NULL;
+
+ attr->family = kasprintf(GFP_KERNEL, "Tegra");
+ attr->revision = kasprintf(GFP_KERNEL, "%d", tegra_sku_info.revision);
+ attr->soc_id = kasprintf(GFP_KERNEL, "%u", tegra_get_chip_id());
+
+ dev = soc_device_register(attr);
+ if (IS_ERR(dev)) {
+ kfree(attr->soc_id);
+ kfree(attr->revision);
+ kfree(attr->family);
+ kfree(attr);
+ return ERR_CAST(dev);
+ }
+
+ return soc_device_to_device(dev);
+}
+
+static int __init tegra_init_fuse(void)
+{
+ const struct of_device_id *match;
+ struct device_node *np;
+ struct resource regs;
+
+ tegra_init_apbmisc();
+
+ np = of_find_matching_node_and_match(NULL, tegra_fuse_match, &match);
+ if (!np) {
+ /*
+		 * Fall back to legacy initialization for 32-bit ARM only,
+		 * for backwards-compatibility with old device trees that
+		 * didn't contain a FUSE node. All 64-bit ARM device tree
+		 * files for Tegra are required to have one.
+ */
+ if (IS_ENABLED(CONFIG_ARM) && soc_is_tegra()) {
+ u8 chip = tegra_get_chip_id();
+
+ regs.start = 0x7000f800;
+ regs.end = 0x7000fbff;
+ regs.flags = IORESOURCE_MEM;
+
+ switch (chip) {
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ case TEGRA20:
+ fuse->soc = &tegra20_fuse_soc;
+ break;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+ case TEGRA30:
+ fuse->soc = &tegra30_fuse_soc;
+ break;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_114_SOC
+ case TEGRA114:
+ fuse->soc = &tegra114_fuse_soc;
+ break;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_124_SOC
+ case TEGRA124:
+ fuse->soc = &tegra124_fuse_soc;
+ break;
+#endif
+
+ default:
+ pr_warn("Unsupported SoC: %02x\n", chip);
+ break;
+ }
+ } else {
+ /*
+ * At this point we're not running on Tegra, so play
+ * nice with multi-platform kernels.
+ */
+ return 0;
+ }
+ } else {
+ /*
+ * Extract information from the device tree if we've found a
+ * matching node.
+ */
+ if (of_address_to_resource(np, 0, &regs) < 0) {
+ pr_err("failed to get FUSE register\n");
+ return -ENXIO;
+ }
+
+ fuse->soc = match->data;
+ }
+
+ np = of_find_matching_node(NULL, car_match);
+ if (np) {
+ void __iomem *base = of_iomap(np, 0);
+ if (base) {
+ tegra_enable_fuse_clk(base);
+ iounmap(base);
+ } else {
+ pr_err("failed to map clock registers\n");
+ return -ENXIO;
+ }
+ }
+
+ fuse->base = ioremap_nocache(regs.start, resource_size(&regs));
+ if (!fuse->base) {
+ pr_err("failed to map FUSE registers\n");
+ return -ENXIO;
+ }
+
+ fuse->soc->init(fuse);
+
+ pr_info("Tegra Revision: %s SKU: %d CPU Process: %d SoC Process: %d\n",
+ tegra_revision_name[tegra_sku_info.revision],
+ tegra_sku_info.sku_id, tegra_sku_info.cpu_process_id,
+ tegra_sku_info.soc_process_id);
+ pr_debug("Tegra CPU Speedo ID %d, SoC Speedo ID %d\n",
+ tegra_sku_info.cpu_speedo_id, tegra_sku_info.soc_speedo_id);
+
+ return 0;
+}
+early_initcall(tegra_init_fuse);
+
+#ifdef CONFIG_ARM64
+static int __init tegra_init_soc(void)
+{
+ struct device_node *np;
+ struct device *soc;
+
+ /* make sure we're running on Tegra */
+ np = of_find_matching_node(NULL, tegra_fuse_match);
+ if (!np)
+ return 0;
+
+ of_node_put(np);
+
+ soc = tegra_soc_device_register();
+ if (IS_ERR(soc)) {
+ pr_err("failed to register SoC device: %ld\n", PTR_ERR(soc));
+ return PTR_ERR(soc);
+ }
+
+ return 0;
+}
+device_initcall(tegra_init_soc);
+#endif
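
tegra_fuse_readl() is the stable consumer-facing API: offsets are relative to
the fuse region and callers are expected to retry on -EPROBE_DEFER. A sketch
with a hypothetical offset name:

	#include <linux/device.h>
	#include <soc/tegra/fuse.h>

	#define FOO_FUSE_CALIB	0x1f4	/* hypothetical offset */

	static int foo_read_calib(struct device *dev, u32 *calib)
	{
		int err;

		/* defers until the fuse driver has installed fuse->read */
		err = tegra_fuse_readl(FOO_FUSE_CALIB, calib);
		if (err)
			return err;

		dev_dbg(dev, "calibration fuse: %08x\n", *calib);
		return 0;
	}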
diff --git a/drivers/soc/tegra/fuse/fuse-tegra20.c b/drivers/soc/tegra/fuse/fuse-tegra20.c
new file mode 100644
index 000000000..49ff017f3
--- /dev/null
+++ b/drivers/soc/tegra/fuse/fuse-tegra20.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Based on drivers/misc/eeprom/sunxi_sid.c
+ */
+
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+
+#include <soc/tegra/fuse.h>
+
+#include "fuse.h"
+
+#define FUSE_BEGIN 0x100
+#define FUSE_UID_LOW 0x08
+#define FUSE_UID_HIGH 0x0c
+
+static u32 tegra20_fuse_read_early(struct tegra_fuse *fuse, unsigned int offset)
+{
+ return readl_relaxed(fuse->base + FUSE_BEGIN + offset);
+}
+
+static void apb_dma_complete(void *args)
+{
+ struct tegra_fuse *fuse = args;
+
+ complete(&fuse->apbdma.wait);
+}
+
+static u32 tegra20_fuse_read(struct tegra_fuse *fuse, unsigned int offset)
+{
+ unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+ struct dma_async_tx_descriptor *dma_desc;
+ unsigned long time_left;
+ u32 value = 0;
+ int err;
+
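+	/* on Tegra20 the fuse block cannot be read reliably by the CPU, so each read is a one-word APB DMA transfer */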
+ mutex_lock(&fuse->apbdma.lock);
+
+ fuse->apbdma.config.src_addr = fuse->phys + FUSE_BEGIN + offset;
+
+ err = dmaengine_slave_config(fuse->apbdma.chan, &fuse->apbdma.config);
+ if (err)
+ goto out;
+
+ dma_desc = dmaengine_prep_slave_single(fuse->apbdma.chan,
+ fuse->apbdma.phys,
+ sizeof(u32), DMA_DEV_TO_MEM,
+ flags);
+ if (!dma_desc)
+ goto out;
+
+ dma_desc->callback = apb_dma_complete;
+ dma_desc->callback_param = fuse;
+
+ reinit_completion(&fuse->apbdma.wait);
+
+ clk_prepare_enable(fuse->clk);
+
+ dmaengine_submit(dma_desc);
+ dma_async_issue_pending(fuse->apbdma.chan);
+ time_left = wait_for_completion_timeout(&fuse->apbdma.wait,
+ msecs_to_jiffies(50));
+
+ if (WARN(time_left == 0, "apb read dma timed out"))
+ dmaengine_terminate_all(fuse->apbdma.chan);
+ else
+ value = *fuse->apbdma.virt;
+
+ clk_disable_unprepare(fuse->clk);
+
+out:
+ mutex_unlock(&fuse->apbdma.lock);
+ return value;
+}
+
+static bool dma_filter(struct dma_chan *chan, void *filter_param)
+{
+ struct device_node *np = chan->device->dev->of_node;
+
+ return of_device_is_compatible(np, "nvidia,tegra20-apbdma");
+}
+
+static int tegra20_fuse_probe(struct tegra_fuse *fuse)
+{
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ fuse->apbdma.chan = __dma_request_channel(&mask, dma_filter, NULL);
+ if (!fuse->apbdma.chan)
+ return -EPROBE_DEFER;
+
+ fuse->apbdma.virt = dma_alloc_coherent(fuse->dev, sizeof(u32),
+ &fuse->apbdma.phys,
+ GFP_KERNEL);
+ if (!fuse->apbdma.virt) {
+ dma_release_channel(fuse->apbdma.chan);
+ return -ENOMEM;
+ }
+
+ fuse->apbdma.config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ fuse->apbdma.config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ fuse->apbdma.config.src_maxburst = 1;
+ fuse->apbdma.config.dst_maxburst = 1;
+ fuse->apbdma.config.direction = DMA_DEV_TO_MEM;
+ fuse->apbdma.config.device_fc = false;
+
+ init_completion(&fuse->apbdma.wait);
+ mutex_init(&fuse->apbdma.lock);
+ fuse->read = tegra20_fuse_read;
+
+ return 0;
+}
+
+static const struct tegra_fuse_info tegra20_fuse_info = {
+ .read = tegra20_fuse_read,
+ .size = 0x1f8,
+ .spare = 0x100,
+};
+
+/* Early boot code. This code is called before the devices are created */
+
+static void __init tegra20_fuse_add_randomness(void)
+{
+ u32 randomness[7];
+
+ randomness[0] = tegra_sku_info.sku_id;
+ randomness[1] = tegra_read_straps();
+ randomness[2] = tegra_read_chipid();
+ randomness[3] = tegra_sku_info.cpu_process_id << 16;
+ randomness[3] |= tegra_sku_info.soc_process_id;
+ randomness[4] = tegra_sku_info.cpu_speedo_id << 16;
+ randomness[4] |= tegra_sku_info.soc_speedo_id;
+ randomness[5] = tegra_fuse_read_early(FUSE_UID_LOW);
+ randomness[6] = tegra_fuse_read_early(FUSE_UID_HIGH);
+
+ add_device_randomness(randomness, sizeof(randomness));
+}
+
+static void __init tegra20_fuse_init(struct tegra_fuse *fuse)
+{
+ fuse->read_early = tegra20_fuse_read_early;
+
+ tegra_init_revision();
+ fuse->soc->speedo_init(&tegra_sku_info);
+ tegra20_fuse_add_randomness();
+}
+
+const struct tegra_fuse_soc tegra20_fuse_soc = {
+ .init = tegra20_fuse_init,
+ .speedo_init = tegra20_init_speedo_data,
+ .probe = tegra20_fuse_probe,
+ .info = &tegra20_fuse_info,
+};
diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c
new file mode 100644
index 000000000..7c47a0ceb
--- /dev/null
+++ b/drivers/soc/tegra/fuse/fuse-tegra30.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+
+#include <soc/tegra/fuse.h>
+
+#include "fuse.h"
+
+#define FUSE_BEGIN 0x100
+
+/* Tegra30 and later */
+#define FUSE_VENDOR_CODE 0x100
+#define FUSE_FAB_CODE 0x104
+#define FUSE_LOT_CODE_0 0x108
+#define FUSE_LOT_CODE_1 0x10c
+#define FUSE_WAFER_ID 0x110
+#define FUSE_X_COORDINATE 0x114
+#define FUSE_Y_COORDINATE 0x118
+
+#define FUSE_HAS_REVISION_INFO BIT(0)
+
+#if defined(CONFIG_ARCH_TEGRA_3x_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_114_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_124_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_132_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_210_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_186_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_194_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_234_SOC)
+static u32 tegra30_fuse_read_early(struct tegra_fuse *fuse, unsigned int offset)
+{
+ if (WARN_ON(!fuse->base))
+ return 0;
+
+ return readl_relaxed(fuse->base + FUSE_BEGIN + offset);
+}
+
+static u32 tegra30_fuse_read(struct tegra_fuse *fuse, unsigned int offset)
+{
+ u32 value;
+ int err;
+
+ err = clk_prepare_enable(fuse->clk);
+ if (err < 0) {
+ dev_err(fuse->dev, "failed to enable FUSE clock: %d\n", err);
+ return 0;
+ }
+
+ value = readl_relaxed(fuse->base + FUSE_BEGIN + offset);
+
+ clk_disable_unprepare(fuse->clk);
+
+ return value;
+}
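+
+/*
+ * Note the split between the two helpers above: tegra30_fuse_read_early()
+ * assumes the bootloader left the fuse clock running and never sleeps, while
+ * tegra30_fuse_read() brackets the access with clk_prepare_enable() and
+ * clk_disable_unprepare() and is therefore only safe in process context.
+ */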
+
+static void __init tegra30_fuse_add_randomness(void)
+{
+ u32 randomness[12];
+
+ randomness[0] = tegra_sku_info.sku_id;
+ randomness[1] = tegra_read_straps();
+ randomness[2] = tegra_read_chipid();
+ randomness[3] = tegra_sku_info.cpu_process_id << 16;
+ randomness[3] |= tegra_sku_info.soc_process_id;
+ randomness[4] = tegra_sku_info.cpu_speedo_id << 16;
+ randomness[4] |= tegra_sku_info.soc_speedo_id;
+ randomness[5] = tegra_fuse_read_early(FUSE_VENDOR_CODE);
+ randomness[6] = tegra_fuse_read_early(FUSE_FAB_CODE);
+ randomness[7] = tegra_fuse_read_early(FUSE_LOT_CODE_0);
+ randomness[8] = tegra_fuse_read_early(FUSE_LOT_CODE_1);
+ randomness[9] = tegra_fuse_read_early(FUSE_WAFER_ID);
+ randomness[10] = tegra_fuse_read_early(FUSE_X_COORDINATE);
+ randomness[11] = tegra_fuse_read_early(FUSE_Y_COORDINATE);
+
+ add_device_randomness(randomness, sizeof(randomness));
+}
+
+static void __init tegra30_fuse_init(struct tegra_fuse *fuse)
+{
+ fuse->read_early = tegra30_fuse_read_early;
+ fuse->read = tegra30_fuse_read;
+
+ tegra_init_revision();
+
+ if (fuse->soc->speedo_init)
+ fuse->soc->speedo_init(&tegra_sku_info);
+
+ tegra30_fuse_add_randomness();
+}
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+static const struct tegra_fuse_info tegra30_fuse_info = {
+ .read = tegra30_fuse_read,
+ .size = 0x2a4,
+ .spare = 0x144,
+};
+
+const struct tegra_fuse_soc tegra30_fuse_soc = {
+ .init = tegra30_fuse_init,
+ .speedo_init = tegra30_init_speedo_data,
+ .info = &tegra30_fuse_info,
+};
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_114_SOC
+static const struct tegra_fuse_info tegra114_fuse_info = {
+ .read = tegra30_fuse_read,
+ .size = 0x2a0,
+ .spare = 0x180,
+};
+
+const struct tegra_fuse_soc tegra114_fuse_soc = {
+ .init = tegra30_fuse_init,
+ .speedo_init = tegra114_init_speedo_data,
+ .info = &tegra114_fuse_info,
+};
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA_124_SOC) || defined(CONFIG_ARCH_TEGRA_132_SOC)
+static const struct tegra_fuse_info tegra124_fuse_info = {
+ .read = tegra30_fuse_read,
+ .size = 0x300,
+ .spare = 0x200,
+};
+
+const struct tegra_fuse_soc tegra124_fuse_soc = {
+ .init = tegra30_fuse_init,
+ .speedo_init = tegra124_init_speedo_data,
+ .info = &tegra124_fuse_info,
+};
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA_210_SOC)
+static const struct tegra_fuse_info tegra210_fuse_info = {
+ .read = tegra30_fuse_read,
+ .size = 0x300,
+ .spare = 0x280,
+};
+
+const struct tegra_fuse_soc tegra210_fuse_soc = {
+ .init = tegra30_fuse_init,
+ .speedo_init = tegra210_init_speedo_data,
+ .info = &tegra210_fuse_info,
+};
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA_186_SOC)
+static const struct tegra_fuse_info tegra186_fuse_info = {
+ .read = tegra30_fuse_read,
+ .size = 0x300,
+ .spare = 0x280,
+};
+
+const struct tegra_fuse_soc tegra186_fuse_soc = {
+ .init = tegra30_fuse_init,
+ .info = &tegra186_fuse_info,
+};
+#endif
diff --git a/drivers/soc/tegra/fuse/fuse.h b/drivers/soc/tegra/fuse/fuse.h
new file mode 100644
index 000000000..bf489d50e
--- /dev/null
+++ b/drivers/soc/tegra/fuse/fuse.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Author:
+ * Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_MISC_TEGRA_FUSE_H
+#define __DRIVERS_MISC_TEGRA_FUSE_H
+
+#include <linux/dmaengine.h>
+#include <linux/types.h>
+
+struct tegra_fuse;
+
+struct tegra_fuse_info {
+ u32 (*read)(struct tegra_fuse *fuse, unsigned int offset);
+ unsigned int size;
+ unsigned int spare;
+};
+
+struct tegra_fuse_soc {
+ void (*init)(struct tegra_fuse *fuse);
+ void (*speedo_init)(struct tegra_sku_info *info);
+ int (*probe)(struct tegra_fuse *fuse);
+
+ const struct tegra_fuse_info *info;
+};
+
+struct tegra_fuse {
+ struct device *dev;
+ void __iomem *base;
+ phys_addr_t phys;
+ struct clk *clk;
+
+ u32 (*read_early)(struct tegra_fuse *fuse, unsigned int offset);
+ u32 (*read)(struct tegra_fuse *fuse, unsigned int offset);
+ const struct tegra_fuse_soc *soc;
+
+ /* APBDMA on Tegra20 */
+ struct {
+ struct mutex lock;
+ struct completion wait;
+ struct dma_chan *chan;
+ struct dma_slave_config config;
+ dma_addr_t phys;
+ u32 *virt;
+ } apbdma;
+};
+
+void tegra_init_revision(void);
+void tegra_init_apbmisc(void);
+
+u32 __init tegra_fuse_read_spare(unsigned int spare);
+u32 __init tegra_fuse_read_early(unsigned int offset);
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+void tegra20_init_speedo_data(struct tegra_sku_info *sku_info);
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+void tegra30_init_speedo_data(struct tegra_sku_info *sku_info);
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_114_SOC
+void tegra114_init_speedo_data(struct tegra_sku_info *sku_info);
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA_124_SOC) || defined(CONFIG_ARCH_TEGRA_132_SOC)
+void tegra124_init_speedo_data(struct tegra_sku_info *sku_info);
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_210_SOC
+void tegra210_init_speedo_data(struct tegra_sku_info *sku_info);
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+extern const struct tegra_fuse_soc tegra20_fuse_soc;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+extern const struct tegra_fuse_soc tegra30_fuse_soc;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_114_SOC
+extern const struct tegra_fuse_soc tegra114_fuse_soc;
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA_124_SOC) || defined(CONFIG_ARCH_TEGRA_132_SOC)
+extern const struct tegra_fuse_soc tegra124_fuse_soc;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_210_SOC
+extern const struct tegra_fuse_soc tegra210_fuse_soc;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_186_SOC
+extern const struct tegra_fuse_soc tegra186_fuse_soc;
+#endif
+
+#endif
diff --git a/drivers/soc/tegra/fuse/speedo-tegra114.c b/drivers/soc/tegra/fuse/speedo-tegra114.c
new file mode 100644
index 000000000..1ba41ebbb
--- /dev/null
+++ b/drivers/soc/tegra/fuse/speedo-tegra114.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+
+#include <soc/tegra/fuse.h>
+
+#include "fuse.h"
+
+#define SOC_PROCESS_CORNERS 2
+#define CPU_PROCESS_CORNERS 2
+
+enum {
+ THRESHOLD_INDEX_0,
+ THRESHOLD_INDEX_1,
+ THRESHOLD_INDEX_COUNT,
+};
+
+static const u32 __initconst soc_process_speedos[][SOC_PROCESS_CORNERS] = {
+ {1123, UINT_MAX},
+ {0, UINT_MAX},
+};
+
+static const u32 __initconst cpu_process_speedos[][CPU_PROCESS_CORNERS] = {
+ {1695, UINT_MAX},
+ {0, UINT_MAX},
+};
+
+static void __init rev_sku_to_speedo_ids(struct tegra_sku_info *sku_info,
+ int *threshold)
+{
+ u32 tmp;
+ u32 sku = sku_info->sku_id;
+ enum tegra_revision rev = sku_info->revision;
+
+ switch (sku) {
+ case 0x00:
+ case 0x10:
+ case 0x05:
+ case 0x06:
+ sku_info->cpu_speedo_id = 1;
+ sku_info->soc_speedo_id = 0;
+ *threshold = THRESHOLD_INDEX_0;
+ break;
+
+ case 0x03:
+ case 0x04:
+ sku_info->cpu_speedo_id = 2;
+ sku_info->soc_speedo_id = 1;
+ *threshold = THRESHOLD_INDEX_1;
+ break;
+
+ default:
+ pr_err("Tegra Unknown SKU %d\n", sku);
+ sku_info->cpu_speedo_id = 0;
+ sku_info->soc_speedo_id = 0;
+ *threshold = THRESHOLD_INDEX_0;
+ break;
+ }
+
+ if (rev == TEGRA_REVISION_A01) {
+ tmp = tegra_fuse_read_early(0x270) << 1;
+ tmp |= tegra_fuse_read_early(0x26c);
+ if (!tmp)
+ sku_info->cpu_speedo_id = 0;
+ }
+}
+
+void __init tegra114_init_speedo_data(struct tegra_sku_info *sku_info)
+{
+ u32 cpu_speedo_val;
+ u32 soc_speedo_val;
+ int threshold;
+ int i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(cpu_process_speedos) !=
+ THRESHOLD_INDEX_COUNT);
+ BUILD_BUG_ON(ARRAY_SIZE(soc_process_speedos) !=
+ THRESHOLD_INDEX_COUNT);
+
+ rev_sku_to_speedo_ids(sku_info, &threshold);
+
+ cpu_speedo_val = tegra_fuse_read_early(0x12c) + 1024;
+ soc_speedo_val = tegra_fuse_read_early(0x134);
+
+ for (i = 0; i < CPU_PROCESS_CORNERS; i++)
+ if (cpu_speedo_val < cpu_process_speedos[threshold][i])
+ break;
+ sku_info->cpu_process_id = i;
+
+ for (i = 0; i < SOC_PROCESS_CORNERS; i++)
+ if (soc_speedo_val < soc_process_speedos[threshold][i])
+ break;
+ sku_info->soc_process_id = i;
+}
diff --git a/drivers/soc/tegra/fuse/speedo-tegra124.c b/drivers/soc/tegra/fuse/speedo-tegra124.c
new file mode 100644
index 000000000..a63a13410
--- /dev/null
+++ b/drivers/soc/tegra/fuse/speedo-tegra124.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/bug.h>
+
+#include <soc/tegra/fuse.h>
+
+#include "fuse.h"
+
+#define CPU_PROCESS_CORNERS 2
+#define GPU_PROCESS_CORNERS 2
+#define SOC_PROCESS_CORNERS 2
+
+#define FUSE_CPU_SPEEDO_0 0x14
+#define FUSE_CPU_SPEEDO_1 0x2c
+#define FUSE_CPU_SPEEDO_2 0x30
+#define FUSE_SOC_SPEEDO_0 0x34
+#define FUSE_SOC_SPEEDO_1 0x38
+#define FUSE_SOC_SPEEDO_2 0x3c
+#define FUSE_CPU_IDDQ 0x18
+#define FUSE_SOC_IDDQ 0x40
+#define FUSE_GPU_IDDQ 0x128
+#define FUSE_FT_REV 0x28
+
+enum {
+ THRESHOLD_INDEX_0,
+ THRESHOLD_INDEX_1,
+ THRESHOLD_INDEX_COUNT,
+};
+
+static const u32 __initconst cpu_process_speedos[][CPU_PROCESS_CORNERS] = {
+ {2190, UINT_MAX},
+ {0, UINT_MAX},
+};
+
+static const u32 __initconst gpu_process_speedos[][GPU_PROCESS_CORNERS] = {
+ {1965, UINT_MAX},
+ {0, UINT_MAX},
+};
+
+static const u32 __initconst soc_process_speedos[][SOC_PROCESS_CORNERS] = {
+ {2101, UINT_MAX},
+ {0, UINT_MAX},
+};
+
+static void __init rev_sku_to_speedo_ids(struct tegra_sku_info *sku_info,
+ int *threshold)
+{
+ int sku = sku_info->sku_id;
+
+ /* Assign to default */
+ sku_info->cpu_speedo_id = 0;
+ sku_info->soc_speedo_id = 0;
+ sku_info->gpu_speedo_id = 0;
+ *threshold = THRESHOLD_INDEX_0;
+
+ switch (sku) {
+ case 0x00: /* Engineering SKU */
+ case 0x0F:
+ case 0x23:
+ /* Using the default */
+ break;
+ case 0x83:
+ sku_info->cpu_speedo_id = 2;
+ break;
+
+ case 0x1F:
+ case 0x87:
+ case 0x27:
+ sku_info->cpu_speedo_id = 2;
+ sku_info->soc_speedo_id = 0;
+ sku_info->gpu_speedo_id = 1;
+ *threshold = THRESHOLD_INDEX_0;
+ break;
+ case 0x81:
+ case 0x21:
+ case 0x07:
+ sku_info->cpu_speedo_id = 1;
+ sku_info->soc_speedo_id = 1;
+ sku_info->gpu_speedo_id = 1;
+ *threshold = THRESHOLD_INDEX_1;
+ break;
+ case 0x49:
+ case 0x4A:
+ case 0x48:
+ sku_info->cpu_speedo_id = 4;
+ sku_info->soc_speedo_id = 2;
+ sku_info->gpu_speedo_id = 3;
+ *threshold = THRESHOLD_INDEX_1;
+ break;
+ default:
+ pr_err("Tegra Unknown SKU %d\n", sku);
+ /* Using the default for the error case */
+ break;
+ }
+}
+
+void __init tegra124_init_speedo_data(struct tegra_sku_info *sku_info)
+{
+ int i, threshold, cpu_speedo_0_value, soc_speedo_0_value;
+ int cpu_iddq_value, gpu_iddq_value, soc_iddq_value;
+
+ BUILD_BUG_ON(ARRAY_SIZE(cpu_process_speedos) !=
+ THRESHOLD_INDEX_COUNT);
+ BUILD_BUG_ON(ARRAY_SIZE(gpu_process_speedos) !=
+ THRESHOLD_INDEX_COUNT);
+ BUILD_BUG_ON(ARRAY_SIZE(soc_process_speedos) !=
+ THRESHOLD_INDEX_COUNT);
+
+ cpu_speedo_0_value = tegra_fuse_read_early(FUSE_CPU_SPEEDO_0);
+
+ /* GPU Speedo is stored in CPU_SPEEDO_2 */
+ sku_info->gpu_speedo_value = tegra_fuse_read_early(FUSE_CPU_SPEEDO_2);
+
+ soc_speedo_0_value = tegra_fuse_read_early(FUSE_SOC_SPEEDO_0);
+
+ cpu_iddq_value = tegra_fuse_read_early(FUSE_CPU_IDDQ);
+ soc_iddq_value = tegra_fuse_read_early(FUSE_SOC_IDDQ);
+ gpu_iddq_value = tegra_fuse_read_early(FUSE_GPU_IDDQ);
+
+ sku_info->cpu_speedo_value = cpu_speedo_0_value;
+
+ if (sku_info->cpu_speedo_value == 0) {
+ pr_warn("Tegra Warning: Speedo value not fused.\n");
+ WARN_ON(1);
+ return;
+ }
+
+ rev_sku_to_speedo_ids(sku_info, &threshold);
+
+ sku_info->cpu_iddq_value = cpu_iddq_value;
+
+ for (i = 0; i < GPU_PROCESS_CORNERS; i++)
+ if (sku_info->gpu_speedo_value <
+ gpu_process_speedos[threshold][i])
+ break;
+ sku_info->gpu_process_id = i;
+
+ for (i = 0; i < CPU_PROCESS_CORNERS; i++)
+ if (sku_info->cpu_speedo_value <
+ cpu_process_speedos[threshold][i])
+ break;
+ sku_info->cpu_process_id = i;
+
+ for (i = 0; i < SOC_PROCESS_CORNERS; i++)
+ if (soc_speedo_0_value <
+ soc_process_speedos[threshold][i])
+ break;
+ sku_info->soc_process_id = i;
+
+ pr_debug("Tegra GPU Speedo ID=%d, Speedo Value=%d\n",
+ sku_info->gpu_speedo_id, sku_info->gpu_speedo_value);
+}
diff --git a/drivers/soc/tegra/fuse/speedo-tegra20.c b/drivers/soc/tegra/fuse/speedo-tegra20.c
new file mode 100644
index 000000000..5f7818bf6
--- /dev/null
+++ b/drivers/soc/tegra/fuse/speedo-tegra20.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2012-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+
+#include <soc/tegra/fuse.h>
+
+#include "fuse.h"
+
+#define CPU_SPEEDO_LSBIT 20
+#define CPU_SPEEDO_MSBIT 29
+#define CPU_SPEEDO_REDUND_LSBIT 30
+#define CPU_SPEEDO_REDUND_MSBIT 39
+#define CPU_SPEEDO_REDUND_OFFS (CPU_SPEEDO_REDUND_MSBIT - CPU_SPEEDO_MSBIT)
+
+#define SOC_SPEEDO_LSBIT 40
+#define SOC_SPEEDO_MSBIT 47
+#define SOC_SPEEDO_REDUND_LSBIT 48
+#define SOC_SPEEDO_REDUND_MSBIT 55
+#define SOC_SPEEDO_REDUND_OFFS (SOC_SPEEDO_REDUND_MSBIT - SOC_SPEEDO_MSBIT)
+
+#define SPEEDO_MULT 4
+
+#define PROCESS_CORNERS_NUM 4
+
+#define SPEEDO_ID_SELECT_0(rev) ((rev) <= 2)
+#define SPEEDO_ID_SELECT_1(sku) \
+ (((sku) != 20) && ((sku) != 23) && ((sku) != 24) && \
+ ((sku) != 27) && ((sku) != 28))
+
+enum {
+ SPEEDO_ID_0,
+ SPEEDO_ID_1,
+ SPEEDO_ID_2,
+ SPEEDO_ID_COUNT,
+};
+
+static const u32 __initconst cpu_process_speedos[][PROCESS_CORNERS_NUM] = {
+ {315, 366, 420, UINT_MAX},
+ {303, 368, 419, UINT_MAX},
+ {316, 331, 383, UINT_MAX},
+};
+
+static const u32 __initconst soc_process_speedos[][PROCESS_CORNERS_NUM] = {
+ {165, 195, 224, UINT_MAX},
+ {165, 195, 224, UINT_MAX},
+ {165, 195, 224, UINT_MAX},
+};
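+
+/*
+ * Worked example (hypothetical fuse values): the CPU speedo is a 10-bit
+ * value in spare bits 20..29 with a redundant copy 10 bits higher; a bit
+ * counts as set if either copy is fused. An assembled value of 89
+ * (0b0001011001) gives a speedo of 89 * SPEEDO_MULT = 356, which for
+ * soc_speedo_id 0 first satisfies val <= 366 at index 1, so
+ * cpu_process_id = 1.
+ */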
+
+void __init tegra20_init_speedo_data(struct tegra_sku_info *sku_info)
+{
+ u32 reg;
+ u32 val;
+ int i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(cpu_process_speedos) != SPEEDO_ID_COUNT);
+ BUILD_BUG_ON(ARRAY_SIZE(soc_process_speedos) != SPEEDO_ID_COUNT);
+
+ if (SPEEDO_ID_SELECT_0(sku_info->revision))
+ sku_info->soc_speedo_id = SPEEDO_ID_0;
+ else if (SPEEDO_ID_SELECT_1(sku_info->sku_id))
+ sku_info->soc_speedo_id = SPEEDO_ID_1;
+ else
+ sku_info->soc_speedo_id = SPEEDO_ID_2;
+
+ val = 0;
+ for (i = CPU_SPEEDO_MSBIT; i >= CPU_SPEEDO_LSBIT; i--) {
+ reg = tegra_fuse_read_spare(i) |
+ tegra_fuse_read_spare(i + CPU_SPEEDO_REDUND_OFFS);
+ val = (val << 1) | (reg & 0x1);
+ }
+ val = val * SPEEDO_MULT;
+ pr_debug("Tegra CPU speedo value %u\n", val);
+
+ for (i = 0; i < (PROCESS_CORNERS_NUM - 1); i++) {
+ if (val <= cpu_process_speedos[sku_info->soc_speedo_id][i])
+ break;
+ }
+ sku_info->cpu_process_id = i;
+
+ val = 0;
+ for (i = SOC_SPEEDO_MSBIT; i >= SOC_SPEEDO_LSBIT; i--) {
+ reg = tegra_fuse_read_spare(i) |
+ tegra_fuse_read_spare(i + SOC_SPEEDO_REDUND_OFFS);
+ val = (val << 1) | (reg & 0x1);
+ }
+ val = val * SPEEDO_MULT;
+ pr_debug("Core speedo value %u\n", val);
+
+ for (i = 0; i < (PROCESS_CORNERS_NUM - 1); i++) {
+ if (val <= soc_process_speedos[sku_info->soc_speedo_id][i])
+ break;
+ }
+ sku_info->soc_process_id = i;
+}
diff --git a/drivers/soc/tegra/fuse/speedo-tegra210.c b/drivers/soc/tegra/fuse/speedo-tegra210.c
new file mode 100644
index 000000000..4403b8956
--- /dev/null
+++ b/drivers/soc/tegra/fuse/speedo-tegra210.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2013-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/bug.h>
+
+#include <soc/tegra/fuse.h>
+
+#include "fuse.h"
+
+#define CPU_PROCESS_CORNERS 2
+#define GPU_PROCESS_CORNERS 2
+#define SOC_PROCESS_CORNERS 3
+
+#define FUSE_CPU_SPEEDO_0 0x014
+#define FUSE_CPU_SPEEDO_1 0x02c
+#define FUSE_CPU_SPEEDO_2 0x030
+#define FUSE_SOC_SPEEDO_0 0x034
+#define FUSE_SOC_SPEEDO_1 0x038
+#define FUSE_SOC_SPEEDO_2 0x03c
+#define FUSE_CPU_IDDQ 0x018
+#define FUSE_SOC_IDDQ 0x040
+#define FUSE_GPU_IDDQ 0x128
+#define FUSE_FT_REV 0x028
+
+enum {
+ THRESHOLD_INDEX_0,
+ THRESHOLD_INDEX_1,
+ THRESHOLD_INDEX_COUNT,
+};
+
+static const u32 __initconst cpu_process_speedos[][CPU_PROCESS_CORNERS] = {
+ { 2119, UINT_MAX },
+ { 2119, UINT_MAX },
+};
+
+static const u32 __initconst gpu_process_speedos[][GPU_PROCESS_CORNERS] = {
+ { UINT_MAX, UINT_MAX },
+ { UINT_MAX, UINT_MAX },
+};
+
+static const u32 __initconst soc_process_speedos[][SOC_PROCESS_CORNERS] = {
+ { 1950, 2100, UINT_MAX },
+ { 1950, 2100, UINT_MAX },
+};
+
+static u8 __init get_speedo_revision(void)
+{
+ return tegra_fuse_read_spare(4) << 2 |
+ tegra_fuse_read_spare(3) << 1 |
+ tegra_fuse_read_spare(2) << 0;
+}
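+
+/*
+ * For example (hypothetical fuses): spare bits (4, 3, 2) = (1, 0, 1)
+ * assemble to revision 0b101 = 5, which takes the ">= 3" branch below and
+ * uses the fused speedo values unscaled.
+ */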
+
+static void __init rev_sku_to_speedo_ids(struct tegra_sku_info *sku_info,
+ u8 speedo_rev, int *threshold)
+{
+ int sku = sku_info->sku_id;
+
+ /* Assign to default */
+ sku_info->cpu_speedo_id = 0;
+ sku_info->soc_speedo_id = 0;
+ sku_info->gpu_speedo_id = 0;
+ *threshold = THRESHOLD_INDEX_0;
+
+ switch (sku) {
+ case 0x00: /* Engineering SKU */
+ case 0x01: /* Engineering SKU */
+ case 0x07:
+ case 0x17:
+ case 0x27:
+ if (speedo_rev >= 2)
+ sku_info->gpu_speedo_id = 1;
+ break;
+
+ case 0x13:
+ if (speedo_rev >= 2)
+ sku_info->gpu_speedo_id = 1;
+
+ sku_info->cpu_speedo_id = 1;
+ break;
+
+ default:
+ pr_err("Tegra210: unknown SKU %#04x\n", sku);
+ /* Using the default for the error case */
+ break;
+ }
+}
+
+static int get_process_id(int value, const u32 *speedos, unsigned int num)
+{
+ unsigned int i;
+
+ for (i = 0; i < num; i++)
+ if (value < speedos[i])
+ return i;
+
+ return -EINVAL;
+}
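+
+/*
+ * Worked example: with thresholds { 1950, 2100, UINT_MAX }, a speedo value
+ * of 2000 is not below 1950 but is below 2100, so get_process_id() returns
+ * process ID 1. The UINT_MAX sentinel in the last corner guarantees a match
+ * for any sane value, so the -EINVAL case should be unreachable in practice.
+ */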
+
+void __init tegra210_init_speedo_data(struct tegra_sku_info *sku_info)
+{
+ int cpu_speedo[3], soc_speedo[3], cpu_iddq, gpu_iddq, soc_iddq;
+ unsigned int index;
+ u8 speedo_revision;
+
+ BUILD_BUG_ON(ARRAY_SIZE(cpu_process_speedos) !=
+ THRESHOLD_INDEX_COUNT);
+ BUILD_BUG_ON(ARRAY_SIZE(gpu_process_speedos) !=
+ THRESHOLD_INDEX_COUNT);
+ BUILD_BUG_ON(ARRAY_SIZE(soc_process_speedos) !=
+ THRESHOLD_INDEX_COUNT);
+
+ /* Read speedo/IDDQ fuses */
+ cpu_speedo[0] = tegra_fuse_read_early(FUSE_CPU_SPEEDO_0);
+ cpu_speedo[1] = tegra_fuse_read_early(FUSE_CPU_SPEEDO_1);
+ cpu_speedo[2] = tegra_fuse_read_early(FUSE_CPU_SPEEDO_2);
+
+ soc_speedo[0] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_0);
+ soc_speedo[1] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_1);
+ soc_speedo[2] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_2);
+
+ cpu_iddq = tegra_fuse_read_early(FUSE_CPU_IDDQ) * 4;
+ soc_iddq = tegra_fuse_read_early(FUSE_SOC_IDDQ) * 4;
+ gpu_iddq = tegra_fuse_read_early(FUSE_GPU_IDDQ) * 5;
+
+ /*
+ * Determine CPU, GPU and SoC speedo values depending on speedo fusing
+ * revision. Note that GPU speedo value is fused in CPU_SPEEDO_2.
+ */
+ speedo_revision = get_speedo_revision();
+ pr_info("Speedo Revision %u\n", speedo_revision);
+
+ if (speedo_revision >= 3) {
+ sku_info->cpu_speedo_value = cpu_speedo[0];
+ sku_info->gpu_speedo_value = cpu_speedo[2];
+ sku_info->soc_speedo_value = soc_speedo[0];
+ } else if (speedo_revision == 2) {
+ sku_info->cpu_speedo_value = (-1938 + (1095 * cpu_speedo[0] / 100)) / 10;
+ sku_info->gpu_speedo_value = (-1662 + (1082 * cpu_speedo[2] / 100)) / 10;
+ sku_info->soc_speedo_value = ( -705 + (1037 * soc_speedo[0] / 100)) / 10;
+ } else {
+ sku_info->cpu_speedo_value = 2100;
+ sku_info->gpu_speedo_value = cpu_speedo[2] - 75;
+ sku_info->soc_speedo_value = 1900;
+ }
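+
+ /*
+ * Worked example for the revision 2 rescaling (hypothetical fuse
+ * value): cpu_speedo[0] = 2000 gives
+ * (-1938 + (1095 * 2000 / 100)) / 10 = (-1938 + 21900) / 10 = 1996,
+ * i.e. the raw value is scaled by roughly 1.095 and offset before
+ * binning.
+ */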
+
+ if ((sku_info->cpu_speedo_value <= 0) ||
+ (sku_info->gpu_speedo_value <= 0) ||
+ (sku_info->soc_speedo_value <= 0)) {
+ WARN(1, "speedo value not fused\n");
+ return;
+ }
+
+ rev_sku_to_speedo_ids(sku_info, speedo_revision, &index);
+
+ sku_info->gpu_process_id = get_process_id(sku_info->gpu_speedo_value,
+ gpu_process_speedos[index],
+ GPU_PROCESS_CORNERS);
+
+ sku_info->cpu_process_id = get_process_id(sku_info->cpu_speedo_value,
+ cpu_process_speedos[index],
+ CPU_PROCESS_CORNERS);
+
+ sku_info->soc_process_id = get_process_id(sku_info->soc_speedo_value,
+ soc_process_speedos[index],
+ SOC_PROCESS_CORNERS);
+
+ pr_debug("Tegra GPU Speedo ID=%d, Speedo Value=%d\n",
+ sku_info->gpu_speedo_id, sku_info->gpu_speedo_value);
+}
diff --git a/drivers/soc/tegra/fuse/speedo-tegra30.c b/drivers/soc/tegra/fuse/speedo-tegra30.c
new file mode 100644
index 000000000..9b010b3ef
--- /dev/null
+++ b/drivers/soc/tegra/fuse/speedo-tegra30.c
@@ -0,0 +1,288 @@
+/*
+ * Copyright (c) 2012-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+
+#include <soc/tegra/fuse.h>
+
+#include "fuse.h"
+
+#define SOC_PROCESS_CORNERS 1
+#define CPU_PROCESS_CORNERS 6
+
+#define FUSE_SPEEDO_CALIB_0 0x14
+#define FUSE_PACKAGE_INFO 0xfc
+#define FUSE_TEST_PROG_VER 0x28
+
+#define G_SPEEDO_BIT_MINUS1 58
+#define G_SPEEDO_BIT_MINUS1_R 59
+#define G_SPEEDO_BIT_MINUS2 60
+#define G_SPEEDO_BIT_MINUS2_R 61
+#define LP_SPEEDO_BIT_MINUS1 62
+#define LP_SPEEDO_BIT_MINUS1_R 63
+#define LP_SPEEDO_BIT_MINUS2 64
+#define LP_SPEEDO_BIT_MINUS2_R 65
+
+enum {
+ THRESHOLD_INDEX_0,
+ THRESHOLD_INDEX_1,
+ THRESHOLD_INDEX_2,
+ THRESHOLD_INDEX_3,
+ THRESHOLD_INDEX_4,
+ THRESHOLD_INDEX_5,
+ THRESHOLD_INDEX_6,
+ THRESHOLD_INDEX_7,
+ THRESHOLD_INDEX_8,
+ THRESHOLD_INDEX_9,
+ THRESHOLD_INDEX_10,
+ THRESHOLD_INDEX_11,
+ THRESHOLD_INDEX_COUNT,
+};
+
+static const u32 __initconst soc_process_speedos[][SOC_PROCESS_CORNERS] = {
+ {180},
+ {170},
+ {195},
+ {180},
+ {168},
+ {192},
+ {180},
+ {170},
+ {195},
+ {180},
+ {180},
+ {180},
+};
+
+static const u32 __initconst cpu_process_speedos[][CPU_PROCESS_CORNERS] = {
+ {306, 338, 360, 376, UINT_MAX},
+ {295, 336, 358, 375, UINT_MAX},
+ {325, 325, 358, 375, UINT_MAX},
+ {325, 325, 358, 375, UINT_MAX},
+ {292, 324, 348, 364, UINT_MAX},
+ {324, 324, 348, 364, UINT_MAX},
+ {324, 324, 348, 364, UINT_MAX},
+ {295, 336, 358, 375, UINT_MAX},
+ {358, 358, 358, 358, 397, UINT_MAX},
+ {364, 364, 364, 364, 397, UINT_MAX},
+ {295, 336, 358, 375, 391, UINT_MAX},
+ {295, 336, 358, 375, 391, UINT_MAX},
+};
+
+static int threshold_index __initdata;
+
+static void __init fuse_speedo_calib(u32 *speedo_g, u32 *speedo_lp)
+{
+ u32 reg;
+ int ate_ver;
+ int bit_minus1;
+ int bit_minus2;
+
+ reg = tegra_fuse_read_early(FUSE_SPEEDO_CALIB_0);
+
+ *speedo_lp = (reg & 0xFFFF) * 4;
+ *speedo_g = ((reg >> 16) & 0xFFFF) * 4;
+
+ ate_ver = tegra_fuse_read_early(FUSE_TEST_PROG_VER);
+ pr_debug("Tegra ATE prog ver %d.%d\n", ate_ver/10, ate_ver%10);
+
+ if (ate_ver >= 26) {
+ bit_minus1 = tegra_fuse_read_spare(LP_SPEEDO_BIT_MINUS1);
+ bit_minus1 |= tegra_fuse_read_spare(LP_SPEEDO_BIT_MINUS1_R);
+ bit_minus2 = tegra_fuse_read_spare(LP_SPEEDO_BIT_MINUS2);
+ bit_minus2 |= tegra_fuse_read_spare(LP_SPEEDO_BIT_MINUS2_R);
+ *speedo_lp |= (bit_minus1 << 1) | bit_minus2;
+
+ bit_minus1 = tegra_fuse_read_spare(G_SPEEDO_BIT_MINUS1);
+ bit_minus1 |= tegra_fuse_read_spare(G_SPEEDO_BIT_MINUS1_R);
+ bit_minus2 = tegra_fuse_read_spare(G_SPEEDO_BIT_MINUS2);
+ bit_minus2 |= tegra_fuse_read_spare(G_SPEEDO_BIT_MINUS2_R);
+ *speedo_g |= (bit_minus1 << 1) | bit_minus2;
+ } else {
+ *speedo_lp |= 0x3;
+ *speedo_g |= 0x3;
+ }
+}
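+
+/*
+ * Illustration (hypothetical calibration word): reg = 0x00500048 splits into
+ * speedo_lp = 0x0048 * 4 = 288 and speedo_g = 0x0050 * 4 = 320. On parts
+ * programmed with ATE version >= 26, the two low-order bits of each value
+ * (cleared by the multiplication) are then recovered from the spare fuses
+ * and their redundant copies; older parts simply force both bits to 1.
+ */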
+
+static void __init rev_sku_to_speedo_ids(struct tegra_sku_info *sku_info)
+{
+ int package_id = tegra_fuse_read_early(FUSE_PACKAGE_INFO) & 0x0F;
+
+ switch (sku_info->revision) {
+ case TEGRA_REVISION_A01:
+ sku_info->cpu_speedo_id = 0;
+ sku_info->soc_speedo_id = 0;
+ threshold_index = THRESHOLD_INDEX_0;
+ break;
+ case TEGRA_REVISION_A02:
+ case TEGRA_REVISION_A03:
+ switch (sku_info->sku_id) {
+ case 0x87:
+ case 0x82:
+ sku_info->cpu_speedo_id = 1;
+ sku_info->soc_speedo_id = 1;
+ threshold_index = THRESHOLD_INDEX_1;
+ break;
+ case 0x81:
+ switch (package_id) {
+ case 1:
+ sku_info->cpu_speedo_id = 2;
+ sku_info->soc_speedo_id = 2;
+ threshold_index = THRESHOLD_INDEX_2;
+ break;
+ case 2:
+ sku_info->cpu_speedo_id = 4;
+ sku_info->soc_speedo_id = 1;
+ threshold_index = THRESHOLD_INDEX_7;
+ break;
+ default:
+ pr_err("Tegra Unknown pkg %d\n", package_id);
+ break;
+ }
+ break;
+ case 0x80:
+ switch (package_id) {
+ case 1:
+ sku_info->cpu_speedo_id = 5;
+ sku_info->soc_speedo_id = 2;
+ threshold_index = THRESHOLD_INDEX_8;
+ break;
+ case 2:
+ sku_info->cpu_speedo_id = 6;
+ sku_info->soc_speedo_id = 2;
+ threshold_index = THRESHOLD_INDEX_9;
+ break;
+ default:
+ pr_err("Tegra Unknown pkg %d\n", package_id);
+ break;
+ }
+ break;
+ case 0x83:
+ switch (package_id) {
+ case 1:
+ sku_info->cpu_speedo_id = 7;
+ sku_info->soc_speedo_id = 1;
+ threshold_index = THRESHOLD_INDEX_10;
+ break;
+ case 2:
+ sku_info->cpu_speedo_id = 3;
+ sku_info->soc_speedo_id = 2;
+ threshold_index = THRESHOLD_INDEX_3;
+ break;
+ default:
+ pr_err("Tegra Unknown pkg %d\n", package_id);
+ break;
+ }
+ break;
+ case 0x8F:
+ sku_info->cpu_speedo_id = 8;
+ sku_info->soc_speedo_id = 1;
+ threshold_index = THRESHOLD_INDEX_11;
+ break;
+ case 0x08:
+ sku_info->cpu_speedo_id = 1;
+ sku_info->soc_speedo_id = 1;
+ threshold_index = THRESHOLD_INDEX_4;
+ break;
+ case 0x02:
+ sku_info->cpu_speedo_id = 2;
+ sku_info->soc_speedo_id = 2;
+ threshold_index = THRESHOLD_INDEX_5;
+ break;
+ case 0x04:
+ sku_info->cpu_speedo_id = 3;
+ sku_info->soc_speedo_id = 2;
+ threshold_index = THRESHOLD_INDEX_6;
+ break;
+ case 0:
+ switch (package_id) {
+ case 1:
+ sku_info->cpu_speedo_id = 2;
+ sku_info->soc_speedo_id = 2;
+ threshold_index = THRESHOLD_INDEX_2;
+ break;
+ case 2:
+ sku_info->cpu_speedo_id = 3;
+ sku_info->soc_speedo_id = 2;
+ threshold_index = THRESHOLD_INDEX_3;
+ break;
+ default:
+ pr_err("Tegra Unknown pkg %d\n", package_id);
+ break;
+ }
+ break;
+ default:
+ pr_warn("Tegra Unknown SKU %d\n", sku_info->sku_id);
+ sku_info->cpu_speedo_id = 0;
+ sku_info->soc_speedo_id = 0;
+ threshold_index = THRESHOLD_INDEX_0;
+ break;
+ }
+ break;
+ default:
+ pr_warn("Tegra Unknown chip rev %d\n", sku_info->revision);
+ sku_info->cpu_speedo_id = 0;
+ sku_info->soc_speedo_id = 0;
+ threshold_index = THRESHOLD_INDEX_0;
+ break;
+ }
+}
+
+void __init tegra30_init_speedo_data(struct tegra_sku_info *sku_info)
+{
+ u32 cpu_speedo_val;
+ u32 soc_speedo_val;
+ int i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(cpu_process_speedos) !=
+ THRESHOLD_INDEX_COUNT);
+ BUILD_BUG_ON(ARRAY_SIZE(soc_process_speedos) !=
+ THRESHOLD_INDEX_COUNT);
+
+ rev_sku_to_speedo_ids(sku_info);
+ fuse_speedo_calib(&cpu_speedo_val, &soc_speedo_val);
+ pr_debug("Tegra CPU speedo value %u\n", cpu_speedo_val);
+ pr_debug("Tegra Core speedo value %u\n", soc_speedo_val);
+
+ for (i = 0; i < CPU_PROCESS_CORNERS; i++) {
+ if (cpu_speedo_val < cpu_process_speedos[threshold_index][i])
+ break;
+ }
+ sku_info->cpu_process_id = i - 1;
+
+ if (sku_info->cpu_process_id == -1) {
+ pr_warn("Tegra CPU speedo value %3d out of range",
+ cpu_speedo_val);
+ sku_info->cpu_process_id = 0;
+ sku_info->cpu_speedo_id = 1;
+ }
+
+ for (i = 0; i < SOC_PROCESS_CORNERS; i++) {
+ if (soc_speedo_val < soc_process_speedos[threshold_index][i])
+ break;
+ }
+ sku_info->soc_process_id = i - 1;
+
+ if (sku_info->soc_process_id == -1) {
+ pr_warn("Tegra SoC speedo value %3d out of range",
+ soc_speedo_val);
+ sku_info->soc_process_id = 0;
+ sku_info->soc_speedo_id = 1;
+ }
+}
diff --git a/drivers/soc/tegra/fuse/tegra-apbmisc.c b/drivers/soc/tegra/fuse/tegra-apbmisc.c
new file mode 100644
index 000000000..d1cbb0fe1
--- /dev/null
+++ b/drivers/soc/tegra/fuse/tegra-apbmisc.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+
+#include <soc/tegra/fuse.h>
+#include <soc/tegra/common.h>
+
+#include "fuse.h"
+
+#define FUSE_SKU_INFO 0x10
+
+#define PMC_STRAPPING_OPT_A_RAM_CODE_SHIFT 4
+#define PMC_STRAPPING_OPT_A_RAM_CODE_MASK_LONG \
+ (0xf << PMC_STRAPPING_OPT_A_RAM_CODE_SHIFT)
+#define PMC_STRAPPING_OPT_A_RAM_CODE_MASK_SHORT \
+ (0x3 << PMC_STRAPPING_OPT_A_RAM_CODE_SHIFT)
+
+static void __iomem *apbmisc_base;
+static void __iomem *strapping_base;
+static bool long_ram_code;
+
+u32 tegra_read_chipid(void)
+{
+ if (!apbmisc_base) {
+ WARN(1, "Tegra Chip ID not yet available\n");
+ return 0;
+ }
+
+ return readl_relaxed(apbmisc_base + 4);
+}
+
+u8 tegra_get_chip_id(void)
+{
+ return (tegra_read_chipid() >> 8) & 0xff;
+}
+
+u32 tegra_read_straps(void)
+{
+ if (strapping_base)
+ return readl_relaxed(strapping_base);
+ else
+ return 0;
+}
+
+u32 tegra_read_ram_code(void)
+{
+ u32 straps = tegra_read_straps();
+
+ if (long_ram_code)
+ straps &= PMC_STRAPPING_OPT_A_RAM_CODE_MASK_LONG;
+ else
+ straps &= PMC_STRAPPING_OPT_A_RAM_CODE_MASK_SHORT;
+
+ return straps >> PMC_STRAPPING_OPT_A_RAM_CODE_SHIFT;
+}
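+
+/*
+ * Example: with straps = 0x74, the long RAM code mask keeps bits 4..7 and
+ * tegra_read_ram_code() returns 0x70 >> 4 = 7; the short mask keeps only
+ * bits 4..5, so the same straps yield 3.
+ */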
+
+static const struct of_device_id apbmisc_match[] __initconst = {
+ { .compatible = "nvidia,tegra20-apbmisc", },
+ { .compatible = "nvidia,tegra186-misc", },
+ {},
+};
+
+void __init tegra_init_revision(void)
+{
+ u32 id, chip_id, minor_rev;
+ int rev;
+
+ id = tegra_read_chipid();
+ chip_id = (id >> 8) & 0xff;
+ minor_rev = (id >> 16) & 0xf;
+
+ switch (minor_rev) {
+ case 1:
+ rev = TEGRA_REVISION_A01;
+ break;
+ case 2:
+ rev = TEGRA_REVISION_A02;
+ break;
+ case 3:
+ if (chip_id == TEGRA20 && (tegra_fuse_read_spare(18) ||
+ tegra_fuse_read_spare(19)))
+ rev = TEGRA_REVISION_A03p;
+ else
+ rev = TEGRA_REVISION_A03;
+ break;
+ case 4:
+ rev = TEGRA_REVISION_A04;
+ break;
+ default:
+ rev = TEGRA_REVISION_UNKNOWN;
+ }
+
+ tegra_sku_info.revision = rev;
+
+ tegra_sku_info.sku_id = tegra_fuse_read_early(FUSE_SKU_INFO);
+}
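+
+/*
+ * Example decode (hypothetical HIDREV value): id = 0x00013041 gives
+ * chip_id = (id >> 8) & 0xff = 0x30 and minor_rev = (id >> 16) & 0xf = 1,
+ * i.e. a Tegra30 A01. The chip ID is only consulted for the Tegra20
+ * A03/A03p distinction, which additionally depends on spare fuses 18 and 19.
+ */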
+
+void __init tegra_init_apbmisc(void)
+{
+ struct resource apbmisc, straps;
+ struct device_node *np;
+
+ np = of_find_matching_node(NULL, apbmisc_match);
+ if (!np) {
+ /*
+ * Fall back to legacy initialization for 32-bit ARM only. All
+ * 64-bit ARM device tree files for Tegra are required to have
+ * an APBMISC node.
+ *
+ * This is for backwards-compatibility with old device trees
+ * that didn't contain an APBMISC node.
+ */
+ if (IS_ENABLED(CONFIG_ARM) && soc_is_tegra()) {
+ /* APBMISC registers (chip revision, ...) */
+ apbmisc.start = 0x70000800;
+ apbmisc.end = 0x70000863;
+ apbmisc.flags = IORESOURCE_MEM;
+
+ /* strapping options */
+ if (of_machine_is_compatible("nvidia,tegra124")) {
+ straps.start = 0x7000e864;
+ straps.end = 0x7000e867;
+ } else {
+ straps.start = 0x70000008;
+ straps.end = 0x7000000b;
+ }
+
+ straps.flags = IORESOURCE_MEM;
+
+ pr_warn("Using APBMISC region %pR\n", &apbmisc);
+ pr_warn("Using strapping options registers %pR\n",
+ &straps);
+ } else {
+ /*
+ * At this point we're not running on Tegra, so play
+ * nice with multi-platform kernels.
+ */
+ return;
+ }
+ } else {
+ /*
+ * Extract information from the device tree if we've found a
+ * matching node.
+ */
+ if (of_address_to_resource(np, 0, &apbmisc) < 0) {
+ pr_err("failed to get APBMISC registers\n");
+ return;
+ }
+
+ if (of_address_to_resource(np, 1, &straps) < 0) {
+ pr_err("failed to get strapping options registers\n");
+ return;
+ }
+ }
+
+ apbmisc_base = ioremap_nocache(apbmisc.start, resource_size(&apbmisc));
+ if (!apbmisc_base)
+ pr_err("failed to map APBMISC registers\n");
+
+ strapping_base = ioremap_nocache(straps.start, resource_size(&straps));
+ if (!strapping_base)
+ pr_err("failed to map strapping options registers\n");
+
+ long_ram_code = of_property_read_bool(np, "nvidia,long-ram-code");
+}
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
new file mode 100644
index 000000000..c1880f88a
--- /dev/null
+++ b/drivers/soc/tegra/pmc.c
@@ -0,0 +1,2005 @@
+/*
+ * drivers/soc/tegra/pmc.c
+ *
+ * Copyright (c) 2010 Google, Inc
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "tegra-pmc: " fmt
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/clk/tegra.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_clk.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/reboot.h>
+#include <linux/reset.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <soc/tegra/common.h>
+#include <soc/tegra/fuse.h>
+#include <soc/tegra/pmc.h>
+
+#define PMC_CNTRL 0x0
+#define PMC_CNTRL_INTR_POLARITY BIT(17) /* inverts INTR polarity */
+#define PMC_CNTRL_CPU_PWRREQ_OE BIT(16) /* CPU pwr req enable */
+#define PMC_CNTRL_CPU_PWRREQ_POLARITY BIT(15) /* CPU pwr req polarity */
+#define PMC_CNTRL_SIDE_EFFECT_LP0 BIT(14) /* LP0 when CPU pwr gated */
+#define PMC_CNTRL_SYSCLK_OE BIT(11) /* system clock enable */
+#define PMC_CNTRL_SYSCLK_POLARITY BIT(10) /* sys clk polarity */
+#define PMC_CNTRL_MAIN_RST BIT(4)
+
+#define DPD_SAMPLE 0x020
+#define DPD_SAMPLE_ENABLE BIT(0)
+#define DPD_SAMPLE_DISABLE (0 << 0)
+
+#define PWRGATE_TOGGLE 0x30
+#define PWRGATE_TOGGLE_START BIT(8)
+
+#define REMOVE_CLAMPING 0x34
+
+#define PWRGATE_STATUS 0x38
+
+#define PMC_IMPL_E_33V_PWR 0x40
+
+#define PMC_PWR_DET 0x48
+
+#define PMC_SCRATCH0_MODE_RECOVERY BIT(31)
+#define PMC_SCRATCH0_MODE_BOOTLOADER BIT(30)
+#define PMC_SCRATCH0_MODE_RCM BIT(1)
+#define PMC_SCRATCH0_MODE_MASK (PMC_SCRATCH0_MODE_RECOVERY | \
+ PMC_SCRATCH0_MODE_BOOTLOADER | \
+ PMC_SCRATCH0_MODE_RCM)
+
+#define PMC_CPUPWRGOOD_TIMER 0xc8
+#define PMC_CPUPWROFF_TIMER 0xcc
+
+#define PMC_PWR_DET_VALUE 0xe4
+
+#define PMC_SCRATCH41 0x140
+
+#define PMC_SENSOR_CTRL 0x1b0
+#define PMC_SENSOR_CTRL_SCRATCH_WRITE BIT(2)
+#define PMC_SENSOR_CTRL_ENABLE_RST BIT(1)
+
+#define PMC_RST_STATUS 0x1b4
+#define PMC_RST_STATUS_POR 0
+#define PMC_RST_STATUS_WATCHDOG 1
+#define PMC_RST_STATUS_SENSOR 2
+#define PMC_RST_STATUS_SW_MAIN 3
+#define PMC_RST_STATUS_LP0 4
+#define PMC_RST_STATUS_AOTAG 5
+
+#define IO_DPD_REQ 0x1b8
+#define IO_DPD_REQ_CODE_IDLE (0U << 30)
+#define IO_DPD_REQ_CODE_OFF (1U << 30)
+#define IO_DPD_REQ_CODE_ON (2U << 30)
+#define IO_DPD_REQ_CODE_MASK (3U << 30)
+
+#define IO_DPD_STATUS 0x1bc
+#define IO_DPD2_REQ 0x1c0
+#define IO_DPD2_STATUS 0x1c4
+#define SEL_DPD_TIM 0x1c8
+
+#define PMC_SCRATCH54 0x258
+#define PMC_SCRATCH54_DATA_SHIFT 8
+#define PMC_SCRATCH54_ADDR_SHIFT 0
+
+#define PMC_SCRATCH55 0x25c
+#define PMC_SCRATCH55_RESET_TEGRA BIT(31)
+#define PMC_SCRATCH55_CNTRL_ID_SHIFT 27
+#define PMC_SCRATCH55_PINMUX_SHIFT 24
+#define PMC_SCRATCH55_16BITOP BIT(15)
+#define PMC_SCRATCH55_CHECKSUM_SHIFT 16
+#define PMC_SCRATCH55_I2CSLV1_SHIFT 0
+
+#define GPU_RG_CNTRL 0x2d4
+
+/* Tegra186 and later */
+#define WAKE_AOWAKE_CTRL 0x4f4
+#define WAKE_AOWAKE_CTRL_INTR_POLARITY BIT(0)
+
+struct tegra_powergate {
+ struct generic_pm_domain genpd;
+ struct tegra_pmc *pmc;
+ unsigned int id;
+ struct clk **clks;
+ unsigned int num_clks;
+ struct reset_control *reset;
+};
+
+struct tegra_io_pad_soc {
+ enum tegra_io_pad id;
+ unsigned int dpd;
+ unsigned int voltage;
+};
+
+struct tegra_pmc_regs {
+ unsigned int scratch0;
+ unsigned int dpd_req;
+ unsigned int dpd_status;
+ unsigned int dpd2_req;
+ unsigned int dpd2_status;
+};
+
+struct tegra_pmc_soc {
+ unsigned int num_powergates;
+ const char *const *powergates;
+ unsigned int num_cpu_powergates;
+ const u8 *cpu_powergates;
+
+ bool has_tsense_reset;
+ bool has_gpu_clamps;
+ bool needs_mbist_war;
+ bool has_impl_33v_pwr;
+
+ const struct tegra_io_pad_soc *io_pads;
+ unsigned int num_io_pads;
+
+ const struct tegra_pmc_regs *regs;
+ void (*init)(struct tegra_pmc *pmc);
+ void (*setup_irq_polarity)(struct tegra_pmc *pmc,
+ struct device_node *np,
+ bool invert);
+};
+
+/**
+ * struct tegra_pmc - NVIDIA Tegra PMC
+ * @dev: pointer to PMC device structure
+ * @base: pointer to I/O remapped register region
+ * @wake: pointer to I/O remapped region for WAKE registers
+ * @aotag: pointer to I/O remapped region for AOTAG registers
+ * @scratch: pointer to I/O remapped region for scratch registers
+ * @clk: pointer to pclk clock
+ * @soc: pointer to SoC data structure
+ * @debugfs: pointer to debugfs entry
+ * @rate: currently configured rate of pclk
+ * @suspend_mode: lowest suspend mode available
+ * @cpu_good_time: CPU power good time (in microseconds)
+ * @cpu_off_time: CPU power off time (in microseconds)
+ * @core_osc_time: core power good OSC time (in microseconds)
+ * @core_pmu_time: core power good PMU time (in microseconds)
+ * @core_off_time: core power off time (in microseconds)
+ * @corereq_high: core power request is active-high
+ * @sysclkreq_high: system clock request is active-high
+ * @combined_req: combined power request for CPU & core
+ * @cpu_pwr_good_en: CPU power good signal is enabled
+ * @lp0_vec_phys: physical base address of the LP0 warm boot code
+ * @lp0_vec_size: size of the LP0 warm boot code
+ * @powergates_available: Bitmap of available power gates
+ * @powergates_lock: mutex for power gate register access
+ */
+struct tegra_pmc {
+ struct device *dev;
+ void __iomem *base;
+ void __iomem *wake;
+ void __iomem *aotag;
+ void __iomem *scratch;
+ struct clk *clk;
+ struct dentry *debugfs;
+
+ const struct tegra_pmc_soc *soc;
+
+ unsigned long rate;
+
+ enum tegra_suspend_mode suspend_mode;
+ u32 cpu_good_time;
+ u32 cpu_off_time;
+ u32 core_osc_time;
+ u32 core_pmu_time;
+ u32 core_off_time;
+ bool corereq_high;
+ bool sysclkreq_high;
+ bool combined_req;
+ bool cpu_pwr_good_en;
+ u32 lp0_vec_phys;
+ u32 lp0_vec_size;
+ DECLARE_BITMAP(powergates_available, TEGRA_POWERGATE_MAX);
+
+ struct mutex powergates_lock;
+};
+
+static struct tegra_pmc *pmc = &(struct tegra_pmc) {
+ .base = NULL,
+ .suspend_mode = TEGRA_SUSPEND_NONE,
+};
+
+static inline struct tegra_powergate *
+to_powergate(struct generic_pm_domain *domain)
+{
+ return container_of(domain, struct tegra_powergate, genpd);
+}
+
+static u32 tegra_pmc_readl(unsigned long offset)
+{
+ return readl(pmc->base + offset);
+}
+
+static void tegra_pmc_writel(u32 value, unsigned long offset)
+{
+ writel(value, pmc->base + offset);
+}
+
+static inline bool tegra_powergate_state(int id)
+{
+ if (id == TEGRA_POWERGATE_3D && pmc->soc->has_gpu_clamps)
+ return (tegra_pmc_readl(GPU_RG_CNTRL) & 0x1) == 0;
+ else
+ return (tegra_pmc_readl(PWRGATE_STATUS) & BIT(id)) != 0;
+}
+
+static inline bool tegra_powergate_is_valid(int id)
+{
+ return (pmc->soc && pmc->soc->powergates[id]);
+}
+
+static inline bool tegra_powergate_is_available(int id)
+{
+ return test_bit(id, pmc->powergates_available);
+}
+
+static int tegra_powergate_lookup(struct tegra_pmc *pmc, const char *name)
+{
+ unsigned int i;
+
+ if (!pmc || !pmc->soc || !name)
+ return -EINVAL;
+
+ for (i = 0; i < pmc->soc->num_powergates; i++) {
+ if (!tegra_powergate_is_valid(i))
+ continue;
+
+ if (!strcmp(name, pmc->soc->powergates[i]))
+ return i;
+ }
+
+ return -ENODEV;
+}
+
+/**
+ * tegra_powergate_set() - set the state of a partition
+ * @id: partition ID
+ * @new_state: new state of the partition
+ */
+static int tegra_powergate_set(unsigned int id, bool new_state)
+{
+ bool status;
+ int err;
+
+ if (id == TEGRA_POWERGATE_3D && pmc->soc->has_gpu_clamps)
+ return -EINVAL;
+
+ mutex_lock(&pmc->powergates_lock);
+
+ if (tegra_powergate_state(id) == new_state) {
+ mutex_unlock(&pmc->powergates_lock);
+ return 0;
+ }
+
+ tegra_pmc_writel(PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
+
+ err = readx_poll_timeout(tegra_powergate_state, id, status,
+ status == new_state, 10, 100000);
+
+ mutex_unlock(&pmc->powergates_lock);
+
+ return err;
+}
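+
+/*
+ * For example, powering up partition 3 writes PWRGATE_TOGGLE_START | 3 =
+ * 0x103 to PWRGATE_TOGGLE and then polls the partition state (bit 3 of
+ * PWRGATE_STATUS) every 10 us until it matches, giving up after 100 ms.
+ */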
+
+static int __tegra_powergate_remove_clamping(unsigned int id)
+{
+ u32 mask;
+
+ mutex_lock(&pmc->powergates_lock);
+
+ /*
+ * On Tegra124 and later, the clamps for the GPU are controlled by a
+ * separate register (with different semantics).
+ */
+ if (id == TEGRA_POWERGATE_3D) {
+ if (pmc->soc->has_gpu_clamps) {
+ tegra_pmc_writel(0, GPU_RG_CNTRL);
+ goto out;
+ }
+ }
+
+ /*
+ * Tegra 2 has a bug where the PCIE and VDE clamping masks are
+ * swapped relative to the partition IDs
+ */
+ if (id == TEGRA_POWERGATE_VDEC)
+ mask = (1 << TEGRA_POWERGATE_PCIE);
+ else if (id == TEGRA_POWERGATE_PCIE)
+ mask = (1 << TEGRA_POWERGATE_VDEC);
+ else
+ mask = (1 << id);
+
+ tegra_pmc_writel(mask, REMOVE_CLAMPING);
+
+out:
+ mutex_unlock(&pmc->powergates_lock);
+
+ return 0;
+}
+
+static void tegra_powergate_disable_clocks(struct tegra_powergate *pg)
+{
+ unsigned int i;
+
+ for (i = 0; i < pg->num_clks; i++)
+ clk_disable_unprepare(pg->clks[i]);
+}
+
+static int tegra_powergate_enable_clocks(struct tegra_powergate *pg)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < pg->num_clks; i++) {
+ err = clk_prepare_enable(pg->clks[i]);
+ if (err)
+ goto out;
+ }
+
+ return 0;
+
+out:
+ while (i--)
+ clk_disable_unprepare(pg->clks[i]);
+
+ return err;
+}
+
+int __weak tegra210_clk_handle_mbist_war(unsigned int id)
+{
+ return 0;
+}
+
+static int tegra_powergate_power_up(struct tegra_powergate *pg,
+ bool disable_clocks)
+{
+ int err;
+
+ err = reset_control_assert(pg->reset);
+ if (err)
+ return err;
+
+ usleep_range(10, 20);
+
+ err = tegra_powergate_set(pg->id, true);
+ if (err < 0)
+ return err;
+
+ usleep_range(10, 20);
+
+ err = tegra_powergate_enable_clocks(pg);
+ if (err)
+ goto powergate_off;
+
+ usleep_range(10, 20);
+
+ err = __tegra_powergate_remove_clamping(pg->id);
+ if (err)
+ goto disable_clks;
+
+ usleep_range(10, 20);
+
+ err = reset_control_deassert(pg->reset);
+ if (err)
+ goto disable_clks;
+
+ usleep_range(10, 20);
+
+ if (pg->pmc->soc->needs_mbist_war)
+ err = tegra210_clk_handle_mbist_war(pg->id);
+ if (err)
+ goto disable_clks;
+
+ if (disable_clocks)
+ tegra_powergate_disable_clocks(pg);
+
+ return 0;
+
+disable_clks:
+ tegra_powergate_disable_clocks(pg);
+ usleep_range(10, 20);
+
+powergate_off:
+ tegra_powergate_set(pg->id, false);
+
+ return err;
+}
+
+static int tegra_powergate_power_down(struct tegra_powergate *pg)
+{
+ int err;
+
+ err = tegra_powergate_enable_clocks(pg);
+ if (err)
+ return err;
+
+ usleep_range(10, 20);
+
+ err = reset_control_assert(pg->reset);
+ if (err)
+ goto disable_clks;
+
+ usleep_range(10, 20);
+
+ tegra_powergate_disable_clocks(pg);
+
+ usleep_range(10, 20);
+
+ err = tegra_powergate_set(pg->id, false);
+ if (err)
+ goto assert_resets;
+
+ return 0;
+
+assert_resets:
+ tegra_powergate_enable_clocks(pg);
+ usleep_range(10, 20);
+ reset_control_deassert(pg->reset);
+ usleep_range(10, 20);
+
+disable_clks:
+ tegra_powergate_disable_clocks(pg);
+
+ return err;
+}
+
+static int tegra_genpd_power_on(struct generic_pm_domain *domain)
+{
+ struct tegra_powergate *pg = to_powergate(domain);
+ int err;
+
+ err = tegra_powergate_power_up(pg, true);
+ if (err)
+ pr_err("failed to turn on PM domain %s: %d\n", pg->genpd.name,
+ err);
+
+ return err;
+}
+
+static int tegra_genpd_power_off(struct generic_pm_domain *domain)
+{
+ struct tegra_powergate *pg = to_powergate(domain);
+ int err;
+
+ err = tegra_powergate_power_down(pg);
+ if (err)
+ pr_err("failed to turn off PM domain %s: %d\n",
+ pg->genpd.name, err);
+
+ return err;
+}
+
+/**
+ * tegra_powergate_power_on() - power on partition
+ * @id: partition ID
+ */
+int tegra_powergate_power_on(unsigned int id)
+{
+ if (!tegra_powergate_is_available(id))
+ return -EINVAL;
+
+ return tegra_powergate_set(id, true);
+}
+
+/**
+ * tegra_powergate_power_off() - power off partition
+ * @id: partition ID
+ */
+int tegra_powergate_power_off(unsigned int id)
+{
+ if (!tegra_powergate_is_available(id))
+ return -EINVAL;
+
+ return tegra_powergate_set(id, false);
+}
+EXPORT_SYMBOL(tegra_powergate_power_off);
+
+/**
+ * tegra_powergate_is_powered() - check if partition is powered
+ * @id: partition ID
+ */
+int tegra_powergate_is_powered(unsigned int id)
+{
+ if (!tegra_powergate_is_valid(id))
+ return -EINVAL;
+
+ return tegra_powergate_state(id);
+}
+
+/**
+ * tegra_powergate_remove_clamping() - remove power clamps for partition
+ * @id: partition ID
+ */
+int tegra_powergate_remove_clamping(unsigned int id)
+{
+ if (!tegra_powergate_is_available(id))
+ return -EINVAL;
+
+ return __tegra_powergate_remove_clamping(id);
+}
+EXPORT_SYMBOL(tegra_powergate_remove_clamping);
+
+/**
+ * tegra_powergate_sequence_power_up() - power up partition
+ * @id: partition ID
+ * @clk: clock for partition
+ * @rst: reset for partition
+ *
+ * Must be called with clk disabled, and returns with clk enabled.
+ */
+int tegra_powergate_sequence_power_up(unsigned int id, struct clk *clk,
+ struct reset_control *rst)
+{
+ struct tegra_powergate *pg;
+ int err;
+
+ if (!tegra_powergate_is_available(id))
+ return -EINVAL;
+
+ pg = kzalloc(sizeof(*pg), GFP_KERNEL);
+ if (!pg)
+ return -ENOMEM;
+
+ pg->id = id;
+ pg->clks = &clk;
+ pg->num_clks = 1;
+ pg->reset = rst;
+ pg->pmc = pmc;
+
+ err = tegra_powergate_power_up(pg, false);
+ if (err)
+ pr_err("failed to turn on partition %d: %d\n", id, err);
+
+ kfree(pg);
+
+ return err;
+}
+EXPORT_SYMBOL(tegra_powergate_sequence_power_up);
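+
+/*
+ * Typical usage from a device driver (a sketch; the clk and rst handles are
+ * hypothetical and would come from clock/reset lookups in the caller):
+ *
+ *	err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_SATA,
+ *						sata->clk, sata->rst);
+ *	if (err < 0)
+ *		return err;
+ *
+ * On success the partition is unclamped, out of reset and its clock is left
+ * enabled, as the comment above notes.
+ */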
+
+#ifdef CONFIG_SMP
+/**
+ * tegra_get_cpu_powergate_id() - convert from CPU ID to partition ID
+ * @cpuid: CPU partition ID
+ *
+ * Returns the partition ID corresponding to the CPU partition ID or a
+ * negative error code on failure.
+ */
+static int tegra_get_cpu_powergate_id(unsigned int cpuid)
+{
+ if (pmc->soc && cpuid < pmc->soc->num_cpu_powergates)
+ return pmc->soc->cpu_powergates[cpuid];
+
+ return -EINVAL;
+}
+
+/**
+ * tegra_pmc_cpu_is_powered() - check if CPU partition is powered
+ * @cpuid: CPU partition ID
+ */
+bool tegra_pmc_cpu_is_powered(unsigned int cpuid)
+{
+ int id;
+
+ id = tegra_get_cpu_powergate_id(cpuid);
+ if (id < 0)
+ return false;
+
+ return tegra_powergate_is_powered(id);
+}
+
+/**
+ * tegra_pmc_cpu_power_on() - power on CPU partition
+ * @cpuid: CPU partition ID
+ */
+int tegra_pmc_cpu_power_on(unsigned int cpuid)
+{
+ int id;
+
+ id = tegra_get_cpu_powergate_id(cpuid);
+ if (id < 0)
+ return id;
+
+ return tegra_powergate_set(id, true);
+}
+
+/**
+ * tegra_pmc_cpu_remove_clamping() - remove power clamps for CPU partition
+ * @cpuid: CPU partition ID
+ */
+int tegra_pmc_cpu_remove_clamping(unsigned int cpuid)
+{
+ int id;
+
+ id = tegra_get_cpu_powergate_id(cpuid);
+ if (id < 0)
+ return id;
+
+ return tegra_powergate_remove_clamping(id);
+}
+#endif /* CONFIG_SMP */
+
+static int tegra_pmc_restart_notify(struct notifier_block *this,
+ unsigned long action, void *data)
+{
+ const char *cmd = data;
+ u32 value;
+
+ value = readl(pmc->scratch + pmc->soc->regs->scratch0);
+ value &= ~PMC_SCRATCH0_MODE_MASK;
+
+ if (cmd) {
+ if (strcmp(cmd, "recovery") == 0)
+ value |= PMC_SCRATCH0_MODE_RECOVERY;
+
+ if (strcmp(cmd, "bootloader") == 0)
+ value |= PMC_SCRATCH0_MODE_BOOTLOADER;
+
+ if (strcmp(cmd, "forced-recovery") == 0)
+ value |= PMC_SCRATCH0_MODE_RCM;
+ }
+
+ writel(value, pmc->scratch + pmc->soc->regs->scratch0);
+
+ /* reset everything but PMC_SCRATCH0 and PMC_RST_STATUS */
+ value = tegra_pmc_readl(PMC_CNTRL);
+ value |= PMC_CNTRL_MAIN_RST;
+ tegra_pmc_writel(value, PMC_CNTRL);
+
+ return NOTIFY_DONE;
+}
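+
+/*
+ * The cmd string originates from userspace: a reboot(2) call with
+ * LINUX_REBOOT_CMD_RESTART2 and the argument "bootloader" arrives here with
+ * cmd = "bootloader" and sets PMC_SCRATCH0_MODE_BOOTLOADER, so the boot ROM
+ * or bootloader can inspect PMC_SCRATCH0 after the PMC-driven reset.
+ */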
+
+static struct notifier_block tegra_pmc_restart_handler = {
+ .notifier_call = tegra_pmc_restart_notify,
+ .priority = 128,
+};
+
+static int powergate_show(struct seq_file *s, void *data)
+{
+ unsigned int i;
+ int status;
+
+ seq_puts(s, " powergate powered\n");
+ seq_puts(s, "------------------\n");
+
+ for (i = 0; i < pmc->soc->num_powergates; i++) {
+ status = tegra_powergate_is_powered(i);
+ if (status < 0)
+ continue;
+
+ seq_printf(s, " %9s %7s\n", pmc->soc->powergates[i],
+ status ? "yes" : "no");
+ }
+
+ return 0;
+}
+
+static int powergate_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, powergate_show, inode->i_private);
+}
+
+static const struct file_operations powergate_fops = {
+ .open = powergate_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int tegra_powergate_debugfs_init(void)
+{
+ pmc->debugfs = debugfs_create_file("powergate", S_IRUGO, NULL, NULL,
+ &powergate_fops);
+ if (!pmc->debugfs)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int tegra_powergate_of_get_clks(struct tegra_powergate *pg,
+ struct device_node *np)
+{
+ struct clk *clk;
+ unsigned int i, count;
+ int err;
+
+ count = of_clk_get_parent_count(np);
+ if (count == 0)
+ return -ENODEV;
+
+ pg->clks = kcalloc(count, sizeof(clk), GFP_KERNEL);
+ if (!pg->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < count; i++) {
+ pg->clks[i] = of_clk_get(np, i);
+ if (IS_ERR(pg->clks[i])) {
+ err = PTR_ERR(pg->clks[i]);
+ goto err;
+ }
+ }
+
+ pg->num_clks = count;
+
+ return 0;
+
+err:
+ while (i--)
+ clk_put(pg->clks[i]);
+
+ kfree(pg->clks);
+
+ return err;
+}
+
+static int tegra_powergate_of_get_resets(struct tegra_powergate *pg,
+ struct device_node *np, bool off)
+{
+ int err;
+
+ pg->reset = of_reset_control_array_get_exclusive(np);
+ if (IS_ERR(pg->reset)) {
+ err = PTR_ERR(pg->reset);
+ pr_err("failed to get device resets: %d\n", err);
+ return err;
+ }
+
+ if (off)
+ err = reset_control_assert(pg->reset);
+ else
+ err = reset_control_deassert(pg->reset);
+
+ if (err)
+ reset_control_put(pg->reset);
+
+ return err;
+}
+
+static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
+{
+ struct tegra_powergate *pg;
+ int id, err;
+ bool off;
+
+ pg = kzalloc(sizeof(*pg), GFP_KERNEL);
+ if (!pg)
+ return;
+
+ id = tegra_powergate_lookup(pmc, np->name);
+ if (id < 0) {
+ pr_err("powergate lookup failed for %s: %d\n", np->name, id);
+ goto free_mem;
+ }
+
+ /*
+ * Clear the bit for this powergate so it cannot be managed
+ * directly via the legacy APIs for controlling powergates.
+ */
+ clear_bit(id, pmc->powergates_available);
+
+ pg->id = id;
+ pg->genpd.name = np->name;
+ pg->genpd.power_off = tegra_genpd_power_off;
+ pg->genpd.power_on = tegra_genpd_power_on;
+ pg->pmc = pmc;
+
+ off = !tegra_powergate_is_powered(pg->id);
+
+ err = tegra_powergate_of_get_clks(pg, np);
+ if (err < 0) {
+ pr_err("failed to get clocks for %s: %d\n", np->name, err);
+ goto set_available;
+ }
+
+ err = tegra_powergate_of_get_resets(pg, np, off);
+ if (err < 0) {
+ pr_err("failed to get resets for %s: %d\n", np->name, err);
+ goto remove_clks;
+ }
+
+ if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) {
+ if (off)
+ WARN_ON(tegra_powergate_power_up(pg, true));
+
+ goto remove_resets;
+ }
+
+ /*
+ * FIXME: If XHCI is enabled for Tegra, then power up the XUSB
+ * host and super-speed partitions. Once the XHCI driver
+ * manages the partitions itself this code can be removed. Note
+ * that we don't register these partitions with the genpd core
+ * to prevent it from powering down the partitions, which would
+ * otherwise appear unused.
+ */
+ if (IS_ENABLED(CONFIG_USB_XHCI_TEGRA) &&
+ (id == TEGRA_POWERGATE_XUSBA || id == TEGRA_POWERGATE_XUSBC)) {
+ if (off)
+ WARN_ON(tegra_powergate_power_up(pg, true));
+
+ goto remove_resets;
+ }
+
+ err = pm_genpd_init(&pg->genpd, NULL, off);
+ if (err < 0) {
+ pr_err("failed to initialise PM domain %s: %d\n", np->name,
+ err);
+ goto remove_resets;
+ }
+
+ err = of_genpd_add_provider_simple(np, &pg->genpd);
+ if (err < 0) {
+ pr_err("failed to add PM domain provider for %s: %d\n",
+ np->name, err);
+ goto remove_genpd;
+ }
+
+ pr_debug("added PM domain %s\n", pg->genpd.name);
+
+ return;
+
+remove_genpd:
+ pm_genpd_remove(&pg->genpd);
+
+remove_resets:
+ reset_control_put(pg->reset);
+
+remove_clks:
+ while (pg->num_clks--)
+ clk_put(pg->clks[pg->num_clks]);
+
+ kfree(pg->clks);
+
+set_available:
+ set_bit(id, pmc->powergates_available);
+
+free_mem:
+ kfree(pg);
+}
+
+static void tegra_powergate_init(struct tegra_pmc *pmc,
+ struct device_node *parent)
+{
+ struct device_node *np, *child;
+ unsigned int i;
+
+ /* Create a bitmap of the available and valid partitions */
+ for (i = 0; i < pmc->soc->num_powergates; i++)
+ if (pmc->soc->powergates[i])
+ set_bit(i, pmc->powergates_available);
+
+ np = of_get_child_by_name(parent, "powergates");
+ if (!np)
+ return;
+
+ for_each_child_of_node(np, child)
+ tegra_powergate_add(pmc, child);
+
+ of_node_put(np);
+}
+
+static const struct tegra_io_pad_soc *
+tegra_io_pad_find(struct tegra_pmc *pmc, enum tegra_io_pad id)
+{
+ unsigned int i;
+
+ for (i = 0; i < pmc->soc->num_io_pads; i++)
+ if (pmc->soc->io_pads[i].id == id)
+ return &pmc->soc->io_pads[i];
+
+ return NULL;
+}
+
+static int tegra_io_pad_prepare(enum tegra_io_pad id, unsigned long *request,
+ unsigned long *status, u32 *mask)
+{
+ const struct tegra_io_pad_soc *pad;
+ unsigned long rate, value;
+
+ pad = tegra_io_pad_find(pmc, id);
+ if (!pad) {
+ pr_err("invalid I/O pad ID %u\n", id);
+ return -ENOENT;
+ }
+
+ if (pad->dpd == UINT_MAX)
+ return -ENOTSUPP;
+
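+	/* each pad owns a single bit; pads 0-31 use the DPD registers, pads 32 and up the DPD2 registers */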
+ *mask = BIT(pad->dpd % 32);
+
+ if (pad->dpd < 32) {
+ *status = pmc->soc->regs->dpd_status;
+ *request = pmc->soc->regs->dpd_req;
+ } else {
+ *status = pmc->soc->regs->dpd2_status;
+ *request = pmc->soc->regs->dpd2_req;
+ }
+
+ if (pmc->clk) {
+ rate = clk_get_rate(pmc->clk);
+ if (!rate) {
+ pr_err("failed to get clock rate\n");
+ return -ENODEV;
+ }
+
+ tegra_pmc_writel(DPD_SAMPLE_ENABLE, DPD_SAMPLE);
+
+ /* must be at least 200 ns, in APB (PCLK) clock cycles */
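+		/* e.g. (illustrative): a 204 MHz PCLK rounds up to 5 ns per cycle, so 40 cycles are programmed */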
+ value = DIV_ROUND_UP(1000000000, rate);
+ value = DIV_ROUND_UP(200, value);
+ tegra_pmc_writel(value, SEL_DPD_TIM);
+ }
+
+ return 0;
+}
+
+static int tegra_io_pad_poll(unsigned long offset, u32 mask,
+ u32 val, unsigned long timeout)
+{
+ u32 value;
+
+ timeout = jiffies + msecs_to_jiffies(timeout);
+
+ while (time_after(timeout, jiffies)) {
+ value = tegra_pmc_readl(offset);
+ if ((value & mask) == val)
+ return 0;
+
+ usleep_range(250, 1000);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static void tegra_io_pad_unprepare(void)
+{
+ if (pmc->clk)
+ tegra_pmc_writel(DPD_SAMPLE_DISABLE, DPD_SAMPLE);
+}
+
+/**
+ * tegra_io_pad_power_enable() - enable power to I/O pad
+ * @id: Tegra I/O pad ID for which to enable power
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int tegra_io_pad_power_enable(enum tegra_io_pad id)
+{
+ unsigned long request, status;
+ u32 mask;
+ int err;
+
+ mutex_lock(&pmc->powergates_lock);
+
+ err = tegra_io_pad_prepare(id, &request, &status, &mask);
+ if (err < 0) {
+ pr_err("failed to prepare I/O pad: %d\n", err);
+ goto unlock;
+ }
+
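+	/* request deep power down exit for this pad, then wait for its status bit to clear */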
+ tegra_pmc_writel(IO_DPD_REQ_CODE_OFF | mask, request);
+
+ err = tegra_io_pad_poll(status, mask, 0, 250);
+ if (err < 0) {
+ pr_err("failed to enable I/O pad: %d\n", err);
+ goto unlock;
+ }
+
+ tegra_io_pad_unprepare();
+
+unlock:
+ mutex_unlock(&pmc->powergates_lock);
+ return err;
+}
+EXPORT_SYMBOL(tegra_io_pad_power_enable);
+
+/**
+ * tegra_io_pad_power_disable() - disable power to I/O pad
+ * @id: Tegra I/O pad ID for which to disable power
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int tegra_io_pad_power_disable(enum tegra_io_pad id)
+{
+ unsigned long request, status;
+ u32 mask;
+ int err;
+
+ mutex_lock(&pmc->powergates_lock);
+
+ err = tegra_io_pad_prepare(id, &request, &status, &mask);
+ if (err < 0) {
+ pr_err("failed to prepare I/O pad: %d\n", err);
+ goto unlock;
+ }
+
+ tegra_pmc_writel(IO_DPD_REQ_CODE_ON | mask, request);
+
+ err = tegra_io_pad_poll(status, mask, mask, 250);
+ if (err < 0) {
+ pr_err("failed to disable I/O pad: %d\n", err);
+ goto unlock;
+ }
+
+ tegra_io_pad_unprepare();
+
+unlock:
+ mutex_unlock(&pmc->powergates_lock);
+ return err;
+}
+EXPORT_SYMBOL(tegra_io_pad_power_disable);
+
+int tegra_io_pad_set_voltage(enum tegra_io_pad id,
+ enum tegra_io_pad_voltage voltage)
+{
+ const struct tegra_io_pad_soc *pad;
+ u32 value;
+
+ pad = tegra_io_pad_find(pmc, id);
+ if (!pad)
+ return -ENOENT;
+
+ if (pad->voltage == UINT_MAX)
+ return -ENOTSUPP;
+
+ mutex_lock(&pmc->powergates_lock);
+
+ if (pmc->soc->has_impl_33v_pwr) {
+ value = tegra_pmc_readl(PMC_IMPL_E_33V_PWR);
+
+ if (voltage == TEGRA_IO_PAD_1800000UV)
+ value &= ~BIT(pad->voltage);
+ else
+ value |= BIT(pad->voltage);
+
+ tegra_pmc_writel(value, PMC_IMPL_E_33V_PWR);
+ } else {
+ /* write-enable PMC_PWR_DET_VALUE[pad->voltage] */
+ value = tegra_pmc_readl(PMC_PWR_DET);
+ value |= BIT(pad->voltage);
+ tegra_pmc_writel(value, PMC_PWR_DET);
+
+ /* update I/O voltage */
+ value = tegra_pmc_readl(PMC_PWR_DET_VALUE);
+
+ if (voltage == TEGRA_IO_PAD_1800000UV)
+ value &= ~BIT(pad->voltage);
+ else
+ value |= BIT(pad->voltage);
+
+ tegra_pmc_writel(value, PMC_PWR_DET_VALUE);
+ }
+
+ mutex_unlock(&pmc->powergates_lock);
+
+ usleep_range(100, 250);
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_io_pad_set_voltage);
+
+int tegra_io_pad_get_voltage(enum tegra_io_pad id)
+{
+ const struct tegra_io_pad_soc *pad;
+ u32 value;
+
+ pad = tegra_io_pad_find(pmc, id);
+ if (!pad)
+ return -ENOENT;
+
+ if (pad->voltage == UINT_MAX)
+ return -ENOTSUPP;
+
+ if (pmc->soc->has_impl_33v_pwr)
+ value = tegra_pmc_readl(PMC_IMPL_E_33V_PWR);
+ else
+ value = tegra_pmc_readl(PMC_PWR_DET_VALUE);
+
+ if ((value & BIT(pad->voltage)) == 0)
+ return TEGRA_IO_PAD_1800000UV;
+
+ return TEGRA_IO_PAD_3300000UV;
+}
+EXPORT_SYMBOL(tegra_io_pad_get_voltage);
+
+/**
+ * tegra_io_rail_power_on() - enable power to I/O rail
+ * @id: Tegra I/O pad ID for which to enable power
+ *
+ * See also: tegra_io_pad_power_enable()
+ */
+int tegra_io_rail_power_on(unsigned int id)
+{
+ return tegra_io_pad_power_enable(id);
+}
+EXPORT_SYMBOL(tegra_io_rail_power_on);
+
+/**
+ * tegra_io_rail_power_off() - disable power to I/O rail
+ * @id: Tegra I/O pad ID for which to disable power
+ *
+ * See also: tegra_io_pad_power_disable()
+ */
+int tegra_io_rail_power_off(unsigned int id)
+{
+ return tegra_io_pad_power_disable(id);
+}
+EXPORT_SYMBOL(tegra_io_rail_power_off);
+
+#ifdef CONFIG_PM_SLEEP
+enum tegra_suspend_mode tegra_pmc_get_suspend_mode(void)
+{
+ return pmc->suspend_mode;
+}
+
+void tegra_pmc_set_suspend_mode(enum tegra_suspend_mode mode)
+{
+ if (mode < TEGRA_SUSPEND_NONE || mode >= TEGRA_MAX_SUSPEND_MODE)
+ return;
+
+ pmc->suspend_mode = mode;
+}
+
+void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode)
+{
+ unsigned long long rate = 0;
+ u32 value;
+
+ switch (mode) {
+ case TEGRA_SUSPEND_LP1:
+ rate = 32768;
+ break;
+
+ case TEGRA_SUSPEND_LP2:
+ rate = clk_get_rate(pmc->clk);
+ break;
+
+ default:
+ break;
+ }
+
+ if (WARN_ON_ONCE(rate == 0))
+ rate = 100000000;
+
+ if (rate != pmc->rate) {
+ u64 ticks;
+
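+		/* convert the power-good and power-off times from microseconds to ticks of the clock chosen above, rounding up */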
+ ticks = pmc->cpu_good_time * rate + USEC_PER_SEC - 1;
+ do_div(ticks, USEC_PER_SEC);
+ tegra_pmc_writel(ticks, PMC_CPUPWRGOOD_TIMER);
+
+ ticks = pmc->cpu_off_time * rate + USEC_PER_SEC - 1;
+ do_div(ticks, USEC_PER_SEC);
+ tegra_pmc_writel(ticks, PMC_CPUPWROFF_TIMER);
+
+ wmb();
+
+ pmc->rate = rate;
+ }
+
+ value = tegra_pmc_readl(PMC_CNTRL);
+ value &= ~PMC_CNTRL_SIDE_EFFECT_LP0;
+ value |= PMC_CNTRL_CPU_PWRREQ_OE;
+ tegra_pmc_writel(value, PMC_CNTRL);
+}
+#endif
+
+static int tegra_pmc_parse_dt(struct tegra_pmc *pmc, struct device_node *np)
+{
+ u32 value, values[2];
+
+	if (!of_property_read_u32(np, "nvidia,suspend-mode", &value)) {
+ switch (value) {
+ case 0:
+ pmc->suspend_mode = TEGRA_SUSPEND_LP0;
+ break;
+
+ case 1:
+ pmc->suspend_mode = TEGRA_SUSPEND_LP1;
+ break;
+
+ case 2:
+ pmc->suspend_mode = TEGRA_SUSPEND_LP2;
+ break;
+
+ default:
+ pmc->suspend_mode = TEGRA_SUSPEND_NONE;
+ break;
+ }
+ }
+
+ pmc->suspend_mode = tegra_pm_validate_suspend_mode(pmc->suspend_mode);
+
+ if (of_property_read_u32(np, "nvidia,cpu-pwr-good-time", &value))
+ pmc->suspend_mode = TEGRA_SUSPEND_NONE;
+
+ pmc->cpu_good_time = value;
+
+ if (of_property_read_u32(np, "nvidia,cpu-pwr-off-time", &value))
+ pmc->suspend_mode = TEGRA_SUSPEND_NONE;
+
+ pmc->cpu_off_time = value;
+
+ if (of_property_read_u32_array(np, "nvidia,core-pwr-good-time",
+ values, ARRAY_SIZE(values)))
+ pmc->suspend_mode = TEGRA_SUSPEND_NONE;
+
+ pmc->core_osc_time = values[0];
+ pmc->core_pmu_time = values[1];
+
+ if (of_property_read_u32(np, "nvidia,core-pwr-off-time", &value))
+ pmc->suspend_mode = TEGRA_SUSPEND_NONE;
+
+ pmc->core_off_time = value;
+
+ pmc->corereq_high = of_property_read_bool(np,
+ "nvidia,core-power-req-active-high");
+
+ pmc->sysclkreq_high = of_property_read_bool(np,
+ "nvidia,sys-clock-req-active-high");
+
+ pmc->combined_req = of_property_read_bool(np,
+ "nvidia,combined-power-req");
+
+ pmc->cpu_pwr_good_en = of_property_read_bool(np,
+ "nvidia,cpu-pwr-good-en");
+
+ if (of_property_read_u32_array(np, "nvidia,lp0-vec", values,
+ ARRAY_SIZE(values)))
+ if (pmc->suspend_mode == TEGRA_SUSPEND_LP0)
+ pmc->suspend_mode = TEGRA_SUSPEND_LP1;
+
+ pmc->lp0_vec_phys = values[0];
+ pmc->lp0_vec_size = values[1];
+
+ return 0;
+}
+
+static void tegra_pmc_init(struct tegra_pmc *pmc)
+{
+ if (pmc->soc->init)
+ pmc->soc->init(pmc);
+}
+
+static void tegra_pmc_init_tsense_reset(struct tegra_pmc *pmc)
+{
+ static const char disabled[] = "emergency thermal reset disabled";
+ u32 pmu_addr, ctrl_id, reg_addr, reg_data, pinmux;
+ struct device *dev = pmc->dev;
+ struct device_node *np;
+ u32 value, checksum;
+
+ if (!pmc->soc->has_tsense_reset)
+ return;
+
+ np = of_get_child_by_name(pmc->dev->of_node, "i2c-thermtrip");
+ if (!np) {
+ dev_warn(dev, "i2c-thermtrip node not found, %s.\n", disabled);
+ return;
+ }
+
+ if (of_property_read_u32(np, "nvidia,i2c-controller-id", &ctrl_id)) {
+ dev_err(dev, "I2C controller ID missing, %s.\n", disabled);
+ goto out;
+ }
+
+ if (of_property_read_u32(np, "nvidia,bus-addr", &pmu_addr)) {
+ dev_err(dev, "nvidia,bus-addr missing, %s.\n", disabled);
+ goto out;
+ }
+
+ if (of_property_read_u32(np, "nvidia,reg-addr", &reg_addr)) {
+ dev_err(dev, "nvidia,reg-addr missing, %s.\n", disabled);
+ goto out;
+ }
+
+ if (of_property_read_u32(np, "nvidia,reg-data", &reg_data)) {
+ dev_err(dev, "nvidia,reg-data missing, %s.\n", disabled);
+ goto out;
+ }
+
+ if (of_property_read_u32(np, "nvidia,pinmux-id", &pinmux))
+ pinmux = 0;
+
+ value = tegra_pmc_readl(PMC_SENSOR_CTRL);
+ value |= PMC_SENSOR_CTRL_SCRATCH_WRITE;
+ tegra_pmc_writel(value, PMC_SENSOR_CTRL);
+
+ value = (reg_data << PMC_SCRATCH54_DATA_SHIFT) |
+ (reg_addr << PMC_SCRATCH54_ADDR_SHIFT);
+ tegra_pmc_writel(value, PMC_SCRATCH54);
+
+ value = PMC_SCRATCH55_RESET_TEGRA;
+ value |= ctrl_id << PMC_SCRATCH55_CNTRL_ID_SHIFT;
+ value |= pinmux << PMC_SCRATCH55_PINMUX_SHIFT;
+ value |= pmu_addr << PMC_SCRATCH55_I2CSLV1_SHIFT;
+
+ /*
+ * Calculate checksum of SCRATCH54, SCRATCH55 fields. Bits 23:16 will
+ * contain the checksum and are currently zero, so they are not added.
+ */
+ checksum = reg_addr + reg_data + (value & 0xff) + ((value >> 8) & 0xff)
+ + ((value >> 24) & 0xff);
+ checksum &= 0xff;
+ checksum = 0x100 - checksum;
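+	/* the complement makes all bytes of SCRATCH54 and SCRATCH55 sum to zero modulo 256 */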
+
+ value |= checksum << PMC_SCRATCH55_CHECKSUM_SHIFT;
+
+ tegra_pmc_writel(value, PMC_SCRATCH55);
+
+ value = tegra_pmc_readl(PMC_SENSOR_CTRL);
+ value |= PMC_SENSOR_CTRL_ENABLE_RST;
+ tegra_pmc_writel(value, PMC_SENSOR_CTRL);
+
+ dev_info(pmc->dev, "emergency thermal reset enabled\n");
+
+out:
+ of_node_put(np);
+}
+
+static int tegra_pmc_probe(struct platform_device *pdev)
+{
+ void __iomem *base;
+ struct resource *res;
+ int err;
+
+ /*
+ * Early initialisation should have configured an initial
+ * register mapping and setup the soc data pointer. If these
+ * are not valid then something went badly wrong!
+ */
+ if (WARN_ON(!pmc->base || !pmc->soc))
+ return -ENODEV;
+
+ err = tegra_pmc_parse_dt(pmc, pdev->dev.of_node);
+ if (err < 0)
+ return err;
+
+ /* take over the memory region from the early initialization */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "wake");
+ if (res) {
+ pmc->wake = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pmc->wake))
+ return PTR_ERR(pmc->wake);
+ } else {
+ pmc->wake = base;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "aotag");
+ if (res) {
+ pmc->aotag = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pmc->aotag))
+ return PTR_ERR(pmc->aotag);
+ } else {
+ pmc->aotag = base;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "scratch");
+ if (res) {
+ pmc->scratch = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pmc->scratch))
+ return PTR_ERR(pmc->scratch);
+ } else {
+ pmc->scratch = base;
+ }
+
+ pmc->clk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(pmc->clk)) {
+ err = PTR_ERR(pmc->clk);
+
+ if (err != -ENOENT) {
+ dev_err(&pdev->dev, "failed to get pclk: %d\n", err);
+ return err;
+ }
+
+ pmc->clk = NULL;
+ }
+
+ pmc->dev = &pdev->dev;
+
+ tegra_pmc_init(pmc);
+
+ tegra_pmc_init_tsense_reset(pmc);
+
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+ err = tegra_powergate_debugfs_init();
+ if (err < 0)
+ return err;
+ }
+
+ err = register_restart_handler(&tegra_pmc_restart_handler);
+ if (err) {
+ debugfs_remove(pmc->debugfs);
+ dev_err(&pdev->dev, "unable to register restart handler, %d\n",
+ err);
+ return err;
+ }
+
+ mutex_lock(&pmc->powergates_lock);
+ iounmap(pmc->base);
+ pmc->base = base;
+ mutex_unlock(&pmc->powergates_lock);
+
+ return 0;
+}
+
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM)
+static int tegra_pmc_suspend(struct device *dev)
+{
+ tegra_pmc_writel(virt_to_phys(tegra_resume), PMC_SCRATCH41);
+
+ return 0;
+}
+
+static int tegra_pmc_resume(struct device *dev)
+{
+ tegra_pmc_writel(0x0, PMC_SCRATCH41);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(tegra_pmc_pm_ops, tegra_pmc_suspend, tegra_pmc_resume);
+
+#endif
+
+static const char * const tegra20_powergates[] = {
+ [TEGRA_POWERGATE_CPU] = "cpu",
+ [TEGRA_POWERGATE_3D] = "3d",
+ [TEGRA_POWERGATE_VENC] = "venc",
+ [TEGRA_POWERGATE_VDEC] = "vdec",
+ [TEGRA_POWERGATE_PCIE] = "pcie",
+ [TEGRA_POWERGATE_L2] = "l2",
+ [TEGRA_POWERGATE_MPE] = "mpe",
+};
+
+static const struct tegra_pmc_regs tegra20_pmc_regs = {
+ .scratch0 = 0x50,
+ .dpd_req = 0x1b8,
+ .dpd_status = 0x1bc,
+ .dpd2_req = 0x1c0,
+ .dpd2_status = 0x1c4,
+};
+
+static void tegra20_pmc_init(struct tegra_pmc *pmc)
+{
+ u32 value;
+
+ /* Always enable CPU power request */
+ value = tegra_pmc_readl(PMC_CNTRL);
+ value |= PMC_CNTRL_CPU_PWRREQ_OE;
+ tegra_pmc_writel(value, PMC_CNTRL);
+
+ value = tegra_pmc_readl(PMC_CNTRL);
+
+ if (pmc->sysclkreq_high)
+ value &= ~PMC_CNTRL_SYSCLK_POLARITY;
+ else
+ value |= PMC_CNTRL_SYSCLK_POLARITY;
+
+ /* configure the output polarity while the request is tristated */
+ tegra_pmc_writel(value, PMC_CNTRL);
+
+ /* now enable the request */
+ value = tegra_pmc_readl(PMC_CNTRL);
+ value |= PMC_CNTRL_SYSCLK_OE;
+ tegra_pmc_writel(value, PMC_CNTRL);
+}
+
+static void tegra20_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
+ struct device_node *np,
+ bool invert)
+{
+ u32 value;
+
+ value = tegra_pmc_readl(PMC_CNTRL);
+
+ if (invert)
+ value |= PMC_CNTRL_INTR_POLARITY;
+ else
+ value &= ~PMC_CNTRL_INTR_POLARITY;
+
+ tegra_pmc_writel(value, PMC_CNTRL);
+}
+
+static const struct tegra_pmc_soc tegra20_pmc_soc = {
+ .num_powergates = ARRAY_SIZE(tegra20_powergates),
+ .powergates = tegra20_powergates,
+ .num_cpu_powergates = 0,
+ .cpu_powergates = NULL,
+ .has_tsense_reset = false,
+ .has_gpu_clamps = false,
+ .num_io_pads = 0,
+ .io_pads = NULL,
+ .regs = &tegra20_pmc_regs,
+ .init = tegra20_pmc_init,
+ .setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
+};
+
+static const char * const tegra30_powergates[] = {
+ [TEGRA_POWERGATE_CPU] = "cpu0",
+ [TEGRA_POWERGATE_3D] = "3d0",
+ [TEGRA_POWERGATE_VENC] = "venc",
+ [TEGRA_POWERGATE_VDEC] = "vdec",
+ [TEGRA_POWERGATE_PCIE] = "pcie",
+ [TEGRA_POWERGATE_L2] = "l2",
+ [TEGRA_POWERGATE_MPE] = "mpe",
+ [TEGRA_POWERGATE_HEG] = "heg",
+ [TEGRA_POWERGATE_SATA] = "sata",
+ [TEGRA_POWERGATE_CPU1] = "cpu1",
+ [TEGRA_POWERGATE_CPU2] = "cpu2",
+ [TEGRA_POWERGATE_CPU3] = "cpu3",
+ [TEGRA_POWERGATE_CELP] = "celp",
+ [TEGRA_POWERGATE_3D1] = "3d1",
+};
+
+static const u8 tegra30_cpu_powergates[] = {
+ TEGRA_POWERGATE_CPU,
+ TEGRA_POWERGATE_CPU1,
+ TEGRA_POWERGATE_CPU2,
+ TEGRA_POWERGATE_CPU3,
+};
+
+static const struct tegra_pmc_soc tegra30_pmc_soc = {
+ .num_powergates = ARRAY_SIZE(tegra30_powergates),
+ .powergates = tegra30_powergates,
+ .num_cpu_powergates = ARRAY_SIZE(tegra30_cpu_powergates),
+ .cpu_powergates = tegra30_cpu_powergates,
+ .has_tsense_reset = true,
+ .has_gpu_clamps = false,
+ .has_impl_33v_pwr = false,
+ .num_io_pads = 0,
+ .io_pads = NULL,
+ .regs = &tegra20_pmc_regs,
+ .init = tegra20_pmc_init,
+ .setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
+};
+
+static const char * const tegra114_powergates[] = {
+ [TEGRA_POWERGATE_CPU] = "crail",
+ [TEGRA_POWERGATE_3D] = "3d",
+ [TEGRA_POWERGATE_VENC] = "venc",
+ [TEGRA_POWERGATE_VDEC] = "vdec",
+ [TEGRA_POWERGATE_MPE] = "mpe",
+ [TEGRA_POWERGATE_HEG] = "heg",
+ [TEGRA_POWERGATE_CPU1] = "cpu1",
+ [TEGRA_POWERGATE_CPU2] = "cpu2",
+ [TEGRA_POWERGATE_CPU3] = "cpu3",
+ [TEGRA_POWERGATE_CELP] = "celp",
+ [TEGRA_POWERGATE_CPU0] = "cpu0",
+ [TEGRA_POWERGATE_C0NC] = "c0nc",
+ [TEGRA_POWERGATE_C1NC] = "c1nc",
+ [TEGRA_POWERGATE_DIS] = "dis",
+ [TEGRA_POWERGATE_DISB] = "disb",
+ [TEGRA_POWERGATE_XUSBA] = "xusba",
+ [TEGRA_POWERGATE_XUSBB] = "xusbb",
+ [TEGRA_POWERGATE_XUSBC] = "xusbc",
+};
+
+static const u8 tegra114_cpu_powergates[] = {
+ TEGRA_POWERGATE_CPU0,
+ TEGRA_POWERGATE_CPU1,
+ TEGRA_POWERGATE_CPU2,
+ TEGRA_POWERGATE_CPU3,
+};
+
+static const struct tegra_pmc_soc tegra114_pmc_soc = {
+ .num_powergates = ARRAY_SIZE(tegra114_powergates),
+ .powergates = tegra114_powergates,
+ .num_cpu_powergates = ARRAY_SIZE(tegra114_cpu_powergates),
+ .cpu_powergates = tegra114_cpu_powergates,
+ .has_tsense_reset = true,
+ .has_gpu_clamps = false,
+ .has_impl_33v_pwr = false,
+ .num_io_pads = 0,
+ .io_pads = NULL,
+ .regs = &tegra20_pmc_regs,
+ .init = tegra20_pmc_init,
+ .setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
+};
+
+static const char * const tegra124_powergates[] = {
+ [TEGRA_POWERGATE_CPU] = "crail",
+ [TEGRA_POWERGATE_3D] = "3d",
+ [TEGRA_POWERGATE_VENC] = "venc",
+ [TEGRA_POWERGATE_PCIE] = "pcie",
+ [TEGRA_POWERGATE_VDEC] = "vdec",
+ [TEGRA_POWERGATE_MPE] = "mpe",
+ [TEGRA_POWERGATE_HEG] = "heg",
+ [TEGRA_POWERGATE_SATA] = "sata",
+ [TEGRA_POWERGATE_CPU1] = "cpu1",
+ [TEGRA_POWERGATE_CPU2] = "cpu2",
+ [TEGRA_POWERGATE_CPU3] = "cpu3",
+ [TEGRA_POWERGATE_CELP] = "celp",
+ [TEGRA_POWERGATE_CPU0] = "cpu0",
+ [TEGRA_POWERGATE_C0NC] = "c0nc",
+ [TEGRA_POWERGATE_C1NC] = "c1nc",
+ [TEGRA_POWERGATE_SOR] = "sor",
+ [TEGRA_POWERGATE_DIS] = "dis",
+ [TEGRA_POWERGATE_DISB] = "disb",
+ [TEGRA_POWERGATE_XUSBA] = "xusba",
+ [TEGRA_POWERGATE_XUSBB] = "xusbb",
+ [TEGRA_POWERGATE_XUSBC] = "xusbc",
+ [TEGRA_POWERGATE_VIC] = "vic",
+ [TEGRA_POWERGATE_IRAM] = "iram",
+};
+
+static const u8 tegra124_cpu_powergates[] = {
+ TEGRA_POWERGATE_CPU0,
+ TEGRA_POWERGATE_CPU1,
+ TEGRA_POWERGATE_CPU2,
+ TEGRA_POWERGATE_CPU3,
+};
+
+static const struct tegra_io_pad_soc tegra124_io_pads[] = {
+ { .id = TEGRA_IO_PAD_AUDIO, .dpd = 17, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_BB, .dpd = 15, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CAM, .dpd = 36, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_COMP, .dpd = 22, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CSIA, .dpd = 0, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CSIB, .dpd = 1, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CSIE, .dpd = 44, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DSI, .dpd = 2, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DSIB, .dpd = 39, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DSIC, .dpd = 40, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DSID, .dpd = 41, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_HDMI, .dpd = 28, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_HSIC, .dpd = 19, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_HV, .dpd = 38, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_LVDS, .dpd = 57, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_MIPI_BIAS, .dpd = 3, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_NAND, .dpd = 13, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_PEX_BIAS, .dpd = 4, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_PEX_CLK1, .dpd = 5, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_PEX_CLK2, .dpd = 6, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_PEX_CNTRL, .dpd = 32, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_SDMMC1, .dpd = 33, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_SDMMC3, .dpd = 34, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_SDMMC4, .dpd = 35, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_SYS_DDC, .dpd = 58, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_UART, .dpd = 14, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_USB0, .dpd = 9, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_USB1, .dpd = 10, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_USB2, .dpd = 11, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_USB_BIAS, .dpd = 12, .voltage = UINT_MAX },
+};
+
+static const struct tegra_pmc_soc tegra124_pmc_soc = {
+ .num_powergates = ARRAY_SIZE(tegra124_powergates),
+ .powergates = tegra124_powergates,
+ .num_cpu_powergates = ARRAY_SIZE(tegra124_cpu_powergates),
+ .cpu_powergates = tegra124_cpu_powergates,
+ .has_tsense_reset = true,
+ .has_gpu_clamps = true,
+ .has_impl_33v_pwr = false,
+ .num_io_pads = ARRAY_SIZE(tegra124_io_pads),
+ .io_pads = tegra124_io_pads,
+ .regs = &tegra20_pmc_regs,
+ .init = tegra20_pmc_init,
+ .setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
+};
+
+static const char * const tegra210_powergates[] = {
+ [TEGRA_POWERGATE_CPU] = "crail",
+ [TEGRA_POWERGATE_3D] = "3d",
+ [TEGRA_POWERGATE_VENC] = "venc",
+ [TEGRA_POWERGATE_PCIE] = "pcie",
+ [TEGRA_POWERGATE_MPE] = "mpe",
+ [TEGRA_POWERGATE_SATA] = "sata",
+ [TEGRA_POWERGATE_CPU1] = "cpu1",
+ [TEGRA_POWERGATE_CPU2] = "cpu2",
+ [TEGRA_POWERGATE_CPU3] = "cpu3",
+ [TEGRA_POWERGATE_CPU0] = "cpu0",
+ [TEGRA_POWERGATE_C0NC] = "c0nc",
+ [TEGRA_POWERGATE_SOR] = "sor",
+ [TEGRA_POWERGATE_DIS] = "dis",
+ [TEGRA_POWERGATE_DISB] = "disb",
+ [TEGRA_POWERGATE_XUSBA] = "xusba",
+ [TEGRA_POWERGATE_XUSBB] = "xusbb",
+ [TEGRA_POWERGATE_XUSBC] = "xusbc",
+ [TEGRA_POWERGATE_VIC] = "vic",
+ [TEGRA_POWERGATE_IRAM] = "iram",
+ [TEGRA_POWERGATE_NVDEC] = "nvdec",
+ [TEGRA_POWERGATE_NVJPG] = "nvjpg",
+ [TEGRA_POWERGATE_AUD] = "aud",
+ [TEGRA_POWERGATE_DFD] = "dfd",
+ [TEGRA_POWERGATE_VE2] = "ve2",
+};
+
+static const u8 tegra210_cpu_powergates[] = {
+ TEGRA_POWERGATE_CPU0,
+ TEGRA_POWERGATE_CPU1,
+ TEGRA_POWERGATE_CPU2,
+ TEGRA_POWERGATE_CPU3,
+};
+
+static const struct tegra_io_pad_soc tegra210_io_pads[] = {
+ { .id = TEGRA_IO_PAD_AUDIO, .dpd = 17, .voltage = 5 },
+ { .id = TEGRA_IO_PAD_AUDIO_HV, .dpd = 61, .voltage = 18 },
+ { .id = TEGRA_IO_PAD_CAM, .dpd = 36, .voltage = 10 },
+ { .id = TEGRA_IO_PAD_CSIA, .dpd = 0, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CSIB, .dpd = 1, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CSIC, .dpd = 42, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CSID, .dpd = 43, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CSIE, .dpd = 44, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CSIF, .dpd = 45, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DBG, .dpd = 25, .voltage = 19 },
+ { .id = TEGRA_IO_PAD_DEBUG_NONAO, .dpd = 26, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DMIC, .dpd = 50, .voltage = 20 },
+ { .id = TEGRA_IO_PAD_DP, .dpd = 51, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DSI, .dpd = 2, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DSIB, .dpd = 39, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DSIC, .dpd = 40, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DSID, .dpd = 41, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_EMMC, .dpd = 35, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_EMMC2, .dpd = 37, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_GPIO, .dpd = 27, .voltage = 21 },
+ { .id = TEGRA_IO_PAD_HDMI, .dpd = 28, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_HSIC, .dpd = 19, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_LVDS, .dpd = 57, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_MIPI_BIAS, .dpd = 3, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_PEX_BIAS, .dpd = 4, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_PEX_CLK1, .dpd = 5, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_PEX_CLK2, .dpd = 6, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_PEX_CNTRL, .dpd = UINT_MAX, .voltage = 11 },
+ { .id = TEGRA_IO_PAD_SDMMC1, .dpd = 33, .voltage = 12 },
+ { .id = TEGRA_IO_PAD_SDMMC3, .dpd = 34, .voltage = 13 },
+ { .id = TEGRA_IO_PAD_SPI, .dpd = 46, .voltage = 22 },
+ { .id = TEGRA_IO_PAD_SPI_HV, .dpd = 47, .voltage = 23 },
+ { .id = TEGRA_IO_PAD_UART, .dpd = 14, .voltage = 2 },
+ { .id = TEGRA_IO_PAD_USB0, .dpd = 9, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_USB1, .dpd = 10, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_USB2, .dpd = 11, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_USB3, .dpd = 18, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_USB_BIAS, .dpd = 12, .voltage = UINT_MAX },
+};
+
+static const struct tegra_pmc_soc tegra210_pmc_soc = {
+ .num_powergates = ARRAY_SIZE(tegra210_powergates),
+ .powergates = tegra210_powergates,
+ .num_cpu_powergates = ARRAY_SIZE(tegra210_cpu_powergates),
+ .cpu_powergates = tegra210_cpu_powergates,
+ .has_tsense_reset = true,
+ .has_gpu_clamps = true,
+ .has_impl_33v_pwr = false,
+ .needs_mbist_war = true,
+ .num_io_pads = ARRAY_SIZE(tegra210_io_pads),
+ .io_pads = tegra210_io_pads,
+ .regs = &tegra20_pmc_regs,
+ .init = tegra20_pmc_init,
+ .setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
+};
+
+static const struct tegra_io_pad_soc tegra186_io_pads[] = {
+ { .id = TEGRA_IO_PAD_CSIA, .dpd = 0, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CSIB, .dpd = 1, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DSI, .dpd = 2, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_MIPI_BIAS, .dpd = 3, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_PEX_CLK_BIAS, .dpd = 4, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_PEX_CLK3, .dpd = 5, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_PEX_CLK2, .dpd = 6, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_PEX_CLK1, .dpd = 7, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_USB0, .dpd = 9, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_USB1, .dpd = 10, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_USB2, .dpd = 11, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_USB_BIAS, .dpd = 12, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_UART, .dpd = 14, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_AUDIO, .dpd = 17, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_HSIC, .dpd = 19, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DBG, .dpd = 25, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_HDMI_DP0, .dpd = 28, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_HDMI_DP1, .dpd = 29, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_PEX_CNTRL, .dpd = 32, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_SDMMC2_HV, .dpd = 34, .voltage = 5 },
+ { .id = TEGRA_IO_PAD_SDMMC4, .dpd = 36, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CAM, .dpd = 38, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DSIB, .dpd = 40, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DSIC, .dpd = 41, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DSID, .dpd = 42, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CSIC, .dpd = 43, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CSID, .dpd = 44, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CSIE, .dpd = 45, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_CSIF, .dpd = 46, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_SPI, .dpd = 47, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_UFS, .dpd = 49, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_DMIC_HV, .dpd = 52, .voltage = 2 },
+ { .id = TEGRA_IO_PAD_EDP, .dpd = 53, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_SDMMC1_HV, .dpd = 55, .voltage = 4 },
+ { .id = TEGRA_IO_PAD_SDMMC3_HV, .dpd = 56, .voltage = 6 },
+ { .id = TEGRA_IO_PAD_CONN, .dpd = 60, .voltage = UINT_MAX },
+ { .id = TEGRA_IO_PAD_AUDIO_HV, .dpd = 61, .voltage = 1 },
+ { .id = TEGRA_IO_PAD_AO_HV, .dpd = UINT_MAX, .voltage = 0 },
+};
+
+static const struct tegra_pmc_regs tegra186_pmc_regs = {
+ .scratch0 = 0x2000,
+ .dpd_req = 0x74,
+ .dpd_status = 0x78,
+ .dpd2_req = 0x7c,
+ .dpd2_status = 0x80,
+};
+
+static void tegra186_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
+ struct device_node *np,
+ bool invert)
+{
+ struct resource regs;
+ void __iomem *wake;
+ u32 value;
+ int index;
+
+ index = of_property_match_string(np, "reg-names", "wake");
+ if (index < 0) {
+ pr_err("failed to find PMC wake registers\n");
+ return;
+ }
+
+ of_address_to_resource(np, index, &regs);
+
+ wake = ioremap_nocache(regs.start, resource_size(&regs));
+ if (!wake) {
+ pr_err("failed to map PMC wake registers\n");
+ return;
+ }
+
+ value = readl(wake + WAKE_AOWAKE_CTRL);
+
+ if (invert)
+ value |= WAKE_AOWAKE_CTRL_INTR_POLARITY;
+ else
+ value &= ~WAKE_AOWAKE_CTRL_INTR_POLARITY;
+
+ writel(value, wake + WAKE_AOWAKE_CTRL);
+
+ iounmap(wake);
+}
+
+static const struct tegra_pmc_soc tegra186_pmc_soc = {
+ .num_powergates = 0,
+ .powergates = NULL,
+ .num_cpu_powergates = 0,
+ .cpu_powergates = NULL,
+ .has_tsense_reset = false,
+ .has_gpu_clamps = false,
+ .has_impl_33v_pwr = true,
+ .num_io_pads = ARRAY_SIZE(tegra186_io_pads),
+ .io_pads = tegra186_io_pads,
+ .regs = &tegra186_pmc_regs,
+ .init = NULL,
+ .setup_irq_polarity = tegra186_pmc_setup_irq_polarity,
+};
+
+static const struct of_device_id tegra_pmc_match[] = {
+ { .compatible = "nvidia,tegra194-pmc", .data = &tegra186_pmc_soc },
+ { .compatible = "nvidia,tegra186-pmc", .data = &tegra186_pmc_soc },
+ { .compatible = "nvidia,tegra210-pmc", .data = &tegra210_pmc_soc },
+ { .compatible = "nvidia,tegra132-pmc", .data = &tegra124_pmc_soc },
+ { .compatible = "nvidia,tegra124-pmc", .data = &tegra124_pmc_soc },
+ { .compatible = "nvidia,tegra114-pmc", .data = &tegra114_pmc_soc },
+ { .compatible = "nvidia,tegra30-pmc", .data = &tegra30_pmc_soc },
+ { .compatible = "nvidia,tegra20-pmc", .data = &tegra20_pmc_soc },
+ { }
+};
+
+static struct platform_driver tegra_pmc_driver = {
+ .driver = {
+ .name = "tegra-pmc",
+ .suppress_bind_attrs = true,
+ .of_match_table = tegra_pmc_match,
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM)
+ .pm = &tegra_pmc_pm_ops,
+#endif
+ },
+ .probe = tegra_pmc_probe,
+};
+builtin_platform_driver(tegra_pmc_driver);
+
+/*
+ * Early initialization to allow access to registers in the very early boot
+ * process.
+ */
+static int __init tegra_pmc_early_init(void)
+{
+ const struct of_device_id *match;
+ struct device_node *np;
+ struct resource regs;
+ bool invert;
+
+ mutex_init(&pmc->powergates_lock);
+
+ np = of_find_matching_node_and_match(NULL, tegra_pmc_match, &match);
+ if (!np) {
+ /*
+ * Fall back to legacy initialization for 32-bit ARM only. All
+ * 64-bit ARM device tree files for Tegra are required to have
+ * a PMC node.
+ *
+ * This is for backwards-compatibility with old device trees
+ * that didn't contain a PMC node. Note that in this case the
+ * SoC data can't be matched and therefore powergating is
+ * disabled.
+ */
+ if (IS_ENABLED(CONFIG_ARM) && soc_is_tegra()) {
+ pr_warn("DT node not found, powergating disabled\n");
+
+ regs.start = 0x7000e400;
+ regs.end = 0x7000e7ff;
+ regs.flags = IORESOURCE_MEM;
+
+ pr_warn("Using memory region %pR\n", &regs);
+ } else {
+ /*
+ * At this point we're not running on Tegra, so play
+ * nice with multi-platform kernels.
+ */
+ return 0;
+ }
+ } else {
+ /*
+ * Extract information from the device tree if we've found a
+ * matching node.
+ */
+ if (of_address_to_resource(np, 0, &regs) < 0) {
+ pr_err("failed to get PMC registers\n");
+ of_node_put(np);
+ return -ENXIO;
+ }
+ }
+
+ pmc->base = ioremap_nocache(regs.start, resource_size(&regs));
+ if (!pmc->base) {
+ pr_err("failed to map PMC registers\n");
+ of_node_put(np);
+ return -ENXIO;
+ }
+
+ if (np) {
+ pmc->soc = match->data;
+
+ tegra_powergate_init(pmc, np);
+
+ /*
+ * Invert the interrupt polarity if a PMC device tree node
+ * exists and contains the nvidia,invert-interrupt property.
+ */
+ invert = of_property_read_bool(np, "nvidia,invert-interrupt");
+
+ pmc->soc->setup_irq_polarity(pmc, np, invert);
+
+ of_node_put(np);
+ }
+
+ return 0;
+}
+early_initcall(tegra_pmc_early_init);
diff --git a/drivers/soc/tegra/powergate-bpmp.c b/drivers/soc/tegra/powergate-bpmp.c
new file mode 100644
index 000000000..82c7e27cd
--- /dev/null
+++ b/drivers/soc/tegra/powergate-bpmp.c
@@ -0,0 +1,370 @@
+/*
+ * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+
+#include <soc/tegra/bpmp.h>
+#include <soc/tegra/bpmp-abi.h>
+
+struct tegra_powergate_info {
+ unsigned int id;
+ char *name;
+};
+
+struct tegra_powergate {
+ struct generic_pm_domain genpd;
+ struct tegra_bpmp *bpmp;
+ unsigned int id;
+};
+
+static inline struct tegra_powergate *
+to_tegra_powergate(struct generic_pm_domain *genpd)
+{
+ return container_of(genpd, struct tegra_powergate, genpd);
+}
+
+static int tegra_bpmp_powergate_set_state(struct tegra_bpmp *bpmp,
+ unsigned int id, u32 state)
+{
+ struct mrq_pg_request request;
+ struct tegra_bpmp_message msg;
+ int err;
+
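+	/* build a CMD_PG_SET_STATE request and submit it to the BPMP firmware via MRQ_PG */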
+ memset(&request, 0, sizeof(request));
+ request.cmd = CMD_PG_SET_STATE;
+ request.id = id;
+ request.set_state.state = state;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_PG;
+ msg.tx.data = &request;
+ msg.tx.size = sizeof(request);
+
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err < 0)
+ return err;
+ else if (msg.rx.ret < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int tegra_bpmp_powergate_get_state(struct tegra_bpmp *bpmp,
+ unsigned int id)
+{
+ struct mrq_pg_response response;
+ struct mrq_pg_request request;
+ struct tegra_bpmp_message msg;
+ int err;
+
+ memset(&request, 0, sizeof(request));
+ request.cmd = CMD_PG_GET_STATE;
+ request.id = id;
+
+ memset(&response, 0, sizeof(response));
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_PG;
+ msg.tx.data = &request;
+ msg.tx.size = sizeof(request);
+ msg.rx.data = &response;
+ msg.rx.size = sizeof(response);
+
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err < 0)
+ return PG_STATE_OFF;
+ else if (msg.rx.ret < 0)
+ return -EINVAL;
+
+ return response.get_state.state;
+}
+
+static int tegra_bpmp_powergate_get_max_id(struct tegra_bpmp *bpmp)
+{
+ struct mrq_pg_response response;
+ struct mrq_pg_request request;
+ struct tegra_bpmp_message msg;
+ int err;
+
+ memset(&request, 0, sizeof(request));
+ request.cmd = CMD_PG_GET_MAX_ID;
+
+ memset(&response, 0, sizeof(response));
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_PG;
+ msg.tx.data = &request;
+ msg.tx.size = sizeof(request);
+ msg.rx.data = &response;
+ msg.rx.size = sizeof(response);
+
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err < 0)
+ return err;
+ else if (msg.rx.ret < 0)
+ return -EINVAL;
+
+ return response.get_max_id.max_id;
+}
+
+static char *tegra_bpmp_powergate_get_name(struct tegra_bpmp *bpmp,
+ unsigned int id)
+{
+ struct mrq_pg_response response;
+ struct mrq_pg_request request;
+ struct tegra_bpmp_message msg;
+ int err;
+
+ memset(&request, 0, sizeof(request));
+ request.cmd = CMD_PG_GET_NAME;
+ request.id = id;
+
+ memset(&response, 0, sizeof(response));
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_PG;
+ msg.tx.data = &request;
+ msg.tx.size = sizeof(request);
+ msg.rx.data = &response;
+ msg.rx.size = sizeof(response);
+
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err < 0 || msg.rx.ret < 0)
+ return NULL;
+
+ return kstrdup(response.get_name.name, GFP_KERNEL);
+}
+
+static inline bool tegra_bpmp_powergate_is_powered(struct tegra_bpmp *bpmp,
+ unsigned int id)
+{
+ return tegra_bpmp_powergate_get_state(bpmp, id) != PG_STATE_OFF;
+}
+
+static int tegra_powergate_power_on(struct generic_pm_domain *domain)
+{
+ struct tegra_powergate *powergate = to_tegra_powergate(domain);
+ struct tegra_bpmp *bpmp = powergate->bpmp;
+
+ return tegra_bpmp_powergate_set_state(bpmp, powergate->id,
+ PG_STATE_ON);
+}
+
+static int tegra_powergate_power_off(struct generic_pm_domain *domain)
+{
+ struct tegra_powergate *powergate = to_tegra_powergate(domain);
+ struct tegra_bpmp *bpmp = powergate->bpmp;
+
+ return tegra_bpmp_powergate_set_state(bpmp, powergate->id,
+ PG_STATE_OFF);
+}
+
+static struct tegra_powergate *
+tegra_powergate_add(struct tegra_bpmp *bpmp,
+ const struct tegra_powergate_info *info)
+{
+ struct tegra_powergate *powergate;
+ bool off;
+ int err;
+
+ off = !tegra_bpmp_powergate_is_powered(bpmp, info->id);
+
+ powergate = devm_kzalloc(bpmp->dev, sizeof(*powergate), GFP_KERNEL);
+ if (!powergate)
+ return ERR_PTR(-ENOMEM);
+
+ powergate->id = info->id;
+ powergate->bpmp = bpmp;
+
+ powergate->genpd.name = kstrdup(info->name, GFP_KERNEL);
+ powergate->genpd.power_on = tegra_powergate_power_on;
+ powergate->genpd.power_off = tegra_powergate_power_off;
+
+ err = pm_genpd_init(&powergate->genpd, NULL, off);
+ if (err < 0) {
+ kfree(powergate->genpd.name);
+ return ERR_PTR(err);
+ }
+
+ return powergate;
+}
+
+static void tegra_powergate_remove(struct tegra_powergate *powergate)
+{
+ struct generic_pm_domain *genpd = &powergate->genpd;
+ struct tegra_bpmp *bpmp = powergate->bpmp;
+ int err;
+
+ err = pm_genpd_remove(genpd);
+ if (err < 0)
+ dev_err(bpmp->dev, "failed to remove power domain %s: %d\n",
+ genpd->name, err);
+
+ kfree(genpd->name);
+}
+
+static int
+tegra_bpmp_probe_powergates(struct tegra_bpmp *bpmp,
+ struct tegra_powergate_info **powergatesp)
+{
+ struct tegra_powergate_info *powergates;
+ unsigned int max_id, id, count = 0;
+ unsigned int num_holes = 0;
+ int err;
+
+ err = tegra_bpmp_powergate_get_max_id(bpmp);
+ if (err < 0)
+ return err;
+
+ max_id = err;
+
+ dev_dbg(bpmp->dev, "maximum powergate ID: %u\n", max_id);
+
+ powergates = kcalloc(max_id + 1, sizeof(*powergates), GFP_KERNEL);
+ if (!powergates)
+ return -ENOMEM;
+
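+	/* IDs for which the firmware reports no name are holes in the ID space and are skipped */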
+ for (id = 0; id <= max_id; id++) {
+ struct tegra_powergate_info *info = &powergates[count];
+
+ info->name = tegra_bpmp_powergate_get_name(bpmp, id);
+ if (!info->name || info->name[0] == '\0') {
+ num_holes++;
+ continue;
+ }
+
+ info->id = id;
+ count++;
+ }
+
+ dev_dbg(bpmp->dev, "holes: %u\n", num_holes);
+
+ *powergatesp = powergates;
+
+ return count;
+}
+
+static int tegra_bpmp_add_powergates(struct tegra_bpmp *bpmp,
+ struct tegra_powergate_info *powergates,
+ unsigned int count)
+{
+ struct genpd_onecell_data *genpd = &bpmp->genpd;
+ struct generic_pm_domain **domains;
+ struct tegra_powergate *powergate;
+ unsigned int i;
+ int err;
+
+ domains = kcalloc(count, sizeof(*domains), GFP_KERNEL);
+ if (!domains)
+ return -ENOMEM;
+
+ for (i = 0; i < count; i++) {
+ powergate = tegra_powergate_add(bpmp, &powergates[i]);
+ if (IS_ERR(powergate)) {
+ err = PTR_ERR(powergate);
+ goto remove;
+ }
+
+ dev_dbg(bpmp->dev, "added power domain %s\n",
+ powergate->genpd.name);
+ domains[i] = &powergate->genpd;
+ }
+
+ genpd->num_domains = count;
+ genpd->domains = domains;
+
+ return 0;
+
+remove:
+ while (i--) {
+ powergate = to_tegra_powergate(domains[i]);
+ tegra_powergate_remove(powergate);
+ }
+
+ kfree(genpd->domains);
+ return err;
+}
+
+static void tegra_bpmp_remove_powergates(struct tegra_bpmp *bpmp)
+{
+ struct genpd_onecell_data *genpd = &bpmp->genpd;
+ unsigned int i = genpd->num_domains;
+ struct tegra_powergate *powergate;
+
+ while (i--) {
+ dev_dbg(bpmp->dev, "removing power domain %s\n",
+ genpd->domains[i]->name);
+ powergate = to_tegra_powergate(genpd->domains[i]);
+ tegra_powergate_remove(powergate);
+ }
+}
+
+static struct generic_pm_domain *
+tegra_powergate_xlate(struct of_phandle_args *spec, void *data)
+{
+ struct generic_pm_domain *domain = ERR_PTR(-ENOENT);
+ struct genpd_onecell_data *genpd = data;
+ unsigned int i;
+
+ for (i = 0; i < genpd->num_domains; i++) {
+ struct tegra_powergate *powergate;
+
+ powergate = to_tegra_powergate(genpd->domains[i]);
+ if (powergate->id == spec->args[0]) {
+ domain = &powergate->genpd;
+ break;
+ }
+ }
+
+ return domain;
+}
+
+int tegra_bpmp_init_powergates(struct tegra_bpmp *bpmp)
+{
+ struct device_node *np = bpmp->dev->of_node;
+ struct tegra_powergate_info *powergates;
+ struct device *dev = bpmp->dev;
+ unsigned int count, i;
+ int err;
+
+ err = tegra_bpmp_probe_powergates(bpmp, &powergates);
+ if (err < 0)
+ return err;
+
+ count = err;
+
+ dev_dbg(dev, "%u power domains probed\n", count);
+
+ err = tegra_bpmp_add_powergates(bpmp, powergates, count);
+ if (err < 0)
+ goto free;
+
+ bpmp->genpd.xlate = tegra_powergate_xlate;
+
+ err = of_genpd_add_provider_onecell(np, &bpmp->genpd);
+ if (err < 0) {
+ dev_err(dev, "failed to add power domain provider: %d\n", err);
+ tegra_bpmp_remove_powergates(bpmp);
+ }
+
+free:
+ for (i = 0; i < count; i++)
+ kfree(powergates[i].name);
+
+ kfree(powergates);
+ return err;
+}
diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig
new file mode 100644
index 000000000..be4570baa
--- /dev/null
+++ b/drivers/soc/ti/Kconfig
@@ -0,0 +1,76 @@
+# 64-bit ARM SoCs from TI
+if ARM64
+
+if ARCH_K3
+
+config ARCH_K3_AM6_SOC
+ bool "K3 AM6 SoC"
+ help
+	  Enable support for TI's AM6 SoC family.
+
+endif
+
+endif
+
+#
+# TI SOC drivers
+#
+menuconfig SOC_TI
+ bool "TI SOC drivers support"
+
+if SOC_TI
+
+config KEYSTONE_NAVIGATOR_QMSS
+ tristate "Keystone Queue Manager Sub System"
+ depends on ARCH_KEYSTONE
+ help
+	  Say y here to enable support for the Keystone multicore Navigator
+	  Queue Manager. The Queue Manager is a hardware module responsible
+	  for accelerating management of the packet queues. Packets are
+	  queued/de-queued by writing/reading a descriptor address to/from a
+	  particular memory-mapped location in the Queue Manager module.
+
+ If unsure, say N.
+
+config KEYSTONE_NAVIGATOR_DMA
+ tristate "TI Keystone Navigator Packet DMA support"
+ depends on ARCH_KEYSTONE
+ help
+	  Say y to enable support for the Keystone Navigator Packet DMA on
+	  the Keystone family of devices. It sets up the DMA channels for the
+ Queue Manager Sub System.
+
+ If unsure, say N.
+
+config AMX3_PM
+ tristate "AMx3 Power Management"
+ depends on SOC_AM33XX || SOC_AM43XX
+ depends on WKUP_M3_IPC && TI_EMIF_SRAM && SRAM
+ help
+ Enable power management on AM335x and AM437x. Required for suspend to mem
+ and standby states on both AM335x and AM437x platforms and for deeper cpuidle
+ c-states on AM335x.
+
+config WKUP_M3_IPC
+ tristate "TI AMx3 Wkup-M3 IPC Driver"
+ depends on WKUP_M3_RPROC
+ depends on OMAP2PLUS_MBOX
+ help
+	  TI AM33XX and AM43XX have a Cortex M3, the Wakeup M3, to handle
+	  low power transitions. This IPC driver provides the necessary API
+	  to communicate with and use the Wakeup M3 for PM features like
+	  suspend/resume, and boots it using the wkup_m3_rproc driver.
+
+config TI_SCI_PM_DOMAINS
+ tristate "TI SCI PM Domains Driver"
+ depends on TI_SCI_PROTOCOL
+ depends on PM_GENERIC_DOMAINS
+ help
+	  Generic power domain implementation for TI devices implementing
+ the TI SCI protocol.
+
+ To compile this as a module, choose M here. The module will be
+ called ti_sci_pm_domains. Note this is needed early in boot before
+ rootfs may be available.
+
+endif # SOC_TI
diff --git a/drivers/soc/ti/Makefile b/drivers/soc/ti/Makefile
new file mode 100644
index 000000000..a22edc0b2
--- /dev/null
+++ b/drivers/soc/ti/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# TI Keystone SOC drivers
+#
+obj-$(CONFIG_KEYSTONE_NAVIGATOR_QMSS) += knav_qmss.o
+knav_qmss-y := knav_qmss_queue.o knav_qmss_acc.o
+obj-$(CONFIG_KEYSTONE_NAVIGATOR_DMA) += knav_dma.o
+obj-$(CONFIG_AMX3_PM) += pm33xx.o
+obj-$(CONFIG_WKUP_M3_IPC) += wkup_m3_ipc.o
+obj-$(CONFIG_TI_SCI_PM_DOMAINS) += ti_sci_pm_domains.o
diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c
new file mode 100644
index 000000000..eb2e87229
--- /dev/null
+++ b/drivers/soc/ti/knav_dma.c
@@ -0,0 +1,831 @@
+/*
+ * Copyright (C) 2014 Texas Instruments Incorporated
+ * Authors: Santosh Shilimkar <santosh.shilimkar@ti.com>
+ * Sandeep Nair <sandeep_n@ti.com>
+ * Cyril Chemparathy <cyril@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/dma-direction.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
+#include <linux/of_dma.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/soc/ti/knav_dma.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#define REG_MASK 0xffffffff
+
+#define DMA_LOOPBACK BIT(31)
+#define DMA_ENABLE BIT(31)
+#define DMA_TEARDOWN BIT(30)
+
+#define DMA_TX_FILT_PSWORDS BIT(29)
+#define DMA_TX_FILT_EINFO BIT(30)
+#define DMA_TX_PRIO_SHIFT 0
+#define DMA_RX_PRIO_SHIFT 16
+#define DMA_PRIO_MASK GENMASK(3, 0)
+#define DMA_PRIO_DEFAULT 0
+#define DMA_RX_TIMEOUT_DEFAULT 17500 /* cycles */
+#define DMA_RX_TIMEOUT_MASK GENMASK(16, 0)
+#define DMA_RX_TIMEOUT_SHIFT 0
+
+#define CHAN_HAS_EPIB BIT(30)
+#define CHAN_HAS_PSINFO BIT(29)
+#define CHAN_ERR_RETRY BIT(28)
+#define CHAN_PSINFO_AT_SOP BIT(25)
+#define CHAN_SOP_OFF_SHIFT 16
+#define CHAN_SOP_OFF_MASK GENMASK(9, 0)
+#define DESC_TYPE_SHIFT 26
+#define DESC_TYPE_MASK GENMASK(2, 0)
+
+/*
+ * QMGR & QNUM together make up 14 bits with QMGR as the 2 MSb's in the logical
+ * navigator cloud mapping scheme.
+ * using the 14bit physical queue numbers directly maps into this scheme.
+ */
+#define CHAN_QNUM_MASK GENMASK(14, 0)
+#define DMA_MAX_QMS 4
+#define DMA_TIMEOUT 1 /* msecs */
+#define DMA_INVALID_ID 0xffff
+
+struct reg_global {
+ u32 revision;
+ u32 perf_control;
+ u32 emulation_control;
+ u32 priority_control;
+ u32 qm_base_address[DMA_MAX_QMS];
+};
+
+struct reg_chan {
+ u32 control;
+ u32 mode;
+ u32 __rsvd[6];
+};
+
+struct reg_tx_sched {
+ u32 prio;
+};
+
+struct reg_rx_flow {
+ u32 control;
+ u32 tags;
+ u32 tag_sel;
+ u32 fdq_sel[2];
+ u32 thresh[3];
+};
+
+struct knav_dma_pool_device {
+ struct device *dev;
+ struct list_head list;
+};
+
+struct knav_dma_device {
+ bool loopback, enable_all;
+ unsigned tx_priority, rx_priority, rx_timeout;
+ unsigned logical_queue_managers;
+ unsigned qm_base_address[DMA_MAX_QMS];
+ struct reg_global __iomem *reg_global;
+ struct reg_chan __iomem *reg_tx_chan;
+ struct reg_rx_flow __iomem *reg_rx_flow;
+ struct reg_chan __iomem *reg_rx_chan;
+ struct reg_tx_sched __iomem *reg_tx_sched;
+ unsigned max_rx_chan, max_tx_chan;
+ unsigned max_rx_flow;
+ char name[32];
+ atomic_t ref_count;
+ struct list_head list;
+ struct list_head chan_list;
+ spinlock_t lock;
+};
+
+struct knav_dma_chan {
+ enum dma_transfer_direction direction;
+ struct knav_dma_device *dma;
+ atomic_t ref_count;
+
+ /* registers */
+ struct reg_chan __iomem *reg_chan;
+ struct reg_tx_sched __iomem *reg_tx_sched;
+ struct reg_rx_flow __iomem *reg_rx_flow;
+
+ /* configuration stuff */
+ unsigned channel, flow;
+ struct knav_dma_cfg cfg;
+ struct list_head list;
+ spinlock_t lock;
+};
+
+#define chan_number(ch) ((ch->direction == DMA_MEM_TO_DEV) ? \
+ ch->channel : ch->flow)
+
+static struct knav_dma_pool_device *kdev;
+
+static bool device_ready;
+bool knav_dma_device_ready(void)
+{
+ return device_ready;
+}
+EXPORT_SYMBOL_GPL(knav_dma_device_ready);
+
+static bool check_config(struct knav_dma_chan *chan, struct knav_dma_cfg *cfg)
+{
+ if (!memcmp(&chan->cfg, cfg, sizeof(*cfg)))
+ return true;
+ else
+ return false;
+}
+
+static int chan_start(struct knav_dma_chan *chan,
+ struct knav_dma_cfg *cfg)
+{
+ u32 v = 0;
+
+ spin_lock(&chan->lock);
+ if ((chan->direction == DMA_MEM_TO_DEV) && chan->reg_chan) {
+ if (cfg->u.tx.filt_pswords)
+ v |= DMA_TX_FILT_PSWORDS;
+ if (cfg->u.tx.filt_einfo)
+ v |= DMA_TX_FILT_EINFO;
+ writel_relaxed(v, &chan->reg_chan->mode);
+ writel_relaxed(DMA_ENABLE, &chan->reg_chan->control);
+ }
+
+ if (chan->reg_tx_sched)
+ writel_relaxed(cfg->u.tx.priority, &chan->reg_tx_sched->prio);
+
+ if (chan->reg_rx_flow) {
+ v = 0;
+
+ if (cfg->u.rx.einfo_present)
+ v |= CHAN_HAS_EPIB;
+ if (cfg->u.rx.psinfo_present)
+ v |= CHAN_HAS_PSINFO;
+ if (cfg->u.rx.err_mode == DMA_RETRY)
+ v |= CHAN_ERR_RETRY;
+ v |= (cfg->u.rx.desc_type & DESC_TYPE_MASK) << DESC_TYPE_SHIFT;
+ if (cfg->u.rx.psinfo_at_sop)
+ v |= CHAN_PSINFO_AT_SOP;
+ v |= (cfg->u.rx.sop_offset & CHAN_SOP_OFF_MASK)
+ << CHAN_SOP_OFF_SHIFT;
+ v |= cfg->u.rx.dst_q & CHAN_QNUM_MASK;
+
+ writel_relaxed(v, &chan->reg_rx_flow->control);
+ writel_relaxed(0, &chan->reg_rx_flow->tags);
+ writel_relaxed(0, &chan->reg_rx_flow->tag_sel);
+
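+		/* each fdq_sel register packs two free descriptor queue numbers, one per 16-bit half */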
+ v = cfg->u.rx.fdq[0] << 16;
+ v |= cfg->u.rx.fdq[1] & CHAN_QNUM_MASK;
+ writel_relaxed(v, &chan->reg_rx_flow->fdq_sel[0]);
+
+ v = cfg->u.rx.fdq[2] << 16;
+ v |= cfg->u.rx.fdq[3] & CHAN_QNUM_MASK;
+ writel_relaxed(v, &chan->reg_rx_flow->fdq_sel[1]);
+
+ writel_relaxed(0, &chan->reg_rx_flow->thresh[0]);
+ writel_relaxed(0, &chan->reg_rx_flow->thresh[1]);
+ writel_relaxed(0, &chan->reg_rx_flow->thresh[2]);
+ }
+
+ /* Keep a copy of the cfg */
+ memcpy(&chan->cfg, cfg, sizeof(*cfg));
+ spin_unlock(&chan->lock);
+
+ return 0;
+}
+
+static int chan_teardown(struct knav_dma_chan *chan)
+{
+ unsigned long end, value;
+
+ if (!chan->reg_chan)
+ return 0;
+
+ /* indicate teardown */
+ writel_relaxed(DMA_TEARDOWN, &chan->reg_chan->control);
+
+ /* wait for the dma to shut itself down */
+ end = jiffies + msecs_to_jiffies(DMA_TIMEOUT);
+ do {
+ value = readl_relaxed(&chan->reg_chan->control);
+ if ((value & DMA_ENABLE) == 0)
+ break;
+ } while (time_after(end, jiffies));
+
+ if (readl_relaxed(&chan->reg_chan->control) & DMA_ENABLE) {
+ dev_err(kdev->dev, "timeout waiting for teardown\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void chan_stop(struct knav_dma_chan *chan)
+{
+ spin_lock(&chan->lock);
+ if (chan->reg_rx_flow) {
+ /* first detach fdqs, starve out the flow */
+ writel_relaxed(0, &chan->reg_rx_flow->fdq_sel[0]);
+ writel_relaxed(0, &chan->reg_rx_flow->fdq_sel[1]);
+ writel_relaxed(0, &chan->reg_rx_flow->thresh[0]);
+ writel_relaxed(0, &chan->reg_rx_flow->thresh[1]);
+ writel_relaxed(0, &chan->reg_rx_flow->thresh[2]);
+ }
+
+ /* teardown the dma channel */
+ chan_teardown(chan);
+
+ /* then disconnect the completion side */
+ if (chan->reg_rx_flow) {
+ writel_relaxed(0, &chan->reg_rx_flow->control);
+ writel_relaxed(0, &chan->reg_rx_flow->tags);
+ writel_relaxed(0, &chan->reg_rx_flow->tag_sel);
+ }
+
+ memset(&chan->cfg, 0, sizeof(struct knav_dma_cfg));
+ spin_unlock(&chan->lock);
+
+ dev_dbg(kdev->dev, "channel stopped\n");
+}
+
+static void dma_hw_enable_all(struct knav_dma_device *dma)
+{
+ int i;
+
+ for (i = 0; i < dma->max_tx_chan; i++) {
+ writel_relaxed(0, &dma->reg_tx_chan[i].mode);
+ writel_relaxed(DMA_ENABLE, &dma->reg_tx_chan[i].control);
+ }
+}
+
+
+static void knav_dma_hw_init(struct knav_dma_device *dma)
+{
+ unsigned v;
+ int i;
+
+ spin_lock(&dma->lock);
+ v = dma->loopback ? DMA_LOOPBACK : 0;
+ writel_relaxed(v, &dma->reg_global->emulation_control);
+
+ v = readl_relaxed(&dma->reg_global->perf_control);
+ v |= ((dma->rx_timeout & DMA_RX_TIMEOUT_MASK) << DMA_RX_TIMEOUT_SHIFT);
+ writel_relaxed(v, &dma->reg_global->perf_control);
+
+ v = ((dma->tx_priority << DMA_TX_PRIO_SHIFT) |
+ (dma->rx_priority << DMA_RX_PRIO_SHIFT));
+
+ writel_relaxed(v, &dma->reg_global->priority_control);
+
+ /* Always enable all Rx channels. Rx paths are managed using flows */
+ for (i = 0; i < dma->max_rx_chan; i++)
+ writel_relaxed(DMA_ENABLE, &dma->reg_rx_chan[i].control);
+
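+	/* publish the logical queue manager base addresses used by the navigator cloud mapping */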
+ for (i = 0; i < dma->logical_queue_managers; i++)
+ writel_relaxed(dma->qm_base_address[i],
+ &dma->reg_global->qm_base_address[i]);
+ spin_unlock(&dma->lock);
+}
+
+static void knav_dma_hw_destroy(struct knav_dma_device *dma)
+{
+ int i;
+ unsigned v;
+
+ spin_lock(&dma->lock);
+ v = ~DMA_ENABLE & REG_MASK;
+
+ for (i = 0; i < dma->max_rx_chan; i++)
+ writel_relaxed(v, &dma->reg_rx_chan[i].control);
+
+ for (i = 0; i < dma->max_tx_chan; i++)
+ writel_relaxed(v, &dma->reg_tx_chan[i].control);
+ spin_unlock(&dma->lock);
+}
+
+static void dma_debug_show_channels(struct seq_file *s,
+ struct knav_dma_chan *chan)
+{
+ int i;
+
+ seq_printf(s, "\t%s %d:\t",
+ ((chan->direction == DMA_MEM_TO_DEV) ? "tx chan" : "rx flow"),
+ chan_number(chan));
+
+ if (chan->direction == DMA_MEM_TO_DEV) {
+ seq_printf(s, "einfo - %d, pswords - %d, priority - %d\n",
+ chan->cfg.u.tx.filt_einfo,
+ chan->cfg.u.tx.filt_pswords,
+ chan->cfg.u.tx.priority);
+ } else {
+ seq_printf(s, "einfo - %d, psinfo - %d, desc_type - %d\n",
+ chan->cfg.u.rx.einfo_present,
+ chan->cfg.u.rx.psinfo_present,
+ chan->cfg.u.rx.desc_type);
+ seq_printf(s, "\t\t\tdst_q: [%d], thresh: %d fdq: ",
+ chan->cfg.u.rx.dst_q,
+ chan->cfg.u.rx.thresh);
+ for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; i++)
+ seq_printf(s, "[%d]", chan->cfg.u.rx.fdq[i]);
+ seq_printf(s, "\n");
+ }
+}
+
+static void dma_debug_show_devices(struct seq_file *s,
+ struct knav_dma_device *dma)
+{
+ struct knav_dma_chan *chan;
+
+ list_for_each_entry(chan, &dma->chan_list, list) {
+ if (atomic_read(&chan->ref_count))
+ dma_debug_show_channels(s, chan);
+ }
+}
+
+static int dma_debug_show(struct seq_file *s, void *v)
+{
+ struct knav_dma_device *dma;
+
+ list_for_each_entry(dma, &kdev->list, list) {
+ if (atomic_read(&dma->ref_count)) {
+ seq_printf(s, "%s : max_tx_chan: (%d), max_rx_flows: (%d)\n",
+ dma->name, dma->max_tx_chan, dma->max_rx_flow);
+ dma_debug_show_devices(s, dma);
+ }
+ }
+
+ return 0;
+}
+
+static int knav_dma_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dma_debug_show, NULL);
+}
+
+static const struct file_operations knav_dma_debug_ops = {
+ .open = knav_dma_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int of_channel_match_helper(struct device_node *np, const char *name,
+ const char **dma_instance)
+{
+ struct of_phandle_args args;
+ struct device_node *dma_node;
+ int index;
+
+ dma_node = of_parse_phandle(np, "ti,navigator-dmas", 0);
+ if (!dma_node)
+ return -ENODEV;
+
+ *dma_instance = dma_node->name;
+ index = of_property_match_string(np, "ti,navigator-dma-names", name);
+ if (index < 0) {
+ dev_err(kdev->dev, "No 'ti,navigator-dma-names' property\n");
+ return -ENODEV;
+ }
+
+ if (of_parse_phandle_with_fixed_args(np, "ti,navigator-dmas",
+ 1, index, &args)) {
+ dev_err(kdev->dev, "Missing the phandle args name %s\n", name);
+ return -ENODEV;
+ }
+
+ if (args.args[0] < 0) {
+ dev_err(kdev->dev, "Missing args for %s\n", name);
+ return -ENODEV;
+ }
+
+ return args.args[0];
+}
+
+/**
+ * knav_dma_open_channel() - try to setup an exclusive slave channel
+ * @dev: pointer to client device structure
+ * @name: slave channel name
+ * @config: dma configuration parameters
+ *
+ * Returns pointer to appropriate DMA channel on success or error.
+ */
+void *knav_dma_open_channel(struct device *dev, const char *name,
+ struct knav_dma_cfg *config)
+{
+ struct knav_dma_chan *chan;
+ struct knav_dma_device *dma;
+ bool found = false;
+ int chan_num = -1;
+ const char *instance;
+
+ if (!kdev) {
+ pr_err("keystone-navigator-dma driver not registered\n");
+ return (void *)-EINVAL;
+ }
+
+ chan_num = of_channel_match_helper(dev->of_node, name, &instance);
+ if (chan_num < 0) {
+ dev_err(kdev->dev, "No DMA instace with name %s\n", name);
+ return (void *)-EINVAL;
+ }
+
+ dev_dbg(kdev->dev, "initializing %s channel %d from DMA %s\n",
+ config->direction == DMA_MEM_TO_DEV ? "transmit" :
+ config->direction == DMA_DEV_TO_MEM ? "receive" :
+ "unknown", chan_num, instance);
+
+ if (config->direction != DMA_MEM_TO_DEV &&
+ config->direction != DMA_DEV_TO_MEM) {
+ dev_err(kdev->dev, "bad direction\n");
+ return (void *)-EINVAL;
+ }
+
+ /* Look for correct dma instance */
+ list_for_each_entry(dma, &kdev->list, list) {
+ if (!strcmp(dma->name, instance)) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ dev_err(kdev->dev, "No DMA instace with name %s\n", instance);
+ return (void *)-EINVAL;
+ }
+
+ /* Look for correct dma channel from dma instance */
+ found = false;
+ list_for_each_entry(chan, &dma->chan_list, list) {
+ if (config->direction == DMA_MEM_TO_DEV) {
+ if (chan->channel == chan_num) {
+ found = true;
+ break;
+ }
+ } else {
+ if (chan->flow == chan_num) {
+ found = true;
+ break;
+ }
+ }
+ }
+ if (!found) {
+ dev_err(kdev->dev, "channel %d is not in DMA %s\n",
+ chan_num, instance);
+ return (void *)-EINVAL;
+ }
+
+ if (atomic_read(&chan->ref_count) >= 1) {
+ if (!check_config(chan, config)) {
+ dev_err(kdev->dev, "channel %d config miss-match\n",
+ chan_num);
+ return (void *)-EINVAL;
+ }
+ }
+
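+	/* the first user of a DMA instance initialises the hardware, the first user of a channel programs and starts it */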
+ if (atomic_inc_return(&chan->dma->ref_count) <= 1)
+ knav_dma_hw_init(chan->dma);
+
+ if (atomic_inc_return(&chan->ref_count) <= 1)
+ chan_start(chan, config);
+
+ dev_dbg(kdev->dev, "channel %d opened from DMA %s\n",
+ chan_num, instance);
+
+ return chan;
+}
+EXPORT_SYMBOL_GPL(knav_dma_open_channel);
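+
+/*
+ * A minimal client-side sketch (channel name hypothetical); the client
+ * node is assumed to carry the ti,navigator-dmas/-dma-names properties
+ * shown above:
+ *
+ *   struct knav_dma_cfg cfg = { .direction = DMA_MEM_TO_DEV };
+ *   void *tx;
+ *
+ *   tx = knav_dma_open_channel(dev, "nettx", &cfg);
+ *   if (IS_ERR(tx))
+ *           return PTR_ERR(tx);
+ *   ...
+ *   knav_dma_close_channel(tx);
+ *
+ * A real config also fills in the direction specific u.tx/u.rx members.
+ */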
+
+/**
+ * knav_dma_close_channel() - Destroy a dma channel
+ *
+ * @channel: dma channel handle
+ *
+ */
+void knav_dma_close_channel(void *channel)
+{
+ struct knav_dma_chan *chan = channel;
+
+ if (!kdev) {
+ pr_err("keystone-navigator-dma driver not registered\n");
+ return;
+ }
+
+ if (atomic_dec_return(&chan->ref_count) <= 0)
+ chan_stop(chan);
+
+ if (atomic_dec_return(&chan->dma->ref_count) <= 0)
+ knav_dma_hw_destroy(chan->dma);
+
+ dev_dbg(kdev->dev, "channel %d or flow %d closed from DMA %s\n",
+ chan->channel, chan->flow, chan->dma->name);
+}
+EXPORT_SYMBOL_GPL(knav_dma_close_channel);
+
+static void __iomem *pktdma_get_regs(struct knav_dma_device *dma,
+ struct device_node *node,
+ unsigned index, resource_size_t *_size)
+{
+ struct device *dev = kdev->dev;
+ struct resource res;
+ void __iomem *regs;
+ int ret;
+
+ ret = of_address_to_resource(node, index, &res);
+ if (ret) {
+ dev_err(dev, "Can't translate of node(%s) address for index(%d)\n",
+ node->name, index);
+ return ERR_PTR(ret);
+ }
+
+ regs = devm_ioremap_resource(kdev->dev, &res);
+ if (IS_ERR(regs))
+ dev_err(dev, "Failed to map register base for index(%d) node(%s)\n",
+ index, node->name);
+ if (_size)
+ *_size = resource_size(&res);
+
+ return regs;
+}
+
+static int pktdma_init_rx_chan(struct knav_dma_chan *chan, u32 flow)
+{
+ struct knav_dma_device *dma = chan->dma;
+
+ chan->flow = flow;
+ chan->reg_rx_flow = dma->reg_rx_flow + flow;
+ chan->channel = DMA_INVALID_ID;
+ dev_dbg(kdev->dev, "rx flow(%d) (%p)\n", chan->flow, chan->reg_rx_flow);
+
+ return 0;
+}
+
+static int pktdma_init_tx_chan(struct knav_dma_chan *chan, u32 channel)
+{
+ struct knav_dma_device *dma = chan->dma;
+
+ chan->channel = channel;
+ chan->reg_chan = dma->reg_tx_chan + channel;
+ chan->reg_tx_sched = dma->reg_tx_sched + channel;
+ chan->flow = DMA_INVALID_ID;
+ dev_dbg(kdev->dev, "tx channel(%d) (%p)\n", chan->channel, chan->reg_chan);
+
+ return 0;
+}
+
+static int pktdma_init_chan(struct knav_dma_device *dma,
+ enum dma_transfer_direction dir,
+ unsigned chan_num)
+{
+ struct device *dev = kdev->dev;
+ struct knav_dma_chan *chan;
+ int ret = -EINVAL;
+
+ chan = devm_kzalloc(dev, sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&chan->list);
+ chan->dma = dma;
+ chan->direction = DMA_NONE;
+ atomic_set(&chan->ref_count, 0);
+ spin_lock_init(&chan->lock);
+
+ if (dir == DMA_MEM_TO_DEV) {
+ chan->direction = dir;
+ ret = pktdma_init_tx_chan(chan, chan_num);
+ } else if (dir == DMA_DEV_TO_MEM) {
+ chan->direction = dir;
+ ret = pktdma_init_rx_chan(chan, chan_num);
+ } else {
+ dev_err(dev, "channel(%d) direction unknown\n", chan_num);
+ }
+
+ list_add_tail(&chan->list, &dma->chan_list);
+
+ return ret;
+}
+
+static int dma_init(struct device_node *cloud, struct device_node *dma_node)
+{
+ unsigned max_tx_chan, max_rx_chan, max_rx_flow, max_tx_sched;
+ struct device_node *node = dma_node;
+ struct knav_dma_device *dma;
+ int ret, len, num_chan = 0;
+ resource_size_t size;
+ u32 timeout;
+ u32 i;
+
+ dma = devm_kzalloc(kdev->dev, sizeof(*dma), GFP_KERNEL);
+ if (!dma) {
+ dev_err(kdev->dev, "could not allocate driver mem\n");
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(&dma->list);
+ INIT_LIST_HEAD(&dma->chan_list);
+
+ if (!of_find_property(cloud, "ti,navigator-cloud-address", &len)) {
+ dev_err(kdev->dev, "unspecified navigator cloud addresses\n");
+ return -ENODEV;
+ }
+
+ dma->logical_queue_managers = len / sizeof(u32);
+ if (dma->logical_queue_managers > DMA_MAX_QMS) {
+ dev_warn(kdev->dev, "too many queue mgrs(>%d) rest ignored\n",
+ dma->logical_queue_managers);
+ dma->logical_queue_managers = DMA_MAX_QMS;
+ }
+
+ ret = of_property_read_u32_array(cloud, "ti,navigator-cloud-address",
+ dma->qm_base_address,
+ dma->logical_queue_managers);
+ if (ret) {
+ dev_err(kdev->dev, "invalid navigator cloud addresses\n");
+ return -ENODEV;
+ }
+
+ dma->reg_global = pktdma_get_regs(dma, node, 0, &size);
+ if (IS_ERR(dma->reg_global))
+ return -ENODEV;
+ if (size < sizeof(struct reg_global)) {
+ dev_err(kdev->dev, "bad size %pa for global regs\n", &size);
+ return -ENODEV;
+ }
+
+ dma->reg_tx_chan = pktdma_get_regs(dma, node, 1, &size);
+ if (IS_ERR(dma->reg_tx_chan))
+ return -ENODEV;
+
+ max_tx_chan = size / sizeof(struct reg_chan);
+ dma->reg_rx_chan = pktdma_get_regs(dma, node, 2, &size);
+ if (IS_ERR(dma->reg_rx_chan))
+ return -ENODEV;
+
+ max_rx_chan = size / sizeof(struct reg_chan);
+ dma->reg_tx_sched = pktdma_get_regs(dma, node, 3, &size);
+ if (IS_ERR(dma->reg_tx_sched))
+ return -ENODEV;
+
+ max_tx_sched = size / sizeof(struct reg_tx_sched);
+ dma->reg_rx_flow = pktdma_get_regs(dma, node, 4, &size);
+ if (IS_ERR(dma->reg_rx_flow))
+ return -ENODEV;
+
+ max_rx_flow = size / sizeof(struct reg_rx_flow);
+ dma->rx_priority = DMA_PRIO_DEFAULT;
+ dma->tx_priority = DMA_PRIO_DEFAULT;
+
+ dma->enable_all = (of_get_property(node, "ti,enable-all", NULL) != NULL);
+ dma->loopback = (of_get_property(node, "ti,loop-back", NULL) != NULL);
+
+ ret = of_property_read_u32(node, "ti,rx-retry-timeout", &timeout);
+ if (ret < 0) {
+ dev_dbg(kdev->dev, "unspecified rx timeout using value %d\n",
+ DMA_RX_TIMEOUT_DEFAULT);
+ timeout = DMA_RX_TIMEOUT_DEFAULT;
+ }
+
+ dma->rx_timeout = timeout;
+ dma->max_rx_chan = max_rx_chan;
+ dma->max_rx_flow = max_rx_flow;
+ dma->max_tx_chan = min(max_tx_chan, max_tx_sched);
+ atomic_set(&dma->ref_count, 0);
+ strcpy(dma->name, node->name);
+ spin_lock_init(&dma->lock);
+
+ for (i = 0; i < dma->max_tx_chan; i++) {
+ if (pktdma_init_chan(dma, DMA_MEM_TO_DEV, i) >= 0)
+ num_chan++;
+ }
+
+ for (i = 0; i < dma->max_rx_flow; i++) {
+ if (pktdma_init_chan(dma, DMA_DEV_TO_MEM, i) >= 0)
+ num_chan++;
+ }
+
+ list_add_tail(&dma->list, &kdev->list);
+
+ /*
+ * For DSP software use cases or userspace transport software, set up all
+ * the DMA hardware resources.
+ */
+ if (dma->enable_all) {
+ atomic_inc(&dma->ref_count);
+ knav_dma_hw_init(dma);
+ dma_hw_enable_all(dma);
+ }
+
+ dev_info(kdev->dev, "DMA %s registered %d logical channels, flows %d, tx chans: %d, rx chans: %d%s\n",
+ dma->name, num_chan, dma->max_rx_flow,
+ dma->max_tx_chan, dma->max_rx_chan,
+ dma->loopback ? ", loopback" : "");
+
+ return 0;
+}
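+
+/*
+ * For reference, dma_init() above expects the DMA node's "reg" ranges
+ * in this order (a sketch of the assumed layout, not a full binding):
+ *
+ *   index 0: global control registers   (struct reg_global)
+ *   index 1: tx channel config          (array of struct reg_chan)
+ *   index 2: rx channel config          (array of struct reg_chan)
+ *   index 3: tx scheduler config        (array of struct reg_tx_sched)
+ *   index 4: rx flow config             (array of struct reg_rx_flow)
+ *
+ * The usable channel/flow counts are derived from each range's size.
+ */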
+
+static int knav_dma_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *child;
+ int ret = 0;
+
+ if (!node) {
+ dev_err(&pdev->dev, "could not find device info\n");
+ return -EINVAL;
+ }
+
+ kdev = devm_kzalloc(dev,
+ sizeof(struct knav_dma_pool_device), GFP_KERNEL);
+ if (!kdev) {
+ dev_err(dev, "could not allocate driver mem\n");
+ return -ENOMEM;
+ }
+
+ kdev->dev = dev;
+ INIT_LIST_HEAD(&kdev->list);
+
+ pm_runtime_enable(kdev->dev);
+ ret = pm_runtime_get_sync(kdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(kdev->dev);
+ dev_err(kdev->dev, "unable to enable pktdma, err %d\n", ret);
+ goto err_pm_disable;
+ }
+
+ /* Initialise all packet dmas */
+ for_each_child_of_node(node, child) {
+ ret = dma_init(node, child);
+ if (ret) {
+ dev_err(&pdev->dev, "init failed with %d\n", ret);
+ break;
+ }
+ }
+
+ if (list_empty(&kdev->list)) {
+ dev_err(dev, "no valid dma instance\n");
+ ret = -ENODEV;
+ goto err_put_sync;
+ }
+
+ debugfs_create_file("knav_dma", S_IFREG | S_IRUGO, NULL, NULL,
+ &knav_dma_debug_ops);
+
+ device_ready = true;
+ return ret;
+
+err_put_sync:
+ pm_runtime_put_sync(kdev->dev);
+err_pm_disable:
+ pm_runtime_disable(kdev->dev);
+
+ return ret;
+}
+
+static int knav_dma_remove(struct platform_device *pdev)
+{
+ struct knav_dma_device *dma;
+
+ list_for_each_entry(dma, &kdev->list, list) {
+ if (atomic_dec_return(&dma->ref_count) == 0)
+ knav_dma_hw_destroy(dma);
+ }
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id of_match[] = {
+ { .compatible = "ti,keystone-navigator-dma", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, of_match);
+
+static struct platform_driver knav_dma_driver = {
+ .probe = knav_dma_probe,
+ .remove = knav_dma_remove,
+ .driver = {
+ .name = "keystone-navigator-dma",
+ .of_match_table = of_match,
+ },
+};
+module_platform_driver(knav_dma_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI Keystone Navigator Packet DMA driver");
+MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
+MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");
diff --git a/drivers/soc/ti/knav_qmss.h b/drivers/soc/ti/knav_qmss.h
new file mode 100644
index 000000000..bd040c29c
--- /dev/null
+++ b/drivers/soc/ti/knav_qmss.h
@@ -0,0 +1,395 @@
+/*
+ * Keystone Navigator QMSS driver internal header
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Sandeep Nair <sandeep_n@ti.com>
+ * Cyril Chemparathy <cyril@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __KNAV_QMSS_H__
+#define __KNAV_QMSS_H__
+
+#include <linux/percpu.h>
+
+#define THRESH_GTE BIT(7)
+#define THRESH_LT 0
+
+#define PDSP_CTRL_PC_MASK 0xffff0000
+#define PDSP_CTRL_SOFT_RESET BIT(0)
+#define PDSP_CTRL_ENABLE BIT(1)
+#define PDSP_CTRL_RUNNING BIT(15)
+
+#define ACC_MAX_CHANNEL 48
+#define ACC_DEFAULT_PERIOD 25 /* usecs */
+
+#define ACC_CHANNEL_INT_BASE 2
+
+#define ACC_LIST_ENTRY_TYPE 1
+#define ACC_LIST_ENTRY_WORDS (1 << ACC_LIST_ENTRY_TYPE)
+#define ACC_LIST_ENTRY_QUEUE_IDX 0
+#define ACC_LIST_ENTRY_DESC_IDX (ACC_LIST_ENTRY_WORDS - 1)
+
+#define ACC_CMD_DISABLE_CHANNEL 0x80
+#define ACC_CMD_ENABLE_CHANNEL 0x81
+#define ACC_CFG_MULTI_QUEUE BIT(21)
+
+#define ACC_INTD_OFFSET_EOI (0x0010)
+#define ACC_INTD_OFFSET_COUNT(ch) (0x0300 + 4 * (ch))
+#define ACC_INTD_OFFSET_STATUS(ch) (0x0200 + 4 * ((ch) / 32))
+
+#define RANGE_MAX_IRQS 64
+
+#define ACC_DESCS_MAX SZ_1K
+#define ACC_DESCS_MASK (ACC_DESCS_MAX - 1)
+#define DESC_SIZE_MASK 0xful
+#define DESC_PTR_MASK (~DESC_SIZE_MASK)
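+
+/*
+ * A queue push/pop word packs the descriptor size, in 16-byte units
+ * minus one, into the low nibble of the descriptor DMA address.
+ * Illustratively (see knav_queue_push()/knav_queue_pop()):
+ *
+ *   val  = (u32)dma | ((size / 16) - 1);           push
+ *   dma  = val & DESC_PTR_MASK;                    pop
+ *   size = ((val & DESC_SIZE_MASK) + 1) * 16;
+ */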
+
+#define KNAV_NAME_SIZE 32
+
+enum knav_acc_result {
+ ACC_RET_IDLE,
+ ACC_RET_SUCCESS,
+ ACC_RET_INVALID_COMMAND,
+ ACC_RET_INVALID_CHANNEL,
+ ACC_RET_INACTIVE_CHANNEL,
+ ACC_RET_ACTIVE_CHANNEL,
+ ACC_RET_INVALID_QUEUE,
+ ACC_RET_INVALID_RET,
+};
+
+struct knav_reg_config {
+ u32 revision;
+ u32 __pad1;
+ u32 divert;
+ u32 link_ram_base0;
+ u32 link_ram_size0;
+ u32 link_ram_base1;
+ u32 __pad2[2];
+ u32 starvation[0];
+};
+
+struct knav_reg_region {
+ u32 base;
+ u32 start_index;
+ u32 size_count;
+ u32 __pad;
+};
+
+struct knav_reg_pdsp_regs {
+ u32 control;
+ u32 status;
+ u32 cycle_count;
+ u32 stall_count;
+};
+
+struct knav_reg_acc_command {
+ u32 command;
+ u32 queue_mask;
+ u32 list_dma;
+ u32 queue_num;
+ u32 timer_config;
+};
+
+struct knav_link_ram_block {
+ dma_addr_t dma;
+ void *virt;
+ size_t size;
+};
+
+struct knav_acc_info {
+ u32 pdsp_id;
+ u32 start_channel;
+ u32 list_entries;
+ u32 pacing_mode;
+ u32 timer_count;
+ int mem_size;
+ int list_size;
+ struct knav_pdsp_info *pdsp;
+};
+
+struct knav_acc_channel {
+ u32 channel;
+ u32 list_index;
+ u32 open_mask;
+ u32 *list_cpu[2];
+ dma_addr_t list_dma[2];
+ char name[KNAV_NAME_SIZE];
+ atomic_t retrigger_count;
+};
+
+struct knav_pdsp_info {
+ const char *name;
+ struct knav_reg_pdsp_regs __iomem *regs;
+ union {
+ void __iomem *command;
+ struct knav_reg_acc_command __iomem *acc_command;
+ u32 __iomem *qos_command;
+ };
+ void __iomem *intd;
+ u32 __iomem *iram;
+ u32 id;
+ struct list_head list;
+ bool loaded;
+ bool started;
+};
+
+struct knav_qmgr_info {
+ unsigned start_queue;
+ unsigned num_queues;
+ struct knav_reg_config __iomem *reg_config;
+ struct knav_reg_region __iomem *reg_region;
+ struct knav_reg_queue __iomem *reg_push, *reg_pop, *reg_peek;
+ void __iomem *reg_status;
+ struct list_head list;
+};
+
+#define KNAV_NUM_LINKRAM 2
+
+/**
+ * struct knav_queue_stats: queue statistics
+ * pushes: number of push operations
+ * pops: number of pop operations
+ * push_errors: number of push errors
+ * pop_errors: number of pop errors
+ * notifies: notifier counts
+ */
+struct knav_queue_stats {
+ unsigned int pushes;
+ unsigned int pops;
+ unsigned int push_errors;
+ unsigned int pop_errors;
+ unsigned int notifies;
+};
+
+/**
+ * struct knav_reg_queue: queue registers
+ * @entry_count: valid entries in the queue
+ * @byte_count: total byte count in the queue
+ * @packet_size: packet size for the queue
+ * @ptr_size_thresh: packet pointer size threshold
+ */
+struct knav_reg_queue {
+ u32 entry_count;
+ u32 byte_count;
+ u32 packet_size;
+ u32 ptr_size_thresh;
+};
+
+/**
+ * struct knav_region: qmss region info
+ * @dma_start, dma_end: start and end dma address
+ * @virt_start, virt_end: start and end virtual address
+ * @desc_size: descriptor size
+ * @used_desc: consumed descriptors
+ * @id: region number
+ * @num_desc: total descriptors
+ * @link_index: index of the first descriptor
+ * @name: region name
+ * @list: instance in the device's region list
+ * @pools: list of descriptor pools in the region
+ */
+struct knav_region {
+ dma_addr_t dma_start, dma_end;
+ void *virt_start, *virt_end;
+ unsigned desc_size;
+ unsigned used_desc;
+ unsigned id;
+ unsigned num_desc;
+ unsigned link_index;
+ const char *name;
+ struct list_head list;
+ struct list_head pools;
+};
+
+/**
+ * struct knav_pool: qmss pools
+ * @dev: device pointer
+ * @region: qmss region info
+ * @queue: queue registers
+ * @kdev: qmss device pointer
+ * @region_offset: offset from the base
+ * @num_desc: total descriptors
+ * @desc_size: descriptor size
+ * @region_id: region number
+ * @name: pool name
+ * @list: list head
+ * @region_inst: instance in the region's pool list
+ */
+struct knav_pool {
+ struct device *dev;
+ struct knav_region *region;
+ struct knav_queue *queue;
+ struct knav_device *kdev;
+ int region_offset;
+ int num_desc;
+ int desc_size;
+ int region_id;
+ const char *name;
+ struct list_head list;
+ struct list_head region_inst;
+};
+
+/**
+ * struct knav_queue_inst: qmss queue instance properties
+ * @descs: descriptor pointer
+ * @desc_head, desc_tail, desc_count: descriptor counters
+ * @acc: accumulator channel pointer
+ * @kdev: qmss device pointer
+ * @range: range info
+ * @qmgr: queue manager info
+ * @id: queue instance id
+ * @irq_num: irq line number
+ * @notify_needed: notifier needed based on queue type
+ * @num_notifiers: total notifiers
+ * @handles: list head
+ * @name: queue instance name
+ * @irq_name: irq line name
+ */
+struct knav_queue_inst {
+ u32 *descs;
+ atomic_t desc_head, desc_tail, desc_count;
+ struct knav_acc_channel *acc;
+ struct knav_device *kdev;
+ struct knav_range_info *range;
+ struct knav_qmgr_info *qmgr;
+ u32 id;
+ int irq_num;
+ int notify_needed;
+ atomic_t num_notifiers;
+ struct list_head handles;
+ const char *name;
+ const char *irq_name;
+};
+
+/**
+ * struct knav_queue: qmss queue properties
+ * @reg_push, reg_pop, reg_peek: push, pop queue registers
+ * @inst: qmss queue instance properties
+ * @stats: per-cpu queue statistics
+ * @notifier_fn: notifier function
+ * @notifier_fn_arg: notifier function argument
+ * @notifier_enabled: notifier enabled for a given queue
+ * @rcu: rcu head
+ * @flags: queue flags
+ * @list: list head
+ */
+struct knav_queue {
+ struct knav_reg_queue __iomem *reg_push, *reg_pop, *reg_peek;
+ struct knav_queue_inst *inst;
+ struct knav_queue_stats __percpu *stats;
+ knav_queue_notify_fn notifier_fn;
+ void *notifier_fn_arg;
+ atomic_t notifier_enabled;
+ struct rcu_head rcu;
+ unsigned flags;
+ struct list_head list;
+};
+
+enum qmss_version {
+ QMSS,
+ QMSS_66AK2G,
+};
+
+struct knav_device {
+ struct device *dev;
+ unsigned base_id;
+ unsigned num_queues;
+ unsigned num_queues_in_use;
+ unsigned inst_shift;
+ struct knav_link_ram_block link_rams[KNAV_NUM_LINKRAM];
+ void *instances;
+ struct list_head regions;
+ struct list_head queue_ranges;
+ struct list_head pools;
+ struct list_head pdsps;
+ struct list_head qmgrs;
+ enum qmss_version version;
+};
+
+struct knav_range_ops {
+ int (*init_range)(struct knav_range_info *range);
+ int (*free_range)(struct knav_range_info *range);
+ int (*init_queue)(struct knav_range_info *range,
+ struct knav_queue_inst *inst);
+ int (*open_queue)(struct knav_range_info *range,
+ struct knav_queue_inst *inst, unsigned flags);
+ int (*close_queue)(struct knav_range_info *range,
+ struct knav_queue_inst *inst);
+ int (*set_notify)(struct knav_range_info *range,
+ struct knav_queue_inst *inst, bool enabled);
+};
+
+struct knav_irq_info {
+ int irq;
+ struct cpumask *cpu_mask;
+};
+
+struct knav_range_info {
+ const char *name;
+ struct knav_device *kdev;
+ unsigned queue_base;
+ unsigned num_queues;
+ void *queue_base_inst;
+ unsigned flags;
+ struct list_head list;
+ struct knav_range_ops *ops;
+ struct knav_acc_info acc_info;
+ struct knav_acc_channel *acc;
+ unsigned num_irqs;
+ struct knav_irq_info irqs[RANGE_MAX_IRQS];
+};
+
+#define RANGE_RESERVED BIT(0)
+#define RANGE_HAS_IRQ BIT(1)
+#define RANGE_HAS_ACCUMULATOR BIT(2)
+#define RANGE_MULTI_QUEUE BIT(3)
+
+#define for_each_region(kdev, region) \
+ list_for_each_entry(region, &kdev->regions, list)
+
+#define first_region(kdev) \
+ list_first_entry_or_null(&kdev->regions, \
+ struct knav_region, list)
+
+#define for_each_queue_range(kdev, range) \
+ list_for_each_entry(range, &kdev->queue_ranges, list)
+
+#define first_queue_range(kdev) \
+ list_first_entry_or_null(&kdev->queue_ranges, \
+ struct knav_range_info, list)
+
+#define for_each_pool(kdev, pool) \
+ list_for_each_entry(pool, &kdev->pools, list)
+
+#define for_each_pdsp(kdev, pdsp) \
+ list_for_each_entry(pdsp, &kdev->pdsps, list)
+
+#define for_each_qmgr(kdev, qmgr) \
+ list_for_each_entry(qmgr, &kdev->qmgrs, list)
+
+static inline struct knav_pdsp_info *
+knav_find_pdsp(struct knav_device *kdev, unsigned pdsp_id)
+{
+ struct knav_pdsp_info *pdsp;
+
+ for_each_pdsp(kdev, pdsp)
+ if (pdsp_id == pdsp->id)
+ return pdsp;
+ return NULL;
+}
+
+extern int knav_init_acc_range(struct knav_device *kdev,
+ struct device_node *node,
+ struct knav_range_info *range);
+extern void knav_queue_notify(struct knav_queue_inst *inst);
+
+#endif /* __KNAV_QMSS_H__ */
diff --git a/drivers/soc/ti/knav_qmss_acc.c b/drivers/soc/ti/knav_qmss_acc.c
new file mode 100644
index 000000000..2f7fb2dcc
--- /dev/null
+++ b/drivers/soc/ti/knav_qmss_acc.c
@@ -0,0 +1,592 @@
+/*
+ * Keystone accumulator queue manager
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Sandeep Nair <sandeep_n@ti.com>
+ * Cyril Chemparathy <cyril@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/soc/ti/knav_qmss.h>
+
+#include "knav_qmss.h"
+
+#define knav_range_offset_to_inst(kdev, range, q) \
+ (range->queue_base_inst + (q << kdev->inst_shift))
+
+static void __knav_acc_notify(struct knav_range_info *range,
+ struct knav_acc_channel *acc)
+{
+ struct knav_device *kdev = range->kdev;
+ struct knav_queue_inst *inst;
+ int range_base, queue;
+
+ range_base = kdev->base_id + range->queue_base;
+
+ if (range->flags & RANGE_MULTI_QUEUE) {
+ for (queue = 0; queue < range->num_queues; queue++) {
+ inst = knav_range_offset_to_inst(kdev, range,
+ queue);
+ if (inst->notify_needed) {
+ inst->notify_needed = 0;
+ dev_dbg(kdev->dev, "acc-irq: notifying %d\n",
+ range_base + queue);
+ knav_queue_notify(inst);
+ }
+ }
+ } else {
+ queue = acc->channel - range->acc_info.start_channel;
+ inst = knav_range_offset_to_inst(kdev, range, queue);
+ dev_dbg(kdev->dev, "acc-irq: notifying %d\n",
+ range_base + queue);
+ knav_queue_notify(inst);
+ }
+}
+
+static int knav_acc_set_notify(struct knav_range_info *range,
+ struct knav_queue_inst *kq,
+ bool enabled)
+{
+ struct knav_pdsp_info *pdsp = range->acc_info.pdsp;
+ struct knav_device *kdev = range->kdev;
+ u32 mask, offset;
+
+ /*
+ * when enabling, we need to re-trigger an interrupt if we
+ * have descriptors pending
+ */
+ if (!enabled || atomic_read(&kq->desc_count) <= 0)
+ return 0;
+
+ kq->notify_needed = 1;
+ atomic_inc(&kq->acc->retrigger_count);
+ mask = BIT(kq->acc->channel % 32);
+ offset = ACC_INTD_OFFSET_STATUS(kq->acc->channel);
+ dev_dbg(kdev->dev, "setup-notify: re-triggering irq for %s\n",
+ kq->acc->name);
+ writel_relaxed(mask, pdsp->intd + offset);
+ return 0;
+}
+
+static irqreturn_t knav_acc_int_handler(int irq, void *_instdata)
+{
+ struct knav_acc_channel *acc;
+ struct knav_queue_inst *kq = NULL;
+ struct knav_range_info *range;
+ struct knav_pdsp_info *pdsp;
+ struct knav_acc_info *info;
+ struct knav_device *kdev;
+
+ u32 *list, *list_cpu, val, idx, notifies;
+ int range_base, channel, queue = 0;
+ dma_addr_t list_dma;
+
+ range = _instdata;
+ info = &range->acc_info;
+ kdev = range->kdev;
+ pdsp = range->acc_info.pdsp;
+ acc = range->acc;
+
+ range_base = kdev->base_id + range->queue_base;
+ if ((range->flags & RANGE_MULTI_QUEUE) == 0) {
+ for (queue = 0; queue < range->num_irqs; queue++)
+ if (range->irqs[queue].irq == irq)
+ break;
+ kq = knav_range_offset_to_inst(kdev, range, queue);
+ acc += queue;
+ }
+
+ channel = acc->channel;
+ list_dma = acc->list_dma[acc->list_index];
+ list_cpu = acc->list_cpu[acc->list_index];
+ dev_dbg(kdev->dev, "acc-irq: channel %d, list %d, virt %p, dma %pad\n",
+ channel, acc->list_index, list_cpu, &list_dma);
+ if (atomic_read(&acc->retrigger_count)) {
+ atomic_dec(&acc->retrigger_count);
+ __knav_acc_notify(range, acc);
+ writel_relaxed(1, pdsp->intd + ACC_INTD_OFFSET_COUNT(channel));
+ /* ack the interrupt */
+ writel_relaxed(ACC_CHANNEL_INT_BASE + channel,
+ pdsp->intd + ACC_INTD_OFFSET_EOI);
+
+ return IRQ_HANDLED;
+ }
+
+ notifies = readl_relaxed(pdsp->intd + ACC_INTD_OFFSET_COUNT(channel));
+ WARN_ON(!notifies);
+ dma_sync_single_for_cpu(kdev->dev, list_dma, info->list_size,
+ DMA_FROM_DEVICE);
+
+ for (list = list_cpu; list < list_cpu + (info->list_size / sizeof(u32));
+ list += ACC_LIST_ENTRY_WORDS) {
+ if (ACC_LIST_ENTRY_WORDS == 1) {
+ dev_dbg(kdev->dev,
+ "acc-irq: list %d, entry @%p, %08x\n",
+ acc->list_index, list, list[0]);
+ } else if (ACC_LIST_ENTRY_WORDS == 2) {
+ dev_dbg(kdev->dev,
+ "acc-irq: list %d, entry @%p, %08x %08x\n",
+ acc->list_index, list, list[0], list[1]);
+ } else if (ACC_LIST_ENTRY_WORDS == 4) {
+ dev_dbg(kdev->dev,
+ "acc-irq: list %d, entry @%p, %08x %08x %08x %08x\n",
+ acc->list_index, list, list[0], list[1],
+ list[2], list[3]);
+ }
+
+ val = list[ACC_LIST_ENTRY_DESC_IDX];
+ if (!val)
+ break;
+
+ if (range->flags & RANGE_MULTI_QUEUE) {
+ queue = list[ACC_LIST_ENTRY_QUEUE_IDX] >> 16;
+ if (queue < range_base ||
+ queue >= range_base + range->num_queues) {
+ dev_err(kdev->dev,
+ "bad queue %d, expecting %d-%d\n",
+ queue, range_base,
+ range_base + range->num_queues);
+ break;
+ }
+ queue -= range_base;
+ kq = knav_range_offset_to_inst(kdev, range,
+ queue);
+ }
+
+ if (atomic_inc_return(&kq->desc_count) >= ACC_DESCS_MAX) {
+ atomic_dec(&kq->desc_count);
+ dev_err(kdev->dev,
+ "acc-irq: queue %d full, entry dropped\n",
+ queue + range_base);
+ continue;
+ }
+
+ idx = atomic_inc_return(&kq->desc_tail) & ACC_DESCS_MASK;
+ kq->descs[idx] = val;
+ kq->notify_needed = 1;
+ dev_dbg(kdev->dev, "acc-irq: enqueue %08x at %d, queue %d\n",
+ val, idx, queue + range_base);
+ }
+
+ __knav_acc_notify(range, acc);
+ memset(list_cpu, 0, info->list_size);
+ dma_sync_single_for_device(kdev->dev, list_dma, info->list_size,
+ DMA_TO_DEVICE);
+
+ /* flip to the other list */
+ acc->list_index ^= 1;
+
+ /* reset the interrupt counter */
+ writel_relaxed(1, pdsp->intd + ACC_INTD_OFFSET_COUNT(channel));
+
+ /* ack the interrupt */
+ writel_relaxed(ACC_CHANNEL_INT_BASE + channel,
+ pdsp->intd + ACC_INTD_OFFSET_EOI);
+
+ return IRQ_HANDLED;
+}
+
+static int knav_range_setup_acc_irq(struct knav_range_info *range,
+ int queue, bool enabled)
+{
+ struct knav_device *kdev = range->kdev;
+ struct knav_acc_channel *acc;
+ struct cpumask *cpu_mask;
+ int ret = 0, irq;
+ u32 old, new;
+
+ if (range->flags & RANGE_MULTI_QUEUE) {
+ acc = range->acc;
+ irq = range->irqs[0].irq;
+ cpu_mask = range->irqs[0].cpu_mask;
+ } else {
+ acc = range->acc + queue;
+ irq = range->irqs[queue].irq;
+ cpu_mask = range->irqs[queue].cpu_mask;
+ }
+
+ old = acc->open_mask;
+ if (enabled)
+ new = old | BIT(queue);
+ else
+ new = old & ~BIT(queue);
+ acc->open_mask = new;
+
+ dev_dbg(kdev->dev,
+ "setup-acc-irq: open mask old %08x, new %08x, channel %s\n",
+ old, new, acc->name);
+
+ if (likely(new == old))
+ return 0;
+
+ if (new && !old) {
+ dev_dbg(kdev->dev,
+ "setup-acc-irq: requesting %s for channel %s\n",
+ acc->name, acc->name);
+ ret = request_irq(irq, knav_acc_int_handler, 0, acc->name,
+ range);
+ if (!ret && cpu_mask) {
+ ret = irq_set_affinity_hint(irq, cpu_mask);
+ if (ret) {
+ dev_warn(range->kdev->dev,
+ "Failed to set IRQ affinity\n");
+ return ret;
+ }
+ }
+ }
+
+ if (old && !new) {
+ dev_dbg(kdev->dev, "setup-acc-irq: freeing %s for channel %s\n",
+ acc->name, acc->name);
+ ret = irq_set_affinity_hint(irq, NULL);
+ if (ret)
+ dev_warn(range->kdev->dev,
+ "Failed to set IRQ affinity\n");
+ free_irq(irq, range);
+ }
+
+ return ret;
+}
+
+static const char *knav_acc_result_str(enum knav_acc_result result)
+{
+ static const char * const result_str[] = {
+ [ACC_RET_IDLE] = "idle",
+ [ACC_RET_SUCCESS] = "success",
+ [ACC_RET_INVALID_COMMAND] = "invalid command",
+ [ACC_RET_INVALID_CHANNEL] = "invalid channel",
+ [ACC_RET_INACTIVE_CHANNEL] = "inactive channel",
+ [ACC_RET_ACTIVE_CHANNEL] = "active channel",
+ [ACC_RET_INVALID_QUEUE] = "invalid queue",
+ [ACC_RET_INVALID_RET] = "invalid return code",
+ };
+
+ if (result >= ARRAY_SIZE(result_str))
+ return result_str[ACC_RET_INVALID_RET];
+ else
+ return result_str[result];
+}
+
+static enum knav_acc_result
+knav_acc_write(struct knav_device *kdev, struct knav_pdsp_info *pdsp,
+ struct knav_reg_acc_command *cmd)
+{
+ u32 result;
+
+ dev_dbg(kdev->dev, "acc command %08x %08x %08x %08x %08x\n",
+ cmd->command, cmd->queue_mask, cmd->list_dma,
+ cmd->queue_num, cmd->timer_config);
+
+ writel_relaxed(cmd->timer_config, &pdsp->acc_command->timer_config);
+ writel_relaxed(cmd->queue_num, &pdsp->acc_command->queue_num);
+ writel_relaxed(cmd->list_dma, &pdsp->acc_command->list_dma);
+ writel_relaxed(cmd->queue_mask, &pdsp->acc_command->queue_mask);
+ writel_relaxed(cmd->command, &pdsp->acc_command->command);
+
+ /* wait for the command to clear */
+ do {
+ result = readl_relaxed(&pdsp->acc_command->command);
+ } while ((result >> 8) & 0xff);
+
+ return (result >> 24) & 0xff;
+}
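+
+/*
+ * Layout of the 32-bit accumulator command word, as used above and
+ * built by knav_acc_setup_cmd():
+ *
+ *   bits  0-7 : accumulator channel number
+ *   bits  8-15: command (ACC_CMD_ENABLE/DISABLE_CHANNEL), cleared by
+ *               the PDSP once the command has been accepted
+ *   bits 24-31: return code, decoded with knav_acc_result_str()
+ */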
+
+static void knav_acc_setup_cmd(struct knav_device *kdev,
+ struct knav_range_info *range,
+ struct knav_reg_acc_command *cmd,
+ int queue)
+{
+ struct knav_acc_info *info = &range->acc_info;
+ struct knav_acc_channel *acc;
+ int queue_base;
+ u32 queue_mask;
+
+ if (range->flags & RANGE_MULTI_QUEUE) {
+ acc = range->acc;
+ queue_base = range->queue_base;
+ queue_mask = BIT(range->num_queues) - 1;
+ } else {
+ acc = range->acc + queue;
+ queue_base = range->queue_base + queue;
+ queue_mask = 0;
+ }
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->command = acc->channel;
+ cmd->queue_mask = queue_mask;
+ cmd->list_dma = (u32)acc->list_dma[0];
+ cmd->queue_num = info->list_entries << 16;
+ cmd->queue_num |= queue_base;
+
+ cmd->timer_config = ACC_LIST_ENTRY_TYPE << 18;
+ if (range->flags & RANGE_MULTI_QUEUE)
+ cmd->timer_config |= ACC_CFG_MULTI_QUEUE;
+ cmd->timer_config |= info->pacing_mode << 16;
+ cmd->timer_config |= info->timer_count;
+}
+
+static void knav_acc_stop(struct knav_device *kdev,
+ struct knav_range_info *range,
+ int queue)
+{
+ struct knav_reg_acc_command cmd;
+ struct knav_acc_channel *acc;
+ enum knav_acc_result result;
+
+ acc = range->acc + queue;
+
+ knav_acc_setup_cmd(kdev, range, &cmd, queue);
+ cmd.command |= ACC_CMD_DISABLE_CHANNEL << 8;
+ result = knav_acc_write(kdev, range->acc_info.pdsp, &cmd);
+
+ dev_dbg(kdev->dev, "stopped acc channel %s, result %s\n",
+ acc->name, knav_acc_result_str(result));
+}
+
+static enum knav_acc_result knav_acc_start(struct knav_device *kdev,
+ struct knav_range_info *range,
+ int queue)
+{
+ struct knav_reg_acc_command cmd;
+ struct knav_acc_channel *acc;
+ enum knav_acc_result result;
+
+ acc = range->acc + queue;
+
+ knav_acc_setup_cmd(kdev, range, &cmd, queue);
+ cmd.command |= ACC_CMD_ENABLE_CHANNEL << 8;
+ result = knav_acc_write(kdev, range->acc_info.pdsp, &cmd);
+
+ dev_dbg(kdev->dev, "started acc channel %s, result %s\n",
+ acc->name, knav_acc_result_str(result));
+
+ return result;
+}
+
+static int knav_acc_init_range(struct knav_range_info *range)
+{
+ struct knav_device *kdev = range->kdev;
+ struct knav_acc_channel *acc;
+ enum knav_acc_result result;
+ int queue;
+
+ for (queue = 0; queue < range->num_queues; queue++) {
+ acc = range->acc + queue;
+
+ knav_acc_stop(kdev, range, queue);
+ acc->list_index = 0;
+ result = knav_acc_start(kdev, range, queue);
+
+ if (result != ACC_RET_SUCCESS)
+ return -EIO;
+
+ if (range->flags & RANGE_MULTI_QUEUE)
+ return 0;
+ }
+ return 0;
+}
+
+static int knav_acc_init_queue(struct knav_range_info *range,
+ struct knav_queue_inst *kq)
+{
+ unsigned id = kq->id - range->queue_base;
+
+ kq->descs = devm_kcalloc(range->kdev->dev,
+ ACC_DESCS_MAX, sizeof(u32), GFP_KERNEL);
+ if (!kq->descs)
+ return -ENOMEM;
+
+ kq->acc = range->acc;
+ if ((range->flags & RANGE_MULTI_QUEUE) == 0)
+ kq->acc += id;
+ return 0;
+}
+
+static int knav_acc_open_queue(struct knav_range_info *range,
+ struct knav_queue_inst *inst, unsigned flags)
+{
+ unsigned id = inst->id - range->queue_base;
+
+ return knav_range_setup_acc_irq(range, id, true);
+}
+
+static int knav_acc_close_queue(struct knav_range_info *range,
+ struct knav_queue_inst *inst)
+{
+ unsigned id = inst->id - range->queue_base;
+
+ return knav_range_setup_acc_irq(range, id, false);
+}
+
+static int knav_acc_free_range(struct knav_range_info *range)
+{
+ struct knav_device *kdev = range->kdev;
+ struct knav_acc_channel *acc;
+ struct knav_acc_info *info;
+ int channel, channels;
+
+ info = &range->acc_info;
+
+ if (range->flags & RANGE_MULTI_QUEUE)
+ channels = 1;
+ else
+ channels = range->num_queues;
+
+ for (channel = 0; channel < channels; channel++) {
+ acc = range->acc + channel;
+ if (!acc->list_cpu[0])
+ continue;
+ dma_unmap_single(kdev->dev, acc->list_dma[0],
+ info->mem_size, DMA_BIDIRECTIONAL);
+ free_pages_exact(acc->list_cpu[0], info->mem_size);
+ }
+ devm_kfree(range->kdev->dev, range->acc);
+ return 0;
+}
+
+struct knav_range_ops knav_acc_range_ops = {
+ .set_notify = knav_acc_set_notify,
+ .init_queue = knav_acc_init_queue,
+ .open_queue = knav_acc_open_queue,
+ .close_queue = knav_acc_close_queue,
+ .init_range = knav_acc_init_range,
+ .free_range = knav_acc_free_range,
+};
+
+/**
+ * knav_init_acc_range: Initialise accumulator ranges
+ *
+ * @kdev: qmss device
+ * @node: device node
+ * @range: qmss range information
+ *
+ * Returns 0 on success or a negative error code
+ */
+int knav_init_acc_range(struct knav_device *kdev,
+ struct device_node *node,
+ struct knav_range_info *range)
+{
+ struct knav_acc_channel *acc;
+ struct knav_pdsp_info *pdsp;
+ struct knav_acc_info *info;
+ int ret, channel, channels;
+ int list_size, mem_size;
+ dma_addr_t list_dma;
+ void *list_mem;
+ u32 config[5];
+
+ range->flags |= RANGE_HAS_ACCUMULATOR;
+ info = &range->acc_info;
+
+ ret = of_property_read_u32_array(node, "accumulator", config, 5);
+ if (ret)
+ return ret;
+
+ info->pdsp_id = config[0];
+ info->start_channel = config[1];
+ info->list_entries = config[2];
+ info->pacing_mode = config[3];
+ info->timer_count = config[4] / ACC_DEFAULT_PERIOD;
+
+ if (info->start_channel > ACC_MAX_CHANNEL) {
+ dev_err(kdev->dev, "channel %d invalid for range %s\n",
+ info->start_channel, range->name);
+ return -EINVAL;
+ }
+
+ if (info->pacing_mode > 3) {
+ dev_err(kdev->dev, "pacing mode %d invalid for range %s\n",
+ info->pacing_mode, range->name);
+ return -EINVAL;
+ }
+
+ pdsp = knav_find_pdsp(kdev, info->pdsp_id);
+ if (!pdsp) {
+ dev_err(kdev->dev, "pdsp id %d not found for range %s\n",
+ info->pdsp_id, range->name);
+ return -EINVAL;
+ }
+
+ if (!pdsp->started) {
+ dev_err(kdev->dev, "pdsp id %d not started for range %s\n",
+ info->pdsp_id, range->name);
+ return -ENODEV;
+ }
+
+ info->pdsp = pdsp;
+ channels = range->num_queues;
+ if (of_get_property(node, "multi-queue", NULL)) {
+ range->flags |= RANGE_MULTI_QUEUE;
+ channels = 1;
+ if (range->queue_base & (32 - 1)) {
+ dev_err(kdev->dev,
+ "misaligned multi-queue accumulator range %s\n",
+ range->name);
+ return -EINVAL;
+ }
+ if (range->num_queues > 32) {
+ dev_err(kdev->dev,
+ "too many queues in accumulator range %s\n",
+ range->name);
+ return -EINVAL;
+ }
+ }
+
+ /* figure out list size */
+ list_size = info->list_entries;
+ list_size *= ACC_LIST_ENTRY_WORDS * sizeof(u32);
+ info->list_size = list_size;
+ mem_size = PAGE_ALIGN(list_size * 2);
+ info->mem_size = mem_size;
+ range->acc = devm_kcalloc(kdev->dev, channels, sizeof(*range->acc),
+ GFP_KERNEL);
+ if (!range->acc)
+ return -ENOMEM;
+
+ for (channel = 0; channel < channels; channel++) {
+ acc = range->acc + channel;
+ acc->channel = info->start_channel + channel;
+
+ /* allocate memory for the two lists */
+ list_mem = alloc_pages_exact(mem_size, GFP_KERNEL | GFP_DMA);
+ if (!list_mem)
+ return -ENOMEM;
+
+ list_dma = dma_map_single(kdev->dev, list_mem, mem_size,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(kdev->dev, list_dma)) {
+ free_pages_exact(list_mem, mem_size);
+ return -ENOMEM;
+ }
+
+ memset(list_mem, 0, mem_size);
+ dma_sync_single_for_device(kdev->dev, list_dma, mem_size,
+ DMA_TO_DEVICE);
+ scnprintf(acc->name, sizeof(acc->name), "hwqueue-acc-%d",
+ acc->channel);
+ acc->list_cpu[0] = list_mem;
+ acc->list_cpu[1] = list_mem + list_size;
+ acc->list_dma[0] = list_dma;
+ acc->list_dma[1] = list_dma + list_size;
+ dev_dbg(kdev->dev, "%s: channel %d, dma %pad, virt %8p\n",
+ acc->name, acc->channel, &list_dma, list_mem);
+ }
+
+ range->ops = &knav_acc_range_ops;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(knav_init_acc_range);
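+
+/*
+ * The "accumulator" property parsed above carries five cells:
+ * <pdsp-id start-channel list-entries pacing-mode timeout-usecs>.
+ * A hypothetical range node could therefore contain:
+ *
+ *   accumulator = <0 36 16 2 50>;
+ *   multi-queue;
+ *
+ * i.e. PDSP 0, channel 36, 16 list entries, pacing mode 2 and a 50 us
+ * timeout (stored as timer_count = 50 / ACC_DEFAULT_PERIOD = 2).
+ */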
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
new file mode 100644
index 000000000..9f5ce52e6
--- /dev/null
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -0,0 +1,1922 @@
+/*
+ * Keystone Queue Manager subsystem driver
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
+ * Authors: Sandeep Nair <sandeep_n@ti.com>
+ * Cyril Chemparathy <cyril@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/soc/ti/knav_qmss.h>
+
+#include "knav_qmss.h"
+
+static struct knav_device *kdev;
+static DEFINE_MUTEX(knav_dev_lock);
+
+/* Queue manager register indices in DTS */
+#define KNAV_QUEUE_PEEK_REG_INDEX 0
+#define KNAV_QUEUE_STATUS_REG_INDEX 1
+#define KNAV_QUEUE_CONFIG_REG_INDEX 2
+#define KNAV_QUEUE_REGION_REG_INDEX 3
+#define KNAV_QUEUE_PUSH_REG_INDEX 4
+#define KNAV_QUEUE_POP_REG_INDEX 5
+
+/* Queue manager register indices in DTS for QMSS in K2G NAVSS.
+ * There are no status and vbusm push registers on this version
+ * of QMSS. Push registers are the same as pop registers, so all
+ * indices above 1 need to be redefined.
+ */
+#define KNAV_L_QUEUE_CONFIG_REG_INDEX 1
+#define KNAV_L_QUEUE_REGION_REG_INDEX 2
+#define KNAV_L_QUEUE_PUSH_REG_INDEX 3
+
+/* PDSP register indices in DTS */
+#define KNAV_QUEUE_PDSP_IRAM_REG_INDEX 0
+#define KNAV_QUEUE_PDSP_REGS_REG_INDEX 1
+#define KNAV_QUEUE_PDSP_INTD_REG_INDEX 2
+#define KNAV_QUEUE_PDSP_CMD_REG_INDEX 3
+
+#define knav_queue_idx_to_inst(kdev, idx) \
+ (kdev->instances + (idx << kdev->inst_shift))
+
+#define for_each_handle_rcu(qh, inst) \
+ list_for_each_entry_rcu(qh, &inst->handles, list)
+
+#define for_each_instance(idx, inst, kdev) \
+ for (idx = 0, inst = kdev->instances; \
+ idx < (kdev)->num_queues_in_use; \
+ idx++, inst = knav_queue_idx_to_inst(kdev, idx))
+
+/* All firmware file names end up here. List the firmware file names
+ * below, newest first. The search starts at the beginning of the array
+ * and stops at the first firmware file that is found.
+ */
+const char *knav_acc_firmwares[] = {"ks2_qmss_pdsp_acc48.bin"};
+
+static bool device_ready;
+bool knav_qmss_device_ready(void)
+{
+ return device_ready;
+}
+EXPORT_SYMBOL_GPL(knav_qmss_device_ready);
+
+/**
+ * knav_queue_notify: qmss queue notifier call
+ *
+ * @inst: qmss queue instance like accumulator
+ */
+void knav_queue_notify(struct knav_queue_inst *inst)
+{
+ struct knav_queue *qh;
+
+ if (!inst)
+ return;
+
+ rcu_read_lock();
+ for_each_handle_rcu(qh, inst) {
+ if (atomic_read(&qh->notifier_enabled) <= 0)
+ continue;
+ if (WARN_ON(!qh->notifier_fn))
+ continue;
+ this_cpu_inc(qh->stats->notifies);
+ qh->notifier_fn(qh->notifier_fn_arg);
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(knav_queue_notify);
+
+static irqreturn_t knav_queue_int_handler(int irq, void *_instdata)
+{
+ struct knav_queue_inst *inst = _instdata;
+
+ knav_queue_notify(inst);
+ return IRQ_HANDLED;
+}
+
+static int knav_queue_setup_irq(struct knav_range_info *range,
+ struct knav_queue_inst *inst)
+{
+ unsigned queue = inst->id - range->queue_base;
+ int ret = 0, irq;
+
+ if (range->flags & RANGE_HAS_IRQ) {
+ irq = range->irqs[queue].irq;
+ ret = request_irq(irq, knav_queue_int_handler, 0,
+ inst->irq_name, inst);
+ if (ret)
+ return ret;
+ disable_irq(irq);
+ if (range->irqs[queue].cpu_mask) {
+ ret = irq_set_affinity_hint(irq, range->irqs[queue].cpu_mask);
+ if (ret) {
+ dev_warn(range->kdev->dev,
+ "Failed to set IRQ affinity\n");
+ return ret;
+ }
+ }
+ }
+ return ret;
+}
+
+static void knav_queue_free_irq(struct knav_queue_inst *inst)
+{
+ struct knav_range_info *range = inst->range;
+ unsigned queue = inst->id - inst->range->queue_base;
+ int irq;
+
+ if (range->flags & RANGE_HAS_IRQ) {
+ irq = range->irqs[queue].irq;
+ irq_set_affinity_hint(irq, NULL);
+ free_irq(irq, inst);
+ }
+}
+
+static inline bool knav_queue_is_busy(struct knav_queue_inst *inst)
+{
+ return !list_empty(&inst->handles);
+}
+
+static inline bool knav_queue_is_reserved(struct knav_queue_inst *inst)
+{
+ return inst->range->flags & RANGE_RESERVED;
+}
+
+static inline bool knav_queue_is_shared(struct knav_queue_inst *inst)
+{
+ struct knav_queue *tmp;
+
+ rcu_read_lock();
+ for_each_handle_rcu(tmp, inst) {
+ if (tmp->flags & KNAV_QUEUE_SHARED) {
+ rcu_read_unlock();
+ return true;
+ }
+ }
+ rcu_read_unlock();
+ return false;
+}
+
+static inline bool knav_queue_match_type(struct knav_queue_inst *inst,
+ unsigned type)
+{
+ if ((type == KNAV_QUEUE_QPEND) &&
+ (inst->range->flags & RANGE_HAS_IRQ)) {
+ return true;
+ } else if ((type == KNAV_QUEUE_ACC) &&
+ (inst->range->flags & RANGE_HAS_ACCUMULATOR)) {
+ return true;
+ } else if ((type == KNAV_QUEUE_GP) &&
+ !(inst->range->flags &
+ (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ))) {
+ return true;
+ }
+ return false;
+}
+
+static inline struct knav_queue_inst *
+knav_queue_match_id_to_inst(struct knav_device *kdev, unsigned id)
+{
+ struct knav_queue_inst *inst;
+ int idx;
+
+ for_each_instance(idx, inst, kdev) {
+ if (inst->id == id)
+ return inst;
+ }
+ return NULL;
+}
+
+static inline struct knav_queue_inst *knav_queue_find_by_id(int id)
+{
+ if (kdev->base_id <= id &&
+ kdev->base_id + kdev->num_queues > id) {
+ id -= kdev->base_id;
+ return knav_queue_match_id_to_inst(kdev, id);
+ }
+ return NULL;
+}
+
+static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst,
+ const char *name, unsigned flags)
+{
+ struct knav_queue *qh;
+ unsigned id;
+ int ret = 0;
+
+ qh = devm_kzalloc(inst->kdev->dev, sizeof(*qh), GFP_KERNEL);
+ if (!qh)
+ return ERR_PTR(-ENOMEM);
+
+ qh->stats = alloc_percpu(struct knav_queue_stats);
+ if (!qh->stats) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ qh->flags = flags;
+ qh->inst = inst;
+ id = inst->id - inst->qmgr->start_queue;
+ qh->reg_push = &inst->qmgr->reg_push[id];
+ qh->reg_pop = &inst->qmgr->reg_pop[id];
+ qh->reg_peek = &inst->qmgr->reg_peek[id];
+
+ /* first opener? */
+ if (!knav_queue_is_busy(inst)) {
+ struct knav_range_info *range = inst->range;
+
+ inst->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
+ if (range->ops && range->ops->open_queue)
+ ret = range->ops->open_queue(range, inst, flags);
+
+ if (ret)
+ goto err;
+ }
+ list_add_tail_rcu(&qh->list, &inst->handles);
+ return qh;
+
+err:
+ if (qh->stats)
+ free_percpu(qh->stats);
+ devm_kfree(inst->kdev->dev, qh);
+ return ERR_PTR(ret);
+}
+
+static struct knav_queue *
+knav_queue_open_by_id(const char *name, unsigned id, unsigned flags)
+{
+ struct knav_queue_inst *inst;
+ struct knav_queue *qh;
+
+ mutex_lock(&knav_dev_lock);
+
+ qh = ERR_PTR(-ENODEV);
+ inst = knav_queue_find_by_id(id);
+ if (!inst)
+ goto unlock_ret;
+
+ qh = ERR_PTR(-EEXIST);
+ if (!(flags & KNAV_QUEUE_SHARED) && knav_queue_is_busy(inst))
+ goto unlock_ret;
+
+ qh = ERR_PTR(-EBUSY);
+ if ((flags & KNAV_QUEUE_SHARED) &&
+ (knav_queue_is_busy(inst) && !knav_queue_is_shared(inst)))
+ goto unlock_ret;
+
+ qh = __knav_queue_open(inst, name, flags);
+
+unlock_ret:
+ mutex_unlock(&knav_dev_lock);
+
+ return qh;
+}
+
+static struct knav_queue *knav_queue_open_by_type(const char *name,
+ unsigned type, unsigned flags)
+{
+ struct knav_queue_inst *inst;
+ struct knav_queue *qh = ERR_PTR(-EINVAL);
+ int idx;
+
+ mutex_lock(&knav_dev_lock);
+
+ for_each_instance(idx, inst, kdev) {
+ if (knav_queue_is_reserved(inst))
+ continue;
+ if (!knav_queue_match_type(inst, type))
+ continue;
+ if (knav_queue_is_busy(inst))
+ continue;
+ qh = __knav_queue_open(inst, name, flags);
+ goto unlock_ret;
+ }
+
+unlock_ret:
+ mutex_unlock(&knav_dev_lock);
+ return qh;
+}
+
+static void knav_queue_set_notify(struct knav_queue_inst *inst, bool enabled)
+{
+ struct knav_range_info *range = inst->range;
+
+ if (range->ops && range->ops->set_notify)
+ range->ops->set_notify(range, inst, enabled);
+}
+
+static int knav_queue_enable_notifier(struct knav_queue *qh)
+{
+ struct knav_queue_inst *inst = qh->inst;
+ bool first;
+
+ if (WARN_ON(!qh->notifier_fn))
+ return -EINVAL;
+
+ /* Adjust the per handle notifier count */
+ first = (atomic_inc_return(&qh->notifier_enabled) == 1);
+ if (!first)
+ return 0; /* nothing to do */
+
+ /* Now adjust the per instance notifier count */
+ first = (atomic_inc_return(&inst->num_notifiers) == 1);
+ if (first)
+ knav_queue_set_notify(inst, true);
+
+ return 0;
+}
+
+static int knav_queue_disable_notifier(struct knav_queue *qh)
+{
+ struct knav_queue_inst *inst = qh->inst;
+ bool last;
+
+ last = (atomic_dec_return(&qh->notifier_enabled) == 0);
+ if (!last)
+ return 0; /* nothing to do */
+
+ last = (atomic_dec_return(&inst->num_notifiers) == 0);
+ if (last)
+ knav_queue_set_notify(inst, false);
+
+ return 0;
+}
+
+static int knav_queue_set_notifier(struct knav_queue *qh,
+ struct knav_queue_notify_config *cfg)
+{
+ knav_queue_notify_fn old_fn = qh->notifier_fn;
+
+ if (!cfg)
+ return -EINVAL;
+
+ if (!(qh->inst->range->flags & (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ)))
+ return -ENOTSUPP;
+
+ if (!cfg->fn && old_fn)
+ knav_queue_disable_notifier(qh);
+
+ qh->notifier_fn = cfg->fn;
+ qh->notifier_fn_arg = cfg->fn_arg;
+
+ if (cfg->fn && !old_fn)
+ knav_queue_enable_notifier(qh);
+
+ return 0;
+}
+
+static int knav_gp_set_notify(struct knav_range_info *range,
+ struct knav_queue_inst *inst,
+ bool enabled)
+{
+ unsigned queue;
+
+ if (range->flags & RANGE_HAS_IRQ) {
+ queue = inst->id - range->queue_base;
+ if (enabled)
+ enable_irq(range->irqs[queue].irq);
+ else
+ disable_irq_nosync(range->irqs[queue].irq);
+ }
+ return 0;
+}
+
+static int knav_gp_open_queue(struct knav_range_info *range,
+ struct knav_queue_inst *inst, unsigned flags)
+{
+ return knav_queue_setup_irq(range, inst);
+}
+
+static int knav_gp_close_queue(struct knav_range_info *range,
+ struct knav_queue_inst *inst)
+{
+ knav_queue_free_irq(inst);
+ return 0;
+}
+
+struct knav_range_ops knav_gp_range_ops = {
+ .set_notify = knav_gp_set_notify,
+ .open_queue = knav_gp_open_queue,
+ .close_queue = knav_gp_close_queue,
+};
+
+
+static int knav_queue_get_count(void *qhandle)
+{
+ struct knav_queue *qh = qhandle;
+ struct knav_queue_inst *inst = qh->inst;
+
+ return readl_relaxed(&qh->reg_peek[0].entry_count) +
+ atomic_read(&inst->desc_count);
+}
+
+static void knav_queue_debug_show_instance(struct seq_file *s,
+ struct knav_queue_inst *inst)
+{
+ struct knav_device *kdev = inst->kdev;
+ struct knav_queue *qh;
+ int cpu = 0;
+ int pushes = 0;
+ int pops = 0;
+ int push_errors = 0;
+ int pop_errors = 0;
+ int notifies = 0;
+
+ if (!knav_queue_is_busy(inst))
+ return;
+
+ seq_printf(s, "\tqueue id %d (%s)\n",
+ kdev->base_id + inst->id, inst->name);
+ for_each_handle_rcu(qh, inst) {
+ for_each_possible_cpu(cpu) {
+ pushes += per_cpu_ptr(qh->stats, cpu)->pushes;
+ pops += per_cpu_ptr(qh->stats, cpu)->pops;
+ push_errors += per_cpu_ptr(qh->stats, cpu)->push_errors;
+ pop_errors += per_cpu_ptr(qh->stats, cpu)->pop_errors;
+ notifies += per_cpu_ptr(qh->stats, cpu)->notifies;
+ }
+
+ seq_printf(s, "\t\thandle %p: pushes %8d, pops %8d, count %8d, notifies %8d, push errors %8d, pop errors %8d\n",
+ qh,
+ pushes,
+ pops,
+ knav_queue_get_count(qh),
+ notifies,
+ push_errors,
+ pop_errors);
+ }
+}
+
+static int knav_queue_debug_show(struct seq_file *s, void *v)
+{
+ struct knav_queue_inst *inst;
+ int idx;
+
+ mutex_lock(&knav_dev_lock);
+ seq_printf(s, "%s: %u-%u\n",
+ dev_name(kdev->dev), kdev->base_id,
+ kdev->base_id + kdev->num_queues - 1);
+ for_each_instance(idx, inst, kdev)
+ knav_queue_debug_show_instance(s, inst);
+ mutex_unlock(&knav_dev_lock);
+
+ return 0;
+}
+
+static int knav_queue_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, knav_queue_debug_show, NULL);
+}
+
+static const struct file_operations knav_queue_debug_ops = {
+ .open = knav_queue_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static inline int knav_queue_pdsp_wait(u32 __iomem *addr, unsigned timeout,
+ u32 flags)
+{
+ unsigned long end;
+ u32 val = 0;
+
+ end = jiffies + msecs_to_jiffies(timeout);
+ while (time_after(end, jiffies)) {
+ val = readl_relaxed(addr);
+ if (flags)
+ val &= flags;
+ if (!val)
+ break;
+ cpu_relax();
+ }
+ return val ? -ETIMEDOUT : 0;
+}
+
+
+static int knav_queue_flush(struct knav_queue *qh)
+{
+ struct knav_queue_inst *inst = qh->inst;
+ unsigned id = inst->id - inst->qmgr->start_queue;
+
+ atomic_set(&inst->desc_count, 0);
+ writel_relaxed(0, &inst->qmgr->reg_push[id].ptr_size_thresh);
+ return 0;
+}
+
+/**
+ * knav_queue_open() - open a hardware queue
+ * @name - name to give the queue handle
+ * @id - desired queue number if any or specifies the type
+ * of queue
+ * @flags - the following flags are applicable to queues:
+ * KNAV_QUEUE_SHARED - allow the queue to be shared. Queues are
+ * exclusive by default.
+ * Subsequent attempts to open a shared queue should
+ * also have this flag.
+ *
+ * Returns a handle to the open hardware queue if successful. Use IS_ERR()
+ * to check the returned value for error codes.
+ */
+void *knav_queue_open(const char *name, unsigned id,
+ unsigned flags)
+{
+ struct knav_queue *qh = ERR_PTR(-EINVAL);
+
+ switch (id) {
+ case KNAV_QUEUE_QPEND:
+ case KNAV_QUEUE_ACC:
+ case KNAV_QUEUE_GP:
+ qh = knav_queue_open_by_type(name, id, flags);
+ break;
+
+ default:
+ qh = knav_queue_open_by_id(name, id, flags);
+ break;
+ }
+ return qh;
+}
+EXPORT_SYMBOL_GPL(knav_queue_open);
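+
+/*
+ * A minimal sketch of both open styles (queue name hypothetical):
+ *
+ *   void *q;
+ *
+ *   q = knav_queue_open("my-gp-queue", KNAV_QUEUE_GP, 0);
+ *   if (IS_ERR(q))
+ *           return PTR_ERR(q);
+ *
+ * Passing a specific queue number as @id instead opens exactly that
+ * queue; pass KNAV_QUEUE_SHARED on every opener if it is to be shared.
+ */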
+
+/**
+ * knav_queue_close() - close a hardware queue handle
+ * @qh - handle to close
+ */
+void knav_queue_close(void *qhandle)
+{
+ struct knav_queue *qh = qhandle;
+ struct knav_queue_inst *inst = qh->inst;
+
+ while (atomic_read(&qh->notifier_enabled) > 0)
+ knav_queue_disable_notifier(qh);
+
+ mutex_lock(&knav_dev_lock);
+ list_del_rcu(&qh->list);
+ mutex_unlock(&knav_dev_lock);
+ synchronize_rcu();
+ if (!knav_queue_is_busy(inst)) {
+ struct knav_range_info *range = inst->range;
+
+ if (range->ops && range->ops->close_queue)
+ range->ops->close_queue(range, inst);
+ }
+ free_percpu(qh->stats);
+ devm_kfree(inst->kdev->dev, qh);
+}
+EXPORT_SYMBOL_GPL(knav_queue_close);
+
+/**
+ * knav_queue_device_control() - Perform control operations on a queue
+ * @qh - queue handle
+ * @cmd - control commands
+ * @arg - command argument
+ *
+ * Returns 0 on success, errno otherwise.
+ */
+int knav_queue_device_control(void *qhandle, enum knav_queue_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct knav_queue *qh = qhandle;
+ struct knav_queue_notify_config *cfg;
+ int ret;
+
+ switch ((int)cmd) {
+ case KNAV_QUEUE_GET_ID:
+ ret = qh->inst->kdev->base_id + qh->inst->id;
+ break;
+
+ case KNAV_QUEUE_FLUSH:
+ ret = knav_queue_flush(qh);
+ break;
+
+ case KNAV_QUEUE_SET_NOTIFIER:
+ cfg = (void *)arg;
+ ret = knav_queue_set_notifier(qh, cfg);
+ break;
+
+ case KNAV_QUEUE_ENABLE_NOTIFY:
+ ret = knav_queue_enable_notifier(qh);
+ break;
+
+ case KNAV_QUEUE_DISABLE_NOTIFY:
+ ret = knav_queue_disable_notifier(qh);
+ break;
+
+ case KNAV_QUEUE_GET_COUNT:
+ ret = knav_queue_get_count(qh);
+ break;
+
+ default:
+ ret = -ENOTSUPP;
+ break;
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(knav_queue_device_control);
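+
+/*
+ * Sketch of wiring up a notifier through the control interface (the
+ * callback and its argument are hypothetical):
+ *
+ *   struct knav_queue_notify_config cfg = {
+ *           .fn = my_rx_notify,
+ *           .fn_arg = priv,
+ *   };
+ *
+ *   knav_queue_device_control(qh, KNAV_QUEUE_SET_NOTIFIER,
+ *                             (unsigned long)&cfg);
+ */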
+
+
+
+/**
+ * knav_queue_push() - push data (or descriptor) to the tail of a queue
+ * @qh - hardware queue handle
+ * @dma - DMA address of the data (or descriptor) to push
+ * @size - size of data to push
+ * @flags - can be used to pass additional information
+ *
+ * Returns 0 on success, errno otherwise.
+ */
+int knav_queue_push(void *qhandle, dma_addr_t dma,
+ unsigned size, unsigned flags)
+{
+ struct knav_queue *qh = qhandle;
+ u32 val;
+
+ val = (u32)dma | ((size / 16) - 1);
+ writel_relaxed(val, &qh->reg_push[0].ptr_size_thresh);
+
+ this_cpu_inc(qh->stats->pushes);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(knav_queue_push);
+
+/**
+ * knav_queue_pop() - pop data (or descriptor) from the head of a queue
+ * @qh - hardware queue handle
+ * @size - (optional) size of the data popped.
+ *
+ * Returns a DMA address on success, 0 on failure.
+ */
+dma_addr_t knav_queue_pop(void *qhandle, unsigned *size)
+{
+ struct knav_queue *qh = qhandle;
+ struct knav_queue_inst *inst = qh->inst;
+ dma_addr_t dma;
+ u32 val, idx;
+
+ /* is this queue fed by an accumulator list? */
+ if (inst->descs) {
+ if (unlikely(atomic_dec_return(&inst->desc_count) < 0)) {
+ atomic_inc(&inst->desc_count);
+ return 0;
+ }
+ idx = atomic_inc_return(&inst->desc_head);
+ idx &= ACC_DESCS_MASK;
+ val = inst->descs[idx];
+ } else {
+ val = readl_relaxed(&qh->reg_pop[0].ptr_size_thresh);
+ if (unlikely(!val))
+ return 0;
+ }
+
+ dma = val & DESC_PTR_MASK;
+ if (size)
+ *size = ((val & DESC_SIZE_MASK) + 1) * 16;
+
+ this_cpu_inc(qh->stats->pops);
+ return dma;
+}
+EXPORT_SYMBOL_GPL(knav_queue_pop);
+
+/* carve out descriptors and push into queue */
+static void kdesc_fill_pool(struct knav_pool *pool)
+{
+ struct knav_region *region;
+ int i;
+
+ region = pool->region;
+ pool->desc_size = region->desc_size;
+ for (i = 0; i < pool->num_desc; i++) {
+ int index = pool->region_offset + i;
+ dma_addr_t dma_addr;
+ unsigned dma_size;
+ dma_addr = region->dma_start + (region->desc_size * index);
+ dma_size = ALIGN(pool->desc_size, SMP_CACHE_BYTES);
+ dma_sync_single_for_device(pool->dev, dma_addr, dma_size,
+ DMA_TO_DEVICE);
+ knav_queue_push(pool->queue, dma_addr, dma_size, 0);
+ }
+}
+
+/* pop out descriptors and close the queue */
+static void kdesc_empty_pool(struct knav_pool *pool)
+{
+ dma_addr_t dma;
+ unsigned size;
+ void *desc;
+ int i;
+
+ if (!pool->queue)
+ return;
+
+ for (i = 0;; i++) {
+ dma = knav_queue_pop(pool->queue, &size);
+ if (!dma)
+ break;
+ desc = knav_pool_desc_dma_to_virt(pool, dma);
+ if (!desc) {
+ dev_dbg(pool->kdev->dev,
+ "couldn't unmap desc, continuing\n");
+ continue;
+ }
+ }
+ WARN_ON(i != pool->num_desc);
+ knav_queue_close(pool->queue);
+}
+
+
+/* Get the DMA address of a descriptor */
+dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt)
+{
+ struct knav_pool *pool = ph;
+ return pool->region->dma_start + (virt - pool->region->virt_start);
+}
+EXPORT_SYMBOL_GPL(knav_pool_desc_virt_to_dma);
+
+void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma)
+{
+ struct knav_pool *pool = ph;
+ return pool->region->virt_start + (dma - pool->region->dma_start);
+}
+EXPORT_SYMBOL_GPL(knav_pool_desc_dma_to_virt);
+
+/**
+ * knav_pool_create() - Create a pool of descriptors
+ * @name - name to give the pool handle
+ * @num_desc - number of descriptors in the pool
+ * @region_id - QMSS region id from which the descriptors are to be
+ * allocated.
+ *
+ * Returns a pool handle on success.
+ * Use IS_ERR_OR_NULL() to identify error values on return.
+ */
+void *knav_pool_create(const char *name,
+ int num_desc, int region_id)
+{
+ struct knav_region *reg_itr, *region = NULL;
+ struct knav_pool *pool, *pi;
+ struct list_head *node;
+ unsigned last_offset;
+ bool slot_found;
+ int ret;
+
+ if (!kdev)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ if (!kdev->dev)
+ return ERR_PTR(-ENODEV);
+
+ pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
+ if (!pool) {
+ dev_err(kdev->dev, "out of memory allocating pool\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for_each_region(kdev, reg_itr) {
+ if (reg_itr->id != region_id)
+ continue;
+ region = reg_itr;
+ break;
+ }
+
+ if (!region) {
+ dev_err(kdev->dev, "region-id(%d) not found\n", region_id);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0);
+ if (IS_ERR_OR_NULL(pool->queue)) {
+ dev_err(kdev->dev,
+ "failed to open queue for pool(%s), error %ld\n",
+ name, PTR_ERR(pool->queue));
+ ret = PTR_ERR(pool->queue);
+ goto err;
+ }
+
+ pool->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
+ pool->kdev = kdev;
+ pool->dev = kdev->dev;
+
+ mutex_lock(&knav_dev_lock);
+
+ if (num_desc > (region->num_desc - region->used_desc)) {
+ dev_err(kdev->dev, "out of descs in region(%d) for pool(%s)\n",
+ region_id, name);
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ /* The region maintains a list of pools sorted by region offset;
+ * use the first free slot that is large enough to accommodate
+ * the request.
+ */
+ last_offset = 0;
+ slot_found = false;
+ node = &region->pools;
+ list_for_each_entry(pi, &region->pools, region_inst) {
+ if ((pi->region_offset - last_offset) >= num_desc) {
+ slot_found = true;
+ break;
+ }
+ last_offset = pi->region_offset + pi->num_desc;
+ }
+ node = &pi->region_inst;
+
+ if (slot_found) {
+ pool->region = region;
+ pool->num_desc = num_desc;
+ pool->region_offset = last_offset;
+ region->used_desc += num_desc;
+ list_add_tail(&pool->list, &kdev->pools);
+ list_add_tail(&pool->region_inst, node);
+ } else {
+ dev_err(kdev->dev, "pool(%s) create failed: fragmented desc pool in region(%d)\n",
+ name, region_id);
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ mutex_unlock(&knav_dev_lock);
+ kdesc_fill_pool(pool);
+ return pool;
+
+err_unlock:
+ mutex_unlock(&knav_dev_lock);
+err:
+ kfree(pool->name);
+ devm_kfree(kdev->dev, pool);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(knav_pool_create);
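+
+/*
+ * Typical pool lifetime, sketched (name and sizes hypothetical):
+ *
+ *   void *pool = knav_pool_create("rx-pool", 1024, 0);
+ *   if (IS_ERR_OR_NULL(pool))
+ *           return PTR_ERR(pool);
+ *
+ *   desc = knav_pool_desc_get(pool);     pops from the backing queue
+ *   ...
+ *   knav_pool_desc_put(pool, desc);      pushes it back
+ *
+ *   knav_pool_destroy(pool);
+ */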
+
+/**
+ * knav_pool_destroy() - Free a pool of descriptors
+ * @pool - pool handle
+ */
+void knav_pool_destroy(void *ph)
+{
+ struct knav_pool *pool = ph;
+
+ if (!pool)
+ return;
+
+ if (!pool->region)
+ return;
+
+ kdesc_empty_pool(pool);
+ mutex_lock(&knav_dev_lock);
+
+ pool->region->used_desc -= pool->num_desc;
+ list_del(&pool->region_inst);
+ list_del(&pool->list);
+
+ mutex_unlock(&knav_dev_lock);
+ kfree(pool->name);
+ devm_kfree(kdev->dev, pool);
+}
+EXPORT_SYMBOL_GPL(knav_pool_destroy);
+
+/**
+ * knav_pool_desc_get() - Get a descriptor from the pool
+ * @ph - pool handle
+ *
+ * Returns descriptor from the pool.
+ */
+void *knav_pool_desc_get(void *ph)
+{
+ struct knav_pool *pool = ph;
+ dma_addr_t dma;
+ unsigned size;
+ void *data;
+
+ dma = knav_queue_pop(pool->queue, &size);
+ if (unlikely(!dma))
+ return ERR_PTR(-ENOMEM);
+ data = knav_pool_desc_dma_to_virt(pool, dma);
+ return data;
+}
+EXPORT_SYMBOL_GPL(knav_pool_desc_get);
+
+/**
+ * knav_pool_desc_put() - return a descriptor to the pool
+ * @ph - pool handle
+ * @desc - descriptor to return to the pool
+ */
+void knav_pool_desc_put(void *ph, void *desc)
+{
+ struct knav_pool *pool = ph;
+ dma_addr_t dma;
+ dma = knav_pool_desc_virt_to_dma(pool, desc);
+ knav_queue_push(pool->queue, dma, pool->region->desc_size, 0);
+}
+EXPORT_SYMBOL_GPL(knav_pool_desc_put);
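+
+/*
+ * Minimal get/use/put sketch for the two calls above; error handling
+ * and the descriptor contents are elided, and "pool" is a handle
+ * obtained from knav_pool_create():
+ *
+ *	void *desc = knav_pool_desc_get(pool);
+ *
+ *	if (IS_ERR(desc))
+ *		return PTR_ERR(desc);
+ *	... fill the descriptor ...
+ *	knav_pool_desc_put(pool, desc);
+ */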
+
+/**
+ * knav_pool_desc_map() - Map descriptor for DMA transfer
+ * @ph - pool handle
+ * @desc - address of descriptor to map
+ * @size - size of descriptor to map
+ * @dma - DMA address return pointer
+ * @dma_sz - adjusted (cache-line aligned) size return pointer
+ *
+ * Returns 0 on success, errno otherwise.
+ */
+int knav_pool_desc_map(void *ph, void *desc, unsigned size,
+ dma_addr_t *dma, unsigned *dma_sz)
+{
+ struct knav_pool *pool = ph;
+ *dma = knav_pool_desc_virt_to_dma(pool, desc);
+ size = min(size, pool->region->desc_size);
+ size = ALIGN(size, SMP_CACHE_BYTES);
+ *dma_sz = size;
+ dma_sync_single_for_device(pool->dev, *dma, size, DMA_TO_DEVICE);
+
+	/* Ensure the descriptor reaches memory */
+ __iowmb();
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(knav_pool_desc_map);
+
+/**
+ * knav_pool_desc_unmap() - Unmap descriptor after DMA transfer
+ * @ph - pool handle
+ * @dma - DMA address of descriptor to unmap
+ * @dma_sz - size of descriptor to unmap
+ *
+ * Returns descriptor address on success, Use IS_ERR_OR_NULL() to identify
+ * error values on return.
+ */
+void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz)
+{
+ struct knav_pool *pool = ph;
+ unsigned desc_sz;
+ void *desc;
+
+ desc_sz = min(dma_sz, pool->region->desc_size);
+ desc = knav_pool_desc_dma_to_virt(pool, dma);
+ dma_sync_single_for_cpu(pool->dev, dma, desc_sz, DMA_FROM_DEVICE);
+ prefetch(desc);
+ return desc;
+}
+EXPORT_SYMBOL_GPL(knav_pool_desc_unmap);
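+
+/*
+ * Typical DMA handoff around the map/unmap calls above; "tx_queue" and
+ * "rx_queue" are hypothetical handles from knav_queue_open():
+ *
+ *	dma_addr_t dma;
+ *	unsigned dma_sz;
+ *	int ret;
+ *
+ *	ret = knav_pool_desc_map(pool, desc, size, &dma, &dma_sz);
+ *	if (!ret)
+ *		knav_queue_push(tx_queue, dma, dma_sz, 0);
+ *	...
+ *	dma = knav_queue_pop(rx_queue, &dma_sz);
+ *	desc = knav_pool_desc_unmap(pool, dma, dma_sz);
+ */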
+
+/**
+ * knav_pool_count() - Get the number of descriptors in pool.
+ * @ph - pool handle
+ * Returns number of elements in the pool.
+ */
+int knav_pool_count(void *ph)
+{
+ struct knav_pool *pool = ph;
+ return knav_queue_get_count(pool->queue);
+}
+EXPORT_SYMBOL_GPL(knav_pool_count);
+
+static void knav_queue_setup_region(struct knav_device *kdev,
+ struct knav_region *region)
+{
+ unsigned hw_num_desc, hw_desc_size, size;
+ struct knav_reg_region __iomem *regs;
+ struct knav_qmgr_info *qmgr;
+ struct knav_pool *pool;
+ int id = region->id;
+ struct page *page;
+
+ /* unused region? */
+ if (!region->num_desc) {
+ dev_warn(kdev->dev, "unused region %s\n", region->name);
+ return;
+ }
+
+ /* get hardware descriptor value */
+ hw_num_desc = ilog2(region->num_desc - 1) + 1;
+
+ /* did we force fit ourselves into nothingness? */
+ if (region->num_desc < 32) {
+ region->num_desc = 0;
+ dev_warn(kdev->dev, "too few descriptors in region %s\n",
+ region->name);
+ return;
+ }
+
+ size = region->num_desc * region->desc_size;
+ region->virt_start = alloc_pages_exact(size, GFP_KERNEL | GFP_DMA |
+ GFP_DMA32);
+ if (!region->virt_start) {
+ region->num_desc = 0;
+ dev_err(kdev->dev, "memory alloc failed for region %s\n",
+ region->name);
+ return;
+ }
+ region->virt_end = region->virt_start + size;
+ page = virt_to_page(region->virt_start);
+
+ region->dma_start = dma_map_page(kdev->dev, page, 0, size,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(kdev->dev, region->dma_start)) {
+ dev_err(kdev->dev, "dma map failed for region %s\n",
+ region->name);
+ goto fail;
+ }
+ region->dma_end = region->dma_start + size;
+
+ pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
+ if (!pool) {
+ dev_err(kdev->dev, "out of memory allocating dummy pool\n");
+ goto fail;
+ }
+ pool->num_desc = 0;
+ pool->region_offset = region->num_desc;
+ list_add(&pool->region_inst, &region->pools);
+
+ dev_dbg(kdev->dev,
+ "region %s (%d): size:%d, link:%d@%d, dma:%pad-%pad, virt:%p-%p\n",
+ region->name, id, region->desc_size, region->num_desc,
+ region->link_index, &region->dma_start, &region->dma_end,
+ region->virt_start, region->virt_end);
+
+ hw_desc_size = (region->desc_size / 16) - 1;
+ hw_num_desc -= 5;
+
+ for_each_qmgr(kdev, qmgr) {
+ regs = qmgr->reg_region + id;
+ writel_relaxed((u32)region->dma_start, &regs->base);
+ writel_relaxed(region->link_index, &regs->start_index);
+ writel_relaxed(hw_desc_size << 16 | hw_num_desc,
+ &regs->size_count);
+ }
+ return;
+
+fail:
+ if (region->dma_start)
+ dma_unmap_page(kdev->dev, region->dma_start, size,
+ DMA_BIDIRECTIONAL);
+ if (region->virt_start)
+ free_pages_exact(region->virt_start, size);
+ region->num_desc = 0;
+ return;
+}
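+
+/*
+ * Worked example of the hardware encoding above, with made-up numbers:
+ * for num_desc = 1024 and desc_size = 128, hw_num_desc starts as
+ * ilog2(1023) + 1 = 10 and becomes 5 after the -= 5, i.e. the hardware
+ * counts descriptors in units of 32 (32 << 5 = 1024); the descriptor
+ * size is encoded in 16-byte units as 128 / 16 - 1 = 7.
+ */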
+
+static const char *knav_queue_find_name(struct device_node *node)
+{
+ const char *name;
+
+ if (of_property_read_string(node, "label", &name) < 0)
+ name = node->name;
+ if (!name)
+ name = "unknown";
+ return name;
+}
+
+static int knav_queue_setup_regions(struct knav_device *kdev,
+ struct device_node *regions)
+{
+ struct device *dev = kdev->dev;
+ struct knav_region *region;
+ struct device_node *child;
+ u32 temp[2];
+ int ret;
+
+ for_each_child_of_node(regions, child) {
+ region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
+ if (!region) {
+ dev_err(dev, "out of memory allocating region\n");
+ return -ENOMEM;
+ }
+
+ region->name = knav_queue_find_name(child);
+ of_property_read_u32(child, "id", &region->id);
+ ret = of_property_read_u32_array(child, "region-spec", temp, 2);
+ if (!ret) {
+ region->num_desc = temp[0];
+ region->desc_size = temp[1];
+ } else {
+ dev_err(dev, "invalid region info %s\n", region->name);
+ devm_kfree(dev, region);
+ continue;
+ }
+
+ if (!of_get_property(child, "link-index", NULL)) {
+ dev_err(dev, "No link info for %s\n", region->name);
+ devm_kfree(dev, region);
+ continue;
+ }
+ ret = of_property_read_u32(child, "link-index",
+ &region->link_index);
+ if (ret) {
+ dev_err(dev, "link index not found for %s\n",
+ region->name);
+ devm_kfree(dev, region);
+ continue;
+ }
+
+ INIT_LIST_HEAD(&region->pools);
+ list_add_tail(&region->list, &kdev->regions);
+ }
+ if (list_empty(&kdev->regions)) {
+ dev_err(dev, "no valid region information found\n");
+ return -ENODEV;
+ }
+
+ /* Next, we run through the regions and set things up */
+ for_each_region(kdev, region)
+ knav_queue_setup_region(kdev, region);
+
+ return 0;
+}
+
+static int knav_get_link_ram(struct knav_device *kdev,
+ const char *name,
+ struct knav_link_ram_block *block)
+{
+ struct platform_device *pdev = to_platform_device(kdev->dev);
+ struct device_node *node = pdev->dev.of_node;
+ u32 temp[2];
+
+	/*
+	 * Note: link ram resources are specified in "entry" sized units. In
+	 * reality, although entries are ~40 bits in hardware, we treat them
+	 * as 64-bit entities here.
+	 *
+	 * For example, to specify the internal link ram for Keystone-I class
+	 * devices, we would set the linkram0 resource to 0x80000-0x83fff.
+	 *
+	 * This gets a bit weird when other link rams are used. For example,
+	 * if the range specified is 0x0c000000-0x0c003fff (i.e., 16K entries
+	 * in MSMC SRAM), the actual memory used is 0x0c000000-0x0c020000,
+	 * which accounts for 64 bits per entry, for 16K entries.
+	 */
+	if (!of_property_read_u32_array(node, name, temp, 2)) {
+ if (temp[0]) {
+ /*
+ * queue_base specified => using internal or onchip
+ * link ram WARNING - we do not "reserve" this block
+ */
+ block->dma = (dma_addr_t)temp[0];
+ block->virt = NULL;
+ block->size = temp[1];
+ } else {
+ block->size = temp[1];
+			/* queue_base not specified => allocate requested size */
+ block->virt = dmam_alloc_coherent(kdev->dev,
+ 8 * block->size, &block->dma,
+ GFP_KERNEL);
+ if (!block->virt) {
+ dev_err(kdev->dev, "failed to alloc linkram\n");
+ return -ENOMEM;
+ }
+ }
+ } else {
+ return -ENODEV;
+ }
+ return 0;
+}
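+
+/*
+ * Illustrative device tree values for the two cases handled above
+ * (example numbers only, not taken from any particular board):
+ *
+ *	linkram0 = <0x00080000 0x4000>;	(base given: use internal RAM,
+ *					 not reserved by the kernel)
+ *	linkram1 = <0x0 0x10000>;	(base 0: 64K entries allocated
+ *					 via dmam_alloc_coherent())
+ */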
+
+static int knav_queue_setup_link_ram(struct knav_device *kdev)
+{
+ struct knav_link_ram_block *block;
+ struct knav_qmgr_info *qmgr;
+
+ for_each_qmgr(kdev, qmgr) {
+ block = &kdev->link_rams[0];
+ dev_dbg(kdev->dev, "linkram0: dma:%pad, virt:%p, size:%x\n",
+ &block->dma, block->virt, block->size);
+ writel_relaxed((u32)block->dma, &qmgr->reg_config->link_ram_base0);
+ if (kdev->version == QMSS_66AK2G)
+ writel_relaxed(block->size,
+ &qmgr->reg_config->link_ram_size0);
+ else
+ writel_relaxed(block->size - 1,
+ &qmgr->reg_config->link_ram_size0);
+ block++;
+ if (!block->size)
+ continue;
+
+ dev_dbg(kdev->dev, "linkram1: dma:%pad, virt:%p, size:%x\n",
+ &block->dma, block->virt, block->size);
+ writel_relaxed(block->dma, &qmgr->reg_config->link_ram_base1);
+ }
+
+ return 0;
+}
+
+static int knav_setup_queue_range(struct knav_device *kdev,
+ struct device_node *node)
+{
+ struct device *dev = kdev->dev;
+ struct knav_range_info *range;
+ struct knav_qmgr_info *qmgr;
+ u32 temp[2], start, end, id, index;
+ int ret, i;
+
+ range = devm_kzalloc(dev, sizeof(*range), GFP_KERNEL);
+ if (!range) {
+ dev_err(dev, "out of memory allocating range\n");
+ return -ENOMEM;
+ }
+
+ range->kdev = kdev;
+ range->name = knav_queue_find_name(node);
+ ret = of_property_read_u32_array(node, "qrange", temp, 2);
+ if (!ret) {
+ range->queue_base = temp[0] - kdev->base_id;
+ range->num_queues = temp[1];
+ } else {
+ dev_err(dev, "invalid queue range %s\n", range->name);
+ devm_kfree(dev, range);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < RANGE_MAX_IRQS; i++) {
+ struct of_phandle_args oirq;
+
+ if (of_irq_parse_one(node, i, &oirq))
+ break;
+
+ range->irqs[i].irq = irq_create_of_mapping(&oirq);
+ if (range->irqs[i].irq == IRQ_NONE)
+ break;
+
+ range->num_irqs++;
+
+ if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3) {
+ unsigned long mask;
+ int bit;
+
+ range->irqs[i].cpu_mask = devm_kzalloc(dev,
+ cpumask_size(), GFP_KERNEL);
+ if (!range->irqs[i].cpu_mask)
+ return -ENOMEM;
+
+ mask = (oirq.args[2] & 0x0000ff00) >> 8;
+ for_each_set_bit(bit, &mask, BITS_PER_LONG)
+ cpumask_set_cpu(bit, range->irqs[i].cpu_mask);
+ }
+ }
+
+ range->num_irqs = min(range->num_irqs, range->num_queues);
+ if (range->num_irqs)
+ range->flags |= RANGE_HAS_IRQ;
+
+ if (of_get_property(node, "qalloc-by-id", NULL))
+ range->flags |= RANGE_RESERVED;
+
+ if (of_get_property(node, "accumulator", NULL)) {
+ ret = knav_init_acc_range(kdev, node, range);
+ if (ret < 0) {
+ devm_kfree(dev, range);
+ return ret;
+ }
+ } else {
+ range->ops = &knav_gp_range_ops;
+ }
+
+ /* set threshold to 1, and flush out the queues */
+ for_each_qmgr(kdev, qmgr) {
+ start = max(qmgr->start_queue, range->queue_base);
+ end = min(qmgr->start_queue + qmgr->num_queues,
+ range->queue_base + range->num_queues);
+ for (id = start; id < end; id++) {
+ index = id - qmgr->start_queue;
+ writel_relaxed(THRESH_GTE | 1,
+ &qmgr->reg_peek[index].ptr_size_thresh);
+ writel_relaxed(0,
+ &qmgr->reg_push[index].ptr_size_thresh);
+ }
+ }
+
+ list_add_tail(&range->list, &kdev->queue_ranges);
+ dev_dbg(dev, "added range %s: %d-%d, %d irqs%s%s%s\n",
+ range->name, range->queue_base,
+ range->queue_base + range->num_queues - 1,
+ range->num_irqs,
+ (range->flags & RANGE_HAS_IRQ) ? ", has irq" : "",
+ (range->flags & RANGE_RESERVED) ? ", reserved" : "",
+ (range->flags & RANGE_HAS_ACCUMULATOR) ? ", acc" : "");
+ kdev->num_queues_in_use += range->num_queues;
+ return 0;
+}
+
+static int knav_setup_queue_pools(struct knav_device *kdev,
+ struct device_node *queue_pools)
+{
+ struct device_node *type, *range;
+ int ret;
+
+ for_each_child_of_node(queue_pools, type) {
+ for_each_child_of_node(type, range) {
+ ret = knav_setup_queue_range(kdev, range);
+ /* return value ignored, we init the rest... */
+ }
+ }
+
+ /* ... and barf if they all failed! */
+ if (list_empty(&kdev->queue_ranges)) {
+ dev_err(kdev->dev, "no valid queue range found\n");
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static void knav_free_queue_range(struct knav_device *kdev,
+ struct knav_range_info *range)
+{
+ if (range->ops && range->ops->free_range)
+ range->ops->free_range(range);
+ list_del(&range->list);
+ devm_kfree(kdev->dev, range);
+}
+
+static void knav_free_queue_ranges(struct knav_device *kdev)
+{
+ struct knav_range_info *range;
+
+ for (;;) {
+ range = first_queue_range(kdev);
+ if (!range)
+ break;
+ knav_free_queue_range(kdev, range);
+ }
+}
+
+static void knav_queue_free_regions(struct knav_device *kdev)
+{
+ struct knav_region *region;
+ struct knav_pool *pool, *tmp;
+ unsigned size;
+
+ for (;;) {
+ region = first_region(kdev);
+ if (!region)
+ break;
+ list_for_each_entry_safe(pool, tmp, &region->pools, region_inst)
+ knav_pool_destroy(pool);
+
+ size = region->virt_end - region->virt_start;
+ if (size)
+ free_pages_exact(region->virt_start, size);
+ list_del(&region->list);
+ devm_kfree(kdev->dev, region);
+ }
+}
+
+static void __iomem *knav_queue_map_reg(struct knav_device *kdev,
+ struct device_node *node, int index)
+{
+ struct resource res;
+ void __iomem *regs;
+ int ret;
+
+ ret = of_address_to_resource(node, index, &res);
+ if (ret) {
+ dev_err(kdev->dev, "Can't translate of node(%s) address for index(%d)\n",
+ node->name, index);
+ return ERR_PTR(ret);
+ }
+
+ regs = devm_ioremap_resource(kdev->dev, &res);
+ if (IS_ERR(regs))
+ dev_err(kdev->dev, "Failed to map register base for index(%d) node(%s)\n",
+ index, node->name);
+ return regs;
+}
+
+static int knav_queue_init_qmgrs(struct knav_device *kdev,
+ struct device_node *qmgrs)
+{
+ struct device *dev = kdev->dev;
+ struct knav_qmgr_info *qmgr;
+ struct device_node *child;
+ u32 temp[2];
+ int ret;
+
+ for_each_child_of_node(qmgrs, child) {
+ qmgr = devm_kzalloc(dev, sizeof(*qmgr), GFP_KERNEL);
+ if (!qmgr) {
+ dev_err(dev, "out of memory allocating qmgr\n");
+ return -ENOMEM;
+ }
+
+ ret = of_property_read_u32_array(child, "managed-queues",
+ temp, 2);
+ if (!ret) {
+ qmgr->start_queue = temp[0];
+ qmgr->num_queues = temp[1];
+ } else {
+ dev_err(dev, "invalid qmgr queue range\n");
+ devm_kfree(dev, qmgr);
+ continue;
+ }
+
+ dev_info(dev, "qmgr start queue %d, number of queues %d\n",
+ qmgr->start_queue, qmgr->num_queues);
+
+ qmgr->reg_peek =
+ knav_queue_map_reg(kdev, child,
+ KNAV_QUEUE_PEEK_REG_INDEX);
+
+ if (kdev->version == QMSS) {
+ qmgr->reg_status =
+ knav_queue_map_reg(kdev, child,
+ KNAV_QUEUE_STATUS_REG_INDEX);
+ }
+
+ qmgr->reg_config =
+ knav_queue_map_reg(kdev, child,
+ (kdev->version == QMSS_66AK2G) ?
+ KNAV_L_QUEUE_CONFIG_REG_INDEX :
+ KNAV_QUEUE_CONFIG_REG_INDEX);
+ qmgr->reg_region =
+ knav_queue_map_reg(kdev, child,
+ (kdev->version == QMSS_66AK2G) ?
+ KNAV_L_QUEUE_REGION_REG_INDEX :
+ KNAV_QUEUE_REGION_REG_INDEX);
+
+ qmgr->reg_push =
+ knav_queue_map_reg(kdev, child,
+ (kdev->version == QMSS_66AK2G) ?
+ KNAV_L_QUEUE_PUSH_REG_INDEX :
+ KNAV_QUEUE_PUSH_REG_INDEX);
+
+ if (kdev->version == QMSS) {
+ qmgr->reg_pop =
+ knav_queue_map_reg(kdev, child,
+ KNAV_QUEUE_POP_REG_INDEX);
+ }
+
+ if (IS_ERR(qmgr->reg_peek) ||
+ ((kdev->version == QMSS) &&
+ (IS_ERR(qmgr->reg_status) || IS_ERR(qmgr->reg_pop))) ||
+ IS_ERR(qmgr->reg_config) || IS_ERR(qmgr->reg_region) ||
+ IS_ERR(qmgr->reg_push)) {
+ dev_err(dev, "failed to map qmgr regs\n");
+ if (kdev->version == QMSS) {
+ if (!IS_ERR(qmgr->reg_status))
+ devm_iounmap(dev, qmgr->reg_status);
+ if (!IS_ERR(qmgr->reg_pop))
+ devm_iounmap(dev, qmgr->reg_pop);
+ }
+ if (!IS_ERR(qmgr->reg_peek))
+ devm_iounmap(dev, qmgr->reg_peek);
+ if (!IS_ERR(qmgr->reg_config))
+ devm_iounmap(dev, qmgr->reg_config);
+ if (!IS_ERR(qmgr->reg_region))
+ devm_iounmap(dev, qmgr->reg_region);
+ if (!IS_ERR(qmgr->reg_push))
+ devm_iounmap(dev, qmgr->reg_push);
+ devm_kfree(dev, qmgr);
+ continue;
+ }
+
+ /* Use same push register for pop as well */
+ if (kdev->version == QMSS_66AK2G)
+ qmgr->reg_pop = qmgr->reg_push;
+
+ list_add_tail(&qmgr->list, &kdev->qmgrs);
+ dev_info(dev, "added qmgr start queue %d, num of queues %d, reg_peek %p, reg_status %p, reg_config %p, reg_region %p, reg_push %p, reg_pop %p\n",
+ qmgr->start_queue, qmgr->num_queues,
+ qmgr->reg_peek, qmgr->reg_status,
+ qmgr->reg_config, qmgr->reg_region,
+ qmgr->reg_push, qmgr->reg_pop);
+ }
+ return 0;
+}
+
+static int knav_queue_init_pdsps(struct knav_device *kdev,
+ struct device_node *pdsps)
+{
+ struct device *dev = kdev->dev;
+ struct knav_pdsp_info *pdsp;
+ struct device_node *child;
+
+ for_each_child_of_node(pdsps, child) {
+ pdsp = devm_kzalloc(dev, sizeof(*pdsp), GFP_KERNEL);
+ if (!pdsp) {
+ dev_err(dev, "out of memory allocating pdsp\n");
+ return -ENOMEM;
+ }
+ pdsp->name = knav_queue_find_name(child);
+ pdsp->iram =
+ knav_queue_map_reg(kdev, child,
+ KNAV_QUEUE_PDSP_IRAM_REG_INDEX);
+ pdsp->regs =
+ knav_queue_map_reg(kdev, child,
+ KNAV_QUEUE_PDSP_REGS_REG_INDEX);
+ pdsp->intd =
+ knav_queue_map_reg(kdev, child,
+ KNAV_QUEUE_PDSP_INTD_REG_INDEX);
+ pdsp->command =
+ knav_queue_map_reg(kdev, child,
+ KNAV_QUEUE_PDSP_CMD_REG_INDEX);
+
+ if (IS_ERR(pdsp->command) || IS_ERR(pdsp->iram) ||
+ IS_ERR(pdsp->regs) || IS_ERR(pdsp->intd)) {
+ dev_err(dev, "failed to map pdsp %s regs\n",
+ pdsp->name);
+ if (!IS_ERR(pdsp->command))
+ devm_iounmap(dev, pdsp->command);
+ if (!IS_ERR(pdsp->iram))
+ devm_iounmap(dev, pdsp->iram);
+ if (!IS_ERR(pdsp->regs))
+ devm_iounmap(dev, pdsp->regs);
+ if (!IS_ERR(pdsp->intd))
+ devm_iounmap(dev, pdsp->intd);
+ devm_kfree(dev, pdsp);
+ continue;
+ }
+ of_property_read_u32(child, "id", &pdsp->id);
+ list_add_tail(&pdsp->list, &kdev->pdsps);
+ dev_dbg(dev, "added pdsp %s: command %p, iram %p, regs %p, intd %p\n",
+ pdsp->name, pdsp->command, pdsp->iram, pdsp->regs,
+ pdsp->intd);
+ }
+ return 0;
+}
+
+static int knav_queue_stop_pdsp(struct knav_device *kdev,
+ struct knav_pdsp_info *pdsp)
+{
+ u32 val, timeout = 1000;
+ int ret;
+
+ val = readl_relaxed(&pdsp->regs->control) & ~PDSP_CTRL_ENABLE;
+ writel_relaxed(val, &pdsp->regs->control);
+ ret = knav_queue_pdsp_wait(&pdsp->regs->control, timeout,
+ PDSP_CTRL_RUNNING);
+ if (ret < 0) {
+ dev_err(kdev->dev, "timed out on pdsp %s stop\n", pdsp->name);
+ return ret;
+ }
+ pdsp->loaded = false;
+ pdsp->started = false;
+ return 0;
+}
+
+static int knav_queue_load_pdsp(struct knav_device *kdev,
+ struct knav_pdsp_info *pdsp)
+{
+ int i, ret, fwlen;
+ const struct firmware *fw;
+ bool found = false;
+ u32 *fwdata;
+
+ for (i = 0; i < ARRAY_SIZE(knav_acc_firmwares); i++) {
+ if (knav_acc_firmwares[i]) {
+ ret = request_firmware_direct(&fw,
+ knav_acc_firmwares[i],
+ kdev->dev);
+ if (!ret) {
+ found = true;
+ break;
+ }
+ }
+ }
+
+ if (!found) {
+ dev_err(kdev->dev, "failed to get firmware for pdsp\n");
+ return -ENODEV;
+ }
+
+ dev_info(kdev->dev, "firmware file %s downloaded for PDSP\n",
+ knav_acc_firmwares[i]);
+
+ writel_relaxed(pdsp->id + 1, pdsp->command + 0x18);
+ /* download the firmware */
+ fwdata = (u32 *)fw->data;
+ fwlen = (fw->size + sizeof(u32) - 1) / sizeof(u32);
+ for (i = 0; i < fwlen; i++)
+ writel_relaxed(be32_to_cpu(fwdata[i]), pdsp->iram + i);
+
+ release_firmware(fw);
+ return 0;
+}
+
+static int knav_queue_start_pdsp(struct knav_device *kdev,
+ struct knav_pdsp_info *pdsp)
+{
+ u32 val, timeout = 1000;
+ int ret;
+
+ /* write a command for sync */
+ writel_relaxed(0xffffffff, pdsp->command);
+ while (readl_relaxed(pdsp->command) != 0xffffffff)
+ cpu_relax();
+
+ /* soft reset the PDSP */
+ val = readl_relaxed(&pdsp->regs->control);
+ val &= ~(PDSP_CTRL_PC_MASK | PDSP_CTRL_SOFT_RESET);
+ writel_relaxed(val, &pdsp->regs->control);
+
+ /* enable pdsp */
+ val = readl_relaxed(&pdsp->regs->control) | PDSP_CTRL_ENABLE;
+ writel_relaxed(val, &pdsp->regs->control);
+
+ /* wait for command register to clear */
+ ret = knav_queue_pdsp_wait(pdsp->command, timeout, 0);
+ if (ret < 0) {
+ dev_err(kdev->dev,
+ "timed out on pdsp %s command register wait\n",
+ pdsp->name);
+ return ret;
+ }
+ return 0;
+}
+
+static void knav_queue_stop_pdsps(struct knav_device *kdev)
+{
+ struct knav_pdsp_info *pdsp;
+
+ /* disable all pdsps */
+ for_each_pdsp(kdev, pdsp)
+ knav_queue_stop_pdsp(kdev, pdsp);
+}
+
+static int knav_queue_start_pdsps(struct knav_device *kdev)
+{
+ struct knav_pdsp_info *pdsp;
+ int ret;
+
+ knav_queue_stop_pdsps(kdev);
+	/* now load them all. We return success even if a pdsp is not
+	 * loaded, as the acc channels are optional and depend on firmware
+	 * being available in the system. We set the loaded and started
+	 * flags here, and when initializing an acc range we check them
+	 * and init the range only if its pdsp is started.
+	 */
+ for_each_pdsp(kdev, pdsp) {
+ ret = knav_queue_load_pdsp(kdev, pdsp);
+ if (!ret)
+ pdsp->loaded = true;
+ }
+
+ for_each_pdsp(kdev, pdsp) {
+ if (pdsp->loaded) {
+ ret = knav_queue_start_pdsp(kdev, pdsp);
+ if (!ret)
+ pdsp->started = true;
+ }
+ }
+ return 0;
+}
+
+static inline struct knav_qmgr_info *knav_find_qmgr(unsigned id)
+{
+ struct knav_qmgr_info *qmgr;
+
+ for_each_qmgr(kdev, qmgr) {
+ if ((id >= qmgr->start_queue) &&
+ (id < qmgr->start_queue + qmgr->num_queues))
+ return qmgr;
+ }
+ return NULL;
+}
+
+static int knav_queue_init_queue(struct knav_device *kdev,
+ struct knav_range_info *range,
+ struct knav_queue_inst *inst,
+ unsigned id)
+{
+ char irq_name[KNAV_NAME_SIZE];
+ inst->qmgr = knav_find_qmgr(id);
+ if (!inst->qmgr)
+ return -1;
+
+ INIT_LIST_HEAD(&inst->handles);
+ inst->kdev = kdev;
+ inst->range = range;
+ inst->irq_num = -1;
+ inst->id = id;
+ scnprintf(irq_name, sizeof(irq_name), "hwqueue-%d", id);
+ inst->irq_name = kstrndup(irq_name, sizeof(irq_name), GFP_KERNEL);
+
+ if (range->ops && range->ops->init_queue)
+ return range->ops->init_queue(range, inst);
+ else
+ return 0;
+}
+
+static int knav_queue_init_queues(struct knav_device *kdev)
+{
+ struct knav_range_info *range;
+ int size, id, base_idx;
+ int idx = 0, ret = 0;
+
+ /* how much do we need for instance data? */
+ size = sizeof(struct knav_queue_inst);
+
+	/* round this up to a power of 2, keep the index to instance
+	 * arithmetic fast.
+	 */
+ kdev->inst_shift = order_base_2(size);
+ size = (1 << kdev->inst_shift) * kdev->num_queues_in_use;
+ kdev->instances = devm_kzalloc(kdev->dev, size, GFP_KERNEL);
+ if (!kdev->instances)
+ return -ENOMEM;
+
+ for_each_queue_range(kdev, range) {
+ if (range->ops && range->ops->init_range)
+ range->ops->init_range(range);
+ base_idx = idx;
+ for (id = range->queue_base;
+ id < range->queue_base + range->num_queues; id++, idx++) {
+ ret = knav_queue_init_queue(kdev, range,
+ knav_queue_idx_to_inst(kdev, idx), id);
+ if (ret < 0)
+ return ret;
+ }
+ range->queue_base_inst =
+ knav_queue_idx_to_inst(kdev, base_idx);
+ }
+ return 0;
+}
+
+/* Match table for of_platform binding */
+static const struct of_device_id keystone_qmss_of_match[] = {
+ {
+ .compatible = "ti,keystone-navigator-qmss",
+ },
+ {
+ .compatible = "ti,66ak2g-navss-qm",
+ .data = (void *)QMSS_66AK2G,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, keystone_qmss_of_match);
+
+static int knav_queue_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *qmgrs, *queue_pools, *regions, *pdsps;
+ const struct of_device_id *match;
+ struct device *dev = &pdev->dev;
+ u32 temp[2];
+ int ret;
+
+ if (!node) {
+ dev_err(dev, "device tree info unavailable\n");
+ return -ENODEV;
+ }
+
+ kdev = devm_kzalloc(dev, sizeof(struct knav_device), GFP_KERNEL);
+ if (!kdev) {
+ dev_err(dev, "memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ match = of_match_device(of_match_ptr(keystone_qmss_of_match), dev);
+ if (match && match->data)
+ kdev->version = QMSS_66AK2G;
+
+ platform_set_drvdata(pdev, kdev);
+ kdev->dev = dev;
+ INIT_LIST_HEAD(&kdev->queue_ranges);
+ INIT_LIST_HEAD(&kdev->qmgrs);
+ INIT_LIST_HEAD(&kdev->pools);
+ INIT_LIST_HEAD(&kdev->regions);
+ INIT_LIST_HEAD(&kdev->pdsps);
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
+ dev_err(dev, "Failed to enable QMSS\n");
+ return ret;
+ }
+
+ if (of_property_read_u32_array(node, "queue-range", temp, 2)) {
+ dev_err(dev, "queue-range not specified\n");
+ ret = -ENODEV;
+ goto err;
+ }
+ kdev->base_id = temp[0];
+ kdev->num_queues = temp[1];
+
+ /* Initialize queue managers using device tree configuration */
+ qmgrs = of_get_child_by_name(node, "qmgrs");
+ if (!qmgrs) {
+ dev_err(dev, "queue manager info not specified\n");
+ ret = -ENODEV;
+ goto err;
+ }
+ ret = knav_queue_init_qmgrs(kdev, qmgrs);
+ of_node_put(qmgrs);
+ if (ret)
+ goto err;
+
+ /* get pdsp configuration values from device tree */
+ pdsps = of_get_child_by_name(node, "pdsps");
+ if (pdsps) {
+ ret = knav_queue_init_pdsps(kdev, pdsps);
+ if (ret)
+ goto err;
+
+ ret = knav_queue_start_pdsps(kdev);
+ if (ret)
+ goto err;
+ }
+ of_node_put(pdsps);
+
+ /* get usable queue range values from device tree */
+ queue_pools = of_get_child_by_name(node, "queue-pools");
+ if (!queue_pools) {
+ dev_err(dev, "queue-pools not specified\n");
+ ret = -ENODEV;
+ goto err;
+ }
+ ret = knav_setup_queue_pools(kdev, queue_pools);
+ of_node_put(queue_pools);
+ if (ret)
+ goto err;
+
+ ret = knav_get_link_ram(kdev, "linkram0", &kdev->link_rams[0]);
+ if (ret) {
+ dev_err(kdev->dev, "could not setup linking ram\n");
+ goto err;
+ }
+
+ ret = knav_get_link_ram(kdev, "linkram1", &kdev->link_rams[1]);
+ if (ret) {
+ /*
+ * nothing really, we have one linking ram already, so we just
+ * live within our means
+ */
+ }
+
+ ret = knav_queue_setup_link_ram(kdev);
+ if (ret)
+ goto err;
+
+ regions = of_get_child_by_name(node, "descriptor-regions");
+ if (!regions) {
+ dev_err(dev, "descriptor-regions not specified\n");
+ ret = -ENODEV;
+ goto err;
+ }
+ ret = knav_queue_setup_regions(kdev, regions);
+ of_node_put(regions);
+ if (ret)
+ goto err;
+
+ ret = knav_queue_init_queues(kdev);
+ if (ret < 0) {
+ dev_err(dev, "hwqueue initialization failed\n");
+ goto err;
+ }
+
+ debugfs_create_file("qmss", S_IFREG | S_IRUGO, NULL, NULL,
+ &knav_queue_debug_ops);
+ device_ready = true;
+ return 0;
+
+err:
+ knav_queue_stop_pdsps(kdev);
+ knav_queue_free_regions(kdev);
+ knav_free_queue_ranges(kdev);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+}
+
+static int knav_queue_remove(struct platform_device *pdev)
+{
+ /* TODO: Free resources */
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static struct platform_driver keystone_qmss_driver = {
+ .probe = knav_queue_probe,
+ .remove = knav_queue_remove,
+ .driver = {
+ .name = "keystone-navigator-qmss",
+ .of_match_table = keystone_qmss_of_match,
+ },
+};
+module_platform_driver(keystone_qmss_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI QMSS driver for Keystone SOCs");
+MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
+MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");
diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c
new file mode 100644
index 000000000..d0dab3236
--- /dev/null
+++ b/drivers/soc/ti/pm33xx.c
@@ -0,0 +1,363 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AM33XX Power Management Routines
+ *
+ * Copyright (C) 2012-2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Vaibhav Bedia, Dave Gerlach
+ */
+
+#include <linux/cpu.h>
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_data/pm33xx.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+#include <linux/sram.h>
+#include <linux/suspend.h>
+#include <linux/ti-emif-sram.h>
+#include <linux/wkup_m3_ipc.h>
+
+#include <asm/proc-fns.h>
+#include <asm/suspend.h>
+#include <asm/system_misc.h>
+
+#define AMX3_PM_SRAM_SYMBOL_OFFSET(sym) ((unsigned long)(sym) - \
+ (unsigned long)pm_sram->do_wfi)
+
+static int (*am33xx_do_wfi_sram)(unsigned long unused);
+static phys_addr_t am33xx_do_wfi_sram_phys;
+
+static struct gen_pool *sram_pool, *sram_pool_data;
+static unsigned long ocmcram_location, ocmcram_location_data;
+
+static struct am33xx_pm_platform_data *pm_ops;
+static struct am33xx_pm_sram_addr *pm_sram;
+
+static struct device *pm33xx_dev;
+static struct wkup_m3_ipc *m3_ipc;
+
+static unsigned long suspend_wfi_flags;
+
+static u32 sram_suspend_address(unsigned long addr)
+{
+ return ((unsigned long)am33xx_do_wfi_sram +
+ AMX3_PM_SRAM_SYMBOL_OFFSET(addr));
+}
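+
+/*
+ * Example of the offset arithmetic above (the 0x60 is hypothetical):
+ * if a symbol sits 0x60 bytes into the assembly blob that starts at
+ * pm_sram->do_wfi, AMX3_PM_SRAM_SYMBOL_OFFSET() yields 0x60 and
+ * sram_suspend_address() returns the address of the copied symbol,
+ * am33xx_do_wfi_sram + 0x60.
+ */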
+
+#ifdef CONFIG_SUSPEND
+static int am33xx_pm_suspend(suspend_state_t suspend_state)
+{
+ int i, ret = 0;
+
+ ret = pm_ops->soc_suspend((unsigned long)suspend_state,
+ am33xx_do_wfi_sram, suspend_wfi_flags);
+
+ if (ret) {
+ dev_err(pm33xx_dev, "PM: Kernel suspend failure\n");
+ } else {
+ i = m3_ipc->ops->request_pm_status(m3_ipc);
+
+ switch (i) {
+ case 0:
+ dev_info(pm33xx_dev,
+ "PM: Successfully put all powerdomains to target state\n");
+ break;
+ case 1:
+ dev_err(pm33xx_dev,
+ "PM: Could not transition all powerdomains to target state\n");
+ ret = -1;
+ break;
+ default:
+ dev_err(pm33xx_dev,
+ "PM: CM3 returned unknown result = %d\n", i);
+ ret = -1;
+ }
+ }
+
+ return ret;
+}
+
+static int am33xx_pm_enter(suspend_state_t suspend_state)
+{
+ int ret = 0;
+
+ switch (suspend_state) {
+ case PM_SUSPEND_MEM:
+ case PM_SUSPEND_STANDBY:
+ ret = am33xx_pm_suspend(suspend_state);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int am33xx_pm_begin(suspend_state_t state)
+{
+ int ret = -EINVAL;
+
+ switch (state) {
+ case PM_SUSPEND_MEM:
+ ret = m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_DEEPSLEEP);
+ break;
+ case PM_SUSPEND_STANDBY:
+ ret = m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_STANDBY);
+ break;
+ }
+
+ return ret;
+}
+
+static void am33xx_pm_end(void)
+{
+ m3_ipc->ops->finish_low_power(m3_ipc);
+}
+
+static int am33xx_pm_valid(suspend_state_t state)
+{
+ switch (state) {
+ case PM_SUSPEND_STANDBY:
+ case PM_SUSPEND_MEM:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static const struct platform_suspend_ops am33xx_pm_ops = {
+ .begin = am33xx_pm_begin,
+ .end = am33xx_pm_end,
+ .enter = am33xx_pm_enter,
+ .valid = am33xx_pm_valid,
+};
+#endif /* CONFIG_SUSPEND */
+
+static void am33xx_pm_set_ipc_ops(void)
+{
+ u32 resume_address;
+ int temp;
+
+ temp = ti_emif_get_mem_type();
+ if (temp < 0) {
+ dev_err(pm33xx_dev, "PM: Cannot determine memory type, no PM available\n");
+ return;
+ }
+ m3_ipc->ops->set_mem_type(m3_ipc, temp);
+
+ /* Physical resume address to be used by ROM code */
+ resume_address = am33xx_do_wfi_sram_phys +
+ *pm_sram->resume_offset + 0x4;
+
+ m3_ipc->ops->set_resume_address(m3_ipc, (void *)resume_address);
+}
+
+static void am33xx_pm_free_sram(void)
+{
+ gen_pool_free(sram_pool, ocmcram_location, *pm_sram->do_wfi_sz);
+	/* size must match the allocation in am33xx_pm_alloc_sram() */
+	gen_pool_free(sram_pool_data, ocmcram_location_data,
+		      sizeof(struct emif_regs_amx3));
+}
+
+/*
+ * Allocate SRAM for the minimal suspend-resume code and its data;
+ * am33xx_push_sram_idle() does the actual copy
+ */
+static int am33xx_pm_alloc_sram(void)
+{
+ struct device_node *np;
+ int ret = 0;
+
+ np = of_find_compatible_node(NULL, NULL, "ti,omap3-mpu");
+ if (!np) {
+ np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
+ if (!np) {
+ dev_err(pm33xx_dev, "PM: %s: Unable to find device node for mpu\n",
+ __func__);
+ return -ENODEV;
+ }
+ }
+
+ sram_pool = of_gen_pool_get(np, "pm-sram", 0);
+ if (!sram_pool) {
+ dev_err(pm33xx_dev, "PM: %s: Unable to get sram pool for ocmcram\n",
+ __func__);
+ ret = -ENODEV;
+ goto mpu_put_node;
+ }
+
+ sram_pool_data = of_gen_pool_get(np, "pm-sram", 1);
+ if (!sram_pool_data) {
+ dev_err(pm33xx_dev, "PM: %s: Unable to get sram data pool for ocmcram\n",
+ __func__);
+ ret = -ENODEV;
+ goto mpu_put_node;
+ }
+
+ ocmcram_location = gen_pool_alloc(sram_pool, *pm_sram->do_wfi_sz);
+ if (!ocmcram_location) {
+ dev_err(pm33xx_dev, "PM: %s: Unable to allocate memory from ocmcram\n",
+ __func__);
+ ret = -ENOMEM;
+ goto mpu_put_node;
+ }
+
+ ocmcram_location_data = gen_pool_alloc(sram_pool_data,
+ sizeof(struct emif_regs_amx3));
+ if (!ocmcram_location_data) {
+ dev_err(pm33xx_dev, "PM: Unable to allocate memory from ocmcram\n");
+ gen_pool_free(sram_pool, ocmcram_location, *pm_sram->do_wfi_sz);
+ ret = -ENOMEM;
+ }
+
+mpu_put_node:
+ of_node_put(np);
+ return ret;
+}
+
+static int am33xx_push_sram_idle(void)
+{
+ struct am33xx_pm_ro_sram_data ro_sram_data;
+ int ret;
+ u32 table_addr, ro_data_addr;
+ void *copy_addr;
+
+ ro_sram_data.amx3_pm_sram_data_virt = ocmcram_location_data;
+ ro_sram_data.amx3_pm_sram_data_phys =
+ gen_pool_virt_to_phys(sram_pool_data, ocmcram_location_data);
+ ro_sram_data.rtc_base_virt = pm_ops->get_rtc_base_addr();
+
+ /* Save physical address to calculate resume offset during pm init */
+ am33xx_do_wfi_sram_phys = gen_pool_virt_to_phys(sram_pool,
+ ocmcram_location);
+
+ am33xx_do_wfi_sram = sram_exec_copy(sram_pool, (void *)ocmcram_location,
+ pm_sram->do_wfi,
+ *pm_sram->do_wfi_sz);
+ if (!am33xx_do_wfi_sram) {
+ dev_err(pm33xx_dev,
+ "PM: %s: am33xx_do_wfi copy to sram failed\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ table_addr =
+ sram_suspend_address((unsigned long)pm_sram->emif_sram_table);
+ ret = ti_emif_copy_pm_function_table(sram_pool, (void *)table_addr);
+ if (ret) {
+ dev_dbg(pm33xx_dev,
+ "PM: %s: EMIF function copy failed\n", __func__);
+ return -EPROBE_DEFER;
+ }
+
+ ro_data_addr =
+ sram_suspend_address((unsigned long)pm_sram->ro_sram_data);
+ copy_addr = sram_exec_copy(sram_pool, (void *)ro_data_addr,
+ &ro_sram_data,
+ sizeof(ro_sram_data));
+ if (!copy_addr) {
+ dev_err(pm33xx_dev,
+ "PM: %s: ro_sram_data copy to sram failed\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int am33xx_pm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ if (!of_machine_is_compatible("ti,am33xx") &&
+ !of_machine_is_compatible("ti,am43"))
+ return -ENODEV;
+
+ pm_ops = dev->platform_data;
+ if (!pm_ops) {
+ dev_err(dev, "PM: Cannot get core PM ops!\n");
+ return -ENODEV;
+ }
+
+ pm_sram = pm_ops->get_sram_addrs();
+ if (!pm_sram) {
+ dev_err(dev, "PM: Cannot get PM asm function addresses!!\n");
+ return -ENODEV;
+ }
+
+ pm33xx_dev = dev;
+
+ ret = am33xx_pm_alloc_sram();
+ if (ret)
+ return ret;
+
+ ret = am33xx_push_sram_idle();
+ if (ret)
+ goto err_free_sram;
+
+ m3_ipc = wkup_m3_ipc_get();
+ if (!m3_ipc) {
+ dev_dbg(dev, "PM: Cannot get wkup_m3_ipc handle\n");
+ ret = -EPROBE_DEFER;
+ goto err_free_sram;
+ }
+
+ am33xx_pm_set_ipc_ops();
+
+#ifdef CONFIG_SUSPEND
+ suspend_set_ops(&am33xx_pm_ops);
+#endif /* CONFIG_SUSPEND */
+
+	/*
+	 * For a system suspend we must flush the caches, we want
+	 * the DDR in self-refresh, we want to save the context
+	 * of the EMIF, and we want the wkup_m3 to handle the
+	 * low-power transition.
+	 */
+ suspend_wfi_flags |= WFI_FLAG_FLUSH_CACHE;
+ suspend_wfi_flags |= WFI_FLAG_SELF_REFRESH;
+ suspend_wfi_flags |= WFI_FLAG_SAVE_EMIF;
+ suspend_wfi_flags |= WFI_FLAG_WAKE_M3;
+
+ ret = pm_ops->init();
+ if (ret) {
+ dev_err(dev, "Unable to call core pm init!\n");
+ ret = -ENODEV;
+ goto err_put_wkup_m3_ipc;
+ }
+
+ return 0;
+
+err_put_wkup_m3_ipc:
+ wkup_m3_ipc_put(m3_ipc);
+err_free_sram:
+ am33xx_pm_free_sram();
+ pm33xx_dev = NULL;
+ return ret;
+}
+
+static int am33xx_pm_remove(struct platform_device *pdev)
+{
+ suspend_set_ops(NULL);
+ wkup_m3_ipc_put(m3_ipc);
+ am33xx_pm_free_sram();
+ return 0;
+}
+
+static struct platform_driver am33xx_pm_driver = {
+ .driver = {
+ .name = "pm33xx",
+ },
+ .probe = am33xx_pm_probe,
+ .remove = am33xx_pm_remove,
+};
+module_platform_driver(am33xx_pm_driver);
+
+MODULE_ALIAS("platform:pm33xx");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("am33xx power management driver");
diff --git a/drivers/soc/ti/ti_sci_pm_domains.c b/drivers/soc/ti/ti_sci_pm_domains.c
new file mode 100644
index 000000000..de31b9389
--- /dev/null
+++ b/drivers/soc/ti/ti_sci_pm_domains.c
@@ -0,0 +1,204 @@
+/*
+ * TI SCI Generic Power Domain Driver
+ *
+ * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com/
+ * J Keerthy <j-keerthy@ti.com>
+ * Dave Gerlach <d-gerlach@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+
+/**
+ * struct ti_sci_genpd_dev_data: holds data needed for every device attached
+ * to this genpd
+ * @idx: index of the device that identifies it with the system
+ * control processor.
+ */
+struct ti_sci_genpd_dev_data {
+ int idx;
+};
+
+/**
+ * struct ti_sci_pm_domain: TI specific data needed for power domain
+ * @ti_sci: handle to TI SCI protocol driver that provides ops to
+ * communicate with system control processor.
+ * @dev: pointer to dev for the driver for devm allocs
+ * @pd: generic_pm_domain for use with the genpd framework
+ */
+struct ti_sci_pm_domain {
+ const struct ti_sci_handle *ti_sci;
+ struct device *dev;
+ struct generic_pm_domain pd;
+};
+
+#define genpd_to_ti_sci_pd(gpd) container_of(gpd, struct ti_sci_pm_domain, pd)
+
+/**
+ * ti_sci_dev_id(): get prepopulated ti_sci id from struct dev
+ * @dev: pointer to device associated with this genpd
+ *
+ * Returns the ti_sci device id stored when the device was attached
+ */
+static int ti_sci_dev_id(struct device *dev)
+{
+ struct generic_pm_domain_data *genpd_data = dev_gpd_data(dev);
+ struct ti_sci_genpd_dev_data *sci_dev_data = genpd_data->data;
+
+ return sci_dev_data->idx;
+}
+
+/**
+ * ti_sci_dev_to_sci_handle(): get pointer to ti_sci_handle
+ * @dev: pointer to device associated with this genpd
+ *
+ * Returns ti_sci_handle to be used to communicate with system
+ * control processor.
+ */
+static const struct ti_sci_handle *ti_sci_dev_to_sci_handle(struct device *dev)
+{
+ struct generic_pm_domain *pd = pd_to_genpd(dev->pm_domain);
+ struct ti_sci_pm_domain *ti_sci_genpd = genpd_to_ti_sci_pd(pd);
+
+ return ti_sci_genpd->ti_sci;
+}
+
+/**
+ * ti_sci_dev_start(): genpd device start hook called to turn device on
+ * @dev: pointer to device associated with this genpd to be powered on
+ */
+static int ti_sci_dev_start(struct device *dev)
+{
+ const struct ti_sci_handle *ti_sci = ti_sci_dev_to_sci_handle(dev);
+ int idx = ti_sci_dev_id(dev);
+
+ return ti_sci->ops.dev_ops.get_device(ti_sci, idx);
+}
+
+/**
+ * ti_sci_dev_stop(): genpd device stop hook called to turn device off
+ * @dev: pointer to device associated with this genpd to be powered off
+ */
+static int ti_sci_dev_stop(struct device *dev)
+{
+ const struct ti_sci_handle *ti_sci = ti_sci_dev_to_sci_handle(dev);
+ int idx = ti_sci_dev_id(dev);
+
+ return ti_sci->ops.dev_ops.put_device(ti_sci, idx);
+}
+
+static int ti_sci_pd_attach_dev(struct generic_pm_domain *domain,
+ struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct of_phandle_args pd_args;
+ struct ti_sci_pm_domain *ti_sci_genpd = genpd_to_ti_sci_pd(domain);
+ const struct ti_sci_handle *ti_sci = ti_sci_genpd->ti_sci;
+ struct ti_sci_genpd_dev_data *sci_dev_data;
+ struct generic_pm_domain_data *genpd_data;
+ int idx, ret = 0;
+
+ ret = of_parse_phandle_with_args(np, "power-domains",
+ "#power-domain-cells", 0, &pd_args);
+ if (ret < 0)
+ return ret;
+
+ if (pd_args.args_count != 1)
+ return -EINVAL;
+
+ idx = pd_args.args[0];
+
+ /*
+ * Check the validity of the requested idx, if the index is not valid
+ * the PMMC will return a NAK here and we will not allocate it.
+ */
+ ret = ti_sci->ops.dev_ops.is_valid(ti_sci, idx);
+ if (ret)
+ return -EINVAL;
+
+ sci_dev_data = kzalloc(sizeof(*sci_dev_data), GFP_KERNEL);
+ if (!sci_dev_data)
+ return -ENOMEM;
+
+ sci_dev_data->idx = idx;
+
+ genpd_data = dev_gpd_data(dev);
+ genpd_data->data = sci_dev_data;
+
+ return 0;
+}
+
+static void ti_sci_pd_detach_dev(struct generic_pm_domain *domain,
+ struct device *dev)
+{
+ struct generic_pm_domain_data *genpd_data = dev_gpd_data(dev);
+ struct ti_sci_genpd_dev_data *sci_dev_data = genpd_data->data;
+
+ kfree(sci_dev_data);
+ genpd_data->data = NULL;
+}
+
+static const struct of_device_id ti_sci_pm_domain_matches[] = {
+ { .compatible = "ti,sci-pm-domain", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ti_sci_pm_domain_matches);
+
+static int ti_sci_pm_domain_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct ti_sci_pm_domain *ti_sci_pd;
+ int ret;
+
+ ti_sci_pd = devm_kzalloc(dev, sizeof(*ti_sci_pd), GFP_KERNEL);
+ if (!ti_sci_pd)
+ return -ENOMEM;
+
+ ti_sci_pd->ti_sci = devm_ti_sci_get_handle(dev);
+ if (IS_ERR(ti_sci_pd->ti_sci))
+ return PTR_ERR(ti_sci_pd->ti_sci);
+
+ ti_sci_pd->dev = dev;
+
+ ti_sci_pd->pd.name = "ti_sci_pd";
+
+ ti_sci_pd->pd.attach_dev = ti_sci_pd_attach_dev;
+ ti_sci_pd->pd.detach_dev = ti_sci_pd_detach_dev;
+
+ ti_sci_pd->pd.dev_ops.start = ti_sci_dev_start;
+ ti_sci_pd->pd.dev_ops.stop = ti_sci_dev_stop;
+
+ pm_genpd_init(&ti_sci_pd->pd, NULL, true);
+
+ ret = of_genpd_add_provider_simple(np, &ti_sci_pd->pd);
+
+ return ret;
+}
+
+static struct platform_driver ti_sci_pm_domains_driver = {
+ .probe = ti_sci_pm_domain_probe,
+ .driver = {
+ .name = "ti_sci_pm_domains",
+ .of_match_table = ti_sci_pm_domain_matches,
+ },
+};
+module_platform_driver(ti_sci_pm_domains_driver);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI System Control Interface (SCI) Power Domain driver");
+MODULE_AUTHOR("Dave Gerlach");
diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c
new file mode 100644
index 000000000..8358b9750
--- /dev/null
+++ b/drivers/soc/ti/wkup_m3_ipc.c
@@ -0,0 +1,583 @@
+/*
+ * AMx3 Wkup M3 IPC driver
+ *
+ * Copyright (C) 2015 Texas Instruments, Inc.
+ *
+ * Dave Gerlach <d-gerlach@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/omap-mailbox.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/suspend.h>
+#include <linux/wkup_m3_ipc.h>
+
+#define AM33XX_CTRL_IPC_REG_COUNT 0x8
+#define AM33XX_CTRL_IPC_REG_OFFSET(m) (0x4 + 4 * (m))
+
+/* AM33XX M3_TXEV_EOI register */
+#define AM33XX_CONTROL_M3_TXEV_EOI 0x00
+
+#define AM33XX_M3_TXEV_ACK (0x1 << 0)
+#define AM33XX_M3_TXEV_ENABLE (0x0 << 0)
+
+#define IPC_CMD_DS0 0x4
+#define IPC_CMD_STANDBY 0xc
+#define IPC_CMD_IDLE 0x10
+#define IPC_CMD_RESET 0xe
+#define DS_IPC_DEFAULT 0xffffffff
+#define M3_VERSION_UNKNOWN 0x0000ffff
+#define M3_BASELINE_VERSION 0x191
+#define M3_STATUS_RESP_MASK (0xffff << 16)
+#define M3_FW_VERSION_MASK 0xffff
+#define M3_WAKE_SRC_MASK 0xff
+
+#define M3_STATE_UNKNOWN 0
+#define M3_STATE_RESET 1
+#define M3_STATE_INITED 2
+#define M3_STATE_MSG_FOR_LP 3
+#define M3_STATE_MSG_FOR_RESET 4
+
+static struct wkup_m3_ipc *m3_ipc_state;
+
+static const struct wkup_m3_wakeup_src wakeups[] = {
+ {.irq_nr = 35, .src = "USB0_PHY"},
+ {.irq_nr = 36, .src = "USB1_PHY"},
+ {.irq_nr = 40, .src = "I2C0"},
+ {.irq_nr = 41, .src = "RTC Timer"},
+ {.irq_nr = 42, .src = "RTC Alarm"},
+ {.irq_nr = 43, .src = "Timer0"},
+ {.irq_nr = 44, .src = "Timer1"},
+ {.irq_nr = 45, .src = "UART"},
+ {.irq_nr = 46, .src = "GPIO0"},
+ {.irq_nr = 48, .src = "MPU_WAKE"},
+ {.irq_nr = 49, .src = "WDT0"},
+ {.irq_nr = 50, .src = "WDT1"},
+ {.irq_nr = 51, .src = "ADC_TSC"},
+ {.irq_nr = 0, .src = "Unknown"},
+};
+
+static void am33xx_txev_eoi(struct wkup_m3_ipc *m3_ipc)
+{
+ writel(AM33XX_M3_TXEV_ACK,
+ m3_ipc->ipc_mem_base + AM33XX_CONTROL_M3_TXEV_EOI);
+}
+
+static void am33xx_txev_enable(struct wkup_m3_ipc *m3_ipc)
+{
+ writel(AM33XX_M3_TXEV_ENABLE,
+ m3_ipc->ipc_mem_base + AM33XX_CONTROL_M3_TXEV_EOI);
+}
+
+static void wkup_m3_ctrl_ipc_write(struct wkup_m3_ipc *m3_ipc,
+ u32 val, int ipc_reg_num)
+{
+	if (WARN(ipc_reg_num < 0 || ipc_reg_num >= AM33XX_CTRL_IPC_REG_COUNT,
+ "ipc register operation out of range"))
+ return;
+
+ writel(val, m3_ipc->ipc_mem_base +
+ AM33XX_CTRL_IPC_REG_OFFSET(ipc_reg_num));
+}
+
+static unsigned int wkup_m3_ctrl_ipc_read(struct wkup_m3_ipc *m3_ipc,
+ int ipc_reg_num)
+{
+	if (WARN(ipc_reg_num < 0 || ipc_reg_num >= AM33XX_CTRL_IPC_REG_COUNT,
+ "ipc register operation out of range"))
+ return 0;
+
+ return readl(m3_ipc->ipc_mem_base +
+ AM33XX_CTRL_IPC_REG_OFFSET(ipc_reg_num));
+}
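+
+/*
+ * The offset macro above skips the first control register, so e.g.
+ * ipc_reg_num = 2 (the firmware version register read below) touches
+ * ipc_mem_base + 0x4 + 4 * 2 = ipc_mem_base + 0xc.
+ */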
+
+static int wkup_m3_fw_version_read(struct wkup_m3_ipc *m3_ipc)
+{
+ int val;
+
+ val = wkup_m3_ctrl_ipc_read(m3_ipc, 2);
+
+ return val & M3_FW_VERSION_MASK;
+}
+
+static irqreturn_t wkup_m3_txev_handler(int irq, void *ipc_data)
+{
+ struct wkup_m3_ipc *m3_ipc = ipc_data;
+ struct device *dev = m3_ipc->dev;
+ int ver = 0;
+
+ am33xx_txev_eoi(m3_ipc);
+
+ switch (m3_ipc->state) {
+ case M3_STATE_RESET:
+ ver = wkup_m3_fw_version_read(m3_ipc);
+
+ if (ver == M3_VERSION_UNKNOWN ||
+ ver < M3_BASELINE_VERSION) {
+ dev_warn(dev, "CM3 Firmware Version %x not supported\n",
+ ver);
+ } else {
+ dev_info(dev, "CM3 Firmware Version = 0x%x\n", ver);
+ }
+
+ m3_ipc->state = M3_STATE_INITED;
+ complete(&m3_ipc->sync_complete);
+ break;
+ case M3_STATE_MSG_FOR_RESET:
+ m3_ipc->state = M3_STATE_INITED;
+ complete(&m3_ipc->sync_complete);
+ break;
+ case M3_STATE_MSG_FOR_LP:
+ complete(&m3_ipc->sync_complete);
+ break;
+ case M3_STATE_UNKNOWN:
+ dev_warn(dev, "Unknown CM3 State\n");
+ }
+
+ am33xx_txev_enable(m3_ipc);
+
+ return IRQ_HANDLED;
+}
+
+static int wkup_m3_ping(struct wkup_m3_ipc *m3_ipc)
+{
+ struct device *dev = m3_ipc->dev;
+ mbox_msg_t dummy_msg = 0;
+ int ret;
+
+ if (!m3_ipc->mbox) {
+ dev_err(dev,
+ "No IPC channel to communicate with wkup_m3!\n");
+ return -EIO;
+ }
+
+ /*
+ * Write a dummy message to the mailbox in order to trigger the RX
+ * interrupt to alert the M3 that data is available in the IPC
+	 * registers. We must enable the IRQ here and disable it again in
+	 * the RX callback to avoid multiple interrupts being received
+	 * by the CM3.
+ */
+ ret = mbox_send_message(m3_ipc->mbox, &dummy_msg);
+ if (ret < 0) {
+ dev_err(dev, "%s: mbox_send_message() failed: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&m3_ipc->sync_complete,
+ msecs_to_jiffies(500));
+ if (!ret) {
+ dev_err(dev, "MPU<->CM3 sync failure\n");
+ m3_ipc->state = M3_STATE_UNKNOWN;
+ return -EIO;
+ }
+
+ mbox_client_txdone(m3_ipc->mbox, 0);
+ return 0;
+}
+
+static int wkup_m3_ping_noirq(struct wkup_m3_ipc *m3_ipc)
+{
+ struct device *dev = m3_ipc->dev;
+ mbox_msg_t dummy_msg = 0;
+ int ret;
+
+ if (!m3_ipc->mbox) {
+ dev_err(dev,
+ "No IPC channel to communicate with wkup_m3!\n");
+ return -EIO;
+ }
+
+ ret = mbox_send_message(m3_ipc->mbox, &dummy_msg);
+ if (ret < 0) {
+ dev_err(dev, "%s: mbox_send_message() failed: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ mbox_client_txdone(m3_ipc->mbox, 0);
+ return 0;
+}
+
+static int wkup_m3_is_available(struct wkup_m3_ipc *m3_ipc)
+{
+ return ((m3_ipc->state != M3_STATE_RESET) &&
+ (m3_ipc->state != M3_STATE_UNKNOWN));
+}
+
+/* Public functions */
+/**
+ * wkup_m3_set_mem_type - Pass wkup_m3 which type of memory is in use
+ * @mem_type: memory type value read directly from emif
+ *
+ * wkup_m3 must know what memory type is in use to properly suspend
+ * and resume.
+ */
+static void wkup_m3_set_mem_type(struct wkup_m3_ipc *m3_ipc, int mem_type)
+{
+ m3_ipc->mem_type = mem_type;
+}
+
+/**
+ * wkup_m3_set_resume_address - Pass wkup_m3 resume address
+ * @addr: Physical address from which resume code should execute
+ */
+static void wkup_m3_set_resume_address(struct wkup_m3_ipc *m3_ipc, void *addr)
+{
+ m3_ipc->resume_addr = (unsigned long)addr;
+}
+
+/**
+ * wkup_m3_request_pm_status - Retrieve wkup_m3 status code after suspend
+ *
+ * Returns code representing the status of a low power mode transition.
+ * 0 - Successful transition
+ * 1 - Failure to transition to low power state
+ */
+static int wkup_m3_request_pm_status(struct wkup_m3_ipc *m3_ipc)
+{
+ unsigned int i;
+ int val;
+
+ val = wkup_m3_ctrl_ipc_read(m3_ipc, 1);
+
+ i = M3_STATUS_RESP_MASK & val;
+ i >>= __ffs(M3_STATUS_RESP_MASK);
+
+ return i;
+}
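+
+/*
+ * Decode example for the masking above, with a made-up register value:
+ * val = 0x00010191 has 0x0001 in the M3_STATUS_RESP_MASK field, so
+ * after the shift by __ffs(M3_STATUS_RESP_MASK) = 16 this returns 1,
+ * i.e. the CM3 could not reach the target low power state.
+ */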
+
+/**
+ * wkup_m3_prepare_low_power - Request preparation for transition to
+ * low power state
+ * @state: A kernel suspend state to enter, either MEM or STANDBY
+ *
+ * Returns 0 if preparation was successful, otherwise returns error code
+ */
+static int wkup_m3_prepare_low_power(struct wkup_m3_ipc *m3_ipc, int state)
+{
+ struct device *dev = m3_ipc->dev;
+ int m3_power_state;
+ int ret = 0;
+
+ if (!wkup_m3_is_available(m3_ipc))
+ return -ENODEV;
+
+ switch (state) {
+ case WKUP_M3_DEEPSLEEP:
+ m3_power_state = IPC_CMD_DS0;
+ break;
+ case WKUP_M3_STANDBY:
+ m3_power_state = IPC_CMD_STANDBY;
+ break;
+ case WKUP_M3_IDLE:
+ m3_power_state = IPC_CMD_IDLE;
+ break;
+ default:
+ return 1;
+ }
+
+ /* Program each required IPC register then write defaults to others */
+ wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->resume_addr, 0);
+ wkup_m3_ctrl_ipc_write(m3_ipc, m3_power_state, 1);
+ wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->mem_type, 4);
+
+ wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 2);
+ wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 3);
+ wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 5);
+ wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 6);
+ wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 7);
+
+ m3_ipc->state = M3_STATE_MSG_FOR_LP;
+
+ if (state == WKUP_M3_IDLE)
+ ret = wkup_m3_ping_noirq(m3_ipc);
+ else
+ ret = wkup_m3_ping(m3_ipc);
+
+ if (ret) {
+ dev_err(dev, "Unable to ping CM3\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * wkup_m3_finish_low_power - Return m3 to reset state
+ *
+ * Returns 0 if reset was successful, otherwise returns error code
+ */
+static int wkup_m3_finish_low_power(struct wkup_m3_ipc *m3_ipc)
+{
+ struct device *dev = m3_ipc->dev;
+ int ret = 0;
+
+ if (!wkup_m3_is_available(m3_ipc))
+ return -ENODEV;
+
+ wkup_m3_ctrl_ipc_write(m3_ipc, IPC_CMD_RESET, 1);
+ wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 2);
+
+ m3_ipc->state = M3_STATE_MSG_FOR_RESET;
+
+ ret = wkup_m3_ping(m3_ipc);
+ if (ret) {
+ dev_err(dev, "Unable to ping CM3\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * wkup_m3_request_wake_src - Get the wakeup source info passed from wkup_m3
+ * @m3_ipc: Pointer to wkup_m3_ipc context
+ */
+static const char *wkup_m3_request_wake_src(struct wkup_m3_ipc *m3_ipc)
+{
+ unsigned int wakeup_src_idx;
+ int j, val;
+
+ val = wkup_m3_ctrl_ipc_read(m3_ipc, 6);
+
+ wakeup_src_idx = val & M3_WAKE_SRC_MASK;
+
+ for (j = 0; j < ARRAY_SIZE(wakeups) - 1; j++) {
+ if (wakeups[j].irq_nr == wakeup_src_idx)
+ return wakeups[j].src;
+ }
+ return wakeups[j].src;
+}
+
+/**
+ * wkup_m3_set_rtc_only - Set the rtc_only flag
+ * @m3_ipc: Pointer to wkup_m3_ipc context
+ */
+static void wkup_m3_set_rtc_only(struct wkup_m3_ipc *m3_ipc)
+{
+ if (m3_ipc_state)
+ m3_ipc_state->is_rtc_only = true;
+}
+
+static struct wkup_m3_ipc_ops ipc_ops = {
+ .set_mem_type = wkup_m3_set_mem_type,
+ .set_resume_address = wkup_m3_set_resume_address,
+ .prepare_low_power = wkup_m3_prepare_low_power,
+ .finish_low_power = wkup_m3_finish_low_power,
+ .request_pm_status = wkup_m3_request_pm_status,
+ .request_wake_src = wkup_m3_request_wake_src,
+ .set_rtc_only = wkup_m3_set_rtc_only,
+};
+
+/**
+ * wkup_m3_ipc_get - Return handle to wkup_m3_ipc
+ *
+ * Returns NULL if the wkup_m3 is not yet available, otherwise returns
+ * pointer to wkup_m3_ipc struct.
+ */
+struct wkup_m3_ipc *wkup_m3_ipc_get(void)
+{
+ if (m3_ipc_state)
+ get_device(m3_ipc_state->dev);
+ else
+ return NULL;
+
+ return m3_ipc_state;
+}
+EXPORT_SYMBOL_GPL(wkup_m3_ipc_get);
+
+/**
+ * wkup_m3_ipc_put - Free handle to wkup_m3_ipc returned from wkup_m3_ipc_get
+ * @m3_ipc: A pointer to wkup_m3_ipc struct returned by wkup_m3_ipc_get
+ */
+void wkup_m3_ipc_put(struct wkup_m3_ipc *m3_ipc)
+{
+ if (m3_ipc_state)
+ put_device(m3_ipc_state->dev);
+}
+EXPORT_SYMBOL_GPL(wkup_m3_ipc_put);
+
+static void wkup_m3_rproc_boot_thread(struct wkup_m3_ipc *m3_ipc)
+{
+ struct device *dev = m3_ipc->dev;
+ int ret;
+
+ init_completion(&m3_ipc->sync_complete);
+
+ ret = rproc_boot(m3_ipc->rproc);
+ if (ret)
+ dev_err(dev, "rproc_boot failed\n");
+ else
+ m3_ipc_state = m3_ipc;
+
+ do_exit(0);
+}
+
+static int wkup_m3_ipc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int irq, ret;
+ phandle rproc_phandle;
+ struct rproc *m3_rproc;
+ struct resource *res;
+ struct task_struct *task;
+ struct wkup_m3_ipc *m3_ipc;
+
+ m3_ipc = devm_kzalloc(dev, sizeof(*m3_ipc), GFP_KERNEL);
+ if (!m3_ipc)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ m3_ipc->ipc_mem_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(m3_ipc->ipc_mem_base)) {
+ dev_err(dev, "could not ioremap ipc_mem\n");
+ return PTR_ERR(m3_ipc->ipc_mem_base);
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "no irq resource\n");
+ return irq;
+ }
+
+ ret = devm_request_irq(dev, irq, wkup_m3_txev_handler,
+ 0, "wkup_m3_txev", m3_ipc);
+ if (ret) {
+ dev_err(dev, "request_irq failed\n");
+ return ret;
+ }
+
+ m3_ipc->mbox_client.dev = dev;
+ m3_ipc->mbox_client.tx_done = NULL;
+ m3_ipc->mbox_client.tx_prepare = NULL;
+ m3_ipc->mbox_client.rx_callback = NULL;
+ m3_ipc->mbox_client.tx_block = false;
+ m3_ipc->mbox_client.knows_txdone = false;
+
+ m3_ipc->mbox = mbox_request_channel(&m3_ipc->mbox_client, 0);
+
+ if (IS_ERR(m3_ipc->mbox)) {
+ dev_err(dev, "IPC Request for A8->M3 Channel failed! %ld\n",
+ PTR_ERR(m3_ipc->mbox));
+ return PTR_ERR(m3_ipc->mbox);
+ }
+
+ if (of_property_read_u32(dev->of_node, "ti,rproc", &rproc_phandle)) {
+ dev_err(&pdev->dev, "could not get rproc phandle\n");
+ ret = -ENODEV;
+ goto err_free_mbox;
+ }
+
+ m3_rproc = rproc_get_by_phandle(rproc_phandle);
+ if (!m3_rproc) {
+ dev_err(&pdev->dev, "could not get rproc handle\n");
+ ret = -EPROBE_DEFER;
+ goto err_free_mbox;
+ }
+
+ m3_ipc->rproc = m3_rproc;
+ m3_ipc->dev = dev;
+ m3_ipc->state = M3_STATE_RESET;
+
+ m3_ipc->ops = &ipc_ops;
+
+ /*
+ * Wait for firmware loading completion in a thread so we
+ * can boot the wkup_m3 as soon as it's ready without holding
+ * up kernel boot
+ */
+ task = kthread_run((void *)wkup_m3_rproc_boot_thread, m3_ipc,
+ "wkup_m3_rproc_loader");
+
+ if (IS_ERR(task)) {
+ dev_err(dev, "can't create rproc_boot thread\n");
+ ret = PTR_ERR(task);
+ goto err_put_rproc;
+ }
+
+ return 0;
+
+err_put_rproc:
+ rproc_put(m3_rproc);
+err_free_mbox:
+ mbox_free_channel(m3_ipc->mbox);
+ return ret;
+}
+
+static int wkup_m3_ipc_remove(struct platform_device *pdev)
+{
+ mbox_free_channel(m3_ipc_state->mbox);
+
+ rproc_shutdown(m3_ipc_state->rproc);
+ rproc_put(m3_ipc_state->rproc);
+
+ m3_ipc_state = NULL;
+
+ return 0;
+}
+
+static int __maybe_unused wkup_m3_ipc_suspend(struct device *dev)
+{
+ /*
+ * Nothing needs to be done on suspend even with rtc_only flag set
+ */
+ return 0;
+}
+
+static int __maybe_unused wkup_m3_ipc_resume(struct device *dev)
+{
+ if (m3_ipc_state->is_rtc_only) {
+ rproc_shutdown(m3_ipc_state->rproc);
+ rproc_boot(m3_ipc_state->rproc);
+ }
+
+ m3_ipc_state->is_rtc_only = false;
+
+ return 0;
+}
+
+static const struct dev_pm_ops wkup_m3_ipc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(wkup_m3_ipc_suspend, wkup_m3_ipc_resume)
+};
+
+static const struct of_device_id wkup_m3_ipc_of_match[] = {
+ { .compatible = "ti,am3352-wkup-m3-ipc", },
+ { .compatible = "ti,am4372-wkup-m3-ipc", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, wkup_m3_ipc_of_match);
+
+static struct platform_driver wkup_m3_ipc_driver = {
+ .probe = wkup_m3_ipc_probe,
+ .remove = wkup_m3_ipc_remove,
+ .driver = {
+ .name = "wkup_m3_ipc",
+ .of_match_table = wkup_m3_ipc_of_match,
+ .pm = &wkup_m3_ipc_pm_ops,
+ },
+};
+
+module_platform_driver(wkup_m3_ipc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("wkup m3 remote processor ipc driver");
+MODULE_AUTHOR("Dave Gerlach <d-gerlach@ti.com>");
diff --git a/drivers/soc/ux500/Kconfig b/drivers/soc/ux500/Kconfig
new file mode 100644
index 000000000..025a44aef
--- /dev/null
+++ b/drivers/soc/ux500/Kconfig
@@ -0,0 +1,7 @@
+config UX500_SOC_ID
+ bool "SoC bus for ST-Ericsson ux500"
+ depends on ARCH_U8500 || COMPILE_TEST
+ default ARCH_U8500
+ help
+	  Include support for the SoC bus on the ST-Ericsson ux500
+	  platforms providing some sysfs information about the ASIC variant.
diff --git a/drivers/soc/ux500/Makefile b/drivers/soc/ux500/Makefile
new file mode 100644
index 000000000..0b87ad04b
--- /dev/null
+++ b/drivers/soc/ux500/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_UX500_SOC_ID) += ux500-soc-id.o
diff --git a/drivers/soc/ux500/ux500-soc-id.c b/drivers/soc/ux500/ux500-soc-id.c
new file mode 100644
index 000000000..6c1be74e5
--- /dev/null
+++ b/drivers/soc/ux500/ux500-soc-id.c
@@ -0,0 +1,222 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/sys_soc.h>
+
+#include <asm/cputype.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+#include <asm/mach/map.h>
+
+/**
+ * struct dbx500_asic_id - fields of the ASIC ID
+ * @partnumber: hitherto 0x8500 for DB8500
+ * @revision: version code in the series
+ * @process: the manufacturing process, 0x40 is 40 nm, 0x00 is "standard"
+ */
+struct dbx500_asic_id {
+ u16 partnumber;
+ u8 revision;
+ u8 process;
+};
+
+static struct dbx500_asic_id dbx500_id;
+
+static unsigned int __init ux500_read_asicid(phys_addr_t addr)
+{
+ void __iomem *virt = ioremap(addr, 4);
+ unsigned int asicid;
+
+ if (!virt)
+ return 0;
+
+ asicid = readl(virt);
+ iounmap(virt);
+
+ return asicid;
+}
+
+static void ux500_print_soc_info(unsigned int asicid)
+{
+ unsigned int rev = dbx500_id.revision;
+
+ pr_info("DB%4x ", dbx500_id.partnumber);
+
+ if (rev == 0x01)
+ pr_cont("Early Drop");
+ else if (rev >= 0xA0)
+ pr_cont("v%d.%d" , (rev >> 4) - 0xA + 1, rev & 0xf);
+ else
+ pr_cont("Unknown");
+
+ pr_cont(" [%#010x]\n", asicid);
+}
+
+static unsigned int partnumber(unsigned int asicid)
+{
+ return (asicid >> 8) & 0xffff;
+}
+
+/*
+ * SOC MIDR ASICID ADDRESS ASICID VALUE
+ * DB8500ed 0x410fc090 0x9001FFF4 0x00850001
+ * DB8500v1 0x411fc091 0x9001FFF4 0x008500A0
+ * DB8500v1.1 0x411fc091 0x9001FFF4 0x008500A1
+ * DB8500v2 0x412fc091 0x9001DBF4 0x008500B0
+ * DB8520v2.2 0x412fc091 0x9001DBF4 0x008500B2
+ * DB5500v1 0x412fc091 0x9001FFF4 0x005500A0
+ * DB9540 0x413fc090 0xFFFFDBF4 0x009540xx
+ */
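+
+/*
+ * Example decode, using the table above: ASICID 0x008500B0 means
+ * process 0x00 ("Standard"), part number 0x8500 and revision 0xB0,
+ * which ux500_print_soc_info() reports as "DB8500 v2.0".
+ */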
+
+static void __init ux500_setup_id(void)
+{
+ unsigned int cpuid = read_cpuid_id();
+ unsigned int asicid = 0;
+ phys_addr_t addr = 0;
+
+ switch (cpuid) {
+ case 0x410fc090: /* DB8500ed */
+ case 0x411fc091: /* DB8500v1 */
+ addr = 0x9001FFF4;
+ break;
+
+ case 0x412fc091: /* DB8520 / DB8500v2 / DB5500v1 */
+ asicid = ux500_read_asicid(0x9001DBF4);
+ if (partnumber(asicid) == 0x8500 ||
+ partnumber(asicid) == 0x8520)
+ /* DB8500v2 */
+ break;
+
+ /* DB5500v1 */
+ addr = 0x9001FFF4;
+ break;
+
+ case 0x413fc090: /* DB9540 */
+ addr = 0xFFFFDBF4;
+ break;
+ }
+
+ if (addr)
+ asicid = ux500_read_asicid(addr);
+
+ if (!asicid) {
+ pr_err("Unable to identify SoC\n");
+ BUG();
+ }
+
+ dbx500_id.process = asicid >> 24;
+ dbx500_id.partnumber = partnumber(asicid);
+ dbx500_id.revision = asicid & 0xff;
+
+ ux500_print_soc_info(asicid);
+}
+
+static const char * __init ux500_get_machine(void)
+{
+ return kasprintf(GFP_KERNEL, "DB%4x", dbx500_id.partnumber);
+}
+
+static const char * __init ux500_get_family(void)
+{
+ return kasprintf(GFP_KERNEL, "ux500");
+}
+
+static const char * __init ux500_get_revision(void)
+{
+ unsigned int rev = dbx500_id.revision;
+
+ if (rev == 0x01)
+ return kasprintf(GFP_KERNEL, "%s", "ED");
+ else if (rev >= 0xA0)
+ return kasprintf(GFP_KERNEL, "%d.%d",
+ (rev >> 4) - 0xA + 1, rev & 0xf);
+
+ return kasprintf(GFP_KERNEL, "%s", "Unknown");
+}
+
+static ssize_t ux500_get_process(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ if (dbx500_id.process == 0x00)
+ return sprintf(buf, "Standard\n");
+
+ return sprintf(buf, "%02xnm\n", dbx500_id.process);
+}
+
+static const char *db8500_read_soc_id(struct device_node *backupram)
+{
+ void __iomem *base;
+ void __iomem *uid;
+ const char *retstr;
+
+ base = of_iomap(backupram, 0);
+ if (!base)
+ return NULL;
+ uid = base + 0x1fc0;
+
+ /* Throw these device-specific numbers into the entropy pool */
+ add_device_randomness(uid, 0x14);
+ retstr = kasprintf(GFP_KERNEL, "%08x%08x%08x%08x%08x",
+ readl((u32 *)uid+0),
+ readl((u32 *)uid+1), readl((u32 *)uid+2),
+ readl((u32 *)uid+3), readl((u32 *)uid+4));
+ iounmap(base);
+ return retstr;
+}
+
+static void __init soc_info_populate(struct soc_device_attribute *soc_dev_attr,
+ struct device_node *backupram)
+{
+ soc_dev_attr->soc_id = db8500_read_soc_id(backupram);
+ soc_dev_attr->machine = ux500_get_machine();
+ soc_dev_attr->family = ux500_get_family();
+ soc_dev_attr->revision = ux500_get_revision();
+}
+
+static const struct device_attribute ux500_soc_attr =
+ __ATTR(process, S_IRUGO, ux500_get_process, NULL);
+
+static int __init ux500_soc_device_init(void)
+{
+ struct device *parent;
+ struct soc_device *soc_dev;
+ struct soc_device_attribute *soc_dev_attr;
+ struct device_node *backupram;
+
+ backupram = of_find_compatible_node(NULL, NULL, "ste,dbx500-backupram");
+ if (!backupram)
+ return 0;
+
+ ux500_setup_id();
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ soc_info_populate(soc_dev_attr, backupram);
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ kfree(soc_dev_attr);
+ return PTR_ERR(soc_dev);
+ }
+
+ parent = soc_device_to_device(soc_dev);
+ device_create_file(parent, &ux500_soc_attr);
+
+ return 0;
+}
+subsys_initcall(ux500_soc_device_init);
diff --git a/drivers/soc/versatile/Kconfig b/drivers/soc/versatile/Kconfig
new file mode 100644
index 000000000..a928a7fc6
--- /dev/null
+++ b/drivers/soc/versatile/Kconfig
@@ -0,0 +1,19 @@
+#
+# ARM Versatile SoC drivers
+#
+config SOC_INTEGRATOR_CM
+ bool "SoC bus device for the ARM Integrator platform core modules"
+ depends on ARCH_INTEGRATOR
+ select SOC_BUS
+ help
+ Include support for the SoC bus on the ARM Integrator platform
+ core modules providing some sysfs information about the ASIC
+ variant.
+
+config SOC_REALVIEW
+ bool "SoC bus device for the ARM RealView platforms"
+ depends on ARCH_REALVIEW
+ select SOC_BUS
+ help
+ Include support for the SoC bus on the ARM RealView platforms
+ providing some sysfs information about the ASIC variant.
diff --git a/drivers/soc/versatile/Makefile b/drivers/soc/versatile/Makefile
new file mode 100644
index 000000000..cf612fe3a
--- /dev/null
+++ b/drivers/soc/versatile/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_SOC_INTEGRATOR_CM) += soc-integrator.o
+obj-$(CONFIG_SOC_REALVIEW) += soc-realview.o
diff --git a/drivers/soc/versatile/soc-integrator.c b/drivers/soc/versatile/soc-integrator.c
new file mode 100644
index 000000000..a5d7d39ae
--- /dev/null
+++ b/drivers/soc/versatile/soc-integrator.c
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2014 Linaro Ltd.
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/of.h>
+
+#define INTEGRATOR_HDR_ID_OFFSET 0x00
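+
+/*
+ * Fields of the core module ID register, as decoded by the sysfs
+ * helpers below: manufacturer in bits [31:24], bus architecture in
+ * bits [23:16], FPGA type in bits [15:12], build number in bits
+ * [11:4] and revision in bits [1:0] (reported as 'A' + value).
+ */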
+
+static u32 integrator_coreid;
+
+static const struct of_device_id integrator_cm_match[] = {
+ { .compatible = "arm,core-module-integrator", },
+ { }
+};
+
+static const char *integrator_arch_str(u32 id)
+{
+ switch ((id >> 16) & 0xff) {
+ case 0x00:
+ return "ASB little-endian";
+ case 0x01:
+ return "AHB little-endian";
+ case 0x03:
+ return "AHB-Lite system bus, bi-endian";
+ case 0x04:
+ return "AHB";
+ case 0x08:
+ return "AHB system bus, ASB processor bus";
+ default:
+ return "Unknown";
+ }
+}
+
+static const char *integrator_fpga_str(u32 id)
+{
+ switch ((id >> 12) & 0xf) {
+ case 0x01:
+ return "XC4062";
+ case 0x02:
+ return "XC4085";
+ case 0x03:
+ return "XVC600";
+ case 0x04:
+ return "EPM7256AE (Altera PLD)";
+ default:
+ return "Unknown";
+ }
+}
+
+static ssize_t integrator_get_manf(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%02x\n", integrator_coreid >> 24);
+}
+
+static struct device_attribute integrator_manf_attr =
+ __ATTR(manufacturer, S_IRUGO, integrator_get_manf, NULL);
+
+static ssize_t integrator_get_arch(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%s\n", integrator_arch_str(integrator_coreid));
+}
+
+static struct device_attribute integrator_arch_attr =
+ __ATTR(arch, S_IRUGO, integrator_get_arch, NULL);
+
+static ssize_t integrator_get_fpga(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%s\n", integrator_fpga_str(integrator_coreid));
+}
+
+static struct device_attribute integrator_fpga_attr =
+ __ATTR(fpga, S_IRUGO, integrator_get_fpga, NULL);
+
+static ssize_t integrator_get_build(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%02x\n", (integrator_coreid >> 4) & 0xFF);
+}
+
+static struct device_attribute integrator_build_attr =
+ __ATTR(build, S_IRUGO, integrator_get_build, NULL);
+
+static int __init integrator_soc_init(void)
+{
+ static struct regmap *syscon_regmap;
+ struct soc_device *soc_dev;
+ struct soc_device_attribute *soc_dev_attr;
+ struct device_node *np;
+ struct device *dev;
+ u32 val;
+ int ret;
+
+ np = of_find_matching_node(NULL, integrator_cm_match);
+ if (!np)
+ return -ENODEV;
+
+ syscon_regmap = syscon_node_to_regmap(np);
+ if (IS_ERR(syscon_regmap))
+ return PTR_ERR(syscon_regmap);
+
+ ret = regmap_read(syscon_regmap, INTEGRATOR_HDR_ID_OFFSET,
+ &val);
+ if (ret)
+ return -ENODEV;
+ integrator_coreid = val;
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ soc_dev_attr->soc_id = "Integrator";
+ soc_dev_attr->machine = "Integrator";
+ soc_dev_attr->family = "Versatile";
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ kfree(soc_dev_attr);
+ return -ENODEV;
+ }
+ dev = soc_device_to_device(soc_dev);
+
+ device_create_file(dev, &integrator_manf_attr);
+ device_create_file(dev, &integrator_arch_attr);
+ device_create_file(dev, &integrator_fpga_attr);
+ device_create_file(dev, &integrator_build_attr);
+
+ dev_info(dev, "Detected ARM core module:\n");
+ dev_info(dev, " Manufacturer: %02x\n", (val >> 24));
+ dev_info(dev, " Architecture: %s\n", integrator_arch_str(val));
+ dev_info(dev, " FPGA: %s\n", integrator_fpga_str(val));
+ dev_info(dev, " Build: %02x\n", (val >> 4) & 0xFF);
+ dev_info(dev, " Rev: %c\n", ('A' + (val & 0x03)));
+
+ return 0;
+}
+device_initcall(integrator_soc_init);
diff --git a/drivers/soc/versatile/soc-realview.c b/drivers/soc/versatile/soc-realview.c
new file mode 100644
index 000000000..caf698e5f
--- /dev/null
+++ b/drivers/soc/versatile/soc-realview.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2014 Linaro Ltd.
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/of.h>
+
+/* System ID in syscon */
+#define REALVIEW_SYS_ID_OFFSET 0x00
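+
+/*
+ * Fields of the system ID register, as decoded by the sysfs helpers
+ * below: manufacturer in bits [31:24], HBI board number in bits
+ * [27:16], bus architecture in bits [11:8] and build in bits [7:0].
+ */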
+
+static const struct of_device_id realview_soc_of_match[] = {
+ { .compatible = "arm,realview-eb-soc", },
+ { .compatible = "arm,realview-pb1176-soc", },
+ { .compatible = "arm,realview-pb11mp-soc", },
+ { .compatible = "arm,realview-pba8-soc", },
+ { .compatible = "arm,realview-pbx-soc", },
+ { }
+};
+
+static u32 realview_coreid;
+
+static const char *realview_arch_str(u32 id)
+{
+ switch ((id >> 8) & 0xf) {
+ case 0x04:
+ return "AHB";
+ case 0x05:
+ return "Multi-layer AXI";
+ default:
+ return "Unknown";
+ }
+}
+
+static ssize_t realview_get_manf(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%02x\n", realview_coreid >> 24);
+}
+
+static struct device_attribute realview_manf_attr =
+ __ATTR(manufacturer, S_IRUGO, realview_get_manf, NULL);
+
+static ssize_t realview_get_board(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "HBI-%03x\n", ((realview_coreid >> 16) & 0xfff));
+}
+
+static struct device_attribute realview_board_attr =
+ __ATTR(board, S_IRUGO, realview_get_board, NULL);
+
+static ssize_t realview_get_arch(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%s\n", realview_arch_str(realview_coreid));
+}
+
+static struct device_attribute realview_arch_attr =
+ __ATTR(fpga, S_IRUGO, realview_get_arch, NULL);
+
+static ssize_t realview_get_build(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%02x\n", (realview_coreid & 0xFF));
+}
+
+static struct device_attribute realview_build_attr =
+ __ATTR(build, S_IRUGO, realview_get_build, NULL);
+
+static int realview_soc_probe(struct platform_device *pdev)
+{
+ struct regmap *syscon_regmap;
+ struct soc_device *soc_dev;
+ struct soc_device_attribute *soc_dev_attr;
+ struct device_node *np = pdev->dev.of_node;
+ int ret;
+
+ syscon_regmap = syscon_regmap_lookup_by_phandle(np, "regmap");
+ if (IS_ERR(syscon_regmap))
+ return PTR_ERR(syscon_regmap);
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+	ret = of_property_read_string(np, "compatible",
+				      &soc_dev_attr->soc_id);
+	if (ret) {
+		kfree(soc_dev_attr);
+		return -EINVAL;
+	}
+
+ soc_dev_attr->machine = "RealView";
+ soc_dev_attr->family = "Versatile";
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ kfree(soc_dev_attr);
+ return -ENODEV;
+ }
+ ret = regmap_read(syscon_regmap, REALVIEW_SYS_ID_OFFSET,
+ &realview_coreid);
+ if (ret)
+ return -ENODEV;
+
+ device_create_file(soc_device_to_device(soc_dev), &realview_manf_attr);
+ device_create_file(soc_device_to_device(soc_dev), &realview_board_attr);
+ device_create_file(soc_device_to_device(soc_dev), &realview_arch_attr);
+ device_create_file(soc_device_to_device(soc_dev), &realview_build_attr);
+
+ dev_info(&pdev->dev, "RealView Syscon Core ID: 0x%08x, HBI-%03x\n",
+ realview_coreid,
+ ((realview_coreid >> 16) & 0xfff));
+ /* FIXME: add attributes for SoC to sysfs */
+ return 0;
+}
+
+static struct platform_driver realview_soc_driver = {
+ .probe = realview_soc_probe,
+ .driver = {
+ .name = "realview-soc",
+ .of_match_table = realview_soc_of_match,
+ },
+};
+builtin_platform_driver(realview_soc_driver);
diff --git a/drivers/soc/xilinx/Kconfig b/drivers/soc/xilinx/Kconfig
new file mode 100644
index 000000000..687c8f3cd
--- /dev/null
+++ b/drivers/soc/xilinx/Kconfig
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0
+menu "Xilinx SoC drivers"
+
+config XILINX_VCU
+ tristate "Xilinx VCU logicoreIP Init"
+ depends on HAS_IOMEM
+ help
+ Provides the driver to enable and disable the isolation between the
+ processing system and programmable logic part by using the logicoreIP
+ register set. This driver also configures the frequency based on the
+ clock information from the logicoreIP register set.
+
+ If you say yes here you get support for the logicoreIP.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called xlnx_vcu.
+
+endmenu
diff --git a/drivers/soc/xilinx/Makefile b/drivers/soc/xilinx/Makefile
new file mode 100644
index 000000000..dee8fd51e
--- /dev/null
+++ b/drivers/soc/xilinx/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_XILINX_VCU) += xlnx_vcu.o
diff --git a/drivers/soc/xilinx/xlnx_vcu.c b/drivers/soc/xilinx/xlnx_vcu.c
new file mode 100644
index 000000000..a840c0272
--- /dev/null
+++ b/drivers/soc/xilinx/xlnx_vcu.c
@@ -0,0 +1,630 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx VCU Init
+ *
+ * Copyright (C) 2016 - 2017 Xilinx, Inc.
+ *
+ * Contact: Dhaval Shah <dshah@xilinx.com>
+ */
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+/* Address map for different registers implemented in the VCU LogiCORE IP. */
+#define VCU_ECODER_ENABLE 0x00
+#define VCU_DECODER_ENABLE 0x04
+#define VCU_MEMORY_DEPTH 0x08
+#define VCU_ENC_COLOR_DEPTH 0x0c
+#define VCU_ENC_VERTICAL_RANGE 0x10
+#define VCU_ENC_FRAME_SIZE_X 0x14
+#define VCU_ENC_FRAME_SIZE_Y 0x18
+#define VCU_ENC_COLOR_FORMAT 0x1c
+#define VCU_ENC_FPS 0x20
+#define VCU_MCU_CLK 0x24
+#define VCU_CORE_CLK 0x28
+#define VCU_PLL_BYPASS 0x2c
+#define VCU_ENC_CLK 0x30
+#define VCU_PLL_CLK 0x34
+#define VCU_ENC_VIDEO_STANDARD 0x38
+#define VCU_STATUS 0x3c
+#define VCU_AXI_ENC_CLK 0x40
+#define VCU_AXI_DEC_CLK 0x44
+#define VCU_AXI_MCU_CLK 0x48
+#define VCU_DEC_VIDEO_STANDARD 0x4c
+#define VCU_DEC_FRAME_SIZE_X 0x50
+#define VCU_DEC_FRAME_SIZE_Y 0x54
+#define VCU_DEC_FPS 0x58
+#define VCU_BUFFER_B_FRAME 0x5c
+#define VCU_WPP_EN 0x60
+#define VCU_PLL_CLK_DEC 0x64
+#define VCU_GASKET_INIT 0x74
+#define VCU_GASKET_VALUE 0x03
+
+/* vcu slcr registers, bitmask and shift */
+#define VCU_PLL_CTRL 0x24
+#define VCU_PLL_CTRL_RESET_MASK 0x01
+#define VCU_PLL_CTRL_RESET_SHIFT 0
+#define VCU_PLL_CTRL_BYPASS_MASK 0x01
+#define VCU_PLL_CTRL_BYPASS_SHIFT 3
+#define VCU_PLL_CTRL_FBDIV_MASK 0x7f
+#define VCU_PLL_CTRL_FBDIV_SHIFT 8
+#define VCU_PLL_CTRL_POR_IN_MASK 0x01
+#define VCU_PLL_CTRL_POR_IN_SHIFT 1
+#define VCU_PLL_CTRL_PWR_POR_MASK 0x01
+#define VCU_PLL_CTRL_PWR_POR_SHIFT 2
+#define VCU_PLL_CTRL_CLKOUTDIV_MASK 0x03
+#define VCU_PLL_CTRL_CLKOUTDIV_SHIFT 16
+#define VCU_PLL_CTRL_DEFAULT 0
+#define VCU_PLL_DIV2 2
+
+#define VCU_PLL_CFG 0x28
+#define VCU_PLL_CFG_RES_MASK 0x0f
+#define VCU_PLL_CFG_RES_SHIFT 0
+#define VCU_PLL_CFG_CP_MASK 0x0f
+#define VCU_PLL_CFG_CP_SHIFT 5
+#define VCU_PLL_CFG_LFHF_MASK 0x03
+#define VCU_PLL_CFG_LFHF_SHIFT 10
+#define VCU_PLL_CFG_LOCK_CNT_MASK 0x03ff
+#define VCU_PLL_CFG_LOCK_CNT_SHIFT 13
+#define VCU_PLL_CFG_LOCK_DLY_MASK 0x7f
+#define VCU_PLL_CFG_LOCK_DLY_SHIFT 25
+#define VCU_ENC_CORE_CTRL 0x30
+#define VCU_ENC_MCU_CTRL 0x34
+#define VCU_DEC_CORE_CTRL 0x38
+#define VCU_DEC_MCU_CTRL 0x3c
+#define VCU_PLL_DIVISOR_MASK 0x3f
+#define VCU_PLL_DIVISOR_SHIFT 4
+#define VCU_SRCSEL_MASK 0x01
+#define VCU_SRCSEL_SHIFT 0
+#define VCU_SRCSEL_PLL 1
+
+#define VCU_PLL_STATUS 0x60
+#define VCU_PLL_STATUS_LOCK_STATUS_MASK 0x01
+
+#define MHZ 1000000
+#define FVCO_MIN (1500U * MHZ)
+#define FVCO_MAX (3000U * MHZ)
+#define DIVISOR_MIN 0
+#define DIVISOR_MAX 63
+#define FRAC 100
+#define LIMIT (10 * MHZ)
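+
+/*
+ * LIMIT is the tolerated remainder (10 MHz) when checking whether the
+ * requested core and mcu clocks divide pll_clk evenly in
+ * xvcu_set_vcu_pll_info() below.
+ */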
+
+/**
+ * struct xvcu_device - Xilinx VCU init device structure
+ * @dev: Platform device
+ * @pll_ref: pll ref clock source
+ * @aclk: axi clock source
+ * @logicore_reg_ba: logicore reg base address
+ * @vcu_slcr_ba: vcu_slcr Register base address
+ * @coreclk: core clock frequency
+ */
+struct xvcu_device {
+ struct device *dev;
+ struct clk *pll_ref;
+ struct clk *aclk;
+ void __iomem *logicore_reg_ba;
+ void __iomem *vcu_slcr_ba;
+ u32 coreclk;
+};
+
+/**
+ * struct xvcu_pll_cfg - Helper data
+ * @fbdiv: The integer portion of the feedback divider to the PLL
+ * @cp: PLL charge pump control
+ * @res: PLL loop filter resistor control
+ * @lfhf: PLL loop filter high frequency capacitor control
+ * @lock_dly: Lock circuit configuration settings for the lock window size
+ * @lock_cnt: Lock circuit counter setting
+ */
+struct xvcu_pll_cfg {
+ u32 fbdiv;
+ u32 cp;
+ u32 res;
+ u32 lfhf;
+ u32 lock_dly;
+ u32 lock_cnt;
+};
+
+static const struct xvcu_pll_cfg xvcu_pll_cfg[] = {
+ { 25, 3, 10, 3, 63, 1000 },
+ { 26, 3, 10, 3, 63, 1000 },
+ { 27, 4, 6, 3, 63, 1000 },
+ { 28, 4, 6, 3, 63, 1000 },
+ { 29, 4, 6, 3, 63, 1000 },
+ { 30, 4, 6, 3, 63, 1000 },
+ { 31, 6, 1, 3, 63, 1000 },
+ { 32, 6, 1, 3, 63, 1000 },
+ { 33, 4, 10, 3, 63, 1000 },
+ { 34, 5, 6, 3, 63, 1000 },
+ { 35, 5, 6, 3, 63, 1000 },
+ { 36, 5, 6, 3, 63, 1000 },
+ { 37, 5, 6, 3, 63, 1000 },
+ { 38, 5, 6, 3, 63, 975 },
+ { 39, 3, 12, 3, 63, 950 },
+ { 40, 3, 12, 3, 63, 925 },
+ { 41, 3, 12, 3, 63, 900 },
+ { 42, 3, 12, 3, 63, 875 },
+ { 43, 3, 12, 3, 63, 850 },
+ { 44, 3, 12, 3, 63, 850 },
+ { 45, 3, 12, 3, 63, 825 },
+ { 46, 3, 12, 3, 63, 800 },
+ { 47, 3, 12, 3, 63, 775 },
+ { 48, 3, 12, 3, 63, 775 },
+ { 49, 3, 12, 3, 63, 750 },
+ { 50, 3, 12, 3, 63, 750 },
+ { 51, 3, 2, 3, 63, 725 },
+ { 52, 3, 2, 3, 63, 700 },
+ { 53, 3, 2, 3, 63, 700 },
+ { 54, 3, 2, 3, 63, 675 },
+ { 55, 3, 2, 3, 63, 675 },
+ { 56, 3, 2, 3, 63, 650 },
+ { 57, 3, 2, 3, 63, 650 },
+ { 58, 3, 2, 3, 63, 625 },
+ { 59, 3, 2, 3, 63, 625 },
+ { 60, 3, 2, 3, 63, 625 },
+ { 61, 3, 2, 3, 63, 600 },
+ { 62, 3, 2, 3, 63, 600 },
+ { 63, 3, 2, 3, 63, 600 },
+ { 64, 3, 2, 3, 63, 600 },
+ { 65, 3, 2, 3, 63, 600 },
+ { 66, 3, 2, 3, 63, 600 },
+ { 67, 3, 2, 3, 63, 600 },
+ { 68, 3, 2, 3, 63, 600 },
+ { 69, 3, 2, 3, 63, 600 },
+ { 70, 3, 2, 3, 63, 600 },
+ { 71, 3, 2, 3, 63, 600 },
+ { 72, 3, 2, 3, 63, 600 },
+ { 73, 3, 2, 3, 63, 600 },
+ { 74, 3, 2, 3, 63, 600 },
+ { 75, 3, 2, 3, 63, 600 },
+ { 76, 3, 2, 3, 63, 600 },
+ { 77, 3, 2, 3, 63, 600 },
+ { 78, 3, 2, 3, 63, 600 },
+ { 79, 3, 2, 3, 63, 600 },
+ { 80, 3, 2, 3, 63, 600 },
+ { 81, 3, 2, 3, 63, 600 },
+ { 82, 3, 2, 3, 63, 600 },
+ { 83, 4, 2, 3, 63, 600 },
+ { 84, 4, 2, 3, 63, 600 },
+ { 85, 4, 2, 3, 63, 600 },
+ { 86, 4, 2, 3, 63, 600 },
+ { 87, 4, 2, 3, 63, 600 },
+ { 88, 4, 2, 3, 63, 600 },
+ { 89, 4, 2, 3, 63, 600 },
+ { 90, 4, 2, 3, 63, 600 },
+ { 91, 4, 2, 3, 63, 600 },
+ { 92, 4, 2, 3, 63, 600 },
+ { 93, 4, 2, 3, 63, 600 },
+ { 94, 4, 2, 3, 63, 600 },
+ { 95, 4, 2, 3, 63, 600 },
+ { 96, 4, 2, 3, 63, 600 },
+ { 97, 4, 2, 3, 63, 600 },
+ { 98, 4, 2, 3, 63, 600 },
+ { 99, 4, 2, 3, 63, 600 },
+ { 100, 4, 2, 3, 63, 600 },
+ { 101, 4, 2, 3, 63, 600 },
+ { 102, 4, 2, 3, 63, 600 },
+ { 103, 5, 2, 3, 63, 600 },
+ { 104, 5, 2, 3, 63, 600 },
+ { 105, 5, 2, 3, 63, 600 },
+ { 106, 5, 2, 3, 63, 600 },
+ { 107, 3, 4, 3, 63, 600 },
+ { 108, 3, 4, 3, 63, 600 },
+ { 109, 3, 4, 3, 63, 600 },
+ { 110, 3, 4, 3, 63, 600 },
+ { 111, 3, 4, 3, 63, 600 },
+ { 112, 3, 4, 3, 63, 600 },
+ { 113, 3, 4, 3, 63, 600 },
+ { 114, 3, 4, 3, 63, 600 },
+ { 115, 3, 4, 3, 63, 600 },
+ { 116, 3, 4, 3, 63, 600 },
+ { 117, 3, 4, 3, 63, 600 },
+ { 118, 3, 4, 3, 63, 600 },
+ { 119, 3, 4, 3, 63, 600 },
+ { 120, 3, 4, 3, 63, 600 },
+ { 121, 3, 4, 3, 63, 600 },
+ { 122, 3, 4, 3, 63, 600 },
+ { 123, 3, 4, 3, 63, 600 },
+ { 124, 3, 4, 3, 63, 600 },
+ { 125, 3, 4, 3, 63, 600 },
+};
+
+/**
+ * xvcu_read - Read from the VCU register space
+ * @iomem: vcu reg space base address
+ * @offset: vcu reg offset from base
+ *
+ * Return: Returns the 32-bit value of the specified VCU register
+ */
+static inline u32 xvcu_read(void __iomem *iomem, u32 offset)
+{
+ return ioread32(iomem + offset);
+}
+
+/**
+ * xvcu_write - Write to the VCU register space
+ * @iomem: vcu reg space base address
+ * @offset: vcu reg offset from base
+ * @value: Value to write
+ */
+static inline void xvcu_write(void __iomem *iomem, u32 offset, u32 value)
+{
+ iowrite32(value, iomem + offset);
+}
+
+/**
+ * xvcu_write_field_reg - Write to the vcu reg field
+ * @iomem: vcu reg space base address
+ * @offset: vcu reg offset from base
+ * @field: vcu reg field to write to
+ * @mask: vcu reg mask
+ * @shift: vcu reg number of bits to shift the bitfield
+ */
+static void xvcu_write_field_reg(void __iomem *iomem, int offset,
+ u32 field, u32 mask, int shift)
+{
+ u32 val = xvcu_read(iomem, offset);
+
+ val &= ~(mask << shift);
+ val |= (field & mask) << shift;
+
+ xvcu_write(iomem, offset, val);
+}
+
+/**
+ * xvcu_set_vcu_pll_info - Set the VCU PLL info
+ * @xvcu: Pointer to the xvcu_device structure
+ *
+ * Program the VCU PLL based on the user configuration
+ * (ref clock freq, core clock freq, mcu clock freq).
+ * The core clock frequency takes priority over the mcu clock frequency.
+ * Errors are returned in the following cases:
+ * - the mcu or core clock read from the logicoreIP is 0
+ * - the VCU PLL DIV related bits hold a value other than 1
+ * - no suitable PLL configuration is found for the given clocks
+ * - an sis570_1 clock-source related operation fails
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int xvcu_set_vcu_pll_info(struct xvcu_device *xvcu)
+{
+ u32 refclk, coreclk, mcuclk, inte, deci;
+ u32 divisor_mcu, divisor_core, fvco;
+ u32 clkoutdiv, vcu_pll_ctrl, pll_clk;
+ u32 cfg_val, mod, ctrl;
+ int ret, i;
+ const struct xvcu_pll_cfg *found = NULL;
+
+ inte = xvcu_read(xvcu->logicore_reg_ba, VCU_PLL_CLK);
+ deci = xvcu_read(xvcu->logicore_reg_ba, VCU_PLL_CLK_DEC);
+ coreclk = xvcu_read(xvcu->logicore_reg_ba, VCU_CORE_CLK) * MHZ;
+ mcuclk = xvcu_read(xvcu->logicore_reg_ba, VCU_MCU_CLK) * MHZ;
+ if (!mcuclk || !coreclk) {
+ dev_err(xvcu->dev, "Invalid mcu and core clock data\n");
+ return -EINVAL;
+ }
+
+ refclk = (inte * MHZ) + (deci * (MHZ / FRAC));
+ dev_dbg(xvcu->dev, "Ref clock from logicoreIP is %uHz\n", refclk);
+ dev_dbg(xvcu->dev, "Core clock from logicoreIP is %uHz\n", coreclk);
+ dev_dbg(xvcu->dev, "Mcu clock from logicoreIP is %uHz\n", mcuclk);
+
+ clk_disable_unprepare(xvcu->pll_ref);
+ ret = clk_set_rate(xvcu->pll_ref, refclk);
+ if (ret)
+ dev_warn(xvcu->dev, "failed to set logicoreIP refclk rate\n");
+
+ ret = clk_prepare_enable(xvcu->pll_ref);
+ if (ret) {
+ dev_err(xvcu->dev, "failed to enable pll_ref clock source\n");
+ return ret;
+ }
+
+ refclk = clk_get_rate(xvcu->pll_ref);
+
+	/*
+	 * The divide-by-2 divider should always be enabled (==1)
+	 * to meet the timing requirements of the design;
+	 * any other value is an error.
+	 */
+ vcu_pll_ctrl = xvcu_read(xvcu->vcu_slcr_ba, VCU_PLL_CTRL);
+ clkoutdiv = vcu_pll_ctrl >> VCU_PLL_CTRL_CLKOUTDIV_SHIFT;
+ clkoutdiv = clkoutdiv & VCU_PLL_CTRL_CLKOUTDIV_MASK;
+ if (clkoutdiv != 1) {
+ dev_err(xvcu->dev, "clkoutdiv value is invalid\n");
+ return -EINVAL;
+ }
+
+ for (i = ARRAY_SIZE(xvcu_pll_cfg) - 1; i >= 0; i--) {
+ const struct xvcu_pll_cfg *cfg = &xvcu_pll_cfg[i];
+
+ fvco = cfg->fbdiv * refclk;
+ if (fvco >= FVCO_MIN && fvco <= FVCO_MAX) {
+ pll_clk = fvco / VCU_PLL_DIV2;
+ if (fvco % VCU_PLL_DIV2 != 0)
+ pll_clk++;
+ mod = pll_clk % coreclk;
+ if (mod < LIMIT) {
+ divisor_core = pll_clk / coreclk;
+ } else if (coreclk - mod < LIMIT) {
+ divisor_core = pll_clk / coreclk;
+ divisor_core++;
+ } else {
+ continue;
+ }
+ if (divisor_core >= DIVISOR_MIN &&
+ divisor_core <= DIVISOR_MAX) {
+ found = cfg;
+ divisor_mcu = pll_clk / mcuclk;
+ mod = pll_clk % mcuclk;
+ if (mcuclk - mod < LIMIT)
+ divisor_mcu++;
+ break;
+ }
+ }
+ }
+
+ if (!found) {
+ dev_err(xvcu->dev, "Invalid clock combination.\n");
+ return -EINVAL;
+ }
+
+ xvcu->coreclk = pll_clk / divisor_core;
+ mcuclk = pll_clk / divisor_mcu;
+ dev_dbg(xvcu->dev, "Actual Ref clock freq is %uHz\n", refclk);
+ dev_dbg(xvcu->dev, "Actual Core clock freq is %uHz\n", xvcu->coreclk);
+ dev_dbg(xvcu->dev, "Actual Mcu clock freq is %uHz\n", mcuclk);
+
+ vcu_pll_ctrl &= ~(VCU_PLL_CTRL_FBDIV_MASK << VCU_PLL_CTRL_FBDIV_SHIFT);
+ vcu_pll_ctrl |= (found->fbdiv & VCU_PLL_CTRL_FBDIV_MASK) <<
+ VCU_PLL_CTRL_FBDIV_SHIFT;
+ vcu_pll_ctrl &= ~(VCU_PLL_CTRL_POR_IN_MASK <<
+ VCU_PLL_CTRL_POR_IN_SHIFT);
+ vcu_pll_ctrl |= (VCU_PLL_CTRL_DEFAULT & VCU_PLL_CTRL_POR_IN_MASK) <<
+ VCU_PLL_CTRL_POR_IN_SHIFT;
+ vcu_pll_ctrl &= ~(VCU_PLL_CTRL_PWR_POR_MASK <<
+ VCU_PLL_CTRL_PWR_POR_SHIFT);
+ vcu_pll_ctrl |= (VCU_PLL_CTRL_DEFAULT & VCU_PLL_CTRL_PWR_POR_MASK) <<
+ VCU_PLL_CTRL_PWR_POR_SHIFT;
+ xvcu_write(xvcu->vcu_slcr_ba, VCU_PLL_CTRL, vcu_pll_ctrl);
+
+ /* Set divisor for the core and mcu clock */
+ ctrl = xvcu_read(xvcu->vcu_slcr_ba, VCU_ENC_CORE_CTRL);
+ ctrl &= ~(VCU_PLL_DIVISOR_MASK << VCU_PLL_DIVISOR_SHIFT);
+ ctrl |= (divisor_core & VCU_PLL_DIVISOR_MASK) <<
+ VCU_PLL_DIVISOR_SHIFT;
+ ctrl &= ~(VCU_SRCSEL_MASK << VCU_SRCSEL_SHIFT);
+ ctrl |= (VCU_SRCSEL_PLL & VCU_SRCSEL_MASK) << VCU_SRCSEL_SHIFT;
+ xvcu_write(xvcu->vcu_slcr_ba, VCU_ENC_CORE_CTRL, ctrl);
+
+ ctrl = xvcu_read(xvcu->vcu_slcr_ba, VCU_DEC_CORE_CTRL);
+ ctrl &= ~(VCU_PLL_DIVISOR_MASK << VCU_PLL_DIVISOR_SHIFT);
+ ctrl |= (divisor_core & VCU_PLL_DIVISOR_MASK) <<
+ VCU_PLL_DIVISOR_SHIFT;
+ ctrl &= ~(VCU_SRCSEL_MASK << VCU_SRCSEL_SHIFT);
+ ctrl |= (VCU_SRCSEL_PLL & VCU_SRCSEL_MASK) << VCU_SRCSEL_SHIFT;
+ xvcu_write(xvcu->vcu_slcr_ba, VCU_DEC_CORE_CTRL, ctrl);
+
+ ctrl = xvcu_read(xvcu->vcu_slcr_ba, VCU_ENC_MCU_CTRL);
+ ctrl &= ~(VCU_PLL_DIVISOR_MASK << VCU_PLL_DIVISOR_SHIFT);
+ ctrl |= (divisor_mcu & VCU_PLL_DIVISOR_MASK) << VCU_PLL_DIVISOR_SHIFT;
+ ctrl &= ~(VCU_SRCSEL_MASK << VCU_SRCSEL_SHIFT);
+ ctrl |= (VCU_SRCSEL_PLL & VCU_SRCSEL_MASK) << VCU_SRCSEL_SHIFT;
+ xvcu_write(xvcu->vcu_slcr_ba, VCU_ENC_MCU_CTRL, ctrl);
+
+ ctrl = xvcu_read(xvcu->vcu_slcr_ba, VCU_DEC_MCU_CTRL);
+ ctrl &= ~(VCU_PLL_DIVISOR_MASK << VCU_PLL_DIVISOR_SHIFT);
+ ctrl |= (divisor_mcu & VCU_PLL_DIVISOR_MASK) << VCU_PLL_DIVISOR_SHIFT;
+ ctrl &= ~(VCU_SRCSEL_MASK << VCU_SRCSEL_SHIFT);
+ ctrl |= (VCU_SRCSEL_PLL & VCU_SRCSEL_MASK) << VCU_SRCSEL_SHIFT;
+ xvcu_write(xvcu->vcu_slcr_ba, VCU_DEC_MCU_CTRL, ctrl);
+
+ /* Set RES, CP, LFHF, LOCK_CNT and LOCK_DLY cfg values */
+ cfg_val = (found->res << VCU_PLL_CFG_RES_SHIFT) |
+ (found->cp << VCU_PLL_CFG_CP_SHIFT) |
+ (found->lfhf << VCU_PLL_CFG_LFHF_SHIFT) |
+ (found->lock_cnt << VCU_PLL_CFG_LOCK_CNT_SHIFT) |
+ (found->lock_dly << VCU_PLL_CFG_LOCK_DLY_SHIFT);
+ xvcu_write(xvcu->vcu_slcr_ba, VCU_PLL_CFG, cfg_val);
+
+ return 0;
+}
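+
+/*
+ * Worked example with hypothetical clocks: for refclk = 30 MHz the
+ * search above walks down from fbdiv 125 and stops at fbdiv 100, the
+ * first entry with Fvco = 100 * 30 MHz = 3000 MHz inside the
+ * [FVCO_MIN, FVCO_MAX] window; pll_clk is then 1500 MHz, and requested
+ * core and mcu clocks of 500 MHz and 300 MHz divide it exactly,
+ * giving divisor_core = 3 and divisor_mcu = 5.
+ */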
+
+/**
+ * xvcu_set_pll - PLL init sequence
+ * @xvcu: Pointer to the xvcu_device structure
+ *
+ * Call the api to set the PLL info and once that is done then
+ * init the PLL sequence to make the PLL stable.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int xvcu_set_pll(struct xvcu_device *xvcu)
+{
+ u32 lock_status;
+ unsigned long timeout;
+ int ret;
+
+ ret = xvcu_set_vcu_pll_info(xvcu);
+ if (ret) {
+ dev_err(xvcu->dev, "failed to set pll info\n");
+ return ret;
+ }
+
+ xvcu_write_field_reg(xvcu->vcu_slcr_ba, VCU_PLL_CTRL,
+ 1, VCU_PLL_CTRL_BYPASS_MASK,
+ VCU_PLL_CTRL_BYPASS_SHIFT);
+ xvcu_write_field_reg(xvcu->vcu_slcr_ba, VCU_PLL_CTRL,
+ 1, VCU_PLL_CTRL_RESET_MASK,
+ VCU_PLL_CTRL_RESET_SHIFT);
+ xvcu_write_field_reg(xvcu->vcu_slcr_ba, VCU_PLL_CTRL,
+ 0, VCU_PLL_CTRL_RESET_MASK,
+ VCU_PLL_CTRL_RESET_SHIFT);
+	/*
+	 * Set a timeout for the maximum time to wait for
+	 * PLL_STATUS to report lock.
+	 */
+ timeout = jiffies + msecs_to_jiffies(2000);
+ do {
+ lock_status = xvcu_read(xvcu->vcu_slcr_ba, VCU_PLL_STATUS);
+ if (lock_status & VCU_PLL_STATUS_LOCK_STATUS_MASK) {
+ xvcu_write_field_reg(xvcu->vcu_slcr_ba, VCU_PLL_CTRL,
+ 0, VCU_PLL_CTRL_BYPASS_MASK,
+ VCU_PLL_CTRL_BYPASS_SHIFT);
+ return 0;
+ }
+ } while (!time_after(jiffies, timeout));
+
+	/* The PLL did not lock within the 2 sec timeout */
+ dev_err(xvcu->dev, "PLL is not locked\n");
+ return -ETIMEDOUT;
+}
+
+/**
+ * xvcu_probe - Probe existence of the logicoreIP
+ * and initialize PLL
+ *
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: Returns 0 on success
+ * Negative error code otherwise
+ */
+static int xvcu_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct xvcu_device *xvcu;
+ int ret;
+
+ xvcu = devm_kzalloc(&pdev->dev, sizeof(*xvcu), GFP_KERNEL);
+ if (!xvcu)
+ return -ENOMEM;
+
+ xvcu->dev = &pdev->dev;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vcu_slcr");
+ if (!res) {
+ dev_err(&pdev->dev, "get vcu_slcr memory resource failed.\n");
+ return -ENODEV;
+ }
+
+ xvcu->vcu_slcr_ba = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
+ if (!xvcu->vcu_slcr_ba) {
+ dev_err(&pdev->dev, "vcu_slcr register mapping failed.\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "logicore");
+ if (!res) {
+ dev_err(&pdev->dev, "get logicore memory resource failed.\n");
+ return -ENODEV;
+ }
+
+ xvcu->logicore_reg_ba = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
+ if (!xvcu->logicore_reg_ba) {
+ dev_err(&pdev->dev, "logicore register mapping failed.\n");
+ return -ENOMEM;
+ }
+
+ xvcu->aclk = devm_clk_get(&pdev->dev, "aclk");
+ if (IS_ERR(xvcu->aclk)) {
+ dev_err(&pdev->dev, "Could not get aclk clock\n");
+ return PTR_ERR(xvcu->aclk);
+ }
+
+ xvcu->pll_ref = devm_clk_get(&pdev->dev, "pll_ref");
+ if (IS_ERR(xvcu->pll_ref)) {
+ dev_err(&pdev->dev, "Could not get pll_ref clock\n");
+ return PTR_ERR(xvcu->pll_ref);
+ }
+
+ ret = clk_prepare_enable(xvcu->aclk);
+ if (ret) {
+ dev_err(&pdev->dev, "aclk clock enable failed\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(xvcu->pll_ref);
+ if (ret) {
+ dev_err(&pdev->dev, "pll_ref clock enable failed\n");
+ goto error_aclk;
+ }
+
+ /*
+ * Do the Gasket isolation and put the VCU out of reset
+ * Bit 0 : Gasket isolation
+ * Bit 1 : put VCU out of reset
+ */
+ xvcu_write(xvcu->logicore_reg_ba, VCU_GASKET_INIT, VCU_GASKET_VALUE);
+
+ /* Do the PLL Settings based on the ref clk,core and mcu clk freq */
+ ret = xvcu_set_pll(xvcu);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to set the pll\n");
+ goto error_pll_ref;
+ }
+
+ dev_set_drvdata(&pdev->dev, xvcu);
+
+ dev_info(&pdev->dev, "%s: Probed successfully\n", __func__);
+
+ return 0;
+
+error_pll_ref:
+ clk_disable_unprepare(xvcu->pll_ref);
+error_aclk:
+ clk_disable_unprepare(xvcu->aclk);
+ return ret;
+}
+
+/**
+ * xvcu_remove - Insert gasket isolation
+ * and disable the clock
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: Returns 0 on success
+ * Negative error code otherwise
+ */
+static int xvcu_remove(struct platform_device *pdev)
+{
+ struct xvcu_device *xvcu;
+
+ xvcu = platform_get_drvdata(pdev);
+ if (!xvcu)
+ return -ENODEV;
+
+	/* Add the gasket isolation and put the VCU in reset. */
+ xvcu_write(xvcu->logicore_reg_ba, VCU_GASKET_INIT, 0);
+
+ clk_disable_unprepare(xvcu->pll_ref);
+ clk_disable_unprepare(xvcu->aclk);
+
+ return 0;
+}
+
+static const struct of_device_id xvcu_of_id_table[] = {
+ { .compatible = "xlnx,vcu" },
+ { .compatible = "xlnx,vcu-logicoreip-1.0" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xvcu_of_id_table);
+
+static struct platform_driver xvcu_driver = {
+ .driver = {
+ .name = "xilinx-vcu",
+ .of_match_table = xvcu_of_id_table,
+ },
+ .probe = xvcu_probe,
+ .remove = xvcu_remove,
+};
+
+module_platform_driver(xvcu_driver);
+
+MODULE_AUTHOR("Dhaval Shah <dshah@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx VCU init Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/zte/Kconfig b/drivers/soc/zte/Kconfig
new file mode 100644
index 000000000..e9d750c51
--- /dev/null
+++ b/drivers/soc/zte/Kconfig
@@ -0,0 +1,14 @@
+#
+# ZTE SoC drivers
+#
+menuconfig SOC_ZTE
+ depends on ARCH_ZX || COMPILE_TEST
+ bool "ZTE SoC driver support"
+
+if SOC_ZTE
+
+config ZX2967_PM_DOMAINS
+ bool "ZX2967 PM domains"
+ depends on PM_GENERIC_DOMAINS
+
+endif
diff --git a/drivers/soc/zte/Makefile b/drivers/soc/zte/Makefile
new file mode 100644
index 000000000..96b7cd4c9
--- /dev/null
+++ b/drivers/soc/zte/Makefile
@@ -0,0 +1,5 @@
+#
+# ZTE SOC drivers
+#
+obj-$(CONFIG_ZX2967_PM_DOMAINS) += zx2967_pm_domains.o
+obj-$(CONFIG_ZX2967_PM_DOMAINS) += zx296718_pm_domains.o
diff --git a/drivers/soc/zte/zx296718_pm_domains.c b/drivers/soc/zte/zx296718_pm_domains.c
new file mode 100644
index 000000000..4dc5d62ee
--- /dev/null
+++ b/drivers/soc/zte/zx296718_pm_domains.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2017 ZTE Ltd.
+ *
+ * Author: Baoyou Xie <baoyou.xie@linaro.org>
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <dt-bindings/soc/zte,pm_domains.h>
+#include "zx2967_pm_domains.h"
+
+static u16 zx296718_offsets[REG_ARRAY_SIZE] = {
+ [REG_CLKEN] = 0x18,
+ [REG_ISOEN] = 0x1c,
+ [REG_RSTEN] = 0x20,
+ [REG_PWREN] = 0x24,
+ [REG_ACK_SYNC] = 0x28,
+};
+
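+/*
+ * Bit position of each power domain in the zx296718 PCU control
+ * registers; the zx2967 helpers apply these via BIT().
+ */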
+enum {
+ PCU_DM_VOU = 0,
+ PCU_DM_SAPPU,
+ PCU_DM_VDE,
+ PCU_DM_VCE,
+ PCU_DM_HDE,
+ PCU_DM_VIU,
+ PCU_DM_USB20,
+ PCU_DM_USB21,
+ PCU_DM_USB30,
+ PCU_DM_HSIC,
+ PCU_DM_GMAC,
+ PCU_DM_TS,
+};
+
+static struct zx2967_pm_domain vou_domain = {
+ .dm = {
+ .name = "vou_domain",
+ },
+ .bit = PCU_DM_VOU,
+ .polarity = PWREN,
+ .reg_offset = zx296718_offsets,
+};
+
+static struct zx2967_pm_domain sappu_domain = {
+ .dm = {
+ .name = "sappu_domain",
+ },
+ .bit = PCU_DM_SAPPU,
+ .polarity = PWREN,
+ .reg_offset = zx296718_offsets,
+};
+
+static struct zx2967_pm_domain vde_domain = {
+ .dm = {
+ .name = "vde_domain",
+ },
+ .bit = PCU_DM_VDE,
+ .polarity = PWREN,
+ .reg_offset = zx296718_offsets,
+};
+
+static struct zx2967_pm_domain vce_domain = {
+ .dm = {
+ .name = "vce_domain",
+ },
+ .bit = PCU_DM_VCE,
+ .polarity = PWREN,
+ .reg_offset = zx296718_offsets,
+};
+
+static struct zx2967_pm_domain hde_domain = {
+ .dm = {
+ .name = "hde_domain",
+ },
+ .bit = PCU_DM_HDE,
+ .polarity = PWREN,
+ .reg_offset = zx296718_offsets,
+};
+
+static struct zx2967_pm_domain viu_domain = {
+ .dm = {
+ .name = "viu_domain",
+ },
+ .bit = PCU_DM_VIU,
+ .polarity = PWREN,
+ .reg_offset = zx296718_offsets,
+};
+
+static struct zx2967_pm_domain usb20_domain = {
+ .dm = {
+ .name = "usb20_domain",
+ },
+ .bit = PCU_DM_USB20,
+ .polarity = PWREN,
+ .reg_offset = zx296718_offsets,
+};
+
+static struct zx2967_pm_domain usb21_domain = {
+ .dm = {
+ .name = "usb21_domain",
+ },
+ .bit = PCU_DM_USB21,
+ .polarity = PWREN,
+ .reg_offset = zx296718_offsets,
+};
+
+static struct zx2967_pm_domain usb30_domain = {
+ .dm = {
+ .name = "usb30_domain",
+ },
+ .bit = PCU_DM_USB30,
+ .polarity = PWREN,
+ .reg_offset = zx296718_offsets,
+};
+
+static struct zx2967_pm_domain hsic_domain = {
+ .dm = {
+ .name = "hsic_domain",
+ },
+ .bit = PCU_DM_HSIC,
+ .polarity = PWREN,
+ .reg_offset = zx296718_offsets,
+};
+
+static struct zx2967_pm_domain gmac_domain = {
+ .dm = {
+ .name = "gmac_domain",
+ },
+ .bit = PCU_DM_GMAC,
+ .polarity = PWREN,
+ .reg_offset = zx296718_offsets,
+};
+
+static struct zx2967_pm_domain ts_domain = {
+ .dm = {
+ .name = "ts_domain",
+ },
+ .bit = PCU_DM_TS,
+ .polarity = PWREN,
+ .reg_offset = zx296718_offsets,
+};
+
+static struct generic_pm_domain *zx296718_pm_domains[] = {
+ [DM_ZX296718_VOU] = &vou_domain.dm,
+ [DM_ZX296718_SAPPU] = &sappu_domain.dm,
+ [DM_ZX296718_VDE] = &vde_domain.dm,
+ [DM_ZX296718_VCE] = &vce_domain.dm,
+ [DM_ZX296718_HDE] = &hde_domain.dm,
+ [DM_ZX296718_VIU] = &viu_domain.dm,
+ [DM_ZX296718_USB20] = &usb20_domain.dm,
+ [DM_ZX296718_USB21] = &usb21_domain.dm,
+ [DM_ZX296718_USB30] = &usb30_domain.dm,
+ [DM_ZX296718_HSIC] = &hsic_domain.dm,
+ [DM_ZX296718_GMAC] = &gmac_domain.dm,
+ [DM_ZX296718_TS] = &ts_domain.dm,
+};
+
+static int zx296718_pd_probe(struct platform_device *pdev)
+{
+ return zx2967_pd_probe(pdev,
+ zx296718_pm_domains,
+ ARRAY_SIZE(zx296718_pm_domains));
+}
+
+static const struct of_device_id zx296718_pm_domain_matches[] = {
+ { .compatible = "zte,zx296718-pcu", },
+ { },
+};
+
+static struct platform_driver zx296718_pd_driver = {
+ .driver = {
+ .name = "zx296718-powerdomain",
+ .of_match_table = zx296718_pm_domain_matches,
+ },
+ .probe = zx296718_pd_probe,
+};
+
+static int __init zx296718_pd_init(void)
+{
+ return platform_driver_register(&zx296718_pd_driver);
+}
+subsys_initcall(zx296718_pd_init);
diff --git a/drivers/soc/zte/zx2967_pm_domains.c b/drivers/soc/zte/zx2967_pm_domains.c
new file mode 100644
index 000000000..c42aeaaa3
--- /dev/null
+++ b/drivers/soc/zte/zx2967_pm_domains.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2017 ZTE Ltd.
+ *
+ * Author: Baoyou Xie <baoyou.xie@linaro.org>
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+
+#include "zx2967_pm_domains.h"
+
+#define PCU_DM_CLKEN(zpd) ((zpd)->reg_offset[REG_CLKEN])
+#define PCU_DM_ISOEN(zpd) ((zpd)->reg_offset[REG_ISOEN])
+#define PCU_DM_RSTEN(zpd) ((zpd)->reg_offset[REG_RSTEN])
+#define PCU_DM_PWREN(zpd) ((zpd)->reg_offset[REG_PWREN])
+#define PCU_DM_ACK_SYNC(zpd) ((zpd)->reg_offset[REG_ACK_SYNC])
+
+static void __iomem *pcubase;
+
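+/*
+ * Power-on sequence: toggle the domain's PWREN bit according to its
+ * polarity, poll ACK_SYNC for the acknowledge, then set RSTEN, clear
+ * ISOEN and set CLKEN with short settle delays in between.
+ * zx2967_power_off() runs the same steps in reverse order.
+ */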
+static int zx2967_power_on(struct generic_pm_domain *domain)
+{
+ struct zx2967_pm_domain *zpd = (struct zx2967_pm_domain *)domain;
+ unsigned long loop = 1000;
+ u32 val;
+
+ val = readl_relaxed(pcubase + PCU_DM_PWREN(zpd));
+ if (zpd->polarity == PWREN)
+ val |= BIT(zpd->bit);
+ else
+ val &= ~BIT(zpd->bit);
+ writel_relaxed(val, pcubase + PCU_DM_PWREN(zpd));
+
+ do {
+ udelay(1);
+ val = readl_relaxed(pcubase + PCU_DM_ACK_SYNC(zpd))
+ & BIT(zpd->bit);
+ } while (--loop && !val);
+
+ if (!loop) {
+ pr_err("Error: %s %s fail\n", __func__, domain->name);
+ return -EIO;
+ }
+
+ val = readl_relaxed(pcubase + PCU_DM_RSTEN(zpd));
+ val |= BIT(zpd->bit);
+ writel_relaxed(val, pcubase + PCU_DM_RSTEN(zpd));
+ udelay(5);
+
+ val = readl_relaxed(pcubase + PCU_DM_ISOEN(zpd));
+ val &= ~BIT(zpd->bit);
+ writel_relaxed(val, pcubase + PCU_DM_ISOEN(zpd));
+ udelay(5);
+
+ val = readl_relaxed(pcubase + PCU_DM_CLKEN(zpd));
+ val |= BIT(zpd->bit);
+ writel_relaxed(val, pcubase + PCU_DM_CLKEN(zpd));
+ udelay(5);
+
+ pr_debug("poweron %s\n", domain->name);
+
+ return 0;
+}
+
+static int zx2967_power_off(struct generic_pm_domain *domain)
+{
+ struct zx2967_pm_domain *zpd = (struct zx2967_pm_domain *)domain;
+ unsigned long loop = 1000;
+ u32 val;
+
+ val = readl_relaxed(pcubase + PCU_DM_CLKEN(zpd));
+ val &= ~BIT(zpd->bit);
+ writel_relaxed(val, pcubase + PCU_DM_CLKEN(zpd));
+ udelay(5);
+
+ val = readl_relaxed(pcubase + PCU_DM_ISOEN(zpd));
+ val |= BIT(zpd->bit);
+ writel_relaxed(val, pcubase + PCU_DM_ISOEN(zpd));
+ udelay(5);
+
+ val = readl_relaxed(pcubase + PCU_DM_RSTEN(zpd));
+ val &= ~BIT(zpd->bit);
+ writel_relaxed(val, pcubase + PCU_DM_RSTEN(zpd));
+ udelay(5);
+
+ val = readl_relaxed(pcubase + PCU_DM_PWREN(zpd));
+ if (zpd->polarity == PWREN)
+ val &= ~BIT(zpd->bit);
+ else
+ val |= BIT(zpd->bit);
+ writel_relaxed(val, pcubase + PCU_DM_PWREN(zpd));
+
+ do {
+ udelay(1);
+ val = readl_relaxed(pcubase + PCU_DM_ACK_SYNC(zpd))
+ & BIT(zpd->bit);
+ } while (--loop && val);
+
+ if (!loop) {
+ pr_err("Error: %s %s fail\n", __func__, domain->name);
+ return -EIO;
+ }
+
+ pr_debug("poweroff %s\n", domain->name);
+
+ return 0;
+}
+
+int zx2967_pd_probe(struct platform_device *pdev,
+ struct generic_pm_domain **zx_pm_domains,
+ int domain_num)
+{
+ struct genpd_onecell_data *genpd_data;
+ struct resource *res;
+ int i;
+
+ genpd_data = devm_kzalloc(&pdev->dev, sizeof(*genpd_data), GFP_KERNEL);
+ if (!genpd_data)
+ return -ENOMEM;
+
+ genpd_data->domains = zx_pm_domains;
+ genpd_data->num_domains = domain_num;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pcubase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pcubase))
+ return PTR_ERR(pcubase);
+
+ for (i = 0; i < domain_num; ++i) {
+ zx_pm_domains[i]->power_on = zx2967_power_on;
+ zx_pm_domains[i]->power_off = zx2967_power_off;
+
+ pm_genpd_init(zx_pm_domains[i], NULL, false);
+ }
+
+ of_genpd_add_provider_onecell(pdev->dev.of_node, genpd_data);
+ dev_info(&pdev->dev, "powerdomain init ok\n");
+ return 0;
+}
diff --git a/drivers/soc/zte/zx2967_pm_domains.h b/drivers/soc/zte/zx2967_pm_domains.h
new file mode 100644
index 000000000..cb46595a7
--- /dev/null
+++ b/drivers/soc/zte/zx2967_pm_domains.h
@@ -0,0 +1,44 @@
+/*
+ * Header for ZTE's Power Domain Driver support
+ *
+ * Copyright (C) 2017 ZTE Ltd.
+ *
+ * Author: Baoyou Xie <baoyou.xie@linaro.org>
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef __ZTE_ZX2967_PM_DOMAIN_H
+#define __ZTE_ZX2967_PM_DOMAIN_H
+
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+
+enum {
+ REG_CLKEN,
+ REG_ISOEN,
+ REG_RSTEN,
+ REG_PWREN,
+ REG_PWRDN,
+ REG_ACK_SYNC,
+
+ /* The size of the array - must be last */
+ REG_ARRAY_SIZE,
+};
+
+enum zx2967_power_polarity {
+ PWREN,
+ PWRDN,
+};
+
+struct zx2967_pm_domain {
+ struct generic_pm_domain dm;
+ const u16 bit;
+ const enum zx2967_power_polarity polarity;
+ const u16 *reg_offset;
+};
+
+int zx2967_pd_probe(struct platform_device *pdev,
+ struct generic_pm_domain **zx_pm_domains,
+ int domain_num);
+
+#endif /* __ZTE_ZX2967_PM_DOMAIN_H */