path: root/drivers/soc
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit    ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree      b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/soc
parent    Initial commit. (diff)
Adding upstream version 6.6.15. (upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/soc')
-rw-r--r-- drivers/soc/Kconfig | 35
-rw-r--r-- drivers/soc/Makefile | 37
-rw-r--r-- drivers/soc/actions/Kconfig | 17
-rw-r--r-- drivers/soc/amlogic/Kconfig | 74
-rw-r--r-- drivers/soc/amlogic/Makefile | 5
-rw-r--r-- drivers/soc/amlogic/meson-canvas.c | 212
-rw-r--r-- drivers/soc/amlogic/meson-clk-measure.c | 691
-rw-r--r-- drivers/soc/amlogic/meson-gx-socinfo.c | 204
-rw-r--r-- drivers/soc/amlogic/meson-mx-socinfo.c | 176
-rw-r--r-- drivers/soc/apple/Kconfig | 46
-rw-r--r-- drivers/soc/apple/Makefile | 6
-rw-r--r-- drivers/soc/apple/rtkit-crashlog.c | 247
-rw-r--r-- drivers/soc/apple/rtkit-internal.h | 62
-rw-r--r-- drivers/soc/apple/rtkit.c | 1003
-rw-r--r-- drivers/soc/apple/sart.c | 333
-rw-r--r-- drivers/soc/aspeed/Kconfig | 57
-rw-r--r-- drivers/soc/aspeed/Makefile | 6
-rw-r--r-- drivers/soc/aspeed/aspeed-lpc-ctrl.c | 366
-rw-r--r-- drivers/soc/aspeed/aspeed-lpc-snoop.c | 379
-rw-r--r-- drivers/soc/aspeed/aspeed-p2a-ctrl.c | 444
-rw-r--r-- drivers/soc/aspeed/aspeed-socinfo.c | 154
-rw-r--r-- drivers/soc/aspeed/aspeed-uart-routing.c | 602
-rw-r--r-- drivers/soc/atmel/Kconfig | 18
-rw-r--r-- drivers/soc/atmel/Makefile | 3
-rw-r--r-- drivers/soc/atmel/sfr.c | 98
-rw-r--r-- drivers/soc/atmel/soc.c | 388
-rw-r--r-- drivers/soc/atmel/soc.h | 142
-rw-r--r-- drivers/soc/bcm/Kconfig | 68
-rw-r--r-- drivers/soc/bcm/Makefile | 2
-rw-r--r-- drivers/soc/bcm/brcmstb/Kconfig | 9
-rw-r--r-- drivers/soc/bcm/brcmstb/Makefile | 3
-rw-r--r-- drivers/soc/bcm/brcmstb/biuctrl.c | 364
-rw-r--r-- drivers/soc/bcm/brcmstb/common.c | 113
-rw-r--r-- drivers/soc/bcm/brcmstb/pm/Makefile | 2
-rw-r--r-- drivers/soc/bcm/brcmstb/pm/pm-mips.c | 456
-rw-r--r-- drivers/soc/bcm/brcmstb/pm/pm.h | 81
-rw-r--r-- drivers/soc/bcm/brcmstb/pm/s2-mips.S | 192
-rw-r--r-- drivers/soc/bcm/brcmstb/pm/s3-mips.S | 138
-rw-r--r-- drivers/soc/canaan/Kconfig | 11
-rw-r--r-- drivers/soc/canaan/Makefile | 3
-rw-r--r-- drivers/soc/canaan/k210-sysctl.c | 78
-rw-r--r-- drivers/soc/dove/Makefile | 2
-rw-r--r-- drivers/soc/dove/pmu.c | 455
-rw-r--r-- drivers/soc/fsl/Kconfig | 55
-rw-r--r-- drivers/soc/fsl/Makefile | 12
-rw-r--r-- drivers/soc/fsl/dpaa2-console.c | 331
-rw-r--r-- drivers/soc/fsl/dpio/Makefile | 8
-rw-r--r-- drivers/soc/fsl/dpio/dpio-cmd.h | 58
-rw-r--r-- drivers/soc/fsl/dpio/dpio-driver.c | 337
-rw-r--r-- drivers/soc/fsl/dpio/dpio-service.c | 898
-rw-r--r-- drivers/soc/fsl/dpio/dpio.c | 238
-rw-r--r-- drivers/soc/fsl/dpio/dpio.h | 94
-rw-r--r-- drivers/soc/fsl/dpio/qbman-portal.c | 1853
-rw-r--r-- drivers/soc/fsl/dpio/qbman-portal.h | 664
-rw-r--r-- drivers/soc/fsl/guts.c | 279
-rw-r--r-- drivers/soc/fsl/qbman/Kconfig | 68
-rw-r--r-- drivers/soc/fsl/qbman/Makefile | 13
-rw-r--r-- drivers/soc/fsl/qbman/bman.c | 819
-rw-r--r-- drivers/soc/fsl/qbman/bman_ccsr.c | 320
-rw-r--r-- drivers/soc/fsl/qbman/bman_portal.c | 244
-rw-r--r-- drivers/soc/fsl/qbman/bman_priv.h | 83
-rw-r--r-- drivers/soc/fsl/qbman/bman_test.c | 53
-rw-r--r-- drivers/soc/fsl/qbman/bman_test.h | 35
-rw-r--r-- drivers/soc/fsl/qbman/bman_test_api.c | 151
-rw-r--r-- drivers/soc/fsl/qbman/dpaa_sys.c | 89
-rw-r--r-- drivers/soc/fsl/qbman/dpaa_sys.h | 134
-rw-r--r-- drivers/soc/fsl/qbman/qman.c | 3053
-rw-r--r-- drivers/soc/fsl/qbman/qman_ccsr.c | 917
-rw-r--r-- drivers/soc/fsl/qbman/qman_portal.c | 342
-rw-r--r-- drivers/soc/fsl/qbman/qman_priv.h | 282
-rw-r--r-- drivers/soc/fsl/qbman/qman_test.c | 62
-rw-r--r-- drivers/soc/fsl/qbman/qman_test.h | 34
-rw-r--r-- drivers/soc/fsl/qbman/qman_test_api.c | 247
-rw-r--r-- drivers/soc/fsl/qbman/qman_test_stash.c | 629
-rw-r--r-- drivers/soc/fsl/qe/Kconfig | 68
-rw-r--r-- drivers/soc/fsl/qe/Makefile | 14
-rw-r--r-- drivers/soc/fsl/qe/gpio.c | 335
-rw-r--r-- drivers/soc/fsl/qe/qe.c | 682
-rw-r--r-- drivers/soc/fsl/qe/qe_common.c | 250
-rw-r--r-- drivers/soc/fsl/qe/qe_ic.c | 487
-rw-r--r-- drivers/soc/fsl/qe/qe_io.c | 186
-rw-r--r-- drivers/soc/fsl/qe/qe_tdm.c | 217
-rw-r--r-- drivers/soc/fsl/qe/qmc.c | 1536
-rw-r--r-- drivers/soc/fsl/qe/tsa.c | 846
-rw-r--r-- drivers/soc/fsl/qe/tsa.h | 42
-rw-r--r-- drivers/soc/fsl/qe/ucc.c | 657
-rw-r--r-- drivers/soc/fsl/qe/ucc_fast.c | 395
-rw-r--r-- drivers/soc/fsl/qe/ucc_slow.c | 359
-rw-r--r-- drivers/soc/fsl/qe/usb.c | 52
-rw-r--r-- drivers/soc/fsl/rcpm.c | 199
-rw-r--r-- drivers/soc/fujitsu/Kconfig | 16
-rw-r--r-- drivers/soc/fujitsu/Makefile | 3
-rw-r--r-- drivers/soc/fujitsu/a64fx-diag.c | 153
-rw-r--r-- drivers/soc/gemini/Makefile | 2
-rw-r--r-- drivers/soc/gemini/soc-gemini.c | 71
-rw-r--r-- drivers/soc/hisilicon/Kconfig | 21
-rw-r--r-- drivers/soc/hisilicon/Makefile | 2
-rw-r--r-- drivers/soc/hisilicon/kunpeng_hccs.c | 1276
-rw-r--r-- drivers/soc/hisilicon/kunpeng_hccs.h | 191
-rw-r--r-- drivers/soc/imx/Kconfig | 42
-rw-r--r-- drivers/soc/imx/Makefile | 6
-rw-r--r-- drivers/soc/imx/imx93-src.c | 32
-rw-r--r-- drivers/soc/imx/soc-imx.c | 211
-rw-r--r-- drivers/soc/imx/soc-imx8m.c | 255
-rw-r--r-- drivers/soc/ixp4xx/Kconfig | 22
-rw-r--r-- drivers/soc/ixp4xx/Makefile | 3
-rw-r--r-- drivers/soc/ixp4xx/ixp4xx-npe.c | 781
-rw-r--r-- drivers/soc/ixp4xx/ixp4xx-qmgr.c | 487
-rw-r--r-- drivers/soc/lantiq/Makefile | 2
-rw-r--r-- drivers/soc/lantiq/fpi-bus.c | 83
-rw-r--r-- drivers/soc/litex/Kconfig | 20
-rw-r--r-- drivers/soc/litex/Makefile | 3
-rw-r--r-- drivers/soc/litex/litex_soc_ctrl.c | 143
-rw-r--r-- drivers/soc/loongson/Kconfig | 29
-rw-r--r-- drivers/soc/loongson/Makefile | 7
-rw-r--r-- drivers/soc/loongson/loongson2_guts.c | 190
-rw-r--r-- drivers/soc/loongson/loongson2_pm.c | 220
-rw-r--r-- drivers/soc/mediatek/Kconfig | 94
-rw-r--r-- drivers/soc/mediatek/Makefile | 9
-rw-r--r-- drivers/soc/mediatek/mt8167-mmsys.h | 35
-rw-r--r-- drivers/soc/mediatek/mt8173-mmsys.h | 95
-rw-r--r-- drivers/soc/mediatek/mt8183-mmsys.h | 63
-rw-r--r-- drivers/soc/mediatek/mt8186-mmsys.h | 123
-rw-r--r-- drivers/soc/mediatek/mt8188-mmsys.h | 149
-rw-r--r-- drivers/soc/mediatek/mt8192-mmsys.h | 77
-rw-r--r-- drivers/soc/mediatek/mt8195-mmsys.h | 529
-rw-r--r-- drivers/soc/mediatek/mt8365-mmsys.h | 82
-rw-r--r-- drivers/soc/mediatek/mtk-cmdq-helper.c | 444
-rw-r--r-- drivers/soc/mediatek/mtk-devapc.c | 317
-rw-r--r-- drivers/soc/mediatek/mtk-infracfg.c | 93
-rw-r--r-- drivers/soc/mediatek/mtk-mmsys.c | 458
-rw-r--r-- drivers/soc/mediatek/mtk-mmsys.h | 275
-rw-r--r-- drivers/soc/mediatek/mtk-mutex.c | 1060
-rw-r--r-- drivers/soc/mediatek/mtk-pmic-wrap.c | 2675
-rw-r--r-- drivers/soc/mediatek/mtk-regulator-coupler.c | 159
-rw-r--r-- drivers/soc/mediatek/mtk-svs.c | 2434
-rw-r--r-- drivers/soc/microchip/Kconfig | 10
-rw-r--r-- drivers/soc/microchip/Makefile | 1
-rw-r--r-- drivers/soc/microchip/mpfs-sys-controller.c | 216
-rw-r--r-- drivers/soc/nuvoton/Kconfig | 11
-rw-r--r-- drivers/soc/nuvoton/Makefile | 2
-rw-r--r-- drivers/soc/nuvoton/wpcm450-soc.c | 109
-rw-r--r-- drivers/soc/pxa/Kconfig | 8
-rw-r--r-- drivers/soc/pxa/Makefile | 6
-rw-r--r-- drivers/soc/pxa/mfp.c | 282
-rw-r--r-- drivers/soc/pxa/ssp.c | 225
-rw-r--r-- drivers/soc/qcom/Kconfig | 294
-rw-r--r-- drivers/soc/qcom/Makefile | 34
-rw-r--r-- drivers/soc/qcom/apr.c | 736
-rw-r--r-- drivers/soc/qcom/cmd-db.c | 368
-rw-r--r-- drivers/soc/qcom/icc-bwmon.c | 875
-rw-r--r-- drivers/soc/qcom/ice.c | 368
-rw-r--r-- drivers/soc/qcom/kryo-l2-accessors.c | 57
-rw-r--r-- drivers/soc/qcom/llcc-qcom.c | 1083
-rw-r--r-- drivers/soc/qcom/mdt_loader.c | 449
-rw-r--r-- drivers/soc/qcom/ocmem.c | 459
-rw-r--r-- drivers/soc/qcom/pdr_interface.c | 755
-rw-r--r-- drivers/soc/qcom/pdr_internal.h | 379
-rw-r--r-- drivers/soc/qcom/pmic_glink.c | 379
-rw-r--r-- drivers/soc/qcom/pmic_glink_altmode.c | 551
-rw-r--r-- drivers/soc/qcom/qcom-geni-se.c | 982
-rw-r--r-- drivers/soc/qcom/qcom_aoss.c | 573
-rw-r--r-- drivers/soc/qcom/qcom_gsbi.c | 244
-rw-r--r-- drivers/soc/qcom/qcom_stats.c | 295
-rw-r--r-- drivers/soc/qcom/qmi_encdec.c | 816
-rw-r--r-- drivers/soc/qcom/qmi_interface.c | 854
-rw-r--r-- drivers/soc/qcom/ramp_controller.c | 346
-rw-r--r-- drivers/soc/qcom/rmtfs_mem.c | 359
-rw-r--r-- drivers/soc/qcom/rpm-proc.c | 77
-rw-r--r-- drivers/soc/qcom/rpm_master_stats.c | 163
-rw-r--r-- drivers/soc/qcom/rpmh-internal.h | 148
-rw-r--r-- drivers/soc/qcom/rpmh-rsc.c | 1160
-rw-r--r-- drivers/soc/qcom/rpmh.c | 503
-rw-r--r-- drivers/soc/qcom/smd-rpm.c | 249
-rw-r--r-- drivers/soc/qcom/smem.c | 1230
-rw-r--r-- drivers/soc/qcom/smem_state.c | 230
-rw-r--r-- drivers/soc/qcom/smp2p.c | 700
-rw-r--r-- drivers/soc/qcom/smsm.c | 647
-rw-r--r-- drivers/soc/qcom/socinfo.c | 802
-rw-r--r-- drivers/soc/qcom/spm.c | 335
-rw-r--r-- drivers/soc/qcom/trace-rpmh.h | 87
-rw-r--r-- drivers/soc/qcom/wcnss_ctrl.c | 366
-rw-r--r-- drivers/soc/renesas/Kconfig | 461
-rw-r--r-- drivers/soc/renesas/Makefile | 12
-rw-r--r-- drivers/soc/renesas/pwc-rzv2m.c | 141
-rw-r--r-- drivers/soc/renesas/r9a06g032-smp.c | 96
-rw-r--r-- drivers/soc/renesas/rcar-rst.c | 184
-rw-r--r-- drivers/soc/renesas/renesas-soc.c | 539
-rw-r--r-- drivers/soc/rockchip/Kconfig | 45
-rw-r--r-- drivers/soc/rockchip/Makefile | 7
-rw-r--r-- drivers/soc/rockchip/dtpm.c | 65
-rw-r--r-- drivers/soc/rockchip/grf.c | 208
-rw-r--r-- drivers/soc/rockchip/io-domain.c | 720
-rw-r--r-- drivers/soc/samsung/Kconfig | 81
-rw-r--r-- drivers/soc/samsung/Makefile | 15
-rw-r--r-- drivers/soc/samsung/exynos-asv.c | 160
-rw-r--r-- drivers/soc/samsung/exynos-asv.h | 73
-rw-r--r-- drivers/soc/samsung/exynos-chipid.c | 209
-rw-r--r-- drivers/soc/samsung/exynos-pmu.c | 173
-rw-r--r-- drivers/soc/samsung/exynos-pmu.h | 44
-rw-r--r-- drivers/soc/samsung/exynos-regulator-coupler.c | 221
-rw-r--r-- drivers/soc/samsung/exynos-usi.c | 285
-rw-r--r-- drivers/soc/samsung/exynos3250-pmu.c | 171
-rw-r--r-- drivers/soc/samsung/exynos4-pmu.c | 218
-rw-r--r-- drivers/soc/samsung/exynos5250-pmu.c | 191
-rw-r--r-- drivers/soc/samsung/exynos5420-pmu.c | 276
-rw-r--r-- drivers/soc/samsung/exynos5422-asv.c | 506
-rw-r--r-- drivers/soc/samsung/exynos5422-asv.h | 31
-rw-r--r-- drivers/soc/samsung/s3c-pm-check.c | 233
-rw-r--r-- drivers/soc/sifive/Kconfig | 10
-rw-r--r-- drivers/soc/sifive/Makefile | 3
-rw-r--r-- drivers/soc/sifive/sifive_ccache.c | 272
-rw-r--r-- drivers/soc/starfive/Kconfig | 12
-rw-r--r-- drivers/soc/sunxi/Kconfig | 30
-rw-r--r-- drivers/soc/sunxi/Makefile | 3
-rw-r--r-- drivers/soc/sunxi/sunxi_mbus.c | 127
-rw-r--r-- drivers/soc/sunxi/sunxi_sram.c | 426
-rw-r--r-- drivers/soc/tegra/Kconfig | 177
-rw-r--r-- drivers/soc/tegra/Makefile | 10
-rw-r--r-- drivers/soc/tegra/ari-tegra186.c | 80
-rw-r--r-- drivers/soc/tegra/cbb/Makefile | 9
-rw-r--r-- drivers/soc/tegra/cbb/tegra-cbb.c | 170
-rw-r--r-- drivers/soc/tegra/cbb/tegra194-cbb.c | 2356
-rw-r--r-- drivers/soc/tegra/cbb/tegra234-cbb.c | 1210
-rw-r--r-- drivers/soc/tegra/common.c | 170
-rw-r--r-- drivers/soc/tegra/flowctrl.c | 226
-rw-r--r-- drivers/soc/tegra/fuse/Makefile | 11
-rw-r--r-- drivers/soc/tegra/fuse/fuse-tegra.c | 541
-rw-r--r-- drivers/soc/tegra/fuse/fuse-tegra20.c | 198
-rw-r--r-- drivers/soc/tegra/fuse/fuse-tegra30.c | 680
-rw-r--r-- drivers/soc/tegra/fuse/fuse.h | 138
-rw-r--r-- drivers/soc/tegra/fuse/speedo-tegra114.c | 99
-rw-r--r-- drivers/soc/tegra/fuse/speedo-tegra124.c | 148
-rw-r--r-- drivers/soc/tegra/fuse/speedo-tegra20.c | 99
-rw-r--r-- drivers/soc/tegra/fuse/speedo-tegra210.c | 169
-rw-r--r-- drivers/soc/tegra/fuse/speedo-tegra30.c | 277
-rw-r--r-- drivers/soc/tegra/fuse/tegra-apbmisc.c | 241
-rw-r--r-- drivers/soc/tegra/pmc.c | 4440
-rw-r--r-- drivers/soc/tegra/regulators-tegra20.c | 560
-rw-r--r-- drivers/soc/tegra/regulators-tegra30.c | 534
-rw-r--r-- drivers/soc/ti/Kconfig | 103
-rw-r--r-- drivers/soc/ti/Makefile | 14
-rw-r--r-- drivers/soc/ti/k3-ringacc.c | 1577
-rw-r--r-- drivers/soc/ti/k3-socinfo.c | 159
-rw-r--r-- drivers/soc/ti/knav_dma.c | 811
-rw-r--r-- drivers/soc/ti/knav_qmss.h | 387
-rw-r--r-- drivers/soc/ti/knav_qmss_acc.c | 584
-rw-r--r-- drivers/soc/ti/knav_qmss_queue.c | 1908
-rw-r--r-- drivers/soc/ti/pm33xx.c | 611
-rw-r--r-- drivers/soc/ti/pruss.c | 619
-rw-r--r-- drivers/soc/ti/pruss.h | 88
-rw-r--r-- drivers/soc/ti/smartreflex.c | 1005
-rw-r--r-- drivers/soc/ti/ti_sci_inta_msi.c | 121
-rw-r--r-- drivers/soc/ti/wkup_m3_ipc.c | 775
-rw-r--r-- drivers/soc/ux500/Kconfig | 8
-rw-r--r-- drivers/soc/ux500/Makefile | 2
-rw-r--r-- drivers/soc/ux500/ux500-soc-id.c | 225
-rw-r--r-- drivers/soc/versatile/Kconfig | 20
-rw-r--r-- drivers/soc/versatile/Makefile | 3
-rw-r--r-- drivers/soc/versatile/soc-integrator.c | 149
-rw-r--r-- drivers/soc/versatile/soc-realview.c | 132
-rw-r--r-- drivers/soc/xilinx/Kconfig | 38
-rw-r--r-- drivers/soc/xilinx/Makefile | 3
-rw-r--r-- drivers/soc/xilinx/xlnx_event_manager.c | 704
-rw-r--r-- drivers/soc/xilinx/zynqmp_power.c | 304
265 files changed, 87915 insertions, 0 deletions
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
new file mode 100644
index 0000000000..d21e75d692
--- /dev/null
+++ b/drivers/soc/Kconfig
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "SOC (System On Chip) specific Drivers"
+
+source "drivers/soc/actions/Kconfig"
+source "drivers/soc/amlogic/Kconfig"
+source "drivers/soc/apple/Kconfig"
+source "drivers/soc/aspeed/Kconfig"
+source "drivers/soc/atmel/Kconfig"
+source "drivers/soc/bcm/Kconfig"
+source "drivers/soc/canaan/Kconfig"
+source "drivers/soc/fsl/Kconfig"
+source "drivers/soc/fujitsu/Kconfig"
+source "drivers/soc/hisilicon/Kconfig"
+source "drivers/soc/imx/Kconfig"
+source "drivers/soc/ixp4xx/Kconfig"
+source "drivers/soc/litex/Kconfig"
+source "drivers/soc/loongson/Kconfig"
+source "drivers/soc/mediatek/Kconfig"
+source "drivers/soc/microchip/Kconfig"
+source "drivers/soc/nuvoton/Kconfig"
+source "drivers/soc/pxa/Kconfig"
+source "drivers/soc/qcom/Kconfig"
+source "drivers/soc/renesas/Kconfig"
+source "drivers/soc/rockchip/Kconfig"
+source "drivers/soc/samsung/Kconfig"
+source "drivers/soc/sifive/Kconfig"
+source "drivers/soc/starfive/Kconfig"
+source "drivers/soc/sunxi/Kconfig"
+source "drivers/soc/tegra/Kconfig"
+source "drivers/soc/ti/Kconfig"
+source "drivers/soc/ux500/Kconfig"
+source "drivers/soc/versatile/Kconfig"
+source "drivers/soc/xilinx/Kconfig"
+
+endmenu
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
new file mode 100644
index 0000000000..0706a27d13
--- /dev/null
+++ b/drivers/soc/Makefile
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Linux Kernel SOC specific device drivers.
+#
+
+obj-y += apple/
+obj-y += aspeed/
+obj-$(CONFIG_ARCH_AT91) += atmel/
+obj-y += bcm/
+obj-$(CONFIG_SOC_CANAAN) += canaan/
+obj-$(CONFIG_ARCH_DOVE) += dove/
+obj-$(CONFIG_MACH_DOVE) += dove/
+obj-y += fsl/
+obj-y += fujitsu/
+obj-$(CONFIG_ARCH_GEMINI) += gemini/
+obj-y += hisilicon/
+obj-y += imx/
+obj-y += ixp4xx/
+obj-$(CONFIG_SOC_XWAY) += lantiq/
+obj-$(CONFIG_LITEX_SOC_CONTROLLER) += litex/
+obj-y += loongson/
+obj-y += mediatek/
+obj-y += microchip/
+obj-y += nuvoton/
+obj-y += pxa/
+obj-y += amlogic/
+obj-y += qcom/
+obj-y += renesas/
+obj-y += rockchip/
+obj-$(CONFIG_SOC_SAMSUNG) += samsung/
+obj-y += sifive/
+obj-y += sunxi/
+obj-$(CONFIG_ARCH_TEGRA) += tegra/
+obj-y += ti/
+obj-$(CONFIG_ARCH_U8500) += ux500/
+obj-$(CONFIG_PLAT_VERSATILE) += versatile/
+obj-y += xilinx/
diff --git a/drivers/soc/actions/Kconfig b/drivers/soc/actions/Kconfig
new file mode 100644
index 0000000000..1aca2058a4
--- /dev/null
+++ b/drivers/soc/actions/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only
+if ARCH_ACTIONS || COMPILE_TEST
+
+config OWL_PM_DOMAINS_HELPER
+ bool
+
+config OWL_PM_DOMAINS
+ bool "Actions Semi SPS power domains"
+ depends on PM
+ select OWL_PM_DOMAINS_HELPER
+ select PM_GENERIC_DOMAINS
+ help
+ Say 'y' here to enable support for Smart Power System (SPS)
+ power-gating on Actions Semiconductor S500, S700 and S900 SoCs.
+ If unsure, say 'n'.
+
+endif
diff --git a/drivers/soc/amlogic/Kconfig b/drivers/soc/amlogic/Kconfig
new file mode 100644
index 0000000000..174a9b0114
--- /dev/null
+++ b/drivers/soc/amlogic/Kconfig
@@ -0,0 +1,74 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "Amlogic SoC drivers"
+
+config MESON_CANVAS
+ tristate "Amlogic Meson Canvas driver"
+ depends on ARCH_MESON || COMPILE_TEST
+ default n
+ help
+ Say yes to support the canvas IP for Amlogic SoCs.
+
+config MESON_CLK_MEASURE
+ tristate "Amlogic Meson SoC Clock Measure driver"
+ depends on ARCH_MESON || COMPILE_TEST
+ default ARCH_MESON
+ select REGMAP_MMIO
+ help
+ Say yes to support measuring a set of internal SoC clocks
+ from the debugfs interface.
+
+config MESON_GX_SOCINFO
+ bool "Amlogic Meson GX SoC Information driver"
+ depends on (ARM64 && ARCH_MESON) || COMPILE_TEST
+ default ARCH_MESON
+ select SOC_BUS
+ help
+ Say yes to support decoding of Amlogic Meson GX SoC family
+ information about the type, package and version.
+
+config MESON_GX_PM_DOMAINS
+ tristate "Amlogic Meson GX Power Domains driver"
+ depends on ARCH_MESON || COMPILE_TEST
+ depends on PM && OF
+ default ARCH_MESON
+ select PM_GENERIC_DOMAINS
+ select PM_GENERIC_DOMAINS_OF
+ help
+ Say yes to expose Amlogic Meson GX Power Domains as
+ Generic Power Domains.
+
+config MESON_EE_PM_DOMAINS
+ tristate "Amlogic Meson Everything-Else Power Domains driver"
+ depends on ARCH_MESON || COMPILE_TEST
+ depends on PM && OF
+ default ARCH_MESON
+ select PM_GENERIC_DOMAINS
+ select PM_GENERIC_DOMAINS_OF
+ help
+ Say yes to expose Amlogic Meson Everything-Else Power Domains as
+ Generic Power Domains.
+
+config MESON_SECURE_PM_DOMAINS
+ tristate "Amlogic Meson Secure Power Domains driver"
+ depends on (ARCH_MESON || COMPILE_TEST) && MESON_SM
+ depends on PM && OF
+ depends on HAVE_ARM_SMCCC
+ default ARCH_MESON
+ select PM_GENERIC_DOMAINS
+ select PM_GENERIC_DOMAINS_OF
+ help
+ Support for the power controller on Amlogic A1/C1 series.
+ Say yes to expose Amlogic Meson Secure Power Domains as Generic
+ Power Domains.
+
+config MESON_MX_SOCINFO
+ bool "Amlogic Meson MX SoC Information driver"
+ depends on (ARM && ARCH_MESON) || COMPILE_TEST
+ default ARCH_MESON
+ select SOC_BUS
+ help
+ Say yes to support decoding of Amlogic Meson6, Meson8,
+ Meson8b and Meson8m2 SoC family information about the type
+ and version.
+
+endmenu
diff --git a/drivers/soc/amlogic/Makefile b/drivers/soc/amlogic/Makefile
new file mode 100644
index 0000000000..c25f835e6a
--- /dev/null
+++ b/drivers/soc/amlogic/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_MESON_CANVAS) += meson-canvas.o
+obj-$(CONFIG_MESON_CLK_MEASURE) += meson-clk-measure.o
+obj-$(CONFIG_MESON_GX_SOCINFO) += meson-gx-socinfo.o
+obj-$(CONFIG_MESON_MX_SOCINFO) += meson-mx-socinfo.o
diff --git a/drivers/soc/amlogic/meson-canvas.c b/drivers/soc/amlogic/meson-canvas.c
new file mode 100644
index 0000000000..b6e06c4d21
--- /dev/null
+++ b/drivers/soc/amlogic/meson-canvas.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ * Copyright (C) 2014 Endless Mobile
+ */
+
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/soc/amlogic/meson-canvas.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+
+#define NUM_CANVAS 256
+
+/* DMC Registers */
+#define DMC_CAV_LUT_DATAL 0x00
+ #define CANVAS_WIDTH_LBIT 29
+ #define CANVAS_WIDTH_LWID 3
+#define DMC_CAV_LUT_DATAH 0x04
+ #define CANVAS_WIDTH_HBIT 0
+ #define CANVAS_HEIGHT_BIT 9
+ #define CANVAS_WRAP_BIT 22
+ #define CANVAS_BLKMODE_BIT 24
+ #define CANVAS_ENDIAN_BIT 26
+#define DMC_CAV_LUT_ADDR 0x08
+ #define CANVAS_LUT_WR_EN BIT(9)
+ #define CANVAS_LUT_RD_EN BIT(8)
+
+struct meson_canvas {
+ struct device *dev;
+ void __iomem *reg_base;
+ spinlock_t lock; /* canvas device lock */
+ u8 used[NUM_CANVAS];
+ bool supports_endianness;
+};
+
+static void canvas_write(struct meson_canvas *canvas, u32 reg, u32 val)
+{
+ writel_relaxed(val, canvas->reg_base + reg);
+}
+
+static u32 canvas_read(struct meson_canvas *canvas, u32 reg)
+{
+ return readl_relaxed(canvas->reg_base + reg);
+}
+
+struct meson_canvas *meson_canvas_get(struct device *dev)
+{
+ struct device_node *canvas_node;
+ struct platform_device *canvas_pdev;
+ struct meson_canvas *canvas;
+
+ canvas_node = of_parse_phandle(dev->of_node, "amlogic,canvas", 0);
+ if (!canvas_node)
+ return ERR_PTR(-ENODEV);
+
+ canvas_pdev = of_find_device_by_node(canvas_node);
+ if (!canvas_pdev) {
+ of_node_put(canvas_node);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ of_node_put(canvas_node);
+
+ /*
+ * If priv is NULL, it's probably because the canvas hasn't been
+ * properly initialized. Bail out with -EINVAL because, in the
+ * current state, this driver's probe cannot return -EPROBE_DEFER.
+ */
+ canvas = dev_get_drvdata(&canvas_pdev->dev);
+ if (!canvas) {
+ put_device(&canvas_pdev->dev);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return canvas;
+}
+EXPORT_SYMBOL_GPL(meson_canvas_get);
+
+int meson_canvas_config(struct meson_canvas *canvas, u8 canvas_index,
+ u32 addr, u32 stride, u32 height,
+ unsigned int wrap,
+ unsigned int blkmode,
+ unsigned int endian)
+{
+ unsigned long flags;
+
+ if (endian && !canvas->supports_endianness) {
+ dev_err(canvas->dev,
+ "Endianness is not supported on this SoC\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&canvas->lock, flags);
+ if (!canvas->used[canvas_index]) {
+ dev_err(canvas->dev,
+ "Trying to setup non allocated canvas %u\n",
+ canvas_index);
+ spin_unlock_irqrestore(&canvas->lock, flags);
+ return -EINVAL;
+ }
+
+ canvas_write(canvas, DMC_CAV_LUT_DATAL,
+ ((addr + 7) >> 3) |
+ (((stride + 7) >> 3) << CANVAS_WIDTH_LBIT));
+
+ canvas_write(canvas, DMC_CAV_LUT_DATAH,
+ ((((stride + 7) >> 3) >> CANVAS_WIDTH_LWID) <<
+ CANVAS_WIDTH_HBIT) |
+ (height << CANVAS_HEIGHT_BIT) |
+ (wrap << CANVAS_WRAP_BIT) |
+ (blkmode << CANVAS_BLKMODE_BIT) |
+ (endian << CANVAS_ENDIAN_BIT));
+
+ canvas_write(canvas, DMC_CAV_LUT_ADDR,
+ CANVAS_LUT_WR_EN | canvas_index);
+
+ /* Force a read-back to make sure everything is flushed. */
+ canvas_read(canvas, DMC_CAV_LUT_DATAH);
+ spin_unlock_irqrestore(&canvas->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(meson_canvas_config);
+
+int meson_canvas_alloc(struct meson_canvas *canvas, u8 *canvas_index)
+{
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&canvas->lock, flags);
+ for (i = 0; i < NUM_CANVAS; ++i) {
+ if (!canvas->used[i]) {
+ canvas->used[i] = 1;
+ spin_unlock_irqrestore(&canvas->lock, flags);
+ *canvas_index = i;
+ return 0;
+ }
+ }
+ spin_unlock_irqrestore(&canvas->lock, flags);
+
+ dev_err(canvas->dev, "No more canvas available\n");
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(meson_canvas_alloc);
+
+int meson_canvas_free(struct meson_canvas *canvas, u8 canvas_index)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&canvas->lock, flags);
+ if (!canvas->used[canvas_index]) {
+ dev_err(canvas->dev,
+ "Trying to free unused canvas %u\n", canvas_index);
+ spin_unlock_irqrestore(&canvas->lock, flags);
+ return -EINVAL;
+ }
+ canvas->used[canvas_index] = 0;
+ spin_unlock_irqrestore(&canvas->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(meson_canvas_free);
+
+static int meson_canvas_probe(struct platform_device *pdev)
+{
+ struct meson_canvas *canvas;
+ struct device *dev = &pdev->dev;
+
+ canvas = devm_kzalloc(dev, sizeof(*canvas), GFP_KERNEL);
+ if (!canvas)
+ return -ENOMEM;
+
+ canvas->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(canvas->reg_base))
+ return PTR_ERR(canvas->reg_base);
+
+ canvas->supports_endianness = of_device_get_match_data(dev);
+
+ canvas->dev = dev;
+ spin_lock_init(&canvas->lock);
+ dev_set_drvdata(dev, canvas);
+
+ return 0;
+}
+
+static const struct of_device_id canvas_dt_match[] = {
+ { .compatible = "amlogic,meson8-canvas", .data = (void *)false, },
+ { .compatible = "amlogic,meson8b-canvas", .data = (void *)false, },
+ { .compatible = "amlogic,meson8m2-canvas", .data = (void *)false, },
+ { .compatible = "amlogic,canvas", .data = (void *)true, },
+ {}
+};
+MODULE_DEVICE_TABLE(of, canvas_dt_match);
+
+static struct platform_driver meson_canvas_driver = {
+ .probe = meson_canvas_probe,
+ .driver = {
+ .name = "amlogic-canvas",
+ .of_match_table = canvas_dt_match,
+ },
+};
+module_platform_driver(meson_canvas_driver);
+
+MODULE_DESCRIPTION("Amlogic Canvas driver");
+MODULE_AUTHOR("Maxime Jourdan <mjourdan@baylibre.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/amlogic/meson-clk-measure.c b/drivers/soc/amlogic/meson-clk-measure.c
new file mode 100644
index 0000000000..3f30396003
--- /dev/null
+++ b/drivers/soc/amlogic/meson-clk-measure.c
@@ -0,0 +1,691 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2018 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/bitfield.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/regmap.h>
+#include <linux/module.h>
+
+static DEFINE_MUTEX(measure_lock);
+
+#define MSR_CLK_DUTY 0x0
+#define MSR_CLK_REG0 0x4
+#define MSR_CLK_REG1 0x8
+#define MSR_CLK_REG2 0xc
+
+#define MSR_DURATION GENMASK(15, 0)
+#define MSR_ENABLE BIT(16)
+#define MSR_CONT BIT(17) /* continuous measurement */
+#define MSR_INTR BIT(18) /* interrupts */
+#define MSR_RUN BIT(19)
+#define MSR_CLK_SRC GENMASK(26, 20)
+#define MSR_BUSY BIT(31)
+
+#define MSR_VAL_MASK GENMASK(15, 0)
+
+#define DIV_MIN 32
+#define DIV_STEP 32
+#define DIV_MAX 640
+
+#define CLK_MSR_MAX 128
+
+struct meson_msr_id {
+ struct meson_msr *priv;
+ unsigned int id;
+ const char *name;
+};
+
+struct meson_msr {
+ struct regmap *regmap;
+ struct meson_msr_id msr_table[CLK_MSR_MAX];
+};
+
+#define CLK_MSR_ID(__id, __name) \
+ [__id] = {.id = __id, .name = __name,}
+
+static struct meson_msr_id clk_msr_m8[CLK_MSR_MAX] = {
+ CLK_MSR_ID(0, "ring_osc_out_ee0"),
+ CLK_MSR_ID(1, "ring_osc_out_ee1"),
+ CLK_MSR_ID(2, "ring_osc_out_ee2"),
+ CLK_MSR_ID(3, "a9_ring_osck"),
+ CLK_MSR_ID(6, "vid_pll"),
+ CLK_MSR_ID(7, "clk81"),
+ CLK_MSR_ID(8, "encp"),
+ CLK_MSR_ID(9, "encl"),
+ CLK_MSR_ID(11, "eth_rmii"),
+ CLK_MSR_ID(13, "amclk"),
+ CLK_MSR_ID(14, "fec_clk_0"),
+ CLK_MSR_ID(15, "fec_clk_1"),
+ CLK_MSR_ID(16, "fec_clk_2"),
+ CLK_MSR_ID(18, "a9_clk_div16"),
+ CLK_MSR_ID(19, "hdmi_sys"),
+ CLK_MSR_ID(20, "rtc_osc_clk_out"),
+ CLK_MSR_ID(21, "i2s_clk_in_src0"),
+ CLK_MSR_ID(22, "clk_rmii_from_pad"),
+ CLK_MSR_ID(23, "hdmi_ch0_tmds"),
+ CLK_MSR_ID(24, "lvds_fifo"),
+ CLK_MSR_ID(26, "sc_clk_int"),
+ CLK_MSR_ID(28, "sar_adc"),
+ CLK_MSR_ID(30, "mpll_clk_test_out"),
+ CLK_MSR_ID(31, "audac_clkpi"),
+ CLK_MSR_ID(32, "vdac"),
+ CLK_MSR_ID(33, "sdhc_rx"),
+ CLK_MSR_ID(34, "sdhc_sd"),
+ CLK_MSR_ID(35, "mali"),
+ CLK_MSR_ID(36, "hdmi_tx_pixel"),
+ CLK_MSR_ID(38, "vdin_meas"),
+ CLK_MSR_ID(39, "pcm_sclk"),
+ CLK_MSR_ID(40, "pcm_mclk"),
+ CLK_MSR_ID(41, "eth_rx_tx"),
+ CLK_MSR_ID(42, "pwm_d"),
+ CLK_MSR_ID(43, "pwm_c"),
+ CLK_MSR_ID(44, "pwm_b"),
+ CLK_MSR_ID(45, "pwm_a"),
+ CLK_MSR_ID(46, "pcm2_sclk"),
+ CLK_MSR_ID(47, "ddr_dpll_pt"),
+ CLK_MSR_ID(48, "pwm_f"),
+ CLK_MSR_ID(49, "pwm_e"),
+ CLK_MSR_ID(59, "hcodec"),
+ CLK_MSR_ID(60, "usb_32k_alt"),
+ CLK_MSR_ID(61, "gpio"),
+ CLK_MSR_ID(62, "vid2_pll"),
+ CLK_MSR_ID(63, "mipi_csi_cfg"),
+};
+
+static struct meson_msr_id clk_msr_gx[CLK_MSR_MAX] = {
+ CLK_MSR_ID(0, "ring_osc_out_ee_0"),
+ CLK_MSR_ID(1, "ring_osc_out_ee_1"),
+ CLK_MSR_ID(2, "ring_osc_out_ee_2"),
+ CLK_MSR_ID(3, "a53_ring_osc"),
+ CLK_MSR_ID(4, "gp0_pll"),
+ CLK_MSR_ID(6, "enci"),
+ CLK_MSR_ID(7, "clk81"),
+ CLK_MSR_ID(8, "encp"),
+ CLK_MSR_ID(9, "encl"),
+ CLK_MSR_ID(10, "vdac"),
+ CLK_MSR_ID(11, "rgmii_tx"),
+ CLK_MSR_ID(12, "pdm"),
+ CLK_MSR_ID(13, "amclk"),
+ CLK_MSR_ID(14, "fec_0"),
+ CLK_MSR_ID(15, "fec_1"),
+ CLK_MSR_ID(16, "fec_2"),
+ CLK_MSR_ID(17, "sys_pll_div16"),
+ CLK_MSR_ID(18, "sys_cpu_div16"),
+ CLK_MSR_ID(19, "hdmitx_sys"),
+ CLK_MSR_ID(20, "rtc_osc_out"),
+ CLK_MSR_ID(21, "i2s_in_src0"),
+ CLK_MSR_ID(22, "eth_phy_ref"),
+ CLK_MSR_ID(23, "hdmi_todig"),
+ CLK_MSR_ID(26, "sc_int"),
+ CLK_MSR_ID(28, "sar_adc"),
+ CLK_MSR_ID(31, "mpll_test_out"),
+ CLK_MSR_ID(32, "vdec"),
+ CLK_MSR_ID(35, "mali"),
+ CLK_MSR_ID(36, "hdmi_tx_pixel"),
+ CLK_MSR_ID(37, "i958"),
+ CLK_MSR_ID(38, "vdin_meas"),
+ CLK_MSR_ID(39, "pcm_sclk"),
+ CLK_MSR_ID(40, "pcm_mclk"),
+ CLK_MSR_ID(41, "eth_rx_or_rmii"),
+ CLK_MSR_ID(42, "mp0_out"),
+ CLK_MSR_ID(43, "fclk_div5"),
+ CLK_MSR_ID(44, "pwm_b"),
+ CLK_MSR_ID(45, "pwm_a"),
+ CLK_MSR_ID(46, "vpu"),
+ CLK_MSR_ID(47, "ddr_dpll_pt"),
+ CLK_MSR_ID(48, "mp1_out"),
+ CLK_MSR_ID(49, "mp2_out"),
+ CLK_MSR_ID(50, "mp3_out"),
+ CLK_MSR_ID(51, "nand_core"),
+ CLK_MSR_ID(52, "sd_emmc_b"),
+ CLK_MSR_ID(53, "sd_emmc_a"),
+ CLK_MSR_ID(55, "vid_pll_div_out"),
+ CLK_MSR_ID(56, "cci"),
+ CLK_MSR_ID(57, "wave420l_c"),
+ CLK_MSR_ID(58, "wave420l_b"),
+ CLK_MSR_ID(59, "hcodec"),
+ CLK_MSR_ID(60, "alt_32k"),
+ CLK_MSR_ID(61, "gpio_msr"),
+ CLK_MSR_ID(62, "hevc"),
+ CLK_MSR_ID(66, "vid_lock"),
+ CLK_MSR_ID(70, "pwm_f"),
+ CLK_MSR_ID(71, "pwm_e"),
+ CLK_MSR_ID(72, "pwm_d"),
+ CLK_MSR_ID(73, "pwm_c"),
+ CLK_MSR_ID(75, "aoclkx2_int"),
+ CLK_MSR_ID(76, "aoclk_int"),
+ CLK_MSR_ID(77, "rng_ring_osc_0"),
+ CLK_MSR_ID(78, "rng_ring_osc_1"),
+ CLK_MSR_ID(79, "rng_ring_osc_2"),
+ CLK_MSR_ID(80, "rng_ring_osc_3"),
+ CLK_MSR_ID(81, "vapb"),
+ CLK_MSR_ID(82, "ge2d"),
+};
+
+static struct meson_msr_id clk_msr_axg[CLK_MSR_MAX] = {
+ CLK_MSR_ID(0, "ring_osc_out_ee_0"),
+ CLK_MSR_ID(1, "ring_osc_out_ee_1"),
+ CLK_MSR_ID(2, "ring_osc_out_ee_2"),
+ CLK_MSR_ID(3, "a53_ring_osc"),
+ CLK_MSR_ID(4, "gp0_pll"),
+ CLK_MSR_ID(5, "gp1_pll"),
+ CLK_MSR_ID(7, "clk81"),
+ CLK_MSR_ID(9, "encl"),
+ CLK_MSR_ID(17, "sys_pll_div16"),
+ CLK_MSR_ID(18, "sys_cpu_div16"),
+ CLK_MSR_ID(20, "rtc_osc_out"),
+ CLK_MSR_ID(23, "mmc_clk"),
+ CLK_MSR_ID(28, "sar_adc"),
+ CLK_MSR_ID(31, "mpll_test_out"),
+ CLK_MSR_ID(40, "mod_eth_tx_clk"),
+ CLK_MSR_ID(41, "mod_eth_rx_clk_rmii"),
+ CLK_MSR_ID(42, "mp0_out"),
+ CLK_MSR_ID(43, "fclk_div5"),
+ CLK_MSR_ID(44, "pwm_b"),
+ CLK_MSR_ID(45, "pwm_a"),
+ CLK_MSR_ID(46, "vpu"),
+ CLK_MSR_ID(47, "ddr_dpll_pt"),
+ CLK_MSR_ID(48, "mp1_out"),
+ CLK_MSR_ID(49, "mp2_out"),
+ CLK_MSR_ID(50, "mp3_out"),
+ CLK_MSR_ID(51, "sd_emmm_c"),
+ CLK_MSR_ID(52, "sd_emmc_b"),
+ CLK_MSR_ID(61, "gpio_msr"),
+ CLK_MSR_ID(66, "audio_slv_lrclk_c"),
+ CLK_MSR_ID(67, "audio_slv_lrclk_b"),
+ CLK_MSR_ID(68, "audio_slv_lrclk_a"),
+ CLK_MSR_ID(69, "audio_slv_sclk_c"),
+ CLK_MSR_ID(70, "audio_slv_sclk_b"),
+ CLK_MSR_ID(71, "audio_slv_sclk_a"),
+ CLK_MSR_ID(72, "pwm_d"),
+ CLK_MSR_ID(73, "pwm_c"),
+ CLK_MSR_ID(74, "wifi_beacon"),
+ CLK_MSR_ID(75, "tdmin_lb_lrcl"),
+ CLK_MSR_ID(76, "tdmin_lb_sclk"),
+ CLK_MSR_ID(77, "rng_ring_osc_0"),
+ CLK_MSR_ID(78, "rng_ring_osc_1"),
+ CLK_MSR_ID(79, "rng_ring_osc_2"),
+ CLK_MSR_ID(80, "rng_ring_osc_3"),
+ CLK_MSR_ID(81, "vapb"),
+ CLK_MSR_ID(82, "ge2d"),
+ CLK_MSR_ID(84, "audio_resample"),
+ CLK_MSR_ID(85, "audio_pdm_sys"),
+ CLK_MSR_ID(86, "audio_spdifout"),
+ CLK_MSR_ID(87, "audio_spdifin"),
+ CLK_MSR_ID(88, "audio_lrclk_f"),
+ CLK_MSR_ID(89, "audio_lrclk_e"),
+ CLK_MSR_ID(90, "audio_lrclk_d"),
+ CLK_MSR_ID(91, "audio_lrclk_c"),
+ CLK_MSR_ID(92, "audio_lrclk_b"),
+ CLK_MSR_ID(93, "audio_lrclk_a"),
+ CLK_MSR_ID(94, "audio_sclk_f"),
+ CLK_MSR_ID(95, "audio_sclk_e"),
+ CLK_MSR_ID(96, "audio_sclk_d"),
+ CLK_MSR_ID(97, "audio_sclk_c"),
+ CLK_MSR_ID(98, "audio_sclk_b"),
+ CLK_MSR_ID(99, "audio_sclk_a"),
+ CLK_MSR_ID(100, "audio_mclk_f"),
+ CLK_MSR_ID(101, "audio_mclk_e"),
+ CLK_MSR_ID(102, "audio_mclk_d"),
+ CLK_MSR_ID(103, "audio_mclk_c"),
+ CLK_MSR_ID(104, "audio_mclk_b"),
+ CLK_MSR_ID(105, "audio_mclk_a"),
+ CLK_MSR_ID(106, "pcie_refclk_n"),
+ CLK_MSR_ID(107, "pcie_refclk_p"),
+ CLK_MSR_ID(108, "audio_locker_out"),
+ CLK_MSR_ID(109, "audio_locker_in"),
+};
+
+static struct meson_msr_id clk_msr_g12a[CLK_MSR_MAX] = {
+ CLK_MSR_ID(0, "ring_osc_out_ee_0"),
+ CLK_MSR_ID(1, "ring_osc_out_ee_1"),
+ CLK_MSR_ID(2, "ring_osc_out_ee_2"),
+ CLK_MSR_ID(3, "sys_cpu_ring_osc"),
+ CLK_MSR_ID(4, "gp0_pll"),
+ CLK_MSR_ID(6, "enci"),
+ CLK_MSR_ID(7, "clk81"),
+ CLK_MSR_ID(8, "encp"),
+ CLK_MSR_ID(9, "encl"),
+ CLK_MSR_ID(10, "vdac"),
+ CLK_MSR_ID(11, "eth_tx"),
+ CLK_MSR_ID(12, "hifi_pll"),
+ CLK_MSR_ID(13, "mod_tcon"),
+ CLK_MSR_ID(14, "fec_0"),
+ CLK_MSR_ID(15, "fec_1"),
+ CLK_MSR_ID(16, "fec_2"),
+ CLK_MSR_ID(17, "sys_pll_div16"),
+ CLK_MSR_ID(18, "sys_cpu_div16"),
+ CLK_MSR_ID(19, "lcd_an_ph2"),
+ CLK_MSR_ID(20, "rtc_osc_out"),
+ CLK_MSR_ID(21, "lcd_an_ph3"),
+ CLK_MSR_ID(22, "eth_phy_ref"),
+ CLK_MSR_ID(23, "mpll_50m"),
+ CLK_MSR_ID(24, "eth_125m"),
+ CLK_MSR_ID(25, "eth_rmii"),
+ CLK_MSR_ID(26, "sc_int"),
+ CLK_MSR_ID(27, "in_mac"),
+ CLK_MSR_ID(28, "sar_adc"),
+ CLK_MSR_ID(29, "pcie_inp"),
+ CLK_MSR_ID(30, "pcie_inn"),
+ CLK_MSR_ID(31, "mpll_test_out"),
+ CLK_MSR_ID(32, "vdec"),
+ CLK_MSR_ID(33, "sys_cpu_ring_osc_1"),
+ CLK_MSR_ID(34, "eth_mpll_50m"),
+ CLK_MSR_ID(35, "mali"),
+ CLK_MSR_ID(36, "hdmi_tx_pixel"),
+ CLK_MSR_ID(37, "cdac"),
+ CLK_MSR_ID(38, "vdin_meas"),
+ CLK_MSR_ID(39, "bt656"),
+ CLK_MSR_ID(41, "eth_rx_or_rmii"),
+ CLK_MSR_ID(42, "mp0_out"),
+ CLK_MSR_ID(43, "fclk_div5"),
+ CLK_MSR_ID(44, "pwm_b"),
+ CLK_MSR_ID(45, "pwm_a"),
+ CLK_MSR_ID(46, "vpu"),
+ CLK_MSR_ID(47, "ddr_dpll_pt"),
+ CLK_MSR_ID(48, "mp1_out"),
+ CLK_MSR_ID(49, "mp2_out"),
+ CLK_MSR_ID(50, "mp3_out"),
+ CLK_MSR_ID(51, "sd_emmc_c"),
+ CLK_MSR_ID(52, "sd_emmc_b"),
+ CLK_MSR_ID(53, "sd_emmc_a"),
+ CLK_MSR_ID(54, "vpu_clkc"),
+ CLK_MSR_ID(55, "vid_pll_div_out"),
+ CLK_MSR_ID(56, "wave420l_a"),
+ CLK_MSR_ID(57, "wave420l_c"),
+ CLK_MSR_ID(58, "wave420l_b"),
+ CLK_MSR_ID(59, "hcodec"),
+ CLK_MSR_ID(61, "gpio_msr"),
+ CLK_MSR_ID(62, "hevcb"),
+ CLK_MSR_ID(63, "dsi_meas"),
+ CLK_MSR_ID(64, "spicc_1"),
+ CLK_MSR_ID(65, "spicc_0"),
+ CLK_MSR_ID(66, "vid_lock"),
+ CLK_MSR_ID(67, "dsi_phy"),
+ CLK_MSR_ID(68, "hdcp22_esm"),
+ CLK_MSR_ID(69, "hdcp22_skp"),
+ CLK_MSR_ID(70, "pwm_f"),
+ CLK_MSR_ID(71, "pwm_e"),
+ CLK_MSR_ID(72, "pwm_d"),
+ CLK_MSR_ID(73, "pwm_c"),
+ CLK_MSR_ID(75, "hevcf"),
+ CLK_MSR_ID(77, "rng_ring_osc_0"),
+ CLK_MSR_ID(78, "rng_ring_osc_1"),
+ CLK_MSR_ID(79, "rng_ring_osc_2"),
+ CLK_MSR_ID(80, "rng_ring_osc_3"),
+ CLK_MSR_ID(81, "vapb"),
+ CLK_MSR_ID(82, "ge2d"),
+ CLK_MSR_ID(83, "co_rx"),
+ CLK_MSR_ID(84, "co_tx"),
+ CLK_MSR_ID(89, "hdmi_todig"),
+ CLK_MSR_ID(90, "hdmitx_sys"),
+ CLK_MSR_ID(91, "sys_cpub_div16"),
+ CLK_MSR_ID(92, "sys_pll_cpub_div16"),
+ CLK_MSR_ID(94, "eth_phy_rx"),
+ CLK_MSR_ID(95, "eth_phy_pll"),
+ CLK_MSR_ID(96, "vpu_b"),
+ CLK_MSR_ID(97, "cpu_b_tmp"),
+ CLK_MSR_ID(98, "ts"),
+ CLK_MSR_ID(99, "ring_osc_out_ee_3"),
+ CLK_MSR_ID(100, "ring_osc_out_ee_4"),
+ CLK_MSR_ID(101, "ring_osc_out_ee_5"),
+ CLK_MSR_ID(102, "ring_osc_out_ee_6"),
+ CLK_MSR_ID(103, "ring_osc_out_ee_7"),
+ CLK_MSR_ID(104, "ring_osc_out_ee_8"),
+ CLK_MSR_ID(105, "ring_osc_out_ee_9"),
+ CLK_MSR_ID(106, "ephy_test"),
+ CLK_MSR_ID(107, "au_dac_g128x"),
+ CLK_MSR_ID(108, "audio_locker_out"),
+ CLK_MSR_ID(109, "audio_locker_in"),
+ CLK_MSR_ID(110, "audio_tdmout_c_sclk"),
+ CLK_MSR_ID(111, "audio_tdmout_b_sclk"),
+ CLK_MSR_ID(112, "audio_tdmout_a_sclk"),
+ CLK_MSR_ID(113, "audio_tdmin_lb_sclk"),
+ CLK_MSR_ID(114, "audio_tdmin_c_sclk"),
+ CLK_MSR_ID(115, "audio_tdmin_b_sclk"),
+ CLK_MSR_ID(116, "audio_tdmin_a_sclk"),
+ CLK_MSR_ID(117, "audio_resample"),
+ CLK_MSR_ID(118, "audio_pdm_sys"),
+ CLK_MSR_ID(119, "audio_spdifout_b"),
+ CLK_MSR_ID(120, "audio_spdifout"),
+ CLK_MSR_ID(121, "audio_spdifin"),
+ CLK_MSR_ID(122, "audio_pdm_dclk"),
+};
+
+static struct meson_msr_id clk_msr_sm1[CLK_MSR_MAX] = {
+ CLK_MSR_ID(0, "ring_osc_out_ee_0"),
+ CLK_MSR_ID(1, "ring_osc_out_ee_1"),
+ CLK_MSR_ID(2, "ring_osc_out_ee_2"),
+ CLK_MSR_ID(3, "ring_osc_out_ee_3"),
+ CLK_MSR_ID(4, "gp0_pll"),
+ CLK_MSR_ID(5, "gp1_pll"),
+ CLK_MSR_ID(6, "enci"),
+ CLK_MSR_ID(7, "clk81"),
+ CLK_MSR_ID(8, "encp"),
+ CLK_MSR_ID(9, "encl"),
+ CLK_MSR_ID(10, "vdac"),
+ CLK_MSR_ID(11, "eth_tx"),
+ CLK_MSR_ID(12, "hifi_pll"),
+ CLK_MSR_ID(13, "mod_tcon"),
+ CLK_MSR_ID(14, "fec_0"),
+ CLK_MSR_ID(15, "fec_1"),
+ CLK_MSR_ID(16, "fec_2"),
+ CLK_MSR_ID(17, "sys_pll_div16"),
+ CLK_MSR_ID(18, "sys_cpu_div16"),
+ CLK_MSR_ID(19, "lcd_an_ph2"),
+ CLK_MSR_ID(20, "rtc_osc_out"),
+ CLK_MSR_ID(21, "lcd_an_ph3"),
+ CLK_MSR_ID(22, "eth_phy_ref"),
+ CLK_MSR_ID(23, "mpll_50m"),
+ CLK_MSR_ID(24, "eth_125m"),
+ CLK_MSR_ID(25, "eth_rmii"),
+ CLK_MSR_ID(26, "sc_int"),
+ CLK_MSR_ID(27, "in_mac"),
+ CLK_MSR_ID(28, "sar_adc"),
+ CLK_MSR_ID(29, "pcie_inp"),
+ CLK_MSR_ID(30, "pcie_inn"),
+ CLK_MSR_ID(31, "mpll_test_out"),
+ CLK_MSR_ID(32, "vdec"),
+ CLK_MSR_ID(34, "eth_mpll_50m"),
+ CLK_MSR_ID(35, "mali"),
+ CLK_MSR_ID(36, "hdmi_tx_pixel"),
+ CLK_MSR_ID(37, "cdac"),
+ CLK_MSR_ID(38, "vdin_meas"),
+ CLK_MSR_ID(39, "bt656"),
+ CLK_MSR_ID(40, "arm_ring_osc_out_4"),
+ CLK_MSR_ID(41, "eth_rx_or_rmii"),
+ CLK_MSR_ID(42, "mp0_out"),
+ CLK_MSR_ID(43, "fclk_div5"),
+ CLK_MSR_ID(44, "pwm_b"),
+ CLK_MSR_ID(45, "pwm_a"),
+ CLK_MSR_ID(46, "vpu"),
+ CLK_MSR_ID(47, "ddr_dpll_pt"),
+ CLK_MSR_ID(48, "mp1_out"),
+ CLK_MSR_ID(49, "mp2_out"),
+ CLK_MSR_ID(50, "mp3_out"),
+ CLK_MSR_ID(51, "sd_emmc_c"),
+ CLK_MSR_ID(52, "sd_emmc_b"),
+ CLK_MSR_ID(53, "sd_emmc_a"),
+ CLK_MSR_ID(54, "vpu_clkc"),
+ CLK_MSR_ID(55, "vid_pll_div_out"),
+ CLK_MSR_ID(56, "wave420l_a"),
+ CLK_MSR_ID(57, "wave420l_c"),
+ CLK_MSR_ID(58, "wave420l_b"),
+ CLK_MSR_ID(59, "hcodec"),
+ CLK_MSR_ID(60, "arm_ring_osc_out_5"),
+ CLK_MSR_ID(61, "gpio_msr"),
+ CLK_MSR_ID(62, "hevcb"),
+ CLK_MSR_ID(63, "dsi_meas"),
+ CLK_MSR_ID(64, "spicc_1"),
+ CLK_MSR_ID(65, "spicc_0"),
+ CLK_MSR_ID(66, "vid_lock"),
+ CLK_MSR_ID(67, "dsi_phy"),
+ CLK_MSR_ID(68, "hdcp22_esm"),
+ CLK_MSR_ID(69, "hdcp22_skp"),
+ CLK_MSR_ID(70, "pwm_f"),
+ CLK_MSR_ID(71, "pwm_e"),
+ CLK_MSR_ID(72, "pwm_d"),
+ CLK_MSR_ID(73, "pwm_c"),
+ CLK_MSR_ID(74, "arm_ring_osc_out_6"),
+ CLK_MSR_ID(75, "hevcf"),
+ CLK_MSR_ID(76, "arm_ring_osc_out_7"),
+ CLK_MSR_ID(77, "rng_ring_osc_0"),
+ CLK_MSR_ID(78, "rng_ring_osc_1"),
+ CLK_MSR_ID(79, "rng_ring_osc_2"),
+ CLK_MSR_ID(80, "rng_ring_osc_3"),
+ CLK_MSR_ID(81, "vapb"),
+ CLK_MSR_ID(82, "ge2d"),
+ CLK_MSR_ID(83, "co_rx"),
+ CLK_MSR_ID(84, "co_tx"),
+ CLK_MSR_ID(85, "arm_ring_osc_out_8"),
+ CLK_MSR_ID(86, "arm_ring_osc_out_9"),
+ CLK_MSR_ID(87, "mipi_dsi_phy"),
+ CLK_MSR_ID(88, "cis2_adapt"),
+ CLK_MSR_ID(89, "hdmi_todig"),
+ CLK_MSR_ID(90, "hdmitx_sys"),
+ CLK_MSR_ID(91, "nna_core"),
+ CLK_MSR_ID(92, "nna_axi"),
+ CLK_MSR_ID(93, "vad"),
+ CLK_MSR_ID(94, "eth_phy_rx"),
+ CLK_MSR_ID(95, "eth_phy_pll"),
+ CLK_MSR_ID(96, "vpu_b"),
+ CLK_MSR_ID(97, "cpu_b_tmp"),
+ CLK_MSR_ID(98, "ts"),
+ CLK_MSR_ID(99, "arm_ring_osc_out_10"),
+ CLK_MSR_ID(100, "arm_ring_osc_out_11"),
+ CLK_MSR_ID(101, "arm_ring_osc_out_12"),
+ CLK_MSR_ID(102, "arm_ring_osc_out_13"),
+ CLK_MSR_ID(103, "arm_ring_osc_out_14"),
+ CLK_MSR_ID(104, "arm_ring_osc_out_15"),
+ CLK_MSR_ID(105, "arm_ring_osc_out_16"),
+ CLK_MSR_ID(106, "ephy_test"),
+ CLK_MSR_ID(107, "au_dac_g128x"),
+ CLK_MSR_ID(108, "audio_locker_out"),
+ CLK_MSR_ID(109, "audio_locker_in"),
+ CLK_MSR_ID(110, "audio_tdmout_c_sclk"),
+ CLK_MSR_ID(111, "audio_tdmout_b_sclk"),
+ CLK_MSR_ID(112, "audio_tdmout_a_sclk"),
+ CLK_MSR_ID(113, "audio_tdmin_lb_sclk"),
+ CLK_MSR_ID(114, "audio_tdmin_c_sclk"),
+ CLK_MSR_ID(115, "audio_tdmin_b_sclk"),
+ CLK_MSR_ID(116, "audio_tdmin_a_sclk"),
+ CLK_MSR_ID(117, "audio_resample"),
+ CLK_MSR_ID(118, "audio_pdm_sys"),
+ CLK_MSR_ID(119, "audio_spdifout_b"),
+ CLK_MSR_ID(120, "audio_spdifout"),
+ CLK_MSR_ID(121, "audio_spdifin"),
+ CLK_MSR_ID(122, "audio_pdm_dclk"),
+ CLK_MSR_ID(123, "audio_resampled"),
+ CLK_MSR_ID(124, "earcrx_pll"),
+ CLK_MSR_ID(125, "earcrx_pll_test"),
+ CLK_MSR_ID(126, "csi_phy0"),
+ CLK_MSR_ID(127, "csi2_data"),
+};
+
+static int meson_measure_id(struct meson_msr_id *clk_msr_id,
+ unsigned int duration)
+{
+ struct meson_msr *priv = clk_msr_id->priv;
+ unsigned int val;
+ int ret;
+
+ ret = mutex_lock_interruptible(&measure_lock);
+ if (ret)
+ return ret;
+
+ regmap_write(priv->regmap, MSR_CLK_REG0, 0);
+
+ /* Set measurement duration */
+ regmap_update_bits(priv->regmap, MSR_CLK_REG0, MSR_DURATION,
+ FIELD_PREP(MSR_DURATION, duration - 1));
+
+ /* Set ID */
+ regmap_update_bits(priv->regmap, MSR_CLK_REG0, MSR_CLK_SRC,
+ FIELD_PREP(MSR_CLK_SRC, clk_msr_id->id));
+
+ /* Enable & Start */
+ regmap_update_bits(priv->regmap, MSR_CLK_REG0,
+ MSR_RUN | MSR_ENABLE,
+ MSR_RUN | MSR_ENABLE);
+
+ ret = regmap_read_poll_timeout(priv->regmap, MSR_CLK_REG0,
+ val, !(val & MSR_BUSY), 10, 10000);
+ if (ret) {
+ mutex_unlock(&measure_lock);
+ return ret;
+ }
+
+ /* Disable */
+ regmap_update_bits(priv->regmap, MSR_CLK_REG0, MSR_ENABLE, 0);
+
+ /* Get the value in multiples of gate time counts */
+ regmap_read(priv->regmap, MSR_CLK_REG2, &val);
+
+ mutex_unlock(&measure_lock);
+
+ if (val >= MSR_VAL_MASK)
+ return -EINVAL;
+
+ return DIV_ROUND_CLOSEST_ULL((val & MSR_VAL_MASK) * 1000000ULL,
+ duration);
+}
+
+static int meson_measure_best_id(struct meson_msr_id *clk_msr_id,
+ unsigned int *precision)
+{
+ unsigned int duration = DIV_MAX;
+ int ret;
+
+ /* Start from max duration and down to min duration */
+ do {
+ ret = meson_measure_id(clk_msr_id, duration);
+ if (ret >= 0)
+ *precision = (2 * 1000000) / duration;
+ else
+ duration -= DIV_STEP;
+ } while (duration >= DIV_MIN && ret == -EINVAL);
+
+ return ret;
+}
+
+static int clk_msr_show(struct seq_file *s, void *data)
+{
+ struct meson_msr_id *clk_msr_id = s->private;
+ unsigned int precision = 0;
+ int val;
+
+ val = meson_measure_best_id(clk_msr_id, &precision);
+ if (val < 0)
+ return val;
+
+ seq_printf(s, "%d\t+/-%dHz\n", val, precision);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(clk_msr);
+
+static int clk_msr_summary_show(struct seq_file *s, void *data)
+{
+ struct meson_msr_id *msr_table = s->private;
+ unsigned int precision = 0;
+ int val, i;
+
+ seq_puts(s, " clock rate precision\n");
+ seq_puts(s, "---------------------------------------------\n");
+
+ for (i = 0 ; i < CLK_MSR_MAX ; ++i) {
+ if (!msr_table[i].name)
+ continue;
+
+ val = meson_measure_best_id(&msr_table[i], &precision);
+ if (val < 0)
+ return val;
+
+ seq_printf(s, " %-20s %10d +/-%dHz\n",
+ msr_table[i].name, val, precision);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(clk_msr_summary);
+
+static const struct regmap_config meson_clk_msr_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = MSR_CLK_REG2,
+};
+
+static int meson_msr_probe(struct platform_device *pdev)
+{
+ const struct meson_msr_id *match_data;
+ struct meson_msr *priv;
+ struct dentry *root, *clks;
+ void __iomem *base;
+ int i;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct meson_msr),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ match_data = device_get_match_data(&pdev->dev);
+ if (!match_data) {
+ dev_err(&pdev->dev, "failed to get match data\n");
+ return -ENODEV;
+ }
+
+ memcpy(priv->msr_table, match_data, sizeof(priv->msr_table));
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
+ &meson_clk_msr_regmap_config);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ root = debugfs_create_dir("meson-clk-msr", NULL);
+ clks = debugfs_create_dir("clks", root);
+
+ debugfs_create_file("measure_summary", 0444, root,
+ priv->msr_table, &clk_msr_summary_fops);
+
+ for (i = 0 ; i < CLK_MSR_MAX ; ++i) {
+ if (!priv->msr_table[i].name)
+ continue;
+
+ priv->msr_table[i].priv = priv;
+
+ debugfs_create_file(priv->msr_table[i].name, 0444, clks,
+ &priv->msr_table[i], &clk_msr_fops);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id meson_msr_match_table[] = {
+ {
+ .compatible = "amlogic,meson-gx-clk-measure",
+ .data = (void *)clk_msr_gx,
+ },
+ {
+ .compatible = "amlogic,meson8-clk-measure",
+ .data = (void *)clk_msr_m8,
+ },
+ {
+ .compatible = "amlogic,meson8b-clk-measure",
+ .data = (void *)clk_msr_m8,
+ },
+ {
+ .compatible = "amlogic,meson-axg-clk-measure",
+ .data = (void *)clk_msr_axg,
+ },
+ {
+ .compatible = "amlogic,meson-g12a-clk-measure",
+ .data = (void *)clk_msr_g12a,
+ },
+ {
+ .compatible = "amlogic,meson-sm1-clk-measure",
+ .data = (void *)clk_msr_sm1,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, meson_msr_match_table);
+
+static struct platform_driver meson_msr_driver = {
+ .probe = meson_msr_probe,
+ .driver = {
+ .name = "meson_msr",
+ .of_match_table = meson_msr_match_table,
+ },
+};
+module_platform_driver(meson_msr_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/amlogic/meson-gx-socinfo.c b/drivers/soc/amlogic/meson-gx-socinfo.c
new file mode 100644
index 0000000000..6abb730344
--- /dev/null
+++ b/drivers/soc/amlogic/meson-gx-socinfo.c
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 2017 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+#include <linux/bitfield.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+#define AO_SEC_SD_CFG8 0xe0
+#define AO_SEC_SOCINFO_OFFSET AO_SEC_SD_CFG8
+
+#define SOCINFO_MAJOR GENMASK(31, 24)
+#define SOCINFO_PACK GENMASK(23, 16)
+#define SOCINFO_MINOR GENMASK(15, 8)
+#define SOCINFO_MISC GENMASK(7, 0)
+
+static const struct meson_gx_soc_id {
+ const char *name;
+ unsigned int id;
+} soc_ids[] = {
+ { "GXBB", 0x1f },
+ { "GXTVBB", 0x20 },
+ { "GXL", 0x21 },
+ { "GXM", 0x22 },
+ { "TXL", 0x23 },
+ { "TXLX", 0x24 },
+ { "AXG", 0x25 },
+ { "GXLX", 0x26 },
+ { "TXHD", 0x27 },
+ { "G12A", 0x28 },
+ { "G12B", 0x29 },
+ { "SM1", 0x2b },
+ { "A1", 0x2c },
+};
+
+static const struct meson_gx_package_id {
+ const char *name;
+ unsigned int major_id;
+ unsigned int pack_id;
+ unsigned int pack_mask;
+} soc_packages[] = {
+ { "S905", 0x1f, 0, 0x20 }, /* pack_id != 0x20 */
+ { "S905H", 0x1f, 0x3, 0xf }, /* pack_id & 0xf == 0x3 */
+ { "S905M", 0x1f, 0x20, 0xf0 }, /* pack_id == 0x20 */
+ { "S905D", 0x21, 0, 0xf0 },
+ { "S905X", 0x21, 0x80, 0xf0 },
+ { "S905W", 0x21, 0xa0, 0xf0 },
+ { "S905L", 0x21, 0xc0, 0xf0 },
+ { "S905M2", 0x21, 0xe0, 0xf0 },
+ { "S805X", 0x21, 0x30, 0xf0 },
+ { "S805Y", 0x21, 0xb0, 0xf0 },
+ { "S912", 0x22, 0, 0x0 }, /* Only S912 is known for GXM */
+ { "962X", 0x24, 0x10, 0xf0 },
+ { "962E", 0x24, 0x20, 0xf0 },
+ { "A113X", 0x25, 0x37, 0xff },
+ { "A113D", 0x25, 0x22, 0xff },
+ { "S905D2", 0x28, 0x10, 0xf0 },
+ { "S905Y2", 0x28, 0x30, 0xf0 },
+ { "S905X2", 0x28, 0x40, 0xf0 },
+ { "A311D", 0x29, 0x10, 0xf0 },
+ { "S922X", 0x29, 0x40, 0xf0 },
+ { "S905D3", 0x2b, 0x4, 0xf5 },
+ { "S905X3", 0x2b, 0x5, 0xf5 },
+ { "S905X3", 0x2b, 0x10, 0x3f },
+ { "S905D3", 0x2b, 0x30, 0x3f },
+ { "A113L", 0x2c, 0x0, 0xf8 },
+};
+
+static inline unsigned int socinfo_to_major(u32 socinfo)
+{
+ return FIELD_GET(SOCINFO_MAJOR, socinfo);
+}
+
+static inline unsigned int socinfo_to_minor(u32 socinfo)
+{
+ return FIELD_GET(SOCINFO_MINOR, socinfo);
+}
+
+static inline unsigned int socinfo_to_pack(u32 socinfo)
+{
+ return FIELD_GET(SOCINFO_PACK, socinfo);
+}
+
+static inline unsigned int socinfo_to_misc(u32 socinfo)
+{
+ return FIELD_GET(SOCINFO_MISC, socinfo);
+}
+
+static const char *socinfo_to_package_id(u32 socinfo)
+{
+ unsigned int pack = socinfo_to_pack(socinfo);
+ unsigned int major = socinfo_to_major(socinfo);
+ int i;
+
+ for (i = 0 ; i < ARRAY_SIZE(soc_packages) ; ++i) {
+ if (soc_packages[i].major_id == major &&
+ soc_packages[i].pack_id ==
+ (pack & soc_packages[i].pack_mask))
+ return soc_packages[i].name;
+ }
+
+ return "Unknown";
+}
+
+static const char *socinfo_to_soc_id(u32 socinfo)
+{
+ unsigned int id = socinfo_to_major(socinfo);
+ int i;
+
+ for (i = 0 ; i < ARRAY_SIZE(soc_ids) ; ++i) {
+ if (soc_ids[i].id == id)
+ return soc_ids[i].name;
+ }
+
+ return "Unknown";
+}
+
+static int __init meson_gx_socinfo_init(void)
+{
+ struct soc_device_attribute *soc_dev_attr;
+ struct soc_device *soc_dev;
+ struct device_node *np;
+ struct regmap *regmap;
+ unsigned int socinfo;
+ struct device *dev;
+ int ret;
+
+ /* look up the chipid node */
+ np = of_find_compatible_node(NULL, NULL, "amlogic,meson-gx-ao-secure");
+ if (!np)
+ return -ENODEV;
+
+ /* check if interface is enabled */
+ if (!of_device_is_available(np)) {
+ of_node_put(np);
+ return -ENODEV;
+ }
+
+ /* check if chip-id is available */
+ if (!of_property_read_bool(np, "amlogic,has-chip-id")) {
+ of_node_put(np);
+ return -ENODEV;
+ }
+
+ /* node should be a syscon */
+ regmap = syscon_node_to_regmap(np);
+ of_node_put(np);
+ if (IS_ERR(regmap)) {
+ pr_err("%s: failed to get regmap\n", __func__);
+ return -ENODEV;
+ }
+
+ ret = regmap_read(regmap, AO_SEC_SOCINFO_OFFSET, &socinfo);
+ if (ret < 0)
+ return ret;
+
+ if (!socinfo) {
+ pr_err("%s: invalid chipid value\n", __func__);
+ return -EINVAL;
+ }
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENODEV;
+
+ soc_dev_attr->family = "Amlogic Meson";
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%x:%x - %x:%x",
+ socinfo_to_major(socinfo),
+ socinfo_to_minor(socinfo),
+ socinfo_to_pack(socinfo),
+ socinfo_to_misc(socinfo));
+ soc_dev_attr->soc_id = kasprintf(GFP_KERNEL, "%s (%s)",
+ socinfo_to_soc_id(socinfo),
+ socinfo_to_package_id(socinfo));
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ kfree(soc_dev_attr->revision);
+ kfree_const(soc_dev_attr->soc_id);
+ kfree(soc_dev_attr);
+ return PTR_ERR(soc_dev);
+ }
+ dev = soc_device_to_device(soc_dev);
+
+ dev_info(dev, "Amlogic Meson %s Revision %x:%x (%x:%x) Detected\n",
+ soc_dev_attr->soc_id,
+ socinfo_to_major(socinfo),
+ socinfo_to_minor(socinfo),
+ socinfo_to_pack(socinfo),
+ socinfo_to_misc(socinfo));
+
+ return 0;
+}
+device_initcall(meson_gx_socinfo_init);
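
A worked decoding example for the bit fields above, using an invented register value:

    AO_SEC_SD_CFG8 = 0x28400a0b              (hypothetical)
      SOCINFO_MAJOR (bits 31:24) = 0x28  -> soc_ids[]            -> "G12A"
      SOCINFO_PACK  (bits 23:16) = 0x40  -> soc_packages[], 0x40 & 0xf0 -> "S905X2"
      SOCINFO_MINOR (bits 15:8)  = 0x0a
      SOCINFO_MISC  (bits 7:0)   = 0x0b
      soc_id   = "G12A (S905X2)"
      revision = "28:a - 40:b"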
diff --git a/drivers/soc/amlogic/meson-mx-socinfo.c b/drivers/soc/amlogic/meson-mx-socinfo.c
new file mode 100644
index 0000000000..92125dd65f
--- /dev/null
+++ b/drivers/soc/amlogic/meson-mx-socinfo.c
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2017 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+#include <linux/bitfield.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+#define MESON_SOCINFO_MAJOR_VER_MESON6 0x16
+#define MESON_SOCINFO_MAJOR_VER_MESON8 0x19
+#define MESON_SOCINFO_MAJOR_VER_MESON8B 0x1b
+
+#define MESON_MX_ASSIST_HW_REV 0x14c
+
+#define MESON_MX_ANALOG_TOP_METAL_REVISION 0x0
+
+#define MESON_MX_BOOTROM_MISC_VER 0x4
+
+static const char *meson_mx_socinfo_revision(unsigned int major_ver,
+ unsigned int misc_ver,
+ unsigned int metal_rev)
+{
+ unsigned int minor_ver;
+
+ switch (major_ver) {
+ case MESON_SOCINFO_MAJOR_VER_MESON6:
+ minor_ver = 0xa;
+ break;
+
+ case MESON_SOCINFO_MAJOR_VER_MESON8:
+ if (metal_rev == 0x11111112)
+ major_ver = 0x1d;
+
+ if (metal_rev == 0x11111111 || metal_rev == 0x11111112)
+ minor_ver = 0xa;
+ else if (metal_rev == 0x11111113)
+ minor_ver = 0xb;
+ else if (metal_rev == 0x11111133)
+ minor_ver = 0xc;
+ else
+ minor_ver = 0xd;
+
+ break;
+
+ case MESON_SOCINFO_MAJOR_VER_MESON8B:
+ if (metal_rev == 0x11111111)
+ minor_ver = 0xa;
+ else
+ minor_ver = 0xb;
+
+ break;
+
+ default:
+ minor_ver = 0x0;
+ break;
+ }
+
+ return kasprintf(GFP_KERNEL, "Rev%X (%x - 0:%X)", minor_ver, major_ver,
+ misc_ver);
+}
+
+static const char *meson_mx_socinfo_soc_id(unsigned int major_ver,
+ unsigned int metal_rev)
+{
+ const char *soc_id;
+
+ switch (major_ver) {
+ case MESON_SOCINFO_MAJOR_VER_MESON6:
+ soc_id = "Meson6 (AML8726-MX)";
+ break;
+
+ case MESON_SOCINFO_MAJOR_VER_MESON8:
+ if (metal_rev == 0x11111112)
+ soc_id = "Meson8m2 (S812)";
+ else
+ soc_id = "Meson8 (S802)";
+
+ break;
+
+ case MESON_SOCINFO_MAJOR_VER_MESON8B:
+ soc_id = "Meson8b (S805)";
+ break;
+
+ default:
+ soc_id = "Unknown";
+ break;
+ }
+
+ return kstrdup_const(soc_id, GFP_KERNEL);
+}
+
+static const struct of_device_id meson_mx_socinfo_analog_top_ids[] = {
+ { .compatible = "amlogic,meson8-analog-top", },
+ { .compatible = "amlogic,meson8b-analog-top", },
+ { /* sentinel */ }
+};
+
+static int __init meson_mx_socinfo_init(void)
+{
+ struct soc_device_attribute *soc_dev_attr;
+ struct soc_device *soc_dev;
+ struct device_node *np;
+ struct regmap *assist_regmap, *bootrom_regmap, *analog_top_regmap;
+ unsigned int major_ver, misc_ver, metal_rev = 0;
+ int ret;
+
+ assist_regmap =
+ syscon_regmap_lookup_by_compatible("amlogic,meson-mx-assist");
+ if (IS_ERR(assist_regmap))
+ return PTR_ERR(assist_regmap);
+
+ bootrom_regmap =
+ syscon_regmap_lookup_by_compatible("amlogic,meson-mx-bootrom");
+ if (IS_ERR(bootrom_regmap))
+ return PTR_ERR(bootrom_regmap);
+
+ np = of_find_matching_node(NULL, meson_mx_socinfo_analog_top_ids);
+ if (np) {
+ analog_top_regmap = syscon_node_to_regmap(np);
+ of_node_put(np);
+ if (IS_ERR(analog_top_regmap))
+ return PTR_ERR(analog_top_regmap);
+
+ ret = regmap_read(analog_top_regmap,
+ MESON_MX_ANALOG_TOP_METAL_REVISION,
+ &metal_rev);
+ if (ret)
+ return ret;
+ }
+
+ ret = regmap_read(assist_regmap, MESON_MX_ASSIST_HW_REV, &major_ver);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(bootrom_regmap, MESON_MX_BOOTROM_MISC_VER,
+ &misc_ver);
+ if (ret < 0)
+ return ret;
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENODEV;
+
+ soc_dev_attr->family = "Amlogic Meson";
+
+ np = of_find_node_by_path("/");
+ of_property_read_string(np, "model", &soc_dev_attr->machine);
+ of_node_put(np);
+
+ soc_dev_attr->revision = meson_mx_socinfo_revision(major_ver, misc_ver,
+ metal_rev);
+ soc_dev_attr->soc_id = meson_mx_socinfo_soc_id(major_ver, metal_rev);
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ kfree_const(soc_dev_attr->revision);
+ kfree_const(soc_dev_attr->soc_id);
+ kfree(soc_dev_attr);
+ return PTR_ERR(soc_dev);
+ }
+
+ dev_info(soc_device_to_device(soc_dev), "Amlogic %s %s detected\n",
+ soc_dev_attr->soc_id, soc_dev_attr->revision);
+
+ return 0;
+}
+device_initcall(meson_mx_socinfo_init);
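
Similarly, a worked example for the Meson MX decoding above, again with invented register values:

    MESON_MX_ASSIST_HW_REV        = 0x19        -> MESON_SOCINFO_MAJOR_VER_MESON8
    ANALOG_TOP metal revision     = 0x11111112  -> major bumped to 0x1d, minor 0xa,
                                                   soc_id "Meson8m2 (S812)"
    MESON_MX_BOOTROM_MISC_VER     = 0x25        (hypothetical)
      revision = "RevA (1d - 0:25)"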
diff --git a/drivers/soc/apple/Kconfig b/drivers/soc/apple/Kconfig
new file mode 100644
index 0000000000..a1596fefac
--- /dev/null
+++ b/drivers/soc/apple/Kconfig
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+if ARCH_APPLE || COMPILE_TEST
+
+menu "Apple SoC drivers"
+
+config APPLE_PMGR_PWRSTATE
+ bool "Apple SoC PMGR power state control"
+ depends on PM
+ select REGMAP
+ select MFD_SYSCON
+ select PM_GENERIC_DOMAINS
+ select RESET_CONTROLLER
+ default ARCH_APPLE
+ help
+ The PMGR block in Apple SoCs provides high-level power state
+ controls for SoC devices. This driver manages them through the
+ generic power domain framework, and also provides reset support.
+
+config APPLE_RTKIT
+ tristate "Apple RTKit co-processor IPC protocol"
+ depends on MAILBOX
+ depends on ARCH_APPLE || COMPILE_TEST
+ default ARCH_APPLE
+ help
+ Apple SoCs such as the M1 come with various co-processors running
+ their proprietary RTKit operating system. This option enables support
+ for the protocol library used to communicate with those. It is used
+ by various client drivers.
+
+ Say 'y' here if you have an Apple SoC.
+
+config APPLE_SART
+ tristate "Apple SART DMA address filter"
+ depends on ARCH_APPLE || COMPILE_TEST
+ default ARCH_APPLE
+ help
+ Apple SART is a simple DMA address filter used on Apple SoCs such
+ as the M1. It is usually required for the NVMe coprocessor which does
+ not use a proper IOMMU.
+
+ Say 'y' here if you have an Apple SoC.
+
+endmenu
+
+endif
diff --git a/drivers/soc/apple/Makefile b/drivers/soc/apple/Makefile
new file mode 100644
index 0000000000..b241e6a65e
--- /dev/null
+++ b/drivers/soc/apple/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_APPLE_RTKIT) += apple-rtkit.o
+apple-rtkit-y = rtkit.o rtkit-crashlog.o
+
+obj-$(CONFIG_APPLE_SART) += apple-sart.o
+apple-sart-y = sart.o
diff --git a/drivers/soc/apple/rtkit-crashlog.c b/drivers/soc/apple/rtkit-crashlog.c
new file mode 100644
index 0000000000..8319e36511
--- /dev/null
+++ b/drivers/soc/apple/rtkit-crashlog.c
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple RTKit IPC library
+ * Copyright (C) The Asahi Linux Contributors
+ */
+#include "rtkit-internal.h"
+
+#define FOURCC(a, b, c, d) \
+ (((u32)(a) << 24) | ((u32)(b) << 16) | ((u32)(c) << 8) | ((u32)(d)))
+
+#define APPLE_RTKIT_CRASHLOG_HEADER FOURCC('C', 'L', 'H', 'E')
+#define APPLE_RTKIT_CRASHLOG_STR FOURCC('C', 's', 't', 'r')
+#define APPLE_RTKIT_CRASHLOG_VERSION FOURCC('C', 'v', 'e', 'r')
+#define APPLE_RTKIT_CRASHLOG_MBOX FOURCC('C', 'm', 'b', 'x')
+#define APPLE_RTKIT_CRASHLOG_TIME FOURCC('C', 't', 'i', 'm')
+#define APPLE_RTKIT_CRASHLOG_REGS FOURCC('C', 'r', 'g', '8')
+
+/* For COMPILE_TEST on non-ARM64 architectures */
+#ifndef PSR_MODE_EL0t
+#define PSR_MODE_EL0t 0x00000000
+#define PSR_MODE_EL1t 0x00000004
+#define PSR_MODE_EL1h 0x00000005
+#define PSR_MODE_EL2t 0x00000008
+#define PSR_MODE_EL2h 0x00000009
+#define PSR_MODE_MASK 0x0000000f
+#endif
+
+struct apple_rtkit_crashlog_header {
+ u32 fourcc;
+ u32 version;
+ u32 size;
+ u32 flags;
+ u8 _unk[16];
+};
+static_assert(sizeof(struct apple_rtkit_crashlog_header) == 0x20);
+
+struct apple_rtkit_crashlog_mbox_entry {
+ u64 msg0;
+ u64 msg1;
+ u32 timestamp;
+ u8 _unk[4];
+};
+static_assert(sizeof(struct apple_rtkit_crashlog_mbox_entry) == 0x18);
+
+struct apple_rtkit_crashlog_regs {
+ u32 unk_0;
+ u32 unk_4;
+ u64 regs[31];
+ u64 sp;
+ u64 pc;
+ u64 psr;
+ u64 cpacr;
+ u64 fpsr;
+ u64 fpcr;
+ u64 unk[64];
+ u64 far;
+ u64 unk_X;
+ u64 esr;
+ u64 unk_Z;
+} __packed;
+static_assert(sizeof(struct apple_rtkit_crashlog_regs) == 0x350);
+
+static void apple_rtkit_crashlog_dump_str(struct apple_rtkit *rtk, u8 *bfr,
+ size_t size)
+{
+ u32 idx;
+ u8 *ptr, *end;
+
+ memcpy(&idx, bfr, 4);
+
+ ptr = bfr + 4;
+ end = bfr + size;
+ while (ptr < end) {
+ u8 *newline = memchr(ptr, '\n', end - ptr);
+
+ if (newline) {
+ u8 tmp = *newline;
+ *newline = '\0';
+ dev_warn(rtk->dev, "RTKit: Message (id=%x): %s\n", idx,
+ ptr);
+ *newline = tmp;
+ ptr = newline + 1;
+ } else {
+ dev_warn(rtk->dev, "RTKit: Message (id=%x): %s", idx,
+ ptr);
+ break;
+ }
+ }
+}
+
+static void apple_rtkit_crashlog_dump_version(struct apple_rtkit *rtk, u8 *bfr,
+ size_t size)
+{
+ dev_warn(rtk->dev, "RTKit: Version: %s", bfr + 16);
+}
+
+static void apple_rtkit_crashlog_dump_time(struct apple_rtkit *rtk, u8 *bfr,
+ size_t size)
+{
+ u64 crash_time;
+
+ memcpy(&crash_time, bfr, 8);
+ dev_warn(rtk->dev, "RTKit: Crash time: %lld", crash_time);
+}
+
+static void apple_rtkit_crashlog_dump_mailbox(struct apple_rtkit *rtk, u8 *bfr,
+ size_t size)
+{
+ u32 type, index, i;
+ size_t n_messages;
+ struct apple_rtkit_crashlog_mbox_entry entry;
+
+ memcpy(&type, bfr + 16, 4);
+ memcpy(&index, bfr + 24, 4);
+ n_messages = (size - 28) / sizeof(entry);
+
+ dev_warn(rtk->dev, "RTKit: Mailbox history (type = %d, index = %d)",
+ type, index);
+ for (i = 0; i < n_messages; ++i) {
+ memcpy(&entry, bfr + 28 + i * sizeof(entry), sizeof(entry));
+ dev_warn(rtk->dev, "RTKit: #%03d@%08x: %016llx %016llx", i,
+ entry.timestamp, entry.msg0, entry.msg1);
+ }
+}
+
+static void apple_rtkit_crashlog_dump_regs(struct apple_rtkit *rtk, u8 *bfr,
+ size_t size)
+{
+ struct apple_rtkit_crashlog_regs *regs;
+ const char *el;
+ int i;
+
+ if (size < sizeof(*regs)) {
+ dev_warn(rtk->dev, "RTKit: Regs section too small: 0x%zx", size);
+ return;
+ }
+
+ regs = (struct apple_rtkit_crashlog_regs *)bfr;
+
+ switch (regs->psr & PSR_MODE_MASK) {
+ case PSR_MODE_EL0t:
+ el = "EL0t";
+ break;
+ case PSR_MODE_EL1t:
+ el = "EL1t";
+ break;
+ case PSR_MODE_EL1h:
+ el = "EL1h";
+ break;
+ case PSR_MODE_EL2t:
+ el = "EL2t";
+ break;
+ case PSR_MODE_EL2h:
+ el = "EL2h";
+ break;
+ default:
+ el = "unknown";
+ break;
+ }
+
+ dev_warn(rtk->dev, "RTKit: Exception dump:");
+ dev_warn(rtk->dev, " == Exception taken from %s ==", el);
+ dev_warn(rtk->dev, " PSR = 0x%llx", regs->psr);
+ dev_warn(rtk->dev, " PC = 0x%llx\n", regs->pc);
+ dev_warn(rtk->dev, " ESR = 0x%llx\n", regs->esr);
+ dev_warn(rtk->dev, " FAR = 0x%llx\n", regs->far);
+ dev_warn(rtk->dev, " SP = 0x%llx\n", regs->sp);
+ dev_warn(rtk->dev, "\n");
+
+ for (i = 0; i < 31; i += 4) {
+ if (i < 28)
+ dev_warn(rtk->dev,
+ " x%02d-x%02d = %016llx %016llx %016llx %016llx\n",
+ i, i + 3,
+ regs->regs[i], regs->regs[i + 1],
+ regs->regs[i + 2], regs->regs[i + 3]);
+ else
+ dev_warn(rtk->dev,
+				 " x%02d-x%02d = %016llx %016llx %016llx\n", i, i + 2,
+ regs->regs[i], regs->regs[i + 1], regs->regs[i + 2]);
+ }
+
+ dev_warn(rtk->dev, "\n");
+}
+
+void apple_rtkit_crashlog_dump(struct apple_rtkit *rtk, u8 *bfr, size_t size)
+{
+ size_t offset;
+ u32 section_fourcc, section_size;
+ struct apple_rtkit_crashlog_header header;
+
+ memcpy(&header, bfr, sizeof(header));
+ if (header.fourcc != APPLE_RTKIT_CRASHLOG_HEADER) {
+ dev_warn(rtk->dev, "RTKit: Expected crashlog header but got %x",
+ header.fourcc);
+ return;
+ }
+
+ if (header.size > size) {
+ dev_warn(rtk->dev, "RTKit: Crashlog size (%x) is too large",
+ header.size);
+ return;
+ }
+
+ size = header.size;
+ offset = sizeof(header);
+
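+	/*
+	 * Each section starts with its own fourcc; the section size (used to
+	 * advance to the next section) is read from offset 12 and the payload
+	 * starts at offset 16, as parsed below.
+	 */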
+ while (offset < size) {
+ memcpy(&section_fourcc, bfr + offset, 4);
+ memcpy(&section_size, bfr + offset + 12, 4);
+
+ switch (section_fourcc) {
+ case APPLE_RTKIT_CRASHLOG_HEADER:
+ dev_dbg(rtk->dev, "RTKit: End of crashlog reached");
+ return;
+ case APPLE_RTKIT_CRASHLOG_STR:
+ apple_rtkit_crashlog_dump_str(rtk, bfr + offset + 16,
+ section_size);
+ break;
+ case APPLE_RTKIT_CRASHLOG_VERSION:
+ apple_rtkit_crashlog_dump_version(
+ rtk, bfr + offset + 16, section_size);
+ break;
+ case APPLE_RTKIT_CRASHLOG_MBOX:
+ apple_rtkit_crashlog_dump_mailbox(
+ rtk, bfr + offset + 16, section_size);
+ break;
+ case APPLE_RTKIT_CRASHLOG_TIME:
+ apple_rtkit_crashlog_dump_time(rtk, bfr + offset + 16,
+ section_size);
+ break;
+ case APPLE_RTKIT_CRASHLOG_REGS:
+ apple_rtkit_crashlog_dump_regs(rtk, bfr + offset + 16,
+ section_size);
+ break;
+ default:
+ dev_warn(rtk->dev,
+ "RTKit: Unknown crashlog section: %x",
+ section_fourcc);
+ }
+
+ offset += section_size;
+ }
+
+ dev_warn(rtk->dev,
+ "RTKit: End of crashlog reached but no footer present");
+}
diff --git a/drivers/soc/apple/rtkit-internal.h b/drivers/soc/apple/rtkit-internal.h
new file mode 100644
index 0000000000..24bd619ec5
--- /dev/null
+++ b/drivers/soc/apple/rtkit-internal.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Apple RTKit IPC library
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#ifndef _APPLE_RTKIT_INTERNAL_H
+#define _APPLE_RTKIT_INTERNAL_H
+
+#include <linux/apple-mailbox.h>
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/soc/apple/rtkit.h>
+#include <linux/workqueue.h>
+
+#define APPLE_RTKIT_APP_ENDPOINT_START 0x20
+#define APPLE_RTKIT_MAX_ENDPOINTS 0x100
+
+struct apple_rtkit {
+ void *cookie;
+ const struct apple_rtkit_ops *ops;
+ struct device *dev;
+
+ const char *mbox_name;
+ int mbox_idx;
+ struct mbox_client mbox_cl;
+ struct mbox_chan *mbox_chan;
+
+ struct completion epmap_completion;
+ struct completion iop_pwr_ack_completion;
+ struct completion ap_pwr_ack_completion;
+
+ int boot_result;
+ int version;
+
+ unsigned int iop_power_state;
+ unsigned int ap_power_state;
+ bool crashed;
+
+ DECLARE_BITMAP(endpoints, APPLE_RTKIT_MAX_ENDPOINTS);
+
+ struct apple_rtkit_shmem ioreport_buffer;
+ struct apple_rtkit_shmem crashlog_buffer;
+
+ struct apple_rtkit_shmem syslog_buffer;
+ char *syslog_msg_buffer;
+ size_t syslog_n_entries;
+ size_t syslog_msg_size;
+
+ struct workqueue_struct *wq;
+};
+
+void apple_rtkit_crashlog_dump(struct apple_rtkit *rtk, u8 *bfr, size_t size);
+
+#endif
diff --git a/drivers/soc/apple/rtkit.c b/drivers/soc/apple/rtkit.c
new file mode 100644
index 0000000000..d9f19dc99d
--- /dev/null
+++ b/drivers/soc/apple/rtkit.c
@@ -0,0 +1,1003 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple RTKit IPC library
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#include "rtkit-internal.h"
+
+enum {
+ APPLE_RTKIT_PWR_STATE_OFF = 0x00, /* power off, cannot be restarted */
+ APPLE_RTKIT_PWR_STATE_SLEEP = 0x01, /* sleeping, can be restarted */
+ APPLE_RTKIT_PWR_STATE_IDLE = 0x201, /* sleeping, retain state */
+ APPLE_RTKIT_PWR_STATE_QUIESCED = 0x10, /* running but no communication */
+ APPLE_RTKIT_PWR_STATE_ON = 0x20, /* normal operating state */
+};
+
+enum {
+ APPLE_RTKIT_EP_MGMT = 0,
+ APPLE_RTKIT_EP_CRASHLOG = 1,
+ APPLE_RTKIT_EP_SYSLOG = 2,
+ APPLE_RTKIT_EP_DEBUG = 3,
+ APPLE_RTKIT_EP_IOREPORT = 4,
+ APPLE_RTKIT_EP_OSLOG = 8,
+};
+
+#define APPLE_RTKIT_MGMT_TYPE GENMASK_ULL(59, 52)
+
+enum {
+ APPLE_RTKIT_MGMT_HELLO = 1,
+ APPLE_RTKIT_MGMT_HELLO_REPLY = 2,
+ APPLE_RTKIT_MGMT_STARTEP = 5,
+ APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE = 6,
+ APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE_ACK = 7,
+ APPLE_RTKIT_MGMT_EPMAP = 8,
+ APPLE_RTKIT_MGMT_EPMAP_REPLY = 8,
+ APPLE_RTKIT_MGMT_SET_AP_PWR_STATE = 0xb,
+ APPLE_RTKIT_MGMT_SET_AP_PWR_STATE_ACK = 0xb,
+};
+
+#define APPLE_RTKIT_MGMT_HELLO_MINVER GENMASK_ULL(15, 0)
+#define APPLE_RTKIT_MGMT_HELLO_MAXVER GENMASK_ULL(31, 16)
+
+#define APPLE_RTKIT_MGMT_EPMAP_LAST BIT_ULL(51)
+#define APPLE_RTKIT_MGMT_EPMAP_BASE GENMASK_ULL(34, 32)
+#define APPLE_RTKIT_MGMT_EPMAP_BITMAP GENMASK_ULL(31, 0)
+
+#define APPLE_RTKIT_MGMT_EPMAP_REPLY_MORE BIT_ULL(0)
+
+#define APPLE_RTKIT_MGMT_STARTEP_EP GENMASK_ULL(39, 32)
+#define APPLE_RTKIT_MGMT_STARTEP_FLAG BIT_ULL(1)
+
+#define APPLE_RTKIT_MGMT_PWR_STATE GENMASK_ULL(15, 0)
+
+#define APPLE_RTKIT_CRASHLOG_CRASH 1
+
+#define APPLE_RTKIT_BUFFER_REQUEST 1
+#define APPLE_RTKIT_BUFFER_REQUEST_SIZE GENMASK_ULL(51, 44)
+#define APPLE_RTKIT_BUFFER_REQUEST_IOVA GENMASK_ULL(43, 0)
+
+#define APPLE_RTKIT_SYSLOG_TYPE GENMASK_ULL(59, 52)
+
+#define APPLE_RTKIT_SYSLOG_LOG 5
+
+#define APPLE_RTKIT_SYSLOG_INIT 8
+#define APPLE_RTKIT_SYSLOG_N_ENTRIES GENMASK_ULL(7, 0)
+#define APPLE_RTKIT_SYSLOG_MSG_SIZE GENMASK_ULL(31, 24)
+
+#define APPLE_RTKIT_OSLOG_TYPE GENMASK_ULL(63, 56)
+#define APPLE_RTKIT_OSLOG_INIT 1
+#define APPLE_RTKIT_OSLOG_ACK 3
+
+#define APPLE_RTKIT_MIN_SUPPORTED_VERSION 11
+#define APPLE_RTKIT_MAX_SUPPORTED_VERSION 12
+
+struct apple_rtkit_msg {
+ struct completion *completion;
+ struct apple_mbox_msg mbox_msg;
+};
+
+struct apple_rtkit_rx_work {
+ struct apple_rtkit *rtk;
+ u8 ep;
+ u64 msg;
+ struct work_struct work;
+};
+
+bool apple_rtkit_is_running(struct apple_rtkit *rtk)
+{
+ if (rtk->crashed)
+ return false;
+ if ((rtk->iop_power_state & 0xff) != APPLE_RTKIT_PWR_STATE_ON)
+ return false;
+ if ((rtk->ap_power_state & 0xff) != APPLE_RTKIT_PWR_STATE_ON)
+ return false;
+ return true;
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_is_running);
+
+bool apple_rtkit_is_crashed(struct apple_rtkit *rtk)
+{
+ return rtk->crashed;
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_is_crashed);
+
+static void apple_rtkit_management_send(struct apple_rtkit *rtk, u8 type,
+ u64 msg)
+{
+ msg &= ~APPLE_RTKIT_MGMT_TYPE;
+ msg |= FIELD_PREP(APPLE_RTKIT_MGMT_TYPE, type);
+ apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_MGMT, msg, NULL, false);
+}
+
+static void apple_rtkit_management_rx_hello(struct apple_rtkit *rtk, u64 msg)
+{
+ u64 reply;
+
+ int min_ver = FIELD_GET(APPLE_RTKIT_MGMT_HELLO_MINVER, msg);
+ int max_ver = FIELD_GET(APPLE_RTKIT_MGMT_HELLO_MAXVER, msg);
+ int want_ver = min(APPLE_RTKIT_MAX_SUPPORTED_VERSION, max_ver);
+
+ dev_dbg(rtk->dev, "RTKit: Min ver %d, max ver %d\n", min_ver, max_ver);
+
+ if (min_ver > APPLE_RTKIT_MAX_SUPPORTED_VERSION) {
+ dev_err(rtk->dev, "RTKit: Firmware min version %d is too new\n",
+ min_ver);
+ goto abort_boot;
+ }
+
+ if (max_ver < APPLE_RTKIT_MIN_SUPPORTED_VERSION) {
+ dev_err(rtk->dev, "RTKit: Firmware max version %d is too old\n",
+ max_ver);
+ goto abort_boot;
+ }
+
+ dev_info(rtk->dev, "RTKit: Initializing (protocol version %d)\n",
+ want_ver);
+ rtk->version = want_ver;
+
+ reply = FIELD_PREP(APPLE_RTKIT_MGMT_HELLO_MINVER, want_ver);
+ reply |= FIELD_PREP(APPLE_RTKIT_MGMT_HELLO_MAXVER, want_ver);
+ apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_HELLO_REPLY, reply);
+
+ return;
+
+abort_boot:
+ rtk->boot_result = -EINVAL;
+ complete_all(&rtk->epmap_completion);
+}
+
+static void apple_rtkit_management_rx_epmap(struct apple_rtkit *rtk, u64 msg)
+{
+ int i, ep;
+ u64 reply;
+ unsigned long bitmap = FIELD_GET(APPLE_RTKIT_MGMT_EPMAP_BITMAP, msg);
+ u32 base = FIELD_GET(APPLE_RTKIT_MGMT_EPMAP_BASE, msg);
+
+ dev_dbg(rtk->dev,
+ "RTKit: received endpoint bitmap 0x%lx with base 0x%x\n",
+ bitmap, base);
+
+ for_each_set_bit(i, &bitmap, 32) {
+ ep = 32 * base + i;
+ dev_dbg(rtk->dev, "RTKit: Discovered endpoint 0x%02x\n", ep);
+ set_bit(ep, rtk->endpoints);
+ }
+
+ reply = FIELD_PREP(APPLE_RTKIT_MGMT_EPMAP_BASE, base);
+ if (msg & APPLE_RTKIT_MGMT_EPMAP_LAST)
+ reply |= APPLE_RTKIT_MGMT_EPMAP_LAST;
+ else
+ reply |= APPLE_RTKIT_MGMT_EPMAP_REPLY_MORE;
+
+ apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_EPMAP_REPLY, reply);
+
+ if (!(msg & APPLE_RTKIT_MGMT_EPMAP_LAST))
+ return;
+
+ for_each_set_bit(ep, rtk->endpoints, APPLE_RTKIT_APP_ENDPOINT_START) {
+ switch (ep) {
+ /* the management endpoint is started by default */
+ case APPLE_RTKIT_EP_MGMT:
+ break;
+
+ /* without starting these RTKit refuses to boot */
+ case APPLE_RTKIT_EP_SYSLOG:
+ case APPLE_RTKIT_EP_CRASHLOG:
+ case APPLE_RTKIT_EP_DEBUG:
+ case APPLE_RTKIT_EP_IOREPORT:
+ case APPLE_RTKIT_EP_OSLOG:
+ dev_dbg(rtk->dev,
+ "RTKit: Starting system endpoint 0x%02x\n", ep);
+ apple_rtkit_start_ep(rtk, ep);
+ break;
+
+ default:
+ dev_warn(rtk->dev,
+ "RTKit: Unknown system endpoint: 0x%02x\n",
+ ep);
+ }
+ }
+
+ rtk->boot_result = 0;
+ complete_all(&rtk->epmap_completion);
+}
+
+static void apple_rtkit_management_rx_iop_pwr_ack(struct apple_rtkit *rtk,
+ u64 msg)
+{
+ unsigned int new_state = FIELD_GET(APPLE_RTKIT_MGMT_PWR_STATE, msg);
+
+ dev_dbg(rtk->dev, "RTKit: IOP power state transition: 0x%x -> 0x%x\n",
+ rtk->iop_power_state, new_state);
+ rtk->iop_power_state = new_state;
+
+ complete_all(&rtk->iop_pwr_ack_completion);
+}
+
+static void apple_rtkit_management_rx_ap_pwr_ack(struct apple_rtkit *rtk,
+ u64 msg)
+{
+ unsigned int new_state = FIELD_GET(APPLE_RTKIT_MGMT_PWR_STATE, msg);
+
+ dev_dbg(rtk->dev, "RTKit: AP power state transition: 0x%x -> 0x%x\n",
+ rtk->ap_power_state, new_state);
+ rtk->ap_power_state = new_state;
+
+ complete_all(&rtk->ap_pwr_ack_completion);
+}
+
+static void apple_rtkit_management_rx(struct apple_rtkit *rtk, u64 msg)
+{
+ u8 type = FIELD_GET(APPLE_RTKIT_MGMT_TYPE, msg);
+
+ switch (type) {
+ case APPLE_RTKIT_MGMT_HELLO:
+ apple_rtkit_management_rx_hello(rtk, msg);
+ break;
+ case APPLE_RTKIT_MGMT_EPMAP:
+ apple_rtkit_management_rx_epmap(rtk, msg);
+ break;
+ case APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE_ACK:
+ apple_rtkit_management_rx_iop_pwr_ack(rtk, msg);
+ break;
+ case APPLE_RTKIT_MGMT_SET_AP_PWR_STATE_ACK:
+ apple_rtkit_management_rx_ap_pwr_ack(rtk, msg);
+ break;
+ default:
+ dev_warn(
+ rtk->dev,
+ "RTKit: unknown management message: 0x%llx (type: 0x%02x)\n",
+ msg, type);
+ }
+}
+
+static int apple_rtkit_common_rx_get_buffer(struct apple_rtkit *rtk,
+ struct apple_rtkit_shmem *buffer,
+ u8 ep, u64 msg)
+{
+ size_t n_4kpages = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_SIZE, msg);
+ u64 reply;
+ int err;
+
+ buffer->buffer = NULL;
+ buffer->iomem = NULL;
+ buffer->is_mapped = false;
+ buffer->iova = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_IOVA, msg);
+ buffer->size = n_4kpages << 12;
+
+ dev_dbg(rtk->dev, "RTKit: buffer request for 0x%zx bytes at %pad\n",
+ buffer->size, &buffer->iova);
+
+ if (buffer->iova &&
+ (!rtk->ops->shmem_setup || !rtk->ops->shmem_destroy)) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ if (rtk->ops->shmem_setup) {
+ err = rtk->ops->shmem_setup(rtk->cookie, buffer);
+ if (err)
+ goto error;
+ } else {
+ buffer->buffer = dma_alloc_coherent(rtk->dev, buffer->size,
+ &buffer->iova, GFP_KERNEL);
+ if (!buffer->buffer) {
+ err = -ENOMEM;
+ goto error;
+ }
+ }
+
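+	/*
+	 * Unless the buffer was mapped in place at the requested IOVA, reply
+	 * with the IOVA that actually backs it so the co-processor knows
+	 * where the buffer ended up.
+	 */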
+ if (!buffer->is_mapped) {
+ reply = FIELD_PREP(APPLE_RTKIT_SYSLOG_TYPE,
+ APPLE_RTKIT_BUFFER_REQUEST);
+ reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_SIZE, n_4kpages);
+ reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_IOVA,
+ buffer->iova);
+ apple_rtkit_send_message(rtk, ep, reply, NULL, false);
+ }
+
+ return 0;
+
+error:
+ buffer->buffer = NULL;
+ buffer->iomem = NULL;
+ buffer->iova = 0;
+ buffer->size = 0;
+ buffer->is_mapped = false;
+ return err;
+}
+
+static void apple_rtkit_free_buffer(struct apple_rtkit *rtk,
+ struct apple_rtkit_shmem *bfr)
+{
+ if (bfr->size == 0)
+ return;
+
+ if (rtk->ops->shmem_destroy)
+ rtk->ops->shmem_destroy(rtk->cookie, bfr);
+ else if (bfr->buffer)
+ dma_free_coherent(rtk->dev, bfr->size, bfr->buffer, bfr->iova);
+
+ bfr->buffer = NULL;
+ bfr->iomem = NULL;
+ bfr->iova = 0;
+ bfr->size = 0;
+ bfr->is_mapped = false;
+}
+
+static void apple_rtkit_memcpy(struct apple_rtkit *rtk, void *dst,
+ struct apple_rtkit_shmem *bfr, size_t offset,
+ size_t len)
+{
+ if (bfr->iomem)
+ memcpy_fromio(dst, bfr->iomem + offset, len);
+ else
+ memcpy(dst, bfr->buffer + offset, len);
+}
+
+static void apple_rtkit_crashlog_rx(struct apple_rtkit *rtk, u64 msg)
+{
+ u8 type = FIELD_GET(APPLE_RTKIT_SYSLOG_TYPE, msg);
+ u8 *bfr;
+
+ if (type != APPLE_RTKIT_CRASHLOG_CRASH) {
+ dev_warn(rtk->dev, "RTKit: Unknown crashlog message: %llx\n",
+ msg);
+ return;
+ }
+
+ if (!rtk->crashlog_buffer.size) {
+ apple_rtkit_common_rx_get_buffer(rtk, &rtk->crashlog_buffer,
+ APPLE_RTKIT_EP_CRASHLOG, msg);
+ return;
+ }
+
+ dev_err(rtk->dev, "RTKit: co-processor has crashed\n");
+
+ /*
+ * create a shadow copy here to make sure the co-processor isn't able
+ * to change the log while we're dumping it. this also ensures
+ * the buffer is in normal memory and not iomem for e.g. the SMC
+ */
+ bfr = kzalloc(rtk->crashlog_buffer.size, GFP_KERNEL);
+ if (bfr) {
+ apple_rtkit_memcpy(rtk, bfr, &rtk->crashlog_buffer, 0,
+ rtk->crashlog_buffer.size);
+ apple_rtkit_crashlog_dump(rtk, bfr, rtk->crashlog_buffer.size);
+ kfree(bfr);
+ } else {
+ dev_err(rtk->dev,
+ "RTKit: Couldn't allocate crashlog shadow buffer\n");
+ }
+
+ rtk->crashed = true;
+ if (rtk->ops->crashed)
+ rtk->ops->crashed(rtk->cookie);
+}
+
+static void apple_rtkit_ioreport_rx(struct apple_rtkit *rtk, u64 msg)
+{
+ u8 type = FIELD_GET(APPLE_RTKIT_SYSLOG_TYPE, msg);
+
+ switch (type) {
+ case APPLE_RTKIT_BUFFER_REQUEST:
+ apple_rtkit_common_rx_get_buffer(rtk, &rtk->ioreport_buffer,
+ APPLE_RTKIT_EP_IOREPORT, msg);
+ break;
+ /* unknown, must be ACKed or the co-processor will hang */
+ case 0x8:
+ case 0xc:
+ apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_IOREPORT, msg,
+ NULL, false);
+ break;
+ default:
+ dev_warn(rtk->dev, "RTKit: Unknown ioreport message: %llx\n",
+ msg);
+ }
+}
+
+static void apple_rtkit_syslog_rx_init(struct apple_rtkit *rtk, u64 msg)
+{
+ rtk->syslog_n_entries = FIELD_GET(APPLE_RTKIT_SYSLOG_N_ENTRIES, msg);
+ rtk->syslog_msg_size = FIELD_GET(APPLE_RTKIT_SYSLOG_MSG_SIZE, msg);
+
+ rtk->syslog_msg_buffer = kzalloc(rtk->syslog_msg_size, GFP_KERNEL);
+
+ dev_dbg(rtk->dev,
+ "RTKit: syslog initialized: entries: %zd, msg_size: %zd\n",
+ rtk->syslog_n_entries, rtk->syslog_msg_size);
+}
+
+static bool should_crop_syslog_char(char c)
+{
+ return c == '\n' || c == '\r' || c == ' ' || c == '\0';
+}
+
+static void apple_rtkit_syslog_rx_log(struct apple_rtkit *rtk, u64 msg)
+{
+ u8 idx = msg & 0xff;
+ char log_context[24];
+ size_t entry_size = 0x20 + rtk->syslog_msg_size;
+ int msglen;
+
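+	/*
+	 * Each syslog entry in the shared buffer occupies 0x20 +
+	 * syslog_msg_size bytes: an 8-byte header, a 24-byte context string
+	 * and the message itself, as reflected by the offsets below.
+	 */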
+ if (!rtk->syslog_msg_buffer) {
+ dev_warn(
+ rtk->dev,
+ "RTKit: received syslog message but no syslog_msg_buffer\n");
+ goto done;
+ }
+ if (!rtk->syslog_buffer.size) {
+ dev_warn(
+ rtk->dev,
+ "RTKit: received syslog message but syslog_buffer.size is zero\n");
+ goto done;
+ }
+ if (!rtk->syslog_buffer.buffer && !rtk->syslog_buffer.iomem) {
+ dev_warn(
+ rtk->dev,
+ "RTKit: received syslog message but no syslog_buffer.buffer or syslog_buffer.iomem\n");
+ goto done;
+ }
+ if (idx > rtk->syslog_n_entries) {
+ dev_warn(rtk->dev, "RTKit: syslog index %d out of range\n",
+ idx);
+ goto done;
+ }
+
+ apple_rtkit_memcpy(rtk, log_context, &rtk->syslog_buffer,
+ idx * entry_size + 8, sizeof(log_context));
+ apple_rtkit_memcpy(rtk, rtk->syslog_msg_buffer, &rtk->syslog_buffer,
+ idx * entry_size + 8 + sizeof(log_context),
+ rtk->syslog_msg_size);
+
+ log_context[sizeof(log_context) - 1] = 0;
+
+ msglen = rtk->syslog_msg_size - 1;
+ while (msglen > 0 &&
+ should_crop_syslog_char(rtk->syslog_msg_buffer[msglen - 1]))
+ msglen--;
+
+ rtk->syslog_msg_buffer[msglen] = 0;
+ dev_info(rtk->dev, "RTKit: syslog message: %s: %s\n", log_context,
+ rtk->syslog_msg_buffer);
+
+done:
+ apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_SYSLOG, msg, NULL, false);
+}
+
+static void apple_rtkit_syslog_rx(struct apple_rtkit *rtk, u64 msg)
+{
+ u8 type = FIELD_GET(APPLE_RTKIT_SYSLOG_TYPE, msg);
+
+ switch (type) {
+ case APPLE_RTKIT_BUFFER_REQUEST:
+ apple_rtkit_common_rx_get_buffer(rtk, &rtk->syslog_buffer,
+ APPLE_RTKIT_EP_SYSLOG, msg);
+ break;
+ case APPLE_RTKIT_SYSLOG_INIT:
+ apple_rtkit_syslog_rx_init(rtk, msg);
+ break;
+ case APPLE_RTKIT_SYSLOG_LOG:
+ apple_rtkit_syslog_rx_log(rtk, msg);
+ break;
+ default:
+ dev_warn(rtk->dev, "RTKit: Unknown syslog message: %llx\n",
+ msg);
+ }
+}
+
+static void apple_rtkit_oslog_rx_init(struct apple_rtkit *rtk, u64 msg)
+{
+ u64 ack;
+
+ dev_dbg(rtk->dev, "RTKit: oslog init: msg: 0x%llx\n", msg);
+ ack = FIELD_PREP(APPLE_RTKIT_OSLOG_TYPE, APPLE_RTKIT_OSLOG_ACK);
+ apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_OSLOG, ack, NULL, false);
+}
+
+static void apple_rtkit_oslog_rx(struct apple_rtkit *rtk, u64 msg)
+{
+ u8 type = FIELD_GET(APPLE_RTKIT_OSLOG_TYPE, msg);
+
+ switch (type) {
+ case APPLE_RTKIT_OSLOG_INIT:
+ apple_rtkit_oslog_rx_init(rtk, msg);
+ break;
+ default:
+ dev_warn(rtk->dev, "RTKit: Unknown oslog message: %llx\n", msg);
+ }
+}
+
+static void apple_rtkit_rx_work(struct work_struct *work)
+{
+ struct apple_rtkit_rx_work *rtk_work =
+ container_of(work, struct apple_rtkit_rx_work, work);
+ struct apple_rtkit *rtk = rtk_work->rtk;
+
+ switch (rtk_work->ep) {
+ case APPLE_RTKIT_EP_MGMT:
+ apple_rtkit_management_rx(rtk, rtk_work->msg);
+ break;
+ case APPLE_RTKIT_EP_CRASHLOG:
+ apple_rtkit_crashlog_rx(rtk, rtk_work->msg);
+ break;
+ case APPLE_RTKIT_EP_SYSLOG:
+ apple_rtkit_syslog_rx(rtk, rtk_work->msg);
+ break;
+ case APPLE_RTKIT_EP_IOREPORT:
+ apple_rtkit_ioreport_rx(rtk, rtk_work->msg);
+ break;
+ case APPLE_RTKIT_EP_OSLOG:
+ apple_rtkit_oslog_rx(rtk, rtk_work->msg);
+ break;
+ case APPLE_RTKIT_APP_ENDPOINT_START ... 0xff:
+ if (rtk->ops->recv_message)
+ rtk->ops->recv_message(rtk->cookie, rtk_work->ep,
+ rtk_work->msg);
+ else
+ dev_warn(
+ rtk->dev,
+ "Received unexpected message to EP%02d: %llx\n",
+ rtk_work->ep, rtk_work->msg);
+ break;
+ default:
+ dev_warn(rtk->dev,
+ "RTKit: message to unknown endpoint %02x: %llx\n",
+ rtk_work->ep, rtk_work->msg);
+ }
+
+ kfree(rtk_work);
+}
+
+static void apple_rtkit_rx(struct mbox_client *cl, void *mssg)
+{
+ struct apple_rtkit *rtk = container_of(cl, struct apple_rtkit, mbox_cl);
+ struct apple_mbox_msg *msg = mssg;
+ struct apple_rtkit_rx_work *work;
+ u8 ep = msg->msg1;
+
+ /*
+	 * The message was read from an MMIO FIFO and we have to make
+ * sure all reads from buffers sent with that message happen
+ * afterwards.
+ */
+ dma_rmb();
+
+ if (!test_bit(ep, rtk->endpoints))
+ dev_warn(rtk->dev,
+ "RTKit: Message to undiscovered endpoint 0x%02x\n",
+ ep);
+
+ if (ep >= APPLE_RTKIT_APP_ENDPOINT_START &&
+ rtk->ops->recv_message_early &&
+ rtk->ops->recv_message_early(rtk->cookie, ep, msg->msg0))
+ return;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ work->rtk = rtk;
+ work->ep = ep;
+ work->msg = msg->msg0;
+ INIT_WORK(&work->work, apple_rtkit_rx_work);
+ queue_work(rtk->wq, &work->work);
+}
+
+static void apple_rtkit_tx_done(struct mbox_client *cl, void *mssg, int r)
+{
+ struct apple_rtkit_msg *msg =
+ container_of(mssg, struct apple_rtkit_msg, mbox_msg);
+
+ if (r == -ETIME)
+ return;
+
+ if (msg->completion)
+ complete(msg->completion);
+ kfree(msg);
+}
+
+int apple_rtkit_send_message(struct apple_rtkit *rtk, u8 ep, u64 message,
+ struct completion *completion, bool atomic)
+{
+ struct apple_rtkit_msg *msg;
+ int ret;
+ gfp_t flags;
+
+ if (rtk->crashed)
+ return -EINVAL;
+ if (ep >= APPLE_RTKIT_APP_ENDPOINT_START &&
+ !apple_rtkit_is_running(rtk))
+ return -EINVAL;
+
+ if (atomic)
+ flags = GFP_ATOMIC;
+ else
+ flags = GFP_KERNEL;
+
+ msg = kzalloc(sizeof(*msg), flags);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->mbox_msg.msg0 = message;
+ msg->mbox_msg.msg1 = ep;
+ msg->completion = completion;
+
+ /*
+	 * The message will be sent with an MMIO write. We need the barrier
+ * here to ensure any previous writes to buffers are visible to the
+ * device before that MMIO write happens.
+ */
+ dma_wmb();
+
+ ret = mbox_send_message(rtk->mbox_chan, &msg->mbox_msg);
+ if (ret < 0) {
+ kfree(msg);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_send_message);
+
+int apple_rtkit_send_message_wait(struct apple_rtkit *rtk, u8 ep, u64 message,
+ unsigned long timeout, bool atomic)
+{
+ DECLARE_COMPLETION_ONSTACK(completion);
+ int ret;
+ long t;
+
+ ret = apple_rtkit_send_message(rtk, ep, message, &completion, atomic);
+ if (ret < 0)
+ return ret;
+
+ if (atomic) {
+ ret = mbox_flush(rtk->mbox_chan, timeout);
+ if (ret < 0)
+ return ret;
+
+ if (try_wait_for_completion(&completion))
+ return 0;
+
+ return -ETIME;
+ } else {
+ t = wait_for_completion_interruptible_timeout(
+ &completion, msecs_to_jiffies(timeout));
+ if (t < 0)
+ return t;
+ else if (t == 0)
+ return -ETIME;
+ return 0;
+ }
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_send_message_wait);
+
+int apple_rtkit_poll(struct apple_rtkit *rtk)
+{
+ return mbox_client_peek_data(rtk->mbox_chan);
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_poll);
+
+int apple_rtkit_start_ep(struct apple_rtkit *rtk, u8 endpoint)
+{
+ u64 msg;
+
+ if (!test_bit(endpoint, rtk->endpoints))
+ return -EINVAL;
+ if (endpoint >= APPLE_RTKIT_APP_ENDPOINT_START &&
+ !apple_rtkit_is_running(rtk))
+ return -EINVAL;
+
+ msg = FIELD_PREP(APPLE_RTKIT_MGMT_STARTEP_EP, endpoint);
+ msg |= APPLE_RTKIT_MGMT_STARTEP_FLAG;
+ apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_STARTEP, msg);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_start_ep);
+
+static int apple_rtkit_request_mbox_chan(struct apple_rtkit *rtk)
+{
+ if (rtk->mbox_name)
+ rtk->mbox_chan = mbox_request_channel_byname(&rtk->mbox_cl,
+ rtk->mbox_name);
+ else
+ rtk->mbox_chan =
+ mbox_request_channel(&rtk->mbox_cl, rtk->mbox_idx);
+
+ if (IS_ERR(rtk->mbox_chan))
+ return PTR_ERR(rtk->mbox_chan);
+ return 0;
+}
+
+struct apple_rtkit *apple_rtkit_init(struct device *dev, void *cookie,
+ const char *mbox_name, int mbox_idx,
+ const struct apple_rtkit_ops *ops)
+{
+ struct apple_rtkit *rtk;
+ int ret;
+
+ if (!ops)
+ return ERR_PTR(-EINVAL);
+
+ rtk = kzalloc(sizeof(*rtk), GFP_KERNEL);
+ if (!rtk)
+ return ERR_PTR(-ENOMEM);
+
+ rtk->dev = dev;
+ rtk->cookie = cookie;
+ rtk->ops = ops;
+
+ init_completion(&rtk->epmap_completion);
+ init_completion(&rtk->iop_pwr_ack_completion);
+ init_completion(&rtk->ap_pwr_ack_completion);
+
+ bitmap_zero(rtk->endpoints, APPLE_RTKIT_MAX_ENDPOINTS);
+ set_bit(APPLE_RTKIT_EP_MGMT, rtk->endpoints);
+
+ rtk->mbox_name = mbox_name;
+ rtk->mbox_idx = mbox_idx;
+ rtk->mbox_cl.dev = dev;
+ rtk->mbox_cl.tx_block = false;
+ rtk->mbox_cl.knows_txdone = false;
+ rtk->mbox_cl.rx_callback = &apple_rtkit_rx;
+ rtk->mbox_cl.tx_done = &apple_rtkit_tx_done;
+
+ rtk->wq = alloc_ordered_workqueue("rtkit-%s", WQ_MEM_RECLAIM,
+ dev_name(rtk->dev));
+ if (!rtk->wq) {
+ ret = -ENOMEM;
+ goto free_rtk;
+ }
+
+ ret = apple_rtkit_request_mbox_chan(rtk);
+ if (ret)
+ goto destroy_wq;
+
+ return rtk;
+
+destroy_wq:
+ destroy_workqueue(rtk->wq);
+free_rtk:
+ kfree(rtk);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_init);
+
+static int apple_rtkit_wait_for_completion(struct completion *c)
+{
+ long t;
+
+ t = wait_for_completion_interruptible_timeout(c,
+ msecs_to_jiffies(1000));
+ if (t < 0)
+ return t;
+ else if (t == 0)
+ return -ETIME;
+ else
+ return 0;
+}
+
+int apple_rtkit_reinit(struct apple_rtkit *rtk)
+{
+ /* make sure we don't handle any messages while reinitializing */
+ mbox_free_channel(rtk->mbox_chan);
+ flush_workqueue(rtk->wq);
+
+ apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer);
+ apple_rtkit_free_buffer(rtk, &rtk->crashlog_buffer);
+ apple_rtkit_free_buffer(rtk, &rtk->syslog_buffer);
+
+ kfree(rtk->syslog_msg_buffer);
+
+ rtk->syslog_msg_buffer = NULL;
+ rtk->syslog_n_entries = 0;
+ rtk->syslog_msg_size = 0;
+
+ bitmap_zero(rtk->endpoints, APPLE_RTKIT_MAX_ENDPOINTS);
+ set_bit(APPLE_RTKIT_EP_MGMT, rtk->endpoints);
+
+ reinit_completion(&rtk->epmap_completion);
+ reinit_completion(&rtk->iop_pwr_ack_completion);
+ reinit_completion(&rtk->ap_pwr_ack_completion);
+
+ rtk->crashed = false;
+ rtk->iop_power_state = APPLE_RTKIT_PWR_STATE_OFF;
+ rtk->ap_power_state = APPLE_RTKIT_PWR_STATE_OFF;
+
+ return apple_rtkit_request_mbox_chan(rtk);
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_reinit);
+
+static int apple_rtkit_set_ap_power_state(struct apple_rtkit *rtk,
+ unsigned int state)
+{
+ u64 msg;
+ int ret;
+
+ reinit_completion(&rtk->ap_pwr_ack_completion);
+
+ msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, state);
+ apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_AP_PWR_STATE,
+ msg);
+
+ ret = apple_rtkit_wait_for_completion(&rtk->ap_pwr_ack_completion);
+ if (ret)
+ return ret;
+
+ if (rtk->ap_power_state != state)
+ return -EINVAL;
+ return 0;
+}
+
+static int apple_rtkit_set_iop_power_state(struct apple_rtkit *rtk,
+ unsigned int state)
+{
+ u64 msg;
+ int ret;
+
+ reinit_completion(&rtk->iop_pwr_ack_completion);
+
+ msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, state);
+ apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE,
+ msg);
+
+ ret = apple_rtkit_wait_for_completion(&rtk->iop_pwr_ack_completion);
+ if (ret)
+ return ret;
+
+ if (rtk->iop_power_state != state)
+ return -EINVAL;
+ return 0;
+}
+
+int apple_rtkit_boot(struct apple_rtkit *rtk)
+{
+ int ret;
+
+ if (apple_rtkit_is_running(rtk))
+ return 0;
+ if (rtk->crashed)
+ return -EINVAL;
+
+ dev_dbg(rtk->dev, "RTKit: waiting for boot to finish\n");
+ ret = apple_rtkit_wait_for_completion(&rtk->epmap_completion);
+ if (ret)
+ return ret;
+ if (rtk->boot_result)
+ return rtk->boot_result;
+
+ dev_dbg(rtk->dev, "RTKit: waiting for IOP power state ACK\n");
+ ret = apple_rtkit_wait_for_completion(&rtk->iop_pwr_ack_completion);
+ if (ret)
+ return ret;
+
+ return apple_rtkit_set_ap_power_state(rtk, APPLE_RTKIT_PWR_STATE_ON);
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_boot);
+
+int apple_rtkit_shutdown(struct apple_rtkit *rtk)
+{
+ int ret;
+
+ /* if OFF is used here the co-processor will not wake up again */
+ ret = apple_rtkit_set_ap_power_state(rtk,
+ APPLE_RTKIT_PWR_STATE_QUIESCED);
+ if (ret)
+ return ret;
+
+ ret = apple_rtkit_set_iop_power_state(rtk, APPLE_RTKIT_PWR_STATE_SLEEP);
+ if (ret)
+ return ret;
+
+ return apple_rtkit_reinit(rtk);
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_shutdown);
+
+int apple_rtkit_idle(struct apple_rtkit *rtk)
+{
+ int ret;
+
+ /* if OFF is used here the co-processor will not wake up again */
+ ret = apple_rtkit_set_ap_power_state(rtk,
+ APPLE_RTKIT_PWR_STATE_IDLE);
+ if (ret)
+ return ret;
+
+ ret = apple_rtkit_set_iop_power_state(rtk, APPLE_RTKIT_PWR_STATE_IDLE);
+ if (ret)
+ return ret;
+
+ rtk->iop_power_state = APPLE_RTKIT_PWR_STATE_IDLE;
+ rtk->ap_power_state = APPLE_RTKIT_PWR_STATE_IDLE;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_idle);
+
+int apple_rtkit_quiesce(struct apple_rtkit *rtk)
+{
+ int ret;
+
+ ret = apple_rtkit_set_ap_power_state(rtk,
+ APPLE_RTKIT_PWR_STATE_QUIESCED);
+ if (ret)
+ return ret;
+
+ ret = apple_rtkit_set_iop_power_state(rtk,
+ APPLE_RTKIT_PWR_STATE_QUIESCED);
+ if (ret)
+ return ret;
+
+ ret = apple_rtkit_reinit(rtk);
+ if (ret)
+ return ret;
+
+ rtk->iop_power_state = APPLE_RTKIT_PWR_STATE_QUIESCED;
+ rtk->ap_power_state = APPLE_RTKIT_PWR_STATE_QUIESCED;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_quiesce);
+
+int apple_rtkit_wake(struct apple_rtkit *rtk)
+{
+ u64 msg;
+
+ if (apple_rtkit_is_running(rtk))
+ return -EINVAL;
+
+ reinit_completion(&rtk->iop_pwr_ack_completion);
+
+ /*
+ * Use open-coded apple_rtkit_set_iop_power_state since apple_rtkit_boot
+ * will wait for the completion anyway.
+ */
+ msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, APPLE_RTKIT_PWR_STATE_ON);
+ apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE,
+ msg);
+
+ return apple_rtkit_boot(rtk);
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_wake);
+
+void apple_rtkit_free(struct apple_rtkit *rtk)
+{
+ mbox_free_channel(rtk->mbox_chan);
+ destroy_workqueue(rtk->wq);
+
+ apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer);
+ apple_rtkit_free_buffer(rtk, &rtk->crashlog_buffer);
+ apple_rtkit_free_buffer(rtk, &rtk->syslog_buffer);
+
+ kfree(rtk->syslog_msg_buffer);
+ kfree(rtk);
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_free);
+
+static void apple_rtkit_free_wrapper(void *data)
+{
+ apple_rtkit_free(data);
+}
+
+struct apple_rtkit *devm_apple_rtkit_init(struct device *dev, void *cookie,
+ const char *mbox_name, int mbox_idx,
+ const struct apple_rtkit_ops *ops)
+{
+ struct apple_rtkit *rtk;
+ int ret;
+
+ rtk = apple_rtkit_init(dev, cookie, mbox_name, mbox_idx, ops);
+ if (IS_ERR(rtk))
+ return rtk;
+
+ ret = devm_add_action_or_reset(dev, apple_rtkit_free_wrapper, rtk);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return rtk;
+}
+EXPORT_SYMBOL_GPL(devm_apple_rtkit_init);
+
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
+MODULE_DESCRIPTION("Apple RTKit driver");
diff --git a/drivers/soc/apple/sart.c b/drivers/soc/apple/sart.c
new file mode 100644
index 0000000000..afa1117368
--- /dev/null
+++ b/drivers/soc/apple/sart.c
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple SART device driver
+ * Copyright (C) The Asahi Linux Contributors
+ *
+ * Apple SART is a simple address filter for some DMA transactions.
+ * Regions of physical memory must be added to the SART's allow
+ * list before any DMA can target these. Unlike a proper
+ * IOMMU no remapping can be done and special support in the
+ * consumer driver is required since not all DMA transactions of
+ * a single device are subject to SART filtering.
+ */
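+
+/*
+ * Typical consumer flow (sketch): a client driver looks up its SART through
+ * the "apple,sart" phandle with devm_apple_sart_get() and then calls
+ * apple_sart_add_allowed_region() / apple_sart_remove_allowed_region() for
+ * each DMA buffer it shares with the co-processor.
+ */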
+
+#include <linux/soc/apple/sart.h>
+#include <linux/atomic.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#define APPLE_SART_MAX_ENTRIES 16
+
+/* This is probably a bitfield but the exact meaning of each bit is unknown. */
+#define APPLE_SART_FLAGS_ALLOW 0xff
+
+/* SARTv2 registers */
+#define APPLE_SART2_CONFIG(idx) (0x00 + 4 * (idx))
+#define APPLE_SART2_CONFIG_FLAGS GENMASK(31, 24)
+#define APPLE_SART2_CONFIG_SIZE GENMASK(23, 0)
+#define APPLE_SART2_CONFIG_SIZE_SHIFT 12
+#define APPLE_SART2_CONFIG_SIZE_MAX GENMASK(23, 0)
+
+#define APPLE_SART2_PADDR(idx) (0x40 + 4 * (idx))
+#define APPLE_SART2_PADDR_SHIFT 12
+
+/* SARTv3 registers */
+#define APPLE_SART3_CONFIG(idx) (0x00 + 4 * (idx))
+
+#define APPLE_SART3_PADDR(idx) (0x40 + 4 * (idx))
+#define APPLE_SART3_PADDR_SHIFT 12
+
+#define APPLE_SART3_SIZE(idx) (0x80 + 4 * (idx))
+#define APPLE_SART3_SIZE_SHIFT 12
+#define APPLE_SART3_SIZE_MAX GENMASK(29, 0)
+
+struct apple_sart_ops {
+ void (*get_entry)(struct apple_sart *sart, int index, u8 *flags,
+ phys_addr_t *paddr, size_t *size);
+ void (*set_entry)(struct apple_sart *sart, int index, u8 flags,
+ phys_addr_t paddr_shifted, size_t size_shifted);
+ unsigned int size_shift;
+ unsigned int paddr_shift;
+ size_t size_max;
+};
+
+struct apple_sart {
+ struct device *dev;
+ void __iomem *regs;
+
+ const struct apple_sart_ops *ops;
+
+ unsigned long protected_entries;
+ unsigned long used_entries;
+};
+
+static void sart2_get_entry(struct apple_sart *sart, int index, u8 *flags,
+ phys_addr_t *paddr, size_t *size)
+{
+ u32 cfg = readl(sart->regs + APPLE_SART2_CONFIG(index));
+ phys_addr_t paddr_ = readl(sart->regs + APPLE_SART2_PADDR(index));
+ size_t size_ = FIELD_GET(APPLE_SART2_CONFIG_SIZE, cfg);
+
+ *flags = FIELD_GET(APPLE_SART2_CONFIG_FLAGS, cfg);
+ *size = size_ << APPLE_SART2_CONFIG_SIZE_SHIFT;
+ *paddr = paddr_ << APPLE_SART2_PADDR_SHIFT;
+}
+
+static void sart2_set_entry(struct apple_sart *sart, int index, u8 flags,
+ phys_addr_t paddr_shifted, size_t size_shifted)
+{
+ u32 cfg;
+
+ cfg = FIELD_PREP(APPLE_SART2_CONFIG_FLAGS, flags);
+ cfg |= FIELD_PREP(APPLE_SART2_CONFIG_SIZE, size_shifted);
+
+ writel(paddr_shifted, sart->regs + APPLE_SART2_PADDR(index));
+ writel(cfg, sart->regs + APPLE_SART2_CONFIG(index));
+}
+
+static struct apple_sart_ops sart_ops_v2 = {
+ .get_entry = sart2_get_entry,
+ .set_entry = sart2_set_entry,
+ .size_shift = APPLE_SART2_CONFIG_SIZE_SHIFT,
+ .paddr_shift = APPLE_SART2_PADDR_SHIFT,
+ .size_max = APPLE_SART2_CONFIG_SIZE_MAX,
+};
+
+static void sart3_get_entry(struct apple_sart *sart, int index, u8 *flags,
+ phys_addr_t *paddr, size_t *size)
+{
+ phys_addr_t paddr_ = readl(sart->regs + APPLE_SART3_PADDR(index));
+ size_t size_ = readl(sart->regs + APPLE_SART3_SIZE(index));
+
+ *flags = readl(sart->regs + APPLE_SART3_CONFIG(index));
+ *size = size_ << APPLE_SART3_SIZE_SHIFT;
+ *paddr = paddr_ << APPLE_SART3_PADDR_SHIFT;
+}
+
+static void sart3_set_entry(struct apple_sart *sart, int index, u8 flags,
+ phys_addr_t paddr_shifted, size_t size_shifted)
+{
+ writel(paddr_shifted, sart->regs + APPLE_SART3_PADDR(index));
+ writel(size_shifted, sart->regs + APPLE_SART3_SIZE(index));
+ writel(flags, sart->regs + APPLE_SART3_CONFIG(index));
+}
+
+static struct apple_sart_ops sart_ops_v3 = {
+ .get_entry = sart3_get_entry,
+ .set_entry = sart3_set_entry,
+ .size_shift = APPLE_SART3_SIZE_SHIFT,
+ .paddr_shift = APPLE_SART3_PADDR_SHIFT,
+ .size_max = APPLE_SART3_SIZE_MAX,
+};
+
+static int apple_sart_probe(struct platform_device *pdev)
+{
+ int i;
+ struct apple_sart *sart;
+ struct device *dev = &pdev->dev;
+
+ sart = devm_kzalloc(dev, sizeof(*sart), GFP_KERNEL);
+ if (!sart)
+ return -ENOMEM;
+
+ sart->dev = dev;
+ sart->ops = of_device_get_match_data(dev);
+
+ sart->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(sart->regs))
+ return PTR_ERR(sart->regs);
+
+ for (i = 0; i < APPLE_SART_MAX_ENTRIES; ++i) {
+ u8 flags;
+ size_t size;
+ phys_addr_t paddr;
+
+ sart->ops->get_entry(sart, i, &flags, &paddr, &size);
+
+ if (!flags)
+ continue;
+
+ dev_dbg(sart->dev,
+ "SART bootloader entry: index %02d; flags: 0x%02x; paddr: %pa; size: 0x%zx\n",
+ i, flags, &paddr, size);
+ set_bit(i, &sart->protected_entries);
+ }
+
+ platform_set_drvdata(pdev, sart);
+ return 0;
+}
+
+static void apple_sart_put_device(void *dev)
+{
+ put_device(dev);
+}
+
+struct apple_sart *devm_apple_sart_get(struct device *dev)
+{
+ struct device_node *sart_node;
+ struct platform_device *sart_pdev;
+ struct apple_sart *sart;
+ int ret;
+
+ sart_node = of_parse_phandle(dev->of_node, "apple,sart", 0);
+ if (!sart_node)
+ return ERR_PTR(-ENODEV);
+
+ sart_pdev = of_find_device_by_node(sart_node);
+ of_node_put(sart_node);
+
+ if (!sart_pdev)
+ return ERR_PTR(-ENODEV);
+
+ sart = dev_get_drvdata(&sart_pdev->dev);
+ if (!sart) {
+ put_device(&sart_pdev->dev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ ret = devm_add_action_or_reset(dev, apple_sart_put_device,
+ &sart_pdev->dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ device_link_add(dev, &sart_pdev->dev,
+ DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
+
+ return sart;
+}
+EXPORT_SYMBOL_GPL(devm_apple_sart_get);
+
+static int sart_set_entry(struct apple_sart *sart, int index, u8 flags,
+ phys_addr_t paddr, size_t size)
+{
+ if (size & ((1 << sart->ops->size_shift) - 1))
+ return -EINVAL;
+ if (paddr & ((1 << sart->ops->paddr_shift) - 1))
+ return -EINVAL;
+
+	paddr >>= sart->ops->paddr_shift;
+	size >>= sart->ops->size_shift;
+
+ if (size > sart->ops->size_max)
+ return -EINVAL;
+
+ sart->ops->set_entry(sart, index, flags, paddr, size);
+ return 0;
+}
+
+int apple_sart_add_allowed_region(struct apple_sart *sart, phys_addr_t paddr,
+ size_t size)
+{
+ int i, ret;
+
+ for (i = 0; i < APPLE_SART_MAX_ENTRIES; ++i) {
+ if (test_bit(i, &sart->protected_entries))
+ continue;
+ if (test_and_set_bit(i, &sart->used_entries))
+ continue;
+
+ ret = sart_set_entry(sart, i, APPLE_SART_FLAGS_ALLOW, paddr,
+ size);
+ if (ret) {
+ dev_dbg(sart->dev,
+ "unable to set entry %d to [%pa, 0x%zx]\n",
+ i, &paddr, size);
+ clear_bit(i, &sart->used_entries);
+ return ret;
+ }
+
+ dev_dbg(sart->dev, "wrote [%pa, 0x%zx] to %d\n", &paddr, size,
+ i);
+ return 0;
+ }
+
+ dev_warn(sart->dev,
+ "no free entries left to add [paddr: 0x%pa, size: 0x%zx]\n",
+ &paddr, size);
+
+ return -EBUSY;
+}
+EXPORT_SYMBOL_GPL(apple_sart_add_allowed_region);
+
+int apple_sart_remove_allowed_region(struct apple_sart *sart, phys_addr_t paddr,
+ size_t size)
+{
+ int i;
+
+ dev_dbg(sart->dev,
+ "will remove [paddr: %pa, size: 0x%zx] from allowed regions\n",
+ &paddr, size);
+
+ for (i = 0; i < APPLE_SART_MAX_ENTRIES; ++i) {
+ u8 eflags;
+ size_t esize;
+ phys_addr_t epaddr;
+
+ if (test_bit(i, &sart->protected_entries))
+ continue;
+
+ sart->ops->get_entry(sart, i, &eflags, &epaddr, &esize);
+
+ if (epaddr != paddr || esize != size)
+ continue;
+
+ sart->ops->set_entry(sart, i, 0, 0, 0);
+
+ clear_bit(i, &sart->used_entries);
+ dev_dbg(sart->dev, "cleared entry %d\n", i);
+ return 0;
+ }
+
+ dev_warn(sart->dev, "entry [paddr: 0x%pa, size: 0x%zx] not found\n",
+ &paddr, size);
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(apple_sart_remove_allowed_region);
+
+static void apple_sart_shutdown(struct platform_device *pdev)
+{
+ struct apple_sart *sart = dev_get_drvdata(&pdev->dev);
+ int i;
+
+ for (i = 0; i < APPLE_SART_MAX_ENTRIES; ++i) {
+ if (test_bit(i, &sart->protected_entries))
+ continue;
+
+ sart->ops->set_entry(sart, i, 0, 0, 0);
+ }
+}
+
+static const struct of_device_id apple_sart_of_match[] = {
+ {
+ .compatible = "apple,t6000-sart",
+ .data = &sart_ops_v3,
+ },
+ {
+ .compatible = "apple,t8103-sart",
+ .data = &sart_ops_v2,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, apple_sart_of_match);
+
+static struct platform_driver apple_sart_driver = {
+ .driver = {
+ .name = "apple-sart",
+ .of_match_table = apple_sart_of_match,
+ },
+ .probe = apple_sart_probe,
+ .shutdown = apple_sart_shutdown,
+};
+module_platform_driver(apple_sart_driver);
+
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
+MODULE_DESCRIPTION("Apple SART driver");
diff --git a/drivers/soc/aspeed/Kconfig b/drivers/soc/aspeed/Kconfig
new file mode 100644
index 0000000000..f579ee0b5a
--- /dev/null
+++ b/drivers/soc/aspeed/Kconfig
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+if ARCH_ASPEED || COMPILE_TEST
+
+menu "ASPEED SoC drivers"
+
+config ASPEED_LPC_CTRL
+ tristate "ASPEED LPC firmware cycle control"
+ select REGMAP
+ select MFD_SYSCON
+ default ARCH_ASPEED
+ help
+ Control LPC firmware cycle mappings through ioctl()s. The driver
+	  also provides a read/write interface to a BMC RAM region where the
+ host LPC read/write region can be buffered.
+
+config ASPEED_LPC_SNOOP
+ tristate "ASPEED LPC snoop support"
+ select REGMAP
+ select MFD_SYSCON
+ default ARCH_ASPEED
+ help
+ Provides a driver to control the LPC snoop interface which
+ allows the BMC to listen on and save the data written by
+ the host to an arbitrary LPC I/O port.
+
+config ASPEED_UART_ROUTING
+	tristate "ASPEED UART routing control"
+ select REGMAP
+ select MFD_SYSCON
+ default ARCH_ASPEED
+ help
+ Provides a driver to control the UART routing paths, allowing
+ users to perform runtime configuration of the RX muxes among
+ the UART controllers and I/O pins.
+
+config ASPEED_P2A_CTRL
+ tristate "ASPEED P2A (VGA MMIO to BMC) bridge control"
+ select REGMAP
+ select MFD_SYSCON
+ default ARCH_ASPEED
+ help
+ Control ASPEED P2A VGA MMIO to BMC mappings through ioctl()s. The
+ driver also provides an interface for userspace mappings to a
+ pre-defined region.
+
+config ASPEED_SOCINFO
+ bool "ASPEED SoC Information driver"
+ default ARCH_ASPEED
+ select SOC_BUS
+ help
+ Say yes to support decoding of ASPEED BMC information.
+
+endmenu
+
+endif
diff --git a/drivers/soc/aspeed/Makefile b/drivers/soc/aspeed/Makefile
new file mode 100644
index 0000000000..b35d745929
--- /dev/null
+++ b/drivers/soc/aspeed/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o
+obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o
+obj-$(CONFIG_ASPEED_UART_ROUTING) += aspeed-uart-routing.o
+obj-$(CONFIG_ASPEED_P2A_CTRL) += aspeed-p2a-ctrl.o
+obj-$(CONFIG_ASPEED_SOCINFO) += aspeed-socinfo.o
diff --git a/drivers/soc/aspeed/aspeed-lpc-ctrl.c b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
new file mode 100644
index 0000000000..258894ed23
--- /dev/null
+++ b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
@@ -0,0 +1,366 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2017 IBM Corporation
+ */
+
+#include <linux/clk.h>
+#include <linux/log2.h>
+#include <linux/mfd/syscon.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/regmap.h>
+
+#include <linux/aspeed-lpc-ctrl.h>
+
+#define DEVICE_NAME "aspeed-lpc-ctrl"
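+
+/*
+ * Userspace flow (sketch): open /dev/aspeed-lpc-ctrl, query the reserved
+ * memory window with ASPEED_LPC_CTRL_IOCTL_GET_SIZE, point the host LPC
+ * firmware space at it with ASPEED_LPC_CTRL_IOCTL_MAP and mmap() the window
+ * to exchange data with the host.
+ */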
+
+#define HICR5 0x80
+#define HICR5_ENL2H BIT(8)
+#define HICR5_ENFWH BIT(10)
+
+#define HICR6 0x84
+#define SW_FWH2AHB BIT(17)
+
+#define HICR7 0x88
+#define HICR8 0x8c
+
+struct aspeed_lpc_ctrl {
+ struct miscdevice miscdev;
+ struct regmap *regmap;
+ struct clk *clk;
+ phys_addr_t mem_base;
+ resource_size_t mem_size;
+ u32 pnor_size;
+ u32 pnor_base;
+ bool fwh2ahb;
+ struct regmap *scu;
+};
+
+static struct aspeed_lpc_ctrl *file_aspeed_lpc_ctrl(struct file *file)
+{
+ return container_of(file->private_data, struct aspeed_lpc_ctrl,
+ miscdev);
+}
+
+static int aspeed_lpc_ctrl_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct aspeed_lpc_ctrl *lpc_ctrl = file_aspeed_lpc_ctrl(file);
+ unsigned long vsize = vma->vm_end - vma->vm_start;
+ pgprot_t prot = vma->vm_page_prot;
+
+ if (vma->vm_pgoff + vma_pages(vma) > lpc_ctrl->mem_size >> PAGE_SHIFT)
+ return -EINVAL;
+
+ /* ast2400/2500 AHB accesses are not cache coherent */
+ prot = pgprot_noncached(prot);
+
+ if (remap_pfn_range(vma, vma->vm_start,
+ (lpc_ctrl->mem_base >> PAGE_SHIFT) + vma->vm_pgoff,
+ vsize, prot))
+ return -EAGAIN;
+
+ return 0;
+}
+
+static long aspeed_lpc_ctrl_ioctl(struct file *file, unsigned int cmd,
+ unsigned long param)
+{
+ struct aspeed_lpc_ctrl *lpc_ctrl = file_aspeed_lpc_ctrl(file);
+ struct device *dev = file->private_data;
+ void __user *p = (void __user *)param;
+ struct aspeed_lpc_ctrl_mapping map;
+ u32 addr;
+ u32 size;
+ long rc;
+
+ if (copy_from_user(&map, p, sizeof(map)))
+ return -EFAULT;
+
+ if (map.flags != 0)
+ return -EINVAL;
+
+ switch (cmd) {
+ case ASPEED_LPC_CTRL_IOCTL_GET_SIZE:
+ /* The flash windows don't report their size */
+ if (map.window_type != ASPEED_LPC_CTRL_WINDOW_MEMORY)
+ return -EINVAL;
+
+ /* Support more than one window id in the future */
+ if (map.window_id != 0)
+ return -EINVAL;
+
+ /* If memory-region is not described in device tree */
+ if (!lpc_ctrl->mem_size) {
+ dev_dbg(dev, "Didn't find reserved memory\n");
+ return -ENXIO;
+ }
+
+ map.size = lpc_ctrl->mem_size;
+
+ return copy_to_user(p, &map, sizeof(map)) ? -EFAULT : 0;
+ case ASPEED_LPC_CTRL_IOCTL_MAP:
+
+ /*
+ * The top half of HICR7 is the MSB of the BMC address of the
+ * mapping.
+ * The bottom half of HICR7 is the MSB of the HOST LPC
+ * firmware space address of the mapping.
+ *
+ * The 1 bits in the top of half of HICR8 represent the bits
+ * (in the requested address) that should be ignored and
+ * replaced with those from the top half of HICR7.
+ * The 1 bits in the bottom half of HICR8 represent the bits
+ * (in the requested address) that should be kept and pass
+ * into the BMC address space.
+ */
+
+ /*
+ * It doesn't make sense to talk about a size or offset with
+ * low 16 bits set. Both HICR7 and HICR8 talk about the top 16
+ * bits of addresses and sizes.
+ */
+
+ if ((map.size & 0x0000ffff) || (map.offset & 0x0000ffff))
+ return -EINVAL;
+
+ /*
+		 * Because of the way the masks work in HICR8, the offset has
+		 * to be a multiple of the size.
+ */
+ if (map.offset & (map.size - 1))
+ return -EINVAL;
+
+ if (map.window_type == ASPEED_LPC_CTRL_WINDOW_FLASH) {
+ if (!lpc_ctrl->pnor_size) {
+ dev_dbg(dev, "Didn't find host pnor flash\n");
+ return -ENXIO;
+ }
+ addr = lpc_ctrl->pnor_base;
+ size = lpc_ctrl->pnor_size;
+ } else if (map.window_type == ASPEED_LPC_CTRL_WINDOW_MEMORY) {
+ /* If memory-region is not described in device tree */
+ if (!lpc_ctrl->mem_size) {
+ dev_dbg(dev, "Didn't find reserved memory\n");
+ return -ENXIO;
+ }
+ addr = lpc_ctrl->mem_base;
+ size = lpc_ctrl->mem_size;
+ } else {
+ return -EINVAL;
+ }
+
+ /* Check overflow first! */
+ if (map.offset + map.size < map.offset ||
+ map.offset + map.size > size)
+ return -EINVAL;
+
+ if (map.size == 0 || map.size > size)
+ return -EINVAL;
+
+ addr += map.offset;
+
+ /*
+ * addr (host lpc address) is safe regardless of values. This
+ * simply changes the address the host has to request on its
+		 * side of the LPC bus. This cannot impact the host's own
+		 * memory space by surprise as LPC-specific accessors are
+ * required. The only strange thing that could be done is
+ * setting the lower 16 bits but the shift takes care of that.
+ */
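+		/*
+		 * Worked example (hypothetical values): mapping 64MB of BMC
+		 * memory at 0x98000000 to host LPC address 0x0e000000 writes
+		 * 0x98000e00 to HICR7 and 0xfc0003ff to HICR8.
+		 */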
+
+ rc = regmap_write(lpc_ctrl->regmap, HICR7,
+ (addr | (map.addr >> 16)));
+ if (rc)
+ return rc;
+
+ rc = regmap_write(lpc_ctrl->regmap, HICR8,
+ (~(map.size - 1)) | ((map.size >> 16) - 1));
+ if (rc)
+ return rc;
+
+ /*
+ * Switch to FWH2AHB mode, AST2600 only.
+ */
+ if (lpc_ctrl->fwh2ahb) {
+ /*
+ * Enable FWH2AHB in SCU debug control register 2. This
+ * does not turn it on, but makes it available for it
+ * to be configured in HICR6.
+ */
+ regmap_update_bits(lpc_ctrl->scu, 0x0D8, BIT(2), 0);
+
+ /*
+ * The other bits in this register are interrupt status bits
+ * that are cleared by writing 1. As we don't want to clear
+ * them, set only the bit of interest.
+ */
+ regmap_write(lpc_ctrl->regmap, HICR6, SW_FWH2AHB);
+ }
+
+ /*
+ * Enable LPC FHW cycles. This is required for the host to
+ * access the regions specified.
+ */
+ return regmap_update_bits(lpc_ctrl->regmap, HICR5,
+ HICR5_ENFWH | HICR5_ENL2H,
+ HICR5_ENFWH | HICR5_ENL2H);
+ }
+
+ return -EINVAL;
+}
+
+static const struct file_operations aspeed_lpc_ctrl_fops = {
+ .owner = THIS_MODULE,
+ .mmap = aspeed_lpc_ctrl_mmap,
+ .unlocked_ioctl = aspeed_lpc_ctrl_ioctl,
+};
+
+static int aspeed_lpc_ctrl_probe(struct platform_device *pdev)
+{
+ struct aspeed_lpc_ctrl *lpc_ctrl;
+ struct device_node *node;
+ struct resource resm;
+ struct device *dev;
+ struct device_node *np;
+ int rc;
+
+ dev = &pdev->dev;
+
+ lpc_ctrl = devm_kzalloc(dev, sizeof(*lpc_ctrl), GFP_KERNEL);
+ if (!lpc_ctrl)
+ return -ENOMEM;
+
+ /* If flash is described in device tree then store */
+ node = of_parse_phandle(dev->of_node, "flash", 0);
+ if (!node) {
+ dev_dbg(dev, "Didn't find host pnor flash node\n");
+ } else {
+ rc = of_address_to_resource(node, 1, &resm);
+ of_node_put(node);
+ if (rc) {
+ dev_err(dev, "Couldn't address to resource for flash\n");
+ return rc;
+ }
+
+ lpc_ctrl->pnor_size = resource_size(&resm);
+ lpc_ctrl->pnor_base = resm.start;
+ }
+
+ dev_set_drvdata(&pdev->dev, lpc_ctrl);
+
+ /* If memory-region is described in device tree then store */
+ node = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (!node) {
+ dev_dbg(dev, "Didn't find reserved memory\n");
+ } else {
+ rc = of_address_to_resource(node, 0, &resm);
+ of_node_put(node);
+ if (rc) {
+ dev_err(dev, "Couldn't address to resource for reserved memory\n");
+ return -ENXIO;
+ }
+
+ lpc_ctrl->mem_size = resource_size(&resm);
+ lpc_ctrl->mem_base = resm.start;
+
+ if (!is_power_of_2(lpc_ctrl->mem_size)) {
+ dev_err(dev, "Reserved memory size must be a power of 2, got %u\n",
+ (unsigned int)lpc_ctrl->mem_size);
+ return -EINVAL;
+ }
+
+ if (!IS_ALIGNED(lpc_ctrl->mem_base, lpc_ctrl->mem_size)) {
+ dev_err(dev, "Reserved memory must be naturally aligned for size %u\n",
+ (unsigned int)lpc_ctrl->mem_size);
+ return -EINVAL;
+ }
+ }
+
+ np = pdev->dev.parent->of_node;
+ if (!of_device_is_compatible(np, "aspeed,ast2400-lpc-v2") &&
+ !of_device_is_compatible(np, "aspeed,ast2500-lpc-v2") &&
+ !of_device_is_compatible(np, "aspeed,ast2600-lpc-v2")) {
+ dev_err(dev, "unsupported LPC device binding\n");
+ return -ENODEV;
+ }
+
+ lpc_ctrl->regmap = syscon_node_to_regmap(np);
+ if (IS_ERR(lpc_ctrl->regmap)) {
+ dev_err(dev, "Couldn't get regmap\n");
+ return -ENODEV;
+ }
+
+ if (of_device_is_compatible(dev->of_node, "aspeed,ast2600-lpc-ctrl")) {
+ lpc_ctrl->fwh2ahb = true;
+
+ lpc_ctrl->scu = syscon_regmap_lookup_by_compatible("aspeed,ast2600-scu");
+ if (IS_ERR(lpc_ctrl->scu)) {
+ dev_err(dev, "couldn't find scu\n");
+ return PTR_ERR(lpc_ctrl->scu);
+ }
+ }
+
+ lpc_ctrl->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(lpc_ctrl->clk))
+ return dev_err_probe(dev, PTR_ERR(lpc_ctrl->clk),
+ "couldn't get clock\n");
+ rc = clk_prepare_enable(lpc_ctrl->clk);
+ if (rc) {
+ dev_err(dev, "couldn't enable clock\n");
+ return rc;
+ }
+
+ lpc_ctrl->miscdev.minor = MISC_DYNAMIC_MINOR;
+ lpc_ctrl->miscdev.name = DEVICE_NAME;
+ lpc_ctrl->miscdev.fops = &aspeed_lpc_ctrl_fops;
+ lpc_ctrl->miscdev.parent = dev;
+ rc = misc_register(&lpc_ctrl->miscdev);
+ if (rc) {
+ dev_err(dev, "Unable to register device\n");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ clk_disable_unprepare(lpc_ctrl->clk);
+ return rc;
+}
+
+static int aspeed_lpc_ctrl_remove(struct platform_device *pdev)
+{
+ struct aspeed_lpc_ctrl *lpc_ctrl = dev_get_drvdata(&pdev->dev);
+
+ misc_deregister(&lpc_ctrl->miscdev);
+ clk_disable_unprepare(lpc_ctrl->clk);
+
+ return 0;
+}
+
+static const struct of_device_id aspeed_lpc_ctrl_match[] = {
+ { .compatible = "aspeed,ast2400-lpc-ctrl" },
+ { .compatible = "aspeed,ast2500-lpc-ctrl" },
+ { .compatible = "aspeed,ast2600-lpc-ctrl" },
+ { },
+};
+
+static struct platform_driver aspeed_lpc_ctrl_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = aspeed_lpc_ctrl_match,
+ },
+ .probe = aspeed_lpc_ctrl_probe,
+ .remove = aspeed_lpc_ctrl_remove,
+};
+
+module_platform_driver(aspeed_lpc_ctrl_driver);
+
+MODULE_DEVICE_TABLE(of, aspeed_lpc_ctrl_match);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Cyril Bur <cyrilbur@gmail.com>");
+MODULE_DESCRIPTION("Control for ASPEED LPC HOST to BMC mappings");
diff --git a/drivers/soc/aspeed/aspeed-lpc-snoop.c b/drivers/soc/aspeed/aspeed-lpc-snoop.c
new file mode 100644
index 0000000000..773dbcbc03
--- /dev/null
+++ b/drivers/soc/aspeed/aspeed-lpc-snoop.c
@@ -0,0 +1,379 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2017 Google Inc
+ *
+ * Provides a simple driver to control the ASPEED LPC snoop interface which
+ * allows the BMC to listen on and save the data written by
+ * the host to an arbitrary LPC I/O port.
+ *
+ * Typically used by the BMC to "watch" host boot progress via port
+ * 0x80 writes made by the BIOS during the boot process.
+ */
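+
+/*
+ * Each enabled snoop channel is exposed as a misc character device (e.g.
+ * /dev/aspeed-lpc-snoop0); a read() returns the snooped bytes in the order
+ * the host wrote them.
+ */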
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/fs.h>
+#include <linux/kfifo.h>
+#include <linux/mfd/syscon.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/regmap.h>
+
+#define DEVICE_NAME "aspeed-lpc-snoop"
+
+#define NUM_SNOOP_CHANNELS 2
+#define SNOOP_FIFO_SIZE 2048
+
+#define HICR5 0x80
+#define HICR5_EN_SNP0W BIT(0)
+#define HICR5_ENINT_SNP0W BIT(1)
+#define HICR5_EN_SNP1W BIT(2)
+#define HICR5_ENINT_SNP1W BIT(3)
+#define HICR6 0x84
+#define HICR6_STR_SNP0W BIT(0)
+#define HICR6_STR_SNP1W BIT(1)
+#define SNPWADR 0x90
+#define SNPWADR_CH0_MASK GENMASK(15, 0)
+#define SNPWADR_CH0_SHIFT 0
+#define SNPWADR_CH1_MASK GENMASK(31, 16)
+#define SNPWADR_CH1_SHIFT 16
+#define SNPWDR 0x94
+#define SNPWDR_CH0_MASK GENMASK(7, 0)
+#define SNPWDR_CH0_SHIFT 0
+#define SNPWDR_CH1_MASK GENMASK(15, 8)
+#define SNPWDR_CH1_SHIFT 8
+#define HICRB 0x100
+#define HICRB_ENSNP0D BIT(14)
+#define HICRB_ENSNP1D BIT(15)
+
+struct aspeed_lpc_snoop_model_data {
+ /* The ast2400 has bits 14 and 15 as reserved, whereas the ast2500
+ * can use them.
+ */
+ unsigned int has_hicrb_ensnp;
+};
+
+struct aspeed_lpc_snoop_channel {
+ struct kfifo fifo;
+ wait_queue_head_t wq;
+ struct miscdevice miscdev;
+};
+
+struct aspeed_lpc_snoop {
+ struct regmap *regmap;
+ int irq;
+ struct clk *clk;
+ struct aspeed_lpc_snoop_channel chan[NUM_SNOOP_CHANNELS];
+};
+
+static struct aspeed_lpc_snoop_channel *snoop_file_to_chan(struct file *file)
+{
+ return container_of(file->private_data,
+ struct aspeed_lpc_snoop_channel,
+ miscdev);
+}
+
+static ssize_t snoop_file_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct aspeed_lpc_snoop_channel *chan = snoop_file_to_chan(file);
+ unsigned int copied;
+ int ret = 0;
+
+ if (kfifo_is_empty(&chan->fifo)) {
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+ ret = wait_event_interruptible(chan->wq,
+ !kfifo_is_empty(&chan->fifo));
+ if (ret == -ERESTARTSYS)
+ return -EINTR;
+ }
+ ret = kfifo_to_user(&chan->fifo, buffer, count, &copied);
+ if (ret)
+ return ret;
+
+ return copied;
+}
+
+static __poll_t snoop_file_poll(struct file *file,
+ struct poll_table_struct *pt)
+{
+ struct aspeed_lpc_snoop_channel *chan = snoop_file_to_chan(file);
+
+ poll_wait(file, &chan->wq, pt);
+ return !kfifo_is_empty(&chan->fifo) ? EPOLLIN : 0;
+}
+
+static const struct file_operations snoop_fops = {
+ .owner = THIS_MODULE,
+ .read = snoop_file_read,
+ .poll = snoop_file_poll,
+ .llseek = noop_llseek,
+};
+
+/* Save a byte to a FIFO and discard the oldest byte if FIFO is full */
+static void put_fifo_with_discard(struct aspeed_lpc_snoop_channel *chan, u8 val)
+{
+ if (!kfifo_initialized(&chan->fifo))
+ return;
+ if (kfifo_is_full(&chan->fifo))
+ kfifo_skip(&chan->fifo);
+ kfifo_put(&chan->fifo, val);
+ wake_up_interruptible(&chan->wq);
+}
+
+static irqreturn_t aspeed_lpc_snoop_irq(int irq, void *arg)
+{
+ struct aspeed_lpc_snoop *lpc_snoop = arg;
+ u32 reg, data;
+
+ if (regmap_read(lpc_snoop->regmap, HICR6, &reg))
+ return IRQ_NONE;
+
+ /* Check if one of the snoop channels is interrupting */
+ reg &= (HICR6_STR_SNP0W | HICR6_STR_SNP1W);
+ if (!reg)
+ return IRQ_NONE;
+
+ /* Ack pending IRQs */
+ regmap_write(lpc_snoop->regmap, HICR6, reg);
+
+ /* Read and save the most recent snooped data byte to the FIFO */
+ regmap_read(lpc_snoop->regmap, SNPWDR, &data);
+
+ if (reg & HICR6_STR_SNP0W) {
+ u8 val = (data & SNPWDR_CH0_MASK) >> SNPWDR_CH0_SHIFT;
+
+ put_fifo_with_discard(&lpc_snoop->chan[0], val);
+ }
+ if (reg & HICR6_STR_SNP1W) {
+ u8 val = (data & SNPWDR_CH1_MASK) >> SNPWDR_CH1_SHIFT;
+
+ put_fifo_with_discard(&lpc_snoop->chan[1], val);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int aspeed_lpc_snoop_config_irq(struct aspeed_lpc_snoop *lpc_snoop,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int rc;
+
+ lpc_snoop->irq = platform_get_irq(pdev, 0);
+ if (!lpc_snoop->irq)
+ return -ENODEV;
+
+ rc = devm_request_irq(dev, lpc_snoop->irq,
+ aspeed_lpc_snoop_irq, IRQF_SHARED,
+ DEVICE_NAME, lpc_snoop);
+ if (rc < 0) {
+ dev_warn(dev, "Unable to request IRQ %d\n", lpc_snoop->irq);
+ lpc_snoop->irq = 0;
+ return rc;
+ }
+
+ return 0;
+}
+
+static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
+ struct device *dev,
+ int channel, u16 lpc_port)
+{
+ int rc = 0;
+ u32 hicr5_en, snpwadr_mask, snpwadr_shift, hicrb_en;
+ const struct aspeed_lpc_snoop_model_data *model_data =
+ of_device_get_match_data(dev);
+
+ init_waitqueue_head(&lpc_snoop->chan[channel].wq);
+ /* Create FIFO datastructure */
+ rc = kfifo_alloc(&lpc_snoop->chan[channel].fifo,
+ SNOOP_FIFO_SIZE, GFP_KERNEL);
+ if (rc)
+ return rc;
+
+ lpc_snoop->chan[channel].miscdev.minor = MISC_DYNAMIC_MINOR;
+ lpc_snoop->chan[channel].miscdev.name =
+ devm_kasprintf(dev, GFP_KERNEL, "%s%d", DEVICE_NAME, channel);
+ lpc_snoop->chan[channel].miscdev.fops = &snoop_fops;
+ lpc_snoop->chan[channel].miscdev.parent = dev;
+ rc = misc_register(&lpc_snoop->chan[channel].miscdev);
+ if (rc)
+ return rc;
+
+ /* Enable LPC snoop channel at requested port */
+ switch (channel) {
+ case 0:
+ hicr5_en = HICR5_EN_SNP0W | HICR5_ENINT_SNP0W;
+ snpwadr_mask = SNPWADR_CH0_MASK;
+ snpwadr_shift = SNPWADR_CH0_SHIFT;
+ hicrb_en = HICRB_ENSNP0D;
+ break;
+ case 1:
+ hicr5_en = HICR5_EN_SNP1W | HICR5_ENINT_SNP1W;
+ snpwadr_mask = SNPWADR_CH1_MASK;
+ snpwadr_shift = SNPWADR_CH1_SHIFT;
+ hicrb_en = HICRB_ENSNP1D;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(lpc_snoop->regmap, HICR5, hicr5_en, hicr5_en);
+ regmap_update_bits(lpc_snoop->regmap, SNPWADR, snpwadr_mask,
+ lpc_port << snpwadr_shift);
+ if (model_data->has_hicrb_ensnp)
+ regmap_update_bits(lpc_snoop->regmap, HICRB,
+ hicrb_en, hicrb_en);
+
+ return rc;
+}
+
+static void aspeed_lpc_disable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
+ int channel)
+{
+ switch (channel) {
+ case 0:
+ regmap_update_bits(lpc_snoop->regmap, HICR5,
+ HICR5_EN_SNP0W | HICR5_ENINT_SNP0W,
+ 0);
+ break;
+ case 1:
+ regmap_update_bits(lpc_snoop->regmap, HICR5,
+ HICR5_EN_SNP1W | HICR5_ENINT_SNP1W,
+ 0);
+ break;
+ default:
+ return;
+ }
+
+ kfifo_free(&lpc_snoop->chan[channel].fifo);
+ misc_deregister(&lpc_snoop->chan[channel].miscdev);
+}
+
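+/*
+ * Configuration note (illustrative; see the devicetree binding for the
+ * authoritative format): the snoop node must sit under an LPC node whose
+ * compatible is one of the "aspeed,ast2[456]00-lpc-v2" strings checked in
+ * probe below, and its "snoop-ports" property lists the host I/O port(s)
+ * to watch, e.g. <0x80> for the classic BIOS POST-code port.
+ */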
+static int aspeed_lpc_snoop_probe(struct platform_device *pdev)
+{
+ struct aspeed_lpc_snoop *lpc_snoop;
+ struct device *dev;
+ struct device_node *np;
+ u32 port;
+ int rc;
+
+ dev = &pdev->dev;
+
+ lpc_snoop = devm_kzalloc(dev, sizeof(*lpc_snoop), GFP_KERNEL);
+ if (!lpc_snoop)
+ return -ENOMEM;
+
+ np = pdev->dev.parent->of_node;
+ if (!of_device_is_compatible(np, "aspeed,ast2400-lpc-v2") &&
+ !of_device_is_compatible(np, "aspeed,ast2500-lpc-v2") &&
+ !of_device_is_compatible(np, "aspeed,ast2600-lpc-v2")) {
+ dev_err(dev, "unsupported LPC device binding\n");
+ return -ENODEV;
+ }
+
+ lpc_snoop->regmap = syscon_node_to_regmap(np);
+ if (IS_ERR(lpc_snoop->regmap)) {
+ dev_err(dev, "Couldn't get regmap\n");
+ return -ENODEV;
+ }
+
+ dev_set_drvdata(&pdev->dev, lpc_snoop);
+
+ rc = of_property_read_u32_index(dev->of_node, "snoop-ports", 0, &port);
+ if (rc) {
+ dev_err(dev, "no snoop ports configured\n");
+ return -ENODEV;
+ }
+
+ lpc_snoop->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(lpc_snoop->clk)) {
+ rc = PTR_ERR(lpc_snoop->clk);
+ if (rc != -EPROBE_DEFER)
+ dev_err(dev, "couldn't get clock\n");
+ return rc;
+ }
+ rc = clk_prepare_enable(lpc_snoop->clk);
+ if (rc) {
+ dev_err(dev, "couldn't enable clock\n");
+ return rc;
+ }
+
+ rc = aspeed_lpc_snoop_config_irq(lpc_snoop, pdev);
+ if (rc)
+ goto err;
+
+ rc = aspeed_lpc_enable_snoop(lpc_snoop, dev, 0, port);
+ if (rc)
+ goto err;
+
+ /* Configuration of 2nd snoop channel port is optional */
+ if (of_property_read_u32_index(dev->of_node, "snoop-ports",
+ 1, &port) == 0) {
+ rc = aspeed_lpc_enable_snoop(lpc_snoop, dev, 1, port);
+ if (rc) {
+ aspeed_lpc_disable_snoop(lpc_snoop, 0);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ clk_disable_unprepare(lpc_snoop->clk);
+
+ return rc;
+}
+
+static int aspeed_lpc_snoop_remove(struct platform_device *pdev)
+{
+ struct aspeed_lpc_snoop *lpc_snoop = dev_get_drvdata(&pdev->dev);
+
+ /* Disable both snoop channels */
+ aspeed_lpc_disable_snoop(lpc_snoop, 0);
+ aspeed_lpc_disable_snoop(lpc_snoop, 1);
+
+ clk_disable_unprepare(lpc_snoop->clk);
+
+ return 0;
+}
+
+static const struct aspeed_lpc_snoop_model_data ast2400_model_data = {
+ .has_hicrb_ensnp = 0,
+};
+
+static const struct aspeed_lpc_snoop_model_data ast2500_model_data = {
+ .has_hicrb_ensnp = 1,
+};
+
+static const struct of_device_id aspeed_lpc_snoop_match[] = {
+ { .compatible = "aspeed,ast2400-lpc-snoop",
+ .data = &ast2400_model_data },
+ { .compatible = "aspeed,ast2500-lpc-snoop",
+ .data = &ast2500_model_data },
+ { .compatible = "aspeed,ast2600-lpc-snoop",
+ .data = &ast2500_model_data },
+ { },
+};
+
+static struct platform_driver aspeed_lpc_snoop_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = aspeed_lpc_snoop_match,
+ },
+ .probe = aspeed_lpc_snoop_probe,
+ .remove = aspeed_lpc_snoop_remove,
+};
+
+module_platform_driver(aspeed_lpc_snoop_driver);
+
+MODULE_DEVICE_TABLE(of, aspeed_lpc_snoop_match);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Robert Lippert <rlippert@google.com>");
+MODULE_DESCRIPTION("Linux driver to control Aspeed LPC snoop functionality");
diff --git a/drivers/soc/aspeed/aspeed-p2a-ctrl.c b/drivers/soc/aspeed/aspeed-p2a-ctrl.c
new file mode 100644
index 0000000000..548f44da28
--- /dev/null
+++ b/drivers/soc/aspeed/aspeed-p2a-ctrl.c
@@ -0,0 +1,444 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2019 Google Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Provides a simple driver to control the ASPEED P2A interface which allows
+ * the host to read and write to various regions of the BMC's memory.
+ */
+
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <linux/aspeed-p2a-ctrl.h>
+
+#define DEVICE_NAME "aspeed-p2a-ctrl"
+
+/* SCU2C is a Misc. Control Register. */
+#define SCU2C 0x2c
+/* SCU180 is the PCIe Configuration Setting Control Register. */
+#define SCU180 0x180
+/* Bit 1 controls the P2A bridge, while bit 0 controls the entire VGA device
+ * on the PCI bus.
+ */
+#define SCU180_ENP2A BIT(1)
+
+/* The ast2400/2500 both have six ranges. */
+#define P2A_REGION_COUNT 6
+
+struct region {
+ u64 min;
+ u64 max;
+ u32 bit;
+};
+
+struct aspeed_p2a_model_data {
+ /* min, max, bit */
+ struct region regions[P2A_REGION_COUNT];
+};
+
+struct aspeed_p2a_ctrl {
+ struct miscdevice miscdev;
+ struct regmap *regmap;
+
+ const struct aspeed_p2a_model_data *config;
+
+ /* Access to these needs to be locked, held via probe, mapping ioctl,
+ * and release, remove.
+ */
+ struct mutex tracking;
+ u32 readers;
+ u32 readerwriters[P2A_REGION_COUNT];
+
+ phys_addr_t mem_base;
+ resource_size_t mem_size;
+};
+
+struct aspeed_p2a_user {
+ struct file *file;
+ struct aspeed_p2a_ctrl *parent;
+
+ /* The entire memory space is opened for reading once the bridge is
+ * enabled, therefore this needs only to be tracked once per user.
+ * If any user has it open for read, the bridge must stay enabled.
+ */
+ u32 read;
+
+ /* Each entry of the array corresponds to a P2A Region. If the user
+ * opens for read or readwrite, the reference goes up here. On
+ * release, this array is walked and references adjusted accordingly.
+ */
+ u32 readwrite[P2A_REGION_COUNT];
+};
+
+static void aspeed_p2a_enable_bridge(struct aspeed_p2a_ctrl *p2a_ctrl)
+{
+ regmap_update_bits(p2a_ctrl->regmap,
+ SCU180, SCU180_ENP2A, SCU180_ENP2A);
+}
+
+static void aspeed_p2a_disable_bridge(struct aspeed_p2a_ctrl *p2a_ctrl)
+{
+ regmap_update_bits(p2a_ctrl->regmap, SCU180, SCU180_ENP2A, 0);
+}
+
+static int aspeed_p2a_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ unsigned long vsize;
+ pgprot_t prot;
+ struct aspeed_p2a_user *priv = file->private_data;
+ struct aspeed_p2a_ctrl *ctrl = priv->parent;
+
+ if (ctrl->mem_base == 0 && ctrl->mem_size == 0)
+ return -EINVAL;
+
+ vsize = vma->vm_end - vma->vm_start;
+ prot = vma->vm_page_prot;
+
+ if (vma->vm_pgoff + vma_pages(vma) > ctrl->mem_size >> PAGE_SHIFT)
+ return -EINVAL;
+
+ /* ast2400/2500 AHB accesses are not cache coherent */
+ prot = pgprot_noncached(prot);
+
+ if (remap_pfn_range(vma, vma->vm_start,
+ (ctrl->mem_base >> PAGE_SHIFT) + vma->vm_pgoff,
+ vsize, prot))
+ return -EAGAIN;
+
+ return 0;
+}
+
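+/*
+ * Worked example (illustrative): on an ast2500, a request for addr
+ * 0x1f000000 with length 0x2000000 ends at 0x20ffffff, so it overlaps the
+ * 0x10000000-0x1fffffff and 0x20000000-0x3fffffff entries of the ast2500
+ * region table later in this file, and the SCU2C bit of each matched
+ * region is cleared to open it for writes.
+ */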
+static bool aspeed_p2a_region_acquire(struct aspeed_p2a_user *priv,
+ struct aspeed_p2a_ctrl *ctrl,
+ struct aspeed_p2a_ctrl_mapping *map)
+{
+ int i;
+ u64 base, end;
+ bool matched = false;
+
+ base = map->addr;
+ end = map->addr + (map->length - 1);
+
+ /* The region tables cover the whole 32-bit space, so any valid
+ * u32 range will find a match.
+ */
+ for (i = 0; i < P2A_REGION_COUNT; i++) {
+ const struct region *curr = &ctrl->config->regions[i];
+
+ /* If the top of this region is lower than your base, skip it.
+ */
+ if (curr->max < base)
+ continue;
+
+ /* If the bottom of this region is higher than your end, bail.
+ */
+ if (curr->min > end)
+ break;
+
+ /* Lock this and update it, so that if someone else is
+ * closing their file out, this will preserve the increment.
+ */
+ mutex_lock(&ctrl->tracking);
+ ctrl->readerwriters[i] += 1;
+ mutex_unlock(&ctrl->tracking);
+
+ /* Track with the user, so when they close their file, we can
+ * decrement properly.
+ */
+ priv->readwrite[i] += 1;
+
+ /* Enable the region as read-write. */
+ regmap_update_bits(ctrl->regmap, SCU2C, curr->bit, 0);
+ matched = true;
+ }
+
+ return matched;
+}
+
+static long aspeed_p2a_ioctl(struct file *file, unsigned int cmd,
+ unsigned long data)
+{
+ struct aspeed_p2a_user *priv = file->private_data;
+ struct aspeed_p2a_ctrl *ctrl = priv->parent;
+ void __user *arg = (void __user *)data;
+ struct aspeed_p2a_ctrl_mapping map;
+
+ if (copy_from_user(&map, arg, sizeof(map)))
+ return -EFAULT;
+
+ switch (cmd) {
+ case ASPEED_P2A_CTRL_IOCTL_SET_WINDOW:
+ /* If they want a region to be read-only, then since the entire
+ * space is readable once the bridge is enabled, we only need to
+ * track that this user wants to read from the bridge, and enable
+ * the bridge if it is not already enabled.
+ */
+ if (map.flags == ASPEED_P2A_CTRL_READ_ONLY) {
+ mutex_lock(&ctrl->tracking);
+ ctrl->readers += 1;
+ mutex_unlock(&ctrl->tracking);
+
+ /* Track with the user, so when they close their file,
+ * we can decrement properly.
+ */
+ priv->read += 1;
+ } else if (map.flags == ASPEED_P2A_CTRL_READWRITE) {
+ /* If we don't acquire any region, return an error. */
+ if (!aspeed_p2a_region_acquire(priv, ctrl, &map)) {
+ return -EINVAL;
+ }
+ } else {
+ /* Invalid map flags. */
+ return -EINVAL;
+ }
+
+ aspeed_p2a_enable_bridge(ctrl);
+ return 0;
+ case ASPEED_P2A_CTRL_IOCTL_GET_MEMORY_CONFIG:
+ /* This is a request for the memory-region and corresponding
+ * length that is used by the driver for mmap.
+ */
+
+ map.flags = 0;
+ map.addr = ctrl->mem_base;
+ map.length = ctrl->mem_size;
+
+ return copy_to_user(arg, &map, sizeof(map)) ? -EFAULT : 0;
+ }
+
+ return -EINVAL;
+}
+
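+/*
+ * Illustrative userspace sketch (not part of this driver; the address used
+ * is hypothetical): a client first requests a read-write or read-only
+ * window over a BMC physical address range, then queries the reserved
+ * memory region that backs mmap() on this device:
+ *
+ *   struct aspeed_p2a_ctrl_mapping map = {
+ *           .addr = 0x80000000,
+ *           .length = 0x1000,
+ *           .flags = ASPEED_P2A_CTRL_READWRITE,
+ *   };
+ *   int fd = open("/dev/aspeed-p2a-ctrl", O_RDWR);
+ *
+ *   ioctl(fd, ASPEED_P2A_CTRL_IOCTL_SET_WINDOW, &map);
+ *   ioctl(fd, ASPEED_P2A_CTRL_IOCTL_GET_MEMORY_CONFIG, &map);
+ *   mmap(NULL, map.length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ */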
+
+/*
+ * When a user opens this file, we create a structure to track their mappings.
+ *
+ * A user can map a region as read-only (bridge enabled), or read-write (bit
+ * flipped, and bridge enabled). Either way, this tracking is used so that
+ * when they release the device, the references are handled.
+ *
+ * The bridge is not enabled until a user calls an ioctl to map a region;
+ * simply opening the device does not enable it.
+ */
+static int aspeed_p2a_open(struct inode *inode, struct file *file)
+{
+ struct aspeed_p2a_user *priv;
+
+ priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->file = file;
+ priv->read = 0;
+ memset(priv->readwrite, 0, sizeof(priv->readwrite));
+
+ /* The file's private_data is initialized to the p2a_ctrl. */
+ priv->parent = file->private_data;
+
+ /* Set the file's private_data to the user's data. */
+ file->private_data = priv;
+
+ return 0;
+}
+
+/*
+ * This will close the user's mappings. It walks what they had opened for
+ * readwrite and decrements those counts. If this was the last user, it
+ * closes the bridge.
+ */
+static int aspeed_p2a_release(struct inode *inode, struct file *file)
+{
+ int i;
+ u32 bits = 0;
+ bool open_regions = false;
+ struct aspeed_p2a_user *priv = file->private_data;
+
+ /* Lock others from changing these values until everything is updated
+ * in one pass.
+ */
+ mutex_lock(&priv->parent->tracking);
+
+ priv->parent->readers -= priv->read;
+
+ for (i = 0; i < P2A_REGION_COUNT; i++) {
+ priv->parent->readerwriters[i] -= priv->readwrite[i];
+
+ if (priv->parent->readerwriters[i] > 0)
+ open_regions = true;
+ else
+ bits |= priv->parent->config->regions[i].bit;
+ }
+
+ /* Setting a bit to 1 disables the region, so let's just OR with the
+ * above to disable any.
+ */
+
+ /* Note, if another user is trying to ioctl, they can't grab tracking,
+ * and therefore can't grab either register mutex.
+ * If another user is trying to close, they can't grab tracking either.
+ */
+ regmap_update_bits(priv->parent->regmap, SCU2C, bits, bits);
+
+ /* If there are no readers left and no regions remain open
+ * read-write, disable the bridge.
+ */
+ if (!open_regions && priv->parent->readers == 0)
+ aspeed_p2a_disable_bridge(priv->parent);
+
+ mutex_unlock(&priv->parent->tracking);
+
+ kfree(priv);
+
+ return 0;
+}
+
+static const struct file_operations aspeed_p2a_ctrl_fops = {
+ .owner = THIS_MODULE,
+ .mmap = aspeed_p2a_mmap,
+ .unlocked_ioctl = aspeed_p2a_ioctl,
+ .open = aspeed_p2a_open,
+ .release = aspeed_p2a_release,
+};
+
+/* The regions are controlled by SCU2C */
+static void aspeed_p2a_disable_all(struct aspeed_p2a_ctrl *p2a_ctrl)
+{
+ int i;
+ u32 value = 0;
+
+ for (i = 0; i < P2A_REGION_COUNT; i++)
+ value |= p2a_ctrl->config->regions[i].bit;
+
+ regmap_update_bits(p2a_ctrl->regmap, SCU2C, value, value);
+
+ /* Disable the bridge. */
+ aspeed_p2a_disable_bridge(p2a_ctrl);
+}
+
+static int aspeed_p2a_ctrl_probe(struct platform_device *pdev)
+{
+ struct aspeed_p2a_ctrl *misc_ctrl;
+ struct device *dev;
+ struct resource resm;
+ struct device_node *node;
+ int rc = 0;
+
+ dev = &pdev->dev;
+
+ misc_ctrl = devm_kzalloc(dev, sizeof(*misc_ctrl), GFP_KERNEL);
+ if (!misc_ctrl)
+ return -ENOMEM;
+
+ mutex_init(&misc_ctrl->tracking);
+
+ /* optional. */
+ node = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (node) {
+ rc = of_address_to_resource(node, 0, &resm);
+ of_node_put(node);
+ if (rc) {
+ dev_err(dev, "Couldn't translate memory-region to a resource\n");
+ return -ENODEV;
+ }
+
+ misc_ctrl->mem_size = resource_size(&resm);
+ misc_ctrl->mem_base = resm.start;
+ }
+
+ misc_ctrl->regmap = syscon_node_to_regmap(pdev->dev.parent->of_node);
+ if (IS_ERR(misc_ctrl->regmap)) {
+ dev_err(dev, "Couldn't get regmap\n");
+ return -ENODEV;
+ }
+
+ misc_ctrl->config = of_device_get_match_data(dev);
+
+ dev_set_drvdata(&pdev->dev, misc_ctrl);
+
+ aspeed_p2a_disable_all(misc_ctrl);
+
+ misc_ctrl->miscdev.minor = MISC_DYNAMIC_MINOR;
+ misc_ctrl->miscdev.name = DEVICE_NAME;
+ misc_ctrl->miscdev.fops = &aspeed_p2a_ctrl_fops;
+ misc_ctrl->miscdev.parent = dev;
+
+ rc = misc_register(&misc_ctrl->miscdev);
+ if (rc)
+ dev_err(dev, "Unable to register device\n");
+
+ return rc;
+}
+
+static int aspeed_p2a_ctrl_remove(struct platform_device *pdev)
+{
+ struct aspeed_p2a_ctrl *p2a_ctrl = dev_get_drvdata(&pdev->dev);
+
+ misc_deregister(&p2a_ctrl->miscdev);
+
+ return 0;
+}
+
+#define SCU2C_DRAM BIT(25)
+#define SCU2C_SPI BIT(24)
+#define SCU2C_SOC BIT(23)
+#define SCU2C_FLASH BIT(22)
+
+static const struct aspeed_p2a_model_data ast2400_model_data = {
+ .regions = {
+ {0x00000000, 0x17FFFFFF, SCU2C_FLASH},
+ {0x18000000, 0x1FFFFFFF, SCU2C_SOC},
+ {0x20000000, 0x2FFFFFFF, SCU2C_FLASH},
+ {0x30000000, 0x3FFFFFFF, SCU2C_SPI},
+ {0x40000000, 0x5FFFFFFF, SCU2C_DRAM},
+ {0x60000000, 0xFFFFFFFF, SCU2C_SOC},
+ }
+};
+
+static const struct aspeed_p2a_model_data ast2500_model_data = {
+ .regions = {
+ {0x00000000, 0x0FFFFFFF, SCU2C_FLASH},
+ {0x10000000, 0x1FFFFFFF, SCU2C_SOC},
+ {0x20000000, 0x3FFFFFFF, SCU2C_FLASH},
+ {0x40000000, 0x5FFFFFFF, SCU2C_SOC},
+ {0x60000000, 0x7FFFFFFF, SCU2C_SPI},
+ {0x80000000, 0xFFFFFFFF, SCU2C_DRAM},
+ }
+};
+
+static const struct of_device_id aspeed_p2a_ctrl_match[] = {
+ { .compatible = "aspeed,ast2400-p2a-ctrl",
+ .data = &ast2400_model_data },
+ { .compatible = "aspeed,ast2500-p2a-ctrl",
+ .data = &ast2500_model_data },
+ { },
+};
+
+static struct platform_driver aspeed_p2a_ctrl_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = aspeed_p2a_ctrl_match,
+ },
+ .probe = aspeed_p2a_ctrl_probe,
+ .remove = aspeed_p2a_ctrl_remove,
+};
+
+module_platform_driver(aspeed_p2a_ctrl_driver);
+
+MODULE_DEVICE_TABLE(of, aspeed_p2a_ctrl_match);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick Venture <venture@google.com>");
+MODULE_DESCRIPTION("Control for aspeed 2400/2500 P2A VGA HOST to BMC mappings");
diff --git a/drivers/soc/aspeed/aspeed-socinfo.c b/drivers/soc/aspeed/aspeed-socinfo.c
new file mode 100644
index 0000000000..3f759121dc
--- /dev/null
+++ b/drivers/soc/aspeed/aspeed-socinfo.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright 2019 IBM Corp. */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+
+static struct {
+ const char *name;
+ const u32 id;
+} const rev_table[] = {
+ /* AST2400 */
+ { "AST2400", 0x02000303 },
+ { "AST1400", 0x02010103 },
+ { "AST1250", 0x02010303 },
+ /* AST2500 */
+ { "AST2500", 0x04000303 },
+ { "AST2510", 0x04000103 },
+ { "AST2520", 0x04000203 },
+ { "AST2530", 0x04000403 },
+ /* AST2600 */
+ { "AST2600", 0x05000303 },
+ { "AST2620", 0x05010203 },
+ { "AST2605", 0x05030103 },
+ { "AST2625", 0x05030403 },
+};
+
+static const char *siliconid_to_name(u32 siliconid)
+{
+ unsigned int id = siliconid & 0xff00ffff;
+ unsigned int i;
+
+ for (i = 0 ; i < ARRAY_SIZE(rev_table) ; ++i) {
+ if (rev_table[i].id == id)
+ return rev_table[i].name;
+ }
+
+ return "Unknown";
+}
+
+static const char *siliconid_to_rev(u32 siliconid)
+{
+ unsigned int rev = (siliconid >> 16) & 0xff;
+ unsigned int gen = (siliconid >> 24) & 0xff;
+
+ if (gen < 0x5) {
+ /* AST2500 and below */
+ switch (rev) {
+ case 0:
+ return "A0";
+ case 1:
+ return "A1";
+ case 3:
+ return "A2";
+ }
+ } else {
+ /* AST2600 */
+ switch (rev) {
+ case 0:
+ return "A0";
+ case 1:
+ return "A1";
+ case 2:
+ return "A2";
+ case 3:
+ return "A3";
+ }
+ }
+
+ return "??";
+}
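+
+/*
+ * Worked example (illustrative values): a raw silicon id of 0x04010303
+ * masks to 0x04000303, which the rev_table above names "AST2500", and its
+ * revision field of 0x01 decodes to "A1" for that generation.
+ */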
+
+static int __init aspeed_socinfo_init(void)
+{
+ struct soc_device_attribute *attrs;
+ struct soc_device *soc_dev;
+ struct device_node *np;
+ void __iomem *reg;
+ bool has_chipid = false;
+ u32 siliconid;
+ u32 chipid[2];
+ const char *machine = NULL;
+
+ np = of_find_compatible_node(NULL, NULL, "aspeed,silicon-id");
+ if (!of_device_is_available(np)) {
+ of_node_put(np);
+ return -ENODEV;
+ }
+
+ reg = of_iomap(np, 0);
+ if (!reg) {
+ of_node_put(np);
+ return -ENODEV;
+ }
+ siliconid = readl(reg);
+ iounmap(reg);
+
+ /* This is optional; the ast2400 does not have it */
+ reg = of_iomap(np, 1);
+ if (reg) {
+ has_chipid = true;
+ chipid[0] = readl(reg);
+ chipid[1] = readl(reg + 4);
+ iounmap(reg);
+ }
+ of_node_put(np);
+
+ attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
+ if (!attrs)
+ return -ENODEV;
+
+ /*
+ * Machine: Romulus BMC
+ * Family: AST2500
+ * Revision: A1
+ * SoC ID: raw silicon revision id
+ * Serial Number: 64-bit chipid
+ */
+
+ np = of_find_node_by_path("/");
+ of_property_read_string(np, "model", &machine);
+ if (machine)
+ attrs->machine = kstrdup(machine, GFP_KERNEL);
+ of_node_put(np);
+
+ attrs->family = siliconid_to_name(siliconid);
+ attrs->revision = siliconid_to_rev(siliconid);
+ attrs->soc_id = kasprintf(GFP_KERNEL, "%08x", siliconid);
+
+ if (has_chipid)
+ attrs->serial_number = kasprintf(GFP_KERNEL, "%08x%08x",
+ chipid[1], chipid[0]);
+
+ soc_dev = soc_device_register(attrs);
+ if (IS_ERR(soc_dev)) {
+ kfree(attrs->machine);
+ kfree(attrs->soc_id);
+ kfree(attrs->serial_number);
+ kfree(attrs);
+ return PTR_ERR(soc_dev);
+ }
+
+ pr_info("ASPEED %s rev %s (%s)\n",
+ attrs->family,
+ attrs->revision,
+ attrs->soc_id);
+
+ return 0;
+}
+early_initcall(aspeed_socinfo_init);
diff --git a/drivers/soc/aspeed/aspeed-uart-routing.c b/drivers/soc/aspeed/aspeed-uart-routing.c
new file mode 100644
index 0000000000..3a4c1f28cb
--- /dev/null
+++ b/drivers/soc/aspeed/aspeed-uart-routing.c
@@ -0,0 +1,602 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2018 Google LLC
+ * Copyright (c) 2021 Aspeed Technology Inc.
+ */
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/platform_device.h>
+
+/* register offsets */
+#define HICR9 0x98
+#define HICRA 0x9c
+
+/* attributes options */
+#define UART_ROUTING_IO1 "io1"
+#define UART_ROUTING_IO2 "io2"
+#define UART_ROUTING_IO3 "io3"
+#define UART_ROUTING_IO4 "io4"
+#define UART_ROUTING_IO5 "io5"
+#define UART_ROUTING_IO6 "io6"
+#define UART_ROUTING_IO10 "io10"
+#define UART_ROUTING_UART1 "uart1"
+#define UART_ROUTING_UART2 "uart2"
+#define UART_ROUTING_UART3 "uart3"
+#define UART_ROUTING_UART4 "uart4"
+#define UART_ROUTING_UART5 "uart5"
+#define UART_ROUTING_UART6 "uart6"
+#define UART_ROUTING_UART10 "uart10"
+#define UART_ROUTING_RES "reserved"
+
+struct aspeed_uart_routing {
+ struct regmap *map;
+ struct attribute_group const *attr_grp;
+};
+
+struct aspeed_uart_routing_selector {
+ struct device_attribute dev_attr;
+ uint8_t reg;
+ uint8_t mask;
+ uint8_t shift;
+ const char *const options[];
+};
+
+#define to_routing_selector(_dev_attr) \
+ container_of(_dev_attr, struct aspeed_uart_routing_selector, dev_attr)
+
+static ssize_t aspeed_uart_routing_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+static ssize_t aspeed_uart_routing_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count);
+
+#define ROUTING_ATTR(_name) { \
+ .attr = {.name = _name, \
+ .mode = VERIFY_OCTAL_PERMISSIONS(0644) }, \
+ .show = aspeed_uart_routing_show, \
+ .store = aspeed_uart_routing_store, \
+}
+
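+/*
+ * Each selector below describes one routing multiplexer: a register (HICR9
+ * or HICRA), a field given by shift/mask, and the option strings listed in
+ * field-value order. For instance, in ast2500_uart1_sel below the field is
+ * HICRA[18:16], where value 0 selects "io1" and value 7 selects "io6".
+ */
+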
+/* routing selector for AST25xx */
+static struct aspeed_uart_routing_selector ast2500_io6_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO6),
+ .reg = HICR9,
+ .shift = 8,
+ .mask = 0xf,
+ .options = {
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART5,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_IO5,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2500_uart5_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART5),
+ .reg = HICRA,
+ .shift = 28,
+ .mask = 0xf,
+ .options = {
+ UART_ROUTING_IO5,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_IO6,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2500_uart4_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART4),
+ .reg = HICRA,
+ .shift = 25,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_IO4,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO3,
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_IO6,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2500_uart3_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART3),
+ .reg = HICRA,
+ .shift = 22,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_IO6,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2500_uart2_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART2),
+ .reg = HICRA,
+ .shift = 19,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_IO1,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART1,
+ UART_ROUTING_IO6,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2500_uart1_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART1),
+ .reg = HICRA,
+ .shift = 16,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_IO6,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2500_io5_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO5),
+ .reg = HICRA,
+ .shift = 12,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_UART5,
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO6,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2500_io4_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO4),
+ .reg = HICRA,
+ .shift = 9,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART5,
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO6,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2500_io3_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO3),
+ .reg = HICRA,
+ .shift = 6,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART5,
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO6,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2500_io2_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO2),
+ .reg = HICRA,
+ .shift = 3,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART5,
+ UART_ROUTING_UART1,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_IO6,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2500_io1_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO1),
+ .reg = HICRA,
+ .shift = 0,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART5,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_IO6,
+ NULL,
+ },
+};
+
+static struct attribute *ast2500_uart_routing_attrs[] = {
+ &ast2500_io6_sel.dev_attr.attr,
+ &ast2500_uart5_sel.dev_attr.attr,
+ &ast2500_uart4_sel.dev_attr.attr,
+ &ast2500_uart3_sel.dev_attr.attr,
+ &ast2500_uart2_sel.dev_attr.attr,
+ &ast2500_uart1_sel.dev_attr.attr,
+ &ast2500_io5_sel.dev_attr.attr,
+ &ast2500_io4_sel.dev_attr.attr,
+ &ast2500_io3_sel.dev_attr.attr,
+ &ast2500_io2_sel.dev_attr.attr,
+ &ast2500_io1_sel.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ast2500_uart_routing_attr_group = {
+ .attrs = ast2500_uart_routing_attrs,
+};
+
+/* routing selector for AST26xx */
+static struct aspeed_uart_routing_selector ast2600_uart10_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART10),
+ .reg = HICR9,
+ .shift = 12,
+ .mask = 0xf,
+ .options = {
+ UART_ROUTING_IO10,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_RES,
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2600_io10_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO10),
+ .reg = HICR9,
+ .shift = 8,
+ .mask = 0xf,
+ .options = {
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_RES,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_RES,
+ UART_ROUTING_UART10,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2600_uart4_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART4),
+ .reg = HICRA,
+ .shift = 25,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_IO4,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO3,
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_IO10,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2600_uart3_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART3),
+ .reg = HICRA,
+ .shift = 22,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_IO10,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2600_uart2_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART2),
+ .reg = HICRA,
+ .shift = 19,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_IO1,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART1,
+ UART_ROUTING_IO10,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2600_uart1_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART1),
+ .reg = HICRA,
+ .shift = 16,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_IO10,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2600_io4_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO4),
+ .reg = HICRA,
+ .shift = 9,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART10,
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO10,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2600_io3_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO3),
+ .reg = HICRA,
+ .shift = 6,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART10,
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO10,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2600_io2_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO2),
+ .reg = HICRA,
+ .shift = 3,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART10,
+ UART_ROUTING_UART1,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_IO10,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2600_io1_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO1),
+ .reg = HICRA,
+ .shift = 0,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART10,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_IO10,
+ NULL,
+ },
+};
+
+static struct attribute *ast2600_uart_routing_attrs[] = {
+ &ast2600_uart10_sel.dev_attr.attr,
+ &ast2600_io10_sel.dev_attr.attr,
+ &ast2600_uart4_sel.dev_attr.attr,
+ &ast2600_uart3_sel.dev_attr.attr,
+ &ast2600_uart2_sel.dev_attr.attr,
+ &ast2600_uart1_sel.dev_attr.attr,
+ &ast2600_io4_sel.dev_attr.attr,
+ &ast2600_io3_sel.dev_attr.attr,
+ &ast2600_io2_sel.dev_attr.attr,
+ &ast2600_io1_sel.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ast2600_uart_routing_attr_group = {
+ .attrs = ast2600_uart_routing_attrs,
+};
+
+static ssize_t aspeed_uart_routing_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct aspeed_uart_routing *uart_routing = dev_get_drvdata(dev);
+ struct aspeed_uart_routing_selector *sel = to_routing_selector(attr);
+ int val, pos, len;
+
+ regmap_read(uart_routing->map, sel->reg, &val);
+ val = (val >> sel->shift) & sel->mask;
+
+ len = 0;
+ for (pos = 0; sel->options[pos] != NULL; ++pos) {
+ if (pos == val)
+ len += sysfs_emit_at(buf, len, "[%s] ", sel->options[pos]);
+ else
+ len += sysfs_emit_at(buf, len, "%s ", sel->options[pos]);
+ }
+
+ if (val >= pos)
+ len += sysfs_emit_at(buf, len, "[unknown(%d)]", val);
+
+ len += sysfs_emit_at(buf, len, "\n");
+
+ return len;
+}
+
+static ssize_t aspeed_uart_routing_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct aspeed_uart_routing *uart_routing = dev_get_drvdata(dev);
+ struct aspeed_uart_routing_selector *sel = to_routing_selector(attr);
+ int val;
+
+ val = __sysfs_match_string(sel->options, -1, buf);
+ if (val < 0) {
+ dev_err(dev, "invalid value \"%s\"\n", buf);
+ return -EINVAL;
+ }
+
+ regmap_update_bits(uart_routing->map, sel->reg,
+ (sel->mask << sel->shift),
+ (val & sel->mask) << sel->shift);
+
+ return count;
+}
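+
+/*
+ * Usage note (illustrative): each selector is exposed as a sysfs attribute
+ * on this device. Reading an attribute lists every routing option with the
+ * active one in square brackets; writing one of the option strings (for
+ * example "uart1" or "io3") updates the corresponding HICR9/HICRA field.
+ */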
+
+static int aspeed_uart_routing_probe(struct platform_device *pdev)
+{
+ int rc;
+ struct device *dev = &pdev->dev;
+ struct aspeed_uart_routing *uart_routing;
+
+ uart_routing = devm_kzalloc(&pdev->dev, sizeof(*uart_routing), GFP_KERNEL);
+ if (!uart_routing)
+ return -ENOMEM;
+
+ uart_routing->map = syscon_node_to_regmap(dev->parent->of_node);
+ if (IS_ERR(uart_routing->map)) {
+ dev_err(dev, "cannot get regmap\n");
+ return PTR_ERR(uart_routing->map);
+ }
+
+ uart_routing->attr_grp = of_device_get_match_data(dev);
+
+ rc = sysfs_create_group(&dev->kobj, uart_routing->attr_grp);
+ if (rc < 0)
+ return rc;
+
+ dev_set_drvdata(dev, uart_routing);
+
+ dev_info(dev, "module loaded\n");
+
+ return 0;
+}
+
+static int aspeed_uart_routing_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct aspeed_uart_routing *uart_routing = platform_get_drvdata(pdev);
+
+ sysfs_remove_group(&dev->kobj, uart_routing->attr_grp);
+
+ return 0;
+}
+
+static const struct of_device_id aspeed_uart_routing_table[] = {
+ { .compatible = "aspeed,ast2400-uart-routing",
+ .data = &ast2500_uart_routing_attr_group },
+ { .compatible = "aspeed,ast2500-uart-routing",
+ .data = &ast2500_uart_routing_attr_group },
+ { .compatible = "aspeed,ast2600-uart-routing",
+ .data = &ast2600_uart_routing_attr_group },
+ { },
+};
+
+static struct platform_driver aspeed_uart_routing_driver = {
+ .driver = {
+ .name = "aspeed-uart-routing",
+ .of_match_table = aspeed_uart_routing_table,
+ },
+ .probe = aspeed_uart_routing_probe,
+ .remove = aspeed_uart_routing_remove,
+};
+
+module_platform_driver(aspeed_uart_routing_driver);
+
+MODULE_AUTHOR("Oskar Senft <osk@google.com>");
+MODULE_AUTHOR("Chia-Wei Wang <chiawei_wang@aspeedtech.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Driver to configure Aspeed UART routing");
diff --git a/drivers/soc/atmel/Kconfig b/drivers/soc/atmel/Kconfig
new file mode 100644
index 0000000000..50caf6db9c
--- /dev/null
+++ b/drivers/soc/atmel/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config AT91_SOC_ID
+ bool "SoC bus for Atmel ARM SoCs"
+ depends on ARCH_AT91 || COMPILE_TEST
+ default ARCH_AT91
+ help
+ Include support for the SoC bus on the Atmel ARM SoCs.
+
+config AT91_SOC_SFR
+ tristate "Special Function Registers support"
+ depends on ARCH_AT91 || COMPILE_TEST
+ help
+ This is a driver for the Special Function Registers available on
+ Atmel SAMA5Dx SoCs, providing access to specific aspects of the
+ integrated memory, bridge implementations, processor etc.
+
+ This driver can also be built as a module. If so, the module
+ will be called sfr.
diff --git a/drivers/soc/atmel/Makefile b/drivers/soc/atmel/Makefile
new file mode 100644
index 0000000000..d849a897cd
--- /dev/null
+++ b/drivers/soc/atmel/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_AT91_SOC_ID) += soc.o
+obj-$(CONFIG_AT91_SOC_SFR) += sfr.o
diff --git a/drivers/soc/atmel/sfr.c b/drivers/soc/atmel/sfr.c
new file mode 100644
index 0000000000..cc94ca1b49
--- /dev/null
+++ b/drivers/soc/atmel/sfr.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * sfr.c - driver for special function registers
+ *
+ * Copyright (C) 2019 Bootlin.
+ *
+ */
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/random.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define SFR_SN0 0x4c
+#define SFR_SN_SIZE 8
+
+struct atmel_sfr_priv {
+ struct regmap *regmap;
+};
+
+static int atmel_sfr_read(void *context, unsigned int offset,
+ void *buf, size_t bytes)
+{
+ struct atmel_sfr_priv *priv = context;
+
+ return regmap_bulk_read(priv->regmap, SFR_SN0 + offset,
+ buf, bytes / 4);
+}
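+
+/*
+ * Note: this is the nvmem reg_read callback, so offset and bytes arrive in
+ * byte units; with the 4-byte stride below, the only accesses issued are
+ * the two 32-bit serial-number words at SFR_SN0 and SFR_SN0 + 4.
+ */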
+
+static struct nvmem_config atmel_sfr_nvmem_config = {
+ .name = "atmel-sfr",
+ .read_only = true,
+ .word_size = 4,
+ .stride = 4,
+ .size = SFR_SN_SIZE,
+ .reg_read = atmel_sfr_read,
+};
+
+static int atmel_sfr_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct nvmem_device *nvmem;
+ struct atmel_sfr_priv *priv;
+ u8 sn[SFR_SN_SIZE];
+ int ret;
+
+ priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regmap = syscon_node_to_regmap(np);
+ if (IS_ERR(priv->regmap)) {
+ dev_err(dev, "cannot get parent's regmap\n");
+ return PTR_ERR(priv->regmap);
+ }
+
+ atmel_sfr_nvmem_config.dev = dev;
+ atmel_sfr_nvmem_config.priv = priv;
+
+ nvmem = devm_nvmem_register(dev, &atmel_sfr_nvmem_config);
+ if (IS_ERR(nvmem)) {
+ dev_err(dev, "error registering nvmem config\n");
+ return PTR_ERR(nvmem);
+ }
+
+ ret = atmel_sfr_read(priv, 0, sn, SFR_SN_SIZE);
+ if (ret == 0)
+ add_device_randomness(sn, SFR_SN_SIZE);
+
+ return ret;
+}
+
+static const struct of_device_id atmel_sfr_dt_ids[] = {
+ {
+ .compatible = "atmel,sama5d2-sfr",
+ }, {
+ .compatible = "atmel,sama5d4-sfr",
+ }, {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(of, atmel_sfr_dt_ids);
+
+static struct platform_driver atmel_sfr_driver = {
+ .probe = atmel_sfr_probe,
+ .driver = {
+ .name = "atmel-sfr",
+ .of_match_table = atmel_sfr_dt_ids,
+ },
+};
+module_platform_driver(atmel_sfr_driver);
+
+MODULE_AUTHOR("Kamel Bouhara <kamel.bouhara@bootlin.com>");
+MODULE_DESCRIPTION("Atmel SFR SN driver for SAMA5D2/4 SoC family");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c
new file mode 100644
index 0000000000..cc9a3e1074
--- /dev/null
+++ b/drivers/soc/atmel/soc.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 Atmel
+ *
+ * Alexandre Belloni <alexandre.belloni@free-electrons.com>
+ * Boris Brezillon <boris.brezillon@free-electrons.com>
+ */
+
+#define pr_fmt(fmt) "AT91: " fmt
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+
+#include "soc.h"
+
+#define AT91_DBGU_CIDR 0x40
+#define AT91_DBGU_EXID 0x44
+#define AT91_CHIPID_CIDR 0x00
+#define AT91_CHIPID_EXID 0x04
+#define AT91_CIDR_VERSION(x, m) ((x) & (m))
+#define AT91_CIDR_VERSION_MASK GENMASK(4, 0)
+#define AT91_CIDR_VERSION_MASK_SAMA7G5 GENMASK(3, 0)
+#define AT91_CIDR_EXT BIT(31)
+#define AT91_CIDR_MATCH_MASK GENMASK(30, 5)
+#define AT91_CIDR_MASK_SAMA7G5 GENMASK(27, 5)
+
+static const struct at91_soc socs[] __initconst = {
+#ifdef CONFIG_SOC_AT91RM9200
+ AT91_SOC(AT91RM9200_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, 0, "at91rm9200 BGA", "at91rm9200"),
+#endif
+#ifdef CONFIG_SOC_AT91SAM9
+ AT91_SOC(AT91SAM9260_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, 0, "at91sam9260", NULL),
+ AT91_SOC(AT91SAM9261_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, 0, "at91sam9261", NULL),
+ AT91_SOC(AT91SAM9263_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, 0, "at91sam9263", NULL),
+ AT91_SOC(AT91SAM9G20_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, 0, "at91sam9g20", NULL),
+ AT91_SOC(AT91SAM9RL64_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, 0, "at91sam9rl64", NULL),
+ AT91_SOC(AT91SAM9G45_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, AT91SAM9M11_EXID_MATCH,
+ "at91sam9m11", "at91sam9g45"),
+ AT91_SOC(AT91SAM9G45_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, AT91SAM9M10_EXID_MATCH,
+ "at91sam9m10", "at91sam9g45"),
+ AT91_SOC(AT91SAM9G45_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, AT91SAM9G46_EXID_MATCH,
+ "at91sam9g46", "at91sam9g45"),
+ AT91_SOC(AT91SAM9G45_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, AT91SAM9G45_EXID_MATCH,
+ "at91sam9g45", "at91sam9g45"),
+ AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, AT91SAM9G15_EXID_MATCH,
+ "at91sam9g15", "at91sam9x5"),
+ AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, AT91SAM9G35_EXID_MATCH,
+ "at91sam9g35", "at91sam9x5"),
+ AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, AT91SAM9X35_EXID_MATCH,
+ "at91sam9x35", "at91sam9x5"),
+ AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, AT91SAM9G25_EXID_MATCH,
+ "at91sam9g25", "at91sam9x5"),
+ AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, AT91SAM9X25_EXID_MATCH,
+ "at91sam9x25", "at91sam9x5"),
+ AT91_SOC(AT91SAM9N12_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, AT91SAM9CN12_EXID_MATCH,
+ "at91sam9cn12", "at91sam9n12"),
+ AT91_SOC(AT91SAM9N12_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, AT91SAM9N12_EXID_MATCH,
+ "at91sam9n12", "at91sam9n12"),
+ AT91_SOC(AT91SAM9N12_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, AT91SAM9CN11_EXID_MATCH,
+ "at91sam9cn11", "at91sam9n12"),
+ AT91_SOC(AT91SAM9XE128_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, 0, "at91sam9xe128", "at91sam9xe128"),
+ AT91_SOC(AT91SAM9XE256_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, 0, "at91sam9xe256", "at91sam9xe256"),
+ AT91_SOC(AT91SAM9XE512_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, 0, "at91sam9xe512", "at91sam9xe512"),
+#endif
+#ifdef CONFIG_SOC_SAM9X60
+ AT91_SOC(SAM9X60_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAM9X60_EXID_MATCH,
+ "sam9x60", "sam9x60"),
+ AT91_SOC(SAM9X60_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAM9X60_D5M_EXID_MATCH,
+ "sam9x60 64MiB DDR2 SiP", "sam9x60"),
+ AT91_SOC(SAM9X60_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAM9X60_D1G_EXID_MATCH,
+ "sam9x60 128MiB DDR2 SiP", "sam9x60"),
+ AT91_SOC(SAM9X60_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAM9X60_D6K_EXID_MATCH,
+ "sam9x60 8MiB SDRAM SiP", "sam9x60"),
+#endif
+#ifdef CONFIG_SOC_SAMA5
+ AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D21CU_EXID_MATCH,
+ "sama5d21", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D22CU_EXID_MATCH,
+ "sama5d22", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D225C_D1M_EXID_MATCH,
+ "sama5d225c 16MiB SiP", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D23CU_EXID_MATCH,
+ "sama5d23", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D24CX_EXID_MATCH,
+ "sama5d24", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D24CU_EXID_MATCH,
+ "sama5d24", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D26CU_EXID_MATCH,
+ "sama5d26", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D27CU_EXID_MATCH,
+ "sama5d27", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D27CN_EXID_MATCH,
+ "sama5d27", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D27C_D1G_EXID_MATCH,
+ "sama5d27c 128MiB SiP", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D27C_D5M_EXID_MATCH,
+ "sama5d27c 64MiB SiP", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D27C_LD1G_EXID_MATCH,
+ "sama5d27c 128MiB LPDDR2 SiP", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D27C_LD2G_EXID_MATCH,
+ "sama5d27c 256MiB LPDDR2 SiP", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D28CU_EXID_MATCH,
+ "sama5d28", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D28CN_EXID_MATCH,
+ "sama5d28", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D28C_D1G_EXID_MATCH,
+ "sama5d28c 128MiB SiP", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D28C_LD1G_EXID_MATCH,
+ "sama5d28c 128MiB LPDDR2 SiP", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D28C_LD2G_EXID_MATCH,
+ "sama5d28c 256MiB LPDDR2 SiP", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D29CN_EXID_MATCH,
+ "sama5d29", "sama5d2"),
+ AT91_SOC(SAMA5D3_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D31_EXID_MATCH,
+ "sama5d31", "sama5d3"),
+ AT91_SOC(SAMA5D3_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D33_EXID_MATCH,
+ "sama5d33", "sama5d3"),
+ AT91_SOC(SAMA5D3_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D34_EXID_MATCH,
+ "sama5d34", "sama5d3"),
+ AT91_SOC(SAMA5D3_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D35_EXID_MATCH,
+ "sama5d35", "sama5d3"),
+ AT91_SOC(SAMA5D3_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D36_EXID_MATCH,
+ "sama5d36", "sama5d3"),
+ AT91_SOC(SAMA5D4_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D41_EXID_MATCH,
+ "sama5d41", "sama5d4"),
+ AT91_SOC(SAMA5D4_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D42_EXID_MATCH,
+ "sama5d42", "sama5d4"),
+ AT91_SOC(SAMA5D4_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D43_EXID_MATCH,
+ "sama5d43", "sama5d4"),
+ AT91_SOC(SAMA5D4_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMA5D44_EXID_MATCH,
+ "sama5d44", "sama5d4"),
+#endif
+#ifdef CONFIG_SOC_SAMV7
+ AT91_SOC(SAME70Q21_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAME70Q21_EXID_MATCH,
+ "same70q21", "same7"),
+ AT91_SOC(SAME70Q20_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAME70Q20_EXID_MATCH,
+ "same70q20", "same7"),
+ AT91_SOC(SAME70Q19_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAME70Q19_EXID_MATCH,
+ "same70q19", "same7"),
+ AT91_SOC(SAMS70Q21_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMS70Q21_EXID_MATCH,
+ "sams70q21", "sams7"),
+ AT91_SOC(SAMS70Q20_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMS70Q20_EXID_MATCH,
+ "sams70q20", "sams7"),
+ AT91_SOC(SAMS70Q19_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMS70Q19_EXID_MATCH,
+ "sams70q19", "sams7"),
+ AT91_SOC(SAMV71Q21_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMV71Q21_EXID_MATCH,
+ "samv71q21", "samv7"),
+ AT91_SOC(SAMV71Q20_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMV71Q20_EXID_MATCH,
+ "samv71q20", "samv7"),
+ AT91_SOC(SAMV71Q19_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMV71Q19_EXID_MATCH,
+ "samv71q19", "samv7"),
+ AT91_SOC(SAMV70Q20_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMV70Q20_EXID_MATCH,
+ "samv70q20", "samv7"),
+ AT91_SOC(SAMV70Q19_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK, SAMV70Q19_EXID_MATCH,
+ "samv70q19", "samv7"),
+#endif
+#ifdef CONFIG_SOC_SAMA7
+ AT91_SOC(SAMA7G5_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK_SAMA7G5, SAMA7G51_EXID_MATCH,
+ "sama7g51", "sama7g5"),
+ AT91_SOC(SAMA7G5_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK_SAMA7G5, SAMA7G52_EXID_MATCH,
+ "sama7g52", "sama7g5"),
+ AT91_SOC(SAMA7G5_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK_SAMA7G5, SAMA7G53_EXID_MATCH,
+ "sama7g53", "sama7g5"),
+ AT91_SOC(SAMA7G5_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK_SAMA7G5, SAMA7G54_EXID_MATCH,
+ "sama7g54", "sama7g5"),
+ AT91_SOC(SAMA7G5_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK_SAMA7G5, SAMA7G54_D1G_EXID_MATCH,
+ "SAMA7G54 1Gb DDR3L SiP", "sama7g5"),
+ AT91_SOC(SAMA7G5_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK_SAMA7G5, SAMA7G54_D2G_EXID_MATCH,
+ "SAMA7G54 2Gb DDR3L SiP", "sama7g5"),
+ AT91_SOC(SAMA7G5_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK_SAMA7G5, SAMA7G54_D4G_EXID_MATCH,
+ "SAMA7G54 4Gb DDR3L SiP", "sama7g5"),
+#endif
+ { /* sentinel */ },
+};
+
+static int __init at91_get_cidr_exid_from_dbgu(u32 *cidr, u32 *exid)
+{
+ struct device_node *np;
+ void __iomem *regs;
+
+ np = of_find_compatible_node(NULL, NULL, "atmel,at91rm9200-dbgu");
+ if (!np)
+ np = of_find_compatible_node(NULL, NULL,
+ "atmel,at91sam9260-dbgu");
+ if (!np)
+ return -ENODEV;
+
+ regs = of_iomap(np, 0);
+ of_node_put(np);
+
+ if (!regs) {
+ pr_warn("Could not map DBGU iomem range");
+ return -ENXIO;
+ }
+
+ *cidr = readl(regs + AT91_DBGU_CIDR);
+ *exid = readl(regs + AT91_DBGU_EXID);
+
+ iounmap(regs);
+
+ return 0;
+}
+
+static int __init at91_get_cidr_exid_from_chipid(u32 *cidr, u32 *exid)
+{
+ struct device_node *np;
+ void __iomem *regs;
+ static const struct of_device_id chipids[] = {
+ { .compatible = "atmel,sama5d2-chipid" },
+ { .compatible = "microchip,sama7g5-chipid" },
+ { },
+ };
+
+ np = of_find_matching_node(NULL, chipids);
+ if (!np)
+ return -ENODEV;
+
+ regs = of_iomap(np, 0);
+ of_node_put(np);
+
+ if (!regs) {
+ pr_warn("Could not map chipid iomem range");
+ return -ENXIO;
+ }
+
+ *cidr = readl(regs + AT91_CHIPID_CIDR);
+ *exid = readl(regs + AT91_CHIPID_EXID);
+
+ iounmap(regs);
+
+ return 0;
+}
+
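+/*
+ * Matching is done in two steps: the CIDR is compared under the SoC's CIDR
+ * mask, and only when the CIDR extension bit is set is the EXID compared as
+ * well. As a purely illustrative example, a CIDR of 0x8a5c08c1 masked with
+ * AT91_CIDR_MATCH_MASK gives 0x0a5c08c0 (the SAMA5D2 match value); the EXT
+ * bit is set, so an EXID of 0x00000011 selects "sama5d27", and the low
+ * version bits report revision 1.
+ */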
+struct soc_device * __init at91_soc_init(const struct at91_soc *socs)
+{
+ struct soc_device_attribute *soc_dev_attr;
+ const struct at91_soc *soc;
+ struct soc_device *soc_dev;
+ u32 cidr, exid;
+ int ret;
+
+ /*
+ * With SAMA5D2 and later SoCs, the CIDR and EXID registers are no
+ * longer in the dbgu device but in the chipid device, whose only
+ * purpose is to expose these two registers.
+ */
+ ret = at91_get_cidr_exid_from_dbgu(&cidr, &exid);
+ if (ret)
+ ret = at91_get_cidr_exid_from_chipid(&cidr, &exid);
+ if (ret) {
+ if (ret == -ENODEV)
+ pr_warn("Could not find identification node");
+ return NULL;
+ }
+
+ for (soc = socs; soc->name; soc++) {
+ if (soc->cidr_match != (cidr & soc->cidr_mask))
+ continue;
+
+ if (!(cidr & AT91_CIDR_EXT) || soc->exid_match == exid)
+ break;
+ }
+
+ if (!soc->name) {
+ pr_warn("Could not find matching SoC description\n");
+ return NULL;
+ }
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return NULL;
+
+ soc_dev_attr->family = soc->family;
+ soc_dev_attr->soc_id = soc->name;
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%X",
+ AT91_CIDR_VERSION(cidr, soc->version_mask));
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ kfree(soc_dev_attr->revision);
+ kfree(soc_dev_attr);
+ pr_warn("Could not register SoC device\n");
+ return NULL;
+ }
+
+ if (soc->family)
+ pr_info("Detected SoC family: %s\n", soc->family);
+ pr_info("Detected SoC: %s, revision %X\n", soc->name,
+ AT91_CIDR_VERSION(cidr, soc->version_mask));
+
+ return soc_dev;
+}
+
+static const struct of_device_id at91_soc_allowed_list[] __initconst = {
+ { .compatible = "atmel,at91rm9200", },
+ { .compatible = "atmel,at91sam9", },
+ { .compatible = "atmel,sama5", },
+ { .compatible = "atmel,samv7", },
+ { .compatible = "microchip,sama7g5", },
+ { }
+};
+
+static int __init atmel_soc_device_init(void)
+{
+ struct device_node *np = of_find_node_by_path("/");
+
+ if (!of_match_node(at91_soc_allowed_list, np))
+ return 0;
+
+ at91_soc_init(socs);
+
+ return 0;
+}
+subsys_initcall(atmel_soc_device_init);
diff --git a/drivers/soc/atmel/soc.h b/drivers/soc/atmel/soc.h
new file mode 100644
index 0000000000..7a9f47ce85
--- /dev/null
+++ b/drivers/soc/atmel/soc.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Atmel
+ *
+ * Boris Brezillon <boris.brezillon@free-electrons.com>
+ */
+
+#ifndef __AT91_SOC_H
+#define __AT91_SOC_H
+
+#include <linux/sys_soc.h>
+
+struct at91_soc {
+ u32 cidr_match;
+ u32 cidr_mask;
+ u32 version_mask;
+ u32 exid_match;
+ const char *name;
+ const char *family;
+};
+
+#define AT91_SOC(__cidr, __cidr_mask, __version_mask, __exid, \
+ __name, __family) \
+ { \
+ .cidr_match = (__cidr), \
+ .cidr_mask = (__cidr_mask), \
+ .version_mask = (__version_mask), \
+ .exid_match = (__exid), \
+ .name = (__name), \
+ .family = (__family), \
+ }
+
+struct soc_device * __init
+at91_soc_init(const struct at91_soc *socs);
+
+#define AT91RM9200_CIDR_MATCH 0x09290780
+
+#define AT91SAM9260_CIDR_MATCH 0x019803a0
+#define AT91SAM9261_CIDR_MATCH 0x019703a0
+#define AT91SAM9263_CIDR_MATCH 0x019607a0
+#define AT91SAM9G20_CIDR_MATCH 0x019905a0
+#define AT91SAM9RL64_CIDR_MATCH 0x019b03a0
+#define AT91SAM9G45_CIDR_MATCH 0x019b05a0
+#define AT91SAM9X5_CIDR_MATCH 0x019a05a0
+#define AT91SAM9N12_CIDR_MATCH 0x019a07a0
+#define SAM9X60_CIDR_MATCH 0x019b35a0
+#define SAMA7G5_CIDR_MATCH 0x00162100
+
+#define AT91SAM9M11_EXID_MATCH 0x00000001
+#define AT91SAM9M10_EXID_MATCH 0x00000002
+#define AT91SAM9G46_EXID_MATCH 0x00000003
+#define AT91SAM9G45_EXID_MATCH 0x00000004
+
+#define AT91SAM9G15_EXID_MATCH 0x00000000
+#define AT91SAM9G35_EXID_MATCH 0x00000001
+#define AT91SAM9X35_EXID_MATCH 0x00000002
+#define AT91SAM9G25_EXID_MATCH 0x00000003
+#define AT91SAM9X25_EXID_MATCH 0x00000004
+
+#define AT91SAM9CN12_EXID_MATCH 0x00000005
+#define AT91SAM9N12_EXID_MATCH 0x00000006
+#define AT91SAM9CN11_EXID_MATCH 0x00000009
+
+#define SAM9X60_EXID_MATCH 0x00000000
+#define SAM9X60_D5M_EXID_MATCH 0x00000001
+#define SAM9X60_D1G_EXID_MATCH 0x00000010
+#define SAM9X60_D6K_EXID_MATCH 0x00000011
+
+#define SAMA7G51_EXID_MATCH 0x3
+#define SAMA7G52_EXID_MATCH 0x2
+#define SAMA7G53_EXID_MATCH 0x1
+#define SAMA7G54_EXID_MATCH 0x0
+#define SAMA7G54_D1G_EXID_MATCH 0x00000018
+#define SAMA7G54_D2G_EXID_MATCH 0x00000020
+#define SAMA7G54_D4G_EXID_MATCH 0x00000028
+
+#define AT91SAM9XE128_CIDR_MATCH 0x329973a0
+#define AT91SAM9XE256_CIDR_MATCH 0x329a93a0
+#define AT91SAM9XE512_CIDR_MATCH 0x329aa3a0
+
+#define SAMA5D2_CIDR_MATCH 0x0a5c08c0
+#define SAMA5D21CU_EXID_MATCH 0x0000005a
+#define SAMA5D225C_D1M_EXID_MATCH 0x00000053
+#define SAMA5D22CU_EXID_MATCH 0x00000059
+#define SAMA5D22CN_EXID_MATCH 0x00000069
+#define SAMA5D23CU_EXID_MATCH 0x00000058
+#define SAMA5D24CX_EXID_MATCH 0x00000004
+#define SAMA5D24CU_EXID_MATCH 0x00000014
+#define SAMA5D26CU_EXID_MATCH 0x00000012
+#define SAMA5D27C_D1G_EXID_MATCH 0x00000033
+#define SAMA5D27C_D5M_EXID_MATCH 0x00000032
+#define SAMA5D27C_LD1G_EXID_MATCH 0x00000061
+#define SAMA5D27C_LD2G_EXID_MATCH 0x00000062
+#define SAMA5D27CU_EXID_MATCH 0x00000011
+#define SAMA5D27CN_EXID_MATCH 0x00000021
+#define SAMA5D28C_D1G_EXID_MATCH 0x00000013
+#define SAMA5D28C_LD1G_EXID_MATCH 0x00000071
+#define SAMA5D28C_LD2G_EXID_MATCH 0x00000072
+#define SAMA5D28CU_EXID_MATCH 0x00000010
+#define SAMA5D28CN_EXID_MATCH 0x00000020
+#define SAMA5D29CN_EXID_MATCH 0x00000023
+
+#define SAMA5D3_CIDR_MATCH 0x0a5c07c0
+#define SAMA5D31_EXID_MATCH 0x00444300
+#define SAMA5D33_EXID_MATCH 0x00414300
+#define SAMA5D34_EXID_MATCH 0x00414301
+#define SAMA5D35_EXID_MATCH 0x00584300
+#define SAMA5D36_EXID_MATCH 0x00004301
+
+#define SAMA5D4_CIDR_MATCH 0x0a5c07c0
+#define SAMA5D41_EXID_MATCH 0x00000001
+#define SAMA5D42_EXID_MATCH 0x00000002
+#define SAMA5D43_EXID_MATCH 0x00000003
+#define SAMA5D44_EXID_MATCH 0x00000004
+
+#define SAME70Q21_CIDR_MATCH 0x21020e00
+#define SAME70Q21_EXID_MATCH 0x00000002
+#define SAME70Q20_CIDR_MATCH 0x21020c00
+#define SAME70Q20_EXID_MATCH 0x00000002
+#define SAME70Q19_CIDR_MATCH 0x210d0a00
+#define SAME70Q19_EXID_MATCH 0x00000002
+
+#define SAMS70Q21_CIDR_MATCH 0x21120e00
+#define SAMS70Q21_EXID_MATCH 0x00000002
+#define SAMS70Q20_CIDR_MATCH 0x21120c00
+#define SAMS70Q20_EXID_MATCH 0x00000002
+#define SAMS70Q19_CIDR_MATCH 0x211d0a00
+#define SAMS70Q19_EXID_MATCH 0x00000002
+
+#define SAMV71Q21_CIDR_MATCH 0x21220e00
+#define SAMV71Q21_EXID_MATCH 0x00000002
+#define SAMV71Q20_CIDR_MATCH 0x21220c00
+#define SAMV71Q20_EXID_MATCH 0x00000002
+#define SAMV71Q19_CIDR_MATCH 0x212d0a00
+#define SAMV71Q19_EXID_MATCH 0x00000002
+
+#define SAMV70Q20_CIDR_MATCH 0x21320c00
+#define SAMV70Q20_EXID_MATCH 0x00000002
+#define SAMV70Q19_CIDR_MATCH 0x213d0a00
+#define SAMV70Q19_EXID_MATCH 0x00000002
+
+#endif /* __AT91_SOC_H */
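For illustration, a detection table entry for at91_soc_init() could be built from the macros above roughly as shown below. This is a minimal sketch: the real table lives in soc.c, the CIDR and version mask values here are placeholders rather than values taken from the driver, and a sentinel-terminated array is assumed (matching the at91_soc_init(socs) call in soc.c).

    static const struct at91_soc socs[] __initconst = {
    	/* cidr_match, cidr_mask, version_mask, exid_match, name, family */
    	AT91_SOC(SAM9X60_CIDR_MATCH, 0x7fffffe0, 0x1f,
    		 SAM9X60_EXID_MATCH, "sam9x60", "sam9x60"),
    	{ /* sentinel */ },
    };

    at91_soc_init(socs);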
diff --git a/drivers/soc/bcm/Kconfig b/drivers/soc/bcm/Kconfig
new file mode 100644
index 0000000000..f96906795f
--- /dev/null
+++ b/drivers/soc/bcm/Kconfig
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "Broadcom SoC drivers"
+
+config BCM2835_POWER
+ bool "BCM2835 power domain driver"
+ depends on ARCH_BCM2835 || (COMPILE_TEST && OF)
+ default y if ARCH_BCM2835
+ select PM_GENERIC_DOMAINS if PM
+ select RESET_CONTROLLER
+ help
+ This enables support for the BCM2835 power domains and reset
+ controller. If the Raspberry Pi firmware manages a power domain,
+ Linux must access that same domain through the
+ RASPBERRYPI_POWER driver instead.
+
+config RASPBERRYPI_POWER
+ bool "Raspberry Pi power domain driver"
+ depends on ARCH_BCM2835 || (COMPILE_TEST && OF)
+ depends on RASPBERRYPI_FIRMWARE=y
+ select PM_GENERIC_DOMAINS if PM
+ help
+ This enables support for the RPi power domains which can be enabled
+ or disabled via the RPi firmware.
+
+config SOC_BCM63XX
+ bool "Broadcom 63xx SoC drivers"
+ depends on BMIPS_GENERIC || COMPILE_TEST
+ help
+ Enables drivers for the Broadcom 63xx series of chips.
+ Drivers can be enabled individually within this menu.
+
+ If unsure, say N.
+
+config SOC_BRCMSTB
+ bool "Broadcom STB SoC drivers"
+ depends on ARM || ARM64 || BMIPS_GENERIC || COMPILE_TEST
+ select SOC_BUS
+ help
+ Enables drivers for the Broadcom Set-Top Box (STB) series of chips.
+ This option alone enables only some support code, while the drivers
+ can be enabled individually within this menu.
+
+ If unsure, say N.
+
+config BCM_PMB
+ bool "Broadcom PMB (Power Management Bus) driver"
+ depends on ARCH_BCMBCA || (COMPILE_TEST && OF)
+ default ARCH_BCMBCA
+ select PM_GENERIC_DOMAINS if PM
+ help
+ This enables support for Broadcom's PMB (Power Management Bus), which
+ is used for enabling and disabling SoC devices.
+
+if SOC_BCM63XX
+
+config BCM63XX_POWER
+ bool "BCM63xx power domain driver"
+ depends on BMIPS_GENERIC || (COMPILE_TEST && OF)
+ select PM_GENERIC_DOMAINS if PM
+ help
+ This enables support for the BCM63xx power domains controller on
+ BCM6318, BCM6328, BCM6362 and BCM63268 SoCs.
+
+endif # SOC_BCM63XX
+
+source "drivers/soc/bcm/brcmstb/Kconfig"
+
+endmenu
diff --git a/drivers/soc/bcm/Makefile b/drivers/soc/bcm/Makefile
new file mode 100644
index 0000000000..32424b1032
--- /dev/null
+++ b/drivers/soc/bcm/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_SOC_BRCMSTB) += brcmstb/
diff --git a/drivers/soc/bcm/brcmstb/Kconfig b/drivers/soc/bcm/brcmstb/Kconfig
new file mode 100644
index 0000000000..c68d0e5267
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+if SOC_BRCMSTB
+
+config BRCMSTB_PM
+ bool "Support suspend/resume for STB platforms"
+ default y
+ depends on PM && BMIPS_GENERIC
+
+endif # SOC_BRCMSTB
diff --git a/drivers/soc/bcm/brcmstb/Makefile b/drivers/soc/bcm/brcmstb/Makefile
new file mode 100644
index 0000000000..fe5c43d26d
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y += common.o biuctrl.o
+obj-$(CONFIG_BRCMSTB_PM) += pm/
diff --git a/drivers/soc/bcm/brcmstb/biuctrl.c b/drivers/soc/bcm/brcmstb/biuctrl.c
new file mode 100644
index 0000000000..364ddbe365
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/biuctrl.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Broadcom STB SoCs Bus Unit Interface controls
+ *
+ * Copyright (C) 2015, Broadcom Corporation
+ */
+
+#define pr_fmt(fmt) "brcmstb: " KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/syscore_ops.h>
+#include <linux/soc/brcmstb/brcmstb.h>
+
+#define RACENPREF_MASK 0x3
+#define RACPREFINST_SHIFT 0
+#define RACENINST_SHIFT 2
+#define RACPREFDATA_SHIFT 4
+#define RACENDATA_SHIFT 6
+#define RAC_CPU_SHIFT 8
+#define RACCFG_MASK 0xff
+#define DPREF_LINE_2_SHIFT 24
+#define DPREF_LINE_2_MASK 0xff
+
+/* Bitmask to enable instruction and data prefetching with a 256-byte stride */
+#define RAC_DATA_INST_EN_MASK (1 << RACPREFINST_SHIFT | \
+ RACENPREF_MASK << RACENINST_SHIFT | \
+ 1 << RACPREFDATA_SHIFT | \
+ RACENPREF_MASK << RACENDATA_SHIFT)
+
+#define CPU_CREDIT_REG_MCPx_WR_PAIRING_EN_MASK 0x70000000
+#define CPU_CREDIT_REG_MCPx_READ_CRED_MASK 0xf
+#define CPU_CREDIT_REG_MCPx_WRITE_CRED_MASK 0xf
+#define CPU_CREDIT_REG_MCPx_READ_CRED_SHIFT(x) ((x) * 8)
+#define CPU_CREDIT_REG_MCPx_WRITE_CRED_SHIFT(x) (((x) * 8) + 4)
+
+#define CPU_MCP_FLOW_REG_MCPx_RDBUFF_CRED_SHIFT(x) ((x) * 8)
+#define CPU_MCP_FLOW_REG_MCPx_RDBUFF_CRED_MASK 0xff
+
+#define CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_THRESHOLD_MASK 0xf
+#define CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_TIMEOUT_MASK 0xf
+#define CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_TIMEOUT_SHIFT 4
+#define CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_ENABLE BIT(8)
+
+static void __iomem *cpubiuctrl_base;
+static bool mcp_wr_pairing_en;
+static const int *cpubiuctrl_regs;
+
+enum cpubiuctrl_regs {
+ CPU_CREDIT_REG = 0,
+ CPU_MCP_FLOW_REG,
+ CPU_WRITEBACK_CTRL_REG,
+ RAC_CONFIG0_REG,
+ RAC_CONFIG1_REG,
+ NUM_CPU_BIUCTRL_REGS,
+};
+
+static inline u32 cbc_readl(int reg)
+{
+ int offset = cpubiuctrl_regs[reg];
+
+ if (offset == -1 ||
+ (IS_ENABLED(CONFIG_CACHE_B15_RAC) && reg >= RAC_CONFIG0_REG))
+ return (u32)-1;
+
+ return readl_relaxed(cpubiuctrl_base + offset);
+}
+
+static inline void cbc_writel(u32 val, int reg)
+{
+ int offset = cpubiuctrl_regs[reg];
+
+ if (offset == -1 ||
+ (IS_ENABLED(CONFIG_CACHE_B15_RAC) && reg >= RAC_CONFIG0_REG))
+ return;
+
+ writel(val, cpubiuctrl_base + offset);
+}
+
+static const int b15_cpubiuctrl_regs[] = {
+ [CPU_CREDIT_REG] = 0x184,
+ [CPU_MCP_FLOW_REG] = -1,
+ [CPU_WRITEBACK_CTRL_REG] = -1,
+ [RAC_CONFIG0_REG] = -1,
+ [RAC_CONFIG1_REG] = -1,
+};
+
+/* Odd cases, e.g. 7260A0 */
+static const int b53_cpubiuctrl_no_wb_regs[] = {
+ [CPU_CREDIT_REG] = 0x0b0,
+ [CPU_MCP_FLOW_REG] = 0x0b4,
+ [CPU_WRITEBACK_CTRL_REG] = -1,
+ [RAC_CONFIG0_REG] = 0x78,
+ [RAC_CONFIG1_REG] = 0x7c,
+};
+
+static const int b53_cpubiuctrl_regs[] = {
+ [CPU_CREDIT_REG] = 0x0b0,
+ [CPU_MCP_FLOW_REG] = 0x0b4,
+ [CPU_WRITEBACK_CTRL_REG] = 0x22c,
+ [RAC_CONFIG0_REG] = 0x78,
+ [RAC_CONFIG1_REG] = 0x7c,
+};
+
+static const int a72_cpubiuctrl_regs[] = {
+ [CPU_CREDIT_REG] = 0x18,
+ [CPU_MCP_FLOW_REG] = 0x1c,
+ [CPU_WRITEBACK_CTRL_REG] = 0x20,
+ [RAC_CONFIG0_REG] = 0x08,
+ [RAC_CONFIG1_REG] = 0x0c,
+};
+
+static int __init mcp_write_pairing_set(void)
+{
+ u32 creds = 0;
+
+ if (!cpubiuctrl_base)
+ return -1;
+
+ creds = cbc_readl(CPU_CREDIT_REG);
+ if (mcp_wr_pairing_en) {
+ pr_info("MCP: Enabling write pairing\n");
+ cbc_writel(creds | CPU_CREDIT_REG_MCPx_WR_PAIRING_EN_MASK,
+ CPU_CREDIT_REG);
+ } else if (creds & CPU_CREDIT_REG_MCPx_WR_PAIRING_EN_MASK) {
+ pr_info("MCP: Disabling write pairing\n");
+ cbc_writel(creds & ~CPU_CREDIT_REG_MCPx_WR_PAIRING_EN_MASK,
+ CPU_CREDIT_REG);
+ } else {
+ pr_info("MCP: Write pairing already disabled\n");
+ }
+
+ return 0;
+}
+
+static const u32 a72_b53_mach_compat[] = {
+ 0x7211,
+ 0x72113,
+ 0x72116,
+ 0x7216,
+ 0x72164,
+ 0x72165,
+ 0x7255,
+ 0x7260,
+ 0x7268,
+ 0x7271,
+ 0x7278,
+};
+
+/* The read-ahead cache present in the Brahma-B53 CPU is a special piece of
+ * hardware after the integrated L2 cache of the B53 CPU complex whose purpose
+ * is to prefetch instruction and/or data with a line size of either 64 bytes
+ * or 256 bytes. The rationale is that the data-bus of the CPU interface is
+ * optimized for 256-byte transactions, and enabling the read-ahead cache
+ * provides a significant performance boost (typically twice the performance
+ * for a memcpy benchmark application).
+ *
+ * The read-ahead cache is transparent for Virtual Address cache maintenance
+ * operations: IC IVAU, DC IVAC, DC CVAC, DC CVAU and DC CIVAC. So no special
+ * handling is needed for the DMA API above and beyond what is included in the
+ * arm64 implementation.
+ *
+ * In addition, since the Point of Unification is typically between L1 and L2
+ * for the Brahma-B53 processor no special read-ahead cache handling is needed
+ * for the IC IALLU and IC IALLUIS cache maintenance operations.
+ *
+ * However, it is not possible to specify the cache level (L3) for the cache
+ * maintenance instructions operating by set/way to operate on the read-ahead
+ * cache. The read-ahead cache will maintain coherency when inner cache lines
+ * are cleaned by set/way, but if it is necessary to invalidate inner cache
+ * lines by set/way to maintain coherency with system masters operating on
+ * shared memory that does not have hardware support for coherency, then it
+ * will also be necessary to explicitly invalidate the read-ahead cache.
+ */
+static void __init a72_b53_rac_enable_all(struct device_node *np)
+{
+ unsigned int cpu;
+ u32 enable = 0, pref_dist, shift;
+
+ if (IS_ENABLED(CONFIG_CACHE_B15_RAC))
+ return;
+
+ if (WARN(num_possible_cpus() > 4, "RAC only supports 4 CPUs\n"))
+ return;
+
+ pref_dist = cbc_readl(RAC_CONFIG1_REG);
+ for_each_possible_cpu(cpu) {
+ shift = cpu * RAC_CPU_SHIFT + RACPREFDATA_SHIFT;
+ enable |= RAC_DATA_INST_EN_MASK << (cpu * RAC_CPU_SHIFT);
+ if (cpubiuctrl_regs == a72_cpubiuctrl_regs) {
+ enable &= ~(RACENPREF_MASK << shift);
+ enable |= 3 << shift;
+ pref_dist |= 1 << (cpu + DPREF_LINE_2_SHIFT);
+ }
+ }
+
+ cbc_writel(enable, RAC_CONFIG0_REG);
+ cbc_writel(pref_dist, RAC_CONFIG1_REG);
+
+ pr_info("%pOF: Broadcom %s read-ahead cache\n",
+ np, cpubiuctrl_regs == a72_cpubiuctrl_regs ?
+ "Cortex-A72" : "Brahma-B53");
+}
+
+static void __init mcp_a72_b53_set(void)
+{
+ unsigned int i;
+ u32 reg;
+
+ reg = brcmstb_get_family_id();
+
+ for (i = 0; i < ARRAY_SIZE(a72_b53_mach_compat); i++) {
+ if (BRCM_ID(reg) == a72_b53_mach_compat[i])
+ break;
+ }
+
+ if (i == ARRAY_SIZE(a72_b53_mach_compat))
+ return;
+
+ /* Set all 3 MCP interfaces to 8 credits */
+ reg = cbc_readl(CPU_CREDIT_REG);
+ for (i = 0; i < 3; i++) {
+ reg &= ~(CPU_CREDIT_REG_MCPx_WRITE_CRED_MASK <<
+ CPU_CREDIT_REG_MCPx_WRITE_CRED_SHIFT(i));
+ reg &= ~(CPU_CREDIT_REG_MCPx_READ_CRED_MASK <<
+ CPU_CREDIT_REG_MCPx_READ_CRED_SHIFT(i));
+ reg |= 8 << CPU_CREDIT_REG_MCPx_WRITE_CRED_SHIFT(i);
+ reg |= 8 << CPU_CREDIT_REG_MCPx_READ_CRED_SHIFT(i);
+ }
+ cbc_writel(reg, CPU_CREDIT_REG);
+
+ /* Max out the number of in-flight Jwords reads on the MCP interface */
+ reg = cbc_readl(CPU_MCP_FLOW_REG);
+ for (i = 0; i < 3; i++)
+ reg |= CPU_MCP_FLOW_REG_MCPx_RDBUFF_CRED_MASK <<
+ CPU_MCP_FLOW_REG_MCPx_RDBUFF_CRED_SHIFT(i);
+ cbc_writel(reg, CPU_MCP_FLOW_REG);
+
+ /* Enable writeback throttling, set timeout to 128 cycles, 256 cycles
+ * threshold
+ */
+ reg = cbc_readl(CPU_WRITEBACK_CTRL_REG);
+ reg |= CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_ENABLE;
+ reg &= ~CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_THRESHOLD_MASK;
+ reg &= ~(CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_TIMEOUT_MASK <<
+ CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_TIMEOUT_SHIFT);
+ reg |= 8;
+ reg |= 7 << CPU_WRITEBACK_CTRL_REG_WB_THROTTLE_TIMEOUT_SHIFT;
+ cbc_writel(reg, CPU_WRITEBACK_CTRL_REG);
+}
+
+static int __init setup_hifcpubiuctrl_regs(struct device_node *np)
+{
+ struct device_node *cpu_dn;
+ u32 family_id;
+ int ret = 0;
+
+ cpubiuctrl_base = of_iomap(np, 0);
+ if (!cpubiuctrl_base) {
+ pr_err("failed to remap BIU control base\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mcp_wr_pairing_en = of_property_read_bool(np, "brcm,write-pairing");
+
+ cpu_dn = of_get_cpu_node(0, NULL);
+ if (!cpu_dn) {
+ pr_err("failed to obtain CPU device node\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (of_device_is_compatible(cpu_dn, "brcm,brahma-b15"))
+ cpubiuctrl_regs = b15_cpubiuctrl_regs;
+ else if (of_device_is_compatible(cpu_dn, "brcm,brahma-b53"))
+ cpubiuctrl_regs = b53_cpubiuctrl_regs;
+ else if (of_device_is_compatible(cpu_dn, "arm,cortex-a72"))
+ cpubiuctrl_regs = a72_cpubiuctrl_regs;
+ else {
+ pr_err("unsupported CPU\n");
+ ret = -EINVAL;
+ }
+ of_node_put(cpu_dn);
+
+ family_id = brcmstb_get_family_id();
+ if (BRCM_ID(family_id) == 0x7260 && BRCM_REV(family_id) == 0)
+ cpubiuctrl_regs = b53_cpubiuctrl_no_wb_regs;
+out:
+ if (ret && cpubiuctrl_base) {
+ iounmap(cpubiuctrl_base);
+ cpubiuctrl_base = NULL;
+ }
+ return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static u32 cpubiuctrl_reg_save[NUM_CPU_BIUCTRL_REGS];
+
+static int brcmstb_cpu_credit_reg_suspend(void)
+{
+ unsigned int i;
+
+ if (!cpubiuctrl_base)
+ return 0;
+
+ for (i = 0; i < NUM_CPU_BIUCTRL_REGS; i++)
+ cpubiuctrl_reg_save[i] = cbc_readl(i);
+
+ return 0;
+}
+
+static void brcmstb_cpu_credit_reg_resume(void)
+{
+ unsigned int i;
+
+ if (!cpubiuctrl_base)
+ return;
+
+ for (i = 0; i < NUM_CPU_BIUCTRL_REGS; i++)
+ cbc_writel(cpubiuctrl_reg_save[i], i);
+}
+
+static struct syscore_ops brcmstb_cpu_credit_syscore_ops = {
+ .suspend = brcmstb_cpu_credit_reg_suspend,
+ .resume = brcmstb_cpu_credit_reg_resume,
+};
+#endif
+
+
+static int __init brcmstb_biuctrl_init(void)
+{
+ struct device_node *np;
+ int ret;
+
+ /* We might be running on a multi-platform kernel, don't make this a
+ * fatal error, just bail out early
+ */
+ np = of_find_compatible_node(NULL, NULL, "brcm,brcmstb-cpu-biu-ctrl");
+ if (!np)
+ return 0;
+
+ ret = setup_hifcpubiuctrl_regs(np);
+ if (ret)
+ goto out_put;
+
+ ret = mcp_write_pairing_set();
+ if (ret) {
+ pr_err("MCP: Unable to disable write pairing!\n");
+ goto out_put;
+ }
+
+ a72_b53_rac_enable_all(np);
+ mcp_a72_b53_set();
+#ifdef CONFIG_PM_SLEEP
+ register_syscore_ops(&brcmstb_cpu_credit_syscore_ops);
+#endif
+ ret = 0;
+out_put:
+ of_node_put(np);
+ return ret;
+}
+early_initcall(brcmstb_biuctrl_init);
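A worked reading of the credit programming in mcp_a72_b53_set() above: MCP interface i keeps its read credits in the nibble at bit i*8 and its write credits in the nibble at bit i*8+4, so after the loop the low 24 bits of CPU_CREDIT_REG hold 0x888888 (8 read and 8 write credits on each of the three interfaces), with the remaining bits preserved.

    /* e.g. i = 1: read credits in bits 11..8 (shift 8), write credits in bits 15..12 (shift 12) */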
diff --git a/drivers/soc/bcm/brcmstb/common.c b/drivers/soc/bcm/brcmstb/common.c
new file mode 100644
index 0000000000..2a010881f4
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/common.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright © 2014 NVIDIA Corporation
+ * Copyright © 2015 Broadcom Corporation
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/soc/brcmstb/brcmstb.h>
+#include <linux/sys_soc.h>
+
+static u32 family_id;
+static u32 product_id;
+
+u32 brcmstb_get_family_id(void)
+{
+ return family_id;
+}
+EXPORT_SYMBOL(brcmstb_get_family_id);
+
+u32 brcmstb_get_product_id(void)
+{
+ return product_id;
+}
+EXPORT_SYMBOL(brcmstb_get_product_id);
+
+static const struct of_device_id sun_top_ctrl_match[] = {
+ { .compatible = "brcm,bcm7125-sun-top-ctrl", },
+ { .compatible = "brcm,bcm7346-sun-top-ctrl", },
+ { .compatible = "brcm,bcm7358-sun-top-ctrl", },
+ { .compatible = "brcm,bcm7360-sun-top-ctrl", },
+ { .compatible = "brcm,bcm7362-sun-top-ctrl", },
+ { .compatible = "brcm,bcm7420-sun-top-ctrl", },
+ { .compatible = "brcm,bcm7425-sun-top-ctrl", },
+ { .compatible = "brcm,bcm7429-sun-top-ctrl", },
+ { .compatible = "brcm,bcm7435-sun-top-ctrl", },
+ { .compatible = "brcm,brcmstb-sun-top-ctrl", },
+ { }
+};
+
+static int __init brcmstb_soc_device_early_init(void)
+{
+ struct device_node *sun_top_ctrl;
+ void __iomem *sun_top_ctrl_base;
+ int ret = 0;
+
+ /* We could be on a multi-platform kernel, don't make this fatal but
+ * bail out early
+ */
+ sun_top_ctrl = of_find_matching_node(NULL, sun_top_ctrl_match);
+ if (!sun_top_ctrl)
+ return ret;
+
+ sun_top_ctrl_base = of_iomap(sun_top_ctrl, 0);
+ if (!sun_top_ctrl_base) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ family_id = readl(sun_top_ctrl_base);
+ product_id = readl(sun_top_ctrl_base + 0x4);
+ iounmap(sun_top_ctrl_base);
+out:
+ of_node_put(sun_top_ctrl);
+ return ret;
+}
+early_initcall(brcmstb_soc_device_early_init);
+
+static int __init brcmstb_soc_device_init(void)
+{
+ struct soc_device_attribute *soc_dev_attr;
+ struct device_node *sun_top_ctrl;
+ struct soc_device *soc_dev;
+ int ret = 0;
+
+ /* We could be on a multi-platform kernel, don't make this fatal but
+ * bail out early
+ */
+ sun_top_ctrl = of_find_matching_node(NULL, sun_top_ctrl_match);
+ if (!sun_top_ctrl)
+ return ret;
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ soc_dev_attr->family = kasprintf(GFP_KERNEL, "%x",
+ family_id >> 28 ?
+ family_id >> 16 : family_id >> 8);
+ soc_dev_attr->soc_id = kasprintf(GFP_KERNEL, "%x",
+ product_id >> 28 ?
+ product_id >> 16 : product_id >> 8);
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%c%d",
+ ((product_id & 0xf0) >> 4) + 'A',
+ product_id & 0xf);
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ kfree(soc_dev_attr->family);
+ kfree(soc_dev_attr->soc_id);
+ kfree(soc_dev_attr->revision);
+ kfree(soc_dev_attr);
+ ret = -ENOMEM;
+ }
+out:
+ of_node_put(sun_top_ctrl);
+ return ret;
+}
+arch_initcall(brcmstb_soc_device_init);
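To make the decoding in brcmstb_soc_device_init() concrete, with hypothetical register values used purely for illustration:

    family_id  = 0x72780010;  /* top nibble non-zero, so family = "7278" (family_id >> 16) */
    product_id = 0x72780010;  /* soc_id = "7278"; revision = "B0": ((0x10 & 0xf0) >> 4) + 'A' = 'B', 0x10 & 0xf = 0 */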
diff --git a/drivers/soc/bcm/brcmstb/pm/Makefile b/drivers/soc/bcm/brcmstb/pm/Makefile
new file mode 100644
index 0000000000..9133a9ee07
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/pm/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_BMIPS_GENERIC) += s2-mips.o s3-mips.o pm-mips.o
diff --git a/drivers/soc/bcm/brcmstb/pm/pm-mips.c b/drivers/soc/bcm/brcmstb/pm/pm-mips.c
new file mode 100644
index 0000000000..4dfb5a8503
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/pm/pm-mips.c
@@ -0,0 +1,456 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * MIPS-specific support for Broadcom STB S2/S3/S5 power management
+ *
+ * Copyright (C) 2016-2017 Broadcom
+ */
+
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+#include <linux/suspend.h>
+#include <asm/bmips.h>
+#include <asm/tlbflush.h>
+
+#include "pm.h"
+
+#define S2_NUM_PARAMS 6
+#define MAX_NUM_MEMC 3
+
+/* S3 constants */
+#define MAX_GP_REGS 16
+#define MAX_CP0_REGS 32
+#define NUM_MEMC_CLIENTS 128
+#define AON_CTRL_RAM_SIZE 128
+#define BRCMSTB_S3_MAGIC 0x5AFEB007
+
+#define CLEAR_RESET_MASK 0x01
+
+/* Index each CP0 register that needs to be saved */
+#define CONTEXT 0
+#define USER_LOCAL 1
+#define PGMK 2
+#define HWRENA 3
+#define COMPARE 4
+#define STATUS 5
+#define CONFIG 6
+#define MODE 7
+#define EDSP 8
+#define BOOT_VEC 9
+#define EBASE 10
+
+struct brcmstb_memc {
+ void __iomem *ddr_phy_base;
+ void __iomem *arb_base;
+};
+
+struct brcmstb_pm_control {
+ void __iomem *aon_ctrl_base;
+ void __iomem *aon_sram_base;
+ void __iomem *timers_base;
+ struct brcmstb_memc memcs[MAX_NUM_MEMC];
+ int num_memc;
+};
+
+struct brcm_pm_s3_context {
+ u32 cp0_regs[MAX_CP0_REGS];
+ u32 memc0_rts[NUM_MEMC_CLIENTS];
+ u32 sc_boot_vec;
+};
+
+struct brcmstb_mem_transfer;
+
+struct brcmstb_mem_transfer {
+ struct brcmstb_mem_transfer *next;
+ void *src;
+ void *dst;
+ dma_addr_t pa_src;
+ dma_addr_t pa_dst;
+ u32 len;
+ u8 key;
+ u8 mode;
+ u8 src_remapped;
+ u8 dst_remapped;
+ u8 src_dst_remapped;
+};
+
+#define AON_SAVE_SRAM(base, idx, val) \
+ __raw_writel(val, base + (idx << 2))
+
+/* Used for saving registers in asm */
+u32 gp_regs[MAX_GP_REGS];
+
+#define BSP_CLOCK_STOP 0x00
+#define PM_INITIATE 0x01
+
+static struct brcmstb_pm_control ctrl;
+
+static void brcm_pm_save_cp0_context(struct brcm_pm_s3_context *ctx)
+{
+ /* Generic MIPS */
+ ctx->cp0_regs[CONTEXT] = read_c0_context();
+ ctx->cp0_regs[USER_LOCAL] = read_c0_userlocal();
+ ctx->cp0_regs[PGMK] = read_c0_pagemask();
+ ctx->cp0_regs[HWRENA] = read_c0_cache();
+ ctx->cp0_regs[COMPARE] = read_c0_compare();
+ ctx->cp0_regs[STATUS] = read_c0_status();
+
+ /* Broadcom specific */
+ ctx->cp0_regs[CONFIG] = read_c0_brcm_config();
+ ctx->cp0_regs[MODE] = read_c0_brcm_mode();
+ ctx->cp0_regs[EDSP] = read_c0_brcm_edsp();
+ ctx->cp0_regs[BOOT_VEC] = read_c0_brcm_bootvec();
+ ctx->cp0_regs[EBASE] = read_c0_ebase();
+
+ ctx->sc_boot_vec = bmips_read_zscm_reg(0xa0);
+}
+
+static void brcm_pm_restore_cp0_context(struct brcm_pm_s3_context *ctx)
+{
+ /* Restore cp0 state */
+ bmips_write_zscm_reg(0xa0, ctx->sc_boot_vec);
+
+ /* Generic MIPS */
+ write_c0_context(ctx->cp0_regs[CONTEXT]);
+ write_c0_userlocal(ctx->cp0_regs[USER_LOCAL]);
+ write_c0_pagemask(ctx->cp0_regs[PGMK]);
+ write_c0_cache(ctx->cp0_regs[HWRENA]);
+ write_c0_compare(ctx->cp0_regs[COMPARE]);
+ write_c0_status(ctx->cp0_regs[STATUS]);
+
+ /* Broadcom specific */
+ write_c0_brcm_config(ctx->cp0_regs[CONFIG]);
+ write_c0_brcm_mode(ctx->cp0_regs[MODE]);
+ write_c0_brcm_edsp(ctx->cp0_regs[EDSP]);
+ write_c0_brcm_bootvec(ctx->cp0_regs[BOOT_VEC]);
+ write_c0_ebase(ctx->cp0_regs[EBASE]);
+}
+
+static void brcmstb_pm_handshake(void)
+{
+ void __iomem *base = ctrl.aon_ctrl_base;
+ u32 tmp;
+
+ /* BSP power handshake, v1 */
+ tmp = __raw_readl(base + AON_CTRL_HOST_MISC_CMDS);
+ tmp &= ~1UL;
+ __raw_writel(tmp, base + AON_CTRL_HOST_MISC_CMDS);
+ (void)__raw_readl(base + AON_CTRL_HOST_MISC_CMDS);
+
+ __raw_writel(0, base + AON_CTRL_PM_INITIATE);
+ (void)__raw_readl(base + AON_CTRL_PM_INITIATE);
+ __raw_writel(BSP_CLOCK_STOP | PM_INITIATE,
+ base + AON_CTRL_PM_INITIATE);
+ /*
+ * HACK: BSP may have internal race on the CLOCK_STOP command.
+ * Avoid touching the BSP for a few milliseconds.
+ */
+ mdelay(3);
+}
+
+static void brcmstb_pm_s5(void)
+{
+ void __iomem *base = ctrl.aon_ctrl_base;
+
+ brcmstb_pm_handshake();
+
+ /* Clear magic s3 warm-boot value */
+ AON_SAVE_SRAM(ctrl.aon_sram_base, 0, 0);
+
+ /* Set the countdown */
+ __raw_writel(0x10, base + AON_CTRL_PM_CPU_WAIT_COUNT);
+ (void)__raw_readl(base + AON_CTRL_PM_CPU_WAIT_COUNT);
+
+ /* Prepare to S5 cold boot */
+ __raw_writel(PM_COLD_CONFIG, base + AON_CTRL_PM_CTRL);
+ (void)__raw_readl(base + AON_CTRL_PM_CTRL);
+
+ __raw_writel((PM_COLD_CONFIG | PM_PWR_DOWN), base +
+ AON_CTRL_PM_CTRL);
+ (void)__raw_readl(base + AON_CTRL_PM_CTRL);
+
+ __asm__ __volatile__(
+ " wait\n"
+ : : : "memory");
+}
+
+static int brcmstb_pm_s3(void)
+{
+ struct brcm_pm_s3_context s3_context;
+ void __iomem *memc_arb_base;
+ unsigned long flags;
+ u32 tmp;
+ int i;
+
+ /* Prepare for s3 */
+ AON_SAVE_SRAM(ctrl.aon_sram_base, 0, BRCMSTB_S3_MAGIC);
+ AON_SAVE_SRAM(ctrl.aon_sram_base, 1, (u32)&s3_reentry);
+ AON_SAVE_SRAM(ctrl.aon_sram_base, 2, 0);
+
+ /* Clear RESET_HISTORY */
+ tmp = __raw_readl(ctrl.aon_ctrl_base + AON_CTRL_RESET_CTRL);
+ tmp &= ~CLEAR_RESET_MASK;
+ __raw_writel(tmp, ctrl.aon_ctrl_base + AON_CTRL_RESET_CTRL);
+
+ local_irq_save(flags);
+
+ /* Inhibit DDR_RSTb pulse for both MEMCs */
+ for (i = 0; i < ctrl.num_memc; i++) {
+ tmp = __raw_readl(ctrl.memcs[i].ddr_phy_base +
+ DDR40_PHY_CONTROL_REGS_0_STANDBY_CTRL);
+
+ tmp &= ~0x0f;
+ __raw_writel(tmp, ctrl.memcs[i].ddr_phy_base +
+ DDR40_PHY_CONTROL_REGS_0_STANDBY_CTRL);
+ tmp |= (0x05 | BIT(5));
+ __raw_writel(tmp, ctrl.memcs[i].ddr_phy_base +
+ DDR40_PHY_CONTROL_REGS_0_STANDBY_CTRL);
+ }
+
+ /* Save CP0 context */
+ brcm_pm_save_cp0_context(&s3_context);
+
+ /* Save RTS (skip debug register) */
+ memc_arb_base = ctrl.memcs[0].arb_base + 4;
+ for (i = 0; i < NUM_MEMC_CLIENTS; i++) {
+ s3_context.memc0_rts[i] = __raw_readl(memc_arb_base);
+ memc_arb_base += 4;
+ }
+
+ /* Save I/O context */
+ local_flush_tlb_all();
+ _dma_cache_wback_inv(0, ~0);
+
+ brcm_pm_do_s3(ctrl.aon_ctrl_base, current_cpu_data.dcache.linesz);
+
+ /* CPU reconfiguration */
+ local_flush_tlb_all();
+ bmips_cpu_setup();
+ cpumask_clear(&bmips_booted_mask);
+
+ /* Restore RTS (skip debug register) */
+ memc_arb_base = ctrl.memcs[0].arb_base + 4;
+ for (i = 0; i < NUM_MEMC_CLIENTS; i++) {
+ __raw_writel(s3_context.memc0_rts[i], memc_arb_base);
+ memc_arb_base += 4;
+ }
+
+ /* restore CP0 context */
+ brcm_pm_restore_cp0_context(&s3_context);
+
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+static int brcmstb_pm_s2(void)
+{
+ /*
+ * We need to pass 6 arguments to an assembly function. Let's avoid the
+ * stack and pass the arguments in an explicit array of 4-byte values. The
+ * assembly code assumes all arguments are 4 bytes wide and ordered
+ * like so:
+ *
+ * 0: AON_CTRL base register
+ * 1: DDR_PHY base register
+ * 2: TIMERS base register
+ * 3: I-Cache line size
+ * 4: Restart vector address
+ * 5: Restart vector size
+ */
+ u32 s2_params[6];
+
+ /* Prepare s2 parameters */
+ s2_params[0] = (u32)ctrl.aon_ctrl_base;
+ s2_params[1] = (u32)ctrl.memcs[0].ddr_phy_base;
+ s2_params[2] = (u32)ctrl.timers_base;
+ s2_params[3] = (u32)current_cpu_data.icache.linesz;
+ s2_params[4] = (u32)BMIPS_WARM_RESTART_VEC;
+ s2_params[5] = (u32)(bmips_smp_int_vec_end -
+ bmips_smp_int_vec);
+
+ /* Drop to standby */
+ brcm_pm_do_s2(s2_params);
+
+ return 0;
+}
+
+static int brcmstb_pm_standby(bool deep_standby)
+{
+ brcmstb_pm_handshake();
+
+ /* Send IRQs to BMIPS_WARM_RESTART_VEC */
+ clear_c0_cause(CAUSEF_IV);
+ irq_disable_hazard();
+ set_c0_status(ST0_BEV);
+ irq_disable_hazard();
+
+ if (deep_standby)
+ brcmstb_pm_s3();
+ else
+ brcmstb_pm_s2();
+
+ /* Send IRQs to normal runtime vectors */
+ clear_c0_status(ST0_BEV);
+ irq_disable_hazard();
+ set_c0_cause(CAUSEF_IV);
+ irq_disable_hazard();
+
+ return 0;
+}
+
+static int brcmstb_pm_enter(suspend_state_t state)
+{
+ int ret = -EINVAL;
+
+ switch (state) {
+ case PM_SUSPEND_STANDBY:
+ ret = brcmstb_pm_standby(false);
+ break;
+ case PM_SUSPEND_MEM:
+ ret = brcmstb_pm_standby(true);
+ break;
+ }
+
+ return ret;
+}
+
+static int brcmstb_pm_valid(suspend_state_t state)
+{
+ switch (state) {
+ case PM_SUSPEND_STANDBY:
+ return true;
+ case PM_SUSPEND_MEM:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct platform_suspend_ops brcmstb_pm_ops = {
+ .enter = brcmstb_pm_enter,
+ .valid = brcmstb_pm_valid,
+};
+
+static const struct of_device_id aon_ctrl_dt_ids[] = {
+ { .compatible = "brcm,brcmstb-aon-ctrl" },
+ { /* sentinel */ }
+};
+
+static const struct of_device_id ddr_phy_dt_ids[] = {
+ { .compatible = "brcm,brcmstb-ddr-phy" },
+ { /* sentinel */ }
+};
+
+static const struct of_device_id arb_dt_ids[] = {
+ { .compatible = "brcm,brcmstb-memc-arb" },
+ { /* sentinel */ }
+};
+
+static const struct of_device_id timers_ids[] = {
+ { .compatible = "brcm,brcmstb-timers" },
+ { /* sentinel */ }
+};
+
+static inline void __iomem *brcmstb_ioremap_node(struct device_node *dn,
+ int index)
+{
+ return of_io_request_and_map(dn, index, dn->full_name);
+}
+
+static void __iomem *brcmstb_ioremap_match(const struct of_device_id *matches,
+ int index, const void **ofdata)
+{
+ struct device_node *dn;
+ const struct of_device_id *match;
+
+ dn = of_find_matching_node_and_match(NULL, matches, &match);
+ if (!dn)
+ return ERR_PTR(-EINVAL);
+
+ if (ofdata)
+ *ofdata = match->data;
+
+ return brcmstb_ioremap_node(dn, index);
+}
+
+static int brcmstb_pm_init(void)
+{
+ struct device_node *dn;
+ void __iomem *base;
+ int i;
+
+ /* AON ctrl registers */
+ base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 0, NULL);
+ if (IS_ERR(base)) {
+ pr_err("error mapping AON_CTRL\n");
+ goto aon_err;
+ }
+ ctrl.aon_ctrl_base = base;
+
+ /* AON SRAM registers */
+ base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 1, NULL);
+ if (IS_ERR(base)) {
+ pr_err("error mapping AON_SRAM\n");
+ goto sram_err;
+ }
+ ctrl.aon_sram_base = base;
+
+ ctrl.num_memc = 0;
+ /* Map MEMC DDR PHY registers */
+ for_each_matching_node(dn, ddr_phy_dt_ids) {
+ i = ctrl.num_memc;
+ if (i >= MAX_NUM_MEMC) {
+ pr_warn("Too many MEMCs (max %d)\n", MAX_NUM_MEMC);
+ of_node_put(dn);
+ break;
+ }
+ base = brcmstb_ioremap_node(dn, 0);
+ if (IS_ERR(base)) {
+ of_node_put(dn);
+ goto ddr_err;
+ }
+
+ ctrl.memcs[i].ddr_phy_base = base;
+ ctrl.num_memc++;
+ }
+
+ /* MEMC ARB registers */
+ base = brcmstb_ioremap_match(arb_dt_ids, 0, NULL);
+ if (IS_ERR(base)) {
+ pr_err("error mapping MEMC ARB\n");
+ goto ddr_err;
+ }
+ ctrl.memcs[0].arb_base = base;
+
+ /* Timer registers */
+ base = brcmstb_ioremap_match(timers_ids, 0, NULL);
+ if (IS_ERR(base)) {
+ pr_err("error mapping timers\n");
+ goto tmr_err;
+ }
+ ctrl.timers_base = base;
+
+ /* s3 cold boot aka s5 */
+ pm_power_off = brcmstb_pm_s5;
+
+ suspend_set_ops(&brcmstb_pm_ops);
+
+ return 0;
+
+tmr_err:
+ iounmap(ctrl.memcs[0].arb_base);
+ddr_err:
+ for (i = 0; i < ctrl.num_memc; i++)
+ iounmap(ctrl.memcs[i].ddr_phy_base);
+
+ iounmap(ctrl.aon_sram_base);
+sram_err:
+ iounmap(ctrl.aon_ctrl_base);
+aon_err:
+ return PTR_ERR(base);
+}
+arch_initcall(brcmstb_pm_init);
diff --git a/drivers/soc/bcm/brcmstb/pm/pm.h b/drivers/soc/bcm/brcmstb/pm/pm.h
new file mode 100644
index 0000000000..94a380470a
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/pm/pm.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Definitions for Broadcom STB power management / Always ON (AON) block
+ *
+ * Copyright © 2016-2017 Broadcom
+ */
+
+#ifndef __BRCMSTB_PM_H__
+#define __BRCMSTB_PM_H__
+
+#define AON_CTRL_RESET_CTRL 0x00
+#define AON_CTRL_PM_CTRL 0x04
+#define AON_CTRL_PM_STATUS 0x08
+#define AON_CTRL_PM_CPU_WAIT_COUNT 0x10
+#define AON_CTRL_PM_INITIATE 0x88
+#define AON_CTRL_HOST_MISC_CMDS 0x8c
+#define AON_CTRL_SYSTEM_DATA_RAM_OFS 0x200
+
+/* MIPS PM constants */
+/* MEMC0 offsets */
+#define DDR40_PHY_CONTROL_REGS_0_PLL_STATUS 0x10
+#define DDR40_PHY_CONTROL_REGS_0_STANDBY_CTRL 0xa4
+
+/* TIMER offsets */
+#define TIMER_TIMER1_CTRL 0x0c
+#define TIMER_TIMER1_STAT 0x1c
+
+/* TIMER defines */
+#define RESET_TIMER 0x0
+#define START_TIMER 0xbfffffff
+#define TIMER_MASK 0x3fffffff
+
+/* PM_CTRL bitfield (Method #0) */
+#define PM_FAST_PWRDOWN (1 << 6)
+#define PM_WARM_BOOT (1 << 5)
+#define PM_DEEP_STANDBY (1 << 4)
+#define PM_CPU_PWR (1 << 3)
+#define PM_USE_CPU_RDY (1 << 2)
+#define PM_PLL_PWRDOWN (1 << 1)
+#define PM_PWR_DOWN (1 << 0)
+
+/* PM_CTRL bitfield (Method #1) */
+#define PM_DPHY_STANDBY_CLEAR (1 << 20)
+#define PM_MIN_S3_WIDTH_TIMER_BYPASS (1 << 7)
+
+#define PM_S2_COMMAND (PM_PLL_PWRDOWN | PM_USE_CPU_RDY | PM_PWR_DOWN)
+
+/* Method 0 bitmasks */
+#define PM_COLD_CONFIG (PM_PLL_PWRDOWN | PM_DEEP_STANDBY)
+#define PM_WARM_CONFIG (PM_COLD_CONFIG | PM_USE_CPU_RDY | PM_WARM_BOOT)
+
+/* Method 1 bitmask */
+#define M1_PM_WARM_CONFIG (PM_DPHY_STANDBY_CLEAR | \
+ PM_MIN_S3_WIDTH_TIMER_BYPASS | \
+ PM_WARM_BOOT | PM_DEEP_STANDBY | \
+ PM_PLL_PWRDOWN | PM_PWR_DOWN)
+
+#define M1_PM_COLD_CONFIG (PM_DPHY_STANDBY_CLEAR | \
+ PM_MIN_S3_WIDTH_TIMER_BYPASS | \
+ PM_DEEP_STANDBY | \
+ PM_PLL_PWRDOWN | PM_PWR_DOWN)
+
+#ifndef __ASSEMBLY__
+
+#ifndef CONFIG_MIPS
+extern const unsigned long brcmstb_pm_do_s2_sz;
+extern asmlinkage int brcmstb_pm_do_s2(void __iomem *aon_ctrl_base,
+ void __iomem *ddr_phy_pll_status);
+#else
+/* s2 asm */
+extern asmlinkage int brcm_pm_do_s2(u32 *s2_params);
+
+/* s3 asm */
+extern asmlinkage int brcm_pm_do_s3(void __iomem *aon_ctrl_base,
+ int dcache_linesz);
+extern int s3_reentry;
+#endif /* CONFIG_MIPS */
+
+#endif
+
+#endif /* __BRCMSTB_PM_H__ */
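For reference, the Method #0 composites defined above expand to small constants, derived directly from the single-bit definitions in this header:

    PM_S2_COMMAND  = PM_PLL_PWRDOWN | PM_USE_CPU_RDY | PM_PWR_DOWN   = 0x02 | 0x04 | 0x01 = 0x07
    PM_COLD_CONFIG = PM_PLL_PWRDOWN | PM_DEEP_STANDBY                = 0x02 | 0x10        = 0x12
    PM_WARM_CONFIG = PM_COLD_CONFIG | PM_USE_CPU_RDY | PM_WARM_BOOT  = 0x12 | 0x04 | 0x20 = 0x36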
diff --git a/drivers/soc/bcm/brcmstb/pm/s2-mips.S b/drivers/soc/bcm/brcmstb/pm/s2-mips.S
new file mode 100644
index 0000000000..2a26a94eb9
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/pm/s2-mips.S
@@ -0,0 +1,192 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2016 Broadcom Corporation
+ */
+
+#include <asm/asm.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+
+#include "pm.h"
+
+ .text
+ .set noreorder
+ .align 5
+
+/*
+ * a0: u32 params array
+ */
+LEAF(brcm_pm_do_s2)
+
+ subu sp, 64
+ sw ra, 0(sp)
+ sw s0, 4(sp)
+ sw s1, 8(sp)
+ sw s2, 12(sp)
+ sw s3, 16(sp)
+ sw s4, 20(sp)
+ sw s5, 24(sp)
+ sw s6, 28(sp)
+ sw s7, 32(sp)
+
+ /*
+ * Dereference the params array
+ * s0: AON_CTRL base register
+ * s1: DDR_PHY base register
+ * s2: TIMERS base register
+ * s3: I-Cache line size
+ * s4: Restart vector address
+ * s5: Restart vector size
+ */
+ move t0, a0
+
+ lw s0, 0(t0)
+ lw s1, 4(t0)
+ lw s2, 8(t0)
+ lw s3, 12(t0)
+ lw s4, 16(t0)
+ lw s5, 20(t0)
+
+ /* Lock this asm section into the I-cache */
+ addiu t1, s3, -1
+ not t1
+
+ la t0, brcm_pm_do_s2
+ and t0, t1
+
+ la t2, asm_end
+ and t2, t1
+
+1: cache 0x1c, 0(t0)
+ bne t0, t2, 1b
+ addu t0, s3
+
+ /* Lock the interrupt vector into the I-cache */
+ move t0, zero
+
+2: move t1, s4
+ cache 0x1c, 0(t1)
+ addu t1, s3
+ addu t0, s3
+ ble t0, s5, 2b
+ nop
+
+ sync
+
+ /* Power down request */
+ li t0, PM_S2_COMMAND
+ sw zero, AON_CTRL_PM_CTRL(s0)
+ lw zero, AON_CTRL_PM_CTRL(s0)
+ sw t0, AON_CTRL_PM_CTRL(s0)
+ lw t0, AON_CTRL_PM_CTRL(s0)
+
+ /* Enable CP0 interrupt 2 and wait for interrupt */
+ mfc0 t0, CP0_STATUS
+ /* Save cp0 sr for restoring later */
+ move s6, t0
+
+ li t1, ~(ST0_IM | ST0_IE)
+ and t0, t1
+ ori t0, STATUSF_IP2
+ mtc0 t0, CP0_STATUS
+ nop
+ nop
+ nop
+ ori t0, ST0_IE
+ mtc0 t0, CP0_STATUS
+
+ /* Wait for interrupt */
+ wait
+ nop
+
+ /* Wait for memc0 */
+1: lw t0, DDR40_PHY_CONTROL_REGS_0_PLL_STATUS(s1)
+ andi t0, 1
+ beqz t0, 1b
+ nop
+
+ /* 1ms delay needed for stable recovery */
+ /* Use TIMER1 to count 1 ms */
+ li t0, RESET_TIMER
+ sw t0, TIMER_TIMER1_CTRL(s2)
+ lw t0, TIMER_TIMER1_CTRL(s2)
+
+ li t0, START_TIMER
+ sw t0, TIMER_TIMER1_CTRL(s2)
+ lw t0, TIMER_TIMER1_CTRL(s2)
+
+ /* Prepare delay */
+ li t0, TIMER_MASK
+ lw t1, TIMER_TIMER1_STAT(s2)
+ and t1, t0
+ /* 1ms delay */
+ addi t1, 27000
+
+ /* Wait for the timer value to exceed t1 */
+1: lw t0, TIMER_TIMER1_STAT(s2)
+ sgtu t2, t1, t0
+ bnez t2, 1b
+ nop
+
+ /* Power back up */
+ li t1, 1
+ sw t1, AON_CTRL_HOST_MISC_CMDS(s0)
+ lw t1, AON_CTRL_HOST_MISC_CMDS(s0)
+
+ sw zero, AON_CTRL_PM_CTRL(s0)
+ lw zero, AON_CTRL_PM_CTRL(s0)
+
+ /* Unlock I-cache */
+ addiu t1, s3, -1
+ not t1
+
+ la t0, brcm_pm_do_s2
+ and t0, t1
+
+ la t2, asm_end
+ and t2, t1
+
+1: cache 0x00, 0(t0)
+ bne t0, t2, 1b
+ addu t0, s3
+
+ /* Unlock interrupt vector */
+ move t0, zero
+
+2: move t1, s4
+ cache 0x00, 0(t1)
+ addu t1, s3
+ addu t0, s3
+ ble t0, s5, 2b
+ nop
+
+ /* Restore cp0 sr */
+ sync
+ nop
+ mtc0 s6, CP0_STATUS
+ nop
+
+ /* Set return value to success */
+ li v0, 0
+
+ /* Return to caller */
+ lw s7, 32(sp)
+ lw s6, 28(sp)
+ lw s5, 24(sp)
+ lw s4, 20(sp)
+ lw s3, 16(sp)
+ lw s2, 12(sp)
+ lw s1, 8(sp)
+ lw s0, 4(sp)
+ lw ra, 0(sp)
+ addiu sp, 64
+
+ jr ra
+ nop
+END(brcm_pm_do_s2)
+
+ .globl asm_end
+asm_end:
+ nop
+
diff --git a/drivers/soc/bcm/brcmstb/pm/s3-mips.S b/drivers/soc/bcm/brcmstb/pm/s3-mips.S
new file mode 100644
index 0000000000..ecfcfd34c2
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/pm/s3-mips.S
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2016 Broadcom Corporation
+ */
+
+#include <asm/asm.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/bmips.h>
+
+#include "pm.h"
+
+ .text
+ .set noreorder
+ .align 5
+ .global s3_reentry
+
+/*
+ * a0: AON_CTRL base register
+ * a1: D-Cache line size
+ */
+LEAF(brcm_pm_do_s3)
+
+ /* Get the address of s3_context */
+ la t0, gp_regs
+ sw ra, 0(t0)
+ sw s0, 4(t0)
+ sw s1, 8(t0)
+ sw s2, 12(t0)
+ sw s3, 16(t0)
+ sw s4, 20(t0)
+ sw s5, 24(t0)
+ sw s6, 28(t0)
+ sw s7, 32(t0)
+ sw gp, 36(t0)
+ sw sp, 40(t0)
+ sw fp, 44(t0)
+
+ /* Save CP0 Status */
+ mfc0 t1, CP0_STATUS
+ sw t1, 48(t0)
+
+ /* Write-back gp registers - cache will be gone */
+ addiu t1, a1, -1
+ not t1
+ and t0, t1
+
+ /* Flush at least 64 bytes */
+ addiu t2, t0, 64
+ and t2, t1
+
+1: cache 0x17, 0(t0)
+ bne t0, t2, 1b
+ addu t0, a1
+
+ /* Drop to deep standby */
+ li t1, PM_WARM_CONFIG
+ sw zero, AON_CTRL_PM_CTRL(a0)
+ lw zero, AON_CTRL_PM_CTRL(a0)
+ sw t1, AON_CTRL_PM_CTRL(a0)
+ lw t1, AON_CTRL_PM_CTRL(a0)
+
+ li t1, (PM_WARM_CONFIG | PM_PWR_DOWN)
+ sw t1, AON_CTRL_PM_CTRL(a0)
+ lw t1, AON_CTRL_PM_CTRL(a0)
+
+ /* Enable CP0 interrupt 2 and wait for interrupt */
+ mfc0 t0, CP0_STATUS
+
+ li t1, ~(ST0_IM | ST0_IE)
+ and t0, t1
+ ori t0, STATUSF_IP2
+ mtc0 t0, CP0_STATUS
+ nop
+ nop
+ nop
+ ori t0, ST0_IE
+ mtc0 t0, CP0_STATUS
+
+ /* Wait for interrupt */
+ wait
+ nop
+
+s3_reentry:
+
+ /* Clear call/return stack */
+ li t0, (0x06 << 16)
+ mtc0 t0, $22, 2
+ ssnop
+ ssnop
+ ssnop
+
+ /* Clear jump target buffer */
+ li t0, (0x04 << 16)
+ mtc0 t0, $22, 2
+ ssnop
+ ssnop
+ ssnop
+
+ sync
+ nop
+
+ /* Setup mmu defaults */
+ mtc0 zero, CP0_WIRED
+ mtc0 zero, CP0_ENTRYHI
+ li k0, PM_DEFAULT_MASK
+ mtc0 k0, CP0_PAGEMASK
+
+ li sp, BMIPS_WARM_RESTART_VEC
+ la k0, plat_wired_tlb_setup
+ jalr k0
+ nop
+
+ /* Restore general purpose registers */
+ la t0, gp_regs
+ lw fp, 44(t0)
+ lw sp, 40(t0)
+ lw gp, 36(t0)
+ lw s7, 32(t0)
+ lw s6, 28(t0)
+ lw s5, 24(t0)
+ lw s4, 20(t0)
+ lw s3, 16(t0)
+ lw s2, 12(t0)
+ lw s1, 8(t0)
+ lw s0, 4(t0)
+ lw ra, 0(t0)
+
+ /* Restore CP0 status */
+ lw t1, 48(t0)
+ mtc0 t1, CP0_STATUS
+
+ /* Return to caller */
+ li v0, 0
+ jr ra
+ nop
+
+END(brcm_pm_do_s3)
diff --git a/drivers/soc/canaan/Kconfig b/drivers/soc/canaan/Kconfig
new file mode 100644
index 0000000000..43ced2bf84
--- /dev/null
+++ b/drivers/soc/canaan/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config SOC_K210_SYSCTL
+ bool "Canaan Kendryte K210 SoC system controller"
+ depends on RISCV && SOC_CANAAN && OF
+ depends on COMMON_CLK_K210
+ default SOC_CANAAN
+ select PM
+ select MFD_SYSCON
+ help
+ Canaan Kendryte K210 SoC system controller driver.
diff --git a/drivers/soc/canaan/Makefile b/drivers/soc/canaan/Makefile
new file mode 100644
index 0000000000..570280ad79
--- /dev/null
+++ b/drivers/soc/canaan/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_SOC_K210_SYSCTL) += k210-sysctl.o
diff --git a/drivers/soc/canaan/k210-sysctl.c b/drivers/soc/canaan/k210-sysctl.c
new file mode 100644
index 0000000000..27a346c406
--- /dev/null
+++ b/drivers/soc/canaan/k210-sysctl.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2019 Christoph Hellwig.
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ */
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/clk.h>
+#include <asm/soc.h>
+
+#include <soc/canaan/k210-sysctl.h>
+
+static int k210_sysctl_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct clk *pclk;
+ int ret;
+
+ dev_info(dev, "K210 system controller\n");
+
+ /* Get power bus clock */
+ pclk = devm_clk_get(dev, NULL);
+ if (IS_ERR(pclk))
+ return dev_err_probe(dev, PTR_ERR(pclk),
+ "Get bus clock failed\n");
+
+ ret = clk_prepare_enable(pclk);
+ if (ret) {
+ dev_err(dev, "Enable bus clock failed\n");
+ return ret;
+ }
+
+ /* Populate children */
+ ret = devm_of_platform_populate(dev);
+ if (ret)
+ dev_err(dev, "Populate platform failed %d\n", ret);
+
+ return ret;
+}
+
+static const struct of_device_id k210_sysctl_of_match[] = {
+ { .compatible = "canaan,k210-sysctl", },
+ { /* sentinel */ },
+};
+
+static struct platform_driver k210_sysctl_driver = {
+ .driver = {
+ .name = "k210-sysctl",
+ .of_match_table = k210_sysctl_of_match,
+ },
+ .probe = k210_sysctl_probe,
+};
+builtin_platform_driver(k210_sysctl_driver);
+
+/*
+ * System controller registers base address and size.
+ */
+#define K210_SYSCTL_BASE_ADDR 0x50440000ULL
+#define K210_SYSCTL_BASE_SIZE 0x1000
+
+/*
+ * This needs to be called very early during initialization, given that
+ * PLL1 needs to be enabled to be able to use all SRAM.
+ */
+static void __init k210_soc_early_init(const void *fdt)
+{
+ void __iomem *sysctl_base;
+
+ sysctl_base = ioremap(K210_SYSCTL_BASE_ADDR, K210_SYSCTL_BASE_SIZE);
+ if (!sysctl_base)
+ panic("k210-sysctl: ioremap failed");
+
+ k210_clk_early_init(sysctl_base);
+
+ iounmap(sysctl_base);
+}
+SOC_EARLY_INIT_DECLARE(k210_soc, "canaan,kendryte-k210", k210_soc_early_init);
diff --git a/drivers/soc/dove/Makefile b/drivers/soc/dove/Makefile
new file mode 100644
index 0000000000..daf4549ec7
--- /dev/null
+++ b/drivers/soc/dove/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y += pmu.o
diff --git a/drivers/soc/dove/pmu.c b/drivers/soc/dove/pmu.c
new file mode 100644
index 0000000000..ffc5311c0e
--- /dev/null
+++ b/drivers/soc/dove/pmu.c
@@ -0,0 +1,455 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Marvell Dove PMU support
+ */
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/reset.h>
+#include <linux/reset-controller.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/soc/dove/pmu.h>
+#include <linux/spinlock.h>
+
+#define NR_PMU_IRQS 7
+
+#define PMC_SW_RST 0x30
+#define PMC_IRQ_CAUSE 0x50
+#define PMC_IRQ_MASK 0x54
+
+#define PMU_PWR 0x10
+#define PMU_ISO 0x58
+
+struct pmu_data {
+ spinlock_t lock;
+ struct device_node *of_node;
+ void __iomem *pmc_base;
+ void __iomem *pmu_base;
+ struct irq_chip_generic *irq_gc;
+ struct irq_domain *irq_domain;
+#ifdef CONFIG_RESET_CONTROLLER
+ struct reset_controller_dev reset;
+#endif
+};
+
+/*
+ * The PMU contains a register to reset various subsystems within the
+ * SoC. Export this as a reset controller.
+ */
+#ifdef CONFIG_RESET_CONTROLLER
+#define rcdev_to_pmu(rcdev) container_of(rcdev, struct pmu_data, reset)
+
+static int pmu_reset_reset(struct reset_controller_dev *rc, unsigned long id)
+{
+ struct pmu_data *pmu = rcdev_to_pmu(rc);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&pmu->lock, flags);
+ val = readl_relaxed(pmu->pmc_base + PMC_SW_RST);
+ writel_relaxed(val & ~BIT(id), pmu->pmc_base + PMC_SW_RST);
+ writel_relaxed(val | BIT(id), pmu->pmc_base + PMC_SW_RST);
+ spin_unlock_irqrestore(&pmu->lock, flags);
+
+ return 0;
+}
+
+static int pmu_reset_assert(struct reset_controller_dev *rc, unsigned long id)
+{
+ struct pmu_data *pmu = rcdev_to_pmu(rc);
+ unsigned long flags;
+ u32 val = ~BIT(id);
+
+ spin_lock_irqsave(&pmu->lock, flags);
+ val &= readl_relaxed(pmu->pmc_base + PMC_SW_RST);
+ writel_relaxed(val, pmu->pmc_base + PMC_SW_RST);
+ spin_unlock_irqrestore(&pmu->lock, flags);
+
+ return 0;
+}
+
+static int pmu_reset_deassert(struct reset_controller_dev *rc, unsigned long id)
+{
+ struct pmu_data *pmu = rcdev_to_pmu(rc);
+ unsigned long flags;
+ u32 val = BIT(id);
+
+ spin_lock_irqsave(&pmu->lock, flags);
+ val |= readl_relaxed(pmu->pmc_base + PMC_SW_RST);
+ writel_relaxed(val, pmu->pmc_base + PMC_SW_RST);
+ spin_unlock_irqrestore(&pmu->lock, flags);
+
+ return 0;
+}
+
+static const struct reset_control_ops pmu_reset_ops = {
+ .reset = pmu_reset_reset,
+ .assert = pmu_reset_assert,
+ .deassert = pmu_reset_deassert,
+};
+
+static struct reset_controller_dev pmu_reset __initdata = {
+ .ops = &pmu_reset_ops,
+ .owner = THIS_MODULE,
+ .nr_resets = 32,
+};
+
+static void __init pmu_reset_init(struct pmu_data *pmu)
+{
+ int ret;
+
+ pmu->reset = pmu_reset;
+ pmu->reset.of_node = pmu->of_node;
+
+ ret = reset_controller_register(&pmu->reset);
+ if (ret)
+ pr_err("pmu: %s failed: %d\n", "reset_controller_register", ret);
+}
+#else
+static void __init pmu_reset_init(struct pmu_data *pmu)
+{
+}
+#endif
+
+struct pmu_domain {
+ struct pmu_data *pmu;
+ u32 pwr_mask;
+ u32 rst_mask;
+ u32 iso_mask;
+ struct generic_pm_domain base;
+};
+
+#define to_pmu_domain(dom) container_of(dom, struct pmu_domain, base)
+
+/*
+ * This deals with the "old" Marvell sequence of bringing a power domain
+ * down/up, which is: apply power, release reset, disable isolators.
+ *
+ * Later devices apparently use a different sequence: power up, disable
+ * isolators, assert repair signal, enable SRMA clock, enable AXI clock,
+ * enable module clock, deassert reset.
+ *
+ * Note: reading the assembly, it seems that the IO accessors have an
+ * unfortunate side-effect - they cause memory already read into registers
+ * for the if () to be re-read for the bit-set or bit-clear operation.
+ * The code is written to avoid this.
+ */
+static int pmu_domain_power_off(struct generic_pm_domain *domain)
+{
+ struct pmu_domain *pmu_dom = to_pmu_domain(domain);
+ struct pmu_data *pmu = pmu_dom->pmu;
+ unsigned long flags;
+ unsigned int val;
+ void __iomem *pmu_base = pmu->pmu_base;
+ void __iomem *pmc_base = pmu->pmc_base;
+
+ spin_lock_irqsave(&pmu->lock, flags);
+
+ /* Enable isolators */
+ if (pmu_dom->iso_mask) {
+ val = ~pmu_dom->iso_mask;
+ val &= readl_relaxed(pmu_base + PMU_ISO);
+ writel_relaxed(val, pmu_base + PMU_ISO);
+ }
+
+ /* Reset unit */
+ if (pmu_dom->rst_mask) {
+ val = ~pmu_dom->rst_mask;
+ val &= readl_relaxed(pmc_base + PMC_SW_RST);
+ writel_relaxed(val, pmc_base + PMC_SW_RST);
+ }
+
+ /* Power down */
+ val = readl_relaxed(pmu_base + PMU_PWR) | pmu_dom->pwr_mask;
+ writel_relaxed(val, pmu_base + PMU_PWR);
+
+ spin_unlock_irqrestore(&pmu->lock, flags);
+
+ return 0;
+}
+
+static int pmu_domain_power_on(struct generic_pm_domain *domain)
+{
+ struct pmu_domain *pmu_dom = to_pmu_domain(domain);
+ struct pmu_data *pmu = pmu_dom->pmu;
+ unsigned long flags;
+ unsigned int val;
+ void __iomem *pmu_base = pmu->pmu_base;
+ void __iomem *pmc_base = pmu->pmc_base;
+
+ spin_lock_irqsave(&pmu->lock, flags);
+
+ /* Power on */
+ val = ~pmu_dom->pwr_mask & readl_relaxed(pmu_base + PMU_PWR);
+ writel_relaxed(val, pmu_base + PMU_PWR);
+
+ /* Release reset */
+ if (pmu_dom->rst_mask) {
+ val = pmu_dom->rst_mask;
+ val |= readl_relaxed(pmc_base + PMC_SW_RST);
+ writel_relaxed(val, pmc_base + PMC_SW_RST);
+ }
+
+ /* Disable isolators */
+ if (pmu_dom->iso_mask) {
+ val = pmu_dom->iso_mask;
+ val |= readl_relaxed(pmu_base + PMU_ISO);
+ writel_relaxed(val, pmu_base + PMU_ISO);
+ }
+
+ spin_unlock_irqrestore(&pmu->lock, flags);
+
+ return 0;
+}
+
+static void __pmu_domain_register(struct pmu_domain *domain,
+ struct device_node *np)
+{
+ unsigned int val = readl_relaxed(domain->pmu->pmu_base + PMU_PWR);
+
+ domain->base.power_off = pmu_domain_power_off;
+ domain->base.power_on = pmu_domain_power_on;
+
+ pm_genpd_init(&domain->base, NULL, !(val & domain->pwr_mask));
+
+ if (np)
+ of_genpd_add_provider_simple(np, &domain->base);
+}
+
+/* PMU IRQ controller */
+static void pmu_irq_handler(struct irq_desc *desc)
+{
+ struct pmu_data *pmu = irq_desc_get_handler_data(desc);
+ struct irq_chip_generic *gc = pmu->irq_gc;
+ struct irq_domain *domain = pmu->irq_domain;
+ void __iomem *base = gc->reg_base;
+ u32 stat = readl_relaxed(base + PMC_IRQ_CAUSE) & gc->mask_cache;
+ u32 done = ~0;
+
+ if (stat == 0) {
+ handle_bad_irq(desc);
+ return;
+ }
+
+ while (stat) {
+ u32 hwirq = fls(stat) - 1;
+
+ stat &= ~(1 << hwirq);
+ done &= ~(1 << hwirq);
+
+ generic_handle_irq(irq_find_mapping(domain, hwirq));
+ }
+
+ /*
+ * The PMU mask register is not RW0C: it is RW. This means that
+ * the bits take whatever value is written to them; if you write
+ * a '1', you will set the interrupt.
+ *
+ * Unfortunately this means there is NO race free way to clear
+ * these interrupts.
+ *
+ * So, let's structure the code so that the window is as small as
+ * possible.
+ */
+ irq_gc_lock(gc);
+ done &= readl_relaxed(base + PMC_IRQ_CAUSE);
+ writel_relaxed(done, base + PMC_IRQ_CAUSE);
+ irq_gc_unlock(gc);
+}
+
+static int __init dove_init_pmu_irq(struct pmu_data *pmu, int irq)
+{
+ const char *name = "pmu_irq";
+ struct irq_chip_generic *gc;
+ struct irq_domain *domain;
+ int ret;
+
+ /* mask and clear all interrupts */
+ writel(0, pmu->pmc_base + PMC_IRQ_MASK);
+ writel(0, pmu->pmc_base + PMC_IRQ_CAUSE);
+
+ domain = irq_domain_add_linear(pmu->of_node, NR_PMU_IRQS,
+ &irq_generic_chip_ops, NULL);
+ if (!domain) {
+ pr_err("%s: unable to add irq domain\n", name);
+ return -ENOMEM;
+ }
+
+ ret = irq_alloc_domain_generic_chips(domain, NR_PMU_IRQS, 1, name,
+ handle_level_irq,
+ IRQ_NOREQUEST | IRQ_NOPROBE, 0,
+ IRQ_GC_INIT_MASK_CACHE);
+ if (ret) {
+ pr_err("%s: unable to alloc irq domain gc: %d\n", name, ret);
+ irq_domain_remove(domain);
+ return ret;
+ }
+
+ gc = irq_get_domain_generic_chip(domain, 0);
+ gc->reg_base = pmu->pmc_base;
+ gc->chip_types[0].regs.mask = PMC_IRQ_MASK;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
+
+ pmu->irq_domain = domain;
+ pmu->irq_gc = gc;
+
+ irq_set_handler_data(irq, pmu);
+ irq_set_chained_handler(irq, pmu_irq_handler);
+
+ return 0;
+}
+
+int __init dove_init_pmu_legacy(const struct dove_pmu_initdata *initdata)
+{
+ const struct dove_pmu_domain_initdata *domain_initdata;
+ struct pmu_data *pmu;
+ int ret;
+
+ pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
+ if (!pmu)
+ return -ENOMEM;
+
+ spin_lock_init(&pmu->lock);
+ pmu->pmc_base = initdata->pmc_base;
+ pmu->pmu_base = initdata->pmu_base;
+
+ pmu_reset_init(pmu);
+ for (domain_initdata = initdata->domains; domain_initdata->name;
+ domain_initdata++) {
+ struct pmu_domain *domain;
+
+ domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+ if (domain) {
+ domain->pmu = pmu;
+ domain->pwr_mask = domain_initdata->pwr_mask;
+ domain->rst_mask = domain_initdata->rst_mask;
+ domain->iso_mask = domain_initdata->iso_mask;
+ domain->base.name = domain_initdata->name;
+
+ __pmu_domain_register(domain, NULL);
+ }
+ }
+
+ ret = dove_init_pmu_irq(pmu, initdata->irq);
+ if (ret)
+ pr_err("dove_init_pmu_irq() failed: %d\n", ret);
+
+ if (pmu->irq_domain)
+ irq_domain_associate_many(pmu->irq_domain,
+ initdata->irq_domain_start,
+ 0, NR_PMU_IRQS);
+
+ return 0;
+}
+
+/*
+ * pmu: power-manager@d0000 {
+ * compatible = "marvell,dove-pmu";
+ * reg = <0xd0000 0x8000> <0xd8000 0x8000>;
+ * interrupts = <33>;
+ * interrupt-controller;
+ * #reset-cells = 1;
+ * vpu_domain: vpu-domain {
+ * #power-domain-cells = <0>;
+ * marvell,pmu_pwr_mask = <0x00000008>;
+ * marvell,pmu_iso_mask = <0x00000001>;
+ * resets = <&pmu 16>;
+ * };
+ * gpu_domain: gpu-domain {
+ * #power-domain-cells = <0>;
+ * marvell,pmu_pwr_mask = <0x00000004>;
+ * marvell,pmu_iso_mask = <0x00000002>;
+ * resets = <&pmu 18>;
+ * };
+ * };
+ */
+int __init dove_init_pmu(void)
+{
+ struct device_node *np_pmu, *domains_node, *np;
+ struct pmu_data *pmu;
+ int ret, parent_irq;
+
+ /* Lookup the PMU node */
+ np_pmu = of_find_compatible_node(NULL, NULL, "marvell,dove-pmu");
+ if (!np_pmu)
+ return 0;
+
+ domains_node = of_get_child_by_name(np_pmu, "domains");
+ if (!domains_node) {
+ pr_err("%pOFn: failed to find domains sub-node\n", np_pmu);
+ return 0;
+ }
+
+ pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
+ if (!pmu)
+ return -ENOMEM;
+
+ spin_lock_init(&pmu->lock);
+ pmu->of_node = np_pmu;
+ pmu->pmc_base = of_iomap(pmu->of_node, 0);
+ pmu->pmu_base = of_iomap(pmu->of_node, 1);
+ if (!pmu->pmc_base || !pmu->pmu_base) {
+ pr_err("%pOFn: failed to map PMU\n", np_pmu);
+ iounmap(pmu->pmu_base);
+ iounmap(pmu->pmc_base);
+ kfree(pmu);
+ return -ENOMEM;
+ }
+
+ pmu_reset_init(pmu);
+
+ for_each_available_child_of_node(domains_node, np) {
+ struct of_phandle_args args;
+ struct pmu_domain *domain;
+
+ domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+ if (!domain)
+ break;
+
+ domain->pmu = pmu;
+ domain->base.name = kasprintf(GFP_KERNEL, "%pOFn", np);
+ if (!domain->base.name) {
+ kfree(domain);
+ break;
+ }
+
+ of_property_read_u32(np, "marvell,pmu_pwr_mask",
+ &domain->pwr_mask);
+ of_property_read_u32(np, "marvell,pmu_iso_mask",
+ &domain->iso_mask);
+
+ /*
+ * We parse the reset controller property directly here
+ * to ensure that we can operate when the reset controller
+ * support is not configured into the kernel.
+ */
+ ret = of_parse_phandle_with_args(np, "resets", "#reset-cells",
+ 0, &args);
+ if (ret == 0) {
+ if (args.np == pmu->of_node)
+ domain->rst_mask = BIT(args.args[0]);
+ of_node_put(args.np);
+ }
+
+ __pmu_domain_register(domain, np);
+ }
+
+ /* Loss of the interrupt controller is not a fatal error. */
+ parent_irq = irq_of_parse_and_map(pmu->of_node, 0);
+ if (!parent_irq) {
+ pr_err("%pOFn: no interrupt specified\n", np_pmu);
+ } else {
+ ret = dove_init_pmu_irq(pmu, parent_irq);
+ if (ret)
+ pr_err("dove_init_pmu_irq() failed: %d\n", ret);
+ }
+
+ return 0;
+}
diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig
new file mode 100644
index 0000000000..fcec6ed83d
--- /dev/null
+++ b/drivers/soc/fsl/Kconfig
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# NXP/Freescale QorIQ series SOC drivers
+#
+
+menu "NXP/Freescale QorIQ SoC drivers"
+
+source "drivers/soc/fsl/qbman/Kconfig"
+source "drivers/soc/fsl/qe/Kconfig"
+
+config FSL_GUTS
+ bool
+ select SOC_BUS
+ help
+ The global utilities block controls power management, I/O device
+ enabling, power-on reset (POR) configuration monitoring, alternate
+ function selection for multiplexed signals, and clock control.
+ This driver manages and accesses the global utilities block.
+ Initially, only reading the SVR and registering the SoC device are supported.
+ Other guts accesses, such as reading RCW, should eventually be moved
+ into this driver as well.
+
+config FSL_MC_DPIO
+ tristate "QorIQ DPAA2 DPIO driver"
+ depends on FSL_MC_BUS
+ select SOC_BUS
+ select FSL_GUTS
+ select DIMLIB
+ help
+ Driver for the DPAA2 DPIO object. A DPIO provides queue and
+ buffer management facilities for software to interact with
+ other DPAA2 objects. This driver does not expose the DPIO
+ objects individually, but groups them under a service layer
+ API.
+
+config DPAA2_CONSOLE
+ tristate "QorIQ DPAA2 console driver"
+ depends on OF && (ARCH_LAYERSCAPE || COMPILE_TEST)
+ default y
+ help
+ Console driver for DPAA2 platforms. Exports 2 char devices,
+ /dev/dpaa2_mc_console and /dev/dpaa2_aiop_console,
+ which can be used to dump the Management Complex and AIOP
+ firmware logs.
+
+config FSL_RCPM
+ bool "Freescale RCPM support"
+ depends on PM_SLEEP && (ARM || ARM64)
+ help
+ The NXP QorIQ processors based on ARM cores have an RCPM module
+ (Run Control and Power Management), which performs all device-level
+ tasks associated with power management, such as wakeup source control.
+ Note that this driver currently does not support PowerPC-based
+ QorIQ processors.
+endmenu
diff --git a/drivers/soc/fsl/Makefile b/drivers/soc/fsl/Makefile
new file mode 100644
index 0000000000..906f1cd8af
--- /dev/null
+++ b/drivers/soc/fsl/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Linux Kernel SOC fsl specific device drivers
+#
+
+obj-$(CONFIG_FSL_DPAA) += qbman/
+obj-$(CONFIG_QUICC_ENGINE) += qe/
+obj-$(CONFIG_CPM) += qe/
+obj-$(CONFIG_FSL_RCPM) += rcpm.o
+obj-$(CONFIG_FSL_GUTS) += guts.o
+obj-$(CONFIG_FSL_MC_DPIO) += dpio/
+obj-$(CONFIG_DPAA2_CONSOLE) += dpaa2-console.o
diff --git a/drivers/soc/fsl/dpaa2-console.c b/drivers/soc/fsl/dpaa2-console.c
new file mode 100644
index 0000000000..1dca693b6b
--- /dev/null
+++ b/drivers/soc/fsl/dpaa2-console.c
@@ -0,0 +1,331 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Freescale DPAA2 Platforms Console Driver
+ *
+ * Copyright 2015-2016 Freescale Semiconductor Inc.
+ * Copyright 2018 NXP
+ */
+
+#define pr_fmt(fmt) "dpaa2-console: " fmt
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+
+/* MC firmware base low/high registers indexes */
+#define MCFBALR_OFFSET 0
+#define MCFBAHR_OFFSET 1
+
+/* Bit masks used to get the most/least significant part of the MC base addr */
+#define MC_FW_ADDR_MASK_HIGH 0x1FFFF
+#define MC_FW_ADDR_MASK_LOW 0xE0000000
+
+#define MC_BUFFER_OFFSET 0x01000000
+#define MC_BUFFER_SIZE (1024 * 1024 * 16)
+#define MC_OFFSET_DELTA MC_BUFFER_OFFSET
+
+#define AIOP_BUFFER_OFFSET 0x06000000
+#define AIOP_BUFFER_SIZE (1024 * 1024 * 16)
+#define AIOP_OFFSET_DELTA 0
+
+#define LOG_HEADER_FLAG_BUFFER_WRAPAROUND 0x80000000
+#define LAST_BYTE(a) ((a) & ~(LOG_HEADER_FLAG_BUFFER_WRAPAROUND))
+
+/* MC and AIOP Magic words */
+#define MAGIC_MC 0x4d430100
+#define MAGIC_AIOP 0x41494F50
+
+struct log_header {
+ __le32 magic_word;
+ char reserved[4];
+ __le32 buf_start;
+ __le32 buf_length;
+ __le32 last_byte;
+};
+
+struct console_data {
+ void __iomem *map_addr;
+ struct log_header __iomem *hdr;
+ void __iomem *start_addr;
+ void __iomem *end_addr;
+ void __iomem *end_of_data;
+ void __iomem *cur_ptr;
+};
+
+static struct resource mc_base_addr;
+
+static inline void adjust_end(struct console_data *cd)
+{
+ u32 last_byte = readl(&cd->hdr->last_byte);
+
+ cd->end_of_data = cd->start_addr + LAST_BYTE(last_byte);
+}
+
+static u64 get_mc_fw_base_address(void)
+{
+ u64 mcfwbase = 0ULL;
+ u32 __iomem *mcfbaregs;
+
+ mcfbaregs = ioremap(mc_base_addr.start, resource_size(&mc_base_addr));
+ if (!mcfbaregs) {
+ pr_err("could not map MC Firmware Base registers\n");
+ return 0;
+ }
+
+ mcfwbase = readl(mcfbaregs + MCFBAHR_OFFSET) &
+ MC_FW_ADDR_MASK_HIGH;
+ mcfwbase <<= 32;
+ mcfwbase |= readl(mcfbaregs + MCFBALR_OFFSET) & MC_FW_ADDR_MASK_LOW;
+ iounmap(mcfbaregs);
+
+ pr_debug("MC base address at 0x%016llx\n", mcfwbase);
+ return mcfwbase;
+}
+
+static ssize_t dpaa2_console_size(struct console_data *cd)
+{
+ ssize_t size;
+
+ if (cd->cur_ptr <= cd->end_of_data)
+ size = cd->end_of_data - cd->cur_ptr;
+ else
+ size = (cd->end_addr - cd->cur_ptr) +
+ (cd->end_of_data - cd->start_addr);
+
+ return size;
+}
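+
+/*
+ * Illustrative note (editor's sketch, not part of the original patch):
+ * when the firmware log writer has wrapped, cur_ptr sits past end_of_data
+ * and the readable size spans the wrap. E.g. with start_addr = S,
+ * end_addr = S + len, cur_ptr = S + 10 and end_of_data = S + 4, the size
+ * is (end_addr - cur_ptr) + (end_of_data - start_addr) = (len - 10) + 4.
+ */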
+
+static int dpaa2_generic_console_open(struct inode *node, struct file *fp,
+ u64 offset, u64 size,
+ u32 expected_magic,
+ u32 offset_delta)
+{
+ u32 read_magic, wrapped, last_byte, buf_start, buf_length;
+ struct console_data *cd;
+ u64 base_addr;
+ int err;
+
+ cd = kmalloc(sizeof(*cd), GFP_KERNEL);
+ if (!cd)
+ return -ENOMEM;
+
+ base_addr = get_mc_fw_base_address();
+ if (!base_addr) {
+ err = -EIO;
+ goto err_fwba;
+ }
+
+ cd->map_addr = ioremap(base_addr + offset, size);
+ if (!cd->map_addr) {
+ pr_err("cannot map console log memory\n");
+ err = -EIO;
+ goto err_ioremap;
+ }
+
+ cd->hdr = (struct log_header __iomem *)cd->map_addr;
+ read_magic = readl(&cd->hdr->magic_word);
+ last_byte = readl(&cd->hdr->last_byte);
+ buf_start = readl(&cd->hdr->buf_start);
+ buf_length = readl(&cd->hdr->buf_length);
+
+ if (read_magic != expected_magic) {
+ pr_warn("expected = %08x, read = %08x\n",
+ expected_magic, read_magic);
+ err = -EIO;
+ goto err_magic;
+ }
+
+ cd->start_addr = cd->map_addr + buf_start - offset_delta;
+ cd->end_addr = cd->start_addr + buf_length;
+
+ wrapped = last_byte & LOG_HEADER_FLAG_BUFFER_WRAPAROUND;
+
+ adjust_end(cd);
+ if (wrapped && cd->end_of_data != cd->end_addr)
+ cd->cur_ptr = cd->end_of_data + 1;
+ else
+ cd->cur_ptr = cd->start_addr;
+
+ fp->private_data = cd;
+
+ return 0;
+
+err_magic:
+ iounmap(cd->map_addr);
+
+err_ioremap:
+err_fwba:
+ kfree(cd);
+
+ return err;
+}
+
+static int dpaa2_mc_console_open(struct inode *node, struct file *fp)
+{
+ return dpaa2_generic_console_open(node, fp,
+ MC_BUFFER_OFFSET, MC_BUFFER_SIZE,
+ MAGIC_MC, MC_OFFSET_DELTA);
+}
+
+static int dpaa2_aiop_console_open(struct inode *node, struct file *fp)
+{
+ return dpaa2_generic_console_open(node, fp,
+ AIOP_BUFFER_OFFSET, AIOP_BUFFER_SIZE,
+ MAGIC_AIOP, AIOP_OFFSET_DELTA);
+}
+
+static int dpaa2_console_close(struct inode *node, struct file *fp)
+{
+ struct console_data *cd = fp->private_data;
+
+ iounmap(cd->map_addr);
+ kfree(cd);
+ return 0;
+}
+
+static ssize_t dpaa2_console_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct console_data *cd = fp->private_data;
+ size_t bytes = dpaa2_console_size(cd);
+ size_t bytes_end = cd->end_addr - cd->cur_ptr;
+ size_t written = 0;
+ void *kbuf;
+ int err;
+
+ /* Check if we need to adjust the end of data addr */
+ adjust_end(cd);
+
+ if (cd->end_of_data == cd->cur_ptr)
+ return 0;
+
+ if (count < bytes)
+ bytes = count;
+
+ kbuf = kmalloc(bytes, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ if (bytes > bytes_end) {
+ memcpy_fromio(kbuf, cd->cur_ptr, bytes_end);
+ if (copy_to_user(buf, kbuf, bytes_end)) {
+ err = -EFAULT;
+ goto err_free_buf;
+ }
+ buf += bytes_end;
+ cd->cur_ptr = cd->start_addr;
+ bytes -= bytes_end;
+ written += bytes_end;
+ }
+
+ memcpy_fromio(kbuf, cd->cur_ptr, bytes);
+ if (copy_to_user(buf, kbuf, bytes)) {
+ err = -EFAULT;
+ goto err_free_buf;
+ }
+ cd->cur_ptr += bytes;
+ written += bytes;
+
+ kfree(kbuf);
+ return written;
+
+err_free_buf:
+ kfree(kbuf);
+
+ return err;
+}
+
+static const struct file_operations dpaa2_mc_console_fops = {
+ .owner = THIS_MODULE,
+ .open = dpaa2_mc_console_open,
+ .release = dpaa2_console_close,
+ .read = dpaa2_console_read,
+};
+
+static struct miscdevice dpaa2_mc_console_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "dpaa2_mc_console",
+ .fops = &dpaa2_mc_console_fops
+};
+
+static const struct file_operations dpaa2_aiop_console_fops = {
+ .owner = THIS_MODULE,
+ .open = dpaa2_aiop_console_open,
+ .release = dpaa2_console_close,
+ .read = dpaa2_console_read,
+};
+
+static struct miscdevice dpaa2_aiop_console_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "dpaa2_aiop_console",
+ .fops = &dpaa2_aiop_console_fops
+};
+
+static int dpaa2_console_probe(struct platform_device *pdev)
+{
+ int error;
+
+ error = of_address_to_resource(pdev->dev.of_node, 0, &mc_base_addr);
+ if (error < 0) {
+ pr_err("of_address_to_resource() failed for %pOF with %d\n",
+ pdev->dev.of_node, error);
+ return error;
+ }
+
+ error = misc_register(&dpaa2_mc_console_dev);
+ if (error) {
+ pr_err("cannot register device %s\n",
+ dpaa2_mc_console_dev.name);
+ goto err_register_mc;
+ }
+
+ error = misc_register(&dpaa2_aiop_console_dev);
+ if (error) {
+ pr_err("cannot register device %s\n",
+ dpaa2_aiop_console_dev.name);
+ goto err_register_aiop;
+ }
+
+ return 0;
+
+err_register_aiop:
+ misc_deregister(&dpaa2_mc_console_dev);
+err_register_mc:
+ return error;
+}
+
+static int dpaa2_console_remove(struct platform_device *pdev)
+{
+ misc_deregister(&dpaa2_mc_console_dev);
+ misc_deregister(&dpaa2_aiop_console_dev);
+
+ return 0;
+}
+
+static const struct of_device_id dpaa2_console_match_table[] = {
+ { .compatible = "fsl,dpaa2-console",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, dpaa2_console_match_table);
+
+static struct platform_driver dpaa2_console_driver = {
+ .driver = {
+ .name = "dpaa2-console",
+ .pm = NULL,
+ .of_match_table = dpaa2_console_match_table,
+ },
+ .probe = dpaa2_console_probe,
+ .remove = dpaa2_console_remove,
+};
+module_platform_driver(dpaa2_console_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Roy Pledge <roy.pledge@nxp.com>");
+MODULE_DESCRIPTION("DPAA2 console driver");
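
A minimal user-space sketch of how the character devices exported above can
be read (illustrative only, not part of this patch; only the device path
/dev/dpaa2_mc_console comes from the driver):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[4096];
            ssize_t n;
            int fd = open("/dev/dpaa2_mc_console", O_RDONLY);

            if (fd < 0) {
                    perror("open /dev/dpaa2_mc_console");
                    return 1;
            }
            /* Each read() drains the next chunk of the MC firmware log. */
            while ((n = read(fd, buf, sizeof(buf))) > 0)
                    fwrite(buf, 1, (size_t)n, stdout);
            close(fd);
            return 0;
    }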
diff --git a/drivers/soc/fsl/dpio/Makefile b/drivers/soc/fsl/dpio/Makefile
new file mode 100644
index 0000000000..b9ff24c765
--- /dev/null
+++ b/drivers/soc/fsl/dpio/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# QorIQ DPAA2 DPIO driver
+#
+
+obj-$(CONFIG_FSL_MC_DPIO) += fsl-mc-dpio.o
+
+fsl-mc-dpio-objs := dpio.o qbman-portal.o dpio-service.o dpio-driver.o
diff --git a/drivers/soc/fsl/dpio/dpio-cmd.h b/drivers/soc/fsl/dpio/dpio-cmd.h
new file mode 100644
index 0000000000..2fbcb78cda
--- /dev/null
+++ b/drivers/soc/fsl/dpio/dpio-cmd.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+#ifndef _FSL_DPIO_CMD_H
+#define _FSL_DPIO_CMD_H
+
+/* DPIO Version */
+#define DPIO_VER_MAJOR 4
+#define DPIO_VER_MINOR 2
+
+/* Command Versioning */
+
+#define DPIO_CMD_ID_OFFSET 4
+#define DPIO_CMD_BASE_VERSION 1
+
+#define DPIO_CMD(id) (((id) << DPIO_CMD_ID_OFFSET) | DPIO_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPIO_CMDID_CLOSE DPIO_CMD(0x800)
+#define DPIO_CMDID_OPEN DPIO_CMD(0x803)
+#define DPIO_CMDID_GET_API_VERSION DPIO_CMD(0xa03)
+#define DPIO_CMDID_ENABLE DPIO_CMD(0x002)
+#define DPIO_CMDID_DISABLE DPIO_CMD(0x003)
+#define DPIO_CMDID_GET_ATTR DPIO_CMD(0x004)
+#define DPIO_CMDID_RESET DPIO_CMD(0x005)
+#define DPIO_CMDID_SET_STASHING_DEST DPIO_CMD(0x120)
+
+struct dpio_cmd_open {
+ __le32 dpio_id;
+};
+
+#define DPIO_CHANNEL_MODE_MASK 0x3
+
+struct dpio_rsp_get_attr {
+ /* cmd word 0 */
+ __le32 id;
+ __le16 qbman_portal_id;
+ u8 num_priorities;
+ u8 channel_mode;
+ /* cmd word 1 */
+ __le64 qbman_portal_ce_addr;
+ /* cmd word 2 */
+ __le64 qbman_portal_ci_addr;
+ /* cmd word 3 */
+ __le32 qbman_version;
+ __le32 pad1;
+ /* cmd word 4 */
+ __le32 clk;
+};
+
+struct dpio_stashing_dest {
+ u8 sdest;
+};
+
+#endif /* _FSL_DPIO_CMD_H */
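
A quick illustration of the command encoding above (hedged sketch, not part
of this patch): DPIO_CMD() places the command ID in bits 15:4 and the base
command version in bits 3:0, so for example:

    /* Hypothetical compile-time checks of the DPIO_CMD() encoding. */
    #include <linux/build_bug.h>
    #include "dpio-cmd.h"

    static inline void dpio_cmd_encoding_example(void)
    {
            BUILD_BUG_ON(DPIO_CMDID_ENABLE != 0x0021); /* (0x002 << 4) | 1 */
            BUILD_BUG_ON(DPIO_CMDID_CLOSE  != 0x8001); /* (0x800 << 4) | 1 */
    }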
diff --git a/drivers/soc/fsl/dpio/dpio-driver.c b/drivers/soc/fsl/dpio/dpio-driver.c
new file mode 100644
index 0000000000..9e3fddd8f5
--- /dev/null
+++ b/drivers/soc/fsl/dpio/dpio-driver.c
@@ -0,0 +1,337 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright NXP 2016
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/sys_soc.h>
+
+#include <linux/fsl/mc.h>
+#include <soc/fsl/dpaa2-io.h>
+
+#include "qbman-portal.h"
+#include "dpio.h"
+#include "dpio-cmd.h"
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Freescale Semiconductor, Inc");
+MODULE_DESCRIPTION("DPIO Driver");
+
+struct dpio_priv {
+ struct dpaa2_io *io;
+};
+
+static cpumask_var_t cpus_unused_mask;
+
+static const struct soc_device_attribute ls1088a_soc[] = {
+ {.family = "QorIQ LS1088A"},
+ { /* sentinel */ }
+};
+
+static const struct soc_device_attribute ls2080a_soc[] = {
+ {.family = "QorIQ LS2080A"},
+ { /* sentinel */ }
+};
+
+static const struct soc_device_attribute ls2088a_soc[] = {
+ {.family = "QorIQ LS2088A"},
+ { /* sentinel */ }
+};
+
+static const struct soc_device_attribute lx2160a_soc[] = {
+ {.family = "QorIQ LX2160A"},
+ { /* sentinel */ }
+};
+
+static int dpaa2_dpio_get_cluster_sdest(struct fsl_mc_device *dpio_dev, int cpu)
+{
+ int cluster_base, cluster_size;
+
+ if (soc_device_match(ls1088a_soc)) {
+ cluster_base = 2;
+ cluster_size = 4;
+ } else if (soc_device_match(ls2080a_soc) ||
+ soc_device_match(ls2088a_soc) ||
+ soc_device_match(lx2160a_soc)) {
+ cluster_base = 0;
+ cluster_size = 2;
+ } else {
+ dev_err(&dpio_dev->dev, "unknown SoC version\n");
+ return -1;
+ }
+
+ return cluster_base + cpu / cluster_size;
+}
+
+static irqreturn_t dpio_irq_handler(int irq_num, void *arg)
+{
+ struct device *dev = (struct device *)arg;
+ struct dpio_priv *priv = dev_get_drvdata(dev);
+
+ return dpaa2_io_irq(priv->io);
+}
+
+static void unregister_dpio_irq_handlers(struct fsl_mc_device *dpio_dev)
+{
+ struct fsl_mc_device_irq *irq;
+
+ irq = dpio_dev->irqs[0];
+
+ /* clear the affinity hint */
+ irq_set_affinity_hint(irq->virq, NULL);
+}
+
+static int register_dpio_irq_handlers(struct fsl_mc_device *dpio_dev, int cpu)
+{
+ int error;
+ struct fsl_mc_device_irq *irq;
+
+ irq = dpio_dev->irqs[0];
+ error = devm_request_irq(&dpio_dev->dev,
+ irq->virq,
+ dpio_irq_handler,
+ 0,
+ dev_name(&dpio_dev->dev),
+ &dpio_dev->dev);
+ if (error < 0) {
+ dev_err(&dpio_dev->dev,
+ "devm_request_irq() failed: %d\n",
+ error);
+ return error;
+ }
+
+ /* set the affinity hint */
+ if (irq_set_affinity_hint(irq->virq, cpumask_of(cpu)))
+ dev_err(&dpio_dev->dev,
+ "irq_set_affinity failed irq %d cpu %d\n",
+ irq->virq, cpu);
+
+ return 0;
+}
+
+static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
+{
+ struct dpio_attr dpio_attrs;
+ struct dpaa2_io_desc desc;
+ struct dpio_priv *priv;
+ int err = -ENOMEM;
+ struct device *dev = &dpio_dev->dev;
+ int possible_next_cpu;
+ int sdest;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ goto err_priv_alloc;
+
+ dev_set_drvdata(dev, priv);
+
+ err = fsl_mc_portal_allocate(dpio_dev, 0, &dpio_dev->mc_io);
+ if (err) {
+ dev_dbg(dev, "MC portal allocation failed\n");
+ err = -EPROBE_DEFER;
+ goto err_priv_alloc;
+ }
+
+ err = dpio_open(dpio_dev->mc_io, 0, dpio_dev->obj_desc.id,
+ &dpio_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpio_open() failed\n");
+ goto err_open;
+ }
+
+ err = dpio_reset(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpio_reset() failed\n");
+ goto err_reset;
+ }
+
+ err = dpio_get_attributes(dpio_dev->mc_io, 0, dpio_dev->mc_handle,
+ &dpio_attrs);
+ if (err) {
+ dev_err(dev, "dpio_get_attributes() failed %d\n", err);
+ goto err_get_attr;
+ }
+ desc.qman_version = dpio_attrs.qbman_version;
+ desc.qman_clk = dpio_attrs.clk;
+
+ err = dpio_enable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpio_enable() failed %d\n", err);
+ goto err_get_attr;
+ }
+
+ /* initialize DPIO descriptor */
+ desc.receives_notifications = dpio_attrs.num_priorities ? 1 : 0;
+ desc.has_8prio = dpio_attrs.num_priorities == 8 ? 1 : 0;
+ desc.dpio_id = dpio_dev->obj_desc.id;
+
+ /* get the cpu to use for the affinity hint */
+ possible_next_cpu = cpumask_first(cpus_unused_mask);
+ if (possible_next_cpu >= nr_cpu_ids) {
+ dev_err(dev, "probe failed. Number of DPIOs exceeds NR_CPUS.\n");
+ err = -ERANGE;
+ goto err_allocate_irqs;
+ }
+ desc.cpu = possible_next_cpu;
+ cpumask_clear_cpu(possible_next_cpu, cpus_unused_mask);
+
+ sdest = dpaa2_dpio_get_cluster_sdest(dpio_dev, desc.cpu);
+ if (sdest >= 0) {
+ err = dpio_set_stashing_destination(dpio_dev->mc_io, 0,
+ dpio_dev->mc_handle,
+ sdest);
+ if (err)
+ dev_err(dev, "dpio_set_stashing_destination failed for cpu%d\n",
+ desc.cpu);
+ }
+
+ if (dpio_dev->obj_desc.region_count < 3) {
+ /* No support for DDR backed portals, use classic mapping */
+ /*
+ * Set the CENA regs to be the cache inhibited area of the
+ * portal to avoid coherency issues if a user migrates to
+ * another core.
+ */
+ desc.regs_cena = devm_memremap(dev, dpio_dev->regions[1].start,
+ resource_size(&dpio_dev->regions[1]),
+ MEMREMAP_WC);
+ } else {
+ desc.regs_cena = devm_memremap(dev, dpio_dev->regions[2].start,
+ resource_size(&dpio_dev->regions[2]),
+ MEMREMAP_WB);
+ }
+
+ if (IS_ERR(desc.regs_cena)) {
+ dev_err(dev, "devm_memremap failed\n");
+ err = PTR_ERR(desc.regs_cena);
+ goto err_allocate_irqs;
+ }
+
+ desc.regs_cinh = devm_ioremap(dev, dpio_dev->regions[1].start,
+ resource_size(&dpio_dev->regions[1]));
+ if (!desc.regs_cinh) {
+ err = -ENOMEM;
+ dev_err(dev, "devm_ioremap failed\n");
+ goto err_allocate_irqs;
+ }
+
+ err = fsl_mc_allocate_irqs(dpio_dev);
+ if (err) {
+ dev_err(dev, "fsl_mc_allocate_irqs failed. err=%d\n", err);
+ goto err_allocate_irqs;
+ }
+
+ priv->io = dpaa2_io_create(&desc, dev);
+ if (!priv->io) {
+ dev_err(dev, "dpaa2_io_create failed\n");
+ err = -ENOMEM;
+ goto err_dpaa2_io_create;
+ }
+
+ err = register_dpio_irq_handlers(dpio_dev, desc.cpu);
+ if (err)
+ goto err_register_dpio_irq;
+
+ dev_info(dev, "probed\n");
+ dev_dbg(dev, " receives_notifications = %d\n",
+ desc.receives_notifications);
+ dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
+
+ return 0;
+
+err_dpaa2_io_create:
+ unregister_dpio_irq_handlers(dpio_dev);
+err_register_dpio_irq:
+ fsl_mc_free_irqs(dpio_dev);
+err_allocate_irqs:
+ dpio_disable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
+err_get_attr:
+err_reset:
+ dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
+err_open:
+ fsl_mc_portal_free(dpio_dev->mc_io);
+err_priv_alloc:
+ return err;
+}
+
+/* Tear down interrupts for a given DPIO object */
+static void dpio_teardown_irqs(struct fsl_mc_device *dpio_dev)
+{
+ unregister_dpio_irq_handlers(dpio_dev);
+ fsl_mc_free_irqs(dpio_dev);
+}
+
+static void dpaa2_dpio_remove(struct fsl_mc_device *dpio_dev)
+{
+ struct device *dev;
+ struct dpio_priv *priv;
+ int err = 0, cpu;
+
+ dev = &dpio_dev->dev;
+ priv = dev_get_drvdata(dev);
+ cpu = dpaa2_io_get_cpu(priv->io);
+
+ dpaa2_io_down(priv->io);
+
+ dpio_teardown_irqs(dpio_dev);
+
+ cpumask_set_cpu(cpu, cpus_unused_mask);
+
+ err = dpio_open(dpio_dev->mc_io, 0, dpio_dev->obj_desc.id,
+ &dpio_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpio_open() failed\n");
+ goto err_open;
+ }
+
+ dpio_disable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
+
+ dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
+
+err_open:
+ fsl_mc_portal_free(dpio_dev->mc_io);
+}
+
+static const struct fsl_mc_device_id dpaa2_dpio_match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpio",
+ },
+ { .vendor = 0x0 }
+};
+
+static struct fsl_mc_driver dpaa2_dpio_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = dpaa2_dpio_probe,
+ .remove = dpaa2_dpio_remove,
+ .match_id_table = dpaa2_dpio_match_id_table
+};
+
+static int dpio_driver_init(void)
+{
+ if (!zalloc_cpumask_var(&cpus_unused_mask, GFP_KERNEL))
+ return -ENOMEM;
+ cpumask_copy(cpus_unused_mask, cpu_online_mask);
+
+ return fsl_mc_driver_register(&dpaa2_dpio_driver);
+}
+
+static void dpio_driver_exit(void)
+{
+ free_cpumask_var(cpus_unused_mask);
+ fsl_mc_driver_unregister(&dpaa2_dpio_driver);
+}
+module_init(dpio_driver_init);
+module_exit(dpio_driver_exit);
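
The CPU-to-stashing-destination mapping used by dpaa2_dpio_get_cluster_sdest()
above boils down to cluster_base + cpu / cluster_size; a hedged standalone
sketch of the arithmetic (base/size values taken from the SoC match tables in
this file):

    /* sdest = cluster_base + cpu / cluster_size */
    static int example_sdest(int cpu, int cluster_base, int cluster_size)
    {
            return cluster_base + cpu / cluster_size;
    }

    /*
     * LS1088A (base 2, size 4): cpus 0-3 -> sdest 2, cpus 4-7 -> sdest 3
     * LS2088A (base 0, size 2): cpus 0-1 -> sdest 0, cpus 2-3 -> sdest 1, ...
     */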
diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c
new file mode 100644
index 0000000000..1d2b27e3ea
--- /dev/null
+++ b/drivers/soc/fsl/dpio/dpio-service.c
@@ -0,0 +1,898 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2019 NXP
+ *
+ */
+#include <linux/types.h>
+#include <linux/fsl/mc.h>
+#include <soc/fsl/dpaa2-io.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/dim.h>
+#include <linux/slab.h>
+
+#include "dpio.h"
+#include "qbman-portal.h"
+
+struct dpaa2_io {
+ struct dpaa2_io_desc dpio_desc;
+ struct qbman_swp_desc swp_desc;
+ struct qbman_swp *swp;
+ struct list_head node;
+ /* protect against multiple management commands */
+ spinlock_t lock_mgmt_cmd;
+ /* protect notifications list */
+ spinlock_t lock_notifications;
+ struct list_head notifications;
+ struct device *dev;
+
+ /* Net DIM */
+ struct dim rx_dim;
+ /* protect against concurrent Net DIM updates */
+ spinlock_t dim_lock;
+ u16 event_ctr;
+ u64 bytes;
+ u64 frames;
+};
+
+struct dpaa2_io_store {
+ unsigned int max;
+ dma_addr_t paddr;
+ struct dpaa2_dq *vaddr;
+ void *alloced_addr; /* unaligned value from kmalloc() */
+ unsigned int idx; /* position of the next-to-be-returned entry */
+ struct qbman_swp *swp; /* portal used to issue VDQCR */
+ struct device *dev; /* device used for DMA mapping */
+};
+
+/* keep a per cpu array of DPIOs for fast access */
+static struct dpaa2_io *dpio_by_cpu[NR_CPUS];
+static struct list_head dpio_list = LIST_HEAD_INIT(dpio_list);
+static DEFINE_SPINLOCK(dpio_list_lock);
+
+static inline struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d,
+ int cpu)
+{
+ if (d)
+ return d;
+
+ if (cpu != DPAA2_IO_ANY_CPU && cpu >= num_possible_cpus())
+ return NULL;
+
+ /*
+ * If cpu == -1, choose the current cpu, with no guarantees about
+ * potentially being migrated away.
+ */
+ if (cpu < 0)
+ cpu = raw_smp_processor_id();
+
+ /* If a specific cpu was requested, pick it up immediately */
+ return dpio_by_cpu[cpu];
+}
+
+static inline struct dpaa2_io *service_select(struct dpaa2_io *d)
+{
+ if (d)
+ return d;
+
+ d = service_select_by_cpu(d, -1);
+ if (d)
+ return d;
+
+ spin_lock(&dpio_list_lock);
+ d = list_entry(dpio_list.next, struct dpaa2_io, node);
+ list_del(&d->node);
+ list_add_tail(&d->node, &dpio_list);
+ spin_unlock(&dpio_list_lock);
+
+ return d;
+}
+
+/**
+ * dpaa2_io_service_select() - return a dpaa2_io service affined to this cpu
+ * @cpu: the cpu id
+ *
+ * Return the affine dpaa2_io service, or NULL if there is no service affined
+ * to the specified cpu. If DPAA2_IO_ANY_CPU is used, return the next available
+ * service.
+ */
+struct dpaa2_io *dpaa2_io_service_select(int cpu)
+{
+ if (cpu == DPAA2_IO_ANY_CPU)
+ return service_select(NULL);
+
+ return service_select_by_cpu(NULL, cpu);
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_service_select);
+
+static void dpaa2_io_dim_work(struct work_struct *w)
+{
+ struct dim *dim = container_of(w, struct dim, work);
+ struct dim_cq_moder moder =
+ net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+ struct dpaa2_io *d = container_of(dim, struct dpaa2_io, rx_dim);
+
+ dpaa2_io_set_irq_coalescing(d, moder.usec);
+ dim->state = DIM_START_MEASURE;
+}
+
+/**
+ * dpaa2_io_create() - create a dpaa2_io object.
+ * @desc: the dpaa2_io descriptor
+ * @dev: the actual DPIO device
+ *
+ * Activates a "struct dpaa2_io" corresponding to the given config of an actual
+ * DPIO object.
+ *
+ * Return a valid dpaa2_io object for success, or NULL for failure.
+ */
+struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
+ struct device *dev)
+{
+ struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
+ u32 qman_256_cycles_per_ns;
+
+ if (!obj)
+ return NULL;
+
+ /* check if CPU is out of range (-1 means any cpu) */
+ if (desc->cpu != DPAA2_IO_ANY_CPU && desc->cpu >= num_possible_cpus()) {
+ kfree(obj);
+ return NULL;
+ }
+
+ obj->dpio_desc = *desc;
+ obj->swp_desc.cena_bar = obj->dpio_desc.regs_cena;
+ obj->swp_desc.cinh_bar = obj->dpio_desc.regs_cinh;
+ obj->swp_desc.qman_clk = obj->dpio_desc.qman_clk;
+ obj->swp_desc.qman_version = obj->dpio_desc.qman_version;
+
+	/* Compute how many nanoseconds 256 QBMAN clock cycles take. This is
+	 * needed because the interrupt timeout period register is specified
+	 * in QBMAN clock cycles, in increments of 256.
+	 */
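+	/*
+	 * Illustrative example (not in the original patch): with a 500 MHz
+	 * QBMAN clock, qman_256_cycles_per_ns = 256000 / 500 = 512, i.e.
+	 * 256 clock cycles take 512 ns.
+	 */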
+ qman_256_cycles_per_ns = 256000 / (obj->swp_desc.qman_clk / 1000000);
+ obj->swp_desc.qman_256_cycles_per_ns = qman_256_cycles_per_ns;
+ obj->swp = qbman_swp_init(&obj->swp_desc);
+
+ if (!obj->swp) {
+ kfree(obj);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&obj->node);
+ spin_lock_init(&obj->lock_mgmt_cmd);
+ spin_lock_init(&obj->lock_notifications);
+ spin_lock_init(&obj->dim_lock);
+ INIT_LIST_HEAD(&obj->notifications);
+
+ /* For now only enable DQRR interrupts */
+ qbman_swp_interrupt_set_trigger(obj->swp,
+ QBMAN_SWP_INTERRUPT_DQRI);
+ qbman_swp_interrupt_clear_status(obj->swp, 0xffffffff);
+ if (obj->dpio_desc.receives_notifications)
+ qbman_swp_push_set(obj->swp, 0, 1);
+
+ spin_lock(&dpio_list_lock);
+ list_add_tail(&obj->node, &dpio_list);
+ if (desc->cpu >= 0 && !dpio_by_cpu[desc->cpu])
+ dpio_by_cpu[desc->cpu] = obj;
+ spin_unlock(&dpio_list_lock);
+
+ obj->dev = dev;
+
+ memset(&obj->rx_dim, 0, sizeof(obj->rx_dim));
+ INIT_WORK(&obj->rx_dim.work, dpaa2_io_dim_work);
+ obj->event_ctr = 0;
+ obj->bytes = 0;
+ obj->frames = 0;
+
+ return obj;
+}
+
+/**
+ * dpaa2_io_down() - release the dpaa2_io object.
+ * @d: the dpaa2_io object to be released.
+ *
+ * The "struct dpaa2_io" type can represent an individual DPIO object (as
+ * described by "struct dpaa2_io_desc") or an instance of a "DPIO service",
+ * which can be used to group/encapsulate multiple DPIO objects. In all cases,
+ * each handle obtained should be released using this function.
+ */
+void dpaa2_io_down(struct dpaa2_io *d)
+{
+ spin_lock(&dpio_list_lock);
+ dpio_by_cpu[d->dpio_desc.cpu] = NULL;
+ list_del(&d->node);
+ spin_unlock(&dpio_list_lock);
+
+ kfree(d);
+}
+
+#define DPAA_POLL_MAX 32
+
+/**
+ * dpaa2_io_irq() - ISR for DPIO interrupts
+ *
+ * @obj: the given DPIO object.
+ *
+ * Return IRQ_HANDLED for success or IRQ_NONE if there
+ * were no pending interrupts.
+ */
+irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj)
+{
+ const struct dpaa2_dq *dq;
+ int max = 0;
+ struct qbman_swp *swp;
+ u32 status;
+
+ obj->event_ctr++;
+
+ swp = obj->swp;
+ status = qbman_swp_interrupt_read_status(swp);
+ if (!status)
+ return IRQ_NONE;
+
+ dq = qbman_swp_dqrr_next(swp);
+ while (dq) {
+ if (qbman_result_is_SCN(dq)) {
+ struct dpaa2_io_notification_ctx *ctx;
+ u64 q64;
+
+ q64 = qbman_result_SCN_ctx(dq);
+ ctx = (void *)(uintptr_t)q64;
+ ctx->cb(ctx);
+ } else {
+ pr_crit("fsl-mc-dpio: Unrecognised/ignored DQRR entry\n");
+ }
+ qbman_swp_dqrr_consume(swp, dq);
+ ++max;
+ if (max > DPAA_POLL_MAX)
+ goto done;
+ dq = qbman_swp_dqrr_next(swp);
+ }
+done:
+ qbman_swp_interrupt_clear_status(swp, status);
+ qbman_swp_interrupt_set_inhibit(swp, 0);
+ return IRQ_HANDLED;
+}
+
+/**
+ * dpaa2_io_get_cpu() - get the cpu associated with a given DPIO object
+ *
+ * @d: the given DPIO object.
+ *
+ * Return the cpu associated with the DPIO object
+ */
+int dpaa2_io_get_cpu(struct dpaa2_io *d)
+{
+ return d->dpio_desc.cpu;
+}
+EXPORT_SYMBOL(dpaa2_io_get_cpu);
+
+/**
+ * dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN
+ * notifications on the given DPIO service.
+ * @d: the given DPIO service.
+ * @ctx: the notification context.
+ * @dev: the device that requests the registration
+ *
+ * The caller should make the MC command to attach a DPAA2 object to
+ * a DPIO after this function completes successfully. In that way:
+ * (a) The DPIO service is "ready" to handle a notification arrival
+ * (which might happen before the "attach" command to MC has
+ * returned control of execution back to the caller)
+ * (b) The DPIO service can provide back to the caller the 'dpio_id' and
+ * 'qman64' parameters that it should pass along in the MC command
+ * in order for the object to be configured to produce the right
+ * notification fields to the DPIO service.
+ *
+ * Return 0 for success, or -ENODEV for failure.
+ */
+int dpaa2_io_service_register(struct dpaa2_io *d,
+ struct dpaa2_io_notification_ctx *ctx,
+ struct device *dev)
+{
+ struct device_link *link;
+ unsigned long irqflags;
+
+ d = service_select_by_cpu(d, ctx->desired_cpu);
+ if (!d)
+ return -ENODEV;
+
+ link = device_link_add(dev, d->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
+ if (!link)
+ return -EINVAL;
+
+ ctx->dpio_id = d->dpio_desc.dpio_id;
+ ctx->qman64 = (u64)(uintptr_t)ctx;
+ ctx->dpio_private = d;
+ spin_lock_irqsave(&d->lock_notifications, irqflags);
+ list_add(&ctx->node, &d->notifications);
+ spin_unlock_irqrestore(&d->lock_notifications, irqflags);
+
+ /* Enable the generation of CDAN notifications */
+ if (ctx->is_cdan)
+ return qbman_swp_CDAN_set_context_enable(d->swp,
+ (u16)ctx->id,
+ ctx->qman64);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_service_register);
+
+/**
+ * dpaa2_io_service_deregister - The opposite of 'register'.
+ * @service: the given DPIO service.
+ * @ctx: the notification context.
+ * @dev: the device that requests to be deregistered
+ *
+ * This function should be called only after sending the MC command to
+ * detach the notification-producing device from the DPIO.
+ */
+void dpaa2_io_service_deregister(struct dpaa2_io *service,
+ struct dpaa2_io_notification_ctx *ctx,
+ struct device *dev)
+{
+ struct dpaa2_io *d = ctx->dpio_private;
+ unsigned long irqflags;
+
+ if (ctx->is_cdan)
+ qbman_swp_CDAN_disable(d->swp, (u16)ctx->id);
+
+ spin_lock_irqsave(&d->lock_notifications, irqflags);
+ list_del(&ctx->node);
+ spin_unlock_irqrestore(&d->lock_notifications, irqflags);
+
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_service_deregister);
+
+/**
+ * dpaa2_io_service_rearm() - Rearm the notification for the given DPIO service.
+ * @d: the given DPIO service.
+ * @ctx: the notification context.
+ *
+ * Once a FQDAN/CDAN has been produced, the corresponding FQ/channel is
+ * considered "disarmed". That is, the user can issue pull dequeue operations on that
+ * traffic source for as long as it likes. Eventually it may wish to "rearm"
+ * that source to allow it to produce another FQDAN/CDAN, that's what this
+ * function achieves.
+ *
+ * Return 0 for success.
+ */
+int dpaa2_io_service_rearm(struct dpaa2_io *d,
+ struct dpaa2_io_notification_ctx *ctx)
+{
+ unsigned long irqflags;
+ int err;
+
+ d = service_select_by_cpu(d, ctx->desired_cpu);
+	if (unlikely(!d))
+ return -ENODEV;
+
+ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
+ if (ctx->is_cdan)
+ err = qbman_swp_CDAN_enable(d->swp, (u16)ctx->id);
+ else
+ err = qbman_swp_fq_schedule(d->swp, ctx->id);
+ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_service_rearm);
+
+/**
+ * dpaa2_io_service_pull_fq() - pull dequeue functions from a fq.
+ * @d: the given DPIO service.
+ * @fqid: the given frame queue id.
+ * @s: the dpaa2_io_store object for the result.
+ *
+ * Return 0 for success, or error code for failure.
+ */
+int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid,
+ struct dpaa2_io_store *s)
+{
+ struct qbman_pull_desc pd;
+ int err;
+
+ qbman_pull_desc_clear(&pd);
+ qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
+ qbman_pull_desc_set_numframes(&pd, (u8)s->max);
+ qbman_pull_desc_set_fq(&pd, fqid);
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+ s->swp = d->swp;
+ err = qbman_swp_pull(d->swp, &pd);
+ if (err)
+ s->swp = NULL;
+
+ return err;
+}
+EXPORT_SYMBOL(dpaa2_io_service_pull_fq);
+
+/**
+ * dpaa2_io_service_pull_channel() - pull dequeue functions from a channel.
+ * @d: the given DPIO service.
+ * @channelid: the given channel id.
+ * @s: the dpaa2_io_store object for the result.
+ *
+ * Return 0 for success, or error code for failure.
+ */
+int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
+ struct dpaa2_io_store *s)
+{
+ struct qbman_pull_desc pd;
+ int err;
+
+ qbman_pull_desc_clear(&pd);
+ qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
+ qbman_pull_desc_set_numframes(&pd, (u8)s->max);
+ qbman_pull_desc_set_channel(&pd, channelid, qbman_pull_type_prio);
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+
+ s->swp = d->swp;
+ err = qbman_swp_pull(d->swp, &pd);
+ if (err)
+ s->swp = NULL;
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_service_pull_channel);
+
+/**
+ * dpaa2_io_service_enqueue_fq() - Enqueue a frame to a frame queue.
+ * @d: the given DPIO service.
+ * @fqid: the given frame queue id.
+ * @fd: the frame descriptor which is enqueued.
+ *
+ * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
+ * or -ENODEV if there is no dpio service.
+ */
+int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d,
+ u32 fqid,
+ const struct dpaa2_fd *fd)
+{
+ struct qbman_eq_desc ed;
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+
+ qbman_eq_desc_clear(&ed);
+ qbman_eq_desc_set_no_orp(&ed, 0);
+ qbman_eq_desc_set_fq(&ed, fqid);
+
+ return qbman_swp_enqueue(d->swp, &ed, fd);
+}
+EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq);
+
+/**
+ * dpaa2_io_service_enqueue_multiple_fq() - Enqueue multiple frames
+ * to a frame queue using one fqid.
+ * @d: the given DPIO service.
+ * @fqid: the given frame queue id.
+ * @fd: the frame descriptor which is enqueued.
+ * @nb: number of frames to be enqueued
+ *
+ * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
+ * or -ENODEV if there is no dpio service.
+ */
+int dpaa2_io_service_enqueue_multiple_fq(struct dpaa2_io *d,
+ u32 fqid,
+ const struct dpaa2_fd *fd,
+ int nb)
+{
+ struct qbman_eq_desc ed;
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+
+ qbman_eq_desc_clear(&ed);
+ qbman_eq_desc_set_no_orp(&ed, 0);
+ qbman_eq_desc_set_fq(&ed, fqid);
+
+ return qbman_swp_enqueue_multiple(d->swp, &ed, fd, NULL, nb);
+}
+EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_fq);
+
+/**
+ * dpaa2_io_service_enqueue_multiple_desc_fq() - Enqueue multiple frames
+ * to different frame queues using a list of fqids.
+ * @d: the given DPIO service.
+ * @fqid: the given list of frame queue ids.
+ * @fd: the frame descriptor which is enqueued.
+ * @nb: number of frames to be enqueued
+ *
+ * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
+ * or -ENODEV if there is no dpio service.
+ */
+int dpaa2_io_service_enqueue_multiple_desc_fq(struct dpaa2_io *d,
+ u32 *fqid,
+ const struct dpaa2_fd *fd,
+ int nb)
+{
+ struct qbman_eq_desc *ed;
+ int i, ret;
+
+	ed = kcalloc(32, sizeof(struct qbman_eq_desc), GFP_KERNEL);
+ if (!ed)
+ return -ENOMEM;
+
+ d = service_select(d);
+ if (!d) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ for (i = 0; i < nb; i++) {
+ qbman_eq_desc_clear(&ed[i]);
+ qbman_eq_desc_set_no_orp(&ed[i], 0);
+ qbman_eq_desc_set_fq(&ed[i], fqid[i]);
+ }
+
+ ret = qbman_swp_enqueue_multiple_desc(d->swp, &ed[0], fd, nb);
+out:
+ kfree(ed);
+ return ret;
+}
+EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_desc_fq);
+
+/**
+ * dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD.
+ * @d: the given DPIO service.
+ * @qdid: the given queuing destination id.
+ * @prio: the given queuing priority.
+ * @qdbin: the given queuing destination bin.
+ * @fd: the frame descriptor which is enqueued.
+ *
+ * Return 0 for successful enqueue, or -EBUSY if the enqueue ring is not ready,
+ * or -ENODEV if there is no dpio service.
+ */
+int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d,
+ u32 qdid, u8 prio, u16 qdbin,
+ const struct dpaa2_fd *fd)
+{
+ struct qbman_eq_desc ed;
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+
+ qbman_eq_desc_clear(&ed);
+ qbman_eq_desc_set_no_orp(&ed, 0);
+ qbman_eq_desc_set_qd(&ed, qdid, qdbin, prio);
+
+ return qbman_swp_enqueue(d->swp, &ed, fd);
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_service_enqueue_qd);
+
+/**
+ * dpaa2_io_service_release() - Release buffers to a buffer pool.
+ * @d: the given DPIO object.
+ * @bpid: the buffer pool id.
+ * @buffers: the buffers to be released.
+ * @num_buffers: the number of the buffers to be released.
+ *
+ * Return 0 for success, and negative error code for failure.
+ */
+int dpaa2_io_service_release(struct dpaa2_io *d,
+ u16 bpid,
+ const u64 *buffers,
+ unsigned int num_buffers)
+{
+ struct qbman_release_desc rd;
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+
+ qbman_release_desc_clear(&rd);
+ qbman_release_desc_set_bpid(&rd, bpid);
+
+ return qbman_swp_release(d->swp, &rd, buffers, num_buffers);
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_service_release);
+
+/**
+ * dpaa2_io_service_acquire() - Acquire buffers from a buffer pool.
+ * @d: the given DPIO object.
+ * @bpid: the buffer pool id.
+ * @buffers: the buffer addresses for acquired buffers.
+ * @num_buffers: the expected number of the buffers to acquire.
+ *
+ * Return a negative error code if the command failed, otherwise it returns
+ * the number of buffers acquired, which may be less than the number requested.
+ * Eg. if the buffer pool is empty, this will return zero.
+ */
+int dpaa2_io_service_acquire(struct dpaa2_io *d,
+ u16 bpid,
+ u64 *buffers,
+ unsigned int num_buffers)
+{
+ unsigned long irqflags;
+ int err;
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+
+ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
+ err = qbman_swp_acquire(d->swp, bpid, buffers, num_buffers);
+ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_service_acquire);
+
+/*
+ * 'Stores' are reusable memory blocks for holding dequeue results, and to
+ * assist with parsing those results.
+ */
+
+/**
+ * dpaa2_io_store_create() - Create the dma memory storage for dequeue result.
+ * @max_frames: the maximum number of dequeue results, must be <= 32.
+ * @dev: the device to allow mapping/unmapping the DMAable region.
+ *
+ * The size of the storage is "max_frames*sizeof(struct dpaa2_dq)".
+ * The 'dpaa2_io_store' returned is a DPIO service managed object.
+ *
+ * Return pointer to dpaa2_io_store struct for successfully created storage
+ * memory, or NULL on error.
+ */
+struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
+ struct device *dev)
+{
+ struct dpaa2_io_store *ret;
+ size_t size;
+
+ if (!max_frames || (max_frames > 32))
+ return NULL;
+
+ ret = kmalloc(sizeof(*ret), GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->max = max_frames;
+ size = max_frames * sizeof(struct dpaa2_dq) + 64;
+ ret->alloced_addr = kzalloc(size, GFP_KERNEL);
+ if (!ret->alloced_addr) {
+ kfree(ret);
+ return NULL;
+ }
+
+ ret->vaddr = PTR_ALIGN(ret->alloced_addr, 64);
+ ret->paddr = dma_map_single(dev, ret->vaddr,
+ sizeof(struct dpaa2_dq) * max_frames,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, ret->paddr)) {
+ kfree(ret->alloced_addr);
+ kfree(ret);
+ return NULL;
+ }
+
+ ret->idx = 0;
+ ret->dev = dev;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_store_create);
+
+/**
+ * dpaa2_io_store_destroy() - Frees the dma memory storage for dequeue
+ * result.
+ * @s: the storage memory to be destroyed.
+ */
+void dpaa2_io_store_destroy(struct dpaa2_io_store *s)
+{
+ dma_unmap_single(s->dev, s->paddr, sizeof(struct dpaa2_dq) * s->max,
+ DMA_FROM_DEVICE);
+ kfree(s->alloced_addr);
+ kfree(s);
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_store_destroy);
+
+/**
+ * dpaa2_io_store_next() - Determine when the next dequeue result is available.
+ * @s: the dpaa2_io_store object.
+ * @is_last: indicate whether this is the last frame in the pull command.
+ *
+ * When an object driver performs dequeues to a dpaa2_io_store, this function
+ * can be used to determine when the next frame result is available. Once
+ * this function returns non-NULL, a subsequent call to it will try to find
+ * the next dequeue result.
+ *
+ * Note that if a pull-dequeue has a NULL result because the target FQ/channel
+ * was empty, then this function will also return NULL (rather than expecting
+ * the caller to always check for this). As such, "is_last" can be used to
+ * differentiate between "end-of-empty-dequeue" and "still-waiting".
+ *
+ * Return dequeue result for a valid dequeue result, or NULL for empty dequeue.
+ */
+struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last)
+{
+ int match;
+ struct dpaa2_dq *ret = &s->vaddr[s->idx];
+
+ match = qbman_result_has_new_result(s->swp, ret);
+ if (!match) {
+ *is_last = 0;
+ return NULL;
+ }
+
+ s->idx++;
+
+ if (dpaa2_dq_is_pull_complete(ret)) {
+ *is_last = 1;
+ s->idx = 0;
+ /*
+ * If we get an empty dequeue result to terminate a zero-results
+		 * vdqcr, return NULL to the caller rather than expecting them to
+ * check non-NULL results every time.
+ */
+ if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME))
+ ret = NULL;
+ } else {
+ prefetch(&s->vaddr[s->idx]);
+ *is_last = 0;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_store_next);
+
+/**
+ * dpaa2_io_query_fq_count() - Get the frame and byte count for a given fq.
+ * @d: the given DPIO object.
+ * @fqid: the id of frame queue to be queried.
+ * @fcnt: the queried frame count.
+ * @bcnt: the queried byte count.
+ *
+ * Knowing the FQ count at run-time can be useful in debugging situations.
+ * The instantaneous frame- and byte-count are hereby returned.
+ *
+ * Return 0 for a successful query, and negative error code if query fails.
+ */
+int dpaa2_io_query_fq_count(struct dpaa2_io *d, u32 fqid,
+ u32 *fcnt, u32 *bcnt)
+{
+ struct qbman_fq_query_np_rslt state;
+ struct qbman_swp *swp;
+ unsigned long irqflags;
+ int ret;
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+
+ swp = d->swp;
+ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
+ ret = qbman_fq_query_state(swp, fqid, &state);
+ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
+ if (ret)
+ return ret;
+ *fcnt = qbman_fq_state_frame_count(&state);
+ *bcnt = qbman_fq_state_byte_count(&state);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_query_fq_count);
+
+/**
+ * dpaa2_io_query_bp_count() - Query the number of buffers currently in a
+ * buffer pool.
+ * @d: the given DPIO object.
+ * @bpid: the index of buffer pool to be queried.
+ * @num: the queried number of buffers in the buffer pool.
+ *
+ * Return 0 for a successful query, and negative error code if query fails.
+ */
+int dpaa2_io_query_bp_count(struct dpaa2_io *d, u16 bpid, u32 *num)
+{
+ struct qbman_bp_query_rslt state;
+ struct qbman_swp *swp;
+ unsigned long irqflags;
+ int ret;
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+
+ swp = d->swp;
+ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
+ ret = qbman_bp_query(swp, bpid, &state);
+ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
+ if (ret)
+ return ret;
+ *num = qbman_bp_info_num_free_bufs(&state);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_query_bp_count);
+
+/**
+ * dpaa2_io_set_irq_coalescing() - Set new IRQ coalescing values
+ * @d: the given DPIO object
+ * @irq_holdoff: interrupt holdoff (timeout) period in us
+ *
+ * Return 0 for success, or negative error code on error.
+ */
+int dpaa2_io_set_irq_coalescing(struct dpaa2_io *d, u32 irq_holdoff)
+{
+ struct qbman_swp *swp = d->swp;
+
+ return qbman_swp_set_irq_coalescing(swp, swp->dqrr.dqrr_size - 1,
+ irq_holdoff);
+}
+EXPORT_SYMBOL(dpaa2_io_set_irq_coalescing);
+
+/**
+ * dpaa2_io_get_irq_coalescing() - Get the current IRQ coalescing parameters
+ * @d: the given DPIO object
+ * @irq_holdoff: interrupt holdoff (timeout) period in us
+ */
+void dpaa2_io_get_irq_coalescing(struct dpaa2_io *d, u32 *irq_holdoff)
+{
+ struct qbman_swp *swp = d->swp;
+
+ qbman_swp_get_irq_coalescing(swp, NULL, irq_holdoff);
+}
+EXPORT_SYMBOL(dpaa2_io_get_irq_coalescing);
+
+/**
+ * dpaa2_io_set_adaptive_coalescing() - Enable/disable adaptive coalescing
+ * @d: the given DPIO object
+ * @use_adaptive_rx_coalesce: adaptive coalescing state
+ */
+void dpaa2_io_set_adaptive_coalescing(struct dpaa2_io *d,
+ int use_adaptive_rx_coalesce)
+{
+ d->swp->use_adaptive_rx_coalesce = use_adaptive_rx_coalesce;
+}
+EXPORT_SYMBOL(dpaa2_io_set_adaptive_coalescing);
+
+/**
+ * dpaa2_io_get_adaptive_coalescing() - Query adaptive coalescing state
+ * @d: the given DPIO object
+ *
+ * Return 1 when adaptive coalescing is enabled on the DPIO object and 0
+ * otherwise.
+ */
+int dpaa2_io_get_adaptive_coalescing(struct dpaa2_io *d)
+{
+ return d->swp->use_adaptive_rx_coalesce;
+}
+EXPORT_SYMBOL(dpaa2_io_get_adaptive_coalescing);
+
+/**
+ * dpaa2_io_update_net_dim() - Update Net DIM
+ * @d: the given DPIO object
+ * @frames: how many frames have been dequeued by the user since the last call
+ * @bytes: how many bytes have been dequeued by the user since the last call
+ */
+void dpaa2_io_update_net_dim(struct dpaa2_io *d, __u64 frames, __u64 bytes)
+{
+ struct dim_sample dim_sample = {};
+
+ if (!d->swp->use_adaptive_rx_coalesce)
+ return;
+
+ spin_lock(&d->dim_lock);
+
+ d->bytes += bytes;
+ d->frames += frames;
+
+ dim_update_sample(d->event_ctr, d->frames, d->bytes, &dim_sample);
+ net_dim(&d->rx_dim, dim_sample);
+
+ spin_unlock(&d->dim_lock);
+}
+EXPORT_SYMBOL(dpaa2_io_update_net_dim);
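
To show how the service calls above fit together, here is a hedged sketch of
a pull-dequeue sequence an object driver could use (illustrative only; the
fqid and device pointer are placeholders, and dpaa2_dq_fd() is assumed from
soc/fsl/dpaa2-global.h):

    #include <linux/device.h>
    #include <soc/fsl/dpaa2-io.h>
    #include <soc/fsl/dpaa2-global.h>

    static int example_pull_and_drain(struct device *dev, u32 fqid)
    {
            struct dpaa2_io_store *store;
            struct dpaa2_io *io;
            struct dpaa2_dq *dq;
            int is_last = 0, err;

            /* Pick the DPIO service affine to the current CPU (or any). */
            io = dpaa2_io_service_select(DPAA2_IO_ANY_CPU);
            if (!io)
                    return -ENODEV;

            /* Storage for up to 16 dequeue results, DMA-mapped against dev. */
            store = dpaa2_io_store_create(16, dev);
            if (!store)
                    return -ENOMEM;

            /* Issue the volatile dequeue command against the frame queue. */
            err = dpaa2_io_service_pull_fq(io, fqid, store);
            if (err)
                    goto out;

            /* Walk the results; NULL with !is_last means "still waiting". */
            do {
                    dq = dpaa2_io_store_next(store, &is_last);
                    if (dq) {
                            const struct dpaa2_fd *fd = dpaa2_dq_fd(dq);

                            /* ... process the frame descriptor here ... */
                            (void)fd;
                    }
            } while (!is_last);
    out:
            dpaa2_io_store_destroy(store);
            return err;
    }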
diff --git a/drivers/soc/fsl/dpio/dpio.c b/drivers/soc/fsl/dpio/dpio.c
new file mode 100644
index 0000000000..8ed606ffaa
--- /dev/null
+++ b/drivers/soc/fsl/dpio/dpio.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/fsl/mc.h>
+
+#include "dpio.h"
+#include "dpio-cmd.h"
+
+/*
+ * Data Path I/O Portal API
+ * Contains initialization APIs and runtime control APIs for DPIO
+ */
+
+/**
+ * dpio_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpio_id: DPIO unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpio_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpio_id,
+ u16 *token)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpio_cmd_open *dpio_cmd;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ dpio_cmd = (struct dpio_cmd_open *)cmd.params;
+ dpio_cmd->dpio_id = cpu_to_le32(dpio_id);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpio_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CLOSE,
+ cmd_flags,
+ token);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpio_enable() - Enable the DPIO, allow I/O portal operations.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpio_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_ENABLE,
+ cmd_flags,
+ token);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpio_disable() - Disable the DPIO, stop any I/O portal operation.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpio_disable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpio_get_attributes() - Retrieve DPIO attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpio_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpio_attr *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpio_rsp_get_attr *dpio_rsp;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ dpio_rsp = (struct dpio_rsp_get_attr *)cmd.params;
+ attr->id = le32_to_cpu(dpio_rsp->id);
+ attr->qbman_portal_id = le16_to_cpu(dpio_rsp->qbman_portal_id);
+ attr->num_priorities = dpio_rsp->num_priorities;
+ attr->channel_mode = dpio_rsp->channel_mode & DPIO_CHANNEL_MODE_MASK;
+ attr->qbman_portal_ce_offset =
+ le64_to_cpu(dpio_rsp->qbman_portal_ce_addr);
+ attr->qbman_portal_ci_offset =
+ le64_to_cpu(dpio_rsp->qbman_portal_ci_addr);
+ attr->qbman_version = le32_to_cpu(dpio_rsp->qbman_version);
+ attr->clk = le32_to_cpu(dpio_rsp->clk);
+
+ return 0;
+}
+
+int dpio_set_stashing_destination(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 sdest)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpio_stashing_dest *dpio_cmd;
+
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_STASHING_DEST,
+ cmd_flags, token);
+ dpio_cmd = (struct dpio_stashing_dest *)cmd.params;
+ dpio_cmd->sdest = sdest;
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpio_get_api_version() - Get Data Path I/O API version
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of DPIO API
+ * @minor_ver: Minor version of DPIO API
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpio_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver)
+{
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_API_VERSION,
+ cmd_flags, 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
+
+ return 0;
+}
+
+/**
+ * dpio_reset() - Reset the DPIO, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_RESET,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
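
A hedged sketch of how a caller could pair dpio_get_api_version() with the
DPIO_VER_MAJOR/DPIO_VER_MINOR constants from dpio-cmd.h (illustrative; not
part of this patch):

    #include <linux/fsl/mc.h>
    #include "dpio.h"
    #include "dpio-cmd.h"

    static int example_check_dpio_api(struct fsl_mc_io *mc_io)
    {
            u16 major, minor;
            int err;

            err = dpio_get_api_version(mc_io, 0, &major, &minor);
            if (err)
                    return err;

            /* Reject firmware older than the API this code targets. */
            if (major < DPIO_VER_MAJOR ||
                (major == DPIO_VER_MAJOR && minor < DPIO_VER_MINOR))
                    return -ENOTSUPP;

            return 0;
    }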
diff --git a/drivers/soc/fsl/dpio/dpio.h b/drivers/soc/fsl/dpio/dpio.h
new file mode 100644
index 0000000000..7fda44f0d7
--- /dev/null
+++ b/drivers/soc/fsl/dpio/dpio.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+#ifndef __FSL_DPIO_H
+#define __FSL_DPIO_H
+
+struct fsl_mc_io;
+
+int dpio_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpio_id,
+ u16 *token);
+
+int dpio_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/**
+ * enum dpio_channel_mode - DPIO notification channel mode
+ * @DPIO_NO_CHANNEL: No support for notification channel
+ * @DPIO_LOCAL_CHANNEL: Notifications on data availability can be received by a
+ * dedicated channel in the DPIO; user should point the queue's
+ * destination in the relevant interface to this DPIO
+ */
+enum dpio_channel_mode {
+ DPIO_NO_CHANNEL = 0,
+ DPIO_LOCAL_CHANNEL = 1,
+};
+
+/**
+ * struct dpio_cfg - Structure representing DPIO configuration
+ * @channel_mode: Notification channel mode
+ * @num_priorities: Number of priorities for the notification channel (1-8);
+ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL'
+ */
+struct dpio_cfg {
+ enum dpio_channel_mode channel_mode;
+ u8 num_priorities;
+};
+
+int dpio_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpio_disable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/**
+ * struct dpio_attr - Structure representing DPIO attributes
+ * @id: DPIO object ID
+ * @qbman_portal_ce_offset: offset of the software portal cache-enabled area
+ * @qbman_portal_ci_offset: offset of the software portal cache-inhibited area
+ * @qbman_portal_id: Software portal ID
+ * @channel_mode: Notification channel mode
+ * @num_priorities: Number of priorities for the notification channel (1-8);
+ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL'
+ * @qbman_version: QBMAN version
+ * @clk: QBMAN clock frequency value in Hz
+ */
+struct dpio_attr {
+ int id;
+ u64 qbman_portal_ce_offset;
+ u64 qbman_portal_ci_offset;
+ u16 qbman_portal_id;
+ enum dpio_channel_mode channel_mode;
+ u8 num_priorities;
+ u32 qbman_version;
+ u32 clk;
+};
+
+int dpio_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpio_attr *attr);
+
+int dpio_set_stashing_destination(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 dest);
+
+int dpio_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver);
+
+int dpio_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+#endif /* __FSL_DPIO_H */
diff --git a/drivers/soc/fsl/dpio/qbman-portal.c b/drivers/soc/fsl/dpio/qbman-portal.c
new file mode 100644
index 0000000000..0a3fb6c115
--- /dev/null
+++ b/drivers/soc/fsl/dpio/qbman-portal.c
@@ -0,0 +1,1853 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Copyright 2016-2019 NXP
+ *
+ */
+
+#include <asm/cacheflush.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <soc/fsl/dpaa2-global.h>
+
+#include "qbman-portal.h"
+
+/* All QBMan command and result structures use this "valid bit" encoding */
+#define QB_VALID_BIT ((u32)0x80)
+
+/* QBMan portal management command codes */
+#define QBMAN_MC_ACQUIRE 0x30
+#define QBMAN_WQCHAN_CONFIGURE 0x46
+
+/* CINH register offsets */
+#define QBMAN_CINH_SWP_EQCR_PI 0x800
+#define QBMAN_CINH_SWP_EQCR_CI 0x840
+#define QBMAN_CINH_SWP_EQAR 0x8c0
+#define QBMAN_CINH_SWP_CR_RT 0x900
+#define QBMAN_CINH_SWP_VDQCR_RT 0x940
+#define QBMAN_CINH_SWP_EQCR_AM_RT 0x980
+#define QBMAN_CINH_SWP_RCR_AM_RT 0x9c0
+#define QBMAN_CINH_SWP_DQPI 0xa00
+#define QBMAN_CINH_SWP_DQRR_ITR 0xa80
+#define QBMAN_CINH_SWP_DCAP 0xac0
+#define QBMAN_CINH_SWP_SDQCR 0xb00
+#define QBMAN_CINH_SWP_EQCR_AM_RT2 0xb40
+#define QBMAN_CINH_SWP_RCR_PI 0xc00
+#define QBMAN_CINH_SWP_RAR 0xcc0
+#define QBMAN_CINH_SWP_ISR 0xe00
+#define QBMAN_CINH_SWP_IER 0xe40
+#define QBMAN_CINH_SWP_ISDR 0xe80
+#define QBMAN_CINH_SWP_IIR 0xec0
+#define QBMAN_CINH_SWP_ITPR 0xf40
+
+/* CENA register offsets */
+#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6))
+#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((u32)(n) << 6))
+#define QBMAN_CENA_SWP_RCR(n) (0x400 + ((u32)(n) << 6))
+#define QBMAN_CENA_SWP_CR 0x600
+#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((u32)(vb) >> 1))
+#define QBMAN_CENA_SWP_VDQCR 0x780
+#define QBMAN_CENA_SWP_EQCR_CI 0x840
+#define QBMAN_CENA_SWP_EQCR_CI_MEMBACK 0x1840
+
+/* CENA register offsets in memory-backed mode */
+#define QBMAN_CENA_SWP_DQRR_MEM(n) (0x800 + ((u32)(n) << 6))
+#define QBMAN_CENA_SWP_RCR_MEM(n) (0x1400 + ((u32)(n) << 6))
+#define QBMAN_CENA_SWP_CR_MEM 0x1600
+#define QBMAN_CENA_SWP_RR_MEM 0x1680
+#define QBMAN_CENA_SWP_VDQCR_MEM 0x1780
+
+/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
+#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)
+
+/* Define token used to determine if response written to memory is valid */
+#define QMAN_DQ_TOKEN_VALID 1
+
+/* SDQCR attribute codes */
+#define QB_SDQCR_FC_SHIFT 29
+#define QB_SDQCR_FC_MASK 0x1
+#define QB_SDQCR_DCT_SHIFT 24
+#define QB_SDQCR_DCT_MASK 0x3
+#define QB_SDQCR_TOK_SHIFT 16
+#define QB_SDQCR_TOK_MASK 0xff
+#define QB_SDQCR_SRC_SHIFT 0
+#define QB_SDQCR_SRC_MASK 0xffff
+
+/* opaque token for static dequeues */
+#define QMAN_SDQCR_TOKEN 0xbb
+
+#define QBMAN_EQCR_DCA_IDXMASK 0x0f
+#define QBMAN_ENQUEUE_FLAG_DCA (1ULL << 31)
+
+#define EQ_DESC_SIZE_WITHOUT_FD 29
+#define EQ_DESC_SIZE_FD_START 32
+
+enum qbman_sdqcr_dct {
+ qbman_sdqcr_dct_null = 0,
+ qbman_sdqcr_dct_prio_ics,
+ qbman_sdqcr_dct_active_ics,
+ qbman_sdqcr_dct_active
+};
+
+enum qbman_sdqcr_fc {
+ qbman_sdqcr_fc_one = 0,
+ qbman_sdqcr_fc_up_to_3 = 1
+};
+
+/* Internal Function declaration */
+static int qbman_swp_enqueue_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd);
+static int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd);
+static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ uint32_t *flags,
+ int num_frames);
+static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ uint32_t *flags,
+ int num_frames);
+static int
+qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ int num_frames);
+static
+int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ int num_frames);
+static int qbman_swp_pull_direct(struct qbman_swp *s,
+ struct qbman_pull_desc *d);
+static int qbman_swp_pull_mem_back(struct qbman_swp *s,
+ struct qbman_pull_desc *d);
+
+const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
+const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);
+
+static int qbman_swp_release_direct(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const u64 *buffers,
+ unsigned int num_buffers);
+static int qbman_swp_release_mem_back(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const u64 *buffers,
+ unsigned int num_buffers);
+
+/* Function pointers */
+int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd)
+ = qbman_swp_enqueue_direct;
+
+int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ uint32_t *flags,
+ int num_frames)
+ = qbman_swp_enqueue_multiple_direct;
+
+int
+(*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ int num_frames)
+ = qbman_swp_enqueue_multiple_desc_direct;
+
+int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d)
+ = qbman_swp_pull_direct;
+
+const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
+ = qbman_swp_dqrr_next_direct;
+
+int (*qbman_swp_release_ptr)(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const u64 *buffers,
+ unsigned int num_buffers)
+ = qbman_swp_release_direct;
+
+/* Portal Access */
+
+static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset)
+{
+ return readl_relaxed(p->addr_cinh + offset);
+}
+
+static inline void qbman_write_register(struct qbman_swp *p, u32 offset,
+ u32 value)
+{
+ writel_relaxed(value, p->addr_cinh + offset);
+}
+
+static inline void *qbman_get_cmd(struct qbman_swp *p, u32 offset)
+{
+ return p->addr_cena + offset;
+}
+
+#define QBMAN_CINH_SWP_CFG 0xd00
+
+#define SWP_CFG_DQRR_MF_SHIFT 20
+#define SWP_CFG_EST_SHIFT 16
+#define SWP_CFG_CPBS_SHIFT 15
+#define SWP_CFG_WN_SHIFT 14
+#define SWP_CFG_RPM_SHIFT 12
+#define SWP_CFG_DCM_SHIFT 10
+#define SWP_CFG_EPM_SHIFT 8
+#define SWP_CFG_VPM_SHIFT 7
+#define SWP_CFG_CPM_SHIFT 6
+#define SWP_CFG_SD_SHIFT 5
+#define SWP_CFG_SP_SHIFT 4
+#define SWP_CFG_SE_SHIFT 3
+#define SWP_CFG_DP_SHIFT 2
+#define SWP_CFG_DE_SHIFT 1
+#define SWP_CFG_EP_SHIFT 0
+
+static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,
+ u8 epm, int sd, int sp, int se,
+ int dp, int de, int ep)
+{
+ return (max_fill << SWP_CFG_DQRR_MF_SHIFT |
+ est << SWP_CFG_EST_SHIFT |
+ wn << SWP_CFG_WN_SHIFT |
+ rpm << SWP_CFG_RPM_SHIFT |
+ dcm << SWP_CFG_DCM_SHIFT |
+ epm << SWP_CFG_EPM_SHIFT |
+ sd << SWP_CFG_SD_SHIFT |
+ sp << SWP_CFG_SP_SHIFT |
+ se << SWP_CFG_SE_SHIFT |
+ dp << SWP_CFG_DP_SHIFT |
+ de << SWP_CFG_DE_SHIFT |
+ ep << SWP_CFG_EP_SHIFT);
+}
+
+#define QMAN_RT_MODE 0x00000100
+
+static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
+{
+ /* 'first' is included, 'last' is excluded */
+ if (first <= last)
+ return last - first;
+ else
+ return (2 * ringsize) - (first - last);
+}
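+
+/*
+ * Illustrative example (editor's sketch, not part of the driver logic): the
+ * producer/consumer counters cover twice the ring size so that a full ring
+ * can be told apart from an empty one. With ringsize = 8 the counters run
+ * 0..15, and e.g. qm_cyc_diff(8, 14, 3) = (2 * 8) - (14 - 3) = 5, i.e. five
+ * entries were consumed across the wrap from 15 back to 0.
+ */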
+
+/**
+ * qbman_swp_init() - Create a functional object representing the given
+ * QBMan portal descriptor.
+ * @d: the given qbman swp descriptor
+ *
+ * Return the qbman_swp portal object on success, or NULL if the object
+ * cannot be created.
+ */
+struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
+{
+ struct qbman_swp *p = kzalloc(sizeof(*p), GFP_KERNEL);
+ u32 reg;
+ u32 mask_size;
+ u32 eqcr_pi;
+
+ if (!p)
+ return NULL;
+
+ spin_lock_init(&p->access_spinlock);
+
+ p->desc = d;
+ p->mc.valid_bit = QB_VALID_BIT;
+ p->sdq = 0;
+ p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
+ p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
+ p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
+ if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
+ p->mr.valid_bit = QB_VALID_BIT;
+
+ atomic_set(&p->vdq.available, 1);
+ p->vdq.valid_bit = QB_VALID_BIT;
+ p->dqrr.next_idx = 0;
+ p->dqrr.valid_bit = QB_VALID_BIT;
+
+ if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
+ p->dqrr.dqrr_size = 4;
+ p->dqrr.reset_bug = 1;
+ } else {
+ p->dqrr.dqrr_size = 8;
+ p->dqrr.reset_bug = 0;
+ }
+
+ p->addr_cena = d->cena_bar;
+ p->addr_cinh = d->cinh_bar;
+
+ if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
+
+ reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
+ 1, /* Writes Non-cacheable */
+ 0, /* EQCR_CI stashing threshold */
+ 3, /* RPM: RCR in array mode */
+ 2, /* DCM: Discrete consumption ack */
+ 2, /* EPM: EQCR in ring mode */
+ 1, /* mem stashing drop enable */
+ 1, /* mem stashing priority enable */
+ 1, /* mem stashing enable */
+ 1, /* dequeue stashing priority enable */
+ 0, /* dequeue stashing enable */
+ 0); /* EQCR_CI stashing priority enable */
+ } else {
+ memset(p->addr_cena, 0, 64 * 1024);
+ reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
+ 1, /* Writes Non-cacheable */
+ 1, /* EQCR_CI stashing threshold */
+ 3, /* RPM: RCR in array mode */
+ 2, /* DCM: Discrete consumption ack */
+ 0, /* EPM: EQCR in ring mode */
+ 1, /* mem stashing drop enable */
+ 1, /* mem stashing priority enable */
+ 1, /* mem stashing enable */
+ 1, /* dequeue stashing priority enable */
+ 0, /* dequeue stashing enable */
+ 0); /* EQCR_CI stashing priority enable */
+ reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */
+ 1 << SWP_CFG_VPM_SHIFT | /* VDQCR read triggered mode */
+ 1 << SWP_CFG_CPM_SHIFT; /* CR read triggered mode */
+ }
+
+ qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
+ reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
+ if (!reg) {
+ pr_err("qbman: the portal is not enabled!\n");
+ kfree(p);
+ return NULL;
+ }
+
+ if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
+ qbman_write_register(p, QBMAN_CINH_SWP_EQCR_PI, QMAN_RT_MODE);
+ qbman_write_register(p, QBMAN_CINH_SWP_RCR_PI, QMAN_RT_MODE);
+ }
+ /*
+ * SDQCR needs to be initialized to 0 when no channels are
+ * being dequeued from or else the QMan HW will indicate an
+ * error. The values that were calculated above will be
+ * applied when dequeues from a specific channel are enabled.
+ */
+ qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0);
+
+ p->eqcr.pi_ring_size = 8;
+ if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
+ p->eqcr.pi_ring_size = 32;
+ qbman_swp_enqueue_ptr =
+ qbman_swp_enqueue_mem_back;
+ qbman_swp_enqueue_multiple_ptr =
+ qbman_swp_enqueue_multiple_mem_back;
+ qbman_swp_enqueue_multiple_desc_ptr =
+ qbman_swp_enqueue_multiple_desc_mem_back;
+ qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
+ qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
+ qbman_swp_release_ptr = qbman_swp_release_mem_back;
+ }
+
+ for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
+ p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask << 1) + 1;
+ eqcr_pi = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_PI);
+ p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
+ p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
+ p->eqcr.ci = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_CI)
+ & p->eqcr.pi_ci_mask;
+ p->eqcr.available = p->eqcr.pi_ring_size;
+
+ /* Initialize the software portal with an IRQ timeout period of 0 us */
+ qbman_swp_set_irq_coalescing(p, p->dqrr.dqrr_size - 1, 0);
+
+ return p;
+}
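+
+/*
+ * Illustrative usage sketch (editor's note, not part of the driver): the
+ * caller - in practice the DPIO driver - is expected to fill a
+ * qbman_swp_desc with the mapped portal addresses and QMan properties
+ * before creating the portal object. The variable names below are
+ * hypothetical.
+ *
+ *	struct qbman_swp_desc desc = {
+ *		.cena_bar = cena_va,	// cache-enabled mapping (hypothetical)
+ *		.cinh_bar = cinh_va,	// cache-inhibited mapping (hypothetical)
+ *		.qman_version = qman_rev,
+ *		.qman_256_cycles_per_ns = cyc256_per_ns, // used by IRQ coalescing
+ *	};
+ *	struct qbman_swp *swp = qbman_swp_init(&desc);
+ *
+ *	if (!swp)
+ *		return -ENODEV;
+ *	// ... use the portal ...
+ *	qbman_swp_finish(swp);	// on teardown
+ */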
+
+/**
+ * qbman_swp_finish() - Destroy the functional object representing the given
+ * QBMan portal descriptor.
+ * @p: the qbman_swp object to be destroyed
+ */
+void qbman_swp_finish(struct qbman_swp *p)
+{
+ kfree(p);
+}
+
+/**
+ * qbman_swp_interrupt_read_status() - Read the interrupt status register
+ * @p: the given software portal
+ *
+ * Return the value in the SWP_ISR register.
+ */
+u32 qbman_swp_interrupt_read_status(struct qbman_swp *p)
+{
+ return qbman_read_register(p, QBMAN_CINH_SWP_ISR);
+}
+
+/**
+ * qbman_swp_interrupt_clear_status() - Clear bits in the interrupt status register
+ * @p: the given software portal
+ * @mask: The mask to clear in SWP_ISR register
+ */
+void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask)
+{
+ qbman_write_register(p, QBMAN_CINH_SWP_ISR, mask);
+}
+
+/**
+ * qbman_swp_interrupt_get_trigger() - read interrupt enable register
+ * @p: the given software portal
+ *
+ * Return the value in the SWP_IER register.
+ */
+u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
+{
+ return qbman_read_register(p, QBMAN_CINH_SWP_IER);
+}
+
+/**
+ * qbman_swp_interrupt_set_trigger() - enable interrupts for a swp
+ * @p: the given software portal
+ * @mask: The mask of bits to enable in SWP_IER
+ */
+void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask)
+{
+ qbman_write_register(p, QBMAN_CINH_SWP_IER, mask);
+}
+
+/**
+ * qbman_swp_interrupt_get_inhibit() - read interrupt mask register
+ * @p: the given software portal object
+ *
+ * Return the value in the SWP_IIR register.
+ */
+int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
+{
+ return qbman_read_register(p, QBMAN_CINH_SWP_IIR);
+}
+
+/**
+ * qbman_swp_interrupt_set_inhibit() - write interrupt mask register
+ * @p: the given software portal object
+ * @inhibit: whether to inhibit the IRQs
+ */
+void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
+{
+ qbman_write_register(p, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
+}
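+
+/*
+ * Illustrative interrupt-handling sketch (editor's note, not part of the
+ * driver): a portal ISR built on these helpers typically reads the status,
+ * inhibits further IRQs while dequeue processing runs, and re-enables them
+ * afterwards. 'swp' is a hypothetical portal pointer.
+ *
+ *	u32 status = qbman_swp_interrupt_read_status(swp);
+ *
+ *	if (status & QBMAN_SWP_INTERRUPT_DQRI)
+ *		qbman_swp_interrupt_set_inhibit(swp, 1);	// mask while polling
+ *	qbman_swp_interrupt_clear_status(swp, status);
+ *	// ... drain the DQRR, then ...
+ *	qbman_swp_interrupt_set_inhibit(swp, 0);
+ */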
+
+/*
+ * Different management commands all use this common base layer of code to issue
+ * commands and poll for results.
+ */
+
+/*
+ * Returns a pointer to where the caller should fill in their management command
+ * (caller should ignore the verb byte)
+ */
+void *qbman_swp_mc_start(struct qbman_swp *p)
+{
+ if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
+ return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
+ else
+ return qbman_get_cmd(p, QBMAN_CENA_SWP_CR_MEM);
+}
+
+/*
+ * Merges in the caller-supplied command verb (which should not include the
+ * valid-bit) and submits the command to hardware.
+ */
+void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb)
+{
+ u8 *v = cmd;
+
+ if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
+ dma_wmb();
+ *v = cmd_verb | p->mc.valid_bit;
+ } else {
+ *v = cmd_verb | p->mc.valid_bit;
+ dma_wmb();
+ qbman_write_register(p, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
+ }
+}
+
+/*
+ * Checks for a completed response (returns non-NULL only if the response
+ * is complete).
+ */
+void *qbman_swp_mc_result(struct qbman_swp *p)
+{
+ u32 *ret, verb;
+
+ if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
+ ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
+ /* Remove the valid-bit - command completed if the rest
+ * is non-zero.
+ */
+ verb = ret[0] & ~QB_VALID_BIT;
+ if (!verb)
+ return NULL;
+ p->mc.valid_bit ^= QB_VALID_BIT;
+ } else {
+ ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR_MEM);
+ /* Command completed if the valid bit is toggled */
+ if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
+ return NULL;
+ /* Command completed if the rest is non-zero */
+ verb = ret[0] & ~QB_VALID_BIT;
+ if (!verb)
+ return NULL;
+ p->mr.valid_bit ^= QB_VALID_BIT;
+ }
+
+ return ret;
+}
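+
+/*
+ * Illustrative sketch of the management-command pattern (editor's note, not
+ * part of the driver): callers in this file pair qbman_swp_mc_start() with
+ * the qbman_swp_mc_complete() helper used later in this file, which wraps
+ * the submit-and-poll half of the sequence below (and bounds how long it
+ * polls). 'swp' and 'verb' are hypothetical.
+ *
+ *	void *cmd = qbman_swp_mc_start(swp);
+ *
+ *	if (!cmd)
+ *		return -EBUSY;
+ *	// ... fill in the command body, leaving the verb byte alone ...
+ *	qbman_swp_mc_submit(swp, cmd, verb);
+ *	do {
+ *		resp = qbman_swp_mc_result(swp);	// NULL until complete
+ *	} while (!resp);
+ */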
+
+#define QB_ENQUEUE_CMD_OPTIONS_SHIFT 0
+enum qb_enqueue_commands {
+ enqueue_empty = 0,
+ enqueue_response_always = 1,
+ enqueue_rejects_to_fq = 2
+};
+
+#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT 2
+#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
+#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT 4
+#define QB_ENQUEUE_CMD_DCA_EN_SHIFT 7
+
+/**
+ * qbman_eq_desc_clear() - Clear the contents of a descriptor to
+ * default/starting state.
+ * @d: the enqueue descriptor to be cleared
+ */
+void qbman_eq_desc_clear(struct qbman_eq_desc *d)
+{
+ memset(d, 0, sizeof(*d));
+}
+
+/**
+ * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
+ * @d: the enqueue descriptor.
+ * @respond_success: 1 = enqueue with response always; 0 = enqueue with
+ * rejections returned on a FQ.
+ */
+void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
+{
+ d->verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
+ if (respond_success)
+ d->verb |= enqueue_response_always;
+ else
+ d->verb |= enqueue_rejects_to_fq;
+}
+
+/*
+ * Exactly one of the following descriptor "targets" should be set. (Calling any
+ * one of these will replace the effect of any prior call to one of these.)
+ * -enqueue to a frame queue
+ * -enqueue to a queuing destination
+ */
+
+/**
+ * qbman_eq_desc_set_fq() - set the FQ for the enqueue command
+ * @d: the enqueue descriptor
+ * @fqid: the id of the frame queue to be enqueued
+ */
+void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid)
+{
+ d->verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
+ d->tgtid = cpu_to_le32(fqid);
+}
+
+/**
+ * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command
+ * @d: the enqueue descriptor
+ * @qdid: the id of the queuing destination to be enqueued
+ * @qd_bin: the queuing destination bin
+ * @qd_prio: the queuing destination priority
+ */
+void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
+ u32 qd_bin, u32 qd_prio)
+{
+ d->verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
+ d->tgtid = cpu_to_le32(qdid);
+ d->qdbin = cpu_to_le16(qd_bin);
+ d->qpri = qd_prio;
+}
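+
+/*
+ * Illustrative enqueue sketch (editor's note, not part of the driver):
+ * build a descriptor targeting a frame queue and push one frame through the
+ * qbman_swp_enqueue() wrapper from qbman-portal.h. 'swp', 'fqid' and 'fd'
+ * are hypothetical.
+ *
+ *	struct qbman_eq_desc ed;
+ *
+ *	qbman_eq_desc_clear(&ed);
+ *	qbman_eq_desc_set_no_orp(&ed, 0);	// rejections go back to the FQ
+ *	qbman_eq_desc_set_fq(&ed, fqid);
+ *	if (qbman_swp_enqueue(swp, &ed, fd) == -EBUSY)
+ *		;	// EQCR full, retry later
+ */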
+
+#define EQAR_IDX(eqar) ((eqar) & 0x7)
+#define EQAR_VB(eqar) ((eqar) & 0x80)
+#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
+
+#define QB_RT_BIT ((u32)0x100)
+/**
+ * qbman_swp_enqueue_direct() - Issue an enqueue command
+ * @s: the software portal used for enqueue
+ * @d: the enqueue descriptor
+ * @fd: the frame descriptor to be enqueued
+ *
+ * Please note that 'fd' should only be NULL if the "action" of the
+ * descriptor is "orp_hole" or "orp_nesn".
+ *
+ * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
+ */
+static
+int qbman_swp_enqueue_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd)
+{
+ int flags = 0;
+ int ret = qbman_swp_enqueue_multiple_direct(s, d, fd, &flags, 1);
+
+ if (ret >= 0)
+ ret = 0;
+ else
+ ret = -EBUSY;
+ return ret;
+}
+
+/**
+ * qbman_swp_enqueue_mem_back() - Issue an enqueue command
+ * @s: the software portal used for enqueue
+ * @d: the enqueue descriptor
+ * @fd: the frame descriptor to be enqueued
+ *
+ * Please note that 'fd' should only be NULL if the "action" of the
+ * descriptor is "orp_hole" or "orp_nesn".
+ *
+ * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
+ */
+static
+int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd)
+{
+ int flags = 0;
+ int ret = qbman_swp_enqueue_multiple_mem_back(s, d, fd, &flags, 1);
+
+ if (ret >= 0)
+ ret = 0;
+ else
+ ret = -EBUSY;
+ return ret;
+}
+
+/**
+ * qbman_swp_enqueue_multiple_direct() - Issue a multi enqueue command
+ * using one enqueue descriptor
+ * @s: the software portal used for enqueue
+ * @d: the enqueue descriptor
+ * @fd: pointer to the table of frame descriptors to be enqueued
+ * @flags: pointer to a table of QBMAN_ENQUEUE_FLAG_DCA flags; not used if NULL
+ * @num_frames: number of frame descriptors to be enqueued
+ *
+ * Return the number of fd enqueued, or a negative error number.
+ */
+static
+int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ uint32_t *flags,
+ int num_frames)
+{
+ uint32_t *p = NULL;
+ const uint32_t *cl = (uint32_t *)d;
+ uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+ int i, num_enqueued = 0;
+
+ spin_lock(&s->access_spinlock);
+ half_mask = (s->eqcr.pi_ci_mask>>1);
+ full_mask = s->eqcr.pi_ci_mask;
+
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI;
+ s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
+ s->eqcr.ci &= full_mask;
+
+ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+ eqcr_ci, s->eqcr.ci);
+ if (!s->eqcr.available) {
+ spin_unlock(&s->access_spinlock);
+ return 0;
+ }
+ }
+
+ eqcr_pi = s->eqcr.pi;
+ num_enqueued = (s->eqcr.available < num_frames) ?
+ s->eqcr.available : num_frames;
+ s->eqcr.available -= num_enqueued;
+ /* Fill in the EQCR ring */
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ /* Skip copying the verb */
+ memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
+ memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
+ &fd[i], sizeof(*fd));
+ eqcr_pi++;
+ }
+
+ dma_wmb();
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
+ struct qbman_eq_desc *eq_desc = (struct qbman_eq_desc *)p;
+
+ eq_desc->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
+ ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
+ }
+ eqcr_pi++;
+ if (!(eqcr_pi & half_mask))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+ }
+
+ /* Flush all the cacheline without load/store in between */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++)
+ eqcr_pi++;
+ s->eqcr.pi = eqcr_pi & full_mask;
+ spin_unlock(&s->access_spinlock);
+
+ return num_enqueued;
+}
+
+/**
+ * qbman_swp_enqueue_multiple_mem_back() - Issue a multi enqueue command
+ * using one enqueue descriptor
+ * @s: the software portal used for enqueue
+ * @d: the enqueue descriptor
+ * @fd: pointer to the table of frame descriptors to be enqueued
+ * @flags: pointer to a table of QBMAN_ENQUEUE_FLAG_DCA flags; not used if NULL
+ * @num_frames: number of frame descriptors to be enqueued
+ *
+ * Return the number of fd enqueued, or a negative error number.
+ */
+static
+int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ uint32_t *flags,
+ int num_frames)
+{
+ uint32_t *p = NULL;
+ const uint32_t *cl = (uint32_t *)(d);
+ uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+ int i, num_enqueued = 0;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&s->access_spinlock, irq_flags);
+
+ half_mask = (s->eqcr.pi_ci_mask>>1);
+ full_mask = s->eqcr.pi_ci_mask;
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
+ s->eqcr.ci &= full_mask;
+ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+ eqcr_ci, s->eqcr.ci);
+ if (!s->eqcr.available) {
+ spin_unlock_irqrestore(&s->access_spinlock, irq_flags);
+ return 0;
+ }
+ }
+
+ eqcr_pi = s->eqcr.pi;
+ num_enqueued = (s->eqcr.available < num_frames) ?
+ s->eqcr.available : num_frames;
+ s->eqcr.available -= num_enqueued;
+ /* Fill in the EQCR ring */
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ /* Skip copying the verb */
+ memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
+ memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
+ &fd[i], sizeof(*fd));
+ eqcr_pi++;
+ }
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
+ struct qbman_eq_desc *eq_desc = (struct qbman_eq_desc *)p;
+
+ eq_desc->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
+ ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
+ }
+ eqcr_pi++;
+ if (!(eqcr_pi & half_mask))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+ }
+ s->eqcr.pi = eqcr_pi & full_mask;
+
+ dma_wmb();
+ qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
+ (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
+ spin_unlock_irqrestore(&s->access_spinlock, irq_flags);
+
+ return num_enqueued;
+}
+
+/**
+ * qbman_swp_enqueue_multiple_desc_direct() - Issue a multi enqueue command
+ * using multiple enqueue descriptor
+ * @s: the software portal used for enqueue
+ * @d: table of minimal enqueue descriptors
+ * @fd: pointer to the table of frame descriptors to be enqueued
+ * @num_frames: number of frame descriptors to be enqueued
+ *
+ * Return the number of fd enqueued, or a negative error number.
+ */
+static
+int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ int num_frames)
+{
+ uint32_t *p;
+ const uint32_t *cl;
+ uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+ int i, num_enqueued = 0;
+
+ half_mask = (s->eqcr.pi_ci_mask>>1);
+ full_mask = s->eqcr.pi_ci_mask;
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI;
+ s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
+ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+ eqcr_ci, s->eqcr.ci);
+ if (!s->eqcr.available)
+ return 0;
+ }
+
+ eqcr_pi = s->eqcr.pi;
+ num_enqueued = (s->eqcr.available < num_frames) ?
+ s->eqcr.available : num_frames;
+ s->eqcr.available -= num_enqueued;
+ /* Fill in the EQCR ring */
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ cl = (uint32_t *)(&d[i]);
+ /* Skip copying the verb */
+ memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
+ memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
+ &fd[i], sizeof(*fd));
+ eqcr_pi++;
+ }
+
+ dma_wmb();
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ cl = (uint32_t *)(&d[i]);
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ eqcr_pi++;
+ if (!(eqcr_pi & half_mask))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+ }
+
+ /* Flush all the cacheline without load/store in between */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++)
+ eqcr_pi++;
+ s->eqcr.pi = eqcr_pi & full_mask;
+
+ return num_enqueued;
+}
+
+/**
+ * qbman_swp_enqueue_multiple_desc_mem_back() - Issue a multi enqueue command
+ * using multiple enqueue descriptor
+ * @s: the software portal used for enqueue
+ * @d: table of minimal enqueue descriptors
+ * @fd: pointer to the table of frame descriptors to be enqueued
+ * @num_frames: number of frame descriptors to be enqueued
+ *
+ * Return the number of fd enqueued, or a negative error number.
+ */
+static
+int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ int num_frames)
+{
+ uint32_t *p;
+ const uint32_t *cl;
+ uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+ int i, num_enqueued = 0;
+
+ half_mask = (s->eqcr.pi_ci_mask>>1);
+ full_mask = s->eqcr.pi_ci_mask;
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
+ s->eqcr.ci &= full_mask;
+ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+ eqcr_ci, s->eqcr.ci);
+ if (!s->eqcr.available)
+ return 0;
+ }
+
+ eqcr_pi = s->eqcr.pi;
+ num_enqueued = (s->eqcr.available < num_frames) ?
+ s->eqcr.available : num_frames;
+ s->eqcr.available -= num_enqueued;
+ /* Fill in the EQCR ring */
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ cl = (uint32_t *)(&d[i]);
+ /* Skip copying the verb */
+ memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
+ memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
+ &fd[i], sizeof(*fd));
+ eqcr_pi++;
+ }
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ cl = (uint32_t *)(&d[i]);
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ eqcr_pi++;
+ if (!(eqcr_pi & half_mask))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+ }
+
+ s->eqcr.pi = eqcr_pi & full_mask;
+
+ dma_wmb();
+ qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
+ (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
+
+ return num_enqueued;
+}
+
+/* Static (push) dequeue */
+
+/**
+ * qbman_swp_push_get() - Get the push dequeue setup
+ * @s: the software portal object
+ * @channel_idx: the channel index to query
+ * @enabled: returned boolean to show whether the push dequeue is enabled
+ * for the given channel
+ */
+void qbman_swp_push_get(struct qbman_swp *s, u8 channel_idx, int *enabled)
+{
+ u16 src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
+
+ WARN_ON(channel_idx > 15);
+ *enabled = !!(src & (1 << channel_idx));
+}
+
+/**
+ * qbman_swp_push_set() - Enable or disable push dequeue
+ * @s: the software portal object
+ * @channel_idx: the channel index (0 to 15)
+ * @enable: enable or disable push dequeue
+ */
+void qbman_swp_push_set(struct qbman_swp *s, u8 channel_idx, int enable)
+{
+ u16 dqsrc;
+
+ WARN_ON(channel_idx > 15);
+ if (enable)
+ s->sdq |= 1 << channel_idx;
+ else
+ s->sdq &= ~(1 << channel_idx);
+
+ /* Read back the complete src map. If no channels are enabled
+ * the SDQCR must be 0 or else QMan will assert errors
+ */
+ dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
+ if (dqsrc != 0)
+ qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, s->sdq);
+ else
+ qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, 0);
+}
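+
+/*
+ * Illustrative sketch (editor's note, not part of the driver): enabling push
+ * dequeue for channel 0 makes QMan deliver dequeue responses for that
+ * channel straight into this portal's DQRR, where they are picked up with
+ * qbman_swp_dqrr_next_ptr()/qbman_swp_dqrr_consume(). 'swp' is hypothetical.
+ *
+ *	qbman_swp_push_set(swp, 0, 1);	// enable
+ *	// ... consume DQRR entries ...
+ *	qbman_swp_push_set(swp, 0, 0);	// disable again
+ */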
+
+#define QB_VDQCR_VERB_DCT_SHIFT 0
+#define QB_VDQCR_VERB_DT_SHIFT 2
+#define QB_VDQCR_VERB_RLS_SHIFT 4
+#define QB_VDQCR_VERB_WAE_SHIFT 5
+
+enum qb_pull_dt_e {
+ qb_pull_dt_channel,
+ qb_pull_dt_workqueue,
+ qb_pull_dt_framequeue
+};
+
+/**
+ * qbman_pull_desc_clear() - Clear the contents of a descriptor to
+ * default/starting state
+ * @d: the pull dequeue descriptor to be cleared
+ */
+void qbman_pull_desc_clear(struct qbman_pull_desc *d)
+{
+ memset(d, 0, sizeof(*d));
+}
+
+/**
+ * qbman_pull_desc_set_storage() - Set the pull dequeue storage
+ * @d: the pull dequeue descriptor to be set
+ * @storage: the pointer of the memory to store the dequeue result
+ * @storage_phys: the physical address of the storage memory
+ * @stash: to indicate whether write allocate is enabled
+ *
+ * If not called, or if called with 'storage' as NULL, then pull dequeues
+ * will produce results to the DQRR. If 'storage' is non-NULL, then results are
+ * produced to the given memory location (using the DMA address which
+ * the caller provides in 'storage_phys'), and 'stash' controls whether or not
+ * those writes to main-memory express a cache-warming attribute.
+ */
+void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
+ struct dpaa2_dq *storage,
+ dma_addr_t storage_phys,
+ int stash)
+{
+ /* save the virtual address */
+ d->rsp_addr_virt = (u64)(uintptr_t)storage;
+
+ if (!storage) {
+ d->verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
+ return;
+ }
+ d->verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
+ if (stash)
+ d->verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
+ else
+ d->verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);
+
+ d->rsp_addr = cpu_to_le64(storage_phys);
+}
+
+/**
+ * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued
+ * @d: the pull dequeue descriptor to be set
+ * @numframes: number of frames to be set, must be between 1 and 16, inclusive
+ */
+void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes)
+{
+ d->numf = numframes - 1;
+}
+
+/*
+ * Exactly one of the following descriptor "actions" should be set. (Calling any
+ * one of these will replace the effect of any prior call to one of these.)
+ * - pull dequeue from the given frame queue (FQ)
+ * - pull dequeue from any FQ in the given work queue (WQ)
+ * - pull dequeue from any FQ in any WQ in the given channel
+ */
+
+/**
+ * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues
+ * @d: the pull dequeue descriptor to be set
+ * @fqid: the frame queue index of the given FQ
+ */
+void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid)
+{
+ d->verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
+ d->verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
+ d->dq_src = cpu_to_le32(fqid);
+}
+
+/**
+ * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues
+ * @d: the pull dequeue descriptor to be set
+ * @wqid: composed of channel id and wqid within the channel
+ * @dct: the dequeue command type
+ */
+void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
+ enum qbman_pull_type_e dct)
+{
+ d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
+ d->verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
+ d->dq_src = cpu_to_le32(wqid);
+}
+
+/**
+ * qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
+ * dequeues
+ * @d: the pull dequeue descriptor to be set
+ * @chid: the channel id to be dequeued
+ * @dct: the dequeue command type
+ */
+void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
+ enum qbman_pull_type_e dct)
+{
+ d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
+ d->verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
+ d->dq_src = cpu_to_le32(chid);
+}
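+
+/*
+ * Illustrative pull (volatile) dequeue sketch (editor's note, not part of
+ * the driver): compose a descriptor that pulls up to 16 frames from one FQ
+ * into caller-provided storage, then issue it through the portal. 'swp',
+ * 'store', 'store_phys' and 'fqid' are hypothetical; 'store' must be a
+ * DMA-able array of 16 struct dpaa2_dq entries.
+ *
+ *	struct qbman_pull_desc pd;
+ *
+ *	qbman_pull_desc_clear(&pd);
+ *	qbman_pull_desc_set_numframes(&pd, 16);
+ *	qbman_pull_desc_set_storage(&pd, store, store_phys, 1);
+ *	qbman_pull_desc_set_fq(&pd, fqid);
+ *	if (qbman_swp_pull_ptr(swp, &pd) == -EBUSY)
+ *		;	// a previous volatile dequeue is still in flight
+ */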
+
+/**
+ * qbman_swp_pull_direct() - Issue the pull dequeue command
+ * @s: the software portal object
+ * @d: the software portal descriptor which has been configured with
+ * the set of qbman_pull_desc_set_*() calls
+ *
+ * Return 0 for success, and -EBUSY if the software portal is not ready
+ * to do pull dequeue.
+ */
+static
+int qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d)
+{
+ struct qbman_pull_desc *p;
+
+ if (!atomic_dec_and_test(&s->vdq.available)) {
+ atomic_inc(&s->vdq.available);
+ return -EBUSY;
+ }
+ s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
+ if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
+ else
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
+ p->numf = d->numf;
+ p->tok = QMAN_DQ_TOKEN_VALID;
+ p->dq_src = d->dq_src;
+ p->rsp_addr = d->rsp_addr;
+ p->rsp_addr_virt = d->rsp_addr_virt;
+ dma_wmb();
+ /* Set the verb byte, have to substitute in the valid-bit */
+ p->verb = d->verb | s->vdq.valid_bit;
+ s->vdq.valid_bit ^= QB_VALID_BIT;
+
+ return 0;
+}
+
+/**
+ * qbman_swp_pull_mem_back() - Issue the pull dequeue command
+ * @s: the software portal object
+ * @d: the software portal descriptor which has been configured with
+ * the set of qbman_pull_desc_set_*() calls
+ *
+ * Return 0 for success, and -EBUSY if the software portal is not ready
+ * to do pull dequeue.
+ */
+static
+int qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d)
+{
+ struct qbman_pull_desc *p;
+
+ if (!atomic_dec_and_test(&s->vdq.available)) {
+ atomic_inc(&s->vdq.available);
+ return -EBUSY;
+ }
+ s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
+ if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
+ else
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
+ p->numf = d->numf;
+ p->tok = QMAN_DQ_TOKEN_VALID;
+ p->dq_src = d->dq_src;
+ p->rsp_addr = d->rsp_addr;
+ p->rsp_addr_virt = d->rsp_addr_virt;
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ p->verb = d->verb | s->vdq.valid_bit;
+ s->vdq.valid_bit ^= QB_VALID_BIT;
+ dma_wmb();
+ qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
+
+ return 0;
+}
+
+#define QMAN_DQRR_PI_MASK 0xf
+
+/**
+ * qbman_swp_dqrr_next_direct() - Get a valid DQRR entry
+ * @s: the software portal object
+ *
+ * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
+ * only once, so repeated calls can return a sequence of DQRR entries, without
+ * requiring that they be consumed immediately or in any particular order.
+ */
+const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
+{
+ u32 verb;
+ u32 response_verb;
+ u32 flags;
+ struct dpaa2_dq *p;
+
+ /* Before using valid-bit to detect if something is there, we have to
+ * handle the case of the DQRR reset bug...
+ */
+ if (unlikely(s->dqrr.reset_bug)) {
+ /*
+ * We pick up new entries by cache-inhibited producer index,
+ * which means that a non-coherent mapping would require us to
+ * invalidate and read *only* once that PI has indicated that
+ * there's an entry here. The first trip around the DQRR ring
+ * will be much less efficient than all subsequent trips around
+ * it...
+ */
+ u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
+ QMAN_DQRR_PI_MASK;
+
+ /* there are new entries if pi != next_idx */
+ if (pi == s->dqrr.next_idx)
+ return NULL;
+
+ /*
+ * if next_idx is/was the last ring index, and 'pi' is
+ * different, we can disable the workaround as all the ring
+ * entries have now been DMA'd to so valid-bit checking is
+ * repaired. Note: this logic needs to be based on next_idx
+ * (which increments one at a time), rather than on pi (which
+ * can burst and wrap-around between our snapshots of it).
+ */
+ if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
+ pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
+ s->dqrr.next_idx, pi);
+ s->dqrr.reset_bug = 0;
+ }
+ prefetch(qbman_get_cmd(s,
+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+ }
+
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
+ verb = p->dq.verb;
+
+ /*
+ * If the valid-bit isn't of the expected polarity, nothing there. Note,
+ * in the DQRR reset bug workaround, we shouldn't need to skip this
+ * check, because we've already determined that a new entry is available
+ * and we've invalidated the cacheline before reading it, so the
+ * valid-bit behaviour is repaired and should tell us what we already
+ * knew from reading PI.
+ */
+ if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
+ prefetch(qbman_get_cmd(s,
+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+ return NULL;
+ }
+ /*
+ * There's something there. Move "next_idx" attention to the next ring
+ * entry (and prefetch it) before returning what we found.
+ */
+ s->dqrr.next_idx++;
+ s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
+ if (!s->dqrr.next_idx)
+ s->dqrr.valid_bit ^= QB_VALID_BIT;
+
+ /*
+ * If this is the final response to a volatile dequeue command
+ * indicate that the vdq is available
+ */
+ flags = p->dq.stat;
+ response_verb = verb & QBMAN_RESULT_MASK;
+ if ((response_verb == QBMAN_RESULT_DQ) &&
+ (flags & DPAA2_DQ_STAT_VOLATILE) &&
+ (flags & DPAA2_DQ_STAT_EXPIRED))
+ atomic_inc(&s->vdq.available);
+
+ prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+
+ return p;
+}
+
+/**
+ * qbman_swp_dqrr_next_mem_back() - Get a valid DQRR entry
+ * @s: the software portal object
+ *
+ * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
+ * only once, so repeated calls can return a sequence of DQRR entries, without
+ * requiring that they be consumed immediately or in any particular order.
+ */
+const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
+{
+ u32 verb;
+ u32 response_verb;
+ u32 flags;
+ struct dpaa2_dq *p;
+
+ /* Before using valid-bit to detect if something is there, we have to
+ * handle the case of the DQRR reset bug...
+ */
+ if (unlikely(s->dqrr.reset_bug)) {
+ /*
+ * We pick up new entries by cache-inhibited producer index,
+ * which means that a non-coherent mapping would require us to
+ * invalidate and read *only* once that PI has indicated that
+ * there's an entry here. The first trip around the DQRR ring
+ * will be much less efficient than all subsequent trips around
+ * it...
+ */
+ u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
+ QMAN_DQRR_PI_MASK;
+
+ /* there are new entries if pi != next_idx */
+ if (pi == s->dqrr.next_idx)
+ return NULL;
+
+ /*
+ * if next_idx is/was the last ring index, and 'pi' is
+ * different, we can disable the workaround as all the ring
+ * entries have now been DMA'd to so valid-bit checking is
+ * repaired. Note: this logic needs to be based on next_idx
+ * (which increments one at a time), rather than on pi (which
+ * can burst and wrap-around between our snapshots of it).
+ */
+ if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
+ pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
+ s->dqrr.next_idx, pi);
+ s->dqrr.reset_bug = 0;
+ }
+ prefetch(qbman_get_cmd(s,
+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+ }
+
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
+ verb = p->dq.verb;
+
+ /*
+ * If the valid-bit isn't of the expected polarity, nothing there. Note,
+ * in the DQRR reset bug workaround, we shouldn't need to skip this
+ * check, because we've already determined that a new entry is available
+ * and we've invalidated the cacheline before reading it, so the
+ * valid-bit behaviour is repaired and should tell us what we already
+ * knew from reading PI.
+ */
+ if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
+ prefetch(qbman_get_cmd(s,
+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+ return NULL;
+ }
+ /*
+ * There's something there. Move "next_idx" attention to the next ring
+ * entry (and prefetch it) before returning what we found.
+ */
+ s->dqrr.next_idx++;
+ s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
+ if (!s->dqrr.next_idx)
+ s->dqrr.valid_bit ^= QB_VALID_BIT;
+
+ /*
+ * If this is the final response to a volatile dequeue command
+ * indicate that the vdq is available
+ */
+ flags = p->dq.stat;
+ response_verb = verb & QBMAN_RESULT_MASK;
+ if ((response_verb == QBMAN_RESULT_DQ) &&
+ (flags & DPAA2_DQ_STAT_VOLATILE) &&
+ (flags & DPAA2_DQ_STAT_EXPIRED))
+ atomic_inc(&s->vdq.available);
+
+ prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+
+ return p;
+}
+
+/**
+ * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from
+ * qbman_swp_dqrr_next().
+ * @s: the software portal object
+ * @dq: the DQRR entry to be consumed
+ */
+void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq)
+{
+ qbman_write_register(s, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
+}
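+
+/*
+ * Illustrative DQRR polling sketch (editor's note, not part of the driver):
+ * drain whatever has accumulated in the DQRR, handling only real dequeue
+ * responses. Extracting the frame itself is done with the dpaa2_dq_*()
+ * accessors from the dpaa2 headers (assumed available to the caller).
+ * 'swp' is hypothetical.
+ *
+ *	const struct dpaa2_dq *dq;
+ *
+ *	while ((dq = qbman_swp_dqrr_next_ptr(swp))) {
+ *		if (qbman_result_is_DQ(dq))
+ *			;	// process the frame carried by this entry
+ *		qbman_swp_dqrr_consume(swp, dq);
+ *	}
+ */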
+
+/**
+ * qbman_result_has_new_result() - Check and get the dequeue response from the
+ * dq storage memory set in the pull dequeue command
+ * @s: the software portal object
+ * @dq: the dequeue result read from the memory
+ *
+ * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
+ * dequeue result.
+ *
+ * Only used for user-provided storage of dequeue results, not DQRR. For
+ * efficiency purposes, the driver will perform any required endianness
+ * conversion to ensure that the user's dequeue result storage is in host-endian
+ * format. As such, once the user has called qbman_result_has_new_result() and
+ * been returned a valid dequeue result, they should not call it again on
+ * the same memory location (except of course if another dequeue command has
+ * been executed to produce a new result to that location).
+ */
+int qbman_result_has_new_result(struct qbman_swp *s, const struct dpaa2_dq *dq)
+{
+ if (dq->dq.tok != QMAN_DQ_TOKEN_VALID)
+ return 0;
+
+ /*
+ * Set token to be 0 so we will detect change back to 1
+ * next time the looping is traversed. Const is cast away here
+ * as we want users to treat the dequeue responses as read only.
+ */
+ ((struct dpaa2_dq *)dq)->dq.tok = 0;
+
+ /*
+ * Determine whether VDQCR is available based on whether the
+ * current result is sitting in the first storage location of
+ * the busy command.
+ */
+ if (s->vdq.storage == dq) {
+ s->vdq.storage = NULL;
+ atomic_inc(&s->vdq.available);
+ }
+
+ return 1;
+}
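+
+/*
+ * Illustrative sketch (editor's note, not part of the driver): after a pull
+ * dequeue into user-provided storage, the caller polls the first storage
+ * entry until the token written back by QMan marks it valid. 'swp' and
+ * 'store' are the hypothetical names from the pull dequeue sketch earlier
+ * in this file.
+ *
+ *	while (!qbman_result_has_new_result(swp, &store[0]))
+ *		cpu_relax();
+ *	// store[0] now holds a valid dequeue result
+ */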
+
+/**
+ * qbman_release_desc_clear() - Clear the contents of a descriptor to
+ * default/starting state.
+ * @d: the release descriptor to be cleared
+ */
+void qbman_release_desc_clear(struct qbman_release_desc *d)
+{
+ memset(d, 0, sizeof(*d));
+ d->verb = 1 << 5; /* Release Command Valid */
+}
+
+/**
+ * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
+ * @d: the release descriptor to be set
+ * @bpid: the bpid value to be set
+ */
+void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid)
+{
+ d->bpid = cpu_to_le16(bpid);
+}
+
+/**
+ * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
+ * interrupt source should be asserted after the release command is completed.
+ * @d: the release descriptor to be set
+ * @enable: enable (1) or disable (0) value
+ */
+void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
+{
+ if (enable)
+ d->verb |= 1 << 6;
+ else
+ d->verb &= ~(1 << 6);
+}
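+
+/*
+ * Illustrative buffer release sketch (editor's note, not part of the
+ * driver): return up to seven buffer addresses to a buffer pool through the
+ * qbman_swp_release_ptr() hook. 'swp', 'bpid' and 'bufs' are hypothetical.
+ *
+ *	struct qbman_release_desc rd;
+ *	u64 bufs[7];
+ *
+ *	qbman_release_desc_clear(&rd);
+ *	qbman_release_desc_set_bpid(&rd, bpid);
+ *	while (qbman_swp_release_ptr(swp, &rd, bufs, 7) == -EBUSY)
+ *		cpu_relax();	// RCR slot not yet available
+ */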
+
+#define RAR_IDX(rar) ((rar) & 0x7)
+#define RAR_VB(rar) ((rar) & 0x80)
+#define RAR_SUCCESS(rar) ((rar) & 0x100)
+
+/**
+ * qbman_swp_release_direct() - Issue a buffer release command
+ * @s: the software portal object
+ * @d: the release descriptor
+ * @buffers: a pointer pointing to the buffer address to be released
+ * @num_buffers: number of buffers to be released, must be less than 8
+ *
+ * Return 0 for success, -EBUSY if the release command ring is not ready.
+ */
+int qbman_swp_release_direct(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const u64 *buffers, unsigned int num_buffers)
+{
+ int i;
+ struct qbman_release_desc *p;
+ u32 rar;
+
+ if (!num_buffers || (num_buffers > 7))
+ return -EINVAL;
+
+ rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
+ if (!RAR_SUCCESS(rar))
+ return -EBUSY;
+
+ /* Start the release command */
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
+
+ /* Copy the caller's buffer pointers to the command */
+ for (i = 0; i < num_buffers; i++)
+ p->buf[i] = cpu_to_le64(buffers[i]);
+ p->bpid = d->bpid;
+
+ /*
+ * Set the verb byte, have to substitute in the valid-bit
+ * and the number of buffers.
+ */
+ dma_wmb();
+ p->verb = d->verb | RAR_VB(rar) | num_buffers;
+
+ return 0;
+}
+
+/**
+ * qbman_swp_release_mem_back() - Issue a buffer release command
+ * @s: the software portal object
+ * @d: the release descriptor
+ * @buffers: a pointer pointing to the buffer address to be released
+ * @num_buffers: number of buffers to be released, must be less than 8
+ *
+ * Return 0 for success, -EBUSY if the release command ring is not ready.
+ */
+int qbman_swp_release_mem_back(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const u64 *buffers, unsigned int num_buffers)
+{
+ int i;
+ struct qbman_release_desc *p;
+ u32 rar;
+
+ if (!num_buffers || (num_buffers > 7))
+ return -EINVAL;
+
+ rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
+ if (!RAR_SUCCESS(rar))
+ return -EBUSY;
+
+ /* Start the release command */
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
+
+ /* Copy the caller's buffer pointers to the command */
+ for (i = 0; i < num_buffers; i++)
+ p->buf[i] = cpu_to_le64(buffers[i]);
+ p->bpid = d->bpid;
+
+ p->verb = d->verb | RAR_VB(rar) | num_buffers;
+ dma_wmb();
+ qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT +
+ RAR_IDX(rar) * 4, QMAN_RT_MODE);
+
+ return 0;
+}
+
+struct qbman_acquire_desc {
+ u8 verb;
+ u8 reserved;
+ __le16 bpid;
+ u8 num;
+ u8 reserved2[59];
+};
+
+struct qbman_acquire_rslt {
+ u8 verb;
+ u8 rslt;
+ __le16 reserved;
+ u8 num;
+ u8 reserved2[3];
+ __le64 buf[7];
+};
+
+/**
+ * qbman_swp_acquire() - Issue a buffer acquire command
+ * @s: the software portal object
+ * @bpid: the buffer pool index
+ * @buffers: a pointer pointing to the acquired buffer addresses
+ * @num_buffers: number of buffers to be acquired, must be less than 8
+ *
+ * Return 0 for success, or negative error code if the acquire command
+ * fails.
+ */
+int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
+ unsigned int num_buffers)
+{
+ struct qbman_acquire_desc *p;
+ struct qbman_acquire_rslt *r;
+ int i;
+
+ if (!num_buffers || (num_buffers > 7))
+ return -EINVAL;
+
+ /* Start the management command */
+ p = qbman_swp_mc_start(s);
+
+ if (!p)
+ return -EBUSY;
+
+ /* Encode the caller-provided attributes */
+ p->bpid = cpu_to_le16(bpid);
+ p->num = num_buffers;
+
+ /* Complete the management command */
+ r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
+ if (unlikely(!r)) {
+ pr_err("qbman: acquire from BPID %d failed, no response\n",
+ bpid);
+ return -EIO;
+ }
+
+ /* Decode the outcome */
+ WARN_ON((r->verb & 0x7f) != QBMAN_MC_ACQUIRE);
+
+ /* Determine success or failure */
+ if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
+ pr_err("qbman: acquire from BPID 0x%x failed, code=0x%02x\n",
+ bpid, r->rslt);
+ return -EIO;
+ }
+
+ WARN_ON(r->num > num_buffers);
+
+ /* Copy the acquired buffers to the caller's array */
+ for (i = 0; i < r->num; i++)
+ buffers[i] = le64_to_cpu(r->buf[i]);
+
+ return (int)r->num;
+}
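+
+/*
+ * Illustrative buffer acquire sketch (editor's note, not part of the
+ * driver): pull up to seven buffers back out of a pool; the pool may hand
+ * back fewer than requested, so the return value must be checked. 'swp' and
+ * 'bpid' are hypothetical.
+ *
+ *	u64 bufs[7];
+ *	int n = qbman_swp_acquire(swp, bpid, bufs, 7);
+ *
+ *	if (n < 0)
+ *		return n;	// command failed
+ *	// only bufs[0..n-1] are valid
+ */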
+
+struct qbman_alt_fq_state_desc {
+ u8 verb;
+ u8 reserved[3];
+ __le32 fqid;
+ u8 reserved2[56];
+};
+
+struct qbman_alt_fq_state_rslt {
+ u8 verb;
+ u8 rslt;
+ u8 reserved[62];
+};
+
+#define ALT_FQ_FQID_MASK 0x00FFFFFF
+
+int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
+ u8 alt_fq_verb)
+{
+ struct qbman_alt_fq_state_desc *p;
+ struct qbman_alt_fq_state_rslt *r;
+
+ /* Start the management command */
+ p = qbman_swp_mc_start(s);
+ if (!p)
+ return -EBUSY;
+
+ p->fqid = cpu_to_le32(fqid & ALT_FQ_FQID_MASK);
+
+ /* Complete the management command */
+ r = qbman_swp_mc_complete(s, p, alt_fq_verb);
+ if (unlikely(!r)) {
+ pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
+ alt_fq_verb);
+ return -EIO;
+ }
+
+ /* Decode the outcome */
+ WARN_ON((r->verb & QBMAN_RESULT_MASK) != alt_fq_verb);
+
+ /* Determine success or failure */
+ if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
+ pr_err("qbman: ALT FQID %d failed: verb = 0x%08x code = 0x%02x\n",
+ fqid, r->verb, r->rslt);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+struct qbman_cdan_ctrl_desc {
+ u8 verb;
+ u8 reserved;
+ __le16 ch;
+ u8 we;
+ u8 ctrl;
+ __le16 reserved2;
+ __le64 cdan_ctx;
+ u8 reserved3[48];
+
+};
+
+struct qbman_cdan_ctrl_rslt {
+ u8 verb;
+ u8 rslt;
+ __le16 ch;
+ u8 reserved[60];
+};
+
+int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
+ u8 we_mask, u8 cdan_en,
+ u64 ctx)
+{
+ struct qbman_cdan_ctrl_desc *p = NULL;
+ struct qbman_cdan_ctrl_rslt *r = NULL;
+
+ /* Start the management command */
+ p = qbman_swp_mc_start(s);
+ if (!p)
+ return -EBUSY;
+
+ /* Encode the caller-provided attributes */
+ p->ch = cpu_to_le16(channelid);
+ p->we = we_mask;
+ if (cdan_en)
+ p->ctrl = 1;
+ else
+ p->ctrl = 0;
+ p->cdan_ctx = cpu_to_le64(ctx);
+
+ /* Complete the management command */
+ r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
+ if (unlikely(!r)) {
+ pr_err("qbman: wqchan config failed, no response\n");
+ return -EIO;
+ }
+
+ WARN_ON((r->verb & 0x7f) != QBMAN_WQCHAN_CONFIGURE);
+
+ /* Determine success or failure */
+ if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
+ pr_err("qbman: CDAN cQID %d failed: code = 0x%02x\n",
+ channelid, r->rslt);
+ return -EIO;
+ }
+
+ return 0;
+}
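+
+/*
+ * Illustrative CDAN setup sketch (editor's note, not part of the driver):
+ * arm a channel for data-availability notifications, writing both the
+ * enable bit and the 64-bit context that QMan echoes back in the CDAN
+ * message. The write-enable codes come from qbman-portal.h; 'swp', 'ch' and
+ * 'ctx' are hypothetical.
+ *
+ *	if (qbman_swp_CDAN_set(swp, ch,
+ *			       CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
+ *			       1, ctx))
+ *		;	// management command failed
+ */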
+
+#define QBMAN_RESPONSE_VERB_MASK 0x7f
+#define QBMAN_FQ_QUERY_NP 0x45
+#define QBMAN_BP_QUERY 0x32
+
+struct qbman_fq_query_desc {
+ u8 verb;
+ u8 reserved[3];
+ __le32 fqid;
+ u8 reserved2[56];
+};
+
+int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
+ struct qbman_fq_query_np_rslt *r)
+{
+ struct qbman_fq_query_desc *p;
+ void *resp;
+
+ p = (struct qbman_fq_query_desc *)qbman_swp_mc_start(s);
+ if (!p)
+ return -EBUSY;
+
+ /* FQID is a 24 bit value */
+ p->fqid = cpu_to_le32(fqid & 0x00FFFFFF);
+ resp = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY_NP);
+ if (!resp) {
+ pr_err("qbman: Query FQID %d NP fields failed, no response\n",
+ fqid);
+ return -EIO;
+ }
+ *r = *(struct qbman_fq_query_np_rslt *)resp;
+ /* Decode the outcome */
+ WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_FQ_QUERY_NP);
+
+ /* Determine success or failure */
+ if (r->rslt != QBMAN_MC_RSLT_OK) {
+ pr_err("Query NP fields of FQID 0x%x failed, code=0x%02x\n",
+ p->fqid, r->rslt);
+ return -EIO;
+ }
+
+ return 0;
+}
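+
+/*
+ * Illustrative FQ query sketch (editor's note, not part of the driver):
+ * read the current frame and byte counts of a frame queue. 'swp' and 'fqid'
+ * are hypothetical.
+ *
+ *	struct qbman_fq_query_np_rslt state;
+ *
+ *	if (!qbman_fq_query_state(swp, fqid, &state))
+ *		pr_debug("fq %u: %u frames, %u bytes\n", fqid,
+ *			 qbman_fq_state_frame_count(&state),
+ *			 qbman_fq_state_byte_count(&state));
+ */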
+
+u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r)
+{
+ return (le32_to_cpu(r->frm_cnt) & 0x00FFFFFF);
+}
+
+u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r)
+{
+ return le32_to_cpu(r->byte_cnt);
+}
+
+struct qbman_bp_query_desc {
+ u8 verb;
+ u8 reserved;
+ __le16 bpid;
+ u8 reserved2[60];
+};
+
+int qbman_bp_query(struct qbman_swp *s, u16 bpid,
+ struct qbman_bp_query_rslt *r)
+{
+ struct qbman_bp_query_desc *p;
+ void *resp;
+
+ p = (struct qbman_bp_query_desc *)qbman_swp_mc_start(s);
+ if (!p)
+ return -EBUSY;
+
+ p->bpid = cpu_to_le16(bpid);
+ resp = qbman_swp_mc_complete(s, p, QBMAN_BP_QUERY);
+ if (!resp) {
+ pr_err("qbman: Query BPID %d fields failed, no response\n",
+ bpid);
+ return -EIO;
+ }
+ *r = *(struct qbman_bp_query_rslt *)resp;
+ /* Decode the outcome */
+ WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_BP_QUERY);
+
+ /* Determine success or failure */
+ if (r->rslt != QBMAN_MC_RSLT_OK) {
+ pr_err("Query fields of BPID 0x%x failed, code=0x%02x\n",
+ bpid, r->rslt);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a)
+{
+ return le32_to_cpu(a->fill);
+}
+
+/**
+ * qbman_swp_set_irq_coalescing() - Set new IRQ coalescing values
+ * @p: the software portal object
+ * @irq_threshold: interrupt threshold
+ * @irq_holdoff: interrupt holdoff (timeout) period in us
+ *
+ * Return 0 for success, or negative error code on error.
+ */
+int qbman_swp_set_irq_coalescing(struct qbman_swp *p, u32 irq_threshold,
+ u32 irq_holdoff)
+{
+ u32 itp, max_holdoff;
+
+ /* Convert the irq_holdoff value from microseconds to increments of 256
+ * QBMAN clock cycles. This depends on the QBMAN internal frequency.
+ */
+ itp = (irq_holdoff * 1000) / p->desc->qman_256_cycles_per_ns;
+ if (itp > 4096) {
+ max_holdoff = (p->desc->qman_256_cycles_per_ns * 4096) / 1000;
+ pr_err("irq_holdoff must be <= %uus\n", max_holdoff);
+ return -EINVAL;
+ }
+
+ if (irq_threshold >= p->dqrr.dqrr_size) {
+ pr_err("irq_threshold must be < %u\n", p->dqrr.dqrr_size);
+ return -EINVAL;
+ }
+
+ p->irq_threshold = irq_threshold;
+ p->irq_holdoff = irq_holdoff;
+
+ qbman_write_register(p, QBMAN_CINH_SWP_DQRR_ITR, irq_threshold);
+ qbman_write_register(p, QBMAN_CINH_SWP_ITPR, itp);
+
+ return 0;
+}
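+
+/*
+ * Illustrative sketch (editor's note, not part of the driver): ask for an
+ * interrupt once more than 3 DQRR entries are pending, or after roughly
+ * 100 us, whichever comes first. 'swp' is hypothetical.
+ *
+ *	if (qbman_swp_set_irq_coalescing(swp, 3, 100))
+ *		;	// threshold or holdoff out of range for this portal
+ */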
+
+/**
+ * qbman_swp_get_irq_coalescing() - Get the current IRQ coalescing parameters
+ * @p: the software portal object
+ * @irq_threshold: interrupt threshold (an IRQ is generated when there are more
+ * DQRR entries in the portal than the threshold)
+ * @irq_holdoff: interrupt holdoff (timeout) period in us
+ */
+void qbman_swp_get_irq_coalescing(struct qbman_swp *p, u32 *irq_threshold,
+ u32 *irq_holdoff)
+{
+ if (irq_threshold)
+ *irq_threshold = p->irq_threshold;
+ if (irq_holdoff)
+ *irq_holdoff = p->irq_holdoff;
+}
diff --git a/drivers/soc/fsl/dpio/qbman-portal.h b/drivers/soc/fsl/dpio/qbman-portal.h
new file mode 100644
index 0000000000..b23883dd27
--- /dev/null
+++ b/drivers/soc/fsl/dpio/qbman-portal.h
@@ -0,0 +1,664 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Copyright 2016-2019 NXP
+ *
+ */
+#ifndef __FSL_QBMAN_PORTAL_H
+#define __FSL_QBMAN_PORTAL_H
+
+#include <soc/fsl/dpaa2-fd.h>
+
+#define QMAN_REV_4000 0x04000000
+#define QMAN_REV_4100 0x04010000
+#define QMAN_REV_4101 0x04010001
+#define QMAN_REV_5000 0x05000000
+
+#define QMAN_REV_MASK 0xffff0000
+
+struct dpaa2_dq;
+struct qbman_swp;
+
+/* qbman software portal descriptor structure */
+struct qbman_swp_desc {
+ void *cena_bar; /* Cache-enabled portal base address */
+ void __iomem *cinh_bar; /* Cache-inhibited portal base address */
+ u32 qman_version;
+ u32 qman_clk;
+ u32 qman_256_cycles_per_ns;
+};
+
+#define QBMAN_SWP_INTERRUPT_EQRI 0x01
+#define QBMAN_SWP_INTERRUPT_EQDI 0x02
+#define QBMAN_SWP_INTERRUPT_DQRI 0x04
+#define QBMAN_SWP_INTERRUPT_RCRI 0x08
+#define QBMAN_SWP_INTERRUPT_RCDI 0x10
+#define QBMAN_SWP_INTERRUPT_VDCI 0x20
+
+/* the structure for pull dequeue descriptor */
+struct qbman_pull_desc {
+ u8 verb;
+ u8 numf;
+ u8 tok;
+ u8 reserved;
+ __le32 dq_src;
+ __le64 rsp_addr;
+ u64 rsp_addr_virt;
+ u8 padding[40];
+};
+
+enum qbman_pull_type_e {
+ /* dequeue with priority precedence, respect intra-class scheduling */
+ qbman_pull_type_prio = 1,
+ /* dequeue with active FQ precedence, respect ICS */
+ qbman_pull_type_active,
+ /* dequeue with active FQ precedence, no ICS */
+ qbman_pull_type_active_noics
+};
+
+/* Definitions for parsing dequeue entries */
+#define QBMAN_RESULT_MASK 0x7f
+#define QBMAN_RESULT_DQ 0x60
+#define QBMAN_RESULT_FQRN 0x21
+#define QBMAN_RESULT_FQRNI 0x22
+#define QBMAN_RESULT_FQPN 0x24
+#define QBMAN_RESULT_FQDAN 0x25
+#define QBMAN_RESULT_CDAN 0x26
+#define QBMAN_RESULT_CSCN_MEM 0x27
+#define QBMAN_RESULT_CGCU 0x28
+#define QBMAN_RESULT_BPSCN 0x29
+#define QBMAN_RESULT_CSCN_WQ 0x2a
+
+/* QBMan FQ management command codes */
+#define QBMAN_FQ_SCHEDULE 0x48
+#define QBMAN_FQ_FORCE 0x49
+#define QBMAN_FQ_XON 0x4d
+#define QBMAN_FQ_XOFF 0x4e
+
+/* structure of enqueue descriptor */
+struct qbman_eq_desc {
+ u8 verb;
+ u8 dca;
+ __le16 seqnum;
+ __le16 orpid;
+ __le16 reserved1;
+ __le32 tgtid;
+ __le32 tag;
+ __le16 qdbin;
+ u8 qpri;
+ u8 reserved[3];
+ u8 wae;
+ u8 rspid;
+ __le64 rsp_addr;
+};
+
+struct qbman_eq_desc_with_fd {
+ struct qbman_eq_desc desc;
+ u8 fd[32];
+};
+
+/* buffer release descriptor */
+struct qbman_release_desc {
+ u8 verb;
+ u8 reserved;
+ __le16 bpid;
+ __le32 reserved2;
+ __le64 buf[7];
+};
+
+/* Management command result codes */
+#define QBMAN_MC_RSLT_OK 0xf0
+
+#define CODE_CDAN_WE_EN 0x1
+#define CODE_CDAN_WE_CTX 0x4
+
+/* portal data structure */
+struct qbman_swp {
+ const struct qbman_swp_desc *desc;
+ void *addr_cena;
+ void __iomem *addr_cinh;
+
+ /* Management commands */
+ struct {
+ u32 valid_bit; /* 0x00 or 0x80 */
+ } mc;
+
+ /* Management response */
+ struct {
+ u32 valid_bit; /* 0x00 or 0x80 */
+ } mr;
+
+ /* Push dequeues */
+ u32 sdq;
+
+ /* Volatile dequeues */
+ struct {
+ atomic_t available; /* indicates if a command can be sent */
+ u32 valid_bit; /* 0x00 or 0x80 */
+ struct dpaa2_dq *storage; /* NULL if DQRR */
+ } vdq;
+
+ /* DQRR */
+ struct {
+ u32 next_idx;
+ u32 valid_bit;
+ u8 dqrr_size;
+ int reset_bug; /* indicates dqrr reset workaround is needed */
+ } dqrr;
+
+ struct {
+ u32 pi;
+ u32 pi_vb;
+ u32 pi_ring_size;
+ u32 pi_ci_mask;
+ u32 ci;
+ int available;
+ u32 pend;
+ u32 no_pfdr;
+ } eqcr;
+
+ spinlock_t access_spinlock;
+
+ /* Interrupt coalescing */
+ u32 irq_threshold;
+ u32 irq_holdoff;
+ int use_adaptive_rx_coalesce;
+};
+
+/* Function pointers */
+extern
+int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd);
+extern
+int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ uint32_t *flags,
+ int num_frames);
+extern
+int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ int num_frames);
+extern
+int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d);
+extern
+const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s);
+extern
+int (*qbman_swp_release_ptr)(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const u64 *buffers,
+ unsigned int num_buffers);
+
+/* Functions */
+struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
+void qbman_swp_finish(struct qbman_swp *p);
+u32 qbman_swp_interrupt_read_status(struct qbman_swp *p);
+void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask);
+u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p);
+void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask);
+int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);
+void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);
+
+void qbman_swp_push_get(struct qbman_swp *p, u8 channel_idx, int *enabled);
+void qbman_swp_push_set(struct qbman_swp *p, u8 channel_idx, int enable);
+
+void qbman_pull_desc_clear(struct qbman_pull_desc *d);
+void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
+ struct dpaa2_dq *storage,
+ dma_addr_t storage_phys,
+ int stash);
+void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes);
+void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid);
+void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
+ enum qbman_pull_type_e dct);
+void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
+ enum qbman_pull_type_e dct);
+
+void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq);
+
+int qbman_result_has_new_result(struct qbman_swp *p, const struct dpaa2_dq *dq);
+
+void qbman_eq_desc_clear(struct qbman_eq_desc *d);
+void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
+void qbman_eq_desc_set_token(struct qbman_eq_desc *d, u8 token);
+void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid);
+void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
+ u32 qd_bin, u32 qd_prio);
+
+
+void qbman_release_desc_clear(struct qbman_release_desc *d);
+void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid);
+void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);
+
+int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
+ unsigned int num_buffers);
+int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
+ u8 alt_fq_verb);
+int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
+ u8 we_mask, u8 cdan_en,
+ u64 ctx);
+
+void *qbman_swp_mc_start(struct qbman_swp *p);
+void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb);
+void *qbman_swp_mc_result(struct qbman_swp *p);
+
+/**
+ * qbman_swp_enqueue() - Issue an enqueue command
+ * @s: the software portal used for enqueue
+ * @d: the enqueue descriptor
+ * @fd: the frame descriptor to be enqueued
+ *
+ * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
+ */
+static inline int
+qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd)
+{
+ return qbman_swp_enqueue_ptr(s, d, fd);
+}
+
+/**
+ * qbman_swp_enqueue_multiple() - Issue a multi enqueue command
+ * using one enqueue descriptor
+ * @s: the software portal used for enqueue
+ * @d: the enqueue descriptor
+ * @fd: pointer to the table of frame descriptors to be enqueued
+ * @flags: pointer to a table of QBMAN_ENQUEUE_FLAG_DCA flags; not used if NULL
+ * @num_frames: number of frame descriptors to be enqueued
+ *
+ * Return the number of fd enqueued, or a negative error number.
+ */
+static inline int
+qbman_swp_enqueue_multiple(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ uint32_t *flags,
+ int num_frames)
+{
+ return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags, num_frames);
+}
+
+/**
+ * qbman_swp_enqueue_multiple_desc() - Issue a multi enqueue command
+ * using multiple enqueue descriptors
+ * @s: the software portal used for enqueue
+ * @d: table of minimal enqueue descriptors
+ * @fd: pointer to the table of frame descriptors to be enqueued
+ * @num_frames: number of frames to be enqueued
+ *
+ * Return the number of frames enqueued, or a negative error number.
+ */
+static inline int
+qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ int num_frames)
+{
+ return qbman_swp_enqueue_multiple_desc_ptr(s, d, fd, num_frames);
+}
+
+/**
+ * qbman_result_is_DQ() - check if the dequeue result is a dequeue response
+ * @dq: the dequeue result to be checked
+ *
+ * DQRR entries may contain non-dequeue results, i.e. notifications
+ */
+static inline int qbman_result_is_DQ(const struct dpaa2_dq *dq)
+{
+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_DQ);
+}
+
+/**
+ * qbman_result_is_SCN() - Check whether the dequeue result is a notification
+ * @dq: the dequeue result to be checked
+ */
+static inline int qbman_result_is_SCN(const struct dpaa2_dq *dq)
+{
+ return !qbman_result_is_DQ(dq);
+}
+
+/* FQ Data Availability */
+static inline int qbman_result_is_FQDAN(const struct dpaa2_dq *dq)
+{
+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQDAN);
+}
+
+/* Channel Data Availability */
+static inline int qbman_result_is_CDAN(const struct dpaa2_dq *dq)
+{
+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CDAN);
+}
+
+/* Congestion State Change */
+static inline int qbman_result_is_CSCN(const struct dpaa2_dq *dq)
+{
+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CSCN_WQ);
+}
+
+/* Buffer Pool State Change */
+static inline int qbman_result_is_BPSCN(const struct dpaa2_dq *dq)
+{
+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_BPSCN);
+}
+
+/* Congestion Group Count Update */
+static inline int qbman_result_is_CGCU(const struct dpaa2_dq *dq)
+{
+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CGCU);
+}
+
+/* Retirement */
+static inline int qbman_result_is_FQRN(const struct dpaa2_dq *dq)
+{
+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRN);
+}
+
+/* Retirement Immediate */
+static inline int qbman_result_is_FQRNI(const struct dpaa2_dq *dq)
+{
+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRNI);
+}
+
+/* Park */
+static inline int qbman_result_is_FQPN(const struct dpaa2_dq *dq)
+{
+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQPN);
+}
+
+/**
+ * qbman_result_SCN_state() - Get the state field in State-change notification
+ * @scn: the state change notification
+ */
+static inline u8 qbman_result_SCN_state(const struct dpaa2_dq *scn)
+{
+ return scn->scn.state;
+}
+
+#define SCN_RID_MASK 0x00FFFFFF
+
+/**
+ * qbman_result_SCN_rid() - Get the resource id in State-change notification
+ * @scn: the state change notification
+ */
+static inline u32 qbman_result_SCN_rid(const struct dpaa2_dq *scn)
+{
+ return le32_to_cpu(scn->scn.rid_tok) & SCN_RID_MASK;
+}
+
+/**
+ * qbman_result_SCN_ctx() - Get the context data in State-change notification
+ * @scn: the state change notification
+ */
+static inline u64 qbman_result_SCN_ctx(const struct dpaa2_dq *scn)
+{
+ return le64_to_cpu(scn->scn.ctx);
+}
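+
+/*
+ * Usage sketch (illustrative only): decoding a congestion state-change
+ * notification. 'dq' is a placeholder for a DQRR entry for which
+ * qbman_result_is_SCN() returned true.
+ *
+ *	if (qbman_result_is_CSCN(dq)) {
+ *		u8 state = qbman_result_SCN_state(dq);	// congestion entry/exit
+ *		u32 rid = qbman_result_SCN_rid(dq);	// resource (group) id
+ *		u64 ctx = qbman_result_SCN_ctx(dq);	// caller-supplied context
+ *
+ *		// react to the congestion change for resource 'rid'
+ *	}
+ */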
+
+/**
+ * qbman_swp_fq_schedule() - Move the fq to the scheduled state
+ * @s: the software portal object
+ * @fqid: the index of frame queue to be scheduled
+ *
+ * There are a couple of different ways that a FQ can end up in the parked
+ * state; this schedules it.
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_fq_schedule(struct qbman_swp *s, u32 fqid)
+{
+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
+}
+
+/**
+ * qbman_swp_fq_force() - Force the FQ to fully scheduled state
+ * @s: the software portal object
+ * @fqid: the index of frame queue to be forced
+ *
+ * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
+ * and thus be available for selection by any channel-dequeuing behaviour (push
+ * or pull). If the FQ is subsequently "dequeued" from the channel and is still
+ * empty at the time this happens, the resulting dq_entry will have no FD.
+ * (qbman_result_DQ_fd() will return NULL.)
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_fq_force(struct qbman_swp *s, u32 fqid)
+{
+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
+}
+
+/**
+ * qbman_swp_fq_xon() - sets FQ flow-control to XON
+ * @s: the software portal object
+ * @fqid: the index of frame queue
+ *
+ * This setting doesn't affect enqueues to the FQ, just dequeues.
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_fq_xon(struct qbman_swp *s, u32 fqid)
+{
+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
+}
+
+/**
+ * qbman_swp_fq_xoff() - sets FQ flow-control to XOFF
+ * @s: the software portal object
+ * @fqid: the index of frame queue
+ *
+ * This setting doesn't affect enqueues to the FQ, just dequeues.
+ * XOFF FQs will remain in the tentatively-scheduled state, even when
+ * non-empty, meaning they won't be selected for scheduled dequeuing.
+ * If a FQ is changed to XOFF after it had already become truly-scheduled
+ * to a channel, and a pull dequeue of that channel occurs that selects
+ * that FQ for dequeuing, then the resulting dq_entry will have no FD.
+ * (qbman_result_DQ_fd() will return NULL.)
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_fq_xoff(struct qbman_swp *s, u32 fqid)
+{
+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
+}
+
+/* If the user has been allocated a channel object that is going to generate
+ * CDANs to another channel, then the qbman_swp_CDAN* functions will be
+ * necessary.
+ *
+ * CDAN-enabled channels only generate a single CDAN notification, after which
+ * they need to be reenabled before they'll generate another. The idea is
+ * that pull dequeuing will occur in reaction to the CDAN, followed by a
+ * reenable step. Each function generates a distinct command to hardware, so a
+ * combination function is provided if the user wishes to modify the "context"
+ * (which shows up in each CDAN message) each time they reenable, as a single
+ * command to hardware.
+ */
+
+/**
+ * qbman_swp_CDAN_set_context() - Set CDAN context
+ * @s: the software portal object
+ * @channelid: the channel index
+ * @ctx: the context to be set in CDAN
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_CDAN_set_context(struct qbman_swp *s, u16 channelid,
+ u64 ctx)
+{
+ return qbman_swp_CDAN_set(s, channelid,
+ CODE_CDAN_WE_CTX,
+ 0, ctx);
+}
+
+/**
+ * qbman_swp_CDAN_enable() - Enable CDAN for the channel
+ * @s: the software portal object
+ * @channelid: the index of the channel to generate CDAN
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_CDAN_enable(struct qbman_swp *s, u16 channelid)
+{
+ return qbman_swp_CDAN_set(s, channelid,
+ CODE_CDAN_WE_EN,
+ 1, 0);
+}
+
+/**
+ * qbman_swp_CDAN_disable() - disable CDAN for the channel
+ * @s: the software portal object
+ * @channelid: the index of the channel to generate CDAN
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_CDAN_disable(struct qbman_swp *s, u16 channelid)
+{
+ return qbman_swp_CDAN_set(s, channelid,
+ CODE_CDAN_WE_EN,
+ 0, 0);
+}
+
+/**
+ * qbman_swp_CDAN_set_context_enable() - Set CDAN context and enable CDAN
+ * @s: the software portal object
+ * @channelid: the index of the channel to generate CDAN
+ * @ctx: the context to be set in CDAN
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s,
+ u16 channelid,
+ u64 ctx)
+{
+ return qbman_swp_CDAN_set(s, channelid,
+ CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
+ 1, ctx);
+}
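+
+/*
+ * Usage sketch (illustrative only): the CDAN arm/re-arm cycle described
+ * above. 'swp', 'ch_id' and 'my_ctx' are placeholders assumed to be
+ * provided by the caller.
+ *
+ *	// arm the channel once; 'my_ctx' is carried in every CDAN it raises
+ *	qbman_swp_CDAN_set_context_enable(swp, ch_id, my_ctx);
+ *
+ *	// later, after a CDAN arrives and the channel has been pull-dequeued,
+ *	// re-arm it so the next data-availability event is notified
+ *	qbman_swp_CDAN_enable(swp, ch_id);
+ */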
+
+/* Wraps up submit + poll-for-result */
+static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
+ u8 cmd_verb)
+{
+ int loopvar = 2000;
+
+ qbman_swp_mc_submit(swp, cmd, cmd_verb);
+
+ do {
+ cmd = qbman_swp_mc_result(swp);
+ } while (!cmd && loopvar--);
+
+ WARN_ON(!loopvar);
+
+ return cmd;
+}
+
+/* Query APIs */
+struct qbman_fq_query_np_rslt {
+ u8 verb;
+ u8 rslt;
+ u8 st1;
+ u8 st2;
+ u8 reserved[2];
+ __le16 od1_sfdr;
+ __le16 od2_sfdr;
+ __le16 od3_sfdr;
+ __le16 ra1_sfdr;
+ __le16 ra2_sfdr;
+ __le32 pfdr_hptr;
+ __le32 pfdr_tptr;
+ __le32 frm_cnt;
+ __le32 byte_cnt;
+ __le16 ics_surp;
+ u8 is;
+ u8 reserved2[29];
+};
+
+int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
+ struct qbman_fq_query_np_rslt *r);
+u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r);
+u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r);
+
+struct qbman_bp_query_rslt {
+ u8 verb;
+ u8 rslt;
+ u8 reserved[4];
+ u8 bdi;
+ u8 state;
+ __le32 fill;
+ __le32 hdotr;
+ __le16 swdet;
+ __le16 swdxt;
+ __le16 hwdet;
+ __le16 hwdxt;
+ __le16 swset;
+ __le16 swsxt;
+ __le16 vbpid;
+ __le16 icid;
+ __le64 bpscn_addr;
+ __le64 bpscn_ctx;
+ __le16 hw_targ;
+ u8 dbe;
+ u8 reserved2;
+ u8 sdcnt;
+ u8 hdcnt;
+ u8 sscnt;
+ u8 reserved3[9];
+};
+
+int qbman_bp_query(struct qbman_swp *s, u16 bpid,
+ struct qbman_bp_query_rslt *r);
+
+u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a);
+
+/**
+ * qbman_swp_release() - Issue a buffer release command
+ * @s: the software portal object
+ * @d: the release descriptor
+ * @buffers: a pointer to the buffer addresses to be released
+ * @num_buffers: number of buffers to be released, must be less than 8
+ *
+ * Return 0 for success, -EBUSY if the release command ring is not ready.
+ */
+static inline int qbman_swp_release(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const u64 *buffers,
+ unsigned int num_buffers)
+{
+ return qbman_swp_release_ptr(s, d, buffers, num_buffers);
+}
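+
+/*
+ * Usage sketch (illustrative only): releasing a batch of (at most 7) buffers
+ * to a pool. 'swp', 'bpid' and 'buf_addrs[]' (DMA addresses) are placeholders
+ * assumed to be provided by the caller.
+ *
+ *	struct qbman_release_desc rd;
+ *
+ *	qbman_release_desc_clear(&rd);
+ *	qbman_release_desc_set_bpid(&rd, bpid);
+ *	while (qbman_swp_release(swp, &rd, buf_addrs, 7) == -EBUSY)
+ *		;	// release command ring busy - retry
+ */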
+
+/**
+ * qbman_swp_pull() - Issue the pull dequeue command
+ * @s: the software portal object
+ * @d: the software portal descriptor which has been configured with
+ * the set of qbman_pull_desc_set_*() calls
+ *
+ * Return 0 for success, and -EBUSY if the software portal is not ready
+ * to do pull dequeue.
+ */
+static inline int qbman_swp_pull(struct qbman_swp *s,
+ struct qbman_pull_desc *d)
+{
+ return qbman_swp_pull_ptr(s, d);
+}
+
+/**
+ * qbman_swp_dqrr_next() - Get a valid DQRR entry
+ * @s: the software portal object
+ *
+ * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
+ * only once, so repeated calls can return a sequence of DQRR entries, without
+ * requiring they be consumed immediately or in any particular order.
+ */
+static inline const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
+{
+ return qbman_swp_dqrr_next_ptr(s);
+}
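+
+/*
+ * Usage sketch (illustrative only): polling the DQRR for push-mode dequeue
+ * results. 'swp' is a placeholder assumed to be provided by the caller.
+ *
+ *	const struct dpaa2_dq *dq;
+ *
+ *	dq = qbman_swp_dqrr_next(swp);
+ *	if (dq) {
+ *		if (qbman_result_is_DQ(dq))
+ *			;	// process the dequeued frame
+ *		else
+ *			;	// a notification, see qbman_result_is_*()
+ *		qbman_swp_dqrr_consume(swp, dq);
+ *	}
+ */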
+
+int qbman_swp_set_irq_coalescing(struct qbman_swp *p, u32 irq_threshold,
+ u32 irq_holdoff);
+
+void qbman_swp_get_irq_coalescing(struct qbman_swp *p, u32 *irq_threshold,
+ u32 *irq_holdoff);
+
+#endif /* __FSL_QBMAN_PORTAL_H */
diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c
new file mode 100644
index 0000000000..6bf3e6a980
--- /dev/null
+++ b/drivers/soc/fsl/guts.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Freescale QorIQ Platforms GUTS Driver
+ *
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ */
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/of_fdt.h>
+#include <linux/sys_soc.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/fsl/guts.h>
+
+struct fsl_soc_die_attr {
+ char *die;
+ u32 svr;
+ u32 mask;
+};
+
+struct fsl_soc_data {
+ const char *sfp_compat;
+ u32 uid_offset;
+};
+
+/* SoC die attribute definition for QorIQ platform */
+static const struct fsl_soc_die_attr fsl_soc_die[] = {
+ /*
+ * Power Architecture-based SoCs T Series
+ */
+
+ /* Die: T4240, SoC: T4240/T4160/T4080 */
+ { .die = "T4240",
+ .svr = 0x82400000,
+ .mask = 0xfff00000,
+ },
+ /* Die: T1040, SoC: T1040/T1020/T1042/T1022 */
+ { .die = "T1040",
+ .svr = 0x85200000,
+ .mask = 0xfff00000,
+ },
+ /* Die: T2080, SoC: T2080/T2081 */
+ { .die = "T2080",
+ .svr = 0x85300000,
+ .mask = 0xfff00000,
+ },
+ /* Die: T1024, SoC: T1024/T1014/T1023/T1013 */
+ { .die = "T1024",
+ .svr = 0x85400000,
+ .mask = 0xfff00000,
+ },
+
+ /*
+ * ARM-based SoCs LS Series
+ */
+
+ /* Die: LS1043A, SoC: LS1043A/LS1023A */
+ { .die = "LS1043A",
+ .svr = 0x87920000,
+ .mask = 0xffff0000,
+ },
+ /* Die: LS2080A, SoC: LS2080A/LS2040A/LS2085A */
+ { .die = "LS2080A",
+ .svr = 0x87010000,
+ .mask = 0xff3f0000,
+ },
+ /* Die: LS1088A, SoC: LS1088A/LS1048A/LS1084A/LS1044A */
+ { .die = "LS1088A",
+ .svr = 0x87030000,
+ .mask = 0xff3f0000,
+ },
+ /* Die: LS1012A, SoC: LS1012A */
+ { .die = "LS1012A",
+ .svr = 0x87040000,
+ .mask = 0xffff0000,
+ },
+ /* Die: LS1046A, SoC: LS1046A/LS1026A */
+ { .die = "LS1046A",
+ .svr = 0x87070000,
+ .mask = 0xffff0000,
+ },
+ /* Die: LS2088A, SoC: LS2088A/LS2048A/LS2084A/LS2044A */
+ { .die = "LS2088A",
+ .svr = 0x87090000,
+ .mask = 0xff3f0000,
+ },
+ /* Die: LS1021A, SoC: LS1021A/LS1020A/LS1022A */
+ { .die = "LS1021A",
+ .svr = 0x87000000,
+ .mask = 0xfff70000,
+ },
+ /* Die: LX2160A, SoC: LX2160A/LX2120A/LX2080A */
+ { .die = "LX2160A",
+ .svr = 0x87360000,
+ .mask = 0xff3f0000,
+ },
+ /* Die: LS1028A, SoC: LS1028A */
+ { .die = "LS1028A",
+ .svr = 0x870b0000,
+ .mask = 0xff3f0000,
+ },
+ { },
+};
+
+static const struct fsl_soc_die_attr *fsl_soc_die_match(
+ u32 svr, const struct fsl_soc_die_attr *matches)
+{
+ while (matches->svr) {
+ if (matches->svr == (svr & matches->mask))
+ return matches;
+ matches++;
+ }
+ return NULL;
+}
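+
+/*
+ * Worked example: with a hypothetical SVR of 0x87070011,
+ *
+ *	svr & 0xffff0000 == 0x87070000  ->  matches the "LS1046A" entry above
+ *	(svr >> 4) & 0xf, svr & 0xf     ->  revision "1.1" (see fsl_guts_init())
+ */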
+
+static u64 fsl_guts_get_soc_uid(const char *compat, unsigned int offset)
+{
+ struct device_node *np;
+ void __iomem *sfp_base;
+ u64 uid;
+
+ np = of_find_compatible_node(NULL, NULL, compat);
+ if (!np)
+ return 0;
+
+ sfp_base = of_iomap(np, 0);
+ if (!sfp_base) {
+ of_node_put(np);
+ return 0;
+ }
+
+ uid = ioread32(sfp_base + offset);
+ uid <<= 32;
+ uid |= ioread32(sfp_base + offset + 4);
+
+ iounmap(sfp_base);
+ of_node_put(np);
+
+ return uid;
+}
+
+static const struct fsl_soc_data ls1028a_data = {
+ .sfp_compat = "fsl,ls1028a-sfp",
+ .uid_offset = 0x21c,
+};
+
+/*
+ * Table of compatible strings to match the device tree
+ * guts node for Freescale QorIQ SoCs.
+ */
+static const struct of_device_id fsl_guts_of_match[] = {
+ { .compatible = "fsl,qoriq-device-config-1.0", },
+ { .compatible = "fsl,qoriq-device-config-2.0", },
+ { .compatible = "fsl,p1010-guts", },
+ { .compatible = "fsl,p1020-guts", },
+ { .compatible = "fsl,p1021-guts", },
+ { .compatible = "fsl,p1022-guts", },
+ { .compatible = "fsl,p1023-guts", },
+ { .compatible = "fsl,p2020-guts", },
+ { .compatible = "fsl,bsc9131-guts", },
+ { .compatible = "fsl,bsc9132-guts", },
+ { .compatible = "fsl,mpc8536-guts", },
+ { .compatible = "fsl,mpc8544-guts", },
+ { .compatible = "fsl,mpc8548-guts", },
+ { .compatible = "fsl,mpc8568-guts", },
+ { .compatible = "fsl,mpc8569-guts", },
+ { .compatible = "fsl,mpc8572-guts", },
+ { .compatible = "fsl,ls1021a-dcfg", },
+ { .compatible = "fsl,ls1043a-dcfg", },
+ { .compatible = "fsl,ls2080a-dcfg", },
+ { .compatible = "fsl,ls1088a-dcfg", },
+ { .compatible = "fsl,ls1012a-dcfg", },
+ { .compatible = "fsl,ls1046a-dcfg", },
+ { .compatible = "fsl,lx2160a-dcfg", },
+ { .compatible = "fsl,ls1028a-dcfg", .data = &ls1028a_data},
+ {}
+};
+
+static int __init fsl_guts_init(void)
+{
+ struct soc_device_attribute *soc_dev_attr;
+ static struct soc_device *soc_dev;
+ const struct fsl_soc_die_attr *soc_die;
+ const struct fsl_soc_data *soc_data;
+ const struct of_device_id *match;
+ struct ccsr_guts __iomem *regs;
+ const char *machine = NULL;
+ struct device_node *np;
+ bool little_endian;
+ u64 soc_uid = 0;
+ u32 svr;
+ int ret;
+
+ np = of_find_matching_node_and_match(NULL, fsl_guts_of_match, &match);
+ if (!np)
+ return 0;
+ soc_data = match->data;
+
+ regs = of_iomap(np, 0);
+ if (!regs) {
+ of_node_put(np);
+ return -ENOMEM;
+ }
+
+ little_endian = of_property_read_bool(np, "little-endian");
+ if (little_endian)
+ svr = ioread32(&regs->svr);
+ else
+ svr = ioread32be(&regs->svr);
+ iounmap(regs);
+ of_node_put(np);
+
+ /* Register soc device */
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ if (of_property_read_string(of_root, "model", &machine))
+ of_property_read_string_index(of_root, "compatible", 0, &machine);
+ if (machine) {
+ soc_dev_attr->machine = kstrdup(machine, GFP_KERNEL);
+ if (!soc_dev_attr->machine)
+ goto err_nomem;
+ }
+
+ soc_die = fsl_soc_die_match(svr, fsl_soc_die);
+ if (soc_die) {
+ soc_dev_attr->family = kasprintf(GFP_KERNEL, "QorIQ %s",
+ soc_die->die);
+ } else {
+ soc_dev_attr->family = kasprintf(GFP_KERNEL, "QorIQ");
+ }
+ if (!soc_dev_attr->family)
+ goto err_nomem;
+
+ soc_dev_attr->soc_id = kasprintf(GFP_KERNEL, "svr:0x%08x", svr);
+ if (!soc_dev_attr->soc_id)
+ goto err_nomem;
+
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%d.%d",
+ (svr >> 4) & 0xf, svr & 0xf);
+ if (!soc_dev_attr->revision)
+ goto err_nomem;
+
+ if (soc_data)
+ soc_uid = fsl_guts_get_soc_uid(soc_data->sfp_compat,
+ soc_data->uid_offset);
+ if (soc_uid)
+ soc_dev_attr->serial_number = kasprintf(GFP_KERNEL, "%016llX",
+ soc_uid);
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ ret = PTR_ERR(soc_dev);
+ goto err;
+ }
+
+ pr_info("Machine: %s\n", soc_dev_attr->machine);
+ pr_info("SoC family: %s\n", soc_dev_attr->family);
+ pr_info("SoC ID: %s, Revision: %s\n",
+ soc_dev_attr->soc_id, soc_dev_attr->revision);
+
+ return 0;
+
+err_nomem:
+ ret = -ENOMEM;
+err:
+ kfree(soc_dev_attr->machine);
+ kfree(soc_dev_attr->family);
+ kfree(soc_dev_attr->soc_id);
+ kfree(soc_dev_attr->revision);
+ kfree(soc_dev_attr->serial_number);
+ kfree(soc_dev_attr);
+
+ return ret;
+}
+core_initcall(fsl_guts_init);
diff --git a/drivers/soc/fsl/qbman/Kconfig b/drivers/soc/fsl/qbman/Kconfig
new file mode 100644
index 0000000000..bdecb86bb6
--- /dev/null
+++ b/drivers/soc/fsl/qbman/Kconfig
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig FSL_DPAA
+ bool "QorIQ DPAA1 framework support"
+ depends on ((FSL_SOC_BOOKE || ARCH_LAYERSCAPE) && ARCH_DMA_ADDR_T_64BIT)
+ select GENERIC_ALLOCATOR
+ help
+ The Freescale Data Path Acceleration Architecture (DPAA) is a set of
+ hardware components on specific QorIQ multicore processors.
+ This architecture provides the infrastructure to support simplified
+ sharing of networking interfaces and accelerators by multiple CPUs.
+ The major h/w blocks composing DPAA are BMan and QMan.
+
+ The Buffer Manager (BMan) is a hardware buffer pool management block
+ that allows software and accelerators on the datapath to acquire and
+ release buffers in order to build frames.
+
+ The Queue Manager (QMan) is a hardware queue management block
+ that allows software and accelerators on the datapath to enqueue and
+ dequeue frames in order to communicate.
+
+if FSL_DPAA
+
+config FSL_DPAA_CHECKING
+ bool "Additional driver checking"
+ help
+ Compiles in additional checks, to sanity-check the drivers and
+ any use of the exported API. Not recommended for performance.
+
+config FSL_BMAN_TEST
+ tristate "BMan self-tests"
+ help
+ Compile the BMan self-test code. These tests will
+ exercise the BMan APIs to confirm functionality
+ of both the software drivers and hardware device.
+
+config FSL_BMAN_TEST_API
+ bool "High-level API self-test"
+ depends on FSL_BMAN_TEST
+ default y
+ help
+ This requires the presence of cpu-affine portals, and performs
+ high-level API testing with them (whichever portal(s) are affine
+ to the cpu(s) the test executes on).
+
+config FSL_QMAN_TEST
+ tristate "QMan self-tests"
+ help
+ Compile self-test code for QMan.
+
+config FSL_QMAN_TEST_API
+ bool "QMan high-level self-test"
+ depends on FSL_QMAN_TEST
+ default y
+ help
+ This requires the presence of cpu-affine portals, and performs
+ high-level API testing with them (whichever portal(s) are affine to
+ the cpu(s) the test executes on).
+
+config FSL_QMAN_TEST_STASH
+ bool "QMan 'hot potato' data-stashing self-test"
+ depends on FSL_QMAN_TEST
+ default y
+ help
+ This performs a "hot potato" style test enqueuing/dequeuing a frame
+ across a series of FQs scheduled to different portals (and cpus), with
+ DQRR, data and context stashing always on.
+
+endif # FSL_DPAA
diff --git a/drivers/soc/fsl/qbman/Makefile b/drivers/soc/fsl/qbman/Makefile
new file mode 100644
index 0000000000..811312ad52
--- /dev/null
+++ b/drivers/soc/fsl/qbman/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_FSL_DPAA) += bman_ccsr.o qman_ccsr.o \
+ bman_portal.o qman_portal.o \
+ bman.o qman.o dpaa_sys.o
+
+obj-$(CONFIG_FSL_BMAN_TEST) += bman-test.o
+bman-test-y = bman_test.o
+bman-test-$(CONFIG_FSL_BMAN_TEST_API) += bman_test_api.o
+
+obj-$(CONFIG_FSL_QMAN_TEST) += qman-test.o
+qman-test-y = qman_test.o
+qman-test-$(CONFIG_FSL_QMAN_TEST_API) += qman_test_api.o
+qman-test-$(CONFIG_FSL_QMAN_TEST_STASH) += qman_test_stash.o
diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c
new file mode 100644
index 0000000000..6cc1847e53
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman.c
@@ -0,0 +1,819 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+#define IRQNAME "BMan portal %d"
+#define MAX_IRQNAME 16 /* big enough for "BMan portal %d" */
+
+/* Portal register assists */
+
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+/* Cache-inhibited register offsets */
+#define BM_REG_RCR_PI_CINH 0x3000
+#define BM_REG_RCR_CI_CINH 0x3100
+#define BM_REG_RCR_ITR 0x3200
+#define BM_REG_CFG 0x3300
+#define BM_REG_SCN(n) (0x3400 + ((n) << 6))
+#define BM_REG_ISR 0x3e00
+#define BM_REG_IER 0x3e40
+#define BM_REG_ISDR 0x3e80
+#define BM_REG_IIR 0x3ec0
+
+/* Cache-enabled register offsets */
+#define BM_CL_CR 0x0000
+#define BM_CL_RR0 0x0100
+#define BM_CL_RR1 0x0140
+#define BM_CL_RCR 0x1000
+#define BM_CL_RCR_PI_CENA 0x3000
+#define BM_CL_RCR_CI_CENA 0x3100
+
+#else
+/* Cache-inhibited register offsets */
+#define BM_REG_RCR_PI_CINH 0x0000
+#define BM_REG_RCR_CI_CINH 0x0004
+#define BM_REG_RCR_ITR 0x0008
+#define BM_REG_CFG 0x0100
+#define BM_REG_SCN(n) (0x0200 + ((n) << 2))
+#define BM_REG_ISR 0x0e00
+#define BM_REG_IER 0x0e04
+#define BM_REG_ISDR 0x0e08
+#define BM_REG_IIR 0x0e0c
+
+/* Cache-enabled register offsets */
+#define BM_CL_CR 0x0000
+#define BM_CL_RR0 0x0100
+#define BM_CL_RR1 0x0140
+#define BM_CL_RCR 0x1000
+#define BM_CL_RCR_PI_CENA 0x3000
+#define BM_CL_RCR_CI_CENA 0x3100
+#endif
+
+/*
+ * Portal modes.
+ * Enum types:
+ * pmode == production mode
+ * cmode == consumption mode
+ * Enum values use 3 letter codes. First letter matches the portal mode,
+ * remaining two letters indicate:
+ * ci == cache-inhibited portal register
+ * ce == cache-enabled portal register
+ * vb == in-band valid-bit (cache-enabled)
+ */
+enum bm_rcr_pmode { /* matches BCSP_CFG::RPM */
+ bm_rcr_pci = 0, /* PI index, cache-inhibited */
+ bm_rcr_pce = 1, /* PI index, cache-enabled */
+ bm_rcr_pvb = 2 /* valid-bit */
+};
+enum bm_rcr_cmode { /* s/w-only */
+ bm_rcr_cci, /* CI index, cache-inhibited */
+ bm_rcr_cce /* CI index, cache-enabled */
+};
+
+
+/* --- Portal structures --- */
+
+#define BM_RCR_SIZE 8
+
+/* Release Command */
+struct bm_rcr_entry {
+ union {
+ struct {
+ u8 _ncw_verb; /* writes to this are non-coherent */
+ u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
+ u8 __reserved1[62];
+ };
+ struct bm_buffer bufs[8];
+ };
+};
+#define BM_RCR_VERB_VBIT 0x80
+#define BM_RCR_VERB_CMD_MASK 0x70 /* one of two values; */
+#define BM_RCR_VERB_CMD_BPID_SINGLE 0x20
+#define BM_RCR_VERB_CMD_BPID_MULTI 0x30
+#define BM_RCR_VERB_BUFCOUNT_MASK 0x0f /* values 1..8 */
+
+struct bm_rcr {
+ struct bm_rcr_entry *ring, *cursor;
+ u8 ci, available, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ u32 busy;
+ enum bm_rcr_pmode pmode;
+ enum bm_rcr_cmode cmode;
+#endif
+};
+
+/* MC (Management Command) command */
+struct bm_mc_command {
+ u8 _ncw_verb; /* writes to this are non-coherent */
+ u8 bpid; /* used by acquire command */
+ u8 __reserved[62];
+};
+#define BM_MCC_VERB_VBIT 0x80
+#define BM_MCC_VERB_CMD_MASK 0x70 /* where the verb contains; */
+#define BM_MCC_VERB_CMD_ACQUIRE 0x10
+#define BM_MCC_VERB_CMD_QUERY 0x40
+#define BM_MCC_VERB_ACQUIRE_BUFCOUNT 0x0f /* values 1..8 go here */
+
+/* MC result, Acquire and Query Response */
+union bm_mc_result {
+ struct {
+ u8 verb;
+ u8 bpid;
+ u8 __reserved[62];
+ };
+ struct bm_buffer bufs[8];
+};
+#define BM_MCR_VERB_VBIT 0x80
+#define BM_MCR_VERB_CMD_MASK BM_MCC_VERB_CMD_MASK
+#define BM_MCR_VERB_CMD_ACQUIRE BM_MCC_VERB_CMD_ACQUIRE
+#define BM_MCR_VERB_CMD_QUERY BM_MCC_VERB_CMD_QUERY
+#define BM_MCR_VERB_CMD_ERR_INVALID 0x60
+#define BM_MCR_VERB_CMD_ERR_ECC 0x70
+#define BM_MCR_VERB_ACQUIRE_BUFCOUNT BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
+#define BM_MCR_TIMEOUT 10000 /* us */
+
+struct bm_mc {
+ struct bm_mc_command *cr;
+ union bm_mc_result *rr;
+ u8 rridx, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ enum {
+ /* Can only be _mc_start()ed */
+ mc_idle,
+ /* Can only be _mc_commit()ed or _mc_abort()ed */
+ mc_user,
+ /* Can only be _mc_retry()ed */
+ mc_hw
+ } state;
+#endif
+};
+
+struct bm_addr {
+ void *ce; /* cache-enabled */
+ __be32 *ce_be; /* Same as above but for direct access */
+ void __iomem *ci; /* cache-inhibited */
+};
+
+struct bm_portal {
+ struct bm_addr addr;
+ struct bm_rcr rcr;
+ struct bm_mc mc;
+} ____cacheline_aligned;
+
+/* Cache-inhibited register access. */
+static inline u32 bm_in(struct bm_portal *p, u32 offset)
+{
+ return ioread32be(p->addr.ci + offset);
+}
+
+static inline void bm_out(struct bm_portal *p, u32 offset, u32 val)
+{
+ iowrite32be(val, p->addr.ci + offset);
+}
+
+/* Cache Enabled Portal Access */
+static inline void bm_cl_invalidate(struct bm_portal *p, u32 offset)
+{
+ dpaa_invalidate(p->addr.ce + offset);
+}
+
+static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset)
+{
+ dpaa_touch_ro(p->addr.ce + offset);
+}
+
+static inline u32 bm_ce_in(struct bm_portal *p, u32 offset)
+{
+ return be32_to_cpu(*(p->addr.ce_be + (offset/4)));
+}
+
+struct bman_portal {
+ struct bm_portal p;
+ /* interrupt sources processed by portal_isr(), configurable */
+ unsigned long irq_sources;
+ /* probing time config params for cpu-affine portals */
+ const struct bm_portal_config *config;
+ char irqname[MAX_IRQNAME];
+};
+
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal);
+
+static inline struct bman_portal *get_affine_portal(void)
+{
+ return &get_cpu_var(bman_affine_portal);
+}
+
+static inline void put_affine_portal(void)
+{
+ put_cpu_var(bman_affine_portal);
+}
+
+/*
+ * This object type refers to a pool; it isn't *the* pool. There may be
+ * more than one such object per BMan buffer pool, e.g. if different users of
+ * the pool are operating via different portals.
+ */
+struct bman_pool {
+ /* index of the buffer pool to encapsulate (0-63) */
+ u32 bpid;
+ /* Used for hash-table admin when using depletion notifications. */
+ struct bman_portal *portal;
+ struct bman_pool *next;
+};
+
+static u32 poll_portal_slow(struct bman_portal *p, u32 is);
+
+static irqreturn_t portal_isr(int irq, void *ptr)
+{
+ struct bman_portal *p = ptr;
+ struct bm_portal *portal = &p->p;
+ u32 clear = p->irq_sources;
+ u32 is = bm_in(portal, BM_REG_ISR) & p->irq_sources;
+
+ if (unlikely(!is))
+ return IRQ_NONE;
+
+ clear |= poll_portal_slow(p, is);
+ bm_out(portal, BM_REG_ISR, clear);
+ return IRQ_HANDLED;
+}
+
+/* --- RCR API --- */
+
+#define RCR_SHIFT ilog2(sizeof(struct bm_rcr_entry))
+#define RCR_CARRY (uintptr_t)(BM_RCR_SIZE << RCR_SHIFT)
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+static struct bm_rcr_entry *rcr_carryclear(struct bm_rcr_entry *p)
+{
+ uintptr_t addr = (uintptr_t)p;
+
+ addr &= ~RCR_CARRY;
+
+ return (struct bm_rcr_entry *)addr;
+}
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static int rcr_ptr2idx(struct bm_rcr_entry *e)
+{
+ return ((uintptr_t)e >> RCR_SHIFT) & (BM_RCR_SIZE - 1);
+}
+#endif
+
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void rcr_inc(struct bm_rcr *rcr)
+{
+ /* increment to the next RCR pointer and handle overflow and 'vbit' */
+ struct bm_rcr_entry *partial = rcr->cursor + 1;
+
+ rcr->cursor = rcr_carryclear(partial);
+ if (partial != rcr->cursor)
+ rcr->vbit ^= BM_RCR_VERB_VBIT;
+}
+
+static int bm_rcr_get_avail(struct bm_portal *portal)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+
+ return rcr->available;
+}
+
+static int bm_rcr_get_fill(struct bm_portal *portal)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+
+ return BM_RCR_SIZE - 1 - rcr->available;
+}
+
+static void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+
+ rcr->ithresh = ithresh;
+ bm_out(portal, BM_REG_RCR_ITR, ithresh);
+}
+
+static void bm_rcr_cce_prefetch(struct bm_portal *portal)
+{
+ __maybe_unused struct bm_rcr *rcr = &portal->rcr;
+
+ DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
+ bm_cl_touch_ro(portal, BM_CL_RCR_CI_CENA);
+}
+
+static u8 bm_rcr_cce_update(struct bm_portal *portal)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+ u8 diff, old_ci = rcr->ci;
+
+ DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
+ rcr->ci = bm_ce_in(portal, BM_CL_RCR_CI_CENA) & (BM_RCR_SIZE - 1);
+ bm_cl_invalidate(portal, BM_CL_RCR_CI_CENA);
+ diff = dpaa_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
+ rcr->available += diff;
+ return diff;
+}
+
+static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+
+ DPAA_ASSERT(!rcr->busy);
+ if (!rcr->available)
+ return NULL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ rcr->busy = 1;
+#endif
+ dpaa_zero(rcr->cursor);
+ return rcr->cursor;
+}
+
+static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+ struct bm_rcr_entry *rcursor;
+
+ DPAA_ASSERT(rcr->busy);
+ DPAA_ASSERT(rcr->pmode == bm_rcr_pvb);
+ DPAA_ASSERT(rcr->available >= 1);
+ dma_wmb();
+ rcursor = rcr->cursor;
+ rcursor->_ncw_verb = myverb | rcr->vbit;
+ dpaa_flush(rcursor);
+ rcr_inc(rcr);
+ rcr->available--;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ rcr->busy = 0;
+#endif
+}
+
+static int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
+ enum bm_rcr_cmode cmode)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+ u32 cfg;
+ u8 pi;
+
+ rcr->ring = portal->addr.ce + BM_CL_RCR;
+ rcr->ci = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+ pi = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+ rcr->cursor = rcr->ring + pi;
+ rcr->vbit = (bm_in(portal, BM_REG_RCR_PI_CINH) & BM_RCR_SIZE) ?
+ BM_RCR_VERB_VBIT : 0;
+ rcr->available = BM_RCR_SIZE - 1
+ - dpaa_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
+ rcr->ithresh = bm_in(portal, BM_REG_RCR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ rcr->busy = 0;
+ rcr->pmode = pmode;
+ rcr->cmode = cmode;
+#endif
+ cfg = (bm_in(portal, BM_REG_CFG) & 0xffffffe0)
+ | (pmode & 0x3); /* BCSP_CFG::RPM */
+ bm_out(portal, BM_REG_CFG, cfg);
+ return 0;
+}
+
+static void bm_rcr_finish(struct bm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ struct bm_rcr *rcr = &portal->rcr;
+ int i;
+
+ DPAA_ASSERT(!rcr->busy);
+
+ i = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+ if (i != rcr_ptr2idx(rcr->cursor))
+ pr_crit("losing uncommitted RCR entries\n");
+
+ i = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+ if (i != rcr->ci)
+ pr_crit("missing existing RCR completions\n");
+ if (rcr->ci != rcr_ptr2idx(rcr->cursor))
+ pr_crit("RCR destroyed unquiesced\n");
+#endif
+}
+
+/* --- Management command API --- */
+static int bm_mc_init(struct bm_portal *portal)
+{
+ struct bm_mc *mc = &portal->mc;
+
+ mc->cr = portal->addr.ce + BM_CL_CR;
+ mc->rr = portal->addr.ce + BM_CL_RR0;
+ mc->rridx = (mc->cr->_ncw_verb & BM_MCC_VERB_VBIT) ?
+ 0 : 1;
+ mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = mc_idle;
+#endif
+ return 0;
+}
+
+static void bm_mc_finish(struct bm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ struct bm_mc *mc = &portal->mc;
+
+ DPAA_ASSERT(mc->state == mc_idle);
+ if (mc->state != mc_idle)
+ pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
+{
+ struct bm_mc *mc = &portal->mc;
+
+ DPAA_ASSERT(mc->state == mc_idle);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = mc_user;
+#endif
+ dpaa_zero(mc->cr);
+ return mc->cr;
+}
+
+static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
+{
+ struct bm_mc *mc = &portal->mc;
+ union bm_mc_result *rr = mc->rr + mc->rridx;
+
+ DPAA_ASSERT(mc->state == mc_user);
+ dma_wmb();
+ mc->cr->_ncw_verb = myverb | mc->vbit;
+ dpaa_flush(mc->cr);
+ dpaa_invalidate_touch_ro(rr);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = mc_hw;
+#endif
+}
+
+static inline union bm_mc_result *bm_mc_result(struct bm_portal *portal)
+{
+ struct bm_mc *mc = &portal->mc;
+ union bm_mc_result *rr = mc->rr + mc->rridx;
+
+ DPAA_ASSERT(mc->state == mc_hw);
+ /*
+ * The inactive response register's verb byte always returns zero until
+ * its command is submitted and completed. This includes the valid-bit,
+ * in case you were wondering...
+ */
+ if (!rr->verb) {
+ dpaa_invalidate_touch_ro(rr);
+ return NULL;
+ }
+ mc->rridx ^= 1;
+ mc->vbit ^= BM_MCC_VERB_VBIT;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = mc_idle;
+#endif
+ return rr;
+}
+
+static inline int bm_mc_result_timeout(struct bm_portal *portal,
+ union bm_mc_result **mcr)
+{
+ int timeout = BM_MCR_TIMEOUT;
+
+ do {
+ *mcr = bm_mc_result(portal);
+ if (*mcr)
+ break;
+ udelay(1);
+ } while (--timeout);
+
+ return timeout;
+}
+
+/* Disable all BSCN interrupts for the portal */
+static void bm_isr_bscn_disable(struct bm_portal *portal)
+{
+ bm_out(portal, BM_REG_SCN(0), 0);
+ bm_out(portal, BM_REG_SCN(1), 0);
+}
+
+static int bman_create_portal(struct bman_portal *portal,
+ const struct bm_portal_config *c)
+{
+ struct bm_portal *p;
+ int ret;
+
+ p = &portal->p;
+ /*
+	 * prep the low-level portal struct with the mapped addresses from the
+	 * config; everything that follows depends on it, and "config" is more
+	 * for (de)reference...
+ */
+ p->addr.ce = c->addr_virt_ce;
+ p->addr.ce_be = c->addr_virt_ce;
+ p->addr.ci = c->addr_virt_ci;
+ if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) {
+ dev_err(c->dev, "RCR initialisation failed\n");
+ goto fail_rcr;
+ }
+ if (bm_mc_init(p)) {
+ dev_err(c->dev, "MC initialisation failed\n");
+ goto fail_mc;
+ }
+ /*
+	 * Default to all BPIDs disabled; we enable them as required at
+	 * run-time.
+ */
+ bm_isr_bscn_disable(p);
+
+ /* Write-to-clear any stale interrupt status bits */
+ bm_out(p, BM_REG_ISDR, 0xffffffff);
+ portal->irq_sources = 0;
+ bm_out(p, BM_REG_IER, 0);
+ bm_out(p, BM_REG_ISR, 0xffffffff);
+ snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
+ if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
+ dev_err(c->dev, "request_irq() failed\n");
+ goto fail_irq;
+ }
+
+ if (dpaa_set_portal_irq_affinity(c->dev, c->irq, c->cpu))
+ goto fail_affinity;
+
+ /* Need RCR to be empty before continuing */
+ ret = bm_rcr_get_fill(p);
+ if (ret) {
+ dev_err(c->dev, "RCR unclean\n");
+ goto fail_rcr_empty;
+ }
+ /* Success */
+ portal->config = c;
+
+ bm_out(p, BM_REG_ISDR, 0);
+ bm_out(p, BM_REG_IIR, 0);
+
+ return 0;
+
+fail_rcr_empty:
+fail_affinity:
+ free_irq(c->irq, portal);
+fail_irq:
+ bm_mc_finish(p);
+fail_mc:
+ bm_rcr_finish(p);
+fail_rcr:
+ return -EIO;
+}
+
+struct bman_portal *bman_create_affine_portal(const struct bm_portal_config *c)
+{
+ struct bman_portal *portal;
+ int err;
+
+ portal = &per_cpu(bman_affine_portal, c->cpu);
+ err = bman_create_portal(portal, c);
+ if (err)
+ return NULL;
+
+ spin_lock(&affine_mask_lock);
+ cpumask_set_cpu(c->cpu, &affine_mask);
+ spin_unlock(&affine_mask_lock);
+
+ return portal;
+}
+
+static u32 poll_portal_slow(struct bman_portal *p, u32 is)
+{
+ u32 ret = is;
+
+ if (is & BM_PIRQ_RCRI) {
+ bm_rcr_cce_update(&p->p);
+ bm_rcr_set_ithresh(&p->p, 0);
+ bm_out(&p->p, BM_REG_ISR, BM_PIRQ_RCRI);
+ is &= ~BM_PIRQ_RCRI;
+ }
+
+ /* There should be no status register bits left undefined */
+ DPAA_ASSERT(!is);
+ return ret;
+}
+
+int bman_p_irqsource_add(struct bman_portal *p, u32 bits)
+{
+ unsigned long irqflags;
+
+ local_irq_save(irqflags);
+ p->irq_sources |= bits & BM_PIRQ_VISIBLE;
+ bm_out(&p->p, BM_REG_IER, p->irq_sources);
+ local_irq_restore(irqflags);
+ return 0;
+}
+
+int bm_shutdown_pool(u32 bpid)
+{
+ int err = 0;
+ struct bm_mc_command *bm_cmd;
+ union bm_mc_result *bm_res;
+
+ struct bman_portal *p = get_affine_portal();
+ while (1) {
+ /* Acquire buffers until empty */
+ bm_cmd = bm_mc_start(&p->p);
+ bm_cmd->bpid = bpid;
+ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | 1);
+ if (!bm_mc_result_timeout(&p->p, &bm_res)) {
+			pr_crit("BMan Acquire Command timed out\n");
+ err = -ETIMEDOUT;
+ goto done;
+ }
+ if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
+ /* Pool is empty */
+ goto done;
+ }
+ }
+done:
+ put_affine_portal();
+ return err;
+}
+
+struct gen_pool *bm_bpalloc;
+
+static int bm_alloc_bpid_range(u32 *result, u32 count)
+{
+ unsigned long addr;
+
+ addr = gen_pool_alloc(bm_bpalloc, count);
+ if (!addr)
+ return -ENOMEM;
+
+ *result = addr & ~DPAA_GENALLOC_OFF;
+
+ return 0;
+}
+
+static int bm_release_bpid(u32 bpid)
+{
+ int ret;
+
+ ret = bm_shutdown_pool(bpid);
+ if (ret) {
+ pr_debug("BPID %d leaked\n", bpid);
+ return ret;
+ }
+
+ gen_pool_free(bm_bpalloc, bpid | DPAA_GENALLOC_OFF, 1);
+ return 0;
+}
+
+struct bman_pool *bman_new_pool(void)
+{
+ struct bman_pool *pool = NULL;
+ u32 bpid;
+
+ if (bm_alloc_bpid_range(&bpid, 1))
+ return NULL;
+
+ pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ goto err;
+
+ pool->bpid = bpid;
+
+ return pool;
+err:
+ bm_release_bpid(bpid);
+ return NULL;
+}
+EXPORT_SYMBOL(bman_new_pool);
+
+void bman_free_pool(struct bman_pool *pool)
+{
+ bm_release_bpid(pool->bpid);
+
+ kfree(pool);
+}
+EXPORT_SYMBOL(bman_free_pool);
+
+int bman_get_bpid(const struct bman_pool *pool)
+{
+ return pool->bpid;
+}
+EXPORT_SYMBOL(bman_get_bpid);
+
+static void update_rcr_ci(struct bman_portal *p, int avail)
+{
+ if (avail)
+ bm_rcr_cce_prefetch(&p->p);
+ else
+ bm_rcr_cce_update(&p->p);
+}
+
+int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num)
+{
+ struct bman_portal *p;
+ struct bm_rcr_entry *r;
+ unsigned long irqflags;
+ int avail, timeout = 1000; /* 1ms */
+ int i = num - 1;
+
+ DPAA_ASSERT(num > 0 && num <= 8);
+
+ do {
+ p = get_affine_portal();
+ local_irq_save(irqflags);
+ avail = bm_rcr_get_avail(&p->p);
+ if (avail < 2)
+ update_rcr_ci(p, avail);
+ r = bm_rcr_start(&p->p);
+ local_irq_restore(irqflags);
+ put_affine_portal();
+ if (likely(r))
+ break;
+
+ udelay(1);
+ } while (--timeout);
+
+ if (unlikely(!timeout))
+ return -ETIMEDOUT;
+
+ p = get_affine_portal();
+ local_irq_save(irqflags);
+ /*
+ * we can copy all but the first entry, as this can trigger badness
+ * with the valid-bit
+ */
+ bm_buffer_set64(r->bufs, bm_buffer_get64(bufs));
+ bm_buffer_set_bpid(r->bufs, pool->bpid);
+ if (i)
+ memcpy(&r->bufs[1], &bufs[1], i * sizeof(bufs[0]));
+
+ bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
+ (num & BM_RCR_VERB_BUFCOUNT_MASK));
+
+ local_irq_restore(irqflags);
+ put_affine_portal();
+ return 0;
+}
+EXPORT_SYMBOL(bman_release);
+
+int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num)
+{
+ struct bman_portal *p = get_affine_portal();
+ struct bm_mc_command *mcc;
+ union bm_mc_result *mcr;
+ int ret;
+
+ DPAA_ASSERT(num > 0 && num <= 8);
+
+ mcc = bm_mc_start(&p->p);
+ mcc->bpid = pool->bpid;
+ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
+ (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
+ if (!bm_mc_result_timeout(&p->p, &mcr)) {
+ put_affine_portal();
+ pr_crit("BMan Acquire Timeout\n");
+ return -ETIMEDOUT;
+ }
+ ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
+ if (bufs)
+ memcpy(&bufs[0], &mcr->bufs[0], num * sizeof(bufs[0]));
+
+ put_affine_portal();
+ if (ret != num)
+ ret = -ENOMEM;
+ return ret;
+}
+EXPORT_SYMBOL(bman_acquire);
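+
+/*
+ * Usage sketch (illustrative only): minimal use of the exported pool
+ * interface. 'dma_addrs[]' (bus addresses of already-mapped buffers) is a
+ * placeholder assumed to be provided by the caller; error handling is
+ * abbreviated.
+ *
+ *	struct bm_buffer bufs[8];
+ *	struct bman_pool *pool;
+ *	int i;
+ *
+ *	pool = bman_new_pool();
+ *	if (!pool)
+ *		return -ENODEV;
+ *
+ *	for (i = 0; i < 8; i++)
+ *		bm_buffer_set64(&bufs[i], dma_addrs[i]);
+ *	bman_release(pool, bufs, 8);	// seed the pool (bpid set internally)
+ *	bman_acquire(pool, bufs, 8);	// take the buffers back
+ *
+ *	bman_free_pool(pool);
+ */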
+
+const struct bm_portal_config *
+bman_get_bm_portal_config(const struct bman_portal *portal)
+{
+ return portal->config;
+}
diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c
new file mode 100644
index 0000000000..cb24a08be0
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_ccsr.c
@@ -0,0 +1,320 @@
+/* Copyright (c) 2009 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+u16 bman_ip_rev;
+EXPORT_SYMBOL(bman_ip_rev);
+
+/* Register offsets */
+#define REG_FBPR_FPC 0x0800
+#define REG_ECSR 0x0a00
+#define REG_ECIR 0x0a04
+#define REG_EADR 0x0a08
+#define REG_EDATA(n) (0x0a10 + ((n) * 0x04))
+#define REG_SBEC(n) (0x0a80 + ((n) * 0x04))
+#define REG_IP_REV_1 0x0bf8
+#define REG_IP_REV_2 0x0bfc
+#define REG_FBPR_BARE 0x0c00
+#define REG_FBPR_BAR 0x0c04
+#define REG_FBPR_AR 0x0c10
+#define REG_SRCIDR 0x0d04
+#define REG_LIODNR 0x0d08
+#define REG_ERR_ISR 0x0e00
+#define REG_ERR_IER 0x0e04
+#define REG_ERR_ISDR 0x0e08
+
+/* Used by all error interrupt registers except 'inhibit' */
+#define BM_EIRQ_IVCI 0x00000010 /* Invalid Command Verb */
+#define BM_EIRQ_FLWI 0x00000008 /* FBPR Low Watermark */
+#define BM_EIRQ_MBEI 0x00000004 /* Multi-bit ECC Error */
+#define BM_EIRQ_SBEI 0x00000002 /* Single-bit ECC Error */
+#define BM_EIRQ_BSCN 0x00000001 /* pool State Change Notification */
+
+struct bman_hwerr_txt {
+ u32 mask;
+ const char *txt;
+};
+
+static const struct bman_hwerr_txt bman_hwerr_txts[] = {
+ { BM_EIRQ_IVCI, "Invalid Command Verb" },
+ { BM_EIRQ_FLWI, "FBPR Low Watermark" },
+ { BM_EIRQ_MBEI, "Multi-bit ECC Error" },
+ { BM_EIRQ_SBEI, "Single-bit ECC Error" },
+ { BM_EIRQ_BSCN, "Pool State Change Notification" },
+};
+
+/* Only trigger the low watermark interrupt once */
+#define BMAN_ERRS_TO_DISABLE BM_EIRQ_FLWI
+
+/* Pointer to the start of the BMan's CCSR space */
+static u32 __iomem *bm_ccsr_start;
+
+static inline u32 bm_ccsr_in(u32 offset)
+{
+ return ioread32be(bm_ccsr_start + offset/4);
+}
+static inline void bm_ccsr_out(u32 offset, u32 val)
+{
+ iowrite32be(val, bm_ccsr_start + offset/4);
+}
+
+static void bm_get_version(u16 *id, u8 *major, u8 *minor)
+{
+ u32 v = bm_ccsr_in(REG_IP_REV_1);
+ *id = (v >> 16);
+ *major = (v >> 8) & 0xff;
+ *minor = v & 0xff;
+}
+
+/* signal transactions for FBPRs with higher priority */
+#define FBPR_AR_RPRIO_HI BIT(30)
+
+/* Track if probe has occurred and if cleanup is required */
+static int __bman_probed;
+static int __bman_requires_cleanup;
+
+static int bm_set_memory(u64 ba, u32 size)
+{
+ u32 bar, bare;
+ u32 exp = ilog2(size);
+ /* choke if size isn't within range */
+ DPAA_ASSERT(size >= 4096 && size <= 1024*1024*1024 &&
+ is_power_of_2(size));
+ /* choke if '[e]ba' has lower-alignment than 'size' */
+ DPAA_ASSERT(!(ba & (size - 1)));
+
+ /* Check to see if BMan has already been initialized */
+ bar = bm_ccsr_in(REG_FBPR_BAR);
+ if (bar) {
+		/* Make sure ba == what was programmed */
+ bare = bm_ccsr_in(REG_FBPR_BARE);
+ if (bare != upper_32_bits(ba) || bar != lower_32_bits(ba)) {
+ pr_err("Attempted to reinitialize BMan with different BAR, got 0x%llx read BARE=0x%x BAR=0x%x\n",
+ ba, bare, bar);
+ return -ENOMEM;
+ }
+ pr_info("BMan BAR already configured\n");
+ __bman_requires_cleanup = 1;
+ return 1;
+ }
+
+ bm_ccsr_out(REG_FBPR_BARE, upper_32_bits(ba));
+ bm_ccsr_out(REG_FBPR_BAR, lower_32_bits(ba));
+ bm_ccsr_out(REG_FBPR_AR, exp - 1);
+ return 0;
+}
+
+/*
+ * Location and size of BMan private memory
+ *
+ * Ideally we would use the DMA API to turn rmem->base into a DMA address
+ * (especially if iommu translations ever get involved). Unfortunately, the
+ * DMA API currently does not allow mapping anything that is not backed with
+ * a struct page.
+ */
+static dma_addr_t fbpr_a;
+static size_t fbpr_sz;
+
+static int bman_fbpr(struct reserved_mem *rmem)
+{
+ fbpr_a = rmem->base;
+ fbpr_sz = rmem->size;
+
+ WARN_ON(!(fbpr_a && fbpr_sz));
+
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", bman_fbpr);
+
+static irqreturn_t bman_isr(int irq, void *ptr)
+{
+ u32 isr_val, ier_val, ecsr_val, isr_mask, i;
+ struct device *dev = ptr;
+
+ ier_val = bm_ccsr_in(REG_ERR_IER);
+ isr_val = bm_ccsr_in(REG_ERR_ISR);
+ ecsr_val = bm_ccsr_in(REG_ECSR);
+ isr_mask = isr_val & ier_val;
+
+ if (!isr_mask)
+ return IRQ_NONE;
+
+ for (i = 0; i < ARRAY_SIZE(bman_hwerr_txts); i++) {
+ if (bman_hwerr_txts[i].mask & isr_mask) {
+ dev_err_ratelimited(dev, "ErrInt: %s\n",
+ bman_hwerr_txts[i].txt);
+ if (bman_hwerr_txts[i].mask & ecsr_val) {
+ /* Re-arm error capture registers */
+ bm_ccsr_out(REG_ECSR, ecsr_val);
+ }
+ if (bman_hwerr_txts[i].mask & BMAN_ERRS_TO_DISABLE) {
+ dev_dbg(dev, "Disabling error 0x%x\n",
+ bman_hwerr_txts[i].mask);
+ ier_val &= ~bman_hwerr_txts[i].mask;
+ bm_ccsr_out(REG_ERR_IER, ier_val);
+ }
+ }
+ }
+ bm_ccsr_out(REG_ERR_ISR, isr_val);
+
+ return IRQ_HANDLED;
+}
+
+int bman_is_probed(void)
+{
+ return __bman_probed;
+}
+EXPORT_SYMBOL_GPL(bman_is_probed);
+
+int bman_requires_cleanup(void)
+{
+ return __bman_requires_cleanup;
+}
+
+void bman_done_cleanup(void)
+{
+ __bman_requires_cleanup = 0;
+}
+
+static int fsl_bman_probe(struct platform_device *pdev)
+{
+ int ret, err_irq;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct resource *res;
+ u16 id, bm_pool_cnt;
+ u8 major, minor;
+
+ __bman_probed = -1;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "Can't get %pOF property 'IORESOURCE_MEM'\n",
+ node);
+ return -ENXIO;
+ }
+ bm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res));
+ if (!bm_ccsr_start)
+ return -ENXIO;
+
+ bm_get_version(&id, &major, &minor);
+ if (major == 1 && minor == 0) {
+ bman_ip_rev = BMAN_REV10;
+ bm_pool_cnt = BM_POOL_MAX;
+ } else if (major == 2 && minor == 0) {
+ bman_ip_rev = BMAN_REV20;
+ bm_pool_cnt = 8;
+ } else if (major == 2 && minor == 1) {
+ bman_ip_rev = BMAN_REV21;
+ bm_pool_cnt = BM_POOL_MAX;
+ } else {
+ dev_err(dev, "Unknown Bman version:%04x,%02x,%02x\n",
+ id, major, minor);
+ return -ENODEV;
+ }
+
+ /*
+	 * If FBPR memory wasn't defined using the qbman compatible string,
+	 * try using the of_reserved_mem_device method.
+ */
+ if (!fbpr_a) {
+ ret = qbman_init_private_mem(dev, 0, &fbpr_a, &fbpr_sz);
+ if (ret) {
+ dev_err(dev, "qbman_init_private_mem() failed 0x%x\n",
+ ret);
+ return -ENODEV;
+ }
+ }
+
+ dev_dbg(dev, "Allocated FBPR 0x%llx 0x%zx\n", fbpr_a, fbpr_sz);
+
+ bm_set_memory(fbpr_a, fbpr_sz);
+
+ err_irq = platform_get_irq(pdev, 0);
+ if (err_irq <= 0) {
+ dev_info(dev, "Can't get %pOF IRQ\n", node);
+ return -ENODEV;
+ }
+ ret = devm_request_irq(dev, err_irq, bman_isr, IRQF_SHARED, "bman-err",
+ dev);
+ if (ret) {
+ dev_err(dev, "devm_request_irq() failed %d for '%pOF'\n",
+ ret, node);
+ return ret;
+ }
+ /* Disable Buffer Pool State Change */
+ bm_ccsr_out(REG_ERR_ISDR, BM_EIRQ_BSCN);
+ /*
+ * Write-to-clear any stale bits, (eg. starvation being asserted prior
+ * to resource allocation during driver init).
+ */
+ bm_ccsr_out(REG_ERR_ISR, 0xffffffff);
+ /* Enable Error Interrupts */
+ bm_ccsr_out(REG_ERR_IER, 0xffffffff);
+
+ bm_bpalloc = devm_gen_pool_create(dev, 0, -1, "bman-bpalloc");
+ if (IS_ERR(bm_bpalloc)) {
+ ret = PTR_ERR(bm_bpalloc);
+ dev_err(dev, "bman-bpalloc pool init failed (%d)\n", ret);
+ return ret;
+ }
+
+ /* seed BMan resource pool */
+ ret = gen_pool_add(bm_bpalloc, DPAA_GENALLOC_OFF, bm_pool_cnt, -1);
+ if (ret) {
+ dev_err(dev, "Failed to seed BPID range [%d..%d] (%d)\n",
+ 0, bm_pool_cnt - 1, ret);
+ return ret;
+ }
+
+ __bman_probed = 1;
+
+ return 0;
+};
+
+static const struct of_device_id fsl_bman_ids[] = {
+ {
+ .compatible = "fsl,bman",
+ },
+ {}
+};
+
+static struct platform_driver fsl_bman_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = fsl_bman_ids,
+ .suppress_bind_attrs = true,
+ },
+ .probe = fsl_bman_probe,
+};
+
+builtin_platform_driver(fsl_bman_driver);
diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c
new file mode 100644
index 0000000000..4d7b9caee1
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_portal.c
@@ -0,0 +1,244 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+static struct bman_portal *affine_bportals[NR_CPUS];
+static struct cpumask portal_cpus;
+static int __bman_portals_probed;
+/* protect bman global registers and global data shared among portals */
+static DEFINE_SPINLOCK(bman_lock);
+
+static struct bman_portal *init_pcfg(struct bm_portal_config *pcfg)
+{
+ struct bman_portal *p = bman_create_affine_portal(pcfg);
+
+ if (!p) {
+ dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n",
+ __func__, pcfg->cpu);
+ return NULL;
+ }
+
+ bman_p_irqsource_add(p, BM_PIRQ_RCRI);
+ affine_bportals[pcfg->cpu] = p;
+
+ dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
+
+ return p;
+}
+
+static int bman_offline_cpu(unsigned int cpu)
+{
+ struct bman_portal *p = affine_bportals[cpu];
+ const struct bm_portal_config *pcfg;
+
+ if (!p)
+ return 0;
+
+ pcfg = bman_get_bm_portal_config(p);
+ if (!pcfg)
+ return 0;
+
+ /* use any other online CPU */
+ cpu = cpumask_any_but(cpu_online_mask, cpu);
+ irq_set_affinity(pcfg->irq, cpumask_of(cpu));
+ return 0;
+}
+
+static int bman_online_cpu(unsigned int cpu)
+{
+ struct bman_portal *p = affine_bportals[cpu];
+ const struct bm_portal_config *pcfg;
+
+ if (!p)
+ return 0;
+
+ pcfg = bman_get_bm_portal_config(p);
+ if (!pcfg)
+ return 0;
+
+ irq_set_affinity(pcfg->irq, cpumask_of(cpu));
+ return 0;
+}
+
+int bman_portals_probed(void)
+{
+ return __bman_portals_probed;
+}
+EXPORT_SYMBOL_GPL(bman_portals_probed);
+
+static int bman_portal_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct bm_portal_config *pcfg;
+ struct resource *addr_phys[2];
+ int irq, cpu, err, i;
+
+ err = bman_is_probed();
+ if (!err)
+ return -EPROBE_DEFER;
+ if (err < 0) {
+ dev_err(&pdev->dev, "failing probe due to bman probe error\n");
+ return -ENODEV;
+ }
+
+ pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
+ if (!pcfg) {
+ __bman_portals_probed = -1;
+ return -ENOMEM;
+ }
+
+ pcfg->dev = dev;
+
+ addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM,
+ DPAA_PORTAL_CE);
+ if (!addr_phys[0]) {
+ dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node);
+ goto err_ioremap1;
+ }
+
+ addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
+ DPAA_PORTAL_CI);
+ if (!addr_phys[1]) {
+ dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node);
+ goto err_ioremap1;
+ }
+
+ pcfg->cpu = -1;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ goto err_ioremap1;
+ pcfg->irq = irq;
+
+ pcfg->addr_virt_ce = memremap(addr_phys[0]->start,
+ resource_size(addr_phys[0]),
+ QBMAN_MEMREMAP_ATTR);
+ if (!pcfg->addr_virt_ce) {
+ dev_err(dev, "memremap::CE failed\n");
+ goto err_ioremap1;
+ }
+
+ pcfg->addr_virt_ci = ioremap(addr_phys[1]->start,
+ resource_size(addr_phys[1]));
+ if (!pcfg->addr_virt_ci) {
+ dev_err(dev, "ioremap::CI failed\n");
+ goto err_ioremap2;
+ }
+
+ spin_lock(&bman_lock);
+ cpu = cpumask_first_zero(&portal_cpus);
+ if (cpu >= nr_cpu_ids) {
+ __bman_portals_probed = 1;
+ /* unassigned portal, skip init */
+ spin_unlock(&bman_lock);
+ goto check_cleanup;
+ }
+
+ cpumask_set_cpu(cpu, &portal_cpus);
+ spin_unlock(&bman_lock);
+ pcfg->cpu = cpu;
+
+ if (!init_pcfg(pcfg)) {
+ dev_err(dev, "portal init failed\n");
+ goto err_portal_init;
+ }
+
+ /* move the portal IRQ to another online CPU if the assigned cpu is offline */
+ if (!cpu_online(cpu))
+ bman_offline_cpu(cpu);
+
+check_cleanup:
+ if (__bman_portals_probed == 1 && bman_requires_cleanup()) {
+ /*
+ * BMan wasn't reset prior to boot (kexec, for example), so
+ * empty all the buffer pools to return them to their reset state.
+ */
+ for (i = 0; i < BM_POOL_MAX; i++) {
+ err = bm_shutdown_pool(i);
+ if (err) {
+ dev_err(dev, "Failed to shutdown bpool %d\n",
+ i);
+ goto err_portal_init;
+ }
+ }
+ bman_done_cleanup();
+ }
+
+ return 0;
+
+err_portal_init:
+ iounmap(pcfg->addr_virt_ci);
+err_ioremap2:
+ memunmap(pcfg->addr_virt_ce);
+err_ioremap1:
+ __bman_portals_probed = -1;
+
+ return -ENXIO;
+}
+
+static const struct of_device_id bman_portal_ids[] = {
+ {
+ .compatible = "fsl,bman-portal",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, bman_portal_ids);
+
+static struct platform_driver bman_portal_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = bman_portal_ids,
+ },
+ .probe = bman_portal_probe,
+};
+
+static int __init bman_portal_driver_register(struct platform_driver *drv)
+{
+ int ret;
+
+ ret = platform_driver_register(drv);
+ if (ret < 0)
+ return ret;
+
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "soc/qbman_portal:online",
+ bman_online_cpu, bman_offline_cpu);
+ if (ret < 0) {
+ pr_err("bman: failed to register hotplug callbacks.\n");
+ platform_driver_unregister(drv);
+ return ret;
+ }
+ return 0;
+}
+
+module_driver(bman_portal_driver,
+ bman_portal_driver_register, platform_driver_unregister);
diff --git a/drivers/soc/fsl/qbman/bman_priv.h b/drivers/soc/fsl/qbman/bman_priv.h
new file mode 100644
index 0000000000..aa3981e049
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_priv.h
@@ -0,0 +1,83 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "dpaa_sys.h"
+
+#include <soc/fsl/bman.h>
+
+/* Portal processing (interrupt) sources */
+#define BM_PIRQ_RCRI 0x00000002 /* RCR Ring (below threshold) */
+
+/* Revision info (for errata and feature handling) */
+#define BMAN_REV10 0x0100
+#define BMAN_REV20 0x0200
+#define BMAN_REV21 0x0201
+extern u16 bman_ip_rev; /* 0 if uninitialised, otherwise BMAN_REVx */
+
+extern struct gen_pool *bm_bpalloc;
+
+struct bm_portal_config {
+ /* Portal addresses */
+ void *addr_virt_ce;
+ void __iomem *addr_virt_ci;
+ /* Allow these to be joined in lists */
+ struct list_head list;
+ struct device *dev;
+ /* User-visible portal configuration settings */
+ /* portal is affined to this cpu */
+ int cpu;
+ /* portal interrupt line */
+ int irq;
+};
+
+struct bman_portal *bman_create_affine_portal(
+ const struct bm_portal_config *config);
+/*
+ * The bman_p_***() variant below may be called while the CPU to which the
+ * portal is affine is not yet online.
+ * @bman_portal specifies which portal the API will use.
+ */
+int bman_p_irqsource_add(struct bman_portal *p, u32 bits);
+
+/*
+ * Used by all portal interrupt registers except 'inhibit'
+ * This mask contains all the "irqsource" bits visible to API users
+ */
+#define BM_PIRQ_VISIBLE BM_PIRQ_RCRI
+
+const struct bm_portal_config *
+bman_get_bm_portal_config(const struct bman_portal *portal);
+
+int bman_requires_cleanup(void);
+void bman_done_cleanup(void);
+
+int bm_shutdown_pool(u32 bpid);
diff --git a/drivers/soc/fsl/qbman/bman_test.c b/drivers/soc/fsl/qbman/bman_test.c
new file mode 100644
index 0000000000..09b1c960b2
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_test.c
@@ -0,0 +1,53 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_test.h"
+
+MODULE_AUTHOR("Geoff Thorpe");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("BMan testing");
+
+static int test_init(void)
+{
+#ifdef CONFIG_FSL_BMAN_TEST_API
+ int loop = 1;
+
+ while (loop--)
+ bman_test_api();
+#endif
+ return 0;
+}
+
+static void test_exit(void)
+{
+}
+
+module_init(test_init);
+module_exit(test_exit);
diff --git a/drivers/soc/fsl/qbman/bman_test.h b/drivers/soc/fsl/qbman/bman_test.h
new file mode 100644
index 0000000000..037ed342ad
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_test.h
@@ -0,0 +1,35 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+void bman_test_api(void);
diff --git a/drivers/soc/fsl/qbman/bman_test_api.c b/drivers/soc/fsl/qbman/bman_test_api.c
new file mode 100644
index 0000000000..6f6bdd154f
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_test_api.c
@@ -0,0 +1,151 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_test.h"
+
+#define NUM_BUFS 93
+#define LOOPS 3
+#define BMAN_TOKEN_MASK 0x00FFFFFFFFFFLLU
+
+static struct bman_pool *pool;
+static struct bm_buffer bufs_in[NUM_BUFS] ____cacheline_aligned;
+static struct bm_buffer bufs_out[NUM_BUFS] ____cacheline_aligned;
+static int bufs_received;
+
+static void bufs_init(void)
+{
+ int i;
+
+ for (i = 0; i < NUM_BUFS; i++)
+ bm_buffer_set64(&bufs_in[i], 0xfedc01234567LLU * i);
+ bufs_received = 0;
+}
+
+static inline int bufs_cmp(const struct bm_buffer *a, const struct bm_buffer *b)
+{
+ if (bman_ip_rev == BMAN_REV20 || bman_ip_rev == BMAN_REV21) {
+
+ /*
+ * On SoCs with BMan revision 2.0, BMan only respects the 40
+ * LS-bits of buffer addresses, masking off the upper 8-bits on
+ * release commands. The API provides for 48-bit addresses
+ * because some SoCs support all 48-bits. When generating
+ * garbage addresses for testing, we either need to zero the
+ * upper 8-bits when releasing to BMan (otherwise we'll be
+ * disappointed when the buffers we acquire back from BMan
+ * don't match), or we need to mask the upper 8-bits off when
+ * comparing. We do the latter.
+ */
+ if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) <
+ (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
+ return -1;
+ if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) >
+ (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
+ return 1;
+ } else {
+ if (bm_buffer_get64(a) < bm_buffer_get64(b))
+ return -1;
+ if (bm_buffer_get64(a) > bm_buffer_get64(b))
+ return 1;
+ }
+
+ return 0;
+}
+
+static void bufs_confirm(void)
+{
+ int i, j;
+
+ for (i = 0; i < NUM_BUFS; i++) {
+ int matches = 0;
+
+ for (j = 0; j < NUM_BUFS; j++)
+ if (!bufs_cmp(&bufs_in[i], &bufs_out[j]))
+ matches++;
+ WARN_ON(matches != 1);
+ }
+}
+
+/*
+ * API test: release NUM_BUFS buffers in bursts of up to 8, acquire them all
+ * back, and check that each released buffer is seen exactly once; repeat
+ * LOOPS times.
+ */
+void bman_test_api(void)
+{
+ int i, loops = LOOPS;
+
+ bufs_init();
+
+ pr_info("%s(): Starting\n", __func__);
+
+ pool = bman_new_pool();
+ if (!pool) {
+ pr_crit("bman_new_pool() failed\n");
+ goto failed;
+ }
+
+ /* Release buffers */
+do_loop:
+ i = 0;
+ while (i < NUM_BUFS) {
+ int num = 8;
+
+ if (i + num > NUM_BUFS)
+ num = NUM_BUFS - i;
+ if (bman_release(pool, bufs_in + i, num)) {
+ pr_crit("bman_release() failed\n");
+ goto failed;
+ }
+ i += num;
+ }
+
+ /* Acquire buffers */
+ while (i > 0) {
+ int tmp, num = 8;
+
+ if (num > i)
+ num = i;
+ tmp = bman_acquire(pool, bufs_out + i - num, num);
+ WARN_ON(tmp != num);
+ i -= num;
+ }
+ i = bman_acquire(pool, NULL, 1);
+ WARN_ON(i > 0);
+
+ bufs_confirm();
+
+ if (--loops)
+ goto do_loop;
+
+ /* Clean up */
+ bman_free_pool(pool);
+ pr_info("%s(): Finished\n", __func__);
+ return;
+
+failed:
+ WARN_ON(1);
+}
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.c b/drivers/soc/fsl/qbman/dpaa_sys.c
new file mode 100644
index 0000000000..3375145004
--- /dev/null
+++ b/drivers/soc/fsl/qbman/dpaa_sys.c
@@ -0,0 +1,89 @@
+/* Copyright 2017 NXP Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of NXP Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NXP Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL NXP Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/dma-mapping.h>
+#include "dpaa_sys.h"
+
+/*
+ * Initialize a device's private memory region
+ */
+int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
+ size_t *size)
+{
+ struct device_node *mem_node;
+ struct reserved_mem *rmem;
+ int err;
+ __be32 *res_array;
+
+ mem_node = of_parse_phandle(dev->of_node, "memory-region", idx);
+ if (!mem_node) {
+ dev_err(dev, "No memory-region found for index %d\n", idx);
+ return -ENODEV;
+ }
+
+ rmem = of_reserved_mem_lookup(mem_node);
+ if (!rmem) {
+ dev_err(dev, "of_reserved_mem_lookup() returned NULL\n");
+ return -ENODEV;
+ }
+ *addr = rmem->base;
+ *size = rmem->size;
+
+ /*
+ * Check if the reg property exists - if not, add it to the node so that
+ * the same memory region address is preserved across kexec().
+ * This is needed because QBMan HW does not allow the base address/
+ * size to be modified once set.
+ */
+ if (!of_property_present(mem_node, "reg")) {
+ struct property *prop;
+
+ prop = devm_kzalloc(dev, sizeof(*prop), GFP_KERNEL);
+ if (!prop)
+ return -ENOMEM;
+ prop->value = res_array = devm_kzalloc(dev, sizeof(__be32) * 4,
+ GFP_KERNEL);
+ if (!prop->value)
+ return -ENOMEM;
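+ /*
+ * Encode the region as <addr_hi addr_lo size_hi size_lo>, i.e.
+ * a 64-bit address and 64-bit size (two cells each).
+ */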
+ res_array[0] = cpu_to_be32(upper_32_bits(*addr));
+ res_array[1] = cpu_to_be32(lower_32_bits(*addr));
+ res_array[2] = cpu_to_be32(upper_32_bits(*size));
+ res_array[3] = cpu_to_be32(lower_32_bits(*size));
+ prop->length = sizeof(__be32) * 4;
+ prop->name = devm_kstrdup(dev, "reg", GFP_KERNEL);
+ if (!prop->name)
+ return -ENOMEM;
+ err = of_add_property(mem_node, prop);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.h b/drivers/soc/fsl/qbman/dpaa_sys.h
new file mode 100644
index 0000000000..ae8afa552b
--- /dev/null
+++ b/drivers/soc/fsl/qbman/dpaa_sys.h
@@ -0,0 +1,134 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA_SYS_H
+#define __DPAA_SYS_H
+
+#include <linux/cpu.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/sched/signal.h>
+#include <linux/vmalloc.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/prefetch.h>
+#include <linux/genalloc.h>
+#include <asm/cacheflush.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+
+/* For 2-element tables related to cache-inhibited and cache-enabled mappings */
+#define DPAA_PORTAL_CE 0
+#define DPAA_PORTAL_CI 1
+
+static inline void dpaa_flush(void *p)
+{
+ /*
+ * Only PPC needs to flush the cache currently - on ARM the mapping
+ * is non-cacheable
+ */
+#ifdef CONFIG_PPC
+ flush_dcache_range((unsigned long)p, (unsigned long)p+64);
+#endif
+}
+
+#define dpaa_invalidate(p) dpaa_flush(p)
+
+#define dpaa_zero(p) memset(p, 0, 64)
+
+static inline void dpaa_touch_ro(void *p)
+{
+#if (L1_CACHE_BYTES == 32)
+ prefetch(p+32);
+#endif
+ prefetch(p);
+}
+
+/* Commonly used combo */
+static inline void dpaa_invalidate_touch_ro(void *p)
+{
+ dpaa_invalidate(p);
+ dpaa_touch_ro(p);
+}
+
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+#define DPAA_ASSERT(x) WARN_ON(!(x))
+#else
+#define DPAA_ASSERT(x)
+#endif
+
+/* cyclic helper for rings */
+static inline u8 dpaa_cyc_diff(u8 ringsize, u8 first, u8 last)
+{
+ /* 'first' is included, 'last' is excluded */
+ if (first <= last)
+ return last - first;
+ return ringsize + last - first;
+}
+
+/* Offset applied to genalloc pools due to zero being an error return */
+#define DPAA_GENALLOC_OFF 0x80000000
+
+/* Initialize the device's private memory region */
+int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
+ size_t *size);
+
+/* memremap() attributes for different platforms */
+#ifdef CONFIG_PPC
+#define QBMAN_MEMREMAP_ATTR MEMREMAP_WB
+#else
+#define QBMAN_MEMREMAP_ATTR MEMREMAP_WC
+#endif
+
+static inline int dpaa_set_portal_irq_affinity(struct device *dev,
+ int irq, int cpu)
+{
+ int ret = 0;
+
+ if (!irq_can_set_affinity(irq)) {
+ dev_err(dev, "unable to set IRQ affinity\n");
+ return -EINVAL;
+ }
+
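+ /* fall back to any online CPU if the requested CPU is unset or offline */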
+ if (cpu == -1 || !cpu_online(cpu))
+ cpu = cpumask_any(cpu_online_mask);
+
+ ret = irq_set_affinity(irq, cpumask_of(cpu));
+ if (ret)
+ dev_err(dev, "irq_set_affinity() on CPU %d failed\n", cpu);
+
+ return ret;
+}
+
+#endif /* __DPAA_SYS_H */
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
new file mode 100644
index 0000000000..739e4eee6b
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -0,0 +1,3053 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+#define DQRR_MAXFILL 15
+#define EQCR_ITHRESH 4 /* if EQCR congests, interrupt threshold */
+#define IRQNAME "QMan portal %d"
+#define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */
+#define QMAN_POLL_LIMIT 32
+#define QMAN_PIRQ_DQRR_ITHRESH 12
+#define QMAN_DQRR_IT_MAX 15
+#define QMAN_ITP_MAX 0xFFF
+#define QMAN_PIRQ_MR_ITHRESH 4
+#define QMAN_PIRQ_IPERIOD 100
+
+/* Portal register assists */
+
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+/* Cache-inhibited register offsets */
+#define QM_REG_EQCR_PI_CINH 0x3000
+#define QM_REG_EQCR_CI_CINH 0x3040
+#define QM_REG_EQCR_ITR 0x3080
+#define QM_REG_DQRR_PI_CINH 0x3100
+#define QM_REG_DQRR_CI_CINH 0x3140
+#define QM_REG_DQRR_ITR 0x3180
+#define QM_REG_DQRR_DCAP 0x31C0
+#define QM_REG_DQRR_SDQCR 0x3200
+#define QM_REG_DQRR_VDQCR 0x3240
+#define QM_REG_DQRR_PDQCR 0x3280
+#define QM_REG_MR_PI_CINH 0x3300
+#define QM_REG_MR_CI_CINH 0x3340
+#define QM_REG_MR_ITR 0x3380
+#define QM_REG_CFG 0x3500
+#define QM_REG_ISR 0x3600
+#define QM_REG_IER 0x3640
+#define QM_REG_ISDR 0x3680
+#define QM_REG_IIR 0x36C0
+#define QM_REG_ITPR 0x3740
+
+/* Cache-enabled register offsets */
+#define QM_CL_EQCR 0x0000
+#define QM_CL_DQRR 0x1000
+#define QM_CL_MR 0x2000
+#define QM_CL_EQCR_PI_CENA 0x3000
+#define QM_CL_EQCR_CI_CENA 0x3040
+#define QM_CL_DQRR_PI_CENA 0x3100
+#define QM_CL_DQRR_CI_CENA 0x3140
+#define QM_CL_MR_PI_CENA 0x3300
+#define QM_CL_MR_CI_CENA 0x3340
+#define QM_CL_CR 0x3800
+#define QM_CL_RR0 0x3900
+#define QM_CL_RR1 0x3940
+
+#else
+/* Cache-inhibited register offsets */
+#define QM_REG_EQCR_PI_CINH 0x0000
+#define QM_REG_EQCR_CI_CINH 0x0004
+#define QM_REG_EQCR_ITR 0x0008
+#define QM_REG_DQRR_PI_CINH 0x0040
+#define QM_REG_DQRR_CI_CINH 0x0044
+#define QM_REG_DQRR_ITR 0x0048
+#define QM_REG_DQRR_DCAP 0x0050
+#define QM_REG_DQRR_SDQCR 0x0054
+#define QM_REG_DQRR_VDQCR 0x0058
+#define QM_REG_DQRR_PDQCR 0x005c
+#define QM_REG_MR_PI_CINH 0x0080
+#define QM_REG_MR_CI_CINH 0x0084
+#define QM_REG_MR_ITR 0x0088
+#define QM_REG_CFG 0x0100
+#define QM_REG_ISR 0x0e00
+#define QM_REG_IER 0x0e04
+#define QM_REG_ISDR 0x0e08
+#define QM_REG_IIR 0x0e0c
+#define QM_REG_ITPR 0x0e14
+
+/* Cache-enabled register offsets */
+#define QM_CL_EQCR 0x0000
+#define QM_CL_DQRR 0x1000
+#define QM_CL_MR 0x2000
+#define QM_CL_EQCR_PI_CENA 0x3000
+#define QM_CL_EQCR_CI_CENA 0x3100
+#define QM_CL_DQRR_PI_CENA 0x3200
+#define QM_CL_DQRR_CI_CENA 0x3300
+#define QM_CL_MR_PI_CENA 0x3400
+#define QM_CL_MR_CI_CENA 0x3500
+#define QM_CL_CR 0x3800
+#define QM_CL_RR0 0x3900
+#define QM_CL_RR1 0x3940
+#endif
+
+/*
+ * BTW, the drivers (and h/w programming model) already obtain the required
+ * synchronisation for portal accesses and data-dependencies. Use of barrier()s
+ * or other order-preserving primitives simply degrade performance. Hence the
+ * use of the __raw_*() interfaces, which simply ensure that the compiler treats
+ * the portal registers as volatile
+ */
+
+/* Cache-enabled ring access */
+#define qm_cl(base, idx) ((void *)base + ((idx) << 6))
+
+/*
+ * Portal modes.
+ * Enum types:
+ * pmode == production mode
+ * cmode == consumption mode
+ * dmode == h/w dequeue mode.
+ * Enum values use 3 letter codes. First letter matches the portal mode,
+ * remaining two letters indicate:
+ * ci == cache-inhibited portal register
+ * ce == cache-enabled portal register
+ * vb == in-band valid-bit (cache-enabled)
+ * dc == DCA (Discrete Consumption Acknowledgment), DQRR-only
+ * As for "enum qm_dqrr_dmode", it should be self-explanatory.
+ */
+enum qm_eqcr_pmode { /* matches QCSP_CFG::EPM */
+ qm_eqcr_pci = 0, /* PI index, cache-inhibited */
+ qm_eqcr_pce = 1, /* PI index, cache-enabled */
+ qm_eqcr_pvb = 2 /* valid-bit */
+};
+enum qm_dqrr_dmode { /* matches QCSP_CFG::DP */
+ qm_dqrr_dpush = 0, /* SDQCR + VDQCR */
+ qm_dqrr_dpull = 1 /* PDQCR */
+};
+enum qm_dqrr_pmode { /* s/w-only */
+ qm_dqrr_pci, /* reads DQRR_PI_CINH */
+ qm_dqrr_pce, /* reads DQRR_PI_CENA */
+ qm_dqrr_pvb /* reads valid-bit */
+};
+enum qm_dqrr_cmode { /* matches QCSP_CFG::DCM */
+ qm_dqrr_cci = 0, /* CI index, cache-inhibited */
+ qm_dqrr_cce = 1, /* CI index, cache-enabled */
+ qm_dqrr_cdc = 2 /* Discrete Consumption Acknowledgment */
+};
+enum qm_mr_pmode { /* s/w-only */
+ qm_mr_pci, /* reads MR_PI_CINH */
+ qm_mr_pce, /* reads MR_PI_CENA */
+ qm_mr_pvb /* reads valid-bit */
+};
+enum qm_mr_cmode { /* matches QCSP_CFG::MM */
+ qm_mr_cci = 0, /* CI index, cache-inhibited */
+ qm_mr_cce = 1 /* CI index, cache-enabled */
+};
+
+/* --- Portal structures --- */
+
+#define QM_EQCR_SIZE 8
+#define QM_DQRR_SIZE 16
+#define QM_MR_SIZE 8
+
+/* "Enqueue Command" */
+struct qm_eqcr_entry {
+ u8 _ncw_verb; /* writes to this are non-coherent */
+ u8 dca;
+ __be16 seqnum;
+ u8 __reserved[4];
+ __be32 fqid; /* 24-bit */
+ __be32 tag;
+ struct qm_fd fd;
+ u8 __reserved3[32];
+} __packed __aligned(8);
+#define QM_EQCR_VERB_VBIT 0x80
+#define QM_EQCR_VERB_CMD_MASK 0x61 /* but only one value; */
+#define QM_EQCR_VERB_CMD_ENQUEUE 0x01
+#define QM_EQCR_SEQNUM_NESN 0x8000 /* Advance NESN */
+#define QM_EQCR_SEQNUM_NLIS 0x4000 /* More fragments to come */
+#define QM_EQCR_SEQNUM_SEQMASK 0x3fff /* sequence number goes here */
+
+struct qm_eqcr {
+ struct qm_eqcr_entry *ring, *cursor;
+ u8 ci, available, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ u32 busy;
+ enum qm_eqcr_pmode pmode;
+#endif
+};
+
+struct qm_dqrr {
+ const struct qm_dqrr_entry *ring, *cursor;
+ u8 pi, ci, fill, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ enum qm_dqrr_dmode dmode;
+ enum qm_dqrr_pmode pmode;
+ enum qm_dqrr_cmode cmode;
+#endif
+};
+
+struct qm_mr {
+ union qm_mr_entry *ring, *cursor;
+ u8 pi, ci, fill, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ enum qm_mr_pmode pmode;
+ enum qm_mr_cmode cmode;
+#endif
+};
+
+/* MC (Management Command) command */
+/* "FQ" command layout */
+struct qm_mcc_fq {
+ u8 _ncw_verb;
+ u8 __reserved1[3];
+ __be32 fqid; /* 24-bit */
+ u8 __reserved2[56];
+} __packed;
+
+/* "CGR" command layout */
+struct qm_mcc_cgr {
+ u8 _ncw_verb;
+ u8 __reserved1[30];
+ u8 cgid;
+ u8 __reserved2[32];
+};
+
+#define QM_MCC_VERB_VBIT 0x80
+#define QM_MCC_VERB_MASK 0x7f /* where the verb contains; */
+#define QM_MCC_VERB_INITFQ_PARKED 0x40
+#define QM_MCC_VERB_INITFQ_SCHED 0x41
+#define QM_MCC_VERB_QUERYFQ 0x44
+#define QM_MCC_VERB_QUERYFQ_NP 0x45 /* "non-programmable" fields */
+#define QM_MCC_VERB_QUERYWQ 0x46
+#define QM_MCC_VERB_QUERYWQ_DEDICATED 0x47
+#define QM_MCC_VERB_ALTER_SCHED 0x48 /* Schedule FQ */
+#define QM_MCC_VERB_ALTER_FE 0x49 /* Force Eligible FQ */
+#define QM_MCC_VERB_ALTER_RETIRE 0x4a /* Retire FQ */
+#define QM_MCC_VERB_ALTER_OOS 0x4b /* Take FQ out of service */
+#define QM_MCC_VERB_ALTER_FQXON 0x4d /* FQ XON */
+#define QM_MCC_VERB_ALTER_FQXOFF 0x4e /* FQ XOFF */
+#define QM_MCC_VERB_INITCGR 0x50
+#define QM_MCC_VERB_MODIFYCGR 0x51
+#define QM_MCC_VERB_CGRTESTWRITE 0x52
+#define QM_MCC_VERB_QUERYCGR 0x58
+#define QM_MCC_VERB_QUERYCONGESTION 0x59
+union qm_mc_command {
+ struct {
+ u8 _ncw_verb; /* writes to this are non-coherent */
+ u8 __reserved[63];
+ };
+ struct qm_mcc_initfq initfq;
+ struct qm_mcc_initcgr initcgr;
+ struct qm_mcc_fq fq;
+ struct qm_mcc_cgr cgr;
+};
+
+/* MC (Management Command) result */
+/* "Query FQ" */
+struct qm_mcr_queryfq {
+ u8 verb;
+ u8 result;
+ u8 __reserved1[8];
+ struct qm_fqd fqd; /* the FQD fields are here */
+ u8 __reserved2[30];
+} __packed;
+
+/* "Alter FQ State Commands" */
+struct qm_mcr_alterfq {
+ u8 verb;
+ u8 result;
+ u8 fqs; /* Frame Queue Status */
+ u8 __reserved1[61];
+};
+#define QM_MCR_VERB_RRID 0x80
+#define QM_MCR_VERB_MASK QM_MCC_VERB_MASK
+#define QM_MCR_VERB_INITFQ_PARKED QM_MCC_VERB_INITFQ_PARKED
+#define QM_MCR_VERB_INITFQ_SCHED QM_MCC_VERB_INITFQ_SCHED
+#define QM_MCR_VERB_QUERYFQ QM_MCC_VERB_QUERYFQ
+#define QM_MCR_VERB_QUERYFQ_NP QM_MCC_VERB_QUERYFQ_NP
+#define QM_MCR_VERB_QUERYWQ QM_MCC_VERB_QUERYWQ
+#define QM_MCR_VERB_QUERYWQ_DEDICATED QM_MCC_VERB_QUERYWQ_DEDICATED
+#define QM_MCR_VERB_ALTER_SCHED QM_MCC_VERB_ALTER_SCHED
+#define QM_MCR_VERB_ALTER_FE QM_MCC_VERB_ALTER_FE
+#define QM_MCR_VERB_ALTER_RETIRE QM_MCC_VERB_ALTER_RETIRE
+#define QM_MCR_VERB_ALTER_OOS QM_MCC_VERB_ALTER_OOS
+#define QM_MCR_RESULT_NULL 0x00
+#define QM_MCR_RESULT_OK 0xf0
+#define QM_MCR_RESULT_ERR_FQID 0xf1
+#define QM_MCR_RESULT_ERR_FQSTATE 0xf2
+#define QM_MCR_RESULT_ERR_NOTEMPTY 0xf3 /* OOS fails if FQ is !empty */
+#define QM_MCR_RESULT_ERR_BADCHANNEL 0xf4
+#define QM_MCR_RESULT_PENDING 0xf8
+#define QM_MCR_RESULT_ERR_BADCOMMAND 0xff
+#define QM_MCR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
+#define QM_MCR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
+#define QM_MCR_TIMEOUT 10000 /* us */
+union qm_mc_result {
+ struct {
+ u8 verb;
+ u8 result;
+ u8 __reserved1[62];
+ };
+ struct qm_mcr_queryfq queryfq;
+ struct qm_mcr_alterfq alterfq;
+ struct qm_mcr_querycgr querycgr;
+ struct qm_mcr_querycongestion querycongestion;
+ struct qm_mcr_querywq querywq;
+ struct qm_mcr_queryfq_np queryfq_np;
+};
+
+struct qm_mc {
+ union qm_mc_command *cr;
+ union qm_mc_result *rr;
+ u8 rridx, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ enum {
+ /* Can be _mc_start()ed */
+ qman_mc_idle,
+ /* Can be _mc_commit()ed or _mc_abort()ed */
+ qman_mc_user,
+ /* Can only be _mc_retry()ed */
+ qman_mc_hw
+ } state;
+#endif
+};
+
+struct qm_addr {
+ void *ce; /* cache-enabled */
+ __be32 *ce_be; /* same value as above but for direct access */
+ void __iomem *ci; /* cache-inhibited */
+};
+
+struct qm_portal {
+ /*
+ * In the non-CONFIG_FSL_DPAA_CHECKING case, the following stuff up to
+ * and including 'mc' fits within a cacheline (yay!). The 'config' part
+ * is setup-only, so isn't a cause for concern. In other words, don't
+ * rearrange this structure on a whim, there be dragons ...
+ */
+ struct qm_addr addr;
+ struct qm_eqcr eqcr;
+ struct qm_dqrr dqrr;
+ struct qm_mr mr;
+ struct qm_mc mc;
+} ____cacheline_aligned;
+
+/* Cache-inhibited register access. */
+static inline u32 qm_in(struct qm_portal *p, u32 offset)
+{
+ return ioread32be(p->addr.ci + offset);
+}
+
+static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
+{
+ iowrite32be(val, p->addr.ci + offset);
+}
+
+/* Cache Enabled Portal Access */
+static inline void qm_cl_invalidate(struct qm_portal *p, u32 offset)
+{
+ dpaa_invalidate(p->addr.ce + offset);
+}
+
+static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
+{
+ dpaa_touch_ro(p->addr.ce + offset);
+}
+
+static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
+{
+ return be32_to_cpu(*(p->addr.ce_be + (offset/4)));
+}
+
+/* --- EQCR API --- */
+
+#define EQCR_SHIFT ilog2(sizeof(struct qm_eqcr_entry))
+#define EQCR_CARRY (uintptr_t)(QM_EQCR_SIZE << EQCR_SHIFT)
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+static struct qm_eqcr_entry *eqcr_carryclear(struct qm_eqcr_entry *p)
+{
+ uintptr_t addr = (uintptr_t)p;
+
+ addr &= ~EQCR_CARRY;
+
+ return (struct qm_eqcr_entry *)addr;
+}
+
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static int eqcr_ptr2idx(struct qm_eqcr_entry *e)
+{
+ return ((uintptr_t)e >> EQCR_SHIFT) & (QM_EQCR_SIZE - 1);
+}
+
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void eqcr_inc(struct qm_eqcr *eqcr)
+{
+ /* increment to the next EQCR pointer and handle overflow and 'vbit' */
+ struct qm_eqcr_entry *partial = eqcr->cursor + 1;
+
+ eqcr->cursor = eqcr_carryclear(partial);
+ if (partial != eqcr->cursor)
+ eqcr->vbit ^= QM_EQCR_VERB_VBIT;
+}
+
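+/*
+ * Seed the EQCR shadow state (cursor, valid bit, available count) from the
+ * hardware PI/CI indices and program the stashing/production-mode fields of
+ * QCSP_CFG.
+ */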
+static inline int qm_eqcr_init(struct qm_portal *portal,
+ enum qm_eqcr_pmode pmode,
+ unsigned int eq_stash_thresh,
+ int eq_stash_prio)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+ u32 cfg;
+ u8 pi;
+
+ eqcr->ring = portal->addr.ce + QM_CL_EQCR;
+ eqcr->ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+ qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
+ pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+ eqcr->cursor = eqcr->ring + pi;
+ eqcr->vbit = (qm_in(portal, QM_REG_EQCR_PI_CINH) & QM_EQCR_SIZE) ?
+ QM_EQCR_VERB_VBIT : 0;
+ eqcr->available = QM_EQCR_SIZE - 1 -
+ dpaa_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
+ eqcr->ithresh = qm_in(portal, QM_REG_EQCR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ eqcr->busy = 0;
+ eqcr->pmode = pmode;
+#endif
+ cfg = (qm_in(portal, QM_REG_CFG) & 0x00ffffff) |
+ (eq_stash_thresh << 28) | /* QCSP_CFG: EST */
+ (eq_stash_prio << 26) | /* QCSP_CFG: EP */
+ ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */
+ qm_out(portal, QM_REG_CFG, cfg);
+ return 0;
+}
+
+static inline void qm_eqcr_finish(struct qm_portal *portal)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+ u8 ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+
+ DPAA_ASSERT(!eqcr->busy);
+ if (pi != eqcr_ptr2idx(eqcr->cursor))
+ pr_crit("losing uncommitted EQCR entries\n");
+ if (ci != eqcr->ci)
+ pr_crit("missing existing EQCR completions\n");
+ if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor))
+ pr_crit("EQCR destroyed unquiesced\n");
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
+ *portal)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+
+ DPAA_ASSERT(!eqcr->busy);
+ if (!eqcr->available)
+ return NULL;
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ eqcr->busy = 1;
+#endif
+ dpaa_zero(eqcr->cursor);
+ return eqcr->cursor;
+}
+
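+/*
+ * Like qm_eqcr_start_no_stash(), but when the shadow ring looks full this
+ * re-reads the consumer index from the cache-enabled register to reclaim
+ * entries the hardware has already consumed.
+ */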
+static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
+ *portal)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 diff, old_ci;
+
+ DPAA_ASSERT(!eqcr->busy);
+ if (!eqcr->available) {
+ old_ci = eqcr->ci;
+ eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) &
+ (QM_EQCR_SIZE - 1);
+ diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ if (!diff)
+ return NULL;
+ }
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ eqcr->busy = 1;
+#endif
+ dpaa_zero(eqcr->cursor);
+ return eqcr->cursor;
+}
+
+static inline void eqcr_commit_checks(struct qm_eqcr *eqcr)
+{
+ DPAA_ASSERT(eqcr->busy);
+ DPAA_ASSERT(!(be32_to_cpu(eqcr->cursor->fqid) & ~QM_FQID_MASK));
+ DPAA_ASSERT(eqcr->available >= 1);
+}
+
+static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+ struct qm_eqcr_entry *eqcursor;
+
+ eqcr_commit_checks(eqcr);
+ DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
+ dma_wmb();
+ eqcursor = eqcr->cursor;
+ eqcursor->_ncw_verb = myverb | eqcr->vbit;
+ dpaa_flush(eqcursor);
+ eqcr_inc(eqcr);
+ eqcr->available--;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ eqcr->busy = 0;
+#endif
+}
+
+static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
+{
+ qm_cl_touch_ro(portal, QM_CL_EQCR_CI_CENA);
+}
+
+static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 diff, old_ci = eqcr->ci;
+
+ eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) & (QM_EQCR_SIZE - 1);
+ qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
+ diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ return diff;
+}
+
+static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+
+ eqcr->ithresh = ithresh;
+ qm_out(portal, QM_REG_EQCR_ITR, ithresh);
+}
+
+static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+
+ return eqcr->available;
+}
+
+static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+
+ return QM_EQCR_SIZE - 1 - eqcr->available;
+}
+
+/* --- DQRR API --- */
+
+#define DQRR_SHIFT ilog2(sizeof(struct qm_dqrr_entry))
+#define DQRR_CARRY (uintptr_t)(QM_DQRR_SIZE << DQRR_SHIFT)
+
+static const struct qm_dqrr_entry *dqrr_carryclear(
+ const struct qm_dqrr_entry *p)
+{
+ uintptr_t addr = (uintptr_t)p;
+
+ addr &= ~DQRR_CARRY;
+
+ return (const struct qm_dqrr_entry *)addr;
+}
+
+static inline int dqrr_ptr2idx(const struct qm_dqrr_entry *e)
+{
+ return ((uintptr_t)e >> DQRR_SHIFT) & (QM_DQRR_SIZE - 1);
+}
+
+static const struct qm_dqrr_entry *dqrr_inc(const struct qm_dqrr_entry *e)
+{
+ return dqrr_carryclear(e + 1);
+}
+
+static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
+{
+ qm_out(portal, QM_REG_CFG, (qm_in(portal, QM_REG_CFG) & 0xff0fffff) |
+ ((mf & (QM_DQRR_SIZE - 1)) << 20));
+}
+
+static inline int qm_dqrr_init(struct qm_portal *portal,
+ const struct qm_portal_config *config,
+ enum qm_dqrr_dmode dmode,
+ enum qm_dqrr_pmode pmode,
+ enum qm_dqrr_cmode cmode, u8 max_fill)
+{
+ struct qm_dqrr *dqrr = &portal->dqrr;
+ u32 cfg;
+
+ /* Make sure the DQRR will be idle when we enable */
+ qm_out(portal, QM_REG_DQRR_SDQCR, 0);
+ qm_out(portal, QM_REG_DQRR_VDQCR, 0);
+ qm_out(portal, QM_REG_DQRR_PDQCR, 0);
+ dqrr->ring = portal->addr.ce + QM_CL_DQRR;
+ dqrr->pi = qm_in(portal, QM_REG_DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
+ dqrr->ci = qm_in(portal, QM_REG_DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
+ dqrr->cursor = dqrr->ring + dqrr->ci;
+ dqrr->fill = dpaa_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
+ dqrr->vbit = (qm_in(portal, QM_REG_DQRR_PI_CINH) & QM_DQRR_SIZE) ?
+ QM_DQRR_VERB_VBIT : 0;
+ dqrr->ithresh = qm_in(portal, QM_REG_DQRR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ dqrr->dmode = dmode;
+ dqrr->pmode = pmode;
+ dqrr->cmode = cmode;
+#endif
+ /* Invalidate every ring entry before beginning */
+ for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
+ dpaa_invalidate(qm_cl(dqrr->ring, cfg));
+ cfg = (qm_in(portal, QM_REG_CFG) & 0xff000f00) |
+ ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
+ ((dmode & 1) << 18) | /* DP */
+ ((cmode & 3) << 16) | /* DCM */
+ 0xa0 | /* RE+SE */
+ (0 ? 0x40 : 0) | /* Ignore RP */
+ (0 ? 0x10 : 0); /* Ignore SP */
+ qm_out(portal, QM_REG_CFG, cfg);
+ qm_dqrr_set_maxfill(portal, max_fill);
+ return 0;
+}
+
+static inline void qm_dqrr_finish(struct qm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ struct qm_dqrr *dqrr = &portal->dqrr;
+
+ if (dqrr->cmode != qm_dqrr_cdc &&
+ dqrr->ci != dqrr_ptr2idx(dqrr->cursor))
+ pr_crit("Ignoring completed DQRR entries\n");
+#endif
+}
+
+static inline const struct qm_dqrr_entry *qm_dqrr_current(
+ struct qm_portal *portal)
+{
+ struct qm_dqrr *dqrr = &portal->dqrr;
+
+ if (!dqrr->fill)
+ return NULL;
+ return dqrr->cursor;
+}
+
+static inline u8 qm_dqrr_next(struct qm_portal *portal)
+{
+ struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPAA_ASSERT(dqrr->fill);
+ dqrr->cursor = dqrr_inc(dqrr->cursor);
+ return --dqrr->fill;
+}
+
+static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
+{
+ struct qm_dqrr *dqrr = &portal->dqrr;
+ struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);
+
+ DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
+#ifndef CONFIG_FSL_PAMU
+ /*
+ * If PAMU is not available we need to invalidate the cache.
+ * When PAMU is available the cache is updated by stash
+ */
+ dpaa_invalidate_touch_ro(res);
+#endif
+ if ((res->verb & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
+ dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
+ if (!dqrr->pi)
+ dqrr->vbit ^= QM_DQRR_VERB_VBIT;
+ dqrr->fill++;
+ }
+}
+
+static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
+ const struct qm_dqrr_entry *dq,
+ int park)
+{
+ __maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
+ int idx = dqrr_ptr2idx(dq);
+
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+ DPAA_ASSERT((dqrr->ring + idx) == dq);
+ DPAA_ASSERT(idx < QM_DQRR_SIZE);
+ qm_out(portal, QM_REG_DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */
+ ((park ? 1 : 0) << 6) | /* DQRR_DCAP::PK */
+ idx); /* DQRR_DCAP::DCAP_CI */
+}
+
+static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u32 bitmask)
+{
+ __maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+ qm_out(portal, QM_REG_DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */
+ (bitmask << 16)); /* DQRR_DCAP::DCAP_CI */
+}
+
+static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
+{
+ qm_out(portal, QM_REG_DQRR_SDQCR, sdqcr);
+}
+
+static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
+{
+ qm_out(portal, QM_REG_DQRR_VDQCR, vdqcr);
+}
+
+static inline int qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+
+ if (ithresh > QMAN_DQRR_IT_MAX)
+ return -EINVAL;
+
+ qm_out(portal, QM_REG_DQRR_ITR, ithresh);
+
+ return 0;
+}
+
+/* --- MR API --- */
+
+#define MR_SHIFT ilog2(sizeof(union qm_mr_entry))
+#define MR_CARRY (uintptr_t)(QM_MR_SIZE << MR_SHIFT)
+
+static union qm_mr_entry *mr_carryclear(union qm_mr_entry *p)
+{
+ uintptr_t addr = (uintptr_t)p;
+
+ addr &= ~MR_CARRY;
+
+ return (union qm_mr_entry *)addr;
+}
+
+static inline int mr_ptr2idx(const union qm_mr_entry *e)
+{
+ return ((uintptr_t)e >> MR_SHIFT) & (QM_MR_SIZE - 1);
+}
+
+static inline union qm_mr_entry *mr_inc(union qm_mr_entry *e)
+{
+ return mr_carryclear(e + 1);
+}
+
+static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode,
+ enum qm_mr_cmode cmode)
+{
+ struct qm_mr *mr = &portal->mr;
+ u32 cfg;
+
+ mr->ring = portal->addr.ce + QM_CL_MR;
+ mr->pi = qm_in(portal, QM_REG_MR_PI_CINH) & (QM_MR_SIZE - 1);
+ mr->ci = qm_in(portal, QM_REG_MR_CI_CINH) & (QM_MR_SIZE - 1);
+ mr->cursor = mr->ring + mr->ci;
+ mr->fill = dpaa_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
+ mr->vbit = (qm_in(portal, QM_REG_MR_PI_CINH) & QM_MR_SIZE)
+ ? QM_MR_VERB_VBIT : 0;
+ mr->ithresh = qm_in(portal, QM_REG_MR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mr->pmode = pmode;
+ mr->cmode = cmode;
+#endif
+ cfg = (qm_in(portal, QM_REG_CFG) & 0xfffff0ff) |
+ ((cmode & 1) << 8); /* QCSP_CFG:MM */
+ qm_out(portal, QM_REG_CFG, cfg);
+ return 0;
+}
+
+static inline void qm_mr_finish(struct qm_portal *portal)
+{
+ struct qm_mr *mr = &portal->mr;
+
+ if (mr->ci != mr_ptr2idx(mr->cursor))
+ pr_crit("Ignoring completed MR entries\n");
+}
+
+static inline const union qm_mr_entry *qm_mr_current(struct qm_portal *portal)
+{
+ struct qm_mr *mr = &portal->mr;
+
+ if (!mr->fill)
+ return NULL;
+ return mr->cursor;
+}
+
+static inline int qm_mr_next(struct qm_portal *portal)
+{
+ struct qm_mr *mr = &portal->mr;
+
+ DPAA_ASSERT(mr->fill);
+ mr->cursor = mr_inc(mr->cursor);
+ return --mr->fill;
+}
+
+static inline void qm_mr_pvb_update(struct qm_portal *portal)
+{
+ struct qm_mr *mr = &portal->mr;
+ union qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
+
+ DPAA_ASSERT(mr->pmode == qm_mr_pvb);
+
+ if ((res->verb & QM_MR_VERB_VBIT) == mr->vbit) {
+ mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
+ if (!mr->pi)
+ mr->vbit ^= QM_MR_VERB_VBIT;
+ mr->fill++;
+ res = mr_inc(res);
+ }
+ dpaa_invalidate_touch_ro(res);
+}
+
+static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
+{
+ struct qm_mr *mr = &portal->mr;
+
+ DPAA_ASSERT(mr->cmode == qm_mr_cci);
+ mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
+ qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
+}
+
+static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
+{
+ struct qm_mr *mr = &portal->mr;
+
+ DPAA_ASSERT(mr->cmode == qm_mr_cci);
+ mr->ci = mr_ptr2idx(mr->cursor);
+ qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
+}
+
+static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+ qm_out(portal, QM_REG_MR_ITR, ithresh);
+}
+
+/* --- Management command API --- */
+
+static inline int qm_mc_init(struct qm_portal *portal)
+{
+ u8 rr0, rr1;
+ struct qm_mc *mc = &portal->mc;
+
+ mc->cr = portal->addr.ce + QM_CL_CR;
+ mc->rr = portal->addr.ce + QM_CL_RR0;
+ /*
+ * The expected valid bit polarity for the next CR command is 0
+ * if RR1 contains a valid response, and is 1 if RR0 contains a
+ * valid response. If both RRs contain all 0, no command has been
+ * executed since reset, in which case the expected valid bit
+ * polarity is 1.
+ */
+ rr0 = mc->rr->verb;
+ rr1 = (mc->rr+1)->verb;
+ if ((rr0 == 0 && rr1 == 0) || rr0 != 0)
+ mc->rridx = 1;
+ else
+ mc->rridx = 0;
+ mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = qman_mc_idle;
+#endif
+ return 0;
+}
+
+static inline void qm_mc_finish(struct qm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ struct qm_mc *mc = &portal->mc;
+
+ DPAA_ASSERT(mc->state == qman_mc_idle);
+ if (mc->state != qman_mc_idle)
+ pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline union qm_mc_command *qm_mc_start(struct qm_portal *portal)
+{
+ struct qm_mc *mc = &portal->mc;
+
+ DPAA_ASSERT(mc->state == qman_mc_idle);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = qman_mc_user;
+#endif
+ dpaa_zero(mc->cr);
+ return mc->cr;
+}
+
+static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
+{
+ struct qm_mc *mc = &portal->mc;
+ union qm_mc_result *rr = mc->rr + mc->rridx;
+
+ DPAA_ASSERT(mc->state == qman_mc_user);
+ dma_wmb();
+ mc->cr->_ncw_verb = myverb | mc->vbit;
+ dpaa_flush(mc->cr);
+ dpaa_invalidate_touch_ro(rr);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = qman_mc_hw;
+#endif
+}
+
+static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal)
+{
+ struct qm_mc *mc = &portal->mc;
+ union qm_mc_result *rr = mc->rr + mc->rridx;
+
+ DPAA_ASSERT(mc->state == qman_mc_hw);
+ /*
+ * The inactive response register's verb byte always returns zero until
+ * its command is submitted and completed. This includes the valid-bit,
+ * in case you were wondering...
+ */
+ if (!rr->verb) {
+ dpaa_invalidate_touch_ro(rr);
+ return NULL;
+ }
+ mc->rridx ^= 1;
+ mc->vbit ^= QM_MCC_VERB_VBIT;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = qman_mc_idle;
+#endif
+ return rr;
+}
+
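+/*
+ * Poll for an MC result for up to QM_MCR_TIMEOUT microseconds; returns the
+ * remaining budget (0 means the command timed out).
+ */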
+static inline int qm_mc_result_timeout(struct qm_portal *portal,
+ union qm_mc_result **mcr)
+{
+ int timeout = QM_MCR_TIMEOUT;
+
+ do {
+ *mcr = qm_mc_result(portal);
+ if (*mcr)
+ break;
+ udelay(1);
+ } while (--timeout);
+
+ return timeout;
+}
+
+static inline void fq_set(struct qman_fq *fq, u32 mask)
+{
+ fq->flags |= mask;
+}
+
+static inline void fq_clear(struct qman_fq *fq, u32 mask)
+{
+ fq->flags &= ~mask;
+}
+
+static inline int fq_isset(struct qman_fq *fq, u32 mask)
+{
+ return fq->flags & mask;
+}
+
+static inline int fq_isclear(struct qman_fq *fq, u32 mask)
+{
+ return !(fq->flags & mask);
+}
+
+struct qman_portal {
+ struct qm_portal p;
+ /* PORTAL_BITS_*** - dynamic, strictly internal */
+ unsigned long bits;
+ /* interrupt sources processed by portal_isr(), configurable */
+ unsigned long irq_sources;
+ u32 use_eqcr_ci_stashing;
+ /* only 1 volatile dequeue at a time */
+ struct qman_fq *vdqcr_owned;
+ u32 sdqcr;
+ /* probing time config params for cpu-affine portals */
+ const struct qm_portal_config *config;
+ /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
+ struct qman_cgrs *cgrs;
+ /* linked-list of CSCN handlers. */
+ struct list_head cgr_cbs;
+ /* list lock */
+ spinlock_t cgr_lock;
+ struct work_struct congestion_work;
+ struct work_struct mr_work;
+ char irqname[MAX_IRQNAME];
+};
+
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+static u16 affine_channels[NR_CPUS];
+static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
+struct qman_portal *affine_portals[NR_CPUS];
+
+static inline struct qman_portal *get_affine_portal(void)
+{
+ return &get_cpu_var(qman_affine_portal);
+}
+
+static inline void put_affine_portal(void)
+{
+ put_cpu_var(qman_affine_portal);
+}
+
+
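+/* Find the affine portal whose dedicated channel matches 'channel', if any */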
+static inline struct qman_portal *get_portal_for_channel(u16 channel)
+{
+ int i;
+
+ for (i = 0; i < num_possible_cpus(); i++) {
+ if (affine_portals[i] &&
+ affine_portals[i]->config->channel == channel)
+ return affine_portals[i];
+ }
+
+ return NULL;
+}
+
+static struct workqueue_struct *qm_portal_wq;
+
+int qman_dqrr_set_ithresh(struct qman_portal *portal, u8 ithresh)
+{
+ int res;
+
+ if (!portal)
+ return -EINVAL;
+
+ res = qm_dqrr_set_ithresh(&portal->p, ithresh);
+ if (res)
+ return res;
+
+ portal->p.dqrr.ithresh = ithresh;
+
+ return 0;
+}
+EXPORT_SYMBOL(qman_dqrr_set_ithresh);
+
+void qman_dqrr_get_ithresh(struct qman_portal *portal, u8 *ithresh)
+{
+ if (portal && ithresh)
+ *ithresh = qm_in(&portal->p, QM_REG_DQRR_ITR);
+}
+EXPORT_SYMBOL(qman_dqrr_get_ithresh);
+
+void qman_portal_get_iperiod(struct qman_portal *portal, u32 *iperiod)
+{
+ if (portal && iperiod)
+ *iperiod = qm_in(&portal->p, QM_REG_ITPR);
+}
+EXPORT_SYMBOL(qman_portal_get_iperiod);
+
+int qman_portal_set_iperiod(struct qman_portal *portal, u32 iperiod)
+{
+ if (!portal || iperiod > QMAN_ITP_MAX)
+ return -EINVAL;
+
+ qm_out(&portal->p, QM_REG_ITPR, iperiod);
+
+ return 0;
+}
+EXPORT_SYMBOL(qman_portal_set_iperiod);
+
+int qman_wq_alloc(void)
+{
+ qm_portal_wq = alloc_workqueue("qman_portal_wq", 0, 1);
+ if (!qm_portal_wq)
+ return -ENOMEM;
+ return 0;
+}
+
+
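+/* Clear any latched interrupt status and un-inhibit IRQs on all affine portals */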
+void qman_enable_irqs(void)
+{
+ int i;
+
+ for (i = 0; i < num_possible_cpus(); i++) {
+ if (affine_portals[i]) {
+ qm_out(&affine_portals[i]->p, QM_REG_ISR, 0xffffffff);
+ qm_out(&affine_portals[i]->p, QM_REG_IIR, 0);
+ }
+
+ }
+}
+
+/*
+ * This is what everything can wait on, even if it migrates to a different cpu
+ * from the one whose affine portal it is waiting on.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
+
+static struct qman_fq **fq_table;
+static u32 num_fqids;
+
+int qman_alloc_fq_table(u32 _num_fqids)
+{
+ num_fqids = _num_fqids;
+
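+ /*
+ * Two table slots are reserved per FQID: fqid_to_fq() looks up the
+ * even slot, which holds the full-service FQ object; the other slot
+ * is for an enqueue-only reference.
+ */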
+ fq_table = vzalloc(array3_size(sizeof(struct qman_fq *),
+ num_fqids, 2));
+ if (!fq_table)
+ return -ENOMEM;
+
+ pr_debug("Allocated fq lookup table at %p, entry count %u\n",
+ fq_table, num_fqids * 2);
+ return 0;
+}
+
+static struct qman_fq *idx_to_fq(u32 idx)
+{
+ struct qman_fq *fq;
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ if (WARN_ON(idx >= num_fqids * 2))
+ return NULL;
+#endif
+ fq = fq_table[idx];
+ DPAA_ASSERT(!fq || idx == fq->idx);
+
+ return fq;
+}
+
+/*
+ * Only returns full-service fq objects, not enqueue-only
+ * references (QMAN_FQ_FLAG_NO_MODIFY).
+ */
+static struct qman_fq *fqid_to_fq(u32 fqid)
+{
+ return idx_to_fq(fqid * 2);
+}
+
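+/*
+ * On 64-bit systems the DQRR/MR context tag holds the fq table index; on
+ * 32-bit systems the qman_fq pointer itself fits in the tag.
+ */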
+static struct qman_fq *tag_to_fq(u32 tag)
+{
+#if BITS_PER_LONG == 64
+ return idx_to_fq(tag);
+#else
+ return (struct qman_fq *)tag;
+#endif
+}
+
+static u32 fq_to_tag(struct qman_fq *fq)
+{
+#if BITS_PER_LONG == 64
+ return fq->idx;
+#else
+ return (u32)fq;
+#endif
+}
+
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+ unsigned int poll_limit, bool sched_napi);
+static void qm_congestion_task(struct work_struct *work);
+static void qm_mr_process_task(struct work_struct *work);
+
+static irqreturn_t portal_isr(int irq, void *ptr)
+{
+ struct qman_portal *p = ptr;
+ u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;
+ u32 clear = 0;
+
+ if (unlikely(!is))
+ return IRQ_NONE;
+
+ /* DQRR-handling if it's interrupt-driven */
+ if (is & QM_PIRQ_DQRI) {
+ __poll_portal_fast(p, QMAN_POLL_LIMIT, true);
+ clear = QM_DQAVAIL_MASK | QM_PIRQ_DQRI;
+ }
+ /* Handling of anything else that's interrupt-driven */
+ clear |= __poll_portal_slow(p, is) & QM_PIRQ_SLOW;
+ qm_out(&p->p, QM_REG_ISR, clear);
+ return IRQ_HANDLED;
+}
+
+static int drain_mr_fqrni(struct qm_portal *p)
+{
+ const union qm_mr_entry *msg;
+loop:
+ qm_mr_pvb_update(p);
+ msg = qm_mr_current(p);
+ if (!msg) {
+ /*
+ * if MR was full and h/w had other FQRNI entries to produce, we
+ * need to allow it time to produce those entries once the
+ * existing entries are consumed. A worst-case situation
+ * (fully-loaded system) means h/w sequencers may have to do 3-4
+ * other things before servicing the portal's MR pump, each of
+ * which (if slow) may take ~50 qman cycles (which is ~200
+ * processor cycles). So rounding up and then multiplying this
+ * worst-case estimate by a factor of 10, just to be
+ * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
+ * one entry at a time, so h/w has an opportunity to produce new
+ * entries well before the ring has been fully consumed, so
+ * we're being *really* paranoid here.
+ */
+ mdelay(1);
+ qm_mr_pvb_update(p);
+ msg = qm_mr_current(p);
+ if (!msg)
+ return 0;
+ }
+ if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
+ /* We aren't draining anything but FQRNIs */
+ pr_err("Found verb 0x%x in MR\n", msg->verb);
+ return -1;
+ }
+ qm_mr_next(p);
+ qm_mr_cci_consume(p, 1);
+ goto loop;
+}
+
+static int qman_create_portal(struct qman_portal *portal,
+ const struct qm_portal_config *c,
+ const struct qman_cgrs *cgrs)
+{
+ struct qm_portal *p;
+ int ret;
+ u32 isdr;
+
+ p = &portal->p;
+
+#ifdef CONFIG_FSL_PAMU
+ /* PAMU is required for stashing */
+ portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
+#else
+ portal->use_eqcr_ci_stashing = 0;
+#endif
+ /*
+ * prep the low-level portal struct with the mapped addresses from the
+ * config, everything that follows depends on it and "config" is more
+ * for (de)reference
+ */
+ p->addr.ce = c->addr_virt_ce;
+ p->addr.ce_be = c->addr_virt_ce;
+ p->addr.ci = c->addr_virt_ci;
+ /*
+ * If CI-stashing is used, the current defaults use a threshold of 3,
+ * and stash with higher-than-DQRR priority.
+ */
+ if (qm_eqcr_init(p, qm_eqcr_pvb,
+ portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
+ dev_err(c->dev, "EQCR initialisation failed\n");
+ goto fail_eqcr;
+ }
+ if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
+ qm_dqrr_cdc, DQRR_MAXFILL)) {
+ dev_err(c->dev, "DQRR initialisation failed\n");
+ goto fail_dqrr;
+ }
+ if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
+ dev_err(c->dev, "MR initialisation failed\n");
+ goto fail_mr;
+ }
+ if (qm_mc_init(p)) {
+ dev_err(c->dev, "MC initialisation failed\n");
+ goto fail_mc;
+ }
+ /* static interrupt-gating controls */
+ qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH);
+ qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH);
+ qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD);
+ portal->cgrs = kmalloc_array(2, sizeof(*cgrs), GFP_KERNEL);
+ if (!portal->cgrs)
+ goto fail_cgrs;
+ /* initial snapshot is no-depletion */
+ qman_cgrs_init(&portal->cgrs[1]);
+ if (cgrs)
+ portal->cgrs[0] = *cgrs;
+ else
+ /* if the given mask is NULL, assume all CGRs can be seen */
+ qman_cgrs_fill(&portal->cgrs[0]);
+ INIT_LIST_HEAD(&portal->cgr_cbs);
+ spin_lock_init(&portal->cgr_lock);
+ INIT_WORK(&portal->congestion_work, qm_congestion_task);
+ INIT_WORK(&portal->mr_work, qm_mr_process_task);
+ portal->bits = 0;
+ portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
+ QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
+ QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
+ isdr = 0xffffffff;
+ qm_out(p, QM_REG_ISDR, isdr);
+ portal->irq_sources = 0;
+ qm_out(p, QM_REG_IER, 0);
+ snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
+ qm_out(p, QM_REG_IIR, 1);
+ if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
+ dev_err(c->dev, "request_irq() failed\n");
+ goto fail_irq;
+ }
+
+ if (dpaa_set_portal_irq_affinity(c->dev, c->irq, c->cpu))
+ goto fail_affinity;
+
+ /* Need EQCR to be empty before continuing */
+ isdr &= ~QM_PIRQ_EQCI;
+ qm_out(p, QM_REG_ISDR, isdr);
+ ret = qm_eqcr_get_fill(p);
+ if (ret) {
+ dev_err(c->dev, "EQCR unclean\n");
+ goto fail_eqcr_empty;
+ }
+ isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
+ qm_out(p, QM_REG_ISDR, isdr);
+ if (qm_dqrr_current(p)) {
+ dev_dbg(c->dev, "DQRR unclean\n");
+ qm_dqrr_cdc_consume_n(p, 0xffff);
+ }
+ if (qm_mr_current(p) && drain_mr_fqrni(p)) {
+ /* special handling, drain just in case it's a few FQRNIs */
+ const union qm_mr_entry *e = qm_mr_current(p);
+
+ dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%llx\n",
+ e->verb, e->ern.rc, qm_fd_addr_get64(&e->ern.fd));
+ goto fail_dqrr_mr_empty;
+ }
+ /* Success */
+ portal->config = c;
+ qm_out(p, QM_REG_ISR, 0xffffffff);
+ qm_out(p, QM_REG_ISDR, 0);
+ if (!qman_requires_cleanup())
+ qm_out(p, QM_REG_IIR, 0);
+ /* Write a sane SDQCR */
+ qm_dqrr_sdqcr_set(p, portal->sdqcr);
+ return 0;
+
+fail_dqrr_mr_empty:
+fail_eqcr_empty:
+fail_affinity:
+ free_irq(c->irq, portal);
+fail_irq:
+ kfree(portal->cgrs);
+fail_cgrs:
+ qm_mc_finish(p);
+fail_mc:
+ qm_mr_finish(p);
+fail_mr:
+ qm_dqrr_finish(p);
+fail_dqrr:
+ qm_eqcr_finish(p);
+fail_eqcr:
+ return -EIO;
+}
+
+struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
+ const struct qman_cgrs *cgrs)
+{
+ struct qman_portal *portal;
+ int err;
+
+ portal = &per_cpu(qman_affine_portal, c->cpu);
+ err = qman_create_portal(portal, c, cgrs);
+ if (err)
+ return NULL;
+
+ spin_lock(&affine_mask_lock);
+ cpumask_set_cpu(c->cpu, &affine_mask);
+ affine_channels[c->cpu] = c->channel;
+ affine_portals[c->cpu] = portal;
+ spin_unlock(&affine_mask_lock);
+
+ return portal;
+}
+
+static void qman_destroy_portal(struct qman_portal *qm)
+{
+ const struct qm_portal_config *pcfg;
+
+ /* Stop dequeues on the portal */
+ qm_dqrr_sdqcr_set(&qm->p, 0);
+
+ /*
+ * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
+ * something related to QM_PIRQ_EQCI, this may need fixing.
+ * Also, due to the prefetching model used for CI updates in the enqueue
+ * path, this update will only invalidate the CI cacheline *after*
+ * working on it, so we need to call this twice to ensure a full update
+ * irrespective of where the enqueue processing was at when the teardown
+ * began.
+ */
+ qm_eqcr_cce_update(&qm->p);
+ qm_eqcr_cce_update(&qm->p);
+ pcfg = qm->config;
+
+ free_irq(pcfg->irq, qm);
+
+ kfree(qm->cgrs);
+ qm_mc_finish(&qm->p);
+ qm_mr_finish(&qm->p);
+ qm_dqrr_finish(&qm->p);
+ qm_eqcr_finish(&qm->p);
+
+ qm->config = NULL;
+}
+
+const struct qm_portal_config *qman_destroy_affine_portal(void)
+{
+ struct qman_portal *qm = get_affine_portal();
+ const struct qm_portal_config *pcfg;
+ int cpu;
+
+ pcfg = qm->config;
+ cpu = pcfg->cpu;
+
+ qman_destroy_portal(qm);
+
+ spin_lock(&affine_mask_lock);
+ cpumask_clear_cpu(cpu, &affine_mask);
+ spin_unlock(&affine_mask_lock);
+ put_affine_portal();
+ return pcfg;
+}
+
+/* Inline helper to reduce nesting in __poll_portal_slow() */
+static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
+ const union qm_mr_entry *msg, u8 verb)
+{
+ switch (verb) {
+ case QM_MR_VERB_FQRL:
+ DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
+ fq_clear(fq, QMAN_FQ_STATE_ORL);
+ break;
+ case QM_MR_VERB_FQRN:
+ DPAA_ASSERT(fq->state == qman_fq_state_parked ||
+ fq->state == qman_fq_state_sched);
+ DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
+ fq_clear(fq, QMAN_FQ_STATE_CHANGING);
+ if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
+ fq_set(fq, QMAN_FQ_STATE_NE);
+ if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
+ fq_set(fq, QMAN_FQ_STATE_ORL);
+ fq->state = qman_fq_state_retired;
+ break;
+ case QM_MR_VERB_FQPN:
+ DPAA_ASSERT(fq->state == qman_fq_state_sched);
+ DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
+ fq->state = qman_fq_state_parked;
+ }
+}
+
+static void qm_congestion_task(struct work_struct *work)
+{
+ struct qman_portal *p = container_of(work, struct qman_portal,
+ congestion_work);
+ struct qman_cgrs rr, c;
+ union qm_mc_result *mcr;
+ struct qman_cgr *cgr;
+
+ spin_lock(&p->cgr_lock);
+ qm_mc_start(&p->p);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ spin_unlock(&p->cgr_lock);
+ dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
+ qman_p_irqsource_add(p, QM_PIRQ_CSCI);
+ return;
+ }
+ /* mask out the ones I'm not interested in */
+ qman_cgrs_and(&rr, (struct qman_cgrs *)&mcr->querycongestion.state,
+ &p->cgrs[0]);
+ /* check previous snapshot for delta, enter/exit congestion */
+ qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
+ /* update snapshot */
+ qman_cgrs_cp(&p->cgrs[1], &rr);
+ /* Invoke callback */
+ list_for_each_entry(cgr, &p->cgr_cbs, node)
+ if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
+ cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
+ spin_unlock(&p->cgr_lock);
+ qman_p_irqsource_add(p, QM_PIRQ_CSCI);
+}
+
+static void qm_mr_process_task(struct work_struct *work)
+{
+ struct qman_portal *p = container_of(work, struct qman_portal,
+ mr_work);
+ const union qm_mr_entry *msg;
+ struct qman_fq *fq;
+ u8 verb, num = 0;
+
+ preempt_disable();
+
+ while (1) {
+ qm_mr_pvb_update(&p->p);
+ msg = qm_mr_current(&p->p);
+ if (!msg)
+ break;
+
+ verb = msg->verb & QM_MR_VERB_TYPE_MASK;
+ /* The message is a software ERN iff the 0x20 bit is clear */
+ if (verb & 0x20) {
+ switch (verb) {
+ case QM_MR_VERB_FQRNI:
+ /* nada, we drop FQRNIs on the floor */
+ break;
+ case QM_MR_VERB_FQRN:
+ case QM_MR_VERB_FQRL:
+ /* Lookup in the retirement table */
+ fq = fqid_to_fq(qm_fqid_get(&msg->fq));
+ if (WARN_ON(!fq))
+ break;
+ fq_state_change(p, fq, msg, verb);
+ if (fq->cb.fqs)
+ fq->cb.fqs(p, fq, msg);
+ break;
+ case QM_MR_VERB_FQPN:
+ /* Parked */
+ fq = tag_to_fq(be32_to_cpu(msg->fq.context_b));
+ fq_state_change(p, fq, msg, verb);
+ if (fq->cb.fqs)
+ fq->cb.fqs(p, fq, msg);
+ break;
+ case QM_MR_VERB_DC_ERN:
+ /* DCP ERN */
+ pr_crit_once("Leaking DCP ERNs!\n");
+ break;
+ default:
+ pr_crit("Invalid MR verb 0x%02x\n", verb);
+ }
+ } else {
+ /* It's a software ERN */
+ fq = tag_to_fq(be32_to_cpu(msg->ern.tag));
+ fq->cb.ern(p, fq, msg);
+ }
+ num++;
+ qm_mr_next(&p->p);
+ }
+
+ qm_mr_cci_consume(&p->p, num);
+ qman_p_irqsource_add(p, QM_PIRQ_MRI);
+ preempt_enable();
+}
+
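+/*
+ * Slow-path interrupt sources: congestion (CSCI) and message-ring (MRI)
+ * events are deferred to the portal workqueue; the source is masked here and
+ * re-enabled by the corresponding work handler once it has drained the ring.
+ */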
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
+{
+ if (is & QM_PIRQ_CSCI) {
+ qman_p_irqsource_remove(p, QM_PIRQ_CSCI);
+ queue_work_on(smp_processor_id(), qm_portal_wq,
+ &p->congestion_work);
+ }
+
+ if (is & QM_PIRQ_EQRI) {
+ qm_eqcr_cce_update(&p->p);
+ qm_eqcr_set_ithresh(&p->p, 0);
+ wake_up(&affine_queue);
+ }
+
+ if (is & QM_PIRQ_MRI) {
+ qman_p_irqsource_remove(p, QM_PIRQ_MRI);
+ queue_work_on(smp_processor_id(), qm_portal_wq,
+ &p->mr_work);
+ }
+
+ return is;
+}
+
+/*
+ * remove some slowish-path stuff from the "fast path" and make sure it isn't
+ * inlined.
+ */
+static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
+{
+ p->vdqcr_owned = NULL;
+ fq_clear(fq, QMAN_FQ_STATE_VDQCR);
+ wake_up(&affine_queue);
+}
+
+/*
+ * The only states that would conflict with other things if they ran at the
+ * same time on the same cpu are:
+ *
+ * (i) setting/clearing vdqcr_owned, and
+ * (ii) clearing the NE (Not Empty) flag.
+ *
+ * Both are safe, because:
+ *
+ * (i) this clearing can only occur after qman_volatile_dequeue() has set the
+ * vdqcr_owned field (which it does before setting VDQCR), and
+ * qman_volatile_dequeue() blocks interrupts and preemption while this is
+ * done so that we can't interfere.
+ * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
+ * with (i) that API prevents us from interfering until it's safe.
+ *
+ * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far
+ * less frequently (i.e. per-FQ) than __poll_portal_fast() does, so the net
+ * advantage comes from this function not having to "lock" anything at all.
+ *
+ * Note also that the callbacks are invoked at points which are safe against the
+ * above potential conflicts, but that this function itself is not re-entrant
+ * (this is because the function tracks one end of each FIFO in the portal and
+ * we do *not* want to lock that). So the consequence is that it is safe for
+ * user callbacks to call into any QMan API.
+ */
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+ unsigned int poll_limit, bool sched_napi)
+{
+ const struct qm_dqrr_entry *dq;
+ struct qman_fq *fq;
+ enum qman_cb_dqrr_result res;
+ unsigned int limit = 0;
+
+ do {
+ qm_dqrr_pvb_update(&p->p);
+ dq = qm_dqrr_current(&p->p);
+ if (!dq)
+ break;
+
+ if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
+ /*
+ * VDQCR: don't trust context_b as the FQ may have
+ * been configured for h/w consumption and we're
+ * draining it post-retirement.
+ */
+ fq = p->vdqcr_owned;
+ /*
+ * We only set QMAN_FQ_STATE_NE when retiring, so we
+ * only need to check for clearing it when doing
+ * volatile dequeues. It's one less thing to check
+ * in the critical path (SDQCR).
+ */
+ if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
+ fq_clear(fq, QMAN_FQ_STATE_NE);
+ /*
+ * This is duplicated from the SDQCR code, but we
+ * have stuff to do before *and* after this callback,
+ * and we don't want multiple if()s in the critical
+ * path (SDQCR).
+ */
+ res = fq->cb.dqrr(p, fq, dq, sched_napi);
+ if (res == qman_cb_dqrr_stop)
+ break;
+ /* Check for VDQCR completion */
+ if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
+ clear_vdqcr(p, fq);
+ } else {
+ /* SDQCR: context_b points to the FQ */
+ fq = tag_to_fq(be32_to_cpu(dq->context_b));
+ /* Now let the callback do its stuff */
+ res = fq->cb.dqrr(p, fq, dq, sched_napi);
+ /*
+ * The callback can request that we exit without
+ * consuming this entry or advancing.
+ */
+ if (res == qman_cb_dqrr_stop)
+ break;
+ }
+ /* Interpret 'dq' from a driver perspective. */
+ /*
+ * Parking isn't possible unless HELDACTIVE was set. NB,
+ * FORCEELIGIBLE implies HELDACTIVE, so we only need to
+ * check for HELDACTIVE to cover both.
+ */
+ DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
+ (res != qman_cb_dqrr_park));
+ /* just means "skip it, I'll consume it myself later on" */
+ if (res != qman_cb_dqrr_defer)
+ qm_dqrr_cdc_consume_1ptr(&p->p, dq,
+ res == qman_cb_dqrr_park);
+ /* Move forward */
+ qm_dqrr_next(&p->p);
+ /*
+ * Entry processed and consumed, increment our counter. The
+ * callback can request that we exit after consuming the
+ * entry, and we also exit if we reach our processing limit,
+ * so loop back only if neither of these conditions is met.
+ */
+ } while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);
+
+ return limit;
+}
+
+void qman_p_irqsource_add(struct qman_portal *p, u32 bits)
+{
+ unsigned long irqflags;
+
+ local_irq_save(irqflags);
+ p->irq_sources |= bits & QM_PIRQ_VISIBLE;
+ qm_out(&p->p, QM_REG_IER, p->irq_sources);
+ local_irq_restore(irqflags);
+}
+EXPORT_SYMBOL(qman_p_irqsource_add);
+
+void qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
+{
+ unsigned long irqflags;
+ u32 ier;
+
+ /*
+ * Our interrupt handler only processes+clears status register bits that
+ * are in p->irq_sources. As we're trimming that mask, if one of them
+ * were to assert in the status register just before we remove it from
+ * the enable register, there would be an interrupt-storm when we
+ * release the IRQ lock. So we wait for the enable register update to
+ * take effect in h/w (by reading it back) and then clear all other bits
+ * in the status register. Ie. we clear them from ISR once it's certain
+ * IER won't allow them to reassert.
+ */
+ local_irq_save(irqflags);
+ bits &= QM_PIRQ_VISIBLE;
+ p->irq_sources &= ~bits;
+ qm_out(&p->p, QM_REG_IER, p->irq_sources);
+ ier = qm_in(&p->p, QM_REG_IER);
+ /*
+ * Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
+ * data-dependency, ie. to protect against re-ordering.
+ */
+ qm_out(&p->p, QM_REG_ISR, ~ier);
+ local_irq_restore(irqflags);
+}
+EXPORT_SYMBOL(qman_p_irqsource_remove);
+
+const cpumask_t *qman_affine_cpus(void)
+{
+ return &affine_mask;
+}
+EXPORT_SYMBOL(qman_affine_cpus);
+
+u16 qman_affine_channel(int cpu)
+{
+ if (cpu < 0) {
+ struct qman_portal *portal = get_affine_portal();
+
+ cpu = portal->config->cpu;
+ put_affine_portal();
+ }
+ WARN_ON(!cpumask_test_cpu(cpu, &affine_mask));
+ return affine_channels[cpu];
+}
+EXPORT_SYMBOL(qman_affine_channel);
+
+struct qman_portal *qman_get_affine_portal(int cpu)
+{
+ return affine_portals[cpu];
+}
+EXPORT_SYMBOL(qman_get_affine_portal);
+
+int qman_start_using_portal(struct qman_portal *p, struct device *dev)
+{
+ return (!device_link_add(dev, p->config->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER)) ? -EINVAL : 0;
+}
+EXPORT_SYMBOL(qman_start_using_portal);
+
+int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
+{
+ return __poll_portal_fast(p, limit, false);
+}
+EXPORT_SYMBOL(qman_p_poll_dqrr);
+
+void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
+{
+ unsigned long irqflags;
+
+ local_irq_save(irqflags);
+ pools &= p->config->pools;
+ p->sdqcr |= pools;
+ qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
+ local_irq_restore(irqflags);
+}
+EXPORT_SYMBOL(qman_p_static_dequeue_add);
+
+/* Frame queue API */
+
+static const char *mcr_result_str(u8 result)
+{
+ switch (result) {
+ case QM_MCR_RESULT_NULL:
+ return "QM_MCR_RESULT_NULL";
+ case QM_MCR_RESULT_OK:
+ return "QM_MCR_RESULT_OK";
+ case QM_MCR_RESULT_ERR_FQID:
+ return "QM_MCR_RESULT_ERR_FQID";
+ case QM_MCR_RESULT_ERR_FQSTATE:
+ return "QM_MCR_RESULT_ERR_FQSTATE";
+ case QM_MCR_RESULT_ERR_NOTEMPTY:
+ return "QM_MCR_RESULT_ERR_NOTEMPTY";
+ case QM_MCR_RESULT_PENDING:
+ return "QM_MCR_RESULT_PENDING";
+ case QM_MCR_RESULT_ERR_BADCOMMAND:
+ return "QM_MCR_RESULT_ERR_BADCOMMAND";
+ }
+ return "<unknown MCR result>";
+}
+
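+/*
+ * Typical FQ lifecycle, as a rough sketch only (error handling elided, and a
+ * real user would normally pass an initfq opts structure - see the example
+ * before qman_init_fq() below; "my_fq" and "my_cbs" are placeholder names):
+ *
+ *	struct qman_fq my_fq = { .cb = my_cbs };
+ *
+ *	qman_create_fq(fqid, 0, &my_fq);
+ *	qman_init_fq(&my_fq, QMAN_INITFQ_FLAG_SCHED, NULL);
+ *	...
+ *	qman_retire_fq(&my_fq, NULL);
+ *	qman_oos_fq(&my_fq);
+ *	qman_destroy_fq(&my_fq);
+ */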
+int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
+{
+ if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
+ int ret = qman_alloc_fqid(&fqid);
+
+ if (ret)
+ return ret;
+ }
+ fq->fqid = fqid;
+ fq->flags = flags;
+ fq->state = qman_fq_state_oos;
+ fq->cgr_groupid = 0;
+
+ /* A context_b of 0 is allegedly special, so don't use that fqid */
+ if (fqid == 0 || fqid >= num_fqids) {
+ WARN(1, "bad fqid %d\n", fqid);
+ return -EINVAL;
+ }
+
+ fq->idx = fqid * 2;
+ if (flags & QMAN_FQ_FLAG_NO_MODIFY)
+ fq->idx++;
+
+ WARN_ON(fq_table[fq->idx]);
+ fq_table[fq->idx] = fq;
+
+ return 0;
+}
+EXPORT_SYMBOL(qman_create_fq);
+
+void qman_destroy_fq(struct qman_fq *fq)
+{
+ /*
+ * We don't need to lock the FQ as it is a pre-condition that the FQ be
+ * quiesced. Instead, run some checks.
+ */
+ switch (fq->state) {
+ case qman_fq_state_parked:
+ case qman_fq_state_oos:
+ if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
+ qman_release_fqid(fq->fqid);
+
+ DPAA_ASSERT(fq_table[fq->idx]);
+ fq_table[fq->idx] = NULL;
+ return;
+ default:
+ break;
+ }
+ DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
+}
+EXPORT_SYMBOL(qman_destroy_fq);
+
+u32 qman_fq_fqid(struct qman_fq *fq)
+{
+ return fq->fqid;
+}
+EXPORT_SYMBOL(qman_fq_fqid);
+
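+/*
+ * Example opts setup for qman_init_fq(), as an illustrative sketch only
+ * ("channel" is assumed to be a valid target channel and the work-queue
+ * choice is arbitrary):
+ *
+ *	struct qm_mcc_initfq opts = { };
+ *
+ *	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL);
+ *	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_HOLDACTIVE);
+ *	qm_fqd_set_destwq(&opts.fqd, channel, 3);
+ *	err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
+ */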
+int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p;
+ u8 res, myverb;
+ int ret = 0;
+
+ myverb = (flags & QMAN_INITFQ_FLAG_SCHED)
+ ? QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;
+
+ if (fq->state != qman_fq_state_oos &&
+ fq->state != qman_fq_state_parked)
+ return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+ return -EINVAL;
+#endif
+ if (opts && (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_OAC)) {
+ /* OAC can't be set at the same time as TDTHRESH */
+ if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_TDTHRESH)
+ return -EINVAL;
+ }
+ /* Issue an INITFQ_[PARKED|SCHED] management command */
+ p = get_affine_portal();
+ if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
+ (fq->state != qman_fq_state_oos &&
+ fq->state != qman_fq_state_parked)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ if (opts)
+ mcc->initfq = *opts;
+ qm_fqid_set(&mcc->fq, fq->fqid);
+ mcc->initfq.count = 0;
+ /*
+ * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a
+ * demux pointer. Otherwise, the caller-provided value is allowed to
+ * stand; don't overwrite it.
+ */
+ if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
+ dma_addr_t phys_fq;
+
+ mcc->initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTB);
+ mcc->initfq.fqd.context_b = cpu_to_be32(fq_to_tag(fq));
+ /*
+ * and the physical address - NB, if the user wasn't trying to
+ * set CONTEXTA, clear the stashing settings.
+ */
+ if (!(be16_to_cpu(mcc->initfq.we_mask) &
+ QM_INITFQ_WE_CONTEXTA)) {
+ mcc->initfq.we_mask |=
+ cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
+ memset(&mcc->initfq.fqd.context_a, 0,
+ sizeof(mcc->initfq.fqd.context_a));
+ } else {
+ struct qman_portal *p = qman_dma_portal;
+
+ phys_fq = dma_map_single(p->config->dev, fq,
+ sizeof(*fq), DMA_TO_DEVICE);
+ if (dma_mapping_error(p->config->dev, phys_fq)) {
+ dev_err(p->config->dev, "dma_mapping failed\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
+ }
+ }
+ if (flags & QMAN_INITFQ_FLAG_LOCAL) {
+ int wq = 0;
+
+ if (!(be16_to_cpu(mcc->initfq.we_mask) &
+ QM_INITFQ_WE_DESTWQ)) {
+ mcc->initfq.we_mask |=
+ cpu_to_be16(QM_INITFQ_WE_DESTWQ);
+ wq = 4;
+ }
+ qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq);
+ }
+ qm_mc_commit(&p->p, myverb);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_err(p->config->dev, "MCR timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
+ res = mcr->result;
+ if (res != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+ if (opts) {
+ if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_FQCTRL) {
+ if (be16_to_cpu(opts->fqd.fq_ctrl) & QM_FQCTRL_CGE)
+ fq_set(fq, QMAN_FQ_STATE_CGR_EN);
+ else
+ fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
+ }
+ if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_CGID)
+ fq->cgr_groupid = opts->fqd.cgid;
+ }
+ fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
+ qman_fq_state_sched : qman_fq_state_parked;
+
+out:
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_init_fq);
+
+int qman_schedule_fq(struct qman_fq *fq)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p;
+ int ret = 0;
+
+ if (fq->state != qman_fq_state_parked)
+ return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+ return -EINVAL;
+#endif
+ /* Issue a ALTERFQ_SCHED management command */
+ p = get_affine_portal();
+ if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
+ fq->state != qman_fq_state_parked) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ qm_fqid_set(&mcc->fq, fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_err(p->config->dev, "ALTER_SCHED timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+ fq->state = qman_fq_state_sched;
+out:
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_schedule_fq);
+
+int qman_retire_fq(struct qman_fq *fq, u32 *flags)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p;
+ int ret;
+ u8 res;
+
+ if (fq->state != qman_fq_state_parked &&
+ fq->state != qman_fq_state_sched)
+ return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+ return -EINVAL;
+#endif
+ p = get_affine_portal();
+ if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
+ fq->state == qman_fq_state_retired ||
+ fq->state == qman_fq_state_oos) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ qm_fqid_set(&mcc->fq, fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_crit(p->config->dev, "ALTER_RETIRE timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
+ res = mcr->result;
+ /*
+ * "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
+ * and defer the flags until FQRNI or FQRN (respectively) show up. But
+ * "Friendly" is to process OK immediately, and not set CHANGING. We do
+ * friendly, otherwise the caller doesn't necessarily have a fully
+ * "retired" FQ on return even if the retirement was immediate. However
+ * this does mean some code duplication between here and
+ * fq_state_change().
+ */
+ if (res == QM_MCR_RESULT_OK) {
+ ret = 0;
+ /* Process 'fq' right away, we'll ignore FQRNI */
+ if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
+ fq_set(fq, QMAN_FQ_STATE_NE);
+ if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
+ fq_set(fq, QMAN_FQ_STATE_ORL);
+ if (flags)
+ *flags = fq->flags;
+ fq->state = qman_fq_state_retired;
+ if (fq->cb.fqs) {
+ /*
+ * Another issue with supporting "immediate" retirement
+ * is that we're forced to drop FQRNIs, because by the
+ * time they're seen it may already be "too late" (the
+ * fq may have been OOS'd and free()'d already). But if
+ * the upper layer wants a callback whether it's
+ * immediate or not, we have to fake a "MR" entry to
+ * look like an FQRNI...
+ */
+ union qm_mr_entry msg;
+
+ msg.verb = QM_MR_VERB_FQRNI;
+ msg.fq.fqs = mcr->alterfq.fqs;
+ qm_fqid_set(&msg.fq, fq->fqid);
+ msg.fq.context_b = cpu_to_be32(fq_to_tag(fq));
+ fq->cb.fqs(p, fq, &msg);
+ }
+ } else if (res == QM_MCR_RESULT_PENDING) {
+ ret = 1;
+ fq_set(fq, QMAN_FQ_STATE_CHANGING);
+ } else {
+ ret = -EIO;
+ }
+out:
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_retire_fq);
+
+int qman_oos_fq(struct qman_fq *fq)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p;
+ int ret = 0;
+
+ if (fq->state != qman_fq_state_retired)
+ return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+ return -EINVAL;
+#endif
+ p = get_affine_portal();
+ if (fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS) ||
+ fq->state != qman_fq_state_retired) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ qm_fqid_set(&mcc->fq, fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+ fq->state = qman_fq_state_oos;
+out:
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_oos_fq);
+
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ int ret = 0;
+
+ mcc = qm_mc_start(&p->p);
+ qm_fqid_set(&mcc->fq, fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+ if (mcr->result == QM_MCR_RESULT_OK)
+ *fqd = mcr->queryfq.fqd;
+ else
+ ret = -EIO;
+out:
+ put_affine_portal();
+ return ret;
+}
+
+int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ int ret = 0;
+
+ mcc = qm_mc_start(&p->p);
+ qm_fqid_set(&mcc->fq, fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+ if (mcr->result == QM_MCR_RESULT_OK)
+ *np = mcr->queryfq_np;
+ else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
+ ret = -ERANGE;
+ else
+ ret = -EIO;
+out:
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_query_fq_np);
+
+static int qman_query_cgr(struct qman_cgr *cgr,
+ struct qm_mcr_querycgr *cgrd)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ int ret = 0;
+
+ mcc = qm_mc_start(&p->p);
+ mcc->cgr.cgid = cgr->cgrid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
+ if (mcr->result == QM_MCR_RESULT_OK)
+ *cgrd = mcr->querycgr;
+ else {
+ dev_err(p->config->dev, "QUERY_CGR failed: %s\n",
+ mcr_result_str(mcr->result));
+ ret = -EIO;
+ }
+out:
+ put_affine_portal();
+ return ret;
+}
+
+int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result)
+{
+ struct qm_mcr_querycgr query_cgr;
+ int err;
+
+ err = qman_query_cgr(cgr, &query_cgr);
+ if (err)
+ return err;
+
+ *result = !!query_cgr.cgr.cs;
+ return 0;
+}
+EXPORT_SYMBOL(qman_query_cgr_congested);
+
+/* internal function used as a wait_event() expression */
+static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
+{
+ unsigned long irqflags;
+ int ret = -EBUSY;
+
+ local_irq_save(irqflags);
+ if (p->vdqcr_owned)
+ goto out;
+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+ goto out;
+
+ fq_set(fq, QMAN_FQ_STATE_VDQCR);
+ p->vdqcr_owned = fq;
+ qm_dqrr_vdqcr_set(&p->p, vdqcr);
+ ret = 0;
+out:
+ local_irq_restore(irqflags);
+ return ret;
+}
+
+static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
+{
+ int ret;
+
+ *p = get_affine_portal();
+ ret = set_p_vdqcr(*p, fq, vdqcr);
+ put_affine_portal();
+ return ret;
+}
+
+static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
+ u32 vdqcr, u32 flags)
+{
+ int ret = 0;
+
+ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+ ret = wait_event_interruptible(affine_queue,
+ !set_vdqcr(p, fq, vdqcr));
+ else
+ wait_event(affine_queue, !set_vdqcr(p, fq, vdqcr));
+ return ret;
+}
+
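+/*
+ * Illustrative use (sketch only): drain up to three frames from a retired FQ
+ * and wait for the command to complete before returning:
+ *
+ *	qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT |
+ *			      QMAN_VOLATILE_FLAG_FINISH,
+ *			      QM_VDQCR_NUMFRAMES_SET(3));
+ */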
+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr)
+{
+ struct qman_portal *p;
+ int ret;
+
+ if (fq->state != qman_fq_state_parked &&
+ fq->state != qman_fq_state_retired)
+ return -EINVAL;
+ if (vdqcr & QM_VDQCR_FQID_MASK)
+ return -EINVAL;
+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+ return -EBUSY;
+ vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
+ if (flags & QMAN_VOLATILE_FLAG_WAIT)
+ ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
+ else
+ ret = set_vdqcr(&p, fq, vdqcr);
+ if (ret)
+ return ret;
+ /* VDQCR is set */
+ if (flags & QMAN_VOLATILE_FLAG_FINISH) {
+ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+ /*
+ * NB: don't propagate any error - the caller wouldn't
+ * know whether the VDQCR was issued or not. A signal
+ * could arrive after returning anyway, so the caller
+ * can check signal_pending() if that's an issue.
+ */
+ wait_event_interruptible(affine_queue,
+ !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+ else
+ wait_event(affine_queue,
+ !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+ }
+ return 0;
+}
+EXPORT_SYMBOL(qman_volatile_dequeue);
+
+static void update_eqcr_ci(struct qman_portal *p, u8 avail)
+{
+ if (avail)
+ qm_eqcr_cce_prefetch(&p->p);
+ else
+ qm_eqcr_cce_update(&p->p);
+}
+
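+/*
+ * Note: this enqueue is best-effort. If no EQCR entry can be obtained the
+ * frame is silently dropped and the function still returns 0.
+ */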
+int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd)
+{
+ struct qman_portal *p;
+ struct qm_eqcr_entry *eq;
+ unsigned long irqflags;
+ u8 avail;
+
+ p = get_affine_portal();
+ local_irq_save(irqflags);
+
+ if (p->use_eqcr_ci_stashing) {
+ /*
+ * The stashing case is easy, only update if we need to in
+ * order to try and liberate ring entries.
+ */
+ eq = qm_eqcr_start_stash(&p->p);
+ } else {
+ /*
+ * The non-stashing case is harder, need to prefetch ahead of
+ * time.
+ */
+ avail = qm_eqcr_get_avail(&p->p);
+ if (avail < 2)
+ update_eqcr_ci(p, avail);
+ eq = qm_eqcr_start_no_stash(&p->p);
+ }
+
+ if (unlikely(!eq))
+ goto out;
+
+ qm_fqid_set(eq, fq->fqid);
+ eq->tag = cpu_to_be32(fq_to_tag(fq));
+ eq->fd = *fd;
+
+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE);
+out:
+ local_irq_restore(irqflags);
+ put_affine_portal();
+ return 0;
+}
+EXPORT_SYMBOL(qman_enqueue);
+
+static int qm_modify_cgr(struct qman_cgr *cgr, u32 flags,
+ struct qm_mcc_initcgr *opts)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ u8 verb = QM_MCC_VERB_MODIFYCGR;
+ int ret = 0;
+
+ mcc = qm_mc_start(&p->p);
+ if (opts)
+ mcc->initcgr = *opts;
+ mcc->initcgr.cgid = cgr->cgrid;
+ if (flags & QMAN_CGR_FLAG_USE_INIT)
+ verb = QM_MCC_VERB_INITCGR;
+ qm_mc_commit(&p->p, verb);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
+ if (mcr->result != QM_MCR_RESULT_OK)
+ ret = -EIO;
+
+out:
+ put_affine_portal();
+ return ret;
+}
+
+#define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0)
+
+/* congestion state change notification target update control */
+static void qm_cgr_cscn_targ_set(struct __qm_mc_cgr *cgr, int pi, u32 val)
+{
+ if (qman_ip_rev >= QMAN_REV30)
+ cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi |
+ QM_CGR_TARG_UDP_CTRL_WRITE_BIT);
+ else
+ cgr->cscn_targ = cpu_to_be32(val | QM_CGR_TARG_PORTAL(pi));
+}
+
+static void qm_cgr_cscn_targ_clear(struct __qm_mc_cgr *cgr, int pi, u32 val)
+{
+ if (qman_ip_rev >= QMAN_REV30)
+ cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi);
+ else
+ cgr->cscn_targ = cpu_to_be32(val & ~QM_CGR_TARG_PORTAL(pi));
+}
+
+static u8 qman_cgr_cpus[CGR_NUM];
+
+void qman_init_cgr_all(void)
+{
+ struct qman_cgr cgr;
+ int err_cnt = 0;
+
+ for (cgr.cgrid = 0; cgr.cgrid < CGR_NUM; cgr.cgrid++) {
+ if (qm_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL))
+ err_cnt++;
+ }
+
+ if (err_cnt)
+ pr_err("Warning: %d error%s while initialising CGR h/w\n",
+ err_cnt, (err_cnt > 1) ? "s" : "");
+}
+
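+/*
+ * Illustrative registration (sketch only; "my_cgrid", "my_cscn_cb" and "opts"
+ * are placeholder names and the threshold/initcgr setup is omitted):
+ *
+ *	struct qman_cgr cgr = { .cgrid = my_cgrid, .cb = my_cscn_cb };
+ *
+ *	err = qman_create_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
+ */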
+int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+ struct qm_mcc_initcgr *opts)
+{
+ struct qm_mcr_querycgr cgr_state;
+ int ret;
+ struct qman_portal *p;
+
+ /*
+ * We have to check that the provided CGRID is within the limits of the
+ * data-structures, for obvious reasons. However we'll let h/w take
+ * care of determining whether it's within the limits of what exists on
+ * the SoC.
+ */
+ if (cgr->cgrid >= CGR_NUM)
+ return -EINVAL;
+
+ preempt_disable();
+ p = get_affine_portal();
+ qman_cgr_cpus[cgr->cgrid] = smp_processor_id();
+ preempt_enable();
+
+ cgr->chan = p->config->channel;
+ spin_lock(&p->cgr_lock);
+
+ if (opts) {
+ struct qm_mcc_initcgr local_opts = *opts;
+
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret)
+ goto out;
+
+ qm_cgr_cscn_targ_set(&local_opts.cgr, PORTAL_IDX(p),
+ be32_to_cpu(cgr_state.cgr.cscn_targ));
+ local_opts.we_mask |= cpu_to_be16(QM_CGR_WE_CSCN_TARG);
+
+ /* send init if flags indicate so */
+ if (flags & QMAN_CGR_FLAG_USE_INIT)
+ ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
+ &local_opts);
+ else
+ ret = qm_modify_cgr(cgr, 0, &local_opts);
+ if (ret)
+ goto out;
+ }
+
+ list_add(&cgr->node, &p->cgr_cbs);
+
+ /* Determine if newly added object requires its callback to be called */
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret) {
+ /* we can't go back, so proceed and return success */
+ dev_err(p->config->dev, "CGR HW state partially modified\n");
+ ret = 0;
+ goto out;
+ }
+ if (cgr->cb && cgr_state.cgr.cscn_en &&
+ qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
+ cgr->cb(p, cgr, 1);
+out:
+ spin_unlock(&p->cgr_lock);
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_create_cgr);
+
+static struct qman_portal *qman_cgr_get_affine_portal(struct qman_cgr *cgr)
+{
+ struct qman_portal *p = get_affine_portal();
+
+ if (cgr->chan != p->config->channel) {
+ /* attempt to delete from a portal other than the creator's */
+ dev_err(p->config->dev, "CGR not owned by current portal");
+ dev_dbg(p->config->dev, " create 0x%x, delete 0x%x\n",
+ cgr->chan, p->config->channel);
+ put_affine_portal();
+ return NULL;
+ }
+
+ return p;
+}
+
+int qman_delete_cgr(struct qman_cgr *cgr)
+{
+ unsigned long irqflags;
+ struct qm_mcr_querycgr cgr_state;
+ struct qm_mcc_initcgr local_opts;
+ int ret = 0;
+ struct qman_cgr *i;
+ struct qman_portal *p = qman_cgr_get_affine_portal(cgr);
+
+ if (!p)
+ return -EINVAL;
+
+ memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+ spin_lock_irqsave(&p->cgr_lock, irqflags);
+ list_del(&cgr->node);
+ /*
+ * If there are no other CGR objects for this CGRID in the list,
+ * update CSCN_TARG accordingly
+ */
+ list_for_each_entry(i, &p->cgr_cbs, node)
+ if (i->cgrid == cgr->cgrid && i->cb)
+ goto release_lock;
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret) {
+ /* add back to the list */
+ list_add(&cgr->node, &p->cgr_cbs);
+ goto release_lock;
+ }
+
+ local_opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_TARG);
+ qm_cgr_cscn_targ_clear(&local_opts.cgr, PORTAL_IDX(p),
+ be32_to_cpu(cgr_state.cgr.cscn_targ));
+
+ ret = qm_modify_cgr(cgr, 0, &local_opts);
+ if (ret)
+ /* add back to the list */
+ list_add(&cgr->node, &p->cgr_cbs);
+release_lock:
+ spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_delete_cgr);
+
+struct cgr_comp {
+ struct qman_cgr *cgr;
+ struct completion completion;
+};
+
+static void qman_delete_cgr_smp_call(void *p)
+{
+ qman_delete_cgr((struct qman_cgr *)p);
+}
+
+void qman_delete_cgr_safe(struct qman_cgr *cgr)
+{
+ preempt_disable();
+ if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
+ smp_call_function_single(qman_cgr_cpus[cgr->cgrid],
+ qman_delete_cgr_smp_call, cgr, true);
+ preempt_enable();
+ return;
+ }
+
+ qman_delete_cgr(cgr);
+ preempt_enable();
+}
+EXPORT_SYMBOL(qman_delete_cgr_safe);
+
+static int qman_update_cgr(struct qman_cgr *cgr, struct qm_mcc_initcgr *opts)
+{
+ int ret;
+ unsigned long irqflags;
+ struct qman_portal *p = qman_cgr_get_affine_portal(cgr);
+
+ if (!p)
+ return -EINVAL;
+
+ spin_lock_irqsave(&p->cgr_lock, irqflags);
+ ret = qm_modify_cgr(cgr, 0, opts);
+ spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+ put_affine_portal();
+ return ret;
+}
+
+struct update_cgr_params {
+ struct qman_cgr *cgr;
+ struct qm_mcc_initcgr *opts;
+ int ret;
+};
+
+static void qman_update_cgr_smp_call(void *p)
+{
+ struct update_cgr_params *params = p;
+
+ params->ret = qman_update_cgr(params->cgr, params->opts);
+}
+
+int qman_update_cgr_safe(struct qman_cgr *cgr, struct qm_mcc_initcgr *opts)
+{
+ struct update_cgr_params params = {
+ .cgr = cgr,
+ .opts = opts,
+ };
+
+ preempt_disable();
+ if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id())
+ smp_call_function_single(qman_cgr_cpus[cgr->cgrid],
+ qman_update_cgr_smp_call, &params,
+ true);
+ else
+ params.ret = qman_update_cgr(cgr, opts);
+ preempt_enable();
+ return params.ret;
+}
+EXPORT_SYMBOL(qman_update_cgr_safe);
+
+/* Cleanup FQs */
+
+static int _qm_mr_consume_and_match_verb(struct qm_portal *p, int v)
+{
+ const union qm_mr_entry *msg;
+ int found = 0;
+
+ qm_mr_pvb_update(p);
+ msg = qm_mr_current(p);
+ while (msg) {
+ if ((msg->verb & QM_MR_VERB_TYPE_MASK) == v)
+ found = 1;
+ qm_mr_next(p);
+ qm_mr_cci_consume_to_current(p);
+ qm_mr_pvb_update(p);
+ msg = qm_mr_current(p);
+ }
+ return found;
+}
+
+static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
+ bool wait)
+{
+ const struct qm_dqrr_entry *dqrr;
+ int found = 0;
+
+ do {
+ qm_dqrr_pvb_update(p);
+ dqrr = qm_dqrr_current(p);
+ if (!dqrr)
+ cpu_relax();
+ } while (wait && !dqrr);
+
+ while (dqrr) {
+ if (qm_fqid_get(dqrr) == fqid && (dqrr->stat & s))
+ found = 1;
+ qm_dqrr_cdc_consume_1ptr(p, dqrr, 0);
+ qm_dqrr_pvb_update(p);
+ qm_dqrr_next(p);
+ dqrr = qm_dqrr_current(p);
+ }
+ return found;
+}
+
+#define qm_mr_drain(p, V) \
+ _qm_mr_consume_and_match_verb(p, QM_MR_VERB_##V)
+
+#define qm_dqrr_drain(p, f, S) \
+ _qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, false)
+
+#define qm_dqrr_drain_wait(p, f, S) \
+ _qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, true)
+
+#define qm_dqrr_drain_nomatch(p) \
+ _qm_dqrr_consume_and_match(p, 0, 0, false)
+
+int qman_shutdown_fq(u32 fqid)
+{
+ struct qman_portal *p, *channel_portal;
+ struct device *dev;
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ int orl_empty, drain = 0, ret = 0;
+ u32 channel, res;
+ u8 state;
+
+ p = get_affine_portal();
+ dev = p->config->dev;
+ /* Determine the state of the FQID */
+ mcc = qm_mc_start(&p->p);
+ qm_fqid_set(&mcc->fq, fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_err(dev, "QUERYFQ_NP timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+ state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
+ if (state == QM_MCR_NP_STATE_OOS)
+ goto out; /* Already OOS, no need to do anymore checks */
+
+ /* Query which channel the FQ is using */
+ mcc = qm_mc_start(&p->p);
+ qm_fqid_set(&mcc->fq, fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_err(dev, "QUERYFQ timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+ /* Need to store these since the MCR gets reused */
+ channel = qm_fqd_get_chan(&mcr->queryfq.fqd);
+ qm_fqd_get_wq(&mcr->queryfq.fqd);
+
+ if (channel < qm_channel_pool1) {
+ channel_portal = get_portal_for_channel(channel);
+ if (channel_portal == NULL) {
+ dev_err(dev, "Can't find portal for dedicated channel 0x%x\n",
+ channel);
+ ret = -EIO;
+ goto out;
+ }
+ } else
+ channel_portal = p;
+
+ switch (state) {
+ case QM_MCR_NP_STATE_TEN_SCHED:
+ case QM_MCR_NP_STATE_TRU_SCHED:
+ case QM_MCR_NP_STATE_ACTIVE:
+ case QM_MCR_NP_STATE_PARKED:
+ orl_empty = 0;
+ mcc = qm_mc_start(&channel_portal->p);
+ qm_fqid_set(&mcc->fq, fqid);
+ qm_mc_commit(&channel_portal->p, QM_MCC_VERB_ALTER_RETIRE);
+ if (!qm_mc_result_timeout(&channel_portal->p, &mcr)) {
+ dev_err(dev, "ALTER_RETIRE timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_RETIRE);
+ res = mcr->result; /* Make a copy as we reuse MCR below */
+
+ if (res == QM_MCR_RESULT_OK)
+ drain_mr_fqrni(&channel_portal->p);
+
+ if (res == QM_MCR_RESULT_PENDING) {
+ /*
+ * Need to wait for the FQRN in the message ring, which
+ * will only occur once the FQ has been drained. In
+ * order for the FQ to drain, the portal needs to be set
+ * to dequeue from the channel the FQ is scheduled on.
+ */
+ int found_fqrn = 0;
+
+ /* Flag that we need to drain FQ */
+ drain = 1;
+
+ if (channel >= qm_channel_pool1 &&
+ channel < qm_channel_pool1 + 15) {
+ /* Pool channel, enable the bit in the portal */
+ } else if (channel < qm_channel_pool1) {
+ /* Dedicated channel */
+ } else {
+ dev_err(dev, "Can't recover FQ 0x%x, ch: 0x%x\n",
+ fqid, channel);
+ ret = -EBUSY;
+ goto out;
+ }
+ /* Set the sdqcr to drain this channel */
+ if (channel < qm_channel_pool1)
+ qm_dqrr_sdqcr_set(&channel_portal->p,
+ QM_SDQCR_TYPE_ACTIVE |
+ QM_SDQCR_CHANNELS_DEDICATED);
+ else
+ qm_dqrr_sdqcr_set(&channel_portal->p,
+ QM_SDQCR_TYPE_ACTIVE |
+ QM_SDQCR_CHANNELS_POOL_CONV
+ (channel));
+ do {
+ /* Keep draining DQRR while checking the MR */
+ qm_dqrr_drain_nomatch(&channel_portal->p);
+ /* Process message ring too */
+ found_fqrn = qm_mr_drain(&channel_portal->p,
+ FQRN);
+ cpu_relax();
+ } while (!found_fqrn);
+ /* Restore SDQCR */
+ qm_dqrr_sdqcr_set(&channel_portal->p,
+ channel_portal->sdqcr);
+
+ }
+ if (res != QM_MCR_RESULT_OK &&
+ res != QM_MCR_RESULT_PENDING) {
+ dev_err(dev, "retire_fq failed: FQ 0x%x, res=0x%x\n",
+ fqid, res);
+ ret = -EIO;
+ goto out;
+ }
+ if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
+ /*
+ * ORL had no entries, no need to wait until the
+ * ERNs come in
+ */
+ orl_empty = 1;
+ }
+ /*
+ * Retirement succeeded, check to see if FQ needs
+ * to be drained
+ */
+ if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
+ /* FQ is Not Empty, drain using volatile DQ commands */
+ do {
+ u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);
+
+ qm_dqrr_vdqcr_set(&p->p, vdqcr);
+ /*
+ * Wait for a dequeue and process the dequeues,
+ * making sure to empty the ring completely
+ */
+ } while (!qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY));
+ }
+
+ while (!orl_empty) {
+ /* Wait for the ORL to have been completely drained */
+ orl_empty = qm_mr_drain(&p->p, FQRL);
+ cpu_relax();
+ }
+ mcc = qm_mc_start(&p->p);
+ qm_fqid_set(&mcc->fq, fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_OOS);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ dev_err(dev, "OOS after drain fail: FQ 0x%x (0x%x)\n",
+ fqid, mcr->result);
+ ret = -EIO;
+ goto out;
+ }
+ break;
+
+ case QM_MCR_NP_STATE_RETIRED:
+ /* Send OOS Command */
+ mcc = qm_mc_start(&p->p);
+ qm_fqid_set(&mcc->fq, fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_OOS);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n",
+ fqid, mcr->result);
+ ret = -EIO;
+ goto out;
+ }
+ break;
+
+ case QM_MCR_NP_STATE_OOS:
+ /* Done */
+ break;
+
+ default:
+ ret = -EIO;
+ }
+
+out:
+ put_affine_portal();
+ return ret;
+}
+
+const struct qm_portal_config *qman_get_qm_portal_config(
+ struct qman_portal *portal)
+{
+ return portal->config;
+}
+EXPORT_SYMBOL(qman_get_qm_portal_config);
+
+struct gen_pool *qm_fqalloc; /* FQID allocator */
+struct gen_pool *qm_qpalloc; /* pool-channel allocator */
+struct gen_pool *qm_cgralloc; /* CGR ID allocator */
+
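+/*
+ * IDs handed out by these allocators are stored in the genalloc pools offset
+ * by DPAA_GENALLOC_OFF so that a successful allocation never yields 0
+ * (gen_pool_alloc() uses 0 to signal failure); the offset is masked off on
+ * allocation and added back when the ID is released.
+ */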
+static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
+{
+ unsigned long addr;
+
+ if (!p)
+ return -ENODEV;
+
+ addr = gen_pool_alloc(p, cnt);
+ if (!addr)
+ return -ENOMEM;
+
+ *result = addr & ~DPAA_GENALLOC_OFF;
+
+ return 0;
+}
+
+int qman_alloc_fqid_range(u32 *result, u32 count)
+{
+ return qman_alloc_range(qm_fqalloc, result, count);
+}
+EXPORT_SYMBOL(qman_alloc_fqid_range);
+
+int qman_alloc_pool_range(u32 *result, u32 count)
+{
+ return qman_alloc_range(qm_qpalloc, result, count);
+}
+EXPORT_SYMBOL(qman_alloc_pool_range);
+
+int qman_alloc_cgrid_range(u32 *result, u32 count)
+{
+ return qman_alloc_range(qm_cgralloc, result, count);
+}
+EXPORT_SYMBOL(qman_alloc_cgrid_range);
+
+int qman_release_fqid(u32 fqid)
+{
+ int ret = qman_shutdown_fq(fqid);
+
+ if (ret) {
+ pr_debug("FQID %d leaked\n", fqid);
+ return ret;
+ }
+
+ gen_pool_free(qm_fqalloc, fqid | DPAA_GENALLOC_OFF, 1);
+ return 0;
+}
+EXPORT_SYMBOL(qman_release_fqid);
+
+static int qpool_cleanup(u32 qp)
+{
+ /*
+ * We query all FQDs starting from FQID 1 until we get an
+ * "invalid FQID" error, looking for non-OOS FQDs whose destination
+ * channel is the pool-channel being released. When a non-OOS FQD is
+ * found we attempt to clean it up.
+ */
+ struct qman_fq fq = {
+ .fqid = QM_FQID_RANGE_START
+ };
+ int err;
+
+ do {
+ struct qm_mcr_queryfq_np np;
+
+ err = qman_query_fq_np(&fq, &np);
+ if (err == -ERANGE)
+ /* FQID range exceeded, found no problems */
+ return 0;
+ else if (WARN_ON(err))
+ return err;
+
+ if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
+ struct qm_fqd fqd;
+
+ err = qman_query_fq(&fq, &fqd);
+ if (WARN_ON(err))
+ return err;
+ if (qm_fqd_get_chan(&fqd) == qp) {
+ /* The channel is the FQ's target, clean it */
+ err = qman_shutdown_fq(fq.fqid);
+ if (err)
+ /*
+ * Couldn't shut down the FQ
+ * so the pool must be leaked
+ */
+ return err;
+ }
+ }
+ /* Move to the next FQID */
+ fq.fqid++;
+ } while (1);
+}
+
+int qman_release_pool(u32 qp)
+{
+ int ret;
+
+ ret = qpool_cleanup(qp);
+ if (ret) {
+ pr_debug("CHID %d leaked\n", qp);
+ return ret;
+ }
+
+ gen_pool_free(qm_qpalloc, qp | DPAA_GENALLOC_OFF, 1);
+ return 0;
+}
+EXPORT_SYMBOL(qman_release_pool);
+
+static int cgr_cleanup(u32 cgrid)
+{
+ /*
+ * query all FQDs starting from FQID 1 until we get an "invalid FQID"
+ * error, looking for non-OOS FQDs whose CGR is the CGR being released
+ */
+ struct qman_fq fq = {
+ .fqid = QM_FQID_RANGE_START
+ };
+ int err;
+
+ do {
+ struct qm_mcr_queryfq_np np;
+
+ err = qman_query_fq_np(&fq, &np);
+ if (err == -ERANGE)
+ /* FQID range exceeded, found no problems */
+ return 0;
+ else if (WARN_ON(err))
+ return err;
+
+ if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
+ struct qm_fqd fqd;
+
+ err = qman_query_fq(&fq, &fqd);
+ if (WARN_ON(err))
+ return err;
+ if (be16_to_cpu(fqd.fq_ctrl) & QM_FQCTRL_CGE &&
+ fqd.cgid == cgrid) {
+ pr_err("CGRID 0x%x is being used by FQID 0x%x, CGR will be leaked\n",
+ cgrid, fq.fqid);
+ return -EIO;
+ }
+ }
+ /* Move to the next FQID */
+ fq.fqid++;
+ } while (1);
+}
+
+int qman_release_cgrid(u32 cgrid)
+{
+ int ret;
+
+ ret = cgr_cleanup(cgrid);
+ if (ret) {
+ pr_debug("CGRID %d leaked\n", cgrid);
+ return ret;
+ }
+
+ gen_pool_free(qm_cgralloc, cgrid | DPAA_GENALLOC_OFF, 1);
+ return 0;
+}
+EXPORT_SYMBOL(qman_release_cgrid);
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
new file mode 100644
index 0000000000..157659fd03
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -0,0 +1,917 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+u16 qman_ip_rev;
+EXPORT_SYMBOL(qman_ip_rev);
+u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
+EXPORT_SYMBOL(qm_channel_pool1);
+u16 qm_channel_caam = QMAN_CHANNEL_CAAM;
+EXPORT_SYMBOL(qm_channel_caam);
+
+/* Register offsets */
+#define REG_QCSP_LIO_CFG(n) (0x0000 + ((n) * 0x10))
+#define REG_QCSP_IO_CFG(n) (0x0004 + ((n) * 0x10))
+#define REG_QCSP_DD_CFG(n) (0x000c + ((n) * 0x10))
+#define REG_DD_CFG 0x0200
+#define REG_DCP_CFG(n) (0x0300 + ((n) * 0x10))
+#define REG_DCP_DD_CFG(n) (0x0304 + ((n) * 0x10))
+#define REG_DCP_DLM_AVG(n) (0x030c + ((n) * 0x10))
+#define REG_PFDR_FPC 0x0400
+#define REG_PFDR_FP_HEAD 0x0404
+#define REG_PFDR_FP_TAIL 0x0408
+#define REG_PFDR_FP_LWIT 0x0410
+#define REG_PFDR_CFG 0x0414
+#define REG_SFDR_CFG 0x0500
+#define REG_SFDR_IN_USE 0x0504
+#define REG_WQ_CS_CFG(n) (0x0600 + ((n) * 0x04))
+#define REG_WQ_DEF_ENC_WQID 0x0630
+#define REG_WQ_SC_DD_CFG(n) (0x640 + ((n) * 0x04))
+#define REG_WQ_PC_DD_CFG(n) (0x680 + ((n) * 0x04))
+#define REG_WQ_DC0_DD_CFG(n) (0x6c0 + ((n) * 0x04))
+#define REG_WQ_DC1_DD_CFG(n) (0x700 + ((n) * 0x04))
+#define REG_WQ_DCn_DD_CFG(n) (0x6c0 + ((n) * 0x40)) /* n=2,3 */
+#define REG_CM_CFG 0x0800
+#define REG_ECSR 0x0a00
+#define REG_ECIR 0x0a04
+#define REG_EADR 0x0a08
+#define REG_ECIR2 0x0a0c
+#define REG_EDATA(n) (0x0a10 + ((n) * 0x04))
+#define REG_SBEC(n) (0x0a80 + ((n) * 0x04))
+#define REG_MCR 0x0b00
+#define REG_MCP(n) (0x0b04 + ((n) * 0x04))
+#define REG_MISC_CFG 0x0be0
+#define REG_HID_CFG 0x0bf0
+#define REG_IDLE_STAT 0x0bf4
+#define REG_IP_REV_1 0x0bf8
+#define REG_IP_REV_2 0x0bfc
+#define REG_FQD_BARE 0x0c00
+#define REG_PFDR_BARE 0x0c20
+#define REG_offset_BAR 0x0004 /* relative to REG_[FQD|PFDR]_BARE */
+#define REG_offset_AR 0x0010 /* relative to REG_[FQD|PFDR]_BARE */
+#define REG_QCSP_BARE 0x0c80
+#define REG_QCSP_BAR 0x0c84
+#define REG_CI_SCHED_CFG 0x0d00
+#define REG_SRCIDR 0x0d04
+#define REG_LIODNR 0x0d08
+#define REG_CI_RLM_AVG 0x0d14
+#define REG_ERR_ISR 0x0e00
+#define REG_ERR_IER 0x0e04
+#define REG_REV3_QCSP_LIO_CFG(n) (0x1000 + ((n) * 0x10))
+#define REG_REV3_QCSP_IO_CFG(n) (0x1004 + ((n) * 0x10))
+#define REG_REV3_QCSP_DD_CFG(n) (0x100c + ((n) * 0x10))
+
+/* Assists for QMAN_MCR */
+#define MCR_INIT_PFDR 0x01000000
+#define MCR_get_rslt(v) (u8)((v) >> 24)
+#define MCR_rslt_idle(r) (!(r) || ((r) >= 0xf0))
+#define MCR_rslt_ok(r) ((r) == 0xf0)
+#define MCR_rslt_eaccess(r) ((r) == 0xf8)
+#define MCR_rslt_inval(r) ((r) == 0xff)
+
+/*
+ * Corenet initiator settings. Stash request queues are 4-deep to match the
+ * cores' ability to snarf. Stash priority is 3, other priorities are 2.
+ */
+#define QM_CI_SCHED_CFG_SRCCIV 4
+#define QM_CI_SCHED_CFG_SRQ_W 3
+#define QM_CI_SCHED_CFG_RW_W 2
+#define QM_CI_SCHED_CFG_BMAN_W 2
+/* write SRCCIV enable */
+#define QM_CI_SCHED_CFG_SRCCIV_EN BIT(31)
+
+/* Follows WQ_CS_CFG0-5 */
+enum qm_wq_class {
+ qm_wq_portal = 0,
+ qm_wq_pool = 1,
+ qm_wq_fman0 = 2,
+ qm_wq_fman1 = 3,
+ qm_wq_caam = 4,
+ qm_wq_pme = 5,
+ qm_wq_first = qm_wq_portal,
+ qm_wq_last = qm_wq_pme
+};
+
+/* Follows FQD_[BARE|BAR|AR] and PFDR_[BARE|BAR|AR] */
+enum qm_memory {
+ qm_memory_fqd,
+ qm_memory_pfdr
+};
+
+/* Used by all error interrupt registers except 'inhibit' */
+#define QM_EIRQ_CIDE 0x20000000 /* Corenet Initiator Data Error */
+#define QM_EIRQ_CTDE 0x10000000 /* Corenet Target Data Error */
+#define QM_EIRQ_CITT 0x08000000 /* Corenet Invalid Target Transaction */
+#define QM_EIRQ_PLWI 0x04000000 /* PFDR Low Watermark */
+#define QM_EIRQ_MBEI 0x02000000 /* Multi-bit ECC Error */
+#define QM_EIRQ_SBEI 0x01000000 /* Single-bit ECC Error */
+#define QM_EIRQ_PEBI 0x00800000 /* PFDR Enqueues Blocked Interrupt */
+#define QM_EIRQ_IFSI 0x00020000 /* Invalid FQ Flow Control State */
+#define QM_EIRQ_ICVI 0x00010000 /* Invalid Command Verb */
+#define QM_EIRQ_IDDI 0x00000800 /* Invalid Dequeue (Direct-connect) */
+#define QM_EIRQ_IDFI 0x00000400 /* Invalid Dequeue FQ */
+#define QM_EIRQ_IDSI 0x00000200 /* Invalid Dequeue Source */
+#define QM_EIRQ_IDQI 0x00000100 /* Invalid Dequeue Queue */
+#define QM_EIRQ_IECE 0x00000010 /* Invalid Enqueue Configuration */
+#define QM_EIRQ_IEOI 0x00000008 /* Invalid Enqueue Overflow */
+#define QM_EIRQ_IESI 0x00000004 /* Invalid Enqueue State */
+#define QM_EIRQ_IECI 0x00000002 /* Invalid Enqueue Channel */
+#define QM_EIRQ_IEQI 0x00000001 /* Invalid Enqueue Queue */
+
+/* QMAN_ECIR valid error bit */
+#define PORTAL_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IESI | QM_EIRQ_IEOI | \
+ QM_EIRQ_IDQI | QM_EIRQ_IDSI | QM_EIRQ_IDFI | \
+ QM_EIRQ_IDDI | QM_EIRQ_ICVI | QM_EIRQ_IFSI)
+#define FQID_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IECI | QM_EIRQ_IESI | \
+ QM_EIRQ_IEOI | QM_EIRQ_IDQI | QM_EIRQ_IDFI | \
+ QM_EIRQ_IFSI)
+
+struct qm_ecir {
+ u32 info; /* res[30-31], ptyp[29], pnum[24-28], fqid[0-23] */
+};
+
+static bool qm_ecir_is_dcp(const struct qm_ecir *p)
+{
+ return p->info & BIT(29);
+}
+
+static int qm_ecir_get_pnum(const struct qm_ecir *p)
+{
+ return (p->info >> 24) & 0x1f;
+}
+
+static int qm_ecir_get_fqid(const struct qm_ecir *p)
+{
+ return p->info & (BIT(24) - 1);
+}
+
+struct qm_ecir2 {
+ u32 info; /* ptyp[31], res[10-30], pnum[0-9] */
+};
+
+static bool qm_ecir2_is_dcp(const struct qm_ecir2 *p)
+{
+ return p->info & BIT(31);
+}
+
+static int qm_ecir2_get_pnum(const struct qm_ecir2 *p)
+{
+ return p->info & (BIT(10) - 1);
+}
+
+struct qm_eadr {
+ u32 info; /* memid[24-27], eadr[0-11] */
+ /* v3: memid[24-28], eadr[0-15] */
+};
+
+static int qm_eadr_get_memid(const struct qm_eadr *p)
+{
+ return (p->info >> 24) & 0xf;
+}
+
+static int qm_eadr_get_eadr(const struct qm_eadr *p)
+{
+ return p->info & (BIT(12) - 1);
+}
+
+static int qm_eadr_v3_get_memid(const struct qm_eadr *p)
+{
+ return (p->info >> 24) & 0x1f;
+}
+
+static int qm_eadr_v3_get_eadr(const struct qm_eadr *p)
+{
+ return p->info & (BIT(16) - 1);
+}
+
+struct qman_hwerr_txt {
+ u32 mask;
+ const char *txt;
+};
+
+
+static const struct qman_hwerr_txt qman_hwerr_txts[] = {
+ { QM_EIRQ_CIDE, "Corenet Initiator Data Error" },
+ { QM_EIRQ_CTDE, "Corenet Target Data Error" },
+ { QM_EIRQ_CITT, "Corenet Invalid Target Transaction" },
+ { QM_EIRQ_PLWI, "PFDR Low Watermark" },
+ { QM_EIRQ_MBEI, "Multi-bit ECC Error" },
+ { QM_EIRQ_SBEI, "Single-bit ECC Error" },
+ { QM_EIRQ_PEBI, "PFDR Enqueues Blocked Interrupt" },
+ { QM_EIRQ_ICVI, "Invalid Command Verb" },
+ { QM_EIRQ_IFSI, "Invalid Flow Control State" },
+ { QM_EIRQ_IDDI, "Invalid Dequeue (Direct-connect)" },
+ { QM_EIRQ_IDFI, "Invalid Dequeue FQ" },
+ { QM_EIRQ_IDSI, "Invalid Dequeue Source" },
+ { QM_EIRQ_IDQI, "Invalid Dequeue Queue" },
+ { QM_EIRQ_IECE, "Invalid Enqueue Configuration" },
+ { QM_EIRQ_IEOI, "Invalid Enqueue Overflow" },
+ { QM_EIRQ_IESI, "Invalid Enqueue State" },
+ { QM_EIRQ_IECI, "Invalid Enqueue Channel" },
+ { QM_EIRQ_IEQI, "Invalid Enqueue Queue" },
+};
+
+struct qman_error_info_mdata {
+ u16 addr_mask;
+ u16 bits;
+ const char *txt;
+};
+
+static const struct qman_error_info_mdata error_mdata[] = {
+ { 0x01FF, 24, "FQD cache tag memory 0" },
+ { 0x01FF, 24, "FQD cache tag memory 1" },
+ { 0x01FF, 24, "FQD cache tag memory 2" },
+ { 0x01FF, 24, "FQD cache tag memory 3" },
+ { 0x0FFF, 512, "FQD cache memory" },
+ { 0x07FF, 128, "SFDR memory" },
+ { 0x01FF, 72, "WQ context memory" },
+ { 0x00FF, 240, "CGR memory" },
+ { 0x00FF, 302, "Internal Order Restoration List memory" },
+ { 0x01FF, 256, "SW portal ring memory" },
+};
+
+#define QMAN_ERRS_TO_DISABLE (QM_EIRQ_PLWI | QM_EIRQ_PEBI)
+
+/*
+ * TODO: unimplemented registers
+ *
+ * Keeping a list here of QMan registers I have not yet covered;
+ * QCSP_DD_IHRSR, QCSP_DD_IHRFR, QCSP_DD_HASR,
+ * DCP_DD_IHRSR, DCP_DD_IHRFR, DCP_DD_HASR, CM_CFG,
+ * QMAN_EECC, QMAN_SBET, QMAN_EINJ, QMAN_SBEC0-12
+ */
+
+/* Pointer to the start of the QMan's CCSR space */
+static u32 __iomem *qm_ccsr_start;
+/* A SDQCR mask comprising all the available/visible pool channels */
+static u32 qm_pools_sdqcr;
+static int __qman_probed;
+static int __qman_requires_cleanup;
+
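+/*
+ * The CCSR accessors take byte offsets from the CCSR base; qm_ccsr_start is a
+ * u32 pointer, hence the division by 4.
+ */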
+static inline u32 qm_ccsr_in(u32 offset)
+{
+ return ioread32be(qm_ccsr_start + offset/4);
+}
+
+static inline void qm_ccsr_out(u32 offset, u32 val)
+{
+ iowrite32be(val, qm_ccsr_start + offset/4);
+}
+
+u32 qm_get_pools_sdqcr(void)
+{
+ return qm_pools_sdqcr;
+}
+
+enum qm_dc_portal {
+ qm_dc_portal_fman0 = 0,
+ qm_dc_portal_fman1 = 1
+};
+
+static void qm_set_dc(enum qm_dc_portal portal, int ed, u8 sernd)
+{
+ DPAA_ASSERT(!ed || portal == qm_dc_portal_fman0 ||
+ portal == qm_dc_portal_fman1);
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ qm_ccsr_out(REG_DCP_CFG(portal),
+ (ed ? 0x1000 : 0) | (sernd & 0x3ff));
+ else
+ qm_ccsr_out(REG_DCP_CFG(portal),
+ (ed ? 0x100 : 0) | (sernd & 0x1f));
+}
+
+static void qm_set_wq_scheduling(enum qm_wq_class wq_class,
+ u8 cs_elev, u8 csw2, u8 csw3, u8 csw4,
+ u8 csw5, u8 csw6, u8 csw7)
+{
+ qm_ccsr_out(REG_WQ_CS_CFG(wq_class), ((cs_elev & 0xff) << 24) |
+ ((csw2 & 0x7) << 20) | ((csw3 & 0x7) << 16) |
+ ((csw4 & 0x7) << 12) | ((csw5 & 0x7) << 8) |
+ ((csw6 & 0x7) << 4) | (csw7 & 0x7));
+}
+
+static void qm_set_hid(void)
+{
+ qm_ccsr_out(REG_HID_CFG, 0);
+}
+
+static void qm_set_corenet_initiator(void)
+{
+ qm_ccsr_out(REG_CI_SCHED_CFG, QM_CI_SCHED_CFG_SRCCIV_EN |
+ (QM_CI_SCHED_CFG_SRCCIV << 24) |
+ (QM_CI_SCHED_CFG_SRQ_W << 8) |
+ (QM_CI_SCHED_CFG_RW_W << 4) |
+ QM_CI_SCHED_CFG_BMAN_W);
+}
+
+static void qm_get_version(u16 *id, u8 *major, u8 *minor)
+{
+ u32 v = qm_ccsr_in(REG_IP_REV_1);
+ *id = (v >> 16);
+ *major = (v >> 8) & 0xff;
+ *minor = v & 0xff;
+}
+
+#define PFDR_AR_EN BIT(31)
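+/*
+ * Program the base address and size of one of QMan's private memory regions
+ * (FQD or PFDR). Returns 0 if the region was programmed here, 1 if QMan had
+ * already been initialized with the same region (cleanup will be required),
+ * or a negative errno on failure.
+ */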
+static int qm_set_memory(enum qm_memory memory, u64 ba, u32 size)
+{
+ void *ptr;
+ u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE;
+ u32 exp = ilog2(size);
+ u32 bar, bare;
+
+ /* choke if size isn't within range */
+ DPAA_ASSERT((size >= 4096) && (size <= 1024*1024*1024) &&
+ is_power_of_2(size));
+ /* choke if 'ba' has lower-alignment than 'size' */
+ DPAA_ASSERT(!(ba & (size - 1)));
+
+ /* Check to see if QMan has already been initialized */
+ bar = qm_ccsr_in(offset + REG_offset_BAR);
+ if (bar) {
+ /* Make sure ba == what was programmed */
+ bare = qm_ccsr_in(offset);
+ if (bare != upper_32_bits(ba) || bar != lower_32_bits(ba)) {
+ pr_err("Attempted to reinitialize QMan with different BAR, got 0x%llx read BARE=0x%x BAR=0x%x\n",
+ ba, bare, bar);
+ return -ENOMEM;
+ }
+ __qman_requires_cleanup = 1;
+ /* Return 1 to indicate memory was previously programmed */
+ return 1;
+ }
+ /* Need to temporarily map the area to make sure it is zeroed */
+ ptr = memremap(ba, size, MEMREMAP_WB);
+ if (!ptr) {
+ pr_crit("memremap() of QMan private memory failed\n");
+ return -ENOMEM;
+ }
+ memset(ptr, 0, size);
+
+#ifdef CONFIG_PPC
+ /*
+ * PPC doesn't appear to flush the cache on memunmap() but the
+ * cache must be flushed since QMan does non coherent accesses
+ * to this memory
+ */
+ flush_dcache_range((unsigned long) ptr, (unsigned long) ptr+size);
+#endif
+ memunmap(ptr);
+
+ qm_ccsr_out(offset, upper_32_bits(ba));
+ qm_ccsr_out(offset + REG_offset_BAR, lower_32_bits(ba));
+ qm_ccsr_out(offset + REG_offset_AR, PFDR_AR_EN | (exp - 1));
+ return 0;
+}
+
+static void qm_set_pfdr_threshold(u32 th, u8 k)
+{
+ qm_ccsr_out(REG_PFDR_FP_LWIT, th & 0xffffff);
+ qm_ccsr_out(REG_PFDR_CFG, k);
+}
+
+static void qm_set_sfdr_threshold(u16 th)
+{
+ qm_ccsr_out(REG_SFDR_CFG, th & 0x3ff);
+}
+
+static int qm_init_pfdr(struct device *dev, u32 pfdr_start, u32 num)
+{
+ u8 rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR));
+
+ DPAA_ASSERT(pfdr_start && !(pfdr_start & 7) && !(num & 7) && num);
+ /* Make sure the command interface is 'idle' */
+ if (!MCR_rslt_idle(rslt)) {
+ dev_crit(dev, "QMAN_MCR isn't idle");
+ WARN_ON(1);
+ }
+
+ /* Write the MCR command params then the verb */
+ qm_ccsr_out(REG_MCP(0), pfdr_start);
+ /*
+ * TODO: remove this - it's a workaround for a model bug that is
+ * corrected in more recent versions. We use the workaround until
+ * everyone has upgraded.
+ */
+ qm_ccsr_out(REG_MCP(1), pfdr_start + num - 16);
+ dma_wmb();
+ qm_ccsr_out(REG_MCR, MCR_INIT_PFDR);
+ /* Poll for the result */
+ do {
+ rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR));
+ } while (!MCR_rslt_idle(rslt));
+ if (MCR_rslt_ok(rslt))
+ return 0;
+ if (MCR_rslt_eaccess(rslt))
+ return -EACCES;
+ if (MCR_rslt_inval(rslt))
+ return -EINVAL;
+ dev_crit(dev, "Unexpected result from MCR_INIT_PFDR: %02x\n", rslt);
+ return -ENODEV;
+}
+
+/*
+ * QMan needs two global memory areas initialized at boot time:
+ * 1) FQD: Frame Queue Descriptors used to manage frame queues
+ * 2) PFDR: Packed Frame Queue Descriptor Records used to store frames
+ * Both areas are reserved using the device tree reserved memory framework
+ * and the addresses and sizes are initialized when the QMan device is probed
+ */
+static dma_addr_t fqd_a, pfdr_a;
+static size_t fqd_sz, pfdr_sz;
+
+#ifdef CONFIG_PPC
+/*
+ * Support for PPC Device Tree backward compatibility when compatible
+ * string is set to fsl-qman-fqd and fsl-qman-pfdr
+ */
+static int zero_priv_mem(phys_addr_t addr, size_t sz)
+{
+ /* map as cacheable, non-guarded */
+ void __iomem *tmpp = ioremap_cache(addr, sz);
+
+ if (!tmpp)
+ return -ENOMEM;
+
+ memset_io(tmpp, 0, sz);
+ flush_dcache_range((unsigned long)tmpp,
+ (unsigned long)tmpp + sz);
+ iounmap(tmpp);
+
+ return 0;
+}
+
+static int qman_fqd(struct reserved_mem *rmem)
+{
+ fqd_a = rmem->base;
+ fqd_sz = rmem->size;
+
+ WARN_ON(!(fqd_a && fqd_sz));
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);
+
+static int qman_pfdr(struct reserved_mem *rmem)
+{
+ pfdr_a = rmem->base;
+ pfdr_sz = rmem->size;
+
+ WARN_ON(!(pfdr_a && pfdr_sz));
+
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr);
+
+#endif
+
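+/* Each frame queue descriptor occupies 64 bytes of FQD memory */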
+unsigned int qm_get_fqid_maxcnt(void)
+{
+ return fqd_sz / 64;
+}
+
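+/*
+ * Dump the captured error data: the last ceil(bit_count / 32) of the 16 EDATA
+ * registers hold the data, and the first (possibly partial) word printed is
+ * masked down to the residual bits.
+ */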
+static void log_edata_bits(struct device *dev, u32 bit_count)
+{
+ u32 i, j, mask = 0xffffffff;
+
+ dev_warn(dev, "ErrInt, EDATA:\n");
+ i = bit_count / 32;
+ if (bit_count % 32) {
+ i++;
+ mask = ~(mask << bit_count % 32);
+ }
+ j = 16 - i;
+ dev_warn(dev, " 0x%08x\n", qm_ccsr_in(REG_EDATA(j)) & mask);
+ j++;
+ for (; j < 16; j++)
+ dev_warn(dev, " 0x%08x\n", qm_ccsr_in(REG_EDATA(j)));
+}
+
+static void log_additional_error_info(struct device *dev, u32 isr_val,
+ u32 ecsr_val)
+{
+ struct qm_ecir ecir_val;
+ struct qm_eadr eadr_val;
+ int memid;
+
+ ecir_val.info = qm_ccsr_in(REG_ECIR);
+ /* Is portal info valid */
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
+ struct qm_ecir2 ecir2_val;
+
+ ecir2_val.info = qm_ccsr_in(REG_ECIR2);
+ if (ecsr_val & PORTAL_ECSR_ERR) {
+ dev_warn(dev, "ErrInt: %s id %d\n",
+ qm_ecir2_is_dcp(&ecir2_val) ? "DCP" : "SWP",
+ qm_ecir2_get_pnum(&ecir2_val));
+ }
+ if (ecsr_val & (FQID_ECSR_ERR | QM_EIRQ_IECE))
+ dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n",
+ qm_ecir_get_fqid(&ecir_val));
+
+ if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
+ eadr_val.info = qm_ccsr_in(REG_EADR);
+ memid = qm_eadr_v3_get_memid(&eadr_val);
+ dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n",
+ error_mdata[memid].txt,
+ error_mdata[memid].addr_mask
+ & qm_eadr_v3_get_eadr(&eadr_val));
+ log_edata_bits(dev, error_mdata[memid].bits);
+ }
+ } else {
+ if (ecsr_val & PORTAL_ECSR_ERR) {
+ dev_warn(dev, "ErrInt: %s id %d\n",
+ qm_ecir_is_dcp(&ecir_val) ? "DCP" : "SWP",
+ qm_ecir_get_pnum(&ecir_val));
+ }
+ if (ecsr_val & FQID_ECSR_ERR)
+ dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n",
+ qm_ecir_get_fqid(&ecir_val));
+
+ if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
+ eadr_val.info = qm_ccsr_in(REG_EADR);
+ memid = qm_eadr_get_memid(&eadr_val);
+ dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n",
+ error_mdata[memid].txt,
+ error_mdata[memid].addr_mask
+ & qm_eadr_get_eadr(&eadr_val));
+ log_edata_bits(dev, error_mdata[memid].bits);
+ }
+ }
+}
+
+static irqreturn_t qman_isr(int irq, void *ptr)
+{
+ u32 isr_val, ier_val, ecsr_val, isr_mask, i;
+ struct device *dev = ptr;
+
+ ier_val = qm_ccsr_in(REG_ERR_IER);
+ isr_val = qm_ccsr_in(REG_ERR_ISR);
+ ecsr_val = qm_ccsr_in(REG_ECSR);
+ isr_mask = isr_val & ier_val;
+
+ if (!isr_mask)
+ return IRQ_NONE;
+
+ for (i = 0; i < ARRAY_SIZE(qman_hwerr_txts); i++) {
+ if (qman_hwerr_txts[i].mask & isr_mask) {
+ dev_err_ratelimited(dev, "ErrInt: %s\n",
+ qman_hwerr_txts[i].txt);
+ if (qman_hwerr_txts[i].mask & ecsr_val) {
+ log_additional_error_info(dev, isr_mask,
+ ecsr_val);
+ /* Re-arm error capture registers */
+ qm_ccsr_out(REG_ECSR, ecsr_val);
+ }
+ if (qman_hwerr_txts[i].mask & QMAN_ERRS_TO_DISABLE) {
+ dev_dbg(dev, "Disabling error 0x%x\n",
+ qman_hwerr_txts[i].mask);
+ ier_val &= ~qman_hwerr_txts[i].mask;
+ qm_ccsr_out(REG_ERR_IER, ier_val);
+ }
+ }
+ }
+ qm_ccsr_out(REG_ERR_ISR, isr_val);
+
+ return IRQ_HANDLED;
+}
+
+static int qman_init_ccsr(struct device *dev)
+{
+ int i, err;
+
+ /* FQD memory */
+ err = qm_set_memory(qm_memory_fqd, fqd_a, fqd_sz);
+ if (err < 0)
+ return err;
+ /* PFDR memory */
+ err = qm_set_memory(qm_memory_pfdr, pfdr_a, pfdr_sz);
+ if (err < 0)
+ return err;
+ /* Only initialize PFDRs if the QMan was not initialized before */
+ if (err == 0) {
+ err = qm_init_pfdr(dev, 8, pfdr_sz / 64 - 8);
+ if (err)
+ return err;
+ }
+ /* thresholds */
+ qm_set_pfdr_threshold(512, 64);
+ qm_set_sfdr_threshold(128);
+ /* clear stale PEBI bit from interrupt status register */
+ qm_ccsr_out(REG_ERR_ISR, QM_EIRQ_PEBI);
+ /* corenet initiator settings */
+ qm_set_corenet_initiator();
+ /* HID settings */
+ qm_set_hid();
+ /* Set scheduling weights to defaults */
+ for (i = qm_wq_first; i <= qm_wq_last; i++)
+ qm_set_wq_scheduling(i, 0, 0, 0, 0, 0, 0, 0);
+ /* We are not prepared to accept ERNs for hardware enqueues */
+ qm_set_dc(qm_dc_portal_fman0, 1, 0);
+ qm_set_dc(qm_dc_portal_fman1, 1, 0);
+ return 0;
+}
+
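+/*
+ * All software portals must share the same LIODN offset: remember the offset
+ * found on the first portal probed and rewrite it into every later portal's
+ * LIO_CFG register.
+ */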
+#define LIO_CFG_LIODN_MASK 0x0fff0000
+void __qman_liodn_fixup(u16 channel)
+{
+ static int done;
+ static u32 liodn_offset;
+ u32 before, after;
+ int idx = channel - QM_CHANNEL_SWPORTAL0;
+
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ before = qm_ccsr_in(REG_REV3_QCSP_LIO_CFG(idx));
+ else
+ before = qm_ccsr_in(REG_QCSP_LIO_CFG(idx));
+ if (!done) {
+ liodn_offset = before & LIO_CFG_LIODN_MASK;
+ done = 1;
+ return;
+ }
+ after = (before & (~LIO_CFG_LIODN_MASK)) | liodn_offset;
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ qm_ccsr_out(REG_REV3_QCSP_LIO_CFG(idx), after);
+ else
+ qm_ccsr_out(REG_QCSP_LIO_CFG(idx), after);
+}
+
+#define IO_CFG_SDEST_MASK 0x00ff0000
+void qman_set_sdest(u16 channel, unsigned int cpu_idx)
+{
+ int idx = channel - QM_CHANNEL_SWPORTAL0;
+ u32 before, after;
+
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
+ before = qm_ccsr_in(REG_REV3_QCSP_IO_CFG(idx));
+ /* Each pair of vcpus shares the same SRQ (SDEST) */
+ cpu_idx /= 2;
+ after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
+ qm_ccsr_out(REG_REV3_QCSP_IO_CFG(idx), after);
+ } else {
+ before = qm_ccsr_in(REG_QCSP_IO_CFG(idx));
+ after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
+ qm_ccsr_out(REG_QCSP_IO_CFG(idx), after);
+ }
+}
+
+static int qman_resource_init(struct device *dev)
+{
+ int pool_chan_num, cgrid_num;
+ int ret, i;
+
+ switch (qman_ip_rev >> 8) {
+ case 1:
+ pool_chan_num = 15;
+ cgrid_num = 256;
+ break;
+ case 2:
+ pool_chan_num = 3;
+ cgrid_num = 64;
+ break;
+ case 3:
+ pool_chan_num = 15;
+ cgrid_num = 256;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ ret = gen_pool_add(qm_qpalloc, qm_channel_pool1 | DPAA_GENALLOC_OFF,
+ pool_chan_num, -1);
+ if (ret) {
+ dev_err(dev, "Failed to seed pool channels (%d)\n", ret);
+ return ret;
+ }
+
+ ret = gen_pool_add(qm_cgralloc, DPAA_GENALLOC_OFF, cgrid_num, -1);
+ if (ret) {
+ dev_err(dev, "Failed to seed CGRID range (%d)\n", ret);
+ return ret;
+ }
+
+ /* parse pool channels into the SDQCR mask */
+ for (i = 0; i < cgrid_num; i++)
+ qm_pools_sdqcr |= QM_SDQCR_CHANNELS_POOL_CONV(i);
+
+ ret = gen_pool_add(qm_fqalloc, QM_FQID_RANGE_START | DPAA_GENALLOC_OFF,
+ qm_get_fqid_maxcnt() - QM_FQID_RANGE_START, -1);
+ if (ret) {
+ dev_err(dev, "Failed to seed FQID range (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int qman_is_probed(void)
+{
+ return __qman_probed;
+}
+EXPORT_SYMBOL_GPL(qman_is_probed);
+
+int qman_requires_cleanup(void)
+{
+ return __qman_requires_cleanup;
+}
+
+void qman_done_cleanup(void)
+{
+ qman_enable_irqs();
+ __qman_requires_cleanup = 0;
+}
+
+static int fsl_qman_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct resource *res;
+ int ret, err_irq;
+ u16 id;
+ u8 major, minor;
+
+ __qman_probed = -1;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "Can't get %pOF property 'IORESOURCE_MEM'\n",
+ node);
+ return -ENXIO;
+ }
+ qm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res));
+ if (!qm_ccsr_start)
+ return -ENXIO;
+
+ qm_get_version(&id, &major, &minor);
+ if (major == 1 && minor == 0) {
+ dev_err(dev, "Rev1.0 on P4080 rev1 is not supported!\n");
+ return -ENODEV;
+ } else if (major == 1 && minor == 1)
+ qman_ip_rev = QMAN_REV11;
+ else if (major == 1 && minor == 2)
+ qman_ip_rev = QMAN_REV12;
+ else if (major == 2 && minor == 0)
+ qman_ip_rev = QMAN_REV20;
+ else if (major == 3 && minor == 0)
+ qman_ip_rev = QMAN_REV30;
+ else if (major == 3 && minor == 1)
+ qman_ip_rev = QMAN_REV31;
+ else if (major == 3 && minor == 2)
+ qman_ip_rev = QMAN_REV32;
+ else {
+ dev_err(dev, "Unknown QMan version\n");
+ return -ENODEV;
+ }
+
+ if ((qman_ip_rev & 0xff00) >= QMAN_REV30) {
+ qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
+ qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
+ }
+
+ if (fqd_a) {
+#ifdef CONFIG_PPC
+ /*
+ * For PPC backward DT compatibility
+ * FQD memory MUST be zero'd by software
+ */
+ zero_priv_mem(fqd_a, fqd_sz);
+#else
+ WARN(1, "Unexpected architecture using non shared-dma-mem reservations");
+#endif
+ } else {
+ /*
+ * The memory regions are assumed to be ordered FQD first, then PFDR;
+ * to ensure each allocation comes from the correct region, the
+ * driver initializes and allocates them in that order.
+ */
+ ret = qbman_init_private_mem(dev, 0, &fqd_a, &fqd_sz);
+ if (ret) {
+ dev_err(dev, "qbman_init_private_mem() for FQD failed 0x%x\n",
+ ret);
+ return -ENODEV;
+ }
+ }
+ dev_dbg(dev, "Allocated FQD 0x%llx 0x%zx\n", fqd_a, fqd_sz);
+
+ if (!pfdr_a) {
+ /* Setup PFDR memory */
+ ret = qbman_init_private_mem(dev, 1, &pfdr_a, &pfdr_sz);
+ if (ret) {
+ dev_err(dev, "qbman_init_private_mem() for PFDR failed 0x%x\n",
+ ret);
+ return -ENODEV;
+ }
+ }
+ dev_dbg(dev, "Allocated PFDR 0x%llx 0x%zx\n", pfdr_a, pfdr_sz);
+
+ ret = qman_init_ccsr(dev);
+ if (ret) {
+ dev_err(dev, "CCSR setup failed\n");
+ return ret;
+ }
+
+ err_irq = platform_get_irq(pdev, 0);
+ if (err_irq <= 0) {
+ dev_info(dev, "Can't get %pOF property 'interrupts'\n",
+ node);
+ return -ENODEV;
+ }
+ ret = devm_request_irq(dev, err_irq, qman_isr, IRQF_SHARED, "qman-err",
+ dev);
+ if (ret) {
+ dev_err(dev, "devm_request_irq() failed %d for '%pOF'\n",
+ ret, node);
+ return ret;
+ }
+
+ /*
+ * Write-to-clear any stale bits, (eg. starvation being asserted prior
+ * to resource allocation during driver init).
+ */
+ qm_ccsr_out(REG_ERR_ISR, 0xffffffff);
+ /* Enable Error Interrupts */
+ qm_ccsr_out(REG_ERR_IER, 0xffffffff);
+
+ qm_fqalloc = devm_gen_pool_create(dev, 0, -1, "qman-fqalloc");
+ if (IS_ERR(qm_fqalloc)) {
+ ret = PTR_ERR(qm_fqalloc);
+ dev_err(dev, "qman-fqalloc pool init failed (%d)\n", ret);
+ return ret;
+ }
+
+ qm_qpalloc = devm_gen_pool_create(dev, 0, -1, "qman-qpalloc");
+ if (IS_ERR(qm_qpalloc)) {
+ ret = PTR_ERR(qm_qpalloc);
+ dev_err(dev, "qman-qpalloc pool init failed (%d)\n", ret);
+ return ret;
+ }
+
+ qm_cgralloc = devm_gen_pool_create(dev, 0, -1, "qman-cgralloc");
+ if (IS_ERR(qm_cgralloc)) {
+ ret = PTR_ERR(qm_cgralloc);
+ dev_err(dev, "qman-cgralloc pool init failed (%d)\n", ret);
+ return ret;
+ }
+
+ ret = qman_resource_init(dev);
+ if (ret)
+ return ret;
+
+ ret = qman_alloc_fq_table(qm_get_fqid_maxcnt());
+ if (ret)
+ return ret;
+
+ ret = qman_wq_alloc();
+ if (ret)
+ return ret;
+
+ __qman_probed = 1;
+
+ return 0;
+}
+
+static const struct of_device_id fsl_qman_ids[] = {
+ {
+ .compatible = "fsl,qman",
+ },
+ {}
+};
+
+static struct platform_driver fsl_qman_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = fsl_qman_ids,
+ .suppress_bind_attrs = true,
+ },
+ .probe = fsl_qman_probe,
+};
+
+builtin_platform_driver(fsl_qman_driver);
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
new file mode 100644
index 0000000000..e23b60618c
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -0,0 +1,342 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+struct qman_portal *qman_dma_portal;
+EXPORT_SYMBOL(qman_dma_portal);
+
+/* Enable portal interrupts (as opposed to polling mode) */
+#define CONFIG_FSL_DPA_PIRQ_SLOW 1
+#define CONFIG_FSL_DPA_PIRQ_FAST 1
+
+static struct cpumask portal_cpus;
+static int __qman_portals_probed;
+/* protect qman global registers and global data shared among portals */
+static DEFINE_SPINLOCK(qman_lock);
+
+static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
+{
+#ifdef CONFIG_FSL_PAMU
+ struct device *dev = pcfg->dev;
+ int ret;
+
+ pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
+ if (!pcfg->iommu_domain) {
+ dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__);
+ goto no_iommu;
+ }
+ ret = fsl_pamu_configure_l1_stash(pcfg->iommu_domain, cpu);
+ if (ret < 0) {
+ dev_err(dev, "%s(): fsl_pamu_configure_l1_stash() = %d",
+ __func__, ret);
+ goto out_domain_free;
+ }
+ ret = iommu_attach_device(pcfg->iommu_domain, dev);
+ if (ret < 0) {
+ dev_err(dev, "%s(): iommu_device_attach() = %d", __func__,
+ ret);
+ goto out_domain_free;
+ }
+
+no_iommu:
+#endif
+ qman_set_sdest(pcfg->channel, cpu);
+
+ return;
+
+#ifdef CONFIG_FSL_PAMU
+out_domain_free:
+ iommu_domain_free(pcfg->iommu_domain);
+ pcfg->iommu_domain = NULL;
+#endif
+}
+
+static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
+{
+ struct qman_portal *p;
+ u32 irq_sources = 0;
+
+ /* We need the same LIODN offset for all portals */
+ qman_liodn_fixup(pcfg->channel);
+
+ pcfg->iommu_domain = NULL;
+ portal_set_cpu(pcfg, pcfg->cpu);
+
+ p = qman_create_affine_portal(pcfg, NULL);
+ if (!p) {
+ dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n",
+ __func__, pcfg->cpu);
+ return NULL;
+ }
+
+ /* Determine what should be interrupt-vs-poll driven */
+#ifdef CONFIG_FSL_DPA_PIRQ_SLOW
+ irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI |
+ QM_PIRQ_CSCI;
+#endif
+#ifdef CONFIG_FSL_DPA_PIRQ_FAST
+ irq_sources |= QM_PIRQ_DQRI;
+#endif
+ qman_p_irqsource_add(p, irq_sources);
+
+ spin_lock(&qman_lock);
+ if (cpumask_equal(&portal_cpus, cpu_possible_mask)) {
+ /* all assigned portals are initialized now */
+ qman_init_cgr_all();
+ }
+
+ if (!qman_dma_portal)
+ qman_dma_portal = p;
+
+ spin_unlock(&qman_lock);
+
+ dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
+
+ return p;
+}
+
+static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
+ unsigned int cpu)
+{
+#ifdef CONFIG_FSL_PAMU /* TODO */
+ if (pcfg->iommu_domain) {
+ if (fsl_pamu_configure_l1_stash(pcfg->iommu_domain, cpu) < 0) {
+ dev_err(pcfg->dev,
+ "Failed to update pamu stash setting\n");
+ return;
+ }
+ }
+#endif
+ qman_set_sdest(pcfg->channel, cpu);
+}
+
+static int qman_offline_cpu(unsigned int cpu)
+{
+ struct qman_portal *p;
+ const struct qm_portal_config *pcfg;
+
+ p = affine_portals[cpu];
+ if (p) {
+ pcfg = qman_get_qm_portal_config(p);
+ if (pcfg) {
+ /* select any other online CPU */
+ cpu = cpumask_any_but(cpu_online_mask, cpu);
+ irq_set_affinity(pcfg->irq, cpumask_of(cpu));
+ qman_portal_update_sdest(pcfg, cpu);
+ }
+ }
+ return 0;
+}
+
+static int qman_online_cpu(unsigned int cpu)
+{
+ struct qman_portal *p;
+ const struct qm_portal_config *pcfg;
+
+ p = affine_portals[cpu];
+ if (p) {
+ pcfg = qman_get_qm_portal_config(p);
+ if (pcfg) {
+ irq_set_affinity(pcfg->irq, cpumask_of(cpu));
+ qman_portal_update_sdest(pcfg, cpu);
+ }
+ }
+ return 0;
+}
+
+int qman_portals_probed(void)
+{
+ return __qman_portals_probed;
+}
+EXPORT_SYMBOL_GPL(qman_portals_probed);
+
+static int qman_portal_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct qm_portal_config *pcfg;
+ struct resource *addr_phys[2];
+ int irq, cpu, err, i;
+ u32 val;
+
+ err = qman_is_probed();
+ if (!err)
+ return -EPROBE_DEFER;
+ if (err < 0) {
+ dev_err(&pdev->dev, "failing probe due to qman probe error\n");
+ return -ENODEV;
+ }
+
+ pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
+ if (!pcfg) {
+ __qman_portals_probed = -1;
+ return -ENOMEM;
+ }
+
+ pcfg->dev = dev;
+
+ addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM,
+ DPAA_PORTAL_CE);
+ if (!addr_phys[0]) {
+ dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node);
+ goto err_ioremap1;
+ }
+
+ addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
+ DPAA_PORTAL_CI);
+ if (!addr_phys[1]) {
+ dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node);
+ goto err_ioremap1;
+ }
+
+ err = of_property_read_u32(node, "cell-index", &val);
+ if (err) {
+ dev_err(dev, "Can't get %pOF property 'cell-index'\n", node);
+ __qman_portals_probed = -1;
+ return err;
+ }
+ pcfg->channel = val;
+ pcfg->cpu = -1;
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ goto err_ioremap1;
+ pcfg->irq = irq;
+
+ pcfg->addr_virt_ce = memremap(addr_phys[0]->start,
+ resource_size(addr_phys[0]),
+ QBMAN_MEMREMAP_ATTR);
+ if (!pcfg->addr_virt_ce) {
+ dev_err(dev, "memremap::CE failed\n");
+ goto err_ioremap1;
+ }
+
+ pcfg->addr_virt_ci = ioremap(addr_phys[1]->start,
+ resource_size(addr_phys[1]));
+ if (!pcfg->addr_virt_ci) {
+ dev_err(dev, "ioremap::CI failed\n");
+ goto err_ioremap2;
+ }
+
+ pcfg->pools = qm_get_pools_sdqcr();
+
+ spin_lock(&qman_lock);
+ cpu = cpumask_first_zero(&portal_cpus);
+ if (cpu >= nr_cpu_ids) {
+ __qman_portals_probed = 1;
+ /* unassigned portal, skip init */
+ spin_unlock(&qman_lock);
+ goto check_cleanup;
+ }
+
+ cpumask_set_cpu(cpu, &portal_cpus);
+ spin_unlock(&qman_lock);
+ pcfg->cpu = cpu;
+
+ if (dma_set_mask(dev, DMA_BIT_MASK(40))) {
+ dev_err(dev, "dma_set_mask() failed\n");
+ goto err_portal_init;
+ }
+
+ if (!init_pcfg(pcfg)) {
+ dev_err(dev, "portal init failed\n");
+ goto err_portal_init;
+ }
+
+ /* clear irq affinity if assigned cpu is offline */
+ if (!cpu_online(cpu))
+ qman_offline_cpu(cpu);
+
+check_cleanup:
+ if (__qman_portals_probed == 1 && qman_requires_cleanup()) {
+ /*
+ * QMan wasn't reset prior to boot (Kexec for example)
+ * Empty all the frame queues so they are in reset state
+ */
+ for (i = 0; i < qm_get_fqid_maxcnt(); i++) {
+ err = qman_shutdown_fq(i);
+ if (err) {
+ dev_err(dev, "Failed to shutdown frame queue %d\n",
+ i);
+ goto err_portal_init;
+ }
+ }
+ qman_done_cleanup();
+ }
+
+ return 0;
+
+err_portal_init:
+ iounmap(pcfg->addr_virt_ci);
+err_ioremap2:
+ memunmap(pcfg->addr_virt_ce);
+err_ioremap1:
+ __qman_portals_probed = -1;
+
+ return -ENXIO;
+}
+
+static const struct of_device_id qman_portal_ids[] = {
+ {
+ .compatible = "fsl,qman-portal",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qman_portal_ids);
+
+static struct platform_driver qman_portal_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = qman_portal_ids,
+ },
+ .probe = qman_portal_probe,
+};
+
+static int __init qman_portal_driver_register(struct platform_driver *drv)
+{
+ int ret;
+
+ ret = platform_driver_register(drv);
+ if (ret < 0)
+ return ret;
+
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "soc/qman_portal:online",
+ qman_online_cpu, qman_offline_cpu);
+ if (ret < 0) {
+ pr_err("qman: failed to register hotplug callbacks.\n");
+ platform_driver_unregister(drv);
+ return ret;
+ }
+ return 0;
+}
+
+module_driver(qman_portal_driver,
+ qman_portal_driver_register, platform_driver_unregister);
diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h
new file mode 100644
index 0000000000..fd1cf543fb
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_priv.h
@@ -0,0 +1,282 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "dpaa_sys.h"
+
+#include <soc/fsl/qman.h>
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+
+#if defined(CONFIG_FSL_PAMU)
+#include <asm/fsl_pamu_stash.h>
+#endif
+
+struct qm_mcr_querywq {
+ u8 verb;
+ u8 result;
+ u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */
+ u8 __reserved[28];
+ u32 wq_len[8];
+} __packed;
+
+static inline u16 qm_mcr_querywq_get_chan(const struct qm_mcr_querywq *wq)
+{
+ return wq->channel_wq >> 3;
+}
+
+struct __qm_mcr_querycongestion {
+ u32 state[8];
+};
+
+/* "Query Congestion Group State" */
+struct qm_mcr_querycongestion {
+ u8 verb;
+ u8 result;
+ u8 __reserved[30];
+ /* Access this struct using qman_cgrs_get() */
+ struct __qm_mcr_querycongestion state;
+} __packed;
+
+/* "Query CGR" */
+struct qm_mcr_querycgr {
+ u8 verb;
+ u8 result;
+ u16 __reserved1;
+ struct __qm_mc_cgr cgr; /* CGR fields */
+ u8 __reserved2[6];
+ u8 i_bcnt_hi; /* high 8-bits of 40-bit "Instant" */
+ __be32 i_bcnt_lo; /* low 32-bits of 40-bit */
+ u8 __reserved3[3];
+ u8 a_bcnt_hi; /* high 8-bits of 40-bit "Average" */
+ __be32 a_bcnt_lo; /* low 32-bits of 40-bit */
+ __be32 cscn_targ_swp[4];
+} __packed;
+
+static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q)
+{
+ return ((u64)q->i_bcnt_hi << 32) | be32_to_cpu(q->i_bcnt_lo);
+}
+static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
+{
+ return ((u64)q->a_bcnt_hi << 32) | be32_to_cpu(q->a_bcnt_lo);
+}
+
+/* Congestion Groups */
+
+/*
+ * This wrapper represents a bit-array for the state of the 256 QMan congestion
+ * groups. It is also used as a *mask* for congestion groups, e.g. so we ignore
+ * those that don't concern us. We harness the structure and accessor details
+ * already used in the management command to query congestion groups.
+ */
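+/*
+ * CGR_BITS_PER_WORD is the word-index shift (each state word holds
+ * 1 << 5 = 32 CGR bits); CGR_BIT() indexes those bits starting from the MSB.
+ */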
+#define CGR_BITS_PER_WORD 5
+#define CGR_WORD(x) ((x) >> CGR_BITS_PER_WORD)
+#define CGR_BIT(x) (BIT(31) >> ((x) & 0x1f))
+#define CGR_NUM (sizeof(struct __qm_mcr_querycongestion) << 3)
+
+struct qman_cgrs {
+ struct __qm_mcr_querycongestion q;
+};
+
+static inline void qman_cgrs_init(struct qman_cgrs *c)
+{
+ memset(c, 0, sizeof(*c));
+}
+
+static inline void qman_cgrs_fill(struct qman_cgrs *c)
+{
+ memset(c, 0xff, sizeof(*c));
+}
+
+static inline int qman_cgrs_get(struct qman_cgrs *c, u8 cgr)
+{
+ return c->q.state[CGR_WORD(cgr)] & CGR_BIT(cgr);
+}
+
+static inline void qman_cgrs_cp(struct qman_cgrs *dest,
+ const struct qman_cgrs *src)
+{
+ *dest = *src;
+}
+
+static inline void qman_cgrs_and(struct qman_cgrs *dest,
+ const struct qman_cgrs *a, const struct qman_cgrs *b)
+{
+ int ret;
+ u32 *_d = dest->q.state;
+ const u32 *_a = a->q.state;
+ const u32 *_b = b->q.state;
+
+ for (ret = 0; ret < 8; ret++)
+ *_d++ = *_a++ & *_b++;
+}
+
+static inline void qman_cgrs_xor(struct qman_cgrs *dest,
+ const struct qman_cgrs *a, const struct qman_cgrs *b)
+{
+ int ret;
+ u32 *_d = dest->q.state;
+ const u32 *_a = a->q.state;
+ const u32 *_b = b->q.state;
+
+ for (ret = 0; ret < 8; ret++)
+ *_d++ = *_a++ ^ *_b++;
+}
+
+void qman_init_cgr_all(void);
+
+struct qm_portal_config {
+ /* Portal addresses */
+ void *addr_virt_ce;
+ void __iomem *addr_virt_ci;
+ struct device *dev;
+ struct iommu_domain *iommu_domain;
+ /* Allow these to be joined in lists */
+ struct list_head list;
+ /* User-visible portal configuration settings */
+ /* portal is affined to this cpu */
+ int cpu;
+ /* portal interrupt line */
+ int irq;
+ /*
+ * the portal's dedicated channel id, used when initialising
+ * frame queues to target this portal when scheduled
+ */
+ u16 channel;
+ /*
+ * mask of pool channels this portal has dequeue access to
+ * (using QM_SDQCR_CHANNELS_POOL(n) for the bitmask)
+ */
+ u32 pools;
+};
+
+/* Revision info (for errata and feature handling) */
+#define QMAN_REV11 0x0101
+#define QMAN_REV12 0x0102
+#define QMAN_REV20 0x0200
+#define QMAN_REV30 0x0300
+#define QMAN_REV31 0x0301
+#define QMAN_REV32 0x0302
+extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
+
+#define QM_FQID_RANGE_START 1 /* FQID 0 reserved for internal use */
+extern struct gen_pool *qm_fqalloc; /* FQID allocator */
+extern struct gen_pool *qm_qpalloc; /* pool-channel allocator */
+extern struct gen_pool *qm_cgralloc; /* CGR ID allocator */
+u32 qm_get_pools_sdqcr(void);
+
+int qman_wq_alloc(void);
+#ifdef CONFIG_FSL_PAMU
+#define qman_liodn_fixup __qman_liodn_fixup
+#else
+static inline void qman_liodn_fixup(u16 channel)
+{
+}
+#endif
+void __qman_liodn_fixup(u16 channel);
+void qman_set_sdest(u16 channel, unsigned int cpu_idx);
+
+struct qman_portal *qman_create_affine_portal(
+ const struct qm_portal_config *config,
+ const struct qman_cgrs *cgrs);
+const struct qm_portal_config *qman_destroy_affine_portal(void);
+
+/*
+ * qman_query_fq - Queries FQD fields (via h/w query command)
+ * @fq: the frame queue object to be queried
+ * @fqd: storage for the queried FQD fields
+ */
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
+
+int qman_alloc_fq_table(u32 num_fqids);
+
+/* QMan s/w corenet portal, low-level i/face */
+
+/*
+ * For qm_dqrr_sdqcr_set(); Choose one SOURCE. Choose one COUNT. Choose one
+ * dequeue TYPE. Choose TOKEN (8-bit).
+ * If SOURCE == CHANNELS,
+ * Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n).
+ * You can choose DEDICATED_PRECEDENCE if the portal channel should have
+ * priority.
+ * If SOURCE == SPECIFICWQ,
+ * Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
+ * channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
+ * work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
+ * same value.
+ */
+#define QM_SDQCR_SOURCE_CHANNELS 0x0
+#define QM_SDQCR_SOURCE_SPECIFICWQ 0x40000000
+#define QM_SDQCR_COUNT_EXACT1 0x0
+#define QM_SDQCR_COUNT_UPTO3 0x20000000
+#define QM_SDQCR_DEDICATED_PRECEDENCE 0x10000000
+#define QM_SDQCR_TYPE_MASK 0x03000000
+#define QM_SDQCR_TYPE_NULL 0x0
+#define QM_SDQCR_TYPE_PRIO_QOS 0x01000000
+#define QM_SDQCR_TYPE_ACTIVE_QOS 0x02000000
+#define QM_SDQCR_TYPE_ACTIVE 0x03000000
+#define QM_SDQCR_TOKEN_MASK 0x00ff0000
+#define QM_SDQCR_TOKEN_SET(v) (((v) & 0xff) << 16)
+#define QM_SDQCR_TOKEN_GET(v) (((v) >> 16) & 0xff)
+#define QM_SDQCR_CHANNELS_DEDICATED 0x00008000
+#define QM_SDQCR_SPECIFICWQ_MASK 0x000000f7
+#define QM_SDQCR_SPECIFICWQ_DEDICATED 0x00000000
+#define QM_SDQCR_SPECIFICWQ_POOL(n) ((n) << 4)
+#define QM_SDQCR_SPECIFICWQ_WQ(n) (n)
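+
+/*
+ * Example composition (as used by the qman_test_api self-test): dequeue from
+ * the dedicated channel and pool channel 2, PRIO_QOS type, token 0x98:
+ *
+ *   QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_TYPE_PRIO_QOS |
+ *   QM_SDQCR_TOKEN_SET(0x98) | QM_SDQCR_CHANNELS_DEDICATED |
+ *   QM_SDQCR_CHANNELS_POOL(2)
+ */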
+
+/* For qm_dqrr_vdqcr_set(): use FQID(n) to fill in the frame queue ID */
+#define QM_VDQCR_FQID_MASK 0x00ffffff
+#define QM_VDQCR_FQID(n) ((n) & QM_VDQCR_FQID_MASK)
+
+/*
+ * Used by all portal interrupt registers except 'inhibit'
+ * Channels with frame availability
+ */
+#define QM_PIRQ_DQAVAIL 0x0000ffff
+
+/* The DQAVAIL interrupt fields break down into these bits; */
+#define QM_DQAVAIL_PORTAL 0x8000 /* Portal channel */
+#define QM_DQAVAIL_POOL(n) (0x8000 >> (n)) /* Pool channel, n==[1..15] */
+#define QM_DQAVAIL_MASK 0xffff
+/* This mask contains all the "irqsource" bits visible to API users */
+#define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI)
+
+extern struct qman_portal *affine_portals[NR_CPUS];
+extern struct qman_portal *qman_dma_portal;
+const struct qm_portal_config *qman_get_qm_portal_config(
+ struct qman_portal *portal);
+
+unsigned int qm_get_fqid_maxcnt(void);
+
+int qman_shutdown_fq(u32 fqid);
+
+int qman_requires_cleanup(void);
+void qman_done_cleanup(void);
+void qman_enable_irqs(void);
diff --git a/drivers/soc/fsl/qbman/qman_test.c b/drivers/soc/fsl/qbman/qman_test.c
new file mode 100644
index 0000000000..18f7f0202f
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_test.c
@@ -0,0 +1,62 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_test.h"
+
+MODULE_AUTHOR("Geoff Thorpe");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("QMan testing");
+
+static int test_init(void)
+{
+ int loop = 1;
+ int err = 0;
+
+ while (loop--) {
+#ifdef CONFIG_FSL_QMAN_TEST_STASH
+ err = qman_test_stash();
+ if (err)
+ break;
+#endif
+#ifdef CONFIG_FSL_QMAN_TEST_API
+ err = qman_test_api();
+ if (err)
+ break;
+#endif
+ }
+ return err;
+}
+
+static void test_exit(void)
+{
+}
+
+module_init(test_init);
+module_exit(test_exit);
diff --git a/drivers/soc/fsl/qbman/qman_test.h b/drivers/soc/fsl/qbman/qman_test.h
new file mode 100644
index 0000000000..41bdbc48ca
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_test.h
@@ -0,0 +1,34 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+int qman_test_stash(void);
+int qman_test_api(void);
diff --git a/drivers/soc/fsl/qbman/qman_test_api.c b/drivers/soc/fsl/qbman/qman_test_api.c
new file mode 100644
index 0000000000..28fbddc3c2
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_test_api.c
@@ -0,0 +1,247 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_test.h"
+
+#define CGR_ID 27
+#define POOL_ID 2
+#define FQ_FLAGS QMAN_FQ_FLAG_DYNAMIC_FQID
+#define NUM_ENQUEUES 10
+#define NUM_PARTIAL 4
+#define PORTAL_SDQCR (QM_SDQCR_SOURCE_CHANNELS | \
+ QM_SDQCR_TYPE_PRIO_QOS | \
+ QM_SDQCR_TOKEN_SET(0x98) | \
+ QM_SDQCR_CHANNELS_DEDICATED | \
+ QM_SDQCR_CHANNELS_POOL(POOL_ID))
+#define PORTAL_OPAQUE ((void *)0xf00dbeef)
+#define VDQCR_FLAGS (QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH)
+
+static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *,
+ struct qman_fq *,
+ const struct qm_dqrr_entry *,
+ bool sched_napi);
+static void cb_ern(struct qman_portal *, struct qman_fq *,
+ const union qm_mr_entry *);
+static void cb_fqs(struct qman_portal *, struct qman_fq *,
+ const union qm_mr_entry *);
+
+static struct qm_fd fd, fd_dq;
+static struct qman_fq fq_base = {
+ .cb.dqrr = cb_dqrr,
+ .cb.ern = cb_ern,
+ .cb.fqs = cb_fqs
+};
+static DECLARE_WAIT_QUEUE_HEAD(waitqueue);
+static int retire_complete, sdqcr_complete;
+
+/* Helpers for initialising and "incrementing" a frame descriptor */
+static void fd_init(struct qm_fd *fd)
+{
+ qm_fd_addr_set64(fd, 0xabdeadbeefLLU);
+ qm_fd_set_contig_big(fd, 0x0000ffff);
+ fd->cmd = cpu_to_be32(0xfeedf00d);
+}
+
+static void fd_inc(struct qm_fd *fd)
+{
+ u64 t = qm_fd_addr_get64(fd);
+ int z = t >> 40;
+ unsigned int len, off;
+ enum qm_fd_format fmt;
+
+ t <<= 1;
+ if (z)
+ t |= 1;
+ qm_fd_addr_set64(fd, t);
+
+ fmt = qm_fd_get_format(fd);
+ off = qm_fd_get_offset(fd);
+ len = qm_fd_get_length(fd);
+ len--;
+ qm_fd_set_param(fd, fmt, off, len);
+
+ be32_add_cpu(&fd->cmd, 1);
+}
+
+/* The only part of the 'fd' we can't memcmp() is the ppid */
+static bool fd_neq(const struct qm_fd *a, const struct qm_fd *b)
+{
+ bool neq = qm_fd_addr_get64(a) != qm_fd_addr_get64(b);
+
+ neq |= qm_fd_get_format(a) != qm_fd_get_format(b);
+ neq |= a->cfg != b->cfg;
+ neq |= a->cmd != b->cmd;
+
+ return neq;
+}
+
+/* test */
+static int do_enqueues(struct qman_fq *fq)
+{
+ unsigned int loop;
+ int err = 0;
+
+ for (loop = 0; loop < NUM_ENQUEUES; loop++) {
+ if (qman_enqueue(fq, &fd)) {
+ pr_crit("qman_enqueue() failed\n");
+ err = -EIO;
+ }
+ fd_inc(&fd);
+ }
+
+ return err;
+}
+
+int qman_test_api(void)
+{
+ unsigned int flags, frmcnt;
+ int err;
+ struct qman_fq *fq = &fq_base;
+
+ pr_info("%s(): Starting\n", __func__);
+ fd_init(&fd);
+ fd_init(&fd_dq);
+
+ /* Initialise (parked) FQ */
+ err = qman_create_fq(0, FQ_FLAGS, fq);
+ if (err) {
+ pr_crit("qman_create_fq() failed\n");
+ goto failed;
+ }
+ err = qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL);
+ if (err) {
+ pr_crit("qman_init_fq() failed\n");
+ goto failed;
+ }
+ /* Do enqueues + VDQCR, twice. (Parked FQ) */
+ err = do_enqueues(fq);
+ if (err)
+ goto failed;
+ pr_info("VDQCR (till-empty);\n");
+ frmcnt = QM_VDQCR_NUMFRAMES_TILLEMPTY;
+ err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
+ if (err) {
+ pr_crit("qman_volatile_dequeue() failed\n");
+ goto failed;
+ }
+ err = do_enqueues(fq);
+ if (err)
+ goto failed;
+ pr_info("VDQCR (%d of %d);\n", NUM_PARTIAL, NUM_ENQUEUES);
+ frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_PARTIAL);
+ err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
+ if (err) {
+ pr_crit("qman_volatile_dequeue() failed\n");
+ goto failed;
+ }
+ pr_info("VDQCR (%d of %d);\n", NUM_ENQUEUES - NUM_PARTIAL,
+ NUM_ENQUEUES);
+ frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_ENQUEUES - NUM_PARTIAL);
+ err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
+ if (err) {
+ pr_err("qman_volatile_dequeue() failed\n");
+ goto failed;
+ }
+
+ err = do_enqueues(fq);
+ if (err)
+ goto failed;
+ pr_info("scheduled dequeue (till-empty)\n");
+ err = qman_schedule_fq(fq);
+ if (err) {
+ pr_crit("qman_schedule_fq() failed\n");
+ goto failed;
+ }
+ wait_event(waitqueue, sdqcr_complete);
+
+ /* Retire and OOS the FQ */
+ err = qman_retire_fq(fq, &flags);
+ if (err < 0) {
+ pr_crit("qman_retire_fq() failed\n");
+ goto failed;
+ }
+ wait_event(waitqueue, retire_complete);
+ if (flags & QMAN_FQ_STATE_BLOCKOOS) {
+ err = -EIO;
+ pr_crit("leaking frames\n");
+ goto failed;
+ }
+ err = qman_oos_fq(fq);
+ if (err) {
+ pr_crit("qman_oos_fq() failed\n");
+ goto failed;
+ }
+ qman_destroy_fq(fq);
+ pr_info("%s(): Finished\n", __func__);
+ return 0;
+
+failed:
+ WARN_ON(1);
+ return err;
+}
+
+static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq,
+ bool sched_napi)
+{
+ if (WARN_ON(fd_neq(&fd_dq, &dq->fd))) {
+ pr_err("BADNESS: dequeued frame doesn't match;\n");
+ return qman_cb_dqrr_consume;
+ }
+ fd_inc(&fd_dq);
+ if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_neq(&fd_dq, &fd)) {
+ sdqcr_complete = 1;
+ wake_up(&waitqueue);
+ }
+ return qman_cb_dqrr_consume;
+}
+
+static void cb_ern(struct qman_portal *p, struct qman_fq *fq,
+ const union qm_mr_entry *msg)
+{
+ pr_crit("cb_ern() unimplemented");
+ WARN_ON(1);
+}
+
+static void cb_fqs(struct qman_portal *p, struct qman_fq *fq,
+ const union qm_mr_entry *msg)
+{
+ u8 verb = (msg->verb & QM_MR_VERB_TYPE_MASK);
+
+ if ((verb != QM_MR_VERB_FQRN) && (verb != QM_MR_VERB_FQRNI)) {
+ pr_crit("unexpected FQS message");
+ WARN_ON(1);
+ return;
+ }
+ pr_info("Retirement message received\n");
+ retire_complete = 1;
+ wake_up(&waitqueue);
+}
diff --git a/drivers/soc/fsl/qbman/qman_test_stash.c b/drivers/soc/fsl/qbman/qman_test_stash.c
new file mode 100644
index 0000000000..b7e8e5ec88
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_test_stash.c
@@ -0,0 +1,629 @@
+/* Copyright 2009 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_test.h"
+
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+
+/*
+ * Algorithm:
+ *
+ * Each cpu will have HP_PER_CPU "handlers" set up, each of which incorporates
+ * an rx/tx pair of FQ objects (both of which are stashed on dequeue). The
+ * organisation of FQIDs is such that the HP_PER_CPU*NUM_CPUS handlers will
+ * shuttle a "hot potato" frame around them such that every forwarding action
+ * moves it from one cpu to another. (The use of more than one handler per cpu
+ * is to allow enough handlers/FQs to truly test the significance of caching -
+ * ie. when cache-expiries are occurring.)
+ *
+ * The "hot potato" frame content will be HP_NUM_WORDS*4 bytes in size, and the
+ * first and last words of the frame data will undergo a transformation step on
+ * each forwarding action. To achieve this, each handler will be assigned a
+ * 32-bit "mixer", that is produced using a 32-bit LFSR. When a frame is
+ * received by a handler, the mixer of the expected sender is XOR'd into all
+ * words of the entire frame, which is then validated against the original
+ * values. Then, before forwarding, the entire frame is XOR'd with the mixer of
+ * the current handler. Apart from validating that the frame is taking the
+ * expected path, this also provides some quasi-realistic overheads to each
+ * forwarding action - dereferencing *all* the frame data, computation, and
+ * conditional branching. There is a "special" handler designated to act as the
+ * instigator of the test by creating and enqueuing the "hot potato" frame, and
+ * to determine when the test has completed by counting HP_LOOPS iterations.
+ *
+ * Init phases:
+ *
+ * 1. prepare each cpu's 'hp_cpu' struct using on_each_cpu(,,1) and link them
+ * into 'hp_cpu_list'. Specifically, set processor_id, allocate HP_PER_CPU
+ * handlers and link-list them (but do no other handler setup).
+ *
+ * 2. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
+ * hp_cpu's 'iterator' to point to its first handler. With each loop,
+ * allocate rx/tx FQIDs and mixer values to the hp_cpu's iterator handler
+ * and advance the iterator for the next loop. This includes a final fixup,
+ * which connects the last handler to the first (and which is why phase 2
+ * and 3 are separate).
+ *
+ * 3. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
+ * hp_cpu's 'iterator' to point to its first handler. With each loop,
+ * initialise FQ objects and advance the iterator for the next loop.
+ * Moreover, do this initialisation on the cpu it applies to so that Rx FQ
+ * initialisation targets the correct cpu.
+ */
+
+/*
+ * helper to run something on all cpus (can't use on_each_cpu(), as that invokes
+ * the fn from irq context, which is too restrictive).
+ */
+struct bstrap {
+ int (*fn)(void);
+ atomic_t started;
+};
+static int bstrap_fn(void *bs)
+{
+ struct bstrap *bstrap = bs;
+ int err;
+
+ atomic_inc(&bstrap->started);
+ err = bstrap->fn();
+ if (err)
+ return err;
+ while (!kthread_should_stop())
+ msleep(20);
+ return 0;
+}
+static int on_all_cpus(int (*fn)(void))
+{
+ int cpu;
+
+ for_each_cpu(cpu, cpu_online_mask) {
+ struct bstrap bstrap = {
+ .fn = fn,
+ .started = ATOMIC_INIT(0)
+ };
+ struct task_struct *k = kthread_create(bstrap_fn, &bstrap,
+ "hotpotato%d", cpu);
+ int ret;
+
+ if (IS_ERR(k))
+ return -ENOMEM;
+ kthread_bind(k, cpu);
+ wake_up_process(k);
+ /*
+ * If we call kthread_stop() before the "wake up" has had an
+ * effect, then the thread may exit with -EINTR without ever
+ * running the function. So poll until it's started before
+ * requesting it to stop.
+ */
+ while (!atomic_read(&bstrap.started))
+ msleep(20);
+ ret = kthread_stop(k);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+struct hp_handler {
+
+ /* The following data is stashed when 'rx' is dequeued; */
+ /* -------------- */
+ /* The Rx FQ, dequeues of which will stash the entire hp_handler */
+ struct qman_fq rx;
+ /* The Tx FQ we should forward to */
+ struct qman_fq tx;
+ /* The value we XOR post-dequeue, prior to validating */
+ u32 rx_mixer;
+ /* The value we XOR pre-enqueue, after validating */
+ u32 tx_mixer;
+ /* what the hotpotato address should be on dequeue */
+ dma_addr_t addr;
+ u32 *frame_ptr;
+
+ /* The following data isn't (necessarily) stashed on dequeue; */
+ /* -------------- */
+ u32 fqid_rx, fqid_tx;
+ /* list node for linking us into 'hp_cpu' */
+ struct list_head node;
+ /* Just to check ... */
+ unsigned int processor_id;
+} ____cacheline_aligned;
+
+struct hp_cpu {
+ /* identify the cpu we run on; */
+ unsigned int processor_id;
+ /* root node for the per-cpu list of handlers */
+ struct list_head handlers;
+ /* list node for linking us into 'hp_cpu_list' */
+ struct list_head node;
+ /*
+ * when repeatedly scanning 'hp_list', each time linking the n'th
+ * handlers together, this is used as per-cpu iterator state
+ */
+ struct hp_handler *iterator;
+};
+
+/* Each cpu has one of these */
+static DEFINE_PER_CPU(struct hp_cpu, hp_cpus);
+
+/* links together the hp_cpu structs, in first-come first-serve order. */
+static LIST_HEAD(hp_cpu_list);
+static DEFINE_SPINLOCK(hp_lock);
+
+static unsigned int hp_cpu_list_length;
+
+/* the "special" handler, that starts and terminates the test. */
+static struct hp_handler *special_handler;
+static int loop_counter;
+
+/* handlers are allocated out of this, so they're properly aligned. */
+static struct kmem_cache *hp_handler_slab;
+
+/* this is the frame data */
+static void *__frame_ptr;
+static u32 *frame_ptr;
+static dma_addr_t frame_dma;
+
+/* needed for dma_map*() */
+static const struct qm_portal_config *pcfg;
+
+/* the main function waits on this */
+static DECLARE_WAIT_QUEUE_HEAD(queue);
+
+#define HP_PER_CPU 2
+#define HP_LOOPS 8
+/* 80 words (320 bytes), a small ethernet frame; spills past one cacheline */
+#define HP_NUM_WORDS 80
+/* First word of the LFSR-based frame data */
+#define HP_FIRST_WORD 0xabbaf00d
+
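+/*
+ * One step of a right-shifting (Galois-style) 32-bit LFSR, used to generate
+ * and later verify the pseudo-random frame contents.
+ */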
+static inline u32 do_lfsr(u32 prev)
+{
+ return (prev >> 1) ^ (-(prev & 1u) & 0xd0000001u);
+}
+
+static int allocate_frame_data(void)
+{
+ u32 lfsr = HP_FIRST_WORD;
+ int loop;
+
+ if (!qman_dma_portal) {
+ pr_crit("portal not available\n");
+ return -EIO;
+ }
+
+ pcfg = qman_get_qm_portal_config(qman_dma_portal);
+
+ __frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL);
+ if (!__frame_ptr)
+ return -ENOMEM;
+
+ frame_ptr = PTR_ALIGN(__frame_ptr, 64);
+ for (loop = 0; loop < HP_NUM_WORDS; loop++) {
+ frame_ptr[loop] = lfsr;
+ lfsr = do_lfsr(lfsr);
+ }
+
+ frame_dma = dma_map_single(pcfg->dev, frame_ptr, 4 * HP_NUM_WORDS,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(pcfg->dev, frame_dma)) {
+ pr_crit("dma mapping failure\n");
+ kfree(__frame_ptr);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void deallocate_frame_data(void)
+{
+ dma_unmap_single(pcfg->dev, frame_dma, 4 * HP_NUM_WORDS,
+ DMA_BIDIRECTIONAL);
+ kfree(__frame_ptr);
+}
+
+static inline int process_frame_data(struct hp_handler *handler,
+ const struct qm_fd *fd)
+{
+ u32 *p = handler->frame_ptr;
+ u32 lfsr = HP_FIRST_WORD;
+ int loop;
+
+ if (qm_fd_addr_get64(fd) != handler->addr) {
+ pr_crit("bad frame address, [%llX != %llX]\n",
+ qm_fd_addr_get64(fd), handler->addr);
+ return -EIO;
+ }
+ for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
+ *p ^= handler->rx_mixer;
+ if (*p != lfsr) {
+ pr_crit("corrupt frame data");
+ return -EIO;
+ }
+ *p ^= handler->tx_mixer;
+ lfsr = do_lfsr(lfsr);
+ }
+ return 0;
+}
+
+static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr,
+ bool sched_napi)
+{
+ struct hp_handler *handler = (struct hp_handler *)fq;
+
+ if (process_frame_data(handler, &dqrr->fd)) {
+ WARN_ON(1);
+ goto skip;
+ }
+ if (qman_enqueue(&handler->tx, &dqrr->fd)) {
+ pr_crit("qman_enqueue() failed");
+ WARN_ON(1);
+ }
+skip:
+ return qman_cb_dqrr_consume;
+}
+
+static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr,
+ bool sched_napi)
+{
+ struct hp_handler *handler = (struct hp_handler *)fq;
+
+ process_frame_data(handler, &dqrr->fd);
+ if (++loop_counter < HP_LOOPS) {
+ if (qman_enqueue(&handler->tx, &dqrr->fd)) {
+ pr_crit("qman_enqueue() failed");
+ WARN_ON(1);
+ goto skip;
+ }
+ } else {
+ pr_info("Received final (%dth) frame\n", loop_counter);
+ wake_up(&queue);
+ }
+skip:
+ return qman_cb_dqrr_consume;
+}
+
+static int create_per_cpu_handlers(void)
+{
+ struct hp_handler *handler;
+ int loop;
+ struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);
+
+ hp_cpu->processor_id = smp_processor_id();
+ spin_lock(&hp_lock);
+ list_add_tail(&hp_cpu->node, &hp_cpu_list);
+ hp_cpu_list_length++;
+ spin_unlock(&hp_lock);
+ INIT_LIST_HEAD(&hp_cpu->handlers);
+ for (loop = 0; loop < HP_PER_CPU; loop++) {
+ handler = kmem_cache_alloc(hp_handler_slab, GFP_KERNEL);
+ if (!handler) {
+ pr_crit("kmem_cache_alloc() failed");
+ WARN_ON(1);
+ return -EIO;
+ }
+ handler->processor_id = hp_cpu->processor_id;
+ handler->addr = frame_dma;
+ handler->frame_ptr = frame_ptr;
+ list_add_tail(&handler->node, &hp_cpu->handlers);
+ }
+ return 0;
+}
+
+static int destroy_per_cpu_handlers(void)
+{
+ struct list_head *loop, *tmp;
+ struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);
+
+ spin_lock(&hp_lock);
+ list_del(&hp_cpu->node);
+ spin_unlock(&hp_lock);
+ list_for_each_safe(loop, tmp, &hp_cpu->handlers) {
+ u32 flags = 0;
+ struct hp_handler *handler = list_entry(loop, struct hp_handler,
+ node);
+ if (qman_retire_fq(&handler->rx, &flags) ||
+ (flags & QMAN_FQ_STATE_BLOCKOOS)) {
+ pr_crit("qman_retire_fq(rx) failed, flags: %x", flags);
+ WARN_ON(1);
+ return -EIO;
+ }
+ if (qman_oos_fq(&handler->rx)) {
+ pr_crit("qman_oos_fq(rx) failed");
+ WARN_ON(1);
+ return -EIO;
+ }
+ qman_destroy_fq(&handler->rx);
+ qman_destroy_fq(&handler->tx);
+ qman_release_fqid(handler->fqid_rx);
+ list_del(&handler->node);
+ kmem_cache_free(hp_handler_slab, handler);
+ }
+ return 0;
+}
+
+static inline u8 num_cachelines(u32 offset)
+{
+ u8 res = (offset + (L1_CACHE_BYTES - 1))
+ / (L1_CACHE_BYTES);
+ if (res > 3)
+ return 3;
+ return res;
+}
+#define STASH_DATA_CL \
+ num_cachelines(HP_NUM_WORDS * 4)
+#define STASH_CTX_CL \
+ num_cachelines(offsetof(struct hp_handler, fqid_rx))
+
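For concreteness, and assuming L1_CACHE_BYTES is 64 on the target (an assumption; the value is platform dependent), the two macros evaluate roughly as follows:

/*
 * STASH_DATA_CL: the frame is HP_NUM_WORDS * 4 = 320 bytes,
 *                (320 + 63) / 64 = 5 cachelines, clamped to the
 *                maximum of 3 that can be stashed.
 * STASH_CTX_CL:  covers struct hp_handler up to (but not including)
 *                fqid_rx, i.e. the leading part of the struct that the
 *                dequeue path touches, likewise rounded up to whole
 *                cachelines and capped at 3.
 */
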
+static int init_handler(void *h)
+{
+ struct qm_mcc_initfq opts;
+ struct hp_handler *handler = h;
+ int err;
+
+ if (handler->processor_id != smp_processor_id()) {
+ err = -EIO;
+ goto failed;
+ }
+ /* Set up rx */
+ memset(&handler->rx, 0, sizeof(handler->rx));
+ if (handler == special_handler)
+ handler->rx.cb.dqrr = special_dqrr;
+ else
+ handler->rx.cb.dqrr = normal_dqrr;
+ err = qman_create_fq(handler->fqid_rx, 0, &handler->rx);
+ if (err) {
+ pr_crit("qman_create_fq(rx) failed");
+ goto failed;
+ }
+ memset(&opts, 0, sizeof(opts));
+ opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL |
+ QM_INITFQ_WE_CONTEXTA);
+ opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING);
+ qm_fqd_set_stashing(&opts.fqd, 0, STASH_DATA_CL, STASH_CTX_CL);
+ err = qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED |
+ QMAN_INITFQ_FLAG_LOCAL, &opts);
+ if (err) {
+ pr_crit("qman_init_fq(rx) failed");
+ goto failed;
+ }
+ /* Set up tx */
+ memset(&handler->tx, 0, sizeof(handler->tx));
+ err = qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY,
+ &handler->tx);
+ if (err) {
+ pr_crit("qman_create_fq(tx) failed");
+ goto failed;
+ }
+
+ return 0;
+failed:
+ return err;
+}
+
+static void init_handler_cb(void *h)
+{
+ if (init_handler(h))
+ WARN_ON(1);
+}
+
+static int init_phase2(void)
+{
+ int loop;
+ u32 fqid = 0;
+ u32 lfsr = 0xdeadbeef;
+ struct hp_cpu *hp_cpu;
+ struct hp_handler *handler;
+
+ for (loop = 0; loop < HP_PER_CPU; loop++) {
+ list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
+ int err;
+
+ if (!loop)
+ hp_cpu->iterator = list_first_entry(
+ &hp_cpu->handlers,
+ struct hp_handler, node);
+ else
+ hp_cpu->iterator = list_entry(
+ hp_cpu->iterator->node.next,
+ struct hp_handler, node);
+ /* Rx FQID is the previous handler's Tx FQID */
+ hp_cpu->iterator->fqid_rx = fqid;
+ /* Allocate new FQID for Tx */
+ err = qman_alloc_fqid(&fqid);
+ if (err) {
+ pr_crit("qman_alloc_fqid() failed");
+ return err;
+ }
+ hp_cpu->iterator->fqid_tx = fqid;
+ /* Rx mixer is the previous handler's Tx mixer */
+ hp_cpu->iterator->rx_mixer = lfsr;
+ /* Get new mixer for Tx */
+ lfsr = do_lfsr(lfsr);
+ hp_cpu->iterator->tx_mixer = lfsr;
+ }
+ }
+ /* Fix up the first handler (fqid_rx==0, rx_mixer=0xdeadbeef) */
+ hp_cpu = list_first_entry(&hp_cpu_list, struct hp_cpu, node);
+ handler = list_first_entry(&hp_cpu->handlers, struct hp_handler, node);
+ if (handler->fqid_rx != 0 || handler->rx_mixer != 0xdeadbeef)
+ return 1;
+ handler->fqid_rx = fqid;
+ handler->rx_mixer = lfsr;
+ /* and tag it as our "special" handler */
+ special_handler = handler;
+ return 0;
+}
+
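The net effect of phase 2 is a single ring threaded through every handler on every cpu: each handler's Tx FQID and tx_mixer become the next handler's Rx FQID and rx_mixer, and the very first handler (the "special" one) closes the loop. For two cpus (in hp_cpu_list order) with HP_PER_CPU == 2, the visiting order, and therefore the ring, looks like this (illustrative sketch only):

/*
 *   cpu0.h0 --tx--> cpu1.h0 --tx--> cpu0.h1 --tx--> cpu1.h1 --tx--> cpu0.h0 (special)
 *
 * On each hop process_frame_data() XORs out rx_mixer (the previous
 * handler's tx_mixer), verifies the LFSR payload, then XORs in its own
 * tx_mixer before re-enqueuing the frame to the next handler.
 */
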
+static int init_phase3(void)
+{
+ int loop, err;
+ struct hp_cpu *hp_cpu;
+
+ for (loop = 0; loop < HP_PER_CPU; loop++) {
+ list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
+ if (!loop)
+ hp_cpu->iterator = list_first_entry(
+ &hp_cpu->handlers,
+ struct hp_handler, node);
+ else
+ hp_cpu->iterator = list_entry(
+ hp_cpu->iterator->node.next,
+ struct hp_handler, node);
+ preempt_disable();
+ if (hp_cpu->processor_id == smp_processor_id()) {
+ err = init_handler(hp_cpu->iterator);
+ if (err)
+ return err;
+ } else {
+ smp_call_function_single(hp_cpu->processor_id,
+ init_handler_cb, hp_cpu->iterator, 1);
+ }
+ preempt_enable();
+ }
+ }
+ return 0;
+}
+
+static int send_first_frame(void *ignore)
+{
+ u32 *p = special_handler->frame_ptr;
+ u32 lfsr = HP_FIRST_WORD;
+ int loop, err;
+ struct qm_fd fd;
+
+ if (special_handler->processor_id != smp_processor_id()) {
+ err = -EIO;
+ goto failed;
+ }
+ memset(&fd, 0, sizeof(fd));
+ qm_fd_addr_set64(&fd, special_handler->addr);
+ qm_fd_set_contig_big(&fd, HP_NUM_WORDS * 4);
+ for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
+ if (*p != lfsr) {
+ err = -EIO;
+ pr_crit("corrupt frame data");
+ goto failed;
+ }
+ *p ^= special_handler->tx_mixer;
+ lfsr = do_lfsr(lfsr);
+ }
+ pr_info("Sending first frame\n");
+ err = qman_enqueue(&special_handler->tx, &fd);
+ if (err) {
+ pr_crit("qman_enqueue() failed");
+ goto failed;
+ }
+
+ return 0;
+failed:
+ return err;
+}
+
+static void send_first_frame_cb(void *ignore)
+{
+ if (send_first_frame(NULL))
+ WARN_ON(1);
+}
+
+int qman_test_stash(void)
+{
+ int err;
+
+ if (cpumask_weight(cpu_online_mask) < 2) {
+ pr_info("%s(): skip - only 1 CPU\n", __func__);
+ return 0;
+ }
+
+ pr_info("%s(): Starting\n", __func__);
+
+ hp_cpu_list_length = 0;
+ loop_counter = 0;
+ hp_handler_slab = kmem_cache_create("hp_handler_slab",
+ sizeof(struct hp_handler), L1_CACHE_BYTES,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!hp_handler_slab) {
+ err = -EIO;
+ pr_crit("kmem_cache_create() failed");
+ goto failed;
+ }
+
+ err = allocate_frame_data();
+ if (err)
+ goto failed;
+
+ /* Init phase 1 */
+ pr_info("Creating %d handlers per cpu...\n", HP_PER_CPU);
+ if (on_all_cpus(create_per_cpu_handlers)) {
+ err = -EIO;
+ pr_crit("on_each_cpu() failed");
+ goto failed;
+ }
+ pr_info("Number of cpus: %d, total of %d handlers\n",
+ hp_cpu_list_length, hp_cpu_list_length * HP_PER_CPU);
+
+ err = init_phase2();
+ if (err)
+ goto failed;
+
+ err = init_phase3();
+ if (err)
+ goto failed;
+
+ preempt_disable();
+ if (special_handler->processor_id == smp_processor_id()) {
+ err = send_first_frame(NULL);
+ if (err)
+ goto failed;
+ } else {
+ smp_call_function_single(special_handler->processor_id,
+ send_first_frame_cb, NULL, 1);
+ }
+ preempt_enable();
+
+ wait_event(queue, loop_counter == HP_LOOPS);
+ deallocate_frame_data();
+ if (on_all_cpus(destroy_per_cpu_handlers)) {
+ err = -EIO;
+ pr_crit("on_each_cpu() failed");
+ goto failed;
+ }
+ kmem_cache_destroy(hp_handler_slab);
+ pr_info("%s(): Finished\n", __func__);
+
+ return 0;
+failed:
+ WARN_ON(1);
+ return err;
+}
diff --git a/drivers/soc/fsl/qe/Kconfig b/drivers/soc/fsl/qe/Kconfig
new file mode 100644
index 0000000000..fa9ffbed0e
--- /dev/null
+++ b/drivers/soc/fsl/qe/Kconfig
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# QE Communication options
+#
+
+config QUICC_ENGINE
+ bool "QUICC Engine (QE) framework support"
+ depends on OF && HAS_IOMEM
+ depends on PPC || ARM || ARM64 || COMPILE_TEST
+ select GENERIC_ALLOCATOR
+ select CRC32
+ help
+ The QUICC Engine (QE) is a new generation of communications
+ coprocessors on Freescale embedded CPUs (akin to CPM in older chips).
+ Selecting this option means that you wish to build a kernel
+ for a machine with a QE coprocessor.
+
+config UCC_SLOW
+ bool
+ default y if SERIAL_QE
+ help
+ This option provides qe_lib support to UCC slow
+ protocols: UART, BISYNC, QMC
+
+config UCC_FAST
+ bool
+ default y if UCC_GETH || QE_TDM
+ help
+ This option provides qe_lib support to UCC fast
+ protocols: HDLC, Ethernet, ATM, transparent
+
+config UCC
+ bool
+ default y if UCC_FAST || UCC_SLOW
+
+config CPM_TSA
+ tristate "CPM TSA support"
+ depends on OF && HAS_IOMEM
+ depends on CPM1 || (CPM && COMPILE_TEST)
+ help
+ Freescale CPM Time Slot Assigner (TSA)
+ controller.
+
+ This option enables support for this
+ controller.
+
+config CPM_QMC
+ tristate "CPM QMC support"
+ depends on OF && HAS_IOMEM
+ depends on CPM1 || (FSL_SOC && CPM && COMPILE_TEST)
+ depends on CPM_TSA
+ help
+ Freescale CPM QUICC Multichannel Controller
+ (QMC)
+
+ This option enables support for this
+ controller.
+
+config QE_TDM
+ bool
+ default y if FSL_UCC_HDLC
+
+config QE_USB
+ bool
+ depends on QUICC_ENGINE
+ default y if USB_FSL_QE
+ help
+ QE USB Controller support
diff --git a/drivers/soc/fsl/qe/Makefile b/drivers/soc/fsl/qe/Makefile
new file mode 100644
index 0000000000..ec8506e131
--- /dev/null
+++ b/drivers/soc/fsl/qe/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the linux ppc-specific parts of QE
+#
+obj-$(CONFIG_QUICC_ENGINE)+= qe.o qe_common.o qe_ic.o qe_io.o
+obj-$(CONFIG_CPM) += qe_common.o
+obj-$(CONFIG_CPM_TSA) += tsa.o
+obj-$(CONFIG_CPM_QMC) += qmc.o
+obj-$(CONFIG_UCC) += ucc.o
+obj-$(CONFIG_UCC_SLOW) += ucc_slow.o
+obj-$(CONFIG_UCC_FAST) += ucc_fast.o
+obj-$(CONFIG_QE_TDM) += qe_tdm.o
+obj-$(CONFIG_QE_USB) += usb.o
+obj-$(CONFIG_QE_GPIO) += gpio.o
diff --git a/drivers/soc/fsl/qe/gpio.c b/drivers/soc/fsl/qe/gpio.c
new file mode 100644
index 0000000000..3ef24ba024
--- /dev/null
+++ b/drivers/soc/fsl/qe/gpio.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * QUICC Engine GPIOs
+ *
+ * Copyright (c) MontaVista Software, Inc. 2008.
+ *
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/gpio/legacy-of-mm-gpiochip.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/property.h>
+
+#include <soc/fsl/qe/qe.h>
+
+struct qe_gpio_chip {
+ struct of_mm_gpio_chip mm_gc;
+ spinlock_t lock;
+
+ /* shadowed data register to clear/set bits safely */
+ u32 cpdata;
+
+ /* saved_regs used to restore dedicated functions */
+ struct qe_pio_regs saved_regs;
+};
+
+static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
+{
+ struct qe_gpio_chip *qe_gc =
+ container_of(mm_gc, struct qe_gpio_chip, mm_gc);
+ struct qe_pio_regs __iomem *regs = mm_gc->regs;
+
+ qe_gc->cpdata = ioread32be(&regs->cpdata);
+ qe_gc->saved_regs.cpdata = qe_gc->cpdata;
+ qe_gc->saved_regs.cpdir1 = ioread32be(&regs->cpdir1);
+ qe_gc->saved_regs.cpdir2 = ioread32be(&regs->cpdir2);
+ qe_gc->saved_regs.cppar1 = ioread32be(&regs->cppar1);
+ qe_gc->saved_regs.cppar2 = ioread32be(&regs->cppar2);
+ qe_gc->saved_regs.cpodr = ioread32be(&regs->cpodr);
+}
+
+static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct qe_pio_regs __iomem *regs = mm_gc->regs;
+ u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio);
+
+ return !!(ioread32be(&regs->cpdata) & pin_mask);
+}
+
+static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
+ struct qe_pio_regs __iomem *regs = mm_gc->regs;
+ unsigned long flags;
+ u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio);
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+
+ if (val)
+ qe_gc->cpdata |= pin_mask;
+ else
+ qe_gc->cpdata &= ~pin_mask;
+
+ iowrite32be(qe_gc->cpdata, &regs->cpdata);
+
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+}
+
+static void qe_gpio_set_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
+ struct qe_pio_regs __iomem *regs = mm_gc->regs;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+
+ for (i = 0; i < gc->ngpio; i++) {
+ if (*mask == 0)
+ break;
+ if (__test_and_clear_bit(i, mask)) {
+ if (test_bit(i, bits))
+ qe_gc->cpdata |= (1U << (QE_PIO_PINS - 1 - i));
+ else
+ qe_gc->cpdata &= ~(1U << (QE_PIO_PINS - 1 - i));
+ }
+ }
+
+ iowrite32be(qe_gc->cpdata, &regs->cpdata);
+
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+}
+
+static int qe_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+
+ __par_io_config_pin(mm_gc->regs, gpio, QE_PIO_DIR_IN, 0, 0, 0);
+
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+
+ return 0;
+}
+
+static int qe_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
+ unsigned long flags;
+
+ qe_gpio_set(gc, gpio, val);
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+
+ __par_io_config_pin(mm_gc->regs, gpio, QE_PIO_DIR_OUT, 0, 0, 0);
+
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+
+ return 0;
+}
+
+struct qe_pin {
+ /*
+ * The qe_gpio_chip name is unfortunate; we should rename it to
+ * something like qe_pio_controller someday.
+ */
+ struct qe_gpio_chip *controller;
+ int num;
+};
+
+/**
+ * qe_pin_request - Request a QE pin
+ * @dev: device to get the pin from
+ * @index: index of the pin in the device tree
+ * Context: non-atomic
+ *
+ * This function returns a qe_pin so that you can use it with the rest of
+ * the QE Pin Multiplexing API.
+ */
+struct qe_pin *qe_pin_request(struct device *dev, int index)
+{
+ struct qe_pin *qe_pin;
+ struct gpio_chip *gc;
+ struct gpio_desc *gpiod;
+ int gpio_num;
+ int err;
+
+ qe_pin = kzalloc(sizeof(*qe_pin), GFP_KERNEL);
+ if (!qe_pin) {
+ dev_dbg(dev, "%s: can't allocate memory\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /*
+ * Request the gpio as nonexclusive as it was likely reserved by the
+ * caller, and we are not planning on controlling it; we only need
+ * the descriptor to get to the gpio chip structure.
+ */
+ gpiod = gpiod_get_index(dev, NULL, index,
+ GPIOD_ASIS | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
+ err = PTR_ERR_OR_ZERO(gpiod);
+ if (err)
+ goto err0;
+
+ gc = gpiod_to_chip(gpiod);
+ gpio_num = desc_to_gpio(gpiod);
+ /* We no longer need this descriptor */
+ gpiod_put(gpiod);
+
+ if (WARN_ON(!gc)) {
+ err = -ENODEV;
+ goto err0;
+ }
+
+ qe_pin->controller = gpiochip_get_data(gc);
+ /*
+ * FIXME: this gets the local offset on the gpio_chip so that the driver
+ * can manipulate pin control settings through its custom API. The real
+ * solution is to create a real pin control driver for this.
+ */
+ qe_pin->num = gpio_num - gc->base;
+
+ if (!fwnode_device_is_compatible(gc->fwnode, "fsl,mpc8323-qe-pario-bank")) {
+ dev_dbg(dev, "%s: tried to get a non-qe pin\n", __func__);
+ err = -EINVAL;
+ goto err0;
+ }
+ return qe_pin;
+err0:
+ kfree(qe_pin);
+ dev_dbg(dev, "%s failed with status %d\n", __func__, err);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(qe_pin_request);
+
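A hedged usage sketch of this API from a hypothetical consumer driver; the device, index 0 and the surrounding error handling are assumptions for illustration, not taken from this patch:

/* Hypothetical consumer: borrow a pin, use it as a GPIO for a while,
 * then hand it back to its firmware-configured dedicated function. */
static int example_toggle_pin_mode(struct device *dev)
{
	struct qe_pin *pin;

	pin = qe_pin_request(dev, 0);
	if (IS_ERR(pin))
		return PTR_ERR(pin);

	qe_pin_set_gpio(pin);		/* pin now under GPIO control */
	/* ... drive it through the gpiod API here ... */
	qe_pin_set_dedicated(pin);	/* restore the dedicated function */
	qe_pin_free(pin);

	return 0;
}
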
+/**
+ * qe_pin_free - Free a pin
+ * @qe_pin: pointer to the qe_pin structure
+ * Context: any
+ *
+ * This function frees the qe_pin structure and makes a pin available
+ * for further qe_pin_request() calls.
+ */
+void qe_pin_free(struct qe_pin *qe_pin)
+{
+ kfree(qe_pin);
+}
+EXPORT_SYMBOL(qe_pin_free);
+
+/**
+ * qe_pin_set_dedicated - Revert a pin to a dedicated peripheral function mode
+ * @qe_pin: pointer to the qe_pin structure
+ * Context: any
+ *
+ * This function resets a pin to a dedicated peripheral function that
+ * has been set up by the firmware.
+ */
+void qe_pin_set_dedicated(struct qe_pin *qe_pin)
+{
+ struct qe_gpio_chip *qe_gc = qe_pin->controller;
+ struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs;
+ struct qe_pio_regs *sregs = &qe_gc->saved_regs;
+ int pin = qe_pin->num;
+ u32 mask1 = 1 << (QE_PIO_PINS - (pin + 1));
+ u32 mask2 = 0x3 << (QE_PIO_PINS - (pin % (QE_PIO_PINS / 2) + 1) * 2);
+ bool second_reg = pin > (QE_PIO_PINS / 2) - 1;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+
+ if (second_reg) {
+ qe_clrsetbits_be32(&regs->cpdir2, mask2,
+ sregs->cpdir2 & mask2);
+ qe_clrsetbits_be32(&regs->cppar2, mask2,
+ sregs->cppar2 & mask2);
+ } else {
+ qe_clrsetbits_be32(&regs->cpdir1, mask2,
+ sregs->cpdir1 & mask2);
+ qe_clrsetbits_be32(&regs->cppar1, mask2,
+ sregs->cppar1 & mask2);
+ }
+
+ if (sregs->cpdata & mask1)
+ qe_gc->cpdata |= mask1;
+ else
+ qe_gc->cpdata &= ~mask1;
+
+ iowrite32be(qe_gc->cpdata, &regs->cpdata);
+ qe_clrsetbits_be32(&regs->cpodr, mask1, sregs->cpodr & mask1);
+
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+}
+EXPORT_SYMBOL(qe_pin_set_dedicated);
+
+/**
+ * qe_pin_set_gpio - Set a pin to the GPIO mode
+ * @qe_pin: pointer to the qe_pin structure
+ * Context: any
+ *
+ * This function sets a pin to the GPIO mode.
+ */
+void qe_pin_set_gpio(struct qe_pin *qe_pin)
+{
+ struct qe_gpio_chip *qe_gc = qe_pin->controller;
+ struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+
+ /* Let's make it input by default, GPIO API is able to change that. */
+ __par_io_config_pin(regs, qe_pin->num, QE_PIO_DIR_IN, 0, 0, 0);
+
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+}
+EXPORT_SYMBOL(qe_pin_set_gpio);
+
+static int __init qe_add_gpiochips(void)
+{
+ struct device_node *np;
+
+ for_each_compatible_node(np, NULL, "fsl,mpc8323-qe-pario-bank") {
+ int ret;
+ struct qe_gpio_chip *qe_gc;
+ struct of_mm_gpio_chip *mm_gc;
+ struct gpio_chip *gc;
+
+ qe_gc = kzalloc(sizeof(*qe_gc), GFP_KERNEL);
+ if (!qe_gc) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ spin_lock_init(&qe_gc->lock);
+
+ mm_gc = &qe_gc->mm_gc;
+ gc = &mm_gc->gc;
+
+ mm_gc->save_regs = qe_gpio_save_regs;
+ gc->ngpio = QE_PIO_PINS;
+ gc->direction_input = qe_gpio_dir_in;
+ gc->direction_output = qe_gpio_dir_out;
+ gc->get = qe_gpio_get;
+ gc->set = qe_gpio_set;
+ gc->set_multiple = qe_gpio_set_multiple;
+
+ ret = of_mm_gpiochip_add_data(np, mm_gc, qe_gc);
+ if (ret)
+ goto err;
+ continue;
+err:
+ pr_err("%pOF: registration failed with status %d\n",
+ np, ret);
+ kfree(qe_gc);
+ /* try others anyway */
+ }
+ return 0;
+}
+arch_initcall(qe_add_gpiochips);
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c
new file mode 100644
index 0000000000..3ee0c7c1e9
--- /dev/null
+++ b/drivers/soc/fsl/qe/qe.c
@@ -0,0 +1,682 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2006-2010 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors: Shlomi Gridish <gridish@freescale.com>
+ * Li Yang <leoli@freescale.com>
+ * Based on cpm2_common.c from Dan Malek (dmalek@jlc.net)
+ *
+ * Description:
+ * General Purpose functions for the global management of the
+ * QUICC Engine (QE).
+ */
+#include <linux/bitmap.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/iopoll.h>
+#include <linux/crc32.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+
+static void qe_snums_init(void);
+static int qe_sdma_init(void);
+
+static DEFINE_SPINLOCK(qe_lock);
+DEFINE_SPINLOCK(cmxgcr_lock);
+EXPORT_SYMBOL(cmxgcr_lock);
+
+/* We allocate this here because it is used almost exclusively for
+ * the communication processor devices.
+ */
+struct qe_immap __iomem *qe_immr;
+EXPORT_SYMBOL(qe_immr);
+
+static u8 snums[QE_NUM_OF_SNUM]; /* Dynamically allocated SNUMs */
+static DECLARE_BITMAP(snum_state, QE_NUM_OF_SNUM);
+static unsigned int qe_num_of_snum;
+
+static phys_addr_t qebase = -1;
+
+static struct device_node *qe_get_device_node(void)
+{
+ struct device_node *qe;
+
+ /*
+ * Newer device trees have an "fsl,qe" compatible property for the QE
+ * node, but we still need to support older device trees.
+ */
+ qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
+ if (qe)
+ return qe;
+ return of_find_node_by_type(NULL, "qe");
+}
+
+static phys_addr_t get_qe_base(void)
+{
+ struct device_node *qe;
+ int ret;
+ struct resource res;
+
+ if (qebase != -1)
+ return qebase;
+
+ qe = qe_get_device_node();
+ if (!qe)
+ return qebase;
+
+ ret = of_address_to_resource(qe, 0, &res);
+ if (!ret)
+ qebase = res.start;
+ of_node_put(qe);
+
+ return qebase;
+}
+
+void qe_reset(void)
+{
+ if (qe_immr == NULL)
+ qe_immr = ioremap(get_qe_base(), QE_IMMAP_SIZE);
+
+ qe_snums_init();
+
+ qe_issue_cmd(QE_RESET, QE_CR_SUBBLOCK_INVALID,
+ QE_CR_PROTOCOL_UNSPECIFIED, 0);
+
+ /* Reclaim the MURAM memory for our use. */
+ qe_muram_init();
+
+ if (qe_sdma_init())
+ panic("sdma init failed!");
+}
+
+int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
+{
+ unsigned long flags;
+ u8 mcn_shift = 0, dev_shift = 0;
+ u32 val;
+ int ret;
+
+ spin_lock_irqsave(&qe_lock, flags);
+ if (cmd == QE_RESET) {
+ iowrite32be((u32)(cmd | QE_CR_FLG), &qe_immr->cp.cecr);
+ } else {
+ if (cmd == QE_ASSIGN_PAGE) {
+ /* Here device is the SNUM, not sub-block */
+ dev_shift = QE_CR_SNUM_SHIFT;
+ } else if (cmd == QE_ASSIGN_RISC) {
+ /* Here device is the SNUM, and mcnProtocol is
+ * e_QeCmdRiscAssignment value */
+ dev_shift = QE_CR_SNUM_SHIFT;
+ mcn_shift = QE_CR_MCN_RISC_ASSIGN_SHIFT;
+ } else {
+ if (device == QE_CR_SUBBLOCK_USB)
+ mcn_shift = QE_CR_MCN_USB_SHIFT;
+ else
+ mcn_shift = QE_CR_MCN_NORMAL_SHIFT;
+ }
+
+ iowrite32be(cmd_input, &qe_immr->cp.cecdr);
+ iowrite32be((cmd | QE_CR_FLG | ((u32)device << dev_shift) | (u32)mcn_protocol << mcn_shift),
+ &qe_immr->cp.cecr);
+ }
+
+ /* wait for the QE_CR_FLG to clear */
+ ret = readx_poll_timeout_atomic(ioread32be, &qe_immr->cp.cecr, val,
+ (val & QE_CR_FLG) == 0, 0, 100);
+ /* On timeout, ret is -ETIMEDOUT, otherwise it will be 0. */
+ spin_unlock_irqrestore(&qe_lock, flags);
+
+ return ret == 0;
+}
+EXPORT_SYMBOL(qe_issue_cmd);
+
+/* Set a baud rate generator. This needs lots of work. There are
+ * 16 BRGs, which can be connected to the QE channels or output
+ * as clocks. The BRGs are in two different block of internal
+ * memory mapped space.
+ * The BRG clock is the QE clock divided by 2.
+ * It was set up long ago during the initial boot phase and is
+ * given to us.
+ * Baud rate clocks are zero-based in the driver code (as that maps
+ * to port numbers). Documentation uses 1-based numbering.
+ */
+static unsigned int brg_clk = 0;
+
+#define CLK_GRAN (1000)
+#define CLK_GRAN_LIMIT (5)
+
+unsigned int qe_get_brg_clk(void)
+{
+ struct device_node *qe;
+ u32 brg;
+ unsigned int mod;
+
+ if (brg_clk)
+ return brg_clk;
+
+ qe = qe_get_device_node();
+ if (!qe)
+ return brg_clk;
+
+ if (!of_property_read_u32(qe, "brg-frequency", &brg))
+ brg_clk = brg;
+
+ of_node_put(qe);
+
+ /* round this if near to a multiple of CLK_GRAN */
+ mod = brg_clk % CLK_GRAN;
+ if (mod) {
+ if (mod < CLK_GRAN_LIMIT)
+ brg_clk -= mod;
+ else if (mod > (CLK_GRAN - CLK_GRAN_LIMIT))
+ brg_clk += CLK_GRAN - mod;
+ }
+
+ return brg_clk;
+}
+EXPORT_SYMBOL(qe_get_brg_clk);
+
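As a worked example of the rounding above: a device tree brg-frequency of 49999998 Hz has a remainder of 998 against CLK_GRAN, which is within CLK_GRAN_LIMIT (5 Hz) of the next kHz boundary, so it is rounded up to 50000000 Hz; 50000003 Hz is likewise rounded down to 50000000 Hz; anything further than 5 Hz from a kHz boundary is returned unchanged.
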
+#define PVR_VER_836x 0x8083
+#define PVR_VER_832x 0x8084
+
+static bool qe_general4_errata(void)
+{
+#ifdef CONFIG_PPC32
+ return pvr_version_is(PVR_VER_836x) || pvr_version_is(PVR_VER_832x);
+#endif
+ return false;
+}
+
+/* Program the BRG to the given sampling rate and multiplier
+ *
+ * @brg: the BRG, QE_BRG1 - QE_BRG16
+ * @rate: the desired sampling rate
+ * @multiplier: corresponds to the value programmed in GUMR_L[RDCR] or
+ * GUMR_L[TDCR]. E.g., if this BRG is the RX clock, and GUMR_L[RDCR]=01,
+ * then 'multiplier' should be 8.
+ */
+int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier)
+{
+ u32 divisor, tempval;
+ u32 div16 = 0;
+
+ if ((brg < QE_BRG1) || (brg > QE_BRG16))
+ return -EINVAL;
+
+ divisor = qe_get_brg_clk() / (rate * multiplier);
+
+ if (divisor > QE_BRGC_DIVISOR_MAX + 1) {
+ div16 = QE_BRGC_DIV16;
+ divisor /= 16;
+ }
+
+ /* Errata QE_General4, which affects some MPC832x and MPC836x SOCs, says
+ that the BRG divisor must be even if you're not using divide-by-16
+ mode. */
+ if (qe_general4_errata())
+ if (!div16 && (divisor & 1) && (divisor > 3))
+ divisor++;
+
+ tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) |
+ QE_BRGC_ENABLE | div16;
+
+ iowrite32be(tempval, &qe_immr->brg.brgc[brg - QE_BRG1]);
+
+ return 0;
+}
+EXPORT_SYMBOL(qe_setbrg);
+
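A worked example, assuming a (hypothetical) 100 MHz BRG clock: for a 115200 baud UART with a x16 oversampling multiplier, divisor = 100000000 / (115200 * 16) = 54, which fits the divisor field, so BRGC is written with (54 - 1) shifted into place plus the enable bit and no divide-by-16. A very low rate such as 300 baud with multiplier 1 would give 333333, exceed QE_BRGC_DIVISOR_MAX + 1, and therefore be programmed as 333333 / 16 = 20833 with QE_BRGC_DIV16 set.
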
+/* Convert a string to a QE clock source enum
+ *
+ * This function takes a string, typically from a property in the device
+ * tree, and returns the corresponding "enum qe_clock" value.
+ */
+enum qe_clock qe_clock_source(const char *source)
+{
+ unsigned int i;
+
+ if (strcasecmp(source, "none") == 0)
+ return QE_CLK_NONE;
+
+ if (strcmp(source, "tsync_pin") == 0)
+ return QE_TSYNC_PIN;
+
+ if (strcmp(source, "rsync_pin") == 0)
+ return QE_RSYNC_PIN;
+
+ if (strncasecmp(source, "brg", 3) == 0) {
+ i = simple_strtoul(source + 3, NULL, 10);
+ if ((i >= 1) && (i <= 16))
+ return (QE_BRG1 - 1) + i;
+ else
+ return QE_CLK_DUMMY;
+ }
+
+ if (strncasecmp(source, "clk", 3) == 0) {
+ i = simple_strtoul(source + 3, NULL, 10);
+ if ((i >= 1) && (i <= 24))
+ return (QE_CLK1 - 1) + i;
+ else
+ return QE_CLK_DUMMY;
+ }
+
+ return QE_CLK_DUMMY;
+}
+EXPORT_SYMBOL(qe_clock_source);
+
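A small usage sketch; the property name "rx-clock-name" is only an assumption for illustration, what matters is that the string comes from the device tree:

/* Hypothetical caller: translate a DT string into a QE clock source. */
static enum qe_clock example_get_rx_clock(struct device_node *np)
{
	const char *name;

	if (of_property_read_string(np, "rx-clock-name", &name))
		return QE_CLK_DUMMY;

	/* e.g. "brg5" -> QE_BRG5, "clk12" -> QE_CLK12, "none" -> QE_CLK_NONE */
	return qe_clock_source(name);
}
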
+/* Initialize SNUMs (thread serial numbers) according to
+ * QE Module Control chapter, SNUM table
+ */
+static void qe_snums_init(void)
+{
+ static const u8 snum_init_76[] = {
+ 0x04, 0x05, 0x0C, 0x0D, 0x14, 0x15, 0x1C, 0x1D,
+ 0x24, 0x25, 0x2C, 0x2D, 0x34, 0x35, 0x88, 0x89,
+ 0x98, 0x99, 0xA8, 0xA9, 0xB8, 0xB9, 0xC8, 0xC9,
+ 0xD8, 0xD9, 0xE8, 0xE9, 0x44, 0x45, 0x4C, 0x4D,
+ 0x54, 0x55, 0x5C, 0x5D, 0x64, 0x65, 0x6C, 0x6D,
+ 0x74, 0x75, 0x7C, 0x7D, 0x84, 0x85, 0x8C, 0x8D,
+ 0x94, 0x95, 0x9C, 0x9D, 0xA4, 0xA5, 0xAC, 0xAD,
+ 0xB4, 0xB5, 0xBC, 0xBD, 0xC4, 0xC5, 0xCC, 0xCD,
+ 0xD4, 0xD5, 0xDC, 0xDD, 0xE4, 0xE5, 0xEC, 0xED,
+ 0xF4, 0xF5, 0xFC, 0xFD,
+ };
+ static const u8 snum_init_46[] = {
+ 0x04, 0x05, 0x0C, 0x0D, 0x14, 0x15, 0x1C, 0x1D,
+ 0x24, 0x25, 0x2C, 0x2D, 0x34, 0x35, 0x88, 0x89,
+ 0x98, 0x99, 0xA8, 0xA9, 0xB8, 0xB9, 0xC8, 0xC9,
+ 0xD8, 0xD9, 0xE8, 0xE9, 0x08, 0x09, 0x18, 0x19,
+ 0x28, 0x29, 0x38, 0x39, 0x48, 0x49, 0x58, 0x59,
+ 0x68, 0x69, 0x78, 0x79, 0x80, 0x81,
+ };
+ struct device_node *qe;
+ const u8 *snum_init;
+ int i;
+
+ bitmap_zero(snum_state, QE_NUM_OF_SNUM);
+ qe_num_of_snum = 28; /* The default number of snum for threads is 28 */
+ qe = qe_get_device_node();
+ if (qe) {
+ i = of_property_read_variable_u8_array(qe, "fsl,qe-snums",
+ snums, 1, QE_NUM_OF_SNUM);
+ if (i > 0) {
+ of_node_put(qe);
+ qe_num_of_snum = i;
+ return;
+ }
+ /*
+ * Fall back to legacy binding of using the value of
+ * fsl,qe-num-snums to choose one of the static arrays
+ * above.
+ */
+ of_property_read_u32(qe, "fsl,qe-num-snums", &qe_num_of_snum);
+ of_node_put(qe);
+ }
+
+ if (qe_num_of_snum == 76) {
+ snum_init = snum_init_76;
+ } else if (qe_num_of_snum == 28 || qe_num_of_snum == 46) {
+ snum_init = snum_init_46;
+ } else {
+ pr_err("QE: unsupported value of fsl,qe-num-snums: %u\n", qe_num_of_snum);
+ return;
+ }
+ memcpy(snums, snum_init, qe_num_of_snum);
+}
+
+int qe_get_snum(void)
+{
+ unsigned long flags;
+ int snum = -EBUSY;
+ int i;
+
+ spin_lock_irqsave(&qe_lock, flags);
+ i = find_first_zero_bit(snum_state, qe_num_of_snum);
+ if (i < qe_num_of_snum) {
+ set_bit(i, snum_state);
+ snum = snums[i];
+ }
+ spin_unlock_irqrestore(&qe_lock, flags);
+
+ return snum;
+}
+EXPORT_SYMBOL(qe_get_snum);
+
+void qe_put_snum(u8 snum)
+{
+ const u8 *p = memchr(snums, snum, qe_num_of_snum);
+
+ if (p)
+ clear_bit(p - snums, snum_state);
+}
+EXPORT_SYMBOL(qe_put_snum);
+
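A minimal sketch of the SNUM allocator's intended use (the caller and the comment about parameter RAM are assumptions for illustration):

/* Hypothetical user: reserve a thread serial number for a channel. */
static int example_claim_snum(void)
{
	int snum = qe_get_snum();

	if (snum < 0)
		return snum;	/* -EBUSY: all SNUMs are in use */

	/* ... program snum into the channel's parameter RAM ... */

	qe_put_snum((u8)snum);
	return 0;
}
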
+static int qe_sdma_init(void)
+{
+ struct sdma __iomem *sdma = &qe_immr->sdma;
+ static s32 sdma_buf_offset = -ENOMEM;
+
+ /* allocate 2 internal temporary buffers (512 bytes each) for
+ * the SDMA */
+ if (sdma_buf_offset < 0) {
+ sdma_buf_offset = qe_muram_alloc(512 * 2, 4096);
+ if (sdma_buf_offset < 0)
+ return -ENOMEM;
+ }
+
+ iowrite32be((u32)sdma_buf_offset & QE_SDEBCR_BA_MASK,
+ &sdma->sdebcr);
+ iowrite32be((QE_SDMR_GLB_1_MSK | (0x1 << QE_SDMR_CEN_SHIFT)),
+ &sdma->sdmr);
+
+ return 0;
+}
+
+/* The maximum number of RISCs we support */
+#define MAX_QE_RISC 4
+
+/* Firmware information stored here for qe_get_firmware_info() */
+static struct qe_firmware_info qe_firmware_info;
+
+/*
+ * Set to 1 if QE firmware has been uploaded, and therefore
+ * qe_firmware_info contains valid data.
+ */
+static int qe_firmware_uploaded;
+
+/*
+ * Upload a QE microcode
+ *
+ * This function is a worker function for qe_upload_firmware(). It does
+ * the actual uploading of the microcode.
+ */
+static void qe_upload_microcode(const void *base,
+ const struct qe_microcode *ucode)
+{
+ const __be32 *code = base + be32_to_cpu(ucode->code_offset);
+ unsigned int i;
+
+ if (ucode->major || ucode->minor || ucode->revision)
+ printk(KERN_INFO "qe-firmware: "
+ "uploading microcode '%s' version %u.%u.%u\n",
+ ucode->id, ucode->major, ucode->minor, ucode->revision);
+ else
+ printk(KERN_INFO "qe-firmware: "
+ "uploading microcode '%s'\n", ucode->id);
+
+ /* Use auto-increment */
+ iowrite32be(be32_to_cpu(ucode->iram_offset) | QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR,
+ &qe_immr->iram.iadd);
+
+ for (i = 0; i < be32_to_cpu(ucode->count); i++)
+ iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata);
+
+ /* Set I-RAM Ready Register */
+ iowrite32be(QE_IRAM_READY, &qe_immr->iram.iready);
+}
+
+/*
+ * Upload a microcode to the I-RAM at a specific address.
+ *
+ * See Documentation/powerpc/qe_firmware.rst for information on QE microcode
+ * uploading.
+ *
+ * Currently, only version 1 is supported, so the 'version' field must be
+ * set to 1.
+ *
+ * The SOC model and revision are not validated, they are only displayed for
+ * informational purposes.
+ *
+ * 'calc_size' is the calculated size, in bytes, of the firmware structure and
+ * all of the microcode structures, minus the CRC.
+ *
+ * 'length' is the size that the structure says it is, including the CRC.
+ */
+int qe_upload_firmware(const struct qe_firmware *firmware)
+{
+ unsigned int i;
+ unsigned int j;
+ u32 crc;
+ size_t calc_size;
+ size_t length;
+ const struct qe_header *hdr;
+
+ if (!firmware) {
+ printk(KERN_ERR "qe-firmware: invalid pointer\n");
+ return -EINVAL;
+ }
+
+ hdr = &firmware->header;
+ length = be32_to_cpu(hdr->length);
+
+ /* Check the magic */
+ if ((hdr->magic[0] != 'Q') || (hdr->magic[1] != 'E') ||
+ (hdr->magic[2] != 'F')) {
+ printk(KERN_ERR "qe-firmware: not a microcode\n");
+ return -EPERM;
+ }
+
+ /* Check the version */
+ if (hdr->version != 1) {
+ printk(KERN_ERR "qe-firmware: unsupported version\n");
+ return -EPERM;
+ }
+
+ /* Validate some of the fields */
+ if ((firmware->count < 1) || (firmware->count > MAX_QE_RISC)) {
+ printk(KERN_ERR "qe-firmware: invalid data\n");
+ return -EINVAL;
+ }
+
+ /* Validate the length and check if there's a CRC */
+ calc_size = struct_size(firmware, microcode, firmware->count);
+
+ for (i = 0; i < firmware->count; i++)
+ /*
+ * For situations where the second RISC uses the same microcode
+ * as the first, the 'code_offset' and 'count' fields will be
+ * zero, so it's okay to add those.
+ */
+ calc_size += sizeof(__be32) *
+ be32_to_cpu(firmware->microcode[i].count);
+
+ /* Validate the length */
+ if (length != calc_size + sizeof(__be32)) {
+ printk(KERN_ERR "qe-firmware: invalid length\n");
+ return -EPERM;
+ }
+
+ /* Validate the CRC */
+ crc = be32_to_cpu(*(__be32 *)((void *)firmware + calc_size));
+ if (crc != crc32(0, firmware, calc_size)) {
+ printk(KERN_ERR "qe-firmware: firmware CRC is invalid\n");
+ return -EIO;
+ }
+
+ /*
+ * If the microcode calls for it, split the I-RAM.
+ */
+ if (!firmware->split)
+ qe_setbits_be16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR);
+
+ if (firmware->soc.model)
+ printk(KERN_INFO
+ "qe-firmware: firmware '%s' for %u V%u.%u\n",
+ firmware->id, be16_to_cpu(firmware->soc.model),
+ firmware->soc.major, firmware->soc.minor);
+ else
+ printk(KERN_INFO "qe-firmware: firmware '%s'\n",
+ firmware->id);
+
+ /*
+ * The QE only supports one microcode per RISC, so clear out all the
+ * saved microcode information and put in the new.
+ */
+ memset(&qe_firmware_info, 0, sizeof(qe_firmware_info));
+ strscpy(qe_firmware_info.id, firmware->id, sizeof(qe_firmware_info.id));
+ qe_firmware_info.extended_modes = be64_to_cpu(firmware->extended_modes);
+ memcpy(qe_firmware_info.vtraps, firmware->vtraps,
+ sizeof(firmware->vtraps));
+
+ /* Loop through each microcode. */
+ for (i = 0; i < firmware->count; i++) {
+ const struct qe_microcode *ucode = &firmware->microcode[i];
+
+ /* Upload a microcode if it's present */
+ if (ucode->code_offset)
+ qe_upload_microcode(firmware, ucode);
+
+ /* Program the traps for this processor */
+ for (j = 0; j < 16; j++) {
+ u32 trap = be32_to_cpu(ucode->traps[j]);
+
+ if (trap)
+ iowrite32be(trap,
+ &qe_immr->rsp[i].tibcr[j]);
+ }
+
+ /* Enable traps */
+ iowrite32be(be32_to_cpu(ucode->eccr),
+ &qe_immr->rsp[i].eccr);
+ }
+
+ qe_firmware_uploaded = 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(qe_upload_firmware);
+
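A hedged sketch of how a platform driver might feed a microcode blob to this routine via the kernel firmware loader; the file name is an assumption, and a real caller should size-check fw->data before casting:

/* Hypothetical caller: fetch a QE microcode image and upload it. */
static int example_load_qe_firmware(struct device *dev)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, "fsl_qe_ucode_example.bin", dev);
	if (ret)
		return ret;

	/* The blob is a struct qe_firmware: header, microcode[], trailing CRC */
	ret = qe_upload_firmware((const struct qe_firmware *)fw->data);

	release_firmware(fw);
	return ret;
}
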
+/*
+ * Get info on the currently-loaded firmware
+ *
+ * This function also checks the device tree to see if the boot loader has
+ * uploaded a firmware already.
+ */
+struct qe_firmware_info *qe_get_firmware_info(void)
+{
+ static int initialized;
+ struct device_node *qe;
+ struct device_node *fw = NULL;
+ const char *sprop;
+
+ /*
+ * If we haven't checked yet, and a driver hasn't uploaded a firmware
+ * yet, then check the device tree for information.
+ */
+ if (qe_firmware_uploaded)
+ return &qe_firmware_info;
+
+ if (initialized)
+ return NULL;
+
+ initialized = 1;
+
+ qe = qe_get_device_node();
+ if (!qe)
+ return NULL;
+
+ /* Find the 'firmware' child node */
+ fw = of_get_child_by_name(qe, "firmware");
+ of_node_put(qe);
+
+ /* Did we find the 'firmware' node? */
+ if (!fw)
+ return NULL;
+
+ qe_firmware_uploaded = 1;
+
+ /* Copy the data into qe_firmware_info */
+ sprop = of_get_property(fw, "id", NULL);
+ if (sprop)
+ strscpy(qe_firmware_info.id, sprop,
+ sizeof(qe_firmware_info.id));
+
+ of_property_read_u64(fw, "extended-modes",
+ &qe_firmware_info.extended_modes);
+
+ of_property_read_u32_array(fw, "virtual-traps", qe_firmware_info.vtraps,
+ ARRAY_SIZE(qe_firmware_info.vtraps));
+
+ of_node_put(fw);
+
+ return &qe_firmware_info;
+}
+EXPORT_SYMBOL(qe_get_firmware_info);
+
+unsigned int qe_get_num_of_risc(void)
+{
+ struct device_node *qe;
+ unsigned int num_of_risc = 0;
+
+ qe = qe_get_device_node();
+ if (!qe)
+ return num_of_risc;
+
+ of_property_read_u32(qe, "fsl,qe-num-riscs", &num_of_risc);
+
+ of_node_put(qe);
+
+ return num_of_risc;
+}
+EXPORT_SYMBOL(qe_get_num_of_risc);
+
+unsigned int qe_get_num_of_snums(void)
+{
+ return qe_num_of_snum;
+}
+EXPORT_SYMBOL(qe_get_num_of_snums);
+
+static int __init qe_init(void)
+{
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,qe");
+ if (!np)
+ return -ENODEV;
+ qe_reset();
+ of_node_put(np);
+ return 0;
+}
+subsys_initcall(qe_init);
+
+#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx)
+static int qe_resume(struct platform_device *ofdev)
+{
+ if (!qe_alive_during_sleep())
+ qe_reset();
+ return 0;
+}
+
+static int qe_probe(struct platform_device *ofdev)
+{
+ return 0;
+}
+
+static const struct of_device_id qe_ids[] = {
+ { .compatible = "fsl,qe", },
+ { },
+};
+
+static struct platform_driver qe_driver = {
+ .driver = {
+ .name = "fsl-qe",
+ .of_match_table = qe_ids,
+ },
+ .probe = qe_probe,
+ .resume = qe_resume,
+};
+
+builtin_platform_driver(qe_driver);
+#endif /* defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx) */
diff --git a/drivers/soc/fsl/qe/qe_common.c b/drivers/soc/fsl/qe/qe_common.c
new file mode 100644
index 0000000000..9729ce86db
--- /dev/null
+++ b/drivers/soc/fsl/qe/qe_common.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Common CPM code
+ *
+ * Author: Scott Wood <scottwood@freescale.com>
+ *
+ * Copyright 2007-2008,2010 Freescale Semiconductor, Inc.
+ *
+ * Some parts derived from commproc.c/cpm2_common.c, which is:
+ * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
+ * Copyright (c) 1999-2001 Dan Malek <dan@embeddedalley.com>
+ * Copyright (c) 2000 MontaVista Software, Inc (source@mvista.com)
+ * 2006 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ */
+#include <linux/genalloc.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/export.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <soc/fsl/qe/qe.h>
+
+static struct gen_pool *muram_pool;
+static DEFINE_SPINLOCK(cpm_muram_lock);
+static void __iomem *muram_vbase;
+static phys_addr_t muram_pbase;
+
+struct muram_block {
+ struct list_head head;
+ s32 start;
+ int size;
+};
+
+static LIST_HEAD(muram_block_list);
+
+/* max address size we deal with */
+#define OF_MAX_ADDR_CELLS 4
+#define GENPOOL_OFFSET (4096 * 8)
+
+int cpm_muram_init(void)
+{
+ struct device_node *np;
+ struct resource r;
+ __be32 zero[OF_MAX_ADDR_CELLS] = {};
+ resource_size_t max = 0;
+ int i = 0;
+ int ret = 0;
+
+ if (muram_pbase)
+ return 0;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,cpm-muram-data");
+ if (!np) {
+ /* try legacy bindings */
+ np = of_find_node_by_name(NULL, "data-only");
+ if (!np) {
+ pr_err("Cannot find CPM muram data node");
+ ret = -ENODEV;
+ goto out_muram;
+ }
+ }
+
+ muram_pool = gen_pool_create(0, -1);
+ if (!muram_pool) {
+ pr_err("Cannot allocate memory pool for CPM/QE muram");
+ ret = -ENOMEM;
+ goto out_muram;
+ }
+ muram_pbase = of_translate_address(np, zero);
+ if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) {
+ pr_err("Cannot translate zero through CPM muram node");
+ ret = -ENODEV;
+ goto out_pool;
+ }
+
+ while (of_address_to_resource(np, i++, &r) == 0) {
+ if (r.end > max)
+ max = r.end;
+ ret = gen_pool_add(muram_pool, r.start - muram_pbase +
+ GENPOOL_OFFSET, resource_size(&r), -1);
+ if (ret) {
+ pr_err("QE: couldn't add muram to pool!\n");
+ goto out_pool;
+ }
+ }
+
+ muram_vbase = ioremap(muram_pbase, max - muram_pbase + 1);
+ if (!muram_vbase) {
+ pr_err("Cannot map QE muram");
+ ret = -ENOMEM;
+ goto out_pool;
+ }
+ goto out_muram;
+out_pool:
+ gen_pool_destroy(muram_pool);
+out_muram:
+ of_node_put(np);
+ return ret;
+}
+
+/*
+ * cpm_muram_alloc_common - cpm_muram_alloc common code
+ * @size: number of bytes to allocate
+ * @algo: algorithm for alloc.
+ * @data: data for genalloc's algorithm.
+ *
+ * This function returns a non-negative offset into the muram area, or
+ * a negative errno on failure.
+ */
+static s32 cpm_muram_alloc_common(unsigned long size,
+ genpool_algo_t algo, void *data)
+{
+ struct muram_block *entry;
+ s32 start;
+
+ entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!entry)
+ return -ENOMEM;
+ start = gen_pool_alloc_algo(muram_pool, size, algo, data);
+ if (!start) {
+ kfree(entry);
+ return -ENOMEM;
+ }
+ start = start - GENPOOL_OFFSET;
+ memset_io(cpm_muram_addr(start), 0, size);
+ entry->start = start;
+ entry->size = size;
+ list_add(&entry->head, &muram_block_list);
+
+ return start;
+}
+
+/*
+ * cpm_muram_alloc - allocate the requested size worth of multi-user ram
+ * @size: number of bytes to allocate
+ * @align: requested alignment, in bytes
+ *
+ * This function returns a non-negative offset into the muram area, or
+ * a negative errno on failure.
+ * Use cpm_dpram_addr() to get the virtual address of the area.
+ * Use cpm_muram_free() to free the allocation.
+ */
+s32 cpm_muram_alloc(unsigned long size, unsigned long align)
+{
+ s32 start;
+ unsigned long flags;
+ struct genpool_data_align muram_pool_data;
+
+ spin_lock_irqsave(&cpm_muram_lock, flags);
+ muram_pool_data.align = align;
+ start = cpm_muram_alloc_common(size, gen_pool_first_fit_align,
+ &muram_pool_data);
+ spin_unlock_irqrestore(&cpm_muram_lock, flags);
+ return start;
+}
+EXPORT_SYMBOL(cpm_muram_alloc);
+
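A minimal usage sketch of the allocator (the size and alignment are arbitrary here, and error handling is abbreviated):

/* Hypothetical user: carve out a 64-byte, 32-byte-aligned parameter area. */
static int example_use_muram(void)
{
	void __iomem *va;
	s32 offset;

	offset = cpm_muram_alloc(64, 32);
	if (offset < 0)
		return offset;

	va = cpm_muram_addr(offset);	/* kernel virtual address of the area */
	iowrite32be(0x12345678, va);	/* ... program the parameter RAM ... */

	cpm_muram_free(offset);
	return 0;
}
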
+/**
+ * cpm_muram_free - free a chunk of multi-user ram
+ * @offset: The beginning of the chunk as returned by cpm_muram_alloc().
+ */
+void cpm_muram_free(s32 offset)
+{
+ unsigned long flags;
+ int size;
+ struct muram_block *tmp;
+
+ if (offset < 0)
+ return;
+
+ size = 0;
+ spin_lock_irqsave(&cpm_muram_lock, flags);
+ list_for_each_entry(tmp, &muram_block_list, head) {
+ if (tmp->start == offset) {
+ size = tmp->size;
+ list_del(&tmp->head);
+ kfree(tmp);
+ break;
+ }
+ }
+ gen_pool_free(muram_pool, offset + GENPOOL_OFFSET, size);
+ spin_unlock_irqrestore(&cpm_muram_lock, flags);
+}
+EXPORT_SYMBOL(cpm_muram_free);
+
+/*
+ * cpm_muram_alloc_fixed - reserve a specific region of multi-user ram
+ * @offset: offset of allocation start address
+ * @size: number of bytes to allocate
+ * This function returns @offset if the area was available, a negative
+ * errno otherwise.
+ * Use cpm_dpram_addr() to get the virtual address of the area.
+ * Use cpm_muram_free() to free the allocation.
+ */
+s32 cpm_muram_alloc_fixed(unsigned long offset, unsigned long size)
+{
+ s32 start;
+ unsigned long flags;
+ struct genpool_data_fixed muram_pool_data_fixed;
+
+ spin_lock_irqsave(&cpm_muram_lock, flags);
+ muram_pool_data_fixed.offset = offset + GENPOOL_OFFSET;
+ start = cpm_muram_alloc_common(size, gen_pool_fixed_alloc,
+ &muram_pool_data_fixed);
+ spin_unlock_irqrestore(&cpm_muram_lock, flags);
+ return start;
+}
+EXPORT_SYMBOL(cpm_muram_alloc_fixed);
+
+/**
+ * cpm_muram_addr - turn a muram offset into a virtual address
+ * @offset: muram offset to convert
+ */
+void __iomem *cpm_muram_addr(unsigned long offset)
+{
+ return muram_vbase + offset;
+}
+EXPORT_SYMBOL(cpm_muram_addr);
+
+unsigned long cpm_muram_offset(const void __iomem *addr)
+{
+ return addr - muram_vbase;
+}
+EXPORT_SYMBOL(cpm_muram_offset);
+
+/**
+ * cpm_muram_dma - turn a muram virtual address into a DMA address
+ * @addr: virtual address from cpm_muram_addr() to convert
+ */
+dma_addr_t cpm_muram_dma(void __iomem *addr)
+{
+ return muram_pbase + (addr - muram_vbase);
+}
+EXPORT_SYMBOL(cpm_muram_dma);
+
+/*
+ * As cpm_muram_free, but takes the virtual address rather than the
+ * muram offset.
+ */
+void cpm_muram_free_addr(const void __iomem *addr)
+{
+ if (!addr)
+ return;
+ cpm_muram_free(cpm_muram_offset(addr));
+}
+EXPORT_SYMBOL(cpm_muram_free_addr);
diff --git a/drivers/soc/fsl/qe/qe_ic.c b/drivers/soc/fsl/qe/qe_ic.c
new file mode 100644
index 0000000000..bbae3d39c7
--- /dev/null
+++ b/drivers/soc/fsl/qe/qe_ic.c
@@ -0,0 +1,487 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * arch/powerpc/sysdev/qe_lib/qe_ic.c
+ *
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: Li Yang <leoli@freescale.com>
+ * Based on code from Shlomi Gridish <gridish@freescale.com>
+ *
+ * QUICC ENGINE Interrupt Controller
+ */
+
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/irq.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <soc/fsl/qe/qe.h>
+
+#define NR_QE_IC_INTS 64
+
+/* QE IC registers offset */
+#define QEIC_CICR 0x00
+#define QEIC_CIVEC 0x04
+#define QEIC_CIPXCC 0x10
+#define QEIC_CIPYCC 0x14
+#define QEIC_CIPWCC 0x18
+#define QEIC_CIPZCC 0x1c
+#define QEIC_CIMR 0x20
+#define QEIC_CRIMR 0x24
+#define QEIC_CIPRTA 0x30
+#define QEIC_CIPRTB 0x34
+#define QEIC_CHIVEC 0x60
+
+struct qe_ic {
+ /* Control registers offset */
+ __be32 __iomem *regs;
+
+ /* The remapper for this QEIC */
+ struct irq_domain *irqhost;
+
+ /* The "linux" controller struct */
+ struct irq_chip hc_irq;
+
+ /* VIRQ numbers of QE high/low irqs */
+ int virq_high;
+ int virq_low;
+};
+
+/*
+ * QE interrupt controller internal structure
+ */
+struct qe_ic_info {
+ /* Location of this source at the QIMR register */
+ u32 mask;
+
+ /* Mask register offset */
+ u32 mask_reg;
+
+ /*
+ * For grouped interrupts sources - the interrupt code as
+ * appears at the group priority register
+ */
+ u8 pri_code;
+
+ /* Group priority register offset */
+ u32 pri_reg;
+};
+
+static DEFINE_RAW_SPINLOCK(qe_ic_lock);
+
+static struct qe_ic_info qe_ic_info[] = {
+ [1] = {
+ .mask = 0x00008000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 0,
+ .pri_reg = QEIC_CIPWCC,
+ },
+ [2] = {
+ .mask = 0x00004000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 1,
+ .pri_reg = QEIC_CIPWCC,
+ },
+ [3] = {
+ .mask = 0x00002000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 2,
+ .pri_reg = QEIC_CIPWCC,
+ },
+ [10] = {
+ .mask = 0x00000040,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 1,
+ .pri_reg = QEIC_CIPZCC,
+ },
+ [11] = {
+ .mask = 0x00000020,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 2,
+ .pri_reg = QEIC_CIPZCC,
+ },
+ [12] = {
+ .mask = 0x00000010,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 3,
+ .pri_reg = QEIC_CIPZCC,
+ },
+ [13] = {
+ .mask = 0x00000008,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 4,
+ .pri_reg = QEIC_CIPZCC,
+ },
+ [14] = {
+ .mask = 0x00000004,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 5,
+ .pri_reg = QEIC_CIPZCC,
+ },
+ [15] = {
+ .mask = 0x00000002,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 6,
+ .pri_reg = QEIC_CIPZCC,
+ },
+ [20] = {
+ .mask = 0x10000000,
+ .mask_reg = QEIC_CRIMR,
+ .pri_code = 3,
+ .pri_reg = QEIC_CIPRTA,
+ },
+ [25] = {
+ .mask = 0x00800000,
+ .mask_reg = QEIC_CRIMR,
+ .pri_code = 0,
+ .pri_reg = QEIC_CIPRTB,
+ },
+ [26] = {
+ .mask = 0x00400000,
+ .mask_reg = QEIC_CRIMR,
+ .pri_code = 1,
+ .pri_reg = QEIC_CIPRTB,
+ },
+ [27] = {
+ .mask = 0x00200000,
+ .mask_reg = QEIC_CRIMR,
+ .pri_code = 2,
+ .pri_reg = QEIC_CIPRTB,
+ },
+ [28] = {
+ .mask = 0x00100000,
+ .mask_reg = QEIC_CRIMR,
+ .pri_code = 3,
+ .pri_reg = QEIC_CIPRTB,
+ },
+ [32] = {
+ .mask = 0x80000000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 0,
+ .pri_reg = QEIC_CIPXCC,
+ },
+ [33] = {
+ .mask = 0x40000000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 1,
+ .pri_reg = QEIC_CIPXCC,
+ },
+ [34] = {
+ .mask = 0x20000000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 2,
+ .pri_reg = QEIC_CIPXCC,
+ },
+ [35] = {
+ .mask = 0x10000000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 3,
+ .pri_reg = QEIC_CIPXCC,
+ },
+ [36] = {
+ .mask = 0x08000000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 4,
+ .pri_reg = QEIC_CIPXCC,
+ },
+ [40] = {
+ .mask = 0x00800000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 0,
+ .pri_reg = QEIC_CIPYCC,
+ },
+ [41] = {
+ .mask = 0x00400000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 1,
+ .pri_reg = QEIC_CIPYCC,
+ },
+ [42] = {
+ .mask = 0x00200000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 2,
+ .pri_reg = QEIC_CIPYCC,
+ },
+ [43] = {
+ .mask = 0x00100000,
+ .mask_reg = QEIC_CIMR,
+ .pri_code = 3,
+ .pri_reg = QEIC_CIPYCC,
+ },
+};
+
+static inline u32 qe_ic_read(__be32 __iomem *base, unsigned int reg)
+{
+ return ioread32be(base + (reg >> 2));
+}
+
+static inline void qe_ic_write(__be32 __iomem *base, unsigned int reg,
+ u32 value)
+{
+ iowrite32be(value, base + (reg >> 2));
+}
+
+static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
+{
+ return irq_get_chip_data(virq);
+}
+
+static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d)
+{
+ return irq_data_get_irq_chip_data(d);
+}
+
+static void qe_ic_unmask_irq(struct irq_data *d)
+{
+ struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
+ unsigned int src = irqd_to_hwirq(d);
+ unsigned long flags;
+ u32 temp;
+
+ raw_spin_lock_irqsave(&qe_ic_lock, flags);
+
+ temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
+ qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
+ temp | qe_ic_info[src].mask);
+
+ raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
+}
+
+static void qe_ic_mask_irq(struct irq_data *d)
+{
+ struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
+ unsigned int src = irqd_to_hwirq(d);
+ unsigned long flags;
+ u32 temp;
+
+ raw_spin_lock_irqsave(&qe_ic_lock, flags);
+
+ temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
+ qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
+ temp & ~qe_ic_info[src].mask);
+
+ /* Flush the above write before enabling interrupts; otherwise,
+ * spurious interrupts will sometimes happen. To be 100% sure
+ * that the write has reached the device before interrupts are
+ * enabled, the mask register would have to be read back; however,
+ * this is not required for correctness, only to avoid wasting
+ * time on a large number of spurious interrupts. In testing,
+ * a sync reduced the observed spurious interrupts to zero.
+ */
+ mb();
+
+ raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
+}
+
+static struct irq_chip qe_ic_irq_chip = {
+ .name = "QEIC",
+ .irq_unmask = qe_ic_unmask_irq,
+ .irq_mask = qe_ic_mask_irq,
+ .irq_mask_ack = qe_ic_mask_irq,
+};
+
+static int qe_ic_host_match(struct irq_domain *h, struct device_node *node,
+ enum irq_domain_bus_token bus_token)
+{
+ /* Exact match, unless qe_ic node is NULL */
+ struct device_node *of_node = irq_domain_get_of_node(h);
+ return of_node == NULL || of_node == node;
+}
+
+static int qe_ic_host_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct qe_ic *qe_ic = h->host_data;
+ struct irq_chip *chip;
+
+ if (hw >= ARRAY_SIZE(qe_ic_info)) {
+ pr_err("%s: Invalid hw irq number for QEIC\n", __func__);
+ return -EINVAL;
+ }
+
+ if (qe_ic_info[hw].mask == 0) {
+ printk(KERN_ERR "Can't map reserved IRQ\n");
+ return -EINVAL;
+ }
+ /* Default chip */
+ chip = &qe_ic->hc_irq;
+
+ irq_set_chip_data(virq, qe_ic);
+ irq_set_status_flags(virq, IRQ_LEVEL);
+
+ irq_set_chip_and_handler(virq, chip, handle_level_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops qe_ic_host_ops = {
+ .match = qe_ic_host_match,
+ .map = qe_ic_host_map,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+/* Return an interrupt vector or 0 if no interrupt is pending. */
+static unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
+{
+ int irq;
+
+ BUG_ON(qe_ic == NULL);
+
+ /* get the interrupt source vector. */
+ irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26;
+
+ if (irq == 0)
+ return 0;
+
+ return irq_linear_revmap(qe_ic->irqhost, irq);
+}
+
+/* Return an interrupt vector or 0 if no interrupt is pending. */
+static unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
+{
+ int irq;
+
+ BUG_ON(qe_ic == NULL);
+
+ /* get the interrupt source vector. */
+ irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26;
+
+ if (irq == 0)
+ return 0;
+
+ return irq_linear_revmap(qe_ic->irqhost, irq);
+}
+
+static void qe_ic_cascade_low(struct irq_desc *desc)
+{
+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
+ unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ if (cascade_irq != 0)
+ generic_handle_irq(cascade_irq);
+
+ if (chip->irq_eoi)
+ chip->irq_eoi(&desc->irq_data);
+}
+
+static void qe_ic_cascade_high(struct irq_desc *desc)
+{
+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
+ unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ if (cascade_irq != 0)
+ generic_handle_irq(cascade_irq);
+
+ if (chip->irq_eoi)
+ chip->irq_eoi(&desc->irq_data);
+}
+
+static void qe_ic_cascade_muxed_mpic(struct irq_desc *desc)
+{
+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
+ unsigned int cascade_irq;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ cascade_irq = qe_ic_get_high_irq(qe_ic);
+ if (cascade_irq == 0)
+ cascade_irq = qe_ic_get_low_irq(qe_ic);
+
+ if (cascade_irq != 0)
+ generic_handle_irq(cascade_irq);
+
+ chip->irq_eoi(&desc->irq_data);
+}
+
+static int qe_ic_init(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ void (*low_handler)(struct irq_desc *desc);
+ void (*high_handler)(struct irq_desc *desc);
+ struct qe_ic *qe_ic;
+ struct resource *res;
+ struct device_node *node = pdev->dev.of_node;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(dev, "no memory resource defined\n");
+ return -ENODEV;
+ }
+
+ qe_ic = devm_kzalloc(dev, sizeof(*qe_ic), GFP_KERNEL);
+ if (qe_ic == NULL)
+ return -ENOMEM;
+
+ qe_ic->regs = devm_ioremap(dev, res->start, resource_size(res));
+ if (qe_ic->regs == NULL) {
+ dev_err(dev, "failed to ioremap() registers\n");
+ return -ENODEV;
+ }
+
+ qe_ic->hc_irq = qe_ic_irq_chip;
+
+ qe_ic->virq_high = platform_get_irq(pdev, 0);
+ qe_ic->virq_low = platform_get_irq(pdev, 1);
+
+ if (qe_ic->virq_low <= 0)
+ return -ENODEV;
+
+ if (qe_ic->virq_high > 0 && qe_ic->virq_high != qe_ic->virq_low) {
+ low_handler = qe_ic_cascade_low;
+ high_handler = qe_ic_cascade_high;
+ } else {
+ low_handler = qe_ic_cascade_muxed_mpic;
+ high_handler = NULL;
+ }
+
+ qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
+ &qe_ic_host_ops, qe_ic);
+ if (qe_ic->irqhost == NULL) {
+ dev_err(dev, "failed to add irq domain\n");
+ return -ENODEV;
+ }
+
+ qe_ic_write(qe_ic->regs, QEIC_CICR, 0);
+
+ irq_set_handler_data(qe_ic->virq_low, qe_ic);
+ irq_set_chained_handler(qe_ic->virq_low, low_handler);
+
+ if (high_handler) {
+ irq_set_handler_data(qe_ic->virq_high, qe_ic);
+ irq_set_chained_handler(qe_ic->virq_high, high_handler);
+ }
+ return 0;
+}
+static const struct of_device_id qe_ic_ids[] = {
+ { .compatible = "fsl,qe-ic"},
+ { .type = "qeic"},
+ {},
+};
+
+static struct platform_driver qe_ic_driver = {
+ .driver = {
+ .name = "qe-ic",
+ .of_match_table = qe_ic_ids,
+ },
+ .probe = qe_ic_init,
+};
+
+static int __init qe_ic_of_init(void)
+{
+ platform_driver_register(&qe_ic_driver);
+ return 0;
+}
+subsys_initcall(qe_ic_of_init);
diff --git a/drivers/soc/fsl/qe/qe_io.c b/drivers/soc/fsl/qe/qe_io.c
new file mode 100644
index 0000000000..a5e2d0e5ab
--- /dev/null
+++ b/drivers/soc/fsl/qe/qe_io.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * arch/powerpc/sysdev/qe_lib/qe_io.c
+ *
+ * QE Parallel I/O ports configuration routines
+ *
+ * Copyright 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: Li Yang <LeoLi@freescale.com>
+ * Based on code from Shlomi Gridish <gridish@freescale.com>
+ */
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+
+#include <asm/io.h>
+#include <soc/fsl/qe/qe.h>
+
+#undef DEBUG
+
+static struct qe_pio_regs __iomem *par_io;
+static int num_par_io_ports = 0;
+
+int par_io_init(struct device_node *np)
+{
+ struct resource res;
+ int ret;
+ u32 num_ports;
+
+ /* Map Parallel I/O ports registers */
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret)
+ return ret;
+ par_io = ioremap(res.start, resource_size(&res));
+ if (!par_io)
+ return -ENOMEM;
+
+ if (!of_property_read_u32(np, "num-ports", &num_ports))
+ num_par_io_ports = num_ports;
+
+ return 0;
+}
+
+void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, int dir,
+ int open_drain, int assignment, int has_irq)
+{
+ u32 pin_mask1bit;
+ u32 pin_mask2bits;
+ u32 new_mask2bits;
+ u32 tmp_val;
+
+ /* calculate pin masks for the single- and two-bit register fields */
+ pin_mask1bit = (u32) (1 << (QE_PIO_PINS - (pin + 1)));
+
+ /* Set open drain, if required */
+ tmp_val = ioread32be(&par_io->cpodr);
+ if (open_drain)
+ iowrite32be(pin_mask1bit | tmp_val, &par_io->cpodr);
+ else
+ iowrite32be(~pin_mask1bit & tmp_val, &par_io->cpodr);
+
+ /* define direction */
+ tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
+ ioread32be(&par_io->cpdir2) :
+ ioread32be(&par_io->cpdir1);
+
+ /* get all bits mask for 2 bit per port */
+ pin_mask2bits = (u32) (0x3 << (QE_PIO_PINS -
+ (pin % (QE_PIO_PINS / 2) + 1) * 2));
+
+ /* Get the final mask we need for the right definition */
+ new_mask2bits = (u32) (dir << (QE_PIO_PINS -
+ (pin % (QE_PIO_PINS / 2) + 1) * 2));
+
+ /* clear and set 2 bits mask */
+ if (pin > (QE_PIO_PINS / 2) - 1) {
+ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir2);
+ tmp_val &= ~pin_mask2bits;
+ iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir2);
+ } else {
+ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir1);
+ tmp_val &= ~pin_mask2bits;
+ iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir1);
+ }
+ /* define pin assignment */
+ tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
+ ioread32be(&par_io->cppar2) :
+ ioread32be(&par_io->cppar1);
+
+ new_mask2bits = (u32) (assignment << (QE_PIO_PINS -
+ (pin % (QE_PIO_PINS / 2) + 1) * 2));
+ /* clear and set 2 bits mask */
+ if (pin > (QE_PIO_PINS / 2) - 1) {
+ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar2);
+ tmp_val &= ~pin_mask2bits;
+ iowrite32be(new_mask2bits | tmp_val, &par_io->cppar2);
+ } else {
+ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar1);
+ tmp_val &= ~pin_mask2bits;
+ iowrite32be(new_mask2bits | tmp_val, &par_io->cppar1);
+ }
+}
+EXPORT_SYMBOL(__par_io_config_pin);
+
+int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain,
+ int assignment, int has_irq)
+{
+ if (!par_io || port >= num_par_io_ports)
+ return -EINVAL;
+
+ __par_io_config_pin(&par_io[port], pin, dir, open_drain, assignment,
+ has_irq);
+ return 0;
+}
+EXPORT_SYMBOL(par_io_config_pin);
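+
+/*
+ * Editorial sketch (not part of the driver): a board-level caller would
+ * typically configure a pin as follows; the port/pin numbers and the
+ * dir/assignment values are purely illustrative and must come from the
+ * board and SoC reference manuals:
+ *
+ *	// Port C (port index 2), pin 3: output, no open drain,
+ *	// dedicated peripheral function, no interrupt
+ *	par_io_config_pin(2, 3, 1, 0, 1, 0);
+ */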
+
+int par_io_data_set(u8 port, u8 pin, u8 val)
+{
+ u32 pin_mask, tmp_val;
+
+ if (port >= num_par_io_ports)
+ return -EINVAL;
+ if (pin >= QE_PIO_PINS)
+ return -EINVAL;
+ /* calculate pin location */
+ pin_mask = (u32) (1 << (QE_PIO_PINS - 1 - pin));
+
+ tmp_val = ioread32be(&par_io[port].cpdata);
+
+ if (val == 0) /* clear */
+ iowrite32be(~pin_mask & tmp_val, &par_io[port].cpdata);
+ else /* set */
+ iowrite32be(pin_mask | tmp_val, &par_io[port].cpdata);
+
+ return 0;
+}
+EXPORT_SYMBOL(par_io_data_set);
+
+int par_io_of_config(struct device_node *np)
+{
+ struct device_node *pio;
+ int pio_map_len;
+ const __be32 *pio_map;
+
+ if (par_io == NULL) {
+ printk(KERN_ERR "par_io not initialized\n");
+ return -1;
+ }
+
+ pio = of_parse_phandle(np, "pio-handle", 0);
+ if (pio == NULL) {
+ printk(KERN_ERR "pio-handle not available\n");
+ return -1;
+ }
+
+ pio_map = of_get_property(pio, "pio-map", &pio_map_len);
+ if (pio_map == NULL) {
+ printk(KERN_ERR "pio-map is not set!\n");
+ return -1;
+ }
+ pio_map_len /= sizeof(unsigned int);
+ if ((pio_map_len % 6) != 0) {
+ printk(KERN_ERR "pio-map format wrong!\n");
+ return -1;
+ }
+
+ while (pio_map_len > 0) {
+ u8 port = be32_to_cpu(pio_map[0]);
+ u8 pin = be32_to_cpu(pio_map[1]);
+ int dir = be32_to_cpu(pio_map[2]);
+ int open_drain = be32_to_cpu(pio_map[3]);
+ int assignment = be32_to_cpu(pio_map[4]);
+ int has_irq = be32_to_cpu(pio_map[5]);
+
+ par_io_config_pin(port, pin, dir, open_drain,
+ assignment, has_irq);
+ pio_map += 6;
+ pio_map_len -= 6;
+ }
+ of_node_put(pio);
+ return 0;
+}
+EXPORT_SYMBOL(par_io_of_config);
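+
+/*
+ * Editorial note: par_io_of_config() expects the node referenced by
+ * "pio-handle" to carry a "pio-map" property made of 6-cell groups:
+ * <port pin dir open_drain assignment has_irq>. A hypothetical fragment
+ * (all values illustrative):
+ *
+ *	pio-handle = <&pio_ucc1>;
+ *	...
+ *	pio_ucc1: ucc-pin-conf {
+ *		pio-map = <0 3  1 0 1 0	  // port A pin 3, output
+ *			   0 4  2 0 1 0>; // port A pin 4, input
+ *	};
+ */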
diff --git a/drivers/soc/fsl/qe/qe_tdm.c b/drivers/soc/fsl/qe/qe_tdm.c
new file mode 100644
index 0000000000..a3b691875c
--- /dev/null
+++ b/drivers/soc/fsl/qe/qe_tdm.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2015 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors: Zhao Qiang <qiang.zhao@nxp.com>
+ *
+ * Description:
+ * QE TDM API Set - TDM specific routines implementations.
+ */
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <soc/fsl/qe/qe_tdm.h>
+
+static int set_tdm_framer(const char *tdm_framer_type)
+{
+ if (strcmp(tdm_framer_type, "e1") == 0)
+ return TDM_FRAMER_E1;
+ else if (strcmp(tdm_framer_type, "t1") == 0)
+ return TDM_FRAMER_T1;
+ else
+ return -EINVAL;
+}
+
+static void set_si_param(struct ucc_tdm *utdm, struct ucc_tdm_info *ut_info)
+{
+ struct si_mode_info *si_info = &ut_info->si_info;
+
+ if (utdm->tdm_mode == TDM_INTERNAL_LOOPBACK) {
+ si_info->simr_crt = 1;
+ si_info->simr_rfsd = 0;
+ }
+}
+
+int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
+ struct ucc_tdm_info *ut_info)
+{
+ const char *sprop;
+ int ret = 0;
+ u32 val;
+
+ sprop = of_get_property(np, "fsl,rx-sync-clock", NULL);
+ if (sprop) {
+ ut_info->uf_info.rx_sync = qe_clock_source(sprop);
+ if ((ut_info->uf_info.rx_sync < QE_CLK_NONE) ||
+ (ut_info->uf_info.rx_sync > QE_RSYNC_PIN)) {
+ pr_err("QE-TDM: Invalid rx-sync-clock property\n");
+ return -EINVAL;
+ }
+ } else {
+ pr_err("QE-TDM: Invalid rx-sync-clock property\n");
+ return -EINVAL;
+ }
+
+ sprop = of_get_property(np, "fsl,tx-sync-clock", NULL);
+ if (sprop) {
+ ut_info->uf_info.tx_sync = qe_clock_source(sprop);
+ if ((ut_info->uf_info.tx_sync < QE_CLK_NONE) ||
+ (ut_info->uf_info.tx_sync > QE_TSYNC_PIN)) {
+ pr_err("QE-TDM: Invalid tx-sync-clock property\n");
+ return -EINVAL;
+ }
+ } else {
+ pr_err("QE-TDM: Invalid tx-sync-clock property\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_index(np, "fsl,tx-timeslot-mask", 0, &val);
+ if (ret) {
+ pr_err("QE-TDM: Invalid tx-timeslot-mask property\n");
+ return -EINVAL;
+ }
+ utdm->tx_ts_mask = val;
+
+ ret = of_property_read_u32_index(np, "fsl,rx-timeslot-mask", 0, &val);
+ if (ret) {
+ ret = -EINVAL;
+ pr_err("QE-TDM: Invalid rx-timeslot-mask property\n");
+ return ret;
+ }
+ utdm->rx_ts_mask = val;
+
+ ret = of_property_read_u32_index(np, "fsl,tdm-id", 0, &val);
+ if (ret) {
+ ret = -EINVAL;
+ pr_err("QE-TDM: No fsl,tdm-id property for this UCC\n");
+ return ret;
+ }
+ utdm->tdm_port = val;
+ ut_info->uf_info.tdm_num = utdm->tdm_port;
+
+ if (of_property_read_bool(np, "fsl,tdm-internal-loopback"))
+ utdm->tdm_mode = TDM_INTERNAL_LOOPBACK;
+ else
+ utdm->tdm_mode = TDM_NORMAL;
+
+ sprop = of_get_property(np, "fsl,tdm-framer-type", NULL);
+ if (!sprop) {
+ ret = -EINVAL;
+ pr_err("QE-TDM: No tdm-framer-type property for UCC\n");
+ return ret;
+ }
+ ret = set_tdm_framer(sprop);
+ if (ret < 0)
+ return -EINVAL;
+ utdm->tdm_framer_type = ret;
+
+ ret = of_property_read_u32_index(np, "fsl,siram-entry-id", 0, &val);
+ if (ret) {
+ ret = -EINVAL;
+ pr_err("QE-TDM: No siram entry id for UCC\n");
+ return ret;
+ }
+ utdm->siram_entry_id = val;
+
+ set_si_param(utdm, ut_info);
+ return ret;
+}
+EXPORT_SYMBOL(ucc_of_parse_tdm);
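+
+/*
+ * Editorial note: ucc_of_parse_tdm() consumes the properties below from the
+ * UCC node. A hypothetical fragment (clock names and mask values are only
+ * illustrative):
+ *
+ *	fsl,rx-sync-clock = "rsync_pin";
+ *	fsl,tx-sync-clock = "tsync_pin";
+ *	fsl,tx-timeslot-mask = <0xfffffffe>;
+ *	fsl,rx-timeslot-mask = <0xfffffffe>;
+ *	fsl,tdm-id = <0>;
+ *	fsl,tdm-framer-type = "e1";	// or "t1"
+ *	fsl,siram-entry-id = <0>;
+ *	// optional: fsl,tdm-internal-loopback;
+ */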
+
+void ucc_tdm_init(struct ucc_tdm *utdm, struct ucc_tdm_info *ut_info)
+{
+ struct si1 __iomem *si_regs;
+ u16 __iomem *siram;
+ u16 siram_entry_valid;
+ u16 siram_entry_closed;
+ u16 ucc_num;
+ u8 csel;
+ u16 sixmr;
+ u16 tdm_port;
+ u32 siram_entry_id;
+ u32 mask;
+ int i;
+
+ si_regs = utdm->si_regs;
+ siram = utdm->siram;
+ ucc_num = ut_info->uf_info.ucc_num;
+ tdm_port = utdm->tdm_port;
+ siram_entry_id = utdm->siram_entry_id;
+
+ if (utdm->tdm_framer_type == TDM_FRAMER_T1)
+ utdm->num_of_ts = 24;
+ if (utdm->tdm_framer_type == TDM_FRAMER_E1)
+ utdm->num_of_ts = 32;
+
+ /* set siram table */
+ csel = (ucc_num < 4) ? ucc_num + 9 : ucc_num - 3;
+
+ siram_entry_valid = SIR_CSEL(csel) | SIR_BYTE | SIR_CNT(0);
+ siram_entry_closed = SIR_IDLE | SIR_BYTE | SIR_CNT(0);
+
+ for (i = 0; i < utdm->num_of_ts; i++) {
+ mask = 0x01 << i;
+
+ if (utdm->tx_ts_mask & mask)
+ iowrite16be(siram_entry_valid,
+ &siram[siram_entry_id * 32 + i]);
+ else
+ iowrite16be(siram_entry_closed,
+ &siram[siram_entry_id * 32 + i]);
+
+ if (utdm->rx_ts_mask & mask)
+ iowrite16be(siram_entry_valid,
+ &siram[siram_entry_id * 32 + 0x200 + i]);
+ else
+ iowrite16be(siram_entry_closed,
+ &siram[siram_entry_id * 32 + 0x200 + i]);
+ }
+
+ qe_setbits_be16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)],
+ SIR_LAST);
+ qe_setbits_be16(&siram[(siram_entry_id * 32) + 0x200 + (utdm->num_of_ts - 1)],
+ SIR_LAST);
+
+ /* Set SIxMR register */
+ sixmr = SIMR_SAD(siram_entry_id);
+
+ sixmr &= ~SIMR_SDM_MASK;
+
+ if (utdm->tdm_mode == TDM_INTERNAL_LOOPBACK)
+ sixmr |= SIMR_SDM_INTERNAL_LOOPBACK;
+ else
+ sixmr |= SIMR_SDM_NORMAL;
+
+ sixmr |= SIMR_RFSD(ut_info->si_info.simr_rfsd) |
+ SIMR_TFSD(ut_info->si_info.simr_tfsd);
+
+ if (ut_info->si_info.simr_crt)
+ sixmr |= SIMR_CRT;
+ if (ut_info->si_info.simr_sl)
+ sixmr |= SIMR_SL;
+ if (ut_info->si_info.simr_ce)
+ sixmr |= SIMR_CE;
+ if (ut_info->si_info.simr_fe)
+ sixmr |= SIMR_FE;
+ if (ut_info->si_info.simr_gm)
+ sixmr |= SIMR_GM;
+
+ switch (tdm_port) {
+ case 0:
+ iowrite16be(sixmr, &si_regs->sixmr1[0]);
+ break;
+ case 1:
+ iowrite16be(sixmr, &si_regs->sixmr1[1]);
+ break;
+ case 2:
+ iowrite16be(sixmr, &si_regs->sixmr1[2]);
+ break;
+ case 3:
+ iowrite16be(sixmr, &si_regs->sixmr1[3]);
+ break;
+ default:
+ pr_err("QE-TDM: can not find tdm sixmr reg\n");
+ break;
+ }
+}
+EXPORT_SYMBOL(ucc_tdm_init);
diff --git a/drivers/soc/fsl/qe/qmc.c b/drivers/soc/fsl/qe/qmc.c
new file mode 100644
index 0000000000..8dc73cc1a8
--- /dev/null
+++ b/drivers/soc/fsl/qe/qmc.c
@@ -0,0 +1,1536 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * QMC driver
+ *
+ * Copyright 2022 CS GROUP France
+ *
+ * Author: Herve Codina <herve.codina@bootlin.com>
+ */
+
+#include <soc/fsl/qe/qmc.h>
+#include <linux/dma-mapping.h>
+#include <linux/hdlc.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <soc/fsl/cpm.h>
+#include <sysdev/fsl_soc.h>
+#include "tsa.h"
+
+/* SCC general mode register low (32 bits) */
+#define SCC_GSMRL 0x00
+#define SCC_GSMRL_ENR (1 << 5)
+#define SCC_GSMRL_ENT (1 << 4)
+#define SCC_GSMRL_MODE_QMC (0x0A << 0)
+
+/* SCC general mode register high (32 bits) */
+#define SCC_GSMRH 0x04
+#define SCC_GSMRH_CTSS (1 << 7)
+#define SCC_GSMRH_CDS (1 << 8)
+#define SCC_GSMRH_CTSP (1 << 9)
+#define SCC_GSMRH_CDP (1 << 10)
+
+/* SCC event register (16 bits) */
+#define SCC_SCCE 0x10
+#define SCC_SCCE_IQOV (1 << 3)
+#define SCC_SCCE_GINT (1 << 2)
+#define SCC_SCCE_GUN (1 << 1)
+#define SCC_SCCE_GOV (1 << 0)
+
+/* SCC mask register (16 bits) */
+#define SCC_SCCM 0x14
+/* Multichannel base pointer (32 bits) */
+#define QMC_GBL_MCBASE 0x00
+/* Multichannel controller state (16 bits) */
+#define QMC_GBL_QMCSTATE 0x04
+/* Maximum receive buffer length (16 bits) */
+#define QMC_GBL_MRBLR 0x06
+/* Tx time-slot assignment table pointer (16 bits) */
+#define QMC_GBL_TX_S_PTR 0x08
+/* Rx pointer (16 bits) */
+#define QMC_GBL_RXPTR 0x0A
+/* Global receive frame threshold (16 bits) */
+#define QMC_GBL_GRFTHR 0x0C
+/* Global receive frame count (16 bits) */
+#define QMC_GBL_GRFCNT 0x0E
+/* Multichannel interrupt base address (32 bits) */
+#define QMC_GBL_INTBASE 0x10
+/* Multichannel interrupt pointer (32 bits) */
+#define QMC_GBL_INTPTR 0x14
+/* Rx time-slot assignment table pointer (16 bits) */
+#define QMC_GBL_RX_S_PTR 0x18
+/* Tx pointer (16 bits) */
+#define QMC_GBL_TXPTR 0x1A
+/* CRC constant (32 bits) */
+#define QMC_GBL_C_MASK32 0x1C
+/* Time slot assignment table Rx (32 x 16 bits) */
+#define QMC_GBL_TSATRX 0x20
+/* Time slot assignment table Tx (32 x 16 bits) */
+#define QMC_GBL_TSATTX 0x60
+/* CRC constant (16 bits) */
+#define QMC_GBL_C_MASK16 0xA0
+
+/* TSA entry (16bit entry in TSATRX and TSATTX) */
+#define QMC_TSA_VALID (1 << 15)
+#define QMC_TSA_WRAP (1 << 14)
+#define QMC_TSA_MASK (0x303F)
+#define QMC_TSA_CHANNEL(x) ((x) << 6)
+
+/* Tx buffer descriptor base address (16 bits, offset from MCBASE) */
+#define QMC_SPE_TBASE 0x00
+
+/* Channel mode register (16 bits) */
+#define QMC_SPE_CHAMR 0x02
+#define QMC_SPE_CHAMR_MODE_HDLC (1 << 15)
+#define QMC_SPE_CHAMR_MODE_TRANSP ((0 << 15) | (1 << 13))
+#define QMC_SPE_CHAMR_ENT (1 << 12)
+#define QMC_SPE_CHAMR_POL (1 << 8)
+#define QMC_SPE_CHAMR_HDLC_IDLM (1 << 13)
+#define QMC_SPE_CHAMR_HDLC_CRC (1 << 7)
+#define QMC_SPE_CHAMR_HDLC_NOF (0x0f << 0)
+#define QMC_SPE_CHAMR_TRANSP_RD (1 << 14)
+#define QMC_SPE_CHAMR_TRANSP_SYNC (1 << 10)
+
+/* Tx internal state (32 bits) */
+#define QMC_SPE_TSTATE 0x04
+/* Tx buffer descriptor pointer (16 bits) */
+#define QMC_SPE_TBPTR 0x0C
+/* Zero-insertion state (32 bits) */
+#define QMC_SPE_ZISTATE 0x14
+/* Channel's interrupt mask flags (16 bits) */
+#define QMC_SPE_INTMSK 0x1C
+/* Rx buffer descriptor base address (16 bits, offset from MCBASE) */
+#define QMC_SPE_RBASE 0x20
+/* HDLC: Maximum frame length register (16 bits) */
+#define QMC_SPE_MFLR 0x22
+/* TRANSPARENT: Transparent maximum receive length (16 bits) */
+#define QMC_SPE_TMRBLR 0x22
+/* Rx internal state (32 bits) */
+#define QMC_SPE_RSTATE 0x24
+/* Rx buffer descriptor pointer (16 bits) */
+#define QMC_SPE_RBPTR 0x2C
+/* Packs 4 bytes to 1 long word before writing to buffer (32 bits) */
+#define QMC_SPE_RPACK 0x30
+/* Zero deletion state (32 bits) */
+#define QMC_SPE_ZDSTATE 0x34
+
+/* Transparent synchronization (16 bits) */
+#define QMC_SPE_TRNSYNC 0x3C
+#define QMC_SPE_TRNSYNC_RX(x) ((x) << 8)
+#define QMC_SPE_TRNSYNC_TX(x) ((x) << 0)
+
+/* Interrupt related registers bits */
+#define QMC_INT_V (1 << 15)
+#define QMC_INT_W (1 << 14)
+#define QMC_INT_NID (1 << 13)
+#define QMC_INT_IDL (1 << 12)
+#define QMC_INT_GET_CHANNEL(x) (((x) & 0x0FC0) >> 6)
+#define QMC_INT_MRF (1 << 5)
+#define QMC_INT_UN (1 << 4)
+#define QMC_INT_RXF (1 << 3)
+#define QMC_INT_BSY (1 << 2)
+#define QMC_INT_TXB (1 << 1)
+#define QMC_INT_RXB (1 << 0)
+
+/* BD related registers bits */
+#define QMC_BD_RX_E (1 << 15)
+#define QMC_BD_RX_W (1 << 13)
+#define QMC_BD_RX_I (1 << 12)
+#define QMC_BD_RX_L (1 << 11)
+#define QMC_BD_RX_F (1 << 10)
+#define QMC_BD_RX_CM (1 << 9)
+#define QMC_BD_RX_UB (1 << 7)
+#define QMC_BD_RX_LG (1 << 5)
+#define QMC_BD_RX_NO (1 << 4)
+#define QMC_BD_RX_AB (1 << 3)
+#define QMC_BD_RX_CR (1 << 2)
+
+#define QMC_BD_TX_R (1 << 15)
+#define QMC_BD_TX_W (1 << 13)
+#define QMC_BD_TX_I (1 << 12)
+#define QMC_BD_TX_L (1 << 11)
+#define QMC_BD_TX_TC (1 << 10)
+#define QMC_BD_TX_CM (1 << 9)
+#define QMC_BD_TX_UB (1 << 7)
+#define QMC_BD_TX_PAD (0x0f << 0)
+
+/* Numbers of BDs and interrupt items */
+#define QMC_NB_TXBDS 8
+#define QMC_NB_RXBDS 8
+#define QMC_NB_INTS 128
+
+struct qmc_xfer_desc {
+ union {
+ void (*tx_complete)(void *context);
+ void (*rx_complete)(void *context, size_t length);
+ };
+ void *context;
+};
+
+struct qmc_chan {
+ struct list_head list;
+ unsigned int id;
+ struct qmc *qmc;
+ void __iomem *s_param;
+ enum qmc_mode mode;
+ u64 tx_ts_mask;
+ u64 rx_ts_mask;
+ bool is_reverse_data;
+
+ spinlock_t tx_lock;
+ cbd_t __iomem *txbds;
+ cbd_t __iomem *txbd_free;
+ cbd_t __iomem *txbd_done;
+ struct qmc_xfer_desc tx_desc[QMC_NB_TXBDS];
+ u64 nb_tx_underrun;
+ bool is_tx_stopped;
+
+ spinlock_t rx_lock;
+ cbd_t __iomem *rxbds;
+ cbd_t __iomem *rxbd_free;
+ cbd_t __iomem *rxbd_done;
+ struct qmc_xfer_desc rx_desc[QMC_NB_RXBDS];
+ u64 nb_rx_busy;
+ int rx_pending;
+ bool is_rx_halted;
+ bool is_rx_stopped;
+};
+
+struct qmc {
+ struct device *dev;
+ struct tsa_serial *tsa_serial;
+ void __iomem *scc_regs;
+ void __iomem *scc_pram;
+ void __iomem *dpram;
+ u16 scc_pram_offset;
+ cbd_t __iomem *bd_table;
+ dma_addr_t bd_dma_addr;
+ size_t bd_size;
+ u16 __iomem *int_table;
+ u16 __iomem *int_curr;
+ dma_addr_t int_dma_addr;
+ size_t int_size;
+ struct list_head chan_head;
+ struct qmc_chan *chans[64];
+};
+
+static inline void qmc_write16(void __iomem *addr, u16 val)
+{
+ iowrite16be(val, addr);
+}
+
+static inline u16 qmc_read16(void __iomem *addr)
+{
+ return ioread16be(addr);
+}
+
+static inline void qmc_setbits16(void __iomem *addr, u16 set)
+{
+ qmc_write16(addr, qmc_read16(addr) | set);
+}
+
+static inline void qmc_clrbits16(void __iomem *addr, u16 clr)
+{
+ qmc_write16(addr, qmc_read16(addr) & ~clr);
+}
+
+static inline void qmc_write32(void __iomem *addr, u32 val)
+{
+ iowrite32be(val, addr);
+}
+
+static inline u32 qmc_read32(void __iomem *addr)
+{
+ return ioread32be(addr);
+}
+
+static inline void qmc_setbits32(void __iomem *addr, u32 set)
+{
+ qmc_write32(addr, qmc_read32(addr) | set);
+}
+
+int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info)
+{
+ struct tsa_serial_info tsa_info;
+ int ret;
+
+ /* Retrieve info from the TSA related serial */
+ ret = tsa_serial_get_info(chan->qmc->tsa_serial, &tsa_info);
+ if (ret)
+ return ret;
+
+ info->mode = chan->mode;
+ info->rx_fs_rate = tsa_info.rx_fs_rate;
+ info->rx_bit_rate = tsa_info.rx_bit_rate;
+ info->nb_tx_ts = hweight64(chan->tx_ts_mask);
+ info->tx_fs_rate = tsa_info.tx_fs_rate;
+ info->tx_bit_rate = tsa_info.tx_bit_rate;
+ info->nb_rx_ts = hweight64(chan->rx_ts_mask);
+
+ return 0;
+}
+EXPORT_SYMBOL(qmc_chan_get_info);
+
+int qmc_chan_set_param(struct qmc_chan *chan, const struct qmc_chan_param *param)
+{
+ if (param->mode != chan->mode)
+ return -EINVAL;
+
+ switch (param->mode) {
+ case QMC_HDLC:
+ if ((param->hdlc.max_rx_buf_size % 4) ||
+ (param->hdlc.max_rx_buf_size < 8))
+ return -EINVAL;
+
+ qmc_write16(chan->qmc->scc_pram + QMC_GBL_MRBLR,
+ param->hdlc.max_rx_buf_size - 8);
+ qmc_write16(chan->s_param + QMC_SPE_MFLR,
+ param->hdlc.max_rx_frame_size);
+ if (param->hdlc.is_crc32) {
+ qmc_setbits16(chan->s_param + QMC_SPE_CHAMR,
+ QMC_SPE_CHAMR_HDLC_CRC);
+ } else {
+ qmc_clrbits16(chan->s_param + QMC_SPE_CHAMR,
+ QMC_SPE_CHAMR_HDLC_CRC);
+ }
+ break;
+
+ case QMC_TRANSPARENT:
+ qmc_write16(chan->s_param + QMC_SPE_TMRBLR,
+ param->transp.max_rx_buf_size);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(qmc_chan_set_param);
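+
+/*
+ * Editorial sketch (hypothetical values, not part of the driver): an HDLC
+ * consumer would typically size its buffers before starting the channel:
+ *
+ *	struct qmc_chan_param p = {
+ *		.mode = QMC_HDLC,
+ *		.hdlc.max_rx_buf_size = 1536,	// multiple of 4, >= 8
+ *		.hdlc.max_rx_frame_size = 1518,
+ *		.hdlc.is_crc32 = false,
+ *	};
+ *
+ *	ret = qmc_chan_set_param(chan, &p);
+ */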
+
+int qmc_chan_write_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
+ void (*complete)(void *context), void *context)
+{
+ struct qmc_xfer_desc *xfer_desc;
+ unsigned long flags;
+ cbd_t __iomem *bd;
+ u16 ctrl;
+ int ret;
+
+ /*
+ * R bit UB bit
+ * 0 0 : The BD is free
+ * 1 1 : The BD is in use, waiting for transfer
+ * 0 1 : The BD is in use, waiting for completion
+ * 1 0 : Should not happen
+ */
+
+ spin_lock_irqsave(&chan->tx_lock, flags);
+ bd = chan->txbd_free;
+
+ ctrl = qmc_read16(&bd->cbd_sc);
+ if (ctrl & (QMC_BD_TX_R | QMC_BD_TX_UB)) {
+ /* We are full ... */
+ ret = -EBUSY;
+ goto end;
+ }
+
+ qmc_write16(&bd->cbd_datlen, length);
+ qmc_write32(&bd->cbd_bufaddr, addr);
+
+ xfer_desc = &chan->tx_desc[bd - chan->txbds];
+ xfer_desc->tx_complete = complete;
+ xfer_desc->context = context;
+
+ /* Activate the descriptor */
+ ctrl |= (QMC_BD_TX_R | QMC_BD_TX_UB);
+ wmb(); /* Be sure to flush the descriptor before control update */
+ qmc_write16(&bd->cbd_sc, ctrl);
+
+ if (!chan->is_tx_stopped)
+ qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_POL);
+
+ if (ctrl & QMC_BD_TX_W)
+ chan->txbd_free = chan->txbds;
+ else
+ chan->txbd_free++;
+
+ ret = 0;
+
+end:
+ spin_unlock_irqrestore(&chan->tx_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(qmc_chan_write_submit);
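+
+/*
+ * Editorial sketch (hypothetical consumer, not part of the driver): the
+ * caller owns the DMA mapping and is called back once the BD has been
+ * transmitted:
+ *
+ *	static void my_tx_done(void *context)
+ *	{
+ *		// release or recycle the buffer identified by context
+ *	}
+ *	...
+ *	ret = qmc_chan_write_submit(chan, dma_addr, len, my_tx_done, buf);
+ *	if (ret == -EBUSY)
+ *		; // all Tx BDs are in flight, retry after a completion
+ */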
+
+static void qmc_chan_write_done(struct qmc_chan *chan)
+{
+ struct qmc_xfer_desc *xfer_desc;
+ void (*complete)(void *context);
+ unsigned long flags;
+ void *context;
+ cbd_t __iomem *bd;
+ u16 ctrl;
+
+ /*
+ * R bit UB bit
+ * 0 0 : The BD is free
+ * 1 1 : The BD is in use, waiting for transfer
+ * 0 1 : The BD is in use, waiting for completion
+ * 1 0 : Should not happen
+ */
+
+ spin_lock_irqsave(&chan->tx_lock, flags);
+ bd = chan->txbd_done;
+
+ ctrl = qmc_read16(&bd->cbd_sc);
+ while (!(ctrl & QMC_BD_TX_R)) {
+ if (!(ctrl & QMC_BD_TX_UB))
+ goto end;
+
+ xfer_desc = &chan->tx_desc[bd - chan->txbds];
+ complete = xfer_desc->tx_complete;
+ context = xfer_desc->context;
+ xfer_desc->tx_complete = NULL;
+ xfer_desc->context = NULL;
+
+ qmc_write16(&bd->cbd_sc, ctrl & ~QMC_BD_TX_UB);
+
+ if (ctrl & QMC_BD_TX_W)
+ chan->txbd_done = chan->txbds;
+ else
+ chan->txbd_done++;
+
+ if (complete) {
+ spin_unlock_irqrestore(&chan->tx_lock, flags);
+ complete(context);
+ spin_lock_irqsave(&chan->tx_lock, flags);
+ }
+
+ bd = chan->txbd_done;
+ ctrl = qmc_read16(&bd->cbd_sc);
+ }
+
+end:
+ spin_unlock_irqrestore(&chan->tx_lock, flags);
+}
+
+int qmc_chan_read_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
+ void (*complete)(void *context, size_t length), void *context)
+{
+ struct qmc_xfer_desc *xfer_desc;
+ unsigned long flags;
+ cbd_t __iomem *bd;
+ u16 ctrl;
+ int ret;
+
+ /*
+ * E bit UB bit
+ * 0 0 : The BD is free
+ * 1 1 : The BD is in use, waiting for transfer
+ * 0 1 : The BD is in use, waiting for completion
+ * 1 0 : Should not happen
+ */
+
+ spin_lock_irqsave(&chan->rx_lock, flags);
+ bd = chan->rxbd_free;
+
+ ctrl = qmc_read16(&bd->cbd_sc);
+ if (ctrl & (QMC_BD_RX_E | QMC_BD_RX_UB)) {
+ /* We are full ... */
+ ret = -EBUSY;
+ goto end;
+ }
+
+ qmc_write16(&bd->cbd_datlen, 0); /* data length is updated by the QMC */
+ qmc_write32(&bd->cbd_bufaddr, addr);
+
+ xfer_desc = &chan->rx_desc[bd - chan->rxbds];
+ xfer_desc->rx_complete = complete;
+ xfer_desc->context = context;
+
+ /* Activate the descriptor */
+ ctrl |= (QMC_BD_RX_E | QMC_BD_RX_UB);
+ wmb(); /* Be sure to flush data before descriptor activation */
+ qmc_write16(&bd->cbd_sc, ctrl);
+
+ /* Restart receiver if needed */
+ if (chan->is_rx_halted && !chan->is_rx_stopped) {
+ /* Restart receiver */
+ if (chan->mode == QMC_TRANSPARENT)
+ qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
+ else
+ qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
+ qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
+ chan->is_rx_halted = false;
+ }
+ chan->rx_pending++;
+
+ if (ctrl & QMC_BD_RX_W)
+ chan->rxbd_free = chan->rxbds;
+ else
+ chan->rxbd_free++;
+
+ ret = 0;
+end:
+ spin_unlock_irqrestore(&chan->rx_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(qmc_chan_read_submit);
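+
+/*
+ * Editorial sketch (hypothetical consumer): Rx buffers are pre-posted and the
+ * callback reports the length actually written by the QMC:
+ *
+ *	static void my_rx_done(void *context, size_t length)
+ *	{
+ *		// hand the first 'length' bytes of the buffer to the upper layer
+ *	}
+ *	...
+ *	ret = qmc_chan_read_submit(chan, dma_addr, bufsize, my_rx_done, buf);
+ */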
+
+static void qmc_chan_read_done(struct qmc_chan *chan)
+{
+ void (*complete)(void *context, size_t size);
+ struct qmc_xfer_desc *xfer_desc;
+ unsigned long flags;
+ cbd_t __iomem *bd;
+ void *context;
+ u16 datalen;
+ u16 ctrl;
+
+ /*
+ * E bit UB bit
+ * 0 0 : The BD is free
+ * 1 1 : The BD is in use, waiting for transfer
+ * 0 1 : The BD is in use, waiting for completion
+ * 1 0 : Should not happen
+ */
+
+ spin_lock_irqsave(&chan->rx_lock, flags);
+ bd = chan->rxbd_done;
+
+ ctrl = qmc_read16(&bd->cbd_sc);
+ while (!(ctrl & QMC_BD_RX_E)) {
+ if (!(ctrl & QMC_BD_RX_UB))
+ goto end;
+
+ xfer_desc = &chan->rx_desc[bd - chan->rxbds];
+ complete = xfer_desc->rx_complete;
+ context = xfer_desc->context;
+ xfer_desc->rx_complete = NULL;
+ xfer_desc->context = NULL;
+
+ datalen = qmc_read16(&bd->cbd_datlen);
+ qmc_write16(&bd->cbd_sc, ctrl & ~QMC_BD_RX_UB);
+
+ if (ctrl & QMC_BD_RX_W)
+ chan->rxbd_done = chan->rxbds;
+ else
+ chan->rxbd_done++;
+
+ chan->rx_pending--;
+
+ if (complete) {
+ spin_unlock_irqrestore(&chan->rx_lock, flags);
+ complete(context, datalen);
+ spin_lock_irqsave(&chan->rx_lock, flags);
+ }
+
+ bd = chan->rxbd_done;
+ ctrl = qmc_read16(&bd->cbd_sc);
+ }
+
+end:
+ spin_unlock_irqrestore(&chan->rx_lock, flags);
+}
+
+static int qmc_chan_command(struct qmc_chan *chan, u8 qmc_opcode)
+{
+ return cpm_command(chan->id << 2, (qmc_opcode << 4) | 0x0E);
+}
+
+static int qmc_chan_stop_rx(struct qmc_chan *chan)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&chan->rx_lock, flags);
+
+ /* Send STOP RECEIVE command */
+ ret = qmc_chan_command(chan, 0x0);
+ if (ret) {
+ dev_err(chan->qmc->dev, "chan %u: Send STOP RECEIVE failed (%d)\n",
+ chan->id, ret);
+ goto end;
+ }
+
+ chan->is_rx_stopped = true;
+
+end:
+ spin_unlock_irqrestore(&chan->rx_lock, flags);
+ return ret;
+}
+
+static int qmc_chan_stop_tx(struct qmc_chan *chan)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&chan->tx_lock, flags);
+
+ /* Send STOP TRANSMIT command */
+ ret = qmc_chan_command(chan, 0x1);
+ if (ret) {
+ dev_err(chan->qmc->dev, "chan %u: Send STOP TRANSMIT failed (%d)\n",
+ chan->id, ret);
+ goto end;
+ }
+
+ chan->is_tx_stopped = true;
+
+end:
+ spin_unlock_irqrestore(&chan->tx_lock, flags);
+ return ret;
+}
+
+int qmc_chan_stop(struct qmc_chan *chan, int direction)
+{
+ int ret;
+
+ if (direction & QMC_CHAN_READ) {
+ ret = qmc_chan_stop_rx(chan);
+ if (ret)
+ return ret;
+ }
+
+ if (direction & QMC_CHAN_WRITE) {
+ ret = qmc_chan_stop_tx(chan);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(qmc_chan_stop);
+
+static void qmc_chan_start_rx(struct qmc_chan *chan)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->rx_lock, flags);
+
+ /* Restart the receiver */
+ if (chan->mode == QMC_TRANSPARENT)
+ qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
+ else
+ qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
+ qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
+ chan->is_rx_halted = false;
+
+ chan->is_rx_stopped = false;
+
+ spin_unlock_irqrestore(&chan->rx_lock, flags);
+}
+
+static void qmc_chan_start_tx(struct qmc_chan *chan)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->tx_lock, flags);
+
+ /*
+ * Enable channel transmitter as it could be disabled if
+ * qmc_chan_reset() was called.
+ */
+ qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_ENT);
+
+ /* Set the POL bit in the channel mode register */
+ qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_POL);
+
+ chan->is_tx_stopped = false;
+
+ spin_unlock_irqrestore(&chan->tx_lock, flags);
+}
+
+int qmc_chan_start(struct qmc_chan *chan, int direction)
+{
+ if (direction & QMC_CHAN_READ)
+ qmc_chan_start_rx(chan);
+
+ if (direction & QMC_CHAN_WRITE)
+ qmc_chan_start_tx(chan);
+
+ return 0;
+}
+EXPORT_SYMBOL(qmc_chan_start);
+
+static void qmc_chan_reset_rx(struct qmc_chan *chan)
+{
+ struct qmc_xfer_desc *xfer_desc;
+ unsigned long flags;
+ cbd_t __iomem *bd;
+ u16 ctrl;
+
+ spin_lock_irqsave(&chan->rx_lock, flags);
+ bd = chan->rxbds;
+ do {
+ ctrl = qmc_read16(&bd->cbd_sc);
+ qmc_write16(&bd->cbd_sc, ctrl & ~(QMC_BD_RX_UB | QMC_BD_RX_E));
+
+ xfer_desc = &chan->rx_desc[bd - chan->rxbds];
+ xfer_desc->rx_complete = NULL;
+ xfer_desc->context = NULL;
+
+ bd++;
+ } while (!(ctrl & QMC_BD_RX_W));
+
+ chan->rxbd_free = chan->rxbds;
+ chan->rxbd_done = chan->rxbds;
+ qmc_write16(chan->s_param + QMC_SPE_RBPTR,
+ qmc_read16(chan->s_param + QMC_SPE_RBASE));
+
+ chan->rx_pending = 0;
+
+ spin_unlock_irqrestore(&chan->rx_lock, flags);
+}
+
+static void qmc_chan_reset_tx(struct qmc_chan *chan)
+{
+ struct qmc_xfer_desc *xfer_desc;
+ unsigned long flags;
+ cbd_t __iomem *bd;
+ u16 ctrl;
+
+ spin_lock_irqsave(&chan->tx_lock, flags);
+
+ /* Disable the transmitter. It will be re-enabled in qmc_chan_start() */
+ qmc_clrbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_ENT);
+
+ bd = chan->txbds;
+ do {
+ ctrl = qmc_read16(&bd->cbd_sc);
+ qmc_write16(&bd->cbd_sc, ctrl & ~(QMC_BD_TX_UB | QMC_BD_TX_R));
+
+ xfer_desc = &chan->tx_desc[bd - chan->txbds];
+ xfer_desc->tx_complete = NULL;
+ xfer_desc->context = NULL;
+
+ bd++;
+ } while (!(ctrl & QMC_BD_TX_W));
+
+ chan->txbd_free = chan->txbds;
+ chan->txbd_done = chan->txbds;
+ qmc_write16(chan->s_param + QMC_SPE_TBPTR,
+ qmc_read16(chan->s_param + QMC_SPE_TBASE));
+
+ /* Reset TSTATE and ZISTATE to their initial value */
+ qmc_write32(chan->s_param + QMC_SPE_TSTATE, 0x30000000);
+ qmc_write32(chan->s_param + QMC_SPE_ZISTATE, 0x00000100);
+
+ spin_unlock_irqrestore(&chan->tx_lock, flags);
+}
+
+int qmc_chan_reset(struct qmc_chan *chan, int direction)
+{
+ if (direction & QMC_CHAN_READ)
+ qmc_chan_reset_rx(chan);
+
+ if (direction & QMC_CHAN_WRITE)
+ qmc_chan_reset_tx(chan);
+
+ return 0;
+}
+EXPORT_SYMBOL(qmc_chan_reset);
+
+static int qmc_check_chans(struct qmc *qmc)
+{
+ struct tsa_serial_info info;
+ bool is_one_table = false;
+ struct qmc_chan *chan;
+ u64 tx_ts_mask = 0;
+ u64 rx_ts_mask = 0;
+ u64 tx_ts_assigned_mask;
+ u64 rx_ts_assigned_mask;
+ int ret;
+
+ /* Retrieve info from the TSA related serial */
+ ret = tsa_serial_get_info(qmc->tsa_serial, &info);
+ if (ret)
+ return ret;
+
+ if ((info.nb_tx_ts > 64) || (info.nb_rx_ts > 64)) {
+ dev_err(qmc->dev, "Number of TSA Tx/Rx TS assigned not supported\n");
+ return -EINVAL;
+ }
+
+ /*
+ * If more than 32 TS are assigned to this serial, one common table is
+ * used for Tx and Rx and so masks must be equal for all channels.
+ */
+ if ((info.nb_tx_ts > 32) || (info.nb_rx_ts > 32)) {
+ if (info.nb_tx_ts != info.nb_rx_ts) {
+ dev_err(qmc->dev, "Number of TSA Tx/Rx TS assigned are not equal\n");
+ return -EINVAL;
+ }
+ is_one_table = true;
+ }
+
+ tx_ts_assigned_mask = info.nb_tx_ts == 64 ? U64_MAX : (((u64)1) << info.nb_tx_ts) - 1;
+ rx_ts_assigned_mask = info.nb_rx_ts == 64 ? U64_MAX : (((u64)1) << info.nb_rx_ts) - 1;
+
+ list_for_each_entry(chan, &qmc->chan_head, list) {
+ if (chan->tx_ts_mask > tx_ts_assigned_mask) {
+ dev_err(qmc->dev, "chan %u uses TSA unassigned Tx TS\n", chan->id);
+ return -EINVAL;
+ }
+ if (tx_ts_mask & chan->tx_ts_mask) {
+ dev_err(qmc->dev, "chan %u uses an already used Tx TS\n", chan->id);
+ return -EINVAL;
+ }
+
+ if (chan->rx_ts_mask > rx_ts_assigned_mask) {
+ dev_err(qmc->dev, "chan %u uses TSA unassigned Rx TS\n", chan->id);
+ return -EINVAL;
+ }
+ if (rx_ts_mask & chan->rx_ts_mask) {
+ dev_err(qmc->dev, "chan %u uses an already used Rx TS\n", chan->id);
+ return -EINVAL;
+ }
+
+ if (is_one_table && (chan->tx_ts_mask != chan->rx_ts_mask)) {
+ dev_err(qmc->dev, "chan %u uses different Rx and Tx TS\n", chan->id);
+ return -EINVAL;
+ }
+
+ tx_ts_mask |= chan->tx_ts_mask;
+ rx_ts_mask |= chan->rx_ts_mask;
+ }
+
+ return 0;
+}
+
+static unsigned int qmc_nb_chans(struct qmc *qmc)
+{
+ unsigned int count = 0;
+ struct qmc_chan *chan;
+
+ list_for_each_entry(chan, &qmc->chan_head, list)
+ count++;
+
+ return count;
+}
+
+static int qmc_of_parse_chans(struct qmc *qmc, struct device_node *np)
+{
+ struct device_node *chan_np;
+ struct qmc_chan *chan;
+ const char *mode;
+ u32 chan_id;
+ u64 ts_mask;
+ int ret;
+
+ for_each_available_child_of_node(np, chan_np) {
+ ret = of_property_read_u32(chan_np, "reg", &chan_id);
+ if (ret) {
+ dev_err(qmc->dev, "%pOF: failed to read reg\n", chan_np);
+ of_node_put(chan_np);
+ return ret;
+ }
+ if (chan_id > 63) {
+ dev_err(qmc->dev, "%pOF: Invalid chan_id\n", chan_np);
+ of_node_put(chan_np);
+ return -EINVAL;
+ }
+
+ chan = devm_kzalloc(qmc->dev, sizeof(*chan), GFP_KERNEL);
+ if (!chan) {
+ of_node_put(chan_np);
+ return -ENOMEM;
+ }
+
+ chan->id = chan_id;
+ spin_lock_init(&chan->rx_lock);
+ spin_lock_init(&chan->tx_lock);
+
+ ret = of_property_read_u64(chan_np, "fsl,tx-ts-mask", &ts_mask);
+ if (ret) {
+ dev_err(qmc->dev, "%pOF: failed to read fsl,tx-ts-mask\n",
+ chan_np);
+ of_node_put(chan_np);
+ return ret;
+ }
+ chan->tx_ts_mask = ts_mask;
+
+ ret = of_property_read_u64(chan_np, "fsl,rx-ts-mask", &ts_mask);
+ if (ret) {
+ dev_err(qmc->dev, "%pOF: failed to read fsl,rx-ts-mask\n",
+ chan_np);
+ of_node_put(chan_np);
+ return ret;
+ }
+ chan->rx_ts_mask = ts_mask;
+
+ mode = "transparent";
+ ret = of_property_read_string(chan_np, "fsl,operational-mode", &mode);
+ if (ret && ret != -EINVAL) {
+ dev_err(qmc->dev, "%pOF: failed to read fsl,operational-mode\n",
+ chan_np);
+ of_node_put(chan_np);
+ return ret;
+ }
+ if (!strcmp(mode, "transparent")) {
+ chan->mode = QMC_TRANSPARENT;
+ } else if (!strcmp(mode, "hdlc")) {
+ chan->mode = QMC_HDLC;
+ } else {
+ dev_err(qmc->dev, "%pOF: Invalid fsl,operational-mode (%s)\n",
+ chan_np, mode);
+ of_node_put(chan_np);
+ return -EINVAL;
+ }
+
+ chan->is_reverse_data = of_property_read_bool(chan_np,
+ "fsl,reverse-data");
+
+ list_add_tail(&chan->list, &qmc->chan_head);
+ qmc->chans[chan->id] = chan;
+ }
+
+ return qmc_check_chans(qmc);
+}
+
+static int qmc_setup_tsa_64rxtx(struct qmc *qmc, const struct tsa_serial_info *info)
+{
+ struct qmc_chan *chan;
+ unsigned int i;
+ u16 val;
+
+ /*
+ * Use a common 64-entry table shared by Tx and Rx.
+ * Everything was previously checked: the Tx and Rx assignments are
+ * identical, so the Rx data is used to build the table.
+ */
+
+ /* Invalidate all entries */
+ for (i = 0; i < 64; i++)
+ qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), 0x0000);
+
+ /* Set entries based on the Rx assignment */
+ list_for_each_entry(chan, &qmc->chan_head, list) {
+ for (i = 0; i < info->nb_rx_ts; i++) {
+ if (!(chan->rx_ts_mask & (((u64)1) << i)))
+ continue;
+
+ val = QMC_TSA_VALID | QMC_TSA_MASK |
+ QMC_TSA_CHANNEL(chan->id);
+ qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), val);
+ }
+ }
+
+ /* Set Wrap bit on last entry */
+ qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATRX + ((info->nb_rx_ts - 1) * 2),
+ QMC_TSA_WRAP);
+
+ /* Init pointers to the table */
+ val = qmc->scc_pram_offset + QMC_GBL_TSATRX;
+ qmc_write16(qmc->scc_pram + QMC_GBL_RX_S_PTR, val);
+ qmc_write16(qmc->scc_pram + QMC_GBL_RXPTR, val);
+ qmc_write16(qmc->scc_pram + QMC_GBL_TX_S_PTR, val);
+ qmc_write16(qmc->scc_pram + QMC_GBL_TXPTR, val);
+
+ return 0;
+}
+
+static int qmc_setup_tsa_32rx_32tx(struct qmc *qmc, const struct tsa_serial_info *info)
+{
+ struct qmc_chan *chan;
+ unsigned int i;
+ u16 val;
+
+ /*
+ * Use a 32-entry Tx table and a 32-entry Rx table.
+ * Everything was previously checked.
+ */
+
+ /* Invalidate all entries */
+ for (i = 0; i < 32; i++) {
+ qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), 0x0000);
+ qmc_write16(qmc->scc_pram + QMC_GBL_TSATTX + (i * 2), 0x0000);
+ }
+
+ /* Set entries based on the Rx and Tx assignments */
+ list_for_each_entry(chan, &qmc->chan_head, list) {
+ /* Rx part */
+ for (i = 0; i < info->nb_rx_ts; i++) {
+ if (!(chan->rx_ts_mask & (((u64)1) << i)))
+ continue;
+
+ val = QMC_TSA_VALID | QMC_TSA_MASK |
+ QMC_TSA_CHANNEL(chan->id);
+ qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), val);
+ }
+ /* Tx part */
+ for (i = 0; i < info->nb_tx_ts; i++) {
+ if (!(chan->tx_ts_mask & (((u64)1) << i)))
+ continue;
+
+ val = QMC_TSA_VALID | QMC_TSA_MASK |
+ QMC_TSA_CHANNEL(chan->id);
+ qmc_write16(qmc->scc_pram + QMC_GBL_TSATTX + (i * 2), val);
+ }
+ }
+
+ /* Set Wrap bit on last entries */
+ qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATRX + ((info->nb_rx_ts - 1) * 2),
+ QMC_TSA_WRAP);
+ qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATTX + ((info->nb_tx_ts - 1) * 2),
+ QMC_TSA_WRAP);
+
+ /* Init Rx pointers ...*/
+ val = qmc->scc_pram_offset + QMC_GBL_TSATRX;
+ qmc_write16(qmc->scc_pram + QMC_GBL_RX_S_PTR, val);
+ qmc_write16(qmc->scc_pram + QMC_GBL_RXPTR, val);
+
+ /* ... and Tx pointers */
+ val = qmc->scc_pram_offset + QMC_GBL_TSATTX;
+ qmc_write16(qmc->scc_pram + QMC_GBL_TX_S_PTR, val);
+ qmc_write16(qmc->scc_pram + QMC_GBL_TXPTR, val);
+
+ return 0;
+}
+
+static int qmc_setup_tsa(struct qmc *qmc)
+{
+ struct tsa_serial_info info;
+ int ret;
+
+ /* Retrieve info from the TSA related serial */
+ ret = tsa_serial_get_info(qmc->tsa_serial, &info);
+ if (ret)
+ return ret;
+
+ /*
+ * Set up one common 64-entry table, or two 32-entry tables (one for Rx
+ * and one for Tx), depending on the number of assigned TS.
+ */
+ return ((info.nb_tx_ts > 32) || (info.nb_rx_ts > 32)) ?
+ qmc_setup_tsa_64rxtx(qmc, &info) :
+ qmc_setup_tsa_32rx_32tx(qmc, &info);
+}
+
+static int qmc_setup_chan_trnsync(struct qmc *qmc, struct qmc_chan *chan)
+{
+ struct tsa_serial_info info;
+ u16 first_rx, last_tx;
+ u16 trnsync;
+ int ret;
+
+ /* Retrieve info from the TSA related serial */
+ ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
+ if (ret)
+ return ret;
+
+ /* Find the first Rx TS allocated to the channel */
+ first_rx = chan->rx_ts_mask ? __ffs64(chan->rx_ts_mask) + 1 : 0;
+
+ /* Find the last Tx TS allocated to the channel */
+ last_tx = fls64(chan->tx_ts_mask);
+
+ trnsync = 0;
+ if (info.nb_rx_ts)
+ trnsync |= QMC_SPE_TRNSYNC_RX((first_rx % info.nb_rx_ts) * 2);
+ if (info.nb_tx_ts)
+ trnsync |= QMC_SPE_TRNSYNC_TX((last_tx % info.nb_tx_ts) * 2);
+
+ qmc_write16(chan->s_param + QMC_SPE_TRNSYNC, trnsync);
+
+ dev_dbg(qmc->dev, "chan %u: trnsync=0x%04x, rx %u/%u 0x%llx, tx %u/%u 0x%llx\n",
+ chan->id, trnsync,
+ first_rx, info.nb_rx_ts, chan->rx_ts_mask,
+ last_tx, info.nb_tx_ts, chan->tx_ts_mask);
+
+ return 0;
+}
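+
+/*
+ * Editorial worked example (values purely illustrative): with 32 Rx and 32 Tx
+ * time slots on the serial and a channel using TS 4..7 (masks 0x00f0),
+ * first_rx = 5 and last_tx = 8, so the register value is
+ * QMC_SPE_TRNSYNC_RX((5 % 32) * 2) | QMC_SPE_TRNSYNC_TX((8 % 32) * 2) = 0x0a10.
+ */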
+
+static int qmc_setup_chan(struct qmc *qmc, struct qmc_chan *chan)
+{
+ unsigned int i;
+ cbd_t __iomem *bd;
+ int ret;
+ u16 val;
+
+ chan->qmc = qmc;
+
+ /* Set channel specific parameter base address */
+ chan->s_param = qmc->dpram + (chan->id * 64);
+ /* 16 bd per channel (8 rx and 8 tx) */
+ chan->txbds = qmc->bd_table + (chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS));
+ chan->rxbds = qmc->bd_table + (chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS)) + QMC_NB_TXBDS;
+
+ chan->txbd_free = chan->txbds;
+ chan->txbd_done = chan->txbds;
+ chan->rxbd_free = chan->rxbds;
+ chan->rxbd_done = chan->rxbds;
+
+ /* TBASE and TBPTR */
+ val = chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS) * sizeof(cbd_t);
+ qmc_write16(chan->s_param + QMC_SPE_TBASE, val);
+ qmc_write16(chan->s_param + QMC_SPE_TBPTR, val);
+
+ /* RBASE and RBPTR */
+ val = ((chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS)) + QMC_NB_TXBDS) * sizeof(cbd_t);
+ qmc_write16(chan->s_param + QMC_SPE_RBASE, val);
+ qmc_write16(chan->s_param + QMC_SPE_RBPTR, val);
+ qmc_write32(chan->s_param + QMC_SPE_TSTATE, 0x30000000);
+ qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
+ qmc_write32(chan->s_param + QMC_SPE_ZISTATE, 0x00000100);
+ if (chan->mode == QMC_TRANSPARENT) {
+ qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
+ qmc_write16(chan->s_param + QMC_SPE_TMRBLR, 60);
+ val = QMC_SPE_CHAMR_MODE_TRANSP | QMC_SPE_CHAMR_TRANSP_SYNC;
+ if (chan->is_reverse_data)
+ val |= QMC_SPE_CHAMR_TRANSP_RD;
+ qmc_write16(chan->s_param + QMC_SPE_CHAMR, val);
+ ret = qmc_setup_chan_trnsync(qmc, chan);
+ if (ret)
+ return ret;
+ } else {
+ qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
+ qmc_write16(chan->s_param + QMC_SPE_MFLR, 60);
+ qmc_write16(chan->s_param + QMC_SPE_CHAMR,
+ QMC_SPE_CHAMR_MODE_HDLC | QMC_SPE_CHAMR_HDLC_IDLM);
+ }
+
+ /* Do not enable interrupts now. They will be enabled later */
+ qmc_write16(chan->s_param + QMC_SPE_INTMSK, 0x0000);
+
+ /* Init Rx BDs and set Wrap bit on last descriptor */
+ BUILD_BUG_ON(QMC_NB_RXBDS == 0);
+ val = QMC_BD_RX_I;
+ for (i = 0; i < QMC_NB_RXBDS; i++) {
+ bd = chan->rxbds + i;
+ qmc_write16(&bd->cbd_sc, val);
+ }
+ bd = chan->rxbds + QMC_NB_RXBDS - 1;
+ qmc_write16(&bd->cbd_sc, val | QMC_BD_RX_W);
+
+ /* Init Tx BDs and set Wrap bit on last descriptor */
+ BUILD_BUG_ON(QMC_NB_TXBDS == 0);
+ val = QMC_BD_TX_I;
+ if (chan->mode == QMC_HDLC)
+ val |= QMC_BD_TX_L | QMC_BD_TX_TC;
+ for (i = 0; i < QMC_NB_TXBDS; i++) {
+ bd = chan->txbds + i;
+ qmc_write16(&bd->cbd_sc, val);
+ }
+ bd = chan->txbds + QMC_NB_TXBDS - 1;
+ qmc_write16(&bd->cbd_sc, val | QMC_BD_TX_W);
+
+ return 0;
+}
+
+static int qmc_setup_chans(struct qmc *qmc)
+{
+ struct qmc_chan *chan;
+ int ret;
+
+ list_for_each_entry(chan, &qmc->chan_head, list) {
+ ret = qmc_setup_chan(qmc, chan);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qmc_finalize_chans(struct qmc *qmc)
+{
+ struct qmc_chan *chan;
+ int ret;
+
+ list_for_each_entry(chan, &qmc->chan_head, list) {
+ /* Unmask channel interrupts */
+ if (chan->mode == QMC_HDLC) {
+ qmc_write16(chan->s_param + QMC_SPE_INTMSK,
+ QMC_INT_NID | QMC_INT_IDL | QMC_INT_MRF |
+ QMC_INT_UN | QMC_INT_RXF | QMC_INT_BSY |
+ QMC_INT_TXB | QMC_INT_RXB);
+ } else {
+ qmc_write16(chan->s_param + QMC_SPE_INTMSK,
+ QMC_INT_UN | QMC_INT_BSY |
+ QMC_INT_TXB | QMC_INT_RXB);
+ }
+
+ /* Forcibly stop the channel */
+ ret = qmc_chan_stop(chan, QMC_CHAN_ALL);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qmc_setup_ints(struct qmc *qmc)
+{
+ unsigned int i;
+ u16 __iomem *last;
+
+ /* Zero all entries */
+ for (i = 0; i < (qmc->int_size / sizeof(u16)); i++)
+ qmc_write16(qmc->int_table + i, 0x0000);
+
+ /* Set Wrap bit on last entry */
+ if (qmc->int_size >= sizeof(u16)) {
+ last = qmc->int_table + (qmc->int_size / sizeof(u16)) - 1;
+ qmc_write16(last, QMC_INT_W);
+ }
+
+ return 0;
+}
+
+static void qmc_irq_gint(struct qmc *qmc)
+{
+ struct qmc_chan *chan;
+ unsigned int chan_id;
+ unsigned long flags;
+ u16 int_entry;
+
+ int_entry = qmc_read16(qmc->int_curr);
+ while (int_entry & QMC_INT_V) {
+ /* Clear all but the Wrap bit */
+ qmc_write16(qmc->int_curr, int_entry & QMC_INT_W);
+
+ chan_id = QMC_INT_GET_CHANNEL(int_entry);
+ chan = qmc->chans[chan_id];
+ if (!chan) {
+ dev_err(qmc->dev, "interrupt on invalid chan %u\n", chan_id);
+ goto int_next;
+ }
+
+ if (int_entry & QMC_INT_TXB)
+ qmc_chan_write_done(chan);
+
+ if (int_entry & QMC_INT_UN) {
+ dev_info(qmc->dev, "intr chan %u, 0x%04x (UN)\n", chan_id,
+ int_entry);
+ chan->nb_tx_underrun++;
+ }
+
+ if (int_entry & QMC_INT_BSY) {
+ dev_info(qmc->dev, "intr chan %u, 0x%04x (BSY)\n", chan_id,
+ int_entry);
+ chan->nb_rx_busy++;
+ /* Restart the receiver if needed */
+ spin_lock_irqsave(&chan->rx_lock, flags);
+ if (chan->rx_pending && !chan->is_rx_stopped) {
+ if (chan->mode == QMC_TRANSPARENT)
+ qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
+ else
+ qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
+ qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
+ chan->is_rx_halted = false;
+ } else {
+ chan->is_rx_halted = true;
+ }
+ spin_unlock_irqrestore(&chan->rx_lock, flags);
+ }
+
+ if (int_entry & QMC_INT_RXB)
+ qmc_chan_read_done(chan);
+
+int_next:
+ if (int_entry & QMC_INT_W)
+ qmc->int_curr = qmc->int_table;
+ else
+ qmc->int_curr++;
+ int_entry = qmc_read16(qmc->int_curr);
+ }
+}
+
+static irqreturn_t qmc_irq_handler(int irq, void *priv)
+{
+ struct qmc *qmc = (struct qmc *)priv;
+ u16 scce;
+
+ scce = qmc_read16(qmc->scc_regs + SCC_SCCE);
+ qmc_write16(qmc->scc_regs + SCC_SCCE, scce);
+
+ if (unlikely(scce & SCC_SCCE_IQOV))
+ dev_info(qmc->dev, "IRQ queue overflow\n");
+
+ if (unlikely(scce & SCC_SCCE_GUN))
+ dev_err(qmc->dev, "Global transmitter underrun\n");
+
+ if (unlikely(scce & SCC_SCCE_GOV))
+ dev_err(qmc->dev, "Global receiver overrun\n");
+
+ /* normal interrupt */
+ if (likely(scce & SCC_SCCE_GINT))
+ qmc_irq_gint(qmc);
+
+ return IRQ_HANDLED;
+}
+
+static int qmc_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ unsigned int nb_chans;
+ struct resource *res;
+ struct qmc *qmc;
+ int irq;
+ int ret;
+
+ qmc = devm_kzalloc(&pdev->dev, sizeof(*qmc), GFP_KERNEL);
+ if (!qmc)
+ return -ENOMEM;
+
+ qmc->dev = &pdev->dev;
+ INIT_LIST_HEAD(&qmc->chan_head);
+
+ qmc->scc_regs = devm_platform_ioremap_resource_byname(pdev, "scc_regs");
+ if (IS_ERR(qmc->scc_regs))
+ return PTR_ERR(qmc->scc_regs);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "scc_pram");
+ if (!res)
+ return -EINVAL;
+ qmc->scc_pram_offset = res->start - get_immrbase();
+ qmc->scc_pram = devm_ioremap_resource(qmc->dev, res);
+ if (IS_ERR(qmc->scc_pram))
+ return PTR_ERR(qmc->scc_pram);
+
+ qmc->dpram = devm_platform_ioremap_resource_byname(pdev, "dpram");
+ if (IS_ERR(qmc->dpram))
+ return PTR_ERR(qmc->dpram);
+
+ qmc->tsa_serial = devm_tsa_serial_get_byphandle(qmc->dev, np, "fsl,tsa-serial");
+ if (IS_ERR(qmc->tsa_serial)) {
+ return dev_err_probe(qmc->dev, PTR_ERR(qmc->tsa_serial),
+ "Failed to get TSA serial\n");
+ }
+
+ /* Connect the serial (SCC) to TSA */
+ ret = tsa_serial_connect(qmc->tsa_serial);
+ if (ret) {
+ dev_err(qmc->dev, "Failed to connect TSA serial\n");
+ return ret;
+ }
+
+ /* Parse channel information */
+ ret = qmc_of_parse_chans(qmc, np);
+ if (ret)
+ goto err_tsa_serial_disconnect;
+
+ nb_chans = qmc_nb_chans(qmc);
+
+ /* Init GSMR_H and GSMR_L registers */
+ qmc_write32(qmc->scc_regs + SCC_GSMRH,
+ SCC_GSMRH_CDS | SCC_GSMRH_CTSS | SCC_GSMRH_CDP | SCC_GSMRH_CTSP);
+
+ /* enable QMC mode */
+ qmc_write32(qmc->scc_regs + SCC_GSMRL, SCC_GSMRL_MODE_QMC);
+
+ /*
+ * Allocate the buffer descriptor table
+ * 8 rx and 8 tx descriptors per channel
+ */
+ qmc->bd_size = (nb_chans * (QMC_NB_TXBDS + QMC_NB_RXBDS)) * sizeof(cbd_t);
+ qmc->bd_table = dmam_alloc_coherent(qmc->dev, qmc->bd_size,
+ &qmc->bd_dma_addr, GFP_KERNEL);
+ if (!qmc->bd_table) {
+ dev_err(qmc->dev, "Failed to allocate bd table\n");
+ ret = -ENOMEM;
+ goto err_tsa_serial_disconnect;
+ }
+ memset(qmc->bd_table, 0, qmc->bd_size);
+
+ qmc_write32(qmc->scc_pram + QMC_GBL_MCBASE, qmc->bd_dma_addr);
+
+ /* Allocate the interrupt table */
+ qmc->int_size = QMC_NB_INTS * sizeof(u16);
+ qmc->int_table = dmam_alloc_coherent(qmc->dev, qmc->int_size,
+ &qmc->int_dma_addr, GFP_KERNEL);
+ if (!qmc->int_table) {
+ dev_err(qmc->dev, "Failed to allocate interrupt table\n");
+ ret = -ENOMEM;
+ goto err_tsa_serial_disconnect;
+ }
+ memset(qmc->int_table, 0, qmc->int_size);
+
+ qmc->int_curr = qmc->int_table;
+ qmc_write32(qmc->scc_pram + QMC_GBL_INTBASE, qmc->int_dma_addr);
+ qmc_write32(qmc->scc_pram + QMC_GBL_INTPTR, qmc->int_dma_addr);
+
+ /* Set MRBLR (valid for HDLC only): max MRU + max CRC */
+ qmc_write16(qmc->scc_pram + QMC_GBL_MRBLR, HDLC_MAX_MRU + 4);
+
+ qmc_write16(qmc->scc_pram + QMC_GBL_GRFTHR, 1);
+ qmc_write16(qmc->scc_pram + QMC_GBL_GRFCNT, 1);
+
+ qmc_write32(qmc->scc_pram + QMC_GBL_C_MASK32, 0xDEBB20E3);
+ qmc_write16(qmc->scc_pram + QMC_GBL_C_MASK16, 0xF0B8);
+
+ ret = qmc_setup_tsa(qmc);
+ if (ret)
+ goto err_tsa_serial_disconnect;
+
+ qmc_write16(qmc->scc_pram + QMC_GBL_QMCSTATE, 0x8000);
+
+ ret = qmc_setup_chans(qmc);
+ if (ret)
+ goto err_tsa_serial_disconnect;
+
+ /* Init interrupts table */
+ ret = qmc_setup_ints(qmc);
+ if (ret)
+ goto err_tsa_serial_disconnect;
+
+ /* Disable and clear interrupts, set the irq handler */
+ qmc_write16(qmc->scc_regs + SCC_SCCM, 0x0000);
+ qmc_write16(qmc->scc_regs + SCC_SCCE, 0x000F);
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto err_tsa_serial_disconnect;
+ }
+ ret = devm_request_irq(qmc->dev, irq, qmc_irq_handler, 0, "qmc", qmc);
+ if (ret < 0)
+ goto err_tsa_serial_disconnect;
+
+ /* Enable interrupts */
+ qmc_write16(qmc->scc_regs + SCC_SCCM,
+ SCC_SCCE_IQOV | SCC_SCCE_GINT | SCC_SCCE_GUN | SCC_SCCE_GOV);
+
+ ret = qmc_finalize_chans(qmc);
+ if (ret < 0)
+ goto err_disable_intr;
+
+ /* Enable transmitter and receiver */
+ qmc_setbits32(qmc->scc_regs + SCC_GSMRL, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
+
+ platform_set_drvdata(pdev, qmc);
+
+ return 0;
+
+err_disable_intr:
+ qmc_write16(qmc->scc_regs + SCC_SCCM, 0);
+
+err_tsa_serial_disconnect:
+ tsa_serial_disconnect(qmc->tsa_serial);
+ return ret;
+}
+
+static int qmc_remove(struct platform_device *pdev)
+{
+ struct qmc *qmc = platform_get_drvdata(pdev);
+
+ /* Disable transmitter and receiver */
+ qmc_setbits32(qmc->scc_regs + SCC_GSMRL, 0);
+
+ /* Disable interrupts */
+ qmc_write16(qmc->scc_regs + SCC_SCCM, 0);
+
+ /* Disconnect the serial from TSA */
+ tsa_serial_disconnect(qmc->tsa_serial);
+
+ return 0;
+}
+
+static const struct of_device_id qmc_id_table[] = {
+ { .compatible = "fsl,cpm1-scc-qmc" },
+ {} /* sentinel */
+};
+MODULE_DEVICE_TABLE(of, qmc_id_table);
+
+static struct platform_driver qmc_driver = {
+ .driver = {
+ .name = "fsl-qmc",
+ .of_match_table = of_match_ptr(qmc_id_table),
+ },
+ .probe = qmc_probe,
+ .remove = qmc_remove,
+};
+module_platform_driver(qmc_driver);
+
+struct qmc_chan *qmc_chan_get_byphandle(struct device_node *np, const char *phandle_name)
+{
+ struct of_phandle_args out_args;
+ struct platform_device *pdev;
+ struct qmc_chan *qmc_chan;
+ struct qmc *qmc;
+ int ret;
+
+ ret = of_parse_phandle_with_fixed_args(np, phandle_name, 1, 0,
+ &out_args);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ if (!of_match_node(qmc_driver.driver.of_match_table, out_args.np)) {
+ of_node_put(out_args.np);
+ return ERR_PTR(-EINVAL);
+ }
+
+ pdev = of_find_device_by_node(out_args.np);
+ of_node_put(out_args.np);
+ if (!pdev)
+ return ERR_PTR(-ENODEV);
+
+ qmc = platform_get_drvdata(pdev);
+ if (!qmc) {
+ platform_device_put(pdev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ if (out_args.args_count != 1) {
+ platform_device_put(pdev);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (out_args.args[0] >= ARRAY_SIZE(qmc->chans)) {
+ platform_device_put(pdev);
+ return ERR_PTR(-EINVAL);
+ }
+
+ qmc_chan = qmc->chans[out_args.args[0]];
+ if (!qmc_chan) {
+ platform_device_put(pdev);
+ return ERR_PTR(-ENOENT);
+ }
+
+ return qmc_chan;
+}
+EXPORT_SYMBOL(qmc_chan_get_byphandle);
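+
+/*
+ * Editorial note: the single phandle argument is the channel number, i.e. the
+ * "reg" value of the channel sub-node. A hypothetical consumer (property name
+ * chosen for illustration only):
+ *
+ *	// consumer node:	fsl,qmc-chan = <&qmc 16>;
+ *	chan = qmc_chan_get_byphandle(dev->of_node, "fsl,qmc-chan");
+ *	if (IS_ERR(chan))
+ *		return PTR_ERR(chan);	// may be -EPROBE_DEFER
+ */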
+
+void qmc_chan_put(struct qmc_chan *chan)
+{
+ put_device(chan->qmc->dev);
+}
+EXPORT_SYMBOL(qmc_chan_put);
+
+static void devm_qmc_chan_release(struct device *dev, void *res)
+{
+ struct qmc_chan **qmc_chan = res;
+
+ qmc_chan_put(*qmc_chan);
+}
+
+struct qmc_chan *devm_qmc_chan_get_byphandle(struct device *dev,
+ struct device_node *np,
+ const char *phandle_name)
+{
+ struct qmc_chan *qmc_chan;
+ struct qmc_chan **dr;
+
+ dr = devres_alloc(devm_qmc_chan_release, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return ERR_PTR(-ENOMEM);
+
+ qmc_chan = qmc_chan_get_byphandle(np, phandle_name);
+ if (!IS_ERR(qmc_chan)) {
+ *dr = qmc_chan;
+ devres_add(dev, dr);
+ } else {
+ devres_free(dr);
+ }
+
+ return qmc_chan;
+}
+EXPORT_SYMBOL(devm_qmc_chan_get_byphandle);
+
+MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
+MODULE_DESCRIPTION("CPM QMC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/fsl/qe/tsa.c b/drivers/soc/fsl/qe/tsa.c
new file mode 100644
index 0000000000..e0527b9efd
--- /dev/null
+++ b/drivers/soc/fsl/qe/tsa.c
@@ -0,0 +1,846 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TSA driver
+ *
+ * Copyright 2022 CS GROUP France
+ *
+ * Author: Herve Codina <herve.codina@bootlin.com>
+ */
+
+#include "tsa.h"
+#include <dt-bindings/soc/cpm1-fsl,tsa.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* TSA SI RAM routing tables entry */
+#define TSA_SIRAM_ENTRY_LAST (1 << 16)
+#define TSA_SIRAM_ENTRY_BYTE (1 << 17)
+#define TSA_SIRAM_ENTRY_CNT(x) (((x) & 0x0f) << 18)
+#define TSA_SIRAM_ENTRY_CSEL_MASK (0x7 << 22)
+#define TSA_SIRAM_ENTRY_CSEL_NU (0x0 << 22)
+#define TSA_SIRAM_ENTRY_CSEL_SCC2 (0x2 << 22)
+#define TSA_SIRAM_ENTRY_CSEL_SCC3 (0x3 << 22)
+#define TSA_SIRAM_ENTRY_CSEL_SCC4 (0x4 << 22)
+#define TSA_SIRAM_ENTRY_CSEL_SMC1 (0x5 << 22)
+#define TSA_SIRAM_ENTRY_CSEL_SMC2 (0x6 << 22)
+
+/* SI mode register (32 bits) */
+#define TSA_SIMODE 0x00
+#define TSA_SIMODE_SMC2 0x80000000
+#define TSA_SIMODE_SMC1 0x00008000
+#define TSA_SIMODE_TDMA(x) ((x) << 0)
+#define TSA_SIMODE_TDMB(x) ((x) << 16)
+#define TSA_SIMODE_TDM_MASK 0x0fff
+#define TSA_SIMODE_TDM_SDM_MASK 0x0c00
+#define TSA_SIMODE_TDM_SDM_NORM 0x0000
+#define TSA_SIMODE_TDM_SDM_ECHO 0x0400
+#define TSA_SIMODE_TDM_SDM_INTL_LOOP 0x0800
+#define TSA_SIMODE_TDM_SDM_LOOP_CTRL 0x0c00
+#define TSA_SIMODE_TDM_RFSD(x) ((x) << 8)
+#define TSA_SIMODE_TDM_DSC 0x0080
+#define TSA_SIMODE_TDM_CRT 0x0040
+#define TSA_SIMODE_TDM_STZ 0x0020
+#define TSA_SIMODE_TDM_CE 0x0010
+#define TSA_SIMODE_TDM_FE 0x0008
+#define TSA_SIMODE_TDM_GM 0x0004
+#define TSA_SIMODE_TDM_TFSD(x) ((x) << 0)
+
+/* SI global mode register (8 bits) */
+#define TSA_SIGMR 0x04
+#define TSA_SIGMR_ENB (1 << 3)
+#define TSA_SIGMR_ENA (1 << 2)
+#define TSA_SIGMR_RDM_MASK 0x03
+#define TSA_SIGMR_RDM_STATIC_TDMA 0x00
+#define TSA_SIGMR_RDM_DYN_TDMA 0x01
+#define TSA_SIGMR_RDM_STATIC_TDMAB 0x02
+#define TSA_SIGMR_RDM_DYN_TDMAB 0x03
+
+/* SI status register (8 bits) */
+#define TSA_SISTR 0x06
+
+/* SI command register (8 bits) */
+#define TSA_SICMR 0x07
+
+/* SI clock route register (32 bits) */
+#define TSA_SICR 0x0C
+#define TSA_SICR_SCC2(x) ((x) << 8)
+#define TSA_SICR_SCC3(x) ((x) << 16)
+#define TSA_SICR_SCC4(x) ((x) << 24)
+#define TSA_SICR_SCC_MASK 0x0ff
+#define TSA_SICR_SCC_GRX (1 << 7)
+#define TSA_SICR_SCC_SCX_TSA (1 << 6)
+#define TSA_SICR_SCC_RXCS_MASK (0x7 << 3)
+#define TSA_SICR_SCC_RXCS_BRG1 (0x0 << 3)
+#define TSA_SICR_SCC_RXCS_BRG2 (0x1 << 3)
+#define TSA_SICR_SCC_RXCS_BRG3 (0x2 << 3)
+#define TSA_SICR_SCC_RXCS_BRG4 (0x3 << 3)
+#define TSA_SICR_SCC_RXCS_CLK15 (0x4 << 3)
+#define TSA_SICR_SCC_RXCS_CLK26 (0x5 << 3)
+#define TSA_SICR_SCC_RXCS_CLK37 (0x6 << 3)
+#define TSA_SICR_SCC_RXCS_CLK48 (0x7 << 3)
+#define TSA_SICR_SCC_TXCS_MASK (0x7 << 0)
+#define TSA_SICR_SCC_TXCS_BRG1 (0x0 << 0)
+#define TSA_SICR_SCC_TXCS_BRG2 (0x1 << 0)
+#define TSA_SICR_SCC_TXCS_BRG3 (0x2 << 0)
+#define TSA_SICR_SCC_TXCS_BRG4 (0x3 << 0)
+#define TSA_SICR_SCC_TXCS_CLK15 (0x4 << 0)
+#define TSA_SICR_SCC_TXCS_CLK26 (0x5 << 0)
+#define TSA_SICR_SCC_TXCS_CLK37 (0x6 << 0)
+#define TSA_SICR_SCC_TXCS_CLK48 (0x7 << 0)
+
+/* Serial interface RAM pointer register (32 bits) */
+#define TSA_SIRP 0x10
+
+struct tsa_entries_area {
+ void __iomem *entries_start;
+ void __iomem *entries_next;
+ void __iomem *last_entry;
+};
+
+struct tsa_tdm {
+ bool is_enable;
+ struct clk *l1rclk_clk;
+ struct clk *l1rsync_clk;
+ struct clk *l1tclk_clk;
+ struct clk *l1tsync_clk;
+ u32 simode_tdm;
+};
+
+#define TSA_TDMA 0
+#define TSA_TDMB 1
+
+struct tsa {
+ struct device *dev;
+ void __iomem *si_regs;
+ void __iomem *si_ram;
+ resource_size_t si_ram_sz;
+ spinlock_t lock;
+ int tdms; /* TSA_TDMx ORed */
+ struct tsa_tdm tdm[2]; /* TDMa and TDMb */
+ struct tsa_serial {
+ unsigned int id;
+ struct tsa_serial_info info;
+ } serials[6];
+};
+
+static inline struct tsa *tsa_serial_get_tsa(struct tsa_serial *tsa_serial)
+{
+ /* The serials table is indexed by the serial id */
+ return container_of(tsa_serial, struct tsa, serials[tsa_serial->id]);
+}
+
+static inline void tsa_write32(void __iomem *addr, u32 val)
+{
+ iowrite32be(val, addr);
+}
+
+static inline void tsa_write8(void __iomem *addr, u32 val)
+{
+ iowrite8(val, addr);
+}
+
+static inline u32 tsa_read32(void __iomem *addr)
+{
+ return ioread32be(addr);
+}
+
+static inline void tsa_clrbits32(void __iomem *addr, u32 clr)
+{
+ tsa_write32(addr, tsa_read32(addr) & ~clr);
+}
+
+static inline void tsa_clrsetbits32(void __iomem *addr, u32 clr, u32 set)
+{
+ tsa_write32(addr, (tsa_read32(addr) & ~clr) | set);
+}
+
+int tsa_serial_connect(struct tsa_serial *tsa_serial)
+{
+ struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
+ unsigned long flags;
+ u32 clear;
+ u32 set;
+
+ switch (tsa_serial->id) {
+ case FSL_CPM_TSA_SCC2:
+ clear = TSA_SICR_SCC2(TSA_SICR_SCC_MASK);
+ set = TSA_SICR_SCC2(TSA_SICR_SCC_SCX_TSA);
+ break;
+ case FSL_CPM_TSA_SCC3:
+ clear = TSA_SICR_SCC3(TSA_SICR_SCC_MASK);
+ set = TSA_SICR_SCC3(TSA_SICR_SCC_SCX_TSA);
+ break;
+ case FSL_CPM_TSA_SCC4:
+ clear = TSA_SICR_SCC4(TSA_SICR_SCC_MASK);
+ set = TSA_SICR_SCC4(TSA_SICR_SCC_SCX_TSA);
+ break;
+ default:
+ dev_err(tsa->dev, "Unsupported serial id %u\n", tsa_serial->id);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&tsa->lock, flags);
+ tsa_clrsetbits32(tsa->si_regs + TSA_SICR, clear, set);
+ spin_unlock_irqrestore(&tsa->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(tsa_serial_connect);
+
+int tsa_serial_disconnect(struct tsa_serial *tsa_serial)
+{
+ struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
+ unsigned long flags;
+ u32 clear;
+
+ switch (tsa_serial->id) {
+ case FSL_CPM_TSA_SCC2:
+ clear = TSA_SICR_SCC2(TSA_SICR_SCC_MASK);
+ break;
+ case FSL_CPM_TSA_SCC3:
+ clear = TSA_SICR_SCC3(TSA_SICR_SCC_MASK);
+ break;
+ case FSL_CPM_TSA_SCC4:
+ clear = TSA_SICR_SCC4(TSA_SICR_SCC_MASK);
+ break;
+ default:
+ dev_err(tsa->dev, "Unsupported serial id %u\n", tsa_serial->id);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&tsa->lock, flags);
+ tsa_clrsetbits32(tsa->si_regs + TSA_SICR, clear, 0);
+ spin_unlock_irqrestore(&tsa->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(tsa_serial_disconnect);
+
+int tsa_serial_get_info(struct tsa_serial *tsa_serial, struct tsa_serial_info *info)
+{
+ memcpy(info, &tsa_serial->info, sizeof(*info));
+ return 0;
+}
+EXPORT_SYMBOL(tsa_serial_get_info);
+
+static void tsa_init_entries_area(struct tsa *tsa, struct tsa_entries_area *area,
+ u32 tdms, u32 tdm_id, bool is_rx)
+{
+ resource_size_t quarter;
+ resource_size_t half;
+
+ quarter = tsa->si_ram_sz / 4;
+ half = tsa->si_ram_sz / 2;
+
+ if (tdms == BIT(TSA_TDMA)) {
+ /* Only TDMA */
+ if (is_rx) {
+ /* First half of si_ram */
+ area->entries_start = tsa->si_ram;
+ area->entries_next = area->entries_start + half;
+ area->last_entry = NULL;
+ } else {
+ /* Second half of si_ram */
+ area->entries_start = tsa->si_ram + half;
+ area->entries_next = area->entries_start + half;
+ area->last_entry = NULL;
+ }
+ } else {
+ /* Only TDMB or both TDMs */
+ if (tdm_id == TSA_TDMA) {
+ if (is_rx) {
+ /* First half of first half of si_ram */
+ area->entries_start = tsa->si_ram;
+ area->entries_next = area->entries_start + quarter;
+ area->last_entry = NULL;
+ } else {
+ /* First half of second half of si_ram */
+ area->entries_start = tsa->si_ram + (2 * quarter);
+ area->entries_next = area->entries_start + quarter;
+ area->last_entry = NULL;
+ }
+ } else {
+ if (is_rx) {
+ /* Second half of first half of si_ram */
+ area->entries_start = tsa->si_ram + quarter;
+ area->entries_next = area->entries_start + quarter;
+ area->last_entry = NULL;
+ } else {
+ /* Second half of second half of si_ram */
+ area->entries_start = tsa->si_ram + (3 * quarter);
+ area->entries_next = area->entries_start + quarter;
+ area->last_entry = NULL;
+ }
+ }
+ }
+}
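+
+/*
+ * Editorial summary of the partitioning above: when both TDMs are used, the
+ * SI RAM is split into four equal areas; when only TDMA is used, it is split
+ * into two halves:
+ *
+ *	both TDMs:  [0, 1/4) TDMA Rx | [1/4, 2/4) TDMB Rx |
+ *	            [2/4, 3/4) TDMA Tx | [3/4, 4/4) TDMB Tx
+ *	TDMA only:  [0, 1/2) TDMA Rx | [1/2, 1) TDMA Tx
+ */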
+
+static const char *tsa_serial_id2name(struct tsa *tsa, u32 serial_id)
+{
+ switch (serial_id) {
+ case FSL_CPM_TSA_NU: return "Not used";
+ case FSL_CPM_TSA_SCC2: return "SCC2";
+ case FSL_CPM_TSA_SCC3: return "SCC3";
+ case FSL_CPM_TSA_SCC4: return "SCC4";
+ case FSL_CPM_TSA_SMC1: return "SMC1";
+ case FSL_CPM_TSA_SMC2: return "SMC2";
+ default:
+ break;
+ }
+ return NULL;
+}
+
+static u32 tsa_serial_id2csel(struct tsa *tsa, u32 serial_id)
+{
+ switch (serial_id) {
+ case FSL_CPM_TSA_SCC2: return TSA_SIRAM_ENTRY_CSEL_SCC2;
+ case FSL_CPM_TSA_SCC3: return TSA_SIRAM_ENTRY_CSEL_SCC3;
+ case FSL_CPM_TSA_SCC4: return TSA_SIRAM_ENTRY_CSEL_SCC4;
+ case FSL_CPM_TSA_SMC1: return TSA_SIRAM_ENTRY_CSEL_SMC1;
+ case FSL_CPM_TSA_SMC2: return TSA_SIRAM_ENTRY_CSEL_SMC2;
+ default:
+ break;
+ }
+ return TSA_SIRAM_ENTRY_CSEL_NU;
+}
+
+static int tsa_add_entry(struct tsa *tsa, struct tsa_entries_area *area,
+ u32 count, u32 serial_id)
+{
+ void __iomem *addr;
+ u32 left;
+ u32 val;
+ u32 cnt;
+ u32 nb;
+
+ addr = area->last_entry ? area->last_entry + 4 : area->entries_start;
+
+ nb = DIV_ROUND_UP(count, 8);
+ if ((addr + (nb * 4)) > area->entries_next) {
+ dev_err(tsa->dev, "si ram area full\n");
+ return -ENOSPC;
+ }
+
+ if (area->last_entry) {
+ /* Clear last flag */
+ tsa_clrbits32(area->last_entry, TSA_SIRAM_ENTRY_LAST);
+ }
+
+ left = count;
+ while (left) {
+ val = TSA_SIRAM_ENTRY_BYTE | tsa_serial_id2csel(tsa, serial_id);
+
+ if (left > 16) {
+ cnt = 16;
+ } else {
+ cnt = left;
+ val |= TSA_SIRAM_ENTRY_LAST;
+ area->last_entry = addr;
+ }
+ val |= TSA_SIRAM_ENTRY_CNT(cnt - 1);
+
+ tsa_write32(addr, val);
+ addr += 4;
+ left -= cnt;
+ }
+
+ return 0;
+}
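/*
 * Worked example (illustrative sketch, not part of the upstream sources):
 * routing a hypothetical run of 40 consecutive time slots to SCC2 makes
 * tsa_add_entry() emit three SI RAM entries, each covering at most 16
 * byte-wide slots:
 *
 *   entry 0: CSEL=SCC2, BYTE, CNT=15        (slots  0..15)
 *   entry 1: CSEL=SCC2, BYTE, CNT=15        (slots 16..31)
 *   entry 2: CSEL=SCC2, BYTE, CNT=7, LAST   (slots 32..39)
 */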
+
+static int tsa_of_parse_tdm_route(struct tsa *tsa, struct device_node *tdm_np,
+ u32 tdms, u32 tdm_id, bool is_rx)
+{
+ struct tsa_entries_area area;
+ const char *route_name;
+ u32 serial_id;
+ int len, i;
+ u32 count;
+ const char *serial_name;
+ struct tsa_serial_info *serial_info;
+ struct tsa_tdm *tdm;
+ int ret;
+ u32 ts;
+
+ route_name = is_rx ? "fsl,rx-ts-routes" : "fsl,tx-ts-routes";
+
+ len = of_property_count_u32_elems(tdm_np, route_name);
+ if (len < 0) {
+ dev_err(tsa->dev, "%pOF: failed to read %s\n", tdm_np, route_name);
+ return len;
+ }
+ if (len % 2 != 0) {
+ dev_err(tsa->dev, "%pOF: wrong %s format\n", tdm_np, route_name);
+ return -EINVAL;
+ }
+
+ tsa_init_entries_area(tsa, &area, tdms, tdm_id, is_rx);
+ ts = 0;
+ for (i = 0; i < len; i += 2) {
+ of_property_read_u32_index(tdm_np, route_name, i, &count);
+ of_property_read_u32_index(tdm_np, route_name, i + 1, &serial_id);
+
+ if (serial_id >= ARRAY_SIZE(tsa->serials)) {
+ dev_err(tsa->dev, "%pOF: invalid serial id (%u)\n",
+ tdm_np, serial_id);
+ return -EINVAL;
+ }
+
+ serial_name = tsa_serial_id2name(tsa, serial_id);
+ if (!serial_name) {
+ dev_err(tsa->dev, "%pOF: unsupported serial id (%u)\n",
+ tdm_np, serial_id);
+ return -EINVAL;
+ }
+
+ dev_dbg(tsa->dev, "tdm_id=%u, %s ts %u..%u -> %s\n",
+ tdm_id, route_name, ts, ts+count-1, serial_name);
+ ts += count;
+
+ ret = tsa_add_entry(tsa, &area, count, serial_id);
+ if (ret)
+ return ret;
+
+ serial_info = &tsa->serials[serial_id].info;
+ tdm = &tsa->tdm[tdm_id];
+ if (is_rx) {
+ serial_info->rx_fs_rate = clk_get_rate(tdm->l1rsync_clk);
+ serial_info->rx_bit_rate = clk_get_rate(tdm->l1rclk_clk);
+ serial_info->nb_rx_ts += count;
+ } else {
+ serial_info->tx_fs_rate = tdm->l1tsync_clk ?
+ clk_get_rate(tdm->l1tsync_clk) :
+ clk_get_rate(tdm->l1rsync_clk);
+ serial_info->tx_bit_rate = tdm->l1tclk_clk ?
+ clk_get_rate(tdm->l1tclk_clk) :
+ clk_get_rate(tdm->l1rclk_clk);
+ serial_info->nb_tx_ts += count;
+ }
+ }
+ return 0;
+}
+
+static inline int tsa_of_parse_tdm_rx_route(struct tsa *tsa,
+ struct device_node *tdm_np,
+ u32 tdms, u32 tdm_id)
+{
+ return tsa_of_parse_tdm_route(tsa, tdm_np, tdms, tdm_id, true);
+}
+
+static inline int tsa_of_parse_tdm_tx_route(struct tsa *tsa,
+ struct device_node *tdm_np,
+ u32 tdms, u32 tdm_id)
+{
+ return tsa_of_parse_tdm_route(tsa, tdm_np, tdms, tdm_id, false);
+}
+
+static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np)
+{
+ struct device_node *tdm_np;
+ struct tsa_tdm *tdm;
+ struct clk *clk;
+ u32 tdm_id, val;
+ int ret;
+ int i;
+
+ tsa->tdms = 0;
+ tsa->tdm[0].is_enable = false;
+ tsa->tdm[1].is_enable = false;
+
+ for_each_available_child_of_node(np, tdm_np) {
+ ret = of_property_read_u32(tdm_np, "reg", &tdm_id);
+ if (ret) {
+ dev_err(tsa->dev, "%pOF: failed to read reg\n", tdm_np);
+ of_node_put(tdm_np);
+ return ret;
+ }
+ switch (tdm_id) {
+ case 0:
+ tsa->tdms |= BIT(TSA_TDMA);
+ break;
+ case 1:
+ tsa->tdms |= BIT(TSA_TDMB);
+ break;
+ default:
+ dev_err(tsa->dev, "%pOF: Invalid tdm_id (%u)\n", tdm_np,
+ tdm_id);
+ of_node_put(tdm_np);
+ return -EINVAL;
+ }
+ }
+
+ for_each_available_child_of_node(np, tdm_np) {
+ ret = of_property_read_u32(tdm_np, "reg", &tdm_id);
+ if (ret) {
+ dev_err(tsa->dev, "%pOF: failed to read reg\n", tdm_np);
+ of_node_put(tdm_np);
+ return ret;
+ }
+
+ tdm = &tsa->tdm[tdm_id];
+ tdm->simode_tdm = TSA_SIMODE_TDM_SDM_NORM;
+
+ val = 0;
+ ret = of_property_read_u32(tdm_np, "fsl,rx-frame-sync-delay-bits",
+ &val);
+ if (ret && ret != -EINVAL) {
+ dev_err(tsa->dev,
+ "%pOF: failed to read fsl,rx-frame-sync-delay-bits\n",
+ tdm_np);
+ of_node_put(tdm_np);
+ return ret;
+ }
+ if (val > 3) {
+ dev_err(tsa->dev,
+ "%pOF: Invalid fsl,rx-frame-sync-delay-bits (%u)\n",
+ tdm_np, val);
+ of_node_put(tdm_np);
+ return -EINVAL;
+ }
+ tdm->simode_tdm |= TSA_SIMODE_TDM_RFSD(val);
+
+ val = 0;
+ ret = of_property_read_u32(tdm_np, "fsl,tx-frame-sync-delay-bits",
+ &val);
+ if (ret && ret != -EINVAL) {
+ dev_err(tsa->dev,
+ "%pOF: failed to read fsl,tx-frame-sync-delay-bits\n",
+ tdm_np);
+ of_node_put(tdm_np);
+ return ret;
+ }
+ if (val > 3) {
+ dev_err(tsa->dev,
+ "%pOF: Invalid fsl,tx-frame-sync-delay-bits (%u)\n",
+ tdm_np, val);
+ of_node_put(tdm_np);
+ return -EINVAL;
+ }
+ tdm->simode_tdm |= TSA_SIMODE_TDM_TFSD(val);
+
+ if (of_property_read_bool(tdm_np, "fsl,common-rxtx-pins"))
+ tdm->simode_tdm |= TSA_SIMODE_TDM_CRT;
+
+ if (of_property_read_bool(tdm_np, "fsl,clock-falling-edge"))
+ tdm->simode_tdm |= TSA_SIMODE_TDM_CE;
+
+ if (of_property_read_bool(tdm_np, "fsl,fsync-rising-edge"))
+ tdm->simode_tdm |= TSA_SIMODE_TDM_FE;
+
+ if (of_property_read_bool(tdm_np, "fsl,double-speed-clock"))
+ tdm->simode_tdm |= TSA_SIMODE_TDM_DSC;
+
+ clk = of_clk_get_by_name(tdm_np, "l1rsync");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ of_node_put(tdm_np);
+ goto err;
+ }
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ clk_put(clk);
+ of_node_put(tdm_np);
+ goto err;
+ }
+ tdm->l1rsync_clk = clk;
+
+ clk = of_clk_get_by_name(tdm_np, "l1rclk");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ of_node_put(tdm_np);
+ goto err;
+ }
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ clk_put(clk);
+ of_node_put(tdm_np);
+ goto err;
+ }
+ tdm->l1rclk_clk = clk;
+
+ if (!(tdm->simode_tdm & TSA_SIMODE_TDM_CRT)) {
+ clk = of_clk_get_by_name(tdm_np, "l1tsync");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ of_node_put(tdm_np);
+ goto err;
+ }
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ clk_put(clk);
+ of_node_put(tdm_np);
+ goto err;
+ }
+ tdm->l1tsync_clk = clk;
+
+ clk = of_clk_get_by_name(tdm_np, "l1tclk");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ of_node_put(tdm_np);
+ goto err;
+ }
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ clk_put(clk);
+ of_node_put(tdm_np);
+ goto err;
+ }
+ tdm->l1tclk_clk = clk;
+ }
+
+ ret = tsa_of_parse_tdm_rx_route(tsa, tdm_np, tsa->tdms, tdm_id);
+ if (ret) {
+ of_node_put(tdm_np);
+ goto err;
+ }
+
+ ret = tsa_of_parse_tdm_tx_route(tsa, tdm_np, tsa->tdms, tdm_id);
+ if (ret) {
+ of_node_put(tdm_np);
+ goto err;
+ }
+
+ tdm->is_enable = true;
+ }
+ return 0;
+
+err:
+ for (i = 0; i < 2; i++) {
+ if (tsa->tdm[i].l1rsync_clk) {
+ clk_disable_unprepare(tsa->tdm[i].l1rsync_clk);
+ clk_put(tsa->tdm[i].l1rsync_clk);
+ }
+ if (tsa->tdm[i].l1rclk_clk) {
+ clk_disable_unprepare(tsa->tdm[i].l1rclk_clk);
+ clk_put(tsa->tdm[i].l1rclk_clk);
+ }
+ if (tsa->tdm[i].l1tsync_clk) {
+ clk_disable_unprepare(tsa->tdm[i].l1tsync_clk);
+ clk_put(tsa->tdm[i].l1tsync_clk);
+ }
+ if (tsa->tdm[i].l1tclk_clk) {
+ clk_disable_unprepare(tsa->tdm[i].l1tclk_clk);
+ clk_put(tsa->tdm[i].l1tclk_clk);
+ }
+ }
+ return ret;
+}
+
+static void tsa_init_si_ram(struct tsa *tsa)
+{
+ resource_size_t i;
+
+ /* Fill all entries as the last one */
+ for (i = 0; i < tsa->si_ram_sz; i += 4)
+ tsa_write32(tsa->si_ram + i, TSA_SIRAM_ENTRY_LAST);
+}
+
+static int tsa_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *res;
+ struct tsa *tsa;
+ unsigned int i;
+ u32 val;
+ int ret;
+
+ tsa = devm_kzalloc(&pdev->dev, sizeof(*tsa), GFP_KERNEL);
+ if (!tsa)
+ return -ENOMEM;
+
+ tsa->dev = &pdev->dev;
+
+ for (i = 0; i < ARRAY_SIZE(tsa->serials); i++)
+ tsa->serials[i].id = i;
+
+ spin_lock_init(&tsa->lock);
+
+ tsa->si_regs = devm_platform_ioremap_resource_byname(pdev, "si_regs");
+ if (IS_ERR(tsa->si_regs))
+ return PTR_ERR(tsa->si_regs);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "si_ram");
+ if (!res) {
+ dev_err(tsa->dev, "si_ram resource missing\n");
+ return -EINVAL;
+ }
+ tsa->si_ram_sz = resource_size(res);
+ tsa->si_ram = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(tsa->si_ram))
+ return PTR_ERR(tsa->si_ram);
+
+ tsa_init_si_ram(tsa);
+
+ ret = tsa_of_parse_tdms(tsa, np);
+ if (ret)
+ return ret;
+
+ /* Set SIMODE */
+ val = 0;
+ if (tsa->tdm[0].is_enable)
+ val |= TSA_SIMODE_TDMA(tsa->tdm[0].simode_tdm);
+ if (tsa->tdm[1].is_enable)
+ val |= TSA_SIMODE_TDMB(tsa->tdm[1].simode_tdm);
+
+ tsa_clrsetbits32(tsa->si_regs + TSA_SIMODE,
+ TSA_SIMODE_TDMA(TSA_SIMODE_TDM_MASK) |
+ TSA_SIMODE_TDMB(TSA_SIMODE_TDM_MASK),
+ val);
+
+ /* Set SIGMR */
+ val = (tsa->tdms == BIT(TSA_TDMA)) ?
+ TSA_SIGMR_RDM_STATIC_TDMA : TSA_SIGMR_RDM_STATIC_TDMAB;
+ if (tsa->tdms & BIT(TSA_TDMA))
+ val |= TSA_SIGMR_ENA;
+ if (tsa->tdms & BIT(TSA_TDMB))
+ val |= TSA_SIGMR_ENB;
+ tsa_write8(tsa->si_regs + TSA_SIGMR, val);
+
+ platform_set_drvdata(pdev, tsa);
+
+ return 0;
+}
+
+static int tsa_remove(struct platform_device *pdev)
+{
+ struct tsa *tsa = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ if (tsa->tdm[i].l1rsync_clk) {
+ clk_disable_unprepare(tsa->tdm[i].l1rsync_clk);
+ clk_put(tsa->tdm[i].l1rsync_clk);
+ }
+ if (tsa->tdm[i].l1rclk_clk) {
+ clk_disable_unprepare(tsa->tdm[i].l1rclk_clk);
+ clk_put(tsa->tdm[i].l1rclk_clk);
+ }
+ if (tsa->tdm[i].l1tsync_clk) {
+ clk_disable_unprepare(tsa->tdm[i].l1tsync_clk);
+ clk_put(tsa->tdm[i].l1tsync_clk);
+ }
+ if (tsa->tdm[i].l1tclk_clk) {
+ clk_disable_unprepare(tsa->tdm[i].l1tclk_clk);
+ clk_put(tsa->tdm[i].l1tclk_clk);
+ }
+ }
+ return 0;
+}
+
+static const struct of_device_id tsa_id_table[] = {
+ { .compatible = "fsl,cpm1-tsa" },
+ {} /* sentinel */
+};
+MODULE_DEVICE_TABLE(of, tsa_id_table);
+
+static struct platform_driver tsa_driver = {
+ .driver = {
+ .name = "fsl-tsa",
+ .of_match_table = of_match_ptr(tsa_id_table),
+ },
+ .probe = tsa_probe,
+ .remove = tsa_remove,
+};
+module_platform_driver(tsa_driver);
+
+struct tsa_serial *tsa_serial_get_byphandle(struct device_node *np,
+ const char *phandle_name)
+{
+ struct of_phandle_args out_args;
+ struct platform_device *pdev;
+ struct tsa_serial *tsa_serial;
+ struct tsa *tsa;
+ int ret;
+
+ ret = of_parse_phandle_with_fixed_args(np, phandle_name, 1, 0, &out_args);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ if (!of_match_node(tsa_driver.driver.of_match_table, out_args.np)) {
+ of_node_put(out_args.np);
+ return ERR_PTR(-EINVAL);
+ }
+
+ pdev = of_find_device_by_node(out_args.np);
+ of_node_put(out_args.np);
+ if (!pdev)
+ return ERR_PTR(-ENODEV);
+
+ tsa = platform_get_drvdata(pdev);
+ if (!tsa) {
+ platform_device_put(pdev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ if (out_args.args_count != 1) {
+ platform_device_put(pdev);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (out_args.args[0] >= ARRAY_SIZE(tsa->serials)) {
+ platform_device_put(pdev);
+ return ERR_PTR(-EINVAL);
+ }
+
+ tsa_serial = &tsa->serials[out_args.args[0]];
+
+ /*
+ * Be sure that the serial id matches the phandle arg.
+ * The tsa_serials table is indexed by serial ids. The serial id is set
+ * during the probe() call and needs to be coherent.
+ */
+ if (WARN_ON(tsa_serial->id != out_args.args[0])) {
+ platform_device_put(pdev);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return tsa_serial;
+}
+EXPORT_SYMBOL(tsa_serial_get_byphandle);
+
+void tsa_serial_put(struct tsa_serial *tsa_serial)
+{
+ struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
+
+ put_device(tsa->dev);
+}
+EXPORT_SYMBOL(tsa_serial_put);
+
+static void devm_tsa_serial_release(struct device *dev, void *res)
+{
+ struct tsa_serial **tsa_serial = res;
+
+ tsa_serial_put(*tsa_serial);
+}
+
+struct tsa_serial *devm_tsa_serial_get_byphandle(struct device *dev,
+ struct device_node *np,
+ const char *phandle_name)
+{
+ struct tsa_serial *tsa_serial;
+ struct tsa_serial **dr;
+
+ dr = devres_alloc(devm_tsa_serial_release, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return ERR_PTR(-ENOMEM);
+
+ tsa_serial = tsa_serial_get_byphandle(np, phandle_name);
+ if (!IS_ERR(tsa_serial)) {
+ *dr = tsa_serial;
+ devres_add(dev, dr);
+ } else {
+ devres_free(dr);
+ }
+
+ return tsa_serial;
+}
+EXPORT_SYMBOL(devm_tsa_serial_get_byphandle);
+
+MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
+MODULE_DESCRIPTION("CPM TSA driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/fsl/qe/tsa.h b/drivers/soc/fsl/qe/tsa.h
new file mode 100644
index 0000000000..d9df89b6da
--- /dev/null
+++ b/drivers/soc/fsl/qe/tsa.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * TSA management
+ *
+ * Copyright 2022 CS GROUP France
+ *
+ * Author: Herve Codina <herve.codina@bootlin.com>
+ */
+#ifndef __SOC_FSL_TSA_H__
+#define __SOC_FSL_TSA_H__
+
+#include <linux/types.h>
+
+struct device_node;
+struct device;
+struct tsa_serial;
+
+struct tsa_serial *tsa_serial_get_byphandle(struct device_node *np,
+ const char *phandle_name);
+void tsa_serial_put(struct tsa_serial *tsa_serial);
+struct tsa_serial *devm_tsa_serial_get_byphandle(struct device *dev,
+ struct device_node *np,
+ const char *phandle_name);
+
+/* Connect and disconnect the TSA serial */
+int tsa_serial_connect(struct tsa_serial *tsa_serial);
+int tsa_serial_disconnect(struct tsa_serial *tsa_serial);
+
+/* Cell information */
+struct tsa_serial_info {
+ unsigned long rx_fs_rate;
+ unsigned long rx_bit_rate;
+ u8 nb_rx_ts;
+ unsigned long tx_fs_rate;
+ unsigned long tx_bit_rate;
+ u8 nb_tx_ts;
+};
+
+/* Get information */
+int tsa_serial_get_info(struct tsa_serial *tsa_serial, struct tsa_serial_info *info);
+
+#endif /* __SOC_FSL_TSA_H__ */
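/*
 * Minimal consumer sketch (illustrative only, not part of the upstream
 * sources): a hypothetical CPM serial driver grabbing its TSA serial
 * through a "fsl,tsa-serial" phandle, connecting it and reading back the
 * routed time-slot counts. The property name, includes and error handling
 * are assumptions made for the example.
 */
#include <linux/device.h>
#include <linux/err.h>

#include "tsa.h"

static int example_attach_tsa(struct device *dev)
{
	struct tsa_serial_info info;
	struct tsa_serial *serial;
	int ret;

	serial = devm_tsa_serial_get_byphandle(dev, dev->of_node,
					       "fsl,tsa-serial");
	if (IS_ERR(serial))
		return PTR_ERR(serial);

	ret = tsa_serial_connect(serial);
	if (ret)
		return ret;

	ret = tsa_serial_get_info(serial, &info);
	if (ret) {
		tsa_serial_disconnect(serial);
		return ret;
	}

	dev_info(dev, "%u RX and %u TX time slots routed\n",
		 info.nb_rx_ts, info.nb_tx_ts);
	return 0;
}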
diff --git a/drivers/soc/fsl/qe/ucc.c b/drivers/soc/fsl/qe/ucc.c
new file mode 100644
index 0000000000..21dbcd787c
--- /dev/null
+++ b/drivers/soc/fsl/qe/ucc.c
@@ -0,0 +1,657 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * arch/powerpc/sysdev/qe_lib/ucc.c
+ *
+ * QE UCC API Set - UCC specific routines implementations.
+ *
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors: Shlomi Gridish <gridish@freescale.com>
+ * Li Yang <leoli@freescale.com>
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/stddef.h>
+#include <linux/spinlock.h>
+#include <linux/export.h>
+
+#include <asm/io.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+#include <soc/fsl/qe/ucc.h>
+
+#define UCC_TDM_NUM 8
+#define RX_SYNC_SHIFT_BASE 30
+#define TX_SYNC_SHIFT_BASE 14
+#define RX_CLK_SHIFT_BASE 28
+#define TX_CLK_SHIFT_BASE 12
+
+int ucc_set_qe_mux_mii_mng(unsigned int ucc_num)
+{
+ unsigned long flags;
+
+ if (ucc_num > UCC_MAX_NUM - 1)
+ return -EINVAL;
+
+ spin_lock_irqsave(&cmxgcr_lock, flags);
+ qe_clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
+ ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT);
+ spin_unlock_irqrestore(&cmxgcr_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(ucc_set_qe_mux_mii_mng);
+
+/* Configure the UCC to either Slow or Fast.
+ *
+ * A given UCC can be configured to support either "slow" devices (e.g. UART)
+ * or "fast" devices (e.g. Ethernet).
+ *
+ * 'ucc_num' is the UCC number, from 0 - 7.
+ *
+ * This function also sets the UCC_GUEMR_SET_RESERVED3 bit because that bit
+ * must always be set to 1.
+ */
+int ucc_set_type(unsigned int ucc_num, enum ucc_speed_type speed)
+{
+ u8 __iomem *guemr;
+
+ /* The GUEMR register is at the same location for both slow and fast
+ devices, so we just use uccX.slow.guemr. */
+ switch (ucc_num) {
+ case 0: guemr = &qe_immr->ucc1.slow.guemr;
+ break;
+ case 1: guemr = &qe_immr->ucc2.slow.guemr;
+ break;
+ case 2: guemr = &qe_immr->ucc3.slow.guemr;
+ break;
+ case 3: guemr = &qe_immr->ucc4.slow.guemr;
+ break;
+ case 4: guemr = &qe_immr->ucc5.slow.guemr;
+ break;
+ case 5: guemr = &qe_immr->ucc6.slow.guemr;
+ break;
+ case 6: guemr = &qe_immr->ucc7.slow.guemr;
+ break;
+ case 7: guemr = &qe_immr->ucc8.slow.guemr;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ qe_clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK,
+ UCC_GUEMR_SET_RESERVED3 | speed);
+
+ return 0;
+}
+
+static void get_cmxucr_reg(unsigned int ucc_num, __be32 __iomem **cmxucr,
+ unsigned int *reg_num, unsigned int *shift)
+{
+ unsigned int cmx = ((ucc_num & 1) << 1) + (ucc_num > 3);
+
+ *reg_num = cmx + 1;
+ *cmxucr = &qe_immr->qmx.cmxucr[cmx];
+ *shift = 16 - 8 * (ucc_num & 2);
+}
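/*
 * Worked example (illustrative sketch, not part of the upstream sources)
 * of the mapping computed by get_cmxucr_reg(): each CMXUCR register holds
 * the fields of two UCCs, one in the upper half-word (shift 16) and one
 * in the lower half-word (shift 0):
 *
 *   ucc_num 0 (UCC1): cmxucr[0] (CMXUCR1), shift 16
 *   ucc_num 2 (UCC3): cmxucr[0] (CMXUCR1), shift  0
 *   ucc_num 1 (UCC2): cmxucr[2] (CMXUCR3), shift 16
 *   ucc_num 3 (UCC4): cmxucr[2] (CMXUCR3), shift  0
 *   ucc_num 4 (UCC5): cmxucr[1] (CMXUCR2), shift 16
 *   ucc_num 6 (UCC7): cmxucr[1] (CMXUCR2), shift  0
 *   ucc_num 5 (UCC6): cmxucr[3] (CMXUCR4), shift 16
 *   ucc_num 7 (UCC8): cmxucr[3] (CMXUCR4), shift  0
 */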
+
+int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask)
+{
+ __be32 __iomem *cmxucr;
+ unsigned int reg_num;
+ unsigned int shift;
+
+ /* check if the UCC number is in range. */
+ if (ucc_num > UCC_MAX_NUM - 1)
+ return -EINVAL;
+
+ get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
+
+ if (set)
+ qe_setbits_be32(cmxucr, mask << shift);
+ else
+ qe_clrbits_be32(cmxucr, mask << shift);
+
+ return 0;
+}
+
+int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock,
+ enum comm_dir mode)
+{
+ __be32 __iomem *cmxucr;
+ unsigned int reg_num;
+ unsigned int shift;
+ u32 clock_bits = 0;
+
+ /* check if the UCC number is in range. */
+ if (ucc_num > UCC_MAX_NUM - 1)
+ return -EINVAL;
+
+ /* The communications direction must be RX or TX */
+ if (!((mode == COMM_DIR_RX) || (mode == COMM_DIR_TX)))
+ return -EINVAL;
+
+ get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
+
+ switch (reg_num) {
+ case 1:
+ switch (clock) {
+ case QE_BRG1: clock_bits = 1; break;
+ case QE_BRG2: clock_bits = 2; break;
+ case QE_BRG7: clock_bits = 3; break;
+ case QE_BRG8: clock_bits = 4; break;
+ case QE_CLK9: clock_bits = 5; break;
+ case QE_CLK10: clock_bits = 6; break;
+ case QE_CLK11: clock_bits = 7; break;
+ case QE_CLK12: clock_bits = 8; break;
+ case QE_CLK15: clock_bits = 9; break;
+ case QE_CLK16: clock_bits = 10; break;
+ default: break;
+ }
+ break;
+ case 2:
+ switch (clock) {
+ case QE_BRG5: clock_bits = 1; break;
+ case QE_BRG6: clock_bits = 2; break;
+ case QE_BRG7: clock_bits = 3; break;
+ case QE_BRG8: clock_bits = 4; break;
+ case QE_CLK13: clock_bits = 5; break;
+ case QE_CLK14: clock_bits = 6; break;
+ case QE_CLK19: clock_bits = 7; break;
+ case QE_CLK20: clock_bits = 8; break;
+ case QE_CLK15: clock_bits = 9; break;
+ case QE_CLK16: clock_bits = 10; break;
+ default: break;
+ }
+ break;
+ case 3:
+ switch (clock) {
+ case QE_BRG9: clock_bits = 1; break;
+ case QE_BRG10: clock_bits = 2; break;
+ case QE_BRG15: clock_bits = 3; break;
+ case QE_BRG16: clock_bits = 4; break;
+ case QE_CLK3: clock_bits = 5; break;
+ case QE_CLK4: clock_bits = 6; break;
+ case QE_CLK17: clock_bits = 7; break;
+ case QE_CLK18: clock_bits = 8; break;
+ case QE_CLK7: clock_bits = 9; break;
+ case QE_CLK8: clock_bits = 10; break;
+ case QE_CLK16: clock_bits = 11; break;
+ default: break;
+ }
+ break;
+ case 4:
+ switch (clock) {
+ case QE_BRG13: clock_bits = 1; break;
+ case QE_BRG14: clock_bits = 2; break;
+ case QE_BRG15: clock_bits = 3; break;
+ case QE_BRG16: clock_bits = 4; break;
+ case QE_CLK5: clock_bits = 5; break;
+ case QE_CLK6: clock_bits = 6; break;
+ case QE_CLK21: clock_bits = 7; break;
+ case QE_CLK22: clock_bits = 8; break;
+ case QE_CLK7: clock_bits = 9; break;
+ case QE_CLK8: clock_bits = 10; break;
+ case QE_CLK16: clock_bits = 11; break;
+ default: break;
+ }
+ break;
+ default: break;
+ }
+
+ /* Check for invalid combination of clock and UCC number */
+ if (!clock_bits)
+ return -ENOENT;
+
+ if (mode == COMM_DIR_RX)
+ shift += 4;
+
+ qe_clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+ clock_bits << shift);
+
+ return 0;
+}
+
+static int ucc_get_tdm_common_clk(u32 tdm_num, enum qe_clock clock)
+{
+ int clock_bits = -EINVAL;
+
+ /*
+ * for TDM[0, 1, 2, 3], TX and RX use common
+ * clock source BRG3,4 and CLK1,2
+ * for TDM[4, 5, 6, 7], TX and RX use common
+ * clock source BRG12,13 and CLK23,24
+ */
+ switch (tdm_num) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ switch (clock) {
+ case QE_BRG3:
+ clock_bits = 1;
+ break;
+ case QE_BRG4:
+ clock_bits = 2;
+ break;
+ case QE_CLK1:
+ clock_bits = 4;
+ break;
+ case QE_CLK2:
+ clock_bits = 5;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ switch (clock) {
+ case QE_BRG12:
+ clock_bits = 1;
+ break;
+ case QE_BRG13:
+ clock_bits = 2;
+ break;
+ case QE_CLK23:
+ clock_bits = 4;
+ break;
+ case QE_CLK24:
+ clock_bits = 5;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return clock_bits;
+}
+
+static int ucc_get_tdm_rx_clk(u32 tdm_num, enum qe_clock clock)
+{
+ int clock_bits = -EINVAL;
+
+ switch (tdm_num) {
+ case 0:
+ switch (clock) {
+ case QE_CLK3:
+ clock_bits = 6;
+ break;
+ case QE_CLK8:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 1:
+ switch (clock) {
+ case QE_CLK5:
+ clock_bits = 6;
+ break;
+ case QE_CLK10:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 2:
+ switch (clock) {
+ case QE_CLK7:
+ clock_bits = 6;
+ break;
+ case QE_CLK12:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 3:
+ switch (clock) {
+ case QE_CLK9:
+ clock_bits = 6;
+ break;
+ case QE_CLK14:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 4:
+ switch (clock) {
+ case QE_CLK11:
+ clock_bits = 6;
+ break;
+ case QE_CLK16:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 5:
+ switch (clock) {
+ case QE_CLK13:
+ clock_bits = 6;
+ break;
+ case QE_CLK18:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 6:
+ switch (clock) {
+ case QE_CLK15:
+ clock_bits = 6;
+ break;
+ case QE_CLK20:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 7:
+ switch (clock) {
+ case QE_CLK17:
+ clock_bits = 6;
+ break;
+ case QE_CLK22:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+
+ return clock_bits;
+}
+
+static int ucc_get_tdm_tx_clk(u32 tdm_num, enum qe_clock clock)
+{
+ int clock_bits = -EINVAL;
+
+ switch (tdm_num) {
+ case 0:
+ switch (clock) {
+ case QE_CLK4:
+ clock_bits = 6;
+ break;
+ case QE_CLK9:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 1:
+ switch (clock) {
+ case QE_CLK6:
+ clock_bits = 6;
+ break;
+ case QE_CLK11:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 2:
+ switch (clock) {
+ case QE_CLK8:
+ clock_bits = 6;
+ break;
+ case QE_CLK13:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 3:
+ switch (clock) {
+ case QE_CLK10:
+ clock_bits = 6;
+ break;
+ case QE_CLK15:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 4:
+ switch (clock) {
+ case QE_CLK12:
+ clock_bits = 6;
+ break;
+ case QE_CLK17:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 5:
+ switch (clock) {
+ case QE_CLK14:
+ clock_bits = 6;
+ break;
+ case QE_CLK19:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 6:
+ switch (clock) {
+ case QE_CLK16:
+ clock_bits = 6;
+ break;
+ case QE_CLK21:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 7:
+ switch (clock) {
+ case QE_CLK18:
+ clock_bits = 6;
+ break;
+ case QE_CLK3:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+
+ return clock_bits;
+}
+
+/* tdm_num: TDM A-H port num is 0-7 */
+static int ucc_get_tdm_rxtx_clk(enum comm_dir mode, u32 tdm_num,
+ enum qe_clock clock)
+{
+ int clock_bits;
+
+ clock_bits = ucc_get_tdm_common_clk(tdm_num, clock);
+ if (clock_bits > 0)
+ return clock_bits;
+ if (mode == COMM_DIR_RX)
+ clock_bits = ucc_get_tdm_rx_clk(tdm_num, clock);
+ if (mode == COMM_DIR_TX)
+ clock_bits = ucc_get_tdm_tx_clk(tdm_num, clock);
+ return clock_bits;
+}
+
+static u32 ucc_get_tdm_clk_shift(enum comm_dir mode, u32 tdm_num)
+{
+ u32 shift;
+
+ shift = (mode == COMM_DIR_RX) ? RX_CLK_SHIFT_BASE : TX_CLK_SHIFT_BASE;
+ if (tdm_num < 4)
+ shift -= tdm_num * 4;
+ else
+ shift -= (tdm_num - 4) * 4;
+
+ return shift;
+}
+
+int ucc_set_tdm_rxtx_clk(u32 tdm_num, enum qe_clock clock,
+ enum comm_dir mode)
+{
+ int clock_bits;
+ u32 shift;
+ struct qe_mux __iomem *qe_mux_reg;
+ __be32 __iomem *cmxs1cr;
+
+ qe_mux_reg = &qe_immr->qmx;
+
+ if (tdm_num > 7)
+ return -EINVAL;
+
+ /* The communications direction must be RX or TX */
+ if (mode != COMM_DIR_RX && mode != COMM_DIR_TX)
+ return -EINVAL;
+
+ clock_bits = ucc_get_tdm_rxtx_clk(mode, tdm_num, clock);
+ if (clock_bits < 0)
+ return -EINVAL;
+
+ shift = ucc_get_tdm_clk_shift(mode, tdm_num);
+
+ cmxs1cr = (tdm_num < 4) ? &qe_mux_reg->cmxsi1cr_l :
+ &qe_mux_reg->cmxsi1cr_h;
+
+ qe_clrsetbits_be32(cmxs1cr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+ clock_bits << shift);
+
+ return 0;
+}
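/*
 * Worked example (illustrative sketch, not part of the upstream sources):
 * each TDM owns a 4-bit RX field and a 4-bit TX field in CMXSI1CR_L
 * (TDM A-D) or CMXSI1CR_H (TDM E-H), so ucc_get_tdm_clk_shift() resolves
 * to:
 *
 *   TDM0/TDM4: RX shift 28, TX shift 12
 *   TDM1/TDM5: RX shift 24, TX shift  8
 *   TDM2/TDM6: RX shift 20, TX shift  4
 *   TDM3/TDM7: RX shift 16, TX shift  0
 */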
+
+static int ucc_get_tdm_sync_source(u32 tdm_num, enum qe_clock clock,
+ enum comm_dir mode)
+{
+ int source = -EINVAL;
+
+ if (mode == COMM_DIR_RX && clock == QE_RSYNC_PIN) {
+ source = 0;
+ return source;
+ }
+ if (mode == COMM_DIR_TX && clock == QE_TSYNC_PIN) {
+ source = 0;
+ return source;
+ }
+
+ switch (tdm_num) {
+ case 0:
+ case 1:
+ switch (clock) {
+ case QE_BRG9:
+ source = 1;
+ break;
+ case QE_BRG10:
+ source = 2;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 2:
+ case 3:
+ switch (clock) {
+ case QE_BRG9:
+ source = 1;
+ break;
+ case QE_BRG11:
+ source = 2;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 4:
+ case 5:
+ switch (clock) {
+ case QE_BRG13:
+ source = 1;
+ break;
+ case QE_BRG14:
+ source = 2;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 6:
+ case 7:
+ switch (clock) {
+ case QE_BRG13:
+ source = 1;
+ break;
+ case QE_BRG15:
+ source = 2;
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+
+ return source;
+}
+
+static u32 ucc_get_tdm_sync_shift(enum comm_dir mode, u32 tdm_num)
+{
+ u32 shift;
+
+ shift = (mode == COMM_DIR_RX) ? RX_SYNC_SHIFT_BASE : TX_SYNC_SHIFT_BASE;
+ shift -= tdm_num * 2;
+
+ return shift;
+}
+
+int ucc_set_tdm_rxtx_sync(u32 tdm_num, enum qe_clock clock,
+ enum comm_dir mode)
+{
+ int source;
+ u32 shift;
+ struct qe_mux __iomem *qe_mux_reg;
+
+ qe_mux_reg = &qe_immr->qmx;
+
+ if (tdm_num >= UCC_TDM_NUM)
+ return -EINVAL;
+
+ /* The communications direction must be RX or TX */
+ if (mode != COMM_DIR_RX && mode != COMM_DIR_TX)
+ return -EINVAL;
+
+ source = ucc_get_tdm_sync_source(tdm_num, clock, mode);
+ if (source < 0)
+ return -EINVAL;
+
+ shift = ucc_get_tdm_sync_shift(mode, tdm_num);
+
+ qe_clrsetbits_be32(&qe_mux_reg->cmxsi1syr,
+ QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+ source << shift);
+
+ return 0;
+}
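/*
 * Worked example (illustrative sketch, not part of the upstream sources):
 * every TDM owns a 2-bit RX sync field and a 2-bit TX sync field in
 * CMXSI1SYR, so ucc_get_tdm_sync_shift() resolves to:
 *
 *   TDM0: RX shift 30, TX shift 14
 *   TDM1: RX shift 28, TX shift 12
 *   ...
 *   TDM7: RX shift 16, TX shift  0
 */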
diff --git a/drivers/soc/fsl/qe/ucc_fast.c b/drivers/soc/fsl/qe/ucc_fast.c
new file mode 100644
index 0000000000..53d8aafc93
--- /dev/null
+++ b/drivers/soc/fsl/qe/ucc_fast.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors: Shlomi Gridish <gridish@freescale.com>
+ * Li Yang <leoli@freescale.com>
+ *
+ * Description:
+ * QE UCC Fast API Set - UCC Fast specific routines implementations.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/export.h>
+
+#include <asm/io.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+
+#include <soc/fsl/qe/ucc.h>
+#include <soc/fsl/qe/ucc_fast.h>
+
+void ucc_fast_dump_regs(struct ucc_fast_private * uccf)
+{
+ printk(KERN_INFO "UCC%u Fast registers:\n", uccf->uf_info->ucc_num);
+ printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs);
+
+ printk(KERN_INFO "gumr : addr=0x%p, val=0x%08x\n",
+ &uccf->uf_regs->gumr, ioread32be(&uccf->uf_regs->gumr));
+ printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n",
+ &uccf->uf_regs->upsmr, ioread32be(&uccf->uf_regs->upsmr));
+ printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->utodr, ioread16be(&uccf->uf_regs->utodr));
+ printk(KERN_INFO "udsr : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->udsr, ioread16be(&uccf->uf_regs->udsr));
+ printk(KERN_INFO "ucce : addr=0x%p, val=0x%08x\n",
+ &uccf->uf_regs->ucce, ioread32be(&uccf->uf_regs->ucce));
+ printk(KERN_INFO "uccm : addr=0x%p, val=0x%08x\n",
+ &uccf->uf_regs->uccm, ioread32be(&uccf->uf_regs->uccm));
+ printk(KERN_INFO "uccs : addr=0x%p, val=0x%02x\n",
+ &uccf->uf_regs->uccs, ioread8(&uccf->uf_regs->uccs));
+ printk(KERN_INFO "urfb : addr=0x%p, val=0x%08x\n",
+ &uccf->uf_regs->urfb, ioread32be(&uccf->uf_regs->urfb));
+ printk(KERN_INFO "urfs : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->urfs, ioread16be(&uccf->uf_regs->urfs));
+ printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->urfet, ioread16be(&uccf->uf_regs->urfet));
+ printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->urfset,
+ ioread16be(&uccf->uf_regs->urfset));
+ printk(KERN_INFO "utfb : addr=0x%p, val=0x%08x\n",
+ &uccf->uf_regs->utfb, ioread32be(&uccf->uf_regs->utfb));
+ printk(KERN_INFO "utfs : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->utfs, ioread16be(&uccf->uf_regs->utfs));
+ printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->utfet, ioread16be(&uccf->uf_regs->utfet));
+ printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->utftt, ioread16be(&uccf->uf_regs->utftt));
+ printk(KERN_INFO "utpt : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->utpt, ioread16be(&uccf->uf_regs->utpt));
+ printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n",
+ &uccf->uf_regs->urtry, ioread32be(&uccf->uf_regs->urtry));
+ printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n",
+ &uccf->uf_regs->guemr, ioread8(&uccf->uf_regs->guemr));
+}
+EXPORT_SYMBOL(ucc_fast_dump_regs);
+
+u32 ucc_fast_get_qe_cr_subblock(int uccf_num)
+{
+ switch (uccf_num) {
+ case 0: return QE_CR_SUBBLOCK_UCCFAST1;
+ case 1: return QE_CR_SUBBLOCK_UCCFAST2;
+ case 2: return QE_CR_SUBBLOCK_UCCFAST3;
+ case 3: return QE_CR_SUBBLOCK_UCCFAST4;
+ case 4: return QE_CR_SUBBLOCK_UCCFAST5;
+ case 5: return QE_CR_SUBBLOCK_UCCFAST6;
+ case 6: return QE_CR_SUBBLOCK_UCCFAST7;
+ case 7: return QE_CR_SUBBLOCK_UCCFAST8;
+ default: return QE_CR_SUBBLOCK_INVALID;
+ }
+}
+EXPORT_SYMBOL(ucc_fast_get_qe_cr_subblock);
+
+void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf)
+{
+ iowrite16be(UCC_FAST_TOD, &uccf->uf_regs->utodr);
+}
+EXPORT_SYMBOL(ucc_fast_transmit_on_demand);
+
+void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode)
+{
+ struct ucc_fast __iomem *uf_regs;
+ u32 gumr;
+
+ uf_regs = uccf->uf_regs;
+
+ /* Enable reception and/or transmission on this UCC. */
+ gumr = ioread32be(&uf_regs->gumr);
+ if (mode & COMM_DIR_TX) {
+ gumr |= UCC_FAST_GUMR_ENT;
+ uccf->enabled_tx = 1;
+ }
+ if (mode & COMM_DIR_RX) {
+ gumr |= UCC_FAST_GUMR_ENR;
+ uccf->enabled_rx = 1;
+ }
+ iowrite32be(gumr, &uf_regs->gumr);
+}
+EXPORT_SYMBOL(ucc_fast_enable);
+
+void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode)
+{
+ struct ucc_fast __iomem *uf_regs;
+ u32 gumr;
+
+ uf_regs = uccf->uf_regs;
+
+ /* Disable reception and/or transmission on this UCC. */
+ gumr = ioread32be(&uf_regs->gumr);
+ if (mode & COMM_DIR_TX) {
+ gumr &= ~UCC_FAST_GUMR_ENT;
+ uccf->enabled_tx = 0;
+ }
+ if (mode & COMM_DIR_RX) {
+ gumr &= ~UCC_FAST_GUMR_ENR;
+ uccf->enabled_rx = 0;
+ }
+ iowrite32be(gumr, &uf_regs->gumr);
+}
+EXPORT_SYMBOL(ucc_fast_disable);
+
+int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** uccf_ret)
+{
+ struct ucc_fast_private *uccf;
+ struct ucc_fast __iomem *uf_regs;
+ u32 gumr;
+ int ret;
+
+ if (!uf_info)
+ return -EINVAL;
+
+ /* check if the UCC port number is in range. */
+ if ((uf_info->ucc_num < 0) || (uf_info->ucc_num > UCC_MAX_NUM - 1)) {
+ printk(KERN_ERR "%s: illegal UCC number\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Check that 'max_rx_buf_length' is properly aligned (4). */
+ if (uf_info->max_rx_buf_length & (UCC_FAST_MRBLR_ALIGNMENT - 1)) {
+ printk(KERN_ERR "%s: max_rx_buf_length not aligned\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Validate Virtual Fifo register values */
+ if (uf_info->urfs < UCC_FAST_URFS_MIN_VAL) {
+ printk(KERN_ERR "%s: urfs is too small\n", __func__);
+ return -EINVAL;
+ }
+
+ if (uf_info->urfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
+ printk(KERN_ERR "%s: urfs is not aligned\n", __func__);
+ return -EINVAL;
+ }
+
+ if (uf_info->urfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
+ printk(KERN_ERR "%s: urfet is not aligned.\n", __func__);
+ return -EINVAL;
+ }
+
+ if (uf_info->urfset & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
+ printk(KERN_ERR "%s: urfset is not aligned\n", __func__);
+ return -EINVAL;
+ }
+
+ if (uf_info->utfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
+ printk(KERN_ERR "%s: utfs is not aligned\n", __func__);
+ return -EINVAL;
+ }
+
+ if (uf_info->utfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
+ printk(KERN_ERR "%s: utfet is not aligned\n", __func__);
+ return -EINVAL;
+ }
+
+ if (uf_info->utftt & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
+ printk(KERN_ERR "%s: utftt is not aligned\n", __func__);
+ return -EINVAL;
+ }
+
+ uccf = kzalloc(sizeof(struct ucc_fast_private), GFP_KERNEL);
+ if (!uccf) {
+ printk(KERN_ERR "%s: Cannot allocate private data\n",
+ __func__);
+ return -ENOMEM;
+ }
+ uccf->ucc_fast_tx_virtual_fifo_base_offset = -1;
+ uccf->ucc_fast_rx_virtual_fifo_base_offset = -1;
+
+ /* Fill fast UCC structure */
+ uccf->uf_info = uf_info;
+ /* Set the PHY base address */
+ uccf->uf_regs = ioremap(uf_info->regs, sizeof(struct ucc_fast));
+ if (uccf->uf_regs == NULL) {
+ printk(KERN_ERR "%s: Cannot map UCC registers\n", __func__);
+ kfree(uccf);
+ return -ENOMEM;
+ }
+
+ uccf->enabled_tx = 0;
+ uccf->enabled_rx = 0;
+ uccf->stopped_tx = 0;
+ uccf->stopped_rx = 0;
+ uf_regs = uccf->uf_regs;
+ uccf->p_ucce = &uf_regs->ucce;
+ uccf->p_uccm = &uf_regs->uccm;
+#ifdef CONFIG_UGETH_TX_ON_DEMAND
+ uccf->p_utodr = &uf_regs->utodr;
+#endif
+#ifdef STATISTICS
+ uccf->tx_frames = 0;
+ uccf->rx_frames = 0;
+ uccf->rx_discarded = 0;
+#endif /* STATISTICS */
+
+ /* Set UCC to fast type */
+ ret = ucc_set_type(uf_info->ucc_num, UCC_SPEED_TYPE_FAST);
+ if (ret) {
+ printk(KERN_ERR "%s: cannot set UCC type\n", __func__);
+ ucc_fast_free(uccf);
+ return ret;
+ }
+
+ uccf->mrblr = uf_info->max_rx_buf_length;
+
+ /* Set GUMR */
+ /* For more details see the hardware spec. */
+ gumr = uf_info->ttx_trx;
+ if (uf_info->tci)
+ gumr |= UCC_FAST_GUMR_TCI;
+ if (uf_info->cdp)
+ gumr |= UCC_FAST_GUMR_CDP;
+ if (uf_info->ctsp)
+ gumr |= UCC_FAST_GUMR_CTSP;
+ if (uf_info->cds)
+ gumr |= UCC_FAST_GUMR_CDS;
+ if (uf_info->ctss)
+ gumr |= UCC_FAST_GUMR_CTSS;
+ if (uf_info->txsy)
+ gumr |= UCC_FAST_GUMR_TXSY;
+ if (uf_info->rsyn)
+ gumr |= UCC_FAST_GUMR_RSYN;
+ gumr |= uf_info->synl;
+ if (uf_info->rtsm)
+ gumr |= UCC_FAST_GUMR_RTSM;
+ gumr |= uf_info->renc;
+ if (uf_info->revd)
+ gumr |= UCC_FAST_GUMR_REVD;
+ gumr |= uf_info->tenc;
+ gumr |= uf_info->tcrc;
+ gumr |= uf_info->mode;
+ iowrite32be(gumr, &uf_regs->gumr);
+
+ /* Allocate memory for Tx Virtual Fifo */
+ uccf->ucc_fast_tx_virtual_fifo_base_offset =
+ qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
+ if (uccf->ucc_fast_tx_virtual_fifo_base_offset < 0) {
+ printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO\n",
+ __func__);
+ ucc_fast_free(uccf);
+ return -ENOMEM;
+ }
+
+ /* Allocate memory for Rx Virtual Fifo */
+ uccf->ucc_fast_rx_virtual_fifo_base_offset =
+ qe_muram_alloc(uf_info->urfs +
+ UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR,
+ UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
+ if (uccf->ucc_fast_rx_virtual_fifo_base_offset < 0) {
+ printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO\n",
+ __func__);
+ ucc_fast_free(uccf);
+ return -ENOMEM;
+ }
+
+ /* Set Virtual Fifo registers */
+ iowrite16be(uf_info->urfs, &uf_regs->urfs);
+ iowrite16be(uf_info->urfet, &uf_regs->urfet);
+ iowrite16be(uf_info->urfset, &uf_regs->urfset);
+ iowrite16be(uf_info->utfs, &uf_regs->utfs);
+ iowrite16be(uf_info->utfet, &uf_regs->utfet);
+ iowrite16be(uf_info->utftt, &uf_regs->utftt);
+ /* utfb, urfb are offsets from MURAM base */
+ iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset,
+ &uf_regs->utfb);
+ iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset,
+ &uf_regs->urfb);
+
+ /* Mux clocking */
+ /* Grant Support */
+ ucc_set_qe_mux_grant(uf_info->ucc_num, uf_info->grant_support);
+ /* Breakpoint Support */
+ ucc_set_qe_mux_bkpt(uf_info->ucc_num, uf_info->brkpt_support);
+ /* Set Tsa or NMSI mode. */
+ ucc_set_qe_mux_tsa(uf_info->ucc_num, uf_info->tsa);
+ /* If NMSI (not Tsa), set Tx and Rx clock. */
+ if (!uf_info->tsa) {
+ /* Rx clock routing */
+ if ((uf_info->rx_clock != QE_CLK_NONE) &&
+ ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->rx_clock,
+ COMM_DIR_RX)) {
+ printk(KERN_ERR "%s: illegal value for RX clock\n",
+ __func__);
+ ucc_fast_free(uccf);
+ return -EINVAL;
+ }
+ /* Tx clock routing */
+ if ((uf_info->tx_clock != QE_CLK_NONE) &&
+ ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->tx_clock,
+ COMM_DIR_TX)) {
+ printk(KERN_ERR "%s: illegal value for TX clock\n",
+ __func__);
+ ucc_fast_free(uccf);
+ return -EINVAL;
+ }
+ } else {
+ /* tdm Rx clock routing */
+ if ((uf_info->rx_clock != QE_CLK_NONE) &&
+ ucc_set_tdm_rxtx_clk(uf_info->tdm_num, uf_info->rx_clock,
+ COMM_DIR_RX)) {
+ pr_err("%s: illegal value for RX clock", __func__);
+ ucc_fast_free(uccf);
+ return -EINVAL;
+ }
+
+ /* tdm Tx clock routing */
+ if ((uf_info->tx_clock != QE_CLK_NONE) &&
+ ucc_set_tdm_rxtx_clk(uf_info->tdm_num, uf_info->tx_clock,
+ COMM_DIR_TX)) {
+ pr_err("%s: illegal value for TX clock", __func__);
+ ucc_fast_free(uccf);
+ return -EINVAL;
+ }
+
+ /* tdm Rx sync clock routing */
+ if ((uf_info->rx_sync != QE_CLK_NONE) &&
+ ucc_set_tdm_rxtx_sync(uf_info->tdm_num, uf_info->rx_sync,
+ COMM_DIR_RX)) {
+ pr_err("%s: illegal value for RX clock", __func__);
+ ucc_fast_free(uccf);
+ return -EINVAL;
+ }
+
+ /* tdm Tx sync clock routing */
+ if ((uf_info->tx_sync != QE_CLK_NONE) &&
+ ucc_set_tdm_rxtx_sync(uf_info->tdm_num, uf_info->tx_sync,
+ COMM_DIR_TX)) {
+ pr_err("%s: illegal value for TX clock", __func__);
+ ucc_fast_free(uccf);
+ return -EINVAL;
+ }
+ }
+
+ /* Set interrupt mask register at UCC level. */
+ iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
+
+ /* First, clear anything pending at UCC level,
+ * otherwise, old garbage may come through
+ * as soon as the dam is opened. */
+
+ /* Writing '1' clears */
+ iowrite32be(0xffffffff, &uf_regs->ucce);
+
+ *uccf_ret = uccf;
+ return 0;
+}
+EXPORT_SYMBOL(ucc_fast_init);
+
+void ucc_fast_free(struct ucc_fast_private * uccf)
+{
+ if (!uccf)
+ return;
+
+ qe_muram_free(uccf->ucc_fast_tx_virtual_fifo_base_offset);
+ qe_muram_free(uccf->ucc_fast_rx_virtual_fifo_base_offset);
+
+ if (uccf->uf_regs)
+ iounmap(uccf->uf_regs);
+
+ kfree(uccf);
+}
+EXPORT_SYMBOL(ucc_fast_free);
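/*
 * Minimal usage sketch (illustrative only, not part of the upstream
 * sources): a hypothetical caller bringing up a fast UCC in NMSI mode.
 * The register base, buffer length, FIFO sizes and clock choices are
 * placeholders; real users such as the ucc_geth Ethernet driver fill in
 * many more ucc_fast_info fields.
 */
#include <soc/fsl/qe/ucc_fast.h>

static int example_fast_ucc_bringup(void)
{
	struct ucc_fast_private *uccf;
	struct ucc_fast_info uf_info = {
		.ucc_num = 0,			/* UCC1 */
		.regs = 0xe0102000,		/* hypothetical register base */
		.max_rx_buf_length = 1536,
		.urfs = UCC_FAST_URFS_MIN_VAL,
		.utfs = UCC_FAST_URFS_MIN_VAL,
		.rx_clock = QE_CLK9,		/* hypothetical clock routing */
		.tx_clock = QE_CLK10,
	};
	int ret;

	ret = ucc_fast_init(&uf_info, &uccf);
	if (ret)
		return ret;

	ucc_fast_enable(uccf, COMM_DIR_RX | COMM_DIR_TX);

	/* ... transfer data through the UCC ... */

	ucc_fast_disable(uccf, COMM_DIR_RX | COMM_DIR_TX);
	ucc_fast_free(uccf);
	return 0;
}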
diff --git a/drivers/soc/fsl/qe/ucc_slow.c b/drivers/soc/fsl/qe/ucc_slow.c
new file mode 100644
index 0000000000..d5ac1ac0ed
--- /dev/null
+++ b/drivers/soc/fsl/qe/ucc_slow.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors: Shlomi Gridish <gridish@freescale.com>
+ * Li Yang <leoli@freescale.com>
+ *
+ * Description:
+ * QE UCC Slow API Set - UCC Slow specific routines implementations.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/export.h>
+
+#include <asm/io.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+
+#include <soc/fsl/qe/ucc.h>
+#include <soc/fsl/qe/ucc_slow.h>
+
+u32 ucc_slow_get_qe_cr_subblock(int uccs_num)
+{
+ switch (uccs_num) {
+ case 0: return QE_CR_SUBBLOCK_UCCSLOW1;
+ case 1: return QE_CR_SUBBLOCK_UCCSLOW2;
+ case 2: return QE_CR_SUBBLOCK_UCCSLOW3;
+ case 3: return QE_CR_SUBBLOCK_UCCSLOW4;
+ case 4: return QE_CR_SUBBLOCK_UCCSLOW5;
+ case 5: return QE_CR_SUBBLOCK_UCCSLOW6;
+ case 6: return QE_CR_SUBBLOCK_UCCSLOW7;
+ case 7: return QE_CR_SUBBLOCK_UCCSLOW8;
+ default: return QE_CR_SUBBLOCK_INVALID;
+ }
+}
+EXPORT_SYMBOL(ucc_slow_get_qe_cr_subblock);
+
+void ucc_slow_graceful_stop_tx(struct ucc_slow_private * uccs)
+{
+ struct ucc_slow_info *us_info = uccs->us_info;
+ u32 id;
+
+ id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
+ qe_issue_cmd(QE_GRACEFUL_STOP_TX, id,
+ QE_CR_PROTOCOL_UNSPECIFIED, 0);
+}
+EXPORT_SYMBOL(ucc_slow_graceful_stop_tx);
+
+void ucc_slow_stop_tx(struct ucc_slow_private * uccs)
+{
+ struct ucc_slow_info *us_info = uccs->us_info;
+ u32 id;
+
+ id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
+ qe_issue_cmd(QE_STOP_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
+}
+EXPORT_SYMBOL(ucc_slow_stop_tx);
+
+void ucc_slow_restart_tx(struct ucc_slow_private * uccs)
+{
+ struct ucc_slow_info *us_info = uccs->us_info;
+ u32 id;
+
+ id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
+ qe_issue_cmd(QE_RESTART_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
+}
+EXPORT_SYMBOL(ucc_slow_restart_tx);
+
+void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode)
+{
+ struct ucc_slow __iomem *us_regs;
+ u32 gumr_l;
+
+ us_regs = uccs->us_regs;
+
+ /* Enable reception and/or transmission on this UCC. */
+ gumr_l = ioread32be(&us_regs->gumr_l);
+ if (mode & COMM_DIR_TX) {
+ gumr_l |= UCC_SLOW_GUMR_L_ENT;
+ uccs->enabled_tx = 1;
+ }
+ if (mode & COMM_DIR_RX) {
+ gumr_l |= UCC_SLOW_GUMR_L_ENR;
+ uccs->enabled_rx = 1;
+ }
+ iowrite32be(gumr_l, &us_regs->gumr_l);
+}
+EXPORT_SYMBOL(ucc_slow_enable);
+
+void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode)
+{
+ struct ucc_slow __iomem *us_regs;
+ u32 gumr_l;
+
+ us_regs = uccs->us_regs;
+
+ /* Disable reception and/or transmission on this UCC. */
+ gumr_l = ioread32be(&us_regs->gumr_l);
+ if (mode & COMM_DIR_TX) {
+ gumr_l &= ~UCC_SLOW_GUMR_L_ENT;
+ uccs->enabled_tx = 0;
+ }
+ if (mode & COMM_DIR_RX) {
+ gumr_l &= ~UCC_SLOW_GUMR_L_ENR;
+ uccs->enabled_rx = 0;
+ }
+ iowrite32be(gumr_l, &us_regs->gumr_l);
+}
+EXPORT_SYMBOL(ucc_slow_disable);
+
+/* Initialize the UCC for Slow operations
+ *
+ * The caller should initialize the us_info structure before calling.
+ */
+int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** uccs_ret)
+{
+ struct ucc_slow_private *uccs;
+ u32 i;
+ struct ucc_slow __iomem *us_regs;
+ u32 gumr;
+ struct qe_bd __iomem *bd;
+ u32 id;
+ u32 command;
+ int ret = 0;
+
+ if (!us_info)
+ return -EINVAL;
+
+ /* check if the UCC port number is in range. */
+ if ((us_info->ucc_num < 0) || (us_info->ucc_num > UCC_MAX_NUM - 1)) {
+ printk(KERN_ERR "%s: illegal UCC number\n", __func__);
+ return -EINVAL;
+ }
+
+ /*
+ * Set mrblr
+ * Check that 'max_rx_buf_length' is properly aligned (4), unless
+ * rfw is 1, meaning that QE accepts one byte at a time, unlike normal
+ * case when QE accepts 32 bits at a time.
+ */
+ if ((!us_info->rfw) &&
+ (us_info->max_rx_buf_length & (UCC_SLOW_MRBLR_ALIGNMENT - 1))) {
+ printk(KERN_ERR "max_rx_buf_length not aligned.\n");
+ return -EINVAL;
+ }
+
+ uccs = kzalloc(sizeof(struct ucc_slow_private), GFP_KERNEL);
+ if (!uccs) {
+ printk(KERN_ERR "%s: Cannot allocate private data\n",
+ __func__);
+ return -ENOMEM;
+ }
+ uccs->rx_base_offset = -1;
+ uccs->tx_base_offset = -1;
+ uccs->us_pram_offset = -1;
+
+ /* Fill slow UCC structure */
+ uccs->us_info = us_info;
+ /* Set the PHY base address */
+ uccs->us_regs = ioremap(us_info->regs, sizeof(struct ucc_slow));
+ if (uccs->us_regs == NULL) {
+ printk(KERN_ERR "%s: Cannot map UCC registers\n", __func__);
+ kfree(uccs);
+ return -ENOMEM;
+ }
+
+ us_regs = uccs->us_regs;
+ uccs->p_ucce = &us_regs->ucce;
+ uccs->p_uccm = &us_regs->uccm;
+
+ /* Get PRAM base */
+ uccs->us_pram_offset =
+ qe_muram_alloc(UCC_SLOW_PRAM_SIZE, ALIGNMENT_OF_UCC_SLOW_PRAM);
+ if (uccs->us_pram_offset < 0) {
+ printk(KERN_ERR "%s: cannot allocate MURAM for PRAM", __func__);
+ ucc_slow_free(uccs);
+ return -ENOMEM;
+ }
+ id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
+ qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, id, us_info->protocol,
+ uccs->us_pram_offset);
+
+ uccs->us_pram = qe_muram_addr(uccs->us_pram_offset);
+
+ /* Set UCC to slow type */
+ ret = ucc_set_type(us_info->ucc_num, UCC_SPEED_TYPE_SLOW);
+ if (ret) {
+ printk(KERN_ERR "%s: cannot set UCC type", __func__);
+ ucc_slow_free(uccs);
+ return ret;
+ }
+
+ iowrite16be(us_info->max_rx_buf_length, &uccs->us_pram->mrblr);
+
+ INIT_LIST_HEAD(&uccs->confQ);
+
+ /* Allocate BDs. */
+ uccs->rx_base_offset =
+ qe_muram_alloc(us_info->rx_bd_ring_len * sizeof(struct qe_bd),
+ QE_ALIGNMENT_OF_BD);
+ if (uccs->rx_base_offset < 0) {
+ printk(KERN_ERR "%s: cannot allocate %u RX BDs\n", __func__,
+ us_info->rx_bd_ring_len);
+ ucc_slow_free(uccs);
+ return -ENOMEM;
+ }
+
+ uccs->tx_base_offset =
+ qe_muram_alloc(us_info->tx_bd_ring_len * sizeof(struct qe_bd),
+ QE_ALIGNMENT_OF_BD);
+ if (uccs->tx_base_offset < 0) {
+ printk(KERN_ERR "%s: cannot allocate TX BDs", __func__);
+ ucc_slow_free(uccs);
+ return -ENOMEM;
+ }
+
+ /* Init Tx bds */
+ bd = uccs->confBd = uccs->tx_bd = qe_muram_addr(uccs->tx_base_offset);
+ for (i = 0; i < us_info->tx_bd_ring_len - 1; i++) {
+ /* clear bd buffer */
+ iowrite32be(0, &bd->buf);
+ /* set bd status and length */
+ iowrite32be(0, (u32 __iomem *)bd);
+ bd++;
+ }
+ /* for last BD set Wrap bit */
+ iowrite32be(0, &bd->buf);
+ iowrite32be(T_W, (u32 __iomem *)bd);
+
+ /* Init Rx bds */
+ bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset);
+ for (i = 0; i < us_info->rx_bd_ring_len - 1; i++) {
+ /* set bd status and length */
+ iowrite32be(0, (u32 __iomem *)bd);
+ /* clear bd buffer */
+ iowrite32be(0, &bd->buf);
+ bd++;
+ }
+ /* for last BD set Wrap bit */
+ iowrite32be(R_W, (u32 __iomem *)bd);
+ iowrite32be(0, &bd->buf);
+
+ /* Set GUMR (For more details see the hardware spec.). */
+ /* gumr_h */
+ gumr = us_info->tcrc;
+ if (us_info->cdp)
+ gumr |= UCC_SLOW_GUMR_H_CDP;
+ if (us_info->ctsp)
+ gumr |= UCC_SLOW_GUMR_H_CTSP;
+ if (us_info->cds)
+ gumr |= UCC_SLOW_GUMR_H_CDS;
+ if (us_info->ctss)
+ gumr |= UCC_SLOW_GUMR_H_CTSS;
+ if (us_info->tfl)
+ gumr |= UCC_SLOW_GUMR_H_TFL;
+ if (us_info->rfw)
+ gumr |= UCC_SLOW_GUMR_H_RFW;
+ if (us_info->txsy)
+ gumr |= UCC_SLOW_GUMR_H_TXSY;
+ if (us_info->rtsm)
+ gumr |= UCC_SLOW_GUMR_H_RTSM;
+ iowrite32be(gumr, &us_regs->gumr_h);
+
+ /* gumr_l */
+ gumr = (u32)us_info->tdcr | (u32)us_info->rdcr | (u32)us_info->tenc |
+ (u32)us_info->renc | (u32)us_info->diag | (u32)us_info->mode;
+ if (us_info->tci)
+ gumr |= UCC_SLOW_GUMR_L_TCI;
+ if (us_info->rinv)
+ gumr |= UCC_SLOW_GUMR_L_RINV;
+ if (us_info->tinv)
+ gumr |= UCC_SLOW_GUMR_L_TINV;
+ if (us_info->tend)
+ gumr |= UCC_SLOW_GUMR_L_TEND;
+ iowrite32be(gumr, &us_regs->gumr_l);
+
+ /* Function code registers */
+
+ /* If the data is in cacheable memory, the 'global' bit */
+ /* in the function code should be set. */
+ iowrite8(UCC_BMR_BO_BE, &uccs->us_pram->tbmr);
+ iowrite8(UCC_BMR_BO_BE, &uccs->us_pram->rbmr);
+
+ /* rbase, tbase are offsets from MURAM base */
+ iowrite16be(uccs->rx_base_offset, &uccs->us_pram->rbase);
+ iowrite16be(uccs->tx_base_offset, &uccs->us_pram->tbase);
+
+ /* Mux clocking */
+ /* Grant Support */
+ ucc_set_qe_mux_grant(us_info->ucc_num, us_info->grant_support);
+ /* Breakpoint Support */
+ ucc_set_qe_mux_bkpt(us_info->ucc_num, us_info->brkpt_support);
+ /* Set Tsa or NMSI mode. */
+ ucc_set_qe_mux_tsa(us_info->ucc_num, us_info->tsa);
+ /* If NMSI (not Tsa), set Tx and Rx clock. */
+ if (!us_info->tsa) {
+ /* Rx clock routing */
+ if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->rx_clock,
+ COMM_DIR_RX)) {
+ printk(KERN_ERR "%s: illegal value for RX clock\n",
+ __func__);
+ ucc_slow_free(uccs);
+ return -EINVAL;
+ }
+ /* Tx clock routing */
+ if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->tx_clock,
+ COMM_DIR_TX)) {
+ printk(KERN_ERR "%s: illegal value for TX clock\n",
+ __func__);
+ ucc_slow_free(uccs);
+ return -EINVAL;
+ }
+ }
+
+ /* Set interrupt mask register at UCC level. */
+ iowrite16be(us_info->uccm_mask, &us_regs->uccm);
+
+ /* First, clear anything pending at UCC level,
+ * otherwise, old garbage may come through
+ * as soon as the dam is opened. */
+
+ /* Writing '1' clears */
+ iowrite16be(0xffff, &us_regs->ucce);
+
+ /* Issue QE Init command */
+ if (us_info->init_tx && us_info->init_rx)
+ command = QE_INIT_TX_RX;
+ else if (us_info->init_tx)
+ command = QE_INIT_TX;
+ else
+ command = QE_INIT_RX; /* We know at least one is TRUE */
+
+ qe_issue_cmd(command, id, us_info->protocol, 0);
+
+ *uccs_ret = uccs;
+ return 0;
+}
+EXPORT_SYMBOL(ucc_slow_init);
+
+void ucc_slow_free(struct ucc_slow_private * uccs)
+{
+ if (!uccs)
+ return;
+
+ qe_muram_free(uccs->rx_base_offset);
+ qe_muram_free(uccs->tx_base_offset);
+ qe_muram_free(uccs->us_pram_offset);
+
+ if (uccs->us_regs)
+ iounmap(uccs->us_regs);
+
+ kfree(uccs);
+}
+EXPORT_SYMBOL(ucc_slow_free);
+
diff --git a/drivers/soc/fsl/qe/usb.c b/drivers/soc/fsl/qe/usb.c
new file mode 100644
index 0000000000..890f236ea6
--- /dev/null
+++ b/drivers/soc/fsl/qe/usb.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * QE USB routines
+ *
+ * Copyright 2006 Freescale Semiconductor, Inc.
+ * Shlomi Gridish <gridish@freescale.com>
+ * Jerry Huang <Chang-Ming.Huang@freescale.com>
+ * Copyright (c) MontaVista Software, Inc. 2008.
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+
+int qe_usb_clock_set(enum qe_clock clk, int rate)
+{
+ struct qe_mux __iomem *mux = &qe_immr->qmx;
+ unsigned long flags;
+ u32 val;
+
+ switch (clk) {
+ case QE_CLK3: val = QE_CMXGCR_USBCS_CLK3; break;
+ case QE_CLK5: val = QE_CMXGCR_USBCS_CLK5; break;
+ case QE_CLK7: val = QE_CMXGCR_USBCS_CLK7; break;
+ case QE_CLK9: val = QE_CMXGCR_USBCS_CLK9; break;
+ case QE_CLK13: val = QE_CMXGCR_USBCS_CLK13; break;
+ case QE_CLK17: val = QE_CMXGCR_USBCS_CLK17; break;
+ case QE_CLK19: val = QE_CMXGCR_USBCS_CLK19; break;
+ case QE_CLK21: val = QE_CMXGCR_USBCS_CLK21; break;
+ case QE_BRG9: val = QE_CMXGCR_USBCS_BRG9; break;
+ case QE_BRG10: val = QE_CMXGCR_USBCS_BRG10; break;
+ default:
+ pr_err("%s: requested unknown clock %d\n", __func__, clk);
+ return -EINVAL;
+ }
+
+ if (qe_clock_is_brg(clk))
+ qe_setbrg(clk, rate, 1);
+
+ spin_lock_irqsave(&cmxgcr_lock, flags);
+
+ qe_clrsetbits_be32(&mux->cmxgcr, QE_CMXGCR_USBCS, val);
+
+ spin_unlock_irqrestore(&cmxgcr_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(qe_usb_clock_set);
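/*
 * Minimal usage sketch (illustrative only, not part of the upstream
 * sources): a hypothetical board setup routine selecting BRG9 as a 48 MHz
 * USB controller clock. When a BRG is chosen, qe_usb_clock_set() also
 * programs the divider through qe_setbrg() before updating CMXGCR.
 */
static int example_select_usb_clock(void)
{
	return qe_usb_clock_set(QE_BRG9, 48000000);
}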
diff --git a/drivers/soc/fsl/rcpm.c b/drivers/soc/fsl/rcpm.c
new file mode 100644
index 0000000000..3d0cae30c7
--- /dev/null
+++ b/drivers/soc/fsl/rcpm.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// rcpm.c - Freescale QorIQ RCPM driver
+//
+// Copyright 2019-2020 NXP
+//
+// Author: Ran Wang <ran.wang_1@nxp.com>
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <linux/kernel.h>
+#include <linux/acpi.h>
+
+#define RCPM_WAKEUP_CELL_MAX_SIZE 7
+
+struct rcpm {
+ unsigned int wakeup_cells;
+ void __iomem *ippdexpcr_base;
+ bool little_endian;
+};
+
+#define SCFG_SPARECR8 0x051c
+
+static void copy_ippdexpcr1_setting(u32 val)
+{
+ struct device_node *np;
+ void __iomem *regs;
+ u32 reg_val;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,ls1021a-scfg");
+ if (!np)
+ return;
+
+ regs = of_iomap(np, 0);
+ if (!regs)
+ return;
+
+ reg_val = ioread32be(regs + SCFG_SPARECR8);
+ iowrite32be(val | reg_val, regs + SCFG_SPARECR8);
+
+ iounmap(regs);
+}
+
+/**
+ * rcpm_pm_prepare - performs device-level tasks associated with power
+ * management, such as programming related to the wakeup source control.
+ * @dev: Device to handle.
+ *
+ */
+static int rcpm_pm_prepare(struct device *dev)
+{
+ int i, ret, idx;
+ void __iomem *base;
+ struct wakeup_source *ws;
+ struct rcpm *rcpm;
+ struct device_node *np = dev->of_node;
+ u32 value[RCPM_WAKEUP_CELL_MAX_SIZE + 1];
+ u32 setting[RCPM_WAKEUP_CELL_MAX_SIZE] = {0};
+
+ rcpm = dev_get_drvdata(dev);
+ if (!rcpm)
+ return -EINVAL;
+
+ base = rcpm->ippdexpcr_base;
+ idx = wakeup_sources_read_lock();
+
+ /* Begin with first registered wakeup source */
+ for_each_wakeup_source(ws) {
+
+ /* skip object which is not attached to device */
+ if (!ws->dev || !ws->dev->parent)
+ continue;
+
+ ret = device_property_read_u32_array(ws->dev->parent,
+ "fsl,rcpm-wakeup", value,
+ rcpm->wakeup_cells + 1);
+
+ if (ret)
+ continue;
+
+ /*
+ * For DT mode, only handle devices whose "fsl,rcpm-wakeup"
+ * property points to the current RCPM node.
+ *
+ * For ACPI mode, currently we assume there is only one
+ * RCPM controller existing.
+ */
+ if (is_of_node(dev->fwnode))
+ if (np->phandle != value[0])
+ continue;
+
+ /* Property "#fsl,rcpm-wakeup-cells" of rcpm node defines the
+ * number of IPPDEXPCR register cells, and "fsl,rcpm-wakeup"
+ * of wakeup source IP contains an integer array: <phandle to
+ * RCPM node, IPPDEXPCR0 setting, IPPDEXPCR1 setting,
+ * IPPDEXPCR2 setting, etc>.
+ *
+ * So we will go through them to collect the setting data.
+ */
+ for (i = 0; i < rcpm->wakeup_cells; i++)
+ setting[i] |= value[i + 1];
+ }
+
+ wakeup_sources_read_unlock(idx);
+
+ /* Program all IPPDEXPCRn once */
+ for (i = 0; i < rcpm->wakeup_cells; i++) {
+ u32 tmp = setting[i];
+ void __iomem *address = base + i * 4;
+
+ if (!tmp)
+ continue;
+
+ /* We can only OR related bits */
+ if (rcpm->little_endian) {
+ tmp |= ioread32(address);
+ iowrite32(tmp, address);
+ } else {
+ tmp |= ioread32be(address);
+ iowrite32be(tmp, address);
+ }
+ /*
+ * Workaround for erratum A-008646 on SoC LS1021A:
+ * register ippdexpcr1 is buggy and reading the
+ * configuration register RCPM_IPPDEXPCR1 always
+ * returns zero. So save ippdexpcr1's value to
+ * register SCFG_SPARECR8, and the value of
+ * ippdexpcr1 will be read back from SCFG_SPARECR8.
+ */
+ if (dev_of_node(dev) && (i == 1))
+ if (of_device_is_compatible(np, "fsl,ls1021a-rcpm"))
+ copy_ippdexpcr1_setting(tmp);
+ }
+
+ return 0;
+}
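/*
 * Worked example (illustrative sketch, not part of the upstream sources):
 * with "#fsl,rcpm-wakeup-cells" = <2>, a wakeup device carrying a
 * hypothetical property "fsl,rcpm-wakeup" = <&rcpm 0x00020000 0x0> makes
 * device_property_read_u32_array() return value[] = { rcpm phandle,
 * 0x00020000, 0x0 }, so the loop above ORs 0x00020000 into setting[0]
 * (IPPDEXPCR0) and nothing into setting[1] (IPPDEXPCR1).
 */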
+
+static const struct dev_pm_ops rcpm_pm_ops = {
+ .prepare = rcpm_pm_prepare,
+};
+
+static int rcpm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rcpm *rcpm;
+ int ret;
+
+ rcpm = devm_kzalloc(dev, sizeof(*rcpm), GFP_KERNEL);
+ if (!rcpm)
+ return -ENOMEM;
+
+ rcpm->ippdexpcr_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rcpm->ippdexpcr_base)) {
+ ret = PTR_ERR(rcpm->ippdexpcr_base);
+ return ret;
+ }
+
+ rcpm->little_endian = device_property_read_bool(
+ &pdev->dev, "little-endian");
+
+ ret = device_property_read_u32(&pdev->dev,
+ "#fsl,rcpm-wakeup-cells", &rcpm->wakeup_cells);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(&pdev->dev, rcpm);
+
+ return 0;
+}
+
+static const struct of_device_id rcpm_of_match[] = {
+ { .compatible = "fsl,qoriq-rcpm-2.1+", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rcpm_of_match);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id rcpm_acpi_ids[] = {
+ {"NXP0015",},
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, rcpm_acpi_ids);
+#endif
+
+static struct platform_driver rcpm_driver = {
+ .driver = {
+ .name = "rcpm",
+ .of_match_table = rcpm_of_match,
+ .acpi_match_table = ACPI_PTR(rcpm_acpi_ids),
+ .pm = &rcpm_pm_ops,
+ },
+ .probe = rcpm_probe,
+};
+
+module_platform_driver(rcpm_driver);
diff --git a/drivers/soc/fujitsu/Kconfig b/drivers/soc/fujitsu/Kconfig
new file mode 100644
index 0000000000..987731e806
--- /dev/null
+++ b/drivers/soc/fujitsu/Kconfig
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "fujitsu SoC drivers"
+
+config A64FX_DIAG
+ bool "A64FX diag driver"
+ depends on ARM64
+ depends on ACPI
+ help
+ Say Y here if you want to enable the diag interrupt on Fujitsu A64FX.
+ This driver enables the BMC's diagnostic requests and the
+ A64FX-specific interrupt, allowing administrators to obtain
+ kernel dumps via diagnostic requests using ipmitool, etc.
+
+ If unsure, say N.
+
+endmenu
diff --git a/drivers/soc/fujitsu/Makefile b/drivers/soc/fujitsu/Makefile
new file mode 100644
index 0000000000..945bc1c14a
--- /dev/null
+++ b/drivers/soc/fujitsu/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_A64FX_DIAG) += a64fx-diag.o
diff --git a/drivers/soc/fujitsu/a64fx-diag.c b/drivers/soc/fujitsu/a64fx-diag.c
new file mode 100644
index 0000000000..524fbfeb94
--- /dev/null
+++ b/drivers/soc/fujitsu/a64fx-diag.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * A64FX diag driver.
+ * Copyright (c) 2022 Fujitsu Ltd.
+ */
+
+#include <linux/acpi.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#define A64FX_DIAG_IRQ 1
+#define BMC_DIAG_INTERRUPT_ENABLE 0x40
+#define BMC_DIAG_INTERRUPT_STATUS 0x44
+#define BMC_DIAG_INTERRUPT_MASK BIT(31)
+
+struct a64fx_diag_priv {
+ void __iomem *mmsc_reg_base;
+ int irq;
+ bool has_nmi;
+};
+
+static irqreturn_t a64fx_diag_handler_nmi(int irq, void *dev_id)
+{
+ nmi_panic(NULL, "a64fx_diag: interrupt received\n");
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t a64fx_diag_handler_irq(int irq, void *dev_id)
+{
+ panic("a64fx_diag: interrupt received\n");
+
+ return IRQ_HANDLED;
+}
+
+static void a64fx_diag_interrupt_clear(struct a64fx_diag_priv *priv)
+{
+ void __iomem *diag_status_reg_addr;
+ u32 mmsc;
+
+ diag_status_reg_addr = priv->mmsc_reg_base + BMC_DIAG_INTERRUPT_STATUS;
+ mmsc = readl(diag_status_reg_addr);
+ if (mmsc & BMC_DIAG_INTERRUPT_MASK)
+ writel(BMC_DIAG_INTERRUPT_MASK, diag_status_reg_addr);
+}
+
+static void a64fx_diag_interrupt_enable(struct a64fx_diag_priv *priv)
+{
+ void __iomem *diag_enable_reg_addr;
+ u32 mmsc;
+
+ diag_enable_reg_addr = priv->mmsc_reg_base + BMC_DIAG_INTERRUPT_ENABLE;
+ mmsc = readl(diag_enable_reg_addr);
+ if (!(mmsc & BMC_DIAG_INTERRUPT_MASK)) {
+ mmsc |= BMC_DIAG_INTERRUPT_MASK;
+ writel(mmsc, diag_enable_reg_addr);
+ }
+}
+
+static void a64fx_diag_interrupt_disable(struct a64fx_diag_priv *priv)
+{
+ void __iomem *diag_enable_reg_addr;
+ u32 mmsc;
+
+ diag_enable_reg_addr = priv->mmsc_reg_base + BMC_DIAG_INTERRUPT_ENABLE;
+ mmsc = readl(diag_enable_reg_addr);
+ if (mmsc & BMC_DIAG_INTERRUPT_MASK) {
+ mmsc &= ~BMC_DIAG_INTERRUPT_MASK;
+ writel(mmsc, diag_enable_reg_addr);
+ }
+}
+
+static int a64fx_diag_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct a64fx_diag_priv *priv;
+ unsigned long irq_flags;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (priv == NULL)
+ return -ENOMEM;
+
+ priv->mmsc_reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->mmsc_reg_base))
+ return PTR_ERR(priv->mmsc_reg_base);
+
+ priv->irq = platform_get_irq(pdev, A64FX_DIAG_IRQ);
+ if (priv->irq < 0)
+ return priv->irq;
+
+ platform_set_drvdata(pdev, priv);
+
+ irq_flags = IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_AUTOEN |
+ IRQF_NO_THREAD;
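+	/*
+	 * Try to register the diag interrupt as an NMI first so it can fire
+	 * even while normal interrupts are masked; fall back to a regular IRQ
+	 * if NMI delivery is not supported on this platform.
+	 */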
+ ret = request_nmi(priv->irq, &a64fx_diag_handler_nmi, irq_flags,
+ "a64fx_diag_nmi", NULL);
+ if (ret) {
+ ret = request_irq(priv->irq, &a64fx_diag_handler_irq,
+ irq_flags, "a64fx_diag_irq", NULL);
+ if (ret) {
+ dev_err(dev, "cannot register IRQ %d\n", ret);
+ return ret;
+ }
+ enable_irq(priv->irq);
+ } else {
+ enable_nmi(priv->irq);
+ priv->has_nmi = true;
+ }
+
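+	/* Clear any stale diag interrupt before enabling it. */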
+ a64fx_diag_interrupt_clear(priv);
+ a64fx_diag_interrupt_enable(priv);
+
+ return 0;
+}
+
+static int a64fx_diag_remove(struct platform_device *pdev)
+{
+ struct a64fx_diag_priv *priv = platform_get_drvdata(pdev);
+
+ a64fx_diag_interrupt_disable(priv);
+ a64fx_diag_interrupt_clear(priv);
+
+ if (priv->has_nmi)
+ free_nmi(priv->irq, NULL);
+ else
+ free_irq(priv->irq, NULL);
+
+ return 0;
+}
+
+static const struct acpi_device_id a64fx_diag_acpi_match[] = {
+ { "FUJI2007", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, a64fx_diag_acpi_match);
+
+
+static struct platform_driver a64fx_diag_driver = {
+ .driver = {
+ .name = "a64fx_diag_driver",
+ .acpi_match_table = ACPI_PTR(a64fx_diag_acpi_match),
+ },
+ .probe = a64fx_diag_probe,
+ .remove = a64fx_diag_remove,
+};
+
+module_platform_driver(a64fx_diag_driver);
+
+MODULE_AUTHOR("Hitomi Hasegawa <hasegawa-hitomi@fujitsu.com>");
+MODULE_DESCRIPTION("A64FX diag driver");
diff --git a/drivers/soc/gemini/Makefile b/drivers/soc/gemini/Makefile
new file mode 100644
index 0000000000..8cbd1e45db
--- /dev/null
+++ b/drivers/soc/gemini/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-y += soc-gemini.o
diff --git a/drivers/soc/gemini/soc-gemini.c b/drivers/soc/gemini/soc-gemini.c
new file mode 100644
index 0000000000..642b96c91a
--- /dev/null
+++ b/drivers/soc/gemini/soc-gemini.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/of.h>
+
+#define GLOBAL_WORD_ID 0x00
+#define GEMINI_GLOBAL_ARB1_CTRL 0x2c
+#define GEMINI_ARB1_BURST_MASK GENMASK(21, 16)
+#define GEMINI_ARB1_BURST_SHIFT 16
+/* These all define the priority on the BUS2 backplane */
+#define GEMINI_ARB1_PRIO_MASK GENMASK(9, 0)
+#define GEMINI_ARB1_DMAC_HIGH_PRIO BIT(0)
+#define GEMINI_ARB1_IDE_HIGH_PRIO BIT(1)
+#define GEMINI_ARB1_RAID_HIGH_PRIO BIT(2)
+#define GEMINI_ARB1_SECURITY_HIGH_PRIO BIT(3)
+#define GEMINI_ARB1_GMAC0_HIGH_PRIO BIT(4)
+#define GEMINI_ARB1_GMAC1_HIGH_PRIO BIT(5)
+#define GEMINI_ARB1_USB0_HIGH_PRIO BIT(6)
+#define GEMINI_ARB1_USB1_HIGH_PRIO BIT(7)
+#define GEMINI_ARB1_PCI_HIGH_PRIO BIT(8)
+#define GEMINI_ARB1_TVE_HIGH_PRIO BIT(9)
+
+#define GEMINI_DEFAULT_BURST_SIZE 0x20
+#define GEMINI_DEFAULT_PRIO (GEMINI_ARB1_GMAC0_HIGH_PRIO | \
+ GEMINI_ARB1_GMAC1_HIGH_PRIO)
+
+static int __init gemini_soc_init(void)
+{
+ struct regmap *map;
+ u32 rev;
+ u32 val;
+ int ret;
+
+ /* Multiplatform guard, only proceed on Gemini */
+ if (!of_machine_is_compatible("cortina,gemini"))
+ return 0;
+
+ map = syscon_regmap_lookup_by_compatible("cortina,gemini-syscon");
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+ ret = regmap_read(map, GLOBAL_WORD_ID, &rev);
+ if (ret)
+ return ret;
+
+ val = (GEMINI_DEFAULT_BURST_SIZE << GEMINI_ARB1_BURST_SHIFT) |
+ GEMINI_DEFAULT_PRIO;
+
+ /* Set up system arbitration */
+ regmap_update_bits(map,
+ GEMINI_GLOBAL_ARB1_CTRL,
+ GEMINI_ARB1_BURST_MASK | GEMINI_ARB1_PRIO_MASK,
+ val);
+
+ pr_info("Gemini SoC %04x revision %02x, set arbitration %08x\n",
+ rev >> 8, rev & 0xff, val);
+
+ return 0;
+}
+subsys_initcall(gemini_soc_init);
diff --git a/drivers/soc/hisilicon/Kconfig b/drivers/soc/hisilicon/Kconfig
new file mode 100644
index 0000000000..0ab688af30
--- /dev/null
+++ b/drivers/soc/hisilicon/Kconfig
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+menu "Hisilicon SoC drivers"
+ depends on ARCH_HISI || COMPILE_TEST
+
+config KUNPENG_HCCS
+ tristate "HCCS driver on Kunpeng SoC"
+ depends on ACPI
+ depends on MAILBOX
+ depends on ARM64 || COMPILE_TEST
+ help
+ The Huawei Cache Coherence System (HCCS) is a multi-chip
+ interconnection bus protocol.
+	  Application performance may be affected if some HCCS ports
+	  are not at full lane status, have a large number of CRC
+	  errors, and so on.
+
+ Say M here if you want to include support for querying the
+ health status and port information of HCCS on Kunpeng SoC.
+
+endmenu
diff --git a/drivers/soc/hisilicon/Makefile b/drivers/soc/hisilicon/Makefile
new file mode 100644
index 0000000000..226e747e70
--- /dev/null
+++ b/drivers/soc/hisilicon/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_KUNPENG_HCCS) += kunpeng_hccs.o
diff --git a/drivers/soc/hisilicon/kunpeng_hccs.c b/drivers/soc/hisilicon/kunpeng_hccs.c
new file mode 100644
index 0000000000..f3810d9d1c
--- /dev/null
+++ b/drivers/soc/hisilicon/kunpeng_hccs.c
@@ -0,0 +1,1276 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * The Huawei Cache Coherence System (HCCS) is a multi-chip interconnection
+ * bus protocol.
+ *
+ * Copyright (c) 2023 Hisilicon Limited.
+ * Author: Huisong Li <lihuisong@huawei.com>
+ *
+ * HCCS driver for Kunpeng SoC provides the following features:
+ * - Retrieve the following information about each port:
+ * - port type
+ * - lane mode
+ * - enable
+ * - current lane mode
+ * - link finite state machine
+ * - lane mask
+ * - CRC error count
+ *
+ * - Retrieve the following information about all the ports on the chip or
+ * the die:
+ *    - whether all enabled ports are linked
+ *    - whether all linked ports are at full lane
+ * - CRC error count sum
+ */
+#include <linux/acpi.h>
+#include <linux/iopoll.h>
+#include <linux/platform_device.h>
+#include <linux/sysfs.h>
+
+#include <acpi/pcc.h>
+
+#include "kunpeng_hccs.h"
+
+/* PCC defines */
+#define HCCS_PCC_SIGNATURE_MASK 0x50434300
+#define HCCS_PCC_STATUS_CMD_COMPLETE BIT(0)
+
+/*
+ * Arbitrary retries in case the remote processor is slow to respond
+ * to PCC commands
+ */
+#define HCCS_PCC_CMD_WAIT_RETRIES_NUM 500ULL
+#define HCCS_POLL_STATUS_TIME_INTERVAL_US 3
+
+static struct hccs_port_info *kobj_to_port_info(struct kobject *k)
+{
+ return container_of(k, struct hccs_port_info, kobj);
+}
+
+static struct hccs_die_info *kobj_to_die_info(struct kobject *k)
+{
+ return container_of(k, struct hccs_die_info, kobj);
+}
+
+static struct hccs_chip_info *kobj_to_chip_info(struct kobject *k)
+{
+ return container_of(k, struct hccs_chip_info, kobj);
+}
+
+struct hccs_register_ctx {
+ struct device *dev;
+ u8 chan_id;
+ int err;
+};
+
+static acpi_status hccs_get_register_cb(struct acpi_resource *ares,
+ void *context)
+{
+ struct acpi_resource_generic_register *reg;
+ struct hccs_register_ctx *ctx = context;
+
+ if (ares->type != ACPI_RESOURCE_TYPE_GENERIC_REGISTER)
+ return AE_OK;
+
+ reg = &ares->data.generic_reg;
+ if (reg->space_id != ACPI_ADR_SPACE_PLATFORM_COMM) {
+ dev_err(ctx->dev, "Bad register resource.\n");
+ ctx->err = -EINVAL;
+ return AE_ERROR;
+ }
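+	/* The PCC subspace (channel) ID is conveyed via the access_size field. */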
+ ctx->chan_id = reg->access_size;
+
+ return AE_OK;
+}
+
+static int hccs_get_pcc_chan_id(struct hccs_dev *hdev)
+{
+ acpi_handle handle = ACPI_HANDLE(hdev->dev);
+ struct hccs_register_ctx ctx = {0};
+ acpi_status status;
+
+ if (!acpi_has_method(handle, METHOD_NAME__CRS))
+ return -ENODEV;
+
+ ctx.dev = hdev->dev;
+ status = acpi_walk_resources(handle, METHOD_NAME__CRS,
+ hccs_get_register_cb, &ctx);
+ if (ACPI_FAILURE(status))
+ return ctx.err;
+ hdev->chan_id = ctx.chan_id;
+
+ return 0;
+}
+
+static void hccs_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
+{
+ if (ret < 0)
+ pr_debug("TX did not complete: CMD sent:0x%x, ret:%d\n",
+ *(u8 *)msg, ret);
+ else
+ pr_debug("TX completed. CMD sent:0x%x, ret:%d\n",
+ *(u8 *)msg, ret);
+}
+
+static void hccs_unregister_pcc_channel(struct hccs_dev *hdev)
+{
+ struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
+
+ if (cl_info->pcc_comm_addr)
+ iounmap(cl_info->pcc_comm_addr);
+ pcc_mbox_free_channel(hdev->cl_info.pcc_chan);
+}
+
+static int hccs_register_pcc_channel(struct hccs_dev *hdev)
+{
+ struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
+ struct mbox_client *cl = &cl_info->client;
+ struct pcc_mbox_chan *pcc_chan;
+ struct device *dev = hdev->dev;
+ int rc;
+
+ cl->dev = dev;
+ cl->tx_block = false;
+ cl->knows_txdone = true;
+ cl->tx_done = hccs_chan_tx_done;
+ pcc_chan = pcc_mbox_request_channel(cl, hdev->chan_id);
+ if (IS_ERR(pcc_chan)) {
+		dev_err(dev, "PCC channel request failed.\n");
+ rc = -ENODEV;
+ goto out;
+ }
+ cl_info->pcc_chan = pcc_chan;
+ cl_info->mbox_chan = pcc_chan->mchan;
+
+ /*
+ * pcc_chan->latency is just a nominal value. In reality the remote
+ * processor could be much slower to reply. So add an arbitrary amount
+ * of wait on top of nominal.
+ */
+ cl_info->deadline_us =
+ HCCS_PCC_CMD_WAIT_RETRIES_NUM * pcc_chan->latency;
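+
+	/*
+	 * Command completion is detected by polling the status register, so a
+	 * PCC subspace that signals TX done via interrupt is not supported.
+	 */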
+ if (cl_info->mbox_chan->mbox->txdone_irq) {
+ dev_err(dev, "PCC IRQ in PCCT is enabled.\n");
+ rc = -EINVAL;
+ goto err_mbx_channel_free;
+ }
+
+ if (pcc_chan->shmem_base_addr) {
+ cl_info->pcc_comm_addr = ioremap(pcc_chan->shmem_base_addr,
+ pcc_chan->shmem_size);
+ if (!cl_info->pcc_comm_addr) {
+ dev_err(dev, "Failed to ioremap PCC communication region for channel-%d.\n",
+ hdev->chan_id);
+ rc = -ENOMEM;
+ goto err_mbx_channel_free;
+ }
+ }
+
+ return 0;
+
+err_mbx_channel_free:
+ pcc_mbox_free_channel(cl_info->pcc_chan);
+out:
+ return rc;
+}
+
+static int hccs_check_chan_cmd_complete(struct hccs_dev *hdev)
+{
+ struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
+ struct acpi_pcct_shared_memory __iomem *comm_base =
+ cl_info->pcc_comm_addr;
+ u16 status;
+ int ret;
+
+ /*
+	 * Poll the PCC status register every 3 us (delay_us) for a maximum of
+	 * deadline_us (timeout_us) until the PCC command complete bit is set (cond).
+ */
+ ret = readw_poll_timeout(&comm_base->status, status,
+ status & HCCS_PCC_STATUS_CMD_COMPLETE,
+ HCCS_POLL_STATUS_TIME_INTERVAL_US,
+ cl_info->deadline_us);
+ if (unlikely(ret))
+ dev_err(hdev->dev, "poll PCC status failed, ret = %d.\n", ret);
+
+ return ret;
+}
+
+static int hccs_pcc_cmd_send(struct hccs_dev *hdev, u8 cmd,
+ struct hccs_desc *desc)
+{
+ struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
+ void __iomem *comm_space = cl_info->pcc_comm_addr +
+ sizeof(struct acpi_pcct_shared_memory);
+ struct hccs_fw_inner_head *fw_inner_head;
+ struct acpi_pcct_shared_memory tmp = {0};
+ u16 comm_space_size;
+ int ret;
+
+ /* Write signature for this subspace */
+ tmp.signature = HCCS_PCC_SIGNATURE_MASK | hdev->chan_id;
+ /* Write to the shared command region */
+ tmp.command = cmd;
+ /* Clear cmd complete bit */
+ tmp.status = 0;
+ memcpy_toio(cl_info->pcc_comm_addr, (void *)&tmp,
+ sizeof(struct acpi_pcct_shared_memory));
+
+ /* Copy the message to the PCC comm space */
+ comm_space_size = HCCS_PCC_SHARE_MEM_BYTES -
+ sizeof(struct acpi_pcct_shared_memory);
+ memcpy_toio(comm_space, (void *)desc, comm_space_size);
+
+ /* Ring doorbell */
+ ret = mbox_send_message(cl_info->mbox_chan, &cmd);
+ if (ret < 0) {
+ dev_err(hdev->dev, "Send PCC mbox message failed, ret = %d.\n",
+ ret);
+ goto end;
+ }
+
+ /* Wait for completion */
+ ret = hccs_check_chan_cmd_complete(hdev);
+ if (ret)
+ goto end;
+
+ /* Copy response data */
+ memcpy_fromio((void *)desc, comm_space, comm_space_size);
+ fw_inner_head = &desc->rsp.fw_inner_head;
+ if (fw_inner_head->retStatus) {
+ dev_err(hdev->dev, "Execute PCC command failed, error code = %u.\n",
+ fw_inner_head->retStatus);
+ ret = -EIO;
+ }
+
+end:
+ mbox_client_txdone(cl_info->mbox_chan, ret);
+ return ret;
+}
+
+static void hccs_init_req_desc(struct hccs_desc *desc)
+{
+ struct hccs_req_desc *req = &desc->req;
+
+ memset(desc, 0, sizeof(*desc));
+ req->req_head.module_code = HCCS_SERDES_MODULE_CODE;
+}
+
+static int hccs_get_dev_caps(struct hccs_dev *hdev)
+{
+ struct hccs_desc desc;
+ int ret;
+
+ hccs_init_req_desc(&desc);
+ ret = hccs_pcc_cmd_send(hdev, HCCS_GET_DEV_CAP, &desc);
+ if (ret) {
+ dev_err(hdev->dev, "Get device capabilities failed, ret = %d.\n",
+ ret);
+ return ret;
+ }
+ memcpy(&hdev->caps, desc.rsp.data, sizeof(hdev->caps));
+
+ return 0;
+}
+
+static int hccs_query_chip_num_on_platform(struct hccs_dev *hdev)
+{
+ struct hccs_desc desc;
+ int ret;
+
+ hccs_init_req_desc(&desc);
+ ret = hccs_pcc_cmd_send(hdev, HCCS_GET_CHIP_NUM, &desc);
+ if (ret) {
+ dev_err(hdev->dev, "query system chip number failed, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ hdev->chip_num = *((u8 *)&desc.rsp.data);
+ if (!hdev->chip_num) {
+ dev_err(hdev->dev, "chip num obtained from firmware is zero.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hccs_get_chip_info(struct hccs_dev *hdev,
+ struct hccs_chip_info *chip)
+{
+ struct hccs_die_num_req_param *req_param;
+ struct hccs_desc desc;
+ int ret;
+
+ hccs_init_req_desc(&desc);
+ req_param = (struct hccs_die_num_req_param *)desc.req.data;
+ req_param->chip_id = chip->chip_id;
+ ret = hccs_pcc_cmd_send(hdev, HCCS_GET_DIE_NUM, &desc);
+ if (ret)
+ return ret;
+
+ chip->die_num = *((u8 *)&desc.rsp.data);
+
+ return 0;
+}
+
+static int hccs_query_chip_info_on_platform(struct hccs_dev *hdev)
+{
+ struct hccs_chip_info *chip;
+ int ret;
+ u8 idx;
+
+ ret = hccs_query_chip_num_on_platform(hdev);
+ if (ret) {
+ dev_err(hdev->dev, "query chip number on platform failed, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ hdev->chips = devm_kzalloc(hdev->dev,
+ hdev->chip_num * sizeof(struct hccs_chip_info),
+ GFP_KERNEL);
+ if (!hdev->chips) {
+ dev_err(hdev->dev, "allocate all chips memory failed.\n");
+ return -ENOMEM;
+ }
+
+ for (idx = 0; idx < hdev->chip_num; idx++) {
+ chip = &hdev->chips[idx];
+ chip->chip_id = idx;
+ ret = hccs_get_chip_info(hdev, chip);
+ if (ret) {
+ dev_err(hdev->dev, "get chip%u info failed, ret = %d.\n",
+ idx, ret);
+ return ret;
+ }
+ chip->hdev = hdev;
+ }
+
+ return 0;
+}
+
+static int hccs_query_die_info_on_chip(struct hccs_dev *hdev, u8 chip_id,
+ u8 die_idx, struct hccs_die_info *die)
+{
+ struct hccs_die_info_req_param *req_param;
+ struct hccs_die_info_rsp_data *rsp_data;
+ struct hccs_desc desc;
+ int ret;
+
+ hccs_init_req_desc(&desc);
+ req_param = (struct hccs_die_info_req_param *)desc.req.data;
+ req_param->chip_id = chip_id;
+ req_param->die_idx = die_idx;
+ ret = hccs_pcc_cmd_send(hdev, HCCS_GET_DIE_INFO, &desc);
+ if (ret)
+ return ret;
+
+ rsp_data = (struct hccs_die_info_rsp_data *)desc.rsp.data;
+ die->die_id = rsp_data->die_id;
+ die->port_num = rsp_data->port_num;
+ die->min_port_id = rsp_data->min_port_id;
+ die->max_port_id = rsp_data->max_port_id;
+ if (die->min_port_id > die->max_port_id) {
+ dev_err(hdev->dev, "min port id(%u) > max port id(%u) on die_idx(%u).\n",
+ die->min_port_id, die->max_port_id, die_idx);
+ return -EINVAL;
+ }
+ if (die->max_port_id > HCCS_DIE_MAX_PORT_ID) {
+ dev_err(hdev->dev, "max port id(%u) on die_idx(%u) is too big.\n",
+ die->max_port_id, die_idx);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hccs_query_all_die_info_on_platform(struct hccs_dev *hdev)
+{
+ struct device *dev = hdev->dev;
+ struct hccs_chip_info *chip;
+ struct hccs_die_info *die;
+ u8 i, j;
+ int ret;
+
+ for (i = 0; i < hdev->chip_num; i++) {
+ chip = &hdev->chips[i];
+ if (!chip->die_num)
+ continue;
+
+ chip->dies = devm_kzalloc(hdev->dev,
+ chip->die_num * sizeof(struct hccs_die_info),
+ GFP_KERNEL);
+ if (!chip->dies) {
+ dev_err(dev, "allocate all dies memory on chip%u failed.\n",
+ i);
+ return -ENOMEM;
+ }
+
+ for (j = 0; j < chip->die_num; j++) {
+ die = &chip->dies[j];
+ ret = hccs_query_die_info_on_chip(hdev, i, j, die);
+ if (ret) {
+ dev_err(dev, "get die idx (%u) info on chip%u failed, ret = %d.\n",
+ j, i, ret);
+ return ret;
+ }
+ die->chip = chip;
+ }
+ }
+
+ return 0;
+}
+
+static int hccs_get_bd_info(struct hccs_dev *hdev, u8 opcode,
+ struct hccs_desc *desc,
+ void *buf, size_t buf_len,
+ struct hccs_rsp_head *rsp_head)
+{
+ struct hccs_rsp_head *head;
+ struct hccs_rsp_desc *rsp;
+ int ret;
+
+ ret = hccs_pcc_cmd_send(hdev, opcode, desc);
+ if (ret)
+ return ret;
+
+ rsp = &desc->rsp;
+ head = &rsp->rsp_head;
+ if (head->data_len > buf_len) {
+ dev_err(hdev->dev,
+ "buffer overflow (buf_len = %zu, data_len = %u)!\n",
+ buf_len, head->data_len);
+ return -ENOMEM;
+ }
+
+ memcpy(buf, rsp->data, head->data_len);
+ *rsp_head = *head;
+
+ return 0;
+}
+
+static int hccs_get_all_port_attr(struct hccs_dev *hdev,
+ struct hccs_die_info *die,
+ struct hccs_port_attr *attrs, u16 size)
+{
+ struct hccs_die_comm_req_param *req_param;
+ struct hccs_req_head *req_head;
+ struct hccs_rsp_head rsp_head;
+ struct hccs_desc desc;
+ size_t left_buf_len;
+ u32 data_len = 0;
+ u8 start_id;
+ u8 *buf;
+ int ret;
+
+ buf = (u8 *)attrs;
+ left_buf_len = sizeof(struct hccs_port_attr) * size;
+ start_id = die->min_port_id;
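+	/*
+	 * Port attributes are returned in batches limited by the PCC shared
+	 * memory size; rsp_head.next_id tells where the next batch starts.
+	 */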
+ while (start_id <= die->max_port_id) {
+ hccs_init_req_desc(&desc);
+ req_head = &desc.req.req_head;
+ req_head->start_id = start_id;
+ req_param = (struct hccs_die_comm_req_param *)desc.req.data;
+ req_param->chip_id = die->chip->chip_id;
+ req_param->die_id = die->die_id;
+
+ ret = hccs_get_bd_info(hdev, HCCS_GET_DIE_PORT_INFO, &desc,
+ buf + data_len, left_buf_len, &rsp_head);
+ if (ret) {
+ dev_err(hdev->dev,
+ "get the information of port%u on die%u failed, ret = %d.\n",
+ start_id, die->die_id, ret);
+ return ret;
+ }
+
+ data_len += rsp_head.data_len;
+ left_buf_len -= rsp_head.data_len;
+ if (unlikely(rsp_head.next_id <= start_id)) {
+ dev_err(hdev->dev,
+ "next port id (%u) is not greater than last start id (%u) on die%u.\n",
+ rsp_head.next_id, start_id, die->die_id);
+ return -EINVAL;
+ }
+ start_id = rsp_head.next_id;
+ }
+
+ return 0;
+}
+
+static int hccs_get_all_port_info_on_die(struct hccs_dev *hdev,
+ struct hccs_die_info *die)
+{
+ struct hccs_port_attr *attrs;
+ struct hccs_port_info *port;
+ int ret;
+ u8 i;
+
+ attrs = kcalloc(die->port_num, sizeof(struct hccs_port_attr),
+ GFP_KERNEL);
+ if (!attrs)
+ return -ENOMEM;
+
+ ret = hccs_get_all_port_attr(hdev, die, attrs, die->port_num);
+ if (ret)
+ goto out;
+
+ for (i = 0; i < die->port_num; i++) {
+ port = &die->ports[i];
+ port->port_id = attrs[i].port_id;
+ port->port_type = attrs[i].port_type;
+ port->lane_mode = attrs[i].lane_mode;
+ port->enable = attrs[i].enable;
+ port->die = die;
+ }
+
+out:
+ kfree(attrs);
+ return ret;
+}
+
+static int hccs_query_all_port_info_on_platform(struct hccs_dev *hdev)
+{
+ struct device *dev = hdev->dev;
+ struct hccs_chip_info *chip;
+ struct hccs_die_info *die;
+ u8 i, j;
+ int ret;
+
+ for (i = 0; i < hdev->chip_num; i++) {
+ chip = &hdev->chips[i];
+ for (j = 0; j < chip->die_num; j++) {
+ die = &chip->dies[j];
+ if (!die->port_num)
+ continue;
+
+ die->ports = devm_kzalloc(dev,
+ die->port_num * sizeof(struct hccs_port_info),
+ GFP_KERNEL);
+ if (!die->ports) {
+ dev_err(dev, "allocate ports memory on chip%u/die%u failed.\n",
+ i, die->die_id);
+ return -ENOMEM;
+ }
+
+ ret = hccs_get_all_port_info_on_die(hdev, die);
+ if (ret) {
+ dev_err(dev, "get all port info on chip%u/die%u failed, ret = %d.\n",
+ i, die->die_id, ret);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int hccs_get_hw_info(struct hccs_dev *hdev)
+{
+ int ret;
+
+ ret = hccs_query_chip_info_on_platform(hdev);
+ if (ret) {
+ dev_err(hdev->dev, "query chip info on platform failed, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ ret = hccs_query_all_die_info_on_platform(hdev);
+ if (ret) {
+ dev_err(hdev->dev, "query all die info on platform failed, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ ret = hccs_query_all_port_info_on_platform(hdev);
+ if (ret) {
+ dev_err(hdev->dev, "query all port info on platform failed, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hccs_query_port_link_status(struct hccs_dev *hdev,
+ const struct hccs_port_info *port,
+ struct hccs_link_status *link_status)
+{
+ const struct hccs_die_info *die = port->die;
+ const struct hccs_chip_info *chip = die->chip;
+ struct hccs_port_comm_req_param *req_param;
+ struct hccs_desc desc;
+ int ret;
+
+ hccs_init_req_desc(&desc);
+ req_param = (struct hccs_port_comm_req_param *)desc.req.data;
+ req_param->chip_id = chip->chip_id;
+ req_param->die_id = die->die_id;
+ req_param->port_id = port->port_id;
+ ret = hccs_pcc_cmd_send(hdev, HCCS_GET_PORT_LINK_STATUS, &desc);
+ if (ret) {
+ dev_err(hdev->dev,
+ "get port link status info failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ *link_status = *((struct hccs_link_status *)desc.rsp.data);
+
+ return 0;
+}
+
+static int hccs_query_port_crc_err_cnt(struct hccs_dev *hdev,
+ const struct hccs_port_info *port,
+ u64 *crc_err_cnt)
+{
+ const struct hccs_die_info *die = port->die;
+ const struct hccs_chip_info *chip = die->chip;
+ struct hccs_port_comm_req_param *req_param;
+ struct hccs_desc desc;
+ int ret;
+
+ hccs_init_req_desc(&desc);
+ req_param = (struct hccs_port_comm_req_param *)desc.req.data;
+ req_param->chip_id = chip->chip_id;
+ req_param->die_id = die->die_id;
+ req_param->port_id = port->port_id;
+ ret = hccs_pcc_cmd_send(hdev, HCCS_GET_PORT_CRC_ERR_CNT, &desc);
+ if (ret) {
+ dev_err(hdev->dev,
+ "get port crc error count failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ memcpy(crc_err_cnt, &desc.rsp.data, sizeof(u64));
+
+ return 0;
+}
+
+static int hccs_get_die_all_link_status(struct hccs_dev *hdev,
+ const struct hccs_die_info *die,
+ u8 *all_linked)
+{
+ struct hccs_die_comm_req_param *req_param;
+ struct hccs_desc desc;
+ int ret;
+
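+	/* A die with no HCCS ports trivially counts as fully linked. */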
+ if (die->port_num == 0) {
+ *all_linked = 1;
+ return 0;
+ }
+
+ hccs_init_req_desc(&desc);
+ req_param = (struct hccs_die_comm_req_param *)desc.req.data;
+ req_param->chip_id = die->chip->chip_id;
+ req_param->die_id = die->die_id;
+ ret = hccs_pcc_cmd_send(hdev, HCCS_GET_DIE_PORTS_LINK_STA, &desc);
+ if (ret) {
+ dev_err(hdev->dev,
+ "get link status of all ports failed on die%u, ret = %d.\n",
+ die->die_id, ret);
+ return ret;
+ }
+
+ *all_linked = *((u8 *)&desc.rsp.data);
+
+ return 0;
+}
+
+static int hccs_get_die_all_port_lane_status(struct hccs_dev *hdev,
+ const struct hccs_die_info *die,
+ u8 *full_lane)
+{
+ struct hccs_die_comm_req_param *req_param;
+ struct hccs_desc desc;
+ int ret;
+
+ if (die->port_num == 0) {
+ *full_lane = 1;
+ return 0;
+ }
+
+ hccs_init_req_desc(&desc);
+ req_param = (struct hccs_die_comm_req_param *)desc.req.data;
+ req_param->chip_id = die->chip->chip_id;
+ req_param->die_id = die->die_id;
+ ret = hccs_pcc_cmd_send(hdev, HCCS_GET_DIE_PORTS_LANE_STA, &desc);
+ if (ret) {
+ dev_err(hdev->dev, "get lane status of all ports failed on die%u, ret = %d.\n",
+ die->die_id, ret);
+ return ret;
+ }
+
+ *full_lane = *((u8 *)&desc.rsp.data);
+
+ return 0;
+}
+
+static int hccs_get_die_total_crc_err_cnt(struct hccs_dev *hdev,
+ const struct hccs_die_info *die,
+ u64 *total_crc_err_cnt)
+{
+ struct hccs_die_comm_req_param *req_param;
+ struct hccs_desc desc;
+ int ret;
+
+ if (die->port_num == 0) {
+ *total_crc_err_cnt = 0;
+ return 0;
+ }
+
+ hccs_init_req_desc(&desc);
+ req_param = (struct hccs_die_comm_req_param *)desc.req.data;
+ req_param->chip_id = die->chip->chip_id;
+ req_param->die_id = die->die_id;
+ ret = hccs_pcc_cmd_send(hdev, HCCS_GET_DIE_PORTS_CRC_ERR_CNT, &desc);
+ if (ret) {
+ dev_err(hdev->dev, "get crc error count sum failed on die%u, ret = %d.\n",
+ die->die_id, ret);
+ return ret;
+ }
+
+ memcpy(total_crc_err_cnt, &desc.rsp.data, sizeof(u64));
+
+ return 0;
+}
+
+static ssize_t hccs_show(struct kobject *k, struct attribute *attr, char *buf)
+{
+ struct kobj_attribute *kobj_attr;
+
+ kobj_attr = container_of(attr, struct kobj_attribute, attr);
+
+ return kobj_attr->show(k, kobj_attr, buf);
+}
+
+static const struct sysfs_ops hccs_comm_ops = {
+ .show = hccs_show,
+};
+
+static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ const struct hccs_port_info *port = kobj_to_port_info(kobj);
+
+ return sysfs_emit(buf, "HCCS-v%u\n", port->port_type);
+}
+static struct kobj_attribute hccs_type_attr = __ATTR_RO(type);
+
+static ssize_t lane_mode_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ const struct hccs_port_info *port = kobj_to_port_info(kobj);
+
+ return sysfs_emit(buf, "x%u\n", port->lane_mode);
+}
+static struct kobj_attribute lane_mode_attr = __ATTR_RO(lane_mode);
+
+static ssize_t enable_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ const struct hccs_port_info *port = kobj_to_port_info(kobj);
+
+ return sysfs_emit(buf, "%u\n", port->enable);
+}
+static struct kobj_attribute port_enable_attr = __ATTR_RO(enable);
+
+static ssize_t cur_lane_num_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ const struct hccs_port_info *port = kobj_to_port_info(kobj);
+ struct hccs_dev *hdev = port->die->chip->hdev;
+ struct hccs_link_status link_status = {0};
+ int ret;
+
+ mutex_lock(&hdev->lock);
+ ret = hccs_query_port_link_status(hdev, port, &link_status);
+ mutex_unlock(&hdev->lock);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%u\n", link_status.lane_num);
+}
+static struct kobj_attribute cur_lane_num_attr = __ATTR_RO(cur_lane_num);
+
+static ssize_t link_fsm_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ const struct hccs_port_info *port = kobj_to_port_info(kobj);
+ struct hccs_dev *hdev = port->die->chip->hdev;
+ struct hccs_link_status link_status = {0};
+ const struct {
+ u8 link_fsm;
+ char *str;
+ } link_fsm_map[] = {
+ {HCCS_PORT_RESET, "reset"},
+ {HCCS_PORT_SETUP, "setup"},
+ {HCCS_PORT_CONFIG, "config"},
+ {HCCS_PORT_READY, "link-up"},
+ };
+ const char *link_fsm_str = "unknown";
+ size_t i;
+ int ret;
+
+ mutex_lock(&hdev->lock);
+ ret = hccs_query_port_link_status(hdev, port, &link_status);
+ mutex_unlock(&hdev->lock);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(link_fsm_map); i++) {
+ if (link_fsm_map[i].link_fsm == link_status.link_fsm) {
+ link_fsm_str = link_fsm_map[i].str;
+ break;
+ }
+ }
+
+ return sysfs_emit(buf, "%s\n", link_fsm_str);
+}
+static struct kobj_attribute link_fsm_attr = __ATTR_RO(link_fsm);
+
+static ssize_t lane_mask_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ const struct hccs_port_info *port = kobj_to_port_info(kobj);
+ struct hccs_dev *hdev = port->die->chip->hdev;
+ struct hccs_link_status link_status = {0};
+ int ret;
+
+ mutex_lock(&hdev->lock);
+ ret = hccs_query_port_link_status(hdev, port, &link_status);
+ mutex_unlock(&hdev->lock);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "0x%x\n", link_status.lane_mask);
+}
+static struct kobj_attribute lane_mask_attr = __ATTR_RO(lane_mask);
+
+static ssize_t crc_err_cnt_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ const struct hccs_port_info *port = kobj_to_port_info(kobj);
+ struct hccs_dev *hdev = port->die->chip->hdev;
+ u64 crc_err_cnt;
+ int ret;
+
+ mutex_lock(&hdev->lock);
+ ret = hccs_query_port_crc_err_cnt(hdev, port, &crc_err_cnt);
+ mutex_unlock(&hdev->lock);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%llu\n", crc_err_cnt);
+}
+static struct kobj_attribute crc_err_cnt_attr = __ATTR_RO(crc_err_cnt);
+
+static struct attribute *hccs_port_default_attrs[] = {
+ &hccs_type_attr.attr,
+ &lane_mode_attr.attr,
+ &port_enable_attr.attr,
+ &cur_lane_num_attr.attr,
+ &link_fsm_attr.attr,
+ &lane_mask_attr.attr,
+ &crc_err_cnt_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(hccs_port_default);
+
+static const struct kobj_type hccs_port_type = {
+ .sysfs_ops = &hccs_comm_ops,
+ .default_groups = hccs_port_default_groups,
+};
+
+static ssize_t all_linked_on_die_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ const struct hccs_die_info *die = kobj_to_die_info(kobj);
+ struct hccs_dev *hdev = die->chip->hdev;
+ u8 all_linked;
+ int ret;
+
+ mutex_lock(&hdev->lock);
+ ret = hccs_get_die_all_link_status(hdev, die, &all_linked);
+ mutex_unlock(&hdev->lock);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%u\n", all_linked);
+}
+static struct kobj_attribute all_linked_on_die_attr =
+ __ATTR(all_linked, 0444, all_linked_on_die_show, NULL);
+
+static ssize_t linked_full_lane_on_die_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ const struct hccs_die_info *die = kobj_to_die_info(kobj);
+ struct hccs_dev *hdev = die->chip->hdev;
+ u8 full_lane;
+ int ret;
+
+ mutex_lock(&hdev->lock);
+ ret = hccs_get_die_all_port_lane_status(hdev, die, &full_lane);
+ mutex_unlock(&hdev->lock);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%u\n", full_lane);
+}
+static struct kobj_attribute linked_full_lane_on_die_attr =
+ __ATTR(linked_full_lane, 0444, linked_full_lane_on_die_show, NULL);
+
+static ssize_t crc_err_cnt_sum_on_die_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ const struct hccs_die_info *die = kobj_to_die_info(kobj);
+ struct hccs_dev *hdev = die->chip->hdev;
+ u64 total_crc_err_cnt;
+ int ret;
+
+ mutex_lock(&hdev->lock);
+ ret = hccs_get_die_total_crc_err_cnt(hdev, die, &total_crc_err_cnt);
+ mutex_unlock(&hdev->lock);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%llu\n", total_crc_err_cnt);
+}
+static struct kobj_attribute crc_err_cnt_sum_on_die_attr =
+ __ATTR(crc_err_cnt, 0444, crc_err_cnt_sum_on_die_show, NULL);
+
+static struct attribute *hccs_die_default_attrs[] = {
+ &all_linked_on_die_attr.attr,
+ &linked_full_lane_on_die_attr.attr,
+ &crc_err_cnt_sum_on_die_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(hccs_die_default);
+
+static const struct kobj_type hccs_die_type = {
+ .sysfs_ops = &hccs_comm_ops,
+ .default_groups = hccs_die_default_groups,
+};
+
+static ssize_t all_linked_on_chip_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ const struct hccs_chip_info *chip = kobj_to_chip_info(kobj);
+ struct hccs_dev *hdev = chip->hdev;
+ const struct hccs_die_info *die;
+ u8 all_linked = 1;
+ u8 i, tmp;
+ int ret;
+
+ mutex_lock(&hdev->lock);
+ for (i = 0; i < chip->die_num; i++) {
+ die = &chip->dies[i];
+ ret = hccs_get_die_all_link_status(hdev, die, &tmp);
+ if (ret) {
+ mutex_unlock(&hdev->lock);
+ return ret;
+ }
+ if (tmp != all_linked) {
+ all_linked = 0;
+ break;
+ }
+ }
+ mutex_unlock(&hdev->lock);
+
+ return sysfs_emit(buf, "%u\n", all_linked);
+}
+static struct kobj_attribute all_linked_on_chip_attr =
+ __ATTR(all_linked, 0444, all_linked_on_chip_show, NULL);
+
+static ssize_t linked_full_lane_on_chip_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ const struct hccs_chip_info *chip = kobj_to_chip_info(kobj);
+ struct hccs_dev *hdev = chip->hdev;
+ const struct hccs_die_info *die;
+ u8 full_lane = 1;
+ u8 i, tmp;
+ int ret;
+
+ mutex_lock(&hdev->lock);
+ for (i = 0; i < chip->die_num; i++) {
+ die = &chip->dies[i];
+ ret = hccs_get_die_all_port_lane_status(hdev, die, &tmp);
+ if (ret) {
+ mutex_unlock(&hdev->lock);
+ return ret;
+ }
+ if (tmp != full_lane) {
+ full_lane = 0;
+ break;
+ }
+ }
+ mutex_unlock(&hdev->lock);
+
+ return sysfs_emit(buf, "%u\n", full_lane);
+}
+static struct kobj_attribute linked_full_lane_on_chip_attr =
+ __ATTR(linked_full_lane, 0444, linked_full_lane_on_chip_show, NULL);
+
+static ssize_t crc_err_cnt_sum_on_chip_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ const struct hccs_chip_info *chip = kobj_to_chip_info(kobj);
+ u64 crc_err_cnt, total_crc_err_cnt = 0;
+ struct hccs_dev *hdev = chip->hdev;
+ const struct hccs_die_info *die;
+ int ret;
+ u16 i;
+
+ mutex_lock(&hdev->lock);
+ for (i = 0; i < chip->die_num; i++) {
+ die = &chip->dies[i];
+ ret = hccs_get_die_total_crc_err_cnt(hdev, die, &crc_err_cnt);
+ if (ret) {
+ mutex_unlock(&hdev->lock);
+ return ret;
+ }
+
+ total_crc_err_cnt += crc_err_cnt;
+ }
+ mutex_unlock(&hdev->lock);
+
+ return sysfs_emit(buf, "%llu\n", total_crc_err_cnt);
+}
+static struct kobj_attribute crc_err_cnt_sum_on_chip_attr =
+ __ATTR(crc_err_cnt, 0444, crc_err_cnt_sum_on_chip_show, NULL);
+
+static struct attribute *hccs_chip_default_attrs[] = {
+ &all_linked_on_chip_attr.attr,
+ &linked_full_lane_on_chip_attr.attr,
+ &crc_err_cnt_sum_on_chip_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(hccs_chip_default);
+
+static const struct kobj_type hccs_chip_type = {
+ .sysfs_ops = &hccs_comm_ops,
+ .default_groups = hccs_chip_default_groups,
+};
+
+static void hccs_remove_die_dir(struct hccs_die_info *die)
+{
+ struct hccs_port_info *port;
+ u8 i;
+
+ for (i = 0; i < die->port_num; i++) {
+ port = &die->ports[i];
+ if (port->dir_created)
+ kobject_put(&port->kobj);
+ }
+
+ kobject_put(&die->kobj);
+}
+
+static void hccs_remove_chip_dir(struct hccs_chip_info *chip)
+{
+ struct hccs_die_info *die;
+ u8 i;
+
+ for (i = 0; i < chip->die_num; i++) {
+ die = &chip->dies[i];
+ if (die->dir_created)
+ hccs_remove_die_dir(die);
+ }
+
+ kobject_put(&chip->kobj);
+}
+
+static void hccs_remove_topo_dirs(struct hccs_dev *hdev)
+{
+ u8 i;
+
+ for (i = 0; i < hdev->chip_num; i++)
+ hccs_remove_chip_dir(&hdev->chips[i]);
+}
+
+static int hccs_create_hccs_dir(struct hccs_dev *hdev,
+ struct hccs_die_info *die,
+ struct hccs_port_info *port)
+{
+ int ret;
+
+ ret = kobject_init_and_add(&port->kobj, &hccs_port_type,
+ &die->kobj, "hccs%d", port->port_id);
+ if (ret) {
+ kobject_put(&port->kobj);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hccs_create_die_dir(struct hccs_dev *hdev,
+ struct hccs_chip_info *chip,
+ struct hccs_die_info *die)
+{
+ struct hccs_port_info *port;
+ int ret;
+ u16 i;
+
+ ret = kobject_init_and_add(&die->kobj, &hccs_die_type,
+ &chip->kobj, "die%d", die->die_id);
+ if (ret) {
+ kobject_put(&die->kobj);
+ return ret;
+ }
+
+ for (i = 0; i < die->port_num; i++) {
+ port = &die->ports[i];
+ ret = hccs_create_hccs_dir(hdev, die, port);
+ if (ret) {
+ dev_err(hdev->dev, "create hccs%d dir failed.\n",
+ port->port_id);
+ goto err;
+ }
+ port->dir_created = true;
+ }
+
+ return 0;
+err:
+ hccs_remove_die_dir(die);
+
+ return ret;
+}
+
+static int hccs_create_chip_dir(struct hccs_dev *hdev,
+ struct hccs_chip_info *chip)
+{
+ struct hccs_die_info *die;
+ int ret;
+ u16 id;
+
+ ret = kobject_init_and_add(&chip->kobj, &hccs_chip_type,
+ &hdev->dev->kobj, "chip%d", chip->chip_id);
+ if (ret) {
+ kobject_put(&chip->kobj);
+ return ret;
+ }
+
+ for (id = 0; id < chip->die_num; id++) {
+ die = &chip->dies[id];
+ ret = hccs_create_die_dir(hdev, chip, die);
+ if (ret)
+ goto err;
+ die->dir_created = true;
+ }
+
+ return 0;
+err:
+ hccs_remove_chip_dir(chip);
+
+ return ret;
+}
+
+static int hccs_create_topo_dirs(struct hccs_dev *hdev)
+{
+ struct hccs_chip_info *chip;
+ u8 id, k;
+ int ret;
+
+ for (id = 0; id < hdev->chip_num; id++) {
+ chip = &hdev->chips[id];
+ ret = hccs_create_chip_dir(hdev, chip);
+ if (ret) {
+ dev_err(hdev->dev, "init chip%d dir failed!\n", id);
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ for (k = 0; k < id; k++)
+ hccs_remove_chip_dir(&hdev->chips[k]);
+
+ return ret;
+}
+
+static int hccs_probe(struct platform_device *pdev)
+{
+ struct acpi_device *acpi_dev;
+ struct hccs_dev *hdev;
+ int rc;
+
+ if (acpi_disabled) {
+ dev_err(&pdev->dev, "acpi is disabled.\n");
+ return -ENODEV;
+ }
+ acpi_dev = ACPI_COMPANION(&pdev->dev);
+ if (!acpi_dev)
+ return -ENODEV;
+
+ hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
+ if (!hdev)
+ return -ENOMEM;
+ hdev->acpi_dev = acpi_dev;
+ hdev->dev = &pdev->dev;
+ platform_set_drvdata(pdev, hdev);
+
+ mutex_init(&hdev->lock);
+ rc = hccs_get_pcc_chan_id(hdev);
+ if (rc)
+ return rc;
+ rc = hccs_register_pcc_channel(hdev);
+ if (rc)
+ return rc;
+
+ rc = hccs_get_dev_caps(hdev);
+ if (rc)
+ goto unregister_pcc_chan;
+
+ rc = hccs_get_hw_info(hdev);
+ if (rc)
+ goto unregister_pcc_chan;
+
+ rc = hccs_create_topo_dirs(hdev);
+ if (rc)
+ goto unregister_pcc_chan;
+
+ return 0;
+
+unregister_pcc_chan:
+ hccs_unregister_pcc_channel(hdev);
+
+ return rc;
+}
+
+static int hccs_remove(struct platform_device *pdev)
+{
+ struct hccs_dev *hdev = platform_get_drvdata(pdev);
+
+ hccs_remove_topo_dirs(hdev);
+ hccs_unregister_pcc_channel(hdev);
+
+ return 0;
+}
+
+static const struct acpi_device_id hccs_acpi_match[] = {
+ { "HISI04B1"},
+ { ""},
+};
+MODULE_DEVICE_TABLE(acpi, hccs_acpi_match);
+
+static struct platform_driver hccs_driver = {
+ .probe = hccs_probe,
+ .remove = hccs_remove,
+ .driver = {
+ .name = "kunpeng_hccs",
+ .acpi_match_table = hccs_acpi_match,
+ },
+};
+
+module_platform_driver(hccs_driver);
+
+MODULE_DESCRIPTION("Kunpeng SoC HCCS driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Huisong Li <lihuisong@huawei.com>");
diff --git a/drivers/soc/hisilicon/kunpeng_hccs.h b/drivers/soc/hisilicon/kunpeng_hccs.h
new file mode 100644
index 0000000000..6012d27760
--- /dev/null
+++ b/drivers/soc/hisilicon/kunpeng_hccs.h
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2023 Hisilicon Limited. */
+
+#ifndef __KUNPENG_HCCS_H__
+#define __KUNPENG_HCCS_H__
+
+/*
+ * |--------------- Chip0 ---------------|---------------- ChipN -------------|
+ * |--------Die0-------|--------DieN-------|--------Die0-------|-------DieN-------|
+ * | P0 | P1 | P2 | P3 | P0 | P1 | P2 | P3 | P0 | P1 | P2 | P3 |P0 | P1 | P2 | P3 |
+ */
+
+/*
+ * This value cannot be 255, otherwise the loop of the multi-BD communication
+ * case cannot end.
+ */
+#define HCCS_DIE_MAX_PORT_ID 254
+
+struct hccs_port_info {
+ u8 port_id;
+ u8 port_type;
+ u8 lane_mode;
+ bool enable; /* if the port is enabled */
+ struct kobject kobj;
+ bool dir_created;
+	struct hccs_die_info *die; /* the die this port is located on */
+};
+
+struct hccs_die_info {
+ u8 die_id;
+ u8 port_num;
+ u8 min_port_id;
+ u8 max_port_id;
+ struct hccs_port_info *ports;
+ struct kobject kobj;
+ bool dir_created;
+	struct hccs_chip_info *chip; /* the chip this die is located on */
+};
+
+struct hccs_chip_info {
+ u8 chip_id;
+ u8 die_num;
+ struct hccs_die_info *dies;
+ struct kobject kobj;
+ struct hccs_dev *hdev;
+};
+
+struct hccs_mbox_client_info {
+ struct mbox_client client;
+ struct mbox_chan *mbox_chan;
+ struct pcc_mbox_chan *pcc_chan;
+ u64 deadline_us;
+ void __iomem *pcc_comm_addr;
+};
+
+struct hccs_dev {
+ struct device *dev;
+ struct acpi_device *acpi_dev;
+ u64 caps;
+ u8 chip_num;
+ struct hccs_chip_info *chips;
+ u8 chan_id;
+ struct mutex lock;
+ struct hccs_mbox_client_info cl_info;
+};
+
+#define HCCS_SERDES_MODULE_CODE 0x32
+enum hccs_subcmd_type {
+ HCCS_GET_CHIP_NUM = 0x1,
+ HCCS_GET_DIE_NUM,
+ HCCS_GET_DIE_INFO,
+ HCCS_GET_DIE_PORT_INFO,
+ HCCS_GET_DEV_CAP,
+ HCCS_GET_PORT_LINK_STATUS,
+ HCCS_GET_PORT_CRC_ERR_CNT,
+ HCCS_GET_DIE_PORTS_LANE_STA,
+ HCCS_GET_DIE_PORTS_LINK_STA,
+ HCCS_GET_DIE_PORTS_CRC_ERR_CNT,
+ HCCS_SUB_CMD_MAX = 255,
+};
+
+struct hccs_die_num_req_param {
+ u8 chip_id;
+};
+
+struct hccs_die_info_req_param {
+ u8 chip_id;
+ u8 die_idx;
+};
+
+struct hccs_die_info_rsp_data {
+ u8 die_id;
+ u8 port_num;
+ u8 min_port_id;
+ u8 max_port_id;
+};
+
+struct hccs_port_attr {
+ u8 port_id;
+ u8 port_type;
+ u8 lane_mode;
+ u8 enable : 1; /* if the port is enabled */
+ u16 rsv[2];
+};
+
+/*
+ * The common command request for getting the information of all HCCS ports on
+ * a specified die.
+ */
+struct hccs_die_comm_req_param {
+ u8 chip_id;
+ u8 die_id; /* id in hardware */
+};
+
+/* The common command request for getting the information of a specific port */
+struct hccs_port_comm_req_param {
+ u8 chip_id;
+ u8 die_id;
+ u8 port_id;
+};
+
+#define HCCS_PORT_RESET 1
+#define HCCS_PORT_SETUP 2
+#define HCCS_PORT_CONFIG 3
+#define HCCS_PORT_READY 4
+struct hccs_link_status {
+ u8 lane_mask; /* indicate which lanes are used. */
+ u8 link_fsm : 3; /* link fsm, 1: reset 2: setup 3: config 4: link-up */
+ u8 lane_num : 5; /* current lane number */
+};
+
+struct hccs_req_head {
+ u8 module_code; /* set to 0x32 for serdes */
+ u8 start_id;
+ u8 rsv[2];
+};
+
+struct hccs_rsp_head {
+ u8 data_len;
+ u8 next_id;
+ u8 rsv[2];
+};
+
+struct hccs_fw_inner_head {
+ u8 retStatus; /* 0: success, other: failure */
+ u8 rsv[7];
+};
+
+#define HCCS_PCC_SHARE_MEM_BYTES 64
+#define HCCS_FW_INNER_HEAD_BYTES 8
+#define HCCS_RSP_HEAD_BYTES 4
+
+#define HCCS_MAX_RSP_DATA_BYTES (HCCS_PCC_SHARE_MEM_BYTES - \
+ HCCS_FW_INNER_HEAD_BYTES - \
+ HCCS_RSP_HEAD_BYTES)
+#define HCCS_MAX_RSP_DATA_SIZE_MAX (HCCS_MAX_RSP_DATA_BYTES / 4)
+
+/*
+ * Note: The actual available size of the data field also depends on the PCC
+ * header bytes of the specific type. The driver needs to copy the response
+ * data from the communication space based on the real length.
+ */
+struct hccs_rsp_desc {
+ struct hccs_fw_inner_head fw_inner_head; /* 8 Bytes */
+ struct hccs_rsp_head rsp_head; /* 4 Bytes */
+ u32 data[HCCS_MAX_RSP_DATA_SIZE_MAX];
+};
+
+#define HCCS_REQ_HEAD_BYTES 4
+#define HCCS_MAX_REQ_DATA_BYTES (HCCS_PCC_SHARE_MEM_BYTES - \
+ HCCS_REQ_HEAD_BYTES)
+#define HCCS_MAX_REQ_DATA_SIZE_MAX (HCCS_MAX_REQ_DATA_BYTES / 4)
+
+/*
+ * Note: The actual available size of the data field also depends on the PCC
+ * header bytes of the specific type. The driver needs to copy the request
+ * data to the communication space based on the real length.
+ */
+struct hccs_req_desc {
+ struct hccs_req_head req_head; /* 4 Bytes */
+ u32 data[HCCS_MAX_REQ_DATA_SIZE_MAX];
+};
+
+struct hccs_desc {
+ union {
+ struct hccs_req_desc req;
+ struct hccs_rsp_desc rsp;
+ };
+};
+
+#endif /* __KUNPENG_HCCS_H__ */
diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig
new file mode 100644
index 0000000000..76a4593baf
--- /dev/null
+++ b/drivers/soc/imx/Kconfig
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "i.MX SoC drivers"
+
+config IMX_GPCV2_PM_DOMAINS
+ bool "i.MX GPCv2 PM domains"
+ depends on ARCH_MXC || (COMPILE_TEST && OF)
+ depends on PM
+ select PM_GENERIC_DOMAINS
+ select REGMAP_MMIO
+ default y if SOC_IMX7D
+
+config SOC_IMX8M
+ tristate "i.MX8M SoC family support"
+ depends on ARCH_MXC || COMPILE_TEST
+ default ARCH_MXC && ARM64
+ select SOC_BUS
+ select ARM_GIC_V3 if ARCH_MXC && ARCH_MULTI_V7
+ help
+	  If you say yes here you get support for the NXP i.MX8M family.
+	  It will provide SoC info such as the SoC family, ID and
+	  revision.
+
+config SOC_IMX9
+ tristate "i.MX9 SoC family support"
+ depends on ARCH_MXC || COMPILE_TEST
+ default ARCH_MXC && ARM64
+ select SOC_BUS
+ help
+	  If you say yes here, you get support for the NXP i.MX9 family.
+
+config IMX8M_BLK_CTRL
+ bool
+ default SOC_IMX8M && IMX_GPCV2_PM_DOMAINS
+ depends on PM_GENERIC_DOMAINS
+ depends on COMMON_CLK
+
+config IMX9_BLK_CTRL
+ bool
+ default SOC_IMX9 && IMX_GPCV2_PM_DOMAINS
+ depends on PM_GENERIC_DOMAINS
+
+endmenu
diff --git a/drivers/soc/imx/Makefile b/drivers/soc/imx/Makefile
new file mode 100644
index 0000000000..3ad321ca60
--- /dev/null
+++ b/drivers/soc/imx/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ifeq ($(CONFIG_ARM),y)
+obj-$(CONFIG_ARCH_MXC) += soc-imx.o
+endif
+obj-$(CONFIG_SOC_IMX8M) += soc-imx8m.o
+obj-$(CONFIG_SOC_IMX9) += imx93-src.o
diff --git a/drivers/soc/imx/imx93-src.c b/drivers/soc/imx/imx93-src.c
new file mode 100644
index 0000000000..f1c2e22d5c
--- /dev/null
+++ b/drivers/soc/imx/imx93-src.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2022 NXP
+ */
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+static int imx93_src_probe(struct platform_device *pdev)
+{
+ return devm_of_platform_populate(&pdev->dev);
+}
+
+static const struct of_device_id imx93_src_ids[] = {
+ { .compatible = "fsl,imx93-src" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, imx93_src_ids);
+
+static struct platform_driver imx93_src_driver = {
+ .driver = {
+ .name = "imx93_src",
+ .of_match_table = imx93_src_ids,
+ },
+ .probe = imx93_src_probe,
+};
+module_platform_driver(imx93_src_driver);
+
+MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>");
+MODULE_DESCRIPTION("NXP i.MX93 src driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/imx/soc-imx.c b/drivers/soc/imx/soc-imx.c
new file mode 100644
index 0000000000..fab668c83f
--- /dev/null
+++ b/drivers/soc/imx/soc-imx.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020 NXP
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+
+#include <soc/imx/cpu.h>
+#include <soc/imx/revision.h>
+
+#define IIM_UID 0x820
+
+#define OCOTP_UID_H 0x420
+#define OCOTP_UID_L 0x410
+
+#define OCOTP_ULP_UID_1 0x4b0
+#define OCOTP_ULP_UID_2 0x4c0
+#define OCOTP_ULP_UID_3 0x4d0
+#define OCOTP_ULP_UID_4 0x4e0
+
+static int __init imx_soc_device_init(void)
+{
+ struct soc_device_attribute *soc_dev_attr;
+ const char *ocotp_compat = NULL;
+ struct soc_device *soc_dev;
+ struct device_node *root;
+ struct regmap *ocotp = NULL;
+ const char *soc_id;
+ u64 soc_uid = 0;
+ u32 val;
+ int ret;
+ int i;
+
+ /* Return early if this is running on devices with different SoCs */
+ if (!__mxc_cpu_type)
+ return 0;
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ soc_dev_attr->family = "Freescale i.MX";
+
+ root = of_find_node_by_path("/");
+ ret = of_property_read_string(root, "model", &soc_dev_attr->machine);
+ of_node_put(root);
+ if (ret)
+ goto free_soc;
+
+ switch (__mxc_cpu_type) {
+ case MXC_CPU_MX1:
+ soc_id = "i.MX1";
+ break;
+ case MXC_CPU_MX21:
+ soc_id = "i.MX21";
+ break;
+ case MXC_CPU_MX25:
+ soc_id = "i.MX25";
+ break;
+ case MXC_CPU_MX27:
+ soc_id = "i.MX27";
+ break;
+ case MXC_CPU_MX31:
+ soc_id = "i.MX31";
+ break;
+ case MXC_CPU_MX35:
+ soc_id = "i.MX35";
+ break;
+ case MXC_CPU_MX50:
+ soc_id = "i.MX50";
+ break;
+ case MXC_CPU_MX51:
+ ocotp_compat = "fsl,imx51-iim";
+ soc_id = "i.MX51";
+ break;
+ case MXC_CPU_MX53:
+ ocotp_compat = "fsl,imx53-iim";
+ soc_id = "i.MX53";
+ break;
+ case MXC_CPU_IMX6SL:
+ ocotp_compat = "fsl,imx6sl-ocotp";
+ soc_id = "i.MX6SL";
+ break;
+ case MXC_CPU_IMX6DL:
+ ocotp_compat = "fsl,imx6q-ocotp";
+ soc_id = "i.MX6DL";
+ break;
+ case MXC_CPU_IMX6SX:
+ ocotp_compat = "fsl,imx6sx-ocotp";
+ soc_id = "i.MX6SX";
+ break;
+ case MXC_CPU_IMX6Q:
+ ocotp_compat = "fsl,imx6q-ocotp";
+ soc_id = "i.MX6Q";
+ break;
+ case MXC_CPU_IMX6UL:
+ ocotp_compat = "fsl,imx6ul-ocotp";
+ soc_id = "i.MX6UL";
+ break;
+ case MXC_CPU_IMX6ULL:
+ ocotp_compat = "fsl,imx6ull-ocotp";
+ soc_id = "i.MX6ULL";
+ break;
+ case MXC_CPU_IMX6ULZ:
+ ocotp_compat = "fsl,imx6ull-ocotp";
+ soc_id = "i.MX6ULZ";
+ break;
+ case MXC_CPU_IMX6SLL:
+ ocotp_compat = "fsl,imx6sll-ocotp";
+ soc_id = "i.MX6SLL";
+ break;
+ case MXC_CPU_IMX7D:
+ ocotp_compat = "fsl,imx7d-ocotp";
+ soc_id = "i.MX7D";
+ break;
+ case MXC_CPU_IMX7ULP:
+ ocotp_compat = "fsl,imx7ulp-ocotp";
+ soc_id = "i.MX7ULP";
+ break;
+ case MXC_CPU_VF500:
+ ocotp_compat = "fsl,vf610-ocotp";
+ soc_id = "VF500";
+ break;
+ case MXC_CPU_VF510:
+ ocotp_compat = "fsl,vf610-ocotp";
+ soc_id = "VF510";
+ break;
+ case MXC_CPU_VF600:
+ ocotp_compat = "fsl,vf610-ocotp";
+ soc_id = "VF600";
+ break;
+ case MXC_CPU_VF610:
+ ocotp_compat = "fsl,vf610-ocotp";
+ soc_id = "VF610";
+ break;
+ default:
+ soc_id = "Unknown";
+ }
+ soc_dev_attr->soc_id = soc_id;
+
+ if (ocotp_compat) {
+ ocotp = syscon_regmap_lookup_by_compatible(ocotp_compat);
+ if (IS_ERR(ocotp))
+ pr_err("%s: failed to find %s regmap!\n", __func__, ocotp_compat);
+ }
+
+ if (!IS_ERR_OR_NULL(ocotp)) {
+ if (__mxc_cpu_type == MXC_CPU_IMX7ULP) {
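+			/* i.MX7ULP fuses hold the UID as four 16-bit fields. */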
+ regmap_read(ocotp, OCOTP_ULP_UID_4, &val);
+ soc_uid = val & 0xffff;
+ regmap_read(ocotp, OCOTP_ULP_UID_3, &val);
+ soc_uid <<= 16;
+ soc_uid |= val & 0xffff;
+ regmap_read(ocotp, OCOTP_ULP_UID_2, &val);
+ soc_uid <<= 16;
+ soc_uid |= val & 0xffff;
+ regmap_read(ocotp, OCOTP_ULP_UID_1, &val);
+ soc_uid <<= 16;
+ soc_uid |= val & 0xffff;
+ } else if (__mxc_cpu_type == MXC_CPU_MX51 ||
+ __mxc_cpu_type == MXC_CPU_MX53) {
+			for (i = 0; i < 8; i++) {
+				regmap_read(ocotp, IIM_UID + i * 4, &val);
+ soc_uid <<= 8;
+ soc_uid |= (val & 0xff);
+ }
+ } else {
+ regmap_read(ocotp, OCOTP_UID_H, &val);
+ soc_uid = val;
+ regmap_read(ocotp, OCOTP_UID_L, &val);
+ soc_uid <<= 32;
+ soc_uid |= val;
+ }
+ }
+
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%d.%d",
+ (imx_get_soc_revision() >> 4) & 0xf,
+ imx_get_soc_revision() & 0xf);
+ if (!soc_dev_attr->revision) {
+ ret = -ENOMEM;
+ goto free_soc;
+ }
+
+ soc_dev_attr->serial_number = kasprintf(GFP_KERNEL, "%016llX", soc_uid);
+ if (!soc_dev_attr->serial_number) {
+ ret = -ENOMEM;
+ goto free_rev;
+ }
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ ret = PTR_ERR(soc_dev);
+ goto free_serial_number;
+ }
+
+ return 0;
+
+free_serial_number:
+ kfree(soc_dev_attr->serial_number);
+free_rev:
+ kfree(soc_dev_attr->revision);
+free_soc:
+ kfree(soc_dev_attr);
+ return ret;
+}
+device_initcall(imx_soc_device_init);
diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c
new file mode 100644
index 0000000000..ec87d9d878
--- /dev/null
+++ b/drivers/soc/imx/soc-imx8m.c
@@ -0,0 +1,255 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP.
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+#include <linux/platform_device.h>
+#include <linux/arm-smccc.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+
+#define REV_B1 0x21
+
+#define IMX8MQ_SW_INFO_B1 0x40
+#define IMX8MQ_SW_MAGIC_B1 0xff0055aa
+
+#define IMX_SIP_GET_SOC_INFO 0xc2000006
+
+#define OCOTP_UID_LOW 0x410
+#define OCOTP_UID_HIGH 0x420
+
+#define IMX8MP_OCOTP_UID_OFFSET 0x10
+
+/* Same as ANADIG_DIGPROG_IMX7D */
+#define ANADIG_DIGPROG_IMX8MM 0x800
+
+struct imx8_soc_data {
+ char *name;
+ u32 (*soc_revision)(void);
+};
+
+static u64 soc_uid;
+
+#ifdef CONFIG_HAVE_ARM_SMCCC
+static u32 imx8mq_soc_revision_from_atf(void)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(IMX_SIP_GET_SOC_INFO, 0, 0, 0, 0, 0, 0, 0, &res);
+
+ if (res.a0 == SMCCC_RET_NOT_SUPPORTED)
+ return 0;
+ else
+ return res.a0 & 0xff;
+}
+#else
+static inline u32 imx8mq_soc_revision_from_atf(void) { return 0; };
+#endif
+
+static u32 __init imx8mq_soc_revision(void)
+{
+ struct device_node *np;
+ void __iomem *ocotp_base;
+ u32 magic;
+ u32 rev;
+ struct clk *clk;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx8mq-ocotp");
+ if (!np)
+ return 0;
+
+ ocotp_base = of_iomap(np, 0);
+ WARN_ON(!ocotp_base);
+ clk = of_clk_get_by_name(np, NULL);
+ if (IS_ERR(clk)) {
+ WARN_ON(IS_ERR(clk));
+ return 0;
+ }
+
+ clk_prepare_enable(clk);
+
+ /*
+ * SOC revision on older imx8mq is not available in fuses so query
+ * the value from ATF instead.
+ */
+ rev = imx8mq_soc_revision_from_atf();
+ if (!rev) {
+ magic = readl_relaxed(ocotp_base + IMX8MQ_SW_INFO_B1);
+ if (magic == IMX8MQ_SW_MAGIC_B1)
+ rev = REV_B1;
+ }
+
+ soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH);
+ soc_uid <<= 32;
+ soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW);
+
+ clk_disable_unprepare(clk);
+ clk_put(clk);
+ iounmap(ocotp_base);
+ of_node_put(np);
+
+ return rev;
+}
+
+static void __init imx8mm_soc_uid(void)
+{
+ void __iomem *ocotp_base;
+ struct device_node *np;
+ struct clk *clk;
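+	/* i.MX8MP stores its UID at a different OCOTP offset than i.MX8MM/MN. */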
+ u32 offset = of_machine_is_compatible("fsl,imx8mp") ?
+ IMX8MP_OCOTP_UID_OFFSET : 0;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-ocotp");
+ if (!np)
+ return;
+
+ ocotp_base = of_iomap(np, 0);
+ WARN_ON(!ocotp_base);
+ clk = of_clk_get_by_name(np, NULL);
+ if (IS_ERR(clk)) {
+ WARN_ON(IS_ERR(clk));
+ return;
+ }
+
+ clk_prepare_enable(clk);
+
+ soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH + offset);
+ soc_uid <<= 32;
+ soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW + offset);
+
+ clk_disable_unprepare(clk);
+ clk_put(clk);
+ iounmap(ocotp_base);
+ of_node_put(np);
+}
+
+static u32 __init imx8mm_soc_revision(void)
+{
+ struct device_node *np;
+ void __iomem *anatop_base;
+ u32 rev;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-anatop");
+ if (!np)
+ return 0;
+
+ anatop_base = of_iomap(np, 0);
+ WARN_ON(!anatop_base);
+
+ rev = readl_relaxed(anatop_base + ANADIG_DIGPROG_IMX8MM);
+
+ iounmap(anatop_base);
+ of_node_put(np);
+
+ imx8mm_soc_uid();
+
+ return rev;
+}
+
+static const struct imx8_soc_data imx8mq_soc_data = {
+ .name = "i.MX8MQ",
+ .soc_revision = imx8mq_soc_revision,
+};
+
+static const struct imx8_soc_data imx8mm_soc_data = {
+ .name = "i.MX8MM",
+ .soc_revision = imx8mm_soc_revision,
+};
+
+static const struct imx8_soc_data imx8mn_soc_data = {
+ .name = "i.MX8MN",
+ .soc_revision = imx8mm_soc_revision,
+};
+
+static const struct imx8_soc_data imx8mp_soc_data = {
+ .name = "i.MX8MP",
+ .soc_revision = imx8mm_soc_revision,
+};
+
+static __maybe_unused const struct of_device_id imx8_soc_match[] = {
+ { .compatible = "fsl,imx8mq", .data = &imx8mq_soc_data, },
+ { .compatible = "fsl,imx8mm", .data = &imx8mm_soc_data, },
+ { .compatible = "fsl,imx8mn", .data = &imx8mn_soc_data, },
+ { .compatible = "fsl,imx8mp", .data = &imx8mp_soc_data, },
+ { }
+};
+
+#define imx8_revision(soc_rev) \
+ soc_rev ? \
+ kasprintf(GFP_KERNEL, "%d.%d", (soc_rev >> 4) & 0xf, soc_rev & 0xf) : \
+ "unknown"
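+/*
+ * Note: when soc_rev is zero this evaluates to the string literal "unknown",
+ * which is why the cleanup path below frees the revision string only when it
+ * differs from "unknown".
+ */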
+
+static int __init imx8_soc_init(void)
+{
+ struct soc_device_attribute *soc_dev_attr;
+ struct soc_device *soc_dev;
+ const struct of_device_id *id;
+ u32 soc_rev = 0;
+ const struct imx8_soc_data *data;
+ int ret;
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ soc_dev_attr->family = "Freescale i.MX";
+
+ ret = of_property_read_string(of_root, "model", &soc_dev_attr->machine);
+ if (ret)
+ goto free_soc;
+
+ id = of_match_node(imx8_soc_match, of_root);
+ if (!id) {
+ ret = -ENODEV;
+ goto free_soc;
+ }
+
+ data = id->data;
+ if (data) {
+ soc_dev_attr->soc_id = data->name;
+ if (data->soc_revision)
+ soc_rev = data->soc_revision();
+ }
+
+ soc_dev_attr->revision = imx8_revision(soc_rev);
+ if (!soc_dev_attr->revision) {
+ ret = -ENOMEM;
+ goto free_soc;
+ }
+
+ soc_dev_attr->serial_number = kasprintf(GFP_KERNEL, "%016llX", soc_uid);
+ if (!soc_dev_attr->serial_number) {
+ ret = -ENOMEM;
+ goto free_rev;
+ }
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ ret = PTR_ERR(soc_dev);
+ goto free_serial_number;
+ }
+
+ pr_info("SoC: %s revision %s\n", soc_dev_attr->soc_id,
+ soc_dev_attr->revision);
+
+ if (IS_ENABLED(CONFIG_ARM_IMX_CPUFREQ_DT))
+ platform_device_register_simple("imx-cpufreq-dt", -1, NULL, 0);
+
+ return 0;
+
+free_serial_number:
+ kfree(soc_dev_attr->serial_number);
+free_rev:
+ if (strcmp(soc_dev_attr->revision, "unknown"))
+ kfree(soc_dev_attr->revision);
+free_soc:
+ kfree(soc_dev_attr);
+ return ret;
+}
+device_initcall(imx8_soc_init);
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/ixp4xx/Kconfig b/drivers/soc/ixp4xx/Kconfig
new file mode 100644
index 0000000000..c55f0c9ae5
--- /dev/null
+++ b/drivers/soc/ixp4xx/Kconfig
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0-only
+if ARCH_IXP4XX || COMPILE_TEST
+
+menu "IXP4xx SoC drivers"
+
+config IXP4XX_QMGR
+ tristate "IXP4xx Queue Manager support"
+ help
+ This driver supports IXP4xx built-in hardware queue manager
+ and is automatically selected by Ethernet and HSS drivers.
+
+config IXP4XX_NPE
+ tristate "IXP4xx Network Processor Engine support"
+ select FW_LOADER
+ select MFD_SYSCON
+ help
+ This driver supports IXP4xx built-in network coprocessors
+ and is automatically selected by Ethernet and HSS drivers.
+
+endmenu
+
+endif
diff --git a/drivers/soc/ixp4xx/Makefile b/drivers/soc/ixp4xx/Makefile
new file mode 100644
index 0000000000..bebb07d52a
--- /dev/null
+++ b/drivers/soc/ixp4xx/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_IXP4XX_QMGR) += ixp4xx-qmgr.o
+obj-$(CONFIG_IXP4XX_NPE) += ixp4xx-npe.o
diff --git a/drivers/soc/ixp4xx/ixp4xx-npe.c b/drivers/soc/ixp4xx/ixp4xx-npe.c
new file mode 100644
index 0000000000..5be9988f30
--- /dev/null
+++ b/drivers/soc/ixp4xx/ixp4xx-npe.c
@@ -0,0 +1,781 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel IXP4xx Network Processor Engine driver for Linux
+ *
+ * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * The code is based on publicly available information:
+ * - Intel IXP4xx Developer's Manual and other e-papers
+ * - Intel IXP400 Access Library Software (BSD license)
+ * - previous works by Christian Hohnstaedt <chohnstaedt@innominate.com>
+ * Thanks, Christian.
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/soc/ixp4xx/npe.h>
+#include <linux/soc/ixp4xx/cpu.h>
+
+#define DEBUG_MSG 0
+#define DEBUG_FW 0
+
+#define NPE_COUNT 3
+#define MAX_RETRIES 1000 /* microseconds */
+#define NPE_42X_DATA_SIZE 0x800 /* in dwords */
+#define NPE_46X_DATA_SIZE 0x1000
+#define NPE_A_42X_INSTR_SIZE 0x1000
+#define NPE_B_AND_C_42X_INSTR_SIZE 0x800
+#define NPE_46X_INSTR_SIZE 0x1000
+#define REGS_SIZE 0x1000
+
+#define NPE_PHYS_REG 32
+
+#define FW_MAGIC 0xFEEDF00D
+#define FW_BLOCK_TYPE_INSTR 0x0
+#define FW_BLOCK_TYPE_DATA 0x1
+#define FW_BLOCK_TYPE_EOF 0xF
+
+/* NPE exec status (read) and command (write) */
+#define CMD_NPE_STEP 0x01
+#define CMD_NPE_START 0x02
+#define CMD_NPE_STOP 0x03
+#define CMD_NPE_CLR_PIPE 0x04
+#define CMD_CLR_PROFILE_CNT 0x0C
+#define CMD_RD_INS_MEM 0x10 /* instruction memory */
+#define CMD_WR_INS_MEM 0x11
+#define CMD_RD_DATA_MEM 0x12 /* data memory */
+#define CMD_WR_DATA_MEM 0x13
+#define CMD_RD_ECS_REG 0x14 /* exec access register */
+#define CMD_WR_ECS_REG 0x15
+
+#define STAT_RUN 0x80000000
+#define STAT_STOP 0x40000000
+#define STAT_CLEAR 0x20000000
+#define STAT_ECS_K 0x00800000 /* pipeline clean */
+
+#define NPE_STEVT 0x1B
+#define NPE_STARTPC 0x1C
+#define NPE_REGMAP 0x1E
+#define NPE_CINDEX 0x1F
+
+#define INSTR_WR_REG_SHORT 0x0000C000
+#define INSTR_WR_REG_BYTE 0x00004000
+#define INSTR_RD_FIFO 0x0F888220
+#define INSTR_RESET_MBOX 0x0FAC8210
+
+#define ECS_BG_CTXT_REG_0 0x00 /* Background Executing Context */
+#define ECS_BG_CTXT_REG_1 0x01 /* Stack level */
+#define ECS_BG_CTXT_REG_2 0x02
+#define ECS_PRI_1_CTXT_REG_0 0x04 /* Priority 1 Executing Context */
+#define ECS_PRI_1_CTXT_REG_1 0x05 /* Stack level */
+#define ECS_PRI_1_CTXT_REG_2 0x06
+#define ECS_PRI_2_CTXT_REG_0 0x08 /* Priority 2 Executing Context */
+#define ECS_PRI_2_CTXT_REG_1 0x09 /* Stack level */
+#define ECS_PRI_2_CTXT_REG_2 0x0A
+#define ECS_DBG_CTXT_REG_0 0x0C /* Debug Executing Context */
+#define ECS_DBG_CTXT_REG_1 0x0D /* Stack level */
+#define ECS_DBG_CTXT_REG_2 0x0E
+#define ECS_INSTRUCT_REG 0x11 /* NPE Instruction Register */
+
+#define ECS_REG_0_ACTIVE 0x80000000 /* all levels */
+#define ECS_REG_0_NEXTPC_MASK 0x1FFF0000 /* BG/PRI1/PRI2 levels */
+#define ECS_REG_0_LDUR_BITS 8
+#define ECS_REG_0_LDUR_MASK 0x00000700 /* all levels */
+#define ECS_REG_1_CCTXT_BITS 16
+#define ECS_REG_1_CCTXT_MASK 0x000F0000 /* all levels */
+#define ECS_REG_1_SELCTXT_BITS 0
+#define ECS_REG_1_SELCTXT_MASK 0x0000000F /* all levels */
+#define ECS_DBG_REG_2_IF 0x00100000 /* debug level */
+#define ECS_DBG_REG_2_IE 0x00080000 /* debug level */
+
+/* NPE watchpoint_fifo register bit */
+#define WFIFO_VALID 0x80000000
+
+/* NPE messaging_status register bit definitions */
+#define MSGSTAT_OFNE 0x00010000 /* OutFifoNotEmpty */
+#define MSGSTAT_IFNF 0x00020000 /* InFifoNotFull */
+#define MSGSTAT_OFNF 0x00040000 /* OutFifoNotFull */
+#define MSGSTAT_IFNE 0x00080000 /* InFifoNotEmpty */
+#define MSGSTAT_MBINT 0x00100000 /* Mailbox interrupt */
+#define MSGSTAT_IFINT 0x00200000 /* InFifo interrupt */
+#define MSGSTAT_OFINT 0x00400000 /* OutFifo interrupt */
+#define MSGSTAT_WFINT 0x00800000 /* WatchFifo interrupt */
+
+/* NPE messaging_control register bit definitions */
+#define MSGCTL_OUT_FIFO 0x00010000 /* enable output FIFO */
+#define MSGCTL_IN_FIFO 0x00020000 /* enable input FIFO */
+#define MSGCTL_OUT_FIFO_WRITE 0x01000000 /* enable FIFO + WRITE */
+#define MSGCTL_IN_FIFO_WRITE 0x02000000
+
+/* NPE mailbox_status value for reset */
+#define RESET_MBOX_STAT 0x0000F0F0
+
+#define NPE_A_FIRMWARE "NPE-A"
+#define NPE_B_FIRMWARE "NPE-B"
+#define NPE_C_FIRMWARE "NPE-C"
+
+const char *npe_names[] = { NPE_A_FIRMWARE, NPE_B_FIRMWARE, NPE_C_FIRMWARE };
+
+#define print_npe(pri, npe, fmt, ...) \
+ printk(pri "%s: " fmt, npe_name(npe), ## __VA_ARGS__)
+
+#if DEBUG_MSG
+#define debug_msg(npe, fmt, ...) \
+ print_npe(KERN_DEBUG, npe, fmt, ## __VA_ARGS__)
+#else
+#define debug_msg(npe, fmt, ...)
+#endif
+
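+/* Reset values written back to the Execution Context Stack registers at the end of npe_reset() */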
+static struct {
+ u32 reg, val;
+} ecs_reset[] = {
+ { ECS_BG_CTXT_REG_0, 0xA0000000 },
+ { ECS_BG_CTXT_REG_1, 0x01000000 },
+ { ECS_BG_CTXT_REG_2, 0x00008000 },
+ { ECS_PRI_1_CTXT_REG_0, 0x20000080 },
+ { ECS_PRI_1_CTXT_REG_1, 0x01000000 },
+ { ECS_PRI_1_CTXT_REG_2, 0x00008000 },
+ { ECS_PRI_2_CTXT_REG_0, 0x20000080 },
+ { ECS_PRI_2_CTXT_REG_1, 0x01000000 },
+ { ECS_PRI_2_CTXT_REG_2, 0x00008000 },
+ { ECS_DBG_CTXT_REG_0, 0x20000000 },
+ { ECS_DBG_CTXT_REG_1, 0x00000000 },
+ { ECS_DBG_CTXT_REG_2, 0x001E0000 },
+ { ECS_INSTRUCT_REG, 0x1003C00F },
+};
+
+static struct npe npe_tab[NPE_COUNT] = {
+ {
+ .id = 0,
+ }, {
+ .id = 1,
+ }, {
+ .id = 2,
+ }
+};
+
+int npe_running(struct npe *npe)
+{
+ return (__raw_readl(&npe->regs->exec_status_cmd) & STAT_RUN) != 0;
+}
+
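+/* Indirect NPE access: reads and writes go through the exec address/data/command registers */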
+static void npe_cmd_write(struct npe *npe, u32 addr, int cmd, u32 data)
+{
+ __raw_writel(data, &npe->regs->exec_data);
+ __raw_writel(addr, &npe->regs->exec_addr);
+ __raw_writel(cmd, &npe->regs->exec_status_cmd);
+}
+
+static u32 npe_cmd_read(struct npe *npe, u32 addr, int cmd)
+{
+ __raw_writel(addr, &npe->regs->exec_addr);
+ __raw_writel(cmd, &npe->regs->exec_status_cmd);
+ /* Introduce extra read cycles after issuing the read command to the NPE
+    so that we read the register after the NPE has updated it.
+    This overcomes a race condition between the XScale and the NPE */
+ __raw_readl(&npe->regs->exec_data);
+ __raw_readl(&npe->regs->exec_data);
+ return __raw_readl(&npe->regs->exec_data);
+}
+
+static void npe_clear_active(struct npe *npe, u32 reg)
+{
+ u32 val = npe_cmd_read(npe, reg, CMD_RD_ECS_REG);
+ npe_cmd_write(npe, reg, CMD_WR_ECS_REG, val & ~ECS_REG_0_ACTIVE);
+}
+
+static void npe_start(struct npe *npe)
+{
+ /* ensure only Background Context Stack Level is active */
+ npe_clear_active(npe, ECS_PRI_1_CTXT_REG_0);
+ npe_clear_active(npe, ECS_PRI_2_CTXT_REG_0);
+ npe_clear_active(npe, ECS_DBG_CTXT_REG_0);
+
+ __raw_writel(CMD_NPE_CLR_PIPE, &npe->regs->exec_status_cmd);
+ __raw_writel(CMD_NPE_START, &npe->regs->exec_status_cmd);
+}
+
+static void npe_stop(struct npe *npe)
+{
+ __raw_writel(CMD_NPE_STOP, &npe->regs->exec_status_cmd);
+ __raw_writel(CMD_NPE_CLR_PIPE, &npe->regs->exec_status_cmd); /*FIXME?*/
+}
+
+static int __must_check npe_debug_instr(struct npe *npe, u32 instr, u32 ctx,
+ u32 ldur)
+{
+ u32 wc;
+ int i;
+
+ /* set the Active bit, and the LDUR, in the debug level */
+ npe_cmd_write(npe, ECS_DBG_CTXT_REG_0, CMD_WR_ECS_REG,
+ ECS_REG_0_ACTIVE | (ldur << ECS_REG_0_LDUR_BITS));
+
+ /* set CCTXT at ECS DEBUG L3 to specify in which context to execute
+ the instruction, and set SELCTXT at ECS DEBUG Level to specify
+ which context store to access.
+ Debug ECS Level Reg 1 has form 0x000n000n, where n = context number
+ */
+ npe_cmd_write(npe, ECS_DBG_CTXT_REG_1, CMD_WR_ECS_REG,
+ (ctx << ECS_REG_1_CCTXT_BITS) |
+ (ctx << ECS_REG_1_SELCTXT_BITS));
+
+ /* clear the pipeline */
+ __raw_writel(CMD_NPE_CLR_PIPE, &npe->regs->exec_status_cmd);
+
+ /* load NPE instruction into the instruction register */
+ npe_cmd_write(npe, ECS_INSTRUCT_REG, CMD_WR_ECS_REG, instr);
+
+ /* we need this value later to wait for completion of NPE execution
+ step */
+ wc = __raw_readl(&npe->regs->watch_count);
+
+ /* issue a Step One command via the Execution Control register */
+ __raw_writel(CMD_NPE_STEP, &npe->regs->exec_status_cmd);
+
+ /* Watch Count register increments when NPE completes an instruction */
+ for (i = 0; i < MAX_RETRIES; i++) {
+ if (wc != __raw_readl(&npe->regs->watch_count))
+ return 0;
+ udelay(1);
+ }
+
+ print_npe(KERN_ERR, npe, "reset: npe_debug_instr(): timeout\n");
+ return -ETIMEDOUT;
+}
+
+static int __must_check npe_logical_reg_write8(struct npe *npe, u32 addr,
+ u8 val, u32 ctx)
+{
+ /* here we build the NPE assembler instruction: mov8 d0, #0 */
+ u32 instr = INSTR_WR_REG_BYTE | /* OpCode */
+ addr << 9 | /* base Operand */
+ (val & 0x1F) << 4 | /* lower 5 bits to immediate data */
+ (val & ~0x1F) << (18 - 5);/* higher 3 bits to CoProc instr. */
+ return npe_debug_instr(npe, instr, ctx, 1); /* execute it */
+}
+
+static int __must_check npe_logical_reg_write16(struct npe *npe, u32 addr,
+ u16 val, u32 ctx)
+{
+ /* here we build the NPE assembler instruction: mov16 d0, #0 */
+ u32 instr = INSTR_WR_REG_SHORT | /* OpCode */
+ addr << 9 | /* base Operand */
+ (val & 0x1F) << 4 | /* lower 5 bits to immediate data */
+ (val & ~0x1F) << (18 - 5);/* higher 11 bits to CoProc instr. */
+ return npe_debug_instr(npe, instr, ctx, 1); /* execute it */
+}
+
+static int __must_check npe_logical_reg_write32(struct npe *npe, u32 addr,
+ u32 val, u32 ctx)
+{
+ /* write in 16 bit steps first the high and then the low value */
+ if (npe_logical_reg_write16(npe, addr, val >> 16, ctx))
+ return -ETIMEDOUT;
+ return npe_logical_reg_write16(npe, addr + 2, val & 0xFFFF, ctx);
+}
+
+static int npe_reset(struct npe *npe)
+{
+ u32 reset_bit = (IXP4XX_FEATURE_RESET_NPEA << npe->id);
+ u32 val, ctl, exec_count, ctx_reg2;
+ int i;
+
+ ctl = (__raw_readl(&npe->regs->messaging_control) | 0x3F000000) &
+ 0x3F3FFFFF;
+
+ /* disable parity interrupt */
+ __raw_writel(ctl & 0x3F00FFFF, &npe->regs->messaging_control);
+
+ /* pre exec - debug instruction */
+ /* turn off the halt bit by clearing Execution Count register. */
+ exec_count = __raw_readl(&npe->regs->exec_count);
+ __raw_writel(0, &npe->regs->exec_count);
+ /* ensure that IF and IE are on (temporarily), so that we don't end up
+ stepping forever */
+ ctx_reg2 = npe_cmd_read(npe, ECS_DBG_CTXT_REG_2, CMD_RD_ECS_REG);
+ npe_cmd_write(npe, ECS_DBG_CTXT_REG_2, CMD_WR_ECS_REG, ctx_reg2 |
+ ECS_DBG_REG_2_IF | ECS_DBG_REG_2_IE);
+
+ /* clear the FIFOs */
+ while (__raw_readl(&npe->regs->watchpoint_fifo) & WFIFO_VALID)
+ ;
+ while (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_OFNE)
+ /* read from the outFIFO until empty */
+ print_npe(KERN_DEBUG, npe, "npe_reset: read FIFO = 0x%X\n",
+ __raw_readl(&npe->regs->in_out_fifo));
+
+ while (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_IFNE)
+ /* step execution of the NPE instruction to read the inFIFO using
+    the Debug Executing Context stack */
+ if (npe_debug_instr(npe, INSTR_RD_FIFO, 0, 0))
+ return -ETIMEDOUT;
+
+ /* reset the mailbox reg from the XScale side */
+ __raw_writel(RESET_MBOX_STAT, &npe->regs->mailbox_status);
+ /* from NPE side */
+ if (npe_debug_instr(npe, INSTR_RESET_MBOX, 0, 0))
+ return -ETIMEDOUT;
+
+ /* Reset the physical registers in the NPE register file */
+ for (val = 0; val < NPE_PHYS_REG; val++) {
+ if (npe_logical_reg_write16(npe, NPE_REGMAP, val >> 1, 0))
+ return -ETIMEDOUT;
+ /* address is either 0 or 4 */
+ if (npe_logical_reg_write32(npe, (val & 1) * 4, 0, 0))
+ return -ETIMEDOUT;
+ }
+
+ /* Reset the context store = each context's Context Store registers */
+
+ /* Context 0 has no STARTPC. Instead, this value is used to set NextPC
+ for Background ECS, to set where NPE starts executing code */
+ val = npe_cmd_read(npe, ECS_BG_CTXT_REG_0, CMD_RD_ECS_REG);
+ val &= ~ECS_REG_0_NEXTPC_MASK;
+ val |= (0 /* NextPC */ << 16) & ECS_REG_0_NEXTPC_MASK;
+ npe_cmd_write(npe, ECS_BG_CTXT_REG_0, CMD_WR_ECS_REG, val);
+
+ for (i = 0; i < 16; i++) {
+ if (i) { /* Context 0 has no STEVT nor STARTPC */
+ /* STEVT = off, 0x80 */
+ if (npe_logical_reg_write8(npe, NPE_STEVT, 0x80, i))
+ return -ETIMEDOUT;
+ if (npe_logical_reg_write16(npe, NPE_STARTPC, 0, i))
+ return -ETIMEDOUT;
+ }
+ /* REGMAP = d0->p0, d8->p2, d16->p4 */
+ if (npe_logical_reg_write16(npe, NPE_REGMAP, 0x820, i))
+ return -ETIMEDOUT;
+ if (npe_logical_reg_write8(npe, NPE_CINDEX, 0, i))
+ return -ETIMEDOUT;
+ }
+
+ /* post exec */
+ /* clear active bit in debug level */
+ npe_cmd_write(npe, ECS_DBG_CTXT_REG_0, CMD_WR_ECS_REG, 0);
+ /* clear the pipeline */
+ __raw_writel(CMD_NPE_CLR_PIPE, &npe->regs->exec_status_cmd);
+ /* restore previous values */
+ __raw_writel(exec_count, &npe->regs->exec_count);
+ npe_cmd_write(npe, ECS_DBG_CTXT_REG_2, CMD_WR_ECS_REG, ctx_reg2);
+
+ /* write reset values to Execution Context Stack registers */
+ for (val = 0; val < ARRAY_SIZE(ecs_reset); val++)
+ npe_cmd_write(npe, ecs_reset[val].reg, CMD_WR_ECS_REG,
+ ecs_reset[val].val);
+
+ /* clear the profile counter */
+ __raw_writel(CMD_CLR_PROFILE_CNT, &npe->regs->exec_status_cmd);
+
+ __raw_writel(0, &npe->regs->exec_count);
+ __raw_writel(0, &npe->regs->action_points[0]);
+ __raw_writel(0, &npe->regs->action_points[1]);
+ __raw_writel(0, &npe->regs->action_points[2]);
+ __raw_writel(0, &npe->regs->action_points[3]);
+ __raw_writel(0, &npe->regs->watch_count);
+
+ /*
+ * We need to work on cached values here because the register
+ * will read inverted but needs to be written non-inverted.
+ */
+ val = cpu_ixp4xx_features(npe->rmap);
+ /* reset the NPE */
+ regmap_write(npe->rmap, IXP4XX_EXP_CNFG2, val & ~reset_bit);
+ /* deassert reset */
+ regmap_write(npe->rmap, IXP4XX_EXP_CNFG2, val | reset_bit);
+
+ for (i = 0; i < MAX_RETRIES; i++) {
+ val = cpu_ixp4xx_features(npe->rmap);
+ if (val & reset_bit)
+ break; /* NPE is back alive */
+ udelay(1);
+ }
+ if (i == MAX_RETRIES)
+ return -ETIMEDOUT;
+
+ npe_stop(npe);
+
+ /* restore NPE configuration bus Control Register - parity settings */
+ __raw_writel(ctl, &npe->regs->messaging_control);
+ return 0;
+}
+
+
+int npe_send_message(struct npe *npe, const void *msg, const char *what)
+{
+ const u32 *send = msg;
+ int cycles = 0;
+
+ debug_msg(npe, "Trying to send message %s [%08X:%08X]\n",
+ what, send[0], send[1]);
+
+ if (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_IFNE) {
+ debug_msg(npe, "NPE input FIFO not empty\n");
+ return -EIO;
+ }
+
+ __raw_writel(send[0], &npe->regs->in_out_fifo);
+
+ if (!(__raw_readl(&npe->regs->messaging_status) & MSGSTAT_IFNF)) {
+ debug_msg(npe, "NPE input FIFO full\n");
+ return -EIO;
+ }
+
+ __raw_writel(send[1], &npe->regs->in_out_fifo);
+
+ while ((cycles < MAX_RETRIES) &&
+ (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_IFNE)) {
+ udelay(1);
+ cycles++;
+ }
+
+ if (cycles == MAX_RETRIES) {
+ debug_msg(npe, "Timeout sending message\n");
+ return -ETIMEDOUT;
+ }
+
+#if DEBUG_MSG > 1
+ debug_msg(npe, "Sending a message took %i cycles\n", cycles);
+#endif
+ return 0;
+}
+
+int npe_recv_message(struct npe *npe, void *msg, const char *what)
+{
+ u32 *recv = msg;
+ int cycles = 0, cnt = 0;
+
+ debug_msg(npe, "Trying to receive message %s\n", what);
+
+ while (cycles < MAX_RETRIES) {
+ if (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_OFNE) {
+ recv[cnt++] = __raw_readl(&npe->regs->in_out_fifo);
+ if (cnt == 2)
+ break;
+ } else {
+ udelay(1);
+ cycles++;
+ }
+ }
+
+ switch(cnt) {
+ case 1:
+ debug_msg(npe, "Received [%08X]\n", recv[0]);
+ break;
+ case 2:
+ debug_msg(npe, "Received [%08X:%08X]\n", recv[0], recv[1]);
+ break;
+ }
+
+ if (cycles == MAX_RETRIES) {
+ debug_msg(npe, "Timeout waiting for message\n");
+ return -ETIMEDOUT;
+ }
+
+#if DEBUG_MSG > 1
+ debug_msg(npe, "Receiving a message took %i cycles\n", cycles);
+#endif
+ return 0;
+}
+
+int npe_send_recv_message(struct npe *npe, void *msg, const char *what)
+{
+ int result;
+ u32 *send = msg, recv[2];
+
+ if ((result = npe_send_message(npe, msg, what)) != 0)
+ return result;
+ if ((result = npe_recv_message(npe, recv, what)) != 0)
+ return result;
+
+ if ((recv[0] != send[0]) || (recv[1] != send[1])) {
+ debug_msg(npe, "Message %s: unexpected message received\n",
+ what);
+ return -EIO;
+ }
+ return 0;
+}
+
+
+int npe_load_firmware(struct npe *npe, const char *name, struct device *dev)
+{
+ const struct firmware *fw_entry;
+
+ struct dl_block {
+ u32 type;
+ u32 offset;
+ } *blk;
+
+ struct dl_image {
+ u32 magic;
+ u32 id;
+ u32 size;
+ union {
+ DECLARE_FLEX_ARRAY(u32, data);
+ DECLARE_FLEX_ARRAY(struct dl_block, blocks);
+ };
+ } *image;
+
+ struct dl_codeblock {
+ u32 npe_addr;
+ u32 size;
+ u32 data[];
+ } *cb;
+
+ int i, j, err, data_size, instr_size, blocks, table_end;
+ u32 cmd;
+
+ if ((err = request_firmware(&fw_entry, name, dev)) != 0)
+ return err;
+
+ err = -EINVAL;
+ if (fw_entry->size < sizeof(struct dl_image)) {
+ print_npe(KERN_ERR, npe, "incomplete firmware file\n");
+ goto err;
+ }
+ image = (struct dl_image*)fw_entry->data;
+
+#if DEBUG_FW
+ print_npe(KERN_DEBUG, npe, "firmware: %08X %08X %08X (0x%X bytes)\n",
+ image->magic, image->id, image->size, image->size * 4);
+#endif
+
+ if (image->magic == swab32(FW_MAGIC)) { /* swapped file */
+ image->id = swab32(image->id);
+ image->size = swab32(image->size);
+ } else if (image->magic != FW_MAGIC) {
+ print_npe(KERN_ERR, npe, "bad firmware file magic: 0x%X\n",
+ image->magic);
+ goto err;
+ }
+ if ((image->size * 4 + sizeof(struct dl_image)) != fw_entry->size) {
+ print_npe(KERN_ERR, npe,
+ "inconsistent size of firmware file\n");
+ goto err;
+ }
+ if (((image->id >> 24) & 0xF /* NPE ID */) != npe->id) {
+ print_npe(KERN_ERR, npe, "firmware file NPE ID mismatch\n");
+ goto err;
+ }
+ if (image->magic == swab32(FW_MAGIC))
+ for (i = 0; i < image->size; i++)
+ image->data[i] = swab32(image->data[i]);
+
+ if (cpu_is_ixp42x() && ((image->id >> 28) & 0xF /* device ID */)) {
+ print_npe(KERN_INFO, npe, "IXP43x/IXP46x firmware ignored on "
+ "IXP42x\n");
+ goto err;
+ }
+
+ if (npe_running(npe)) {
+ print_npe(KERN_INFO, npe, "unable to load firmware, NPE is "
+ "already running\n");
+ err = -EBUSY;
+ goto err;
+ }
+#if 0
+ npe_stop(npe);
+ npe_reset(npe);
+#endif
+
+ print_npe(KERN_INFO, npe, "firmware functionality 0x%X, "
+ "revision 0x%X:%X\n", (image->id >> 16) & 0xFF,
+ (image->id >> 8) & 0xFF, image->id & 0xFF);
+
+ if (cpu_is_ixp42x()) {
+ if (!npe->id)
+ instr_size = NPE_A_42X_INSTR_SIZE;
+ else
+ instr_size = NPE_B_AND_C_42X_INSTR_SIZE;
+ data_size = NPE_42X_DATA_SIZE;
+ } else {
+ instr_size = NPE_46X_INSTR_SIZE;
+ data_size = NPE_46X_DATA_SIZE;
+ }
+
+ for (blocks = 0; blocks * sizeof(struct dl_block) / 4 < image->size;
+ blocks++)
+ if (image->blocks[blocks].type == FW_BLOCK_TYPE_EOF)
+ break;
+ if (blocks * sizeof(struct dl_block) / 4 >= image->size) {
+ print_npe(KERN_INFO, npe, "firmware EOF block marker not "
+ "found\n");
+ goto err;
+ }
+
+#if DEBUG_FW
+ print_npe(KERN_DEBUG, npe, "%i firmware blocks found\n", blocks);
+#endif
+
+ table_end = blocks * sizeof(struct dl_block) / 4 + 1 /* EOF marker */;
+ for (i = 0, blk = image->blocks; i < blocks; i++, blk++) {
+ if (blk->offset > image->size - sizeof(struct dl_codeblock) / 4
+ || blk->offset < table_end) {
+ print_npe(KERN_INFO, npe, "invalid offset 0x%X of "
+ "firmware block #%i\n", blk->offset, i);
+ goto err;
+ }
+
+ cb = (struct dl_codeblock*)&image->data[blk->offset];
+ if (blk->type == FW_BLOCK_TYPE_INSTR) {
+ if (cb->npe_addr + cb->size > instr_size)
+ goto too_big;
+ cmd = CMD_WR_INS_MEM;
+ } else if (blk->type == FW_BLOCK_TYPE_DATA) {
+ if (cb->npe_addr + cb->size > data_size)
+ goto too_big;
+ cmd = CMD_WR_DATA_MEM;
+ } else {
+ print_npe(KERN_INFO, npe, "invalid firmware block #%i "
+ "type 0x%X\n", i, blk->type);
+ goto err;
+ }
+ if (blk->offset + sizeof(*cb) / 4 + cb->size > image->size) {
+ print_npe(KERN_INFO, npe, "firmware block #%i doesn't "
+ "fit in firmware image: type %c, start 0x%X,"
+ " length 0x%X\n", i,
+ blk->type == FW_BLOCK_TYPE_INSTR ? 'I' : 'D',
+ cb->npe_addr, cb->size);
+ goto err;
+ }
+
+ for (j = 0; j < cb->size; j++)
+ npe_cmd_write(npe, cb->npe_addr + j, cmd, cb->data[j]);
+ }
+
+ npe_start(npe);
+ if (!npe_running(npe))
+ print_npe(KERN_ERR, npe, "unable to start\n");
+ release_firmware(fw_entry);
+ return 0;
+
+too_big:
+ print_npe(KERN_INFO, npe, "firmware block #%i doesn't fit in NPE "
+ "memory: type %c, start 0x%X, length 0x%X\n", i,
+ blk->type == FW_BLOCK_TYPE_INSTR ? 'I' : 'D',
+ cb->npe_addr, cb->size);
+err:
+ release_firmware(fw_entry);
+ return err;
+}
+
+
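+/* Return a reference to NPE 'id' if it probed successfully, NULL otherwise; pair with npe_release() */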
+struct npe *npe_request(unsigned id)
+{
+ if (id < NPE_COUNT)
+ if (npe_tab[id].valid)
+ if (try_module_get(THIS_MODULE))
+ return &npe_tab[id];
+ return NULL;
+}
+
+void npe_release(struct npe *npe)
+{
+ module_put(THIS_MODULE);
+}
+
+static int ixp4xx_npe_probe(struct platform_device *pdev)
+{
+ int i, found = 0;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct resource *res;
+ struct regmap *rmap;
+ u32 val;
+
+ /* This system has only one syscon, so fetch it */
+ rmap = syscon_regmap_lookup_by_compatible("syscon");
+ if (IS_ERR(rmap))
+ return dev_err_probe(dev, PTR_ERR(rmap),
+ "failed to look up syscon\n");
+
+ for (i = 0; i < NPE_COUNT; i++) {
+ struct npe *npe = &npe_tab[i];
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ if (!res)
+ return -ENODEV;
+
+ val = cpu_ixp4xx_features(rmap);
+
+ if (!(val & (IXP4XX_FEATURE_RESET_NPEA << i))) {
+ dev_info(dev, "NPE%d at %pR not available\n",
+ i, res);
+ continue; /* NPE already disabled or not present */
+ }
+ npe->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(npe->regs))
+ return PTR_ERR(npe->regs);
+ npe->rmap = rmap;
+
+ if (npe_reset(npe)) {
+ dev_info(dev, "NPE%d at %pR does not reset\n",
+ i, res);
+ continue;
+ }
+ npe->valid = 1;
+ dev_info(dev, "NPE%d at %pR registered\n", i, res);
+ found++;
+ }
+
+ if (!found)
+ return -ENODEV;
+
+ /* Spawn crypto subdevice if using device tree */
+ if (IS_ENABLED(CONFIG_OF) && np)
+ devm_of_platform_populate(dev);
+
+ return 0;
+}
+
+static int ixp4xx_npe_remove(struct platform_device *pdev)
+{
+ int i;
+
+ for (i = 0; i < NPE_COUNT; i++)
+ if (npe_tab[i].regs) {
+ npe_reset(&npe_tab[i]);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id ixp4xx_npe_of_match[] = {
+ {
+ .compatible = "intel,ixp4xx-network-processing-engine",
+ },
+ {},
+};
+
+static struct platform_driver ixp4xx_npe_driver = {
+ .driver = {
+ .name = "ixp4xx-npe",
+ .of_match_table = ixp4xx_npe_of_match,
+ },
+ .probe = ixp4xx_npe_probe,
+ .remove = ixp4xx_npe_remove,
+};
+module_platform_driver(ixp4xx_npe_driver);
+
+MODULE_AUTHOR("Krzysztof Halasa");
+MODULE_LICENSE("GPL v2");
+MODULE_FIRMWARE(NPE_A_FIRMWARE);
+MODULE_FIRMWARE(NPE_B_FIRMWARE);
+MODULE_FIRMWARE(NPE_C_FIRMWARE);
+
+EXPORT_SYMBOL(npe_names);
+EXPORT_SYMBOL(npe_running);
+EXPORT_SYMBOL(npe_request);
+EXPORT_SYMBOL(npe_release);
+EXPORT_SYMBOL(npe_load_firmware);
+EXPORT_SYMBOL(npe_send_message);
+EXPORT_SYMBOL(npe_recv_message);
+EXPORT_SYMBOL(npe_send_recv_message);
diff --git a/drivers/soc/ixp4xx/ixp4xx-qmgr.c b/drivers/soc/ixp4xx/ixp4xx-qmgr.c
new file mode 100644
index 0000000000..291086bb93
--- /dev/null
+++ b/drivers/soc/ixp4xx/ixp4xx-qmgr.c
@@ -0,0 +1,487 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel IXP4xx Queue Manager driver for Linux
+ *
+ * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
+ */
+
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/soc/ixp4xx/qmgr.h>
+#include <linux/soc/ixp4xx/cpu.h>
+
+static struct qmgr_regs __iomem *qmgr_regs;
+static int qmgr_irq_1;
+static int qmgr_irq_2;
+static spinlock_t qmgr_lock;
+static u32 used_sram_bitmap[4]; /* 128 16-dword pages */
+static void (*irq_handlers[QUEUES])(void *pdev);
+static void *irq_pdevs[QUEUES];
+
+#if DEBUG_QMGR
+char qmgr_queue_descs[QUEUES][32];
+#endif
+
+void qmgr_put_entry(unsigned int queue, u32 val)
+{
+#if DEBUG_QMGR
+ BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */
+
+ printk(KERN_DEBUG "Queue %s(%i) put %X\n",
+ qmgr_queue_descs[queue], queue, val);
+#endif
+ __raw_writel(val, &qmgr_regs->acc[queue][0]);
+}
+
+u32 qmgr_get_entry(unsigned int queue)
+{
+ u32 val;
+ val = __raw_readl(&qmgr_regs->acc[queue][0]);
+#if DEBUG_QMGR
+ BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */
+
+ printk(KERN_DEBUG "Queue %s(%i) get %X\n",
+ qmgr_queue_descs[queue], queue, val);
+#endif
+ return val;
+}
+
+static int __qmgr_get_stat1(unsigned int queue)
+{
+ return (__raw_readl(&qmgr_regs->stat1[queue >> 3])
+ >> ((queue & 7) << 2)) & 0xF;
+}
+
+static int __qmgr_get_stat2(unsigned int queue)
+{
+ BUG_ON(queue >= HALF_QUEUES);
+ return (__raw_readl(&qmgr_regs->stat2[queue >> 4])
+ >> ((queue & 0xF) << 1)) & 0x3;
+}
+
+/**
+ * qmgr_stat_empty() - checks if a hardware queue is empty
+ * @queue: queue number
+ *
+ * Returns non-zero value if the queue is empty.
+ */
+int qmgr_stat_empty(unsigned int queue)
+{
+ BUG_ON(queue >= HALF_QUEUES);
+ return __qmgr_get_stat1(queue) & QUEUE_STAT1_EMPTY;
+}
+
+/**
+ * qmgr_stat_below_low_watermark() - checks if a queue is below low watermark
+ * @queue: queue number
+ *
+ * Returns non-zero value if the queue is below low watermark.
+ */
+int qmgr_stat_below_low_watermark(unsigned int queue)
+{
+ if (queue >= HALF_QUEUES)
+ return (__raw_readl(&qmgr_regs->statne_h) >>
+ (queue - HALF_QUEUES)) & 0x01;
+ return __qmgr_get_stat1(queue) & QUEUE_STAT1_NEARLY_EMPTY;
+}
+
+/**
+ * qmgr_stat_full() - checks if a hardware queue is full
+ * @queue: queue number
+ *
+ * Returns non-zero value if the queue is full.
+ */
+int qmgr_stat_full(unsigned int queue)
+{
+ if (queue >= HALF_QUEUES)
+ return (__raw_readl(&qmgr_regs->statf_h) >>
+ (queue - HALF_QUEUES)) & 0x01;
+ return __qmgr_get_stat1(queue) & QUEUE_STAT1_FULL;
+}
+
+/**
+ * qmgr_stat_overflow() - checks if a hardware queue experienced overflow
+ * @queue: queue number
+ *
+ * Returns non-zero value if the queue experienced overflow.
+ */
+int qmgr_stat_overflow(unsigned int queue)
+{
+ return __qmgr_get_stat2(queue) & QUEUE_STAT2_OVERFLOW;
+}
+
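+/* Select a queue's IRQ trigger condition and register its handler; only queues 0-31 have a configurable source */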
+void qmgr_set_irq(unsigned int queue, int src,
+ void (*handler)(void *pdev), void *pdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&qmgr_lock, flags);
+ if (queue < HALF_QUEUES) {
+ u32 __iomem *reg;
+ int bit;
+ BUG_ON(src > QUEUE_IRQ_SRC_NOT_FULL);
+ reg = &qmgr_regs->irqsrc[queue >> 3]; /* 8 queues per u32 */
+ bit = (queue % 8) * 4; /* 3 bits + 1 reserved bit per queue */
+ __raw_writel((__raw_readl(reg) & ~(7 << bit)) | (src << bit),
+ reg);
+ } else
+ /* IRQ source for queues 32-63 is fixed */
+ BUG_ON(src != QUEUE_IRQ_SRC_NOT_NEARLY_EMPTY);
+
+ irq_handlers[queue] = handler;
+ irq_pdevs[queue] = pdev;
+ spin_unlock_irqrestore(&qmgr_lock, flags);
+}
+
+
+static irqreturn_t qmgr_irq1_a0(int irq, void *pdev)
+{
+ int i, ret = 0;
+ u32 en_bitmap, src, stat;
+
+ /* ACK - it may clear any bits so don't rely on it */
+ __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[0]);
+
+ en_bitmap = __raw_readl(&qmgr_regs->irqen[0]);
+ while (en_bitmap) {
+ i = __fls(en_bitmap); /* number of the last "low" queue */
+ en_bitmap &= ~BIT(i);
+ src = __raw_readl(&qmgr_regs->irqsrc[i >> 3]);
+ stat = __raw_readl(&qmgr_regs->stat1[i >> 3]);
+ if (src & 4) /* the IRQ condition is inverted */
+ stat = ~stat;
+ if (stat & BIT(src & 3)) {
+ irq_handlers[i](irq_pdevs[i]);
+ ret = IRQ_HANDLED;
+ }
+ }
+ return ret;
+}
+
+
+static irqreturn_t qmgr_irq2_a0(int irq, void *pdev)
+{
+ int i, ret = 0;
+ u32 req_bitmap;
+
+ /* ACK - it may clear any bits so don't rely on it */
+ __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[1]);
+
+ req_bitmap = __raw_readl(&qmgr_regs->irqen[1]) &
+ __raw_readl(&qmgr_regs->statne_h);
+ while (req_bitmap) {
+ i = __fls(req_bitmap); /* number of the last "high" queue */
+ req_bitmap &= ~BIT(i);
+ irq_handlers[HALF_QUEUES + i](irq_pdevs[HALF_QUEUES + i]);
+ ret = IRQ_HANDLED;
+ }
+ return ret;
+}
+
+
+static irqreturn_t qmgr_irq(int irq, void *pdev)
+{
+ int i, half = (irq == qmgr_irq_1 ? 0 : 1);
+ u32 req_bitmap = __raw_readl(&qmgr_regs->irqstat[half]);
+
+ if (!req_bitmap)
+ return 0;
+ __raw_writel(req_bitmap, &qmgr_regs->irqstat[half]); /* ACK */
+
+ while (req_bitmap) {
+ i = __fls(req_bitmap); /* number of the last queue */
+ req_bitmap &= ~BIT(i);
+ i += half * HALF_QUEUES;
+ irq_handlers[i](irq_pdevs[i]);
+ }
+ return IRQ_HANDLED;
+}
+
+
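+/* Each irqen/irqstat register covers one half (32 queues) of the queue space */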
+void qmgr_enable_irq(unsigned int queue)
+{
+ unsigned long flags;
+ int half = queue / 32;
+ u32 mask = 1 << (queue & (HALF_QUEUES - 1));
+
+ spin_lock_irqsave(&qmgr_lock, flags);
+ __raw_writel(__raw_readl(&qmgr_regs->irqen[half]) | mask,
+ &qmgr_regs->irqen[half]);
+ spin_unlock_irqrestore(&qmgr_lock, flags);
+}
+
+void qmgr_disable_irq(unsigned int queue)
+{
+ unsigned long flags;
+ int half = queue / 32;
+ u32 mask = 1 << (queue & (HALF_QUEUES - 1));
+
+ spin_lock_irqsave(&qmgr_lock, flags);
+ __raw_writel(__raw_readl(&qmgr_regs->irqen[half]) & ~mask,
+ &qmgr_regs->irqen[half]);
+ __raw_writel(mask, &qmgr_regs->irqstat[half]); /* clear */
+ spin_unlock_irqrestore(&qmgr_lock, flags);
+}
+
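+/* Advance the 128-bit SRAM allocation mask by one 16-dword page */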
+static inline void shift_mask(u32 *mask)
+{
+ mask[3] = mask[3] << 1 | mask[2] >> 31;
+ mask[2] = mask[2] << 1 | mask[1] >> 31;
+ mask[1] = mask[1] << 1 | mask[0] >> 31;
+ mask[0] <<= 1;
+}
+
+#if DEBUG_QMGR
+int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
+ unsigned int nearly_empty_watermark,
+ unsigned int nearly_full_watermark,
+ const char *desc_format, const char* name)
+#else
+int __qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
+ unsigned int nearly_empty_watermark,
+ unsigned int nearly_full_watermark)
+#endif
+{
+ u32 cfg, addr = 0, mask[4]; /* in 16-dwords */
+ int err;
+
+ BUG_ON(queue >= QUEUES);
+
+ if ((nearly_empty_watermark | nearly_full_watermark) & ~7)
+ return -EINVAL;
+
+ switch (len) {
+ case 16:
+ cfg = 0 << 24;
+ mask[0] = 0x1;
+ break;
+ case 32:
+ cfg = 1 << 24;
+ mask[0] = 0x3;
+ break;
+ case 64:
+ cfg = 2 << 24;
+ mask[0] = 0xF;
+ break;
+ case 128:
+ cfg = 3 << 24;
+ mask[0] = 0xFF;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cfg |= nearly_empty_watermark << 26;
+ cfg |= nearly_full_watermark << 29;
+ len /= 16; /* in 16-dwords: 1, 2, 4 or 8 */
+ mask[1] = mask[2] = mask[3] = 0;
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENODEV;
+
+ spin_lock_irq(&qmgr_lock);
+ if (__raw_readl(&qmgr_regs->sram[queue])) {
+ err = -EBUSY;
+ goto err;
+ }
+
+ while (1) {
+ if (!(used_sram_bitmap[0] & mask[0]) &&
+ !(used_sram_bitmap[1] & mask[1]) &&
+ !(used_sram_bitmap[2] & mask[2]) &&
+ !(used_sram_bitmap[3] & mask[3]))
+ break; /* found free space */
+
+ addr++;
+ shift_mask(mask);
+ if (addr + len > ARRAY_SIZE(qmgr_regs->sram)) {
+ printk(KERN_ERR "qmgr: no free SRAM space for"
+ " queue %i\n", queue);
+ err = -ENOMEM;
+ goto err;
+ }
+ }
+
+ used_sram_bitmap[0] |= mask[0];
+ used_sram_bitmap[1] |= mask[1];
+ used_sram_bitmap[2] |= mask[2];
+ used_sram_bitmap[3] |= mask[3];
+ __raw_writel(cfg | (addr << 14), &qmgr_regs->sram[queue]);
+#if DEBUG_QMGR
+ snprintf(qmgr_queue_descs[queue], sizeof(qmgr_queue_descs[0]),
+ desc_format, name);
+ printk(KERN_DEBUG "qmgr: requested queue %s(%i) addr = 0x%02X\n",
+ qmgr_queue_descs[queue], queue, addr);
+#endif
+ spin_unlock_irq(&qmgr_lock);
+ return 0;
+
+err:
+ spin_unlock_irq(&qmgr_lock);
+ module_put(THIS_MODULE);
+ return err;
+}
+
+void qmgr_release_queue(unsigned int queue)
+{
+ u32 cfg, addr, mask[4];
+
+ BUG_ON(queue >= QUEUES); /* not in valid range */
+
+ spin_lock_irq(&qmgr_lock);
+ cfg = __raw_readl(&qmgr_regs->sram[queue]);
+ addr = (cfg >> 14) & 0xFF;
+
+ BUG_ON(!addr); /* not requested */
+
+ switch ((cfg >> 24) & 3) {
+ case 0: mask[0] = 0x1; break;
+ case 1: mask[0] = 0x3; break;
+ case 2: mask[0] = 0xF; break;
+ case 3: mask[0] = 0xFF; break;
+ }
+
+ mask[1] = mask[2] = mask[3] = 0;
+
+ while (addr--)
+ shift_mask(mask);
+
+#if DEBUG_QMGR
+ printk(KERN_DEBUG "qmgr: releasing queue %s(%i)\n",
+ qmgr_queue_descs[queue], queue);
+ qmgr_queue_descs[queue][0] = '\x0';
+#endif
+
+ while ((addr = qmgr_get_entry(queue)))
+ printk(KERN_ERR "qmgr: released queue %i not empty: 0x%08X\n",
+ queue, addr);
+
+ __raw_writel(0, &qmgr_regs->sram[queue]);
+
+ used_sram_bitmap[0] &= ~mask[0];
+ used_sram_bitmap[1] &= ~mask[1];
+ used_sram_bitmap[2] &= ~mask[2];
+ used_sram_bitmap[3] &= ~mask[3];
+ irq_handlers[queue] = NULL; /* catch IRQ bugs */
+ spin_unlock_irq(&qmgr_lock);
+
+ module_put(THIS_MODULE);
+}
+
+static int ixp4xx_qmgr_probe(struct platform_device *pdev)
+{
+ int i, err;
+ irq_handler_t handler1, handler2;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int irq1, irq2;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+ qmgr_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(qmgr_regs))
+ return PTR_ERR(qmgr_regs);
+
+ irq1 = platform_get_irq(pdev, 0);
+ if (irq1 <= 0)
+ return irq1 ? irq1 : -EINVAL;
+ qmgr_irq_1 = irq1;
+ irq2 = platform_get_irq(pdev, 1);
+ if (irq2 <= 0)
+ return irq2 ? irq2 : -EINVAL;
+ qmgr_irq_2 = irq2;
+
+ /* reset qmgr registers */
+ for (i = 0; i < 4; i++) {
+ __raw_writel(0x33333333, &qmgr_regs->stat1[i]);
+ __raw_writel(0, &qmgr_regs->irqsrc[i]);
+ }
+ for (i = 0; i < 2; i++) {
+ __raw_writel(0, &qmgr_regs->stat2[i]);
+ __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[i]); /* clear */
+ __raw_writel(0, &qmgr_regs->irqen[i]);
+ }
+
+ __raw_writel(0xFFFFFFFF, &qmgr_regs->statne_h);
+ __raw_writel(0, &qmgr_regs->statf_h);
+
+ for (i = 0; i < QUEUES; i++)
+ __raw_writel(0, &qmgr_regs->sram[i]);
+
+ if (cpu_is_ixp42x_rev_a0()) {
+ handler1 = qmgr_irq1_a0;
+ handler2 = qmgr_irq2_a0;
+ } else
+ handler1 = handler2 = qmgr_irq;
+
+ err = devm_request_irq(dev, irq1, handler1, 0, "IXP4xx Queue Manager",
+ NULL);
+ if (err) {
+ dev_err(dev, "failed to request IRQ%i (%i)\n",
+ irq1, err);
+ return err;
+ }
+
+ err = devm_request_irq(dev, irq2, handler2, 0, "IXP4xx Queue Manager",
+ NULL);
+ if (err) {
+ dev_err(dev, "failed to request IRQ%i (%i)\n",
+ irq2, err);
+ return err;
+ }
+
+ used_sram_bitmap[0] = 0xF; /* 4 first pages reserved for config */
+ spin_lock_init(&qmgr_lock);
+
+ dev_info(dev, "IXP4xx Queue Manager initialized.\n");
+ return 0;
+}
+
+static int ixp4xx_qmgr_remove(struct platform_device *pdev)
+{
+ synchronize_irq(qmgr_irq_1);
+ synchronize_irq(qmgr_irq_2);
+ return 0;
+}
+
+static const struct of_device_id ixp4xx_qmgr_of_match[] = {
+ {
+ .compatible = "intel,ixp4xx-ahb-queue-manager",
+ },
+ {},
+};
+
+static struct platform_driver ixp4xx_qmgr_driver = {
+ .driver = {
+ .name = "ixp4xx-qmgr",
+ .of_match_table = ixp4xx_qmgr_of_match,
+ },
+ .probe = ixp4xx_qmgr_probe,
+ .remove = ixp4xx_qmgr_remove,
+};
+module_platform_driver(ixp4xx_qmgr_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Krzysztof Halasa");
+
+EXPORT_SYMBOL(qmgr_put_entry);
+EXPORT_SYMBOL(qmgr_get_entry);
+EXPORT_SYMBOL(qmgr_stat_empty);
+EXPORT_SYMBOL(qmgr_stat_below_low_watermark);
+EXPORT_SYMBOL(qmgr_stat_full);
+EXPORT_SYMBOL(qmgr_stat_overflow);
+EXPORT_SYMBOL(qmgr_set_irq);
+EXPORT_SYMBOL(qmgr_enable_irq);
+EXPORT_SYMBOL(qmgr_disable_irq);
+#if DEBUG_QMGR
+EXPORT_SYMBOL(qmgr_queue_descs);
+EXPORT_SYMBOL(qmgr_request_queue);
+#else
+EXPORT_SYMBOL(__qmgr_request_queue);
+#endif
+EXPORT_SYMBOL(qmgr_release_queue);
diff --git a/drivers/soc/lantiq/Makefile b/drivers/soc/lantiq/Makefile
new file mode 100644
index 0000000000..976f42f806
--- /dev/null
+++ b/drivers/soc/lantiq/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y += fpi-bus.o
diff --git a/drivers/soc/lantiq/fpi-bus.c b/drivers/soc/lantiq/fpi-bus.c
new file mode 100644
index 0000000000..dff1375851
--- /dev/null
+++ b/drivers/soc/lantiq/fpi-bus.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2011-2015 John Crispin <blogic@phrozen.org>
+ * Copyright (C) 2015 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ * Copyright (C) 2017 Hauke Mehrtens <hauke@hauke-m.de>
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+#include <lantiq_soc.h>
+
+#define XBAR_ALWAYS_LAST 0x430
+#define XBAR_FPI_BURST_EN BIT(1)
+#define XBAR_AHB_BURST_EN BIT(2)
+
+#define RCU_VR9_BE_AHB1S 0x00000008
+
+static int ltq_fpi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct regmap *rcu_regmap;
+ void __iomem *xbar_membase;
+ u32 rcu_ahb_endianness_reg_offset;
+ int ret;
+
+ xbar_membase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(xbar_membase))
+ return PTR_ERR(xbar_membase);
+
+ /* Look up the RCU syscon used to configure AHB endianness */
+ rcu_regmap = syscon_regmap_lookup_by_phandle(np, "lantiq,rcu");
+ if (IS_ERR(rcu_regmap))
+ return PTR_ERR(rcu_regmap);
+
+ ret = device_property_read_u32(dev, "lantiq,offset-endianness",
+ &rcu_ahb_endianness_reg_offset);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to get RCU reg offset\n");
+ return ret;
+ }
+
+ ret = regmap_update_bits(rcu_regmap, rcu_ahb_endianness_reg_offset,
+ RCU_VR9_BE_AHB1S, RCU_VR9_BE_AHB1S);
+ if (ret) {
+ dev_warn(&pdev->dev,
+ "Failed to configure RCU AHB endianness\n");
+ return ret;
+ }
+
+ /* disable fpi burst */
+ ltq_w32_mask(XBAR_FPI_BURST_EN, 0, xbar_membase + XBAR_ALWAYS_LAST);
+
+ return of_platform_populate(dev->of_node, NULL, NULL, dev);
+}
+
+static const struct of_device_id ltq_fpi_match[] = {
+ { .compatible = "lantiq,xrx200-fpi" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ltq_fpi_match);
+
+static struct platform_driver ltq_fpi_driver = {
+ .probe = ltq_fpi_probe,
+ .driver = {
+ .name = "fpi-xway",
+ .of_match_table = ltq_fpi_match,
+ },
+};
+
+module_platform_driver(ltq_fpi_driver);
+
+MODULE_DESCRIPTION("Lantiq FPI bus driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/litex/Kconfig b/drivers/soc/litex/Kconfig
new file mode 100644
index 0000000000..e6ba3573a7
--- /dev/null
+++ b/drivers/soc/litex/Kconfig
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "Enable LiteX SoC Builder specific drivers"
+
+config LITEX
+ bool
+
+config LITEX_SOC_CONTROLLER
+ tristate "Enable LiteX SoC Controller driver"
+ depends on OF || COMPILE_TEST
+ depends on HAS_IOMEM
+ select LITEX
+ help
+ This option enables the SoC Controller Driver which verifies
+ LiteX CSR access and provides common litex_[read|write]*
+ accessors.
+ All drivers that use functions from litex.h must depend on
+ LITEX.
+
+endmenu
diff --git a/drivers/soc/litex/Makefile b/drivers/soc/litex/Makefile
new file mode 100644
index 0000000000..98ff7325b1
--- /dev/null
+++ b/drivers/soc/litex/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_LITEX_SOC_CONTROLLER) += litex_soc_ctrl.o
diff --git a/drivers/soc/litex/litex_soc_ctrl.c b/drivers/soc/litex/litex_soc_ctrl.c
new file mode 100644
index 0000000000..f75790091d
--- /dev/null
+++ b/drivers/soc/litex/litex_soc_ctrl.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * LiteX SoC Controller Driver
+ *
+ * Copyright (C) 2020 Antmicro <www.antmicro.com>
+ *
+ */
+
+#include <linux/litex.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/reboot.h>
+
+/* reset register located at the base address */
+#define RESET_REG_OFF 0x00
+#define RESET_REG_VALUE 0x00000001
+
+#define SCRATCH_REG_OFF 0x04
+#define SCRATCH_REG_VALUE 0x12345678
+#define SCRATCH_TEST_VALUE 0xdeadbeef
+
+/*
+ * Check LiteX CSR read/write access
+ *
+ * This function reads and writes a scratch register in order to verify if CSR
+ * access works.
+ *
+ * In case any problems are detected, the driver should panic.
+ *
+ * Access to the LiteX CSR is, by design, done in CPU native endianness.
+ * The driver should not dynamically configure access functions when
+ * an endianness mismatch is detected. Such a situation indicates problems in
+ * the soft SoC design and should be solved at the LiteX generator level,
+ * not in the software.
+ */
+static int litex_check_csr_access(void __iomem *reg_addr)
+{
+ unsigned long reg;
+
+ reg = litex_read32(reg_addr + SCRATCH_REG_OFF);
+
+ if (reg != SCRATCH_REG_VALUE) {
+ panic("Scratch register read error - the system is probably broken! Expected: 0x%x but got: 0x%lx",
+ SCRATCH_REG_VALUE, reg);
+ return -EINVAL;
+ }
+
+ litex_write32(reg_addr + SCRATCH_REG_OFF, SCRATCH_TEST_VALUE);
+ reg = litex_read32(reg_addr + SCRATCH_REG_OFF);
+
+ if (reg != SCRATCH_TEST_VALUE) {
+ panic("Scratch register write error - the system is probably broken! Expected: 0x%x but got: 0x%lx",
+ SCRATCH_TEST_VALUE, reg);
+ return -EINVAL;
+ }
+
+ /* restore original value of the SCRATCH register */
+ litex_write32(reg_addr + SCRATCH_REG_OFF, SCRATCH_REG_VALUE);
+
+ pr_info("LiteX SoC Controller driver initialized");
+
+ return 0;
+}
+
+struct litex_soc_ctrl_device {
+ void __iomem *base;
+ struct notifier_block reset_nb;
+};
+
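+/* Restart handler: writing RESET_REG_VALUE to the reset CSR reboots the SoC */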
+static int litex_reset_handler(struct notifier_block *this, unsigned long mode,
+ void *cmd)
+{
+ struct litex_soc_ctrl_device *soc_ctrl_dev =
+ container_of(this, struct litex_soc_ctrl_device, reset_nb);
+
+ litex_write32(soc_ctrl_dev->base + RESET_REG_OFF, RESET_REG_VALUE);
+ return NOTIFY_DONE;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id litex_soc_ctrl_of_match[] = {
+ {.compatible = "litex,soc-controller"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, litex_soc_ctrl_of_match);
+#endif /* CONFIG_OF */
+
+static int litex_soc_ctrl_probe(struct platform_device *pdev)
+{
+ struct litex_soc_ctrl_device *soc_ctrl_dev;
+ int error;
+
+ soc_ctrl_dev = devm_kzalloc(&pdev->dev, sizeof(*soc_ctrl_dev), GFP_KERNEL);
+ if (!soc_ctrl_dev)
+ return -ENOMEM;
+
+ soc_ctrl_dev->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(soc_ctrl_dev->base))
+ return PTR_ERR(soc_ctrl_dev->base);
+
+ error = litex_check_csr_access(soc_ctrl_dev->base);
+ if (error)
+ return error;
+
+ platform_set_drvdata(pdev, soc_ctrl_dev);
+
+ soc_ctrl_dev->reset_nb.notifier_call = litex_reset_handler;
+ soc_ctrl_dev->reset_nb.priority = 128;
+ error = register_restart_handler(&soc_ctrl_dev->reset_nb);
+ if (error) {
+ dev_warn(&pdev->dev, "cannot register restart handler: %d\n",
+ error);
+ }
+
+ return 0;
+}
+
+static int litex_soc_ctrl_remove(struct platform_device *pdev)
+{
+ struct litex_soc_ctrl_device *soc_ctrl_dev = platform_get_drvdata(pdev);
+
+ unregister_restart_handler(&soc_ctrl_dev->reset_nb);
+ return 0;
+}
+
+static struct platform_driver litex_soc_ctrl_driver = {
+ .driver = {
+ .name = "litex-soc-controller",
+ .of_match_table = of_match_ptr(litex_soc_ctrl_of_match)
+ },
+ .probe = litex_soc_ctrl_probe,
+ .remove = litex_soc_ctrl_remove,
+};
+
+module_platform_driver(litex_soc_ctrl_driver);
+MODULE_DESCRIPTION("LiteX SoC Controller driver");
+MODULE_AUTHOR("Antmicro <www.antmicro.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/loongson/Kconfig b/drivers/soc/loongson/Kconfig
new file mode 100644
index 0000000000..368344943a
--- /dev/null
+++ b/drivers/soc/loongson/Kconfig
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Loongson-2 series SoC drivers
+#
+
+config LOONGSON2_GUTS
+ tristate "Loongson-2 SoC Global UtiliTieS (GUTS) register block"
+ depends on LOONGARCH || COMPILE_TEST
+ select SOC_BUS
+ help
+ The global utilities block controls PCIe device enabling, alternate
+ function selection for multiplexed signals, consistency of HDA, USB
+ and PCIe, and configuration of the memory controller, RTC controller,
+ LIO controller and clock control. This driver manages and provides
+ access to the global utilities block on LoongArch Loongson-2 SoCs.
+ Initially only reading the SVR and registering the SoC device are
+ supported. Other GUTS accesses, such as reading the firmware
+ configuration by default, should eventually be added to this driver
+ as well.
+
+config LOONGSON2_PM
+ bool "Loongson-2 SoC Power Management Controller Driver"
+ depends on LOONGARCH && OF
+ depends on INPUT=y
+ help
+ The Loongson-2 power management controller is ACPI-based and supports
+ ACPI S2Idle (Suspend To Idle), ACPI S3 (Suspend To RAM), ACPI S4
+ (Suspend To Disk), ACPI S5 (Soft Shutdown) and multiple wake-up methods
+ (USB, GMAC, PWRBTN, etc.). This driver adds device-tree-based power
+ management controller support for Loongson-2 series SoCs.
diff --git a/drivers/soc/loongson/Makefile b/drivers/soc/loongson/Makefile
new file mode 100644
index 0000000000..4118f50f55
--- /dev/null
+++ b/drivers/soc/loongson/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Makefile for the Linux Kernel SoC Loongson-2 specific device drivers
+#
+
+obj-$(CONFIG_LOONGSON2_GUTS) += loongson2_guts.o
+obj-$(CONFIG_LOONGSON2_PM) += loongson2_pm.o
diff --git a/drivers/soc/loongson/loongson2_guts.c b/drivers/soc/loongson/loongson2_guts.c
new file mode 100644
index 0000000000..9a469779ee
--- /dev/null
+++ b/drivers/soc/loongson/loongson2_guts.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Author: Yinbo Zhu <zhuyinbo@loongson.cn>
+ * Copyright (C) 2022-2023 Loongson Technology Corporation Limited
+ */
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/of_fdt.h>
+#include <linux/sys_soc.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+static struct soc_device_attribute soc_dev_attr;
+static struct soc_device *soc_dev;
+
+/*
+ * Global Utility Registers.
+ *
+ * Not all registers defined in this structure are available on all chips, so
+ * you are expected to know whether a given register actually exists on your
+ * chip before you access it.
+ *
+ * Also, some registers are similar on different chips but have slightly
+ * different names. In these cases, one name is chosen to avoid extraneous
+ * #ifdefs.
+ */
+struct scfg_guts {
+ u32 svr; /* Version Register */
+ u8 res0[4];
+ u16 feature; /* Feature Register */
+ u32 vendor; /* Vendor Register */
+ u8 res1[6];
+ u32 id;
+ u8 res2[0x3ff8 - 0x18];
+ u32 chip;
+};
+
+static struct guts {
+ struct scfg_guts __iomem *regs;
+ bool little_endian;
+} *guts;
+
+struct loongson2_soc_die_attr {
+ char *die;
+ u32 svr;
+ u32 mask;
+};
+
+/* SoC die attribute definition for Loongson-2 platform */
+static const struct loongson2_soc_die_attr loongson2_soc_die[] = {
+
+ /*
+ * LoongArch-based Loongson-2 series SoCs
+ */
+
+ /* Die: 2k1000, SoC: 2k1000 */
+ { .die = "2K1000",
+ .svr = 0x00000013,
+ .mask = 0x000000ff,
+ },
+ { },
+};
+
+static const struct loongson2_soc_die_attr *loongson2_soc_die_match(
+ u32 svr, const struct loongson2_soc_die_attr *matches)
+{
+ while (matches->svr) {
+ if (matches->svr == (svr & matches->mask))
+ return matches;
+ matches++;
+ }
+
+ return NULL;
+}
+
+static u32 loongson2_guts_get_svr(void)
+{
+ u32 svr = 0;
+
+ if (!guts || !guts->regs)
+ return svr;
+
+ if (guts->little_endian)
+ svr = ioread32(&guts->regs->svr);
+ else
+ svr = ioread32be(&guts->regs->svr);
+
+ return svr;
+}
+
+static int loongson2_guts_probe(struct platform_device *pdev)
+{
+ struct device_node *root, *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ const struct loongson2_soc_die_attr *soc_die;
+ const char *machine;
+ u32 svr;
+
+ /* Initialize guts */
+ guts = devm_kzalloc(dev, sizeof(*guts), GFP_KERNEL);
+ if (!guts)
+ return -ENOMEM;
+
+ guts->little_endian = of_property_read_bool(np, "little-endian");
+
+ guts->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(guts->regs))
+ return PTR_ERR(guts->regs);
+
+ /* Register soc device */
+ root = of_find_node_by_path("/");
+ if (of_property_read_string(root, "model", &machine))
+ of_property_read_string_index(root, "compatible", 0, &machine);
+ of_node_put(root);
+ if (machine)
+ soc_dev_attr.machine = devm_kstrdup(dev, machine, GFP_KERNEL);
+
+ svr = loongson2_guts_get_svr();
+ soc_die = loongson2_soc_die_match(svr, loongson2_soc_die);
+ if (soc_die) {
+ soc_dev_attr.family = devm_kasprintf(dev, GFP_KERNEL,
+ "Loongson %s", soc_die->die);
+ } else {
+ soc_dev_attr.family = devm_kasprintf(dev, GFP_KERNEL, "Loongson");
+ }
+ if (!soc_dev_attr.family)
+ return -ENOMEM;
+ soc_dev_attr.soc_id = devm_kasprintf(dev, GFP_KERNEL,
+ "svr:0x%08x", svr);
+ if (!soc_dev_attr.soc_id)
+ return -ENOMEM;
+ soc_dev_attr.revision = devm_kasprintf(dev, GFP_KERNEL, "%d.%d",
+ (svr >> 4) & 0xf, svr & 0xf);
+ if (!soc_dev_attr.revision)
+ return -ENOMEM;
+
+ soc_dev = soc_device_register(&soc_dev_attr);
+ if (IS_ERR(soc_dev))
+ return PTR_ERR(soc_dev);
+
+ pr_info("Machine: %s\n", soc_dev_attr.machine);
+ pr_info("SoC family: %s\n", soc_dev_attr.family);
+ pr_info("SoC ID: %s, Revision: %s\n",
+ soc_dev_attr.soc_id, soc_dev_attr.revision);
+
+ return 0;
+}
+
+static int loongson2_guts_remove(struct platform_device *dev)
+{
+ soc_device_unregister(soc_dev);
+
+ return 0;
+}
+
+/*
+ * Table for matching compatible strings, for device tree
+ * guts node, for Loongson-2 SoCs.
+ */
+static const struct of_device_id loongson2_guts_of_match[] = {
+ { .compatible = "loongson,ls2k-chipid", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, loongson2_guts_of_match);
+
+static struct platform_driver loongson2_guts_driver = {
+ .driver = {
+ .name = "loongson2-guts",
+ .of_match_table = loongson2_guts_of_match,
+ },
+ .probe = loongson2_guts_probe,
+ .remove = loongson2_guts_remove,
+};
+
+static int __init loongson2_guts_init(void)
+{
+ return platform_driver_register(&loongson2_guts_driver);
+}
+core_initcall(loongson2_guts_init);
+
+static void __exit loongson2_guts_exit(void)
+{
+ platform_driver_unregister(&loongson2_guts_driver);
+}
+module_exit(loongson2_guts_exit);
+
+MODULE_DESCRIPTION("Loongson2 GUTS driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/loongson/loongson2_pm.c b/drivers/soc/loongson/loongson2_pm.c
new file mode 100644
index 0000000000..b8e5e1e352
--- /dev/null
+++ b/drivers/soc/loongson/loongson2_pm.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Loongson-2 PM Support
+ *
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/suspend.h>
+#include <linux/interrupt.h>
+#include <linux/of_platform.h>
+#include <linux/pm_wakeirq.h>
+#include <linux/platform_device.h>
+#include <asm/bootinfo.h>
+#include <asm/suspend.h>
+
+#define LOONGSON2_PM1_CNT_REG 0x14
+#define LOONGSON2_PM1_STS_REG 0x0c
+#define LOONGSON2_PM1_ENA_REG 0x10
+#define LOONGSON2_GPE0_STS_REG 0x28
+#define LOONGSON2_GPE0_ENA_REG 0x2c
+
+#define LOONGSON2_PM1_PWRBTN_STS BIT(8)
+#define LOONGSON2_PM1_PCIEXP_WAKE_STS BIT(14)
+#define LOONGSON2_PM1_WAKE_STS BIT(15)
+#define LOONGSON2_PM1_CNT_INT_EN BIT(0)
+#define LOONGSON2_PM1_PWRBTN_EN LOONGSON2_PM1_PWRBTN_STS
+
+static struct loongson2_pm {
+ void __iomem *base;
+ struct input_dev *dev;
+ bool suspended;
+} loongson2_pm;
+
+#define loongson2_pm_readw(reg) readw(loongson2_pm.base + reg)
+#define loongson2_pm_readl(reg) readl(loongson2_pm.base + reg)
+#define loongson2_pm_writew(val, reg) writew(val, loongson2_pm.base + reg)
+#define loongson2_pm_writel(val, reg) writel(val, loongson2_pm.base + reg)
+
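+/* Acknowledge any pending PM1 and GPE0 wake/status bits */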
+static void loongson2_pm_status_clear(void)
+{
+ u16 value;
+
+ value = loongson2_pm_readw(LOONGSON2_PM1_STS_REG);
+ value |= (LOONGSON2_PM1_PWRBTN_STS | LOONGSON2_PM1_PCIEXP_WAKE_STS |
+ LOONGSON2_PM1_WAKE_STS);
+ loongson2_pm_writew(value, LOONGSON2_PM1_STS_REG);
+ loongson2_pm_writel(loongson2_pm_readl(LOONGSON2_GPE0_STS_REG), LOONGSON2_GPE0_STS_REG);
+}
+
+static void loongson2_pm_irq_enable(void)
+{
+ u16 value;
+
+ value = loongson2_pm_readw(LOONGSON2_PM1_CNT_REG);
+ value |= LOONGSON2_PM1_CNT_INT_EN;
+ loongson2_pm_writew(value, LOONGSON2_PM1_CNT_REG);
+
+ value = loongson2_pm_readw(LOONGSON2_PM1_ENA_REG);
+ value |= LOONGSON2_PM1_PWRBTN_EN;
+ loongson2_pm_writew(value, LOONGSON2_PM1_ENA_REG);
+}
+
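+/* S3 entry: clear stale status bits, enter suspend, then re-enable the power button interrupt on resume */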
+static int loongson2_suspend_enter(suspend_state_t state)
+{
+ loongson2_pm_status_clear();
+ loongarch_common_suspend();
+ loongarch_suspend_enter();
+ loongarch_common_resume();
+ loongson2_pm_irq_enable();
+ pm_set_resume_via_firmware();
+
+ return 0;
+}
+
+static int loongson2_suspend_begin(suspend_state_t state)
+{
+ pm_set_suspend_via_firmware();
+
+ return 0;
+}
+
+static int loongson2_suspend_valid_state(suspend_state_t state)
+{
+ return (state == PM_SUSPEND_MEM);
+}
+
+static const struct platform_suspend_ops loongson2_suspend_ops = {
+ .valid = loongson2_suspend_valid_state,
+ .begin = loongson2_suspend_begin,
+ .enter = loongson2_suspend_enter,
+};
+
+static int loongson2_power_button_init(struct device *dev, int irq)
+{
+ int ret;
+ struct input_dev *button;
+
+ button = input_allocate_device();
+ if (!button)
+ return -ENOMEM;
+
+ button->name = "Power Button";
+ button->phys = "pm/button/input0";
+ button->id.bustype = BUS_HOST;
+ button->dev.parent = NULL;
+ input_set_capability(button, EV_KEY, KEY_POWER);
+
+ ret = input_register_device(button);
+ if (ret)
+ goto free_dev;
+
+ dev_pm_set_wake_irq(&button->dev, irq);
+ device_set_wakeup_capable(&button->dev, true);
+ device_set_wakeup_enable(&button->dev, true);
+
+ loongson2_pm.dev = button;
+ dev_info(dev, "Power Button: Init successful!\n");
+
+ return 0;
+
+free_dev:
+ input_free_device(button);
+
+ return ret;
+}
+
+static irqreturn_t loongson2_pm_irq_handler(int irq, void *dev_id)
+{
+ u16 status = loongson2_pm_readw(LOONGSON2_PM1_STS_REG);
+
+ if (!loongson2_pm.suspended && (status & LOONGSON2_PM1_PWRBTN_STS)) {
+ pr_info("Power Button pressed...\n");
+ input_report_key(loongson2_pm.dev, KEY_POWER, 1);
+ input_sync(loongson2_pm.dev);
+ input_report_key(loongson2_pm.dev, KEY_POWER, 0);
+ input_sync(loongson2_pm.dev);
+ }
+
+ loongson2_pm_status_clear();
+
+ return IRQ_HANDLED;
+}
+
+static int __maybe_unused loongson2_pm_suspend(struct device *dev)
+{
+ loongson2_pm.suspended = true;
+
+ return 0;
+}
+
+static int __maybe_unused loongson2_pm_resume(struct device *dev)
+{
+ loongson2_pm.suspended = false;
+
+ return 0;
+}
+static SIMPLE_DEV_PM_OPS(loongson2_pm_ops, loongson2_pm_suspend, loongson2_pm_resume);
+
+static int loongson2_pm_probe(struct platform_device *pdev)
+{
+ int irq, retval;
+ u64 suspend_addr;
+ struct device *dev = &pdev->dev;
+
+ loongson2_pm.base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(loongson2_pm.base))
+ return PTR_ERR(loongson2_pm.base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ if (!device_property_read_u64(dev, "loongson,suspend-address", &suspend_addr))
+ loongson_sysconf.suspend_addr = (u64)phys_to_virt(suspend_addr);
+ else
+ dev_err(dev, "No loongson,suspend-address, could not support S3!\n");
+
+ if (loongson2_power_button_init(dev, irq))
+ return -EINVAL;
+
+ retval = devm_request_irq(&pdev->dev, irq, loongson2_pm_irq_handler,
+ IRQF_SHARED, "pm_irq", &loongson2_pm);
+ if (retval)
+ return retval;
+
+ loongson2_pm_irq_enable();
+ loongson2_pm_status_clear();
+
+ if (loongson_sysconf.suspend_addr)
+ suspend_set_ops(&loongson2_suspend_ops);
+
+ /* Populate children */
+ retval = devm_of_platform_populate(dev);
+ if (retval)
+ dev_err(dev, "Error populating children, reboot and poweroff might not work properly\n");
+
+ return 0;
+}
+
+static const struct of_device_id loongson2_pm_match[] = {
+ { .compatible = "loongson,ls2k0500-pmc", },
+ {},
+};
+
+static struct platform_driver loongson2_pm_driver = {
+ .driver = {
+ .name = "ls2k-pm",
+ .pm = &loongson2_pm_ops,
+ .of_match_table = loongson2_pm_match,
+ },
+ .probe = loongson2_pm_probe,
+};
+module_platform_driver(loongson2_pm_driver);
+
+MODULE_DESCRIPTION("Loongson-2 PM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
new file mode 100644
index 0000000000..a88cf04fc8
--- /dev/null
+++ b/drivers/soc/mediatek/Kconfig
@@ -0,0 +1,94 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# MediaTek SoC drivers
+#
+menu "MediaTek SoC drivers"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+
+config MTK_CMDQ
+ tristate "MediaTek CMDQ Support"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select MAILBOX
+ select MTK_CMDQ_MBOX
+ select MTK_INFRACFG
+ help
+ Say yes here to add support for the MediaTek Command Queue (CMDQ)
+ driver. The CMDQ is used to help read/write registers under critical
+ timing constraints, such as updating the display configuration during
+ vblank.
+
+config MTK_DEVAPC
+ tristate "Mediatek Device APC Support"
+ help
+ Say yes here to enable support for Mediatek Device APC driver.
+ This driver is mainly used to handle the violation which catches
+ unexpected transaction.
+ The violation information is logged for further analysis or
+ countermeasures.
+
+config MTK_INFRACFG
+ bool "MediaTek INFRACFG Support"
+ select REGMAP
+ help
+ Say yes here to add support for the MediaTek INFRACFG controller. The
+ INFRACFG controller contains various infrastructure registers not
+ directly associated with any device.
+
+config MTK_PMIC_WRAP
+ tristate "MediaTek PMIC Wrapper Support"
+ depends on RESET_CONTROLLER
+ depends on OF
+ select REGMAP
+ help
+ Say yes here to add support for the MediaTek PMIC Wrapper found
+ on different MediaTek SoCs. The PMIC wrapper is proprietary
+ hardware used to connect to the PMIC.
+
+config MTK_REGULATOR_COUPLER
+ bool "MediaTek SoC Regulator Coupler" if COMPILE_TEST
+ default ARCH_MEDIATEK
+ depends on REGULATOR
+
+config MTK_SCPSYS
+ bool "MediaTek SCPSYS Support"
+ default ARCH_MEDIATEK
+ depends on OF
+ select REGMAP
+ select MTK_INFRACFG
+ select PM_GENERIC_DOMAINS if PM
+ help
+ Say yes here to add support for the MediaTek SCPSYS power domain
+ driver.
+
+config MTK_SCPSYS_PM_DOMAINS
+ bool "MediaTek SCPSYS generic power domain"
+ default ARCH_MEDIATEK
+ depends on PM
+ select PM_GENERIC_DOMAINS
+ select REGMAP
+ help
+ Say y here to enable power domain support.
+ To meet high performance and low power requirements, the System
+ Control Processor System (SCPSYS) handles several power-management
+ related tasks in the system.
+
+config MTK_MMSYS
+ tristate "MediaTek MMSYS Support"
+ default ARCH_MEDIATEK
+ depends on HAS_IOMEM
+ depends on MTK_CMDQ || MTK_CMDQ=n
+ help
+ Say yes here to add support for the MediaTek Multimedia
+ Subsystem (MMSYS).
+
+config MTK_SVS
+ tristate "MediaTek Smart Voltage Scaling(SVS)"
+ depends on NVMEM_MTK_EFUSE && NVMEM
+ help
+ The Smart Voltage Scaling (SVS) engine is a piece of hardware
+ with several controllers (banks) that calculate suitable
+ voltages for different power domains (CPU/GPU/CCI) according to
+ the chip process corner, temperature and other factors. The DVFS
+ driver can then apply the SVS bank voltage to the PMIC/buck.
+
+endmenu
diff --git a/drivers/soc/mediatek/Makefile b/drivers/soc/mediatek/Makefile
new file mode 100644
index 0000000000..9d3ce7878c
--- /dev/null
+++ b/drivers/soc/mediatek/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_MTK_CMDQ) += mtk-cmdq-helper.o
+obj-$(CONFIG_MTK_DEVAPC) += mtk-devapc.o
+obj-$(CONFIG_MTK_INFRACFG) += mtk-infracfg.o
+obj-$(CONFIG_MTK_PMIC_WRAP) += mtk-pmic-wrap.o
+obj-$(CONFIG_MTK_REGULATOR_COUPLER) += mtk-regulator-coupler.o
+obj-$(CONFIG_MTK_MMSYS) += mtk-mmsys.o
+obj-$(CONFIG_MTK_MMSYS) += mtk-mutex.o
+obj-$(CONFIG_MTK_SVS) += mtk-svs.o
diff --git a/drivers/soc/mediatek/mt8167-mmsys.h b/drivers/soc/mediatek/mt8167-mmsys.h
new file mode 100644
index 0000000000..f7a35b3656
--- /dev/null
+++ b/drivers/soc/mediatek/mt8167-mmsys.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SOC_MEDIATEK_MT8167_MMSYS_H
+#define __SOC_MEDIATEK_MT8167_MMSYS_H
+
+#define MT8167_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN 0x030
+#define MT8167_DISP_REG_CONFIG_DISP_DITHER_MOUT_EN 0x038
+#define MT8167_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN 0x058
+#define MT8167_DISP_REG_CONFIG_DISP_DSI0_SEL_IN 0x064
+#define MT8167_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL_IN 0x06c
+
+#define MT8167_DITHER_MOUT_EN_RDMA0 0x1
+#define MT8167_RDMA0_SOUT_DSI0 0x2
+#define MT8167_DSI0_SEL_IN_RDMA0 0x1
+
+static const struct mtk_mmsys_routes mt8167_mmsys_routing_table[] = {
+ {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
+ MT8167_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN, OVL0_MOUT_EN_COLOR0,
+ }, {
+ DDP_COMPONENT_DITHER0, DDP_COMPONENT_RDMA0,
+ MT8167_DISP_REG_CONFIG_DISP_DITHER_MOUT_EN, MT8167_DITHER_MOUT_EN_RDMA0
+ }, {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
+ MT8167_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN, COLOR0_SEL_IN_OVL0
+ }, {
+ DDP_COMPONENT_RDMA0, DDP_COMPONENT_DSI0,
+ MT8167_DISP_REG_CONFIG_DISP_DSI0_SEL_IN, MT8167_DSI0_SEL_IN_RDMA0
+ }, {
+ DDP_COMPONENT_RDMA0, DDP_COMPONENT_DSI0,
+ MT8167_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL_IN, MT8167_RDMA0_SOUT_DSI0
+ },
+};
+
+#endif /* __SOC_MEDIATEK_MT8167_MMSYS_H */
diff --git a/drivers/soc/mediatek/mt8173-mmsys.h b/drivers/soc/mediatek/mt8173-mmsys.h
new file mode 100644
index 0000000000..9d24e38127
--- /dev/null
+++ b/drivers/soc/mediatek/mt8173-mmsys.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SOC_MEDIATEK_MT8173_MMSYS_H
+#define __SOC_MEDIATEK_MT8173_MMSYS_H
+
+#define MT8173_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN 0x040
+#define MT8173_DISP_REG_CONFIG_DISP_OVL1_MOUT_EN 0x044
+#define MT8173_DISP_REG_CONFIG_DISP_OD_MOUT_EN 0x048
+#define MT8173_DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN 0x04c
+#define MT8173_DISP_REG_CONFIG_DISP_UFOE_MOUT_EN 0x050
+#define MT8173_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN 0x084
+#define MT8173_DISP_REG_CONFIG_DISP_COLOR1_SEL_IN 0x088
+#define MT8173_DISP_REG_CONFIG_DISP_AAL_SEL_IN 0x08c
+#define MT8173_DISP_REG_CONFIG_DISP_UFOE_SEL_IN 0x0a0
+#define MT8173_DISP_REG_CONFIG_DSI0_SEL_IN 0x0a4
+#define MT8173_DISP_REG_CONFIG_DPI_SEL_IN 0x0ac
+#define MT8173_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL_IN 0x0b0
+#define MT8173_DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN 0x0c8
+#define MT8173_DISP_REG_CONFIG_DISP_COLOR0_SOUT_SEL_IN 0x0bc
+
+#define MT8173_AAL_SEL_IN_MERGE BIT(0)
+#define MT8173_COLOR0_SEL_IN_OVL0 BIT(0)
+#define MT8173_COLOR0_SOUT_MERGE BIT(0)
+#define MT8173_DPI0_SEL_IN_MASK GENMASK(1, 0)
+#define MT8173_DPI0_SEL_IN_RDMA1 BIT(0)
+#define MT8173_DSI0_SEL_IN_UFOE BIT(0)
+#define MT8173_GAMMA_MOUT_EN_RDMA1 BIT(0)
+#define MT8173_OD0_MOUT_EN_RDMA0 BIT(0)
+#define MT8173_OVL0_MOUT_EN_COLOR0 BIT(0)
+#define MT8173_OVL1_MOUT_EN_COLOR1 BIT(0)
+#define MT8173_UFOE_MOUT_EN_DSI0 BIT(0)
+#define MT8173_UFOE_SEL_IN_RDMA0 BIT(0)
+#define MT8173_RDMA0_SOUT_COLOR0 BIT(0)
+
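+/*
+ * Each struct mtk_mmsys_routes entry below describes one connection in the
+ * display path: source component, destination component, the routing
+ * register to program, the bit mask to update and the value selecting that
+ * connection (per-entry comments note the multiplexer setting chosen).
+ */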
+static const struct mtk_mmsys_routes mt8173_mmsys_routing_table[] = {
+ {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
+ MT8173_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN,
+ MT8173_OVL0_MOUT_EN_COLOR0, MT8173_OVL0_MOUT_EN_COLOR0
+ }, {
+ DDP_COMPONENT_OD0, DDP_COMPONENT_RDMA0,
+ MT8173_DISP_REG_CONFIG_DISP_OD_MOUT_EN,
+ MT8173_OD0_MOUT_EN_RDMA0, MT8173_OD0_MOUT_EN_RDMA0
+ }, {
+ DDP_COMPONENT_UFOE, DDP_COMPONENT_DSI0,
+ MT8173_DISP_REG_CONFIG_DISP_UFOE_MOUT_EN,
+ MT8173_UFOE_MOUT_EN_DSI0, MT8173_UFOE_MOUT_EN_DSI0
+ }, {
+ DDP_COMPONENT_COLOR0, DDP_COMPONENT_AAL0,
+ MT8173_DISP_REG_CONFIG_DISP_COLOR0_SOUT_SEL_IN,
+ MT8173_COLOR0_SOUT_MERGE, 0 /* SOUT to AAL */
+ }, {
+ DDP_COMPONENT_RDMA0, DDP_COMPONENT_UFOE,
+ MT8173_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL_IN,
+ MT8173_RDMA0_SOUT_COLOR0, 0 /* SOUT to UFOE */
+ }, {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
+ MT8173_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN,
+ MT8173_COLOR0_SEL_IN_OVL0, MT8173_COLOR0_SEL_IN_OVL0
+ }, {
+ DDP_COMPONENT_AAL0, DDP_COMPONENT_COLOR0,
+ MT8173_DISP_REG_CONFIG_DISP_AAL_SEL_IN,
+ MT8173_AAL_SEL_IN_MERGE, 0 /* SEL_IN from COLOR0 */
+ }, {
+ DDP_COMPONENT_RDMA0, DDP_COMPONENT_UFOE,
+ MT8173_DISP_REG_CONFIG_DISP_UFOE_SEL_IN,
+ MT8173_UFOE_SEL_IN_RDMA0, 0 /* SEL_IN from RDMA0 */
+ }, {
+ DDP_COMPONENT_UFOE, DDP_COMPONENT_DSI0,
+ MT8173_DISP_REG_CONFIG_DSI0_SEL_IN,
+ MT8173_DSI0_SEL_IN_UFOE, 0, /* SEL_IN from UFOE */
+ }, {
+ DDP_COMPONENT_OVL1, DDP_COMPONENT_COLOR1,
+ MT8173_DISP_REG_CONFIG_DISP_OVL1_MOUT_EN,
+ MT8173_OVL1_MOUT_EN_COLOR1, MT8173_OVL1_MOUT_EN_COLOR1
+ }, {
+ DDP_COMPONENT_GAMMA, DDP_COMPONENT_RDMA1,
+ MT8173_DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN,
+ MT8173_GAMMA_MOUT_EN_RDMA1, MT8173_GAMMA_MOUT_EN_RDMA1
+ }, {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
+ MT8173_DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN,
+ RDMA1_SOUT_MASK, RDMA1_SOUT_DPI0
+ }, {
+ DDP_COMPONENT_OVL1, DDP_COMPONENT_COLOR1,
+ MT8173_DISP_REG_CONFIG_DISP_COLOR1_SEL_IN,
+ COLOR1_SEL_IN_OVL1, COLOR1_SEL_IN_OVL1
+ }, {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
+ MT8173_DISP_REG_CONFIG_DPI_SEL_IN,
+ MT8173_DPI0_SEL_IN_MASK, MT8173_DPI0_SEL_IN_RDMA1
+ }
+};
+
+#endif /* __SOC_MEDIATEK_MT8173_MMSYS_H */
diff --git a/drivers/soc/mediatek/mt8183-mmsys.h b/drivers/soc/mediatek/mt8183-mmsys.h
new file mode 100644
index 0000000000..ff6be17034
--- /dev/null
+++ b/drivers/soc/mediatek/mt8183-mmsys.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SOC_MEDIATEK_MT8183_MMSYS_H
+#define __SOC_MEDIATEK_MT8183_MMSYS_H
+
+#define MT8183_DISP_OVL0_MOUT_EN 0xf00
+#define MT8183_DISP_OVL0_2L_MOUT_EN 0xf04
+#define MT8183_DISP_OVL1_2L_MOUT_EN 0xf08
+#define MT8183_DISP_DITHER0_MOUT_EN 0xf0c
+#define MT8183_DISP_PATH0_SEL_IN 0xf24
+#define MT8183_DISP_DSI0_SEL_IN 0xf2c
+#define MT8183_DISP_DPI0_SEL_IN 0xf30
+#define MT8183_DISP_RDMA0_SOUT_SEL_IN 0xf50
+#define MT8183_DISP_RDMA1_SOUT_SEL_IN 0xf54
+
+#define MT8183_OVL0_MOUT_EN_OVL0_2L BIT(4)
+#define MT8183_OVL0_2L_MOUT_EN_DISP_PATH0 BIT(0)
+#define MT8183_OVL1_2L_MOUT_EN_RDMA1 BIT(4)
+#define MT8183_DITHER0_MOUT_IN_DSI0 BIT(0)
+#define MT8183_DISP_PATH0_SEL_IN_OVL0_2L 0x1
+#define MT8183_DSI0_SEL_IN_RDMA0 0x1
+#define MT8183_DSI0_SEL_IN_RDMA1 0x3
+#define MT8183_DPI0_SEL_IN_RDMA0 0x1
+#define MT8183_DPI0_SEL_IN_RDMA1 0x2
+#define MT8183_RDMA0_SOUT_COLOR0 0x1
+#define MT8183_RDMA1_SOUT_DSI0 0x1
+
+#define MT8183_MMSYS_SW0_RST_B 0x140
+
+static const struct mtk_mmsys_routes mmsys_mt8183_routing_table[] = {
+ {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_OVL_2L0,
+ MT8183_DISP_OVL0_MOUT_EN, MT8183_OVL0_MOUT_EN_OVL0_2L,
+ MT8183_OVL0_MOUT_EN_OVL0_2L
+ }, {
+ DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
+ MT8183_DISP_OVL0_2L_MOUT_EN, MT8183_OVL0_2L_MOUT_EN_DISP_PATH0,
+ MT8183_OVL0_2L_MOUT_EN_DISP_PATH0
+ }, {
+ DDP_COMPONENT_OVL_2L1, DDP_COMPONENT_RDMA1,
+ MT8183_DISP_OVL1_2L_MOUT_EN, MT8183_OVL1_2L_MOUT_EN_RDMA1,
+ MT8183_OVL1_2L_MOUT_EN_RDMA1
+ }, {
+ DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
+ MT8183_DISP_DITHER0_MOUT_EN, MT8183_DITHER0_MOUT_IN_DSI0,
+ MT8183_DITHER0_MOUT_IN_DSI0
+ }, {
+ DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
+ MT8183_DISP_PATH0_SEL_IN, MT8183_DISP_PATH0_SEL_IN_OVL0_2L,
+ MT8183_DISP_PATH0_SEL_IN_OVL0_2L
+ }, {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
+ MT8183_DISP_DPI0_SEL_IN, MT8183_DPI0_SEL_IN_RDMA1,
+ MT8183_DPI0_SEL_IN_RDMA1
+ }, {
+ DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
+ MT8183_DISP_RDMA0_SOUT_SEL_IN, MT8183_RDMA0_SOUT_COLOR0,
+ MT8183_RDMA0_SOUT_COLOR0
+ }
+};
+
+#endif /* __SOC_MEDIATEK_MT8183_MMSYS_H */
+
diff --git a/drivers/soc/mediatek/mt8186-mmsys.h b/drivers/soc/mediatek/mt8186-mmsys.h
new file mode 100644
index 0000000000..279d413852
--- /dev/null
+++ b/drivers/soc/mediatek/mt8186-mmsys.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SOC_MEDIATEK_MT8186_MMSYS_H
+#define __SOC_MEDIATEK_MT8186_MMSYS_H
+
+/* Values for DPI configuration in MMSYS address space */
+#define MT8186_MMSYS_DPI_OUTPUT_FORMAT 0x400
+#define MT8186_DPI_FORMAT_MASK GENMASK(1, 0)
+#define MT8186_DPI_RGB888_SDR_CON 0
+#define MT8186_DPI_RGB888_DDR_CON 1
+#define MT8186_DPI_RGB565_SDR_CON 2
+#define MT8186_DPI_RGB565_DDR_CON 3
+
+#define MT8186_MMSYS_OVL_CON 0xF04
+#define MT8186_MMSYS_OVL0_CON_MASK 0x3
+#define MT8186_MMSYS_OVL0_2L_CON_MASK 0xC
+#define MT8186_OVL0_GO_BLEND BIT(0)
+#define MT8186_OVL0_GO_BG BIT(1)
+#define MT8186_OVL0_2L_GO_BLEND BIT(2)
+#define MT8186_OVL0_2L_GO_BG BIT(3)
+#define MT8186_DISP_RDMA0_SOUT_SEL 0xF0C
+#define MT8186_RDMA0_SOUT_SEL_MASK 0xF
+#define MT8186_RDMA0_SOUT_TO_DSI0 (0)
+#define MT8186_RDMA0_SOUT_TO_COLOR0 (1)
+#define MT8186_RDMA0_SOUT_TO_DPI0 (2)
+#define MT8186_DISP_OVL0_2L_MOUT_EN 0xF14
+#define MT8186_OVL0_2L_MOUT_EN_MASK 0xF
+#define MT8186_OVL0_2L_MOUT_TO_RDMA0 BIT(0)
+#define MT8186_OVL0_2L_MOUT_TO_RDMA1 BIT(3)
+#define MT8186_DISP_OVL0_MOUT_EN 0xF18
+#define MT8186_OVL0_MOUT_EN_MASK 0xF
+#define MT8186_OVL0_MOUT_TO_RDMA0 BIT(0)
+#define MT8186_OVL0_MOUT_TO_RDMA1 BIT(3)
+#define MT8186_DISP_DITHER0_MOUT_EN 0xF20
+#define MT8186_DITHER0_MOUT_EN_MASK 0xF
+#define MT8186_DITHER0_MOUT_TO_DSI0 BIT(0)
+#define MT8186_DITHER0_MOUT_TO_RDMA1 BIT(2)
+#define MT8186_DITHER0_MOUT_TO_DPI0 BIT(3)
+#define MT8186_DISP_RDMA0_SEL_IN 0xF28
+#define MT8186_RDMA0_SEL_IN_MASK 0xF
+#define MT8186_RDMA0_FROM_OVL0 0
+#define MT8186_RDMA0_FROM_OVL0_2L 2
+#define MT8186_DISP_DSI0_SEL_IN 0xF30
+#define MT8186_DSI0_SEL_IN_MASK 0xF
+#define MT8186_DSI0_FROM_RDMA0 0
+#define MT8186_DSI0_FROM_DITHER0 1
+#define MT8186_DSI0_FROM_RDMA1 2
+#define MT8186_DISP_RDMA1_MOUT_EN 0xF3C
+#define MT8186_RDMA1_MOUT_EN_MASK 0xF
+#define MT8186_RDMA1_MOUT_TO_DPI0_SEL BIT(0)
+#define MT8186_RDMA1_MOUT_TO_DSI0_SEL BIT(2)
+#define MT8186_DISP_RDMA1_SEL_IN 0xF40
+#define MT8186_RDMA1_SEL_IN_MASK 0xF
+#define MT8186_RDMA1_FROM_OVL0 0
+#define MT8186_RDMA1_FROM_OVL0_2L 2
+#define MT8186_RDMA1_FROM_DITHER0 3
+#define MT8186_DISP_DPI0_SEL_IN 0xF44
+#define MT8186_DPI0_SEL_IN_MASK 0xF
+#define MT8186_DPI0_FROM_RDMA1 0
+#define MT8186_DPI0_FROM_DITHER0 1
+#define MT8186_DPI0_FROM_RDMA0 2
+
+#define MT8186_MMSYS_SW0_RST_B 0x160
+
+static const struct mtk_mmsys_routes mmsys_mt8186_routing_table[] = {
+ {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
+ MT8186_DISP_OVL0_MOUT_EN, MT8186_OVL0_MOUT_EN_MASK,
+ MT8186_OVL0_MOUT_TO_RDMA0
+ },
+ {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
+ MT8186_DISP_RDMA0_SEL_IN, MT8186_RDMA0_SEL_IN_MASK,
+ MT8186_RDMA0_FROM_OVL0
+ },
+ {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
+ MT8186_MMSYS_OVL_CON, MT8186_MMSYS_OVL0_CON_MASK,
+ MT8186_OVL0_GO_BLEND
+ },
+ {
+ DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
+ MT8186_DISP_RDMA0_SOUT_SEL, MT8186_RDMA0_SOUT_SEL_MASK,
+ MT8186_RDMA0_SOUT_TO_COLOR0
+ },
+ {
+ DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
+ MT8186_DISP_DITHER0_MOUT_EN, MT8186_DITHER0_MOUT_EN_MASK,
+ MT8186_DITHER0_MOUT_TO_DSI0,
+ },
+ {
+ DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
+ MT8186_DISP_DSI0_SEL_IN, MT8186_DSI0_SEL_IN_MASK,
+ MT8186_DSI0_FROM_DITHER0
+ },
+ {
+ DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA1,
+ MT8186_DISP_OVL0_2L_MOUT_EN, MT8186_OVL0_2L_MOUT_EN_MASK,
+ MT8186_OVL0_2L_MOUT_TO_RDMA1
+ },
+ {
+ DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA1,
+ MT8186_DISP_RDMA1_SEL_IN, MT8186_RDMA1_SEL_IN_MASK,
+ MT8186_RDMA1_FROM_OVL0_2L
+ },
+ {
+ DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA1,
+ MT8186_MMSYS_OVL_CON, MT8186_MMSYS_OVL0_2L_CON_MASK,
+ MT8186_OVL0_2L_GO_BLEND
+ },
+ {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
+ MT8186_DISP_RDMA1_MOUT_EN, MT8186_RDMA1_MOUT_EN_MASK,
+ MT8186_RDMA1_MOUT_TO_DPI0_SEL
+ },
+ {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
+ MT8186_DISP_DPI0_SEL_IN, MT8186_DPI0_SEL_IN_MASK,
+ MT8186_DPI0_FROM_RDMA1
+ },
+};
+
+#endif /* __SOC_MEDIATEK_MT8186_MMSYS_H */
diff --git a/drivers/soc/mediatek/mt8188-mmsys.h b/drivers/soc/mediatek/mt8188-mmsys.h
new file mode 100644
index 0000000000..448cc3761b
--- /dev/null
+++ b/drivers/soc/mediatek/mt8188-mmsys.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SOC_MEDIATEK_MT8188_MMSYS_H
+#define __SOC_MEDIATEK_MT8188_MMSYS_H
+
+#define MT8188_VDO0_OVL_MOUT_EN 0xf14
+#define MT8188_MOUT_DISP_OVL0_TO_DISP_RDMA0 BIT(0)
+#define MT8188_MOUT_DISP_OVL0_TO_DISP_WDMA0 BIT(1)
+#define MT8188_MOUT_DISP_OVL0_TO_DISP_OVL1 BIT(2)
+#define MT8188_MOUT_DISP_OVL1_TO_DISP_RDMA1 BIT(4)
+#define MT8188_MOUT_DISP_OVL1_TO_DISP_WDMA1 BIT(5)
+#define MT8188_MOUT_DISP_OVL1_TO_DISP_OVL0 BIT(6)
+
+#define MT8188_VDO0_SEL_IN 0xf34
+#define MT8188_VDO0_SEL_OUT 0xf38
+
+#define MT8188_VDO0_DISP_RDMA_SEL 0xf40
+#define MT8188_SOUT_DISP_RDMA0_TO_MASK GENMASK(2, 0)
+#define MT8188_SOUT_DISP_RDMA0_TO_DISP_COLOR0 (0 << 0)
+#define MT8188_SOUT_DISP_RDMA0_TO_DISP_DSI0 (1 << 0)
+#define MT8188_SOUT_DISP_RDMA0_TO_DISP_DP_INTF0 (5 << 0)
+#define MT8188_SEL_IN_DISP_RDMA0_FROM_MASK GENMASK(8, 8)
+#define MT8188_SEL_IN_DISP_RDMA0_FROM_DISP_OVL0 (0 << 8)
+#define MT8188_SEL_IN_DISP_RDMA0_FROM_DISP_RSZ0 (1 << 8)
+
+
+#define MT8188_VDO0_DSI0_SEL_IN 0xf44
+#define MT8188_SEL_IN_DSI0_FROM_MASK BIT(0)
+#define MT8188_SEL_IN_DSI0_FROM_DSC_WRAP0_OUT (0 << 0)
+#define MT8188_SEL_IN_DSI0_FROM_DISP_DITHER0 (1 << 0)
+
+#define MT8188_VDO0_DP_INTF0_SEL_IN 0xf4C
+#define MT8188_SEL_IN_DP_INTF0_FROM_MASK GENMASK(2, 0)
+#define MT8188_SEL_IN_DP_INTF0_FROM_DSC_WRAP0C1_OUT (0 << 0)
+#define MT8188_SEL_IN_DP_INTF0_FROM_VPP_MERGE (1 << 0)
+#define MT8188_SEL_IN_DP_INTF0_FROM_DISP_DITHER0 (3 << 0)
+
+#define MT8188_VDO0_DISP_DITHER0_SEL_OUT 0xf58
+#define MT8188_SOUT_DISP_DITHER0_TO_MASK GENMASK(2, 0)
+#define MT8188_SOUT_DISP_DITHER0_TO_DSC_WRAP0_IN (0 << 0)
+#define MT8188_SOUT_DISP_DITHER0_TO_DSI0 (1 << 0)
+#define MT8188_SOUT_DISP_DITHER0_TO_VPP_MERGE0 (6 << 0)
+#define MT8188_SOUT_DISP_DITHER0_TO_DP_INTF0 (7 << 0)
+
+#define MT8188_VDO0_VPP_MERGE_SEL 0xf60
+#define MT8188_SEL_IN_VPP_MERGE_FROM_MASK GENMASK(1, 0)
+#define MT8188_SEL_IN_VPP_MERGE_FROM_DSC_WRAP0_OUT (0 << 0)
+#define MT8188_SEL_IN_VPP_MERGE_FROM_DITHER0_OUT (3 << 0)
+
+#define MT8188_SOUT_VPP_MERGE_TO_MASK GENMASK(6, 4)
+#define MT8188_SOUT_VPP_MERGE_TO_DSI1 (0 << 4)
+#define MT8188_SOUT_VPP_MERGE_TO_DP_INTF0 (1 << 4)
+#define MT8188_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0 (2 << 4)
+#define MT8188_SOUT_VPP_MERGE_TO_DISP_WDMA1 (3 << 4)
+#define MT8188_SOUT_VPP_MERGE_TO_DSC_WRAP0_IN (4 << 4)
+#define MT8188_SOUT_VPP_MERGE_TO_DISP_WDMA0 (5 << 4)
+#define MT8188_SOUT_VPP_MERGE_TO_DSC_WRAP1_IN_MASK GENMASK(11, 11)
+#define MT8188_SOUT_VPP_MERGE_TO_DSC_WRAP1_IN (0 << 11)
+
+#define MT8188_VDO0_DSC_WARP_SEL 0xf64
+#define MT8188_SEL_IN_DSC_WRAP0C0_IN_FROM_MASK GENMASK(0, 0)
+#define MT8188_SEL_IN_DSC_WRAP0C0_IN_FROM_DISP_DITHER0 (0 << 0)
+#define MT8188_SEL_IN_DSC_WRAP0C0_IN_FROM_VPP_MERGE (1 << 0)
+#define MT8188_SOUT_DSC_WRAP0_OUT_TO_MASK GENMASK(19, 16)
+#define MT8188_SOUT_DSC_WRAP0_OUT_TO_DSI0 BIT(16)
+#define MT8188_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0 BIT(17)
+#define MT8188_SOUT_DSC_WRAP0_OUT_TO_VPP_MERGE BIT(18)
+#define MT8188_SOUT_DSC_WRAP0_OUT_TO_DISP_WDMA0 BIT(19)
+
+static const struct mtk_mmsys_routes mmsys_mt8188_routing_table[] = {
+ {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
+ MT8188_VDO0_OVL_MOUT_EN, MT8188_MOUT_DISP_OVL0_TO_DISP_RDMA0,
+ MT8188_MOUT_DISP_OVL0_TO_DISP_RDMA0
+ }, {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_WDMA0,
+ MT8188_VDO0_OVL_MOUT_EN, MT8188_MOUT_DISP_OVL0_TO_DISP_WDMA0,
+ MT8188_MOUT_DISP_OVL0_TO_DISP_WDMA0
+ }, {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
+ MT8188_VDO0_DISP_RDMA_SEL, MT8188_SEL_IN_DISP_RDMA0_FROM_MASK,
+ MT8188_SEL_IN_DISP_RDMA0_FROM_DISP_OVL0
+ }, {
+ DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
+ MT8188_VDO0_DSI0_SEL_IN, MT8188_SEL_IN_DSI0_FROM_MASK,
+ MT8188_SEL_IN_DSI0_FROM_DISP_DITHER0
+ }, {
+ DDP_COMPONENT_DITHER0, DDP_COMPONENT_MERGE0,
+ MT8188_VDO0_VPP_MERGE_SEL, MT8188_SEL_IN_VPP_MERGE_FROM_MASK,
+ MT8188_SEL_IN_VPP_MERGE_FROM_DITHER0_OUT
+ }, {
+ DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSC0,
+ MT8188_VDO0_DSC_WARP_SEL,
+ MT8188_SEL_IN_DSC_WRAP0C0_IN_FROM_MASK,
+ MT8188_SEL_IN_DSC_WRAP0C0_IN_FROM_DISP_DITHER0
+ }, {
+ DDP_COMPONENT_DITHER0, DDP_COMPONENT_DP_INTF0,
+ MT8188_VDO0_DP_INTF0_SEL_IN, MT8188_SEL_IN_DP_INTF0_FROM_MASK,
+ MT8188_SEL_IN_DP_INTF0_FROM_DISP_DITHER0
+ }, {
+ DDP_COMPONENT_DSC0, DDP_COMPONENT_MERGE0,
+ MT8188_VDO0_VPP_MERGE_SEL, MT8188_SEL_IN_VPP_MERGE_FROM_MASK,
+ MT8188_SEL_IN_VPP_MERGE_FROM_DSC_WRAP0_OUT
+ }, {
+ DDP_COMPONENT_DSC0, DDP_COMPONENT_DSI0,
+ MT8188_VDO0_DSI0_SEL_IN, MT8188_SEL_IN_DSI0_FROM_MASK,
+ MT8188_SEL_IN_DSI0_FROM_DSC_WRAP0_OUT
+ }, {
+ DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
+ MT8188_VDO0_DISP_RDMA_SEL, MT8188_SOUT_DISP_RDMA0_TO_MASK,
+ MT8188_SOUT_DISP_RDMA0_TO_DISP_COLOR0
+ }, {
+ DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
+ MT8188_VDO0_DISP_DITHER0_SEL_OUT,
+ MT8188_SOUT_DISP_DITHER0_TO_MASK,
+ MT8188_SOUT_DISP_DITHER0_TO_DSI0
+ }, {
+ DDP_COMPONENT_DITHER0, DDP_COMPONENT_DP_INTF0,
+ MT8188_VDO0_DISP_DITHER0_SEL_OUT,
+ MT8188_SOUT_DISP_DITHER0_TO_MASK,
+ MT8188_SOUT_DISP_DITHER0_TO_DP_INTF0
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF0,
+ MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
+ MT8188_SOUT_VPP_MERGE_TO_DP_INTF0
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI0,
+ MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
+ MT8188_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_WDMA0,
+ MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
+ MT8188_SOUT_VPP_MERGE_TO_DISP_WDMA0
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC0,
+ MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
+ MT8188_SOUT_VPP_MERGE_TO_DSC_WRAP0_IN
+ }, {
+ DDP_COMPONENT_DSC0, DDP_COMPONENT_DSI0,
+ MT8188_VDO0_DSC_WARP_SEL, MT8188_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8188_SOUT_DSC_WRAP0_OUT_TO_DSI0
+ }, {
+ DDP_COMPONENT_DSC0, DDP_COMPONENT_MERGE0,
+ MT8188_VDO0_DSC_WARP_SEL, MT8188_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8188_SOUT_DSC_WRAP0_OUT_TO_VPP_MERGE
+ },
+};
+
+#endif /* __SOC_MEDIATEK_MT8188_MMSYS_H */
diff --git a/drivers/soc/mediatek/mt8192-mmsys.h b/drivers/soc/mediatek/mt8192-mmsys.h
new file mode 100644
index 0000000000..a016d80b4b
--- /dev/null
+++ b/drivers/soc/mediatek/mt8192-mmsys.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SOC_MEDIATEK_MT8192_MMSYS_H
+#define __SOC_MEDIATEK_MT8192_MMSYS_H
+
+#define MT8192_MMSYS_OVL_MOUT_EN 0xf04
+#define MT8192_DISP_OVL1_2L_MOUT_EN 0xf08
+#define MT8192_DISP_OVL0_2L_MOUT_EN 0xf18
+#define MT8192_DISP_OVL0_MOUT_EN 0xf1c
+#define MT8192_DISP_RDMA0_SEL_IN 0xf2c
+#define MT8192_DISP_RDMA0_SOUT_SEL 0xf30
+#define MT8192_DISP_CCORR0_SOUT_SEL 0xf34
+#define MT8192_DISP_AAL0_SEL_IN 0xf38
+#define MT8192_DISP_DITHER0_MOUT_EN 0xf3c
+#define MT8192_DISP_DSI0_SEL_IN 0xf40
+#define MT8192_DISP_OVL2_2L_MOUT_EN 0xf4c
+
+#define MT8192_DISP_OVL0_GO_BLEND BIT(0)
+#define MT8192_DITHER0_MOUT_IN_DSI0 BIT(0)
+#define MT8192_OVL0_MOUT_EN_DISP_RDMA0 BIT(0)
+#define MT8192_OVL2_2L_MOUT_EN_RDMA4 BIT(0)
+#define MT8192_DISP_OVL0_GO_BG BIT(1)
+#define MT8192_DISP_OVL0_2L_GO_BLEND BIT(2)
+#define MT8192_DISP_OVL0_2L_GO_BG BIT(3)
+#define MT8192_OVL1_2L_MOUT_EN_RDMA1 BIT(4)
+#define MT8192_OVL0_MOUT_EN_OVL0_2L BIT(4)
+#define MT8192_RDMA0_SEL_IN_OVL0_2L 0x3
+#define MT8192_RDMA0_SOUT_COLOR0 0x1
+#define MT8192_CCORR0_SOUT_AAL0 0x1
+#define MT8192_AAL0_SEL_IN_CCORR0 0x1
+#define MT8192_DSI0_SEL_IN_DITHER0 0x1
+
+static const struct mtk_mmsys_routes mmsys_mt8192_routing_table[] = {
+ {
+ DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
+ MT8192_DISP_OVL0_2L_MOUT_EN, MT8192_OVL0_MOUT_EN_DISP_RDMA0,
+ MT8192_OVL0_MOUT_EN_DISP_RDMA0
+ }, {
+ DDP_COMPONENT_OVL_2L2, DDP_COMPONENT_RDMA4,
+ MT8192_DISP_OVL2_2L_MOUT_EN, MT8192_OVL2_2L_MOUT_EN_RDMA4,
+ MT8192_OVL2_2L_MOUT_EN_RDMA4
+ }, {
+ DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
+ MT8192_DISP_DITHER0_MOUT_EN, MT8192_DITHER0_MOUT_IN_DSI0,
+ MT8192_DITHER0_MOUT_IN_DSI0
+ }, {
+ DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
+ MT8192_DISP_RDMA0_SEL_IN, MT8192_RDMA0_SEL_IN_OVL0_2L,
+ MT8192_RDMA0_SEL_IN_OVL0_2L
+ }, {
+ DDP_COMPONENT_CCORR, DDP_COMPONENT_AAL0,
+ MT8192_DISP_AAL0_SEL_IN, MT8192_AAL0_SEL_IN_CCORR0,
+ MT8192_AAL0_SEL_IN_CCORR0
+ }, {
+ DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
+ MT8192_DISP_DSI0_SEL_IN, MT8192_DSI0_SEL_IN_DITHER0,
+ MT8192_DSI0_SEL_IN_DITHER0
+ }, {
+ DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
+ MT8192_DISP_RDMA0_SOUT_SEL, MT8192_RDMA0_SOUT_COLOR0,
+ MT8192_RDMA0_SOUT_COLOR0
+ }, {
+ DDP_COMPONENT_CCORR, DDP_COMPONENT_AAL0,
+ MT8192_DISP_CCORR0_SOUT_SEL, MT8192_CCORR0_SOUT_AAL0,
+ MT8192_CCORR0_SOUT_AAL0
+ }, {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_OVL_2L0,
+ MT8192_MMSYS_OVL_MOUT_EN, MT8192_DISP_OVL0_GO_BG,
+ MT8192_DISP_OVL0_GO_BG
+ }, {
+ DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
+ MT8192_MMSYS_OVL_MOUT_EN, MT8192_DISP_OVL0_2L_GO_BLEND,
+ MT8192_DISP_OVL0_2L_GO_BLEND
+ }
+};
+
+#endif /* __SOC_MEDIATEK_MT8192_MMSYS_H */
diff --git a/drivers/soc/mediatek/mt8195-mmsys.h b/drivers/soc/mediatek/mt8195-mmsys.h
new file mode 100644
index 0000000000..9be2df2832
--- /dev/null
+++ b/drivers/soc/mediatek/mt8195-mmsys.h
@@ -0,0 +1,529 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SOC_MEDIATEK_MT8195_MMSYS_H
+#define __SOC_MEDIATEK_MT8195_MMSYS_H
+
+#define MT8195_VDO0_OVL_MOUT_EN 0xf14
+#define MT8195_MOUT_DISP_OVL0_TO_DISP_RDMA0 BIT(0)
+#define MT8195_MOUT_DISP_OVL0_TO_DISP_WDMA0 BIT(1)
+#define MT8195_MOUT_DISP_OVL0_TO_DISP_OVL1 BIT(2)
+#define MT8195_MOUT_DISP_OVL1_TO_DISP_RDMA1 BIT(4)
+#define MT8195_MOUT_DISP_OVL1_TO_DISP_WDMA1 BIT(5)
+#define MT8195_MOUT_DISP_OVL1_TO_DISP_OVL0 BIT(6)
+
+#define MT8195_VDO0_SEL_IN 0xf34
+#define MT8195_SEL_IN_VPP_MERGE_FROM_MASK GENMASK(1, 0)
+#define MT8195_SEL_IN_VPP_MERGE_FROM_DSC_WRAP0_OUT (0 << 0)
+#define MT8195_SEL_IN_VPP_MERGE_FROM_DISP_DITHER1 (1 << 0)
+#define MT8195_SEL_IN_VPP_MERGE_FROM_VDO1_VIRTUAL0 (2 << 0)
+#define MT8195_SEL_IN_DSC_WRAP0_IN_FROM_MASK GENMASK(4, 4)
+#define MT8195_SEL_IN_DSC_WRAP0_IN_FROM_DISP_DITHER0 (0 << 4)
+#define MT8195_SEL_IN_DSC_WRAP0_IN_FROM_VPP_MERGE (1 << 4)
+#define MT8195_SEL_IN_DSC_WRAP1_IN_FROM_MASK GENMASK(5, 5)
+#define MT8195_SEL_IN_DSC_WRAP1_IN_FROM_DISP_DITHER1 (0 << 5)
+#define MT8195_SEL_IN_DSC_WRAP1_IN_FROM_VPP_MERGE (1 << 5)
+#define MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK GENMASK(8, 8)
+#define MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE (0 << 8)
+#define MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT (1 << 8)
+#define MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK GENMASK(9, 9)
+#define MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT (0 << 9)
+#define MT8195_SEL_IN_DP_INTF0_FROM_MASK GENMASK(13, 12)
+#define MT8195_SEL_IN_DP_INTF0_FROM_DSC_WRAP1_OUT (0 << 0)
+#define MT8195_SEL_IN_DP_INTF0_FROM_VPP_MERGE (1 << 12)
+#define MT8195_SEL_IN_DP_INTF0_FROM_VDO1_VIRTUAL0 (2 << 12)
+#define MT8195_SEL_IN_DSI0_FROM_MASK GENMASK(16, 16)
+#define MT8195_SEL_IN_DSI0_FROM_DSC_WRAP0_OUT (0 << 16)
+#define MT8195_SEL_IN_DSI0_FROM_DISP_DITHER0 (1 << 16)
+#define MT8195_SEL_IN_DSI1_FROM_MASK GENMASK(17, 17)
+#define MT8195_SEL_IN_DSI1_FROM_DSC_WRAP1_OUT (0 << 17)
+#define MT8195_SEL_IN_DSI1_FROM_VPP_MERGE (1 << 17)
+#define MT8195_SEL_IN_DISP_WDMA1_FROM_MASK GENMASK(20, 20)
+#define MT8195_SEL_IN_DISP_WDMA1_FROM_DISP_OVL1 (0 << 20)
+#define MT8195_SEL_IN_DISP_WDMA1_FROM_VPP_MERGE (1 << 20)
+#define MT8195_SEL_IN_DSC_WRAP1_FROM_MASK GENMASK(21, 21)
+#define MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN (0 << 21)
+#define MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1 (1 << 21)
+#define MT8195_SEL_IN_DISP_WDMA0_FROM_MASK GENMASK(22, 22)
+#define MT8195_SEL_IN_DISP_WDMA0_FROM_DISP_OVL0 (0 << 22)
+
+#define MT8195_VDO0_SEL_OUT 0xf38
+#define MT8195_SOUT_DISP_DITHER0_TO_MASK BIT(0)
+#define MT8195_SOUT_DISP_DITHER0_TO_DSC_WRAP0_IN (0 << 0)
+#define MT8195_SOUT_DISP_DITHER0_TO_DSI0 (1 << 0)
+#define MT8195_SOUT_DISP_DITHER1_TO_MASK GENMASK(2, 1)
+#define MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_IN (0 << 1)
+#define MT8195_SOUT_DISP_DITHER1_TO_VPP_MERGE (1 << 1)
+#define MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT (2 << 1)
+#define MT8195_SOUT_VDO1_VIRTUAL0_TO_MASK GENMASK(4, 4)
+#define MT8195_SOUT_VDO1_VIRTUAL0_TO_VPP_MERGE (0 << 4)
+#define MT8195_SOUT_VDO1_VIRTUAL0_TO_DP_INTF0 (1 << 4)
+#define MT8195_SOUT_VPP_MERGE_TO_MASK GENMASK(10, 8)
+#define MT8195_SOUT_VPP_MERGE_TO_DSI1 (0 << 8)
+#define MT8195_SOUT_VPP_MERGE_TO_DP_INTF0 (1 << 8)
+#define MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0 (2 << 8)
+#define MT8195_SOUT_VPP_MERGE_TO_DISP_WDMA1 (3 << 8)
+#define MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP0_IN (4 << 8)
+#define MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP1_IN_MASK GENMASK(11, 11)
+#define MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP1_IN (0 << 11)
+#define MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK GENMASK(13, 12)
+#define MT8195_SOUT_DSC_WRAP0_OUT_TO_DSI0 (0 << 12)
+#define MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0 (1 << 12)
+#define MT8195_SOUT_DSC_WRAP0_OUT_TO_VPP_MERGE (2 << 12)
+#define MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK GENMASK(17, 16)
+#define MT8195_SOUT_DSC_WRAP1_OUT_TO_DSI1 (0 << 16)
+#define MT8195_SOUT_DSC_WRAP1_OUT_TO_DP_INTF0 (1 << 16)
+#define MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0 (2 << 16)
+#define MT8195_SOUT_DSC_WRAP1_OUT_TO_VPP_MERGE (3 << 16)
+
+#define MT8195_VDO1_SW0_RST_B 0x1d0
+#define MT8195_VDO1_MERGE0_ASYNC_CFG_WD 0xe30
+#define MT8195_VDO1_HDRBE_ASYNC_CFG_WD 0xe70
+#define MT8195_VDO1_HDR_TOP_CFG 0xd00
+#define MT8195_VDO1_MIXER_IN1_ALPHA 0xd30
+#define MT8195_VDO1_MIXER_IN1_PAD 0xd40
+
+#define MT8195_VDO1_VPP_MERGE0_P0_SEL_IN 0xf04
+#define MT8195_VPP_MERGE0_P0_SEL_IN_FROM_MDP_RDMA0 1
+
+#define MT8195_VDO1_VPP_MERGE0_P1_SEL_IN 0xf08
+#define MT8195_VPP_MERGE0_P1_SEL_IN_FROM_MDP_RDMA1 1
+
+#define MT8195_VDO1_DISP_DPI1_SEL_IN 0xf10
+#define MT8195_DISP_DPI1_SEL_IN_FROM_VPP_MERGE4_MOUT 0
+
+#define MT8195_VDO1_DISP_DP_INTF0_SEL_IN 0xf14
+#define MT8195_DISP_DP_INTF0_SEL_IN_FROM_VPP_MERGE4_MOUT 0
+
+#define MT8195_VDO1_MERGE4_SOUT_SEL 0xf18
+#define MT8195_MERGE4_SOUT_TO_DPI1_SEL 2
+#define MT8195_MERGE4_SOUT_TO_DP_INTF0_SEL 3
+
+#define MT8195_VDO1_MIXER_IN1_SEL_IN 0xf24
+#define MT8195_MIXER_IN1_SEL_IN_FROM_MERGE0_ASYNC_SOUT 1
+
+#define MT8195_VDO1_MIXER_IN2_SEL_IN 0xf28
+#define MT8195_MIXER_IN2_SEL_IN_FROM_MERGE1_ASYNC_SOUT 1
+
+#define MT8195_VDO1_MIXER_IN3_SEL_IN 0xf2c
+#define MT8195_MIXER_IN3_SEL_IN_FROM_MERGE2_ASYNC_SOUT 1
+
+#define MT8195_VDO1_MIXER_IN4_SEL_IN 0xf30
+#define MT8195_MIXER_IN4_SEL_IN_FROM_MERGE3_ASYNC_SOUT 1
+
+#define MT8195_VDO1_MIXER_OUT_SOUT_SEL 0xf34
+#define MT8195_MIXER_SOUT_TO_MERGE4_ASYNC_SEL 1
+
+#define MT8195_VDO1_VPP_MERGE1_P0_SEL_IN 0xf3c
+#define MT8195_VPP_MERGE1_P0_SEL_IN_FROM_MDP_RDMA2 1
+
+#define MT8195_VDO1_MERGE0_ASYNC_SOUT_SEL 0xf40
+#define MT8195_SOUT_TO_MIXER_IN1_SEL 1
+
+#define MT8195_VDO1_MERGE1_ASYNC_SOUT_SEL 0xf44
+#define MT8195_SOUT_TO_MIXER_IN2_SEL 1
+
+#define MT8195_VDO1_MERGE2_ASYNC_SOUT_SEL 0xf48
+#define MT8195_SOUT_TO_MIXER_IN3_SEL 1
+
+#define MT8195_VDO1_MERGE3_ASYNC_SOUT_SEL 0xf4c
+#define MT8195_SOUT_TO_MIXER_IN4_SEL 1
+
+#define MT8195_VDO1_MERGE4_ASYNC_SEL_IN 0xf50
+#define MT8195_MERGE4_ASYNC_SEL_IN_FROM_MIXER_OUT_SOUT 1
+
+#define MT8195_VDO1_MIXER_IN1_SOUT_SEL 0xf58
+#define MT8195_MIXER_IN1_SOUT_TO_DISP_MIXER 0
+
+#define MT8195_VDO1_MIXER_IN2_SOUT_SEL 0xf5c
+#define MT8195_MIXER_IN2_SOUT_TO_DISP_MIXER 0
+
+#define MT8195_VDO1_MIXER_IN3_SOUT_SEL 0xf60
+#define MT8195_MIXER_IN3_SOUT_TO_DISP_MIXER 0
+
+#define MT8195_VDO1_MIXER_IN4_SOUT_SEL 0xf64
+#define MT8195_MIXER_IN4_SOUT_TO_DISP_MIXER 0
+
+#define MT8195_VDO1_MIXER_SOUT_SEL_IN 0xf68
+#define MT8195_MIXER_SOUT_SEL_IN_FROM_DISP_MIXER 0
+
+/* VPPSYS1 */
+#define MT8195_VPP1_HW_DCM_1ST_DIS0 0x150
+#define MT8195_VPP1_HW_DCM_1ST_DIS1 0x160
+#define MT8195_VPP1_HW_DCM_2ND_DIS0 0x1a0
+#define MT8195_VPP1_HW_DCM_2ND_DIS1 0x1b0
+#define MT8195_SVPP2_BUF_BF_RSZ_SWITCH 0xf48
+#define MT8195_SVPP3_BUF_BF_RSZ_SWITCH 0xf74
+
+/* VPPSYS1 HW DCM client*/
+#define MT8195_SVPP1_MDP_RSZ BIT(25)
+#define MT8195_SVPP2_MDP_RSZ BIT(4)
+#define MT8195_SVPP3_MDP_RSZ BIT(5)
+
+static const struct mtk_mmsys_routes mmsys_mt8195_routing_table[] = {
+ {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
+ MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_RDMA0,
+ MT8195_MOUT_DISP_OVL0_TO_DISP_RDMA0
+ }, {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_WDMA0,
+ MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_WDMA0,
+ MT8195_MOUT_DISP_OVL0_TO_DISP_WDMA0
+ }, {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_OVL1,
+ MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_OVL1,
+ MT8195_MOUT_DISP_OVL0_TO_DISP_OVL1
+ }, {
+ DDP_COMPONENT_OVL1, DDP_COMPONENT_RDMA1,
+ MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_RDMA1,
+ MT8195_MOUT_DISP_OVL1_TO_DISP_RDMA1
+ }, {
+ DDP_COMPONENT_OVL1, DDP_COMPONENT_WDMA1,
+ MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_WDMA1,
+ MT8195_MOUT_DISP_OVL1_TO_DISP_WDMA1
+ }, {
+ DDP_COMPONENT_OVL1, DDP_COMPONENT_OVL0,
+ MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_OVL0,
+ MT8195_MOUT_DISP_OVL1_TO_DISP_OVL0
+ }, {
+ DDP_COMPONENT_DSC0, DDP_COMPONENT_MERGE0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK,
+ MT8195_SEL_IN_VPP_MERGE_FROM_DSC_WRAP0_OUT
+ }, {
+ DDP_COMPONENT_DITHER1, DDP_COMPONENT_MERGE0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK,
+ MT8195_SEL_IN_VPP_MERGE_FROM_DISP_DITHER1
+ }, {
+ DDP_COMPONENT_MERGE5, DDP_COMPONENT_MERGE0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK,
+ MT8195_SEL_IN_VPP_MERGE_FROM_VDO1_VIRTUAL0
+ }, {
+ DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSC0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP0_IN_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP0_IN_FROM_DISP_DITHER0
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP0_IN_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP0_IN_FROM_VPP_MERGE
+ }, {
+ DDP_COMPONENT_DITHER1, DDP_COMPONENT_DSC1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_IN_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_IN_FROM_DISP_DITHER1
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_IN_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_IN_FROM_VPP_MERGE
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE
+ }, {
+ DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT
+ }, {
+ DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT
+ }, {
+ DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT
+ }, {
+ DDP_COMPONENT_DSC0, DDP_COMPONENT_DP_INTF1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT
+ }, {
+ DDP_COMPONENT_DSC0, DDP_COMPONENT_DPI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT
+ }, {
+ DDP_COMPONENT_DSC0, DDP_COMPONENT_DPI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT
+ }, {
+ DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK,
+ MT8195_SEL_IN_DP_INTF0_FROM_DSC_WRAP1_OUT
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK,
+ MT8195_SEL_IN_DP_INTF0_FROM_VPP_MERGE
+ }, {
+ DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK,
+ MT8195_SEL_IN_DP_INTF0_FROM_VDO1_VIRTUAL0
+ }, {
+ DDP_COMPONENT_DSC0, DDP_COMPONENT_DSI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI0_FROM_MASK,
+ MT8195_SEL_IN_DSI0_FROM_DSC_WRAP0_OUT
+ }, {
+ DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI0_FROM_MASK,
+ MT8195_SEL_IN_DSI0_FROM_DISP_DITHER0
+ }, {
+ DDP_COMPONENT_DSC1, DDP_COMPONENT_DSI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI1_FROM_MASK,
+ MT8195_SEL_IN_DSI1_FROM_DSC_WRAP1_OUT
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI1_FROM_MASK,
+ MT8195_SEL_IN_DSI1_FROM_VPP_MERGE
+ }, {
+ DDP_COMPONENT_OVL1, DDP_COMPONENT_WDMA1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA1_FROM_MASK,
+ MT8195_SEL_IN_DISP_WDMA1_FROM_DISP_OVL1
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_WDMA1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA1_FROM_MASK,
+ MT8195_SEL_IN_DISP_WDMA1_FROM_VPP_MERGE
+ }, {
+ DDP_COMPONENT_DSC1, DDP_COMPONENT_DSI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
+ }, {
+ DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
+ }, {
+ DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
+ }, {
+ DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
+ }, {
+ DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
+ }, {
+ DDP_COMPONENT_DSC1, DDP_COMPONENT_MERGE0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
+ }, {
+ DDP_COMPONENT_DITHER1, DDP_COMPONENT_DSI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1
+ }, {
+ DDP_COMPONENT_DITHER1, DDP_COMPONENT_DP_INTF0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1
+ }, {
+ DDP_COMPONENT_DITHER1, DDP_COMPONENT_DPI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1
+ }, {
+ DDP_COMPONENT_DITHER1, DDP_COMPONENT_DPI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1
+ }, {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_WDMA0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA0_FROM_MASK,
+ MT8195_SEL_IN_DISP_WDMA0_FROM_DISP_OVL0
+ }, {
+ DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSC0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER0_TO_MASK,
+ MT8195_SOUT_DISP_DITHER0_TO_DSC_WRAP0_IN
+ }, {
+ DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER0_TO_MASK,
+ MT8195_SOUT_DISP_DITHER0_TO_DSI0
+ }, {
+ DDP_COMPONENT_DITHER1, DDP_COMPONENT_DSC1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_IN
+ }, {
+ DDP_COMPONENT_DITHER1, DDP_COMPONENT_MERGE0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_VPP_MERGE
+ }, {
+ DDP_COMPONENT_DITHER1, DDP_COMPONENT_DSI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT
+ }, {
+ DDP_COMPONENT_DITHER1, DDP_COMPONENT_DP_INTF0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT
+ }, {
+ DDP_COMPONENT_DITHER1, DDP_COMPONENT_DP_INTF1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT
+ }, {
+ DDP_COMPONENT_DITHER1, DDP_COMPONENT_DPI0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT
+ }, {
+ DDP_COMPONENT_DITHER1, DDP_COMPONENT_DPI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT
+ }, {
+ DDP_COMPONENT_MERGE5, DDP_COMPONENT_MERGE0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VDO1_VIRTUAL0_TO_MASK,
+ MT8195_SOUT_VDO1_VIRTUAL0_TO_VPP_MERGE
+ }, {
+ DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VDO1_VIRTUAL0_TO_MASK,
+ MT8195_SOUT_VDO1_VIRTUAL0_TO_DP_INTF0
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_DSI1
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_DP_INTF0
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_WDMA1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_DISP_WDMA1
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP0_IN
+ }, {
+ DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP1_IN_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP1_IN
+ }, {
+ DDP_COMPONENT_DSC0, DDP_COMPONENT_DSI0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP0_OUT_TO_DSI0
+ }, {
+ DDP_COMPONENT_DSC0, DDP_COMPONENT_DP_INTF1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0
+ }, {
+ DDP_COMPONENT_DSC0, DDP_COMPONENT_DPI0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0
+ }, {
+ DDP_COMPONENT_DSC0, DDP_COMPONENT_DPI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0
+ }, {
+ DDP_COMPONENT_DSC0, DDP_COMPONENT_MERGE0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP0_OUT_TO_VPP_MERGE
+ }, {
+ DDP_COMPONENT_DSC1, DDP_COMPONENT_DSI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP1_OUT_TO_DSI1
+ }, {
+ DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP1_OUT_TO_DP_INTF0
+ }, {
+ DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0
+ }, {
+ DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0
+ }, {
+ DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0
+ }, {
+ DDP_COMPONENT_DSC1, DDP_COMPONENT_MERGE0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP1_OUT_TO_VPP_MERGE
+ }
+};
+
+static const struct mtk_mmsys_routes mmsys_mt8195_vdo1_routing_table[] = {
+ {
+ DDP_COMPONENT_MDP_RDMA0, DDP_COMPONENT_MERGE1,
+ MT8195_VDO1_VPP_MERGE0_P0_SEL_IN, GENMASK(0, 0),
+ MT8195_VPP_MERGE0_P0_SEL_IN_FROM_MDP_RDMA0
+ }, {
+ DDP_COMPONENT_MDP_RDMA1, DDP_COMPONENT_MERGE1,
+ MT8195_VDO1_VPP_MERGE0_P1_SEL_IN, GENMASK(0, 0),
+ MT8195_VPP_MERGE0_P1_SEL_IN_FROM_MDP_RDMA1
+ }, {
+ DDP_COMPONENT_MDP_RDMA2, DDP_COMPONENT_MERGE2,
+ MT8195_VDO1_VPP_MERGE1_P0_SEL_IN, GENMASK(0, 0),
+ MT8195_VPP_MERGE1_P0_SEL_IN_FROM_MDP_RDMA2
+ }, {
+ DDP_COMPONENT_MERGE1, DDP_COMPONENT_ETHDR_MIXER,
+ MT8195_VDO1_MERGE0_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8195_SOUT_TO_MIXER_IN1_SEL
+ }, {
+ DDP_COMPONENT_MERGE2, DDP_COMPONENT_ETHDR_MIXER,
+ MT8195_VDO1_MERGE1_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8195_SOUT_TO_MIXER_IN2_SEL
+ }, {
+ DDP_COMPONENT_MERGE3, DDP_COMPONENT_ETHDR_MIXER,
+ MT8195_VDO1_MERGE2_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8195_SOUT_TO_MIXER_IN3_SEL
+ }, {
+ DDP_COMPONENT_MERGE4, DDP_COMPONENT_ETHDR_MIXER,
+ MT8195_VDO1_MERGE3_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8195_SOUT_TO_MIXER_IN4_SEL
+ }, {
+ DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5,
+ MT8195_VDO1_MIXER_OUT_SOUT_SEL, GENMASK(0, 0),
+ MT8195_MIXER_SOUT_TO_MERGE4_ASYNC_SEL
+ }, {
+ DDP_COMPONENT_MERGE1, DDP_COMPONENT_ETHDR_MIXER,
+ MT8195_VDO1_MIXER_IN1_SEL_IN, GENMASK(0, 0),
+ MT8195_MIXER_IN1_SEL_IN_FROM_MERGE0_ASYNC_SOUT
+ }, {
+ DDP_COMPONENT_MERGE2, DDP_COMPONENT_ETHDR_MIXER,
+ MT8195_VDO1_MIXER_IN2_SEL_IN, GENMASK(0, 0),
+ MT8195_MIXER_IN2_SEL_IN_FROM_MERGE1_ASYNC_SOUT
+ }, {
+ DDP_COMPONENT_MERGE3, DDP_COMPONENT_ETHDR_MIXER,
+ MT8195_VDO1_MIXER_IN3_SEL_IN, GENMASK(0, 0),
+ MT8195_MIXER_IN3_SEL_IN_FROM_MERGE2_ASYNC_SOUT
+ }, {
+ DDP_COMPONENT_MERGE4, DDP_COMPONENT_ETHDR_MIXER,
+ MT8195_VDO1_MIXER_IN4_SEL_IN, GENMASK(0, 0),
+ MT8195_MIXER_IN4_SEL_IN_FROM_MERGE3_ASYNC_SOUT
+ }, {
+ DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5,
+ MT8195_VDO1_MIXER_SOUT_SEL_IN, GENMASK(2, 0),
+ MT8195_MIXER_SOUT_SEL_IN_FROM_DISP_MIXER
+ }, {
+ DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5,
+ MT8195_VDO1_MERGE4_ASYNC_SEL_IN, GENMASK(2, 0),
+ MT8195_MERGE4_ASYNC_SEL_IN_FROM_MIXER_OUT_SOUT
+ }, {
+ DDP_COMPONENT_MERGE5, DDP_COMPONENT_DPI1,
+ MT8195_VDO1_DISP_DPI1_SEL_IN, GENMASK(1, 0),
+ MT8195_DISP_DPI1_SEL_IN_FROM_VPP_MERGE4_MOUT
+ }, {
+ DDP_COMPONENT_MERGE5, DDP_COMPONENT_DPI1,
+ MT8195_VDO1_MERGE4_SOUT_SEL, GENMASK(1, 0),
+ MT8195_MERGE4_SOUT_TO_DPI1_SEL
+ }, {
+ DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF1,
+ MT8195_VDO1_DISP_DP_INTF0_SEL_IN, GENMASK(1, 0),
+ MT8195_DISP_DP_INTF0_SEL_IN_FROM_VPP_MERGE4_MOUT
+ }, {
+ DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF1,
+ MT8195_VDO1_MERGE4_SOUT_SEL, GENMASK(1, 0),
+ MT8195_MERGE4_SOUT_TO_DP_INTF0_SEL
+ }
+};
+#endif /* __SOC_MEDIATEK_MT8195_MMSYS_H */
diff --git a/drivers/soc/mediatek/mt8365-mmsys.h b/drivers/soc/mediatek/mt8365-mmsys.h
new file mode 100644
index 0000000000..7abaf048d9
--- /dev/null
+++ b/drivers/soc/mediatek/mt8365-mmsys.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SOC_MEDIATEK_MT8365_MMSYS_H
+#define __SOC_MEDIATEK_MT8365_MMSYS_H
+
+#define MT8365_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN 0xf3c
+#define MT8365_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL 0xf4c
+#define MT8365_DISP_REG_CONFIG_DISP_DITHER0_MOUT_EN 0xf50
+#define MT8365_DISP_REG_CONFIG_DISP_RDMA0_SEL_IN 0xf54
+#define MT8365_DISP_REG_CONFIG_DISP_RDMA0_RSZ0_SEL_IN 0xf60
+#define MT8365_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN 0xf64
+#define MT8365_DISP_REG_CONFIG_DISP_DSI0_SEL_IN 0xf68
+#define MT8365_DISP_REG_CONFIG_DISP_RDMA1_SOUT_SEL 0xfd0
+#define MT8365_DISP_REG_CONFIG_DISP_DPI0_SEL_IN 0xfd8
+#define MT8365_DISP_REG_CONFIG_DISP_LVDS_SYS_CFG_00 0xfdc
+
+#define MT8365_RDMA0_SOUT_COLOR0 0x1
+#define MT8365_DITHER_MOUT_EN_DSI0 0x1
+#define MT8365_DSI0_SEL_IN_DITHER 0x1
+#define MT8365_RDMA0_SEL_IN_OVL0 0x0
+#define MT8365_RDMA0_RSZ0_SEL_IN_RDMA0 0x0
+#define MT8365_DISP_COLOR_SEL_IN_COLOR0 0x0
+#define MT8365_OVL0_MOUT_PATH0_SEL BIT(0)
+#define MT8365_RDMA1_SOUT_DPI0 0x1
+#define MT8365_DPI0_SEL_IN_RDMA1 0x0
+#define MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK 0x1
+
+static const struct mtk_mmsys_routes mt8365_mmsys_routing_table[] = {
+ {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
+ MT8365_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN,
+ MT8365_OVL0_MOUT_PATH0_SEL, MT8365_OVL0_MOUT_PATH0_SEL
+ },
+ {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
+ MT8365_DISP_REG_CONFIG_DISP_RDMA0_SEL_IN,
+ MT8365_RDMA0_SEL_IN_OVL0, MT8365_RDMA0_SEL_IN_OVL0
+ },
+ {
+ DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
+ MT8365_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL,
+ MT8365_RDMA0_SOUT_COLOR0, MT8365_RDMA0_SOUT_COLOR0
+ },
+ {
+ DDP_COMPONENT_COLOR0, DDP_COMPONENT_CCORR,
+ MT8365_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN,
+ MT8365_DISP_COLOR_SEL_IN_COLOR0, MT8365_DISP_COLOR_SEL_IN_COLOR0
+ },
+ {
+ DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
+ MT8365_DISP_REG_CONFIG_DISP_DITHER0_MOUT_EN,
+ MT8365_DITHER_MOUT_EN_DSI0, MT8365_DITHER_MOUT_EN_DSI0
+ },
+ {
+ DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
+ MT8365_DISP_REG_CONFIG_DISP_DSI0_SEL_IN,
+ MT8365_DSI0_SEL_IN_DITHER, MT8365_DSI0_SEL_IN_DITHER
+ },
+ {
+ DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
+ MT8365_DISP_REG_CONFIG_DISP_RDMA0_RSZ0_SEL_IN,
+ MT8365_RDMA0_RSZ0_SEL_IN_RDMA0, MT8365_RDMA0_RSZ0_SEL_IN_RDMA0
+ },
+ {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
+ MT8365_DISP_REG_CONFIG_DISP_LVDS_SYS_CFG_00,
+ MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK, MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK
+ },
+ {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
+ MT8365_DISP_REG_CONFIG_DISP_DPI0_SEL_IN,
+ MT8365_DPI0_SEL_IN_RDMA1, MT8365_DPI0_SEL_IN_RDMA1
+ },
+ {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
+ MT8365_DISP_REG_CONFIG_DISP_RDMA1_SOUT_SEL,
+ MT8365_RDMA1_SOUT_DPI0, MT8365_RDMA1_SOUT_DPI0
+ },
+};
+
+#endif /* __SOC_MEDIATEK_MT8365_MMSYS_H */
diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c
new file mode 100644
index 0000000000..b0cd071c47
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-cmdq-helper.c
@@ -0,0 +1,444 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+
+#include <linux/completion.h>
+#include <linux/errno.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/mailbox_controller.h>
+#include <linux/of.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
+
+#define CMDQ_WRITE_ENABLE_MASK BIT(0)
+#define CMDQ_POLL_ENABLE_MASK BIT(0)
+#define CMDQ_EOC_IRQ_EN BIT(0)
+#define CMDQ_REG_TYPE 1
+#define CMDQ_JUMP_RELATIVE 1
+
+struct cmdq_instruction {
+ union {
+ u32 value;
+ u32 mask;
+ struct {
+ u16 arg_c;
+ u16 src_reg;
+ };
+ };
+ union {
+ u16 offset;
+ u16 event;
+ u16 reg_dst;
+ };
+ union {
+ u8 subsys;
+ struct {
+ u8 sop:5;
+ u8 arg_c_t:1;
+ u8 src_t:1;
+ u8 dst_t:1;
+ };
+ };
+ u8 op;
+};
+
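+/*
+ * Parse entry @idx of the client's "mediatek,gce-client-reg" phandle list
+ * and fill in the GCE subsys id, register offset and size for that client.
+ */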
+int cmdq_dev_get_client_reg(struct device *dev,
+ struct cmdq_client_reg *client_reg, int idx)
+{
+ struct of_phandle_args spec;
+ int err;
+
+ if (!client_reg)
+ return -ENOENT;
+
+ err = of_parse_phandle_with_fixed_args(dev->of_node,
+ "mediatek,gce-client-reg",
+ 3, idx, &spec);
+ if (err < 0) {
+ dev_err(dev,
+ "error %d can't parse gce-client-reg property (%d)",
+ err, idx);
+
+ return err;
+ }
+
+ client_reg->subsys = (u8)spec.args[0];
+ client_reg->offset = (u16)spec.args[1];
+ client_reg->size = (u16)spec.args[2];
+ of_node_put(spec.np);
+
+ return 0;
+}
+EXPORT_SYMBOL(cmdq_dev_get_client_reg);
+
+struct cmdq_client *cmdq_mbox_create(struct device *dev, int index)
+{
+ struct cmdq_client *client;
+
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return (struct cmdq_client *)-ENOMEM;
+
+ client->client.dev = dev;
+ client->client.tx_block = false;
+ client->client.knows_txdone = true;
+ client->chan = mbox_request_channel(&client->client, index);
+
+ if (IS_ERR(client->chan)) {
+ long err;
+
+ dev_err(dev, "failed to request channel\n");
+ err = PTR_ERR(client->chan);
+ kfree(client);
+
+ return ERR_PTR(err);
+ }
+
+ return client;
+}
+EXPORT_SYMBOL(cmdq_mbox_create);
+
+void cmdq_mbox_destroy(struct cmdq_client *client)
+{
+ mbox_free_channel(client->chan);
+ kfree(client);
+}
+EXPORT_SYMBOL(cmdq_mbox_destroy);
+
+struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size)
+{
+ struct cmdq_pkt *pkt;
+ struct device *dev;
+ dma_addr_t dma_addr;
+
+ pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
+ if (!pkt)
+ return ERR_PTR(-ENOMEM);
+ pkt->va_base = kzalloc(size, GFP_KERNEL);
+ if (!pkt->va_base) {
+ kfree(pkt);
+ return ERR_PTR(-ENOMEM);
+ }
+ pkt->buf_size = size;
+ pkt->cl = (void *)client;
+
+ dev = client->chan->mbox->dev;
+ dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
+ kfree(pkt->va_base);
+ kfree(pkt);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pkt->pa_base = dma_addr;
+
+ return pkt;
+}
+EXPORT_SYMBOL(cmdq_pkt_create);
+
+void cmdq_pkt_destroy(struct cmdq_pkt *pkt)
+{
+ struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
+
+ dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
+ DMA_TO_DEVICE);
+ kfree(pkt->va_base);
+ kfree(pkt);
+}
+EXPORT_SYMBOL(cmdq_pkt_destroy);
+
+static int cmdq_pkt_append_command(struct cmdq_pkt *pkt,
+ struct cmdq_instruction inst)
+{
+ struct cmdq_instruction *cmd_ptr;
+
+ if (unlikely(pkt->cmd_buf_size + CMDQ_INST_SIZE > pkt->buf_size)) {
+ /*
+ * When the allocated buffer size (pkt->buf_size) is used up, the
+ * real required size (pkt->cmd_buf_size) is still increased, so
+ * that the user knows how much memory is ultimately needed after
+ * appending all commands and flushing the command packet.
+ * Therefore, the user can call cmdq_pkt_create() again with the
+ * real required buffer size.
+ */
+ pkt->cmd_buf_size += CMDQ_INST_SIZE;
+ WARN_ONCE(1, "%s: buffer size %u is too small !\n",
+ __func__, (u32)pkt->buf_size);
+ return -ENOMEM;
+ }
+
+ cmd_ptr = pkt->va_base + pkt->cmd_buf_size;
+ *cmd_ptr = inst;
+ pkt->cmd_buf_size += CMDQ_INST_SIZE;
+
+ return 0;
+}
+
+int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
+{
+ struct cmdq_instruction inst;
+
+ inst.op = CMDQ_CODE_WRITE;
+ inst.value = value;
+ inst.offset = offset;
+ inst.subsys = subsys;
+
+ return cmdq_pkt_append_command(pkt, inst);
+}
+EXPORT_SYMBOL(cmdq_pkt_write);
+
+int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
+ u16 offset, u32 value, u32 mask)
+{
+ struct cmdq_instruction inst = { {0} };
+ u16 offset_mask = offset;
+ int err;
+
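+ /*
+ * A partial update is encoded as two instructions: a MASK instruction
+ * carrying the inverted mask, followed by a WRITE with the write-enable
+ * bit set in the offset so only the selected bits are changed.
+ */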
+ if (mask != 0xffffffff) {
+ inst.op = CMDQ_CODE_MASK;
+ inst.mask = ~mask;
+ err = cmdq_pkt_append_command(pkt, inst);
+ if (err < 0)
+ return err;
+
+ offset_mask |= CMDQ_WRITE_ENABLE_MASK;
+ }
+ err = cmdq_pkt_write(pkt, subsys, offset_mask, value);
+
+ return err;
+}
+EXPORT_SYMBOL(cmdq_pkt_write_mask);
+
+int cmdq_pkt_read_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx, u16 addr_low,
+ u16 reg_idx)
+{
+ struct cmdq_instruction inst = {};
+
+ inst.op = CMDQ_CODE_READ_S;
+ inst.dst_t = CMDQ_REG_TYPE;
+ inst.sop = high_addr_reg_idx;
+ inst.reg_dst = reg_idx;
+ inst.src_reg = addr_low;
+
+ return cmdq_pkt_append_command(pkt, inst);
+}
+EXPORT_SYMBOL(cmdq_pkt_read_s);
+
+int cmdq_pkt_write_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
+ u16 addr_low, u16 src_reg_idx)
+{
+ struct cmdq_instruction inst = {};
+
+ inst.op = CMDQ_CODE_WRITE_S;
+ inst.src_t = CMDQ_REG_TYPE;
+ inst.sop = high_addr_reg_idx;
+ inst.offset = addr_low;
+ inst.src_reg = src_reg_idx;
+
+ return cmdq_pkt_append_command(pkt, inst);
+}
+EXPORT_SYMBOL(cmdq_pkt_write_s);
+
+int cmdq_pkt_write_s_mask(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
+ u16 addr_low, u16 src_reg_idx, u32 mask)
+{
+ struct cmdq_instruction inst = {};
+ int err;
+
+ inst.op = CMDQ_CODE_MASK;
+ inst.mask = ~mask;
+ err = cmdq_pkt_append_command(pkt, inst);
+ if (err < 0)
+ return err;
+
+ inst.mask = 0;
+ inst.op = CMDQ_CODE_WRITE_S_MASK;
+ inst.src_t = CMDQ_REG_TYPE;
+ inst.sop = high_addr_reg_idx;
+ inst.offset = addr_low;
+ inst.src_reg = src_reg_idx;
+
+ return cmdq_pkt_append_command(pkt, inst);
+}
+EXPORT_SYMBOL(cmdq_pkt_write_s_mask);
+
+int cmdq_pkt_write_s_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
+ u16 addr_low, u32 value)
+{
+ struct cmdq_instruction inst = {};
+
+ inst.op = CMDQ_CODE_WRITE_S;
+ inst.sop = high_addr_reg_idx;
+ inst.offset = addr_low;
+ inst.value = value;
+
+ return cmdq_pkt_append_command(pkt, inst);
+}
+EXPORT_SYMBOL(cmdq_pkt_write_s_value);
+
+int cmdq_pkt_write_s_mask_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
+ u16 addr_low, u32 value, u32 mask)
+{
+ struct cmdq_instruction inst = {};
+ int err;
+
+ inst.op = CMDQ_CODE_MASK;
+ inst.mask = ~mask;
+ err = cmdq_pkt_append_command(pkt, inst);
+ if (err < 0)
+ return err;
+
+ inst.op = CMDQ_CODE_WRITE_S_MASK;
+ inst.sop = high_addr_reg_idx;
+ inst.offset = addr_low;
+ inst.value = value;
+
+ return cmdq_pkt_append_command(pkt, inst);
+}
+EXPORT_SYMBOL(cmdq_pkt_write_s_mask_value);
+
+int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear)
+{
+ struct cmdq_instruction inst = { {0} };
+ u32 clear_option = clear ? CMDQ_WFE_UPDATE : 0;
+
+ if (event >= CMDQ_MAX_EVENT)
+ return -EINVAL;
+
+ inst.op = CMDQ_CODE_WFE;
+ inst.value = CMDQ_WFE_OPTION | clear_option;
+ inst.event = event;
+
+ return cmdq_pkt_append_command(pkt, inst);
+}
+EXPORT_SYMBOL(cmdq_pkt_wfe);
+
+int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
+{
+ struct cmdq_instruction inst = { {0} };
+
+ if (event >= CMDQ_MAX_EVENT)
+ return -EINVAL;
+
+ inst.op = CMDQ_CODE_WFE;
+ inst.value = CMDQ_WFE_UPDATE;
+ inst.event = event;
+
+ return cmdq_pkt_append_command(pkt, inst);
+}
+EXPORT_SYMBOL(cmdq_pkt_clear_event);
+
+int cmdq_pkt_set_event(struct cmdq_pkt *pkt, u16 event)
+{
+ struct cmdq_instruction inst = {};
+
+ if (event >= CMDQ_MAX_EVENT)
+ return -EINVAL;
+
+ inst.op = CMDQ_CODE_WFE;
+ inst.value = CMDQ_WFE_UPDATE | CMDQ_WFE_UPDATE_VALUE;
+ inst.event = event;
+
+ return cmdq_pkt_append_command(pkt, inst);
+}
+EXPORT_SYMBOL(cmdq_pkt_set_event);
+
+int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
+ u16 offset, u32 value)
+{
+ struct cmdq_instruction inst = { {0} };
+ int err;
+
+ inst.op = CMDQ_CODE_POLL;
+ inst.value = value;
+ inst.offset = offset;
+ inst.subsys = subsys;
+ err = cmdq_pkt_append_command(pkt, inst);
+
+ return err;
+}
+EXPORT_SYMBOL(cmdq_pkt_poll);
+
+int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
+ u16 offset, u32 value, u32 mask)
+{
+ struct cmdq_instruction inst = { {0} };
+ int err;
+
+ inst.op = CMDQ_CODE_MASK;
+ inst.mask = ~mask;
+ err = cmdq_pkt_append_command(pkt, inst);
+ if (err < 0)
+ return err;
+
+ offset = offset | CMDQ_POLL_ENABLE_MASK;
+ err = cmdq_pkt_poll(pkt, subsys, offset, value);
+
+ return err;
+}
+EXPORT_SYMBOL(cmdq_pkt_poll_mask);
+
+int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value)
+{
+ struct cmdq_instruction inst = {};
+
+ inst.op = CMDQ_CODE_LOGIC;
+ inst.dst_t = CMDQ_REG_TYPE;
+ inst.reg_dst = reg_idx;
+ inst.value = value;
+ return cmdq_pkt_append_command(pkt, inst);
+}
+EXPORT_SYMBOL(cmdq_pkt_assign);
+
+int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr)
+{
+ struct cmdq_instruction inst = {};
+
+ inst.op = CMDQ_CODE_JUMP;
+ inst.offset = CMDQ_JUMP_RELATIVE;
+ inst.value = addr >>
+ cmdq_get_shift_pa(((struct cmdq_client *)pkt->cl)->chan);
+ return cmdq_pkt_append_command(pkt, inst);
+}
+EXPORT_SYMBOL(cmdq_pkt_jump);
+
+int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
+{
+ struct cmdq_instruction inst = { {0} };
+ int err;
+
+ /* insert EOC and generate IRQ for each command iteration */
+ inst.op = CMDQ_CODE_EOC;
+ inst.value = CMDQ_EOC_IRQ_EN;
+ err = cmdq_pkt_append_command(pkt, inst);
+ if (err < 0)
+ return err;
+
+ /* JUMP to end */
+ inst.op = CMDQ_CODE_JUMP;
+ inst.value = CMDQ_JUMP_PASS >>
+ cmdq_get_shift_pa(((struct cmdq_client *)pkt->cl)->chan);
+ err = cmdq_pkt_append_command(pkt, inst);
+
+ return err;
+}
+EXPORT_SYMBOL(cmdq_pkt_finalize);
+
+int cmdq_pkt_flush_async(struct cmdq_pkt *pkt)
+{
+ int err;
+ struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
+
+ err = mbox_send_message(client->chan, pkt);
+ if (err < 0)
+ return err;
+ /* We can send next packet immediately, so just call txdone. */
+ mbox_client_txdone(client->chan, 0);
+
+ return 0;
+}
+EXPORT_SYMBOL(cmdq_pkt_flush_async);
+
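+/*
+ * Illustrative usage sketch (documentation only, not executed here): a client
+ * that already owns a struct cmdq_pkt, e.g. obtained via cmdq_pkt_create(),
+ * could queue a masked register write gated on a hardware event and then
+ * submit the packet:
+ *
+ *	cmdq_pkt_wfe(pkt, event_id, true);
+ *	cmdq_pkt_write_mask(pkt, subsys, offset, value, mask);
+ *	cmdq_pkt_finalize(pkt);
+ *	cmdq_pkt_flush_async(pkt);
+ *
+ * event_id, subsys, offset, value and mask stand for client-specific
+ * parameters.
+ */
+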
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/mediatek/mtk-devapc.c b/drivers/soc/mediatek/mtk-devapc.c
new file mode 100644
index 0000000000..b28feb9675
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-devapc.c
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 MediaTek Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+
+#define VIO_MOD_TO_REG_IND(m) ((m) / 32)
+#define VIO_MOD_TO_REG_OFF(m) ((m) % 32)
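+
+/*
+ * Illustrative example: violation index 70 lives in status/mask register
+ * index VIO_MOD_TO_REG_IND(70) = 2, at bit VIO_MOD_TO_REG_OFF(70) = 6.
+ */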
+
+struct mtk_devapc_vio_dbgs {
+ union {
+ u32 vio_dbg0;
+ struct {
+ u32 mstid:16;
+ u32 dmnid:6;
+ u32 vio_w:1;
+ u32 vio_r:1;
+ u32 addr_h:4;
+ u32 resv:4;
+ } dbg0_bits;
+ };
+
+ u32 vio_dbg1;
+};
+
+struct mtk_devapc_regs_ofs {
+ /* reg offset */
+ u32 vio_mask_offset;
+ u32 vio_sta_offset;
+ u32 vio_dbg0_offset;
+ u32 vio_dbg1_offset;
+ u32 apc_con_offset;
+ u32 vio_shift_sta_offset;
+ u32 vio_shift_sel_offset;
+ u32 vio_shift_con_offset;
+};
+
+struct mtk_devapc_data {
+	/* number of violation indices */
+ u32 vio_idx_num;
+ const struct mtk_devapc_regs_ofs *regs_ofs;
+};
+
+struct mtk_devapc_context {
+ struct device *dev;
+ void __iomem *infra_base;
+ struct clk *infra_clk;
+ const struct mtk_devapc_data *data;
+};
+
+static void clear_vio_status(struct mtk_devapc_context *ctx)
+{
+ void __iomem *reg;
+ int i;
+
+ reg = ctx->infra_base + ctx->data->regs_ofs->vio_sta_offset;
+
+ for (i = 0; i < VIO_MOD_TO_REG_IND(ctx->data->vio_idx_num) - 1; i++)
+ writel(GENMASK(31, 0), reg + 4 * i);
+
+ writel(GENMASK(VIO_MOD_TO_REG_OFF(ctx->data->vio_idx_num) - 1, 0),
+ reg + 4 * i);
+}
+
+static void mask_module_irq(struct mtk_devapc_context *ctx, bool mask)
+{
+ void __iomem *reg;
+ u32 val;
+ int i;
+
+ reg = ctx->infra_base + ctx->data->regs_ofs->vio_mask_offset;
+
+ if (mask)
+ val = GENMASK(31, 0);
+ else
+ val = 0;
+
+ for (i = 0; i < VIO_MOD_TO_REG_IND(ctx->data->vio_idx_num) - 1; i++)
+ writel(val, reg + 4 * i);
+
+ val = readl(reg + 4 * i);
+ if (mask)
+ val |= GENMASK(VIO_MOD_TO_REG_OFF(ctx->data->vio_idx_num) - 1,
+ 0);
+ else
+ val &= ~GENMASK(VIO_MOD_TO_REG_OFF(ctx->data->vio_idx_num) - 1,
+ 0);
+
+ writel(val, reg + 4 * i);
+}
+
+#define PHY_DEVAPC_TIMEOUT 0x10000
+
+/*
+ * devapc_sync_vio_dbg - use the "shift" mechanism to get full violation information.
+ *                       The shift mechanism depends on the devapc hardware design:
+ *                       MediaTek devapc groups multiple slaves together, and when a
+ *                       violation is triggered the violation info is kept inside the
+ *                       devapc hardware. The driver must perform the shift mechanism
+ *                       to sync the full violation info into the VIO_DBG registers.
+ */
+static int devapc_sync_vio_dbg(struct mtk_devapc_context *ctx)
+{
+ void __iomem *pd_vio_shift_sta_reg;
+ void __iomem *pd_vio_shift_sel_reg;
+ void __iomem *pd_vio_shift_con_reg;
+ int min_shift_group;
+ int ret;
+ u32 val;
+
+ pd_vio_shift_sta_reg = ctx->infra_base +
+ ctx->data->regs_ofs->vio_shift_sta_offset;
+ pd_vio_shift_sel_reg = ctx->infra_base +
+ ctx->data->regs_ofs->vio_shift_sel_offset;
+ pd_vio_shift_con_reg = ctx->infra_base +
+ ctx->data->regs_ofs->vio_shift_con_offset;
+
+ /* Find the minimum shift group which has violation */
+ val = readl(pd_vio_shift_sta_reg);
+ if (!val)
+ return false;
+
+ min_shift_group = __ffs(val);
+
+ /* Assign the group to sync */
+ writel(0x1 << min_shift_group, pd_vio_shift_sel_reg);
+
+ /* Start syncing */
+ writel(0x1, pd_vio_shift_con_reg);
+
+ ret = readl_poll_timeout(pd_vio_shift_con_reg, val, val == 0x3, 0,
+ PHY_DEVAPC_TIMEOUT);
+ if (ret) {
+ dev_err(ctx->dev, "%s: Shift violation info failed\n", __func__);
+ return false;
+ }
+
+ /* Stop syncing */
+ writel(0x0, pd_vio_shift_con_reg);
+
+ /* Write clear */
+ writel(0x1 << min_shift_group, pd_vio_shift_sta_reg);
+
+ return true;
+}
+
+/*
+ * devapc_extract_vio_dbg - extract the full violation information after the
+ *                          shift mechanism has completed.
+ */
+static void devapc_extract_vio_dbg(struct mtk_devapc_context *ctx)
+{
+ struct mtk_devapc_vio_dbgs vio_dbgs;
+ void __iomem *vio_dbg0_reg;
+ void __iomem *vio_dbg1_reg;
+
+ vio_dbg0_reg = ctx->infra_base + ctx->data->regs_ofs->vio_dbg0_offset;
+ vio_dbg1_reg = ctx->infra_base + ctx->data->regs_ofs->vio_dbg1_offset;
+
+ vio_dbgs.vio_dbg0 = readl(vio_dbg0_reg);
+ vio_dbgs.vio_dbg1 = readl(vio_dbg1_reg);
+
+ /* Print violation information */
+ if (vio_dbgs.dbg0_bits.vio_w)
+ dev_info(ctx->dev, "Write Violation\n");
+ else if (vio_dbgs.dbg0_bits.vio_r)
+ dev_info(ctx->dev, "Read Violation\n");
+
+ dev_info(ctx->dev, "Bus ID:0x%x, Dom ID:0x%x, Vio Addr:0x%x\n",
+ vio_dbgs.dbg0_bits.mstid, vio_dbgs.dbg0_bits.dmnid,
+ vio_dbgs.vio_dbg1);
+}
+
+/*
+ * devapc_violation_irq - the devapc Interrupt Service Routine (ISR) dumps the
+ *                        violation information, including which master made an
+ *                        illegal access to which slave.
+ */
+static irqreturn_t devapc_violation_irq(int irq_number, void *data)
+{
+ struct mtk_devapc_context *ctx = data;
+
+ while (devapc_sync_vio_dbg(ctx))
+ devapc_extract_vio_dbg(ctx);
+
+ clear_vio_status(ctx);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * start_devapc - unmask the slaves' IRQs to start receiving devapc violations.
+ */
+static void start_devapc(struct mtk_devapc_context *ctx)
+{
+ writel(BIT(31), ctx->infra_base + ctx->data->regs_ofs->apc_con_offset);
+
+ mask_module_irq(ctx, false);
+}
+
+/*
+ * stop_devapc - mask the slaves' IRQs to stop the devapc service.
+ */
+static void stop_devapc(struct mtk_devapc_context *ctx)
+{
+ mask_module_irq(ctx, true);
+
+ writel(BIT(2), ctx->infra_base + ctx->data->regs_ofs->apc_con_offset);
+}
+
+static const struct mtk_devapc_regs_ofs devapc_regs_ofs_mt6779 = {
+ .vio_mask_offset = 0x0,
+ .vio_sta_offset = 0x400,
+ .vio_dbg0_offset = 0x900,
+ .vio_dbg1_offset = 0x904,
+ .apc_con_offset = 0xF00,
+ .vio_shift_sta_offset = 0xF10,
+ .vio_shift_sel_offset = 0xF14,
+ .vio_shift_con_offset = 0xF20,
+};
+
+static const struct mtk_devapc_data devapc_mt6779 = {
+ .vio_idx_num = 511,
+ .regs_ofs = &devapc_regs_ofs_mt6779,
+};
+
+static const struct mtk_devapc_data devapc_mt8186 = {
+ .vio_idx_num = 519,
+ .regs_ofs = &devapc_regs_ofs_mt6779,
+};
+
+static const struct of_device_id mtk_devapc_dt_match[] = {
+ {
+ .compatible = "mediatek,mt6779-devapc",
+ .data = &devapc_mt6779,
+ }, {
+ .compatible = "mediatek,mt8186-devapc",
+ .data = &devapc_mt8186,
+ }, {
+ },
+};
+MODULE_DEVICE_TABLE(of, mtk_devapc_dt_match);
+
+static int mtk_devapc_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct mtk_devapc_context *ctx;
+ u32 devapc_irq;
+ int ret;
+
+ if (IS_ERR(node))
+ return -ENODEV;
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->data = of_device_get_match_data(&pdev->dev);
+ ctx->dev = &pdev->dev;
+
+ ctx->infra_base = of_iomap(node, 0);
+ if (!ctx->infra_base)
+ return -EINVAL;
+
+ devapc_irq = irq_of_parse_and_map(node, 0);
+ if (!devapc_irq)
+ return -EINVAL;
+
+ ctx->infra_clk = devm_clk_get_enabled(&pdev->dev, "devapc-infra-clock");
+ if (IS_ERR(ctx->infra_clk))
+ return -EINVAL;
+
+ ret = devm_request_irq(&pdev->dev, devapc_irq, devapc_violation_irq,
+ IRQF_TRIGGER_NONE, "devapc", ctx);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, ctx);
+
+ start_devapc(ctx);
+
+ return 0;
+}
+
+static int mtk_devapc_remove(struct platform_device *pdev)
+{
+ struct mtk_devapc_context *ctx = platform_get_drvdata(pdev);
+
+ stop_devapc(ctx);
+
+ return 0;
+}
+
+static struct platform_driver mtk_devapc_driver = {
+ .probe = mtk_devapc_probe,
+ .remove = mtk_devapc_remove,
+ .driver = {
+ .name = "mtk-devapc",
+ .of_match_table = mtk_devapc_dt_match,
+ },
+};
+
+module_platform_driver(mtk_devapc_driver);
+
+MODULE_DESCRIPTION("Mediatek Device APC Driver");
+MODULE_AUTHOR("Neal Liu <neal.liu@mediatek.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/mediatek/mtk-infracfg.c b/drivers/soc/mediatek/mtk-infracfg.c
new file mode 100644
index 0000000000..2acf19676a
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-infracfg.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015 Pengutronix, Sascha Hauer <kernel@pengutronix.de>
+ */
+
+#include <linux/export.h>
+#include <linux/jiffies.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/soc/mediatek/infracfg.h>
+#include <asm/processor.h>
+
+#define MTK_POLL_DELAY_US 10
+#define MTK_POLL_TIMEOUT (jiffies_to_usecs(HZ))
+
+/**
+ * mtk_infracfg_set_bus_protection - enable bus protection
+ * @infracfg: The infracfg regmap
+ * @mask: The mask containing the protection bits to be enabled.
+ * @reg_update: Boolean flag selecting whether the protection bits are set
+ *              via regmap_update_bits() on the enable register (PROTECTEN) or
+ *              via regmap_write() on the set register (PROTECTEN_SET).
+ *
+ * This function enables the bus protection bits for disabled power
+ * domains so that the system does not hang when some unit accesses the
+ * bus while in power down.
+ */
+int mtk_infracfg_set_bus_protection(struct regmap *infracfg, u32 mask,
+ bool reg_update)
+{
+ u32 val;
+ int ret;
+
+ if (reg_update)
+ regmap_update_bits(infracfg, INFRA_TOPAXI_PROTECTEN, mask,
+ mask);
+ else
+ regmap_write(infracfg, INFRA_TOPAXI_PROTECTEN_SET, mask);
+
+ ret = regmap_read_poll_timeout(infracfg, INFRA_TOPAXI_PROTECTSTA1,
+ val, (val & mask) == mask,
+ MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
+
+ return ret;
+}
+
+/**
+ * mtk_infracfg_clear_bus_protection - disable bus protection
+ * @infracfg: The infracfg regmap
+ * @mask: The mask containing the protection bits to be disabled.
+ * @reg_update: Boolean flag selecting whether the protection bits are cleared
+ *              via regmap_update_bits() on the enable register (PROTECTEN) or
+ *              via regmap_write() on the clear register (PROTECTEN_CLR).
+ *
+ * This function disables the bus protection bits previously enabled with
+ * mtk_infracfg_set_bus_protection.
+ */
+int mtk_infracfg_clear_bus_protection(struct regmap *infracfg, u32 mask,
+ bool reg_update)
+{
+ int ret;
+ u32 val;
+
+ if (reg_update)
+ regmap_update_bits(infracfg, INFRA_TOPAXI_PROTECTEN, mask, 0);
+ else
+ regmap_write(infracfg, INFRA_TOPAXI_PROTECTEN_CLR, mask);
+
+ ret = regmap_read_poll_timeout(infracfg, INFRA_TOPAXI_PROTECTSTA1,
+ val, !(val & mask),
+ MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
+
+ return ret;
+}
+
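+/*
+ * Illustrative usage sketch (documentation only, not executed here): a power
+ * domain driver holding the infracfg regmap would typically bracket a domain
+ * power-down with these helpers, where bus_prot_mask is a placeholder for the
+ * SoC-specific protection bits:
+ *
+ *	ret = mtk_infracfg_set_bus_protection(infracfg, bus_prot_mask, false);
+ *	if (ret)
+ *		return ret;
+ *	...power the domain down and back up...
+ *	ret = mtk_infracfg_clear_bus_protection(infracfg, bus_prot_mask, false);
+ */
+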
+static int __init mtk_infracfg_init(void)
+{
+ struct regmap *infracfg;
+
+ /*
+ * MT8192 has an experimental path to route GPU traffic to the DSU's
+ * Accelerator Coherency Port, which is inadvertently enabled by
+ * default. It turns out not to work, so disable it to prevent spurious
+ * GPU faults.
+ */
+ infracfg = syscon_regmap_lookup_by_compatible("mediatek,mt8192-infracfg");
+ if (!IS_ERR(infracfg))
+ regmap_set_bits(infracfg, MT8192_INFRA_CTRL,
+ MT8192_INFRA_CTRL_DISABLE_MFG2ACP);
+ return 0;
+}
+postcore_initcall(mtk_infracfg_init);
diff --git a/drivers/soc/mediatek/mtk-mmsys.c b/drivers/soc/mediatek/mtk-mmsys.c
new file mode 100644
index 0000000000..ffb75711a1
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-mmsys.c
@@ -0,0 +1,458 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: James Liao <jamesjj.liao@mediatek.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/soc/mediatek/mtk-mmsys.h>
+
+#include "mtk-mmsys.h"
+#include "mt8167-mmsys.h"
+#include "mt8173-mmsys.h"
+#include "mt8183-mmsys.h"
+#include "mt8186-mmsys.h"
+#include "mt8188-mmsys.h"
+#include "mt8192-mmsys.h"
+#include "mt8195-mmsys.h"
+#include "mt8365-mmsys.h"
+
+#define MMSYS_SW_RESET_PER_REG 32
+
+static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = {
+ .clk_driver = "clk-mt2701-mm",
+ .routes = mmsys_default_routing_table,
+ .num_routes = ARRAY_SIZE(mmsys_default_routing_table),
+};
+
+static const struct mtk_mmsys_driver_data mt2712_mmsys_driver_data = {
+ .clk_driver = "clk-mt2712-mm",
+ .routes = mmsys_default_routing_table,
+ .num_routes = ARRAY_SIZE(mmsys_default_routing_table),
+};
+
+static const struct mtk_mmsys_driver_data mt6779_mmsys_driver_data = {
+ .clk_driver = "clk-mt6779-mm",
+};
+
+static const struct mtk_mmsys_driver_data mt6795_mmsys_driver_data = {
+ .clk_driver = "clk-mt6795-mm",
+ .routes = mt8173_mmsys_routing_table,
+ .num_routes = ARRAY_SIZE(mt8173_mmsys_routing_table),
+ .sw0_rst_offset = MT8183_MMSYS_SW0_RST_B,
+ .num_resets = 64,
+};
+
+static const struct mtk_mmsys_driver_data mt6797_mmsys_driver_data = {
+ .clk_driver = "clk-mt6797-mm",
+};
+
+static const struct mtk_mmsys_driver_data mt8167_mmsys_driver_data = {
+ .clk_driver = "clk-mt8167-mm",
+ .routes = mt8167_mmsys_routing_table,
+ .num_routes = ARRAY_SIZE(mt8167_mmsys_routing_table),
+};
+
+static const struct mtk_mmsys_driver_data mt8173_mmsys_driver_data = {
+ .clk_driver = "clk-mt8173-mm",
+ .routes = mt8173_mmsys_routing_table,
+ .num_routes = ARRAY_SIZE(mt8173_mmsys_routing_table),
+ .sw0_rst_offset = MT8183_MMSYS_SW0_RST_B,
+ .num_resets = 64,
+};
+
+static const struct mtk_mmsys_driver_data mt8183_mmsys_driver_data = {
+ .clk_driver = "clk-mt8183-mm",
+ .routes = mmsys_mt8183_routing_table,
+ .num_routes = ARRAY_SIZE(mmsys_mt8183_routing_table),
+ .sw0_rst_offset = MT8183_MMSYS_SW0_RST_B,
+ .num_resets = 32,
+};
+
+static const struct mtk_mmsys_driver_data mt8186_mmsys_driver_data = {
+ .clk_driver = "clk-mt8186-mm",
+ .routes = mmsys_mt8186_routing_table,
+ .num_routes = ARRAY_SIZE(mmsys_mt8186_routing_table),
+ .sw0_rst_offset = MT8186_MMSYS_SW0_RST_B,
+ .num_resets = 32,
+};
+
+static const struct mtk_mmsys_driver_data mt8188_vdosys0_driver_data = {
+ .clk_driver = "clk-mt8188-vdo0",
+ .routes = mmsys_mt8188_routing_table,
+ .num_routes = ARRAY_SIZE(mmsys_mt8188_routing_table),
+};
+
+static const struct mtk_mmsys_driver_data mt8192_mmsys_driver_data = {
+ .clk_driver = "clk-mt8192-mm",
+ .routes = mmsys_mt8192_routing_table,
+ .num_routes = ARRAY_SIZE(mmsys_mt8192_routing_table),
+ .sw0_rst_offset = MT8186_MMSYS_SW0_RST_B,
+ .num_resets = 32,
+};
+
+static const struct mtk_mmsys_driver_data mt8195_vdosys0_driver_data = {
+ .clk_driver = "clk-mt8195-vdo0",
+ .routes = mmsys_mt8195_routing_table,
+ .num_routes = ARRAY_SIZE(mmsys_mt8195_routing_table),
+};
+
+static const struct mtk_mmsys_driver_data mt8195_vdosys1_driver_data = {
+ .clk_driver = "clk-mt8195-vdo1",
+ .routes = mmsys_mt8195_vdo1_routing_table,
+ .num_routes = ARRAY_SIZE(mmsys_mt8195_vdo1_routing_table),
+ .sw0_rst_offset = MT8195_VDO1_SW0_RST_B,
+ .num_resets = 64,
+};
+
+static const struct mtk_mmsys_driver_data mt8195_vppsys0_driver_data = {
+ .clk_driver = "clk-mt8195-vpp0",
+ .is_vppsys = true,
+};
+
+static const struct mtk_mmsys_driver_data mt8195_vppsys1_driver_data = {
+ .clk_driver = "clk-mt8195-vpp1",
+ .is_vppsys = true,
+};
+
+static const struct mtk_mmsys_driver_data mt8365_mmsys_driver_data = {
+ .clk_driver = "clk-mt8365-mm",
+ .routes = mt8365_mmsys_routing_table,
+ .num_routes = ARRAY_SIZE(mt8365_mmsys_routing_table),
+};
+
+struct mtk_mmsys {
+ void __iomem *regs;
+ const struct mtk_mmsys_driver_data *data;
+ struct platform_device *clks_pdev;
+ struct platform_device *drm_pdev;
+ spinlock_t lock; /* protects mmsys_sw_rst_b reg */
+ struct reset_controller_dev rcdev;
+ struct cmdq_client_reg cmdq_base;
+};
+
+static void mtk_mmsys_update_bits(struct mtk_mmsys *mmsys, u32 offset, u32 mask, u32 val,
+ struct cmdq_pkt *cmdq_pkt)
+{
+ int ret;
+ u32 tmp;
+
+ if (mmsys->cmdq_base.size && cmdq_pkt) {
+ ret = cmdq_pkt_write_mask(cmdq_pkt, mmsys->cmdq_base.subsys,
+ mmsys->cmdq_base.offset + offset, val,
+ mask);
+ if (ret)
+ pr_debug("CMDQ unavailable: using CPU write\n");
+ else
+ return;
+ }
+ tmp = readl_relaxed(mmsys->regs + offset);
+ tmp = (tmp & ~mask) | (val & mask);
+ writel_relaxed(tmp, mmsys->regs + offset);
+}
+
+void mtk_mmsys_ddp_connect(struct device *dev,
+ enum mtk_ddp_comp_id cur,
+ enum mtk_ddp_comp_id next)
+{
+ struct mtk_mmsys *mmsys = dev_get_drvdata(dev);
+ const struct mtk_mmsys_routes *routes = mmsys->data->routes;
+ int i;
+
+ for (i = 0; i < mmsys->data->num_routes; i++)
+ if (cur == routes[i].from_comp && next == routes[i].to_comp)
+ mtk_mmsys_update_bits(mmsys, routes[i].addr, routes[i].mask,
+ routes[i].val, NULL);
+}
+EXPORT_SYMBOL_GPL(mtk_mmsys_ddp_connect);
+
+void mtk_mmsys_ddp_disconnect(struct device *dev,
+ enum mtk_ddp_comp_id cur,
+ enum mtk_ddp_comp_id next)
+{
+ struct mtk_mmsys *mmsys = dev_get_drvdata(dev);
+ const struct mtk_mmsys_routes *routes = mmsys->data->routes;
+ int i;
+
+ for (i = 0; i < mmsys->data->num_routes; i++)
+ if (cur == routes[i].from_comp && next == routes[i].to_comp)
+ mtk_mmsys_update_bits(mmsys, routes[i].addr, routes[i].mask, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(mtk_mmsys_ddp_disconnect);
+
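+/*
+ * Illustrative usage sketch (documentation only, not executed here): a display
+ * driver owning the mmsys device could wire one pipeline segment with e.g.
+ *
+ *	mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0);
+ *
+ * and undo it with mtk_mmsys_ddp_disconnect() on the same component pair;
+ * mmsys_dev stands for the mmsys platform device here.
+ */
+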
+void mtk_mmsys_merge_async_config(struct device *dev, int idx, int width, int height,
+ struct cmdq_pkt *cmdq_pkt)
+{
+ mtk_mmsys_update_bits(dev_get_drvdata(dev), MT8195_VDO1_MERGE0_ASYNC_CFG_WD + 0x10 * idx,
+ ~0, height << 16 | width, cmdq_pkt);
+}
+EXPORT_SYMBOL_GPL(mtk_mmsys_merge_async_config);
+
+void mtk_mmsys_hdr_config(struct device *dev, int be_width, int be_height,
+ struct cmdq_pkt *cmdq_pkt)
+{
+ mtk_mmsys_update_bits(dev_get_drvdata(dev), MT8195_VDO1_HDRBE_ASYNC_CFG_WD, ~0,
+ be_height << 16 | be_width, cmdq_pkt);
+}
+EXPORT_SYMBOL_GPL(mtk_mmsys_hdr_config);
+
+void mtk_mmsys_mixer_in_config(struct device *dev, int idx, bool alpha_sel, u16 alpha,
+ u8 mode, u32 biwidth, struct cmdq_pkt *cmdq_pkt)
+{
+ struct mtk_mmsys *mmsys = dev_get_drvdata(dev);
+
+ mtk_mmsys_update_bits(mmsys, MT8195_VDO1_MIXER_IN1_ALPHA + (idx - 1) * 4, ~0,
+ alpha << 16 | alpha, cmdq_pkt);
+ mtk_mmsys_update_bits(mmsys, MT8195_VDO1_HDR_TOP_CFG, BIT(19 + idx),
+ alpha_sel << (19 + idx), cmdq_pkt);
+ mtk_mmsys_update_bits(mmsys, MT8195_VDO1_MIXER_IN1_PAD + (idx - 1) * 4,
+ GENMASK(31, 16) | GENMASK(1, 0), biwidth << 16 | mode, cmdq_pkt);
+}
+EXPORT_SYMBOL_GPL(mtk_mmsys_mixer_in_config);
+
+void mtk_mmsys_mixer_in_channel_swap(struct device *dev, int idx, bool channel_swap,
+ struct cmdq_pkt *cmdq_pkt)
+{
+ mtk_mmsys_update_bits(dev_get_drvdata(dev), MT8195_VDO1_MIXER_IN1_PAD + (idx - 1) * 4,
+ BIT(4), channel_swap << 4, cmdq_pkt);
+}
+EXPORT_SYMBOL_GPL(mtk_mmsys_mixer_in_channel_swap);
+
+void mtk_mmsys_ddp_dpi_fmt_config(struct device *dev, u32 val)
+{
+ struct mtk_mmsys *mmsys = dev_get_drvdata(dev);
+
+ switch (val) {
+ case MTK_DPI_RGB888_SDR_CON:
+ mtk_mmsys_update_bits(mmsys, MT8186_MMSYS_DPI_OUTPUT_FORMAT,
+ MT8186_DPI_FORMAT_MASK, MT8186_DPI_RGB888_SDR_CON, NULL);
+ break;
+ case MTK_DPI_RGB565_SDR_CON:
+ mtk_mmsys_update_bits(mmsys, MT8186_MMSYS_DPI_OUTPUT_FORMAT,
+ MT8186_DPI_FORMAT_MASK, MT8186_DPI_RGB565_SDR_CON, NULL);
+ break;
+ case MTK_DPI_RGB565_DDR_CON:
+ mtk_mmsys_update_bits(mmsys, MT8186_MMSYS_DPI_OUTPUT_FORMAT,
+ MT8186_DPI_FORMAT_MASK, MT8186_DPI_RGB565_DDR_CON, NULL);
+ break;
+ case MTK_DPI_RGB888_DDR_CON:
+ default:
+ mtk_mmsys_update_bits(mmsys, MT8186_MMSYS_DPI_OUTPUT_FORMAT,
+ MT8186_DPI_FORMAT_MASK, MT8186_DPI_RGB888_DDR_CON, NULL);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(mtk_mmsys_ddp_dpi_fmt_config);
+
+void mtk_mmsys_vpp_rsz_merge_config(struct device *dev, u32 id, bool enable,
+ struct cmdq_pkt *cmdq_pkt)
+{
+ u32 reg;
+
+ switch (id) {
+ case 2:
+ reg = MT8195_SVPP2_BUF_BF_RSZ_SWITCH;
+ break;
+ case 3:
+ reg = MT8195_SVPP3_BUF_BF_RSZ_SWITCH;
+ break;
+ default:
+ dev_err(dev, "Invalid id %d\n", id);
+ return;
+ }
+
+ mtk_mmsys_update_bits(dev_get_drvdata(dev), reg, ~0, enable, cmdq_pkt);
+}
+EXPORT_SYMBOL_GPL(mtk_mmsys_vpp_rsz_merge_config);
+
+void mtk_mmsys_vpp_rsz_dcm_config(struct device *dev, bool enable,
+ struct cmdq_pkt *cmdq_pkt)
+{
+ u32 client;
+
+ client = MT8195_SVPP1_MDP_RSZ;
+ mtk_mmsys_update_bits(dev_get_drvdata(dev),
+ MT8195_VPP1_HW_DCM_1ST_DIS0, client,
+ ((enable) ? client : 0), cmdq_pkt);
+ mtk_mmsys_update_bits(dev_get_drvdata(dev),
+ MT8195_VPP1_HW_DCM_2ND_DIS0, client,
+ ((enable) ? client : 0), cmdq_pkt);
+
+ client = MT8195_SVPP2_MDP_RSZ | MT8195_SVPP3_MDP_RSZ;
+ mtk_mmsys_update_bits(dev_get_drvdata(dev),
+ MT8195_VPP1_HW_DCM_1ST_DIS1, client,
+ ((enable) ? client : 0), cmdq_pkt);
+ mtk_mmsys_update_bits(dev_get_drvdata(dev),
+ MT8195_VPP1_HW_DCM_2ND_DIS1, client,
+ ((enable) ? client : 0), cmdq_pkt);
+}
+EXPORT_SYMBOL_GPL(mtk_mmsys_vpp_rsz_dcm_config);
+
+static int mtk_mmsys_reset_update(struct reset_controller_dev *rcdev, unsigned long id,
+ bool assert)
+{
+ struct mtk_mmsys *mmsys = container_of(rcdev, struct mtk_mmsys, rcdev);
+ unsigned long flags;
+ u32 offset;
+ u32 reg;
+
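+	/* e.g. (illustrative) reset id 40 -> register word at offset 4, bit 8 */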
+ offset = (id / MMSYS_SW_RESET_PER_REG) * sizeof(u32);
+ id = id % MMSYS_SW_RESET_PER_REG;
+ reg = mmsys->data->sw0_rst_offset + offset;
+
+ spin_lock_irqsave(&mmsys->lock, flags);
+
+ if (assert)
+ mtk_mmsys_update_bits(mmsys, reg, BIT(id), 0, NULL);
+ else
+ mtk_mmsys_update_bits(mmsys, reg, BIT(id), BIT(id), NULL);
+
+ spin_unlock_irqrestore(&mmsys->lock, flags);
+
+ return 0;
+}
+
+static int mtk_mmsys_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ return mtk_mmsys_reset_update(rcdev, id, true);
+}
+
+static int mtk_mmsys_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ return mtk_mmsys_reset_update(rcdev, id, false);
+}
+
+static int mtk_mmsys_reset(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ int ret;
+
+ ret = mtk_mmsys_reset_assert(rcdev, id);
+ if (ret)
+ return ret;
+
+ usleep_range(1000, 1100);
+
+ return mtk_mmsys_reset_deassert(rcdev, id);
+}
+
+static const struct reset_control_ops mtk_mmsys_reset_ops = {
+ .assert = mtk_mmsys_reset_assert,
+ .deassert = mtk_mmsys_reset_deassert,
+ .reset = mtk_mmsys_reset,
+};
+
+static int mtk_mmsys_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct platform_device *clks;
+ struct platform_device *drm;
+ struct mtk_mmsys *mmsys;
+ int ret;
+
+ mmsys = devm_kzalloc(dev, sizeof(*mmsys), GFP_KERNEL);
+ if (!mmsys)
+ return -ENOMEM;
+
+ mmsys->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mmsys->regs)) {
+ ret = PTR_ERR(mmsys->regs);
+ dev_err(dev, "Failed to ioremap mmsys registers: %d\n", ret);
+ return ret;
+ }
+
+ mmsys->data = of_device_get_match_data(&pdev->dev);
+
+ if (mmsys->data->num_resets > 0) {
+ spin_lock_init(&mmsys->lock);
+
+ mmsys->rcdev.owner = THIS_MODULE;
+ mmsys->rcdev.nr_resets = mmsys->data->num_resets;
+ mmsys->rcdev.ops = &mtk_mmsys_reset_ops;
+ mmsys->rcdev.of_node = pdev->dev.of_node;
+ ret = devm_reset_controller_register(&pdev->dev, &mmsys->rcdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't register mmsys reset controller: %d\n", ret);
+ return ret;
+ }
+ }
+
+ /* CMDQ is optional */
+ ret = cmdq_dev_get_client_reg(dev, &mmsys->cmdq_base, 0);
+ if (ret)
+ dev_dbg(dev, "No mediatek,gce-client-reg!\n");
+
+ platform_set_drvdata(pdev, mmsys);
+
+ clks = platform_device_register_data(&pdev->dev, mmsys->data->clk_driver,
+ PLATFORM_DEVID_AUTO, NULL, 0);
+ if (IS_ERR(clks))
+ return PTR_ERR(clks);
+ mmsys->clks_pdev = clks;
+
+ if (mmsys->data->is_vppsys)
+ goto out_probe_done;
+
+ drm = platform_device_register_data(&pdev->dev, "mediatek-drm",
+ PLATFORM_DEVID_AUTO, NULL, 0);
+ if (IS_ERR(drm)) {
+ platform_device_unregister(clks);
+ return PTR_ERR(drm);
+ }
+ mmsys->drm_pdev = drm;
+
+out_probe_done:
+ return 0;
+}
+
+static int mtk_mmsys_remove(struct platform_device *pdev)
+{
+ struct mtk_mmsys *mmsys = platform_get_drvdata(pdev);
+
+ platform_device_unregister(mmsys->drm_pdev);
+ platform_device_unregister(mmsys->clks_pdev);
+
+ return 0;
+}
+
+static const struct of_device_id of_match_mtk_mmsys[] = {
+ { .compatible = "mediatek,mt2701-mmsys", .data = &mt2701_mmsys_driver_data },
+ { .compatible = "mediatek,mt2712-mmsys", .data = &mt2712_mmsys_driver_data },
+ { .compatible = "mediatek,mt6779-mmsys", .data = &mt6779_mmsys_driver_data },
+ { .compatible = "mediatek,mt6795-mmsys", .data = &mt6795_mmsys_driver_data },
+ { .compatible = "mediatek,mt6797-mmsys", .data = &mt6797_mmsys_driver_data },
+ { .compatible = "mediatek,mt8167-mmsys", .data = &mt8167_mmsys_driver_data },
+ { .compatible = "mediatek,mt8173-mmsys", .data = &mt8173_mmsys_driver_data },
+ { .compatible = "mediatek,mt8183-mmsys", .data = &mt8183_mmsys_driver_data },
+ { .compatible = "mediatek,mt8186-mmsys", .data = &mt8186_mmsys_driver_data },
+ { .compatible = "mediatek,mt8188-vdosys0", .data = &mt8188_vdosys0_driver_data },
+ { .compatible = "mediatek,mt8192-mmsys", .data = &mt8192_mmsys_driver_data },
+ /* "mediatek,mt8195-mmsys" compatible is deprecated */
+ { .compatible = "mediatek,mt8195-mmsys", .data = &mt8195_vdosys0_driver_data },
+ { .compatible = "mediatek,mt8195-vdosys0", .data = &mt8195_vdosys0_driver_data },
+ { .compatible = "mediatek,mt8195-vdosys1", .data = &mt8195_vdosys1_driver_data },
+ { .compatible = "mediatek,mt8195-vppsys0", .data = &mt8195_vppsys0_driver_data },
+ { .compatible = "mediatek,mt8195-vppsys1", .data = &mt8195_vppsys1_driver_data },
+ { .compatible = "mediatek,mt8365-mmsys", .data = &mt8365_mmsys_driver_data },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_mtk_mmsys);
+
+static struct platform_driver mtk_mmsys_drv = {
+ .driver = {
+ .name = "mtk-mmsys",
+ .of_match_table = of_match_mtk_mmsys,
+ },
+ .probe = mtk_mmsys_probe,
+ .remove = mtk_mmsys_remove,
+};
+module_platform_driver(mtk_mmsys_drv);
+
+MODULE_AUTHOR("Yongqiang Niu <yongqiang.niu@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek SoC MMSYS driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/mediatek/mtk-mmsys.h b/drivers/soc/mediatek/mtk-mmsys.h
new file mode 100644
index 0000000000..6725403d2e
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-mmsys.h
@@ -0,0 +1,275 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SOC_MEDIATEK_MTK_MMSYS_H
+#define __SOC_MEDIATEK_MTK_MMSYS_H
+
+#define DISP_REG_CONFIG_DISP_OVL0_MOUT_EN 0x040
+#define DISP_REG_CONFIG_DISP_OVL1_MOUT_EN 0x044
+#define DISP_REG_CONFIG_DISP_OD_MOUT_EN 0x048
+#define DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN 0x04c
+#define DISP_REG_CONFIG_DISP_UFOE_MOUT_EN 0x050
+#define DISP_REG_CONFIG_DISP_COLOR0_SEL_IN 0x084
+#define DISP_REG_CONFIG_DISP_COLOR1_SEL_IN 0x088
+#define DISP_REG_CONFIG_DSIE_SEL_IN 0x0a4
+#define DISP_REG_CONFIG_DSIO_SEL_IN 0x0a8
+#define DISP_REG_CONFIG_DPI_SEL_IN 0x0ac
+#define DISP_REG_CONFIG_DISP_RDMA2_SOUT 0x0b8
+#define DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN 0x0c4
+#define DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN 0x0c8
+#define DISP_REG_CONFIG_MMSYS_CG_CON0 0x100
+
+#define DISP_REG_CONFIG_DISP_OVL_MOUT_EN 0x030
+#define DISP_REG_CONFIG_OUT_SEL 0x04c
+#define DISP_REG_CONFIG_DSI_SEL 0x050
+#define DISP_REG_CONFIG_DPI_SEL 0x064
+
+#define OVL0_MOUT_EN_COLOR0 0x1
+#define OD_MOUT_EN_RDMA0 0x1
+#define OD1_MOUT_EN_RDMA1 BIT(16)
+#define UFOE_MOUT_EN_DSI0 0x1
+#define COLOR0_SEL_IN_OVL0 0x1
+#define OVL1_MOUT_EN_COLOR1 0x1
+#define GAMMA_MOUT_EN_RDMA1 0x1
+#define RDMA0_SOUT_DPI0 0x2
+#define RDMA0_SOUT_DPI1 0x3
+#define RDMA0_SOUT_DSI1 0x1
+#define RDMA0_SOUT_DSI2 0x4
+#define RDMA0_SOUT_DSI3 0x5
+#define RDMA0_SOUT_MASK 0x7
+#define RDMA1_SOUT_DPI0 0x2
+#define RDMA1_SOUT_DPI1 0x3
+#define RDMA1_SOUT_DSI1 0x1
+#define RDMA1_SOUT_DSI2 0x4
+#define RDMA1_SOUT_DSI3 0x5
+#define RDMA1_SOUT_MASK 0x7
+#define RDMA2_SOUT_DPI0 0x2
+#define RDMA2_SOUT_DPI1 0x3
+#define RDMA2_SOUT_DSI1 0x1
+#define RDMA2_SOUT_DSI2 0x4
+#define RDMA2_SOUT_DSI3 0x5
+#define RDMA2_SOUT_MASK 0x7
+#define DPI0_SEL_IN_RDMA1 0x1
+#define DPI0_SEL_IN_RDMA2 0x3
+#define DPI0_SEL_IN_MASK 0x3
+#define DPI1_SEL_IN_RDMA1 (0x1 << 8)
+#define DPI1_SEL_IN_RDMA2 (0x3 << 8)
+#define DPI1_SEL_IN_MASK (0x3 << 8)
+#define DSI0_SEL_IN_RDMA1 0x1
+#define DSI0_SEL_IN_RDMA2 0x4
+#define DSI0_SEL_IN_MASK 0x7
+#define DSI1_SEL_IN_RDMA1 0x1
+#define DSI1_SEL_IN_RDMA2 0x4
+#define DSI1_SEL_IN_MASK 0x7
+#define DSI2_SEL_IN_RDMA1 (0x1 << 16)
+#define DSI2_SEL_IN_RDMA2 (0x4 << 16)
+#define DSI2_SEL_IN_MASK (0x7 << 16)
+#define DSI3_SEL_IN_RDMA1 (0x1 << 16)
+#define DSI3_SEL_IN_RDMA2 (0x4 << 16)
+#define DSI3_SEL_IN_MASK (0x7 << 16)
+#define COLOR1_SEL_IN_OVL1 0x1
+
+#define OVL_MOUT_EN_RDMA 0x1
+#define BLS_TO_DSI_RDMA1_TO_DPI1 0x8
+#define BLS_TO_DPI_RDMA1_TO_DSI 0x2
+#define BLS_RDMA1_DSI_DPI_MASK 0xf
+#define DSI_SEL_IN_BLS 0x0
+#define DPI_SEL_IN_BLS 0x0
+#define DPI_SEL_IN_MASK 0x1
+#define DSI_SEL_IN_RDMA 0x1
+#define DSI_SEL_IN_MASK 0x1
+
+struct mtk_mmsys_routes {
+ u32 from_comp;
+ u32 to_comp;
+ u32 addr;
+ u32 mask;
+ u32 val;
+};
+
+struct mtk_mmsys_driver_data {
+ const char *clk_driver;
+ const struct mtk_mmsys_routes *routes;
+ const unsigned int num_routes;
+ const u16 sw0_rst_offset;
+ const u32 num_resets;
+ const bool is_vppsys;
+};
+
+/*
+ * The routes in mt2701 and mt2712 differ: the same register address controls
+ * different input/output selections on each SoC. Right now they share this
+ * table because the default routes happen to meet both SoCs' requirements,
+ * and we do not yet have the complete route information needed to split them.
+ * Once more information is available, mt2701 and mt2712 could each move to an
+ * independent table.
+ */
+static const struct mtk_mmsys_routes mmsys_default_routing_table[] = {
+ {
+ DDP_COMPONENT_BLS, DDP_COMPONENT_DSI0,
+ DISP_REG_CONFIG_OUT_SEL, BLS_RDMA1_DSI_DPI_MASK,
+ BLS_TO_DSI_RDMA1_TO_DPI1
+ }, {
+ DDP_COMPONENT_BLS, DDP_COMPONENT_DSI0,
+ DISP_REG_CONFIG_DSI_SEL, DSI_SEL_IN_MASK,
+ DSI_SEL_IN_BLS
+ }, {
+ DDP_COMPONENT_BLS, DDP_COMPONENT_DPI0,
+ DISP_REG_CONFIG_OUT_SEL, BLS_RDMA1_DSI_DPI_MASK,
+ BLS_TO_DPI_RDMA1_TO_DSI
+ }, {
+ DDP_COMPONENT_BLS, DDP_COMPONENT_DPI0,
+ DISP_REG_CONFIG_DSI_SEL, DSI_SEL_IN_MASK,
+ DSI_SEL_IN_RDMA
+ }, {
+ DDP_COMPONENT_BLS, DDP_COMPONENT_DPI0,
+ DISP_REG_CONFIG_DPI_SEL, DPI_SEL_IN_MASK,
+ DPI_SEL_IN_BLS
+ }, {
+ DDP_COMPONENT_GAMMA, DDP_COMPONENT_RDMA1,
+ DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN, GAMMA_MOUT_EN_RDMA1,
+ GAMMA_MOUT_EN_RDMA1
+ }, {
+ DDP_COMPONENT_OD0, DDP_COMPONENT_RDMA0,
+ DISP_REG_CONFIG_DISP_OD_MOUT_EN, OD_MOUT_EN_RDMA0,
+ OD_MOUT_EN_RDMA0
+ }, {
+ DDP_COMPONENT_OD1, DDP_COMPONENT_RDMA1,
+ DISP_REG_CONFIG_DISP_OD_MOUT_EN, OD1_MOUT_EN_RDMA1,
+ OD1_MOUT_EN_RDMA1
+ }, {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
+ DISP_REG_CONFIG_DISP_OVL0_MOUT_EN, OVL0_MOUT_EN_COLOR0,
+ OVL0_MOUT_EN_COLOR0
+ }, {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
+ DISP_REG_CONFIG_DISP_COLOR0_SEL_IN, COLOR0_SEL_IN_OVL0,
+ COLOR0_SEL_IN_OVL0
+ }, {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
+ DISP_REG_CONFIG_DISP_OVL_MOUT_EN, OVL_MOUT_EN_RDMA,
+ OVL_MOUT_EN_RDMA
+ }, {
+ DDP_COMPONENT_OVL1, DDP_COMPONENT_COLOR1,
+ DISP_REG_CONFIG_DISP_OVL1_MOUT_EN, OVL1_MOUT_EN_COLOR1,
+ OVL1_MOUT_EN_COLOR1
+ }, {
+ DDP_COMPONENT_OVL1, DDP_COMPONENT_COLOR1,
+ DISP_REG_CONFIG_DISP_COLOR1_SEL_IN, COLOR1_SEL_IN_OVL1,
+ COLOR1_SEL_IN_OVL1
+ }, {
+ DDP_COMPONENT_RDMA0, DDP_COMPONENT_DPI0,
+ DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_MASK,
+ RDMA0_SOUT_DPI0
+ }, {
+ DDP_COMPONENT_RDMA0, DDP_COMPONENT_DPI1,
+ DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_MASK,
+ RDMA0_SOUT_DPI1
+ }, {
+ DDP_COMPONENT_RDMA0, DDP_COMPONENT_DSI1,
+ DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_MASK,
+ RDMA0_SOUT_DSI1
+ }, {
+ DDP_COMPONENT_RDMA0, DDP_COMPONENT_DSI2,
+ DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_MASK,
+ RDMA0_SOUT_DSI2
+ }, {
+ DDP_COMPONENT_RDMA0, DDP_COMPONENT_DSI3,
+ DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_MASK,
+ RDMA0_SOUT_DSI3
+ }, {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
+ DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_MASK,
+ RDMA1_SOUT_DPI0
+ }, {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
+ DISP_REG_CONFIG_DPI_SEL_IN, DPI0_SEL_IN_MASK,
+ DPI0_SEL_IN_RDMA1
+ }, {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI1,
+ DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_MASK,
+ RDMA1_SOUT_DPI1
+ }, {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI1,
+ DISP_REG_CONFIG_DPI_SEL_IN, DPI1_SEL_IN_MASK,
+ DPI1_SEL_IN_RDMA1
+ }, {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI0,
+ DISP_REG_CONFIG_DSIE_SEL_IN, DSI0_SEL_IN_MASK,
+ DSI0_SEL_IN_RDMA1
+ }, {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI1,
+ DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_MASK,
+ RDMA1_SOUT_DSI1
+ }, {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI1,
+ DISP_REG_CONFIG_DSIO_SEL_IN, DSI1_SEL_IN_MASK,
+ DSI1_SEL_IN_RDMA1
+ }, {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI2,
+ DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_MASK,
+ RDMA1_SOUT_DSI2
+ }, {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI2,
+ DISP_REG_CONFIG_DSIE_SEL_IN, DSI2_SEL_IN_MASK,
+ DSI2_SEL_IN_RDMA1
+ }, {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI3,
+ DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_MASK,
+ RDMA1_SOUT_DSI3
+ }, {
+ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI3,
+ DISP_REG_CONFIG_DSIO_SEL_IN, DSI3_SEL_IN_MASK,
+ DSI3_SEL_IN_RDMA1
+ }, {
+ DDP_COMPONENT_RDMA2, DDP_COMPONENT_DPI0,
+ DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_MASK,
+ RDMA2_SOUT_DPI0
+ }, {
+ DDP_COMPONENT_RDMA2, DDP_COMPONENT_DPI0,
+ DISP_REG_CONFIG_DPI_SEL_IN, DPI0_SEL_IN_MASK,
+ DPI0_SEL_IN_RDMA2
+ }, {
+ DDP_COMPONENT_RDMA2, DDP_COMPONENT_DPI1,
+ DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_MASK,
+ RDMA2_SOUT_DPI1
+ }, {
+ DDP_COMPONENT_RDMA2, DDP_COMPONENT_DPI1,
+ DISP_REG_CONFIG_DPI_SEL_IN, DPI1_SEL_IN_MASK,
+ DPI1_SEL_IN_RDMA2
+ }, {
+ DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI0,
+ DISP_REG_CONFIG_DSIE_SEL_IN, DSI0_SEL_IN_MASK,
+ DSI0_SEL_IN_RDMA2
+ }, {
+ DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI1,
+ DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_MASK,
+ RDMA2_SOUT_DSI1
+ }, {
+ DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI1,
+ DISP_REG_CONFIG_DSIO_SEL_IN, DSI1_SEL_IN_MASK,
+ DSI1_SEL_IN_RDMA2
+ }, {
+ DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI2,
+ DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_MASK,
+ RDMA2_SOUT_DSI2
+ }, {
+ DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI2,
+ DISP_REG_CONFIG_DSIE_SEL_IN, DSI2_SEL_IN_MASK,
+ DSI2_SEL_IN_RDMA2
+ }, {
+ DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI3,
+ DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_MASK,
+ RDMA2_SOUT_DSI3
+ }, {
+ DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI3,
+ DISP_REG_CONFIG_DSIO_SEL_IN, DSI3_SEL_IN_MASK,
+ DSI3_SEL_IN_RDMA2
+ }, {
+ DDP_COMPONENT_UFOE, DDP_COMPONENT_DSI0,
+ DISP_REG_CONFIG_DISP_UFOE_MOUT_EN, UFOE_MOUT_EN_DSI0,
+ UFOE_MOUT_EN_DSI0
+ }
+};
+
+#endif /* __SOC_MEDIATEK_MTK_MMSYS_H */
diff --git a/drivers/soc/mediatek/mtk-mutex.c b/drivers/soc/mediatek/mtk-mutex.c
new file mode 100644
index 0000000000..9d9f5ae578
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-mutex.c
@@ -0,0 +1,1060 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/soc/mediatek/mtk-mmsys.h>
+#include <linux/soc/mediatek/mtk-mutex.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
+
+#define MTK_MUTEX_MAX_HANDLES 10
+
+#define MT2701_MUTEX0_MOD0 0x2c
+#define MT2701_MUTEX0_SOF0 0x30
+#define MT8183_MUTEX0_MOD0 0x30
+#define MT8183_MUTEX0_SOF0 0x2c
+
+#define DISP_REG_MUTEX_EN(n) (0x20 + 0x20 * (n))
+#define DISP_REG_MUTEX(n) (0x24 + 0x20 * (n))
+#define DISP_REG_MUTEX_RST(n) (0x28 + 0x20 * (n))
+#define DISP_REG_MUTEX_MOD(mutex_mod_reg, n) (mutex_mod_reg + 0x20 * (n))
+#define DISP_REG_MUTEX_MOD1(mutex_mod_reg, n) ((mutex_mod_reg) + 0x20 * (n) + 0x4)
+#define DISP_REG_MUTEX_SOF(mutex_sof_reg, n) (mutex_sof_reg + 0x20 * (n))
+#define DISP_REG_MUTEX_MOD2(n) (0x34 + 0x20 * (n))
+
+#define INT_MUTEX BIT(1)
+
+#define MT8186_MUTEX_MOD_DISP_OVL0 0
+#define MT8186_MUTEX_MOD_DISP_OVL0_2L 1
+#define MT8186_MUTEX_MOD_DISP_RDMA0 2
+#define MT8186_MUTEX_MOD_DISP_COLOR0 4
+#define MT8186_MUTEX_MOD_DISP_CCORR0 5
+#define MT8186_MUTEX_MOD_DISP_AAL0 7
+#define MT8186_MUTEX_MOD_DISP_GAMMA0 8
+#define MT8186_MUTEX_MOD_DISP_POSTMASK0 9
+#define MT8186_MUTEX_MOD_DISP_DITHER0 10
+#define MT8186_MUTEX_MOD_DISP_RDMA1 17
+
+#define MT8186_MUTEX_SOF_SINGLE_MODE 0
+#define MT8186_MUTEX_SOF_DSI0 1
+#define MT8186_MUTEX_SOF_DPI0 2
+#define MT8186_MUTEX_EOF_DSI0 (MT8186_MUTEX_SOF_DSI0 << 6)
+#define MT8186_MUTEX_EOF_DPI0 (MT8186_MUTEX_SOF_DPI0 << 6)
+
+#define MT8167_MUTEX_MOD_DISP_PWM 1
+#define MT8167_MUTEX_MOD_DISP_OVL0 6
+#define MT8167_MUTEX_MOD_DISP_OVL1 7
+#define MT8167_MUTEX_MOD_DISP_RDMA0 8
+#define MT8167_MUTEX_MOD_DISP_RDMA1 9
+#define MT8167_MUTEX_MOD_DISP_WDMA0 10
+#define MT8167_MUTEX_MOD_DISP_CCORR 11
+#define MT8167_MUTEX_MOD_DISP_COLOR 12
+#define MT8167_MUTEX_MOD_DISP_AAL 13
+#define MT8167_MUTEX_MOD_DISP_GAMMA 14
+#define MT8167_MUTEX_MOD_DISP_DITHER 15
+#define MT8167_MUTEX_MOD_DISP_UFOE 16
+
+#define MT8192_MUTEX_MOD_DISP_OVL0 0
+#define MT8192_MUTEX_MOD_DISP_OVL0_2L 1
+#define MT8192_MUTEX_MOD_DISP_RDMA0 2
+#define MT8192_MUTEX_MOD_DISP_COLOR0 4
+#define MT8192_MUTEX_MOD_DISP_CCORR0 5
+#define MT8192_MUTEX_MOD_DISP_AAL0 6
+#define MT8192_MUTEX_MOD_DISP_GAMMA0 7
+#define MT8192_MUTEX_MOD_DISP_POSTMASK0 8
+#define MT8192_MUTEX_MOD_DISP_DITHER0 9
+#define MT8192_MUTEX_MOD_DISP_OVL2_2L 16
+#define MT8192_MUTEX_MOD_DISP_RDMA4 17
+
+#define MT8183_MUTEX_MOD_DISP_RDMA0 0
+#define MT8183_MUTEX_MOD_DISP_RDMA1 1
+#define MT8183_MUTEX_MOD_DISP_OVL0 9
+#define MT8183_MUTEX_MOD_DISP_OVL0_2L 10
+#define MT8183_MUTEX_MOD_DISP_OVL1_2L 11
+#define MT8183_MUTEX_MOD_DISP_WDMA0 12
+#define MT8183_MUTEX_MOD_DISP_COLOR0 13
+#define MT8183_MUTEX_MOD_DISP_CCORR0 14
+#define MT8183_MUTEX_MOD_DISP_AAL0 15
+#define MT8183_MUTEX_MOD_DISP_GAMMA0 16
+#define MT8183_MUTEX_MOD_DISP_DITHER0 17
+
+#define MT8183_MUTEX_MOD_MDP_RDMA0 2
+#define MT8183_MUTEX_MOD_MDP_RSZ0 4
+#define MT8183_MUTEX_MOD_MDP_RSZ1 5
+#define MT8183_MUTEX_MOD_MDP_TDSHP0 6
+#define MT8183_MUTEX_MOD_MDP_WROT0 7
+#define MT8183_MUTEX_MOD_MDP_WDMA 8
+#define MT8183_MUTEX_MOD_MDP_AAL0 23
+#define MT8183_MUTEX_MOD_MDP_CCORR0 24
+
+#define MT8186_MUTEX_MOD_MDP_RDMA0 0
+#define MT8186_MUTEX_MOD_MDP_AAL0 2
+#define MT8186_MUTEX_MOD_MDP_HDR0 4
+#define MT8186_MUTEX_MOD_MDP_RSZ0 5
+#define MT8186_MUTEX_MOD_MDP_RSZ1 6
+#define MT8186_MUTEX_MOD_MDP_WROT0 7
+#define MT8186_MUTEX_MOD_MDP_TDSHP0 9
+#define MT8186_MUTEX_MOD_MDP_COLOR0 14
+
+#define MT8173_MUTEX_MOD_DISP_OVL0 11
+#define MT8173_MUTEX_MOD_DISP_OVL1 12
+#define MT8173_MUTEX_MOD_DISP_RDMA0 13
+#define MT8173_MUTEX_MOD_DISP_RDMA1 14
+#define MT8173_MUTEX_MOD_DISP_RDMA2 15
+#define MT8173_MUTEX_MOD_DISP_WDMA0 16
+#define MT8173_MUTEX_MOD_DISP_WDMA1 17
+#define MT8173_MUTEX_MOD_DISP_COLOR0 18
+#define MT8173_MUTEX_MOD_DISP_COLOR1 19
+#define MT8173_MUTEX_MOD_DISP_AAL 20
+#define MT8173_MUTEX_MOD_DISP_GAMMA 21
+#define MT8173_MUTEX_MOD_DISP_UFOE 22
+#define MT8173_MUTEX_MOD_DISP_PWM0 23
+#define MT8173_MUTEX_MOD_DISP_PWM1 24
+#define MT8173_MUTEX_MOD_DISP_OD 25
+
+#define MT8188_MUTEX_MOD_DISP_OVL0 0
+#define MT8188_MUTEX_MOD_DISP_WDMA0 1
+#define MT8188_MUTEX_MOD_DISP_RDMA0 2
+#define MT8188_MUTEX_MOD_DISP_COLOR0 3
+#define MT8188_MUTEX_MOD_DISP_CCORR0 4
+#define MT8188_MUTEX_MOD_DISP_AAL0 5
+#define MT8188_MUTEX_MOD_DISP_GAMMA0 6
+#define MT8188_MUTEX_MOD_DISP_DITHER0 7
+#define MT8188_MUTEX_MOD_DISP_DSI0 8
+#define MT8188_MUTEX_MOD_DISP_DSC_WRAP0_CORE0 9
+#define MT8188_MUTEX_MOD_DISP_VPP_MERGE 20
+#define MT8188_MUTEX_MOD_DISP_DP_INTF0 21
+#define MT8188_MUTEX_MOD_DISP_POSTMASK0 24
+#define MT8188_MUTEX_MOD2_DISP_PWM0 33
+
+#define MT8195_MUTEX_MOD_DISP_OVL0 0
+#define MT8195_MUTEX_MOD_DISP_WDMA0 1
+#define MT8195_MUTEX_MOD_DISP_RDMA0 2
+#define MT8195_MUTEX_MOD_DISP_COLOR0 3
+#define MT8195_MUTEX_MOD_DISP_CCORR0 4
+#define MT8195_MUTEX_MOD_DISP_AAL0 5
+#define MT8195_MUTEX_MOD_DISP_GAMMA0 6
+#define MT8195_MUTEX_MOD_DISP_DITHER0 7
+#define MT8195_MUTEX_MOD_DISP_DSI0 8
+#define MT8195_MUTEX_MOD_DISP_DSC_WRAP0_CORE0 9
+#define MT8195_MUTEX_MOD_DISP_VPP_MERGE 20
+#define MT8195_MUTEX_MOD_DISP_DP_INTF0 21
+#define MT8195_MUTEX_MOD_DISP_PWM0 27
+
+#define MT8195_MUTEX_MOD_DISP1_MDP_RDMA0 0
+#define MT8195_MUTEX_MOD_DISP1_MDP_RDMA1 1
+#define MT8195_MUTEX_MOD_DISP1_MDP_RDMA2 2
+#define MT8195_MUTEX_MOD_DISP1_MDP_RDMA3 3
+#define MT8195_MUTEX_MOD_DISP1_MDP_RDMA4 4
+#define MT8195_MUTEX_MOD_DISP1_MDP_RDMA5 5
+#define MT8195_MUTEX_MOD_DISP1_MDP_RDMA6 6
+#define MT8195_MUTEX_MOD_DISP1_MDP_RDMA7 7
+#define MT8195_MUTEX_MOD_DISP1_VPP_MERGE0 8
+#define MT8195_MUTEX_MOD_DISP1_VPP_MERGE1 9
+#define MT8195_MUTEX_MOD_DISP1_VPP_MERGE2 10
+#define MT8195_MUTEX_MOD_DISP1_VPP_MERGE3 11
+#define MT8195_MUTEX_MOD_DISP1_VPP_MERGE4 12
+#define MT8195_MUTEX_MOD_DISP1_DISP_MIXER 18
+#define MT8195_MUTEX_MOD_DISP1_DPI0 25
+#define MT8195_MUTEX_MOD_DISP1_DPI1 26
+#define MT8195_MUTEX_MOD_DISP1_DP_INTF0 27
+
+/* VPPSYS0 */
+#define MT8195_MUTEX_MOD_MDP_RDMA0 0
+#define MT8195_MUTEX_MOD_MDP_FG0 1
+#define MT8195_MUTEX_MOD_MDP_STITCH0 2
+#define MT8195_MUTEX_MOD_MDP_HDR0 3
+#define MT8195_MUTEX_MOD_MDP_AAL0 4
+#define MT8195_MUTEX_MOD_MDP_RSZ0 5
+#define MT8195_MUTEX_MOD_MDP_TDSHP0 6
+#define MT8195_MUTEX_MOD_MDP_COLOR0 7
+#define MT8195_MUTEX_MOD_MDP_OVL0 8
+#define MT8195_MUTEX_MOD_MDP_PAD0 9
+#define MT8195_MUTEX_MOD_MDP_TCC0 10
+#define MT8195_MUTEX_MOD_MDP_WROT0 11
+
+/* VPPSYS1 */
+#define MT8195_MUTEX_MOD_MDP_TCC1 3
+#define MT8195_MUTEX_MOD_MDP_RDMA1 4
+#define MT8195_MUTEX_MOD_MDP_RDMA2 5
+#define MT8195_MUTEX_MOD_MDP_RDMA3 6
+#define MT8195_MUTEX_MOD_MDP_FG1 7
+#define MT8195_MUTEX_MOD_MDP_FG2 8
+#define MT8195_MUTEX_MOD_MDP_FG3 9
+#define MT8195_MUTEX_MOD_MDP_HDR1 10
+#define MT8195_MUTEX_MOD_MDP_HDR2 11
+#define MT8195_MUTEX_MOD_MDP_HDR3 12
+#define MT8195_MUTEX_MOD_MDP_AAL1 13
+#define MT8195_MUTEX_MOD_MDP_AAL2 14
+#define MT8195_MUTEX_MOD_MDP_AAL3 15
+#define MT8195_MUTEX_MOD_MDP_RSZ1 16
+#define MT8195_MUTEX_MOD_MDP_RSZ2 17
+#define MT8195_MUTEX_MOD_MDP_RSZ3 18
+#define MT8195_MUTEX_MOD_MDP_TDSHP1 19
+#define MT8195_MUTEX_MOD_MDP_TDSHP2 20
+#define MT8195_MUTEX_MOD_MDP_TDSHP3 21
+#define MT8195_MUTEX_MOD_MDP_MERGE2 22
+#define MT8195_MUTEX_MOD_MDP_MERGE3 23
+#define MT8195_MUTEX_MOD_MDP_COLOR1 24
+#define MT8195_MUTEX_MOD_MDP_COLOR2 25
+#define MT8195_MUTEX_MOD_MDP_COLOR3 26
+#define MT8195_MUTEX_MOD_MDP_OVL1 27
+#define MT8195_MUTEX_MOD_MDP_PAD1 28
+#define MT8195_MUTEX_MOD_MDP_PAD2 29
+#define MT8195_MUTEX_MOD_MDP_PAD3 30
+#define MT8195_MUTEX_MOD_MDP_WROT1 31
+#define MT8195_MUTEX_MOD_MDP_WROT2 32
+#define MT8195_MUTEX_MOD_MDP_WROT3 33
+
+#define MT8365_MUTEX_MOD_DISP_OVL0 7
+#define MT8365_MUTEX_MOD_DISP_OVL0_2L 8
+#define MT8365_MUTEX_MOD_DISP_RDMA0 9
+#define MT8365_MUTEX_MOD_DISP_RDMA1 10
+#define MT8365_MUTEX_MOD_DISP_WDMA0 11
+#define MT8365_MUTEX_MOD_DISP_COLOR0 12
+#define MT8365_MUTEX_MOD_DISP_CCORR 13
+#define MT8365_MUTEX_MOD_DISP_AAL 14
+#define MT8365_MUTEX_MOD_DISP_GAMMA 15
+#define MT8365_MUTEX_MOD_DISP_DITHER 16
+#define MT8365_MUTEX_MOD_DISP_DSI0 17
+#define MT8365_MUTEX_MOD_DISP_PWM0 20
+#define MT8365_MUTEX_MOD_DISP_DPI0 22
+
+#define MT2712_MUTEX_MOD_DISP_PWM2 10
+#define MT2712_MUTEX_MOD_DISP_OVL0 11
+#define MT2712_MUTEX_MOD_DISP_OVL1 12
+#define MT2712_MUTEX_MOD_DISP_RDMA0 13
+#define MT2712_MUTEX_MOD_DISP_RDMA1 14
+#define MT2712_MUTEX_MOD_DISP_RDMA2 15
+#define MT2712_MUTEX_MOD_DISP_WDMA0 16
+#define MT2712_MUTEX_MOD_DISP_WDMA1 17
+#define MT2712_MUTEX_MOD_DISP_COLOR0 18
+#define MT2712_MUTEX_MOD_DISP_COLOR1 19
+#define MT2712_MUTEX_MOD_DISP_AAL0 20
+#define MT2712_MUTEX_MOD_DISP_UFOE 22
+#define MT2712_MUTEX_MOD_DISP_PWM0 23
+#define MT2712_MUTEX_MOD_DISP_PWM1 24
+#define MT2712_MUTEX_MOD_DISP_OD0 25
+#define MT2712_MUTEX_MOD2_DISP_AAL1 33
+#define MT2712_MUTEX_MOD2_DISP_OD1 34
+
+#define MT2701_MUTEX_MOD_DISP_OVL 3
+#define MT2701_MUTEX_MOD_DISP_WDMA 6
+#define MT2701_MUTEX_MOD_DISP_COLOR 7
+#define MT2701_MUTEX_MOD_DISP_BLS 9
+#define MT2701_MUTEX_MOD_DISP_RDMA0 10
+#define MT2701_MUTEX_MOD_DISP_RDMA1 12
+
+#define MT2712_MUTEX_SOF_SINGLE_MODE 0
+#define MT2712_MUTEX_SOF_DSI0 1
+#define MT2712_MUTEX_SOF_DSI1 2
+#define MT2712_MUTEX_SOF_DPI0 3
+#define MT2712_MUTEX_SOF_DPI1 4
+#define MT2712_MUTEX_SOF_DSI2 5
+#define MT2712_MUTEX_SOF_DSI3 6
+#define MT8167_MUTEX_SOF_DPI0 2
+#define MT8167_MUTEX_SOF_DPI1 3
+#define MT8183_MUTEX_SOF_DSI0 1
+#define MT8183_MUTEX_SOF_DPI0 2
+#define MT8188_MUTEX_SOF_DSI0 1
+#define MT8188_MUTEX_SOF_DP_INTF0 3
+#define MT8195_MUTEX_SOF_DSI0 1
+#define MT8195_MUTEX_SOF_DSI1 2
+#define MT8195_MUTEX_SOF_DP_INTF0 3
+#define MT8195_MUTEX_SOF_DP_INTF1 4
+#define MT8195_MUTEX_SOF_DPI0 6 /* for HDMI_TX */
+#define MT8195_MUTEX_SOF_DPI1 5 /* for digital video out */
+
+#define MT8183_MUTEX_EOF_DSI0 (MT8183_MUTEX_SOF_DSI0 << 6)
+#define MT8183_MUTEX_EOF_DPI0 (MT8183_MUTEX_SOF_DPI0 << 6)
+#define MT8188_MUTEX_EOF_DSI0 (MT8188_MUTEX_SOF_DSI0 << 7)
+#define MT8188_MUTEX_EOF_DP_INTF0 (MT8188_MUTEX_SOF_DP_INTF0 << 7)
+#define MT8195_MUTEX_EOF_DSI0 (MT8195_MUTEX_SOF_DSI0 << 7)
+#define MT8195_MUTEX_EOF_DSI1 (MT8195_MUTEX_SOF_DSI1 << 7)
+#define MT8195_MUTEX_EOF_DP_INTF0 (MT8195_MUTEX_SOF_DP_INTF0 << 7)
+#define MT8195_MUTEX_EOF_DP_INTF1 (MT8195_MUTEX_SOF_DP_INTF1 << 7)
+#define MT8195_MUTEX_EOF_DPI0 (MT8195_MUTEX_SOF_DPI0 << 7)
+#define MT8195_MUTEX_EOF_DPI1 (MT8195_MUTEX_SOF_DPI1 << 7)
+
+struct mtk_mutex {
+ u8 id;
+ bool claimed;
+};
+
+enum mtk_mutex_sof_id {
+ MUTEX_SOF_SINGLE_MODE,
+ MUTEX_SOF_DSI0,
+ MUTEX_SOF_DSI1,
+ MUTEX_SOF_DPI0,
+ MUTEX_SOF_DPI1,
+ MUTEX_SOF_DSI2,
+ MUTEX_SOF_DSI3,
+ MUTEX_SOF_DP_INTF0,
+ MUTEX_SOF_DP_INTF1,
+ DDP_MUTEX_SOF_MAX,
+};
+
+struct mtk_mutex_data {
+ const unsigned int *mutex_mod;
+ const unsigned int *mutex_sof;
+ const unsigned int mutex_mod_reg;
+ const unsigned int mutex_sof_reg;
+ const unsigned int *mutex_table_mod;
+ const bool no_clk;
+};
+
+struct mtk_mutex_ctx {
+ struct device *dev;
+ struct clk *clk;
+ void __iomem *regs;
+ struct mtk_mutex mutex[MTK_MUTEX_MAX_HANDLES];
+ const struct mtk_mutex_data *data;
+ phys_addr_t addr;
+ struct cmdq_client_reg cmdq_reg;
+};
+
+static const unsigned int mt2701_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+ [DDP_COMPONENT_BLS] = MT2701_MUTEX_MOD_DISP_BLS,
+ [DDP_COMPONENT_COLOR0] = MT2701_MUTEX_MOD_DISP_COLOR,
+ [DDP_COMPONENT_OVL0] = MT2701_MUTEX_MOD_DISP_OVL,
+ [DDP_COMPONENT_RDMA0] = MT2701_MUTEX_MOD_DISP_RDMA0,
+ [DDP_COMPONENT_RDMA1] = MT2701_MUTEX_MOD_DISP_RDMA1,
+ [DDP_COMPONENT_WDMA0] = MT2701_MUTEX_MOD_DISP_WDMA,
+};
+
+static const unsigned int mt2712_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+ [DDP_COMPONENT_AAL0] = MT2712_MUTEX_MOD_DISP_AAL0,
+ [DDP_COMPONENT_AAL1] = MT2712_MUTEX_MOD2_DISP_AAL1,
+ [DDP_COMPONENT_COLOR0] = MT2712_MUTEX_MOD_DISP_COLOR0,
+ [DDP_COMPONENT_COLOR1] = MT2712_MUTEX_MOD_DISP_COLOR1,
+ [DDP_COMPONENT_OD0] = MT2712_MUTEX_MOD_DISP_OD0,
+ [DDP_COMPONENT_OD1] = MT2712_MUTEX_MOD2_DISP_OD1,
+ [DDP_COMPONENT_OVL0] = MT2712_MUTEX_MOD_DISP_OVL0,
+ [DDP_COMPONENT_OVL1] = MT2712_MUTEX_MOD_DISP_OVL1,
+ [DDP_COMPONENT_PWM0] = MT2712_MUTEX_MOD_DISP_PWM0,
+ [DDP_COMPONENT_PWM1] = MT2712_MUTEX_MOD_DISP_PWM1,
+ [DDP_COMPONENT_PWM2] = MT2712_MUTEX_MOD_DISP_PWM2,
+ [DDP_COMPONENT_RDMA0] = MT2712_MUTEX_MOD_DISP_RDMA0,
+ [DDP_COMPONENT_RDMA1] = MT2712_MUTEX_MOD_DISP_RDMA1,
+ [DDP_COMPONENT_RDMA2] = MT2712_MUTEX_MOD_DISP_RDMA2,
+ [DDP_COMPONENT_UFOE] = MT2712_MUTEX_MOD_DISP_UFOE,
+ [DDP_COMPONENT_WDMA0] = MT2712_MUTEX_MOD_DISP_WDMA0,
+ [DDP_COMPONENT_WDMA1] = MT2712_MUTEX_MOD_DISP_WDMA1,
+};
+
+static const unsigned int mt8167_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+ [DDP_COMPONENT_AAL0] = MT8167_MUTEX_MOD_DISP_AAL,
+ [DDP_COMPONENT_CCORR] = MT8167_MUTEX_MOD_DISP_CCORR,
+ [DDP_COMPONENT_COLOR0] = MT8167_MUTEX_MOD_DISP_COLOR,
+ [DDP_COMPONENT_DITHER0] = MT8167_MUTEX_MOD_DISP_DITHER,
+ [DDP_COMPONENT_GAMMA] = MT8167_MUTEX_MOD_DISP_GAMMA,
+ [DDP_COMPONENT_OVL0] = MT8167_MUTEX_MOD_DISP_OVL0,
+ [DDP_COMPONENT_OVL1] = MT8167_MUTEX_MOD_DISP_OVL1,
+ [DDP_COMPONENT_PWM0] = MT8167_MUTEX_MOD_DISP_PWM,
+ [DDP_COMPONENT_RDMA0] = MT8167_MUTEX_MOD_DISP_RDMA0,
+ [DDP_COMPONENT_RDMA1] = MT8167_MUTEX_MOD_DISP_RDMA1,
+ [DDP_COMPONENT_UFOE] = MT8167_MUTEX_MOD_DISP_UFOE,
+ [DDP_COMPONENT_WDMA0] = MT8167_MUTEX_MOD_DISP_WDMA0,
+};
+
+static const unsigned int mt8173_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+ [DDP_COMPONENT_AAL0] = MT8173_MUTEX_MOD_DISP_AAL,
+ [DDP_COMPONENT_COLOR0] = MT8173_MUTEX_MOD_DISP_COLOR0,
+ [DDP_COMPONENT_COLOR1] = MT8173_MUTEX_MOD_DISP_COLOR1,
+ [DDP_COMPONENT_GAMMA] = MT8173_MUTEX_MOD_DISP_GAMMA,
+ [DDP_COMPONENT_OD0] = MT8173_MUTEX_MOD_DISP_OD,
+ [DDP_COMPONENT_OVL0] = MT8173_MUTEX_MOD_DISP_OVL0,
+ [DDP_COMPONENT_OVL1] = MT8173_MUTEX_MOD_DISP_OVL1,
+ [DDP_COMPONENT_PWM0] = MT8173_MUTEX_MOD_DISP_PWM0,
+ [DDP_COMPONENT_PWM1] = MT8173_MUTEX_MOD_DISP_PWM1,
+ [DDP_COMPONENT_RDMA0] = MT8173_MUTEX_MOD_DISP_RDMA0,
+ [DDP_COMPONENT_RDMA1] = MT8173_MUTEX_MOD_DISP_RDMA1,
+ [DDP_COMPONENT_RDMA2] = MT8173_MUTEX_MOD_DISP_RDMA2,
+ [DDP_COMPONENT_UFOE] = MT8173_MUTEX_MOD_DISP_UFOE,
+ [DDP_COMPONENT_WDMA0] = MT8173_MUTEX_MOD_DISP_WDMA0,
+ [DDP_COMPONENT_WDMA1] = MT8173_MUTEX_MOD_DISP_WDMA1,
+};
+
+static const unsigned int mt8183_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+ [DDP_COMPONENT_AAL0] = MT8183_MUTEX_MOD_DISP_AAL0,
+ [DDP_COMPONENT_CCORR] = MT8183_MUTEX_MOD_DISP_CCORR0,
+ [DDP_COMPONENT_COLOR0] = MT8183_MUTEX_MOD_DISP_COLOR0,
+ [DDP_COMPONENT_DITHER0] = MT8183_MUTEX_MOD_DISP_DITHER0,
+ [DDP_COMPONENT_GAMMA] = MT8183_MUTEX_MOD_DISP_GAMMA0,
+ [DDP_COMPONENT_OVL0] = MT8183_MUTEX_MOD_DISP_OVL0,
+ [DDP_COMPONENT_OVL_2L0] = MT8183_MUTEX_MOD_DISP_OVL0_2L,
+ [DDP_COMPONENT_OVL_2L1] = MT8183_MUTEX_MOD_DISP_OVL1_2L,
+ [DDP_COMPONENT_RDMA0] = MT8183_MUTEX_MOD_DISP_RDMA0,
+ [DDP_COMPONENT_RDMA1] = MT8183_MUTEX_MOD_DISP_RDMA1,
+ [DDP_COMPONENT_WDMA0] = MT8183_MUTEX_MOD_DISP_WDMA0,
+};
+
+static const unsigned int mt8183_mutex_table_mod[MUTEX_MOD_IDX_MAX] = {
+ [MUTEX_MOD_IDX_MDP_RDMA0] = MT8183_MUTEX_MOD_MDP_RDMA0,
+ [MUTEX_MOD_IDX_MDP_RSZ0] = MT8183_MUTEX_MOD_MDP_RSZ0,
+ [MUTEX_MOD_IDX_MDP_RSZ1] = MT8183_MUTEX_MOD_MDP_RSZ1,
+ [MUTEX_MOD_IDX_MDP_TDSHP0] = MT8183_MUTEX_MOD_MDP_TDSHP0,
+ [MUTEX_MOD_IDX_MDP_WROT0] = MT8183_MUTEX_MOD_MDP_WROT0,
+ [MUTEX_MOD_IDX_MDP_WDMA] = MT8183_MUTEX_MOD_MDP_WDMA,
+ [MUTEX_MOD_IDX_MDP_AAL0] = MT8183_MUTEX_MOD_MDP_AAL0,
+ [MUTEX_MOD_IDX_MDP_CCORR0] = MT8183_MUTEX_MOD_MDP_CCORR0,
+};
+
+static const unsigned int mt8186_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+ [DDP_COMPONENT_AAL0] = MT8186_MUTEX_MOD_DISP_AAL0,
+ [DDP_COMPONENT_CCORR] = MT8186_MUTEX_MOD_DISP_CCORR0,
+ [DDP_COMPONENT_COLOR0] = MT8186_MUTEX_MOD_DISP_COLOR0,
+ [DDP_COMPONENT_DITHER0] = MT8186_MUTEX_MOD_DISP_DITHER0,
+ [DDP_COMPONENT_GAMMA] = MT8186_MUTEX_MOD_DISP_GAMMA0,
+ [DDP_COMPONENT_OVL0] = MT8186_MUTEX_MOD_DISP_OVL0,
+ [DDP_COMPONENT_OVL_2L0] = MT8186_MUTEX_MOD_DISP_OVL0_2L,
+ [DDP_COMPONENT_POSTMASK0] = MT8186_MUTEX_MOD_DISP_POSTMASK0,
+ [DDP_COMPONENT_RDMA0] = MT8186_MUTEX_MOD_DISP_RDMA0,
+ [DDP_COMPONENT_RDMA1] = MT8186_MUTEX_MOD_DISP_RDMA1,
+};
+
+static const unsigned int mt8186_mdp_mutex_table_mod[MUTEX_MOD_IDX_MAX] = {
+ [MUTEX_MOD_IDX_MDP_RDMA0] = MT8186_MUTEX_MOD_MDP_RDMA0,
+ [MUTEX_MOD_IDX_MDP_RSZ0] = MT8186_MUTEX_MOD_MDP_RSZ0,
+ [MUTEX_MOD_IDX_MDP_RSZ1] = MT8186_MUTEX_MOD_MDP_RSZ1,
+ [MUTEX_MOD_IDX_MDP_TDSHP0] = MT8186_MUTEX_MOD_MDP_TDSHP0,
+ [MUTEX_MOD_IDX_MDP_WROT0] = MT8186_MUTEX_MOD_MDP_WROT0,
+ [MUTEX_MOD_IDX_MDP_HDR0] = MT8186_MUTEX_MOD_MDP_HDR0,
+ [MUTEX_MOD_IDX_MDP_AAL0] = MT8186_MUTEX_MOD_MDP_AAL0,
+ [MUTEX_MOD_IDX_MDP_COLOR0] = MT8186_MUTEX_MOD_MDP_COLOR0,
+};
+
+static const unsigned int mt8188_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+ [DDP_COMPONENT_OVL0] = MT8188_MUTEX_MOD_DISP_OVL0,
+ [DDP_COMPONENT_WDMA0] = MT8188_MUTEX_MOD_DISP_WDMA0,
+ [DDP_COMPONENT_RDMA0] = MT8188_MUTEX_MOD_DISP_RDMA0,
+ [DDP_COMPONENT_COLOR0] = MT8188_MUTEX_MOD_DISP_COLOR0,
+ [DDP_COMPONENT_CCORR] = MT8188_MUTEX_MOD_DISP_CCORR0,
+ [DDP_COMPONENT_AAL0] = MT8188_MUTEX_MOD_DISP_AAL0,
+ [DDP_COMPONENT_GAMMA] = MT8188_MUTEX_MOD_DISP_GAMMA0,
+ [DDP_COMPONENT_POSTMASK0] = MT8188_MUTEX_MOD_DISP_POSTMASK0,
+ [DDP_COMPONENT_DITHER0] = MT8188_MUTEX_MOD_DISP_DITHER0,
+ [DDP_COMPONENT_MERGE0] = MT8188_MUTEX_MOD_DISP_VPP_MERGE,
+ [DDP_COMPONENT_DSC0] = MT8188_MUTEX_MOD_DISP_DSC_WRAP0_CORE0,
+ [DDP_COMPONENT_DSI0] = MT8188_MUTEX_MOD_DISP_DSI0,
+ [DDP_COMPONENT_PWM0] = MT8188_MUTEX_MOD2_DISP_PWM0,
+ [DDP_COMPONENT_DP_INTF0] = MT8188_MUTEX_MOD_DISP_DP_INTF0,
+};
+
+static const unsigned int mt8192_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+ [DDP_COMPONENT_AAL0] = MT8192_MUTEX_MOD_DISP_AAL0,
+ [DDP_COMPONENT_CCORR] = MT8192_MUTEX_MOD_DISP_CCORR0,
+ [DDP_COMPONENT_COLOR0] = MT8192_MUTEX_MOD_DISP_COLOR0,
+ [DDP_COMPONENT_DITHER0] = MT8192_MUTEX_MOD_DISP_DITHER0,
+ [DDP_COMPONENT_GAMMA] = MT8192_MUTEX_MOD_DISP_GAMMA0,
+ [DDP_COMPONENT_POSTMASK0] = MT8192_MUTEX_MOD_DISP_POSTMASK0,
+ [DDP_COMPONENT_OVL0] = MT8192_MUTEX_MOD_DISP_OVL0,
+ [DDP_COMPONENT_OVL_2L0] = MT8192_MUTEX_MOD_DISP_OVL0_2L,
+ [DDP_COMPONENT_OVL_2L2] = MT8192_MUTEX_MOD_DISP_OVL2_2L,
+ [DDP_COMPONENT_RDMA0] = MT8192_MUTEX_MOD_DISP_RDMA0,
+ [DDP_COMPONENT_RDMA4] = MT8192_MUTEX_MOD_DISP_RDMA4,
+};
+
+static const unsigned int mt8195_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+ [DDP_COMPONENT_OVL0] = MT8195_MUTEX_MOD_DISP_OVL0,
+ [DDP_COMPONENT_WDMA0] = MT8195_MUTEX_MOD_DISP_WDMA0,
+ [DDP_COMPONENT_RDMA0] = MT8195_MUTEX_MOD_DISP_RDMA0,
+ [DDP_COMPONENT_COLOR0] = MT8195_MUTEX_MOD_DISP_COLOR0,
+ [DDP_COMPONENT_CCORR] = MT8195_MUTEX_MOD_DISP_CCORR0,
+ [DDP_COMPONENT_AAL0] = MT8195_MUTEX_MOD_DISP_AAL0,
+ [DDP_COMPONENT_GAMMA] = MT8195_MUTEX_MOD_DISP_GAMMA0,
+ [DDP_COMPONENT_DITHER0] = MT8195_MUTEX_MOD_DISP_DITHER0,
+ [DDP_COMPONENT_MERGE0] = MT8195_MUTEX_MOD_DISP_VPP_MERGE,
+ [DDP_COMPONENT_DSC0] = MT8195_MUTEX_MOD_DISP_DSC_WRAP0_CORE0,
+ [DDP_COMPONENT_DSI0] = MT8195_MUTEX_MOD_DISP_DSI0,
+ [DDP_COMPONENT_PWM0] = MT8195_MUTEX_MOD_DISP_PWM0,
+ [DDP_COMPONENT_DP_INTF0] = MT8195_MUTEX_MOD_DISP_DP_INTF0,
+ [DDP_COMPONENT_MDP_RDMA0] = MT8195_MUTEX_MOD_DISP1_MDP_RDMA0,
+ [DDP_COMPONENT_MDP_RDMA1] = MT8195_MUTEX_MOD_DISP1_MDP_RDMA1,
+ [DDP_COMPONENT_MDP_RDMA2] = MT8195_MUTEX_MOD_DISP1_MDP_RDMA2,
+ [DDP_COMPONENT_MDP_RDMA3] = MT8195_MUTEX_MOD_DISP1_MDP_RDMA3,
+ [DDP_COMPONENT_MDP_RDMA4] = MT8195_MUTEX_MOD_DISP1_MDP_RDMA4,
+ [DDP_COMPONENT_MDP_RDMA5] = MT8195_MUTEX_MOD_DISP1_MDP_RDMA5,
+ [DDP_COMPONENT_MDP_RDMA6] = MT8195_MUTEX_MOD_DISP1_MDP_RDMA6,
+ [DDP_COMPONENT_MDP_RDMA7] = MT8195_MUTEX_MOD_DISP1_MDP_RDMA7,
+ [DDP_COMPONENT_MERGE1] = MT8195_MUTEX_MOD_DISP1_VPP_MERGE0,
+ [DDP_COMPONENT_MERGE2] = MT8195_MUTEX_MOD_DISP1_VPP_MERGE1,
+ [DDP_COMPONENT_MERGE3] = MT8195_MUTEX_MOD_DISP1_VPP_MERGE2,
+ [DDP_COMPONENT_MERGE4] = MT8195_MUTEX_MOD_DISP1_VPP_MERGE3,
+ [DDP_COMPONENT_ETHDR_MIXER] = MT8195_MUTEX_MOD_DISP1_DISP_MIXER,
+ [DDP_COMPONENT_MERGE5] = MT8195_MUTEX_MOD_DISP1_VPP_MERGE4,
+ [DDP_COMPONENT_DP_INTF1] = MT8195_MUTEX_MOD_DISP1_DP_INTF0,
+};
+
+static const unsigned int mt8195_mutex_table_mod[MUTEX_MOD_IDX_MAX] = {
+ [MUTEX_MOD_IDX_MDP_RDMA0] = MT8195_MUTEX_MOD_MDP_RDMA0,
+ [MUTEX_MOD_IDX_MDP_RDMA1] = MT8195_MUTEX_MOD_MDP_RDMA1,
+ [MUTEX_MOD_IDX_MDP_RDMA2] = MT8195_MUTEX_MOD_MDP_RDMA2,
+ [MUTEX_MOD_IDX_MDP_RDMA3] = MT8195_MUTEX_MOD_MDP_RDMA3,
+ [MUTEX_MOD_IDX_MDP_STITCH0] = MT8195_MUTEX_MOD_MDP_STITCH0,
+ [MUTEX_MOD_IDX_MDP_FG0] = MT8195_MUTEX_MOD_MDP_FG0,
+ [MUTEX_MOD_IDX_MDP_FG1] = MT8195_MUTEX_MOD_MDP_FG1,
+ [MUTEX_MOD_IDX_MDP_FG2] = MT8195_MUTEX_MOD_MDP_FG2,
+ [MUTEX_MOD_IDX_MDP_FG3] = MT8195_MUTEX_MOD_MDP_FG3,
+ [MUTEX_MOD_IDX_MDP_HDR0] = MT8195_MUTEX_MOD_MDP_HDR0,
+ [MUTEX_MOD_IDX_MDP_HDR1] = MT8195_MUTEX_MOD_MDP_HDR1,
+ [MUTEX_MOD_IDX_MDP_HDR2] = MT8195_MUTEX_MOD_MDP_HDR2,
+ [MUTEX_MOD_IDX_MDP_HDR3] = MT8195_MUTEX_MOD_MDP_HDR3,
+ [MUTEX_MOD_IDX_MDP_AAL0] = MT8195_MUTEX_MOD_MDP_AAL0,
+ [MUTEX_MOD_IDX_MDP_AAL1] = MT8195_MUTEX_MOD_MDP_AAL1,
+ [MUTEX_MOD_IDX_MDP_AAL2] = MT8195_MUTEX_MOD_MDP_AAL2,
+ [MUTEX_MOD_IDX_MDP_AAL3] = MT8195_MUTEX_MOD_MDP_AAL3,
+ [MUTEX_MOD_IDX_MDP_RSZ0] = MT8195_MUTEX_MOD_MDP_RSZ0,
+ [MUTEX_MOD_IDX_MDP_RSZ1] = MT8195_MUTEX_MOD_MDP_RSZ1,
+ [MUTEX_MOD_IDX_MDP_RSZ2] = MT8195_MUTEX_MOD_MDP_RSZ2,
+ [MUTEX_MOD_IDX_MDP_RSZ3] = MT8195_MUTEX_MOD_MDP_RSZ3,
+ [MUTEX_MOD_IDX_MDP_MERGE2] = MT8195_MUTEX_MOD_MDP_MERGE2,
+ [MUTEX_MOD_IDX_MDP_MERGE3] = MT8195_MUTEX_MOD_MDP_MERGE3,
+ [MUTEX_MOD_IDX_MDP_TDSHP0] = MT8195_MUTEX_MOD_MDP_TDSHP0,
+ [MUTEX_MOD_IDX_MDP_TDSHP1] = MT8195_MUTEX_MOD_MDP_TDSHP1,
+ [MUTEX_MOD_IDX_MDP_TDSHP2] = MT8195_MUTEX_MOD_MDP_TDSHP2,
+ [MUTEX_MOD_IDX_MDP_TDSHP3] = MT8195_MUTEX_MOD_MDP_TDSHP3,
+ [MUTEX_MOD_IDX_MDP_COLOR0] = MT8195_MUTEX_MOD_MDP_COLOR0,
+ [MUTEX_MOD_IDX_MDP_COLOR1] = MT8195_MUTEX_MOD_MDP_COLOR1,
+ [MUTEX_MOD_IDX_MDP_COLOR2] = MT8195_MUTEX_MOD_MDP_COLOR2,
+ [MUTEX_MOD_IDX_MDP_COLOR3] = MT8195_MUTEX_MOD_MDP_COLOR3,
+ [MUTEX_MOD_IDX_MDP_OVL0] = MT8195_MUTEX_MOD_MDP_OVL0,
+ [MUTEX_MOD_IDX_MDP_OVL1] = MT8195_MUTEX_MOD_MDP_OVL1,
+ [MUTEX_MOD_IDX_MDP_PAD0] = MT8195_MUTEX_MOD_MDP_PAD0,
+ [MUTEX_MOD_IDX_MDP_PAD1] = MT8195_MUTEX_MOD_MDP_PAD1,
+ [MUTEX_MOD_IDX_MDP_PAD2] = MT8195_MUTEX_MOD_MDP_PAD2,
+ [MUTEX_MOD_IDX_MDP_PAD3] = MT8195_MUTEX_MOD_MDP_PAD3,
+ [MUTEX_MOD_IDX_MDP_TCC0] = MT8195_MUTEX_MOD_MDP_TCC0,
+ [MUTEX_MOD_IDX_MDP_TCC1] = MT8195_MUTEX_MOD_MDP_TCC1,
+ [MUTEX_MOD_IDX_MDP_WROT0] = MT8195_MUTEX_MOD_MDP_WROT0,
+ [MUTEX_MOD_IDX_MDP_WROT1] = MT8195_MUTEX_MOD_MDP_WROT1,
+ [MUTEX_MOD_IDX_MDP_WROT2] = MT8195_MUTEX_MOD_MDP_WROT2,
+ [MUTEX_MOD_IDX_MDP_WROT3] = MT8195_MUTEX_MOD_MDP_WROT3,
+};
+
+static const unsigned int mt8365_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+ [DDP_COMPONENT_AAL0] = MT8365_MUTEX_MOD_DISP_AAL,
+ [DDP_COMPONENT_CCORR] = MT8365_MUTEX_MOD_DISP_CCORR,
+ [DDP_COMPONENT_COLOR0] = MT8365_MUTEX_MOD_DISP_COLOR0,
+ [DDP_COMPONENT_DITHER0] = MT8365_MUTEX_MOD_DISP_DITHER,
+ [DDP_COMPONENT_DPI0] = MT8365_MUTEX_MOD_DISP_DPI0,
+ [DDP_COMPONENT_DSI0] = MT8365_MUTEX_MOD_DISP_DSI0,
+ [DDP_COMPONENT_GAMMA] = MT8365_MUTEX_MOD_DISP_GAMMA,
+ [DDP_COMPONENT_OVL0] = MT8365_MUTEX_MOD_DISP_OVL0,
+ [DDP_COMPONENT_OVL_2L0] = MT8365_MUTEX_MOD_DISP_OVL0_2L,
+ [DDP_COMPONENT_PWM0] = MT8365_MUTEX_MOD_DISP_PWM0,
+ [DDP_COMPONENT_RDMA0] = MT8365_MUTEX_MOD_DISP_RDMA0,
+ [DDP_COMPONENT_RDMA1] = MT8365_MUTEX_MOD_DISP_RDMA1,
+ [DDP_COMPONENT_WDMA0] = MT8365_MUTEX_MOD_DISP_WDMA0,
+};
+
+static const unsigned int mt2712_mutex_sof[DDP_MUTEX_SOF_MAX] = {
+ [MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
+ [MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0,
+ [MUTEX_SOF_DSI1] = MUTEX_SOF_DSI1,
+ [MUTEX_SOF_DPI0] = MUTEX_SOF_DPI0,
+ [MUTEX_SOF_DPI1] = MUTEX_SOF_DPI1,
+ [MUTEX_SOF_DSI2] = MUTEX_SOF_DSI2,
+ [MUTEX_SOF_DSI3] = MUTEX_SOF_DSI3,
+};
+
+static const unsigned int mt6795_mutex_sof[DDP_MUTEX_SOF_MAX] = {
+ [MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
+ [MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0,
+ [MUTEX_SOF_DSI1] = MUTEX_SOF_DSI1,
+ [MUTEX_SOF_DPI0] = MUTEX_SOF_DPI0,
+};
+
+static const unsigned int mt8167_mutex_sof[DDP_MUTEX_SOF_MAX] = {
+ [MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
+ [MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0,
+ [MUTEX_SOF_DPI0] = MT8167_MUTEX_SOF_DPI0,
+ [MUTEX_SOF_DPI1] = MT8167_MUTEX_SOF_DPI1,
+};
+
+/* Add EOF setting so the overlay hardware can receive the frame-done irq */
+static const unsigned int mt8183_mutex_sof[DDP_MUTEX_SOF_MAX] = {
+ [MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
+ [MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0 | MT8183_MUTEX_EOF_DSI0,
+ [MUTEX_SOF_DPI0] = MT8183_MUTEX_SOF_DPI0 | MT8183_MUTEX_EOF_DPI0,
+};
+
+static const unsigned int mt8186_mutex_sof[MUTEX_SOF_DSI3 + 1] = {
+ [MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
+ [MUTEX_SOF_DSI0] = MT8186_MUTEX_SOF_DSI0 | MT8186_MUTEX_EOF_DSI0,
+ [MUTEX_SOF_DPI0] = MT8186_MUTEX_SOF_DPI0 | MT8186_MUTEX_EOF_DPI0,
+};
+
+/*
+ * To support refresh (video) mode, DISP_REG_MUTEX_SOF must select the
+ * EOF source and configure the EOF timing from the module that provides
+ * the timing signal. That way the MUTEX can not only send a STREAM_DONE
+ * event to GCE but also detect an error at end of frame (EAEOF) when the
+ * EOF signal arrives.
+ */
+static const unsigned int mt8188_mutex_sof[DDP_MUTEX_SOF_MAX] = {
+ [MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
+ [MUTEX_SOF_DSI0] =
+ MT8188_MUTEX_SOF_DSI0 | MT8188_MUTEX_EOF_DSI0,
+ [MUTEX_SOF_DP_INTF0] =
+ MT8188_MUTEX_SOF_DP_INTF0 | MT8188_MUTEX_EOF_DP_INTF0,
+};
+
+static const unsigned int mt8195_mutex_sof[DDP_MUTEX_SOF_MAX] = {
+ [MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
+ [MUTEX_SOF_DSI0] = MT8195_MUTEX_SOF_DSI0 | MT8195_MUTEX_EOF_DSI0,
+ [MUTEX_SOF_DSI1] = MT8195_MUTEX_SOF_DSI1 | MT8195_MUTEX_EOF_DSI1,
+ [MUTEX_SOF_DPI0] = MT8195_MUTEX_SOF_DPI0 | MT8195_MUTEX_EOF_DPI0,
+ [MUTEX_SOF_DPI1] = MT8195_MUTEX_SOF_DPI1 | MT8195_MUTEX_EOF_DPI1,
+ [MUTEX_SOF_DP_INTF0] =
+ MT8195_MUTEX_SOF_DP_INTF0 | MT8195_MUTEX_EOF_DP_INTF0,
+ [MUTEX_SOF_DP_INTF1] =
+ MT8195_MUTEX_SOF_DP_INTF1 | MT8195_MUTEX_EOF_DP_INTF1,
+};
+
+static const struct mtk_mutex_data mt2701_mutex_driver_data = {
+ .mutex_mod = mt2701_mutex_mod,
+ .mutex_sof = mt2712_mutex_sof,
+ .mutex_mod_reg = MT2701_MUTEX0_MOD0,
+ .mutex_sof_reg = MT2701_MUTEX0_SOF0,
+};
+
+static const struct mtk_mutex_data mt2712_mutex_driver_data = {
+ .mutex_mod = mt2712_mutex_mod,
+ .mutex_sof = mt2712_mutex_sof,
+ .mutex_mod_reg = MT2701_MUTEX0_MOD0,
+ .mutex_sof_reg = MT2701_MUTEX0_SOF0,
+};
+
+static const struct mtk_mutex_data mt6795_mutex_driver_data = {
+ .mutex_mod = mt8173_mutex_mod,
+ .mutex_sof = mt6795_mutex_sof,
+ .mutex_mod_reg = MT2701_MUTEX0_MOD0,
+ .mutex_sof_reg = MT2701_MUTEX0_SOF0,
+};
+
+static const struct mtk_mutex_data mt8167_mutex_driver_data = {
+ .mutex_mod = mt8167_mutex_mod,
+ .mutex_sof = mt8167_mutex_sof,
+ .mutex_mod_reg = MT2701_MUTEX0_MOD0,
+ .mutex_sof_reg = MT2701_MUTEX0_SOF0,
+ .no_clk = true,
+};
+
+static const struct mtk_mutex_data mt8173_mutex_driver_data = {
+ .mutex_mod = mt8173_mutex_mod,
+ .mutex_sof = mt2712_mutex_sof,
+ .mutex_mod_reg = MT2701_MUTEX0_MOD0,
+ .mutex_sof_reg = MT2701_MUTEX0_SOF0,
+};
+
+static const struct mtk_mutex_data mt8183_mutex_driver_data = {
+ .mutex_mod = mt8183_mutex_mod,
+ .mutex_sof = mt8183_mutex_sof,
+ .mutex_mod_reg = MT8183_MUTEX0_MOD0,
+ .mutex_sof_reg = MT8183_MUTEX0_SOF0,
+ .mutex_table_mod = mt8183_mutex_table_mod,
+ .no_clk = true,
+};
+
+static const struct mtk_mutex_data mt8186_mdp_mutex_driver_data = {
+ .mutex_mod_reg = MT8183_MUTEX0_MOD0,
+ .mutex_sof_reg = MT8183_MUTEX0_SOF0,
+ .mutex_table_mod = mt8186_mdp_mutex_table_mod,
+};
+
+static const struct mtk_mutex_data mt8186_mutex_driver_data = {
+ .mutex_mod = mt8186_mutex_mod,
+ .mutex_sof = mt8186_mutex_sof,
+ .mutex_mod_reg = MT8183_MUTEX0_MOD0,
+ .mutex_sof_reg = MT8183_MUTEX0_SOF0,
+};
+
+static const struct mtk_mutex_data mt8188_mutex_driver_data = {
+ .mutex_mod = mt8188_mutex_mod,
+ .mutex_sof = mt8188_mutex_sof,
+ .mutex_mod_reg = MT8183_MUTEX0_MOD0,
+ .mutex_sof_reg = MT8183_MUTEX0_SOF0,
+};
+
+static const struct mtk_mutex_data mt8192_mutex_driver_data = {
+ .mutex_mod = mt8192_mutex_mod,
+ .mutex_sof = mt8183_mutex_sof,
+ .mutex_mod_reg = MT8183_MUTEX0_MOD0,
+ .mutex_sof_reg = MT8183_MUTEX0_SOF0,
+};
+
+static const struct mtk_mutex_data mt8195_mutex_driver_data = {
+ .mutex_mod = mt8195_mutex_mod,
+ .mutex_sof = mt8195_mutex_sof,
+ .mutex_mod_reg = MT8183_MUTEX0_MOD0,
+ .mutex_sof_reg = MT8183_MUTEX0_SOF0,
+};
+
+static const struct mtk_mutex_data mt8195_vpp_mutex_driver_data = {
+ .mutex_sof = mt8195_mutex_sof,
+ .mutex_mod_reg = MT8183_MUTEX0_MOD0,
+ .mutex_sof_reg = MT8183_MUTEX0_SOF0,
+ .mutex_table_mod = mt8195_mutex_table_mod,
+};
+
+static const struct mtk_mutex_data mt8365_mutex_driver_data = {
+ .mutex_mod = mt8365_mutex_mod,
+ .mutex_sof = mt8183_mutex_sof,
+ .mutex_mod_reg = MT8183_MUTEX0_MOD0,
+ .mutex_sof_reg = MT8183_MUTEX0_SOF0,
+ .no_clk = true,
+};
+
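+/*
+ * mtk_mutex_get - claim the first free hardware mutex handle for @dev.
+ * Returns ERR_PTR(-EBUSY) when all MTK_MUTEX_MAX_HANDLES handles are in use.
+ */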
+struct mtk_mutex *mtk_mutex_get(struct device *dev)
+{
+ struct mtk_mutex_ctx *mtx = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < MTK_MUTEX_MAX_HANDLES; i++)
+ if (!mtx->mutex[i].claimed) {
+ mtx->mutex[i].claimed = true;
+ return &mtx->mutex[i];
+ }
+
+ return ERR_PTR(-EBUSY);
+}
+EXPORT_SYMBOL_GPL(mtk_mutex_get);
+
+void mtk_mutex_put(struct mtk_mutex *mutex)
+{
+ struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
+ mutex[mutex->id]);
+
+ WARN_ON(&mtx->mutex[mutex->id] != mutex);
+
+ mutex->claimed = false;
+}
+EXPORT_SYMBOL_GPL(mtk_mutex_put);
+
+int mtk_mutex_prepare(struct mtk_mutex *mutex)
+{
+ struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
+ mutex[mutex->id]);
+ return clk_prepare_enable(mtx->clk);
+}
+EXPORT_SYMBOL_GPL(mtk_mutex_prepare);
+
+void mtk_mutex_unprepare(struct mtk_mutex *mutex)
+{
+ struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
+ mutex[mutex->id]);
+ clk_disable_unprepare(mtx->clk);
+}
+EXPORT_SYMBOL_GPL(mtk_mutex_unprepare);
+
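+/*
+ * Attach a component to this mutex: display interfaces (DSI/DPI/DP_INTF)
+ * select their SOF/EOF source in the SOF register, while every other
+ * component sets its bit in the (possibly split) MUTEX_MOD register.
+ */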
+void mtk_mutex_add_comp(struct mtk_mutex *mutex,
+ enum mtk_ddp_comp_id id)
+{
+ struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
+ mutex[mutex->id]);
+ unsigned int reg;
+ unsigned int sof_id;
+ unsigned int offset;
+
+ WARN_ON(&mtx->mutex[mutex->id] != mutex);
+
+ switch (id) {
+ case DDP_COMPONENT_DSI0:
+ sof_id = MUTEX_SOF_DSI0;
+ break;
+ case DDP_COMPONENT_DSI1:
+ sof_id = MUTEX_SOF_DSI0;
+ break;
+ case DDP_COMPONENT_DSI2:
+ sof_id = MUTEX_SOF_DSI2;
+ break;
+ case DDP_COMPONENT_DSI3:
+ sof_id = MUTEX_SOF_DSI3;
+ break;
+ case DDP_COMPONENT_DPI0:
+ sof_id = MUTEX_SOF_DPI0;
+ break;
+ case DDP_COMPONENT_DPI1:
+ sof_id = MUTEX_SOF_DPI1;
+ break;
+ case DDP_COMPONENT_DP_INTF0:
+ sof_id = MUTEX_SOF_DP_INTF0;
+ break;
+ case DDP_COMPONENT_DP_INTF1:
+ sof_id = MUTEX_SOF_DP_INTF1;
+ break;
+ default:
+ if (mtx->data->mutex_mod[id] < 32) {
+ offset = DISP_REG_MUTEX_MOD(mtx->data->mutex_mod_reg,
+ mutex->id);
+ reg = readl_relaxed(mtx->regs + offset);
+ reg |= 1 << mtx->data->mutex_mod[id];
+ writel_relaxed(reg, mtx->regs + offset);
+ } else {
+ offset = DISP_REG_MUTEX_MOD2(mutex->id);
+ reg = readl_relaxed(mtx->regs + offset);
+ reg |= 1 << (mtx->data->mutex_mod[id] - 32);
+ writel_relaxed(reg, mtx->regs + offset);
+ }
+ return;
+ }
+
+ writel_relaxed(mtx->data->mutex_sof[sof_id],
+ mtx->regs +
+ DISP_REG_MUTEX_SOF(mtx->data->mutex_sof_reg, mutex->id));
+}
+EXPORT_SYMBOL_GPL(mtk_mutex_add_comp);
+
+void mtk_mutex_remove_comp(struct mtk_mutex *mutex,
+ enum mtk_ddp_comp_id id)
+{
+ struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
+ mutex[mutex->id]);
+ unsigned int reg;
+ unsigned int offset;
+
+ WARN_ON(&mtx->mutex[mutex->id] != mutex);
+
+ switch (id) {
+ case DDP_COMPONENT_DSI0:
+ case DDP_COMPONENT_DSI1:
+ case DDP_COMPONENT_DSI2:
+ case DDP_COMPONENT_DSI3:
+ case DDP_COMPONENT_DPI0:
+ case DDP_COMPONENT_DPI1:
+ case DDP_COMPONENT_DP_INTF0:
+ case DDP_COMPONENT_DP_INTF1:
+ writel_relaxed(MUTEX_SOF_SINGLE_MODE,
+ mtx->regs +
+ DISP_REG_MUTEX_SOF(mtx->data->mutex_sof_reg,
+ mutex->id));
+ break;
+ default:
+ if (mtx->data->mutex_mod[id] < 32) {
+ offset = DISP_REG_MUTEX_MOD(mtx->data->mutex_mod_reg,
+ mutex->id);
+ reg = readl_relaxed(mtx->regs + offset);
+ reg &= ~(1 << mtx->data->mutex_mod[id]);
+ writel_relaxed(reg, mtx->regs + offset);
+ } else {
+ offset = DISP_REG_MUTEX_MOD2(mutex->id);
+ reg = readl_relaxed(mtx->regs + offset);
+ reg &= ~(1 << (mtx->data->mutex_mod[id] - 32));
+ writel_relaxed(reg, mtx->regs + offset);
+ }
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(mtk_mutex_remove_comp);
+
+void mtk_mutex_enable(struct mtk_mutex *mutex)
+{
+ struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
+ mutex[mutex->id]);
+
+ WARN_ON(&mtx->mutex[mutex->id] != mutex);
+
+ writel(1, mtx->regs + DISP_REG_MUTEX_EN(mutex->id));
+}
+EXPORT_SYMBOL_GPL(mtk_mutex_enable);
+
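+/*
+ * Enable the mutex through a GCE command packet instead of a CPU MMIO
+ * write; this requires the mediatek,gce-client-reg property so that the
+ * physical address and subsys of the mutex block are known to CMDQ.
+ */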
+int mtk_mutex_enable_by_cmdq(struct mtk_mutex *mutex, void *pkt)
+{
+ struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
+ mutex[mutex->id]);
+ struct cmdq_pkt *cmdq_pkt = (struct cmdq_pkt *)pkt;
+
+ WARN_ON(&mtx->mutex[mutex->id] != mutex);
+
+ if (!mtx->cmdq_reg.size) {
+ dev_err(mtx->dev, "mediatek,gce-client-reg hasn't been set");
+ return -ENODEV;
+ }
+
+ cmdq_pkt_write(cmdq_pkt, mtx->cmdq_reg.subsys,
+ mtx->addr + DISP_REG_MUTEX_EN(mutex->id), 1);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtk_mutex_enable_by_cmdq);
+
+void mtk_mutex_disable(struct mtk_mutex *mutex)
+{
+ struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
+ mutex[mutex->id]);
+
+ WARN_ON(&mtx->mutex[mutex->id] != mutex);
+
+ writel(0, mtx->regs + DISP_REG_MUTEX_EN(mutex->id));
+}
+EXPORT_SYMBOL_GPL(mtk_mutex_disable);
+
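+/*
+ * Take the mutex in software-triggered mode: enable it, request it and
+ * busy-wait (up to 10 ms) for the INT_MUTEX acknowledge bit to be set.
+ */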
+void mtk_mutex_acquire(struct mtk_mutex *mutex)
+{
+ struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
+ mutex[mutex->id]);
+ u32 tmp;
+
+ writel(1, mtx->regs + DISP_REG_MUTEX_EN(mutex->id));
+ writel(1, mtx->regs + DISP_REG_MUTEX(mutex->id));
+ if (readl_poll_timeout_atomic(mtx->regs + DISP_REG_MUTEX(mutex->id),
+ tmp, tmp & INT_MUTEX, 1, 10000))
+ pr_err("could not acquire mutex %d\n", mutex->id);
+}
+EXPORT_SYMBOL_GPL(mtk_mutex_acquire);
+
+void mtk_mutex_release(struct mtk_mutex *mutex)
+{
+ struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
+ mutex[mutex->id]);
+
+ writel(0, mtx->regs + DISP_REG_MUTEX(mutex->id));
+}
+EXPORT_SYMBOL_GPL(mtk_mutex_release);
+
+int mtk_mutex_write_mod(struct mtk_mutex *mutex,
+ enum mtk_mutex_mod_index idx, bool clear)
+{
+ struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
+ mutex[mutex->id]);
+ unsigned int reg;
+ u32 reg_offset, id_offset = 0;
+
+ WARN_ON(&mtx->mutex[mutex->id] != mutex);
+
+ if (idx < MUTEX_MOD_IDX_MDP_RDMA0 ||
+ idx >= MUTEX_MOD_IDX_MAX) {
+ dev_err(mtx->dev, "Unsupported MOD table index: %d", idx);
+ return -EINVAL;
+ }
+
+ /*
+ * Some SoCs have more than 32 mods and therefore need multiple 32-bit
+ * MUTEX_MOD registers.
+ *
+ * The mutex_table_mod represents this by numbering the mods
+ * sequentially; the number is later used as a bit index and can
+ * exceed 0..31.
+ *
+ * To retain compatibility with older SoCs, we still perform R/W on
+ * single 32-bit registers, so the mutex mod bit has to be translated
+ * accordingly.
+ */
+ if (mtx->data->mutex_table_mod[idx] < 32) {
+ reg_offset = DISP_REG_MUTEX_MOD(mtx->data->mutex_mod_reg,
+ mutex->id);
+ } else {
+ reg_offset = DISP_REG_MUTEX_MOD1(mtx->data->mutex_mod_reg,
+ mutex->id);
+ id_offset = 32;
+ }
+
+ reg = readl_relaxed(mtx->regs + reg_offset);
+ if (clear)
+ reg &= ~BIT(mtx->data->mutex_table_mod[idx] - id_offset);
+ else
+ reg |= BIT(mtx->data->mutex_table_mod[idx] - id_offset);
+
+ writel_relaxed(reg, mtx->regs + reg_offset);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtk_mutex_write_mod);
+
+int mtk_mutex_write_sof(struct mtk_mutex *mutex,
+ enum mtk_mutex_sof_index idx)
+{
+ struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx,
+ mutex[mutex->id]);
+
+ WARN_ON(&mtx->mutex[mutex->id] != mutex);
+
+ if (idx < MUTEX_SOF_IDX_SINGLE_MODE ||
+ idx >= MUTEX_SOF_IDX_MAX) {
+ dev_err(mtx->dev, "Unsupported SOF index: %d", idx);
+ return -EINVAL;
+ }
+
+ writel_relaxed(idx, mtx->regs +
+ DISP_REG_MUTEX_SOF(mtx->data->mutex_sof_reg, mutex->id));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtk_mutex_write_sof);
+
+static int mtk_mutex_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_mutex_ctx *mtx;
+ struct resource *regs;
+ int i, ret;
+
+ mtx = devm_kzalloc(dev, sizeof(*mtx), GFP_KERNEL);
+ if (!mtx)
+ return -ENOMEM;
+
+ for (i = 0; i < MTK_MUTEX_MAX_HANDLES; i++)
+ mtx->mutex[i].id = i;
+
+ mtx->data = of_device_get_match_data(dev);
+
+ if (!mtx->data->no_clk) {
+ mtx->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(mtx->clk))
+ return dev_err_probe(dev, PTR_ERR(mtx->clk), "Failed to get clock\n");
+ }
+
+ mtx->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &regs);
+ if (IS_ERR(mtx->regs)) {
+ dev_err(dev, "Failed to map mutex registers\n");
+ return PTR_ERR(mtx->regs);
+ }
+ mtx->addr = regs->start;
+
+ /* CMDQ is optional */
+ ret = cmdq_dev_get_client_reg(dev, &mtx->cmdq_reg, 0);
+ if (ret)
+ dev_dbg(dev, "No mediatek,gce-client-reg!\n");
+
+ platform_set_drvdata(pdev, mtx);
+
+ return 0;
+}
+
+static const struct of_device_id mutex_driver_dt_match[] = {
+ { .compatible = "mediatek,mt2701-disp-mutex", .data = &mt2701_mutex_driver_data },
+ { .compatible = "mediatek,mt2712-disp-mutex", .data = &mt2712_mutex_driver_data },
+ { .compatible = "mediatek,mt6795-disp-mutex", .data = &mt6795_mutex_driver_data },
+ { .compatible = "mediatek,mt8167-disp-mutex", .data = &mt8167_mutex_driver_data },
+ { .compatible = "mediatek,mt8173-disp-mutex", .data = &mt8173_mutex_driver_data },
+ { .compatible = "mediatek,mt8183-disp-mutex", .data = &mt8183_mutex_driver_data },
+ { .compatible = "mediatek,mt8186-disp-mutex", .data = &mt8186_mutex_driver_data },
+ { .compatible = "mediatek,mt8186-mdp3-mutex", .data = &mt8186_mdp_mutex_driver_data },
+ { .compatible = "mediatek,mt8188-disp-mutex", .data = &mt8188_mutex_driver_data },
+ { .compatible = "mediatek,mt8192-disp-mutex", .data = &mt8192_mutex_driver_data },
+ { .compatible = "mediatek,mt8195-disp-mutex", .data = &mt8195_mutex_driver_data },
+ { .compatible = "mediatek,mt8195-vpp-mutex", .data = &mt8195_vpp_mutex_driver_data },
+ { .compatible = "mediatek,mt8365-disp-mutex", .data = &mt8365_mutex_driver_data },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, mutex_driver_dt_match);
+
+static struct platform_driver mtk_mutex_driver = {
+ .probe = mtk_mutex_probe,
+ .driver = {
+ .name = "mediatek-mutex",
+ .of_match_table = mutex_driver_dt_match,
+ },
+};
+module_platform_driver(mtk_mutex_driver);
+
+MODULE_AUTHOR("Yongqiang Niu <yongqiang.niu@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek SoC MUTEX driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
new file mode 100644
index 0000000000..efd9cae212
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -0,0 +1,2675 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Flora Fu, MediaTek
+ */
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#define PWRAP_POLL_DELAY_US 10
+#define PWRAP_POLL_TIMEOUT_US 10000
+
+#define PWRAP_MT8135_BRIDGE_IORD_ARB_EN 0x4
+#define PWRAP_MT8135_BRIDGE_WACS3_EN 0x10
+#define PWRAP_MT8135_BRIDGE_INIT_DONE3 0x14
+#define PWRAP_MT8135_BRIDGE_WACS4_EN 0x24
+#define PWRAP_MT8135_BRIDGE_INIT_DONE4 0x28
+#define PWRAP_MT8135_BRIDGE_INT_EN 0x38
+#define PWRAP_MT8135_BRIDGE_TIMER_EN 0x48
+#define PWRAP_MT8135_BRIDGE_WDT_UNIT 0x50
+#define PWRAP_MT8135_BRIDGE_WDT_SRC_EN 0x54
+
+/* macro for wrapper status */
+#define PWRAP_GET_WACS_RDATA(x) (((x) >> 0) & 0x0000ffff)
+#define PWRAP_GET_WACS_ARB_FSM(x) (((x) >> 1) & 0x00000007)
+#define PWRAP_GET_WACS_FSM(x) (((x) >> 16) & 0x00000007)
+#define PWRAP_GET_WACS_REQ(x) (((x) >> 19) & 0x00000001)
+#define PWRAP_STATE_SYNC_IDLE0 BIT(20)
+#define PWRAP_STATE_INIT_DONE0 BIT(21)
+#define PWRAP_STATE_INIT_DONE0_MT8186 BIT(22)
+#define PWRAP_STATE_INIT_DONE1 BIT(15)
+
+/* macro for WACS FSM */
+#define PWRAP_WACS_FSM_IDLE 0x00
+#define PWRAP_WACS_FSM_REQ 0x02
+#define PWRAP_WACS_FSM_WFDLE 0x04
+#define PWRAP_WACS_FSM_WFVLDCLR 0x06
+#define PWRAP_WACS_INIT_DONE 0x01
+#define PWRAP_WACS_WACS_SYNC_IDLE 0x01
+#define PWRAP_WACS_SYNC_BUSY 0x00
+
+/* macro for device wrapper default value */
+#define PWRAP_DEW_READ_TEST_VAL 0x5aa5
+#define PWRAP_DEW_COMP_READ_TEST_VAL 0xa55a
+#define PWRAP_DEW_WRITE_TEST_VAL 0xa55a
+
+/* macro for manual command */
+#define PWRAP_MAN_CMD_SPI_WRITE_NEW (1 << 14)
+#define PWRAP_MAN_CMD_SPI_WRITE (1 << 13)
+#define PWRAP_MAN_CMD_OP_CSH (0x0 << 8)
+#define PWRAP_MAN_CMD_OP_CSL (0x1 << 8)
+#define PWRAP_MAN_CMD_OP_CK (0x2 << 8)
+#define PWRAP_MAN_CMD_OP_OUTS (0x8 << 8)
+#define PWRAP_MAN_CMD_OP_OUTD (0x9 << 8)
+#define PWRAP_MAN_CMD_OP_OUTQ (0xa << 8)
+
+/* macro for Watch Dog Timer Source */
+#define PWRAP_WDT_SRC_EN_STAUPD_TRIG (1 << 25)
+#define PWRAP_WDT_SRC_EN_HARB_STAUPD_DLE (1 << 20)
+#define PWRAP_WDT_SRC_EN_HARB_STAUPD_ALE (1 << 6)
+#define PWRAP_WDT_SRC_MASK_ALL 0xffffffff
+#define PWRAP_WDT_SRC_MASK_NO_STAUPD ~(PWRAP_WDT_SRC_EN_STAUPD_TRIG | \
+ PWRAP_WDT_SRC_EN_HARB_STAUPD_DLE | \
+ PWRAP_WDT_SRC_EN_HARB_STAUPD_ALE)
+
+/* Group of bits used to show slave capability */
+#define PWRAP_SLV_CAP_SPI BIT(0)
+#define PWRAP_SLV_CAP_DUALIO BIT(1)
+#define PWRAP_SLV_CAP_SECURITY BIT(2)
+#define HAS_CAP(_c, _x) (((_c) & (_x)) == (_x))
+
+/* Group of bits used to show pwrap capability */
+#define PWRAP_CAP_BRIDGE BIT(0)
+#define PWRAP_CAP_RESET BIT(1)
+#define PWRAP_CAP_DCM BIT(2)
+#define PWRAP_CAP_INT1_EN BIT(3)
+#define PWRAP_CAP_WDT_SRC1 BIT(4)
+#define PWRAP_CAP_ARB BIT(5)
+#define PWRAP_CAP_ARB_MT8186 BIT(8)
+
+/* defines for slave device wrapper registers */
+enum dew_regs {
+ PWRAP_DEW_BASE,
+ PWRAP_DEW_DIO_EN,
+ PWRAP_DEW_READ_TEST,
+ PWRAP_DEW_WRITE_TEST,
+ PWRAP_DEW_CRC_EN,
+ PWRAP_DEW_CRC_VAL,
+ PWRAP_DEW_MON_GRP_SEL,
+ PWRAP_DEW_CIPHER_KEY_SEL,
+ PWRAP_DEW_CIPHER_IV_SEL,
+ PWRAP_DEW_CIPHER_RDY,
+ PWRAP_DEW_CIPHER_MODE,
+ PWRAP_DEW_CIPHER_SWRST,
+
+ /* MT6323 only regs */
+ PWRAP_DEW_CIPHER_EN,
+ PWRAP_DEW_RDDMY_NO,
+
+ /* MT6358 only regs */
+ PWRAP_SMT_CON1,
+ PWRAP_DRV_CON1,
+ PWRAP_FILTER_CON0,
+ PWRAP_GPIO_PULLEN0_CLR,
+ PWRAP_RG_SPI_CON0,
+ PWRAP_RG_SPI_RECORD0,
+ PWRAP_RG_SPI_CON2,
+ PWRAP_RG_SPI_CON3,
+ PWRAP_RG_SPI_CON4,
+ PWRAP_RG_SPI_CON5,
+ PWRAP_RG_SPI_CON6,
+ PWRAP_RG_SPI_CON7,
+ PWRAP_RG_SPI_CON8,
+ PWRAP_RG_SPI_CON13,
+ PWRAP_SPISLV_KEY,
+
+ /* MT6359 only regs */
+ PWRAP_DEW_CRC_SWRST,
+ PWRAP_DEW_RG_EN_RECORD,
+ PWRAP_DEW_RECORD_CMD0,
+ PWRAP_DEW_RECORD_CMD1,
+ PWRAP_DEW_RECORD_CMD2,
+ PWRAP_DEW_RECORD_CMD3,
+ PWRAP_DEW_RECORD_CMD4,
+ PWRAP_DEW_RECORD_CMD5,
+ PWRAP_DEW_RECORD_WDATA0,
+ PWRAP_DEW_RECORD_WDATA1,
+ PWRAP_DEW_RECORD_WDATA2,
+ PWRAP_DEW_RECORD_WDATA3,
+ PWRAP_DEW_RECORD_WDATA4,
+ PWRAP_DEW_RECORD_WDATA5,
+ PWRAP_DEW_RG_ADDR_TARGET,
+ PWRAP_DEW_RG_ADDR_MASK,
+ PWRAP_DEW_RG_WDATA_TARGET,
+ PWRAP_DEW_RG_WDATA_MASK,
+ PWRAP_DEW_RG_SPI_RECORD_CLR,
+ PWRAP_DEW_RG_CMD_ALERT_CLR,
+
+ /* MT6397 only regs */
+ PWRAP_DEW_EVENT_OUT_EN,
+ PWRAP_DEW_EVENT_SRC_EN,
+ PWRAP_DEW_EVENT_SRC,
+ PWRAP_DEW_EVENT_FLAG,
+ PWRAP_DEW_MON_FLAG_SEL,
+ PWRAP_DEW_EVENT_TEST,
+ PWRAP_DEW_CIPHER_LOAD,
+ PWRAP_DEW_CIPHER_START,
+};
+
+static const u32 mt6323_regs[] = {
+ [PWRAP_DEW_BASE] = 0x0000,
+ [PWRAP_DEW_DIO_EN] = 0x018a,
+ [PWRAP_DEW_READ_TEST] = 0x018c,
+ [PWRAP_DEW_WRITE_TEST] = 0x018e,
+ [PWRAP_DEW_CRC_EN] = 0x0192,
+ [PWRAP_DEW_CRC_VAL] = 0x0194,
+ [PWRAP_DEW_MON_GRP_SEL] = 0x0196,
+ [PWRAP_DEW_CIPHER_KEY_SEL] = 0x0198,
+ [PWRAP_DEW_CIPHER_IV_SEL] = 0x019a,
+ [PWRAP_DEW_CIPHER_EN] = 0x019c,
+ [PWRAP_DEW_CIPHER_RDY] = 0x019e,
+ [PWRAP_DEW_CIPHER_MODE] = 0x01a0,
+ [PWRAP_DEW_CIPHER_SWRST] = 0x01a2,
+ [PWRAP_DEW_RDDMY_NO] = 0x01a4,
+};
+
+static const u32 mt6331_regs[] = {
+ [PWRAP_DEW_DIO_EN] = 0x018c,
+ [PWRAP_DEW_READ_TEST] = 0x018e,
+ [PWRAP_DEW_WRITE_TEST] = 0x0190,
+ [PWRAP_DEW_CRC_SWRST] = 0x0192,
+ [PWRAP_DEW_CRC_EN] = 0x0194,
+ [PWRAP_DEW_CRC_VAL] = 0x0196,
+ [PWRAP_DEW_MON_GRP_SEL] = 0x0198,
+ [PWRAP_DEW_CIPHER_KEY_SEL] = 0x019a,
+ [PWRAP_DEW_CIPHER_IV_SEL] = 0x019c,
+ [PWRAP_DEW_CIPHER_EN] = 0x019e,
+ [PWRAP_DEW_CIPHER_RDY] = 0x01a0,
+ [PWRAP_DEW_CIPHER_MODE] = 0x01a2,
+ [PWRAP_DEW_CIPHER_SWRST] = 0x01a4,
+ [PWRAP_DEW_RDDMY_NO] = 0x01a6,
+};
+
+static const u32 mt6332_regs[] = {
+ [PWRAP_DEW_DIO_EN] = 0x80f6,
+ [PWRAP_DEW_READ_TEST] = 0x80f8,
+ [PWRAP_DEW_WRITE_TEST] = 0x80fa,
+ [PWRAP_DEW_CRC_SWRST] = 0x80fc,
+ [PWRAP_DEW_CRC_EN] = 0x80fe,
+ [PWRAP_DEW_CRC_VAL] = 0x8100,
+ [PWRAP_DEW_MON_GRP_SEL] = 0x8102,
+ [PWRAP_DEW_CIPHER_KEY_SEL] = 0x8104,
+ [PWRAP_DEW_CIPHER_IV_SEL] = 0x8106,
+ [PWRAP_DEW_CIPHER_EN] = 0x8108,
+ [PWRAP_DEW_CIPHER_RDY] = 0x810a,
+ [PWRAP_DEW_CIPHER_MODE] = 0x810c,
+ [PWRAP_DEW_CIPHER_SWRST] = 0x810e,
+ [PWRAP_DEW_RDDMY_NO] = 0x8110,
+};
+
+static const u32 mt6351_regs[] = {
+ [PWRAP_DEW_DIO_EN] = 0x02F2,
+ [PWRAP_DEW_READ_TEST] = 0x02F4,
+ [PWRAP_DEW_WRITE_TEST] = 0x02F6,
+ [PWRAP_DEW_CRC_EN] = 0x02FA,
+ [PWRAP_DEW_CRC_VAL] = 0x02FC,
+ [PWRAP_DEW_CIPHER_KEY_SEL] = 0x0300,
+ [PWRAP_DEW_CIPHER_IV_SEL] = 0x0302,
+ [PWRAP_DEW_CIPHER_EN] = 0x0304,
+ [PWRAP_DEW_CIPHER_RDY] = 0x0306,
+ [PWRAP_DEW_CIPHER_MODE] = 0x0308,
+ [PWRAP_DEW_CIPHER_SWRST] = 0x030A,
+ [PWRAP_DEW_RDDMY_NO] = 0x030C,
+};
+
+static const u32 mt6357_regs[] = {
+ [PWRAP_DEW_DIO_EN] = 0x040A,
+ [PWRAP_DEW_READ_TEST] = 0x040C,
+ [PWRAP_DEW_WRITE_TEST] = 0x040E,
+ [PWRAP_DEW_CRC_EN] = 0x0412,
+ [PWRAP_DEW_CRC_VAL] = 0x0414,
+ [PWRAP_DEW_CIPHER_KEY_SEL] = 0x0418,
+ [PWRAP_DEW_CIPHER_IV_SEL] = 0x041A,
+ [PWRAP_DEW_CIPHER_EN] = 0x041C,
+ [PWRAP_DEW_CIPHER_RDY] = 0x041E,
+ [PWRAP_DEW_CIPHER_MODE] = 0x0420,
+ [PWRAP_DEW_CIPHER_SWRST] = 0x0422,
+ [PWRAP_DEW_RDDMY_NO] = 0x0424,
+};
+
+static const u32 mt6358_regs[] = {
+ [PWRAP_SMT_CON1] = 0x0030,
+ [PWRAP_DRV_CON1] = 0x0038,
+ [PWRAP_FILTER_CON0] = 0x0040,
+ [PWRAP_GPIO_PULLEN0_CLR] = 0x0098,
+ [PWRAP_RG_SPI_CON0] = 0x0408,
+ [PWRAP_RG_SPI_RECORD0] = 0x040a,
+ [PWRAP_DEW_DIO_EN] = 0x040c,
+ [PWRAP_DEW_READ_TEST] = 0x040e,
+ [PWRAP_DEW_WRITE_TEST] = 0x0410,
+ [PWRAP_DEW_CRC_EN] = 0x0414,
+ [PWRAP_DEW_CIPHER_KEY_SEL] = 0x041a,
+ [PWRAP_DEW_CIPHER_IV_SEL] = 0x041c,
+ [PWRAP_DEW_CIPHER_EN] = 0x041e,
+ [PWRAP_DEW_CIPHER_RDY] = 0x0420,
+ [PWRAP_DEW_CIPHER_MODE] = 0x0422,
+ [PWRAP_DEW_CIPHER_SWRST] = 0x0424,
+ [PWRAP_RG_SPI_CON2] = 0x0432,
+ [PWRAP_RG_SPI_CON3] = 0x0434,
+ [PWRAP_RG_SPI_CON4] = 0x0436,
+ [PWRAP_RG_SPI_CON5] = 0x0438,
+ [PWRAP_RG_SPI_CON6] = 0x043a,
+ [PWRAP_RG_SPI_CON7] = 0x043c,
+ [PWRAP_RG_SPI_CON8] = 0x043e,
+ [PWRAP_RG_SPI_CON13] = 0x0448,
+ [PWRAP_SPISLV_KEY] = 0x044a,
+};
+
+static const u32 mt6359_regs[] = {
+ [PWRAP_DEW_RG_EN_RECORD] = 0x040a,
+ [PWRAP_DEW_DIO_EN] = 0x040c,
+ [PWRAP_DEW_READ_TEST] = 0x040e,
+ [PWRAP_DEW_WRITE_TEST] = 0x0410,
+ [PWRAP_DEW_CRC_SWRST] = 0x0412,
+ [PWRAP_DEW_CRC_EN] = 0x0414,
+ [PWRAP_DEW_CRC_VAL] = 0x0416,
+ [PWRAP_DEW_CIPHER_KEY_SEL] = 0x0418,
+ [PWRAP_DEW_CIPHER_IV_SEL] = 0x041a,
+ [PWRAP_DEW_CIPHER_EN] = 0x041c,
+ [PWRAP_DEW_CIPHER_RDY] = 0x041e,
+ [PWRAP_DEW_CIPHER_MODE] = 0x0420,
+ [PWRAP_DEW_CIPHER_SWRST] = 0x0422,
+ [PWRAP_DEW_RDDMY_NO] = 0x0424,
+ [PWRAP_DEW_RECORD_CMD0] = 0x0428,
+ [PWRAP_DEW_RECORD_CMD1] = 0x042a,
+ [PWRAP_DEW_RECORD_CMD2] = 0x042c,
+ [PWRAP_DEW_RECORD_CMD3] = 0x042e,
+ [PWRAP_DEW_RECORD_CMD4] = 0x0430,
+ [PWRAP_DEW_RECORD_CMD5] = 0x0432,
+ [PWRAP_DEW_RECORD_WDATA0] = 0x0434,
+ [PWRAP_DEW_RECORD_WDATA1] = 0x0436,
+ [PWRAP_DEW_RECORD_WDATA2] = 0x0438,
+ [PWRAP_DEW_RECORD_WDATA3] = 0x043a,
+ [PWRAP_DEW_RECORD_WDATA4] = 0x043c,
+ [PWRAP_DEW_RECORD_WDATA5] = 0x043e,
+ [PWRAP_DEW_RG_ADDR_TARGET] = 0x0440,
+ [PWRAP_DEW_RG_ADDR_MASK] = 0x0442,
+ [PWRAP_DEW_RG_WDATA_TARGET] = 0x0444,
+ [PWRAP_DEW_RG_WDATA_MASK] = 0x0446,
+ [PWRAP_DEW_RG_SPI_RECORD_CLR] = 0x0448,
+ [PWRAP_DEW_RG_CMD_ALERT_CLR] = 0x0448,
+ [PWRAP_SPISLV_KEY] = 0x044a,
+};
+
+static const u32 mt6397_regs[] = {
+ [PWRAP_DEW_BASE] = 0xbc00,
+ [PWRAP_DEW_EVENT_OUT_EN] = 0xbc00,
+ [PWRAP_DEW_DIO_EN] = 0xbc02,
+ [PWRAP_DEW_EVENT_SRC_EN] = 0xbc04,
+ [PWRAP_DEW_EVENT_SRC] = 0xbc06,
+ [PWRAP_DEW_EVENT_FLAG] = 0xbc08,
+ [PWRAP_DEW_READ_TEST] = 0xbc0a,
+ [PWRAP_DEW_WRITE_TEST] = 0xbc0c,
+ [PWRAP_DEW_CRC_EN] = 0xbc0e,
+ [PWRAP_DEW_CRC_VAL] = 0xbc10,
+ [PWRAP_DEW_MON_GRP_SEL] = 0xbc12,
+ [PWRAP_DEW_MON_FLAG_SEL] = 0xbc14,
+ [PWRAP_DEW_EVENT_TEST] = 0xbc16,
+ [PWRAP_DEW_CIPHER_KEY_SEL] = 0xbc18,
+ [PWRAP_DEW_CIPHER_IV_SEL] = 0xbc1a,
+ [PWRAP_DEW_CIPHER_LOAD] = 0xbc1c,
+ [PWRAP_DEW_CIPHER_START] = 0xbc1e,
+ [PWRAP_DEW_CIPHER_RDY] = 0xbc20,
+ [PWRAP_DEW_CIPHER_MODE] = 0xbc22,
+ [PWRAP_DEW_CIPHER_SWRST] = 0xbc24,
+};
+
+enum pwrap_regs {
+ PWRAP_MUX_SEL,
+ PWRAP_WRAP_EN,
+ PWRAP_DIO_EN,
+ PWRAP_SIDLY,
+ PWRAP_CSHEXT_WRITE,
+ PWRAP_CSHEXT_READ,
+ PWRAP_CSLEXT_START,
+ PWRAP_CSLEXT_END,
+ PWRAP_STAUPD_PRD,
+ PWRAP_STAUPD_GRPEN,
+ PWRAP_STAUPD_MAN_TRIG,
+ PWRAP_STAUPD_STA,
+ PWRAP_WRAP_STA,
+ PWRAP_HARB_INIT,
+ PWRAP_HARB_HPRIO,
+ PWRAP_HIPRIO_ARB_EN,
+ PWRAP_HARB_STA0,
+ PWRAP_HARB_STA1,
+ PWRAP_MAN_EN,
+ PWRAP_MAN_CMD,
+ PWRAP_MAN_RDATA,
+ PWRAP_MAN_VLDCLR,
+ PWRAP_WACS0_EN,
+ PWRAP_INIT_DONE0,
+ PWRAP_WACS0_CMD,
+ PWRAP_WACS0_RDATA,
+ PWRAP_WACS0_VLDCLR,
+ PWRAP_WACS1_EN,
+ PWRAP_INIT_DONE1,
+ PWRAP_WACS1_CMD,
+ PWRAP_WACS1_RDATA,
+ PWRAP_WACS1_VLDCLR,
+ PWRAP_WACS2_EN,
+ PWRAP_INIT_DONE2,
+ PWRAP_WACS2_CMD,
+ PWRAP_WACS2_RDATA,
+ PWRAP_WACS2_VLDCLR,
+ PWRAP_INT_EN,
+ PWRAP_INT_FLG_RAW,
+ PWRAP_INT_FLG,
+ PWRAP_INT_CLR,
+ PWRAP_SIG_ADR,
+ PWRAP_SIG_MODE,
+ PWRAP_SIG_VALUE,
+ PWRAP_SIG_ERRVAL,
+ PWRAP_CRC_EN,
+ PWRAP_TIMER_EN,
+ PWRAP_TIMER_STA,
+ PWRAP_WDT_UNIT,
+ PWRAP_WDT_SRC_EN,
+ PWRAP_WDT_FLG,
+ PWRAP_DEBUG_INT_SEL,
+ PWRAP_CIPHER_KEY_SEL,
+ PWRAP_CIPHER_IV_SEL,
+ PWRAP_CIPHER_RDY,
+ PWRAP_CIPHER_MODE,
+ PWRAP_CIPHER_SWRST,
+ PWRAP_DCM_EN,
+ PWRAP_DCM_DBC_PRD,
+ PWRAP_EINT_STA0_ADR,
+ PWRAP_EINT_STA1_ADR,
+ PWRAP_SWINF_2_WDATA_31_0,
+ PWRAP_SWINF_2_RDATA_31_0,
+
+ /* MT2701 only regs */
+ PWRAP_ADC_CMD_ADDR,
+ PWRAP_PWRAP_ADC_CMD,
+ PWRAP_ADC_RDY_ADDR,
+ PWRAP_ADC_RDATA_ADDR1,
+ PWRAP_ADC_RDATA_ADDR2,
+
+ /* MT7622 only regs */
+ PWRAP_STA,
+ PWRAP_CLR,
+ PWRAP_DVFS_ADR8,
+ PWRAP_DVFS_WDATA8,
+ PWRAP_DVFS_ADR9,
+ PWRAP_DVFS_WDATA9,
+ PWRAP_DVFS_ADR10,
+ PWRAP_DVFS_WDATA10,
+ PWRAP_DVFS_ADR11,
+ PWRAP_DVFS_WDATA11,
+ PWRAP_DVFS_ADR12,
+ PWRAP_DVFS_WDATA12,
+ PWRAP_DVFS_ADR13,
+ PWRAP_DVFS_WDATA13,
+ PWRAP_DVFS_ADR14,
+ PWRAP_DVFS_WDATA14,
+ PWRAP_DVFS_ADR15,
+ PWRAP_DVFS_WDATA15,
+ PWRAP_EXT_CK,
+ PWRAP_ADC_RDATA_ADDR,
+ PWRAP_GPS_STA,
+ PWRAP_SW_RST,
+ PWRAP_DVFS_STEP_CTRL0,
+ PWRAP_DVFS_STEP_CTRL1,
+ PWRAP_DVFS_STEP_CTRL2,
+ PWRAP_SPI2_CTRL,
+
+ /* MT8135 only regs */
+ PWRAP_CSHEXT,
+ PWRAP_EVENT_IN_EN,
+ PWRAP_EVENT_DST_EN,
+ PWRAP_RRARB_INIT,
+ PWRAP_RRARB_EN,
+ PWRAP_RRARB_STA0,
+ PWRAP_RRARB_STA1,
+ PWRAP_EVENT_STA,
+ PWRAP_EVENT_STACLR,
+ PWRAP_CIPHER_LOAD,
+ PWRAP_CIPHER_START,
+
+ /* MT8173 only regs */
+ PWRAP_RDDMY,
+ PWRAP_SI_CK_CON,
+ PWRAP_DVFS_ADR0,
+ PWRAP_DVFS_WDATA0,
+ PWRAP_DVFS_ADR1,
+ PWRAP_DVFS_WDATA1,
+ PWRAP_DVFS_ADR2,
+ PWRAP_DVFS_WDATA2,
+ PWRAP_DVFS_ADR3,
+ PWRAP_DVFS_WDATA3,
+ PWRAP_DVFS_ADR4,
+ PWRAP_DVFS_WDATA4,
+ PWRAP_DVFS_ADR5,
+ PWRAP_DVFS_WDATA5,
+ PWRAP_DVFS_ADR6,
+ PWRAP_DVFS_WDATA6,
+ PWRAP_DVFS_ADR7,
+ PWRAP_DVFS_WDATA7,
+ PWRAP_SPMINF_STA,
+ PWRAP_CIPHER_EN,
+
+ /* MT8183 only regs */
+ PWRAP_SI_SAMPLE_CTRL,
+ PWRAP_CSLEXT_WRITE,
+ PWRAP_CSLEXT_READ,
+ PWRAP_EXT_CK_WRITE,
+ PWRAP_STAUPD_CTRL,
+ PWRAP_WACS_P2P_EN,
+ PWRAP_INIT_DONE_P2P,
+ PWRAP_WACS_MD32_EN,
+ PWRAP_INIT_DONE_MD32,
+ PWRAP_INT1_EN,
+ PWRAP_INT1_FLG,
+ PWRAP_INT1_CLR,
+ PWRAP_WDT_SRC_EN_1,
+ PWRAP_INT_GPS_AUXADC_CMD_ADDR,
+ PWRAP_INT_GPS_AUXADC_CMD,
+ PWRAP_INT_GPS_AUXADC_RDATA_ADDR,
+ PWRAP_EXT_GPS_AUXADC_RDATA_ADDR,
+ PWRAP_GPSINF_0_STA,
+ PWRAP_GPSINF_1_STA,
+
+ /* MT8516 only regs */
+ PWRAP_OP_TYPE,
+ PWRAP_MSB_FIRST,
+};
+
+static int mt2701_regs[] = {
+ [PWRAP_MUX_SEL] = 0x0,
+ [PWRAP_WRAP_EN] = 0x4,
+ [PWRAP_DIO_EN] = 0x8,
+ [PWRAP_SIDLY] = 0xc,
+ [PWRAP_RDDMY] = 0x18,
+ [PWRAP_SI_CK_CON] = 0x1c,
+ [PWRAP_CSHEXT_WRITE] = 0x20,
+ [PWRAP_CSHEXT_READ] = 0x24,
+ [PWRAP_CSLEXT_START] = 0x28,
+ [PWRAP_CSLEXT_END] = 0x2c,
+ [PWRAP_STAUPD_PRD] = 0x30,
+ [PWRAP_STAUPD_GRPEN] = 0x34,
+ [PWRAP_STAUPD_MAN_TRIG] = 0x38,
+ [PWRAP_STAUPD_STA] = 0x3c,
+ [PWRAP_WRAP_STA] = 0x44,
+ [PWRAP_HARB_INIT] = 0x48,
+ [PWRAP_HARB_HPRIO] = 0x4c,
+ [PWRAP_HIPRIO_ARB_EN] = 0x50,
+ [PWRAP_HARB_STA0] = 0x54,
+ [PWRAP_HARB_STA1] = 0x58,
+ [PWRAP_MAN_EN] = 0x5c,
+ [PWRAP_MAN_CMD] = 0x60,
+ [PWRAP_MAN_RDATA] = 0x64,
+ [PWRAP_MAN_VLDCLR] = 0x68,
+ [PWRAP_WACS0_EN] = 0x6c,
+ [PWRAP_INIT_DONE0] = 0x70,
+ [PWRAP_WACS0_CMD] = 0x74,
+ [PWRAP_WACS0_RDATA] = 0x78,
+ [PWRAP_WACS0_VLDCLR] = 0x7c,
+ [PWRAP_WACS1_EN] = 0x80,
+ [PWRAP_INIT_DONE1] = 0x84,
+ [PWRAP_WACS1_CMD] = 0x88,
+ [PWRAP_WACS1_RDATA] = 0x8c,
+ [PWRAP_WACS1_VLDCLR] = 0x90,
+ [PWRAP_WACS2_EN] = 0x94,
+ [PWRAP_INIT_DONE2] = 0x98,
+ [PWRAP_WACS2_CMD] = 0x9c,
+ [PWRAP_WACS2_RDATA] = 0xa0,
+ [PWRAP_WACS2_VLDCLR] = 0xa4,
+ [PWRAP_INT_EN] = 0xa8,
+ [PWRAP_INT_FLG_RAW] = 0xac,
+ [PWRAP_INT_FLG] = 0xb0,
+ [PWRAP_INT_CLR] = 0xb4,
+ [PWRAP_SIG_ADR] = 0xb8,
+ [PWRAP_SIG_MODE] = 0xbc,
+ [PWRAP_SIG_VALUE] = 0xc0,
+ [PWRAP_SIG_ERRVAL] = 0xc4,
+ [PWRAP_CRC_EN] = 0xc8,
+ [PWRAP_TIMER_EN] = 0xcc,
+ [PWRAP_TIMER_STA] = 0xd0,
+ [PWRAP_WDT_UNIT] = 0xd4,
+ [PWRAP_WDT_SRC_EN] = 0xd8,
+ [PWRAP_WDT_FLG] = 0xdc,
+ [PWRAP_DEBUG_INT_SEL] = 0xe0,
+ [PWRAP_DVFS_ADR0] = 0xe4,
+ [PWRAP_DVFS_WDATA0] = 0xe8,
+ [PWRAP_DVFS_ADR1] = 0xec,
+ [PWRAP_DVFS_WDATA1] = 0xf0,
+ [PWRAP_DVFS_ADR2] = 0xf4,
+ [PWRAP_DVFS_WDATA2] = 0xf8,
+ [PWRAP_DVFS_ADR3] = 0xfc,
+ [PWRAP_DVFS_WDATA3] = 0x100,
+ [PWRAP_DVFS_ADR4] = 0x104,
+ [PWRAP_DVFS_WDATA4] = 0x108,
+ [PWRAP_DVFS_ADR5] = 0x10c,
+ [PWRAP_DVFS_WDATA5] = 0x110,
+ [PWRAP_DVFS_ADR6] = 0x114,
+ [PWRAP_DVFS_WDATA6] = 0x118,
+ [PWRAP_DVFS_ADR7] = 0x11c,
+ [PWRAP_DVFS_WDATA7] = 0x120,
+ [PWRAP_CIPHER_KEY_SEL] = 0x124,
+ [PWRAP_CIPHER_IV_SEL] = 0x128,
+ [PWRAP_CIPHER_EN] = 0x12c,
+ [PWRAP_CIPHER_RDY] = 0x130,
+ [PWRAP_CIPHER_MODE] = 0x134,
+ [PWRAP_CIPHER_SWRST] = 0x138,
+ [PWRAP_DCM_EN] = 0x13c,
+ [PWRAP_DCM_DBC_PRD] = 0x140,
+ [PWRAP_ADC_CMD_ADDR] = 0x144,
+ [PWRAP_PWRAP_ADC_CMD] = 0x148,
+ [PWRAP_ADC_RDY_ADDR] = 0x14c,
+ [PWRAP_ADC_RDATA_ADDR1] = 0x150,
+ [PWRAP_ADC_RDATA_ADDR2] = 0x154,
+};
+
+static int mt6765_regs[] = {
+ [PWRAP_MUX_SEL] = 0x0,
+ [PWRAP_WRAP_EN] = 0x4,
+ [PWRAP_DIO_EN] = 0x8,
+ [PWRAP_RDDMY] = 0x20,
+ [PWRAP_CSHEXT_WRITE] = 0x24,
+ [PWRAP_CSHEXT_READ] = 0x28,
+ [PWRAP_CSLEXT_START] = 0x2C,
+ [PWRAP_CSLEXT_END] = 0x30,
+ [PWRAP_STAUPD_PRD] = 0x3C,
+ [PWRAP_HARB_HPRIO] = 0x68,
+ [PWRAP_HIPRIO_ARB_EN] = 0x6C,
+ [PWRAP_MAN_EN] = 0x7C,
+ [PWRAP_MAN_CMD] = 0x80,
+ [PWRAP_WACS0_EN] = 0x8C,
+ [PWRAP_WACS1_EN] = 0x94,
+ [PWRAP_WACS2_EN] = 0x9C,
+ [PWRAP_INIT_DONE2] = 0xA0,
+ [PWRAP_WACS2_CMD] = 0xC20,
+ [PWRAP_WACS2_RDATA] = 0xC24,
+ [PWRAP_WACS2_VLDCLR] = 0xC28,
+ [PWRAP_INT_EN] = 0xB4,
+ [PWRAP_INT_FLG_RAW] = 0xB8,
+ [PWRAP_INT_FLG] = 0xBC,
+ [PWRAP_INT_CLR] = 0xC0,
+ [PWRAP_TIMER_EN] = 0xE8,
+ [PWRAP_WDT_UNIT] = 0xF0,
+ [PWRAP_WDT_SRC_EN] = 0xF4,
+ [PWRAP_DCM_EN] = 0x1DC,
+ [PWRAP_DCM_DBC_PRD] = 0x1E0,
+};
+
+static int mt6779_regs[] = {
+ [PWRAP_MUX_SEL] = 0x0,
+ [PWRAP_WRAP_EN] = 0x4,
+ [PWRAP_DIO_EN] = 0x8,
+ [PWRAP_RDDMY] = 0x20,
+ [PWRAP_CSHEXT_WRITE] = 0x24,
+ [PWRAP_CSHEXT_READ] = 0x28,
+ [PWRAP_CSLEXT_WRITE] = 0x2C,
+ [PWRAP_CSLEXT_READ] = 0x30,
+ [PWRAP_EXT_CK_WRITE] = 0x34,
+ [PWRAP_STAUPD_CTRL] = 0x3C,
+ [PWRAP_STAUPD_GRPEN] = 0x40,
+ [PWRAP_EINT_STA0_ADR] = 0x44,
+ [PWRAP_HARB_HPRIO] = 0x68,
+ [PWRAP_HIPRIO_ARB_EN] = 0x6C,
+ [PWRAP_MAN_EN] = 0x7C,
+ [PWRAP_MAN_CMD] = 0x80,
+ [PWRAP_WACS0_EN] = 0x8C,
+ [PWRAP_INIT_DONE0] = 0x90,
+ [PWRAP_WACS1_EN] = 0x94,
+ [PWRAP_WACS2_EN] = 0x9C,
+ [PWRAP_INIT_DONE1] = 0x98,
+ [PWRAP_INIT_DONE2] = 0xA0,
+ [PWRAP_INT_EN] = 0xBC,
+ [PWRAP_INT_FLG_RAW] = 0xC0,
+ [PWRAP_INT_FLG] = 0xC4,
+ [PWRAP_INT_CLR] = 0xC8,
+ [PWRAP_INT1_EN] = 0xCC,
+ [PWRAP_INT1_FLG] = 0xD4,
+ [PWRAP_INT1_CLR] = 0xD8,
+ [PWRAP_TIMER_EN] = 0xF0,
+ [PWRAP_WDT_UNIT] = 0xF8,
+ [PWRAP_WDT_SRC_EN] = 0xFC,
+ [PWRAP_WDT_SRC_EN_1] = 0x100,
+ [PWRAP_WACS2_CMD] = 0xC20,
+ [PWRAP_WACS2_RDATA] = 0xC24,
+ [PWRAP_WACS2_VLDCLR] = 0xC28,
+};
+
+static int mt6795_regs[] = {
+ [PWRAP_MUX_SEL] = 0x0,
+ [PWRAP_WRAP_EN] = 0x4,
+ [PWRAP_DIO_EN] = 0x8,
+ [PWRAP_SIDLY] = 0xc,
+ [PWRAP_RDDMY] = 0x10,
+ [PWRAP_SI_CK_CON] = 0x14,
+ [PWRAP_CSHEXT_WRITE] = 0x18,
+ [PWRAP_CSHEXT_READ] = 0x1c,
+ [PWRAP_CSLEXT_START] = 0x20,
+ [PWRAP_CSLEXT_END] = 0x24,
+ [PWRAP_STAUPD_PRD] = 0x28,
+ [PWRAP_STAUPD_GRPEN] = 0x2c,
+ [PWRAP_EINT_STA0_ADR] = 0x30,
+ [PWRAP_EINT_STA1_ADR] = 0x34,
+ [PWRAP_STAUPD_MAN_TRIG] = 0x40,
+ [PWRAP_STAUPD_STA] = 0x44,
+ [PWRAP_WRAP_STA] = 0x48,
+ [PWRAP_HARB_INIT] = 0x4c,
+ [PWRAP_HARB_HPRIO] = 0x50,
+ [PWRAP_HIPRIO_ARB_EN] = 0x54,
+ [PWRAP_HARB_STA0] = 0x58,
+ [PWRAP_HARB_STA1] = 0x5c,
+ [PWRAP_MAN_EN] = 0x60,
+ [PWRAP_MAN_CMD] = 0x64,
+ [PWRAP_MAN_RDATA] = 0x68,
+ [PWRAP_MAN_VLDCLR] = 0x6c,
+ [PWRAP_WACS0_EN] = 0x70,
+ [PWRAP_INIT_DONE0] = 0x74,
+ [PWRAP_WACS0_CMD] = 0x78,
+ [PWRAP_WACS0_RDATA] = 0x7c,
+ [PWRAP_WACS0_VLDCLR] = 0x80,
+ [PWRAP_WACS1_EN] = 0x84,
+ [PWRAP_INIT_DONE1] = 0x88,
+ [PWRAP_WACS1_CMD] = 0x8c,
+ [PWRAP_WACS1_RDATA] = 0x90,
+ [PWRAP_WACS1_VLDCLR] = 0x94,
+ [PWRAP_WACS2_EN] = 0x98,
+ [PWRAP_INIT_DONE2] = 0x9c,
+ [PWRAP_WACS2_CMD] = 0xa0,
+ [PWRAP_WACS2_RDATA] = 0xa4,
+ [PWRAP_WACS2_VLDCLR] = 0xa8,
+ [PWRAP_INT_EN] = 0xac,
+ [PWRAP_INT_FLG_RAW] = 0xb0,
+ [PWRAP_INT_FLG] = 0xb4,
+ [PWRAP_INT_CLR] = 0xb8,
+ [PWRAP_SIG_ADR] = 0xbc,
+ [PWRAP_SIG_MODE] = 0xc0,
+ [PWRAP_SIG_VALUE] = 0xc4,
+ [PWRAP_SIG_ERRVAL] = 0xc8,
+ [PWRAP_CRC_EN] = 0xcc,
+ [PWRAP_TIMER_EN] = 0xd0,
+ [PWRAP_TIMER_STA] = 0xd4,
+ [PWRAP_WDT_UNIT] = 0xd8,
+ [PWRAP_WDT_SRC_EN] = 0xdc,
+ [PWRAP_WDT_FLG] = 0xe0,
+ [PWRAP_DEBUG_INT_SEL] = 0xe4,
+ [PWRAP_DVFS_ADR0] = 0xe8,
+ [PWRAP_DVFS_WDATA0] = 0xec,
+ [PWRAP_DVFS_ADR1] = 0xf0,
+ [PWRAP_DVFS_WDATA1] = 0xf4,
+ [PWRAP_DVFS_ADR2] = 0xf8,
+ [PWRAP_DVFS_WDATA2] = 0xfc,
+ [PWRAP_DVFS_ADR3] = 0x100,
+ [PWRAP_DVFS_WDATA3] = 0x104,
+ [PWRAP_DVFS_ADR4] = 0x108,
+ [PWRAP_DVFS_WDATA4] = 0x10c,
+ [PWRAP_DVFS_ADR5] = 0x110,
+ [PWRAP_DVFS_WDATA5] = 0x114,
+ [PWRAP_DVFS_ADR6] = 0x118,
+ [PWRAP_DVFS_WDATA6] = 0x11c,
+ [PWRAP_DVFS_ADR7] = 0x120,
+ [PWRAP_DVFS_WDATA7] = 0x124,
+ [PWRAP_SPMINF_STA] = 0x128,
+ [PWRAP_CIPHER_KEY_SEL] = 0x12c,
+ [PWRAP_CIPHER_IV_SEL] = 0x130,
+ [PWRAP_CIPHER_EN] = 0x134,
+ [PWRAP_CIPHER_RDY] = 0x138,
+ [PWRAP_CIPHER_MODE] = 0x13c,
+ [PWRAP_CIPHER_SWRST] = 0x140,
+ [PWRAP_DCM_EN] = 0x144,
+ [PWRAP_DCM_DBC_PRD] = 0x148,
+ [PWRAP_EXT_CK] = 0x14c,
+};
+
+static int mt6797_regs[] = {
+ [PWRAP_MUX_SEL] = 0x0,
+ [PWRAP_WRAP_EN] = 0x4,
+ [PWRAP_DIO_EN] = 0x8,
+ [PWRAP_SIDLY] = 0xC,
+ [PWRAP_RDDMY] = 0x10,
+ [PWRAP_CSHEXT_WRITE] = 0x18,
+ [PWRAP_CSHEXT_READ] = 0x1C,
+ [PWRAP_CSLEXT_START] = 0x20,
+ [PWRAP_CSLEXT_END] = 0x24,
+ [PWRAP_STAUPD_PRD] = 0x28,
+ [PWRAP_HARB_HPRIO] = 0x50,
+ [PWRAP_HIPRIO_ARB_EN] = 0x54,
+ [PWRAP_MAN_EN] = 0x60,
+ [PWRAP_MAN_CMD] = 0x64,
+ [PWRAP_WACS0_EN] = 0x70,
+ [PWRAP_WACS1_EN] = 0x84,
+ [PWRAP_WACS2_EN] = 0x98,
+ [PWRAP_INIT_DONE2] = 0x9C,
+ [PWRAP_WACS2_CMD] = 0xA0,
+ [PWRAP_WACS2_RDATA] = 0xA4,
+ [PWRAP_WACS2_VLDCLR] = 0xA8,
+ [PWRAP_INT_EN] = 0xC0,
+ [PWRAP_INT_FLG_RAW] = 0xC4,
+ [PWRAP_INT_FLG] = 0xC8,
+ [PWRAP_INT_CLR] = 0xCC,
+ [PWRAP_TIMER_EN] = 0xF4,
+ [PWRAP_WDT_UNIT] = 0xFC,
+ [PWRAP_WDT_SRC_EN] = 0x100,
+ [PWRAP_DCM_EN] = 0x1CC,
+ [PWRAP_DCM_DBC_PRD] = 0x1D4,
+};
+
+static int mt6873_regs[] = {
+ [PWRAP_INIT_DONE2] = 0x0,
+ [PWRAP_TIMER_EN] = 0x3E0,
+ [PWRAP_INT_EN] = 0x448,
+ [PWRAP_WACS2_CMD] = 0xC80,
+ [PWRAP_SWINF_2_WDATA_31_0] = 0xC84,
+ [PWRAP_SWINF_2_RDATA_31_0] = 0xC94,
+ [PWRAP_WACS2_VLDCLR] = 0xCA4,
+ [PWRAP_WACS2_RDATA] = 0xCA8,
+};
+
+static int mt7622_regs[] = {
+ [PWRAP_MUX_SEL] = 0x0,
+ [PWRAP_WRAP_EN] = 0x4,
+ [PWRAP_DIO_EN] = 0x8,
+ [PWRAP_SIDLY] = 0xC,
+ [PWRAP_RDDMY] = 0x10,
+ [PWRAP_SI_CK_CON] = 0x14,
+ [PWRAP_CSHEXT_WRITE] = 0x18,
+ [PWRAP_CSHEXT_READ] = 0x1C,
+ [PWRAP_CSLEXT_START] = 0x20,
+ [PWRAP_CSLEXT_END] = 0x24,
+ [PWRAP_STAUPD_PRD] = 0x28,
+ [PWRAP_STAUPD_GRPEN] = 0x2C,
+ [PWRAP_EINT_STA0_ADR] = 0x30,
+ [PWRAP_EINT_STA1_ADR] = 0x34,
+ [PWRAP_STA] = 0x38,
+ [PWRAP_CLR] = 0x3C,
+ [PWRAP_STAUPD_MAN_TRIG] = 0x40,
+ [PWRAP_STAUPD_STA] = 0x44,
+ [PWRAP_WRAP_STA] = 0x48,
+ [PWRAP_HARB_INIT] = 0x4C,
+ [PWRAP_HARB_HPRIO] = 0x50,
+ [PWRAP_HIPRIO_ARB_EN] = 0x54,
+ [PWRAP_HARB_STA0] = 0x58,
+ [PWRAP_HARB_STA1] = 0x5C,
+ [PWRAP_MAN_EN] = 0x60,
+ [PWRAP_MAN_CMD] = 0x64,
+ [PWRAP_MAN_RDATA] = 0x68,
+ [PWRAP_MAN_VLDCLR] = 0x6C,
+ [PWRAP_WACS0_EN] = 0x70,
+ [PWRAP_INIT_DONE0] = 0x74,
+ [PWRAP_WACS0_CMD] = 0x78,
+ [PWRAP_WACS0_RDATA] = 0x7C,
+ [PWRAP_WACS0_VLDCLR] = 0x80,
+ [PWRAP_WACS1_EN] = 0x84,
+ [PWRAP_INIT_DONE1] = 0x88,
+ [PWRAP_WACS1_CMD] = 0x8C,
+ [PWRAP_WACS1_RDATA] = 0x90,
+ [PWRAP_WACS1_VLDCLR] = 0x94,
+ [PWRAP_WACS2_EN] = 0x98,
+ [PWRAP_INIT_DONE2] = 0x9C,
+ [PWRAP_WACS2_CMD] = 0xA0,
+ [PWRAP_WACS2_RDATA] = 0xA4,
+ [PWRAP_WACS2_VLDCLR] = 0xA8,
+ [PWRAP_INT_EN] = 0xAC,
+ [PWRAP_INT_FLG_RAW] = 0xB0,
+ [PWRAP_INT_FLG] = 0xB4,
+ [PWRAP_INT_CLR] = 0xB8,
+ [PWRAP_SIG_ADR] = 0xBC,
+ [PWRAP_SIG_MODE] = 0xC0,
+ [PWRAP_SIG_VALUE] = 0xC4,
+ [PWRAP_SIG_ERRVAL] = 0xC8,
+ [PWRAP_CRC_EN] = 0xCC,
+ [PWRAP_TIMER_EN] = 0xD0,
+ [PWRAP_TIMER_STA] = 0xD4,
+ [PWRAP_WDT_UNIT] = 0xD8,
+ [PWRAP_WDT_SRC_EN] = 0xDC,
+ [PWRAP_WDT_FLG] = 0xE0,
+ [PWRAP_DEBUG_INT_SEL] = 0xE4,
+ [PWRAP_DVFS_ADR0] = 0xE8,
+ [PWRAP_DVFS_WDATA0] = 0xEC,
+ [PWRAP_DVFS_ADR1] = 0xF0,
+ [PWRAP_DVFS_WDATA1] = 0xF4,
+ [PWRAP_DVFS_ADR2] = 0xF8,
+ [PWRAP_DVFS_WDATA2] = 0xFC,
+ [PWRAP_DVFS_ADR3] = 0x100,
+ [PWRAP_DVFS_WDATA3] = 0x104,
+ [PWRAP_DVFS_ADR4] = 0x108,
+ [PWRAP_DVFS_WDATA4] = 0x10C,
+ [PWRAP_DVFS_ADR5] = 0x110,
+ [PWRAP_DVFS_WDATA5] = 0x114,
+ [PWRAP_DVFS_ADR6] = 0x118,
+ [PWRAP_DVFS_WDATA6] = 0x11C,
+ [PWRAP_DVFS_ADR7] = 0x120,
+ [PWRAP_DVFS_WDATA7] = 0x124,
+ [PWRAP_DVFS_ADR8] = 0x128,
+ [PWRAP_DVFS_WDATA8] = 0x12C,
+ [PWRAP_DVFS_ADR9] = 0x130,
+ [PWRAP_DVFS_WDATA9] = 0x134,
+ [PWRAP_DVFS_ADR10] = 0x138,
+ [PWRAP_DVFS_WDATA10] = 0x13C,
+ [PWRAP_DVFS_ADR11] = 0x140,
+ [PWRAP_DVFS_WDATA11] = 0x144,
+ [PWRAP_DVFS_ADR12] = 0x148,
+ [PWRAP_DVFS_WDATA12] = 0x14C,
+ [PWRAP_DVFS_ADR13] = 0x150,
+ [PWRAP_DVFS_WDATA13] = 0x154,
+ [PWRAP_DVFS_ADR14] = 0x158,
+ [PWRAP_DVFS_WDATA14] = 0x15C,
+ [PWRAP_DVFS_ADR15] = 0x160,
+ [PWRAP_DVFS_WDATA15] = 0x164,
+ [PWRAP_SPMINF_STA] = 0x168,
+ [PWRAP_CIPHER_KEY_SEL] = 0x16C,
+ [PWRAP_CIPHER_IV_SEL] = 0x170,
+ [PWRAP_CIPHER_EN] = 0x174,
+ [PWRAP_CIPHER_RDY] = 0x178,
+ [PWRAP_CIPHER_MODE] = 0x17C,
+ [PWRAP_CIPHER_SWRST] = 0x180,
+ [PWRAP_DCM_EN] = 0x184,
+ [PWRAP_DCM_DBC_PRD] = 0x188,
+ [PWRAP_EXT_CK] = 0x18C,
+ [PWRAP_ADC_CMD_ADDR] = 0x190,
+ [PWRAP_PWRAP_ADC_CMD] = 0x194,
+ [PWRAP_ADC_RDATA_ADDR] = 0x198,
+ [PWRAP_GPS_STA] = 0x19C,
+ [PWRAP_SW_RST] = 0x1A0,
+ [PWRAP_DVFS_STEP_CTRL0] = 0x238,
+ [PWRAP_DVFS_STEP_CTRL1] = 0x23C,
+ [PWRAP_DVFS_STEP_CTRL2] = 0x240,
+ [PWRAP_SPI2_CTRL] = 0x244,
+};
+
+static int mt8135_regs[] = {
+ [PWRAP_MUX_SEL] = 0x0,
+ [PWRAP_WRAP_EN] = 0x4,
+ [PWRAP_DIO_EN] = 0x8,
+ [PWRAP_SIDLY] = 0xc,
+ [PWRAP_CSHEXT] = 0x10,
+ [PWRAP_CSHEXT_WRITE] = 0x14,
+ [PWRAP_CSHEXT_READ] = 0x18,
+ [PWRAP_CSLEXT_START] = 0x1c,
+ [PWRAP_CSLEXT_END] = 0x20,
+ [PWRAP_STAUPD_PRD] = 0x24,
+ [PWRAP_STAUPD_GRPEN] = 0x28,
+ [PWRAP_STAUPD_MAN_TRIG] = 0x2c,
+ [PWRAP_STAUPD_STA] = 0x30,
+ [PWRAP_EVENT_IN_EN] = 0x34,
+ [PWRAP_EVENT_DST_EN] = 0x38,
+ [PWRAP_WRAP_STA] = 0x3c,
+ [PWRAP_RRARB_INIT] = 0x40,
+ [PWRAP_RRARB_EN] = 0x44,
+ [PWRAP_RRARB_STA0] = 0x48,
+ [PWRAP_RRARB_STA1] = 0x4c,
+ [PWRAP_HARB_INIT] = 0x50,
+ [PWRAP_HARB_HPRIO] = 0x54,
+ [PWRAP_HIPRIO_ARB_EN] = 0x58,
+ [PWRAP_HARB_STA0] = 0x5c,
+ [PWRAP_HARB_STA1] = 0x60,
+ [PWRAP_MAN_EN] = 0x64,
+ [PWRAP_MAN_CMD] = 0x68,
+ [PWRAP_MAN_RDATA] = 0x6c,
+ [PWRAP_MAN_VLDCLR] = 0x70,
+ [PWRAP_WACS0_EN] = 0x74,
+ [PWRAP_INIT_DONE0] = 0x78,
+ [PWRAP_WACS0_CMD] = 0x7c,
+ [PWRAP_WACS0_RDATA] = 0x80,
+ [PWRAP_WACS0_VLDCLR] = 0x84,
+ [PWRAP_WACS1_EN] = 0x88,
+ [PWRAP_INIT_DONE1] = 0x8c,
+ [PWRAP_WACS1_CMD] = 0x90,
+ [PWRAP_WACS1_RDATA] = 0x94,
+ [PWRAP_WACS1_VLDCLR] = 0x98,
+ [PWRAP_WACS2_EN] = 0x9c,
+ [PWRAP_INIT_DONE2] = 0xa0,
+ [PWRAP_WACS2_CMD] = 0xa4,
+ [PWRAP_WACS2_RDATA] = 0xa8,
+ [PWRAP_WACS2_VLDCLR] = 0xac,
+ [PWRAP_INT_EN] = 0xb0,
+ [PWRAP_INT_FLG_RAW] = 0xb4,
+ [PWRAP_INT_FLG] = 0xb8,
+ [PWRAP_INT_CLR] = 0xbc,
+ [PWRAP_SIG_ADR] = 0xc0,
+ [PWRAP_SIG_MODE] = 0xc4,
+ [PWRAP_SIG_VALUE] = 0xc8,
+ [PWRAP_SIG_ERRVAL] = 0xcc,
+ [PWRAP_CRC_EN] = 0xd0,
+ [PWRAP_EVENT_STA] = 0xd4,
+ [PWRAP_EVENT_STACLR] = 0xd8,
+ [PWRAP_TIMER_EN] = 0xdc,
+ [PWRAP_TIMER_STA] = 0xe0,
+ [PWRAP_WDT_UNIT] = 0xe4,
+ [PWRAP_WDT_SRC_EN] = 0xe8,
+ [PWRAP_WDT_FLG] = 0xec,
+ [PWRAP_DEBUG_INT_SEL] = 0xf0,
+ [PWRAP_CIPHER_KEY_SEL] = 0x134,
+ [PWRAP_CIPHER_IV_SEL] = 0x138,
+ [PWRAP_CIPHER_LOAD] = 0x13c,
+ [PWRAP_CIPHER_START] = 0x140,
+ [PWRAP_CIPHER_RDY] = 0x144,
+ [PWRAP_CIPHER_MODE] = 0x148,
+ [PWRAP_CIPHER_SWRST] = 0x14c,
+ [PWRAP_DCM_EN] = 0x15c,
+ [PWRAP_DCM_DBC_PRD] = 0x160,
+};
+
+static int mt8173_regs[] = {
+ [PWRAP_MUX_SEL] = 0x0,
+ [PWRAP_WRAP_EN] = 0x4,
+ [PWRAP_DIO_EN] = 0x8,
+ [PWRAP_SIDLY] = 0xc,
+ [PWRAP_RDDMY] = 0x10,
+ [PWRAP_SI_CK_CON] = 0x14,
+ [PWRAP_CSHEXT_WRITE] = 0x18,
+ [PWRAP_CSHEXT_READ] = 0x1c,
+ [PWRAP_CSLEXT_START] = 0x20,
+ [PWRAP_CSLEXT_END] = 0x24,
+ [PWRAP_STAUPD_PRD] = 0x28,
+ [PWRAP_STAUPD_GRPEN] = 0x2c,
+ [PWRAP_STAUPD_MAN_TRIG] = 0x40,
+ [PWRAP_STAUPD_STA] = 0x44,
+ [PWRAP_WRAP_STA] = 0x48,
+ [PWRAP_HARB_INIT] = 0x4c,
+ [PWRAP_HARB_HPRIO] = 0x50,
+ [PWRAP_HIPRIO_ARB_EN] = 0x54,
+ [PWRAP_HARB_STA0] = 0x58,
+ [PWRAP_HARB_STA1] = 0x5c,
+ [PWRAP_MAN_EN] = 0x60,
+ [PWRAP_MAN_CMD] = 0x64,
+ [PWRAP_MAN_RDATA] = 0x68,
+ [PWRAP_MAN_VLDCLR] = 0x6c,
+ [PWRAP_WACS0_EN] = 0x70,
+ [PWRAP_INIT_DONE0] = 0x74,
+ [PWRAP_WACS0_CMD] = 0x78,
+ [PWRAP_WACS0_RDATA] = 0x7c,
+ [PWRAP_WACS0_VLDCLR] = 0x80,
+ [PWRAP_WACS1_EN] = 0x84,
+ [PWRAP_INIT_DONE1] = 0x88,
+ [PWRAP_WACS1_CMD] = 0x8c,
+ [PWRAP_WACS1_RDATA] = 0x90,
+ [PWRAP_WACS1_VLDCLR] = 0x94,
+ [PWRAP_WACS2_EN] = 0x98,
+ [PWRAP_INIT_DONE2] = 0x9c,
+ [PWRAP_WACS2_CMD] = 0xa0,
+ [PWRAP_WACS2_RDATA] = 0xa4,
+ [PWRAP_WACS2_VLDCLR] = 0xa8,
+ [PWRAP_INT_EN] = 0xac,
+ [PWRAP_INT_FLG_RAW] = 0xb0,
+ [PWRAP_INT_FLG] = 0xb4,
+ [PWRAP_INT_CLR] = 0xb8,
+ [PWRAP_SIG_ADR] = 0xbc,
+ [PWRAP_SIG_MODE] = 0xc0,
+ [PWRAP_SIG_VALUE] = 0xc4,
+ [PWRAP_SIG_ERRVAL] = 0xc8,
+ [PWRAP_CRC_EN] = 0xcc,
+ [PWRAP_TIMER_EN] = 0xd0,
+ [PWRAP_TIMER_STA] = 0xd4,
+ [PWRAP_WDT_UNIT] = 0xd8,
+ [PWRAP_WDT_SRC_EN] = 0xdc,
+ [PWRAP_WDT_FLG] = 0xe0,
+ [PWRAP_DEBUG_INT_SEL] = 0xe4,
+ [PWRAP_DVFS_ADR0] = 0xe8,
+ [PWRAP_DVFS_WDATA0] = 0xec,
+ [PWRAP_DVFS_ADR1] = 0xf0,
+ [PWRAP_DVFS_WDATA1] = 0xf4,
+ [PWRAP_DVFS_ADR2] = 0xf8,
+ [PWRAP_DVFS_WDATA2] = 0xfc,
+ [PWRAP_DVFS_ADR3] = 0x100,
+ [PWRAP_DVFS_WDATA3] = 0x104,
+ [PWRAP_DVFS_ADR4] = 0x108,
+ [PWRAP_DVFS_WDATA4] = 0x10c,
+ [PWRAP_DVFS_ADR5] = 0x110,
+ [PWRAP_DVFS_WDATA5] = 0x114,
+ [PWRAP_DVFS_ADR6] = 0x118,
+ [PWRAP_DVFS_WDATA6] = 0x11c,
+ [PWRAP_DVFS_ADR7] = 0x120,
+ [PWRAP_DVFS_WDATA7] = 0x124,
+ [PWRAP_SPMINF_STA] = 0x128,
+ [PWRAP_CIPHER_KEY_SEL] = 0x12c,
+ [PWRAP_CIPHER_IV_SEL] = 0x130,
+ [PWRAP_CIPHER_EN] = 0x134,
+ [PWRAP_CIPHER_RDY] = 0x138,
+ [PWRAP_CIPHER_MODE] = 0x13c,
+ [PWRAP_CIPHER_SWRST] = 0x140,
+ [PWRAP_DCM_EN] = 0x144,
+ [PWRAP_DCM_DBC_PRD] = 0x148,
+};
+
+static int mt8183_regs[] = {
+ [PWRAP_MUX_SEL] = 0x0,
+ [PWRAP_WRAP_EN] = 0x4,
+ [PWRAP_DIO_EN] = 0x8,
+ [PWRAP_SI_SAMPLE_CTRL] = 0xC,
+ [PWRAP_RDDMY] = 0x14,
+ [PWRAP_CSHEXT_WRITE] = 0x18,
+ [PWRAP_CSHEXT_READ] = 0x1C,
+ [PWRAP_CSLEXT_WRITE] = 0x20,
+ [PWRAP_CSLEXT_READ] = 0x24,
+ [PWRAP_EXT_CK_WRITE] = 0x28,
+ [PWRAP_STAUPD_CTRL] = 0x30,
+ [PWRAP_STAUPD_GRPEN] = 0x34,
+ [PWRAP_EINT_STA0_ADR] = 0x38,
+ [PWRAP_HARB_HPRIO] = 0x5C,
+ [PWRAP_HIPRIO_ARB_EN] = 0x60,
+ [PWRAP_MAN_EN] = 0x70,
+ [PWRAP_MAN_CMD] = 0x74,
+ [PWRAP_WACS0_EN] = 0x80,
+ [PWRAP_INIT_DONE0] = 0x84,
+ [PWRAP_WACS1_EN] = 0x88,
+ [PWRAP_INIT_DONE1] = 0x8C,
+ [PWRAP_WACS2_EN] = 0x90,
+ [PWRAP_INIT_DONE2] = 0x94,
+ [PWRAP_WACS_P2P_EN] = 0xA0,
+ [PWRAP_INIT_DONE_P2P] = 0xA4,
+ [PWRAP_WACS_MD32_EN] = 0xA8,
+ [PWRAP_INIT_DONE_MD32] = 0xAC,
+ [PWRAP_INT_EN] = 0xB0,
+ [PWRAP_INT_FLG] = 0xB8,
+ [PWRAP_INT_CLR] = 0xBC,
+ [PWRAP_INT1_EN] = 0xC0,
+ [PWRAP_INT1_FLG] = 0xC8,
+ [PWRAP_INT1_CLR] = 0xCC,
+ [PWRAP_SIG_ADR] = 0xD0,
+ [PWRAP_CRC_EN] = 0xE0,
+ [PWRAP_TIMER_EN] = 0xE4,
+ [PWRAP_WDT_UNIT] = 0xEC,
+ [PWRAP_WDT_SRC_EN] = 0xF0,
+ [PWRAP_WDT_SRC_EN_1] = 0xF4,
+ [PWRAP_INT_GPS_AUXADC_CMD_ADDR] = 0x1DC,
+ [PWRAP_INT_GPS_AUXADC_CMD] = 0x1E0,
+ [PWRAP_INT_GPS_AUXADC_RDATA_ADDR] = 0x1E4,
+ [PWRAP_EXT_GPS_AUXADC_RDATA_ADDR] = 0x1E8,
+ [PWRAP_GPSINF_0_STA] = 0x1EC,
+ [PWRAP_GPSINF_1_STA] = 0x1F0,
+ [PWRAP_WACS2_CMD] = 0xC20,
+ [PWRAP_WACS2_RDATA] = 0xC24,
+ [PWRAP_WACS2_VLDCLR] = 0xC28,
+};
+
+static int mt8195_regs[] = {
+ [PWRAP_INIT_DONE2] = 0x0,
+ [PWRAP_STAUPD_CTRL] = 0x4C,
+ [PWRAP_TIMER_EN] = 0x3E4,
+ [PWRAP_INT_EN] = 0x420,
+ [PWRAP_INT_FLG] = 0x428,
+ [PWRAP_INT_CLR] = 0x42C,
+ [PWRAP_INT1_EN] = 0x450,
+ [PWRAP_INT1_FLG] = 0x458,
+ [PWRAP_INT1_CLR] = 0x45C,
+ [PWRAP_WACS2_CMD] = 0x880,
+ [PWRAP_SWINF_2_WDATA_31_0] = 0x884,
+ [PWRAP_SWINF_2_RDATA_31_0] = 0x894,
+ [PWRAP_WACS2_VLDCLR] = 0x8A4,
+ [PWRAP_WACS2_RDATA] = 0x8A8,
+};
+
+static int mt8365_regs[] = {
+ [PWRAP_MUX_SEL] = 0x0,
+ [PWRAP_WRAP_EN] = 0x4,
+ [PWRAP_DIO_EN] = 0x8,
+ [PWRAP_CSHEXT_WRITE] = 0x24,
+ [PWRAP_CSHEXT_READ] = 0x28,
+ [PWRAP_STAUPD_PRD] = 0x3c,
+ [PWRAP_STAUPD_GRPEN] = 0x40,
+ [PWRAP_STAUPD_MAN_TRIG] = 0x58,
+ [PWRAP_STAUPD_STA] = 0x5c,
+ [PWRAP_WRAP_STA] = 0x60,
+ [PWRAP_HARB_INIT] = 0x64,
+ [PWRAP_HARB_HPRIO] = 0x68,
+ [PWRAP_HIPRIO_ARB_EN] = 0x6c,
+ [PWRAP_HARB_STA0] = 0x70,
+ [PWRAP_HARB_STA1] = 0x74,
+ [PWRAP_MAN_EN] = 0x7c,
+ [PWRAP_MAN_CMD] = 0x80,
+ [PWRAP_MAN_RDATA] = 0x84,
+ [PWRAP_MAN_VLDCLR] = 0x88,
+ [PWRAP_WACS0_EN] = 0x8c,
+ [PWRAP_INIT_DONE0] = 0x90,
+ [PWRAP_WACS0_CMD] = 0xc00,
+ [PWRAP_WACS0_RDATA] = 0xc04,
+ [PWRAP_WACS0_VLDCLR] = 0xc08,
+ [PWRAP_WACS1_EN] = 0x94,
+ [PWRAP_INIT_DONE1] = 0x98,
+ [PWRAP_WACS2_EN] = 0x9c,
+ [PWRAP_INIT_DONE2] = 0xa0,
+ [PWRAP_WACS2_CMD] = 0xc20,
+ [PWRAP_WACS2_RDATA] = 0xc24,
+ [PWRAP_WACS2_VLDCLR] = 0xc28,
+ [PWRAP_INT_EN] = 0xb4,
+ [PWRAP_INT_FLG_RAW] = 0xb8,
+ [PWRAP_INT_FLG] = 0xbc,
+ [PWRAP_INT_CLR] = 0xc0,
+ [PWRAP_SIG_ADR] = 0xd4,
+ [PWRAP_SIG_MODE] = 0xd8,
+ [PWRAP_SIG_VALUE] = 0xdc,
+ [PWRAP_SIG_ERRVAL] = 0xe0,
+ [PWRAP_CRC_EN] = 0xe4,
+ [PWRAP_TIMER_EN] = 0xe8,
+ [PWRAP_TIMER_STA] = 0xec,
+ [PWRAP_WDT_UNIT] = 0xf0,
+ [PWRAP_WDT_SRC_EN] = 0xf4,
+ [PWRAP_WDT_FLG] = 0xfc,
+ [PWRAP_DEBUG_INT_SEL] = 0x104,
+ [PWRAP_CIPHER_KEY_SEL] = 0x1c4,
+ [PWRAP_CIPHER_IV_SEL] = 0x1c8,
+ [PWRAP_CIPHER_RDY] = 0x1d0,
+ [PWRAP_CIPHER_MODE] = 0x1d4,
+ [PWRAP_CIPHER_SWRST] = 0x1d8,
+ [PWRAP_DCM_EN] = 0x1dc,
+ [PWRAP_DCM_DBC_PRD] = 0x1e0,
+ [PWRAP_EINT_STA0_ADR] = 0x44,
+ [PWRAP_EINT_STA1_ADR] = 0x48,
+ [PWRAP_INT1_EN] = 0xc4,
+ [PWRAP_INT1_FLG] = 0xcc,
+ [PWRAP_INT1_CLR] = 0xd0,
+ [PWRAP_WDT_SRC_EN_1] = 0xf8,
+};
+
+static int mt8516_regs[] = {
+ [PWRAP_MUX_SEL] = 0x0,
+ [PWRAP_WRAP_EN] = 0x4,
+ [PWRAP_DIO_EN] = 0x8,
+ [PWRAP_SIDLY] = 0xc,
+ [PWRAP_RDDMY] = 0x10,
+ [PWRAP_SI_CK_CON] = 0x14,
+ [PWRAP_CSHEXT_WRITE] = 0x18,
+ [PWRAP_CSHEXT_READ] = 0x1c,
+ [PWRAP_CSLEXT_START] = 0x20,
+ [PWRAP_CSLEXT_END] = 0x24,
+ [PWRAP_STAUPD_PRD] = 0x28,
+ [PWRAP_STAUPD_GRPEN] = 0x2c,
+ [PWRAP_STAUPD_MAN_TRIG] = 0x40,
+ [PWRAP_STAUPD_STA] = 0x44,
+ [PWRAP_WRAP_STA] = 0x48,
+ [PWRAP_HARB_INIT] = 0x4c,
+ [PWRAP_HARB_HPRIO] = 0x50,
+ [PWRAP_HIPRIO_ARB_EN] = 0x54,
+ [PWRAP_HARB_STA0] = 0x58,
+ [PWRAP_HARB_STA1] = 0x5c,
+ [PWRAP_MAN_EN] = 0x60,
+ [PWRAP_MAN_CMD] = 0x64,
+ [PWRAP_MAN_RDATA] = 0x68,
+ [PWRAP_MAN_VLDCLR] = 0x6c,
+ [PWRAP_WACS0_EN] = 0x70,
+ [PWRAP_INIT_DONE0] = 0x74,
+ [PWRAP_WACS0_CMD] = 0x78,
+ [PWRAP_WACS0_RDATA] = 0x7c,
+ [PWRAP_WACS0_VLDCLR] = 0x80,
+ [PWRAP_WACS1_EN] = 0x84,
+ [PWRAP_INIT_DONE1] = 0x88,
+ [PWRAP_WACS1_CMD] = 0x8c,
+ [PWRAP_WACS1_RDATA] = 0x90,
+ [PWRAP_WACS1_VLDCLR] = 0x94,
+ [PWRAP_WACS2_EN] = 0x98,
+ [PWRAP_INIT_DONE2] = 0x9c,
+ [PWRAP_WACS2_CMD] = 0xa0,
+ [PWRAP_WACS2_RDATA] = 0xa4,
+ [PWRAP_WACS2_VLDCLR] = 0xa8,
+ [PWRAP_INT_EN] = 0xac,
+ [PWRAP_INT_FLG_RAW] = 0xb0,
+ [PWRAP_INT_FLG] = 0xb4,
+ [PWRAP_INT_CLR] = 0xb8,
+ [PWRAP_SIG_ADR] = 0xbc,
+ [PWRAP_SIG_MODE] = 0xc0,
+ [PWRAP_SIG_VALUE] = 0xc4,
+ [PWRAP_SIG_ERRVAL] = 0xc8,
+ [PWRAP_CRC_EN] = 0xcc,
+ [PWRAP_TIMER_EN] = 0xd0,
+ [PWRAP_TIMER_STA] = 0xd4,
+ [PWRAP_WDT_UNIT] = 0xd8,
+ [PWRAP_WDT_SRC_EN] = 0xdc,
+ [PWRAP_WDT_FLG] = 0xe0,
+ [PWRAP_DEBUG_INT_SEL] = 0xe4,
+ [PWRAP_DVFS_ADR0] = 0xe8,
+ [PWRAP_DVFS_WDATA0] = 0xec,
+ [PWRAP_DVFS_ADR1] = 0xf0,
+ [PWRAP_DVFS_WDATA1] = 0xf4,
+ [PWRAP_DVFS_ADR2] = 0xf8,
+ [PWRAP_DVFS_WDATA2] = 0xfc,
+ [PWRAP_DVFS_ADR3] = 0x100,
+ [PWRAP_DVFS_WDATA3] = 0x104,
+ [PWRAP_DVFS_ADR4] = 0x108,
+ [PWRAP_DVFS_WDATA4] = 0x10c,
+ [PWRAP_DVFS_ADR5] = 0x110,
+ [PWRAP_DVFS_WDATA5] = 0x114,
+ [PWRAP_DVFS_ADR6] = 0x118,
+ [PWRAP_DVFS_WDATA6] = 0x11c,
+ [PWRAP_DVFS_ADR7] = 0x120,
+ [PWRAP_DVFS_WDATA7] = 0x124,
+ [PWRAP_SPMINF_STA] = 0x128,
+ [PWRAP_CIPHER_KEY_SEL] = 0x12c,
+ [PWRAP_CIPHER_IV_SEL] = 0x130,
+ [PWRAP_CIPHER_EN] = 0x134,
+ [PWRAP_CIPHER_RDY] = 0x138,
+ [PWRAP_CIPHER_MODE] = 0x13c,
+ [PWRAP_CIPHER_SWRST] = 0x140,
+ [PWRAP_DCM_EN] = 0x144,
+ [PWRAP_DCM_DBC_PRD] = 0x148,
+ [PWRAP_SW_RST] = 0x168,
+ [PWRAP_OP_TYPE] = 0x16c,
+ [PWRAP_MSB_FIRST] = 0x170,
+};
+
+static int mt8186_regs[] = {
+ [PWRAP_MUX_SEL] = 0x0,
+ [PWRAP_WRAP_EN] = 0x4,
+ [PWRAP_DIO_EN] = 0x8,
+ [PWRAP_RDDMY] = 0x20,
+ [PWRAP_CSHEXT_WRITE] = 0x24,
+ [PWRAP_CSHEXT_READ] = 0x28,
+ [PWRAP_CSLEXT_WRITE] = 0x2C,
+ [PWRAP_CSLEXT_READ] = 0x30,
+ [PWRAP_EXT_CK_WRITE] = 0x34,
+ [PWRAP_STAUPD_CTRL] = 0x3C,
+ [PWRAP_STAUPD_GRPEN] = 0x40,
+ [PWRAP_EINT_STA0_ADR] = 0x44,
+ [PWRAP_EINT_STA1_ADR] = 0x48,
+ [PWRAP_INT_CLR] = 0xC8,
+ [PWRAP_INT_FLG] = 0xC4,
+ [PWRAP_MAN_EN] = 0x7C,
+ [PWRAP_MAN_CMD] = 0x80,
+ [PWRAP_WACS0_EN] = 0x8C,
+ [PWRAP_WACS1_EN] = 0x94,
+ [PWRAP_WACS2_EN] = 0x9C,
+ [PWRAP_INIT_DONE0] = 0x90,
+ [PWRAP_INIT_DONE1] = 0x98,
+ [PWRAP_INIT_DONE2] = 0xA0,
+ [PWRAP_INT_EN] = 0xBC,
+ [PWRAP_INT1_EN] = 0xCC,
+ [PWRAP_INT1_FLG] = 0xD4,
+ [PWRAP_INT1_CLR] = 0xD8,
+ [PWRAP_TIMER_EN] = 0xF0,
+ [PWRAP_WDT_UNIT] = 0xF8,
+ [PWRAP_WDT_SRC_EN] = 0xFC,
+ [PWRAP_WDT_SRC_EN_1] = 0x100,
+ [PWRAP_WDT_FLG] = 0x104,
+ [PWRAP_SPMINF_STA] = 0x1B4,
+ [PWRAP_DCM_EN] = 0x1EC,
+ [PWRAP_DCM_DBC_PRD] = 0x1F0,
+ [PWRAP_GPSINF_0_STA] = 0x204,
+ [PWRAP_GPSINF_1_STA] = 0x208,
+ [PWRAP_WACS0_CMD] = 0xC00,
+ [PWRAP_WACS0_RDATA] = 0xC04,
+ [PWRAP_WACS0_VLDCLR] = 0xC08,
+ [PWRAP_WACS1_CMD] = 0xC10,
+ [PWRAP_WACS1_RDATA] = 0xC14,
+ [PWRAP_WACS1_VLDCLR] = 0xC18,
+ [PWRAP_WACS2_CMD] = 0xC20,
+ [PWRAP_WACS2_RDATA] = 0xC24,
+ [PWRAP_WACS2_VLDCLR] = 0xC28,
+};
+
+enum pmic_type {
+ PMIC_MT6323,
+ PMIC_MT6331,
+ PMIC_MT6332,
+ PMIC_MT6351,
+ PMIC_MT6357,
+ PMIC_MT6358,
+ PMIC_MT6359,
+ PMIC_MT6380,
+ PMIC_MT6397,
+};
+
+enum pwrap_type {
+ PWRAP_MT2701,
+ PWRAP_MT6765,
+ PWRAP_MT6779,
+ PWRAP_MT6795,
+ PWRAP_MT6797,
+ PWRAP_MT6873,
+ PWRAP_MT7622,
+ PWRAP_MT8135,
+ PWRAP_MT8173,
+ PWRAP_MT8183,
+ PWRAP_MT8186,
+ PWRAP_MT8195,
+ PWRAP_MT8365,
+ PWRAP_MT8516,
+};
+
+struct pmic_wrapper;
+
+struct pwrap_slv_regops {
+ const struct regmap_config *regmap;
+ /*
+ * pwrap operations are highly dependent on the PMIC type, so these
+ * function pointers add the flexibility to pick the right
+ * implementation for the type detected through the device tree.
+ */
+ int (*pwrap_read)(struct pmic_wrapper *wrp, u32 adr, u32 *rdata);
+ int (*pwrap_write)(struct pmic_wrapper *wrp, u32 adr, u32 wdata);
+};
+
+/**
+ * struct pwrap_slv_type - PMIC device wrapper definitions
+ * @dew_regs: Device Wrapper (DeW) register offsets
+ * @type: PMIC Type (model)
+ * @comp_dew_regs: Device Wrapper (DeW) register offsets for companion device
+ * @comp_type: Companion PMIC Type (model)
+ * @regops: Register R/W ops
+ * @caps: Capability flags for the target device
+ */
+struct pwrap_slv_type {
+ const u32 *dew_regs;
+ enum pmic_type type;
+ const u32 *comp_dew_regs;
+ enum pmic_type comp_type;
+ const struct pwrap_slv_regops *regops;
+ u32 caps;
+};
+
+struct pmic_wrapper {
+ struct device *dev;
+ void __iomem *base;
+ struct regmap *regmap;
+ const struct pmic_wrapper_type *master;
+ const struct pwrap_slv_type *slave;
+ struct clk *clk_spi;
+ struct clk *clk_wrap;
+ struct clk *clk_sys;
+ struct clk *clk_tmr;
+ struct reset_control *rstc;
+
+ struct reset_control *rstc_bridge;
+ void __iomem *bridge_base;
+};
+
+struct pmic_wrapper_type {
+ int *regs;
+ enum pwrap_type type;
+ u32 arb_en_all;
+ u32 int_en_all;
+ u32 int1_en_all;
+ u32 spi_w;
+ u32 wdt_src;
+ /* Flags indicating the capability for the target pwrap */
+ u32 caps;
+ int (*init_reg_clock)(struct pmic_wrapper *wrp);
+ int (*init_soc_specific)(struct pmic_wrapper *wrp);
+};
+
+static u32 pwrap_readl(struct pmic_wrapper *wrp, enum pwrap_regs reg)
+{
+ return readl(wrp->base + wrp->master->regs[reg]);
+}
+
+static void pwrap_writel(struct pmic_wrapper *wrp, u32 val, enum pwrap_regs reg)
+{
+ writel(val, wrp->base + wrp->master->regs[reg]);
+}
+
+static u32 pwrap_get_fsm_state(struct pmic_wrapper *wrp)
+{
+ u32 val;
+
+ val = pwrap_readl(wrp, PWRAP_WACS2_RDATA);
+ if (HAS_CAP(wrp->master->caps, PWRAP_CAP_ARB))
+ return PWRAP_GET_WACS_ARB_FSM(val);
+ else
+ return PWRAP_GET_WACS_FSM(val);
+}
+
+static bool pwrap_is_fsm_idle(struct pmic_wrapper *wrp)
+{
+ return pwrap_get_fsm_state(wrp) == PWRAP_WACS_FSM_IDLE;
+}
+
+static bool pwrap_is_fsm_vldclr(struct pmic_wrapper *wrp)
+{
+ return pwrap_get_fsm_state(wrp) == PWRAP_WACS_FSM_WFVLDCLR;
+}
+
+/*
+ * A timeout can occur when the previous read command failed: if the PMIC
+ * wrapper did not reach FSM_VLDCLR in time after WACS2_CMD completed, the
+ * state machine stays in FSM_VLDCLR and the next access times out as well.
+ * Check the FSM state and clear VLDCLR to recover from this error.
+ */
+static inline void pwrap_leave_fsm_vldclr(struct pmic_wrapper *wrp)
+{
+ if (pwrap_is_fsm_vldclr(wrp))
+ pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR);
+}
+
+static bool pwrap_is_sync_idle(struct pmic_wrapper *wrp)
+{
+ return pwrap_readl(wrp, PWRAP_WACS2_RDATA) & PWRAP_STATE_SYNC_IDLE0;
+}
+
+static bool pwrap_is_fsm_idle_and_sync_idle(struct pmic_wrapper *wrp)
+{
+ u32 val = pwrap_readl(wrp, PWRAP_WACS2_RDATA);
+
+ return (PWRAP_GET_WACS_FSM(val) == PWRAP_WACS_FSM_IDLE) &&
+ (val & PWRAP_STATE_SYNC_IDLE0);
+}
+
+static int pwrap_read16(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
+{
+ bool tmp;
+ int ret;
+ u32 val;
+
+ ret = readx_poll_timeout(pwrap_is_fsm_idle, wrp, tmp, tmp,
+ PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US);
+ if (ret) {
+ pwrap_leave_fsm_vldclr(wrp);
+ return ret;
+ }
+
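+ /*
+ * Arbiter-equipped wrappers take the register address directly; the
+ * others place the halfword address in the upper bits of the command.
+ */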
+ if (HAS_CAP(wrp->master->caps, PWRAP_CAP_ARB))
+ val = adr;
+ else
+ val = (adr >> 1) << 16;
+ pwrap_writel(wrp, val, PWRAP_WACS2_CMD);
+
+ ret = readx_poll_timeout(pwrap_is_fsm_vldclr, wrp, tmp, tmp,
+ PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ if (HAS_CAP(wrp->master->caps, PWRAP_CAP_ARB))
+ val = pwrap_readl(wrp, PWRAP_SWINF_2_RDATA_31_0);
+ else
+ val = pwrap_readl(wrp, PWRAP_WACS2_RDATA);
+ *rdata = PWRAP_GET_WACS_RDATA(val);
+
+ pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR);
+
+ return 0;
+}
+
+static int pwrap_read32(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
+{
+ bool tmp;
+ int ret, msb;
+
+ *rdata = 0;
+ for (msb = 0; msb < 2; msb++) {
+ ret = readx_poll_timeout(pwrap_is_fsm_idle, wrp, tmp, tmp,
+ PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US);
+
+ if (ret) {
+ pwrap_leave_fsm_vldclr(wrp);
+ return ret;
+ }
+
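+ /* Bit 30 of the command selects the upper (1) or lower (0) 16-bit half */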
+ pwrap_writel(wrp, ((msb << 30) | (adr << 16)),
+ PWRAP_WACS2_CMD);
+
+ ret = readx_poll_timeout(pwrap_is_fsm_vldclr, wrp, tmp, tmp,
+ PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ *rdata += (PWRAP_GET_WACS_RDATA(pwrap_readl(wrp,
+ PWRAP_WACS2_RDATA)) << (16 * msb));
+
+ pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR);
+ }
+
+ return 0;
+}
+
+static int pwrap_read(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
+{
+ return wrp->slave->regops->pwrap_read(wrp, adr, rdata);
+}
+
+static int pwrap_write16(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
+{
+ bool tmp;
+ int ret;
+
+ ret = readx_poll_timeout(pwrap_is_fsm_idle, wrp, tmp, tmp,
+ PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US);
+ if (ret) {
+ pwrap_leave_fsm_vldclr(wrp);
+ return ret;
+ }
+
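+ /*
+ * Arbiter-equipped wrappers take the data through a separate WDATA
+ * register and only the address (plus a write flag) in the command;
+ * the others pack write flag, halfword address and data into a
+ * single command word.
+ */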
+ if (HAS_CAP(wrp->master->caps, PWRAP_CAP_ARB)) {
+ pwrap_writel(wrp, wdata, PWRAP_SWINF_2_WDATA_31_0);
+ pwrap_writel(wrp, BIT(29) | adr, PWRAP_WACS2_CMD);
+ } else {
+ pwrap_writel(wrp, BIT(31) | ((adr >> 1) << 16) | wdata,
+ PWRAP_WACS2_CMD);
+ }
+
+ return 0;
+}
+
+static int pwrap_write32(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
+{
+ bool tmp;
+ int ret, msb, rdata;
+
+ for (msb = 0; msb < 2; msb++) {
+ ret = readx_poll_timeout(pwrap_is_fsm_idle, wrp, tmp, tmp,
+ PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US);
+ if (ret) {
+ pwrap_leave_fsm_vldclr(wrp);
+ return ret;
+ }
+
+ pwrap_writel(wrp, (1 << 31) | (msb << 30) | (adr << 16) |
+ ((wdata >> (msb * 16)) & 0xffff),
+ PWRAP_WACS2_CMD);
+
+ /*
+ * The hardware requires a pwrap_read between the two successive
+ * 16-bit writes that make up one 32-bit bus write, to keep them
+ * synchronized. Without it, the write of the lower 16 bits fails.
+ */
+ if (!msb)
+ pwrap_read(wrp, adr, &rdata);
+ }
+
+ return 0;
+}
+
+static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
+{
+ return wrp->slave->regops->pwrap_write(wrp, adr, wdata);
+}
+
+static int pwrap_regmap_read(void *context, u32 adr, u32 *rdata)
+{
+ return pwrap_read(context, adr, rdata);
+}
+
+static int pwrap_regmap_write(void *context, u32 adr, u32 wdata)
+{
+ return pwrap_write(context, adr, wdata);
+}
+
+static bool pwrap_pmic_read_test(struct pmic_wrapper *wrp, const u32 *dew_regs,
+ u16 read_test_val)
+{
+ bool is_success;
+ u32 rdata;
+
+ pwrap_read(wrp, dew_regs[PWRAP_DEW_READ_TEST], &rdata);
+ is_success = ((rdata & U16_MAX) == read_test_val);
+
+ return is_success;
+}
+
+static int pwrap_reset_spislave(struct pmic_wrapper *wrp)
+{
+ bool tmp;
+ int ret, i;
+
+ pwrap_writel(wrp, 0, PWRAP_HIPRIO_ARB_EN);
+ pwrap_writel(wrp, 0, PWRAP_WRAP_EN);
+ pwrap_writel(wrp, 1, PWRAP_MUX_SEL);
+ pwrap_writel(wrp, 1, PWRAP_MAN_EN);
+ pwrap_writel(wrp, 0, PWRAP_DIO_EN);
+
+ pwrap_writel(wrp, wrp->master->spi_w | PWRAP_MAN_CMD_OP_CSL,
+ PWRAP_MAN_CMD);
+ pwrap_writel(wrp, wrp->master->spi_w | PWRAP_MAN_CMD_OP_OUTS,
+ PWRAP_MAN_CMD);
+ pwrap_writel(wrp, wrp->master->spi_w | PWRAP_MAN_CMD_OP_CSH,
+ PWRAP_MAN_CMD);
+
+ for (i = 0; i < 4; i++)
+ pwrap_writel(wrp, wrp->master->spi_w | PWRAP_MAN_CMD_OP_OUTS,
+ PWRAP_MAN_CMD);
+
+ ret = readx_poll_timeout(pwrap_is_sync_idle, wrp, tmp, tmp,
+ PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US);
+ if (ret) {
+ dev_err(wrp->dev, "%s fail, ret=%d\n", __func__, ret);
+ return ret;
+ }
+
+ pwrap_writel(wrp, 0, PWRAP_MAN_EN);
+ pwrap_writel(wrp, 0, PWRAP_MUX_SEL);
+
+ return 0;
+}
+
+/*
+ * pwrap_init_sidly - configure serial input delay
+ *
+ * The serial input delay can be set to 0, 2, 4 or 6 ns. Run a read test
+ * with every possible value and choose the best delay.
+ */
+static int pwrap_init_sidly(struct pmic_wrapper *wrp)
+{
+ u32 i;
+ u32 pass = 0;
+ bool read_ok;
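+ /*
+ * Lookup of the SIDLY value to use for each bitmask of passing read
+ * tests; -1 marks bitmasks without a usable (contiguous) pass window.
+ */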
+ signed char dly[16] = {
+ -1, 0, 1, 0, 2, -1, 1, 1, 3, -1, -1, -1, 3, -1, 2, 1
+ };
+
+ for (i = 0; i < 4; i++) {
+ pwrap_writel(wrp, i, PWRAP_SIDLY);
+ read_ok = pwrap_pmic_read_test(wrp, wrp->slave->dew_regs,
+ PWRAP_DEW_READ_TEST_VAL);
+ if (read_ok) {
+ dev_dbg(wrp->dev, "[Read Test] pass, SIDLY=%x\n", i);
+ pass |= 1 << i;
+ }
+ }
+
+ if (dly[pass] < 0) {
+ dev_err(wrp->dev, "sidly pass range 0x%x not continuous\n",
+ pass);
+ return -EIO;
+ }
+
+ pwrap_writel(wrp, dly[pass], PWRAP_SIDLY);
+
+ return 0;
+}
+
+static int pwrap_init_dual_io(struct pmic_wrapper *wrp)
+{
+ int ret;
+ bool read_ok, tmp;
+ bool comp_read_ok = true;
+
+ /* Enable dual IO mode */
+ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_DIO_EN], 1);
+ if (wrp->slave->comp_dew_regs)
+ pwrap_write(wrp, wrp->slave->comp_dew_regs[PWRAP_DEW_DIO_EN], 1);
+
+ /* Check IDLE & INIT_DONE in advance */
+ ret = readx_poll_timeout(pwrap_is_fsm_idle_and_sync_idle, wrp, tmp, tmp,
+ PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US);
+ if (ret) {
+ dev_err(wrp->dev, "%s fail, ret=%d\n", __func__, ret);
+ return ret;
+ }
+
+ pwrap_writel(wrp, 1, PWRAP_DIO_EN);
+
+ /* Read Test */
+ read_ok = pwrap_pmic_read_test(wrp, wrp->slave->dew_regs, PWRAP_DEW_READ_TEST_VAL);
+ if (wrp->slave->comp_dew_regs)
+ comp_read_ok = pwrap_pmic_read_test(wrp, wrp->slave->comp_dew_regs,
+ PWRAP_DEW_COMP_READ_TEST_VAL);
+ if (!read_ok || !comp_read_ok) {
+ dev_err(wrp->dev, "Read failed on DIO mode. Main PMIC %s%s\n",
+ !read_ok ? "fail" : "success",
+ wrp->slave->comp_dew_regs && !comp_read_ok ?
+ ", Companion PMIC fail" : "");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/*
+ * pwrap_init_chip_select_ext is used to configure CS extension time for each
+ * phase during data transactions on the pwrap bus.
+ */
+static void pwrap_init_chip_select_ext(struct pmic_wrapper *wrp, u8 hext_write,
+ u8 hext_read, u8 lext_start,
+ u8 lext_end)
+{
+ /*
+ * After a write or read transaction finishes, extend the CS high time
+ * to at least hext_write or hext_read cycles of the bus clock,
+ * respectively.
+ */
+ pwrap_writel(wrp, hext_write, PWRAP_CSHEXT_WRITE);
+ pwrap_writel(wrp, hext_read, PWRAP_CSHEXT_READ);
+
+ /*
+ * Extend the CS low time after the CSL command and before the CSH
+ * command to at least lext_start and lext_end cycles of the bus clock,
+ * respectively.
+ */
+ pwrap_writel(wrp, lext_start, PWRAP_CSLEXT_START);
+ pwrap_writel(wrp, lext_end, PWRAP_CSLEXT_END);
+}
+
+static int pwrap_common_init_reg_clock(struct pmic_wrapper *wrp)
+{
+ switch (wrp->master->type) {
+ case PWRAP_MT6795:
+ if (wrp->slave->type == PMIC_MT6331) {
+ const u32 *dew_regs = wrp->slave->dew_regs;
+
+ pwrap_write(wrp, dew_regs[PWRAP_DEW_RDDMY_NO], 0x8);
+
+ if (wrp->slave->comp_type == PMIC_MT6332) {
+ dew_regs = wrp->slave->comp_dew_regs;
+ pwrap_write(wrp, dew_regs[PWRAP_DEW_RDDMY_NO], 0x8);
+ }
+ }
+ pwrap_writel(wrp, 0x88, PWRAP_RDDMY);
+ pwrap_init_chip_select_ext(wrp, 15, 15, 15, 15);
+ break;
+ case PWRAP_MT8173:
+ pwrap_init_chip_select_ext(wrp, 0, 4, 2, 2);
+ break;
+ case PWRAP_MT8135:
+ pwrap_writel(wrp, 0x4, PWRAP_CSHEXT);
+ pwrap_init_chip_select_ext(wrp, 0, 4, 0, 0);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int pwrap_mt2701_init_reg_clock(struct pmic_wrapper *wrp)
+{
+ switch (wrp->slave->type) {
+ case PMIC_MT6397:
+ pwrap_writel(wrp, 0xc, PWRAP_RDDMY);
+ pwrap_init_chip_select_ext(wrp, 4, 0, 2, 2);
+ break;
+
+ case PMIC_MT6323:
+ pwrap_writel(wrp, 0x8, PWRAP_RDDMY);
+ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_RDDMY_NO],
+ 0x8);
+ pwrap_init_chip_select_ext(wrp, 5, 0, 2, 2);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static bool pwrap_is_cipher_ready(struct pmic_wrapper *wrp)
+{
+ return pwrap_readl(wrp, PWRAP_CIPHER_RDY) & 1;
+}
+
+static bool __pwrap_is_pmic_cipher_ready(struct pmic_wrapper *wrp, const u32 *dew_regs)
+{
+ u32 rdata;
+ int ret;
+
+ ret = pwrap_read(wrp, dew_regs[PWRAP_DEW_CIPHER_RDY], &rdata);
+ if (ret)
+ return false;
+
+ return rdata == 1;
+}
+
+static bool pwrap_is_pmic_cipher_ready(struct pmic_wrapper *wrp)
+{
+ bool ret = __pwrap_is_pmic_cipher_ready(wrp, wrp->slave->dew_regs);
+
+ if (!ret)
+ return ret;
+
+ /* If there's any companion, wait for it to be ready too */
+ if (wrp->slave->comp_dew_regs)
+ ret = __pwrap_is_pmic_cipher_ready(wrp, wrp->slave->comp_dew_regs);
+
+ return ret;
+}
+
+static void pwrap_config_cipher(struct pmic_wrapper *wrp, const u32 *dew_regs)
+{
+ pwrap_write(wrp, dew_regs[PWRAP_DEW_CIPHER_SWRST], 0x1);
+ pwrap_write(wrp, dew_regs[PWRAP_DEW_CIPHER_SWRST], 0x0);
+ pwrap_write(wrp, dew_regs[PWRAP_DEW_CIPHER_KEY_SEL], 0x1);
+ pwrap_write(wrp, dew_regs[PWRAP_DEW_CIPHER_IV_SEL], 0x2);
+}
+
+static int pwrap_init_cipher(struct pmic_wrapper *wrp)
+{
+ int ret;
+ bool tmp;
+ u32 rdata = 0;
+
+ pwrap_writel(wrp, 0x1, PWRAP_CIPHER_SWRST);
+ pwrap_writel(wrp, 0x0, PWRAP_CIPHER_SWRST);
+ pwrap_writel(wrp, 0x1, PWRAP_CIPHER_KEY_SEL);
+ pwrap_writel(wrp, 0x2, PWRAP_CIPHER_IV_SEL);
+
+ switch (wrp->master->type) {
+ case PWRAP_MT8135:
+ pwrap_writel(wrp, 1, PWRAP_CIPHER_LOAD);
+ pwrap_writel(wrp, 1, PWRAP_CIPHER_START);
+ break;
+ case PWRAP_MT2701:
+ case PWRAP_MT6765:
+ case PWRAP_MT6779:
+ case PWRAP_MT6795:
+ case PWRAP_MT6797:
+ case PWRAP_MT8173:
+ case PWRAP_MT8186:
+ case PWRAP_MT8365:
+ case PWRAP_MT8516:
+ pwrap_writel(wrp, 1, PWRAP_CIPHER_EN);
+ break;
+ case PWRAP_MT7622:
+ pwrap_writel(wrp, 0, PWRAP_CIPHER_EN);
+ break;
+ case PWRAP_MT6873:
+ case PWRAP_MT8183:
+ case PWRAP_MT8195:
+ break;
+ }
+
+ /* Config cipher mode @PMIC */
+ pwrap_config_cipher(wrp, wrp->slave->dew_regs);
+
+ /* If there is any companion PMIC, configure cipher mode there too */
+ if (wrp->slave->comp_type > 0)
+ pwrap_config_cipher(wrp, wrp->slave->comp_dew_regs);
+
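+ /* Trigger the cipher on the slave side; the sequence depends on the PMIC model */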
+ switch (wrp->slave->type) {
+ case PMIC_MT6397:
+ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_LOAD],
+ 0x1);
+ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_START],
+ 0x1);
+ break;
+ case PMIC_MT6323:
+ case PMIC_MT6351:
+ case PMIC_MT6357:
+ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_EN],
+ 0x1);
+ break;
+ default:
+ break;
+ }
+
+ /* wait for cipher data ready@AP */
+ ret = readx_poll_timeout(pwrap_is_cipher_ready, wrp, tmp, tmp,
+ PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US);
+ if (ret) {
+ dev_err(wrp->dev, "cipher data ready@AP fail, ret=%d\n", ret);
+ return ret;
+ }
+
+ /* wait for cipher data ready@PMIC */
+ ret = readx_poll_timeout(pwrap_is_pmic_cipher_ready, wrp, tmp, tmp,
+ PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US);
+ if (ret) {
+ dev_err(wrp->dev,
+ "timeout waiting for cipher data ready@PMIC\n");
+ return ret;
+ }
+
+ /* wait for cipher mode idle */
+ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_MODE], 0x1);
+ ret = readx_poll_timeout(pwrap_is_fsm_idle_and_sync_idle, wrp, tmp, tmp,
+ PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US);
+ if (ret) {
+ dev_err(wrp->dev, "cipher mode idle fail, ret=%d\n", ret);
+ return ret;
+ }
+
+ pwrap_writel(wrp, 1, PWRAP_CIPHER_MODE);
+
+ /* Write Test */
+ if (pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_WRITE_TEST],
+ PWRAP_DEW_WRITE_TEST_VAL) ||
+ pwrap_read(wrp, wrp->slave->dew_regs[PWRAP_DEW_WRITE_TEST],
+ &rdata) ||
+ (rdata != PWRAP_DEW_WRITE_TEST_VAL)) {
+ dev_err(wrp->dev, "rdata=0x%04X\n", rdata);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int pwrap_init_security(struct pmic_wrapper *wrp)
+{
+ u32 crc_val;
+ int ret;
+
+ /* Enable encryption */
+ ret = pwrap_init_cipher(wrp);
+ if (ret)
+ return ret;
+
+ /* Signature checking - using CRC */
+ ret = pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CRC_EN], 0x1);
+ if (ret == 0 && wrp->slave->comp_dew_regs)
+ ret = pwrap_write(wrp, wrp->slave->comp_dew_regs[PWRAP_DEW_CRC_EN], 0x1);
+
+ pwrap_writel(wrp, 0x1, PWRAP_CRC_EN);
+ pwrap_writel(wrp, 0x0, PWRAP_SIG_MODE);
+
+ /*
+ * Signature address: the main PMIC CRC value register in bits 15:0 and,
+ * if present, the companion PMIC one in bits 31:16.
+ */
+ crc_val = wrp->slave->dew_regs[PWRAP_DEW_CRC_VAL];
+ if (wrp->slave->comp_dew_regs)
+ crc_val |= wrp->slave->comp_dew_regs[PWRAP_DEW_CRC_VAL] << 16;
+
+ pwrap_writel(wrp, crc_val, PWRAP_SIG_ADR);
+
+ /* PMIC Wrapper Arbiter priority */
+ pwrap_writel(wrp,
+ wrp->master->arb_en_all, PWRAP_HIPRIO_ARB_EN);
+
+ return 0;
+}
+
+static int pwrap_mt8135_init_soc_specific(struct pmic_wrapper *wrp)
+{
+ /* enable pwrap events and pwrap bridge in AP side */
+ pwrap_writel(wrp, 0x1, PWRAP_EVENT_IN_EN);
+ pwrap_writel(wrp, 0xffff, PWRAP_EVENT_DST_EN);
+ writel(0x7f, wrp->bridge_base + PWRAP_MT8135_BRIDGE_IORD_ARB_EN);
+ writel(0x1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_WACS3_EN);
+ writel(0x1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_WACS4_EN);
+ writel(0x1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_WDT_UNIT);
+ writel(0xffff, wrp->bridge_base + PWRAP_MT8135_BRIDGE_WDT_SRC_EN);
+ writel(0x1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_TIMER_EN);
+ writel(0x7ff, wrp->bridge_base + PWRAP_MT8135_BRIDGE_INT_EN);
+
+ /* enable PMIC event out and sources */
+ if (pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_EVENT_OUT_EN],
+ 0x1) ||
+ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_EVENT_SRC_EN],
+ 0xffff)) {
+ dev_err(wrp->dev, "enable dewrap fail\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int pwrap_mt8173_init_soc_specific(struct pmic_wrapper *wrp)
+{
+ /* PMIC_DEWRAP enables */
+ if (pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_EVENT_OUT_EN],
+ 0x1) ||
+ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_EVENT_SRC_EN],
+ 0xffff)) {
+ dev_err(wrp->dev, "enable dewrap fail\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int pwrap_mt2701_init_soc_specific(struct pmic_wrapper *wrp)
+{
+ /* GPS_INTF initialization */
+ switch (wrp->slave->type) {
+ case PMIC_MT6323:
+ pwrap_writel(wrp, 0x076c, PWRAP_ADC_CMD_ADDR);
+ pwrap_writel(wrp, 0x8000, PWRAP_PWRAP_ADC_CMD);
+ pwrap_writel(wrp, 0x072c, PWRAP_ADC_RDY_ADDR);
+ pwrap_writel(wrp, 0x072e, PWRAP_ADC_RDATA_ADDR1);
+ pwrap_writel(wrp, 0x0730, PWRAP_ADC_RDATA_ADDR2);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int pwrap_mt6795_init_soc_specific(struct pmic_wrapper *wrp)
+{
+ pwrap_writel(wrp, 0xf, PWRAP_STAUPD_GRPEN);
+
+ if (wrp->slave->type == PMIC_MT6331)
+ pwrap_writel(wrp, 0x1b4, PWRAP_EINT_STA0_ADR);
+
+ if (wrp->slave->comp_type == PMIC_MT6332)
+ pwrap_writel(wrp, 0x8112, PWRAP_EINT_STA1_ADR);
+
+ return 0;
+}
+
+static int pwrap_mt7622_init_soc_specific(struct pmic_wrapper *wrp)
+{
+ pwrap_writel(wrp, 0, PWRAP_STAUPD_PRD);
+ /* enable 2wire SPI master */
+ pwrap_writel(wrp, 0x8000000, PWRAP_SPI2_CTRL);
+
+ return 0;
+}
+
+static int pwrap_mt8183_init_soc_specific(struct pmic_wrapper *wrp)
+{
+ pwrap_writel(wrp, 0xf5, PWRAP_STAUPD_GRPEN);
+
+ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CRC_EN], 0x1);
+ pwrap_writel(wrp, 1, PWRAP_CRC_EN);
+ pwrap_writel(wrp, 0x416, PWRAP_SIG_ADR);
+ pwrap_writel(wrp, 0x42e, PWRAP_EINT_STA0_ADR);
+
+ pwrap_writel(wrp, 1, PWRAP_WACS_P2P_EN);
+ pwrap_writel(wrp, 1, PWRAP_WACS_MD32_EN);
+ pwrap_writel(wrp, 1, PWRAP_INIT_DONE_P2P);
+ pwrap_writel(wrp, 1, PWRAP_INIT_DONE_MD32);
+
+ return 0;
+}
+
+static int pwrap_init(struct pmic_wrapper *wrp)
+{
+ int ret;
+
+ if (wrp->rstc)
+ reset_control_reset(wrp->rstc);
+ if (wrp->rstc_bridge)
+ reset_control_reset(wrp->rstc_bridge);
+
+ switch (wrp->master->type) {
+ case PWRAP_MT6795:
+ fallthrough;
+ case PWRAP_MT8173:
+ /* Enable DCM */
+ pwrap_writel(wrp, 3, PWRAP_DCM_EN);
+ pwrap_writel(wrp, 0, PWRAP_DCM_DBC_PRD);
+ break;
+ default:
+ break;
+ }
+
+ if (HAS_CAP(wrp->slave->caps, PWRAP_SLV_CAP_SPI)) {
+ /* Reset SPI slave */
+ ret = pwrap_reset_spislave(wrp);
+ if (ret)
+ return ret;
+ }
+
+ pwrap_writel(wrp, 1, PWRAP_WRAP_EN);
+
+ pwrap_writel(wrp, wrp->master->arb_en_all, PWRAP_HIPRIO_ARB_EN);
+
+ pwrap_writel(wrp, 1, PWRAP_WACS2_EN);
+
+ ret = wrp->master->init_reg_clock(wrp);
+ if (ret)
+ return ret;
+
+ if (HAS_CAP(wrp->slave->caps, PWRAP_SLV_CAP_SPI)) {
+ /* Setup serial input delay */
+ ret = pwrap_init_sidly(wrp);
+ if (ret)
+ return ret;
+ }
+
+ if (HAS_CAP(wrp->slave->caps, PWRAP_SLV_CAP_DUALIO)) {
+ /* Enable dual I/O mode */
+ ret = pwrap_init_dual_io(wrp);
+ if (ret)
+ return ret;
+ }
+
+ if (HAS_CAP(wrp->slave->caps, PWRAP_SLV_CAP_SECURITY)) {
+ /* Enable security on bus */
+ ret = pwrap_init_security(wrp);
+ if (ret)
+ return ret;
+ }
+
+ if (wrp->master->type == PWRAP_MT8135)
+ pwrap_writel(wrp, 0x7, PWRAP_RRARB_EN);
+
+ pwrap_writel(wrp, 0x1, PWRAP_WACS0_EN);
+ pwrap_writel(wrp, 0x1, PWRAP_WACS1_EN);
+ pwrap_writel(wrp, 0x1, PWRAP_WACS2_EN);
+ pwrap_writel(wrp, 0x5, PWRAP_STAUPD_PRD);
+ pwrap_writel(wrp, 0xff, PWRAP_STAUPD_GRPEN);
+
+ if (wrp->master->init_soc_specific) {
+ ret = wrp->master->init_soc_specific(wrp);
+ if (ret)
+ return ret;
+ }
+
+ /* Setup the init done registers */
+ pwrap_writel(wrp, 1, PWRAP_INIT_DONE2);
+ pwrap_writel(wrp, 1, PWRAP_INIT_DONE0);
+ pwrap_writel(wrp, 1, PWRAP_INIT_DONE1);
+
+ if (HAS_CAP(wrp->master->caps, PWRAP_CAP_BRIDGE)) {
+ writel(1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_INIT_DONE3);
+ writel(1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_INIT_DONE4);
+ }
+
+ return 0;
+}
+
+static irqreturn_t pwrap_interrupt(int irqno, void *dev_id)
+{
+ u32 rdata;
+ struct pmic_wrapper *wrp = dev_id;
+
+ rdata = pwrap_readl(wrp, PWRAP_INT_FLG);
+ dev_err(wrp->dev, "unexpected interrupt int=0x%x\n", rdata);
+ pwrap_writel(wrp, 0xffffffff, PWRAP_INT_CLR);
+
+ if (HAS_CAP(wrp->master->caps, PWRAP_CAP_INT1_EN)) {
+ rdata = pwrap_readl(wrp, PWRAP_INT1_FLG);
+ dev_err(wrp->dev, "unexpected interrupt int1=0x%x\n", rdata);
+ pwrap_writel(wrp, 0xffffffff, PWRAP_INT1_CLR);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static const struct regmap_config pwrap_regmap_config16 = {
+ .reg_bits = 16,
+ .val_bits = 16,
+ .reg_stride = 2,
+ .reg_read = pwrap_regmap_read,
+ .reg_write = pwrap_regmap_write,
+ .max_register = 0xffff,
+};
+
+static const struct regmap_config pwrap_regmap_config32 = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .reg_read = pwrap_regmap_read,
+ .reg_write = pwrap_regmap_write,
+ .max_register = 0xffff,
+};
+
+static const struct pwrap_slv_regops pwrap_regops16 = {
+ .pwrap_read = pwrap_read16,
+ .pwrap_write = pwrap_write16,
+ .regmap = &pwrap_regmap_config16,
+};
+
+static const struct pwrap_slv_regops pwrap_regops32 = {
+ .pwrap_read = pwrap_read32,
+ .pwrap_write = pwrap_write32,
+ .regmap = &pwrap_regmap_config32,
+};
+
+static const struct pwrap_slv_type pmic_mt6323 = {
+ .dew_regs = mt6323_regs,
+ .type = PMIC_MT6323,
+ .regops = &pwrap_regops16,
+ .caps = PWRAP_SLV_CAP_SPI | PWRAP_SLV_CAP_DUALIO |
+ PWRAP_SLV_CAP_SECURITY,
+};
+
+static const struct pwrap_slv_type pmic_mt6331 = {
+ .dew_regs = mt6331_regs,
+ .type = PMIC_MT6331,
+ .comp_dew_regs = mt6332_regs,
+ .comp_type = PMIC_MT6332,
+ .regops = &pwrap_regops16,
+ .caps = PWRAP_SLV_CAP_SPI | PWRAP_SLV_CAP_DUALIO |
+ PWRAP_SLV_CAP_SECURITY,
+};
+
+static const struct pwrap_slv_type pmic_mt6351 = {
+ .dew_regs = mt6351_regs,
+ .type = PMIC_MT6351,
+ .regops = &pwrap_regops16,
+ .caps = 0,
+};
+
+static const struct pwrap_slv_type pmic_mt6357 = {
+ .dew_regs = mt6357_regs,
+ .type = PMIC_MT6357,
+ .regops = &pwrap_regops16,
+ .caps = 0,
+};
+
+static const struct pwrap_slv_type pmic_mt6358 = {
+ .dew_regs = mt6358_regs,
+ .type = PMIC_MT6358,
+ .regops = &pwrap_regops16,
+ .caps = PWRAP_SLV_CAP_SPI | PWRAP_SLV_CAP_DUALIO,
+};
+
+static const struct pwrap_slv_type pmic_mt6359 = {
+ .dew_regs = mt6359_regs,
+ .type = PMIC_MT6359,
+ .regops = &pwrap_regops16,
+ .caps = PWRAP_SLV_CAP_DUALIO,
+};
+
+static const struct pwrap_slv_type pmic_mt6380 = {
+ .dew_regs = NULL,
+ .type = PMIC_MT6380,
+ .regops = &pwrap_regops32,
+ .caps = 0,
+};
+
+static const struct pwrap_slv_type pmic_mt6397 = {
+ .dew_regs = mt6397_regs,
+ .type = PMIC_MT6397,
+ .regops = &pwrap_regops16,
+ .caps = PWRAP_SLV_CAP_SPI | PWRAP_SLV_CAP_DUALIO |
+ PWRAP_SLV_CAP_SECURITY,
+};
+
+static const struct of_device_id of_slave_match_tbl[] = {
+ { .compatible = "mediatek,mt6323", .data = &pmic_mt6323 },
+ { .compatible = "mediatek,mt6331", .data = &pmic_mt6331 },
+ { .compatible = "mediatek,mt6351", .data = &pmic_mt6351 },
+ { .compatible = "mediatek,mt6357", .data = &pmic_mt6357 },
+ { .compatible = "mediatek,mt6358", .data = &pmic_mt6358 },
+ { .compatible = "mediatek,mt6359", .data = &pmic_mt6359 },
+
+ /* The MT6380 PMIC only implements a regulator, so we bind it
+ * directly instead of using an MFD.
+ */
+ { .compatible = "mediatek,mt6380-regulator", .data = &pmic_mt6380 },
+ { .compatible = "mediatek,mt6397", .data = &pmic_mt6397 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_slave_match_tbl);
+
+static const struct pmic_wrapper_type pwrap_mt2701 = {
+ .regs = mt2701_regs,
+ .type = PWRAP_MT2701,
+ .arb_en_all = 0x3f,
+ .int_en_all = ~(u32)(BIT(31) | BIT(2)),
+ .int1_en_all = 0,
+ .spi_w = PWRAP_MAN_CMD_SPI_WRITE_NEW,
+ .wdt_src = PWRAP_WDT_SRC_MASK_ALL,
+ .caps = PWRAP_CAP_RESET | PWRAP_CAP_DCM,
+ .init_reg_clock = pwrap_mt2701_init_reg_clock,
+ .init_soc_specific = pwrap_mt2701_init_soc_specific,
+};
+
+static const struct pmic_wrapper_type pwrap_mt6765 = {
+ .regs = mt6765_regs,
+ .type = PWRAP_MT6765,
+ .arb_en_all = 0x3fd35,
+ .int_en_all = 0xffffffff,
+ .spi_w = PWRAP_MAN_CMD_SPI_WRITE,
+ .wdt_src = PWRAP_WDT_SRC_MASK_ALL,
+ .caps = PWRAP_CAP_RESET | PWRAP_CAP_DCM,
+ .init_reg_clock = pwrap_common_init_reg_clock,
+ .init_soc_specific = NULL,
+};
+
+static const struct pmic_wrapper_type pwrap_mt6779 = {
+ .regs = mt6779_regs,
+ .type = PWRAP_MT6779,
+ .arb_en_all = 0xfbb7f,
+ .int_en_all = 0xfffffffe,
+ .int1_en_all = 0,
+ .spi_w = PWRAP_MAN_CMD_SPI_WRITE,
+ .wdt_src = PWRAP_WDT_SRC_MASK_ALL,
+ .caps = 0,
+ .init_reg_clock = pwrap_common_init_reg_clock,
+ .init_soc_specific = NULL,
+};
+
+static const struct pmic_wrapper_type pwrap_mt6795 = {
+ .regs = mt6795_regs,
+ .type = PWRAP_MT6795,
+ .arb_en_all = 0x3f,
+ .int_en_all = ~(u32)(BIT(31) | BIT(2) | BIT(1)),
+ .int1_en_all = 0,
+ .spi_w = PWRAP_MAN_CMD_SPI_WRITE,
+ .wdt_src = PWRAP_WDT_SRC_MASK_NO_STAUPD,
+ .caps = PWRAP_CAP_RESET | PWRAP_CAP_DCM,
+ .init_reg_clock = pwrap_common_init_reg_clock,
+ .init_soc_specific = pwrap_mt6795_init_soc_specific,
+};
+
+static const struct pmic_wrapper_type pwrap_mt6797 = {
+ .regs = mt6797_regs,
+ .type = PWRAP_MT6797,
+ .arb_en_all = 0x01fff,
+ .int_en_all = 0xffffffc6,
+ .int1_en_all = 0,
+ .spi_w = PWRAP_MAN_CMD_SPI_WRITE,
+ .wdt_src = PWRAP_WDT_SRC_MASK_ALL,
+ .caps = PWRAP_CAP_RESET | PWRAP_CAP_DCM,
+ .init_reg_clock = pwrap_common_init_reg_clock,
+ .init_soc_specific = NULL,
+};
+
+static const struct pmic_wrapper_type pwrap_mt6873 = {
+ .regs = mt6873_regs,
+ .type = PWRAP_MT6873,
+ .arb_en_all = 0x777f,
+ .int_en_all = BIT(4) | BIT(5),
+ .int1_en_all = 0,
+ .spi_w = PWRAP_MAN_CMD_SPI_WRITE,
+ .wdt_src = PWRAP_WDT_SRC_MASK_ALL,
+ .caps = PWRAP_CAP_ARB,
+ .init_reg_clock = pwrap_common_init_reg_clock,
+ .init_soc_specific = NULL,
+};
+
+static const struct pmic_wrapper_type pwrap_mt7622 = {
+ .regs = mt7622_regs,
+ .type = PWRAP_MT7622,
+ .arb_en_all = 0xff,
+ .int_en_all = ~(u32)BIT(31),
+ .int1_en_all = 0,
+ .spi_w = PWRAP_MAN_CMD_SPI_WRITE,
+ .wdt_src = PWRAP_WDT_SRC_MASK_ALL,
+ .caps = PWRAP_CAP_RESET | PWRAP_CAP_DCM,
+ .init_reg_clock = pwrap_common_init_reg_clock,
+ .init_soc_specific = pwrap_mt7622_init_soc_specific,
+};
+
+static const struct pmic_wrapper_type pwrap_mt8135 = {
+ .regs = mt8135_regs,
+ .type = PWRAP_MT8135,
+ .arb_en_all = 0x1ff,
+ .int_en_all = ~(u32)(BIT(31) | BIT(1)),
+ .int1_en_all = 0,
+ .spi_w = PWRAP_MAN_CMD_SPI_WRITE,
+ .wdt_src = PWRAP_WDT_SRC_MASK_ALL,
+ .caps = PWRAP_CAP_BRIDGE | PWRAP_CAP_RESET | PWRAP_CAP_DCM,
+ .init_reg_clock = pwrap_common_init_reg_clock,
+ .init_soc_specific = pwrap_mt8135_init_soc_specific,
+};
+
+static const struct pmic_wrapper_type pwrap_mt8173 = {
+ .regs = mt8173_regs,
+ .type = PWRAP_MT8173,
+ .arb_en_all = 0x3f,
+ .int_en_all = ~(u32)(BIT(31) | BIT(1)),
+ .int1_en_all = 0,
+ .spi_w = PWRAP_MAN_CMD_SPI_WRITE,
+ .wdt_src = PWRAP_WDT_SRC_MASK_NO_STAUPD,
+ .caps = PWRAP_CAP_RESET | PWRAP_CAP_DCM,
+ .init_reg_clock = pwrap_common_init_reg_clock,
+ .init_soc_specific = pwrap_mt8173_init_soc_specific,
+};
+
+static const struct pmic_wrapper_type pwrap_mt8183 = {
+ .regs = mt8183_regs,
+ .type = PWRAP_MT8183,
+ .arb_en_all = 0x3fa75,
+ .int_en_all = 0xffffffff,
+ .int1_en_all = 0xeef7ffff,
+ .spi_w = PWRAP_MAN_CMD_SPI_WRITE,
+ .wdt_src = PWRAP_WDT_SRC_MASK_ALL,
+ .caps = PWRAP_CAP_INT1_EN | PWRAP_CAP_WDT_SRC1,
+ .init_reg_clock = pwrap_common_init_reg_clock,
+ .init_soc_specific = pwrap_mt8183_init_soc_specific,
+};
+
+static const struct pmic_wrapper_type pwrap_mt8195 = {
+ .regs = mt8195_regs,
+ .type = PWRAP_MT8195,
+ .arb_en_all = 0x777f, /* NEED CONFIRM */
+ .int_en_all = 0x180000, /* NEED CONFIRM */
+ .int1_en_all = 0,
+ .spi_w = PWRAP_MAN_CMD_SPI_WRITE,
+ .wdt_src = PWRAP_WDT_SRC_MASK_ALL,
+ .caps = PWRAP_CAP_INT1_EN | PWRAP_CAP_ARB,
+ .init_reg_clock = pwrap_common_init_reg_clock,
+ .init_soc_specific = NULL,
+};
+
+static const struct pmic_wrapper_type pwrap_mt8365 = {
+ .regs = mt8365_regs,
+ .type = PWRAP_MT8365,
+ .arb_en_all = 0x3ffff,
+ .int_en_all = 0x7f1fffff,
+ .int1_en_all = 0x0,
+ .spi_w = PWRAP_MAN_CMD_SPI_WRITE,
+ .wdt_src = PWRAP_WDT_SRC_MASK_ALL,
+ .caps = PWRAP_CAP_INT1_EN | PWRAP_CAP_WDT_SRC1,
+ .init_reg_clock = pwrap_common_init_reg_clock,
+ .init_soc_specific = NULL,
+};
+
+static const struct pmic_wrapper_type pwrap_mt8516 = {
+ .regs = mt8516_regs,
+ .type = PWRAP_MT8516,
+ .arb_en_all = 0xff,
+ .int_en_all = ~(u32)(BIT(31) | BIT(2)),
+ .spi_w = PWRAP_MAN_CMD_SPI_WRITE,
+ .wdt_src = PWRAP_WDT_SRC_MASK_ALL,
+ .caps = PWRAP_CAP_DCM,
+ .init_reg_clock = pwrap_mt2701_init_reg_clock,
+ .init_soc_specific = NULL,
+};
+
+static const struct pmic_wrapper_type pwrap_mt8186 = {
+ .regs = mt8186_regs,
+ .type = PWRAP_MT8186,
+ .arb_en_all = 0xfb27f,
+ .int_en_all = 0xfffffffe, /* disable WatchDog Timeout for bit 1 */
+ .int1_en_all = 0x000017ff, /* disable Matching interrupt for bit 13 */
+ .spi_w = PWRAP_MAN_CMD_SPI_WRITE,
+ .wdt_src = PWRAP_WDT_SRC_MASK_ALL,
+ .caps = PWRAP_CAP_INT1_EN | PWRAP_CAP_ARB_MT8186,
+ .init_reg_clock = pwrap_common_init_reg_clock,
+ .init_soc_specific = NULL,
+};
+
+static const struct of_device_id of_pwrap_match_tbl[] = {
+ { .compatible = "mediatek,mt2701-pwrap", .data = &pwrap_mt2701 },
+ { .compatible = "mediatek,mt6765-pwrap", .data = &pwrap_mt6765 },
+ { .compatible = "mediatek,mt6779-pwrap", .data = &pwrap_mt6779 },
+ { .compatible = "mediatek,mt6795-pwrap", .data = &pwrap_mt6795 },
+ { .compatible = "mediatek,mt6797-pwrap", .data = &pwrap_mt6797 },
+ { .compatible = "mediatek,mt6873-pwrap", .data = &pwrap_mt6873 },
+ { .compatible = "mediatek,mt7622-pwrap", .data = &pwrap_mt7622 },
+ { .compatible = "mediatek,mt8135-pwrap", .data = &pwrap_mt8135 },
+ { .compatible = "mediatek,mt8173-pwrap", .data = &pwrap_mt8173 },
+ { .compatible = "mediatek,mt8183-pwrap", .data = &pwrap_mt8183 },
+ { .compatible = "mediatek,mt8186-pwrap", .data = &pwrap_mt8186 },
+ { .compatible = "mediatek,mt8195-pwrap", .data = &pwrap_mt8195 },
+ { .compatible = "mediatek,mt8365-pwrap", .data = &pwrap_mt8365 },
+ { .compatible = "mediatek,mt8516-pwrap", .data = &pwrap_mt8516 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_pwrap_match_tbl);
+
+static int pwrap_probe(struct platform_device *pdev)
+{
+ int ret, irq;
+ u32 mask_done;
+ struct pmic_wrapper *wrp;
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *of_slave_id = NULL;
+
+ if (np->child)
+ of_slave_id = of_match_node(of_slave_match_tbl, np->child);
+
+ if (!of_slave_id) {
+ dev_dbg(&pdev->dev, "slave pmic should be defined in dts\n");
+ return -EINVAL;
+ }
+
+ wrp = devm_kzalloc(&pdev->dev, sizeof(*wrp), GFP_KERNEL);
+ if (!wrp)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, wrp);
+
+ wrp->master = of_device_get_match_data(&pdev->dev);
+ wrp->slave = of_slave_id->data;
+ wrp->dev = &pdev->dev;
+
+ wrp->base = devm_platform_ioremap_resource_byname(pdev, "pwrap");
+ if (IS_ERR(wrp->base))
+ return PTR_ERR(wrp->base);
+
+ if (HAS_CAP(wrp->master->caps, PWRAP_CAP_RESET)) {
+ wrp->rstc = devm_reset_control_get(wrp->dev, "pwrap");
+ if (IS_ERR(wrp->rstc)) {
+ ret = PTR_ERR(wrp->rstc);
+ dev_dbg(wrp->dev, "cannot get pwrap reset: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (HAS_CAP(wrp->master->caps, PWRAP_CAP_BRIDGE)) {
+ wrp->bridge_base = devm_platform_ioremap_resource_byname(pdev, "pwrap-bridge");
+ if (IS_ERR(wrp->bridge_base))
+ return PTR_ERR(wrp->bridge_base);
+
+ wrp->rstc_bridge = devm_reset_control_get(wrp->dev,
+ "pwrap-bridge");
+ if (IS_ERR(wrp->rstc_bridge)) {
+ ret = PTR_ERR(wrp->rstc_bridge);
+ dev_dbg(wrp->dev,
+ "cannot get pwrap-bridge reset: %d\n", ret);
+ return ret;
+ }
+ }
+
+ wrp->clk_spi = devm_clk_get(wrp->dev, "spi");
+ if (IS_ERR(wrp->clk_spi)) {
+ dev_dbg(wrp->dev, "failed to get clock: %ld\n",
+ PTR_ERR(wrp->clk_spi));
+ return PTR_ERR(wrp->clk_spi);
+ }
+
+ wrp->clk_wrap = devm_clk_get(wrp->dev, "wrap");
+ if (IS_ERR(wrp->clk_wrap)) {
+ dev_dbg(wrp->dev, "failed to get clock: %ld\n",
+ PTR_ERR(wrp->clk_wrap));
+ return PTR_ERR(wrp->clk_wrap);
+ }
+
+ wrp->clk_sys = devm_clk_get_optional(wrp->dev, "sys");
+ if (IS_ERR(wrp->clk_sys)) {
+ return dev_err_probe(wrp->dev, PTR_ERR(wrp->clk_sys),
+ "failed to get clock: %pe\n",
+ wrp->clk_sys);
+ }
+
+ wrp->clk_tmr = devm_clk_get_optional(wrp->dev, "tmr");
+ if (IS_ERR(wrp->clk_tmr)) {
+ return dev_err_probe(wrp->dev, PTR_ERR(wrp->clk_tmr),
+ "failed to get clock: %pe\n",
+ wrp->clk_tmr);
+ }
+
+ ret = clk_prepare_enable(wrp->clk_spi);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(wrp->clk_wrap);
+ if (ret)
+ goto err_out1;
+
+ ret = clk_prepare_enable(wrp->clk_sys);
+ if (ret)
+ goto err_out2;
+
+ ret = clk_prepare_enable(wrp->clk_tmr);
+ if (ret)
+ goto err_out3;
+
+ /* Enable internal dynamic clock */
+ if (HAS_CAP(wrp->master->caps, PWRAP_CAP_DCM)) {
+ pwrap_writel(wrp, 1, PWRAP_DCM_EN);
+ pwrap_writel(wrp, 0, PWRAP_DCM_DBC_PRD);
+ }
+
+ /*
+ * The PMIC could already be initialized by the bootloader.
+ * Skip initialization here in this case.
+ */
+ if (!pwrap_readl(wrp, PWRAP_INIT_DONE2)) {
+ ret = pwrap_init(wrp);
+ if (ret) {
+ dev_dbg(wrp->dev, "init failed with %d\n", ret);
+ goto err_out4;
+ }
+ }
+
+ if (HAS_CAP(wrp->master->caps, PWRAP_CAP_ARB))
+ mask_done = PWRAP_STATE_INIT_DONE1;
+ else if (HAS_CAP(wrp->master->caps, PWRAP_CAP_ARB_MT8186))
+ mask_done = PWRAP_STATE_INIT_DONE0_MT8186;
+ else
+ mask_done = PWRAP_STATE_INIT_DONE0;
+
+ if (!(pwrap_readl(wrp, PWRAP_WACS2_RDATA) & mask_done)) {
+ dev_dbg(wrp->dev, "initialization isn't finished\n");
+ ret = -ENODEV;
+ goto err_out4;
+ }
+
+ /* Initialize watchdog, may not be done by the bootloader */
+ if (!HAS_CAP(wrp->master->caps, PWRAP_CAP_ARB))
+ pwrap_writel(wrp, 0xf, PWRAP_WDT_UNIT);
+
+ /*
+ * STAUPD is not used on the mt8173 platform, so the STAUPD bits of
+ * WDT_SRC are turned off there.
+ */
+ pwrap_writel(wrp, wrp->master->wdt_src, PWRAP_WDT_SRC_EN);
+ if (HAS_CAP(wrp->master->caps, PWRAP_CAP_WDT_SRC1))
+ pwrap_writel(wrp, wrp->master->wdt_src, PWRAP_WDT_SRC_EN_1);
+
+ if (HAS_CAP(wrp->master->caps, PWRAP_CAP_ARB))
+ pwrap_writel(wrp, 0x3, PWRAP_TIMER_EN);
+ else
+ pwrap_writel(wrp, 0x1, PWRAP_TIMER_EN);
+
+ pwrap_writel(wrp, wrp->master->int_en_all, PWRAP_INT_EN);
+ /*
+ * The INT1 interrupt handles starvation and request exceptions.
+ * Enable it here on platforms that support it.
+ */
+ if (HAS_CAP(wrp->master->caps, PWRAP_CAP_INT1_EN))
+ pwrap_writel(wrp, wrp->master->int1_en_all, PWRAP_INT1_EN);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto err_out4;
+ }
+
+ ret = devm_request_irq(wrp->dev, irq, pwrap_interrupt,
+ IRQF_TRIGGER_HIGH,
+ "mt-pmic-pwrap", wrp);
+ if (ret)
+ goto err_out4;
+
+ wrp->regmap = devm_regmap_init(wrp->dev, NULL, wrp, wrp->slave->regops->regmap);
+ if (IS_ERR(wrp->regmap)) {
+ ret = PTR_ERR(wrp->regmap);
+ goto err_out4;
+ }
+
+ ret = of_platform_populate(np, NULL, NULL, wrp->dev);
+ if (ret) {
+ dev_dbg(wrp->dev, "failed to create child devices at %pOF\n",
+ np);
+ goto err_out4;
+ }
+
+ return 0;
+
+err_out4:
+ clk_disable_unprepare(wrp->clk_tmr);
+err_out3:
+ clk_disable_unprepare(wrp->clk_sys);
+err_out2:
+ clk_disable_unprepare(wrp->clk_wrap);
+err_out1:
+ clk_disable_unprepare(wrp->clk_spi);
+
+ return ret;
+}
+
+static struct platform_driver pwrap_drv = {
+ .driver = {
+ .name = "mt-pmic-pwrap",
+ .of_match_table = of_pwrap_match_tbl,
+ },
+ .probe = pwrap_probe,
+};
+
+module_platform_driver(pwrap_drv);
+
+MODULE_AUTHOR("Flora Fu, MediaTek");
+MODULE_DESCRIPTION("MediaTek MT8135 PMIC Wrapper Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/mediatek/mtk-regulator-coupler.c b/drivers/soc/mediatek/mtk-regulator-coupler.c
new file mode 100644
index 0000000000..ad2ed42aa6
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-regulator-coupler.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Voltage regulators coupler for MediaTek SoCs
+ *
+ * Copyright (C) 2022 Collabora, Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/regulator/coupler.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/suspend.h>
+
+#define to_mediatek_coupler(x) container_of(x, struct mediatek_regulator_coupler, coupler)
+
+struct mediatek_regulator_coupler {
+ struct regulator_coupler coupler;
+ struct regulator_dev *vsram_rdev;
+};
+
+/*
+ * We currently support only couples of at most two vregs and modify the
+ * vsram voltage only when the vgpu voltage changes.
+ *
+ * This function is limited to the GPU<->SRAM voltage relationship.
+ */
+static int mediatek_regulator_balance_voltage(struct regulator_coupler *coupler,
+ struct regulator_dev *rdev,
+ suspend_state_t state)
+{
+ struct mediatek_regulator_coupler *mrc = to_mediatek_coupler(coupler);
+ int max_spread = rdev->constraints->max_spread[0];
+ int vsram_min_uV = mrc->vsram_rdev->constraints->min_uV;
+ int vsram_max_uV = mrc->vsram_rdev->constraints->max_uV;
+ int vsram_target_min_uV, vsram_target_max_uV;
+ int min_uV = 0;
+ int max_uV = INT_MAX;
+ int ret;
+
+ /*
+ * If the target device is on, setting the SRAM voltage directly
+ * is not supported as it scales through its coupled supply voltage.
+ *
+ * An exception is made in case the use_count is zero: this means
+ * that this is the first time we power up the SRAM regulator, which
+ * implies that the target device has yet to perform initialization
+ * and setting a voltage at that time is harmless.
+ */
+ if (rdev == mrc->vsram_rdev) {
+ if (rdev->use_count == 0)
+ return regulator_do_balance_voltage(rdev, state, true);
+
+ return -EPERM;
+ }
+
+ ret = regulator_check_consumers(rdev, &min_uV, &max_uV, state);
+ if (ret < 0)
+ return ret;
+
+ if (min_uV == 0) {
+ ret = regulator_get_voltage_rdev(rdev);
+ if (ret < 0)
+ return ret;
+ min_uV = ret;
+ }
+
+ ret = regulator_check_voltage(rdev, &min_uV, &max_uV);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * If we're asked to set a voltage less than VSRAM min_uV, set
+ * the minimum allowed voltage on VSRAM, as in this case it is
+ * safe to ignore the max_spread parameter.
+ */
+ vsram_target_min_uV = max(vsram_min_uV, min_uV + max_spread);
+ vsram_target_max_uV = min(vsram_max_uV, vsram_target_min_uV + max_spread);
+
+ /* Make sure we're not out of range */
+ vsram_target_min_uV = min(vsram_target_min_uV, vsram_max_uV);
+
+ pr_debug("Setting voltage %d-%duV on %s (minuV %d)\n",
+ vsram_target_min_uV, vsram_target_max_uV,
+ rdev_get_name(mrc->vsram_rdev), min_uV);
+
+ ret = regulator_set_voltage_rdev(mrc->vsram_rdev, vsram_target_min_uV,
+ vsram_target_max_uV, state);
+ if (ret)
+ return ret;
+
+ /* The sram voltage is now balanced: update the target vreg voltage */
+ return regulator_do_balance_voltage(rdev, state, true);
+}
+
+static int mediatek_regulator_attach(struct regulator_coupler *coupler,
+ struct regulator_dev *rdev)
+{
+ struct mediatek_regulator_coupler *mrc = to_mediatek_coupler(coupler);
+ const char *rdev_name = rdev_get_name(rdev);
+
+ /*
+ * If we're getting a coupling of more than two regulators here, this is
+ * surely not a GPU<->SRAM couple: in that case, another coupler
+ * implementation, if any, or the generic one should handle it. The
+ * regulator core keeps walking through the list of couplers as long as
+ * the .attach_regulator() callbacks return 1.
+ */
+ if (rdev->coupling_desc.n_coupled > 2)
+ return 1;
+
+ if (strstr(rdev_name, "sram")) {
+ if (mrc->vsram_rdev)
+ return -EINVAL;
+ mrc->vsram_rdev = rdev;
+ } else if (!strstr(rdev_name, "vgpu") && !strstr(rdev_name, "Vgpu")) {
+ return 1;
+ }
+
+ return 0;
+}
+
+static int mediatek_regulator_detach(struct regulator_coupler *coupler,
+ struct regulator_dev *rdev)
+{
+ struct mediatek_regulator_coupler *mrc = to_mediatek_coupler(coupler);
+
+ if (rdev == mrc->vsram_rdev)
+ mrc->vsram_rdev = NULL;
+
+ return 0;
+}
+
+static struct mediatek_regulator_coupler mediatek_coupler = {
+ .coupler = {
+ .attach_regulator = mediatek_regulator_attach,
+ .detach_regulator = mediatek_regulator_detach,
+ .balance_voltage = mediatek_regulator_balance_voltage,
+ },
+};
+
+static int mediatek_regulator_coupler_init(void)
+{
+ if (!of_machine_is_compatible("mediatek,mt8183") &&
+ !of_machine_is_compatible("mediatek,mt8186") &&
+ !of_machine_is_compatible("mediatek,mt8192"))
+ return 0;
+
+ return regulator_coupler_register(&mediatek_coupler.coupler);
+}
+arch_initcall(mediatek_regulator_coupler_init);
+
+MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>");
+MODULE_DESCRIPTION("MediaTek Regulator Coupler driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/mediatek/mtk-svs.c b/drivers/soc/mediatek/mtk-svs.c
new file mode 100644
index 0000000000..3a2f97cd52
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-svs.c
@@ -0,0 +1,2434 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/cpu.h>
+#include <linux/cpuidle.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/thermal.h>
+
+/* svs bank 1-line software id */
+#define SVSB_CPU_LITTLE BIT(0)
+#define SVSB_CPU_BIG BIT(1)
+#define SVSB_CCI BIT(2)
+#define SVSB_GPU BIT(3)
+
+/* svs bank 2-line type */
+#define SVSB_LOW BIT(8)
+#define SVSB_HIGH BIT(9)
+
+/* svs bank mode support */
+#define SVSB_MODE_ALL_DISABLE 0
+#define SVSB_MODE_INIT01 BIT(1)
+#define SVSB_MODE_INIT02 BIT(2)
+#define SVSB_MODE_MON BIT(3)
+
+/* svs bank volt flags */
+#define SVSB_INIT01_PD_REQ BIT(0)
+#define SVSB_INIT01_VOLT_IGNORE BIT(1)
+#define SVSB_INIT01_VOLT_INC_ONLY BIT(2)
+#define SVSB_MON_VOLT_IGNORE BIT(16)
+#define SVSB_REMOVE_DVTFIXED_VOLT BIT(24)
+
+/* svs bank register fields and common configuration */
+#define SVSB_PTPCONFIG_DETMAX GENMASK(15, 0)
+#define SVSB_DET_MAX FIELD_PREP(SVSB_PTPCONFIG_DETMAX, 0xffff)
+#define SVSB_DET_WINDOW 0xa28
+
+/* DESCHAR */
+#define SVSB_DESCHAR_FLD_MDES GENMASK(7, 0)
+#define SVSB_DESCHAR_FLD_BDES GENMASK(15, 8)
+
+/* TEMPCHAR */
+#define SVSB_TEMPCHAR_FLD_DVT_FIXED GENMASK(7, 0)
+#define SVSB_TEMPCHAR_FLD_MTDES GENMASK(15, 8)
+#define SVSB_TEMPCHAR_FLD_VCO GENMASK(23, 16)
+
+/* DETCHAR */
+#define SVSB_DETCHAR_FLD_DCMDET GENMASK(7, 0)
+#define SVSB_DETCHAR_FLD_DCBDET GENMASK(15, 8)
+
+/* SVSEN (PTPEN) */
+#define SVSB_PTPEN_INIT01 BIT(0)
+#define SVSB_PTPEN_MON BIT(1)
+#define SVSB_PTPEN_INIT02 (SVSB_PTPEN_INIT01 | BIT(2))
+#define SVSB_PTPEN_OFF 0x0
+
+/* FREQPCTS */
+#define SVSB_FREQPCTS_FLD_PCT0_4 GENMASK(7, 0)
+#define SVSB_FREQPCTS_FLD_PCT1_5 GENMASK(15, 8)
+#define SVSB_FREQPCTS_FLD_PCT2_6 GENMASK(23, 16)
+#define SVSB_FREQPCTS_FLD_PCT3_7 GENMASK(31, 24)
+
+/* INTSTS */
+#define SVSB_INTSTS_VAL_CLEAN 0x00ffffff
+#define SVSB_INTSTS_F0_COMPLETE BIT(0)
+#define SVSB_INTSTS_FLD_MONVOP GENMASK(23, 16)
+#define SVSB_RUNCONFIG_DEFAULT 0x80000000
+
+/* LIMITVALS */
+#define SVSB_LIMITVALS_FLD_DTLO GENMASK(7, 0)
+#define SVSB_LIMITVALS_FLD_DTHI GENMASK(15, 8)
+#define SVSB_LIMITVALS_FLD_VMIN GENMASK(23, 16)
+#define SVSB_LIMITVALS_FLD_VMAX GENMASK(31, 24)
+#define SVSB_VAL_DTHI 0x1
+#define SVSB_VAL_DTLO 0xfe
+
+/* INTEN */
+#define SVSB_INTEN_F0EN BIT(0)
+#define SVSB_INTEN_DACK0UPEN BIT(8)
+#define SVSB_INTEN_DC0EN BIT(9)
+#define SVSB_INTEN_DC1EN BIT(10)
+#define SVSB_INTEN_DACK0LOEN BIT(11)
+#define SVSB_INTEN_INITPROD_OVF_EN BIT(12)
+#define SVSB_INTEN_INITSUM_OVF_EN BIT(14)
+#define SVSB_INTEN_MONVOPEN GENMASK(23, 16)
+#define SVSB_INTEN_INIT0x (SVSB_INTEN_F0EN | SVSB_INTEN_DACK0UPEN | \
+ SVSB_INTEN_DC0EN | SVSB_INTEN_DC1EN | \
+ SVSB_INTEN_DACK0LOEN | \
+ SVSB_INTEN_INITPROD_OVF_EN | \
+ SVSB_INTEN_INITSUM_OVF_EN)
+
+/* TSCALCS */
+#define SVSB_TSCALCS_FLD_MTS GENMASK(11, 0)
+#define SVSB_TSCALCS_FLD_BTS GENMASK(23, 12)
+
+/* INIT2VALS */
+#define SVSB_INIT2VALS_FLD_DCVOFFSETIN GENMASK(15, 0)
+#define SVSB_INIT2VALS_FLD_AGEVOFFSETIN GENMASK(31, 16)
+
+/* VOPS */
+#define SVSB_VOPS_FLD_VOP0_4 GENMASK(7, 0)
+#define SVSB_VOPS_FLD_VOP1_5 GENMASK(15, 8)
+#define SVSB_VOPS_FLD_VOP2_6 GENMASK(23, 16)
+#define SVSB_VOPS_FLD_VOP3_7 GENMASK(31, 24)
+
+/* svs bank related setting */
+#define BITS8 8
+#define MAX_OPP_ENTRIES 16
+#define REG_BYTES 4
+#define SVSB_DC_SIGNED_BIT BIT(15)
+#define SVSB_DET_CLK_EN BIT(31)
+#define SVSB_TEMP_LOWER_BOUND 0xb2
+#define SVSB_TEMP_UPPER_BOUND 0x64
+
+static DEFINE_SPINLOCK(svs_lock);
+
+#ifdef CONFIG_DEBUG_FS
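+/*
+ * Helpers generating the debugfs file_operations for a node "name": the ro
+ * variant wraps svs_<name>_debug_show(), the rw variant additionally hooks
+ * svs_<name>_debug_write(); svs_dentry_data() builds the matching entry.
+ */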
+#define debug_fops_ro(name) \
+ static int svs_##name##_debug_open(struct inode *inode, \
+ struct file *filp) \
+ { \
+ return single_open(filp, svs_##name##_debug_show, \
+ inode->i_private); \
+ } \
+ static const struct file_operations svs_##name##_debug_fops = { \
+ .owner = THIS_MODULE, \
+ .open = svs_##name##_debug_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+ }
+
+#define debug_fops_rw(name) \
+ static int svs_##name##_debug_open(struct inode *inode, \
+ struct file *filp) \
+ { \
+ return single_open(filp, svs_##name##_debug_show, \
+ inode->i_private); \
+ } \
+ static const struct file_operations svs_##name##_debug_fops = { \
+ .owner = THIS_MODULE, \
+ .open = svs_##name##_debug_open, \
+ .read = seq_read, \
+ .write = svs_##name##_debug_write, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+ }
+
+#define svs_dentry_data(name) {__stringify(name), &svs_##name##_debug_fops}
+#endif
+
+/**
+ * enum svsb_phase - svs bank phase enumeration
+ * @SVSB_PHASE_ERROR: svs bank encounters unexpected condition
+ * @SVSB_PHASE_INIT01: svs bank basic init for data calibration
+ * @SVSB_PHASE_INIT02: svs bank can provide voltages to opp table
+ * @SVSB_PHASE_MON: svs bank can provide voltages with thermal effect
+ * @SVSB_PHASE_MAX: total number of svs bank phase (debug purpose)
+ *
+ * Each svs bank has its own independent phase and is enabled by running its
+ * phases in order. However, when an svs bank encounters an unexpected
+ * condition, it fires an irq (PHASE_ERROR) to inform the svs software.
+ *
+ * svs bank general phase-enabled order:
+ * SVSB_PHASE_INIT01 -> SVSB_PHASE_INIT02 -> SVSB_PHASE_MON
+ */
+enum svsb_phase {
+ SVSB_PHASE_ERROR = 0,
+ SVSB_PHASE_INIT01,
+ SVSB_PHASE_INIT02,
+ SVSB_PHASE_MON,
+ SVSB_PHASE_MAX,
+};
+
+enum svs_reg_index {
+ DESCHAR = 0,
+ TEMPCHAR,
+ DETCHAR,
+ AGECHAR,
+ DCCONFIG,
+ AGECONFIG,
+ FREQPCT30,
+ FREQPCT74,
+ LIMITVALS,
+ VBOOT,
+ DETWINDOW,
+ CONFIG,
+ TSCALCS,
+ RUNCONFIG,
+ SVSEN,
+ INIT2VALS,
+ DCVALUES,
+ AGEVALUES,
+ VOP30,
+ VOP74,
+ TEMP,
+ INTSTS,
+ INTSTSRAW,
+ INTEN,
+ CHKINT,
+ CHKSHIFT,
+ STATUS,
+ VDESIGN30,
+ VDESIGN74,
+ DVT30,
+ DVT74,
+ AGECOUNT,
+ SMSTATE0,
+ SMSTATE1,
+ CTL0,
+ DESDETSEC,
+ TEMPAGESEC,
+ CTRLSPARE0,
+ CTRLSPARE1,
+ CTRLSPARE2,
+ CTRLSPARE3,
+ CORESEL,
+ THERMINTST,
+ INTST,
+ THSTAGE0ST,
+ THSTAGE1ST,
+ THSTAGE2ST,
+ THAHBST0,
+ THAHBST1,
+ SPARE0,
+ SPARE1,
+ SPARE2,
+ SPARE3,
+ THSLPEVEB,
+ SVS_REG_MAX,
+};
+
+static const u32 svs_regs_v2[] = {
+ [DESCHAR] = 0xc00,
+ [TEMPCHAR] = 0xc04,
+ [DETCHAR] = 0xc08,
+ [AGECHAR] = 0xc0c,
+ [DCCONFIG] = 0xc10,
+ [AGECONFIG] = 0xc14,
+ [FREQPCT30] = 0xc18,
+ [FREQPCT74] = 0xc1c,
+ [LIMITVALS] = 0xc20,
+ [VBOOT] = 0xc24,
+ [DETWINDOW] = 0xc28,
+ [CONFIG] = 0xc2c,
+ [TSCALCS] = 0xc30,
+ [RUNCONFIG] = 0xc34,
+ [SVSEN] = 0xc38,
+ [INIT2VALS] = 0xc3c,
+ [DCVALUES] = 0xc40,
+ [AGEVALUES] = 0xc44,
+ [VOP30] = 0xc48,
+ [VOP74] = 0xc4c,
+ [TEMP] = 0xc50,
+ [INTSTS] = 0xc54,
+ [INTSTSRAW] = 0xc58,
+ [INTEN] = 0xc5c,
+ [CHKINT] = 0xc60,
+ [CHKSHIFT] = 0xc64,
+ [STATUS] = 0xc68,
+ [VDESIGN30] = 0xc6c,
+ [VDESIGN74] = 0xc70,
+ [DVT30] = 0xc74,
+ [DVT74] = 0xc78,
+ [AGECOUNT] = 0xc7c,
+ [SMSTATE0] = 0xc80,
+ [SMSTATE1] = 0xc84,
+ [CTL0] = 0xc88,
+ [DESDETSEC] = 0xce0,
+ [TEMPAGESEC] = 0xce4,
+ [CTRLSPARE0] = 0xcf0,
+ [CTRLSPARE1] = 0xcf4,
+ [CTRLSPARE2] = 0xcf8,
+ [CTRLSPARE3] = 0xcfc,
+ [CORESEL] = 0xf00,
+ [THERMINTST] = 0xf04,
+ [INTST] = 0xf08,
+ [THSTAGE0ST] = 0xf0c,
+ [THSTAGE1ST] = 0xf10,
+ [THSTAGE2ST] = 0xf14,
+ [THAHBST0] = 0xf18,
+ [THAHBST1] = 0xf1c,
+ [SPARE0] = 0xf20,
+ [SPARE1] = 0xf24,
+ [SPARE2] = 0xf28,
+ [SPARE3] = 0xf2c,
+ [THSLPEVEB] = 0xf30,
+};
+
+/**
+ * struct svs_platform - svs platform control
+ * @base: svs platform register base
+ * @dev: svs platform device
+ * @main_clk: main clock for svs bank
+ * @pbank: svs bank pointer; accesses must be protected by the svs_lock spinlock
+ * @banks: svs banks that svs platform supports
+ * @rst: svs platform reset control
+ * @efuse_max: total number of svs efuse
+ * @tefuse_max: total number of thermal efuse
+ * @regs: svs platform registers map
+ * @bank_max: total number of svs banks
+ * @efuse: svs efuse data received from NVMEM framework
+ * @tefuse: thermal efuse data received from NVMEM framework
+ */
+struct svs_platform {
+ void __iomem *base;
+ struct device *dev;
+ struct clk *main_clk;
+ struct svs_bank *pbank;
+ struct svs_bank *banks;
+ struct reset_control *rst;
+ size_t efuse_max;
+ size_t tefuse_max;
+ const u32 *regs;
+ u32 bank_max;
+ u32 *efuse;
+ u32 *tefuse;
+};
+
+struct svs_platform_data {
+ char *name;
+ struct svs_bank *banks;
+ bool (*efuse_parsing)(struct svs_platform *svsp);
+ int (*probe)(struct svs_platform *svsp);
+ const u32 *regs;
+ u32 bank_max;
+};
+
+/**
+ * struct svs_bank - svs bank representation
+ * @dev: bank device
+ * @opp_dev: device for opp table/buck control
+ * @init_completion: the timeout completion for bank init
+ * @buck: regulator used by opp_dev
+ * @tzd: thermal zone device for getting temperature
+ * @lock: mutex lock to protect voltage update process
+ * @set_freq_pct: function pointer to set bank frequency percent table
+ * @get_volts: function pointer to get bank voltages
+ * @name: bank name
+ * @buck_name: regulator name
+ * @tzone_name: thermal zone name
+ * @phase: bank current phase
+ * @volt_od: bank voltage overdrive
+ * @reg_data: bank register data in different phase for debug purpose
+ * @pm_runtime_enabled_count: bank pm runtime enabled count
+ * @mode_support: bank mode support.
+ * @freq_base: reference frequency for bank init
+ * @turn_freq_base: reference frequency for 2-line turn point
+ * @vboot: voltage request for bank init01 only
+ * @opp_dfreq: default opp frequency table
+ * @opp_dvolt: default opp voltage table
+ * @freq_pct: frequency percent table for bank init
+ * @volt: bank voltage table
+ * @volt_step: bank voltage step
+ * @volt_base: bank voltage base
+ * @volt_flags: bank voltage flags
+ * @vmax: bank voltage maximum
+ * @vmin: bank voltage minimum
+ * @age_config: bank age configuration
+ * @age_voffset_in: bank age voltage offset
+ * @dc_config: bank dc configuration
+ * @dc_voffset_in: bank dc voltage offset
+ * @dvt_fixed: bank dvt fixed value
+ * @vco: bank VCO value
+ * @chk_shift: bank chicken shift
+ * @core_sel: bank selection
+ * @opp_count: bank opp count
+ * @int_st: bank interrupt identification
+ * @sw_id: bank software identification
+ * @cpu_id: cpu core id for SVS CPU bank use only
+ * @ctl0: TS-x selection
+ * @temp: bank temperature
+ * @tzone_htemp: thermal zone high temperature threshold
+ * @tzone_htemp_voffset: thermal zone high temperature voltage offset
+ * @tzone_ltemp: thermal zone low temperature threshold
+ * @tzone_ltemp_voffset: thermal zone low temperature voltage offset
+ * @bts: svs efuse data
+ * @mts: svs efuse data
+ * @bdes: svs efuse data
+ * @mdes: svs efuse data
+ * @mtdes: svs efuse data
+ * @dcbdet: svs efuse data
+ * @dcmdet: svs efuse data
+ * @turn_pt: 2-line turn point tells which opp_volt calculated by high/low bank
+ * @type: bank type to represent it is 2-line (high/low) bank or 1-line bank
+ *
+ * The svs bank generates suitable voltages according to the general
+ * equation below and provides them to the opp voltage table.
+ *
+ * opp_volt[i] = (volt[i] * volt_step) + volt_base;
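+ *
+ * For example (illustrative numbers, not tied to any particular bank):
+ * volt[i] = 0x30, volt_step = 6250 uV and volt_base = 500000 uV give
+ * opp_volt[i] = 800000 uV.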
+ */
+struct svs_bank {
+ struct device *dev;
+ struct device *opp_dev;
+ struct completion init_completion;
+ struct regulator *buck;
+ struct thermal_zone_device *tzd;
+ struct mutex lock; /* lock to protect voltage update process */
+ void (*set_freq_pct)(struct svs_platform *svsp);
+ void (*get_volts)(struct svs_platform *svsp);
+ char *name;
+ char *buck_name;
+ char *tzone_name;
+ enum svsb_phase phase;
+ s32 volt_od;
+ u32 reg_data[SVSB_PHASE_MAX][SVS_REG_MAX];
+ u32 pm_runtime_enabled_count;
+ u32 mode_support;
+ u32 freq_base;
+ u32 turn_freq_base;
+ u32 vboot;
+ u32 opp_dfreq[MAX_OPP_ENTRIES];
+ u32 opp_dvolt[MAX_OPP_ENTRIES];
+ u32 freq_pct[MAX_OPP_ENTRIES];
+ u32 volt[MAX_OPP_ENTRIES];
+ u32 volt_step;
+ u32 volt_base;
+ u32 volt_flags;
+ u32 vmax;
+ u32 vmin;
+ u32 age_config;
+ u32 age_voffset_in;
+ u32 dc_config;
+ u32 dc_voffset_in;
+ u32 dvt_fixed;
+ u32 vco;
+ u32 chk_shift;
+ u32 core_sel;
+ u32 opp_count;
+ u32 int_st;
+ u32 sw_id;
+ u32 cpu_id;
+ u32 ctl0;
+ u32 temp;
+ u32 tzone_htemp;
+ u32 tzone_htemp_voffset;
+ u32 tzone_ltemp;
+ u32 tzone_ltemp_voffset;
+ u32 bts;
+ u32 mts;
+ u32 bdes;
+ u32 mdes;
+ u32 mtdes;
+ u32 dcbdet;
+ u32 dcmdet;
+ u32 turn_pt;
+ u32 type;
+};
+
+static u32 percent(u32 numerator, u32 denominator)
+{
+ /* Pre-divide by 1000 so that "numerator * 100" cannot overflow. */
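+ /* For example, percent(1500000000, 2000000000) returns 75. */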
+ numerator /= 1000;
+ denominator /= 1000;
+
+ return DIV_ROUND_UP(numerator * 100, denominator);
+}
+
+static u32 svs_readl_relaxed(struct svs_platform *svsp, enum svs_reg_index rg_i)
+{
+ return readl_relaxed(svsp->base + svsp->regs[rg_i]);
+}
+
+static void svs_writel_relaxed(struct svs_platform *svsp, u32 val,
+ enum svs_reg_index rg_i)
+{
+ writel_relaxed(val, svsp->base + svsp->regs[rg_i]);
+}
+
+static void svs_switch_bank(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb = svsp->pbank;
+
+ svs_writel_relaxed(svsp, svsb->core_sel, CORESEL);
+}
+
+static u32 svs_bank_volt_to_opp_volt(u32 svsb_volt, u32 svsb_volt_step,
+ u32 svsb_volt_base)
+{
+ return (svsb_volt * svsb_volt_step) + svsb_volt_base;
+}
+
+static u32 svs_opp_volt_to_bank_volt(u32 opp_u_volt, u32 svsb_volt_step,
+ u32 svsb_volt_base)
+{
+ return (opp_u_volt - svsb_volt_base) / svsb_volt_step;
+}
+
+static int svs_sync_bank_volts_from_opp(struct svs_bank *svsb)
+{
+ struct dev_pm_opp *opp;
+ u32 i, opp_u_volt;
+
+ for (i = 0; i < svsb->opp_count; i++) {
+ opp = dev_pm_opp_find_freq_exact(svsb->opp_dev,
+ svsb->opp_dfreq[i],
+ true);
+ if (IS_ERR(opp)) {
+ dev_err(svsb->dev, "cannot find freq = %u (%ld)\n",
+ svsb->opp_dfreq[i], PTR_ERR(opp));
+ return PTR_ERR(opp);
+ }
+
+ opp_u_volt = dev_pm_opp_get_voltage(opp);
+ svsb->volt[i] = svs_opp_volt_to_bank_volt(opp_u_volt,
+ svsb->volt_step,
+ svsb->volt_base);
+ dev_pm_opp_put(opp);
+ }
+
+ return 0;
+}
+
+static int svs_adjust_pm_opp_volts(struct svs_bank *svsb)
+{
+ int ret = -EPERM, tzone_temp = 0;
+ u32 i, svsb_volt, opp_volt, temp_voffset = 0, opp_start, opp_stop;
+
+ mutex_lock(&svsb->lock);
+
+ /*
+ * 2-line bank updates its corresponding opp volts.
+ * 1-line bank updates all opp volts.
+ */
+ if (svsb->type == SVSB_HIGH) {
+ opp_start = 0;
+ opp_stop = svsb->turn_pt;
+ } else if (svsb->type == SVSB_LOW) {
+ opp_start = svsb->turn_pt;
+ opp_stop = svsb->opp_count;
+ } else {
+ opp_start = 0;
+ opp_stop = svsb->opp_count;
+ }
+
+ /* Get thermal effect */
+ if (!IS_ERR_OR_NULL(svsb->tzd)) {
+ ret = thermal_zone_get_temp(svsb->tzd, &tzone_temp);
+ if (ret || (svsb->temp > SVSB_TEMP_UPPER_BOUND &&
+ svsb->temp < SVSB_TEMP_LOWER_BOUND)) {
+ dev_err(svsb->dev, "%s: %d (0x%x), run default volts\n",
+ svsb->tzone_name, ret, svsb->temp);
+ svsb->phase = SVSB_PHASE_ERROR;
+ }
+
+ if (tzone_temp >= svsb->tzone_htemp)
+ temp_voffset += svsb->tzone_htemp_voffset;
+ else if (tzone_temp <= svsb->tzone_ltemp)
+ temp_voffset += svsb->tzone_ltemp_voffset;
+
+ /* 2-line banks update all opp volts when running in mon mode */
+ if (svsb->phase == SVSB_PHASE_MON && (svsb->type == SVSB_HIGH ||
+ svsb->type == SVSB_LOW)) {
+ opp_start = 0;
+ opp_stop = svsb->opp_count;
+ }
+ }
+
+ /* vmin <= svsb_volt (opp_volt) <= default opp voltage */
+ for (i = opp_start; i < opp_stop; i++) {
+ switch (svsb->phase) {
+ case SVSB_PHASE_ERROR:
+ opp_volt = svsb->opp_dvolt[i];
+ break;
+ case SVSB_PHASE_INIT01:
+ /* do nothing */
+ goto unlock_mutex;
+ case SVSB_PHASE_INIT02:
+ case SVSB_PHASE_MON:
+ svsb_volt = max(svsb->volt[i] + temp_voffset, svsb->vmin);
+ opp_volt = svs_bank_volt_to_opp_volt(svsb_volt,
+ svsb->volt_step,
+ svsb->volt_base);
+ break;
+ default:
+ dev_err(svsb->dev, "unknown phase: %u\n", svsb->phase);
+ ret = -EINVAL;
+ goto unlock_mutex;
+ }
+
+ opp_volt = min(opp_volt, svsb->opp_dvolt[i]);
+ ret = dev_pm_opp_adjust_voltage(svsb->opp_dev,
+ svsb->opp_dfreq[i],
+ opp_volt, opp_volt,
+ svsb->opp_dvolt[i]);
+ if (ret) {
+ dev_err(svsb->dev, "set %uuV fail: %d\n",
+ opp_volt, ret);
+ goto unlock_mutex;
+ }
+ }
+
+unlock_mutex:
+ mutex_unlock(&svsb->lock);
+
+ return ret;
+}
+
+static void svs_bank_disable_and_restore_default_volts(struct svs_platform *svsp,
+ struct svs_bank *svsb)
+{
+ unsigned long flags;
+
+ if (svsb->mode_support == SVSB_MODE_ALL_DISABLE)
+ return;
+
+ spin_lock_irqsave(&svs_lock, flags);
+ svsp->pbank = svsb;
+ svs_switch_bank(svsp);
+ svs_writel_relaxed(svsp, SVSB_PTPEN_OFF, SVSEN);
+ svs_writel_relaxed(svsp, SVSB_INTSTS_VAL_CLEAN, INTSTS);
+ spin_unlock_irqrestore(&svs_lock, flags);
+
+ svsb->phase = SVSB_PHASE_ERROR;
+ svs_adjust_pm_opp_volts(svsb);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int svs_dump_debug_show(struct seq_file *m, void *p)
+{
+ struct svs_platform *svsp = (struct svs_platform *)m->private;
+ struct svs_bank *svsb;
+ unsigned long svs_reg_addr;
+ u32 idx, i, j, bank_id;
+
+ for (i = 0; i < svsp->efuse_max; i++)
+ if (svsp->efuse && svsp->efuse[i])
+ seq_printf(m, "M_HW_RES%d = 0x%08x\n",
+ i, svsp->efuse[i]);
+
+ for (i = 0; i < svsp->tefuse_max; i++)
+ if (svsp->tefuse)
+ seq_printf(m, "THERMAL_EFUSE%d = 0x%08x\n",
+ i, svsp->tefuse[i]);
+
+ for (bank_id = 0, idx = 0; idx < svsp->bank_max; idx++, bank_id++) {
+ svsb = &svsp->banks[idx];
+
+ for (i = SVSB_PHASE_INIT01; i <= SVSB_PHASE_MON; i++) {
+ seq_printf(m, "Bank_number = %u\n", bank_id);
+
+ if (i == SVSB_PHASE_INIT01 || i == SVSB_PHASE_INIT02)
+ seq_printf(m, "mode = init%d\n", i);
+ else if (i == SVSB_PHASE_MON)
+ seq_puts(m, "mode = mon\n");
+ else
+ seq_puts(m, "mode = error\n");
+
+ for (j = DESCHAR; j < SVS_REG_MAX; j++) {
+ svs_reg_addr = (unsigned long)(svsp->base +
+ svsp->regs[j]);
+ seq_printf(m, "0x%08lx = 0x%08x\n",
+ svs_reg_addr, svsb->reg_data[i][j]);
+ }
+ }
+ }
+
+ return 0;
+}
+
+debug_fops_ro(dump);
+
+static int svs_enable_debug_show(struct seq_file *m, void *v)
+{
+ struct svs_bank *svsb = (struct svs_bank *)m->private;
+
+ switch (svsb->phase) {
+ case SVSB_PHASE_ERROR:
+ seq_puts(m, "disabled\n");
+ break;
+ case SVSB_PHASE_INIT01:
+ seq_puts(m, "init1\n");
+ break;
+ case SVSB_PHASE_INIT02:
+ seq_puts(m, "init2\n");
+ break;
+ case SVSB_PHASE_MON:
+ seq_puts(m, "mon mode\n");
+ break;
+ default:
+ seq_puts(m, "unknown\n");
+ break;
+ }
+
+ return 0;
+}
+
+static ssize_t svs_enable_debug_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *pos)
+{
+ struct svs_bank *svsb = file_inode(filp)->i_private;
+ struct svs_platform *svsp = dev_get_drvdata(svsb->dev);
+ int enabled, ret;
+ char *buf = NULL;
+
+ if (count >= PAGE_SIZE)
+ return -EINVAL;
+
+ buf = (char *)memdup_user_nul(buffer, count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ ret = kstrtoint(buf, 10, &enabled);
+ if (ret) {
+ kfree(buf);
+ return ret;
+ }
+
+ if (!enabled) {
+ svs_bank_disable_and_restore_default_volts(svsp, svsb);
+ svsb->mode_support = SVSB_MODE_ALL_DISABLE;
+ }
+
+ kfree(buf);
+
+ return count;
+}
+
+debug_fops_rw(enable);
+
+static int svs_status_debug_show(struct seq_file *m, void *v)
+{
+ struct svs_bank *svsb = (struct svs_bank *)m->private;
+ struct dev_pm_opp *opp;
+ int tzone_temp = 0, ret;
+ u32 i;
+
+ ret = thermal_zone_get_temp(svsb->tzd, &tzone_temp);
+ if (ret)
+ seq_printf(m, "%s: temperature ignore, turn_pt = %u\n",
+ svsb->name, svsb->turn_pt);
+ else
+ seq_printf(m, "%s: temperature = %d, turn_pt = %u\n",
+ svsb->name, tzone_temp, svsb->turn_pt);
+
+ for (i = 0; i < svsb->opp_count; i++) {
+ opp = dev_pm_opp_find_freq_exact(svsb->opp_dev,
+ svsb->opp_dfreq[i], true);
+ if (IS_ERR(opp)) {
+ seq_printf(m, "%s: cannot find freq = %u (%ld)\n",
+ svsb->name, svsb->opp_dfreq[i],
+ PTR_ERR(opp));
+ return PTR_ERR(opp);
+ }
+
+ seq_printf(m, "opp_freq[%02u]: %u, opp_volt[%02u]: %lu, ",
+ i, svsb->opp_dfreq[i], i,
+ dev_pm_opp_get_voltage(opp));
+ seq_printf(m, "svsb_volt[%02u]: 0x%x, freq_pct[%02u]: %u\n",
+ i, svsb->volt[i], i, svsb->freq_pct[i]);
+ dev_pm_opp_put(opp);
+ }
+
+ return 0;
+}
+
+debug_fops_ro(status);
+
+static int svs_create_debug_cmds(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb;
+ struct dentry *svs_dir, *svsb_dir, *file_entry;
+ const char *d = "/sys/kernel/debug/svs";
+ u32 i, idx;
+
+ struct svs_dentry {
+ const char *name;
+ const struct file_operations *fops;
+ };
+
+ struct svs_dentry svs_entries[] = {
+ svs_dentry_data(dump),
+ };
+
+ struct svs_dentry svsb_entries[] = {
+ svs_dentry_data(enable),
+ svs_dentry_data(status),
+ };
+
+ svs_dir = debugfs_create_dir("svs", NULL);
+ if (IS_ERR(svs_dir)) {
+ dev_err(svsp->dev, "cannot create %s: %ld\n",
+ d, PTR_ERR(svs_dir));
+ return PTR_ERR(svs_dir);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(svs_entries); i++) {
+ file_entry = debugfs_create_file(svs_entries[i].name, 0664,
+ svs_dir, svsp,
+ svs_entries[i].fops);
+ if (IS_ERR(file_entry)) {
+ dev_err(svsp->dev, "cannot create %s/%s: %ld\n",
+ d, svs_entries[i].name, PTR_ERR(file_entry));
+ return PTR_ERR(file_entry);
+ }
+ }
+
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ if (svsb->mode_support == SVSB_MODE_ALL_DISABLE)
+ continue;
+
+ svsb_dir = debugfs_create_dir(svsb->name, svs_dir);
+ if (IS_ERR(svsb_dir)) {
+ dev_err(svsp->dev, "cannot create %s/%s: %ld\n",
+ d, svsb->name, PTR_ERR(svsb_dir));
+ return PTR_ERR(svsb_dir);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(svsb_entries); i++) {
+ file_entry = debugfs_create_file(svsb_entries[i].name,
+ 0664, svsb_dir, svsb,
+ svsb_entries[i].fops);
+ if (IS_ERR(file_entry)) {
+ dev_err(svsp->dev, "no %s/%s/%s?: %ld\n",
+ d, svsb->name, svsb_entries[i].name,
+ PTR_ERR(file_entry));
+ return PTR_ERR(file_entry);
+ }
+ }
+ }
+
+ return 0;
+}
+#endif /* CONFIG_DEBUG_FS */
+
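+/*
+ * Linearly interpolate the voltage at frequency percentage fx between the
+ * two known points (f0, v0) and (f1, v1). The intermediate math is scaled
+ * by 100 to keep a two-digit fraction through the integer division, and
+ * the result is rounded up.
+ */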
+static u32 interpolate(u32 f0, u32 f1, u32 v0, u32 v1, u32 fx)
+{
+ u32 vx;
+
+ if (v0 == v1 || f0 == f1)
+ return v0;
+
+ /* *100 to have decimal fraction factor */
+ vx = (v0 * 100) - ((((v0 - v1) * 100) / (f0 - f1)) * (f0 - fx));
+
+ return DIV_ROUND_UP(vx, 100);
+}
+
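+/*
+ * VOP30/VOP74 expose up to eight 8-bit voltage codes, so when a 2-line
+ * bank's side of the turn point covers more OPPs than that, the entries
+ * the hardware does not report are filled in by interpolation.
+ */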
+static void svs_get_bank_volts_v3(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb = svsp->pbank;
+ u32 i, j, *vop, vop74, vop30, turn_pt = svsb->turn_pt;
+ u32 b_sft, shift_byte = 0, opp_start = 0, opp_stop = 0;
+ u32 middle_index = (svsb->opp_count / 2);
+
+ if (svsb->phase == SVSB_PHASE_MON &&
+ svsb->volt_flags & SVSB_MON_VOLT_IGNORE)
+ return;
+
+ vop74 = svs_readl_relaxed(svsp, VOP74);
+ vop30 = svs_readl_relaxed(svsp, VOP30);
+
+ /* Target is to set svsb->volt[] by algorithm */
+ if (turn_pt < middle_index) {
+ if (svsb->type == SVSB_HIGH) {
+ /* volt[0] ~ volt[turn_pt - 1] */
+ for (i = 0; i < turn_pt; i++) {
+ b_sft = BITS8 * (shift_byte % REG_BYTES);
+ vop = (shift_byte < REG_BYTES) ? &vop30 :
+ &vop74;
+ svsb->volt[i] = (*vop >> b_sft) & GENMASK(7, 0);
+ shift_byte++;
+ }
+ } else if (svsb->type == SVSB_LOW) {
+ /* volt[turn_pt] + volt[j] ~ volt[opp_count - 1] */
+ j = svsb->opp_count - 7;
+ svsb->volt[turn_pt] = FIELD_GET(SVSB_VOPS_FLD_VOP0_4, vop30);
+ shift_byte++;
+ for (i = j; i < svsb->opp_count; i++) {
+ b_sft = BITS8 * (shift_byte % REG_BYTES);
+ vop = (shift_byte < REG_BYTES) ? &vop30 :
+ &vop74;
+ svsb->volt[i] = (*vop >> b_sft) & GENMASK(7, 0);
+ shift_byte++;
+ }
+
+ /* volt[turn_pt + 1] ~ volt[j - 1] by interpolate */
+ for (i = turn_pt + 1; i < j; i++)
+ svsb->volt[i] = interpolate(svsb->freq_pct[turn_pt],
+ svsb->freq_pct[j],
+ svsb->volt[turn_pt],
+ svsb->volt[j],
+ svsb->freq_pct[i]);
+ }
+ } else {
+ if (svsb->type == SVSB_HIGH) {
+ /* volt[0] + volt[j] ~ volt[turn_pt - 1] */
+ j = turn_pt - 7;
+ svsb->volt[0] = FIELD_GET(SVSB_VOPS_FLD_VOP0_4, vop30);
+ shift_byte++;
+ for (i = j; i < turn_pt; i++) {
+ b_sft = BITS8 * (shift_byte % REG_BYTES);
+ vop = (shift_byte < REG_BYTES) ? &vop30 :
+ &vop74;
+ svsb->volt[i] = (*vop >> b_sft) & GENMASK(7, 0);
+ shift_byte++;
+ }
+
+ /* volt[1] ~ volt[j - 1] by interpolate */
+ for (i = 1; i < j; i++)
+ svsb->volt[i] = interpolate(svsb->freq_pct[0],
+ svsb->freq_pct[j],
+ svsb->volt[0],
+ svsb->volt[j],
+ svsb->freq_pct[i]);
+ } else if (svsb->type == SVSB_LOW) {
+ /* volt[turn_pt] ~ volt[opp_count - 1] */
+ for (i = turn_pt; i < svsb->opp_count; i++) {
+ b_sft = BITS8 * (shift_byte % REG_BYTES);
+ vop = (shift_byte < REG_BYTES) ? &vop30 :
+ &vop74;
+ svsb->volt[i] = (*vop >> b_sft) & GENMASK(7, 0);
+ shift_byte++;
+ }
+ }
+ }
+
+ if (svsb->type == SVSB_HIGH) {
+ opp_start = 0;
+ opp_stop = svsb->turn_pt;
+ } else if (svsb->type == SVSB_LOW) {
+ opp_start = svsb->turn_pt;
+ opp_stop = svsb->opp_count;
+ }
+
+ for (i = opp_start; i < opp_stop; i++)
+ if (svsb->volt_flags & SVSB_REMOVE_DVTFIXED_VOLT)
+ svsb->volt[i] -= svsb->dvt_fixed;
+}
+
+static void svs_set_bank_freq_pct_v3(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb = svsp->pbank;
+ u32 i, j, *freq_pct, freq_pct74 = 0, freq_pct30 = 0;
+ u32 b_sft, shift_byte = 0, turn_pt;
+ u32 middle_index = (svsb->opp_count / 2);
+
+ for (i = 0; i < svsb->opp_count; i++) {
+ if (svsb->opp_dfreq[i] <= svsb->turn_freq_base) {
+ svsb->turn_pt = i;
+ break;
+ }
+ }
+
+ turn_pt = svsb->turn_pt;
+
+ /* Target is to fill out freq_pct74 / freq_pct30 by algorithm */
+ if (turn_pt < middle_index) {
+ if (svsb->type == SVSB_HIGH) {
+ /*
+ * If we don't handle this situation,
+ * SVSB_HIGH's FREQPCT74 / FREQPCT30 would stay "0",
+ * which causes SVSB_LOW to work abnormally.
+ */
+ if (turn_pt == 0)
+ freq_pct30 = svsb->freq_pct[0];
+
+ /* freq_pct[0] ~ freq_pct[turn_pt - 1] */
+ for (i = 0; i < turn_pt; i++) {
+ b_sft = BITS8 * (shift_byte % REG_BYTES);
+ freq_pct = (shift_byte < REG_BYTES) ?
+ &freq_pct30 : &freq_pct74;
+ *freq_pct |= (svsb->freq_pct[i] << b_sft);
+ shift_byte++;
+ }
+ } else if (svsb->type == SVSB_LOW) {
+ /*
+ * freq_pct[turn_pt] +
+ * freq_pct[opp_count - 7] ~ freq_pct[opp_count - 1]
+ */
+ freq_pct30 = svsb->freq_pct[turn_pt];
+ shift_byte++;
+ j = svsb->opp_count - 7;
+ for (i = j; i < svsb->opp_count; i++) {
+ b_sft = BITS8 * (shift_byte % REG_BYTES);
+ freq_pct = (shift_byte < REG_BYTES) ?
+ &freq_pct30 : &freq_pct74;
+ *freq_pct |= (svsb->freq_pct[i] << b_sft);
+ shift_byte++;
+ }
+ }
+ } else {
+ if (svsb->type == SVSB_HIGH) {
+ /*
+ * freq_pct[0] +
+ * freq_pct[turn_pt - 7] ~ freq_pct[turn_pt - 1]
+ */
+ freq_pct30 = svsb->freq_pct[0];
+ shift_byte++;
+ j = turn_pt - 7;
+ for (i = j; i < turn_pt; i++) {
+ b_sft = BITS8 * (shift_byte % REG_BYTES);
+ freq_pct = (shift_byte < REG_BYTES) ?
+ &freq_pct30 : &freq_pct74;
+ *freq_pct |= (svsb->freq_pct[i] << b_sft);
+ shift_byte++;
+ }
+ } else if (svsb->type == SVSB_LOW) {
+ /* freq_pct[turn_pt] ~ freq_pct[opp_count - 1] */
+ for (i = turn_pt; i < svsb->opp_count; i++) {
+ b_sft = BITS8 * (shift_byte % REG_BYTES);
+ freq_pct = (shift_byte < REG_BYTES) ?
+ &freq_pct30 : &freq_pct74;
+ *freq_pct |= (svsb->freq_pct[i] << b_sft);
+ shift_byte++;
+ }
+ }
+ }
+
+ svs_writel_relaxed(svsp, freq_pct74, FREQPCT74);
+ svs_writel_relaxed(svsp, freq_pct30, FREQPCT30);
+}
+
+static void svs_get_bank_volts_v2(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb = svsp->pbank;
+ u32 temp, i;
+
+ temp = svs_readl_relaxed(svsp, VOP74);
+ svsb->volt[14] = FIELD_GET(SVSB_VOPS_FLD_VOP3_7, temp);
+ svsb->volt[12] = FIELD_GET(SVSB_VOPS_FLD_VOP2_6, temp);
+ svsb->volt[10] = FIELD_GET(SVSB_VOPS_FLD_VOP1_5, temp);
+ svsb->volt[8] = FIELD_GET(SVSB_VOPS_FLD_VOP0_4, temp);
+
+ temp = svs_readl_relaxed(svsp, VOP30);
+ svsb->volt[6] = FIELD_GET(SVSB_VOPS_FLD_VOP3_7, temp);
+ svsb->volt[4] = FIELD_GET(SVSB_VOPS_FLD_VOP2_6, temp);
+ svsb->volt[2] = FIELD_GET(SVSB_VOPS_FLD_VOP1_5, temp);
+ svsb->volt[0] = FIELD_GET(SVSB_VOPS_FLD_VOP0_4, temp);
+
+ for (i = 0; i <= 12; i += 2)
+ svsb->volt[i + 1] = interpolate(svsb->freq_pct[i],
+ svsb->freq_pct[i + 2],
+ svsb->volt[i],
+ svsb->volt[i + 2],
+ svsb->freq_pct[i + 1]);
+
+ svsb->volt[15] = interpolate(svsb->freq_pct[12],
+ svsb->freq_pct[14],
+ svsb->volt[12],
+ svsb->volt[14],
+ svsb->freq_pct[15]);
+
+ for (i = 0; i < svsb->opp_count; i++)
+ svsb->volt[i] += svsb->volt_od;
+}
+
+static void svs_set_bank_freq_pct_v2(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb = svsp->pbank;
+ u32 freqpct74_val, freqpct30_val;
+
+ freqpct74_val = FIELD_PREP(SVSB_FREQPCTS_FLD_PCT0_4, svsb->freq_pct[8]) |
+ FIELD_PREP(SVSB_FREQPCTS_FLD_PCT1_5, svsb->freq_pct[10]) |
+ FIELD_PREP(SVSB_FREQPCTS_FLD_PCT2_6, svsb->freq_pct[12]) |
+ FIELD_PREP(SVSB_FREQPCTS_FLD_PCT3_7, svsb->freq_pct[14]);
+
+ freqpct30_val = FIELD_PREP(SVSB_FREQPCTS_FLD_PCT0_4, svsb->freq_pct[0]) |
+ FIELD_PREP(SVSB_FREQPCTS_FLD_PCT1_5, svsb->freq_pct[2]) |
+ FIELD_PREP(SVSB_FREQPCTS_FLD_PCT2_6, svsb->freq_pct[4]) |
+ FIELD_PREP(SVSB_FREQPCTS_FLD_PCT3_7, svsb->freq_pct[6]);
+
+ svs_writel_relaxed(svsp, freqpct74_val, FREQPCT74);
+ svs_writel_relaxed(svsp, freqpct30_val, FREQPCT30);
+}
+
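+/*
+ * Program one bank for the requested phase: select the bank, load its
+ * calibration characteristics and limits, then arm the interrupt and
+ * SVSEN settings for INIT01, INIT02 or MON.
+ */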
+static void svs_set_bank_phase(struct svs_platform *svsp,
+ enum svsb_phase target_phase)
+{
+ struct svs_bank *svsb = svsp->pbank;
+ u32 des_char, temp_char, det_char, limit_vals, init2vals, ts_calcs;
+
+ svs_switch_bank(svsp);
+
+ des_char = FIELD_PREP(SVSB_DESCHAR_FLD_BDES, svsb->bdes) |
+ FIELD_PREP(SVSB_DESCHAR_FLD_MDES, svsb->mdes);
+ svs_writel_relaxed(svsp, des_char, DESCHAR);
+
+ temp_char = FIELD_PREP(SVSB_TEMPCHAR_FLD_VCO, svsb->vco) |
+ FIELD_PREP(SVSB_TEMPCHAR_FLD_MTDES, svsb->mtdes) |
+ FIELD_PREP(SVSB_TEMPCHAR_FLD_DVT_FIXED, svsb->dvt_fixed);
+ svs_writel_relaxed(svsp, temp_char, TEMPCHAR);
+
+ det_char = FIELD_PREP(SVSB_DETCHAR_FLD_DCBDET, svsb->dcbdet) |
+ FIELD_PREP(SVSB_DETCHAR_FLD_DCMDET, svsb->dcmdet);
+ svs_writel_relaxed(svsp, det_char, DETCHAR);
+
+ svs_writel_relaxed(svsp, svsb->dc_config, DCCONFIG);
+ svs_writel_relaxed(svsp, svsb->age_config, AGECONFIG);
+ svs_writel_relaxed(svsp, SVSB_RUNCONFIG_DEFAULT, RUNCONFIG);
+
+ svsb->set_freq_pct(svsp);
+
+ limit_vals = FIELD_PREP(SVSB_LIMITVALS_FLD_DTLO, SVSB_VAL_DTLO) |
+ FIELD_PREP(SVSB_LIMITVALS_FLD_DTHI, SVSB_VAL_DTHI) |
+ FIELD_PREP(SVSB_LIMITVALS_FLD_VMIN, svsb->vmin) |
+ FIELD_PREP(SVSB_LIMITVALS_FLD_VMAX, svsb->vmax);
+ svs_writel_relaxed(svsp, limit_vals, LIMITVALS);
+
+ svs_writel_relaxed(svsp, SVSB_DET_WINDOW, DETWINDOW);
+ svs_writel_relaxed(svsp, SVSB_DET_MAX, CONFIG);
+ svs_writel_relaxed(svsp, svsb->chk_shift, CHKSHIFT);
+ svs_writel_relaxed(svsp, svsb->ctl0, CTL0);
+ svs_writel_relaxed(svsp, SVSB_INTSTS_VAL_CLEAN, INTSTS);
+
+ switch (target_phase) {
+ case SVSB_PHASE_INIT01:
+ svs_writel_relaxed(svsp, svsb->vboot, VBOOT);
+ svs_writel_relaxed(svsp, SVSB_INTEN_INIT0x, INTEN);
+ svs_writel_relaxed(svsp, SVSB_PTPEN_INIT01, SVSEN);
+ break;
+ case SVSB_PHASE_INIT02:
+ init2vals = FIELD_PREP(SVSB_INIT2VALS_FLD_AGEVOFFSETIN, svsb->age_voffset_in) |
+ FIELD_PREP(SVSB_INIT2VALS_FLD_DCVOFFSETIN, svsb->dc_voffset_in);
+ svs_writel_relaxed(svsp, SVSB_INTEN_INIT0x, INTEN);
+ svs_writel_relaxed(svsp, init2vals, INIT2VALS);
+ svs_writel_relaxed(svsp, SVSB_PTPEN_INIT02, SVSEN);
+ break;
+ case SVSB_PHASE_MON:
+ ts_calcs = FIELD_PREP(SVSB_TSCALCS_FLD_BTS, svsb->bts) |
+ FIELD_PREP(SVSB_TSCALCS_FLD_MTS, svsb->mts);
+ svs_writel_relaxed(svsp, ts_calcs, TSCALCS);
+ svs_writel_relaxed(svsp, SVSB_INTEN_MONVOPEN, INTEN);
+ svs_writel_relaxed(svsp, SVSB_PTPEN_MON, SVSEN);
+ break;
+ default:
+ dev_err(svsb->dev, "requested unknown target phase: %u\n",
+ target_phase);
+ break;
+ }
+}
+
+static inline void svs_save_bank_register_data(struct svs_platform *svsp,
+ enum svsb_phase phase)
+{
+ struct svs_bank *svsb = svsp->pbank;
+ enum svs_reg_index rg_i;
+
+ for (rg_i = DESCHAR; rg_i < SVS_REG_MAX; rg_i++)
+ svsb->reg_data[phase][rg_i] = svs_readl_relaxed(svsp, rg_i);
+}
+
+static inline void svs_error_isr_handler(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb = svsp->pbank;
+
+ dev_err(svsb->dev, "%s: CORESEL = 0x%08x\n",
+ __func__, svs_readl_relaxed(svsp, CORESEL));
+ dev_err(svsb->dev, "SVSEN = 0x%08x, INTSTS = 0x%08x\n",
+ svs_readl_relaxed(svsp, SVSEN),
+ svs_readl_relaxed(svsp, INTSTS));
+ dev_err(svsb->dev, "SMSTATE0 = 0x%08x, SMSTATE1 = 0x%08x\n",
+ svs_readl_relaxed(svsp, SMSTATE0),
+ svs_readl_relaxed(svsp, SMSTATE1));
+ dev_err(svsb->dev, "TEMP = 0x%08x\n", svs_readl_relaxed(svsp, TEMP));
+
+ svs_save_bank_register_data(svsp, SVSB_PHASE_ERROR);
+
+ svsb->phase = SVSB_PHASE_ERROR;
+ svs_writel_relaxed(svsp, SVSB_PTPEN_OFF, SVSEN);
+ svs_writel_relaxed(svsp, SVSB_INTSTS_VAL_CLEAN, INTSTS);
+}
+
+static inline void svs_init01_isr_handler(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb = svsp->pbank;
+
+ dev_info(svsb->dev, "%s: VDN74~30:0x%08x~0x%08x, DC:0x%08x\n",
+ __func__, svs_readl_relaxed(svsp, VDESIGN74),
+ svs_readl_relaxed(svsp, VDESIGN30),
+ svs_readl_relaxed(svsp, DCVALUES));
+
+ svs_save_bank_register_data(svsp, SVSB_PHASE_INIT01);
+
+ svsb->phase = SVSB_PHASE_INIT01;
+ svsb->dc_voffset_in = ~(svs_readl_relaxed(svsp, DCVALUES) &
+ GENMASK(15, 0)) + 1;
+ if (svsb->volt_flags & SVSB_INIT01_VOLT_IGNORE ||
+ (svsb->dc_voffset_in & SVSB_DC_SIGNED_BIT &&
+ svsb->volt_flags & SVSB_INIT01_VOLT_INC_ONLY))
+ svsb->dc_voffset_in = 0;
+
+ svsb->age_voffset_in = svs_readl_relaxed(svsp, AGEVALUES) &
+ GENMASK(15, 0);
+
+ svs_writel_relaxed(svsp, SVSB_PTPEN_OFF, SVSEN);
+ svs_writel_relaxed(svsp, SVSB_INTSTS_F0_COMPLETE, INTSTS);
+ svsb->core_sel &= ~SVSB_DET_CLK_EN;
+}
+
+static inline void svs_init02_isr_handler(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb = svsp->pbank;
+
+ dev_info(svsb->dev, "%s: VOP74~30:0x%08x~0x%08x, DC:0x%08x\n",
+ __func__, svs_readl_relaxed(svsp, VOP74),
+ svs_readl_relaxed(svsp, VOP30),
+ svs_readl_relaxed(svsp, DCVALUES));
+
+ svs_save_bank_register_data(svsp, SVSB_PHASE_INIT02);
+
+ svsb->phase = SVSB_PHASE_INIT02;
+ svsb->get_volts(svsp);
+
+ svs_writel_relaxed(svsp, SVSB_PTPEN_OFF, SVSEN);
+ svs_writel_relaxed(svsp, SVSB_INTSTS_F0_COMPLETE, INTSTS);
+}
+
+static inline void svs_mon_mode_isr_handler(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb = svsp->pbank;
+
+ svs_save_bank_register_data(svsp, SVSB_PHASE_MON);
+
+ svsb->phase = SVSB_PHASE_MON;
+ svsb->get_volts(svsp);
+
+ svsb->temp = svs_readl_relaxed(svsp, TEMP) & GENMASK(7, 0);
+ svs_writel_relaxed(svsp, SVSB_INTSTS_FLD_MONVOP, INTSTS);
+}
+
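+/*
+ * Shared interrupt handler: scan the banks to find the one that raised
+ * the interrupt, dispatch to the matching phase handler under the svs
+ * lock, then push the refreshed voltages into that bank's OPP table.
+ */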
+static irqreturn_t svs_isr(int irq, void *data)
+{
+ struct svs_platform *svsp = data;
+ struct svs_bank *svsb = NULL;
+ unsigned long flags;
+ u32 idx, int_sts, svs_en;
+
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+ WARN(!svsb, "%s: svsb(%s) is null", __func__, svsb->name);
+
+ spin_lock_irqsave(&svs_lock, flags);
+ svsp->pbank = svsb;
+
+ /* Find out which svs bank fired the interrupt */
+ if (svsb->int_st & svs_readl_relaxed(svsp, INTST)) {
+ spin_unlock_irqrestore(&svs_lock, flags);
+ continue;
+ }
+
+ svs_switch_bank(svsp);
+ int_sts = svs_readl_relaxed(svsp, INTSTS);
+ svs_en = svs_readl_relaxed(svsp, SVSEN);
+
+ if (int_sts == SVSB_INTSTS_F0_COMPLETE &&
+ svs_en == SVSB_PTPEN_INIT01)
+ svs_init01_isr_handler(svsp);
+ else if (int_sts == SVSB_INTSTS_F0_COMPLETE &&
+ svs_en == SVSB_PTPEN_INIT02)
+ svs_init02_isr_handler(svsp);
+ else if (int_sts & SVSB_INTSTS_FLD_MONVOP)
+ svs_mon_mode_isr_handler(svsp);
+ else
+ svs_error_isr_handler(svsp);
+
+ spin_unlock_irqrestore(&svs_lock, flags);
+ break;
+ }
+
+ svs_adjust_pm_opp_volts(svsb);
+
+ if (svsb->phase == SVSB_PHASE_INIT01 ||
+ svsb->phase == SVSB_PHASE_INIT02)
+ complete(&svsb->init_completion);
+
+ return IRQ_HANDLED;
+}
+
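+/*
+ * INIT01 calibration: with cpuidle paused and the bucks forced on, each
+ * bank is pinned to the fastest OPP whose default voltage fits vboot, the
+ * INIT01 phase is started and waited on, and all OPPs and regulator modes
+ * are restored at the end regardless of the outcome.
+ */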
+static int svs_init01(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb;
+ unsigned long flags, time_left;
+ bool search_done;
+ int ret = 0, r;
+ u32 opp_freq, opp_vboot, buck_volt, idx, i;
+
+ /* Keep CPUs' core power on for svs_init01 initialization */
+ cpuidle_pause_and_lock();
+
+ /* Svs bank init01 preparation - power enable */
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ if (!(svsb->mode_support & SVSB_MODE_INIT01))
+ continue;
+
+ ret = regulator_enable(svsb->buck);
+ if (ret) {
+ dev_err(svsb->dev, "%s enable fail: %d\n",
+ svsb->buck_name, ret);
+ goto svs_init01_resume_cpuidle;
+ }
+
+ /* Some bucks don't support mode changes; just log the failure */
+ ret = regulator_set_mode(svsb->buck, REGULATOR_MODE_FAST);
+ if (ret)
+ dev_notice(svsb->dev, "set fast mode fail: %d\n", ret);
+
+ if (svsb->volt_flags & SVSB_INIT01_PD_REQ) {
+ if (!pm_runtime_enabled(svsb->opp_dev)) {
+ pm_runtime_enable(svsb->opp_dev);
+ svsb->pm_runtime_enabled_count++;
+ }
+
+ ret = pm_runtime_resume_and_get(svsb->opp_dev);
+ if (ret < 0) {
+ dev_err(svsb->dev, "mtcmos on fail: %d\n", ret);
+ goto svs_init01_resume_cpuidle;
+ }
+ }
+ }
+
+ /*
+ * Svs bank init01 preparation - vboot voltage adjustment
+ * Sometimes two svs banks use the same buck. Therefore,
+ * we have to set each svs bank to its target voltage (vboot) first.
+ */
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ if (!(svsb->mode_support & SVSB_MODE_INIT01))
+ continue;
+
+ /*
+ * Find the fastest freq that can be run at vboot and
+ * fix to that freq until svs_init01 is done.
+ */
+ search_done = false;
+ opp_vboot = svs_bank_volt_to_opp_volt(svsb->vboot,
+ svsb->volt_step,
+ svsb->volt_base);
+
+ for (i = 0; i < svsb->opp_count; i++) {
+ opp_freq = svsb->opp_dfreq[i];
+ if (!search_done && svsb->opp_dvolt[i] <= opp_vboot) {
+ ret = dev_pm_opp_adjust_voltage(svsb->opp_dev,
+ opp_freq,
+ opp_vboot,
+ opp_vboot,
+ opp_vboot);
+ if (ret) {
+ dev_err(svsb->dev,
+ "set opp %uuV vboot fail: %d\n",
+ opp_vboot, ret);
+ goto svs_init01_finish;
+ }
+
+ search_done = true;
+ } else {
+ ret = dev_pm_opp_disable(svsb->opp_dev,
+ svsb->opp_dfreq[i]);
+ if (ret) {
+ dev_err(svsb->dev,
+ "opp %uHz disable fail: %d\n",
+ svsb->opp_dfreq[i], ret);
+ goto svs_init01_finish;
+ }
+ }
+ }
+ }
+
+ /* Svs bank init01 begins */
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ if (!(svsb->mode_support & SVSB_MODE_INIT01))
+ continue;
+
+ opp_vboot = svs_bank_volt_to_opp_volt(svsb->vboot,
+ svsb->volt_step,
+ svsb->volt_base);
+
+ buck_volt = regulator_get_voltage(svsb->buck);
+ if (buck_volt != opp_vboot) {
+ dev_err(svsb->dev,
+ "buck voltage: %uuV, expected vboot: %uuV\n",
+ buck_volt, opp_vboot);
+ ret = -EPERM;
+ goto svs_init01_finish;
+ }
+
+ spin_lock_irqsave(&svs_lock, flags);
+ svsp->pbank = svsb;
+ svs_set_bank_phase(svsp, SVSB_PHASE_INIT01);
+ spin_unlock_irqrestore(&svs_lock, flags);
+
+ time_left = wait_for_completion_timeout(&svsb->init_completion,
+ msecs_to_jiffies(5000));
+ if (!time_left) {
+ dev_err(svsb->dev, "init01 completion timeout\n");
+ ret = -EBUSY;
+ goto svs_init01_finish;
+ }
+ }
+
+svs_init01_finish:
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ if (!(svsb->mode_support & SVSB_MODE_INIT01))
+ continue;
+
+ for (i = 0; i < svsb->opp_count; i++) {
+ r = dev_pm_opp_enable(svsb->opp_dev,
+ svsb->opp_dfreq[i]);
+ if (r)
+ dev_err(svsb->dev, "opp %uHz enable fail: %d\n",
+ svsb->opp_dfreq[i], r);
+ }
+
+ if (svsb->volt_flags & SVSB_INIT01_PD_REQ) {
+ r = pm_runtime_put_sync(svsb->opp_dev);
+ if (r)
+ dev_err(svsb->dev, "mtcmos off fail: %d\n", r);
+
+ if (svsb->pm_runtime_enabled_count > 0) {
+ pm_runtime_disable(svsb->opp_dev);
+ svsb->pm_runtime_enabled_count--;
+ }
+ }
+
+ r = regulator_set_mode(svsb->buck, REGULATOR_MODE_NORMAL);
+ if (r)
+ dev_notice(svsb->dev, "set normal mode fail: %d\n", r);
+
+ r = regulator_disable(svsb->buck);
+ if (r)
+ dev_err(svsb->dev, "%s disable fail: %d\n",
+ svsb->buck_name, r);
+ }
+
+svs_init01_resume_cpuidle:
+ cpuidle_resume_and_unlock();
+
+ return ret;
+}
+
+static int svs_init02(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb;
+ unsigned long flags, time_left;
+ int ret;
+ u32 idx;
+
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ if (!(svsb->mode_support & SVSB_MODE_INIT02))
+ continue;
+
+ reinit_completion(&svsb->init_completion);
+ spin_lock_irqsave(&svs_lock, flags);
+ svsp->pbank = svsb;
+ svs_set_bank_phase(svsp, SVSB_PHASE_INIT02);
+ spin_unlock_irqrestore(&svs_lock, flags);
+
+ time_left = wait_for_completion_timeout(&svsb->init_completion,
+ msecs_to_jiffies(5000));
+ if (!time_left) {
+ dev_err(svsb->dev, "init02 completion timeout\n");
+ ret = -EBUSY;
+ goto out_of_init02;
+ }
+ }
+
+ /*
+ * The 2-line high/low banks update only their corresponding opp
+ * voltages, so sync voltages back from the opp table to keep the
+ * high/low bank voltages consistent.
+ */
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ if (!(svsb->mode_support & SVSB_MODE_INIT02))
+ continue;
+
+ if (svsb->type == SVSB_HIGH || svsb->type == SVSB_LOW) {
+ if (svs_sync_bank_volts_from_opp(svsb)) {
+ dev_err(svsb->dev, "sync volt fail\n");
+ ret = -EPERM;
+ goto out_of_init02;
+ }
+ }
+ }
+
+ return 0;
+
+out_of_init02:
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+ svs_bank_disable_and_restore_default_volts(svsp, svsb);
+ }
+
+ return ret;
+}
+
+static void svs_mon_mode(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb;
+ unsigned long flags;
+ u32 idx;
+
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ if (!(svsb->mode_support & SVSB_MODE_MON))
+ continue;
+
+ spin_lock_irqsave(&svs_lock, flags);
+ svsp->pbank = svsb;
+ svs_set_bank_phase(svsp, SVSB_PHASE_MON);
+ spin_unlock_irqrestore(&svs_lock, flags);
+ }
+}
+
+static int svs_start(struct svs_platform *svsp)
+{
+ int ret;
+
+ ret = svs_init01(svsp);
+ if (ret)
+ return ret;
+
+ ret = svs_init02(svsp);
+ if (ret)
+ return ret;
+
+ svs_mon_mode(svsp);
+
+ return 0;
+}
+
+static int svs_suspend(struct device *dev)
+{
+ struct svs_platform *svsp = dev_get_drvdata(dev);
+ struct svs_bank *svsb;
+ int ret;
+ u32 idx;
+
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+ svs_bank_disable_and_restore_default_volts(svsp, svsb);
+ }
+
+ ret = reset_control_assert(svsp->rst);
+ if (ret) {
+ dev_err(svsp->dev, "cannot assert reset %d\n", ret);
+ return ret;
+ }
+
+ clk_disable_unprepare(svsp->main_clk);
+
+ return 0;
+}
+
+static int svs_resume(struct device *dev)
+{
+ struct svs_platform *svsp = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(svsp->main_clk);
+ if (ret) {
+ dev_err(svsp->dev, "cannot enable main_clk, disable svs\n");
+ return ret;
+ }
+
+ ret = reset_control_deassert(svsp->rst);
+ if (ret) {
+ dev_err(svsp->dev, "cannot deassert reset %d\n", ret);
+ goto out_of_resume;
+ }
+
+ ret = svs_init02(svsp);
+ if (ret)
+ goto svs_resume_reset_assert;
+
+ svs_mon_mode(svsp);
+
+ return 0;
+
+svs_resume_reset_assert:
+ dev_err(svsp->dev, "assert reset: %d\n",
+ reset_control_assert(svsp->rst));
+
+out_of_resume:
+ clk_disable_unprepare(svsp->main_clk);
+ return ret;
+}
+
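+/*
+ * Per-bank resource setup: allocate a named device used for bank logging,
+ * add the OPP table of the bank's opp_dev, grab the optional buck
+ * regulator and thermal zone, and cache each OPP's default frequency and
+ * voltage along with its percentage of freq_base.
+ */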
+static int svs_bank_resource_setup(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb;
+ struct dev_pm_opp *opp;
+ unsigned long freq;
+ int count, ret;
+ u32 idx, i;
+
+ dev_set_drvdata(svsp->dev, svsp);
+
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ switch (svsb->sw_id) {
+ case SVSB_CPU_LITTLE:
+ svsb->name = "SVSB_CPU_LITTLE";
+ break;
+ case SVSB_CPU_BIG:
+ svsb->name = "SVSB_CPU_BIG";
+ break;
+ case SVSB_CCI:
+ svsb->name = "SVSB_CCI";
+ break;
+ case SVSB_GPU:
+ if (svsb->type == SVSB_HIGH)
+ svsb->name = "SVSB_GPU_HIGH";
+ else if (svsb->type == SVSB_LOW)
+ svsb->name = "SVSB_GPU_LOW";
+ else
+ svsb->name = "SVSB_GPU";
+ break;
+ default:
+ dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id);
+ return -EINVAL;
+ }
+
+ svsb->dev = devm_kzalloc(svsp->dev, sizeof(*svsb->dev),
+ GFP_KERNEL);
+ if (!svsb->dev)
+ return -ENOMEM;
+
+ ret = dev_set_name(svsb->dev, "%s", svsb->name);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(svsb->dev, svsp);
+
+ ret = devm_pm_opp_of_add_table(svsb->opp_dev);
+ if (ret) {
+ dev_err(svsb->dev, "add opp table fail: %d\n", ret);
+ return ret;
+ }
+
+ mutex_init(&svsb->lock);
+ init_completion(&svsb->init_completion);
+
+ if (svsb->mode_support & SVSB_MODE_INIT01) {
+ svsb->buck = devm_regulator_get_optional(svsb->opp_dev,
+ svsb->buck_name);
+ if (IS_ERR(svsb->buck)) {
+ dev_err(svsb->dev, "cannot get \"%s-supply\"\n",
+ svsb->buck_name);
+ return PTR_ERR(svsb->buck);
+ }
+ }
+
+ if (!IS_ERR_OR_NULL(svsb->tzone_name)) {
+ svsb->tzd = thermal_zone_get_zone_by_name(svsb->tzone_name);
+ if (IS_ERR(svsb->tzd)) {
+ dev_err(svsb->dev, "cannot get \"%s\" thermal zone\n",
+ svsb->tzone_name);
+ return PTR_ERR(svsb->tzd);
+ }
+ }
+
+ count = dev_pm_opp_get_opp_count(svsb->opp_dev);
+ if (svsb->opp_count != count) {
+ dev_err(svsb->dev,
+ "opp_count not \"%u\" but get \"%d\"?\n",
+ svsb->opp_count, count);
+ return count;
+ }
+
+ for (i = 0, freq = U32_MAX; i < svsb->opp_count; i++, freq--) {
+ opp = dev_pm_opp_find_freq_floor(svsb->opp_dev, &freq);
+ if (IS_ERR(opp)) {
+ dev_err(svsb->dev, "cannot find freq = %ld\n",
+ PTR_ERR(opp));
+ return PTR_ERR(opp);
+ }
+
+ svsb->opp_dfreq[i] = freq;
+ svsb->opp_dvolt[i] = dev_pm_opp_get_voltage(opp);
+ svsb->freq_pct[i] = percent(svsb->opp_dfreq[i],
+ svsb->freq_base);
+ dev_pm_opp_put(opp);
+ }
+ }
+
+ return 0;
+}
+
+static int svs_get_efuse_data(struct svs_platform *svsp,
+ const char *nvmem_cell_name,
+ u32 **svsp_efuse, size_t *svsp_efuse_max)
+{
+ struct nvmem_cell *cell;
+
+ cell = nvmem_cell_get(svsp->dev, nvmem_cell_name);
+ if (IS_ERR(cell)) {
+ dev_err(svsp->dev, "no \"%s\"? %ld\n",
+ nvmem_cell_name, PTR_ERR(cell));
+ return PTR_ERR(cell);
+ }
+
+ *svsp_efuse = nvmem_cell_read(cell, svsp_efuse_max);
+ if (IS_ERR(*svsp_efuse)) {
+ dev_err(svsp->dev, "cannot read \"%s\" efuse: %ld\n",
+ nvmem_cell_name, PTR_ERR(*svsp_efuse));
+ nvmem_cell_put(cell);
+ return PTR_ERR(*svsp_efuse);
+ }
+
+ *svsp_efuse_max /= sizeof(u32);
+ nvmem_cell_put(cell);
+
+ return 0;
+}
+
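+/*
+ * MT8192 efuse layout (as used below): the bank calibration words carry
+ * mtdes, bdes, mdes, dcbdet and dcmdet per bank, and the thermal efuse
+ * supplies the golden temperature (defaulting to 50 when all thermal
+ * words are zero).
+ */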
+static bool svs_mt8192_efuse_parsing(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb;
+ u32 idx, i, vmin, golden_temp;
+ int ret;
+
+ for (i = 0; i < svsp->efuse_max; i++)
+ if (svsp->efuse[i])
+ dev_info(svsp->dev, "M_HW_RES%d: 0x%08x\n",
+ i, svsp->efuse[i]);
+
+ if (!svsp->efuse[9]) {
+ dev_notice(svsp->dev, "svs_efuse[9] = 0x0?\n");
+ return false;
+ }
+
+ /* Svs efuse parsing */
+ vmin = (svsp->efuse[19] >> 4) & GENMASK(1, 0);
+
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ if (vmin == 0x1)
+ svsb->vmin = 0x1e;
+
+ if (svsb->type == SVSB_LOW) {
+ svsb->mtdes = svsp->efuse[10] & GENMASK(7, 0);
+ svsb->bdes = (svsp->efuse[10] >> 16) & GENMASK(7, 0);
+ svsb->mdes = (svsp->efuse[10] >> 24) & GENMASK(7, 0);
+ svsb->dcbdet = (svsp->efuse[17]) & GENMASK(7, 0);
+ svsb->dcmdet = (svsp->efuse[17] >> 8) & GENMASK(7, 0);
+ } else if (svsb->type == SVSB_HIGH) {
+ svsb->mtdes = svsp->efuse[9] & GENMASK(7, 0);
+ svsb->bdes = (svsp->efuse[9] >> 16) & GENMASK(7, 0);
+ svsb->mdes = (svsp->efuse[9] >> 24) & GENMASK(7, 0);
+ svsb->dcbdet = (svsp->efuse[17] >> 16) & GENMASK(7, 0);
+ svsb->dcmdet = (svsp->efuse[17] >> 24) & GENMASK(7, 0);
+ }
+
+ svsb->vmax += svsb->dvt_fixed;
+ }
+
+ ret = svs_get_efuse_data(svsp, "t-calibration-data",
+ &svsp->tefuse, &svsp->tefuse_max);
+ if (ret)
+ return false;
+
+ for (i = 0; i < svsp->tefuse_max; i++)
+ if (svsp->tefuse[i] != 0)
+ break;
+
+ if (i == svsp->tefuse_max)
+ golden_temp = 50; /* All thermal efuse data are 0 */
+ else
+ golden_temp = (svsp->tefuse[0] >> 24) & GENMASK(7, 0);
+
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+ svsb->mts = 500;
+ svsb->bts = (((500 * golden_temp + 250460) / 1000) - 25) * 4;
+ }
+
+ return true;
+}
+
+static bool svs_mt8183_efuse_parsing(struct svs_platform *svsp)
+{
+ struct svs_bank *svsb;
+ int format[6], x_roomt[6], o_vtsmcu[5], o_vtsabb, tb_roomt = 0;
+ int adc_ge_t, adc_oe_t, ge, oe, gain, degc_cali, adc_cali_en_t;
+ int o_slope, o_slope_sign, ts_id;
+ u32 idx, i, ft_pgm, mts, temp0, temp1, temp2;
+ int ret;
+
+ for (i = 0; i < svsp->efuse_max; i++)
+ if (svsp->efuse[i])
+ dev_info(svsp->dev, "M_HW_RES%d: 0x%08x\n",
+ i, svsp->efuse[i]);
+
+ if (!svsp->efuse[2]) {
+ dev_notice(svsp->dev, "svs_efuse[2] = 0x0?\n");
+ return false;
+ }
+
+ /* Svs efuse parsing */
+ ft_pgm = (svsp->efuse[0] >> 4) & GENMASK(3, 0);
+
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ if (ft_pgm <= 1)
+ svsb->volt_flags |= SVSB_INIT01_VOLT_IGNORE;
+
+ switch (svsb->sw_id) {
+ case SVSB_CPU_LITTLE:
+ svsb->bdes = svsp->efuse[16] & GENMASK(7, 0);
+ svsb->mdes = (svsp->efuse[16] >> 8) & GENMASK(7, 0);
+ svsb->dcbdet = (svsp->efuse[16] >> 16) & GENMASK(7, 0);
+ svsb->dcmdet = (svsp->efuse[16] >> 24) & GENMASK(7, 0);
+ svsb->mtdes = (svsp->efuse[17] >> 16) & GENMASK(7, 0);
+
+ if (ft_pgm <= 3)
+ svsb->volt_od += 10;
+ else
+ svsb->volt_od += 2;
+ break;
+ case SVSB_CPU_BIG:
+ svsb->bdes = svsp->efuse[18] & GENMASK(7, 0);
+ svsb->mdes = (svsp->efuse[18] >> 8) & GENMASK(7, 0);
+ svsb->dcbdet = (svsp->efuse[18] >> 16) & GENMASK(7, 0);
+ svsb->dcmdet = (svsp->efuse[18] >> 24) & GENMASK(7, 0);
+ svsb->mtdes = svsp->efuse[17] & GENMASK(7, 0);
+
+ if (ft_pgm <= 3)
+ svsb->volt_od += 15;
+ else
+ svsb->volt_od += 12;
+ break;
+ case SVSB_CCI:
+ svsb->bdes = svsp->efuse[4] & GENMASK(7, 0);
+ svsb->mdes = (svsp->efuse[4] >> 8) & GENMASK(7, 0);
+ svsb->dcbdet = (svsp->efuse[4] >> 16) & GENMASK(7, 0);
+ svsb->dcmdet = (svsp->efuse[4] >> 24) & GENMASK(7, 0);
+ svsb->mtdes = (svsp->efuse[5] >> 16) & GENMASK(7, 0);
+
+ if (ft_pgm <= 3)
+ svsb->volt_od += 10;
+ else
+ svsb->volt_od += 2;
+ break;
+ case SVSB_GPU:
+ svsb->bdes = svsp->efuse[6] & GENMASK(7, 0);
+ svsb->mdes = (svsp->efuse[6] >> 8) & GENMASK(7, 0);
+ svsb->dcbdet = (svsp->efuse[6] >> 16) & GENMASK(7, 0);
+ svsb->dcmdet = (svsp->efuse[6] >> 24) & GENMASK(7, 0);
+ svsb->mtdes = svsp->efuse[5] & GENMASK(7, 0);
+
+ if (ft_pgm >= 2) {
+ svsb->freq_base = 800000000; /* 800MHz */
+ svsb->dvt_fixed = 2;
+ }
+ break;
+ default:
+ dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id);
+ return false;
+ }
+ }
+
+ ret = svs_get_efuse_data(svsp, "t-calibration-data",
+ &svsp->tefuse, &svsp->tefuse_max);
+ if (ret)
+ return false;
+
+ /* Thermal efuse parsing */
+ adc_ge_t = (svsp->tefuse[1] >> 22) & GENMASK(9, 0);
+ adc_oe_t = (svsp->tefuse[1] >> 12) & GENMASK(9, 0);
+
+ o_vtsmcu[0] = (svsp->tefuse[0] >> 17) & GENMASK(8, 0);
+ o_vtsmcu[1] = (svsp->tefuse[0] >> 8) & GENMASK(8, 0);
+ o_vtsmcu[2] = svsp->tefuse[1] & GENMASK(8, 0);
+ o_vtsmcu[3] = (svsp->tefuse[2] >> 23) & GENMASK(8, 0);
+ o_vtsmcu[4] = (svsp->tefuse[2] >> 5) & GENMASK(8, 0);
+ o_vtsabb = (svsp->tefuse[2] >> 14) & GENMASK(8, 0);
+
+ degc_cali = (svsp->tefuse[0] >> 1) & GENMASK(5, 0);
+ adc_cali_en_t = svsp->tefuse[0] & BIT(0);
+ o_slope_sign = (svsp->tefuse[0] >> 7) & BIT(0);
+
+ ts_id = (svsp->tefuse[1] >> 9) & BIT(0);
+ if (!ts_id) {
+ o_slope = 1534;
+ } else {
+ o_slope = (svsp->tefuse[0] >> 26) & GENMASK(5, 0);
+ if (!o_slope_sign)
+ o_slope = 1534 + o_slope * 10;
+ else
+ o_slope = 1534 - o_slope * 10;
+ }
+
+ if (adc_cali_en_t == 0 ||
+ adc_ge_t < 265 || adc_ge_t > 758 ||
+ adc_oe_t < 265 || adc_oe_t > 758 ||
+ o_vtsmcu[0] < -8 || o_vtsmcu[0] > 484 ||
+ o_vtsmcu[1] < -8 || o_vtsmcu[1] > 484 ||
+ o_vtsmcu[2] < -8 || o_vtsmcu[2] > 484 ||
+ o_vtsmcu[3] < -8 || o_vtsmcu[3] > 484 ||
+ o_vtsmcu[4] < -8 || o_vtsmcu[4] > 484 ||
+ o_vtsabb < -8 || o_vtsabb > 484 ||
+ degc_cali < 1 || degc_cali > 63) {
+ dev_err(svsp->dev, "bad thermal efuse, no mon mode\n");
+ goto remove_mt8183_svsb_mon_mode;
+ }
+
+ ge = ((adc_ge_t - 512) * 10000) / 4096;
+ oe = (adc_oe_t - 512);
+ gain = (10000 + ge);
+
+ format[0] = (o_vtsmcu[0] + 3350 - oe);
+ format[1] = (o_vtsmcu[1] + 3350 - oe);
+ format[2] = (o_vtsmcu[2] + 3350 - oe);
+ format[3] = (o_vtsmcu[3] + 3350 - oe);
+ format[4] = (o_vtsmcu[4] + 3350 - oe);
+ format[5] = (o_vtsabb + 3350 - oe);
+
+ for (i = 0; i < 6; i++)
+ x_roomt[i] = (((format[i] * 10000) / 4096) * 10000) / gain;
+
+ temp0 = (10000 * 100000 / gain) * 15 / 18;
+ mts = (temp0 * 10) / o_slope;
+
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+ svsb->mts = mts;
+
+ switch (svsb->sw_id) {
+ case SVSB_CPU_LITTLE:
+ tb_roomt = x_roomt[3];
+ break;
+ case SVSB_CPU_BIG:
+ tb_roomt = x_roomt[4];
+ break;
+ case SVSB_CCI:
+ tb_roomt = x_roomt[3];
+ break;
+ case SVSB_GPU:
+ tb_roomt = x_roomt[1];
+ break;
+ default:
+ dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id);
+ goto remove_mt8183_svsb_mon_mode;
+ }
+
+ temp0 = (degc_cali * 10 / 2);
+ temp1 = ((10000 * 100000 / 4096 / gain) *
+ oe + tb_roomt * 10) * 15 / 18;
+ temp2 = temp1 * 100 / o_slope;
+
+ svsb->bts = (temp0 + temp2 - 250) * 4 / 10;
+ }
+
+ return true;
+
+remove_mt8183_svsb_mon_mode:
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+ svsb->mode_support &= ~SVSB_MODE_MON;
+ }
+
+ return true;
+}
+
+static struct device *svs_get_subsys_device(struct svs_platform *svsp,
+ const char *node_name)
+{
+ struct platform_device *pdev;
+ struct device_node *np;
+
+ np = of_find_node_by_name(NULL, node_name);
+ if (!np) {
+ dev_err(svsp->dev, "cannot find %s node\n", node_name);
+ return ERR_PTR(-ENODEV);
+ }
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev) {
+ of_node_put(np);
+ dev_err(svsp->dev, "cannot find pdev by %s\n", node_name);
+ return ERR_PTR(-ENXIO);
+ }
+
+ of_node_put(np);
+
+ return &pdev->dev;
+}
+
+static struct device *svs_add_device_link(struct svs_platform *svsp,
+ const char *node_name)
+{
+ struct device *dev;
+ struct device_link *sup_link;
+
+ dev = svs_get_subsys_device(svsp, node_name);
+ if (IS_ERR(dev))
+ return dev;
+
+ sup_link = device_link_add(svsp->dev, dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
+ if (!sup_link) {
+ dev_err(svsp->dev, "sup_link is NULL\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (sup_link->supplier->links.status != DL_DEV_DRIVER_BOUND)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return dev;
+}
+
+static int svs_mt8192_platform_probe(struct svs_platform *svsp)
+{
+ struct device *dev;
+ struct svs_bank *svsb;
+ u32 idx;
+
+ svsp->rst = devm_reset_control_get_optional(svsp->dev, "svs_rst");
+ if (IS_ERR(svsp->rst))
+ return dev_err_probe(svsp->dev, PTR_ERR(svsp->rst),
+ "cannot get svs reset control\n");
+
+ dev = svs_add_device_link(svsp, "lvts");
+ if (IS_ERR(dev))
+ return dev_err_probe(svsp->dev, PTR_ERR(dev),
+ "failed to get lvts device\n");
+
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ if (svsb->type == SVSB_HIGH)
+ svsb->opp_dev = svs_add_device_link(svsp, "gpu");
+ else if (svsb->type == SVSB_LOW)
+ svsb->opp_dev = svs_get_subsys_device(svsp, "gpu");
+
+ if (IS_ERR(svsb->opp_dev))
+ return dev_err_probe(svsp->dev, PTR_ERR(svsb->opp_dev),
+ "failed to get OPP device for bank %d\n",
+ idx);
+ }
+
+ return 0;
+}
+
+static int svs_mt8183_platform_probe(struct svs_platform *svsp)
+{
+ struct device *dev;
+ struct svs_bank *svsb;
+ u32 idx;
+
+ dev = svs_add_device_link(svsp, "thermal");
+ if (IS_ERR(dev))
+ return dev_err_probe(svsp->dev, PTR_ERR(dev),
+ "failed to get thermal device\n");
+
+ for (idx = 0; idx < svsp->bank_max; idx++) {
+ svsb = &svsp->banks[idx];
+
+ switch (svsb->sw_id) {
+ case SVSB_CPU_LITTLE:
+ case SVSB_CPU_BIG:
+ svsb->opp_dev = get_cpu_device(svsb->cpu_id);
+ break;
+ case SVSB_CCI:
+ svsb->opp_dev = svs_add_device_link(svsp, "cci");
+ break;
+ case SVSB_GPU:
+ svsb->opp_dev = svs_add_device_link(svsp, "gpu");
+ break;
+ default:
+ dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id);
+ return -EINVAL;
+ }
+
+ if (IS_ERR(svsb->opp_dev))
+ return dev_err_probe(svsp->dev, PTR_ERR(svsb->opp_dev),
+ "failed to get OPP device for bank %d\n",
+ idx);
+ }
+
+ return 0;
+}
+
+static struct svs_bank svs_mt8192_banks[] = {
+ {
+ .sw_id = SVSB_GPU,
+ .type = SVSB_LOW,
+ .set_freq_pct = svs_set_bank_freq_pct_v3,
+ .get_volts = svs_get_bank_volts_v3,
+ .tzone_name = "gpu1",
+ .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT,
+ .mode_support = SVSB_MODE_INIT02,
+ .opp_count = MAX_OPP_ENTRIES,
+ .freq_base = 688000000,
+ .turn_freq_base = 688000000,
+ .volt_step = 6250,
+ .volt_base = 400000,
+ .vmax = 0x60,
+ .vmin = 0x1a,
+ .age_config = 0x555555,
+ .dc_config = 0x1,
+ .dvt_fixed = 0x1,
+ .vco = 0x18,
+ .chk_shift = 0x87,
+ .core_sel = 0x0fff0100,
+ .int_st = BIT(0),
+ .ctl0 = 0x00540003,
+ .tzone_htemp = 85000,
+ .tzone_htemp_voffset = 0,
+ .tzone_ltemp = 25000,
+ .tzone_ltemp_voffset = 7,
+ },
+ {
+ .sw_id = SVSB_GPU,
+ .type = SVSB_HIGH,
+ .set_freq_pct = svs_set_bank_freq_pct_v3,
+ .get_volts = svs_get_bank_volts_v3,
+ .tzone_name = "gpu1",
+ .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT |
+ SVSB_MON_VOLT_IGNORE,
+ .mode_support = SVSB_MODE_INIT02 | SVSB_MODE_MON,
+ .opp_count = MAX_OPP_ENTRIES,
+ .freq_base = 902000000,
+ .turn_freq_base = 688000000,
+ .volt_step = 6250,
+ .volt_base = 400000,
+ .vmax = 0x60,
+ .vmin = 0x1a,
+ .age_config = 0x555555,
+ .dc_config = 0x1,
+ .dvt_fixed = 0x6,
+ .vco = 0x18,
+ .chk_shift = 0x87,
+ .core_sel = 0x0fff0101,
+ .int_st = BIT(1),
+ .ctl0 = 0x00540003,
+ .tzone_htemp = 85000,
+ .tzone_htemp_voffset = 0,
+ .tzone_ltemp = 25000,
+ .tzone_ltemp_voffset = 7,
+ },
+};
+
+static struct svs_bank svs_mt8183_banks[] = {
+ {
+ .sw_id = SVSB_CPU_LITTLE,
+ .set_freq_pct = svs_set_bank_freq_pct_v2,
+ .get_volts = svs_get_bank_volts_v2,
+ .cpu_id = 0,
+ .buck_name = "proc",
+ .volt_flags = SVSB_INIT01_VOLT_INC_ONLY,
+ .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02,
+ .opp_count = MAX_OPP_ENTRIES,
+ .freq_base = 1989000000,
+ .vboot = 0x30,
+ .volt_step = 6250,
+ .volt_base = 500000,
+ .vmax = 0x64,
+ .vmin = 0x18,
+ .age_config = 0x555555,
+ .dc_config = 0x555555,
+ .dvt_fixed = 0x7,
+ .vco = 0x10,
+ .chk_shift = 0x77,
+ .core_sel = 0x8fff0000,
+ .int_st = BIT(0),
+ .ctl0 = 0x00010001,
+ },
+ {
+ .sw_id = SVSB_CPU_BIG,
+ .set_freq_pct = svs_set_bank_freq_pct_v2,
+ .get_volts = svs_get_bank_volts_v2,
+ .cpu_id = 4,
+ .buck_name = "proc",
+ .volt_flags = SVSB_INIT01_VOLT_INC_ONLY,
+ .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02,
+ .opp_count = MAX_OPP_ENTRIES,
+ .freq_base = 1989000000,
+ .vboot = 0x30,
+ .volt_step = 6250,
+ .volt_base = 500000,
+ .vmax = 0x58,
+ .vmin = 0x10,
+ .age_config = 0x555555,
+ .dc_config = 0x555555,
+ .dvt_fixed = 0x7,
+ .vco = 0x10,
+ .chk_shift = 0x77,
+ .core_sel = 0x8fff0001,
+ .int_st = BIT(1),
+ .ctl0 = 0x00000001,
+ },
+ {
+ .sw_id = SVSB_CCI,
+ .set_freq_pct = svs_set_bank_freq_pct_v2,
+ .get_volts = svs_get_bank_volts_v2,
+ .buck_name = "proc",
+ .volt_flags = SVSB_INIT01_VOLT_INC_ONLY,
+ .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02,
+ .opp_count = MAX_OPP_ENTRIES,
+ .freq_base = 1196000000,
+ .vboot = 0x30,
+ .volt_step = 6250,
+ .volt_base = 500000,
+ .vmax = 0x64,
+ .vmin = 0x18,
+ .age_config = 0x555555,
+ .dc_config = 0x555555,
+ .dvt_fixed = 0x7,
+ .vco = 0x10,
+ .chk_shift = 0x77,
+ .core_sel = 0x8fff0002,
+ .int_st = BIT(2),
+ .ctl0 = 0x00100003,
+ },
+ {
+ .sw_id = SVSB_GPU,
+ .set_freq_pct = svs_set_bank_freq_pct_v2,
+ .get_volts = svs_get_bank_volts_v2,
+ .buck_name = "mali",
+ .tzone_name = "tzts2",
+ .volt_flags = SVSB_INIT01_PD_REQ |
+ SVSB_INIT01_VOLT_INC_ONLY,
+ .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02 |
+ SVSB_MODE_MON,
+ .opp_count = MAX_OPP_ENTRIES,
+ .freq_base = 900000000,
+ .vboot = 0x30,
+ .volt_step = 6250,
+ .volt_base = 500000,
+ .vmax = 0x40,
+ .vmin = 0x14,
+ .age_config = 0x555555,
+ .dc_config = 0x555555,
+ .dvt_fixed = 0x3,
+ .vco = 0x10,
+ .chk_shift = 0x77,
+ .core_sel = 0x8fff0003,
+ .int_st = BIT(3),
+ .ctl0 = 0x00050001,
+ .tzone_htemp = 85000,
+ .tzone_htemp_voffset = 0,
+ .tzone_ltemp = 25000,
+ .tzone_ltemp_voffset = 3,
+ },
+};
+
+static const struct svs_platform_data svs_mt8192_platform_data = {
+ .name = "mt8192-svs",
+ .banks = svs_mt8192_banks,
+ .efuse_parsing = svs_mt8192_efuse_parsing,
+ .probe = svs_mt8192_platform_probe,
+ .regs = svs_regs_v2,
+ .bank_max = ARRAY_SIZE(svs_mt8192_banks),
+};
+
+static const struct svs_platform_data svs_mt8183_platform_data = {
+ .name = "mt8183-svs",
+ .banks = svs_mt8183_banks,
+ .efuse_parsing = svs_mt8183_efuse_parsing,
+ .probe = svs_mt8183_platform_probe,
+ .regs = svs_regs_v2,
+ .bank_max = ARRAY_SIZE(svs_mt8183_banks),
+};
+
+static const struct of_device_id svs_of_match[] = {
+ {
+ .compatible = "mediatek,mt8192-svs",
+ .data = &svs_mt8192_platform_data,
+ }, {
+ .compatible = "mediatek,mt8183-svs",
+ .data = &svs_mt8183_platform_data,
+ }, {
+ /* Sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(of, svs_of_match);
+
+static int svs_probe(struct platform_device *pdev)
+{
+ struct svs_platform *svsp;
+ const struct svs_platform_data *svsp_data;
+ int ret, svsp_irq;
+
+ svsp_data = of_device_get_match_data(&pdev->dev);
+
+ svsp = devm_kzalloc(&pdev->dev, sizeof(*svsp), GFP_KERNEL);
+ if (!svsp)
+ return -ENOMEM;
+
+ svsp->dev = &pdev->dev;
+ svsp->banks = svsp_data->banks;
+ svsp->regs = svsp_data->regs;
+ svsp->bank_max = svsp_data->bank_max;
+
+ ret = svsp_data->probe(svsp);
+ if (ret)
+ return ret;
+
+ ret = svs_get_efuse_data(svsp, "svs-calibration-data",
+ &svsp->efuse, &svsp->efuse_max);
+ if (ret) {
+ ret = -EPERM;
+ goto svs_probe_free_efuse;
+ }
+
+ if (!svsp_data->efuse_parsing(svsp)) {
+ dev_err(svsp->dev, "efuse data parsing failed\n");
+ ret = -EPERM;
+ goto svs_probe_free_tefuse;
+ }
+
+ ret = svs_bank_resource_setup(svsp);
+ if (ret) {
+ dev_err(svsp->dev, "svs bank resource setup fail: %d\n", ret);
+ goto svs_probe_free_tefuse;
+ }
+
+ svsp_irq = platform_get_irq(pdev, 0);
+ if (svsp_irq < 0) {
+ ret = svsp_irq;
+ goto svs_probe_free_tefuse;
+ }
+
+ svsp->main_clk = devm_clk_get(svsp->dev, "main");
+ if (IS_ERR(svsp->main_clk)) {
+ dev_err(svsp->dev, "failed to get clock: %ld\n",
+ PTR_ERR(svsp->main_clk));
+ ret = PTR_ERR(svsp->main_clk);
+ goto svs_probe_free_tefuse;
+ }
+
+ ret = clk_prepare_enable(svsp->main_clk);
+ if (ret) {
+ dev_err(svsp->dev, "cannot enable main clk: %d\n", ret);
+ goto svs_probe_free_tefuse;
+ }
+
+ svsp->base = of_iomap(svsp->dev->of_node, 0);
+ if (IS_ERR_OR_NULL(svsp->base)) {
+ dev_err(svsp->dev, "cannot find svs register base\n");
+ ret = -EINVAL;
+ goto svs_probe_clk_disable;
+ }
+
+ ret = devm_request_threaded_irq(svsp->dev, svsp_irq, NULL, svs_isr,
+ IRQF_ONESHOT, svsp_data->name, svsp);
+ if (ret) {
+ dev_err(svsp->dev, "register irq(%d) failed: %d\n",
+ svsp_irq, ret);
+ goto svs_probe_iounmap;
+ }
+
+ ret = svs_start(svsp);
+ if (ret) {
+ dev_err(svsp->dev, "svs start fail: %d\n", ret);
+ goto svs_probe_iounmap;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ ret = svs_create_debug_cmds(svsp);
+ if (ret) {
+ dev_err(svsp->dev, "svs create debug cmds fail: %d\n", ret);
+ goto svs_probe_iounmap;
+ }
+#endif
+
+ return 0;
+
+svs_probe_iounmap:
+ iounmap(svsp->base);
+
+svs_probe_clk_disable:
+ clk_disable_unprepare(svsp->main_clk);
+
+svs_probe_free_tefuse:
+ if (!IS_ERR_OR_NULL(svsp->tefuse))
+ kfree(svsp->tefuse);
+
+svs_probe_free_efuse:
+ if (!IS_ERR_OR_NULL(svsp->efuse))
+ kfree(svsp->efuse);
+
+ return ret;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(svs_pm_ops, svs_suspend, svs_resume);
+
+static struct platform_driver svs_driver = {
+ .probe = svs_probe,
+ .driver = {
+ .name = "mtk-svs",
+ .pm = &svs_pm_ops,
+ .of_match_table = svs_of_match,
+ },
+};
+
+module_platform_driver(svs_driver);
+
+MODULE_AUTHOR("Roger Lu <roger.lu@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek SVS driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/microchip/Kconfig b/drivers/soc/microchip/Kconfig
new file mode 100644
index 0000000000..eb656b3315
--- /dev/null
+++ b/drivers/soc/microchip/Kconfig
@@ -0,0 +1,10 @@
+config POLARFIRE_SOC_SYS_CTRL
+ tristate "POLARFIRE_SOC_SYS_CTRL"
+ depends on POLARFIRE_SOC_MAILBOX
+ help
+ This driver adds support for the PolarFire SoC (MPFS) system controller.
+
+ To compile this driver as a module, choose M here. The
+ module will be called mpfs-sys-controller.
+
+ If unsure, say N.
diff --git a/drivers/soc/microchip/Makefile b/drivers/soc/microchip/Makefile
new file mode 100644
index 0000000000..14489919fe
--- /dev/null
+++ b/drivers/soc/microchip/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_POLARFIRE_SOC_SYS_CTRL) += mpfs-sys-controller.o
diff --git a/drivers/soc/microchip/mpfs-sys-controller.c b/drivers/soc/microchip/mpfs-sys-controller.c
new file mode 100644
index 0000000000..fbcd5fd24d
--- /dev/null
+++ b/drivers/soc/microchip/mpfs-sys-controller.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip PolarFire SoC (MPFS) system controller driver
+ *
+ * Copyright (c) 2020-2021 Microchip Corporation. All rights reserved.
+ *
+ * Author: Conor Dooley <conor.dooley@microchip.com>
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/kref.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/mailbox_client.h>
+#include <linux/platform_device.h>
+#include <soc/microchip/mpfs.h>
+
+/*
+ * This timeout must be long, as some services (example: image authentication)
+ * take significant time to complete
+ */
+#define MPFS_SYS_CTRL_TIMEOUT_MS 30000
+
+static DEFINE_MUTEX(transaction_lock);
+
+struct mpfs_sys_controller {
+ struct mbox_client client;
+ struct mbox_chan *chan;
+ struct completion c;
+ struct kref consumers;
+};
+
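+/*
+ * Send one service request to the system controller and wait for its
+ * response. Requests are serialized with a global mutex and time out
+ * after MPFS_SYS_CTRL_TIMEOUT_MS if the completion interrupt never
+ * arrives.
+ */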
+int mpfs_blocking_transaction(struct mpfs_sys_controller *sys_controller, struct mpfs_mss_msg *msg)
+{
+ unsigned long timeout = msecs_to_jiffies(MPFS_SYS_CTRL_TIMEOUT_MS);
+ int ret;
+
+ ret = mutex_lock_interruptible(&transaction_lock);
+ if (ret)
+ return ret;
+
+ reinit_completion(&sys_controller->c);
+
+ ret = mbox_send_message(sys_controller->chan, msg);
+ if (ret < 0) {
+ dev_warn(sys_controller->client.dev, "MPFS sys controller service timeout\n");
+ goto out;
+ }
+
+ /*
+ * Unfortunately, the system controller will only deliver an interrupt
+ * if a service succeeds. mbox_send_message() will block until the busy
+ * flag is gone. If the busy flag is gone but no interrupt has arrived
+ * to trigger the rx callback then the service can be deemed to have
+ * failed.
+ * The caller can then interrogate msg::response::resp_status to
+ * determine the cause of the failure.
+ * mbox_send_message() returns positive integers in the success path, so
+ * ret needs to be cleared if we do get an interrupt.
+ */
+ if (!wait_for_completion_timeout(&sys_controller->c, timeout)) {
+ ret = -EBADMSG;
+ dev_warn(sys_controller->client.dev, "MPFS sys controller service failed\n");
+ } else {
+ ret = 0;
+ }
+
+out:
+ mutex_unlock(&transaction_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(mpfs_blocking_transaction);
+
+static void mpfs_sys_controller_rx_callback(struct mbox_client *client, void *msg)
+{
+ struct mpfs_sys_controller *sys_controller =
+ container_of(client, struct mpfs_sys_controller, client);
+
+ complete(&sys_controller->c);
+}
+
+static void mpfs_sys_controller_delete(struct kref *kref)
+{
+ struct mpfs_sys_controller *sys_controller =
+ container_of(kref, struct mpfs_sys_controller, consumers);
+
+ mbox_free_channel(sys_controller->chan);
+ kfree(sys_controller);
+}
+
+static void mpfs_sys_controller_put(void *data)
+{
+ struct mpfs_sys_controller *sys_controller = data;
+
+ kref_put(&sys_controller->consumers, mpfs_sys_controller_delete);
+}
+
+static struct platform_device subdevs[] = {
+ {
+ .name = "mpfs-rng",
+ .id = -1,
+ },
+ {
+ .name = "mpfs-generic-service",
+ .id = -1,
+ }
+};
+
+static int mpfs_sys_controller_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mpfs_sys_controller *sys_controller;
+ int i, ret;
+
+ sys_controller = kzalloc(sizeof(*sys_controller), GFP_KERNEL);
+ if (!sys_controller)
+ return -ENOMEM;
+
+ sys_controller->client.dev = dev;
+ sys_controller->client.rx_callback = mpfs_sys_controller_rx_callback;
+ sys_controller->client.tx_block = true;
+ sys_controller->client.tx_tout = msecs_to_jiffies(MPFS_SYS_CTRL_TIMEOUT_MS);
+
+ sys_controller->chan = mbox_request_channel(&sys_controller->client, 0);
+ if (IS_ERR(sys_controller->chan)) {
+ ret = dev_err_probe(dev, PTR_ERR(sys_controller->chan),
+ "Failed to get mbox channel\n");
+ kfree(sys_controller);
+ return ret;
+ }
+
+ init_completion(&sys_controller->c);
+ kref_init(&sys_controller->consumers);
+
+ platform_set_drvdata(pdev, sys_controller);
+
+ dev_info(&pdev->dev, "Registered MPFS system controller\n");
+
+ for (i = 0; i < ARRAY_SIZE(subdevs); i++) {
+ subdevs[i].dev.parent = dev;
+ if (platform_device_register(&subdevs[i]))
+ dev_warn(dev, "Error registering sub device %s\n", subdevs[i].name);
+ }
+
+ return 0;
+}
+
+static int mpfs_sys_controller_remove(struct platform_device *pdev)
+{
+ struct mpfs_sys_controller *sys_controller = platform_get_drvdata(pdev);
+
+ mpfs_sys_controller_put(sys_controller);
+
+ return 0;
+}
+
+static const struct of_device_id mpfs_sys_controller_of_match[] = {
+ {.compatible = "microchip,mpfs-sys-controller", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mpfs_sys_controller_of_match);
+
+struct mpfs_sys_controller *mpfs_sys_controller_get(struct device *dev)
+{
+ const struct of_device_id *match;
+ struct mpfs_sys_controller *sys_controller;
+ int ret;
+
+ if (!dev->parent)
+ goto err_no_device;
+
+ match = of_match_node(mpfs_sys_controller_of_match, dev->parent->of_node);
+ of_node_put(dev->parent->of_node);
+ if (!match)
+ goto err_no_device;
+
+ sys_controller = dev_get_drvdata(dev->parent);
+ if (!sys_controller)
+ goto err_bad_device;
+
+ if (!kref_get_unless_zero(&sys_controller->consumers))
+ goto err_bad_device;
+
+ ret = devm_add_action_or_reset(dev, mpfs_sys_controller_put, sys_controller);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return sys_controller;
+
+err_no_device:
+ dev_dbg(dev, "Parent device was not an MPFS system controller\n");
+ return ERR_PTR(-ENODEV);
+
+err_bad_device:
+ dev_dbg(dev, "MPFS system controller found but could not register as a sub device\n");
+ return ERR_PTR(-EPROBE_DEFER);
+}
+EXPORT_SYMBOL(mpfs_sys_controller_get);
+
+static struct platform_driver mpfs_sys_controller_driver = {
+ .driver = {
+ .name = "mpfs-sys-controller",
+ .of_match_table = mpfs_sys_controller_of_match,
+ },
+ .probe = mpfs_sys_controller_probe,
+ .remove = mpfs_sys_controller_remove,
+};
+module_platform_driver(mpfs_sys_controller_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
+MODULE_DESCRIPTION("MPFS system controller driver");
diff --git a/drivers/soc/nuvoton/Kconfig b/drivers/soc/nuvoton/Kconfig
new file mode 100644
index 0000000000..df46182088
--- /dev/null
+++ b/drivers/soc/nuvoton/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+menuconfig WPCM450_SOC
+ tristate "Nuvoton WPCM450 SoC driver"
+ default y if ARCH_WPCM450
+ select SOC_BUS
+ help
+ Say Y here to compile the SoC information driver for Nuvoton
+ WPCM450 SoCs.
+
+ This driver provides information such as the SoC model and
+ revision.
diff --git a/drivers/soc/nuvoton/Makefile b/drivers/soc/nuvoton/Makefile
new file mode 100644
index 0000000000..e30317b4e8
--- /dev/null
+++ b/drivers/soc/nuvoton/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_WPCM450_SOC) += wpcm450-soc.o
diff --git a/drivers/soc/nuvoton/wpcm450-soc.c b/drivers/soc/nuvoton/wpcm450-soc.c
new file mode 100644
index 0000000000..c5e0d11c38
--- /dev/null
+++ b/drivers/soc/nuvoton/wpcm450-soc.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Nuvoton WPCM450 SoC Identification
+ *
+ * Copyright (C) 2022 Jonathan Neuschäfer
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+
+#define GCR_PDID 0
+#define PDID_CHIP(x) ((x) & 0x00ffffff)
+#define CHIP_WPCM450 0x926450
+#define PDID_REV(x) ((x) >> 24)
+
+struct revision {
+ u8 number;
+ const char *name;
+};
+
+static const struct revision revisions[] __initconst = {
+ { 0x00, "Z1" },
+ { 0x03, "Z2" },
+ { 0x04, "Z21" },
+ { 0x08, "A1" },
+ { 0x09, "A2" },
+ { 0x0a, "A3" },
+ {}
+};
+
+static const char * __init get_revision(unsigned int rev)
+{
+ int i;
+
+ for (i = 0; revisions[i].name; i++)
+ if (revisions[i].number == rev)
+ return revisions[i].name;
+ return NULL;
+}
+
+static struct soc_device_attribute *wpcm450_attr;
+static struct soc_device *wpcm450_soc;
+
+static int __init wpcm450_soc_init(void)
+{
+ struct soc_device_attribute *attr;
+ struct soc_device *soc;
+ const char *revision;
+ struct regmap *gcr;
+ u32 pdid;
+ int ret;
+
+ if (!of_machine_is_compatible("nuvoton,wpcm450"))
+ return 0;
+
+ gcr = syscon_regmap_lookup_by_compatible("nuvoton,wpcm450-gcr");
+ if (IS_ERR(gcr))
+ return PTR_ERR(gcr);
+ ret = regmap_read(gcr, GCR_PDID, &pdid);
+ if (ret)
+ return ret;
+
+ if (PDID_CHIP(pdid) != CHIP_WPCM450) {
+ pr_warn("Unknown chip ID in GCR.PDID: 0x%06x\n", PDID_CHIP(pdid));
+ return -ENODEV;
+ }
+
+ revision = get_revision(PDID_REV(pdid));
+ if (!revision) {
+ pr_warn("Unknown chip revision in GCR.PDID: 0x%02x\n", PDID_REV(pdid));
+ return -ENODEV;
+ }
+
+ attr = kzalloc(sizeof(*attr), GFP_KERNEL);
+ if (!attr)
+ return -ENOMEM;
+
+ attr->family = "Nuvoton NPCM";
+ attr->soc_id = "WPCM450";
+ attr->revision = revision;
+ soc = soc_device_register(attr);
+ if (IS_ERR(soc)) {
+ kfree(attr);
+ pr_warn("Could not register SoC device\n");
+ return PTR_ERR(soc);
+ }
+
+ wpcm450_soc = soc;
+ wpcm450_attr = attr;
+ return 0;
+}
+module_init(wpcm450_soc_init);
+
+static void __exit wpcm450_soc_exit(void)
+{
+ if (wpcm450_soc) {
+ soc_device_unregister(wpcm450_soc);
+ wpcm450_soc = NULL;
+ kfree(wpcm450_attr);
+ }
+}
+module_exit(wpcm450_soc_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jonathan Neuschäfer");
+MODULE_DESCRIPTION("Nuvoton WPCM450 SoC Identification driver");
diff --git a/drivers/soc/pxa/Kconfig b/drivers/soc/pxa/Kconfig
new file mode 100644
index 0000000000..c5c265aa4f
--- /dev/null
+++ b/drivers/soc/pxa/Kconfig
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config PLAT_PXA
+ bool
+
+config PXA_SSP
+ tristate
+ help
+ Enable support for PXA2xx SSP ports
diff --git a/drivers/soc/pxa/Makefile b/drivers/soc/pxa/Makefile
new file mode 100644
index 0000000000..413deceddb
--- /dev/null
+++ b/drivers/soc/pxa/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_PXA3xx) += mfp.o
+obj-$(CONFIG_ARCH_MMP) += mfp.o
+
+obj-$(CONFIG_PXA_SSP) += ssp.o
diff --git a/drivers/soc/pxa/mfp.c b/drivers/soc/pxa/mfp.c
new file mode 100644
index 0000000000..6220ba321c
--- /dev/null
+++ b/drivers/soc/pxa/mfp.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/arch/arm/plat-pxa/mfp.c
+ *
+ * Multi-Function Pin Support
+ *
+ * Copyright (C) 2007 Marvell International Ltd.
+ *
+ * 2007-08-21: eric miao <eric.miao@marvell.com>
+ * initial version
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+
+#include <linux/soc/pxa/mfp.h>
+
+#define MFPR_SIZE (PAGE_SIZE)
+
+/* MFPR register bit definitions */
+#define MFPR_PULL_SEL (0x1 << 15)
+#define MFPR_PULLUP_EN (0x1 << 14)
+#define MFPR_PULLDOWN_EN (0x1 << 13)
+#define MFPR_SLEEP_SEL (0x1 << 9)
+#define MFPR_SLEEP_OE_N (0x1 << 7)
+#define MFPR_EDGE_CLEAR (0x1 << 6)
+#define MFPR_EDGE_FALL_EN (0x1 << 5)
+#define MFPR_EDGE_RISE_EN (0x1 << 4)
+
+#define MFPR_SLEEP_DATA(x) ((x) << 8)
+#define MFPR_DRIVE(x) (((x) & 0x7) << 10)
+#define MFPR_AF_SEL(x) (((x) & 0x7) << 0)
+
+#define MFPR_EDGE_NONE (0)
+#define MFPR_EDGE_RISE (MFPR_EDGE_RISE_EN)
+#define MFPR_EDGE_FALL (MFPR_EDGE_FALL_EN)
+#define MFPR_EDGE_BOTH (MFPR_EDGE_RISE | MFPR_EDGE_FALL)
+
+/*
+ * Table that determines the low power mode outputs, with the actual settings
+ * used in parentheses for don't-care values. Except for the float output,
+ * the configured driven and pulled levels match, so if there is a need for
+ * non-LPM pulled output, the same configuration could probably be used.
+ *
+ * Output value sleep_oe_n sleep_data pullup_en pulldown_en pull_sel
+ * (bit 7) (bit 8) (bit 14) (bit 13) (bit 15)
+ *
+ * Input 0 X(0) X(0) X(0) 0
+ * Drive 0 0 0 0 X(1) 0
+ * Drive 1 0 1 X(1) 0 0
+ * Pull hi (1) 1 X(1) 1 0 0
+ * Pull lo (0) 1 X(0) 0 1 0
+ * Z (float) 1 X(0) 0 0 0
+ */
+#define MFPR_LPM_INPUT (0)
+#define MFPR_LPM_DRIVE_LOW (MFPR_SLEEP_DATA(0) | MFPR_PULLDOWN_EN)
+#define MFPR_LPM_DRIVE_HIGH (MFPR_SLEEP_DATA(1) | MFPR_PULLUP_EN)
+#define MFPR_LPM_PULL_LOW (MFPR_LPM_DRIVE_LOW | MFPR_SLEEP_OE_N)
+#define MFPR_LPM_PULL_HIGH (MFPR_LPM_DRIVE_HIGH | MFPR_SLEEP_OE_N)
+#define MFPR_LPM_FLOAT (MFPR_SLEEP_OE_N)
+#define MFPR_LPM_MASK (0xe080)
+
+/*
+ * The pullup and pulldown state of the MFP pin at run mode is by default
+ * determined by the selected alternate function. In case that some buggy
+ * devices need to override this default behavior, the definitions below
+ * indicates the setting of corresponding MFPR bits
+ *
+ * Definition pull_sel pullup_en pulldown_en
+ * MFPR_PULL_NONE 0 0 0
+ * MFPR_PULL_LOW 1 0 1
+ * MFPR_PULL_HIGH 1 1 0
+ * MFPR_PULL_BOTH 1 1 1
+ * MFPR_PULL_FLOAT 1 0 0
+ */
+#define MFPR_PULL_NONE (0)
+#define MFPR_PULL_LOW (MFPR_PULL_SEL | MFPR_PULLDOWN_EN)
+#define MFPR_PULL_BOTH (MFPR_PULL_LOW | MFPR_PULLUP_EN)
+#define MFPR_PULL_HIGH (MFPR_PULL_SEL | MFPR_PULLUP_EN)
+#define MFPR_PULL_FLOAT (MFPR_PULL_SEL)
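A quick sanity check of the bit layout above: MFPR_LPM_PULL_HIGH works out to BIT(8) | BIT(14) | BIT(7) = 0x4180, and MFPR_PULL_LOW to BIT(15) | BIT(13) = 0xa000. A small stand-alone sketch of that arithmetic, with the values mirrored from the definitions above:

    #include <stdio.h>

    /* mirrored from the MFPR_* definitions above */
    #define MFPR_PULL_SEL       (0x1 << 15)
    #define MFPR_PULLUP_EN      (0x1 << 14)
    #define MFPR_PULLDOWN_EN    (0x1 << 13)
    #define MFPR_SLEEP_OE_N     (0x1 << 7)
    #define MFPR_SLEEP_DATA(x)  ((x) << 8)

    int main(void)
    {
        unsigned int lpm_pull_high = MFPR_SLEEP_DATA(1) | MFPR_PULLUP_EN | MFPR_SLEEP_OE_N;
        unsigned int pull_low = MFPR_PULL_SEL | MFPR_PULLDOWN_EN;

        printf("MFPR_LPM_PULL_HIGH = 0x%04x\n", lpm_pull_high); /* 0x4180 */
        printf("MFPR_PULL_LOW      = 0x%04x\n", pull_low);      /* 0xa000 */
        return 0;
    }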
+
+/* mfp_spin_lock is used to ensure that MFP register configuration
+ * (most likely a read-modify-write operation) is atomic, and that
+ * mfp_table[] is consistent
+ */
+static DEFINE_SPINLOCK(mfp_spin_lock);
+
+static void __iomem *mfpr_mmio_base;
+
+struct mfp_pin {
+ unsigned long config; /* -1 for not configured */
+ unsigned long mfpr_off; /* MFPRxx Register offset */
+ unsigned long mfpr_run; /* Run-Mode Register Value */
+ unsigned long mfpr_lpm; /* Low Power Mode Register Value */
+};
+
+static struct mfp_pin mfp_table[MFP_PIN_MAX];
+
+/* mapping of MFP_LPM_* definitions to MFPR_LPM_* register bits */
+static const unsigned long mfpr_lpm[] = {
+ MFPR_LPM_INPUT,
+ MFPR_LPM_DRIVE_LOW,
+ MFPR_LPM_DRIVE_HIGH,
+ MFPR_LPM_PULL_LOW,
+ MFPR_LPM_PULL_HIGH,
+ MFPR_LPM_FLOAT,
+ MFPR_LPM_INPUT,
+};
+
+/* mapping of MFP_PULL_* definitions to MFPR_PULL_* register bits */
+static const unsigned long mfpr_pull[] = {
+ MFPR_PULL_NONE,
+ MFPR_PULL_LOW,
+ MFPR_PULL_HIGH,
+ MFPR_PULL_BOTH,
+ MFPR_PULL_FLOAT,
+};
+
+/* mapping of MFP_LPM_EDGE_* definitions to MFPR_EDGE_* register bits */
+static const unsigned long mfpr_edge[] = {
+ MFPR_EDGE_NONE,
+ MFPR_EDGE_RISE,
+ MFPR_EDGE_FALL,
+ MFPR_EDGE_BOTH,
+};
+
+#define mfpr_readl(off) \
+ __raw_readl(mfpr_mmio_base + (off))
+
+#define mfpr_writel(off, val) \
+ __raw_writel(val, mfpr_mmio_base + (off))
+
+#define mfp_configured(p) ((p)->config != -1)
+
+/*
+ * perform a read-back of any valid MFPR register to make sure the
+ * previous writes have completed
+ */
+static unsigned long mfpr_off_readback;
+#define mfpr_sync() (void)__raw_readl(mfpr_mmio_base + mfpr_off_readback)
+
+static inline void __mfp_config_run(struct mfp_pin *p)
+{
+ if (mfp_configured(p))
+ mfpr_writel(p->mfpr_off, p->mfpr_run);
+}
+
+static inline void __mfp_config_lpm(struct mfp_pin *p)
+{
+ if (mfp_configured(p)) {
+ unsigned long mfpr_clr = (p->mfpr_run & ~MFPR_EDGE_BOTH) | MFPR_EDGE_CLEAR;
+ if (mfpr_clr != p->mfpr_run)
+ mfpr_writel(p->mfpr_off, mfpr_clr);
+ if (p->mfpr_lpm != mfpr_clr)
+ mfpr_writel(p->mfpr_off, p->mfpr_lpm);
+ }
+}
+
+void mfp_config(unsigned long *mfp_cfgs, int num)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&mfp_spin_lock, flags);
+
+ for (i = 0; i < num; i++, mfp_cfgs++) {
+ unsigned long tmp, c = *mfp_cfgs;
+ struct mfp_pin *p;
+ int pin, af, drv, lpm, edge, pull;
+
+ pin = MFP_PIN(c);
+ BUG_ON(pin >= MFP_PIN_MAX);
+ p = &mfp_table[pin];
+
+ af = MFP_AF(c);
+ drv = MFP_DS(c);
+ lpm = MFP_LPM_STATE(c);
+ edge = MFP_LPM_EDGE(c);
+ pull = MFP_PULL(c);
+
+ /* run-mode pull settings conflict with the MFPR bits of the
+ * low power mode state, so calculate mfpr_run and mfpr_lpm
+ * individually if pull != MFP_PULL_NONE
+ */
+ tmp = MFPR_AF_SEL(af) | MFPR_DRIVE(drv);
+
+ if (likely(pull == MFP_PULL_NONE)) {
+ p->mfpr_run = tmp | mfpr_lpm[lpm] | mfpr_edge[edge];
+ p->mfpr_lpm = p->mfpr_run;
+ } else {
+ p->mfpr_lpm = tmp | mfpr_lpm[lpm] | mfpr_edge[edge];
+ p->mfpr_run = tmp | mfpr_pull[pull];
+ }
+
+ p->config = c; __mfp_config_run(p);
+ }
+
+ mfpr_sync();
+ spin_unlock_irqrestore(&mfp_spin_lock, flags);
+}
+
+unsigned long mfp_read(int mfp)
+{
+ unsigned long val, flags;
+
+ BUG_ON(mfp < 0 || mfp >= MFP_PIN_MAX);
+
+ spin_lock_irqsave(&mfp_spin_lock, flags);
+ val = mfpr_readl(mfp_table[mfp].mfpr_off);
+ spin_unlock_irqrestore(&mfp_spin_lock, flags);
+
+ return val;
+}
+
+void mfp_write(int mfp, unsigned long val)
+{
+ unsigned long flags;
+
+ BUG_ON(mfp < 0 || mfp >= MFP_PIN_MAX);
+
+ spin_lock_irqsave(&mfp_spin_lock, flags);
+ mfpr_writel(mfp_table[mfp].mfpr_off, val);
+ mfpr_sync();
+ spin_unlock_irqrestore(&mfp_spin_lock, flags);
+}
+
+void __init mfp_init_base(void __iomem *mfpr_base)
+{
+ int i;
+
+ /* initialize the table with default - unconfigured */
+ for (i = 0; i < ARRAY_SIZE(mfp_table); i++)
+ mfp_table[i].config = -1;
+
+ mfpr_mmio_base = mfpr_base;
+}
+
+void __init mfp_init_addr(struct mfp_addr_map *map)
+{
+ struct mfp_addr_map *p;
+ unsigned long offset, flags;
+ int i;
+
+ spin_lock_irqsave(&mfp_spin_lock, flags);
+
+ /* mfp offset for readback */
+ mfpr_off_readback = map[0].offset;
+
+ for (p = map; p->start != MFP_PIN_INVALID; p++) {
+ offset = p->offset;
+ i = p->start;
+
+ do {
+ mfp_table[i].mfpr_off = offset;
+ mfp_table[i].mfpr_run = 0;
+ mfp_table[i].mfpr_lpm = 0;
+ offset += 4; i++;
+ } while ((i <= p->end) && (p->end != -1));
+ }
+
+ spin_unlock_irqrestore(&mfp_spin_lock, flags);
+}
+
+void mfp_config_lpm(void)
+{
+ struct mfp_pin *p = &mfp_table[0];
+ int pin;
+
+ for (pin = 0; pin < ARRAY_SIZE(mfp_table); pin++, p++)
+ __mfp_config_lpm(p);
+}
+
+void mfp_config_run(void)
+{
+ struct mfp_pin *p = &mfp_table[0];
+ int pin;
+
+ for (pin = 0; pin < ARRAY_SIZE(mfp_table); pin++, p++)
+ __mfp_config_run(p);
+}
diff --git a/drivers/soc/pxa/ssp.c b/drivers/soc/pxa/ssp.c
new file mode 100644
index 0000000000..bd029e8382
--- /dev/null
+++ b/drivers/soc/pxa/ssp.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/arch/arm/mach-pxa/ssp.c
+ *
+ * based on linux/arch/arm/mach-sa1100/ssp.c by Russell King
+ *
+ * Copyright (C) 2003 Russell King.
+ * Copyright (C) 2003 Wolfson Microelectronics PLC
+ *
+ * PXA2xx SSP driver. This provides the generic core for simple
+ * IO-based SSP applications and allows easy port setup for DMA access.
+ *
+ * Author: Liam Girdwood <liam.girdwood@wolfsonmicro.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/spi/pxa2xx_spi.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include <asm/irq.h>
+
+static DEFINE_MUTEX(ssp_lock);
+static LIST_HEAD(ssp_list);
+
+struct ssp_device *pxa_ssp_request(int port, const char *label)
+{
+ struct ssp_device *ssp = NULL;
+
+ mutex_lock(&ssp_lock);
+
+ list_for_each_entry(ssp, &ssp_list, node) {
+ if (ssp->port_id == port && ssp->use_count == 0) {
+ ssp->use_count++;
+ ssp->label = label;
+ break;
+ }
+ }
+
+ mutex_unlock(&ssp_lock);
+
+ if (&ssp->node == &ssp_list)
+ return NULL;
+
+ return ssp;
+}
+EXPORT_SYMBOL(pxa_ssp_request);
+
+struct ssp_device *pxa_ssp_request_of(const struct device_node *of_node,
+ const char *label)
+{
+ struct ssp_device *ssp = NULL;
+
+ mutex_lock(&ssp_lock);
+
+ list_for_each_entry(ssp, &ssp_list, node) {
+ if (ssp->of_node == of_node && ssp->use_count == 0) {
+ ssp->use_count++;
+ ssp->label = label;
+ break;
+ }
+ }
+
+ mutex_unlock(&ssp_lock);
+
+ if (&ssp->node == &ssp_list)
+ return NULL;
+
+ return ssp;
+}
+EXPORT_SYMBOL(pxa_ssp_request_of);
+
+void pxa_ssp_free(struct ssp_device *ssp)
+{
+ mutex_lock(&ssp_lock);
+ if (ssp->use_count) {
+ ssp->use_count--;
+ ssp->label = NULL;
+ } else
+ dev_err(ssp->dev, "device already free\n");
+ mutex_unlock(&ssp_lock);
+}
+EXPORT_SYMBOL(pxa_ssp_free);
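A hedged fragment of the request/free pairing these helpers expect from a client driver; the port number and label are illustrative, not taken from this patch:

    struct ssp_device *ssp;

    ssp = pxa_ssp_request(1, "example-client");   /* hypothetical port/label */
    if (!ssp)
        return -EBUSY;   /* no such port, or it is already claimed */

    /* ... program the port via ssp->mmio_base, request ssp->irq ... */

    pxa_ssp_free(ssp);   /* drops the use count so another client can claim the port */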
+
+#ifdef CONFIG_OF
+static const struct of_device_id pxa_ssp_of_ids[] = {
+ { .compatible = "mrvl,pxa25x-ssp", .data = (void *) PXA25x_SSP },
+ { .compatible = "mvrl,pxa25x-nssp", .data = (void *) PXA25x_NSSP },
+ { .compatible = "mrvl,pxa27x-ssp", .data = (void *) PXA27x_SSP },
+ { .compatible = "mrvl,pxa3xx-ssp", .data = (void *) PXA3xx_SSP },
+ { .compatible = "mvrl,pxa168-ssp", .data = (void *) PXA168_SSP },
+ { .compatible = "mrvl,pxa910-ssp", .data = (void *) PXA910_SSP },
+ { .compatible = "mrvl,ce4100-ssp", .data = (void *) CE4100_SSP },
+ { },
+};
+MODULE_DEVICE_TABLE(of, pxa_ssp_of_ids);
+#endif
+
+static int pxa_ssp_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct ssp_device *ssp;
+ struct device *dev = &pdev->dev;
+
+ ssp = devm_kzalloc(dev, sizeof(struct ssp_device), GFP_KERNEL);
+ if (ssp == NULL)
+ return -ENOMEM;
+
+ ssp->dev = dev;
+
+ ssp->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ssp->clk))
+ return PTR_ERR(ssp->clk);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(dev, "no memory resource defined\n");
+ return -ENODEV;
+ }
+
+ res = devm_request_mem_region(dev, res->start, resource_size(res),
+ pdev->name);
+ if (res == NULL) {
+ dev_err(dev, "failed to request memory resource\n");
+ return -EBUSY;
+ }
+
+ ssp->phys_base = res->start;
+
+ ssp->mmio_base = devm_ioremap(dev, res->start, resource_size(res));
+ if (ssp->mmio_base == NULL) {
+ dev_err(dev, "failed to ioremap() registers\n");
+ return -ENODEV;
+ }
+
+ ssp->irq = platform_get_irq(pdev, 0);
+ if (ssp->irq < 0)
+ return -ENODEV;
+
+ if (dev->of_node) {
+ const struct of_device_id *id =
+ of_match_device(of_match_ptr(pxa_ssp_of_ids), dev);
+ ssp->type = (int) id->data;
+ } else {
+ const struct platform_device_id *id =
+ platform_get_device_id(pdev);
+ ssp->type = (int) id->driver_data;
+
+ /* PXA2xx/3xx SSP ports start from 1 while the internal pdev->id
+ * starts from 0, so do a translation here
+ */
+ ssp->port_id = pdev->id + 1;
+ }
+
+ ssp->use_count = 0;
+ ssp->of_node = dev->of_node;
+
+ mutex_lock(&ssp_lock);
+ list_add(&ssp->node, &ssp_list);
+ mutex_unlock(&ssp_lock);
+
+ platform_set_drvdata(pdev, ssp);
+
+ return 0;
+}
+
+static int pxa_ssp_remove(struct platform_device *pdev)
+{
+ struct ssp_device *ssp = platform_get_drvdata(pdev);
+
+ mutex_lock(&ssp_lock);
+ list_del(&ssp->node);
+ mutex_unlock(&ssp_lock);
+
+ return 0;
+}
+
+static const struct platform_device_id ssp_id_table[] = {
+ { "pxa25x-ssp", PXA25x_SSP },
+ { "pxa25x-nssp", PXA25x_NSSP },
+ { "pxa27x-ssp", PXA27x_SSP },
+ { "pxa3xx-ssp", PXA3xx_SSP },
+ { "pxa168-ssp", PXA168_SSP },
+ { "pxa910-ssp", PXA910_SSP },
+ { },
+};
+
+static struct platform_driver pxa_ssp_driver = {
+ .probe = pxa_ssp_probe,
+ .remove = pxa_ssp_remove,
+ .driver = {
+ .name = "pxa2xx-ssp",
+ .of_match_table = of_match_ptr(pxa_ssp_of_ids),
+ },
+ .id_table = ssp_id_table,
+};
+
+static int __init pxa_ssp_init(void)
+{
+ return platform_driver_register(&pxa_ssp_driver);
+}
+
+static void __exit pxa_ssp_exit(void)
+{
+ platform_driver_unregister(&pxa_ssp_driver);
+}
+
+arch_initcall(pxa_ssp_init);
+module_exit(pxa_ssp_exit);
+
+MODULE_DESCRIPTION("PXA SSP driver");
+MODULE_AUTHOR("Liam Girdwood");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
new file mode 100644
index 0000000000..715348869d
--- /dev/null
+++ b/drivers/soc/qcom/Kconfig
@@ -0,0 +1,294 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# QCOM Soc drivers
+#
+menu "Qualcomm SoC drivers"
+
+config QCOM_AOSS_QMP
+ tristate "Qualcomm AOSS Driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on MAILBOX
+ depends on COMMON_CLK && PM
+ select PM_GENERIC_DOMAINS
+ help
+ This driver provides the means of communicating with and controlling
+ the low-power state for resources related to the remoteproc
+ subsystems as well as controlling the debug clocks exposed by the Always On
+ Subsystem (AOSS) using Qualcomm Messaging Protocol (QMP).
+
+config QCOM_COMMAND_DB
+ tristate "Qualcomm Command DB"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on OF_RESERVED_MEM
+ help
+ Command DB queries shared memory by key string for shared system
+ resources. Platform drivers that need to set the state of a shared
+ resource on an RPM-hardened platform must use this database to get
+ the SoC-specific identifier and information for the shared resources.
+
+config QCOM_CPR
+ tristate "QCOM Core Power Reduction (CPR) support"
+ depends on ARCH_QCOM && HAS_IOMEM
+ select PM_OPP
+ select REGMAP
+ help
+ Say Y here to enable support for the CPR hardware found on Qualcomm
+ SoCs like QCS404.
+
+ This driver populates the CPU OPP tables and makes adjustments to the
+ tables based on feedback from the CPR hardware. If you want to do
+ CPU frequency scaling, say Y here.
+
+ To compile this driver as a module, choose M here: the module will
+ be called qcom-cpr.
+
+config QCOM_GENI_SE
+ tristate "QCOM GENI Serial Engine Driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ help
+ This driver is used to manage the Generic Interface (GENI) firmware-based
+ Qualcomm Technologies, Inc. Universal Peripheral (QUP) Wrapper. This
+ driver is also used to manage the common aspects of multiple Serial
+ Engines present in the QUP.
+
+config QCOM_GSBI
+ tristate "QCOM General Serial Bus Interface"
+ depends on ARCH_QCOM || COMPILE_TEST
+ select MFD_SYSCON
+ help
+ Say y here to enable GSBI support. The GSBI provides control
+ functions for connecting the underlying serial UART, SPI, and I2C
+ devices to the output pins.
+
+config QCOM_LLCC
+ tristate "Qualcomm Technologies, Inc. LLCC driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+ Qualcomm Technologies, Inc. platform-specific
+ Last Level Cache Controller (LLCC) driver for platforms such as
+ SDM845. This provides interfaces to clients that use the LLCC.
+ Say yes here to enable the LLCC slice driver.
+
+config QCOM_KRYO_L2_ACCESSORS
+ bool
+ depends on (ARCH_QCOM || COMPILE_TEST) && ARM64
+
+config QCOM_MDT_LOADER
+ tristate
+ select QCOM_SCM
+
+config QCOM_OCMEM
+ tristate "Qualcomm On Chip Memory (OCMEM) driver"
+ depends on ARCH_QCOM
+ select QCOM_SCM
+ help
+ The On Chip Memory (OCMEM) allocator allows various clients to
+ allocate memory from OCMEM based on performance, latency and power
+ requirements. This is typically used by the GPU, camera/video, and
+ audio components on some Snapdragon SoCs.
+
+config QCOM_PDR_HELPERS
+ tristate
+ select QCOM_QMI_HELPERS
+ depends on NET
+
+config QCOM_PMIC_GLINK
+ tristate "Qualcomm PMIC GLINK driver"
+ depends on RPMSG
+ depends on TYPEC
+ depends on DRM
+ depends on NET
+ depends on OF
+ select AUXILIARY_BUS
+ select QCOM_PDR_HELPERS
+ help
+ The Qualcomm PMIC GLINK driver provides access, over GLINK, to the
+ USB and battery firmware running on one of the coprocessors in
+ several modern Qualcomm platforms.
+
+ Say yes here to support USB-C and battery status on modern Qualcomm
+ platforms.
+
+config QCOM_QMI_HELPERS
+ tristate
+ depends on NET
+
+config QCOM_RAMP_CTRL
+ tristate "Qualcomm Ramp Controller driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ help
+ The Ramp Controller is used to program the sequence ID for pulse
+ swallowing, enable sequence and link sequence IDs for the CPU
+ cores on some Qualcomm SoCs.
+ Say y here to enable support for the ramp controller.
+
+config QCOM_RMTFS_MEM
+ tristate "Qualcomm Remote Filesystem memory driver"
+ depends on ARCH_QCOM
+ select QCOM_SCM
+ help
+ The Qualcomm remote filesystem memory driver is used for allocating
+ and exposing regions of shared memory with remote processors for the
+ purpose of exchanging sector-data between the remote filesystem
+ service and its clients.
+
+ Say y here if you intend to boot the modem remoteproc.
+
+config QCOM_RPM_MASTER_STATS
+ tristate "Qualcomm RPM Master stats"
+ depends on ARCH_QCOM || COMPILE_TEST
+ help
+ The RPM Master sleep stats driver provides detailed per-subsystem
+ sleep/wake data, read from the RPM message RAM. It can be used to
+ assess whether all the low-power modes available are entered as
+ expected or to check which part of the SoC prevents it from sleeping.
+
+ Say y here if you intend to debug or monitor platform sleep.
+
+config QCOM_RPMH
+ tristate "Qualcomm RPM-Hardened (RPMH) Communication"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on (QCOM_COMMAND_DB || !QCOM_COMMAND_DB)
+ help
+ Support for communication with the hardened-RPM blocks in
+ Qualcomm Technologies Inc (QTI) SoCs. RPMH communication uses an
+ internal bus to transmit state requests for shared resources. A set
+ of hardware components aggregate requests for these resources and
+ help apply the aggregated state on the resource.
+
+config QCOM_RPMHPD
+ tristate "Qualcomm RPMh Power domain driver"
+ depends on QCOM_RPMH && QCOM_COMMAND_DB
+ help
+ QCOM RPMh Power domain driver to support power-domains with
+ performance states. The driver communicates a performance state
+ value to RPMh which then translates it into corresponding voltage
+ for the voltage rail.
+
+config QCOM_RPMPD
+ tristate "Qualcomm RPM Power domain driver"
+ depends on PM && OF
+ depends on QCOM_SMD_RPM
+ select PM_GENERIC_DOMAINS
+ select PM_GENERIC_DOMAINS_OF
+ help
+ QCOM RPM Power domain driver to support power-domains with
+ performance states. The driver communicates a performance state
+ value to RPM which then translates it into corresponding voltage
+ for the voltage rail.
+
+config QCOM_SMEM
+ tristate "Qualcomm Shared Memory Manager (SMEM)"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on HWSPINLOCK
+ help
+ Say y here to enable support for the Qualcomm Shared Memory Manager.
+ The driver provides an interface to items in a heap shared among all
+ processors in a Qualcomm platform.
+
+config QCOM_SMD_RPM
+ tristate "Qualcomm Resource Power Manager (RPM) over SMD"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on RPMSG
+ depends on RPMSG_QCOM_SMD || RPMSG_QCOM_SMD=n
+ help
+ If you say yes to this option, support will be included for the
+ Resource Power Manager system found in the Qualcomm 8974 based
+ devices.
+
+ This is required to access many regulators, clocks and bus
+ frequencies controlled by the RPM on these devices.
+
+ Say M here if you want to include support for the Qualcomm RPM as a
+ module. This will build a module called "qcom-smd-rpm".
+
+config QCOM_SMEM_STATE
+ bool
+
+config QCOM_SMP2P
+ tristate "Qualcomm Shared Memory Point to Point support"
+ depends on MAILBOX
+ depends on QCOM_SMEM
+ select QCOM_SMEM_STATE
+ select IRQ_DOMAIN
+ help
+ Say yes here to support the Qualcomm Shared Memory Point to Point
+ protocol.
+
+config QCOM_SMSM
+ tristate "Qualcomm Shared Memory State Machine"
+ depends on QCOM_SMEM
+ select QCOM_SMEM_STATE
+ select IRQ_DOMAIN
+ help
+ Say yes here to support the Qualcomm Shared Memory State Machine.
+ The state machine is represented by bits in shared memory.
+
+config QCOM_SOCINFO
+ tristate "Qualcomm socinfo driver"
+ depends on QCOM_SMEM
+ select SOC_BUS
+ help
+ Say yes here to support the Qualcomm socinfo driver, providing
+ information about the SoC to user space.
+
+config QCOM_SPM
+ tristate "Qualcomm Subsystem Power Manager (SPM)"
+ depends on ARCH_QCOM || COMPILE_TEST
+ select QCOM_SCM
+ help
+ Enable the support for the Qualcomm Subsystem Power Manager, used
+ to manage cores, L2 low power modes and to configure the internal
+ Adaptive Voltage Scaler parameters, where supported.
+
+config QCOM_STATS
+ tristate "Qualcomm Technologies, Inc. (QTI) Sleep stats driver"
+ depends on (ARCH_QCOM && DEBUG_FS) || COMPILE_TEST
+ depends on QCOM_SMEM
+ help
+ Qualcomm Technologies, Inc. (QTI) Sleep stats driver to read
+ statistics about various SoC-level low-power modes from the shared
+ memory exported by the remote processor and expose them through a
+ debugfs interface.
+
+config QCOM_WCNSS_CTRL
+ tristate "Qualcomm WCNSS control driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on RPMSG
+ help
+ Client driver for the WCNSS_CTRL SMD channel, used to download nv
+ firmware to a newly booted WCNSS chip.
+
+config QCOM_APR
+ tristate "Qualcomm APR/GPR Bus (Asynchronous/Generic Packet Router)"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on RPMSG
+ depends on NET
+ select QCOM_PDR_HELPERS
+ help
+ Enable APR IPC protocol support between the
+ application processor and the QDSP6. APR is
+ used by the audio driver to configure the QDSP6
+ ASM, ADM and AFE modules.
+
+config QCOM_ICC_BWMON
+ tristate "QCOM Interconnect Bandwidth Monitor driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ select PM_OPP
+ select REGMAP_MMIO
+ help
+ Sets up a driver that monitors bandwidth on various interconnects and,
+ based on the measurements, votes for interconnect bandwidth, adjusting
+ interconnect speed to the current demand.
+ The current implementation supports BWMON v4, used for example on
+ SDM845 to measure bandwidth between the CPU (gladiator_noc) and the
+ Last Level Cache (memnoc). Using this BWMON allows some of the fixed
+ bandwidth votes to be removed from cpufreq (CPU nodes), achieving high
+ memory throughput even at lower CPU frequencies.
+
+config QCOM_INLINE_CRYPTO_ENGINE
+ tristate
+ select QCOM_SCM
+
+endmenu
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
new file mode 100644
index 0000000000..bbca2e1e55
--- /dev/null
+++ b/drivers/soc/qcom/Makefile
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0
+CFLAGS_rpmh-rsc.o := -I$(src)
+obj-$(CONFIG_QCOM_AOSS_QMP) += qcom_aoss.o
+obj-$(CONFIG_QCOM_GENI_SE) += qcom-geni-se.o
+obj-$(CONFIG_QCOM_COMMAND_DB) += cmd-db.o
+obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
+obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o
+obj-$(CONFIG_QCOM_OCMEM) += ocmem.o
+obj-$(CONFIG_QCOM_PDR_HELPERS) += pdr_interface.o
+obj-$(CONFIG_QCOM_PMIC_GLINK) += pmic_glink.o
+obj-$(CONFIG_QCOM_PMIC_GLINK) += pmic_glink_altmode.o
+obj-$(CONFIG_QCOM_QMI_HELPERS) += qmi_helpers.o
+qmi_helpers-y += qmi_encdec.o qmi_interface.o
+obj-$(CONFIG_QCOM_RAMP_CTRL) += ramp_controller.o
+obj-$(CONFIG_QCOM_RMTFS_MEM) += rmtfs_mem.o
+obj-$(CONFIG_QCOM_RPM_MASTER_STATS) += rpm_master_stats.o
+obj-$(CONFIG_QCOM_RPMH) += qcom_rpmh.o
+qcom_rpmh-y += rpmh-rsc.o
+qcom_rpmh-y += rpmh.o
+obj-$(CONFIG_QCOM_SMD_RPM) += rpm-proc.o smd-rpm.o
+obj-$(CONFIG_QCOM_SMEM) += smem.o
+obj-$(CONFIG_QCOM_SMEM_STATE) += smem_state.o
+obj-$(CONFIG_QCOM_SMP2P) += smp2p.o
+obj-$(CONFIG_QCOM_SMSM) += smsm.o
+obj-$(CONFIG_QCOM_SOCINFO) += socinfo.o
+obj-$(CONFIG_QCOM_SPM) += spm.o
+obj-$(CONFIG_QCOM_STATS) += qcom_stats.o
+obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o
+obj-$(CONFIG_QCOM_APR) += apr.o
+obj-$(CONFIG_QCOM_LLCC) += llcc-qcom.o
+obj-$(CONFIG_QCOM_KRYO_L2_ACCESSORS) += kryo-l2-accessors.o
+obj-$(CONFIG_QCOM_ICC_BWMON) += icc-bwmon.o
+qcom_ice-objs += ice.o
+obj-$(CONFIG_QCOM_INLINE_CRYPTO_ENGINE) += qcom_ice.o
diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c
new file mode 100644
index 0000000000..30f81d6d9d
--- /dev/null
+++ b/drivers/soc/qcom/apr.c
@@ -0,0 +1,736 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+// Copyright (c) 2018, Linaro Limited
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/idr.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/of_device.h>
+#include <linux/soc/qcom/apr.h>
+#include <linux/soc/qcom/pdr.h>
+#include <linux/rpmsg.h>
+#include <linux/of.h>
+
+enum {
+ PR_TYPE_APR = 0,
+ PR_TYPE_GPR,
+};
+
+/* Arbitrary range of dynamic port values that does not collide with the static module IDs */
+#define GPR_DYNAMIC_PORT_START 0x10000000
+#define GPR_DYNAMIC_PORT_END 0x20000000
+
+struct packet_router {
+ struct rpmsg_endpoint *ch;
+ struct device *dev;
+ spinlock_t svcs_lock;
+ spinlock_t rx_lock;
+ struct idr svcs_idr;
+ int dest_domain_id;
+ int type;
+ struct pdr_handle *pdr;
+ struct workqueue_struct *rxwq;
+ struct work_struct rx_work;
+ struct list_head rx_list;
+};
+
+struct apr_rx_buf {
+ struct list_head node;
+ int len;
+ uint8_t buf[];
+};
+
+/**
+ * apr_send_pkt() - Send an APR message from an APR device
+ *
+ * @adev: Pointer to a previously registered apr device.
+ * @pkt: Pointer to the apr packet to send
+ *
+ * Return: packet size on success, a negative error code otherwise.
+ */
+int apr_send_pkt(struct apr_device *adev, struct apr_pkt *pkt)
+{
+ struct packet_router *apr = dev_get_drvdata(adev->dev.parent);
+ struct apr_hdr *hdr;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&adev->svc.lock, flags);
+
+ hdr = &pkt->hdr;
+ hdr->src_domain = APR_DOMAIN_APPS;
+ hdr->src_svc = adev->svc.id;
+ hdr->dest_domain = adev->domain_id;
+ hdr->dest_svc = adev->svc.id;
+
+ ret = rpmsg_trysend(apr->ch, pkt, hdr->pkt_size);
+ spin_unlock_irqrestore(&adev->svc.lock, flags);
+
+ return ret ? ret : hdr->pkt_size;
+}
+EXPORT_SYMBOL_GPL(apr_send_pkt);
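A hedged fragment showing the return convention documented above, from a hypothetical client on the APR bus; the packet is assumed to be fully populated by the caller:

    static int example_client_send(struct apr_device *adev, struct apr_pkt *pkt)
    {
        int ret = apr_send_pkt(adev, pkt);

        if (ret < 0)
            return ret;   /* rpmsg_trysend() failed */

        /* on success, ret is the number of bytes queued (pkt->hdr.pkt_size) */
        return 0;
    }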
+
+void gpr_free_port(gpr_port_t *port)
+{
+ struct packet_router *gpr = port->pr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gpr->svcs_lock, flags);
+ idr_remove(&gpr->svcs_idr, port->id);
+ spin_unlock_irqrestore(&gpr->svcs_lock, flags);
+
+ kfree(port);
+}
+EXPORT_SYMBOL_GPL(gpr_free_port);
+
+gpr_port_t *gpr_alloc_port(struct apr_device *gdev, struct device *dev,
+ gpr_port_cb cb, void *priv)
+{
+ struct packet_router *pr = dev_get_drvdata(gdev->dev.parent);
+ gpr_port_t *port;
+ struct pkt_router_svc *svc;
+ int id;
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return ERR_PTR(-ENOMEM);
+
+ svc = port;
+ svc->callback = cb;
+ svc->pr = pr;
+ svc->priv = priv;
+ svc->dev = dev;
+ spin_lock_init(&svc->lock);
+
+ spin_lock(&pr->svcs_lock);
+ id = idr_alloc_cyclic(&pr->svcs_idr, svc, GPR_DYNAMIC_PORT_START,
+ GPR_DYNAMIC_PORT_END, GFP_ATOMIC);
+ if (id < 0) {
+ dev_err(dev, "Unable to allocate dynamic GPR src port\n");
+ kfree(port);
+ spin_unlock(&pr->svcs_lock);
+ return ERR_PTR(id);
+ }
+
+ svc->id = id;
+ spin_unlock(&pr->svcs_lock);
+
+ return port;
+}
+EXPORT_SYMBOL_GPL(gpr_alloc_port);
+
+static int pkt_router_send_svc_pkt(struct pkt_router_svc *svc, struct gpr_pkt *pkt)
+{
+ struct packet_router *pr = svc->pr;
+ struct gpr_hdr *hdr;
+ unsigned long flags;
+ int ret;
+
+ hdr = &pkt->hdr;
+
+ spin_lock_irqsave(&svc->lock, flags);
+ ret = rpmsg_trysend(pr->ch, pkt, hdr->pkt_size);
+ spin_unlock_irqrestore(&svc->lock, flags);
+
+ return ret ? ret : hdr->pkt_size;
+}
+
+int gpr_send_pkt(struct apr_device *gdev, struct gpr_pkt *pkt)
+{
+ return pkt_router_send_svc_pkt(&gdev->svc, pkt);
+}
+EXPORT_SYMBOL_GPL(gpr_send_pkt);
+
+int gpr_send_port_pkt(gpr_port_t *port, struct gpr_pkt *pkt)
+{
+ return pkt_router_send_svc_pkt(port, pkt);
+}
+EXPORT_SYMBOL_GPL(gpr_send_port_pkt);
+
+static void apr_dev_release(struct device *dev)
+{
+ struct apr_device *adev = to_apr_device(dev);
+
+ kfree(adev);
+}
+
+static int apr_callback(struct rpmsg_device *rpdev, void *buf,
+ int len, void *priv, u32 addr)
+{
+ struct packet_router *apr = dev_get_drvdata(&rpdev->dev);
+ struct apr_rx_buf *abuf;
+ unsigned long flags;
+
+ if (len <= APR_HDR_SIZE) {
+ dev_err(apr->dev, "APR: Improper apr pkt received:%p %d\n",
+ buf, len);
+ return -EINVAL;
+ }
+
+ abuf = kzalloc(sizeof(*abuf) + len, GFP_ATOMIC);
+ if (!abuf)
+ return -ENOMEM;
+
+ abuf->len = len;
+ memcpy(abuf->buf, buf, len);
+
+ spin_lock_irqsave(&apr->rx_lock, flags);
+ list_add_tail(&abuf->node, &apr->rx_list);
+ spin_unlock_irqrestore(&apr->rx_lock, flags);
+
+ queue_work(apr->rxwq, &apr->rx_work);
+
+ return 0;
+}
+
+static int apr_do_rx_callback(struct packet_router *apr, struct apr_rx_buf *abuf)
+{
+ uint16_t hdr_size, msg_type, ver, svc_id;
+ struct pkt_router_svc *svc;
+ struct apr_device *adev;
+ struct apr_driver *adrv = NULL;
+ struct apr_resp_pkt resp;
+ struct apr_hdr *hdr;
+ unsigned long flags;
+ void *buf = abuf->buf;
+ int len = abuf->len;
+
+ hdr = buf;
+ ver = APR_HDR_FIELD_VER(hdr->hdr_field);
+ if (ver > APR_PKT_VER + 1)
+ return -EINVAL;
+
+ hdr_size = APR_HDR_FIELD_SIZE_BYTES(hdr->hdr_field);
+ if (hdr_size < APR_HDR_SIZE) {
+ dev_err(apr->dev, "APR: Wrong hdr size:%d\n", hdr_size);
+ return -EINVAL;
+ }
+
+ if (hdr->pkt_size < APR_HDR_SIZE || hdr->pkt_size != len) {
+ dev_err(apr->dev, "APR: Wrong packet size\n");
+ return -EINVAL;
+ }
+
+ msg_type = APR_HDR_FIELD_MT(hdr->hdr_field);
+ if (msg_type >= APR_MSG_TYPE_MAX) {
+ dev_err(apr->dev, "APR: Wrong message type: %d\n", msg_type);
+ return -EINVAL;
+ }
+
+ if (hdr->src_domain >= APR_DOMAIN_MAX ||
+ hdr->dest_domain >= APR_DOMAIN_MAX ||
+ hdr->src_svc >= APR_SVC_MAX ||
+ hdr->dest_svc >= APR_SVC_MAX) {
+ dev_err(apr->dev, "APR: Wrong APR header\n");
+ return -EINVAL;
+ }
+
+ svc_id = hdr->dest_svc;
+ spin_lock_irqsave(&apr->svcs_lock, flags);
+ svc = idr_find(&apr->svcs_idr, svc_id);
+ if (svc && svc->dev->driver) {
+ adev = svc_to_apr_device(svc);
+ adrv = to_apr_driver(adev->dev.driver);
+ }
+ spin_unlock_irqrestore(&apr->svcs_lock, flags);
+
+ if (!adrv || !adev) {
+ dev_err(apr->dev, "APR: service is not registered (%d)\n",
+ svc_id);
+ return -EINVAL;
+ }
+
+ resp.hdr = *hdr;
+ resp.payload_size = hdr->pkt_size - hdr_size;
+
+ /*
+ * NOTE: hdr_size is not the same as APR_HDR_SIZE, as the remote can include
+ * optional headers in apr_hdr which should be ignored
+ */
+ if (resp.payload_size > 0)
+ resp.payload = buf + hdr_size;
+
+ adrv->callback(adev, &resp);
+
+ return 0;
+}
+
+static int gpr_do_rx_callback(struct packet_router *gpr, struct apr_rx_buf *abuf)
+{
+ uint16_t hdr_size, ver;
+ struct pkt_router_svc *svc = NULL;
+ struct gpr_resp_pkt resp;
+ struct gpr_hdr *hdr;
+ unsigned long flags;
+ void *buf = abuf->buf;
+ int len = abuf->len;
+
+ hdr = buf;
+ ver = hdr->version;
+ if (ver > GPR_PKT_VER + 1)
+ return -EINVAL;
+
+ hdr_size = hdr->hdr_size;
+ if (hdr_size < GPR_PKT_HEADER_WORD_SIZE) {
+ dev_err(gpr->dev, "GPR: Wrong hdr size:%d\n", hdr_size);
+ return -EINVAL;
+ }
+
+ if (hdr->pkt_size < GPR_PKT_HEADER_BYTE_SIZE || hdr->pkt_size != len) {
+ dev_err(gpr->dev, "GPR: Wrong packet size\n");
+ return -EINVAL;
+ }
+
+ resp.hdr = *hdr;
+ resp.payload_size = hdr->pkt_size - (hdr_size * 4);
+
+ /*
+ * NOTE: hdr_size is not the same as GPR_HDR_SIZE, as the remote can include
+ * optional headers in gpr_hdr which should be ignored
+ */
+ if (resp.payload_size > 0)
+ resp.payload = buf + (hdr_size * 4);
+
+
+ spin_lock_irqsave(&gpr->svcs_lock, flags);
+ svc = idr_find(&gpr->svcs_idr, hdr->dest_port);
+ spin_unlock_irqrestore(&gpr->svcs_lock, flags);
+
+ if (!svc) {
+ dev_err(gpr->dev, "GPR: Port(%x) is not registered\n",
+ hdr->dest_port);
+ return -EINVAL;
+ }
+
+ if (svc->callback)
+ svc->callback(&resp, svc->priv, 0);
+
+ return 0;
+}
+
+static void apr_rxwq(struct work_struct *work)
+{
+ struct packet_router *apr = container_of(work, struct packet_router, rx_work);
+ struct apr_rx_buf *abuf, *b;
+ unsigned long flags;
+
+ if (!list_empty(&apr->rx_list)) {
+ list_for_each_entry_safe(abuf, b, &apr->rx_list, node) {
+ switch (apr->type) {
+ case PR_TYPE_APR:
+ apr_do_rx_callback(apr, abuf);
+ break;
+ case PR_TYPE_GPR:
+ gpr_do_rx_callback(apr, abuf);
+ break;
+ default:
+ break;
+ }
+ spin_lock_irqsave(&apr->rx_lock, flags);
+ list_del(&abuf->node);
+ spin_unlock_irqrestore(&apr->rx_lock, flags);
+ kfree(abuf);
+ }
+ }
+}
+
+static int apr_device_match(struct device *dev, struct device_driver *drv)
+{
+ struct apr_device *adev = to_apr_device(dev);
+ struct apr_driver *adrv = to_apr_driver(drv);
+ const struct apr_device_id *id = adrv->id_table;
+
+ /* Attempt an OF style match first */
+ if (of_driver_match_device(dev, drv))
+ return 1;
+
+ if (!id)
+ return 0;
+
+ while (id->domain_id != 0 || id->svc_id != 0) {
+ if (id->domain_id == adev->domain_id &&
+ id->svc_id == adev->svc.id)
+ return 1;
+ id++;
+ }
+
+ return 0;
+}
+
+static int apr_device_probe(struct device *dev)
+{
+ struct apr_device *adev = to_apr_device(dev);
+ struct apr_driver *adrv = to_apr_driver(dev->driver);
+ int ret;
+
+ ret = adrv->probe(adev);
+ if (!ret)
+ adev->svc.callback = adrv->gpr_callback;
+
+ return ret;
+}
+
+static void apr_device_remove(struct device *dev)
+{
+ struct apr_device *adev = to_apr_device(dev);
+ struct apr_driver *adrv = to_apr_driver(dev->driver);
+ struct packet_router *apr = dev_get_drvdata(adev->dev.parent);
+
+ if (adrv->remove)
+ adrv->remove(adev);
+ spin_lock(&apr->svcs_lock);
+ idr_remove(&apr->svcs_idr, adev->svc.id);
+ spin_unlock(&apr->svcs_lock);
+}
+
+static int apr_uevent(const struct device *dev, struct kobj_uevent_env *env)
+{
+ const struct apr_device *adev = to_apr_device(dev);
+ int ret;
+
+ ret = of_device_uevent_modalias(dev, env);
+ if (ret != -ENODEV)
+ return ret;
+
+ return add_uevent_var(env, "MODALIAS=apr:%s", adev->name);
+}
+
+struct bus_type aprbus = {
+ .name = "aprbus",
+ .match = apr_device_match,
+ .probe = apr_device_probe,
+ .uevent = apr_uevent,
+ .remove = apr_device_remove,
+};
+EXPORT_SYMBOL_GPL(aprbus);
+
+static int apr_add_device(struct device *dev, struct device_node *np,
+ u32 svc_id, u32 domain_id)
+{
+ struct packet_router *apr = dev_get_drvdata(dev);
+ struct apr_device *adev = NULL;
+ struct pkt_router_svc *svc;
+ int ret;
+
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return -ENOMEM;
+
+ adev->svc_id = svc_id;
+ svc = &adev->svc;
+
+ svc->id = svc_id;
+ svc->pr = apr;
+ svc->priv = adev;
+ svc->dev = dev;
+ spin_lock_init(&svc->lock);
+
+ adev->domain_id = domain_id;
+
+ if (np)
+ snprintf(adev->name, APR_NAME_SIZE, "%pOFn", np);
+
+ switch (apr->type) {
+ case PR_TYPE_APR:
+ dev_set_name(&adev->dev, "aprsvc:%s:%x:%x", adev->name,
+ domain_id, svc_id);
+ break;
+ case PR_TYPE_GPR:
+ dev_set_name(&adev->dev, "gprsvc:%s:%x:%x", adev->name,
+ domain_id, svc_id);
+ break;
+ default:
+ break;
+ }
+
+ adev->dev.bus = &aprbus;
+ adev->dev.parent = dev;
+ adev->dev.of_node = np;
+ adev->dev.release = apr_dev_release;
+ adev->dev.driver = NULL;
+
+ spin_lock(&apr->svcs_lock);
+ ret = idr_alloc(&apr->svcs_idr, svc, svc_id, svc_id + 1, GFP_ATOMIC);
+ spin_unlock(&apr->svcs_lock);
+ if (ret < 0) {
+ dev_err(dev, "idr_alloc failed: %d\n", ret);
+ goto out;
+ }
+
+ /* Protection domain is optional, it does not exist on older platforms */
+ ret = of_property_read_string_index(np, "qcom,protection-domain",
+ 1, &adev->service_path);
+ if (ret < 0 && ret != -EINVAL) {
+ dev_err(dev, "Failed to read second value of qcom,protection-domain\n");
+ goto out;
+ }
+
+ dev_info(dev, "Adding APR/GPR dev: %s\n", dev_name(&adev->dev));
+
+ ret = device_register(&adev->dev);
+ if (ret) {
+ dev_err(dev, "device_register failed: %d\n", ret);
+ put_device(&adev->dev);
+ }
+
+out:
+ return ret;
+}
+
+static int of_apr_add_pd_lookups(struct device *dev)
+{
+ const char *service_name, *service_path;
+ struct packet_router *apr = dev_get_drvdata(dev);
+ struct device_node *node;
+ struct pdr_service *pds;
+ int ret;
+
+ for_each_child_of_node(dev->of_node, node) {
+ ret = of_property_read_string_index(node, "qcom,protection-domain",
+ 0, &service_name);
+ if (ret < 0)
+ continue;
+
+ ret = of_property_read_string_index(node, "qcom,protection-domain",
+ 1, &service_path);
+ if (ret < 0) {
+ dev_err(dev, "pdr service path missing: %d\n", ret);
+ of_node_put(node);
+ return ret;
+ }
+
+ pds = pdr_add_lookup(apr->pdr, service_name, service_path);
+ if (IS_ERR(pds) && PTR_ERR(pds) != -EALREADY) {
+ dev_err(dev, "pdr add lookup failed: %ld\n", PTR_ERR(pds));
+ of_node_put(node);
+ return PTR_ERR(pds);
+ }
+ }
+
+ return 0;
+}
+
+static void of_register_apr_devices(struct device *dev, const char *svc_path)
+{
+ struct packet_router *apr = dev_get_drvdata(dev);
+ struct device_node *node;
+ const char *service_path;
+ int ret;
+
+ for_each_child_of_node(dev->of_node, node) {
+ u32 svc_id;
+ u32 domain_id;
+
+ /*
+ * This function is called with svc_path NULL during
+ * apr_probe(), in which case we register any apr devices
+ * without a qcom,protection-domain specified.
+ *
+ * Then, as the protection domains become available
+ * (if applicable) this function is again called, but with
+ * svc_path representing the service becoming available. In
+ * this case we register any apr devices with a matching
+ * qcom,protection-domain.
+ */
+
+ ret = of_property_read_string_index(node, "qcom,protection-domain",
+ 1, &service_path);
+ if (svc_path) {
+ /* skip APR services that are PD independent */
+ if (ret)
+ continue;
+
+ /* skip APR services whose PD paths don't match */
+ if (strcmp(service_path, svc_path))
+ continue;
+ } else {
+ /* skip APR services whose PD lookups are registered */
+ if (ret == 0)
+ continue;
+ }
+
+ if (of_property_read_u32(node, "reg", &svc_id))
+ continue;
+
+ domain_id = apr->dest_domain_id;
+
+ if (apr_add_device(dev, node, svc_id, domain_id))
+ dev_err(dev, "Failed to add apr %d svc\n", svc_id);
+ }
+}
+
+static int apr_remove_device(struct device *dev, void *svc_path)
+{
+ struct apr_device *adev = to_apr_device(dev);
+
+ if (svc_path && adev->service_path) {
+ if (!strcmp(adev->service_path, (char *)svc_path))
+ device_unregister(&adev->dev);
+ } else {
+ device_unregister(&adev->dev);
+ }
+
+ return 0;
+}
+
+static void apr_pd_status(int state, char *svc_path, void *priv)
+{
+ struct packet_router *apr = (struct packet_router *)priv;
+
+ switch (state) {
+ case SERVREG_SERVICE_STATE_UP:
+ of_register_apr_devices(apr->dev, svc_path);
+ break;
+ case SERVREG_SERVICE_STATE_DOWN:
+ device_for_each_child(apr->dev, svc_path, apr_remove_device);
+ break;
+ }
+}
+
+static int apr_probe(struct rpmsg_device *rpdev)
+{
+ struct device *dev = &rpdev->dev;
+ struct packet_router *apr;
+ int ret;
+
+ apr = devm_kzalloc(dev, sizeof(*apr), GFP_KERNEL);
+ if (!apr)
+ return -ENOMEM;
+
+ ret = of_property_read_u32(dev->of_node, "qcom,domain", &apr->dest_domain_id);
+
+ if (of_device_is_compatible(dev->of_node, "qcom,gpr")) {
+ apr->type = PR_TYPE_GPR;
+ } else {
+ if (ret) /* try deprecated apr-domain property */
+ ret = of_property_read_u32(dev->of_node, "qcom,apr-domain",
+ &apr->dest_domain_id);
+ apr->type = PR_TYPE_APR;
+ }
+
+ if (ret) {
+ dev_err(dev, "Domain ID not specified in DT\n");
+ return ret;
+ }
+
+ dev_set_drvdata(dev, apr);
+ apr->ch = rpdev->ept;
+ apr->dev = dev;
+ apr->rxwq = create_singlethread_workqueue("qcom_apr_rx");
+ if (!apr->rxwq) {
+ dev_err(apr->dev, "Failed to start Rx WQ\n");
+ return -ENOMEM;
+ }
+ INIT_WORK(&apr->rx_work, apr_rxwq);
+
+ apr->pdr = pdr_handle_alloc(apr_pd_status, apr);
+ if (IS_ERR(apr->pdr)) {
+ dev_err(dev, "Failed to init PDR handle\n");
+ ret = PTR_ERR(apr->pdr);
+ goto destroy_wq;
+ }
+
+ INIT_LIST_HEAD(&apr->rx_list);
+ spin_lock_init(&apr->rx_lock);
+ spin_lock_init(&apr->svcs_lock);
+ idr_init(&apr->svcs_idr);
+
+ ret = of_apr_add_pd_lookups(dev);
+ if (ret)
+ goto handle_release;
+
+ of_register_apr_devices(dev, NULL);
+
+ return 0;
+
+handle_release:
+ pdr_handle_release(apr->pdr);
+destroy_wq:
+ destroy_workqueue(apr->rxwq);
+ return ret;
+}
+
+static void apr_remove(struct rpmsg_device *rpdev)
+{
+ struct packet_router *apr = dev_get_drvdata(&rpdev->dev);
+
+ pdr_handle_release(apr->pdr);
+ device_for_each_child(&rpdev->dev, NULL, apr_remove_device);
+ destroy_workqueue(apr->rxwq);
+}
+
+/*
+ * __apr_driver_register() - Client driver registration with aprbus
+ *
+ * @drv:Client driver to be associated with client-device.
+ * @owner: owning module/driver
+ *
+ * This API will register the client driver with the aprbus.
+ * It is called from the driver's module-init function.
+ */
+int __apr_driver_register(struct apr_driver *drv, struct module *owner)
+{
+ drv->driver.bus = &aprbus;
+ drv->driver.owner = owner;
+
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(__apr_driver_register);
+
+/*
+ * apr_driver_unregister() - Undo effect of apr_driver_register
+ *
+ * @drv: Client driver to be unregistered
+ */
+void apr_driver_unregister(struct apr_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(apr_driver_unregister);
+
+static const struct of_device_id pkt_router_of_match[] = {
+ { .compatible = "qcom,apr"},
+ { .compatible = "qcom,apr-v2"},
+ { .compatible = "qcom,gpr"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, pkt_router_of_match);
+
+static struct rpmsg_driver packet_router_driver = {
+ .probe = apr_probe,
+ .remove = apr_remove,
+ .callback = apr_callback,
+ .drv = {
+ .name = "qcom,apr",
+ .of_match_table = pkt_router_of_match,
+ },
+};
+
+static int __init apr_init(void)
+{
+ int ret;
+
+ ret = bus_register(&aprbus);
+ if (!ret)
+ ret = register_rpmsg_driver(&packet_router_driver);
+ else
+ bus_unregister(&aprbus);
+
+ return ret;
+}
+
+static void __exit apr_exit(void)
+{
+ bus_unregister(&aprbus);
+ unregister_rpmsg_driver(&packet_router_driver);
+}
+
+subsys_initcall(apr_init);
+module_exit(apr_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm APR Bus");
diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c
new file mode 100644
index 0000000000..34c40368d5
--- /dev/null
+++ b/drivers/soc/qcom/cmd-db.c
@@ -0,0 +1,368 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2016-2018, 2020, The Linux Foundation. All rights reserved. */
+
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/types.h>
+
+#include <soc/qcom/cmd-db.h>
+
+#define NUM_PRIORITY 2
+#define MAX_SLV_ID 8
+#define SLAVE_ID_MASK 0x7
+#define SLAVE_ID_SHIFT 16
+
+/**
+ * struct entry_header: header for each entry in cmddb
+ *
+ * @id: resource's identifier
+ * @priority: unused
+ * @addr: the address of the resource
+ * @len: length of the data
+ * @offset: offset from @data_offset to the start of the data
+ */
+struct entry_header {
+ u8 id[8];
+ __le32 priority[NUM_PRIORITY];
+ __le32 addr;
+ __le16 len;
+ __le16 offset;
+};
+
+/**
+ * struct rsc_hdr: resource header information
+ *
+ * @slv_id: id for the resource
+ * @header_offset: entry's header at offset from the end of the cmd_db_header
+ * @data_offset: entry's data at offset from the end of the cmd_db_header
+ * @cnt: number of entries for HW type
+ * @version: MSB is major, LSB is minor
+ * @reserved: reserved for future use.
+ */
+struct rsc_hdr {
+ __le16 slv_id;
+ __le16 header_offset;
+ __le16 data_offset;
+ __le16 cnt;
+ __le16 version;
+ __le16 reserved[3];
+};
+
+/**
+ * struct cmd_db_header: The DB header information
+ *
+ * @version: The cmd db version
+ * @magic: constant expected in the database
+ * @header: array of resources
+ * @checksum: checksum for the header. Unused.
+ * @reserved: reserved memory
+ * @data: driver specific data
+ */
+struct cmd_db_header {
+ __le32 version;
+ u8 magic[4];
+ struct rsc_hdr header[MAX_SLV_ID];
+ __le32 checksum;
+ __le32 reserved;
+ u8 data[];
+};
+
+/**
+ * DOC: Description of the Command DB database.
+ *
+ * At the start of the command DB memory is the cmd_db_header structure.
+ * The cmd_db_header holds the version, checksum and magic key, as well as an
+ * array of headers, one for each slave (struct rsc_hdr). Each h/w-based
+ * accelerator is a 'slave' (shared resource) and has a slave id indicating
+ * the type of accelerator. The rsc_hdr is the header for the individual
+ * slaves of a given type. The entries for each of these slaves begin at
+ * rsc_hdr.header_offset. In addition, each slave may have auxiliary data
+ * that the driver needs. The data for a slave starts at entry_header.offset
+ * from the location pointed to by rsc_hdr.data_offset.
+ *
+ * Drivers have a stringified key to a slave/resource. They can query the slave
+ * information and get the slave id and the auxiliary data and the length of the
+ * data. Using this information, they can format the request to be sent to the
+ * h/w accelerator and request a resource state.
+ */
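A rough sketch of the query flow described above, as a hypothetical client might use it; the resource key "cx.lvl" is only an example:

    static int example_client_lookup(struct device *dev)
    {
        u32 addr;
        int ret;

        ret = cmd_db_ready();
        if (ret)
            return ret;   /* typically -EPROBE_DEFER until the DB is mapped */

        addr = cmd_db_read_addr("cx.lvl");   /* illustrative resource key */
        if (!addr)
            return -ENODEV;   /* key not found in the database */

        dev_dbg(dev, "resource at 0x%x, slave id %d\n",
                addr, cmd_db_read_slave_id("cx.lvl"));
        return 0;
    }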
+
+static const u8 CMD_DB_MAGIC[] = { 0xdb, 0x30, 0x03, 0x0c };
+
+static bool cmd_db_magic_matches(const struct cmd_db_header *header)
+{
+ const u8 *magic = header->magic;
+
+ return memcmp(magic, CMD_DB_MAGIC, ARRAY_SIZE(CMD_DB_MAGIC)) == 0;
+}
+
+static struct cmd_db_header *cmd_db_header;
+
+static inline const void *rsc_to_entry_header(const struct rsc_hdr *hdr)
+{
+ u16 offset = le16_to_cpu(hdr->header_offset);
+
+ return cmd_db_header->data + offset;
+}
+
+static inline void *
+rsc_offset(const struct rsc_hdr *hdr, const struct entry_header *ent)
+{
+ u16 offset = le16_to_cpu(hdr->data_offset);
+ u16 loffset = le16_to_cpu(ent->offset);
+
+ return cmd_db_header->data + offset + loffset;
+}
+
+/**
+ * cmd_db_ready - Indicates if command DB is available
+ *
+ * Return: 0 on success, errno otherwise
+ */
+int cmd_db_ready(void)
+{
+ if (cmd_db_header == NULL)
+ return -EPROBE_DEFER;
+ else if (!cmd_db_magic_matches(cmd_db_header))
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(cmd_db_ready);
+
+static int cmd_db_get_header(const char *id, const struct entry_header **eh,
+ const struct rsc_hdr **rh)
+{
+ const struct rsc_hdr *rsc_hdr;
+ const struct entry_header *ent;
+ int ret, i, j;
+ u8 query[sizeof(ent->id)] __nonstring;
+
+ ret = cmd_db_ready();
+ if (ret)
+ return ret;
+
+ /*
+ * Pad out query string to same length as in DB. NOTE: the output
+ * query string is not necessarily '\0' terminated if it bumps up
+ * against the max size. That's OK and expected.
+ */
+ strncpy(query, id, sizeof(query));
+
+ for (i = 0; i < MAX_SLV_ID; i++) {
+ rsc_hdr = &cmd_db_header->header[i];
+ if (!rsc_hdr->slv_id)
+ break;
+
+ ent = rsc_to_entry_header(rsc_hdr);
+ for (j = 0; j < le16_to_cpu(rsc_hdr->cnt); j++, ent++) {
+ if (memcmp(ent->id, query, sizeof(ent->id)) == 0) {
+ if (eh)
+ *eh = ent;
+ if (rh)
+ *rh = rsc_hdr;
+ return 0;
+ }
+ }
+ }
+
+ return -ENODEV;
+}
+
+/**
+ * cmd_db_read_addr() - Query command db for resource id address.
+ *
+ * @id: resource id to query for address
+ *
+ * Return: resource address on success, 0 on error
+ *
+ * This is used to retrieve resource address based on resource
+ * id.
+ */
+u32 cmd_db_read_addr(const char *id)
+{
+ int ret;
+ const struct entry_header *ent;
+
+ ret = cmd_db_get_header(id, &ent, NULL);
+
+ return ret < 0 ? 0 : le32_to_cpu(ent->addr);
+}
+EXPORT_SYMBOL(cmd_db_read_addr);
+
+/**
+ * cmd_db_read_aux_data() - Query command db for aux data.
+ *
+ * @id: Resource to retrieve AUX Data on
+ * @len: size of data buffer returned
+ *
+ * Return: pointer to data on success, error pointer otherwise
+ */
+const void *cmd_db_read_aux_data(const char *id, size_t *len)
+{
+ int ret;
+ const struct entry_header *ent;
+ const struct rsc_hdr *rsc_hdr;
+
+ ret = cmd_db_get_header(id, &ent, &rsc_hdr);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (len)
+ *len = le16_to_cpu(ent->len);
+
+ return rsc_offset(rsc_hdr, ent);
+}
+EXPORT_SYMBOL(cmd_db_read_aux_data);
+
+/**
+ * cmd_db_read_slave_id - Get the slave ID for a given resource address
+ *
+ * @id: Resource id to query the DB for version
+ *
+ * Return: cmd_db_hw_type enum on success, CMD_DB_HW_INVALID on error
+ */
+enum cmd_db_hw_type cmd_db_read_slave_id(const char *id)
+{
+ int ret;
+ const struct entry_header *ent;
+ u32 addr;
+
+ ret = cmd_db_get_header(id, &ent, NULL);
+ if (ret < 0)
+ return CMD_DB_HW_INVALID;
+
+ addr = le32_to_cpu(ent->addr);
+ return (addr >> SLAVE_ID_SHIFT) & SLAVE_ID_MASK;
+}
+EXPORT_SYMBOL(cmd_db_read_slave_id);
+
+#ifdef CONFIG_DEBUG_FS
+static int cmd_db_debugfs_dump(struct seq_file *seq, void *p)
+{
+ int i, j;
+ const struct rsc_hdr *rsc;
+ const struct entry_header *ent;
+ const char *name;
+ u16 len, version;
+ u8 major, minor;
+
+ seq_puts(seq, "Command DB DUMP\n");
+
+ for (i = 0; i < MAX_SLV_ID; i++) {
+ rsc = &cmd_db_header->header[i];
+ if (!rsc->slv_id)
+ break;
+
+ switch (le16_to_cpu(rsc->slv_id)) {
+ case CMD_DB_HW_ARC:
+ name = "ARC";
+ break;
+ case CMD_DB_HW_VRM:
+ name = "VRM";
+ break;
+ case CMD_DB_HW_BCM:
+ name = "BCM";
+ break;
+ default:
+ name = "Unknown";
+ break;
+ }
+
+ version = le16_to_cpu(rsc->version);
+ major = version >> 8;
+ minor = version;
+
+ seq_printf(seq, "Slave %s (v%u.%u)\n", name, major, minor);
+ seq_puts(seq, "-------------------------\n");
+
+ ent = rsc_to_entry_header(rsc);
+ for (j = 0; j < le16_to_cpu(rsc->cnt); j++, ent++) {
+ seq_printf(seq, "0x%05x: %*pEp", le32_to_cpu(ent->addr),
+ (int)strnlen(ent->id, sizeof(ent->id)), ent->id);
+
+ len = le16_to_cpu(ent->len);
+ if (len) {
+ seq_printf(seq, " [%*ph]",
+ len, rsc_offset(rsc, ent));
+ }
+ seq_putc(seq, '\n');
+ }
+ }
+
+ return 0;
+}
+
+static int open_cmd_db_debugfs(struct inode *inode, struct file *file)
+{
+ return single_open(file, cmd_db_debugfs_dump, inode->i_private);
+}
+#endif
+
+static const struct file_operations cmd_db_debugfs_ops = {
+#ifdef CONFIG_DEBUG_FS
+ .open = open_cmd_db_debugfs,
+#endif
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int cmd_db_dev_probe(struct platform_device *pdev)
+{
+ struct reserved_mem *rmem;
+ int ret = 0;
+
+ rmem = of_reserved_mem_lookup(pdev->dev.of_node);
+ if (!rmem) {
+ dev_err(&pdev->dev, "failed to acquire memory region\n");
+ return -EINVAL;
+ }
+
+ cmd_db_header = memremap(rmem->base, rmem->size, MEMREMAP_WB);
+ if (!cmd_db_header) {
+ ret = -ENOMEM;
+ cmd_db_header = NULL;
+ return ret;
+ }
+
+ if (!cmd_db_magic_matches(cmd_db_header)) {
+ dev_err(&pdev->dev, "Invalid Command DB Magic\n");
+ return -EINVAL;
+ }
+
+ debugfs_create_file("cmd-db", 0400, NULL, NULL, &cmd_db_debugfs_ops);
+
+ device_set_pm_not_required(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id cmd_db_match_table[] = {
+ { .compatible = "qcom,cmd-db" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cmd_db_match_table);
+
+static struct platform_driver cmd_db_dev_driver = {
+ .probe = cmd_db_dev_probe,
+ .driver = {
+ .name = "cmd-db",
+ .of_match_table = cmd_db_match_table,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init cmd_db_device_init(void)
+{
+ return platform_driver_register(&cmd_db_dev_driver);
+}
+arch_initcall(cmd_db_device_init);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Command DB Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/icc-bwmon.c b/drivers/soc/qcom/icc-bwmon.c
new file mode 100644
index 0000000000..adf2d523f1
--- /dev/null
+++ b/drivers/soc/qcom/icc-bwmon.c
@@ -0,0 +1,875 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2021-2022 Linaro Ltd
+ * Author: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>, based on
+ * previous work of Thara Gopinath and msm-4.9 downstream sources.
+ */
+
+#include <linux/err.h>
+#include <linux/interconnect.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/regmap.h>
+#include <linux/sizes.h>
+
+/*
+ * The BWMON samples data throughput within a 'sample_ms' window. Three
+ * configurable thresholds (Low, Medium and High) give four windows (called
+ * zones) of current bandwidth:
+ *
+ * Zone 0: byte count < THRES_LO
+ * Zone 1: THRES_LO < byte count < THRES_MED
+ * Zone 2: THRES_MED < byte count < THRES_HIGH
+ * Zone 3: THRES_HIGH < byte count
+ *
+ * Zones 0 and 2 are not used by this driver.
+ */
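A minimal stand-alone sketch of the zone classification described above; the threshold values are purely illustrative:

    #include <stdio.h>

    /* illustrative per-sample-window byte-count thresholds */
    #define THRES_LO    1000
    #define THRES_MED   5000
    #define THRES_HIGH  9000

    static int zone(unsigned long byte_count)
    {
        if (byte_count < THRES_LO)
            return 0;
        if (byte_count < THRES_MED)
            return 1;
        if (byte_count < THRES_HIGH)
            return 2;
        return 3;
    }

    int main(void)
    {
        printf("a count of 7000 falls in zone %d\n", zone(7000)); /* zone 2 */
        return 0;
    }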
+
+/* Internal sampling clock frequency */
+#define HW_TIMER_HZ 19200000
+
+#define BWMON_V4_GLOBAL_IRQ_CLEAR 0x108
+#define BWMON_V4_GLOBAL_IRQ_ENABLE 0x10c
+/*
+ * All values here and below match regmap fields, so they are given without
+ * absolute register offsets.
+ */
+#define BWMON_V4_GLOBAL_IRQ_ENABLE_ENABLE BIT(0)
+
+/*
+ * Starting with SDM845, the BWMON4 register space has changed a bit:
+ * the global registers were jammed into the beginning of the monitor region.
+ * To keep the proper offsets, one would have to map <GLOBAL_BASE 0x200> and
+ * <GLOBAL_BASE+0x100 0x300>, which is straight up wrong.
+ * To accommodate that, while allowing the older, arguably more proper
+ * implementations to work, offset the global registers by -0x100 to avoid
+ * having to map half of the global registers twice.
+ */
+#define BWMON_V4_845_OFFSET 0x100
+#define BWMON_V4_GLOBAL_IRQ_CLEAR_845 (BWMON_V4_GLOBAL_IRQ_CLEAR - BWMON_V4_845_OFFSET)
+#define BWMON_V4_GLOBAL_IRQ_ENABLE_845 (BWMON_V4_GLOBAL_IRQ_ENABLE - BWMON_V4_845_OFFSET)
+
+#define BWMON_V4_IRQ_STATUS 0x100
+#define BWMON_V4_IRQ_CLEAR 0x108
+
+#define BWMON_V4_IRQ_ENABLE 0x10c
+#define BWMON_IRQ_ENABLE_MASK (BIT(1) | BIT(3))
+#define BWMON_V5_IRQ_STATUS 0x000
+#define BWMON_V5_IRQ_CLEAR 0x008
+#define BWMON_V5_IRQ_ENABLE 0x00c
+
+#define BWMON_V4_ENABLE 0x2a0
+#define BWMON_V5_ENABLE 0x010
+#define BWMON_ENABLE_ENABLE BIT(0)
+
+#define BWMON_V4_CLEAR 0x2a4
+#define BWMON_V5_CLEAR 0x014
+#define BWMON_CLEAR_CLEAR BIT(0)
+#define BWMON_CLEAR_CLEAR_ALL BIT(1)
+
+#define BWMON_V4_SAMPLE_WINDOW 0x2a8
+#define BWMON_V5_SAMPLE_WINDOW 0x020
+
+#define BWMON_V4_THRESHOLD_HIGH 0x2ac
+#define BWMON_V4_THRESHOLD_MED 0x2b0
+#define BWMON_V4_THRESHOLD_LOW 0x2b4
+#define BWMON_V5_THRESHOLD_HIGH 0x024
+#define BWMON_V5_THRESHOLD_MED 0x028
+#define BWMON_V5_THRESHOLD_LOW 0x02c
+
+#define BWMON_V4_ZONE_ACTIONS 0x2b8
+#define BWMON_V5_ZONE_ACTIONS 0x030
+/*
+ * Actions to perform on some zone 'z' when current zone hits the threshold:
+ * Increment counter of zone 'z'
+ */
+#define BWMON_ZONE_ACTIONS_INCREMENT(z) (0x2 << ((z) * 2))
+/* Clear counter of zone 'z' */
+#define BWMON_ZONE_ACTIONS_CLEAR(z) (0x1 << ((z) * 2))
+
+/* Zone 0 threshold hit: Clear zone count */
+#define BWMON_ZONE_ACTIONS_ZONE0 (BWMON_ZONE_ACTIONS_CLEAR(0))
+
+/* Zone 1 threshold hit: Increment zone count & clear lower zones */
+#define BWMON_ZONE_ACTIONS_ZONE1 (BWMON_ZONE_ACTIONS_INCREMENT(1) | \
+ BWMON_ZONE_ACTIONS_CLEAR(0))
+
+/* Zone 2 threshold hit: Increment zone count & clear lower zones */
+#define BWMON_ZONE_ACTIONS_ZONE2 (BWMON_ZONE_ACTIONS_INCREMENT(2) | \
+ BWMON_ZONE_ACTIONS_CLEAR(1) | \
+ BWMON_ZONE_ACTIONS_CLEAR(0))
+
+/* Zone 3 threshold hit: Increment zone count & clear lower zones */
+#define BWMON_ZONE_ACTIONS_ZONE3 (BWMON_ZONE_ACTIONS_INCREMENT(3) | \
+ BWMON_ZONE_ACTIONS_CLEAR(2) | \
+ BWMON_ZONE_ACTIONS_CLEAR(1) | \
+ BWMON_ZONE_ACTIONS_CLEAR(0))
+
+/*
+ * There is no clear documentation/explanation of the BWMON_V4_THRESHOLD_COUNT
+ * register. Based on observations, it holds the number of times a threshold
+ * has to be reached to trigger an interrupt in a given zone.
+ *
+ * 0xff is the maximum value and is used to effectively ignore zones 0 and 2.
+ */
+#define BWMON_V4_THRESHOLD_COUNT 0x2bc
+#define BWMON_V5_THRESHOLD_COUNT 0x034
+#define BWMON_THRESHOLD_COUNT_ZONE0_DEFAULT 0xff
+#define BWMON_THRESHOLD_COUNT_ZONE2_DEFAULT 0xff
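+
+/*
+ * Illustrative note based on the per-SoC data at the end of this file: all
+ * variants use zone1_thres_count = 16 and zone3_thres_count = 1 with
+ * sample_ms = 4, so the low-bandwidth (zone 1) interrupt fires only after
+ * roughly 16 consecutive 4 ms samples (about 64 ms) in that zone, while the
+ * high-bandwidth (zone 3) interrupt fires after a single sample.
+ */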
+
+#define BWMON_V4_ZONE_MAX(zone) (0x2e0 + 4 * (zone))
+#define BWMON_V5_ZONE_MAX(zone) (0x044 + 4 * (zone))
+
+/* Quirks for specific BWMON types */
+#define BWMON_HAS_GLOBAL_IRQ BIT(0)
+#define BWMON_NEEDS_FORCE_CLEAR BIT(1)
+
+enum bwmon_fields {
+ /* Global region fields, keep them at the top */
+ F_GLOBAL_IRQ_CLEAR,
+ F_GLOBAL_IRQ_ENABLE,
+ F_NUM_GLOBAL_FIELDS,
+
+ /* Monitor region fields */
+ F_IRQ_STATUS = F_NUM_GLOBAL_FIELDS,
+ F_IRQ_CLEAR,
+ F_IRQ_ENABLE,
+ F_ENABLE,
+ F_CLEAR,
+ F_SAMPLE_WINDOW,
+ F_THRESHOLD_HIGH,
+ F_THRESHOLD_MED,
+ F_THRESHOLD_LOW,
+ F_ZONE_ACTIONS_ZONE0,
+ F_ZONE_ACTIONS_ZONE1,
+ F_ZONE_ACTIONS_ZONE2,
+ F_ZONE_ACTIONS_ZONE3,
+ F_THRESHOLD_COUNT_ZONE0,
+ F_THRESHOLD_COUNT_ZONE1,
+ F_THRESHOLD_COUNT_ZONE2,
+ F_THRESHOLD_COUNT_ZONE3,
+ F_ZONE0_MAX,
+ F_ZONE1_MAX,
+ F_ZONE2_MAX,
+ F_ZONE3_MAX,
+
+ F_NUM_FIELDS
+};
+
+struct icc_bwmon_data {
+ unsigned int sample_ms;
+ unsigned int count_unit_kb; /* kbytes */
+ u8 zone1_thres_count;
+ u8 zone3_thres_count;
+ unsigned int quirks;
+
+ const struct regmap_config *regmap_cfg;
+ const struct reg_field *regmap_fields;
+
+ const struct regmap_config *global_regmap_cfg;
+ const struct reg_field *global_regmap_fields;
+};
+
+struct icc_bwmon {
+ struct device *dev;
+ const struct icc_bwmon_data *data;
+ int irq;
+
+ struct regmap_field *regs[F_NUM_FIELDS];
+ struct regmap_field *global_regs[F_NUM_GLOBAL_FIELDS];
+
+ unsigned int max_bw_kbps;
+ unsigned int min_bw_kbps;
+ unsigned int target_kbps;
+ unsigned int current_kbps;
+};
+
+/* BWMON v4 */
+static const struct reg_field msm8998_bwmon_reg_fields[] = {
+ [F_GLOBAL_IRQ_CLEAR] = {},
+ [F_GLOBAL_IRQ_ENABLE] = {},
+ [F_IRQ_STATUS] = REG_FIELD(BWMON_V4_IRQ_STATUS, 4, 7),
+ [F_IRQ_CLEAR] = REG_FIELD(BWMON_V4_IRQ_CLEAR, 4, 7),
+ [F_IRQ_ENABLE] = REG_FIELD(BWMON_V4_IRQ_ENABLE, 4, 7),
+ /* F_ENABLE covers entire register to disable other features */
+ [F_ENABLE] = REG_FIELD(BWMON_V4_ENABLE, 0, 31),
+ [F_CLEAR] = REG_FIELD(BWMON_V4_CLEAR, 0, 1),
+ [F_SAMPLE_WINDOW] = REG_FIELD(BWMON_V4_SAMPLE_WINDOW, 0, 23),
+ [F_THRESHOLD_HIGH] = REG_FIELD(BWMON_V4_THRESHOLD_HIGH, 0, 11),
+ [F_THRESHOLD_MED] = REG_FIELD(BWMON_V4_THRESHOLD_MED, 0, 11),
+ [F_THRESHOLD_LOW] = REG_FIELD(BWMON_V4_THRESHOLD_LOW, 0, 11),
+ [F_ZONE_ACTIONS_ZONE0] = REG_FIELD(BWMON_V4_ZONE_ACTIONS, 0, 7),
+ [F_ZONE_ACTIONS_ZONE1] = REG_FIELD(BWMON_V4_ZONE_ACTIONS, 8, 15),
+ [F_ZONE_ACTIONS_ZONE2] = REG_FIELD(BWMON_V4_ZONE_ACTIONS, 16, 23),
+ [F_ZONE_ACTIONS_ZONE3] = REG_FIELD(BWMON_V4_ZONE_ACTIONS, 24, 31),
+ [F_THRESHOLD_COUNT_ZONE0] = REG_FIELD(BWMON_V4_THRESHOLD_COUNT, 0, 7),
+ [F_THRESHOLD_COUNT_ZONE1] = REG_FIELD(BWMON_V4_THRESHOLD_COUNT, 8, 15),
+ [F_THRESHOLD_COUNT_ZONE2] = REG_FIELD(BWMON_V4_THRESHOLD_COUNT, 16, 23),
+ [F_THRESHOLD_COUNT_ZONE3] = REG_FIELD(BWMON_V4_THRESHOLD_COUNT, 24, 31),
+ [F_ZONE0_MAX] = REG_FIELD(BWMON_V4_ZONE_MAX(0), 0, 11),
+ [F_ZONE1_MAX] = REG_FIELD(BWMON_V4_ZONE_MAX(1), 0, 11),
+ [F_ZONE2_MAX] = REG_FIELD(BWMON_V4_ZONE_MAX(2), 0, 11),
+ [F_ZONE3_MAX] = REG_FIELD(BWMON_V4_ZONE_MAX(3), 0, 11),
+};
+
+static const struct regmap_range msm8998_bwmon_reg_noread_ranges[] = {
+ regmap_reg_range(BWMON_V4_IRQ_CLEAR, BWMON_V4_IRQ_CLEAR),
+ regmap_reg_range(BWMON_V4_CLEAR, BWMON_V4_CLEAR),
+};
+
+static const struct regmap_access_table msm8998_bwmon_reg_read_table = {
+ .no_ranges = msm8998_bwmon_reg_noread_ranges,
+ .n_no_ranges = ARRAY_SIZE(msm8998_bwmon_reg_noread_ranges),
+};
+
+static const struct regmap_range msm8998_bwmon_reg_volatile_ranges[] = {
+ regmap_reg_range(BWMON_V4_IRQ_STATUS, BWMON_V4_IRQ_STATUS),
+ regmap_reg_range(BWMON_V4_ZONE_MAX(0), BWMON_V4_ZONE_MAX(3)),
+};
+
+static const struct regmap_access_table msm8998_bwmon_reg_volatile_table = {
+ .yes_ranges = msm8998_bwmon_reg_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(msm8998_bwmon_reg_volatile_ranges),
+};
+
+static const struct reg_field msm8998_bwmon_global_reg_fields[] = {
+ [F_GLOBAL_IRQ_CLEAR] = REG_FIELD(BWMON_V4_GLOBAL_IRQ_CLEAR, 0, 0),
+ [F_GLOBAL_IRQ_ENABLE] = REG_FIELD(BWMON_V4_GLOBAL_IRQ_ENABLE, 0, 0),
+};
+
+static const struct regmap_range msm8998_bwmon_global_reg_noread_ranges[] = {
+ regmap_reg_range(BWMON_V4_GLOBAL_IRQ_CLEAR, BWMON_V4_GLOBAL_IRQ_CLEAR),
+};
+
+static const struct regmap_access_table msm8998_bwmon_global_reg_read_table = {
+ .no_ranges = msm8998_bwmon_global_reg_noread_ranges,
+ .n_no_ranges = ARRAY_SIZE(msm8998_bwmon_global_reg_noread_ranges),
+};
+
+/*
+ * Fill the cache only for non-readable registers, as the rest does not really
+ * matter and can be read from the device.
+ */
+static const struct reg_default msm8998_bwmon_reg_defaults[] = {
+ { BWMON_V4_IRQ_CLEAR, 0x0 },
+ { BWMON_V4_CLEAR, 0x0 },
+};
+
+static const struct reg_default msm8998_bwmon_global_reg_defaults[] = {
+ { BWMON_V4_GLOBAL_IRQ_CLEAR, 0x0 },
+};
+
+static const struct regmap_config msm8998_bwmon_regmap_cfg = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ /*
+ * No concurrent access expected - driver has one interrupt handler,
+ * regmap is not shared, no driver or user-space API.
+ */
+ .disable_locking = true,
+ .rd_table = &msm8998_bwmon_reg_read_table,
+ .volatile_table = &msm8998_bwmon_reg_volatile_table,
+ .reg_defaults = msm8998_bwmon_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(msm8998_bwmon_reg_defaults),
+ /*
+ * Cache is necessary for using regmap fields with non-readable
+ * registers.
+ */
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static const struct regmap_config msm8998_bwmon_global_regmap_cfg = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ /*
+ * No concurrent access expected - driver has one interrupt handler,
+ * regmap is not shared, no driver or user-space API.
+ */
+ .disable_locking = true,
+ .rd_table = &msm8998_bwmon_global_reg_read_table,
+ .reg_defaults = msm8998_bwmon_global_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(msm8998_bwmon_global_reg_defaults),
+ /*
+ * Cache is necessary for using regmap fields with non-readable
+ * registers.
+ */
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static const struct reg_field sdm845_cpu_bwmon_reg_fields[] = {
+ [F_GLOBAL_IRQ_CLEAR] = REG_FIELD(BWMON_V4_GLOBAL_IRQ_CLEAR_845, 0, 0),
+ [F_GLOBAL_IRQ_ENABLE] = REG_FIELD(BWMON_V4_GLOBAL_IRQ_ENABLE_845, 0, 0),
+ [F_IRQ_STATUS] = REG_FIELD(BWMON_V4_IRQ_STATUS, 4, 7),
+ [F_IRQ_CLEAR] = REG_FIELD(BWMON_V4_IRQ_CLEAR, 4, 7),
+ [F_IRQ_ENABLE] = REG_FIELD(BWMON_V4_IRQ_ENABLE, 4, 7),
+ /* F_ENABLE covers entire register to disable other features */
+ [F_ENABLE] = REG_FIELD(BWMON_V4_ENABLE, 0, 31),
+ [F_CLEAR] = REG_FIELD(BWMON_V4_CLEAR, 0, 1),
+ [F_SAMPLE_WINDOW] = REG_FIELD(BWMON_V4_SAMPLE_WINDOW, 0, 23),
+ [F_THRESHOLD_HIGH] = REG_FIELD(BWMON_V4_THRESHOLD_HIGH, 0, 11),
+ [F_THRESHOLD_MED] = REG_FIELD(BWMON_V4_THRESHOLD_MED, 0, 11),
+ [F_THRESHOLD_LOW] = REG_FIELD(BWMON_V4_THRESHOLD_LOW, 0, 11),
+ [F_ZONE_ACTIONS_ZONE0] = REG_FIELD(BWMON_V4_ZONE_ACTIONS, 0, 7),
+ [F_ZONE_ACTIONS_ZONE1] = REG_FIELD(BWMON_V4_ZONE_ACTIONS, 8, 15),
+ [F_ZONE_ACTIONS_ZONE2] = REG_FIELD(BWMON_V4_ZONE_ACTIONS, 16, 23),
+ [F_ZONE_ACTIONS_ZONE3] = REG_FIELD(BWMON_V4_ZONE_ACTIONS, 24, 31),
+ [F_THRESHOLD_COUNT_ZONE0] = REG_FIELD(BWMON_V4_THRESHOLD_COUNT, 0, 7),
+ [F_THRESHOLD_COUNT_ZONE1] = REG_FIELD(BWMON_V4_THRESHOLD_COUNT, 8, 15),
+ [F_THRESHOLD_COUNT_ZONE2] = REG_FIELD(BWMON_V4_THRESHOLD_COUNT, 16, 23),
+ [F_THRESHOLD_COUNT_ZONE3] = REG_FIELD(BWMON_V4_THRESHOLD_COUNT, 24, 31),
+ [F_ZONE0_MAX] = REG_FIELD(BWMON_V4_ZONE_MAX(0), 0, 11),
+ [F_ZONE1_MAX] = REG_FIELD(BWMON_V4_ZONE_MAX(1), 0, 11),
+ [F_ZONE2_MAX] = REG_FIELD(BWMON_V4_ZONE_MAX(2), 0, 11),
+ [F_ZONE3_MAX] = REG_FIELD(BWMON_V4_ZONE_MAX(3), 0, 11),
+};
+
+static const struct regmap_range sdm845_cpu_bwmon_reg_noread_ranges[] = {
+ regmap_reg_range(BWMON_V4_GLOBAL_IRQ_CLEAR_845, BWMON_V4_GLOBAL_IRQ_CLEAR_845),
+ regmap_reg_range(BWMON_V4_IRQ_CLEAR, BWMON_V4_IRQ_CLEAR),
+ regmap_reg_range(BWMON_V4_CLEAR, BWMON_V4_CLEAR),
+};
+
+static const struct regmap_access_table sdm845_cpu_bwmon_reg_read_table = {
+ .no_ranges = sdm845_cpu_bwmon_reg_noread_ranges,
+ .n_no_ranges = ARRAY_SIZE(sdm845_cpu_bwmon_reg_noread_ranges),
+};
+
+/*
+ * Fill the cache only for non-readable registers, as the rest does not really
+ * matter and can be read from the device.
+ */
+static const struct reg_default sdm845_cpu_bwmon_reg_defaults[] = {
+ { BWMON_V4_GLOBAL_IRQ_CLEAR_845, 0x0 },
+ { BWMON_V4_IRQ_CLEAR, 0x0 },
+ { BWMON_V4_CLEAR, 0x0 },
+};
+
+static const struct regmap_config sdm845_cpu_bwmon_regmap_cfg = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ /*
+ * No concurrent access expected - driver has one interrupt handler,
+ * regmap is not shared, no driver or user-space API.
+ */
+ .disable_locking = true,
+ .rd_table = &sdm845_cpu_bwmon_reg_read_table,
+ .volatile_table = &msm8998_bwmon_reg_volatile_table,
+ .reg_defaults = sdm845_cpu_bwmon_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(sdm845_cpu_bwmon_reg_defaults),
+ /*
+ * Cache is necessary for using regmap fields with non-readable
+ * registers.
+ */
+ .cache_type = REGCACHE_RBTREE,
+};
+
+/* BWMON v5 */
+static const struct reg_field sdm845_llcc_bwmon_reg_fields[] = {
+ [F_GLOBAL_IRQ_CLEAR] = {},
+ [F_GLOBAL_IRQ_ENABLE] = {},
+ [F_IRQ_STATUS] = REG_FIELD(BWMON_V5_IRQ_STATUS, 0, 3),
+ [F_IRQ_CLEAR] = REG_FIELD(BWMON_V5_IRQ_CLEAR, 0, 3),
+ [F_IRQ_ENABLE] = REG_FIELD(BWMON_V5_IRQ_ENABLE, 0, 3),
+ /* F_ENABLE covers entire register to disable other features */
+ [F_ENABLE] = REG_FIELD(BWMON_V5_ENABLE, 0, 31),
+ [F_CLEAR] = REG_FIELD(BWMON_V5_CLEAR, 0, 1),
+ [F_SAMPLE_WINDOW] = REG_FIELD(BWMON_V5_SAMPLE_WINDOW, 0, 19),
+ [F_THRESHOLD_HIGH] = REG_FIELD(BWMON_V5_THRESHOLD_HIGH, 0, 11),
+ [F_THRESHOLD_MED] = REG_FIELD(BWMON_V5_THRESHOLD_MED, 0, 11),
+ [F_THRESHOLD_LOW] = REG_FIELD(BWMON_V5_THRESHOLD_LOW, 0, 11),
+ [F_ZONE_ACTIONS_ZONE0] = REG_FIELD(BWMON_V5_ZONE_ACTIONS, 0, 7),
+ [F_ZONE_ACTIONS_ZONE1] = REG_FIELD(BWMON_V5_ZONE_ACTIONS, 8, 15),
+ [F_ZONE_ACTIONS_ZONE2] = REG_FIELD(BWMON_V5_ZONE_ACTIONS, 16, 23),
+ [F_ZONE_ACTIONS_ZONE3] = REG_FIELD(BWMON_V5_ZONE_ACTIONS, 24, 31),
+ [F_THRESHOLD_COUNT_ZONE0] = REG_FIELD(BWMON_V5_THRESHOLD_COUNT, 0, 7),
+ [F_THRESHOLD_COUNT_ZONE1] = REG_FIELD(BWMON_V5_THRESHOLD_COUNT, 8, 15),
+ [F_THRESHOLD_COUNT_ZONE2] = REG_FIELD(BWMON_V5_THRESHOLD_COUNT, 16, 23),
+ [F_THRESHOLD_COUNT_ZONE3] = REG_FIELD(BWMON_V5_THRESHOLD_COUNT, 24, 31),
+ [F_ZONE0_MAX] = REG_FIELD(BWMON_V5_ZONE_MAX(0), 0, 11),
+ [F_ZONE1_MAX] = REG_FIELD(BWMON_V5_ZONE_MAX(1), 0, 11),
+ [F_ZONE2_MAX] = REG_FIELD(BWMON_V5_ZONE_MAX(2), 0, 11),
+ [F_ZONE3_MAX] = REG_FIELD(BWMON_V5_ZONE_MAX(3), 0, 11),
+};
+
+static const struct regmap_range sdm845_llcc_bwmon_reg_noread_ranges[] = {
+ regmap_reg_range(BWMON_V5_IRQ_CLEAR, BWMON_V5_IRQ_CLEAR),
+ regmap_reg_range(BWMON_V5_CLEAR, BWMON_V5_CLEAR),
+};
+
+static const struct regmap_access_table sdm845_llcc_bwmon_reg_read_table = {
+ .no_ranges = sdm845_llcc_bwmon_reg_noread_ranges,
+ .n_no_ranges = ARRAY_SIZE(sdm845_llcc_bwmon_reg_noread_ranges),
+};
+
+static const struct regmap_range sdm845_llcc_bwmon_reg_volatile_ranges[] = {
+ regmap_reg_range(BWMON_V5_IRQ_STATUS, BWMON_V5_IRQ_STATUS),
+ regmap_reg_range(BWMON_V5_ZONE_MAX(0), BWMON_V5_ZONE_MAX(3)),
+};
+
+static const struct regmap_access_table sdm845_llcc_bwmon_reg_volatile_table = {
+ .yes_ranges = sdm845_llcc_bwmon_reg_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(sdm845_llcc_bwmon_reg_volatile_ranges),
+};
+
+/*
+ * Fill the cache only for non-readable registers, as the rest does not really
+ * matter and can be read from the device.
+ */
+static const struct reg_default sdm845_llcc_bwmon_reg_defaults[] = {
+ { BWMON_V5_IRQ_CLEAR, 0x0 },
+ { BWMON_V5_CLEAR, 0x0 },
+};
+
+static const struct regmap_config sdm845_llcc_bwmon_regmap_cfg = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ /*
+ * No concurrent access expected - driver has one interrupt handler,
+ * regmap is not shared, no driver or user-space API.
+ */
+ .disable_locking = true,
+ .rd_table = &sdm845_llcc_bwmon_reg_read_table,
+ .volatile_table = &sdm845_llcc_bwmon_reg_volatile_table,
+ .reg_defaults = sdm845_llcc_bwmon_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(sdm845_llcc_bwmon_reg_defaults),
+ /*
+ * Cache is necessary for using regmap fields with non-readable
+ * registers.
+ */
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static void bwmon_clear_counters(struct icc_bwmon *bwmon, bool clear_all)
+{
+ unsigned int val = BWMON_CLEAR_CLEAR;
+
+ if (clear_all)
+ val |= BWMON_CLEAR_CLEAR_ALL;
+ /*
+ * Clear counters. The order and barriers are
+ * important. Quoting downstream Qualcomm msm-4.9 tree:
+ *
+ * The counter clear and IRQ clear bits are not in the same 4KB
+ * region. So, we need to make sure the counter clear is completed
+ * before we try to clear the IRQ or do any other counter operations.
+ */
+ regmap_field_force_write(bwmon->regs[F_CLEAR], val);
+ if (bwmon->data->quirks & BWMON_NEEDS_FORCE_CLEAR)
+ regmap_field_force_write(bwmon->regs[F_CLEAR], 0);
+}
+
+static void bwmon_clear_irq(struct icc_bwmon *bwmon)
+{
+ struct regmap_field *global_irq_clr;
+
+ if (bwmon->data->global_regmap_fields)
+ global_irq_clr = bwmon->global_regs[F_GLOBAL_IRQ_CLEAR];
+ else
+ global_irq_clr = bwmon->regs[F_GLOBAL_IRQ_CLEAR];
+
+ /*
+ * Clear zone and global interrupts. The order and barriers are
+ * important. Quoting downstream Qualcomm msm-4.9 tree:
+ *
+ * Synchronize the local interrupt clear in mon_irq_clear()
+ * with the global interrupt clear here. Otherwise, the CPU
+ * may reorder the two writes and clear the global interrupt
+ * before the local interrupt, causing the global interrupt
+ * to be retriggered by the local interrupt still being high.
+ *
+ * Similarly, because the global registers are in a different
+ * region than the local registers, we need to ensure any register
+ * writes to enable the monitor after this call are ordered with the
+ * clearing here so that local writes don't happen before the
+ * interrupt is cleared.
+ */
+ regmap_field_force_write(bwmon->regs[F_IRQ_CLEAR], BWMON_IRQ_ENABLE_MASK);
+ if (bwmon->data->quirks & BWMON_NEEDS_FORCE_CLEAR)
+ regmap_field_force_write(bwmon->regs[F_IRQ_CLEAR], 0);
+ if (bwmon->data->quirks & BWMON_HAS_GLOBAL_IRQ)
+ regmap_field_force_write(global_irq_clr,
+ BWMON_V4_GLOBAL_IRQ_ENABLE_ENABLE);
+}
+
+static void bwmon_disable(struct icc_bwmon *bwmon)
+{
+ struct regmap_field *global_irq_en;
+
+ if (bwmon->data->global_regmap_fields)
+ global_irq_en = bwmon->global_regs[F_GLOBAL_IRQ_ENABLE];
+ else
+ global_irq_en = bwmon->regs[F_GLOBAL_IRQ_ENABLE];
+
+ /* Disable interrupts. Strict ordering, see bwmon_clear_irq(). */
+ if (bwmon->data->quirks & BWMON_HAS_GLOBAL_IRQ)
+ regmap_field_write(global_irq_en, 0x0);
+ regmap_field_write(bwmon->regs[F_IRQ_ENABLE], 0x0);
+
+ /*
+ * Disable bwmon. Must happen before bwmon_clear_irq() to avoid spurious
+ * IRQ.
+ */
+ regmap_field_write(bwmon->regs[F_ENABLE], 0x0);
+}
+
+static void bwmon_enable(struct icc_bwmon *bwmon, unsigned int irq_enable)
+{
+ struct regmap_field *global_irq_en;
+
+ if (bwmon->data->global_regmap_fields)
+ global_irq_en = bwmon->global_regs[F_GLOBAL_IRQ_ENABLE];
+ else
+ global_irq_en = bwmon->regs[F_GLOBAL_IRQ_ENABLE];
+
+ /* Enable interrupts */
+ if (bwmon->data->quirks & BWMON_HAS_GLOBAL_IRQ)
+ regmap_field_write(global_irq_en,
+ BWMON_V4_GLOBAL_IRQ_ENABLE_ENABLE);
+
+ regmap_field_write(bwmon->regs[F_IRQ_ENABLE], irq_enable);
+
+ /* Enable bwmon */
+ regmap_field_write(bwmon->regs[F_ENABLE], BWMON_ENABLE_ENABLE);
+}
+
+static unsigned int bwmon_kbps_to_count(struct icc_bwmon *bwmon,
+ unsigned int kbps)
+{
+ return kbps / bwmon->data->count_unit_kb;
+}
+
+static void bwmon_set_threshold(struct icc_bwmon *bwmon,
+ struct regmap_field *reg, unsigned int kbps)
+{
+ unsigned int thres;
+
+ thres = mult_frac(bwmon_kbps_to_count(bwmon, kbps),
+ bwmon->data->sample_ms, MSEC_PER_SEC);
+ regmap_field_write(reg, thres);
+}
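+
+/*
+ * Worked example of the conversion above (the bandwidth value is illustrative
+ * only): with the sdm845 CPU BWMON data below (count_unit_kb = 64,
+ * sample_ms = 4), a requested threshold of 800000 kBps becomes
+ * (800000 / 64) * 4 / 1000 = 50 count units in the threshold field.
+ */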
+
+static void bwmon_start(struct icc_bwmon *bwmon)
+{
+ const struct icc_bwmon_data *data = bwmon->data;
+ u32 bw_low = 0;
+ int window;
+
+ /* No need to check for errors, as this must have succeeded before. */
+ dev_pm_opp_find_bw_ceil(bwmon->dev, &bw_low, 0);
+
+ bwmon_clear_counters(bwmon, true);
+
+ window = mult_frac(bwmon->data->sample_ms, HW_TIMER_HZ, MSEC_PER_SEC);
+ /* Maximum sampling window: 0xffffff for v4 and 0xfffff for v5 */
+ regmap_field_write(bwmon->regs[F_SAMPLE_WINDOW], window);
+
+ bwmon_set_threshold(bwmon, bwmon->regs[F_THRESHOLD_HIGH], bw_low);
+ bwmon_set_threshold(bwmon, bwmon->regs[F_THRESHOLD_MED], bw_low);
+ bwmon_set_threshold(bwmon, bwmon->regs[F_THRESHOLD_LOW], 0);
+
+ regmap_field_write(bwmon->regs[F_THRESHOLD_COUNT_ZONE0],
+ BWMON_THRESHOLD_COUNT_ZONE0_DEFAULT);
+ regmap_field_write(bwmon->regs[F_THRESHOLD_COUNT_ZONE1],
+ data->zone1_thres_count);
+ regmap_field_write(bwmon->regs[F_THRESHOLD_COUNT_ZONE2],
+ BWMON_THRESHOLD_COUNT_ZONE2_DEFAULT);
+ regmap_field_write(bwmon->regs[F_THRESHOLD_COUNT_ZONE3],
+ data->zone3_thres_count);
+
+ regmap_field_write(bwmon->regs[F_ZONE_ACTIONS_ZONE0],
+ BWMON_ZONE_ACTIONS_ZONE0);
+ regmap_field_write(bwmon->regs[F_ZONE_ACTIONS_ZONE1],
+ BWMON_ZONE_ACTIONS_ZONE1);
+ regmap_field_write(bwmon->regs[F_ZONE_ACTIONS_ZONE2],
+ BWMON_ZONE_ACTIONS_ZONE2);
+ regmap_field_write(bwmon->regs[F_ZONE_ACTIONS_ZONE3],
+ BWMON_ZONE_ACTIONS_ZONE3);
+
+ bwmon_clear_irq(bwmon);
+ bwmon_enable(bwmon, BWMON_IRQ_ENABLE_MASK);
+}
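+
+/*
+ * Note on the sample window programmed above: with sample_ms = 4 the value is
+ * 4 * 19200000 / 1000 = 76800 ticks, well within both the 24-bit v4 and the
+ * 20-bit v5 sample window fields.
+ */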
+
+static irqreturn_t bwmon_intr(int irq, void *dev_id)
+{
+ struct icc_bwmon *bwmon = dev_id;
+ unsigned int status, max;
+ int zone;
+
+ if (regmap_field_read(bwmon->regs[F_IRQ_STATUS], &status))
+ return IRQ_NONE;
+
+ status &= BWMON_IRQ_ENABLE_MASK;
+ if (!status) {
+ /*
+		 * Only zone 1 and zone 3 interrupts are enabled, but the
+		 * zone 2 threshold could still be hit and trigger an interrupt
+		 * even though it is not enabled.
+		 * Such a spurious interrupt may or may not carry a valuable
+		 * max count, so the solution would be to always check all
+		 * BWMON_ZONE_MAX() registers for the highest value.
+		 * That case is currently ignored.
+ */
+ return IRQ_NONE;
+ }
+
+ bwmon_disable(bwmon);
+
+ zone = get_bitmask_order(status) - 1;
+ /*
+	 * The zone max byte count register returns count units within the
+	 * sampling window. The downstream kernel for BWMON v4 (called BWMON
+	 * type 2 downstream) always increments the max byte count by one.
+ */
+ if (regmap_field_read(bwmon->regs[F_ZONE0_MAX + zone], &max))
+ return IRQ_NONE;
+
+ max += 1;
+ max *= bwmon->data->count_unit_kb;
+ bwmon->target_kbps = mult_frac(max, MSEC_PER_SEC, bwmon->data->sample_ms);
+
+ return IRQ_WAKE_THREAD;
+}
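+
+/*
+ * Example of the conversion above (the reading is illustrative): a zone max
+ * count of 49 with count_unit_kb = 64 and sample_ms = 4 gives
+ * (49 + 1) * 64 = 3200 kB per 4 ms window, i.e. a target of
+ * 3200 * 1000 / 4 = 800000 kBps.
+ */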
+
+static irqreturn_t bwmon_intr_thread(int irq, void *dev_id)
+{
+ struct icc_bwmon *bwmon = dev_id;
+ unsigned int irq_enable = 0;
+ struct dev_pm_opp *opp, *target_opp;
+ unsigned int bw_kbps, up_kbps, down_kbps;
+
+ bw_kbps = bwmon->target_kbps;
+
+ target_opp = dev_pm_opp_find_bw_ceil(bwmon->dev, &bw_kbps, 0);
+ if (IS_ERR(target_opp) && PTR_ERR(target_opp) == -ERANGE)
+ target_opp = dev_pm_opp_find_bw_floor(bwmon->dev, &bw_kbps, 0);
+
+ bwmon->target_kbps = bw_kbps;
+
+ bw_kbps--;
+ opp = dev_pm_opp_find_bw_floor(bwmon->dev, &bw_kbps, 0);
+ if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE)
+ down_kbps = bwmon->target_kbps;
+ else
+ down_kbps = bw_kbps;
+
+ up_kbps = bwmon->target_kbps + 1;
+
+ if (bwmon->target_kbps >= bwmon->max_bw_kbps)
+ irq_enable = BIT(1);
+ else if (bwmon->target_kbps <= bwmon->min_bw_kbps)
+ irq_enable = BIT(3);
+ else
+ irq_enable = BWMON_IRQ_ENABLE_MASK;
+
+ bwmon_set_threshold(bwmon, bwmon->regs[F_THRESHOLD_HIGH],
+ up_kbps);
+ bwmon_set_threshold(bwmon, bwmon->regs[F_THRESHOLD_MED],
+ down_kbps);
+ bwmon_clear_counters(bwmon, false);
+ bwmon_clear_irq(bwmon);
+ bwmon_enable(bwmon, irq_enable);
+
+ if (bwmon->target_kbps == bwmon->current_kbps)
+ goto out;
+
+ dev_pm_opp_set_opp(bwmon->dev, target_opp);
+ bwmon->current_kbps = bwmon->target_kbps;
+
+out:
+ dev_pm_opp_put(target_opp);
+ if (!IS_ERR(opp))
+ dev_pm_opp_put(opp);
+
+ return IRQ_HANDLED;
+}
+
+static int bwmon_init_regmap(struct platform_device *pdev,
+ struct icc_bwmon *bwmon)
+{
+ struct device *dev = &pdev->dev;
+ void __iomem *base;
+ struct regmap *map;
+ int ret;
+
+ /* Map the monitor base */
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return dev_err_probe(dev, PTR_ERR(base),
+ "failed to map bwmon registers\n");
+
+ map = devm_regmap_init_mmio(dev, base, bwmon->data->regmap_cfg);
+ if (IS_ERR(map))
+ return dev_err_probe(dev, PTR_ERR(map),
+ "failed to initialize regmap\n");
+
+ BUILD_BUG_ON(ARRAY_SIZE(msm8998_bwmon_global_reg_fields) != F_NUM_GLOBAL_FIELDS);
+ BUILD_BUG_ON(ARRAY_SIZE(msm8998_bwmon_reg_fields) != F_NUM_FIELDS);
+ BUILD_BUG_ON(ARRAY_SIZE(sdm845_cpu_bwmon_reg_fields) != F_NUM_FIELDS);
+ BUILD_BUG_ON(ARRAY_SIZE(sdm845_llcc_bwmon_reg_fields) != F_NUM_FIELDS);
+
+ ret = devm_regmap_field_bulk_alloc(dev, map, bwmon->regs,
+ bwmon->data->regmap_fields,
+ F_NUM_FIELDS);
+ if (ret)
+ return ret;
+
+ if (bwmon->data->global_regmap_cfg) {
+ /* Map the global base, if separate */
+ base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(base))
+ return dev_err_probe(dev, PTR_ERR(base),
+ "failed to map bwmon global registers\n");
+
+ map = devm_regmap_init_mmio(dev, base, bwmon->data->global_regmap_cfg);
+ if (IS_ERR(map))
+ return dev_err_probe(dev, PTR_ERR(map),
+ "failed to initialize global regmap\n");
+
+ ret = devm_regmap_field_bulk_alloc(dev, map, bwmon->global_regs,
+ bwmon->data->global_regmap_fields,
+ F_NUM_GLOBAL_FIELDS);
+ }
+
+ return ret;
+}
+
+static int bwmon_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dev_pm_opp *opp;
+ struct icc_bwmon *bwmon;
+ int ret;
+
+ bwmon = devm_kzalloc(dev, sizeof(*bwmon), GFP_KERNEL);
+ if (!bwmon)
+ return -ENOMEM;
+
+ bwmon->data = of_device_get_match_data(dev);
+
+ ret = bwmon_init_regmap(pdev, bwmon);
+ if (ret)
+ return ret;
+
+ bwmon->irq = platform_get_irq(pdev, 0);
+ if (bwmon->irq < 0)
+ return bwmon->irq;
+
+ ret = devm_pm_opp_of_add_table(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add OPP table\n");
+
+ bwmon->max_bw_kbps = UINT_MAX;
+ opp = dev_pm_opp_find_bw_floor(dev, &bwmon->max_bw_kbps, 0);
+ if (IS_ERR(opp))
+ return dev_err_probe(dev, PTR_ERR(opp), "failed to find max peak bandwidth\n");
+
+ bwmon->min_bw_kbps = 0;
+ opp = dev_pm_opp_find_bw_ceil(dev, &bwmon->min_bw_kbps, 0);
+ if (IS_ERR(opp))
+ return dev_err_probe(dev, PTR_ERR(opp), "failed to find min peak bandwidth\n");
+
+ bwmon->dev = dev;
+
+ bwmon_disable(bwmon);
+ ret = devm_request_threaded_irq(dev, bwmon->irq, bwmon_intr,
+ bwmon_intr_thread,
+ IRQF_ONESHOT, dev_name(dev), bwmon);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request IRQ\n");
+
+ platform_set_drvdata(pdev, bwmon);
+ bwmon_start(bwmon);
+
+ return 0;
+}
+
+static int bwmon_remove(struct platform_device *pdev)
+{
+ struct icc_bwmon *bwmon = platform_get_drvdata(pdev);
+
+ bwmon_disable(bwmon);
+
+ return 0;
+}
+
+static const struct icc_bwmon_data msm8998_bwmon_data = {
+ .sample_ms = 4,
+ .count_unit_kb = 1024,
+ .zone1_thres_count = 16,
+ .zone3_thres_count = 1,
+ .quirks = BWMON_HAS_GLOBAL_IRQ,
+ .regmap_fields = msm8998_bwmon_reg_fields,
+ .regmap_cfg = &msm8998_bwmon_regmap_cfg,
+ .global_regmap_fields = msm8998_bwmon_global_reg_fields,
+ .global_regmap_cfg = &msm8998_bwmon_global_regmap_cfg,
+};
+
+static const struct icc_bwmon_data sdm845_cpu_bwmon_data = {
+ .sample_ms = 4,
+ .count_unit_kb = 64,
+ .zone1_thres_count = 16,
+ .zone3_thres_count = 1,
+ .quirks = BWMON_HAS_GLOBAL_IRQ,
+ .regmap_fields = sdm845_cpu_bwmon_reg_fields,
+ .regmap_cfg = &sdm845_cpu_bwmon_regmap_cfg,
+};
+
+static const struct icc_bwmon_data sdm845_llcc_bwmon_data = {
+ .sample_ms = 4,
+ .count_unit_kb = 1024,
+ .zone1_thres_count = 16,
+ .zone3_thres_count = 1,
+ .regmap_fields = sdm845_llcc_bwmon_reg_fields,
+ .regmap_cfg = &sdm845_llcc_bwmon_regmap_cfg,
+};
+
+static const struct icc_bwmon_data sc7280_llcc_bwmon_data = {
+ .sample_ms = 4,
+ .count_unit_kb = 64,
+ .zone1_thres_count = 16,
+ .zone3_thres_count = 1,
+ .quirks = BWMON_NEEDS_FORCE_CLEAR,
+ .regmap_fields = sdm845_llcc_bwmon_reg_fields,
+ .regmap_cfg = &sdm845_llcc_bwmon_regmap_cfg,
+};
+
+static const struct of_device_id bwmon_of_match[] = {
+ /* BWMONv4, separate monitor and global register spaces */
+ { .compatible = "qcom,msm8998-bwmon", .data = &msm8998_bwmon_data },
+ /* BWMONv4, unified register space */
+ { .compatible = "qcom,sdm845-bwmon", .data = &sdm845_cpu_bwmon_data },
+ /* BWMONv5 */
+ { .compatible = "qcom,sdm845-llcc-bwmon", .data = &sdm845_llcc_bwmon_data },
+ { .compatible = "qcom,sc7280-llcc-bwmon", .data = &sc7280_llcc_bwmon_data },
+
+ /* Compatibles kept for legacy reasons */
+ { .compatible = "qcom,sc7280-cpu-bwmon", .data = &sdm845_cpu_bwmon_data },
+ { .compatible = "qcom,sc8280xp-cpu-bwmon", .data = &sdm845_cpu_bwmon_data },
+ { .compatible = "qcom,sm8550-cpu-bwmon", .data = &sdm845_cpu_bwmon_data },
+ {}
+};
+MODULE_DEVICE_TABLE(of, bwmon_of_match);
+
+static struct platform_driver bwmon_driver = {
+ .probe = bwmon_probe,
+ .remove = bwmon_remove,
+ .driver = {
+ .name = "qcom-bwmon",
+ .of_match_table = bwmon_of_match,
+ },
+};
+module_platform_driver(bwmon_driver);
+
+MODULE_AUTHOR("Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>");
+MODULE_DESCRIPTION("QCOM BWMON driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/qcom/ice.c b/drivers/soc/qcom/ice.c
new file mode 100644
index 0000000000..fbab7fe5c6
--- /dev/null
+++ b/drivers/soc/qcom/ice.c
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm ICE (Inline Crypto Engine) support.
+ *
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019, Google LLC
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include <linux/firmware/qcom/qcom_scm.h>
+
+#include <soc/qcom/ice.h>
+
+#define AES_256_XTS_KEY_SIZE 64
+
+/* QCOM ICE registers */
+#define QCOM_ICE_REG_VERSION 0x0008
+#define QCOM_ICE_REG_FUSE_SETTING 0x0010
+#define QCOM_ICE_REG_BIST_STATUS 0x0070
+#define QCOM_ICE_REG_ADVANCED_CONTROL 0x1000
+
+/* BIST ("built-in self-test") status flags */
+#define QCOM_ICE_BIST_STATUS_MASK GENMASK(31, 28)
+
+#define QCOM_ICE_FUSE_SETTING_MASK 0x1
+#define QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK 0x2
+#define QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK 0x4
+
+#define qcom_ice_writel(engine, val, reg) \
+ writel((val), (engine)->base + (reg))
+
+#define qcom_ice_readl(engine, reg) \
+ readl((engine)->base + (reg))
+
+struct qcom_ice {
+ struct device *dev;
+ void __iomem *base;
+ struct device_link *link;
+
+ struct clk *core_clk;
+};
+
+static bool qcom_ice_check_supported(struct qcom_ice *ice)
+{
+ u32 regval = qcom_ice_readl(ice, QCOM_ICE_REG_VERSION);
+ struct device *dev = ice->dev;
+ int major = FIELD_GET(GENMASK(31, 24), regval);
+ int minor = FIELD_GET(GENMASK(23, 16), regval);
+ int step = FIELD_GET(GENMASK(15, 0), regval);
+
+ /* For now this driver only supports ICE version 3 and 4. */
+ if (major != 3 && major != 4) {
+ dev_warn(dev, "Unsupported ICE version: v%d.%d.%d\n",
+ major, minor, step);
+ return false;
+ }
+
+ dev_info(dev, "Found QC Inline Crypto Engine (ICE) v%d.%d.%d\n",
+ major, minor, step);
+
+ /* If fuses are blown, ICE might not work in the standard way. */
+ regval = qcom_ice_readl(ice, QCOM_ICE_REG_FUSE_SETTING);
+ if (regval & (QCOM_ICE_FUSE_SETTING_MASK |
+ QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK |
+ QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK)) {
+ dev_warn(dev, "Fuses are blown; ICE is unusable!\n");
+ return false;
+ }
+
+ return true;
+}
+
+static void qcom_ice_low_power_mode_enable(struct qcom_ice *ice)
+{
+ u32 regval;
+
+ regval = qcom_ice_readl(ice, QCOM_ICE_REG_ADVANCED_CONTROL);
+
+ /* Enable low power mode sequence */
+ regval |= 0x7000;
+ qcom_ice_writel(ice, regval, QCOM_ICE_REG_ADVANCED_CONTROL);
+}
+
+static void qcom_ice_optimization_enable(struct qcom_ice *ice)
+{
+ u32 regval;
+
+ /* ICE Optimizations Enable Sequence */
+ regval = qcom_ice_readl(ice, QCOM_ICE_REG_ADVANCED_CONTROL);
+ regval |= 0xd807100;
+ /* ICE HPG requires delay before writing */
+ udelay(5);
+ qcom_ice_writel(ice, regval, QCOM_ICE_REG_ADVANCED_CONTROL);
+ udelay(5);
+}
+
+/*
+ * Wait until the ICE BIST (built-in self-test) has completed.
+ *
+ * This may be necessary before ICE can be used.
+ * Note that we don't really care whether the BIST passed or failed;
+ * we really just want to make sure that it isn't still running. This is
+ * because (a) the BIST is a FIPS compliance thing that never fails in
+ * practice, (b) ICE is documented to reject crypto requests if the BIST
+ * fails, so we needn't do it in software too, and (c) properly testing
+ * storage encryption requires testing the full storage stack anyway,
+ * and not relying on hardware-level self-tests.
+ */
+static int qcom_ice_wait_bist_status(struct qcom_ice *ice)
+{
+ u32 regval;
+ int err;
+
+ err = readl_poll_timeout(ice->base + QCOM_ICE_REG_BIST_STATUS,
+ regval, !(regval & QCOM_ICE_BIST_STATUS_MASK),
+ 50, 5000);
+ if (err)
+ dev_err(ice->dev, "Timed out waiting for ICE self-test to complete\n");
+
+ return err;
+}
+
+int qcom_ice_enable(struct qcom_ice *ice)
+{
+ qcom_ice_low_power_mode_enable(ice);
+ qcom_ice_optimization_enable(ice);
+
+ return qcom_ice_wait_bist_status(ice);
+}
+EXPORT_SYMBOL_GPL(qcom_ice_enable);
+
+int qcom_ice_resume(struct qcom_ice *ice)
+{
+ struct device *dev = ice->dev;
+ int err;
+
+ err = clk_prepare_enable(ice->core_clk);
+ if (err) {
+ dev_err(dev, "failed to enable core clock (%d)\n",
+ err);
+ return err;
+ }
+
+ return qcom_ice_wait_bist_status(ice);
+}
+EXPORT_SYMBOL_GPL(qcom_ice_resume);
+
+int qcom_ice_suspend(struct qcom_ice *ice)
+{
+ clk_disable_unprepare(ice->core_clk);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_ice_suspend);
+
+int qcom_ice_program_key(struct qcom_ice *ice,
+ u8 algorithm_id, u8 key_size,
+ const u8 crypto_key[], u8 data_unit_size,
+ int slot)
+{
+ struct device *dev = ice->dev;
+ union {
+ u8 bytes[AES_256_XTS_KEY_SIZE];
+ u32 words[AES_256_XTS_KEY_SIZE / sizeof(u32)];
+ } key;
+ int i;
+ int err;
+
+ /* Only AES-256-XTS has been tested so far. */
+ if (algorithm_id != QCOM_ICE_CRYPTO_ALG_AES_XTS ||
+ key_size != QCOM_ICE_CRYPTO_KEY_SIZE_256) {
+ dev_err_ratelimited(dev,
+ "Unhandled crypto capability; algorithm_id=%d, key_size=%d\n",
+ algorithm_id, key_size);
+ return -EINVAL;
+ }
+
+ memcpy(key.bytes, crypto_key, AES_256_XTS_KEY_SIZE);
+
+ /* The SCM call requires that the key words are encoded in big endian */
+ for (i = 0; i < ARRAY_SIZE(key.words); i++)
+ __cpu_to_be32s(&key.words[i]);
+
+ err = qcom_scm_ice_set_key(slot, key.bytes, AES_256_XTS_KEY_SIZE,
+ QCOM_SCM_ICE_CIPHER_AES_256_XTS,
+ data_unit_size);
+
+ memzero_explicit(&key, sizeof(key));
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(qcom_ice_program_key);
+
+int qcom_ice_evict_key(struct qcom_ice *ice, int slot)
+{
+ return qcom_scm_ice_invalidate_key(slot);
+}
+EXPORT_SYMBOL_GPL(qcom_ice_evict_key);
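+
+/*
+ * Typical consumer flow (sketch only; error handling omitted and the slot
+ * number is chosen by the consumer): a storage controller driver obtains the
+ * engine with of_qcom_ice_get(), calls qcom_ice_enable() (or qcom_ice_resume()
+ * when resuming), programs a key into a slot with qcom_ice_program_key() and
+ * later drops it with qcom_ice_evict_key().
+ */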
+
+static struct qcom_ice *qcom_ice_create(struct device *dev,
+ void __iomem *base)
+{
+ struct qcom_ice *engine;
+
+ if (!qcom_scm_is_available())
+ return ERR_PTR(-EPROBE_DEFER);
+
+ if (!qcom_scm_ice_available()) {
+ dev_warn(dev, "ICE SCM interface not found\n");
+ return NULL;
+ }
+
+ engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
+ if (!engine)
+ return ERR_PTR(-ENOMEM);
+
+ engine->dev = dev;
+ engine->base = base;
+
+ /*
+	 * The legacy DT binding uses different clock names for each consumer,
+	 * so let's try those first. If none of those match, it means we only
+	 * have one clock and it is part of the dedicated DT node.
+	 * Also, enable the clock before checking which HW versions the driver
+	 * supports.
+ */
+ engine->core_clk = devm_clk_get_optional_enabled(dev, "ice_core_clk");
+ if (!engine->core_clk)
+ engine->core_clk = devm_clk_get_optional_enabled(dev, "ice");
+ if (!engine->core_clk)
+ engine->core_clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(engine->core_clk))
+ return ERR_CAST(engine->core_clk);
+
+ if (!qcom_ice_check_supported(engine))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ dev_dbg(dev, "Registered Qualcomm Inline Crypto Engine\n");
+
+ return engine;
+}
+
+/**
+ * of_qcom_ice_get() - get an ICE instance from a DT node
+ * @dev: device pointer for the consumer device
+ *
+ * This function provides an ICE instance either by creating one for the
+ * consumer device, if its DT node provides the 'ice' reg range and the 'ice'
+ * clock (legacy DT style), or, if the consumer instead provides a phandle to
+ * an ICE DT node via the 'qcom,ice' property, by returning the instance that
+ * has already been created for that node.
+ *
+ * Return: ICE pointer on success, NULL if there is no ICE data provided by the
+ * consumer or ERR_PTR() on error.
+ */
+struct qcom_ice *of_qcom_ice_get(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct qcom_ice *ice;
+ struct device_node *node;
+ struct resource *res;
+ void __iomem *base;
+
+ if (!dev || !dev->of_node)
+ return ERR_PTR(-ENODEV);
+
+ /*
+ * In order to support legacy style devicetree bindings, we need
+ * to create the ICE instance using the consumer device and the reg
+ * range called 'ice' it provides.
+ */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ice");
+ if (res) {
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return ERR_CAST(base);
+
+ /* create ICE instance using consumer dev */
+ return qcom_ice_create(&pdev->dev, base);
+ }
+
+ /*
+	 * If the consumer node does not provide an 'ice' reg range
+ * (legacy DT binding), then it must at least provide a phandle
+ * to the ICE devicetree node, otherwise ICE is not supported.
+ */
+ node = of_parse_phandle(dev->of_node, "qcom,ice", 0);
+ if (!node)
+ return NULL;
+
+ pdev = of_find_device_by_node(node);
+ if (!pdev) {
+ dev_err(dev, "Cannot find device node %s\n", node->name);
+ ice = ERR_PTR(-EPROBE_DEFER);
+ goto out;
+ }
+
+ ice = platform_get_drvdata(pdev);
+ if (!ice) {
+ dev_err(dev, "Cannot get ice instance from %s\n",
+ dev_name(&pdev->dev));
+ platform_device_put(pdev);
+ ice = ERR_PTR(-EPROBE_DEFER);
+ goto out;
+ }
+
+ ice->link = device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER);
+ if (!ice->link) {
+ dev_err(&pdev->dev,
+ "Failed to create device link to consumer %s\n",
+ dev_name(dev));
+ platform_device_put(pdev);
+ ice = ERR_PTR(-EINVAL);
+ }
+
+out:
+ of_node_put(node);
+
+ return ice;
+}
+EXPORT_SYMBOL_GPL(of_qcom_ice_get);
+
+static int qcom_ice_probe(struct platform_device *pdev)
+{
+ struct qcom_ice *engine;
+ void __iomem *base;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base)) {
+ dev_warn(&pdev->dev, "ICE registers not found\n");
+ return PTR_ERR(base);
+ }
+
+ engine = qcom_ice_create(&pdev->dev, base);
+ if (IS_ERR(engine))
+ return PTR_ERR(engine);
+
+ platform_set_drvdata(pdev, engine);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_ice_of_match_table[] = {
+ { .compatible = "qcom,inline-crypto-engine" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, qcom_ice_of_match_table);
+
+static struct platform_driver qcom_ice_driver = {
+ .probe = qcom_ice_probe,
+ .driver = {
+ .name = "qcom-ice",
+ .of_match_table = qcom_ice_of_match_table,
+ },
+};
+
+module_platform_driver(qcom_ice_driver);
+
+MODULE_DESCRIPTION("Qualcomm Inline Crypto Engine driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/qcom/kryo-l2-accessors.c b/drivers/soc/qcom/kryo-l2-accessors.c
new file mode 100644
index 0000000000..7886af4fd7
--- /dev/null
+++ b/drivers/soc/qcom/kryo-l2-accessors.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/spinlock.h>
+#include <asm/barrier.h>
+#include <asm/sysreg.h>
+#include <soc/qcom/kryo-l2-accessors.h>
+
+#define L2CPUSRSELR_EL1 sys_reg(3, 3, 15, 0, 6)
+#define L2CPUSRDR_EL1 sys_reg(3, 3, 15, 0, 7)
+
+static DEFINE_RAW_SPINLOCK(l2_access_lock);
+
+/**
+ * kryo_l2_set_indirect_reg() - write value to an L2 register
+ * @reg: Address of L2 register.
+ * @val: Value to be written to register.
+ *
+ * Use architecturally required barriers for ordering between system register
+ * accesses, and system registers with respect to device memory
+ */
+void kryo_l2_set_indirect_reg(u64 reg, u64 val)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&l2_access_lock, flags);
+ write_sysreg_s(reg, L2CPUSRSELR_EL1);
+ isb();
+ write_sysreg_s(val, L2CPUSRDR_EL1);
+ isb();
+ raw_spin_unlock_irqrestore(&l2_access_lock, flags);
+}
+EXPORT_SYMBOL(kryo_l2_set_indirect_reg);
+
+/**
+ * kryo_l2_get_indirect_reg() - read an L2 register value
+ * @reg: Address of L2 register.
+ *
+ * Use architecturally required barriers for ordering between system register
+ * accesses, and system registers with respect to device memory
+ */
+u64 kryo_l2_get_indirect_reg(u64 reg)
+{
+ u64 val;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&l2_access_lock, flags);
+ write_sysreg_s(reg, L2CPUSRSELR_EL1);
+ isb();
+ val = read_sysreg_s(L2CPUSRDR_EL1);
+ raw_spin_unlock_irqrestore(&l2_access_lock, flags);
+
+ return val;
+}
+EXPORT_SYMBOL(kryo_l2_get_indirect_reg);
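+
+/*
+ * Usage sketch (the register constant below is hypothetical): a
+ * read-modify-write of an indirect L2 register would look like
+ *
+ *	val = kryo_l2_get_indirect_reg(L2_REG);
+ *	kryo_l2_set_indirect_reg(L2_REG, val | BIT(0));
+ *
+ * with both accessors serializing on the same raw spinlock.
+ */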
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
new file mode 100644
index 0000000000..e877aace11
--- /dev/null
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -0,0 +1,1083 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/llcc-qcom.h>
+
+#define ACTIVATE BIT(0)
+#define DEACTIVATE BIT(1)
+#define ACT_CLEAR BIT(0)
+#define ACT_COMPLETE BIT(4)
+#define ACT_CTRL_OPCODE_ACTIVATE BIT(0)
+#define ACT_CTRL_OPCODE_DEACTIVATE BIT(1)
+#define ACT_CTRL_ACT_TRIG BIT(0)
+#define ACT_CTRL_OPCODE_SHIFT 0x01
+#define ATTR1_PROBE_TARGET_WAYS_SHIFT 0x02
+#define ATTR1_FIXED_SIZE_SHIFT 0x03
+#define ATTR1_PRIORITY_SHIFT 0x04
+#define ATTR1_MAX_CAP_SHIFT 0x10
+#define ATTR0_RES_WAYS_MASK GENMASK(15, 0)
+#define ATTR0_BONUS_WAYS_MASK GENMASK(31, 16)
+#define ATTR0_BONUS_WAYS_SHIFT 0x10
+#define LLCC_STATUS_READ_DELAY 100
+
+#define CACHE_LINE_SIZE_SHIFT 6
+
+#define LLCC_LB_CNT_MASK GENMASK(31, 28)
+#define LLCC_LB_CNT_SHIFT 28
+
+#define MAX_CAP_TO_BYTES(n) (n * SZ_1K)
+#define LLCC_TRP_ACT_CTRLn(n) (n * SZ_4K)
+#define LLCC_TRP_ACT_CLEARn(n) (8 + n * SZ_4K)
+#define LLCC_TRP_STATUSn(n) (4 + n * SZ_4K)
+#define LLCC_TRP_ATTR0_CFGn(n) (0x21000 + SZ_8 * n)
+#define LLCC_TRP_ATTR1_CFGn(n) (0x21004 + SZ_8 * n)
+#define LLCC_TRP_ATTR2_CFGn(n) (0x21100 + SZ_4 * n)
+
+#define LLCC_TRP_SCID_DIS_CAP_ALLOC 0x21f00
+#define LLCC_TRP_PCB_ACT 0x21f04
+#define LLCC_TRP_ALGO_CFG1 0x21f0c
+#define LLCC_TRP_ALGO_CFG2 0x21f10
+#define LLCC_TRP_ALGO_CFG3 0x21f14
+#define LLCC_TRP_ALGO_CFG4 0x21f18
+#define LLCC_TRP_ALGO_CFG5 0x21f1c
+#define LLCC_TRP_WRSC_EN 0x21f20
+#define LLCC_TRP_ALGO_CFG6 0x21f24
+#define LLCC_TRP_ALGO_CFG7 0x21f28
+#define LLCC_TRP_WRSC_CACHEABLE_EN 0x21f2c
+#define LLCC_TRP_ALGO_CFG8 0x21f30
+
+#define LLCC_VERSION_2_0_0_0 0x02000000
+#define LLCC_VERSION_2_1_0_0 0x02010000
+#define LLCC_VERSION_4_1_0_0 0x04010000
+
+/**
+ * struct llcc_slice_config - Data associated with the llcc slice
+ * @usecase_id: Unique id for the client's use case
+ * @slice_id: llcc slice id for each client
+ * @max_cap: The maximum capacity of the cache slice provided in KB
+ * @priority: Priority of the client used to select victim line for replacement
+ * @fixed_size: Boolean indicating if the slice has a fixed capacity
+ * @bonus_ways: Bonus ways are additional ways to be used for any slice,
+ *		if the client ends up using more than the reserved cache ways.
+ *		Bonus ways are allocated only if they are not reserved for some
+ *		other client.
+ * @res_ways: Reserved ways for the cache slice; the reserved ways cannot
+ *		be used by any client other than the one they are assigned to.
+ * @cache_mode: Each slice operates as a cache; this controls the mode of the
+ *		slice: normal or TCM (Tightly Coupled Memory)
+ * @probe_target_ways: Determines what ways to probe for access hit. When
+ * configured to 1 only bonus and reserved ways are probed.
+ * When configured to 0 all ways in llcc are probed.
+ * @dis_cap_alloc: Disable capacity based allocation for a client
+ * @retain_on_pc: If this bit is set and the client has maintained an active
+ *		vote, then the ways assigned to this client are not flushed on
+ *		power collapse.
+ * @activate_on_init: Activate the slice immediately after it is programmed
+ * @write_scid_en: Bit enables write cache support for a given scid.
+ * @write_scid_cacheable_en: Enables write cache cacheable support for a
+ * given scid (not supported on v2 or older hardware).
+ */
+struct llcc_slice_config {
+ u32 usecase_id;
+ u32 slice_id;
+ u32 max_cap;
+ u32 priority;
+ bool fixed_size;
+ u32 bonus_ways;
+ u32 res_ways;
+ u32 cache_mode;
+ u32 probe_target_ways;
+ bool dis_cap_alloc;
+ bool retain_on_pc;
+ bool activate_on_init;
+ bool write_scid_en;
+ bool write_scid_cacheable_en;
+ bool stale_en;
+ bool stale_cap_en;
+ bool mru_uncap_en;
+ bool mru_rollover;
+ bool alloc_oneway_en;
+ bool ovcap_en;
+ bool ovcap_prio;
+ bool vict_prio;
+};
+
+struct qcom_llcc_config {
+ const struct llcc_slice_config *sct_data;
+ const u32 *reg_offset;
+ const struct llcc_edac_reg_offset *edac_reg_offset;
+ int size;
+ bool need_llcc_cfg;
+ bool no_edac;
+};
+
+enum llcc_reg_offset {
+ LLCC_COMMON_HW_INFO,
+ LLCC_COMMON_STATUS0,
+};
+
+static const struct llcc_slice_config sc7180_data[] = {
+ { LLCC_CPUSS, 1, 256, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 1 },
+ { LLCC_MDM, 8, 128, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_GPUHTW, 11, 128, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_GPU, 12, 128, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 0 },
+};
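+
+/*
+ * Note on reading these tables: the entries are positional struct
+ * llcc_slice_config initializers, so the first sc7180 row above means
+ * usecase LLCC_CPUSS, slice_id 1, max_cap 256 KB, priority 1, no fixed size,
+ * bonus_ways 0xf, res_ways 0x0, cache_mode 0, with retain_on_pc and
+ * activate_on_init set.
+ */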
+
+static const struct llcc_slice_config sc7280_data[] = {
+ { LLCC_CPUSS, 1, 768, 1, 0, 0x3f, 0x0, 0, 0, 0, 1, 1, 0},
+ { LLCC_MDMHPGRW, 7, 512, 2, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
+ { LLCC_CMPT, 10, 768, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
+ { LLCC_GPUHTW, 11, 256, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
+ { LLCC_GPU, 12, 512, 1, 0, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
+ { LLCC_MMUHWT, 13, 256, 1, 1, 0x3f, 0x0, 0, 0, 0, 0, 1, 0},
+ { LLCC_MDMPNG, 21, 768, 0, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
+ { LLCC_WLHW, 24, 256, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
+ { LLCC_MODPE, 29, 64, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
+};
+
+static const struct llcc_slice_config sc8180x_data[] = {
+ { LLCC_CPUSS, 1, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 1 },
+ { LLCC_VIDSC0, 2, 512, 2, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_VIDSC1, 3, 512, 2, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_AUDIO, 6, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MDMHPGRW, 7, 3072, 1, 1, 0x3ff, 0xc00, 0, 0, 0, 1, 0 },
+ { LLCC_MDM, 8, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MODHW, 9, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_CMPT, 10, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_GPUHTW, 11, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_GPU, 12, 5120, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MMUHWT, 13, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1 },
+ { LLCC_CMPTDMA, 15, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_DISP, 16, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_VIDFW, 17, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MDMHPFX, 20, 1024, 2, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MDMPNG, 21, 1024, 0, 1, 0xc, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_AUDHW, 22, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_NPU, 23, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_WLHW, 24, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MODPE, 29, 512, 1, 1, 0xc, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_APTCM, 30, 512, 3, 1, 0x0, 0x1, 1, 0, 0, 1, 0 },
+ { LLCC_WRCACHE, 31, 128, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0 },
+};
+
+static const struct llcc_slice_config sc8280xp_data[] = {
+ { LLCC_CPUSS, 1, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 1, 0 },
+ { LLCC_VIDSC0, 2, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_AUDIO, 6, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 },
+ { LLCC_CMPT, 10, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 },
+ { LLCC_GPUHTW, 11, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_GPU, 12, 4096, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 1 },
+ { LLCC_MMUHWT, 13, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_DISP, 16, 6144, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_AUDHW, 22, 2048, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_DRE, 26, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_CVP, 28, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_APTCM, 30, 1024, 3, 1, 0x0, 0x1, 1, 0, 0, 1, 0, 0 },
+ { LLCC_WRCACHE, 31, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_CVPFW, 17, 512, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_CPUSS1, 3, 2048, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_CPUHWT, 5, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+};
+
+static const struct llcc_slice_config sdm845_data[] = {
+ { LLCC_CPUSS, 1, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 1 },
+ { LLCC_VIDSC0, 2, 512, 2, 1, 0x0, 0x0f0, 0, 0, 1, 1, 0 },
+ { LLCC_VIDSC1, 3, 512, 2, 1, 0x0, 0x0f0, 0, 0, 1, 1, 0 },
+ { LLCC_ROTATOR, 4, 563, 2, 1, 0x0, 0x00e, 2, 0, 1, 1, 0 },
+ { LLCC_VOICE, 5, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_AUDIO, 6, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_MDMHPGRW, 7, 1024, 2, 0, 0xfc, 0xf00, 0, 0, 1, 1, 0 },
+ { LLCC_MDM, 8, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_CMPT, 10, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_GPUHTW, 11, 512, 1, 1, 0xc, 0x0, 0, 0, 1, 1, 0 },
+ { LLCC_GPU, 12, 2304, 1, 0, 0xff0, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_MMUHWT, 13, 256, 2, 0, 0x0, 0x1, 0, 0, 1, 0, 1 },
+ { LLCC_CMPTDMA, 15, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_DISP, 16, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_VIDFW, 17, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_MDMHPFX, 20, 1024, 2, 1, 0x0, 0xf00, 0, 0, 1, 1, 0 },
+ { LLCC_MDMPNG, 21, 1024, 0, 1, 0x1e, 0x0, 0, 0, 1, 1, 0 },
+ { LLCC_AUDHW, 22, 1024, 1, 1, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+};
+
+static const struct llcc_slice_config sm6350_data[] = {
+ { LLCC_CPUSS, 1, 768, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 1 },
+ { LLCC_MDM, 8, 512, 2, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_GPUHTW, 11, 256, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_GPU, 12, 512, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_MDMPNG, 21, 768, 0, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_NPU, 23, 768, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_MODPE, 29, 64, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
+};
+
+static const struct llcc_slice_config sm7150_data[] = {
+ { LLCC_CPUSS, 1, 512, 1, 0, 0xF, 0x0, 0, 0, 0, 1, 1 },
+ { LLCC_MDM, 8, 128, 2, 0, 0xF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_GPUHTW, 11, 256, 1, 1, 0xF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_GPU, 12, 256, 1, 1, 0xF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_NPU, 23, 512, 1, 0, 0xF, 0x0, 0, 0, 0, 1, 0 },
+};
+
+static const struct llcc_slice_config sm8150_data[] = {
+ { LLCC_CPUSS, 1, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 1 },
+ { LLCC_VIDSC0, 2, 512, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_VIDSC1, 3, 512, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_AUDIO, 6, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MDMHPGRW, 7, 3072, 1, 0, 0xFF, 0xF00, 0, 0, 0, 1, 0 },
+ { LLCC_MDM, 8, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MODHW, 9, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_CMPT, 10, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_GPUHTW , 11, 512, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_GPU, 12, 2560, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MMUHWT, 13, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1 },
+ { LLCC_CMPTDMA, 15, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_DISP, 16, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MDMHPFX, 20, 1024, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MDMHPFX, 21, 1024, 0, 1, 0xF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_AUDHW, 22, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_NPU, 23, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_WLHW, 24, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MODPE, 29, 256, 1, 1, 0xF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_APTCM, 30, 256, 3, 1, 0x0, 0x1, 1, 0, 0, 1, 0 },
+ { LLCC_WRCACHE, 31, 128, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0 },
+};
+
+static const struct llcc_slice_config sm8250_data[] = {
+ { LLCC_CPUSS, 1, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 1, 0 },
+ { LLCC_VIDSC0, 2, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_AUDIO, 6, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 },
+ { LLCC_CMPT, 10, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 },
+ { LLCC_GPUHTW, 11, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_GPU, 12, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 1 },
+ { LLCC_MMUHWT, 13, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_CMPTDMA, 15, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_DISP, 16, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_VIDFW, 17, 512, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_AUDHW, 22, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_NPU, 23, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_WLHW, 24, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_CVP, 28, 256, 3, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ { LLCC_APTCM, 30, 128, 3, 0, 0x0, 0x3, 1, 0, 0, 1, 0, 0 },
+ { LLCC_WRCACHE, 31, 256, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+};
+
+static const struct llcc_slice_config sm8350_data[] = {
+ { LLCC_CPUSS, 1, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 1 },
+ { LLCC_VIDSC0, 2, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_AUDIO, 6, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 },
+ { LLCC_MDMHPGRW, 7, 1024, 3, 0, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_MODHW, 9, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_CMPT, 10, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_GPUHTW, 11, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_GPU, 12, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 1, 0 },
+ { LLCC_MMUHWT, 13, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 1 },
+ { LLCC_DISP, 16, 3072, 2, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_MDMPNG, 21, 1024, 0, 1, 0xf, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_AUDHW, 22, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_CVP, 28, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_MODPE, 29, 256, 1, 1, 0xf, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_APTCM, 30, 1024, 3, 1, 0x0, 0x1, 1, 0, 0, 0, 1, 0 },
+ { LLCC_WRCACHE, 31, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 1 },
+ { LLCC_CVPFW, 17, 512, 1, 0, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_CPUSS1, 3, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_CPUHWT, 5, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 1 },
+};
+
+static const struct llcc_slice_config sm8450_data[] = {
+ {LLCC_CPUSS, 1, 3072, 1, 0, 0xFFFF, 0x0, 0, 0, 0, 1, 1, 0, 0 },
+ {LLCC_VIDSC0, 2, 512, 3, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_AUDIO, 6, 1024, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0 },
+ {LLCC_MDMHPGRW, 7, 1024, 3, 0, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_MODHW, 9, 1024, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_CMPT, 10, 4096, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_GPUHTW, 11, 512, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_GPU, 12, 2048, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 1, 0 },
+ {LLCC_MMUHWT, 13, 768, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 1, 0, 0 },
+ {LLCC_DISP, 16, 4096, 2, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_MDMPNG, 21, 1024, 1, 1, 0xF000, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_AUDHW, 22, 1024, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0 },
+ {LLCC_CVP, 28, 256, 3, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_MODPE, 29, 64, 1, 1, 0xF000, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_APTCM, 30, 1024, 3, 1, 0x0, 0xF0, 1, 0, 0, 1, 0, 0, 0 },
+ {LLCC_WRCACHE, 31, 512, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 1, 0, 0 },
+ {LLCC_CVPFW, 17, 512, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_CPUSS1, 3, 1024, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_CAMEXP0, 4, 256, 3, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_CPUMTE, 23, 256, 1, 1, 0x0FFF, 0x0, 0, 0, 0, 0, 1, 0, 0 },
+ {LLCC_CPUHWT, 5, 512, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 1, 0, 0 },
+ {LLCC_CAMEXP1, 27, 256, 3, 1, 0xFFFF, 0x0, 0, 0, 0, 1, 0, 0, 0 },
+ {LLCC_AENPU, 8, 2048, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0 },
+};
+
+static const struct llcc_slice_config sm8550_data[] = {
+ {LLCC_CPUSS, 1, 5120, 1, 0, 0xFFFFFF, 0x0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ {LLCC_VIDSC0, 2, 512, 4, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ {LLCC_AUDIO, 6, 1024, 1, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ {LLCC_MDMHPGRW, 25, 1024, 4, 0, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ {LLCC_MODHW, 26, 1024, 1, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ {LLCC_CMPT, 10, 4096, 1, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ {LLCC_GPUHTW, 11, 512, 1, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ {LLCC_GPU, 9, 3096, 1, 0, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, },
+ {LLCC_MMUHWT, 18, 768, 1, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ {LLCC_DISP, 16, 6144, 1, 1, 0xFFFFFF, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ {LLCC_MDMPNG, 27, 1024, 0, 1, 0xF00000, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ {LLCC_AUDHW, 22, 1024, 1, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ {LLCC_CVP, 8, 256, 4, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ {LLCC_MODPE, 29, 64, 1, 1, 0xF00000, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, },
+ {LLCC_WRCACHE, 31, 512, 1, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ {LLCC_CAMEXP0, 4, 256, 4, 1, 0xF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ {LLCC_CPUHWT, 5, 512, 1, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ {LLCC_CAMEXP1, 7, 3200, 3, 1, 0xFFFFF0, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ {LLCC_CMPTHCP, 17, 256, 4, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ {LLCC_LCPDARE, 30, 128, 4, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, },
+ {LLCC_AENPU, 3, 3072, 1, 1, 0xFE01FF, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ {LLCC_ISLAND1, 12, 1792, 7, 1, 0xFE00, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ {LLCC_ISLAND4, 15, 256, 7, 1, 0x10000, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ {LLCC_CAMEXP2, 19, 3200, 3, 1, 0xFFFFF0, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ {LLCC_CAMEXP3, 20, 3200, 2, 1, 0xFFFFF0, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ {LLCC_CAMEXP4, 21, 3200, 2, 1, 0xFFFFF0, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ {LLCC_DISP_WB, 23, 1024, 4, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ {LLCC_DISP_1, 24, 6144, 1, 1, 0xFFFFFF, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ {LLCC_VIDVSP, 28, 256, 4, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+};
+
+static const struct llcc_edac_reg_offset llcc_v1_edac_reg_offset = {
+ .trp_ecc_error_status0 = 0x20344,
+ .trp_ecc_error_status1 = 0x20348,
+ .trp_ecc_sb_err_syn0 = 0x2304c,
+ .trp_ecc_db_err_syn0 = 0x20370,
+ .trp_ecc_error_cntr_clear = 0x20440,
+ .trp_interrupt_0_status = 0x20480,
+ .trp_interrupt_0_clear = 0x20484,
+ .trp_interrupt_0_enable = 0x20488,
+
+ /* LLCC Common registers */
+ .cmn_status0 = 0x3000c,
+ .cmn_interrupt_0_enable = 0x3001c,
+ .cmn_interrupt_2_enable = 0x3003c,
+
+ /* LLCC DRP registers */
+ .drp_ecc_error_cfg = 0x40000,
+ .drp_ecc_error_cntr_clear = 0x40004,
+ .drp_interrupt_status = 0x41000,
+ .drp_interrupt_clear = 0x41008,
+ .drp_interrupt_enable = 0x4100c,
+ .drp_ecc_error_status0 = 0x42044,
+ .drp_ecc_error_status1 = 0x42048,
+ .drp_ecc_sb_err_syn0 = 0x4204c,
+ .drp_ecc_db_err_syn0 = 0x42070,
+};
+
+static const struct llcc_edac_reg_offset llcc_v2_1_edac_reg_offset = {
+ .trp_ecc_error_status0 = 0x20344,
+ .trp_ecc_error_status1 = 0x20348,
+ .trp_ecc_sb_err_syn0 = 0x2034c,
+ .trp_ecc_db_err_syn0 = 0x20370,
+ .trp_ecc_error_cntr_clear = 0x20440,
+ .trp_interrupt_0_status = 0x20480,
+ .trp_interrupt_0_clear = 0x20484,
+ .trp_interrupt_0_enable = 0x20488,
+
+ /* LLCC Common registers */
+ .cmn_status0 = 0x3400c,
+ .cmn_interrupt_0_enable = 0x3401c,
+ .cmn_interrupt_2_enable = 0x3403c,
+
+ /* LLCC DRP registers */
+ .drp_ecc_error_cfg = 0x50000,
+ .drp_ecc_error_cntr_clear = 0x50004,
+ .drp_interrupt_status = 0x50020,
+ .drp_interrupt_clear = 0x50028,
+ .drp_interrupt_enable = 0x5002c,
+ .drp_ecc_error_status0 = 0x520f4,
+ .drp_ecc_error_status1 = 0x520f8,
+ .drp_ecc_sb_err_syn0 = 0x520fc,
+ .drp_ecc_db_err_syn0 = 0x52120,
+};
+
+/* LLCC register offset starting from v1.0.0 */
+static const u32 llcc_v1_reg_offset[] = {
+ [LLCC_COMMON_HW_INFO] = 0x00030000,
+ [LLCC_COMMON_STATUS0] = 0x0003000c,
+};
+
+/* LLCC register offset starting from v2.0.1 */
+static const u32 llcc_v2_1_reg_offset[] = {
+ [LLCC_COMMON_HW_INFO] = 0x00034000,
+ [LLCC_COMMON_STATUS0] = 0x0003400c,
+};
+
+static const struct qcom_llcc_config sc7180_cfg = {
+ .sct_data = sc7180_data,
+ .size = ARRAY_SIZE(sc7180_data),
+ .need_llcc_cfg = true,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
+};
+
+static const struct qcom_llcc_config sc7280_cfg = {
+ .sct_data = sc7280_data,
+ .size = ARRAY_SIZE(sc7280_data),
+ .need_llcc_cfg = true,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
+};
+
+static const struct qcom_llcc_config sc8180x_cfg = {
+ .sct_data = sc8180x_data,
+ .size = ARRAY_SIZE(sc8180x_data),
+ .need_llcc_cfg = true,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
+};
+
+static const struct qcom_llcc_config sc8280xp_cfg = {
+ .sct_data = sc8280xp_data,
+ .size = ARRAY_SIZE(sc8280xp_data),
+ .need_llcc_cfg = true,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
+};
+
+static const struct qcom_llcc_config sdm845_cfg = {
+ .sct_data = sdm845_data,
+ .size = ARRAY_SIZE(sdm845_data),
+ .need_llcc_cfg = false,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
+ .no_edac = true,
+};
+
+static const struct qcom_llcc_config sm6350_cfg = {
+ .sct_data = sm6350_data,
+ .size = ARRAY_SIZE(sm6350_data),
+ .need_llcc_cfg = true,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
+};
+
+static const struct qcom_llcc_config sm7150_cfg = {
+ .sct_data = sm7150_data,
+ .size = ARRAY_SIZE(sm7150_data),
+ .need_llcc_cfg = true,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
+};
+
+static const struct qcom_llcc_config sm8150_cfg = {
+ .sct_data = sm8150_data,
+ .size = ARRAY_SIZE(sm8150_data),
+ .need_llcc_cfg = true,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
+};
+
+static const struct qcom_llcc_config sm8250_cfg = {
+ .sct_data = sm8250_data,
+ .size = ARRAY_SIZE(sm8250_data),
+ .need_llcc_cfg = true,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
+};
+
+static const struct qcom_llcc_config sm8350_cfg = {
+ .sct_data = sm8350_data,
+ .size = ARRAY_SIZE(sm8350_data),
+ .need_llcc_cfg = true,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
+};
+
+static const struct qcom_llcc_config sm8450_cfg = {
+ .sct_data = sm8450_data,
+ .size = ARRAY_SIZE(sm8450_data),
+ .need_llcc_cfg = true,
+ .reg_offset = llcc_v2_1_reg_offset,
+ .edac_reg_offset = &llcc_v2_1_edac_reg_offset,
+};
+
+static const struct qcom_llcc_config sm8550_cfg = {
+ .sct_data = sm8550_data,
+ .size = ARRAY_SIZE(sm8550_data),
+ .need_llcc_cfg = true,
+ .reg_offset = llcc_v2_1_reg_offset,
+ .edac_reg_offset = &llcc_v2_1_edac_reg_offset,
+};
+
+static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER;
+
+/**
+ * llcc_slice_getd - get llcc slice descriptor
+ * @uid: usecase_id for the client
+ *
+ * A pointer to the llcc slice descriptor is returned on success
+ * and an error pointer on failure
+ */
+struct llcc_slice_desc *llcc_slice_getd(u32 uid)
+{
+ const struct llcc_slice_config *cfg;
+ struct llcc_slice_desc *desc;
+ u32 sz, count;
+
+ if (IS_ERR(drv_data))
+ return ERR_CAST(drv_data);
+
+ cfg = drv_data->cfg;
+ sz = drv_data->cfg_size;
+
+ for (count = 0; cfg && count < sz; count++, cfg++)
+ if (cfg->usecase_id == uid)
+ break;
+
+ if (count == sz || !cfg)
+ return ERR_PTR(-ENODEV);
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return ERR_PTR(-ENOMEM);
+
+ desc->slice_id = cfg->slice_id;
+ desc->slice_size = cfg->max_cap;
+
+ return desc;
+}
+EXPORT_SYMBOL_GPL(llcc_slice_getd);
+
+/**
+ * llcc_slice_putd - put the llcc slice descriptor
+ * @desc: Pointer to llcc slice descriptor
+ */
+void llcc_slice_putd(struct llcc_slice_desc *desc)
+{
+ if (!IS_ERR_OR_NULL(desc))
+ kfree(desc);
+}
+EXPORT_SYMBOL_GPL(llcc_slice_putd);
+
+static int llcc_update_act_ctrl(u32 sid,
+ u32 act_ctrl_reg_val, u32 status)
+{
+ u32 act_ctrl_reg;
+ u32 act_clear_reg;
+ u32 status_reg;
+ u32 slice_status;
+ int ret;
+
+ if (IS_ERR(drv_data))
+ return PTR_ERR(drv_data);
+
+ act_ctrl_reg = LLCC_TRP_ACT_CTRLn(sid);
+ act_clear_reg = LLCC_TRP_ACT_CLEARn(sid);
+ status_reg = LLCC_TRP_STATUSn(sid);
+
+ /* Set the ACTIVE trigger */
+ act_ctrl_reg_val |= ACT_CTRL_ACT_TRIG;
+ ret = regmap_write(drv_data->bcast_regmap, act_ctrl_reg,
+ act_ctrl_reg_val);
+ if (ret)
+ return ret;
+
+ /* Clear the ACTIVE trigger */
+ act_ctrl_reg_val &= ~ACT_CTRL_ACT_TRIG;
+ ret = regmap_write(drv_data->bcast_regmap, act_ctrl_reg,
+ act_ctrl_reg_val);
+ if (ret)
+ return ret;
+
+ if (drv_data->version >= LLCC_VERSION_4_1_0_0) {
+ ret = regmap_read_poll_timeout(drv_data->bcast_regmap, status_reg,
+ slice_status, (slice_status & ACT_COMPLETE),
+ 0, LLCC_STATUS_READ_DELAY);
+ if (ret)
+ return ret;
+ }
+
+ ret = regmap_read_poll_timeout(drv_data->bcast_regmap, status_reg,
+ slice_status, !(slice_status & status),
+ 0, LLCC_STATUS_READ_DELAY);
+
+ if (drv_data->version >= LLCC_VERSION_4_1_0_0)
+ ret = regmap_write(drv_data->bcast_regmap, act_clear_reg,
+ ACT_CLEAR);
+
+ return ret;
+}
+
+/**
+ * llcc_slice_activate - Activate the llcc slice
+ * @desc: Pointer to llcc slice descriptor
+ *
+ * A value of zero will be returned on success and a negative errno will
+ * be returned in error cases
+ */
+int llcc_slice_activate(struct llcc_slice_desc *desc)
+{
+ int ret;
+ u32 act_ctrl_val;
+
+ if (IS_ERR(drv_data))
+ return PTR_ERR(drv_data);
+
+ if (IS_ERR_OR_NULL(desc))
+ return -EINVAL;
+
+ mutex_lock(&drv_data->lock);
+ if (test_bit(desc->slice_id, drv_data->bitmap)) {
+ mutex_unlock(&drv_data->lock);
+ return 0;
+ }
+
+ act_ctrl_val = ACT_CTRL_OPCODE_ACTIVATE << ACT_CTRL_OPCODE_SHIFT;
+
+ ret = llcc_update_act_ctrl(desc->slice_id, act_ctrl_val,
+ DEACTIVATE);
+ if (ret) {
+ mutex_unlock(&drv_data->lock);
+ return ret;
+ }
+
+ __set_bit(desc->slice_id, drv_data->bitmap);
+ mutex_unlock(&drv_data->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(llcc_slice_activate);
+
+/**
+ * llcc_slice_deactivate - Deactivate the llcc slice
+ * @desc: Pointer to llcc slice descriptor
+ *
+ * A value of zero will be returned on success and a negative errno will
+ * be returned in error cases
+ */
+int llcc_slice_deactivate(struct llcc_slice_desc *desc)
+{
+ u32 act_ctrl_val;
+ int ret;
+
+ if (IS_ERR(drv_data))
+ return PTR_ERR(drv_data);
+
+ if (IS_ERR_OR_NULL(desc))
+ return -EINVAL;
+
+ mutex_lock(&drv_data->lock);
+ if (!test_bit(desc->slice_id, drv_data->bitmap)) {
+ mutex_unlock(&drv_data->lock);
+ return 0;
+ }
+ act_ctrl_val = ACT_CTRL_OPCODE_DEACTIVATE << ACT_CTRL_OPCODE_SHIFT;
+
+ ret = llcc_update_act_ctrl(desc->slice_id, act_ctrl_val,
+ ACTIVATE);
+ if (ret) {
+ mutex_unlock(&drv_data->lock);
+ return ret;
+ }
+
+ __clear_bit(desc->slice_id, drv_data->bitmap);
+ mutex_unlock(&drv_data->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(llcc_slice_deactivate);
+
+/**
+ * llcc_get_slice_id - return the slice id
+ * @desc: Pointer to llcc slice descriptor
+ */
+int llcc_get_slice_id(struct llcc_slice_desc *desc)
+{
+ if (IS_ERR_OR_NULL(desc))
+ return -EINVAL;
+
+ return desc->slice_id;
+}
+EXPORT_SYMBOL_GPL(llcc_get_slice_id);
+
+/**
+ * llcc_get_slice_size - return the slice size
+ * @desc: Pointer to llcc slice descriptor
+ */
+size_t llcc_get_slice_size(struct llcc_slice_desc *desc)
+{
+ if (IS_ERR_OR_NULL(desc))
+ return 0;
+
+ return desc->slice_size;
+}
+EXPORT_SYMBOL_GPL(llcc_get_slice_size);
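+
+/*
+ * Illustrative client usage (a sketch, not part of this driver): the exported
+ * slice API above is typically used as get -> activate -> ... -> deactivate
+ * -> put. The LLCC_GPU use-case ID and the client_set_scid() helper below are
+ * examples only; error handling is abbreviated.
+ *
+ *	struct llcc_slice_desc *desc;
+ *	int ret;
+ *
+ *	desc = llcc_slice_getd(LLCC_GPU);
+ *	if (IS_ERR(desc))
+ *		return PTR_ERR(desc);
+ *
+ *	ret = llcc_slice_activate(desc);
+ *	if (!ret) {
+ *		client_set_scid(llcc_get_slice_id(desc));
+ *		...
+ *		llcc_slice_deactivate(desc);
+ *	}
+ *	llcc_slice_putd(desc);
+ */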
+
+static int _qcom_llcc_cfg_program(const struct llcc_slice_config *config,
+ const struct qcom_llcc_config *cfg)
+{
+ int ret;
+ u32 attr2_cfg;
+ u32 attr1_cfg;
+ u32 attr0_cfg;
+ u32 attr2_val;
+ u32 attr1_val;
+ u32 attr0_val;
+ u32 max_cap_cacheline;
+ struct llcc_slice_desc desc;
+
+ attr1_val = config->cache_mode;
+ attr1_val |= config->probe_target_ways << ATTR1_PROBE_TARGET_WAYS_SHIFT;
+ attr1_val |= config->fixed_size << ATTR1_FIXED_SIZE_SHIFT;
+ attr1_val |= config->priority << ATTR1_PRIORITY_SHIFT;
+
+ max_cap_cacheline = MAX_CAP_TO_BYTES(config->max_cap);
+
+ /*
+ * The number of LLCC instances can vary per target.
+ * Software writes to the broadcast register, which gets propagated
+ * to each LLCC instance (llcc0 .. llccN).
+ * Since the cache capacity is divided equally amongst the
+ * instances, the max cap has to be configured per instance accordingly.
+ */
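+ /*
+ * Worked example (illustrative numbers, assuming the 64-byte cache line
+ * implied by CACHE_LINE_SIZE_SHIFT): a 3072 KiB slice on a target with
+ * four LLCC banks is split into 3072 / 4 = 768 KiB per bank, which is
+ * then expressed as 768 * 1024 / 64 = 12288 cache lines in the ATTR1
+ * MAX_CAP field.
+ */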
+ max_cap_cacheline = max_cap_cacheline / drv_data->num_banks;
+ max_cap_cacheline >>= CACHE_LINE_SIZE_SHIFT;
+ attr1_val |= max_cap_cacheline << ATTR1_MAX_CAP_SHIFT;
+
+ attr1_cfg = LLCC_TRP_ATTR1_CFGn(config->slice_id);
+
+ ret = regmap_write(drv_data->bcast_regmap, attr1_cfg, attr1_val);
+ if (ret)
+ return ret;
+
+ if (drv_data->version >= LLCC_VERSION_4_1_0_0) {
+ attr2_cfg = LLCC_TRP_ATTR2_CFGn(config->slice_id);
+ attr0_val = config->res_ways;
+ attr2_val = config->bonus_ways;
+ } else {
+ attr0_val = config->res_ways & ATTR0_RES_WAYS_MASK;
+ attr0_val |= config->bonus_ways << ATTR0_BONUS_WAYS_SHIFT;
+ }
+
+ attr0_cfg = LLCC_TRP_ATTR0_CFGn(config->slice_id);
+
+ ret = regmap_write(drv_data->bcast_regmap, attr0_cfg, attr0_val);
+ if (ret)
+ return ret;
+
+ if (drv_data->version >= LLCC_VERSION_4_1_0_0) {
+ ret = regmap_write(drv_data->bcast_regmap, attr2_cfg, attr2_val);
+ if (ret)
+ return ret;
+ }
+
+ if (cfg->need_llcc_cfg) {
+ u32 disable_cap_alloc, retain_pc;
+
+ disable_cap_alloc = config->dis_cap_alloc << config->slice_id;
+ ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_SCID_DIS_CAP_ALLOC,
+ BIT(config->slice_id), disable_cap_alloc);
+ if (ret)
+ return ret;
+
+ if (drv_data->version < LLCC_VERSION_4_1_0_0) {
+ retain_pc = config->retain_on_pc << config->slice_id;
+ ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_PCB_ACT,
+ BIT(config->slice_id), retain_pc);
+ if (ret)
+ return ret;
+ }
+ }
+
+ if (drv_data->version >= LLCC_VERSION_2_0_0_0) {
+ u32 wren;
+
+ wren = config->write_scid_en << config->slice_id;
+ ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_WRSC_EN,
+ BIT(config->slice_id), wren);
+ if (ret)
+ return ret;
+ }
+
+ if (drv_data->version >= LLCC_VERSION_2_1_0_0) {
+ u32 wr_cache_en;
+
+ wr_cache_en = config->write_scid_cacheable_en << config->slice_id;
+ ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_WRSC_CACHEABLE_EN,
+ BIT(config->slice_id), wr_cache_en);
+ if (ret)
+ return ret;
+ }
+
+ if (drv_data->version >= LLCC_VERSION_4_1_0_0) {
+ u32 stale_en;
+ u32 stale_cap_en;
+ u32 mru_uncap_en;
+ u32 mru_rollover;
+ u32 alloc_oneway_en;
+ u32 ovcap_en;
+ u32 ovcap_prio;
+ u32 vict_prio;
+
+ stale_en = config->stale_en << config->slice_id;
+ ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_ALGO_CFG1,
+ BIT(config->slice_id), stale_en);
+ if (ret)
+ return ret;
+
+ stale_cap_en = config->stale_cap_en << config->slice_id;
+ ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_ALGO_CFG2,
+ BIT(config->slice_id), stale_cap_en);
+ if (ret)
+ return ret;
+
+ mru_uncap_en = config->mru_uncap_en << config->slice_id;
+ ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_ALGO_CFG3,
+ BIT(config->slice_id), mru_uncap_en);
+ if (ret)
+ return ret;
+
+ mru_rollover = config->mru_rollover << config->slice_id;
+ ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_ALGO_CFG4,
+ BIT(config->slice_id), mru_rollover);
+ if (ret)
+ return ret;
+
+ alloc_oneway_en = config->alloc_oneway_en << config->slice_id;
+ ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_ALGO_CFG5,
+ BIT(config->slice_id), alloc_oneway_en);
+ if (ret)
+ return ret;
+
+ ovcap_en = config->ovcap_en << config->slice_id;
+ ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_ALGO_CFG6,
+ BIT(config->slice_id), ovcap_en);
+ if (ret)
+ return ret;
+
+ ovcap_prio = config->ovcap_prio << config->slice_id;
+ ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_ALGO_CFG7,
+ BIT(config->slice_id), ovcap_prio);
+ if (ret)
+ return ret;
+
+ vict_prio = config->vict_prio << config->slice_id;
+ ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_ALGO_CFG8,
+ BIT(config->slice_id), vict_prio);
+ if (ret)
+ return ret;
+ }
+
+ if (config->activate_on_init) {
+ desc.slice_id = config->slice_id;
+ ret = llcc_slice_activate(&desc);
+ }
+
+ return ret;
+}
+
+static int qcom_llcc_cfg_program(struct platform_device *pdev,
+ const struct qcom_llcc_config *cfg)
+{
+ int i;
+ u32 sz;
+ int ret = 0;
+ const struct llcc_slice_config *llcc_table;
+
+ sz = drv_data->cfg_size;
+ llcc_table = drv_data->cfg;
+
+ for (i = 0; i < sz; i++) {
+ ret = _qcom_llcc_cfg_program(&llcc_table[i], cfg);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int qcom_llcc_remove(struct platform_device *pdev)
+{
+ /* Set the global pointer to an error code to avoid referencing it */
+ drv_data = ERR_PTR(-ENODEV);
+ return 0;
+}
+
+static struct regmap *qcom_llcc_init_mmio(struct platform_device *pdev, u8 index,
+ const char *name)
+{
+ void __iomem *base;
+ struct regmap_config llcc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ };
+
+ base = devm_platform_ioremap_resource(pdev, index);
+ if (IS_ERR(base))
+ return ERR_CAST(base);
+
+ llcc_regmap_config.name = name;
+ return devm_regmap_init_mmio(&pdev->dev, base, &llcc_regmap_config);
+}
+
+static int qcom_llcc_probe(struct platform_device *pdev)
+{
+ u32 num_banks;
+ struct device *dev = &pdev->dev;
+ int ret, i;
+ struct platform_device *llcc_edac;
+ const struct qcom_llcc_config *cfg;
+ const struct llcc_slice_config *llcc_cfg;
+ u32 sz;
+ u32 version;
+ struct regmap *regmap;
+
+ if (!IS_ERR(drv_data))
+ return -EBUSY;
+
+ drv_data = devm_kzalloc(dev, sizeof(*drv_data), GFP_KERNEL);
+ if (!drv_data) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /* Initialize the first LLCC bank regmap */
+ regmap = qcom_llcc_init_mmio(pdev, 0, "llcc0_base");
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ goto err;
+ }
+
+ cfg = of_device_get_match_data(&pdev->dev);
+
+ ret = regmap_read(regmap, cfg->reg_offset[LLCC_COMMON_STATUS0], &num_banks);
+ if (ret)
+ goto err;
+
+ num_banks &= LLCC_LB_CNT_MASK;
+ num_banks >>= LLCC_LB_CNT_SHIFT;
+ drv_data->num_banks = num_banks;
+
+ drv_data->regmaps = devm_kcalloc(dev, num_banks, sizeof(*drv_data->regmaps), GFP_KERNEL);
+ if (!drv_data->regmaps) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ drv_data->regmaps[0] = regmap;
+
+ /* Initialize rest of LLCC bank regmaps */
+ for (i = 1; i < num_banks; i++) {
+ char *base = kasprintf(GFP_KERNEL, "llcc%d_base", i);
+
+ drv_data->regmaps[i] = qcom_llcc_init_mmio(pdev, i, base);
+ if (IS_ERR(drv_data->regmaps[i])) {
+ ret = PTR_ERR(drv_data->regmaps[i]);
+ kfree(base);
+ goto err;
+ }
+
+ kfree(base);
+ }
+
+ drv_data->bcast_regmap = qcom_llcc_init_mmio(pdev, i, "llcc_broadcast_base");
+ if (IS_ERR(drv_data->bcast_regmap)) {
+ ret = PTR_ERR(drv_data->bcast_regmap);
+ goto err;
+ }
+
+ /* Extract version of the IP */
+ ret = regmap_read(drv_data->bcast_regmap, cfg->reg_offset[LLCC_COMMON_HW_INFO],
+ &version);
+ if (ret)
+ goto err;
+
+ drv_data->version = version;
+
+ llcc_cfg = cfg->sct_data;
+ sz = cfg->size;
+
+ for (i = 0; i < sz; i++)
+ if (llcc_cfg[i].slice_id > drv_data->max_slices)
+ drv_data->max_slices = llcc_cfg[i].slice_id;
+
+ drv_data->bitmap = devm_bitmap_zalloc(dev, drv_data->max_slices,
+ GFP_KERNEL);
+ if (!drv_data->bitmap) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ drv_data->cfg = llcc_cfg;
+ drv_data->cfg_size = sz;
+ drv_data->edac_reg_offset = cfg->edac_reg_offset;
+ mutex_init(&drv_data->lock);
+ platform_set_drvdata(pdev, drv_data);
+
+ ret = qcom_llcc_cfg_program(pdev, cfg);
+ if (ret)
+ goto err;
+
+ drv_data->ecc_irq = platform_get_irq_optional(pdev, 0);
+
+ /*
+ * On some platforms, access to the EDAC registers is locked by the
+ * bootloader, so probing the EDAC driver would result in a crash.
+ * Hence, skip creating the EDAC platform device on those platforms.
+ */
+ if (!cfg->no_edac) {
+ llcc_edac = platform_device_register_data(&pdev->dev,
+ "qcom_llcc_edac", -1, drv_data,
+ sizeof(*drv_data));
+ if (IS_ERR(llcc_edac))
+ dev_err(dev, "Failed to register llcc edac driver\n");
+ }
+
+ return 0;
+err:
+ drv_data = ERR_PTR(-ENODEV);
+ return ret;
+}
+
+static const struct of_device_id qcom_llcc_of_match[] = {
+ { .compatible = "qcom,sc7180-llcc", .data = &sc7180_cfg },
+ { .compatible = "qcom,sc7280-llcc", .data = &sc7280_cfg },
+ { .compatible = "qcom,sc8180x-llcc", .data = &sc8180x_cfg },
+ { .compatible = "qcom,sc8280xp-llcc", .data = &sc8280xp_cfg },
+ { .compatible = "qcom,sdm845-llcc", .data = &sdm845_cfg },
+ { .compatible = "qcom,sm6350-llcc", .data = &sm6350_cfg },
+ { .compatible = "qcom,sm7150-llcc", .data = &sm7150_cfg },
+ { .compatible = "qcom,sm8150-llcc", .data = &sm8150_cfg },
+ { .compatible = "qcom,sm8250-llcc", .data = &sm8250_cfg },
+ { .compatible = "qcom,sm8350-llcc", .data = &sm8350_cfg },
+ { .compatible = "qcom,sm8450-llcc", .data = &sm8450_cfg },
+ { .compatible = "qcom,sm8550-llcc", .data = &sm8550_cfg },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qcom_llcc_of_match);
+
+static struct platform_driver qcom_llcc_driver = {
+ .driver = {
+ .name = "qcom-llcc",
+ .of_match_table = qcom_llcc_of_match,
+ },
+ .probe = qcom_llcc_probe,
+ .remove = qcom_llcc_remove,
+};
+module_platform_driver(qcom_llcc_driver);
+
+MODULE_DESCRIPTION("Qualcomm Last Level Cache Controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c
new file mode 100644
index 0000000000..6f177e46fa
--- /dev/null
+++ b/drivers/soc/qcom/mdt_loader.c
@@ -0,0 +1,449 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Qualcomm Peripheral Image Loader
+ *
+ * Copyright (C) 2016 Linaro Ltd
+ * Copyright (C) 2015 Sony Mobile Communications Inc
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/elf.h>
+#include <linux/firmware.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/firmware/qcom/qcom_scm.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/mdt_loader.h>
+
+static bool mdt_phdr_valid(const struct elf32_phdr *phdr)
+{
+ if (phdr->p_type != PT_LOAD)
+ return false;
+
+ if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
+ return false;
+
+ if (!phdr->p_memsz)
+ return false;
+
+ return true;
+}
+
+static ssize_t mdt_load_split_segment(void *ptr, const struct elf32_phdr *phdrs,
+ unsigned int segment, const char *fw_name,
+ struct device *dev)
+{
+ const struct elf32_phdr *phdr = &phdrs[segment];
+ const struct firmware *seg_fw;
+ char *seg_name;
+ ssize_t ret;
+
+ if (strlen(fw_name) < 4)
+ return -EINVAL;
+
+ seg_name = kstrdup(fw_name, GFP_KERNEL);
+ if (!seg_name)
+ return -ENOMEM;
+
+ sprintf(seg_name + strlen(fw_name) - 3, "b%02d", segment);
+ ret = request_firmware_into_buf(&seg_fw, seg_name, dev,
+ ptr, phdr->p_filesz);
+ if (ret) {
+ dev_err(dev, "error %zd loading %s\n", ret, seg_name);
+ kfree(seg_name);
+ return ret;
+ }
+
+ if (seg_fw->size != phdr->p_filesz) {
+ dev_err(dev,
+ "failed to load segment %d from truncated file %s\n",
+ segment, seg_name);
+ ret = -EINVAL;
+ }
+
+ release_firmware(seg_fw);
+ kfree(seg_name);
+
+ return ret;
+}
+
+/**
+ * qcom_mdt_get_size() - acquire size of the memory region needed to load mdt
+ * @fw: firmware object for the mdt file
+ *
+ * Returns size of the loaded firmware blob, or -EINVAL on failure.
+ */
+ssize_t qcom_mdt_get_size(const struct firmware *fw)
+{
+ const struct elf32_phdr *phdrs;
+ const struct elf32_phdr *phdr;
+ const struct elf32_hdr *ehdr;
+ phys_addr_t min_addr = PHYS_ADDR_MAX;
+ phys_addr_t max_addr = 0;
+ int i;
+
+ ehdr = (struct elf32_hdr *)fw->data;
+ phdrs = (struct elf32_phdr *)(ehdr + 1);
+
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ phdr = &phdrs[i];
+
+ if (!mdt_phdr_valid(phdr))
+ continue;
+
+ if (phdr->p_paddr < min_addr)
+ min_addr = phdr->p_paddr;
+
+ if (phdr->p_paddr + phdr->p_memsz > max_addr)
+ max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
+ }
+
+ return min_addr < max_addr ? max_addr - min_addr : -EINVAL;
+}
+EXPORT_SYMBOL_GPL(qcom_mdt_get_size);
+
+/**
+ * qcom_mdt_read_metadata() - read header and metadata from mdt or mbn
+ * @fw: firmware of mdt header or mbn
+ * @data_len: length of the read metadata blob
+ * @fw_name: name of the firmware, for construction of segment file names
+ * @dev: device handle to associate resources with
+ *
+ * The mechanism that performs the authentication of the loading firmware
+ * expects an ELF header directly followed by the segment of hashes, with no
+ * padding in between. This function allocates a chunk of memory for this pair
+ * and copies the two pieces into the buffer.
+ *
+ * In the case of split firmware the hash is found directly following the ELF
+ * header, rather than at p_offset described by the second program header.
+ *
+ * The caller is responsible for freeing (kfree()) the returned pointer.
+ *
+ * Return: pointer to data, or ERR_PTR()
+ */
+void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len,
+ const char *fw_name, struct device *dev)
+{
+ const struct elf32_phdr *phdrs;
+ const struct elf32_hdr *ehdr;
+ unsigned int hash_segment = 0;
+ size_t hash_offset;
+ size_t hash_size;
+ size_t ehdr_size;
+ unsigned int i;
+ ssize_t ret;
+ void *data;
+
+ ehdr = (struct elf32_hdr *)fw->data;
+ phdrs = (struct elf32_phdr *)(ehdr + 1);
+
+ if (ehdr->e_phnum < 2)
+ return ERR_PTR(-EINVAL);
+
+ if (phdrs[0].p_type == PT_LOAD)
+ return ERR_PTR(-EINVAL);
+
+ for (i = 1; i < ehdr->e_phnum; i++) {
+ if ((phdrs[i].p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH) {
+ hash_segment = i;
+ break;
+ }
+ }
+
+ if (!hash_segment) {
+ dev_err(dev, "no hash segment found in %s\n", fw_name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ ehdr_size = phdrs[0].p_filesz;
+ hash_size = phdrs[hash_segment].p_filesz;
+
+ data = kmalloc(ehdr_size + hash_size, GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ /* Copy ELF header */
+ memcpy(data, fw->data, ehdr_size);
+
+ if (ehdr_size + hash_size == fw->size) {
+ /* Firmware is split and hash is packed following the ELF header */
+ hash_offset = phdrs[0].p_filesz;
+ memcpy(data + ehdr_size, fw->data + hash_offset, hash_size);
+ } else if (phdrs[hash_segment].p_offset + hash_size <= fw->size) {
+ /* Hash is in its own segment, but within the loaded file */
+ hash_offset = phdrs[hash_segment].p_offset;
+ memcpy(data + ehdr_size, fw->data + hash_offset, hash_size);
+ } else {
+ /* Hash is in its own segment, beyond the loaded file */
+ ret = mdt_load_split_segment(data + ehdr_size, phdrs, hash_segment, fw_name, dev);
+ if (ret) {
+ kfree(data);
+ return ERR_PTR(ret);
+ }
+ }
+
+ *data_len = ehdr_size + hash_size;
+
+ return data;
+}
+EXPORT_SYMBOL_GPL(qcom_mdt_read_metadata);
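+
+/*
+ * Layout of the buffer returned above, as assembled by the copies in
+ * qcom_mdt_read_metadata() (sizes come straight from the program headers):
+ *
+ *	0                      phdrs[0].p_filesz                    *data_len
+ *	+----------------------+-------------------------------------+
+ *	| ELF header segment   | hash table segment                  |
+ *	| (phdrs[0].p_filesz)  | (phdrs[hash_segment].p_filesz)      |
+ *	+----------------------+-------------------------------------+
+ */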
+
+/**
+ * qcom_mdt_pas_init() - initialize PAS region for firmware loading
+ * @dev: device handle to associate resources with
+ * @fw: firmware object for the mdt file
+ * @fw_name: name of the firmware, for construction of segment file names
+ * @pas_id: PAS identifier
+ * @mem_phys: physical address of allocated memory region
+ * @ctx: PAS metadata context, to be released by caller
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+int qcom_mdt_pas_init(struct device *dev, const struct firmware *fw,
+ const char *fw_name, int pas_id, phys_addr_t mem_phys,
+ struct qcom_scm_pas_metadata *ctx)
+{
+ const struct elf32_phdr *phdrs;
+ const struct elf32_phdr *phdr;
+ const struct elf32_hdr *ehdr;
+ phys_addr_t min_addr = PHYS_ADDR_MAX;
+ phys_addr_t max_addr = 0;
+ bool relocate = false;
+ size_t metadata_len;
+ void *metadata;
+ int ret;
+ int i;
+
+ ehdr = (struct elf32_hdr *)fw->data;
+ phdrs = (struct elf32_phdr *)(ehdr + 1);
+
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ phdr = &phdrs[i];
+
+ if (!mdt_phdr_valid(phdr))
+ continue;
+
+ if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
+ relocate = true;
+
+ if (phdr->p_paddr < min_addr)
+ min_addr = phdr->p_paddr;
+
+ if (phdr->p_paddr + phdr->p_memsz > max_addr)
+ max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
+ }
+
+ metadata = qcom_mdt_read_metadata(fw, &metadata_len, fw_name, dev);
+ if (IS_ERR(metadata)) {
+ ret = PTR_ERR(metadata);
+ dev_err(dev, "error %d reading firmware %s metadata\n", ret, fw_name);
+ goto out;
+ }
+
+ ret = qcom_scm_pas_init_image(pas_id, metadata, metadata_len, ctx);
+ kfree(metadata);
+ if (ret) {
+ /* Invalid firmware metadata */
+ dev_err(dev, "error %d initializing firmware %s\n", ret, fw_name);
+ goto out;
+ }
+
+ if (relocate) {
+ ret = qcom_scm_pas_mem_setup(pas_id, mem_phys, max_addr - min_addr);
+ if (ret) {
+ /* Unable to set up relocation */
+ dev_err(dev, "error %d setting up firmware %s\n", ret, fw_name);
+ goto out;
+ }
+ }
+
+out:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_mdt_pas_init);
+
+static bool qcom_mdt_bins_are_split(const struct firmware *fw, const char *fw_name)
+{
+ const struct elf32_phdr *phdrs;
+ const struct elf32_hdr *ehdr;
+ uint64_t seg_start, seg_end;
+ int i;
+
+ ehdr = (struct elf32_hdr *)fw->data;
+ phdrs = (struct elf32_phdr *)(ehdr + 1);
+
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ /*
+ * The size of the MDT file is not padded to include any
+ * zero-sized segments at the end. Ignore these, as they should
+ * not affect the decision about the image being split or not.
+ */
+ if (!phdrs[i].p_filesz)
+ continue;
+
+ seg_start = phdrs[i].p_offset;
+ seg_end = phdrs[i].p_offset + phdrs[i].p_filesz;
+ if (seg_start > fw->size || seg_end > fw->size)
+ return true;
+ }
+
+ return false;
+}
+
+static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
+ const char *fw_name, int pas_id, void *mem_region,
+ phys_addr_t mem_phys, size_t mem_size,
+ phys_addr_t *reloc_base, bool pas_init)
+{
+ const struct elf32_phdr *phdrs;
+ const struct elf32_phdr *phdr;
+ const struct elf32_hdr *ehdr;
+ phys_addr_t mem_reloc;
+ phys_addr_t min_addr = PHYS_ADDR_MAX;
+ ssize_t offset;
+ bool relocate = false;
+ bool is_split;
+ void *ptr;
+ int ret = 0;
+ int i;
+
+ if (!fw || !mem_region || !mem_phys || !mem_size)
+ return -EINVAL;
+
+ is_split = qcom_mdt_bins_are_split(fw, fw_name);
+ ehdr = (struct elf32_hdr *)fw->data;
+ phdrs = (struct elf32_phdr *)(ehdr + 1);
+
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ phdr = &phdrs[i];
+
+ if (!mdt_phdr_valid(phdr))
+ continue;
+
+ if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
+ relocate = true;
+
+ if (phdr->p_paddr < min_addr)
+ min_addr = phdr->p_paddr;
+ }
+
+ if (relocate) {
+ /*
+ * The image is relocatable, so offset each segment based on
+ * the lowest segment address.
+ */
+ mem_reloc = min_addr;
+ } else {
+ /*
+ * Image is not relocatable, so offset each segment based on
+ * the allocated physical chunk of memory.
+ */
+ mem_reloc = mem_phys;
+ }
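+
+ /*
+ * Example with illustrative addresses: for a relocatable image whose
+ * lowest segment starts at p_paddr 0x08000000, loaded into a carveout
+ * at mem_phys 0x8b000000, mem_reloc is 0x08000000 and a segment at
+ * p_paddr 0x08100000 is copied to mem_region + 0x100000. For a
+ * non-relocatable image the segments must already sit inside
+ * [mem_phys, mem_phys + mem_size).
+ */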
+
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ phdr = &phdrs[i];
+
+ if (!mdt_phdr_valid(phdr))
+ continue;
+
+ offset = phdr->p_paddr - mem_reloc;
+ if (offset < 0 || offset + phdr->p_memsz > mem_size) {
+ dev_err(dev, "segment outside memory range\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ if (phdr->p_filesz > phdr->p_memsz) {
+ dev_err(dev,
+ "refusing to load segment %d with p_filesz > p_memsz\n",
+ i);
+ ret = -EINVAL;
+ break;
+ }
+
+ ptr = mem_region + offset;
+
+ if (phdr->p_filesz && !is_split) {
+ /* Firmware is large enough to be non-split */
+ if (phdr->p_offset + phdr->p_filesz > fw->size) {
+ dev_err(dev, "file %s segment %d would be truncated\n",
+ fw_name, i);
+ ret = -EINVAL;
+ break;
+ }
+
+ memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz);
+ } else if (phdr->p_filesz) {
+ /* Firmware not large enough, load split-out segments */
+ ret = mdt_load_split_segment(ptr, phdrs, i, fw_name, dev);
+ if (ret)
+ break;
+ }
+
+ if (phdr->p_memsz > phdr->p_filesz)
+ memset(ptr + phdr->p_filesz, 0, phdr->p_memsz - phdr->p_filesz);
+ }
+
+ if (reloc_base)
+ *reloc_base = mem_reloc;
+
+ return ret;
+}
+
+/**
+ * qcom_mdt_load() - load the firmware whose header was loaded as @fw
+ * @dev: device handle to associate resources with
+ * @fw: firmware object for the mdt file
+ * @firmware: name of the firmware, for construction of segment file names
+ * @pas_id: PAS identifier
+ * @mem_region: allocated memory region to load firmware into
+ * @mem_phys: physical address of allocated memory region
+ * @mem_size: size of the allocated memory region
+ * @reloc_base: adjusted physical address after relocation
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+int qcom_mdt_load(struct device *dev, const struct firmware *fw,
+ const char *firmware, int pas_id, void *mem_region,
+ phys_addr_t mem_phys, size_t mem_size,
+ phys_addr_t *reloc_base)
+{
+ int ret;
+
+ ret = qcom_mdt_pas_init(dev, fw, firmware, pas_id, mem_phys, NULL);
+ if (ret)
+ return ret;
+
+ return __qcom_mdt_load(dev, fw, firmware, pas_id, mem_region, mem_phys,
+ mem_size, reloc_base, true);
+}
+EXPORT_SYMBOL_GPL(qcom_mdt_load);
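+
+/*
+ * Illustrative caller sketch (hypothetical names; the typical user is a
+ * remoteproc driver): the "modem.mdt" firmware name, pas_id value and the
+ * carveout handling below are examples only.
+ *
+ *	const struct firmware *fw;
+ *	void *region;
+ *	phys_addr_t phys;
+ *	size_t size;
+ *
+ *	ret = request_firmware(&fw, "modem.mdt", dev);
+ *	...
+ *	size = qcom_mdt_get_size(fw);
+ *	region = memremap(phys, size, MEMREMAP_WC);
+ *	...
+ *	ret = qcom_mdt_load(dev, fw, "modem.mdt", pas_id, region,
+ *			    phys, size, NULL);
+ *	if (!ret)
+ *		ret = qcom_scm_pas_auth_and_reset(pas_id);
+ *
+ *	release_firmware(fw);
+ */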
+
+/**
+ * qcom_mdt_load_no_init() - load the firmware whose header was loaded as @fw
+ * @dev: device handle to associate resources with
+ * @fw: firmware object for the mdt file
+ * @firmware: name of the firmware, for construction of segment file names
+ * @pas_id: PAS identifier
+ * @mem_region: allocated memory region to load firmware into
+ * @mem_phys: physical address of allocated memory region
+ * @mem_size: size of the allocated memory region
+ * @reloc_base: adjusted physical address after relocation
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw,
+ const char *firmware, int pas_id,
+ void *mem_region, phys_addr_t mem_phys,
+ size_t mem_size, phys_addr_t *reloc_base)
+{
+ return __qcom_mdt_load(dev, fw, firmware, pas_id, mem_region, mem_phys,
+ mem_size, reloc_base, false);
+}
+EXPORT_SYMBOL_GPL(qcom_mdt_load_no_init);
+
+MODULE_DESCRIPTION("Firmware parser for Qualcomm MDT format");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/ocmem.c b/drivers/soc/qcom/ocmem.c
new file mode 100644
index 0000000000..20f5461d46
--- /dev/null
+++ b/drivers/soc/qcom/ocmem.c
@@ -0,0 +1,459 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * The On Chip Memory (OCMEM) allocator allows various clients to allocate
+ * memory from OCMEM based on performance, latency and power requirements.
+ * This is typically used by the GPU, camera/video, and audio components on
+ * some Snapdragon SoCs.
+ *
+ * Copyright (C) 2019 Brian Masney <masneyb@onstation.org>
+ * Copyright (C) 2015 Red Hat. Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/firmware/qcom/qcom_scm.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <soc/qcom/ocmem.h>
+
+enum region_mode {
+ WIDE_MODE = 0x0,
+ THIN_MODE,
+ MODE_DEFAULT = WIDE_MODE,
+};
+
+enum ocmem_macro_state {
+ PASSTHROUGH = 0,
+ PERI_ON = 1,
+ CORE_ON = 2,
+ CLK_OFF = 4,
+};
+
+struct ocmem_region {
+ bool interleaved;
+ enum region_mode mode;
+ unsigned int num_macros;
+ enum ocmem_macro_state macro_state[4];
+ unsigned long macro_size;
+ unsigned long region_size;
+};
+
+struct ocmem_config {
+ uint8_t num_regions;
+ unsigned long macro_size;
+};
+
+struct ocmem {
+ struct device *dev;
+ const struct ocmem_config *config;
+ struct resource *memory;
+ void __iomem *mmio;
+ struct clk *core_clk;
+ struct clk *iface_clk;
+ unsigned int num_ports;
+ unsigned int num_macros;
+ bool interleaved;
+ struct ocmem_region *regions;
+ unsigned long active_allocations;
+};
+
+#define OCMEM_MIN_ALIGN SZ_64K
+#define OCMEM_MIN_ALLOC SZ_64K
+
+#define OCMEM_REG_HW_VERSION 0x00000000
+#define OCMEM_REG_HW_PROFILE 0x00000004
+
+#define OCMEM_REG_REGION_MODE_CTL 0x00001000
+#define OCMEM_REGION_MODE_CTL_REG0_THIN 0x00000001
+#define OCMEM_REGION_MODE_CTL_REG1_THIN 0x00000002
+#define OCMEM_REGION_MODE_CTL_REG2_THIN 0x00000004
+#define OCMEM_REGION_MODE_CTL_REG3_THIN 0x00000008
+
+#define OCMEM_REG_GFX_MPU_START 0x00001004
+#define OCMEM_REG_GFX_MPU_END 0x00001008
+
+#define OCMEM_HW_VERSION_MAJOR(val) FIELD_GET(GENMASK(31, 28), val)
+#define OCMEM_HW_VERSION_MINOR(val) FIELD_GET(GENMASK(27, 16), val)
+#define OCMEM_HW_VERSION_STEP(val) FIELD_GET(GENMASK(15, 0), val)
+
+#define OCMEM_HW_PROFILE_NUM_PORTS(val) FIELD_GET(0x0000000f, (val))
+#define OCMEM_HW_PROFILE_NUM_MACROS(val) FIELD_GET(0x00003f00, (val))
+
+#define OCMEM_HW_PROFILE_LAST_REGN_HALFSIZE 0x00010000
+#define OCMEM_HW_PROFILE_INTERLEAVING 0x00020000
+#define OCMEM_REG_GEN_STATUS 0x0000000c
+
+#define OCMEM_REG_PSGSC_STATUS 0x00000038
+#define OCMEM_REG_PSGSC_CTL(i0) (0x0000003c + 0x1*(i0))
+
+#define OCMEM_PSGSC_CTL_MACRO0_MODE(val) FIELD_PREP(0x00000007, (val))
+#define OCMEM_PSGSC_CTL_MACRO1_MODE(val) FIELD_PREP(0x00000070, (val))
+#define OCMEM_PSGSC_CTL_MACRO2_MODE(val) FIELD_PREP(0x00000700, (val))
+#define OCMEM_PSGSC_CTL_MACRO3_MODE(val) FIELD_PREP(0x00007000, (val))
+
+static inline void ocmem_write(struct ocmem *ocmem, u32 reg, u32 data)
+{
+ writel(data, ocmem->mmio + reg);
+}
+
+static inline u32 ocmem_read(struct ocmem *ocmem, u32 reg)
+{
+ return readl(ocmem->mmio + reg);
+}
+
+static void update_ocmem(struct ocmem *ocmem)
+{
+ uint32_t region_mode_ctrl = 0x0;
+ int i;
+
+ if (!qcom_scm_ocmem_lock_available()) {
+ for (i = 0; i < ocmem->config->num_regions; i++) {
+ struct ocmem_region *region = &ocmem->regions[i];
+
+ if (region->mode == THIN_MODE)
+ region_mode_ctrl |= BIT(i);
+ }
+
+ dev_dbg(ocmem->dev, "ocmem_region_mode_control %x\n",
+ region_mode_ctrl);
+ ocmem_write(ocmem, OCMEM_REG_REGION_MODE_CTL, region_mode_ctrl);
+ }
+
+ for (i = 0; i < ocmem->config->num_regions; i++) {
+ struct ocmem_region *region = &ocmem->regions[i];
+ u32 data;
+
+ data = OCMEM_PSGSC_CTL_MACRO0_MODE(region->macro_state[0]) |
+ OCMEM_PSGSC_CTL_MACRO1_MODE(region->macro_state[1]) |
+ OCMEM_PSGSC_CTL_MACRO2_MODE(region->macro_state[2]) |
+ OCMEM_PSGSC_CTL_MACRO3_MODE(region->macro_state[3]);
+
+ ocmem_write(ocmem, OCMEM_REG_PSGSC_CTL(i), data);
+ }
+}
+
+static unsigned long phys_to_offset(struct ocmem *ocmem,
+ unsigned long addr)
+{
+ if (addr < ocmem->memory->start || addr >= ocmem->memory->end)
+ return 0;
+
+ return addr - ocmem->memory->start;
+}
+
+static unsigned long device_address(struct ocmem *ocmem,
+ enum ocmem_client client,
+ unsigned long addr)
+{
+ WARN_ON(client != OCMEM_GRAPHICS);
+
+ /* TODO: gpu uses phys_to_offset, but others do not.. */
+ return phys_to_offset(ocmem, addr);
+}
+
+static void update_range(struct ocmem *ocmem, struct ocmem_buf *buf,
+ enum ocmem_macro_state mstate, enum region_mode rmode)
+{
+ unsigned long offset = 0;
+ int i, j;
+
+ for (i = 0; i < ocmem->config->num_regions; i++) {
+ struct ocmem_region *region = &ocmem->regions[i];
+
+ if (buf->offset <= offset && offset < buf->offset + buf->len)
+ region->mode = rmode;
+
+ for (j = 0; j < region->num_macros; j++) {
+ if (buf->offset <= offset &&
+ offset < buf->offset + buf->len)
+ region->macro_state[j] = mstate;
+
+ offset += region->macro_size;
+ }
+ }
+
+ update_ocmem(ocmem);
+}
+
+struct ocmem *of_get_ocmem(struct device *dev)
+{
+ struct platform_device *pdev;
+ struct device_node *devnode;
+ struct ocmem *ocmem;
+
+ devnode = of_parse_phandle(dev->of_node, "sram", 0);
+ if (!devnode || !devnode->parent) {
+ dev_err(dev, "Cannot look up sram phandle\n");
+ of_node_put(devnode);
+ return ERR_PTR(-ENODEV);
+ }
+
+ pdev = of_find_device_by_node(devnode->parent);
+ if (!pdev) {
+ dev_err(dev, "Cannot find device node %s\n", devnode->name);
+ of_node_put(devnode);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+ of_node_put(devnode);
+
+ ocmem = platform_get_drvdata(pdev);
+ if (!ocmem) {
+ dev_err(dev, "Cannot get ocmem\n");
+ put_device(&pdev->dev);
+ return ERR_PTR(-ENODEV);
+ }
+ return ocmem;
+}
+EXPORT_SYMBOL(of_get_ocmem);
+
+struct ocmem_buf *ocmem_allocate(struct ocmem *ocmem, enum ocmem_client client,
+ unsigned long size)
+{
+ struct ocmem_buf *buf;
+ int ret;
+
+ /* TODO: add support for other clients... */
+ if (WARN_ON(client != OCMEM_GRAPHICS))
+ return ERR_PTR(-ENODEV);
+
+ if (size < OCMEM_MIN_ALLOC || !IS_ALIGNED(size, OCMEM_MIN_ALIGN))
+ return ERR_PTR(-EINVAL);
+
+ if (test_and_set_bit_lock(BIT(client), &ocmem->active_allocations))
+ return ERR_PTR(-EBUSY);
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ buf->offset = 0;
+ buf->addr = device_address(ocmem, client, buf->offset);
+ buf->len = size;
+
+ update_range(ocmem, buf, CORE_ON, WIDE_MODE);
+
+ if (qcom_scm_ocmem_lock_available()) {
+ ret = qcom_scm_ocmem_lock(QCOM_SCM_OCMEM_GRAPHICS_ID,
+ buf->offset, buf->len, WIDE_MODE);
+ if (ret) {
+ dev_err(ocmem->dev, "could not lock: %d\n", ret);
+ ret = -EINVAL;
+ goto err_kfree;
+ }
+ } else {
+ ocmem_write(ocmem, OCMEM_REG_GFX_MPU_START, buf->offset);
+ ocmem_write(ocmem, OCMEM_REG_GFX_MPU_END,
+ buf->offset + buf->len);
+ }
+
+ dev_dbg(ocmem->dev, "using %ldK of OCMEM at 0x%08lx for client %d\n",
+ size / 1024, buf->addr, client);
+
+ return buf;
+
+err_kfree:
+ kfree(buf);
+err_unlock:
+ clear_bit_unlock(BIT(client), &ocmem->active_allocations);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(ocmem_allocate);
+
+void ocmem_free(struct ocmem *ocmem, enum ocmem_client client,
+ struct ocmem_buf *buf)
+{
+ /* TODO: add support for other clients... */
+ if (WARN_ON(client != OCMEM_GRAPHICS))
+ return;
+
+ update_range(ocmem, buf, CLK_OFF, MODE_DEFAULT);
+
+ if (qcom_scm_ocmem_lock_available()) {
+ int ret;
+
+ ret = qcom_scm_ocmem_unlock(QCOM_SCM_OCMEM_GRAPHICS_ID,
+ buf->offset, buf->len);
+ if (ret)
+ dev_err(ocmem->dev, "could not unlock: %d\n", ret);
+ } else {
+ ocmem_write(ocmem, OCMEM_REG_GFX_MPU_START, 0x0);
+ ocmem_write(ocmem, OCMEM_REG_GFX_MPU_END, 0x0);
+ }
+
+ kfree(buf);
+
+ clear_bit_unlock(BIT(client), &ocmem->active_allocations);
+}
+EXPORT_SYMBOL(ocmem_free);
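+
+/*
+ * Illustrative client usage (a sketch; the GPU is currently the only
+ * supported client, and the allocation size below is an example only):
+ *
+ *	struct ocmem *ocmem;
+ *	struct ocmem_buf *buf;
+ *
+ *	ocmem = of_get_ocmem(dev);
+ *	if (IS_ERR(ocmem))
+ *		return PTR_ERR(ocmem);
+ *
+ *	buf = ocmem_allocate(ocmem, OCMEM_GRAPHICS, SZ_1M);
+ *	if (IS_ERR(buf))
+ *		return PTR_ERR(buf);
+ *
+ *	// buf->addr is the device address to program into the client
+ *	...
+ *	ocmem_free(ocmem, OCMEM_GRAPHICS, buf);
+ */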
+
+static int ocmem_dev_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ unsigned long reg, region_size;
+ int i, j, ret, num_banks;
+ struct ocmem *ocmem;
+
+ if (!qcom_scm_is_available())
+ return -EPROBE_DEFER;
+
+ ocmem = devm_kzalloc(dev, sizeof(*ocmem), GFP_KERNEL);
+ if (!ocmem)
+ return -ENOMEM;
+
+ ocmem->dev = dev;
+ ocmem->config = device_get_match_data(dev);
+
+ ocmem->core_clk = devm_clk_get(dev, "core");
+ if (IS_ERR(ocmem->core_clk))
+ return dev_err_probe(dev, PTR_ERR(ocmem->core_clk),
+ "Unable to get core clock\n");
+
+ ocmem->iface_clk = devm_clk_get_optional(dev, "iface");
+ if (IS_ERR(ocmem->iface_clk))
+ return dev_err_probe(dev, PTR_ERR(ocmem->iface_clk),
+ "Unable to get iface clock\n");
+
+ ocmem->mmio = devm_platform_ioremap_resource_byname(pdev, "ctrl");
+ if (IS_ERR(ocmem->mmio))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ocmem->mmio),
+ "Failed to ioremap ocmem_ctrl resource\n");
+
+ ocmem->memory = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "mem");
+ if (!ocmem->memory) {
+ dev_err(dev, "Could not get mem region\n");
+ return -ENXIO;
+ }
+
+ /* The core clock is synchronous with graphics */
+ WARN_ON(clk_set_rate(ocmem->core_clk, 1000) < 0);
+
+ ret = clk_prepare_enable(ocmem->core_clk);
+ if (ret)
+ return dev_err_probe(ocmem->dev, ret, "Failed to enable core clock\n");
+
+ ret = clk_prepare_enable(ocmem->iface_clk);
+ if (ret) {
+ clk_disable_unprepare(ocmem->core_clk);
+ return dev_err_probe(ocmem->dev, ret, "Failed to enable iface clock\n");
+ }
+
+ if (qcom_scm_restore_sec_cfg_available()) {
+ dev_dbg(dev, "configuring scm\n");
+ ret = qcom_scm_restore_sec_cfg(QCOM_SCM_OCMEM_DEV_ID, 0);
+ if (ret) {
+ dev_err_probe(dev, ret, "Could not enable secure configuration\n");
+ goto err_clk_disable;
+ }
+ }
+
+ reg = ocmem_read(ocmem, OCMEM_REG_HW_VERSION);
+ dev_dbg(dev, "OCMEM hardware version: %lu.%lu.%lu\n",
+ OCMEM_HW_VERSION_MAJOR(reg),
+ OCMEM_HW_VERSION_MINOR(reg),
+ OCMEM_HW_VERSION_STEP(reg));
+
+ reg = ocmem_read(ocmem, OCMEM_REG_HW_PROFILE);
+ ocmem->num_ports = OCMEM_HW_PROFILE_NUM_PORTS(reg);
+ ocmem->num_macros = OCMEM_HW_PROFILE_NUM_MACROS(reg);
+ ocmem->interleaved = !!(reg & OCMEM_HW_PROFILE_INTERLEAVING);
+
+ num_banks = ocmem->num_ports / 2;
+ region_size = ocmem->config->macro_size * num_banks;
+
+ dev_info(dev, "%u ports, %u regions, %u macros, %sinterleaved\n",
+ ocmem->num_ports, ocmem->config->num_regions,
+ ocmem->num_macros, ocmem->interleaved ? "" : "not ");
+
+ ocmem->regions = devm_kcalloc(dev, ocmem->config->num_regions,
+ sizeof(struct ocmem_region), GFP_KERNEL);
+ if (!ocmem->regions) {
+ ret = -ENOMEM;
+ goto err_clk_disable;
+ }
+
+ for (i = 0; i < ocmem->config->num_regions; i++) {
+ struct ocmem_region *region = &ocmem->regions[i];
+
+ if (WARN_ON(num_banks > ARRAY_SIZE(region->macro_state))) {
+ ret = -EINVAL;
+ goto err_clk_disable;
+ }
+
+ region->mode = MODE_DEFAULT;
+ region->num_macros = num_banks;
+
+ if (i == (ocmem->config->num_regions - 1) &&
+ reg & OCMEM_HW_PROFILE_LAST_REGN_HALFSIZE) {
+ region->macro_size = ocmem->config->macro_size / 2;
+ region->region_size = region_size / 2;
+ } else {
+ region->macro_size = ocmem->config->macro_size;
+ region->region_size = region_size;
+ }
+
+ for (j = 0; j < ARRAY_SIZE(region->macro_state); j++)
+ region->macro_state[j] = CLK_OFF;
+ }
+
+ platform_set_drvdata(pdev, ocmem);
+
+ return 0;
+
+err_clk_disable:
+ clk_disable_unprepare(ocmem->core_clk);
+ clk_disable_unprepare(ocmem->iface_clk);
+ return ret;
+}
+
+static int ocmem_dev_remove(struct platform_device *pdev)
+{
+ struct ocmem *ocmem = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(ocmem->core_clk);
+ clk_disable_unprepare(ocmem->iface_clk);
+
+ return 0;
+}
+
+static const struct ocmem_config ocmem_8226_config = {
+ .num_regions = 1,
+ .macro_size = SZ_128K,
+};
+
+static const struct ocmem_config ocmem_8974_config = {
+ .num_regions = 3,
+ .macro_size = SZ_128K,
+};
+
+static const struct of_device_id ocmem_of_match[] = {
+ { .compatible = "qcom,msm8226-ocmem", .data = &ocmem_8226_config },
+ { .compatible = "qcom,msm8974-ocmem", .data = &ocmem_8974_config },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, ocmem_of_match);
+
+static struct platform_driver ocmem_driver = {
+ .probe = ocmem_dev_probe,
+ .remove = ocmem_dev_remove,
+ .driver = {
+ .name = "ocmem",
+ .of_match_table = ocmem_of_match,
+ },
+};
+
+module_platform_driver(ocmem_driver);
+
+MODULE_DESCRIPTION("On Chip Memory (OCMEM) allocator for some Snapdragon SoCs");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c
new file mode 100644
index 0000000000..0034af927b
--- /dev/null
+++ b/drivers/soc/qcom/pdr_interface.c
@@ -0,0 +1,755 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+
+#include "pdr_internal.h"
+
+struct pdr_service {
+ char service_name[SERVREG_NAME_LENGTH + 1];
+ char service_path[SERVREG_NAME_LENGTH + 1];
+
+ struct sockaddr_qrtr addr;
+
+ unsigned int instance;
+ unsigned int service;
+ u8 service_data_valid;
+ u32 service_data;
+ int state;
+
+ bool need_notifier_register;
+ bool need_notifier_remove;
+ bool need_locator_lookup;
+ bool service_connected;
+
+ struct list_head node;
+};
+
+struct pdr_handle {
+ struct qmi_handle locator_hdl;
+ struct qmi_handle notifier_hdl;
+
+ struct sockaddr_qrtr locator_addr;
+
+ struct list_head lookups;
+ struct list_head indack_list;
+
+ /* control access to pdr lookup/indack lists */
+ struct mutex list_lock;
+
+ /* serialize pd status invocation */
+ struct mutex status_lock;
+
+ /* control access to the locator state */
+ struct mutex lock;
+
+ bool locator_init_complete;
+
+ struct work_struct locator_work;
+ struct work_struct notifier_work;
+ struct work_struct indack_work;
+
+ struct workqueue_struct *notifier_wq;
+ struct workqueue_struct *indack_wq;
+
+ void (*status)(int state, char *service_path, void *priv);
+ void *priv;
+};
+
+struct pdr_list_node {
+ enum servreg_service_state curr_state;
+ u16 transaction_id;
+ struct pdr_service *pds;
+ struct list_head node;
+};
+
+static int pdr_locator_new_server(struct qmi_handle *qmi,
+ struct qmi_service *svc)
+{
+ struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
+ locator_hdl);
+ struct pdr_service *pds;
+
+ /* Create a local client port for QMI communication */
+ pdr->locator_addr.sq_family = AF_QIPCRTR;
+ pdr->locator_addr.sq_node = svc->node;
+ pdr->locator_addr.sq_port = svc->port;
+
+ mutex_lock(&pdr->lock);
+ pdr->locator_init_complete = true;
+ mutex_unlock(&pdr->lock);
+
+ /* Service pending lookup requests */
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry(pds, &pdr->lookups, node) {
+ if (pds->need_locator_lookup)
+ schedule_work(&pdr->locator_work);
+ }
+ mutex_unlock(&pdr->list_lock);
+
+ return 0;
+}
+
+static void pdr_locator_del_server(struct qmi_handle *qmi,
+ struct qmi_service *svc)
+{
+ struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
+ locator_hdl);
+
+ mutex_lock(&pdr->lock);
+ pdr->locator_init_complete = false;
+ mutex_unlock(&pdr->lock);
+
+ pdr->locator_addr.sq_node = 0;
+ pdr->locator_addr.sq_port = 0;
+}
+
+static const struct qmi_ops pdr_locator_ops = {
+ .new_server = pdr_locator_new_server,
+ .del_server = pdr_locator_del_server,
+};
+
+static int pdr_register_listener(struct pdr_handle *pdr,
+ struct pdr_service *pds,
+ bool enable)
+{
+ struct servreg_register_listener_resp resp;
+ struct servreg_register_listener_req req;
+ struct qmi_txn txn;
+ int ret;
+
+ ret = qmi_txn_init(&pdr->notifier_hdl, &txn,
+ servreg_register_listener_resp_ei,
+ &resp);
+ if (ret < 0)
+ return ret;
+
+ req.enable = enable;
+ strscpy(req.service_path, pds->service_path, sizeof(req.service_path));
+
+ ret = qmi_send_request(&pdr->notifier_hdl, &pds->addr,
+ &txn, SERVREG_REGISTER_LISTENER_REQ,
+ SERVREG_REGISTER_LISTENER_REQ_LEN,
+ servreg_register_listener_req_ei,
+ &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ return ret;
+ }
+
+ ret = qmi_txn_wait(&txn, 5 * HZ);
+ if (ret < 0) {
+ pr_err("PDR: %s register listener txn wait failed: %d\n",
+ pds->service_path, ret);
+ return ret;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ pr_err("PDR: %s register listener failed: 0x%x\n",
+ pds->service_path, resp.resp.error);
+ return -EREMOTEIO;
+ }
+
+ pds->state = resp.curr_state;
+
+ return 0;
+}
+
+static void pdr_notifier_work(struct work_struct *work)
+{
+ struct pdr_handle *pdr = container_of(work, struct pdr_handle,
+ notifier_work);
+ struct pdr_service *pds;
+ int ret;
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry(pds, &pdr->lookups, node) {
+ if (pds->service_connected) {
+ if (!pds->need_notifier_register)
+ continue;
+
+ pds->need_notifier_register = false;
+ ret = pdr_register_listener(pdr, pds, true);
+ if (ret < 0)
+ pds->state = SERVREG_SERVICE_STATE_DOWN;
+ } else {
+ if (!pds->need_notifier_remove)
+ continue;
+
+ pds->need_notifier_remove = false;
+ pds->state = SERVREG_SERVICE_STATE_DOWN;
+ }
+
+ mutex_lock(&pdr->status_lock);
+ pdr->status(pds->state, pds->service_path, pdr->priv);
+ mutex_unlock(&pdr->status_lock);
+ }
+ mutex_unlock(&pdr->list_lock);
+}
+
+static int pdr_notifier_new_server(struct qmi_handle *qmi,
+ struct qmi_service *svc)
+{
+ struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
+ notifier_hdl);
+ struct pdr_service *pds;
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry(pds, &pdr->lookups, node) {
+ if (pds->service == svc->service &&
+ pds->instance == svc->instance) {
+ pds->service_connected = true;
+ pds->need_notifier_register = true;
+ pds->addr.sq_family = AF_QIPCRTR;
+ pds->addr.sq_node = svc->node;
+ pds->addr.sq_port = svc->port;
+ queue_work(pdr->notifier_wq, &pdr->notifier_work);
+ }
+ }
+ mutex_unlock(&pdr->list_lock);
+
+ return 0;
+}
+
+static void pdr_notifier_del_server(struct qmi_handle *qmi,
+ struct qmi_service *svc)
+{
+ struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
+ notifier_hdl);
+ struct pdr_service *pds;
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry(pds, &pdr->lookups, node) {
+ if (pds->service == svc->service &&
+ pds->instance == svc->instance) {
+ pds->service_connected = false;
+ pds->need_notifier_remove = true;
+ pds->addr.sq_node = 0;
+ pds->addr.sq_port = 0;
+ queue_work(pdr->notifier_wq, &pdr->notifier_work);
+ }
+ }
+ mutex_unlock(&pdr->list_lock);
+}
+
+static const struct qmi_ops pdr_notifier_ops = {
+ .new_server = pdr_notifier_new_server,
+ .del_server = pdr_notifier_del_server,
+};
+
+static int pdr_send_indack_msg(struct pdr_handle *pdr, struct pdr_service *pds,
+ u16 tid)
+{
+ struct servreg_set_ack_resp resp;
+ struct servreg_set_ack_req req;
+ struct qmi_txn txn;
+ int ret;
+
+ ret = qmi_txn_init(&pdr->notifier_hdl, &txn, servreg_set_ack_resp_ei,
+ &resp);
+ if (ret < 0)
+ return ret;
+
+ req.transaction_id = tid;
+ strscpy(req.service_path, pds->service_path, sizeof(req.service_path));
+
+ ret = qmi_send_request(&pdr->notifier_hdl, &pds->addr,
+ &txn, SERVREG_SET_ACK_REQ,
+ SERVREG_SET_ACK_REQ_LEN,
+ servreg_set_ack_req_ei,
+ &req);
+
+ /* Skip waiting for response */
+ qmi_txn_cancel(&txn);
+ return ret;
+}
+
+static void pdr_indack_work(struct work_struct *work)
+{
+ struct pdr_handle *pdr = container_of(work, struct pdr_handle,
+ indack_work);
+ struct pdr_list_node *ind, *tmp;
+ struct pdr_service *pds;
+
+ list_for_each_entry_safe(ind, tmp, &pdr->indack_list, node) {
+ pds = ind->pds;
+
+ mutex_lock(&pdr->status_lock);
+ pds->state = ind->curr_state;
+ pdr->status(pds->state, pds->service_path, pdr->priv);
+ mutex_unlock(&pdr->status_lock);
+
+ /* Ack the indication after clients release the PD resources */
+ pdr_send_indack_msg(pdr, pds, ind->transaction_id);
+
+ mutex_lock(&pdr->list_lock);
+ list_del(&ind->node);
+ mutex_unlock(&pdr->list_lock);
+
+ kfree(ind);
+ }
+}
+
+static void pdr_indication_cb(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, const void *data)
+{
+ struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
+ notifier_hdl);
+ const struct servreg_state_updated_ind *ind_msg = data;
+ struct pdr_list_node *ind;
+ struct pdr_service *pds = NULL, *iter;
+
+ if (!ind_msg || !ind_msg->service_path[0] ||
+ strlen(ind_msg->service_path) > SERVREG_NAME_LENGTH)
+ return;
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry(iter, &pdr->lookups, node) {
+ if (strcmp(iter->service_path, ind_msg->service_path))
+ continue;
+
+ pds = iter;
+ break;
+ }
+ mutex_unlock(&pdr->list_lock);
+
+ if (!pds)
+ return;
+
+ pr_info("PDR: Indication received from %s, state: 0x%x, trans-id: %d\n",
+ ind_msg->service_path, ind_msg->curr_state,
+ ind_msg->transaction_id);
+
+ ind = kzalloc(sizeof(*ind), GFP_KERNEL);
+ if (!ind)
+ return;
+
+ ind->transaction_id = ind_msg->transaction_id;
+ ind->curr_state = ind_msg->curr_state;
+ ind->pds = pds;
+
+ mutex_lock(&pdr->list_lock);
+ list_add_tail(&ind->node, &pdr->indack_list);
+ mutex_unlock(&pdr->list_lock);
+
+ queue_work(pdr->indack_wq, &pdr->indack_work);
+}
+
+static const struct qmi_msg_handler qmi_indication_handler[] = {
+ {
+ .type = QMI_INDICATION,
+ .msg_id = SERVREG_STATE_UPDATED_IND_ID,
+ .ei = servreg_state_updated_ind_ei,
+ .decoded_size = sizeof(struct servreg_state_updated_ind),
+ .fn = pdr_indication_cb,
+ },
+ {}
+};
+
+static int pdr_get_domain_list(struct servreg_get_domain_list_req *req,
+ struct servreg_get_domain_list_resp *resp,
+ struct pdr_handle *pdr)
+{
+ struct qmi_txn txn;
+ int ret;
+
+ ret = qmi_txn_init(&pdr->locator_hdl, &txn,
+ servreg_get_domain_list_resp_ei, resp);
+ if (ret < 0)
+ return ret;
+
+ ret = qmi_send_request(&pdr->locator_hdl,
+ &pdr->locator_addr,
+ &txn, SERVREG_GET_DOMAIN_LIST_REQ,
+ SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN,
+ servreg_get_domain_list_req_ei,
+ req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ return ret;
+ }
+
+ ret = qmi_txn_wait(&txn, 5 * HZ);
+ if (ret < 0) {
+ pr_err("PDR: %s get domain list txn wait failed: %d\n",
+ req->service_name, ret);
+ return ret;
+ }
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ pr_err("PDR: %s get domain list failed: 0x%x\n",
+ req->service_name, resp->resp.error);
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+
+static int pdr_locate_service(struct pdr_handle *pdr, struct pdr_service *pds)
+{
+ struct servreg_get_domain_list_resp *resp;
+ struct servreg_get_domain_list_req req;
+ struct servreg_location_entry *entry;
+ int domains_read = 0;
+ int ret, i;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ /* Prepare req message */
+ strscpy(req.service_name, pds->service_name, sizeof(req.service_name));
+ req.domain_offset_valid = true;
+ req.domain_offset = 0;
+
+ do {
+ req.domain_offset = domains_read;
+ ret = pdr_get_domain_list(&req, resp, pdr);
+ if (ret < 0)
+ goto out;
+
+ for (i = domains_read; i < resp->domain_list_len; i++) {
+ entry = &resp->domain_list[i];
+
+ if (strnlen(entry->name, sizeof(entry->name)) == sizeof(entry->name))
+ continue;
+
+ if (!strcmp(entry->name, pds->service_path)) {
+ pds->service_data_valid = entry->service_data_valid;
+ pds->service_data = entry->service_data;
+ pds->instance = entry->instance;
+ goto out;
+ }
+ }
+
+ /* Update ret to indicate that the service is not yet found */
+ ret = -ENXIO;
+
+ /* Always read total_domains from the response msg */
+ if (resp->domain_list_len > resp->total_domains)
+ resp->domain_list_len = resp->total_domains;
+
+ domains_read += resp->domain_list_len;
+ } while (domains_read < resp->total_domains);
+out:
+ kfree(resp);
+ return ret;
+}
+
+static void pdr_notify_lookup_failure(struct pdr_handle *pdr,
+ struct pdr_service *pds,
+ int err)
+{
+ pr_err("PDR: service lookup for %s failed: %d\n",
+ pds->service_name, err);
+
+ if (err == -ENXIO)
+ return;
+
+ list_del(&pds->node);
+ pds->state = SERVREG_LOCATOR_ERR;
+ mutex_lock(&pdr->status_lock);
+ pdr->status(pds->state, pds->service_path, pdr->priv);
+ mutex_unlock(&pdr->status_lock);
+ kfree(pds);
+}
+
+static void pdr_locator_work(struct work_struct *work)
+{
+ struct pdr_handle *pdr = container_of(work, struct pdr_handle,
+ locator_work);
+ struct pdr_service *pds, *tmp;
+ int ret = 0;
+
+ /* Bail out early if the SERVREG LOCATOR QMI service is not up */
+ mutex_lock(&pdr->lock);
+ if (!pdr->locator_init_complete) {
+ mutex_unlock(&pdr->lock);
+ pr_debug("PDR: SERVICE LOCATOR service not available\n");
+ return;
+ }
+ mutex_unlock(&pdr->lock);
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry_safe(pds, tmp, &pdr->lookups, node) {
+ if (!pds->need_locator_lookup)
+ continue;
+
+ ret = pdr_locate_service(pdr, pds);
+ if (ret < 0) {
+ pdr_notify_lookup_failure(pdr, pds, ret);
+ continue;
+ }
+
+ ret = qmi_add_lookup(&pdr->notifier_hdl, pds->service, 1,
+ pds->instance);
+ if (ret < 0) {
+ pdr_notify_lookup_failure(pdr, pds, ret);
+ continue;
+ }
+
+ pds->need_locator_lookup = false;
+ }
+ mutex_unlock(&pdr->list_lock);
+}
+
+/**
+ * pdr_add_lookup() - register a tracking request for a PD
+ * @pdr: PDR client handle
+ * @service_name: service name of the tracking request
+ * @service_path: service path of the tracking request
+ *
+ * Registering a pdr lookup allows for tracking the life cycle of the PD.
+ *
+ * Return: pdr_service object on success, ERR_PTR on failure. -EALREADY is
+ * returned if a lookup is already in progress for the given service path.
+ */
+struct pdr_service *pdr_add_lookup(struct pdr_handle *pdr,
+ const char *service_name,
+ const char *service_path)
+{
+ struct pdr_service *pds, *tmp;
+ int ret;
+
+ if (IS_ERR_OR_NULL(pdr))
+ return ERR_PTR(-EINVAL);
+
+ if (!service_name || strlen(service_name) > SERVREG_NAME_LENGTH ||
+ !service_path || strlen(service_path) > SERVREG_NAME_LENGTH)
+ return ERR_PTR(-EINVAL);
+
+ pds = kzalloc(sizeof(*pds), GFP_KERNEL);
+ if (!pds)
+ return ERR_PTR(-ENOMEM);
+
+ pds->service = SERVREG_NOTIFIER_SERVICE;
+ strscpy(pds->service_name, service_name, sizeof(pds->service_name));
+ strscpy(pds->service_path, service_path, sizeof(pds->service_path));
+ pds->need_locator_lookup = true;
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry(tmp, &pdr->lookups, node) {
+ if (strcmp(tmp->service_path, service_path))
+ continue;
+
+ mutex_unlock(&pdr->list_lock);
+ ret = -EALREADY;
+ goto err;
+ }
+
+ list_add(&pds->node, &pdr->lookups);
+ mutex_unlock(&pdr->list_lock);
+
+ schedule_work(&pdr->locator_work);
+
+ return pds;
+err:
+ kfree(pds);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(pdr_add_lookup);
+
+/**
+ * pdr_restart_pd() - restart PD
+ * @pdr: PDR client handle
+ * @pds: PD service handle
+ *
+ * Restarts the PD tracked by the PDR client handle for a given service path.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int pdr_restart_pd(struct pdr_handle *pdr, struct pdr_service *pds)
+{
+ struct servreg_restart_pd_resp resp;
+ struct servreg_restart_pd_req req = { 0 };
+ struct sockaddr_qrtr addr;
+ struct pdr_service *tmp;
+ struct qmi_txn txn;
+ int ret;
+
+ if (IS_ERR_OR_NULL(pdr) || IS_ERR_OR_NULL(pds))
+ return -EINVAL;
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry(tmp, &pdr->lookups, node) {
+ if (tmp != pds)
+ continue;
+
+ if (!pds->service_connected)
+ break;
+
+ /* Prepare req message */
+ strscpy(req.service_path, pds->service_path, sizeof(req.service_path));
+ addr = pds->addr;
+ break;
+ }
+ mutex_unlock(&pdr->list_lock);
+
+ if (!req.service_path[0])
+ return -EINVAL;
+
+ ret = qmi_txn_init(&pdr->notifier_hdl, &txn,
+ servreg_restart_pd_resp_ei,
+ &resp);
+ if (ret < 0)
+ return ret;
+
+ ret = qmi_send_request(&pdr->notifier_hdl, &addr,
+ &txn, SERVREG_RESTART_PD_REQ,
+ SERVREG_RESTART_PD_REQ_MAX_LEN,
+ servreg_restart_pd_req_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ return ret;
+ }
+
+ ret = qmi_txn_wait(&txn, 5 * HZ);
+ if (ret < 0) {
+ pr_err("PDR: %s PD restart txn wait failed: %d\n",
+ req.service_path, ret);
+ return ret;
+ }
+
+ /* Check response if PDR is disabled */
+ if (resp.resp.result == QMI_RESULT_FAILURE_V01 &&
+ resp.resp.error == QMI_ERR_DISABLED_V01) {
+ pr_err("PDR: %s PD restart is disabled: 0x%x\n",
+ req.service_path, resp.resp.error);
+ return -EOPNOTSUPP;
+ }
+
+	/* Check the response for other error cases */
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ pr_err("PDR: %s request for PD restart failed: 0x%x\n",
+ req.service_path, resp.resp.error);
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(pdr_restart_pd);
+
+/**
+ * pdr_handle_alloc() - initialize the PDR client handle
+ * @status: function to be called on PD state change
+ * @priv: handle for client's use
+ *
+ * Initializes the PDR client handle to allow for tracking/restart of PDs.
+ *
+ * Return: pdr_handle object on success, ERR_PTR on failure.
+ */
+struct pdr_handle *pdr_handle_alloc(void (*status)(int state,
+ char *service_path,
+ void *priv), void *priv)
+{
+ struct pdr_handle *pdr;
+ int ret;
+
+ if (!status)
+ return ERR_PTR(-EINVAL);
+
+ pdr = kzalloc(sizeof(*pdr), GFP_KERNEL);
+ if (!pdr)
+ return ERR_PTR(-ENOMEM);
+
+ pdr->status = status;
+ pdr->priv = priv;
+
+ mutex_init(&pdr->status_lock);
+ mutex_init(&pdr->list_lock);
+ mutex_init(&pdr->lock);
+
+ INIT_LIST_HEAD(&pdr->lookups);
+ INIT_LIST_HEAD(&pdr->indack_list);
+
+ INIT_WORK(&pdr->locator_work, pdr_locator_work);
+ INIT_WORK(&pdr->notifier_work, pdr_notifier_work);
+ INIT_WORK(&pdr->indack_work, pdr_indack_work);
+
+ pdr->notifier_wq = create_singlethread_workqueue("pdr_notifier_wq");
+ if (!pdr->notifier_wq) {
+ ret = -ENOMEM;
+ goto free_pdr_handle;
+ }
+
+ pdr->indack_wq = alloc_ordered_workqueue("pdr_indack_wq", WQ_HIGHPRI);
+ if (!pdr->indack_wq) {
+ ret = -ENOMEM;
+ goto destroy_notifier;
+ }
+
+ ret = qmi_handle_init(&pdr->locator_hdl,
+ SERVREG_GET_DOMAIN_LIST_RESP_MAX_LEN,
+ &pdr_locator_ops, NULL);
+ if (ret < 0)
+ goto destroy_indack;
+
+ ret = qmi_add_lookup(&pdr->locator_hdl, SERVREG_LOCATOR_SERVICE, 1, 1);
+ if (ret < 0)
+ goto release_qmi_handle;
+
+ ret = qmi_handle_init(&pdr->notifier_hdl,
+ SERVREG_STATE_UPDATED_IND_MAX_LEN,
+ &pdr_notifier_ops,
+ qmi_indication_handler);
+ if (ret < 0)
+ goto release_qmi_handle;
+
+ return pdr;
+
+release_qmi_handle:
+ qmi_handle_release(&pdr->locator_hdl);
+destroy_indack:
+ destroy_workqueue(pdr->indack_wq);
+destroy_notifier:
+ destroy_workqueue(pdr->notifier_wq);
+free_pdr_handle:
+ kfree(pdr);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(pdr_handle_alloc);
+
+/**
+ * pdr_handle_release() - release the PDR client handle
+ * @pdr: PDR client handle
+ *
+ * Cleans up pending tracking requests and releases the underlying qmi handles.
+ */
+void pdr_handle_release(struct pdr_handle *pdr)
+{
+ struct pdr_service *pds, *tmp;
+
+ if (IS_ERR_OR_NULL(pdr))
+ return;
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry_safe(pds, tmp, &pdr->lookups, node) {
+ list_del(&pds->node);
+ kfree(pds);
+ }
+ mutex_unlock(&pdr->list_lock);
+
+ cancel_work_sync(&pdr->locator_work);
+ cancel_work_sync(&pdr->notifier_work);
+ cancel_work_sync(&pdr->indack_work);
+
+ destroy_workqueue(pdr->notifier_wq);
+ destroy_workqueue(pdr->indack_wq);
+
+ qmi_handle_release(&pdr->locator_hdl);
+ qmi_handle_release(&pdr->notifier_hdl);
+
+ kfree(pdr);
+}
+EXPORT_SYMBOL(pdr_handle_release);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm Protection Domain Restart helpers");
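For orientation, a minimal sketch of how a client driver might use the helpers above: allocate a handle with a status callback, register a lookup for the protection domain of interest, and release the handle on teardown. The service name and path mirror the pmic_glink usage added later in this series; the example_* names are placeholders and error handling is trimmed.

#include <linux/err.h>
#include <linux/printk.h>
#include <linux/soc/qcom/pdr.h>

/* Illustrative only; not part of this patch. */
static void example_pd_status(int state, char *service_path, void *priv)
{
	/* Invoked from the PDR workqueues on every tracked state change */
	if (state == SERVREG_SERVICE_STATE_UP)
		pr_info("PDR example: %s is up\n", service_path);
	else
		pr_info("PDR example: %s state %d\n", service_path, state);
}

static int example_track_pd(void)
{
	struct pdr_handle *pdr;
	struct pdr_service *pds;

	pdr = pdr_handle_alloc(example_pd_status, NULL);
	if (IS_ERR(pdr))
		return PTR_ERR(pdr);

	pds = pdr_add_lookup(pdr, "tms/servreg", "msm/adsp/charger_pd");
	if (IS_ERR(pds)) {
		pdr_handle_release(pdr);
		return PTR_ERR(pds);
	}

	/* A forced restart of the tracked PD would be: pdr_restart_pd(pdr, pds); */

	/* Drops all pending lookups and the underlying QMI handles */
	pdr_handle_release(pdr);
	return 0;
}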
diff --git a/drivers/soc/qcom/pdr_internal.h b/drivers/soc/qcom/pdr_internal.h
new file mode 100644
index 0000000000..03c282b7f1
--- /dev/null
+++ b/drivers/soc/qcom/pdr_internal.h
@@ -0,0 +1,379 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __QCOM_PDR_HELPER_INTERNAL__
+#define __QCOM_PDR_HELPER_INTERNAL__
+
+#include <linux/soc/qcom/pdr.h>
+
+#define SERVREG_LOCATOR_SERVICE 0x40
+#define SERVREG_NOTIFIER_SERVICE 0x42
+
+#define SERVREG_REGISTER_LISTENER_REQ 0x20
+#define SERVREG_GET_DOMAIN_LIST_REQ 0x21
+#define SERVREG_STATE_UPDATED_IND_ID 0x22
+#define SERVREG_SET_ACK_REQ 0x23
+#define SERVREG_RESTART_PD_REQ 0x24
+
+#define SERVREG_DOMAIN_LIST_LENGTH 32
+#define SERVREG_RESTART_PD_REQ_MAX_LEN 67
+#define SERVREG_REGISTER_LISTENER_REQ_LEN 71
+#define SERVREG_SET_ACK_REQ_LEN 72
+#define SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN 74
+#define SERVREG_STATE_UPDATED_IND_MAX_LEN 79
+#define SERVREG_GET_DOMAIN_LIST_RESP_MAX_LEN 2389
+
+struct servreg_location_entry {
+ char name[SERVREG_NAME_LENGTH + 1];
+ u8 service_data_valid;
+ u32 service_data;
+ u32 instance;
+};
+
+static const struct qmi_elem_info servreg_location_entry_ei[] = {
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct servreg_location_entry,
+ name),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct servreg_location_entry,
+ instance),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct servreg_location_entry,
+ service_data_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct servreg_location_entry,
+ service_data),
+ },
+ {}
+};
+
+struct servreg_get_domain_list_req {
+ char service_name[SERVREG_NAME_LENGTH + 1];
+ u8 domain_offset_valid;
+ u32 domain_offset;
+};
+
+static const struct qmi_elem_info servreg_get_domain_list_req_ei[] = {
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct servreg_get_domain_list_req,
+ service_name),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_get_domain_list_req,
+ domain_offset_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_get_domain_list_req,
+ domain_offset),
+ },
+ {}
+};
+
+struct servreg_get_domain_list_resp {
+ struct qmi_response_type_v01 resp;
+ u8 total_domains_valid;
+ u16 total_domains;
+ u8 db_rev_count_valid;
+ u16 db_rev_count;
+ u8 domain_list_valid;
+ u32 domain_list_len;
+ struct servreg_location_entry domain_list[SERVREG_DOMAIN_LIST_LENGTH];
+};
+
+static const struct qmi_elem_info servreg_get_domain_list_resp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ total_domains_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ total_domains),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ db_rev_count_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ db_rev_count),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ domain_list_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ domain_list_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = SERVREG_DOMAIN_LIST_LENGTH,
+ .elem_size = sizeof(struct servreg_location_entry),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ domain_list),
+ .ei_array = servreg_location_entry_ei,
+ },
+ {}
+};
+
+struct servreg_register_listener_req {
+ u8 enable;
+ char service_path[SERVREG_NAME_LENGTH + 1];
+};
+
+static const struct qmi_elem_info servreg_register_listener_req_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct servreg_register_listener_req,
+ enable),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_register_listener_req,
+ service_path),
+ },
+ {}
+};
+
+struct servreg_register_listener_resp {
+ struct qmi_response_type_v01 resp;
+ u8 curr_state_valid;
+ enum servreg_service_state curr_state;
+};
+
+static const struct qmi_elem_info servreg_register_listener_resp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_register_listener_resp,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_register_listener_resp,
+ curr_state_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum servreg_service_state),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_register_listener_resp,
+ curr_state),
+ },
+ {}
+};
+
+struct servreg_restart_pd_req {
+ char service_path[SERVREG_NAME_LENGTH + 1];
+};
+
+static const struct qmi_elem_info servreg_restart_pd_req_ei[] = {
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct servreg_restart_pd_req,
+ service_path),
+ },
+ {}
+};
+
+struct servreg_restart_pd_resp {
+ struct qmi_response_type_v01 resp;
+};
+
+static const struct qmi_elem_info servreg_restart_pd_resp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_restart_pd_resp,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+struct servreg_state_updated_ind {
+ enum servreg_service_state curr_state;
+ char service_path[SERVREG_NAME_LENGTH + 1];
+ u16 transaction_id;
+};
+
+static const struct qmi_elem_info servreg_state_updated_ind_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct servreg_state_updated_ind,
+ curr_state),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_state_updated_ind,
+ service_path),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct servreg_state_updated_ind,
+ transaction_id),
+ },
+ {}
+};
+
+struct servreg_set_ack_req {
+ char service_path[SERVREG_NAME_LENGTH + 1];
+ u16 transaction_id;
+};
+
+static const struct qmi_elem_info servreg_set_ack_req_ei[] = {
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct servreg_set_ack_req,
+ service_path),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_set_ack_req,
+ transaction_id),
+ },
+ {}
+};
+
+struct servreg_set_ack_resp {
+ struct qmi_response_type_v01 resp;
+};
+
+static const struct qmi_elem_info servreg_set_ack_resp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_set_ack_resp,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+#endif
diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c
new file mode 100644
index 0000000000..61c89ddfc7
--- /dev/null
+++ b/drivers/soc/qcom/pmic_glink.c
@@ -0,0 +1,379 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Linaro Ltd
+ */
+#include <linux/auxiliary_bus.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/rpmsg.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/pdr.h>
+#include <linux/soc/qcom/pmic_glink.h>
+
+enum {
+ PMIC_GLINK_CLIENT_BATT = 0,
+ PMIC_GLINK_CLIENT_ALTMODE,
+ PMIC_GLINK_CLIENT_UCSI,
+};
+
+#define PMIC_GLINK_CLIENT_DEFAULT (BIT(PMIC_GLINK_CLIENT_BATT) | \
+ BIT(PMIC_GLINK_CLIENT_ALTMODE))
+
+struct pmic_glink {
+ struct device *dev;
+ struct pdr_handle *pdr;
+
+ struct rpmsg_endpoint *ept;
+
+ unsigned long client_mask;
+
+ struct auxiliary_device altmode_aux;
+ struct auxiliary_device ps_aux;
+ struct auxiliary_device ucsi_aux;
+
+ /* serializing client_state and pdr_state updates */
+ struct mutex state_lock;
+ unsigned int client_state;
+ unsigned int pdr_state;
+
+ /* serializing clients list updates */
+ struct mutex client_lock;
+ struct list_head clients;
+};
+
+static struct pmic_glink *__pmic_glink;
+static DEFINE_MUTEX(__pmic_glink_lock);
+
+struct pmic_glink_client {
+ struct list_head node;
+
+ struct pmic_glink *pg;
+ unsigned int id;
+
+ void (*cb)(const void *data, size_t len, void *priv);
+ void (*pdr_notify)(void *priv, int state);
+ void *priv;
+};
+
+static void _devm_pmic_glink_release_client(struct device *dev, void *res)
+{
+ struct pmic_glink_client *client = (struct pmic_glink_client *)res;
+ struct pmic_glink *pg = client->pg;
+
+ mutex_lock(&pg->client_lock);
+ list_del(&client->node);
+ mutex_unlock(&pg->client_lock);
+}
+
+struct pmic_glink_client *devm_pmic_glink_register_client(struct device *dev,
+ unsigned int id,
+ void (*cb)(const void *, size_t, void *),
+ void (*pdr)(void *, int),
+ void *priv)
+{
+ struct pmic_glink_client *client;
+ struct pmic_glink *pg = dev_get_drvdata(dev->parent);
+
+ client = devres_alloc(_devm_pmic_glink_release_client, sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return ERR_PTR(-ENOMEM);
+
+ client->pg = pg;
+ client->id = id;
+ client->cb = cb;
+ client->pdr_notify = pdr;
+ client->priv = priv;
+
+ mutex_lock(&pg->client_lock);
+ list_add(&client->node, &pg->clients);
+ mutex_unlock(&pg->client_lock);
+
+ devres_add(dev, client);
+
+ return client;
+}
+EXPORT_SYMBOL_GPL(devm_pmic_glink_register_client);
+
+int pmic_glink_send(struct pmic_glink_client *client, void *data, size_t len)
+{
+ struct pmic_glink *pg = client->pg;
+
+ return rpmsg_send(pg->ept, data, len);
+}
+EXPORT_SYMBOL_GPL(pmic_glink_send);
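As a sketch of the client side of this API (the pmic_glink_altmode driver added later in this patch is the real in-tree user): an auxiliary driver registers a message callback and a PDR-state callback against its owner id, then uses pmic_glink_send() for requests. The example_* names are placeholders.

#include <linux/auxiliary_bus.h>
#include <linux/err.h>
#include <linux/soc/qcom/pmic_glink.h>

/* Illustrative only; not part of this patch. */
static void example_msg_cb(const void *data, size_t len, void *priv)
{
	/* data begins with a struct pmic_glink_hdr identifying owner and opcode */
}

static void example_pdr_cb(void *priv, int state)
{
	/* state is a SERVREG_SERVICE_STATE_* value from the PDR notifier */
}

static int example_aux_probe(struct auxiliary_device *adev,
			     const struct auxiliary_device_id *id)
{
	struct pmic_glink_client *client;

	client = devm_pmic_glink_register_client(&adev->dev,
						 PMIC_GLINK_OWNER_USBC_PAN,
						 example_msg_cb,
						 example_pdr_cb, NULL);
	return PTR_ERR_OR_ZERO(client);
}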
+
+static int pmic_glink_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
+ int len, void *priv, u32 addr)
+{
+ struct pmic_glink_client *client;
+ struct pmic_glink_hdr *hdr;
+ struct pmic_glink *pg = dev_get_drvdata(&rpdev->dev);
+
+ if (len < sizeof(*hdr)) {
+ dev_warn(pg->dev, "ignoring truncated message\n");
+ return 0;
+ }
+
+ hdr = data;
+
+ list_for_each_entry(client, &pg->clients, node) {
+ if (client->id == le32_to_cpu(hdr->owner))
+ client->cb(data, len, client->priv);
+ }
+
+ return 0;
+}
+
+static void pmic_glink_aux_release(struct device *dev) {}
+
+static int pmic_glink_add_aux_device(struct pmic_glink *pg,
+ struct auxiliary_device *aux,
+ const char *name)
+{
+ struct device *parent = pg->dev;
+ int ret;
+
+ aux->name = name;
+ aux->dev.parent = parent;
+ aux->dev.release = pmic_glink_aux_release;
+ device_set_of_node_from_dev(&aux->dev, parent);
+ ret = auxiliary_device_init(aux);
+ if (ret)
+ return ret;
+
+ ret = auxiliary_device_add(aux);
+ if (ret)
+ auxiliary_device_uninit(aux);
+
+ return ret;
+}
+
+static void pmic_glink_del_aux_device(struct pmic_glink *pg,
+ struct auxiliary_device *aux)
+{
+ auxiliary_device_delete(aux);
+ auxiliary_device_uninit(aux);
+}
+
+static void pmic_glink_state_notify_clients(struct pmic_glink *pg)
+{
+ struct pmic_glink_client *client;
+ unsigned int new_state = pg->client_state;
+
+ if (pg->client_state != SERVREG_SERVICE_STATE_UP) {
+ if (pg->pdr_state == SERVREG_SERVICE_STATE_UP && pg->ept)
+ new_state = SERVREG_SERVICE_STATE_UP;
+ } else {
+		if (pg->pdr_state != SERVREG_SERVICE_STATE_UP || !pg->ept)
+ new_state = SERVREG_SERVICE_STATE_DOWN;
+ }
+
+ if (new_state != pg->client_state) {
+ list_for_each_entry(client, &pg->clients, node)
+ client->pdr_notify(client->priv, new_state);
+ pg->client_state = new_state;
+ }
+}
+
+static void pmic_glink_pdr_callback(int state, char *svc_path, void *priv)
+{
+ struct pmic_glink *pg = priv;
+
+ mutex_lock(&pg->state_lock);
+ pg->pdr_state = state;
+
+ pmic_glink_state_notify_clients(pg);
+ mutex_unlock(&pg->state_lock);
+}
+
+static int pmic_glink_rpmsg_probe(struct rpmsg_device *rpdev)
+{
+ struct pmic_glink *pg = __pmic_glink;
+ int ret = 0;
+
+ mutex_lock(&__pmic_glink_lock);
+ if (!pg) {
+ ret = dev_err_probe(&rpdev->dev, -ENODEV, "no pmic_glink device to attach to\n");
+ goto out_unlock;
+ }
+
+ dev_set_drvdata(&rpdev->dev, pg);
+
+ mutex_lock(&pg->state_lock);
+ pg->ept = rpdev->ept;
+ pmic_glink_state_notify_clients(pg);
+ mutex_unlock(&pg->state_lock);
+
+out_unlock:
+ mutex_unlock(&__pmic_glink_lock);
+ return ret;
+}
+
+static void pmic_glink_rpmsg_remove(struct rpmsg_device *rpdev)
+{
+ struct pmic_glink *pg;
+
+ mutex_lock(&__pmic_glink_lock);
+ pg = __pmic_glink;
+ if (!pg)
+ goto out_unlock;
+
+ mutex_lock(&pg->state_lock);
+ pg->ept = NULL;
+ pmic_glink_state_notify_clients(pg);
+ mutex_unlock(&pg->state_lock);
+out_unlock:
+ mutex_unlock(&__pmic_glink_lock);
+}
+
+static const struct rpmsg_device_id pmic_glink_rpmsg_id_match[] = {
+ { "PMIC_RTR_ADSP_APPS" },
+ {}
+};
+
+static struct rpmsg_driver pmic_glink_rpmsg_driver = {
+ .probe = pmic_glink_rpmsg_probe,
+ .remove = pmic_glink_rpmsg_remove,
+ .callback = pmic_glink_rpmsg_callback,
+ .id_table = pmic_glink_rpmsg_id_match,
+ .drv = {
+ .name = "qcom_pmic_glink_rpmsg",
+ },
+};
+
+static int pmic_glink_probe(struct platform_device *pdev)
+{
+ const unsigned long *match_data;
+ struct pdr_service *service;
+ struct pmic_glink *pg;
+ int ret;
+
+ pg = devm_kzalloc(&pdev->dev, sizeof(*pg), GFP_KERNEL);
+ if (!pg)
+ return -ENOMEM;
+
+ dev_set_drvdata(&pdev->dev, pg);
+
+ pg->dev = &pdev->dev;
+
+ INIT_LIST_HEAD(&pg->clients);
+ mutex_init(&pg->client_lock);
+ mutex_init(&pg->state_lock);
+
+ match_data = (unsigned long *)of_device_get_match_data(&pdev->dev);
+ if (match_data)
+ pg->client_mask = *match_data;
+ else
+ pg->client_mask = PMIC_GLINK_CLIENT_DEFAULT;
+
+ if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_UCSI)) {
+ ret = pmic_glink_add_aux_device(pg, &pg->ucsi_aux, "ucsi");
+ if (ret)
+ return ret;
+ }
+ if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_ALTMODE)) {
+ ret = pmic_glink_add_aux_device(pg, &pg->altmode_aux, "altmode");
+ if (ret)
+ goto out_release_ucsi_aux;
+ }
+ if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_BATT)) {
+ ret = pmic_glink_add_aux_device(pg, &pg->ps_aux, "power-supply");
+ if (ret)
+ goto out_release_altmode_aux;
+ }
+
+ pg->pdr = pdr_handle_alloc(pmic_glink_pdr_callback, pg);
+ if (IS_ERR(pg->pdr)) {
+ ret = dev_err_probe(&pdev->dev, PTR_ERR(pg->pdr), "failed to initialize pdr\n");
+ goto out_release_aux_devices;
+ }
+
+ service = pdr_add_lookup(pg->pdr, "tms/servreg", "msm/adsp/charger_pd");
+ if (IS_ERR(service)) {
+ ret = dev_err_probe(&pdev->dev, PTR_ERR(service),
+ "failed adding pdr lookup for charger_pd\n");
+ goto out_release_pdr_handle;
+ }
+
+ mutex_lock(&__pmic_glink_lock);
+ __pmic_glink = pg;
+ mutex_unlock(&__pmic_glink_lock);
+
+ return 0;
+
+out_release_pdr_handle:
+ pdr_handle_release(pg->pdr);
+out_release_aux_devices:
+ if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_BATT))
+ pmic_glink_del_aux_device(pg, &pg->ps_aux);
+out_release_altmode_aux:
+ if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_ALTMODE))
+ pmic_glink_del_aux_device(pg, &pg->altmode_aux);
+out_release_ucsi_aux:
+ if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_UCSI))
+ pmic_glink_del_aux_device(pg, &pg->ucsi_aux);
+
+ return ret;
+}
+
+static int pmic_glink_remove(struct platform_device *pdev)
+{
+ struct pmic_glink *pg = dev_get_drvdata(&pdev->dev);
+
+ pdr_handle_release(pg->pdr);
+
+ if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_BATT))
+ pmic_glink_del_aux_device(pg, &pg->ps_aux);
+ if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_ALTMODE))
+ pmic_glink_del_aux_device(pg, &pg->altmode_aux);
+ if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_UCSI))
+ pmic_glink_del_aux_device(pg, &pg->ucsi_aux);
+
+ mutex_lock(&__pmic_glink_lock);
+ __pmic_glink = NULL;
+ mutex_unlock(&__pmic_glink_lock);
+
+ return 0;
+}
+
+static const unsigned long pmic_glink_sm8450_client_mask = BIT(PMIC_GLINK_CLIENT_BATT) |
+ BIT(PMIC_GLINK_CLIENT_ALTMODE) |
+ BIT(PMIC_GLINK_CLIENT_UCSI);
+
+static const struct of_device_id pmic_glink_of_match[] = {
+ { .compatible = "qcom,sm8450-pmic-glink", .data = &pmic_glink_sm8450_client_mask },
+ { .compatible = "qcom,sm8550-pmic-glink", .data = &pmic_glink_sm8450_client_mask },
+ { .compatible = "qcom,pmic-glink" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, pmic_glink_of_match);
+
+static struct platform_driver pmic_glink_driver = {
+ .probe = pmic_glink_probe,
+ .remove = pmic_glink_remove,
+ .driver = {
+ .name = "qcom_pmic_glink",
+ .of_match_table = pmic_glink_of_match,
+ },
+};
+
+static int pmic_glink_init(void)
+{
+ platform_driver_register(&pmic_glink_driver);
+ register_rpmsg_driver(&pmic_glink_rpmsg_driver);
+
+ return 0;
+};
+module_init(pmic_glink_init);
+
+static void pmic_glink_exit(void)
+{
+ unregister_rpmsg_driver(&pmic_glink_rpmsg_driver);
+ platform_driver_unregister(&pmic_glink_driver);
+};
+module_exit(pmic_glink_exit);
+
+MODULE_DESCRIPTION("Qualcomm PMIC GLINK driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/qcom/pmic_glink_altmode.c b/drivers/soc/qcom/pmic_glink_altmode.c
new file mode 100644
index 0000000000..9b0000b5f0
--- /dev/null
+++ b/drivers/soc/qcom/pmic_glink_altmode.c
@@ -0,0 +1,551 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Linaro Ltd
+ */
+#include <linux/auxiliary_bus.h>
+#include <linux/bitfield.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/mutex.h>
+#include <linux/property.h>
+#include <linux/soc/qcom/pdr.h>
+#include <drm/drm_bridge.h>
+
+#include <linux/usb/typec_altmode.h>
+#include <linux/usb/typec_dp.h>
+#include <linux/usb/typec_mux.h>
+#include <linux/usb/typec_retimer.h>
+
+#include <linux/soc/qcom/pmic_glink.h>
+
+#define PMIC_GLINK_MAX_PORTS 2
+
+#define USBC_SC8180X_NOTIFY_IND 0x13
+#define USBC_CMD_WRITE_REQ 0x15
+#define USBC_NOTIFY_IND 0x16
+
+#define ALTMODE_PAN_EN 0x10
+#define ALTMODE_PAN_ACK 0x11
+
+struct usbc_write_req {
+ struct pmic_glink_hdr hdr;
+ __le32 cmd;
+ __le32 arg;
+ __le32 reserved;
+};
+
+#define NOTIFY_PAYLOAD_SIZE 16
+struct usbc_notify {
+ struct pmic_glink_hdr hdr;
+ char payload[NOTIFY_PAYLOAD_SIZE];
+ u32 reserved;
+};
+
+struct usbc_sc8180x_notify {
+ struct pmic_glink_hdr hdr;
+ __le32 notification;
+ __le32 reserved[2];
+};
+
+enum pmic_glink_altmode_pin_assignment {
+ DPAM_HPD_OUT,
+ DPAM_HPD_A,
+ DPAM_HPD_B,
+ DPAM_HPD_C,
+ DPAM_HPD_D,
+ DPAM_HPD_E,
+ DPAM_HPD_F,
+};
+
+struct pmic_glink_altmode;
+
+#define work_to_altmode_port(w) container_of((w), struct pmic_glink_altmode_port, work)
+
+struct pmic_glink_altmode_port {
+ struct pmic_glink_altmode *altmode;
+ unsigned int index;
+
+ struct typec_switch *typec_switch;
+ struct typec_mux *typec_mux;
+ struct typec_mux_state state;
+ struct typec_retimer *typec_retimer;
+ struct typec_retimer_state retimer_state;
+ struct typec_altmode dp_alt;
+
+ struct work_struct work;
+
+ struct drm_bridge bridge;
+
+ enum typec_orientation orientation;
+ u16 svid;
+ u8 dp_data;
+ u8 mode;
+ u8 hpd_state;
+ u8 hpd_irq;
+};
+
+#define work_to_altmode(w) container_of((w), struct pmic_glink_altmode, enable_work)
+
+struct pmic_glink_altmode {
+ struct device *dev;
+
+ unsigned int owner_id;
+
+ /* To synchronize WRITE_REQ acks */
+ struct mutex lock;
+
+ struct completion pan_ack;
+ struct pmic_glink_client *client;
+
+ struct work_struct enable_work;
+
+ struct pmic_glink_altmode_port ports[PMIC_GLINK_MAX_PORTS];
+};
+
+static int pmic_glink_altmode_request(struct pmic_glink_altmode *altmode, u32 cmd, u32 arg)
+{
+ struct usbc_write_req req = {};
+ unsigned long left;
+ int ret;
+
+ /*
+ * The USBC_CMD_WRITE_REQ ack doesn't identify the request, so wait for
+ * one ack at a time.
+ */
+ mutex_lock(&altmode->lock);
+
+ req.hdr.owner = cpu_to_le32(altmode->owner_id);
+ req.hdr.type = cpu_to_le32(PMIC_GLINK_REQ_RESP);
+ req.hdr.opcode = cpu_to_le32(USBC_CMD_WRITE_REQ);
+ req.cmd = cpu_to_le32(cmd);
+ req.arg = cpu_to_le32(arg);
+
+ ret = pmic_glink_send(altmode->client, &req, sizeof(req));
+ if (ret) {
+ dev_err(altmode->dev, "failed to send altmode request: %#x (%d)\n", cmd, ret);
+ goto out_unlock;
+ }
+
+ left = wait_for_completion_timeout(&altmode->pan_ack, 5 * HZ);
+ if (!left) {
+ dev_err(altmode->dev, "timeout waiting for altmode request ack for: %#x\n", cmd);
+ ret = -ETIMEDOUT;
+ }
+
+out_unlock:
+ mutex_unlock(&altmode->lock);
+ return ret;
+}
+
+static void pmic_glink_altmode_enable_dp(struct pmic_glink_altmode *altmode,
+ struct pmic_glink_altmode_port *port,
+ u8 mode, bool hpd_state,
+ bool hpd_irq)
+{
+ struct typec_displayport_data dp_data = {};
+ int ret;
+
+ dp_data.status = DP_STATUS_ENABLED;
+ if (hpd_state)
+ dp_data.status |= DP_STATUS_HPD_STATE;
+ if (hpd_irq)
+ dp_data.status |= DP_STATUS_IRQ_HPD;
+ dp_data.conf = DP_CONF_SET_PIN_ASSIGN(mode);
+
+ port->state.alt = &port->dp_alt;
+ port->state.data = &dp_data;
+ port->state.mode = TYPEC_MODAL_STATE(mode);
+
+ ret = typec_mux_set(port->typec_mux, &port->state);
+ if (ret)
+ dev_err(altmode->dev, "failed to switch mux to DP\n");
+
+ port->retimer_state.alt = &port->dp_alt;
+ port->retimer_state.data = &dp_data;
+ port->retimer_state.mode = TYPEC_MODAL_STATE(mode);
+
+ ret = typec_retimer_set(port->typec_retimer, &port->retimer_state);
+ if (ret)
+ dev_err(altmode->dev, "failed to setup retimer to DP\n");
+}
+
+static void pmic_glink_altmode_enable_usb(struct pmic_glink_altmode *altmode,
+ struct pmic_glink_altmode_port *port)
+{
+ int ret;
+
+ port->state.alt = NULL;
+ port->state.data = NULL;
+ port->state.mode = TYPEC_STATE_USB;
+
+ ret = typec_mux_set(port->typec_mux, &port->state);
+ if (ret)
+ dev_err(altmode->dev, "failed to switch mux to USB\n");
+
+ port->retimer_state.alt = NULL;
+ port->retimer_state.data = NULL;
+ port->retimer_state.mode = TYPEC_STATE_USB;
+
+ ret = typec_retimer_set(port->typec_retimer, &port->retimer_state);
+ if (ret)
+ dev_err(altmode->dev, "failed to setup retimer to USB\n");
+}
+
+static void pmic_glink_altmode_safe(struct pmic_glink_altmode *altmode,
+ struct pmic_glink_altmode_port *port)
+{
+ int ret;
+
+ port->state.alt = NULL;
+ port->state.data = NULL;
+ port->state.mode = TYPEC_STATE_SAFE;
+
+ ret = typec_mux_set(port->typec_mux, &port->state);
+ if (ret)
+ dev_err(altmode->dev, "failed to switch mux to safe mode\n");
+
+ port->retimer_state.alt = NULL;
+ port->retimer_state.data = NULL;
+ port->retimer_state.mode = TYPEC_STATE_SAFE;
+
+ ret = typec_retimer_set(port->typec_retimer, &port->retimer_state);
+ if (ret)
+		dev_err(altmode->dev, "failed to setup retimer to safe mode\n");
+}
+
+static void pmic_glink_altmode_worker(struct work_struct *work)
+{
+ struct pmic_glink_altmode_port *alt_port = work_to_altmode_port(work);
+ struct pmic_glink_altmode *altmode = alt_port->altmode;
+
+ typec_switch_set(alt_port->typec_switch, alt_port->orientation);
+
+ if (alt_port->svid == USB_TYPEC_DP_SID && alt_port->mode == 0xff)
+ pmic_glink_altmode_safe(altmode, alt_port);
+ else if (alt_port->svid == USB_TYPEC_DP_SID)
+ pmic_glink_altmode_enable_dp(altmode, alt_port, alt_port->mode,
+ alt_port->hpd_state, alt_port->hpd_irq);
+ else
+ pmic_glink_altmode_enable_usb(altmode, alt_port);
+
+ if (alt_port->hpd_state)
+ drm_bridge_hpd_notify(&alt_port->bridge, connector_status_connected);
+ else
+ drm_bridge_hpd_notify(&alt_port->bridge, connector_status_disconnected);
+
+ pmic_glink_altmode_request(altmode, ALTMODE_PAN_ACK, alt_port->index);
+};
+
+static enum typec_orientation pmic_glink_altmode_orientation(unsigned int orientation)
+{
+ if (orientation == 0)
+ return TYPEC_ORIENTATION_NORMAL;
+ else if (orientation == 1)
+ return TYPEC_ORIENTATION_REVERSE;
+ else
+ return TYPEC_ORIENTATION_NONE;
+}
+
+#define SC8180X_PORT_MASK 0x000000ff
+#define SC8180X_ORIENTATION_MASK 0x0000ff00
+#define SC8180X_MUX_MASK 0x00ff0000
+#define SC8180X_MODE_MASK 0x3f000000
+#define SC8180X_HPD_STATE_MASK 0x40000000
+#define SC8180X_HPD_IRQ_MASK 0x80000000
+
+static void pmic_glink_altmode_sc8180xp_notify(struct pmic_glink_altmode *altmode,
+ const void *data, size_t len)
+{
+ struct pmic_glink_altmode_port *alt_port;
+ const struct usbc_sc8180x_notify *msg;
+ u32 notification;
+ u8 orientation;
+ u8 hpd_state;
+ u8 hpd_irq;
+ u16 svid;
+ u8 port;
+ u8 mode;
+ u8 mux;
+
+ if (len != sizeof(*msg)) {
+ dev_warn(altmode->dev, "invalid length of USBC_NOTIFY indication: %zd\n", len);
+ return;
+ }
+
+ msg = data;
+ notification = le32_to_cpu(msg->notification);
+ port = FIELD_GET(SC8180X_PORT_MASK, notification);
+ orientation = FIELD_GET(SC8180X_ORIENTATION_MASK, notification);
+ mux = FIELD_GET(SC8180X_MUX_MASK, notification);
+ mode = FIELD_GET(SC8180X_MODE_MASK, notification);
+ hpd_state = FIELD_GET(SC8180X_HPD_STATE_MASK, notification);
+ hpd_irq = FIELD_GET(SC8180X_HPD_IRQ_MASK, notification);
+
+ svid = mux == 2 ? USB_TYPEC_DP_SID : 0;
+
+ if (port >= ARRAY_SIZE(altmode->ports) || !altmode->ports[port].altmode) {
+ dev_dbg(altmode->dev, "notification on undefined port %d\n", port);
+ return;
+ }
+
+ alt_port = &altmode->ports[port];
+ alt_port->orientation = pmic_glink_altmode_orientation(orientation);
+ alt_port->svid = svid;
+ alt_port->mode = mode;
+ alt_port->hpd_state = hpd_state;
+ alt_port->hpd_irq = hpd_irq;
+ schedule_work(&alt_port->work);
+}
+
+#define SC8280XP_DPAM_MASK 0x3f
+#define SC8280XP_HPD_STATE_MASK BIT(6)
+#define SC8280XP_HPD_IRQ_MASK BIT(7)
+
+static void pmic_glink_altmode_sc8280xp_notify(struct pmic_glink_altmode *altmode,
+ u16 svid, const void *data, size_t len)
+{
+ struct pmic_glink_altmode_port *alt_port;
+ const struct usbc_notify *notify;
+ u8 orientation;
+ u8 hpd_state;
+ u8 hpd_irq;
+ u8 mode;
+ u8 port;
+
+ if (len != sizeof(*notify)) {
+ dev_warn(altmode->dev, "invalid length USBC_NOTIFY_IND: %zd\n",
+ len);
+ return;
+ }
+
+ notify = data;
+
+ port = notify->payload[0];
+ orientation = notify->payload[1];
+ mode = FIELD_GET(SC8280XP_DPAM_MASK, notify->payload[8]) - DPAM_HPD_A;
+ hpd_state = FIELD_GET(SC8280XP_HPD_STATE_MASK, notify->payload[8]);
+ hpd_irq = FIELD_GET(SC8280XP_HPD_IRQ_MASK, notify->payload[8]);
+
+ if (port >= ARRAY_SIZE(altmode->ports) || !altmode->ports[port].altmode) {
+ dev_dbg(altmode->dev, "notification on undefined port %d\n", port);
+ return;
+ }
+
+ alt_port = &altmode->ports[port];
+ alt_port->orientation = pmic_glink_altmode_orientation(orientation);
+ alt_port->svid = svid;
+ alt_port->mode = mode;
+ alt_port->hpd_state = hpd_state;
+ alt_port->hpd_irq = hpd_irq;
+ schedule_work(&alt_port->work);
+}
+
+static void pmic_glink_altmode_callback(const void *data, size_t len, void *priv)
+{
+ struct pmic_glink_altmode *altmode = priv;
+ const struct pmic_glink_hdr *hdr = data;
+ u16 opcode;
+ u16 svid;
+
+ opcode = le32_to_cpu(hdr->opcode) & 0xff;
+ svid = le32_to_cpu(hdr->opcode) >> 16;
+
+ switch (opcode) {
+ case USBC_CMD_WRITE_REQ:
+ complete(&altmode->pan_ack);
+ break;
+ case USBC_NOTIFY_IND:
+ pmic_glink_altmode_sc8280xp_notify(altmode, svid, data, len);
+ break;
+ case USBC_SC8180X_NOTIFY_IND:
+ pmic_glink_altmode_sc8180xp_notify(altmode, data, len);
+ break;
+ }
+}
+
+static int pmic_glink_altmode_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
+}
+
+static const struct drm_bridge_funcs pmic_glink_altmode_bridge_funcs = {
+ .attach = pmic_glink_altmode_attach,
+};
+
+static void pmic_glink_altmode_put_retimer(void *data)
+{
+ typec_retimer_put(data);
+}
+
+static void pmic_glink_altmode_put_mux(void *data)
+{
+ typec_mux_put(data);
+}
+
+static void pmic_glink_altmode_put_switch(void *data)
+{
+ typec_switch_put(data);
+}
+
+static void pmic_glink_altmode_enable_worker(struct work_struct *work)
+{
+ struct pmic_glink_altmode *altmode = work_to_altmode(work);
+ int ret;
+
+ ret = pmic_glink_altmode_request(altmode, ALTMODE_PAN_EN, 0);
+ if (ret)
+ dev_err(altmode->dev, "failed to request altmode notifications\n");
+}
+
+static void pmic_glink_altmode_pdr_notify(void *priv, int state)
+{
+ struct pmic_glink_altmode *altmode = priv;
+
+ if (state == SERVREG_SERVICE_STATE_UP)
+ schedule_work(&altmode->enable_work);
+}
+
+static const struct of_device_id pmic_glink_altmode_of_quirks[] = {
+ { .compatible = "qcom,sc8180x-pmic-glink", .data = (void *)PMIC_GLINK_OWNER_USBC },
+ {}
+};
+
+static int pmic_glink_altmode_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct pmic_glink_altmode_port *alt_port;
+ struct pmic_glink_altmode *altmode;
+ const struct of_device_id *match;
+ struct fwnode_handle *fwnode;
+ struct device *dev = &adev->dev;
+ u32 port;
+ int ret;
+
+ altmode = devm_kzalloc(dev, sizeof(*altmode), GFP_KERNEL);
+ if (!altmode)
+ return -ENOMEM;
+
+ altmode->dev = dev;
+
+ match = of_match_device(pmic_glink_altmode_of_quirks, dev->parent);
+ if (match)
+ altmode->owner_id = (unsigned long)match->data;
+ else
+ altmode->owner_id = PMIC_GLINK_OWNER_USBC_PAN;
+
+ INIT_WORK(&altmode->enable_work, pmic_glink_altmode_enable_worker);
+ init_completion(&altmode->pan_ack);
+ mutex_init(&altmode->lock);
+
+ device_for_each_child_node(dev, fwnode) {
+ ret = fwnode_property_read_u32(fwnode, "reg", &port);
+ if (ret < 0) {
+ dev_err(dev, "missing reg property of %pOFn\n", fwnode);
+ fwnode_handle_put(fwnode);
+ return ret;
+ }
+
+ if (port >= ARRAY_SIZE(altmode->ports)) {
+ dev_warn(dev, "invalid connector number, ignoring\n");
+ continue;
+ }
+
+ if (altmode->ports[port].altmode) {
+ dev_err(dev, "multiple connector definition for port %u\n", port);
+ fwnode_handle_put(fwnode);
+ return -EINVAL;
+ }
+
+ alt_port = &altmode->ports[port];
+ alt_port->altmode = altmode;
+ alt_port->index = port;
+ INIT_WORK(&alt_port->work, pmic_glink_altmode_worker);
+
+ alt_port->bridge.funcs = &pmic_glink_altmode_bridge_funcs;
+ alt_port->bridge.of_node = to_of_node(fwnode);
+ alt_port->bridge.ops = DRM_BRIDGE_OP_HPD;
+ alt_port->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
+
+ ret = devm_drm_bridge_add(dev, &alt_port->bridge);
+ if (ret) {
+ fwnode_handle_put(fwnode);
+ return ret;
+ }
+
+ alt_port->dp_alt.svid = USB_TYPEC_DP_SID;
+ alt_port->dp_alt.mode = USB_TYPEC_DP_MODE;
+ alt_port->dp_alt.active = 1;
+
+ alt_port->typec_mux = fwnode_typec_mux_get(fwnode);
+ if (IS_ERR(alt_port->typec_mux)) {
+ fwnode_handle_put(fwnode);
+ return dev_err_probe(dev, PTR_ERR(alt_port->typec_mux),
+ "failed to acquire mode-switch for port: %d\n",
+ port);
+ }
+
+ ret = devm_add_action_or_reset(dev, pmic_glink_altmode_put_mux,
+ alt_port->typec_mux);
+ if (ret) {
+ fwnode_handle_put(fwnode);
+ return ret;
+ }
+
+ alt_port->typec_retimer = fwnode_typec_retimer_get(fwnode);
+ if (IS_ERR(alt_port->typec_retimer)) {
+ fwnode_handle_put(fwnode);
+ return dev_err_probe(dev, PTR_ERR(alt_port->typec_retimer),
+ "failed to acquire retimer-switch for port: %d\n",
+ port);
+ }
+
+ ret = devm_add_action_or_reset(dev, pmic_glink_altmode_put_retimer,
+ alt_port->typec_retimer);
+ if (ret) {
+ fwnode_handle_put(fwnode);
+ return ret;
+ }
+
+ alt_port->typec_switch = fwnode_typec_switch_get(fwnode);
+ if (IS_ERR(alt_port->typec_switch)) {
+ fwnode_handle_put(fwnode);
+ return dev_err_probe(dev, PTR_ERR(alt_port->typec_switch),
+ "failed to acquire orientation-switch for port: %d\n",
+ port);
+ }
+
+ ret = devm_add_action_or_reset(dev, pmic_glink_altmode_put_switch,
+ alt_port->typec_switch);
+ if (ret) {
+ fwnode_handle_put(fwnode);
+ return ret;
+ }
+ }
+
+ altmode->client = devm_pmic_glink_register_client(dev,
+ altmode->owner_id,
+ pmic_glink_altmode_callback,
+ pmic_glink_altmode_pdr_notify,
+ altmode);
+ return PTR_ERR_OR_ZERO(altmode->client);
+}
+
+static const struct auxiliary_device_id pmic_glink_altmode_id_table[] = {
+ { .name = "pmic_glink.altmode", },
+ {},
+};
+MODULE_DEVICE_TABLE(auxiliary, pmic_glink_altmode_id_table);
+
+static struct auxiliary_driver pmic_glink_altmode_driver = {
+ .name = "pmic_glink_altmode",
+ .probe = pmic_glink_altmode_probe,
+ .id_table = pmic_glink_altmode_id_table,
+};
+
+module_auxiliary_driver(pmic_glink_altmode_driver);
+
+MODULE_DESCRIPTION("Qualcomm PMIC GLINK Altmode driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
new file mode 100644
index 0000000000..ba78876283
--- /dev/null
+++ b/drivers/soc/qcom/qcom-geni-se.c
@@ -0,0 +1,982 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+
+/* Disable MMIO tracing to prevent excessive logging of unwanted MMIO traces */
+#define __DISABLE_TRACE_MMIO__
+
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/soc/qcom/geni-se.h>
+
+/**
+ * DOC: Overview
+ *
+ * Generic Interface (GENI) Serial Engine (SE) Wrapper driver is introduced
+ * to manage GENI firmware based Qualcomm Universal Peripheral (QUP) Wrapper
+ * controller. QUP Wrapper is designed to support various serial bus protocols
+ * like UART, SPI, I2C, I3C, etc.
+ */
+
+/**
+ * DOC: Hardware description
+ *
+ * GENI based QUP is a highly-flexible and programmable module for supporting
+ * a wide range of serial interfaces like UART, SPI, I2C, I3C, etc. A single
+ * QUP module can provide up to 8 serial interfaces, using its internal
+ * serial engines. The actual configuration is determined by the target
+ * platform configuration. The protocol supported by each interface is
+ * determined by the firmware loaded to the serial engine. Each SE consists
+ * of a DMA Engine and GENI sub modules which enable serial engines to
+ * support FIFO and DMA modes of operation.
+ *
+ *
+ * +-----------------------------------------+
+ * |QUP Wrapper |
+ * | +----------------------------+ |
+ * --QUP & SE Clocks--> | Serial Engine N | +-IO------>
+ * | | ... | | Interface
+ * <---Clock Perf.----+ +----+-----------------------+ | |
+ * State Interface | | Serial Engine 1 | | |
+ * | | | | |
+ * | | | | |
+ * <--------AHB-------> | | | |
+ * | | +----+ |
+ * | | | |
+ * | | | |
+ * <------SE IRQ------+ +----------------------------+ |
+ * | |
+ * +-----------------------------------------+
+ *
+ * Figure 1: GENI based QUP Wrapper
+ *
+ * The GENI submodules include primary and secondary sequencers which are
+ * used to drive TX & RX operations. On serial interfaces that operate using
+ * master-slave model, primary sequencer drives both TX & RX operations. On
+ * serial interfaces that operate using peer-to-peer model, primary sequencer
+ * drives TX operation and secondary sequencer drives RX operation.
+ */
+
+/**
+ * DOC: Software description
+ *
+ * GENI SE Wrapper driver is structured into 2 parts:
+ *
+ * geni_wrapper represents QUP Wrapper controller. This part of the driver
+ * manages QUP Wrapper information such as hardware version, clock
+ * performance table that is common to all the internal serial engines.
+ *
+ * geni_se represents serial engine. This part of the driver manages serial
+ * engine information such as clocks, the containing QUP Wrapper, etc. This part
+ * of driver also supports operations (eg. initialize the concerned serial
+ * engine, select between FIFO and DMA mode of operation etc.) that are
+ * common to all the serial engines and are independent of serial interfaces.
+ */
+
+#define MAX_CLK_PERF_LEVEL 32
+#define MAX_CLKS 2
+
+/**
+ * struct geni_wrapper - Data structure to represent the QUP Wrapper Core
+ * @dev: Device pointer of the QUP wrapper core
+ * @base: Base address of this instance of QUP wrapper core
+ * @clks: Handle to the primary & optional secondary AHB clocks
+ * @num_clks: Count of clocks
+ */
+struct geni_wrapper {
+ struct device *dev;
+ void __iomem *base;
+ struct clk_bulk_data clks[MAX_CLKS];
+ unsigned int num_clks;
+};
+
+/**
+ * struct geni_se_desc - Data structure to represent the QUP Wrapper resources
+ * @clks: Name of the primary & optional secondary AHB clocks
+ * @num_clks: Count of clock names
+ */
+struct geni_se_desc {
+ unsigned int num_clks;
+ const char * const *clks;
+};
+
+static const char * const icc_path_names[] = {"qup-core", "qup-config",
+ "qup-memory"};
+
+#define QUP_HW_VER_REG 0x4
+
+/* Common SE registers */
+#define GENI_INIT_CFG_REVISION 0x0
+#define GENI_S_INIT_CFG_REVISION 0x4
+#define GENI_OUTPUT_CTRL 0x24
+#define GENI_CGC_CTRL 0x28
+#define GENI_CLK_CTRL_RO 0x60
+#define GENI_FW_S_REVISION_RO 0x6c
+#define SE_GENI_BYTE_GRAN 0x254
+#define SE_GENI_TX_PACKING_CFG0 0x260
+#define SE_GENI_TX_PACKING_CFG1 0x264
+#define SE_GENI_RX_PACKING_CFG0 0x284
+#define SE_GENI_RX_PACKING_CFG1 0x288
+#define SE_GENI_M_GP_LENGTH 0x910
+#define SE_GENI_S_GP_LENGTH 0x914
+#define SE_DMA_TX_PTR_L 0xc30
+#define SE_DMA_TX_PTR_H 0xc34
+#define SE_DMA_TX_ATTR 0xc38
+#define SE_DMA_TX_LEN 0xc3c
+#define SE_DMA_TX_IRQ_EN 0xc48
+#define SE_DMA_TX_IRQ_EN_SET 0xc4c
+#define SE_DMA_TX_IRQ_EN_CLR 0xc50
+#define SE_DMA_TX_LEN_IN 0xc54
+#define SE_DMA_TX_MAX_BURST 0xc5c
+#define SE_DMA_RX_PTR_L 0xd30
+#define SE_DMA_RX_PTR_H 0xd34
+#define SE_DMA_RX_ATTR 0xd38
+#define SE_DMA_RX_LEN 0xd3c
+#define SE_DMA_RX_IRQ_EN 0xd48
+#define SE_DMA_RX_IRQ_EN_SET 0xd4c
+#define SE_DMA_RX_IRQ_EN_CLR 0xd50
+#define SE_DMA_RX_LEN_IN 0xd54
+#define SE_DMA_RX_MAX_BURST 0xd5c
+#define SE_DMA_RX_FLUSH 0xd60
+#define SE_GSI_EVENT_EN 0xe18
+#define SE_IRQ_EN 0xe1c
+#define SE_DMA_GENERAL_CFG 0xe30
+
+/* GENI_OUTPUT_CTRL fields */
+#define DEFAULT_IO_OUTPUT_CTRL_MSK GENMASK(6, 0)
+
+/* GENI_CGC_CTRL fields */
+#define CFG_AHB_CLK_CGC_ON BIT(0)
+#define CFG_AHB_WR_ACLK_CGC_ON BIT(1)
+#define DATA_AHB_CLK_CGC_ON BIT(2)
+#define SCLK_CGC_ON BIT(3)
+#define TX_CLK_CGC_ON BIT(4)
+#define RX_CLK_CGC_ON BIT(5)
+#define EXT_CLK_CGC_ON BIT(6)
+#define PROG_RAM_HCLK_OFF BIT(8)
+#define PROG_RAM_SCLK_OFF BIT(9)
+#define DEFAULT_CGC_EN GENMASK(6, 0)
+
+/* SE_GSI_EVENT_EN fields */
+#define DMA_RX_EVENT_EN BIT(0)
+#define DMA_TX_EVENT_EN BIT(1)
+#define GENI_M_EVENT_EN BIT(2)
+#define GENI_S_EVENT_EN BIT(3)
+
+/* SE_IRQ_EN fields */
+#define DMA_RX_IRQ_EN BIT(0)
+#define DMA_TX_IRQ_EN BIT(1)
+#define GENI_M_IRQ_EN BIT(2)
+#define GENI_S_IRQ_EN BIT(3)
+
+/* SE_DMA_GENERAL_CFG */
+#define DMA_RX_CLK_CGC_ON BIT(0)
+#define DMA_TX_CLK_CGC_ON BIT(1)
+#define DMA_AHB_SLV_CFG_ON BIT(2)
+#define AHB_SEC_SLV_CLK_CGC_ON BIT(3)
+#define DUMMY_RX_NON_BUFFERABLE BIT(4)
+#define RX_DMA_ZERO_PADDING_EN BIT(5)
+#define RX_DMA_IRQ_DELAY_MSK GENMASK(8, 6)
+#define RX_DMA_IRQ_DELAY_SHFT 6
+
+/**
+ * geni_se_get_qup_hw_version() - Read the QUP wrapper Hardware version
+ * @se: Pointer to the corresponding serial engine.
+ *
+ * Return: Hardware Version of the wrapper.
+ */
+u32 geni_se_get_qup_hw_version(struct geni_se *se)
+{
+ struct geni_wrapper *wrapper = se->wrapper;
+
+ return readl_relaxed(wrapper->base + QUP_HW_VER_REG);
+}
+EXPORT_SYMBOL(geni_se_get_qup_hw_version);
+
+static void geni_se_io_set_mode(void __iomem *base)
+{
+ u32 val;
+
+ val = readl_relaxed(base + SE_IRQ_EN);
+ val |= GENI_M_IRQ_EN | GENI_S_IRQ_EN;
+ val |= DMA_TX_IRQ_EN | DMA_RX_IRQ_EN;
+ writel_relaxed(val, base + SE_IRQ_EN);
+
+ val = readl_relaxed(base + SE_GENI_DMA_MODE_EN);
+ val &= ~GENI_DMA_MODE_EN;
+ writel_relaxed(val, base + SE_GENI_DMA_MODE_EN);
+
+ writel_relaxed(0, base + SE_GSI_EVENT_EN);
+}
+
+static void geni_se_io_init(void __iomem *base)
+{
+ u32 val;
+
+ val = readl_relaxed(base + GENI_CGC_CTRL);
+ val |= DEFAULT_CGC_EN;
+ writel_relaxed(val, base + GENI_CGC_CTRL);
+
+ val = readl_relaxed(base + SE_DMA_GENERAL_CFG);
+ val |= AHB_SEC_SLV_CLK_CGC_ON | DMA_AHB_SLV_CFG_ON;
+ val |= DMA_TX_CLK_CGC_ON | DMA_RX_CLK_CGC_ON;
+ writel_relaxed(val, base + SE_DMA_GENERAL_CFG);
+
+ writel_relaxed(DEFAULT_IO_OUTPUT_CTRL_MSK, base + GENI_OUTPUT_CTRL);
+ writel_relaxed(FORCE_DEFAULT, base + GENI_FORCE_DEFAULT_REG);
+}
+
+static void geni_se_irq_clear(struct geni_se *se)
+{
+ writel_relaxed(0, se->base + SE_GSI_EVENT_EN);
+ writel_relaxed(0xffffffff, se->base + SE_GENI_M_IRQ_CLEAR);
+ writel_relaxed(0xffffffff, se->base + SE_GENI_S_IRQ_CLEAR);
+ writel_relaxed(0xffffffff, se->base + SE_DMA_TX_IRQ_CLR);
+ writel_relaxed(0xffffffff, se->base + SE_DMA_RX_IRQ_CLR);
+ writel_relaxed(0xffffffff, se->base + SE_IRQ_EN);
+}
+
+/**
+ * geni_se_init() - Initialize the GENI serial engine
+ * @se: Pointer to the concerned serial engine.
+ * @rx_wm: Receive watermark, in units of FIFO words.
+ * @rx_rfr: Ready-for-receive watermark, in units of FIFO words.
+ *
+ * This function is used to initialize the GENI serial engine, configure
+ * receive watermark and ready-for-receive watermarks.
+ */
+void geni_se_init(struct geni_se *se, u32 rx_wm, u32 rx_rfr)
+{
+ u32 val;
+
+ geni_se_irq_clear(se);
+ geni_se_io_init(se->base);
+ geni_se_io_set_mode(se->base);
+
+ writel_relaxed(rx_wm, se->base + SE_GENI_RX_WATERMARK_REG);
+ writel_relaxed(rx_rfr, se->base + SE_GENI_RX_RFR_WATERMARK_REG);
+
+ val = readl_relaxed(se->base + SE_GENI_M_IRQ_EN);
+ val |= M_COMMON_GENI_M_IRQ_EN;
+ writel_relaxed(val, se->base + SE_GENI_M_IRQ_EN);
+
+ val = readl_relaxed(se->base + SE_GENI_S_IRQ_EN);
+ val |= S_COMMON_GENI_S_IRQ_EN;
+ writel_relaxed(val, se->base + SE_GENI_S_IRQ_EN);
+}
+EXPORT_SYMBOL(geni_se_init);
+
+static void geni_se_select_fifo_mode(struct geni_se *se)
+{
+ u32 proto = geni_se_read_proto(se);
+ u32 val, val_old;
+
+ geni_se_irq_clear(se);
+
+ /* UART driver manages enabling / disabling interrupts internally */
+ if (proto != GENI_SE_UART) {
+		/* Non-UART uses only the primary sequencer, so don't bother about S_IRQ */
+ val_old = val = readl_relaxed(se->base + SE_GENI_M_IRQ_EN);
+ val |= M_CMD_DONE_EN | M_TX_FIFO_WATERMARK_EN;
+ val |= M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN;
+ if (val != val_old)
+ writel_relaxed(val, se->base + SE_GENI_M_IRQ_EN);
+ }
+
+ val_old = val = readl_relaxed(se->base + SE_GENI_DMA_MODE_EN);
+ val &= ~GENI_DMA_MODE_EN;
+ if (val != val_old)
+ writel_relaxed(val, se->base + SE_GENI_DMA_MODE_EN);
+}
+
+static void geni_se_select_dma_mode(struct geni_se *se)
+{
+ u32 proto = geni_se_read_proto(se);
+ u32 val, val_old;
+
+ geni_se_irq_clear(se);
+
+ /* UART driver manages enabling / disabling interrupts internally */
+ if (proto != GENI_SE_UART) {
+		/* Non-UART uses only the primary sequencer, so don't bother about S_IRQ */
+ val_old = val = readl_relaxed(se->base + SE_GENI_M_IRQ_EN);
+ val &= ~(M_CMD_DONE_EN | M_TX_FIFO_WATERMARK_EN);
+ val &= ~(M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN);
+ if (val != val_old)
+ writel_relaxed(val, se->base + SE_GENI_M_IRQ_EN);
+ }
+
+ val_old = val = readl_relaxed(se->base + SE_GENI_DMA_MODE_EN);
+ val |= GENI_DMA_MODE_EN;
+ if (val != val_old)
+ writel_relaxed(val, se->base + SE_GENI_DMA_MODE_EN);
+}
+
+static void geni_se_select_gpi_mode(struct geni_se *se)
+{
+ u32 val;
+
+ geni_se_irq_clear(se);
+
+ writel(0, se->base + SE_IRQ_EN);
+
+ val = readl(se->base + SE_GENI_M_IRQ_EN);
+ val &= ~(M_CMD_DONE_EN | M_TX_FIFO_WATERMARK_EN |
+ M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN);
+ writel(val, se->base + SE_GENI_M_IRQ_EN);
+
+ writel(GENI_DMA_MODE_EN, se->base + SE_GENI_DMA_MODE_EN);
+
+ val = readl(se->base + SE_GSI_EVENT_EN);
+ val |= (DMA_RX_EVENT_EN | DMA_TX_EVENT_EN | GENI_M_EVENT_EN | GENI_S_EVENT_EN);
+ writel(val, se->base + SE_GSI_EVENT_EN);
+}
+
+/**
+ * geni_se_select_mode() - Select the serial engine transfer mode
+ * @se: Pointer to the concerned serial engine.
+ * @mode: Transfer mode to be selected.
+ */
+void geni_se_select_mode(struct geni_se *se, enum geni_se_xfer_mode mode)
+{
+ WARN_ON(mode != GENI_SE_FIFO && mode != GENI_SE_DMA && mode != GENI_GPI_DMA);
+
+ switch (mode) {
+ case GENI_SE_FIFO:
+ geni_se_select_fifo_mode(se);
+ break;
+ case GENI_SE_DMA:
+ geni_se_select_dma_mode(se);
+ break;
+ case GENI_GPI_DMA:
+ geni_se_select_gpi_mode(se);
+ break;
+ case GENI_SE_INVALID:
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL(geni_se_select_mode);
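A rough sketch of the order in which a GENI protocol driver (SPI, I2C, UART, ...) typically calls into this file when bringing a serial engine up; 'se' is assumed to already be populated (dev, base, clk, wrapper) by that driver, and the watermark values are illustrative only.

#include <linux/soc/qcom/geni-se.h>

/* Illustrative only; not part of this patch. */
static int example_se_setup(struct geni_se *se)
{
	int ret;

	/* pinctrl default state + wrapper AHB clocks + SE core clock */
	ret = geni_se_resources_on(se);
	if (ret)
		return ret;

	/* Clear stale IRQs and program the RX / RX-RFR watermarks (FIFO words) */
	geni_se_init(se, 1, 16);

	/* Pick the transfer mode used for subsequent commands */
	geni_se_select_mode(se, GENI_SE_FIFO);

	return 0;
}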
+
+/**
+ * DOC: Overview
+ *
+ * GENI FIFO packing is highly configurable. TX/RX packing/unpacking consists
+ * of up to 4 operations, each operation represented by a 10-bit configuration
+ * vector. The vectors are programmed in GENI_TX_PACKING_CFG0 and
+ * GENI_TX_PACKING_CFG1 for the TX FIFO and in GENI_RX_PACKING_CFG0 and
+ * GENI_RX_PACKING_CFG1 for the RX FIFO.
+ * Refer to below examples for detailed bit-field description.
+ *
+ * Example 1: word_size = 7, packing_mode = 4 x 8, msb_to_lsb = 1
+ *
+ * +-----------+-------+-------+-------+-------+
+ * | | vec_0 | vec_1 | vec_2 | vec_3 |
+ * +-----------+-------+-------+-------+-------+
+ * | start | 0x6 | 0xe | 0x16 | 0x1e |
+ * | direction | 1 | 1 | 1 | 1 |
+ * | length | 6 | 6 | 6 | 6 |
+ * | stop | 0 | 0 | 0 | 1 |
+ * +-----------+-------+-------+-------+-------+
+ *
+ * Example 2: word_size = 15, packing_mode = 2 x 16, msb_to_lsb = 0
+ *
+ * +-----------+-------+-------+-------+-------+
+ * | | vec_0 | vec_1 | vec_2 | vec_3 |
+ * +-----------+-------+-------+-------+-------+
+ * | start | 0x0 | 0x8 | 0x10 | 0x18 |
+ * | direction | 0 | 0 | 0 | 0 |
+ * | length | 7 | 6 | 7 | 6 |
+ * | stop | 0 | 0 | 0 | 1 |
+ * +-----------+-------+-------+-------+-------+
+ *
+ * Example 3: word_size = 23, packing_mode = 1 x 32, msb_to_lsb = 1
+ *
+ * +-----------+-------+-------+-------+-------+
+ * | | vec_0 | vec_1 | vec_2 | vec_3 |
+ * +-----------+-------+-------+-------+-------+
+ * | start | 0x16 | 0xe | 0x6 | 0x0 |
+ * | direction | 1 | 1 | 1 | 1 |
+ * | length | 7 | 7 | 6 | 0 |
+ * | stop | 0 | 0 | 1 | 0 |
+ * +-----------+-------+-------+-------+-------+
+ *
+ */
+
+#define NUM_PACKING_VECTORS 4
+#define PACKING_START_SHIFT 5
+#define PACKING_DIR_SHIFT 4
+#define PACKING_LEN_SHIFT 1
+#define PACKING_STOP_BIT BIT(0)
+#define PACKING_VECTOR_SHIFT 10
+/**
+ * geni_se_config_packing() - Packing configuration of the serial engine
+ * @se: Pointer to the concerned serial engine
+ * @bpw: Bits of data per transfer word.
+ * @pack_words: Number of words per fifo element.
+ * @msb_to_lsb: Transfer from MSB to LSB or vice-versa.
+ * @tx_cfg: Flag to configure the TX Packing.
+ * @rx_cfg: Flag to configure the RX Packing.
+ *
+ * This function is used to configure the packing rules for the current
+ * transfer.
+ */
+void geni_se_config_packing(struct geni_se *se, int bpw, int pack_words,
+ bool msb_to_lsb, bool tx_cfg, bool rx_cfg)
+{
+ u32 cfg0, cfg1, cfg[NUM_PACKING_VECTORS] = {0};
+ int len;
+ int temp_bpw = bpw;
+ int idx_start = msb_to_lsb ? bpw - 1 : 0;
+ int idx = idx_start;
+ int idx_delta = msb_to_lsb ? -BITS_PER_BYTE : BITS_PER_BYTE;
+ int ceil_bpw = ALIGN(bpw, BITS_PER_BYTE);
+ int iter = (ceil_bpw * pack_words) / BITS_PER_BYTE;
+ int i;
+
+ if (iter <= 0 || iter > NUM_PACKING_VECTORS)
+ return;
+
+ for (i = 0; i < iter; i++) {
+ len = min_t(int, temp_bpw, BITS_PER_BYTE) - 1;
+ cfg[i] = idx << PACKING_START_SHIFT;
+ cfg[i] |= msb_to_lsb << PACKING_DIR_SHIFT;
+ cfg[i] |= len << PACKING_LEN_SHIFT;
+
+ if (temp_bpw <= BITS_PER_BYTE) {
+ idx = ((i + 1) * BITS_PER_BYTE) + idx_start;
+ temp_bpw = bpw;
+ } else {
+ idx = idx + idx_delta;
+ temp_bpw = temp_bpw - BITS_PER_BYTE;
+ }
+ }
+ cfg[iter - 1] |= PACKING_STOP_BIT;
+ cfg0 = cfg[0] | (cfg[1] << PACKING_VECTOR_SHIFT);
+ cfg1 = cfg[2] | (cfg[3] << PACKING_VECTOR_SHIFT);
+
+ if (tx_cfg) {
+ writel_relaxed(cfg0, se->base + SE_GENI_TX_PACKING_CFG0);
+ writel_relaxed(cfg1, se->base + SE_GENI_TX_PACKING_CFG1);
+ }
+ if (rx_cfg) {
+ writel_relaxed(cfg0, se->base + SE_GENI_RX_PACKING_CFG0);
+ writel_relaxed(cfg1, se->base + SE_GENI_RX_PACKING_CFG1);
+ }
+
+ /*
+ * Number of protocol words in each FIFO entry
+ * 0 - 4x8, four words in each entry, max word size of 8 bits
+ * 1 - 2x16, two words in each entry, max word size of 16 bits
+ * 2 - 1x32, one word in each entry, max word size of 32 bits
+ * 3 - undefined
+ */
+ if (pack_words || bpw == 32)
+ writel_relaxed(bpw / 16, se->base + SE_GENI_BYTE_GRAN);
+}
+EXPORT_SYMBOL(geni_se_config_packing);
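Tying this back to Example 1 of the packing description above (word_size = 7, packing_mode = 4 x 8, msb_to_lsb = 1): this is a worked reading of the code, not an additional configuration. A call such as geni_se_config_packing(se, 7, 4, true, true, false) runs the loop four times and packs the resulting 10-bit vectors two per register:

	vec_0 = (0x06 << 5) | (1 << 4) | (6 << 1)     = 0x0dc
	vec_1 = (0x0e << 5) | (1 << 4) | (6 << 1)     = 0x1dc
	vec_2 = (0x16 << 5) | (1 << 4) | (6 << 1)     = 0x2dc
	vec_3 = (0x1e << 5) | (1 << 4) | (6 << 1) | 1 = 0x3dd   (stop bit)

	cfg0 = vec_0 | (vec_1 << 10) = 0x770dc   ->  SE_GENI_TX_PACKING_CFG0
	cfg1 = vec_2 | (vec_3 << 10) = 0xf76dc   ->  SE_GENI_TX_PACKING_CFG1

Since pack_words is non-zero, SE_GENI_BYTE_GRAN is also written with bpw / 16 = 0, i.e. the 4 x 8 granularity.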
+
+static void geni_se_clks_off(struct geni_se *se)
+{
+ struct geni_wrapper *wrapper = se->wrapper;
+
+ clk_disable_unprepare(se->clk);
+ clk_bulk_disable_unprepare(wrapper->num_clks, wrapper->clks);
+}
+
+/**
+ * geni_se_resources_off() - Turn off resources associated with the serial
+ * engine
+ * @se: Pointer to the concerned serial engine.
+ *
+ * Return: 0 on success, standard Linux error codes on failure/error.
+ */
+int geni_se_resources_off(struct geni_se *se)
+{
+ int ret;
+
+ if (has_acpi_companion(se->dev))
+ return 0;
+
+ ret = pinctrl_pm_select_sleep_state(se->dev);
+ if (ret)
+ return ret;
+
+ geni_se_clks_off(se);
+ return 0;
+}
+EXPORT_SYMBOL(geni_se_resources_off);
+
+static int geni_se_clks_on(struct geni_se *se)
+{
+ int ret;
+ struct geni_wrapper *wrapper = se->wrapper;
+
+ ret = clk_bulk_prepare_enable(wrapper->num_clks, wrapper->clks);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(se->clk);
+ if (ret)
+ clk_bulk_disable_unprepare(wrapper->num_clks, wrapper->clks);
+ return ret;
+}
+
+/**
+ * geni_se_resources_on() - Turn on resources associated with the serial
+ * engine
+ * @se: Pointer to the concerned serial engine.
+ *
+ * Return: 0 on success, standard Linux error codes on failure/error.
+ */
+int geni_se_resources_on(struct geni_se *se)
+{
+ int ret;
+
+ if (has_acpi_companion(se->dev))
+ return 0;
+
+ ret = geni_se_clks_on(se);
+ if (ret)
+ return ret;
+
+ ret = pinctrl_pm_select_default_state(se->dev);
+ if (ret)
+ geni_se_clks_off(se);
+
+ return ret;
+}
+EXPORT_SYMBOL(geni_se_resources_on);
+
+/**
+ * geni_se_clk_tbl_get() - Get the clock table to program DFS
+ * @se: Pointer to the concerned serial engine.
+ * @tbl: Table in which the output is returned.
+ *
+ * This function is called by the protocol drivers to determine the different
+ * clock frequencies supported by the serial engine core clock. The protocol
+ * drivers use the output to determine the clock frequency index to be
+ * programmed into DFS.
+ *
+ * Return: number of valid performance levels in the table on success,
+ * standard Linux error codes on failure.
+ */
+int geni_se_clk_tbl_get(struct geni_se *se, unsigned long **tbl)
+{
+ long freq = 0;
+ int i;
+
+ if (se->clk_perf_tbl) {
+ *tbl = se->clk_perf_tbl;
+ return se->num_clk_levels;
+ }
+
+ se->clk_perf_tbl = devm_kcalloc(se->dev, MAX_CLK_PERF_LEVEL,
+ sizeof(*se->clk_perf_tbl),
+ GFP_KERNEL);
+ if (!se->clk_perf_tbl)
+ return -ENOMEM;
+
+ for (i = 0; i < MAX_CLK_PERF_LEVEL; i++) {
+ freq = clk_round_rate(se->clk, freq + 1);
+ if (freq <= 0 || freq == se->clk_perf_tbl[i - 1])
+ break;
+ se->clk_perf_tbl[i] = freq;
+ }
+ se->num_clk_levels = i;
+ *tbl = se->clk_perf_tbl;
+ return se->num_clk_levels;
+}
+EXPORT_SYMBOL(geni_se_clk_tbl_get);
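+
+/*
+ * Illustrative sketch (not part of the driver): how a protocol driver might
+ * walk the DFS table returned by geni_se_clk_tbl_get(). "se" is assumed to be
+ * a previously initialised struct geni_se:
+ *
+ *	unsigned long *tbl;
+ *	int i, levels;
+ *
+ *	levels = geni_se_clk_tbl_get(se, &tbl);
+ *	for (i = 0; i < levels; i++)
+ *		dev_dbg(se->dev, "DFS level %d: %lu Hz\n", i, tbl[i]);
+ */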
+
+/**
+ * geni_se_clk_freq_match() - Get the matching or closest SE clock frequency
+ * @se: Pointer to the concerned serial engine.
+ * @req_freq: Requested clock frequency.
+ * @index: Index of the resultant frequency in the table.
+ * @res_freq: Resultant frequency of the source clock.
+ * @exact: Flag to indicate that the resultant frequency must be an exact
+ * integer multiple of the requested frequency.
+ *
+ * This function is called by the protocol drivers to determine the best match
+ * of the requested frequency as provided by the serial engine clock in order
+ * to meet the performance requirements.
+ *
+ * If we return success:
+ * - if @exact is true then @res_freq / <an_integer> == @req_freq
+ * - if @exact is false then @res_freq / <an_integer> <= @req_freq
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int geni_se_clk_freq_match(struct geni_se *se, unsigned long req_freq,
+ unsigned int *index, unsigned long *res_freq,
+ bool exact)
+{
+ unsigned long *tbl;
+ int num_clk_levels;
+ int i;
+ unsigned long best_delta;
+ unsigned long new_delta;
+ unsigned int divider;
+
+ num_clk_levels = geni_se_clk_tbl_get(se, &tbl);
+ if (num_clk_levels < 0)
+ return num_clk_levels;
+
+ if (num_clk_levels == 0)
+ return -EINVAL;
+
+ best_delta = ULONG_MAX;
+ for (i = 0; i < num_clk_levels; i++) {
+ divider = DIV_ROUND_UP(tbl[i], req_freq);
+ new_delta = req_freq - tbl[i] / divider;
+ if (new_delta < best_delta) {
+ /* We have a new best! */
+ *index = i;
+ *res_freq = tbl[i];
+
+ /* If the new best is exact then we're done */
+ if (new_delta == 0)
+ return 0;
+
+ /* Record how close we got */
+ best_delta = new_delta;
+ }
+ }
+
+ if (exact)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(geni_se_clk_freq_match);
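+
+/*
+ * Illustrative sketch (not part of the driver): picking a DFS index for a
+ * hypothetical target rate. "target_hz" and the factor of 4 are assumptions
+ * standing in for a protocol-specific oversampling/divider requirement:
+ *
+ *	unsigned int index;
+ *	unsigned long res_freq;
+ *	int ret;
+ *
+ *	ret = geni_se_clk_freq_match(se, 4 * target_hz, &index, &res_freq, false);
+ *	if (!ret)
+ *		dev_dbg(se->dev, "using level %u (%lu Hz)\n", index, res_freq);
+ */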
+
+#define GENI_SE_DMA_DONE_EN BIT(0)
+#define GENI_SE_DMA_EOT_EN BIT(1)
+#define GENI_SE_DMA_AHB_ERR_EN BIT(2)
+#define GENI_SE_DMA_EOT_BUF BIT(0)
+
+/**
+ * geni_se_tx_init_dma() - Initiate TX DMA transfer on the serial engine
+ * @se: Pointer to the concerned serial engine.
+ * @iova: Mapped DMA address.
+ * @len: Length of the TX buffer.
+ *
+ * This function is used to initiate DMA TX transfer.
+ */
+void geni_se_tx_init_dma(struct geni_se *se, dma_addr_t iova, size_t len)
+{
+ u32 val;
+
+ val = GENI_SE_DMA_DONE_EN;
+ val |= GENI_SE_DMA_EOT_EN;
+ val |= GENI_SE_DMA_AHB_ERR_EN;
+ writel_relaxed(val, se->base + SE_DMA_TX_IRQ_EN_SET);
+ writel_relaxed(lower_32_bits(iova), se->base + SE_DMA_TX_PTR_L);
+ writel_relaxed(upper_32_bits(iova), se->base + SE_DMA_TX_PTR_H);
+ writel_relaxed(GENI_SE_DMA_EOT_BUF, se->base + SE_DMA_TX_ATTR);
+ writel(len, se->base + SE_DMA_TX_LEN);
+}
+EXPORT_SYMBOL(geni_se_tx_init_dma);
+
+/**
+ * geni_se_tx_dma_prep() - Prepare the serial engine for TX DMA transfer
+ * @se: Pointer to the concerned serial engine.
+ * @buf: Pointer to the TX buffer.
+ * @len: Length of the TX buffer.
+ * @iova: Pointer to store the mapped DMA address.
+ *
+ * This function is used to prepare the buffers for DMA TX.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int geni_se_tx_dma_prep(struct geni_se *se, void *buf, size_t len,
+ dma_addr_t *iova)
+{
+ struct geni_wrapper *wrapper = se->wrapper;
+
+ if (!wrapper)
+ return -EINVAL;
+
+ *iova = dma_map_single(wrapper->dev, buf, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(wrapper->dev, *iova))
+ return -EIO;
+
+ geni_se_tx_init_dma(se, *iova, len);
+ return 0;
+}
+EXPORT_SYMBOL(geni_se_tx_dma_prep);
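+
+/*
+ * Illustrative sketch (not part of the driver): the usual TX DMA lifecycle
+ * from a protocol driver's point of view. "buf" and "len" are hypothetical;
+ * the unprep call would normally be made from the DMA-done interrupt handler.
+ * The RX path (geni_se_rx_dma_prep()/geni_se_rx_dma_unprep()) is symmetric:
+ *
+ *	dma_addr_t iova;
+ *	int ret;
+ *
+ *	ret = geni_se_tx_dma_prep(se, buf, len, &iova);
+ *	if (ret)
+ *		return ret;
+ *	... wait for the TX DMA-done interrupt ...
+ *	geni_se_tx_dma_unprep(se, iova, len);
+ */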
+
+/**
+ * geni_se_rx_init_dma() - Initiate RX DMA transfer on the serial engine
+ * @se: Pointer to the concerned serial engine.
+ * @iova: Mapped DMA address.
+ * @len: Length of the RX buffer.
+ *
+ * This function is used to initiate DMA RX transfer.
+ */
+void geni_se_rx_init_dma(struct geni_se *se, dma_addr_t iova, size_t len)
+{
+ u32 val;
+
+ val = GENI_SE_DMA_DONE_EN;
+ val |= GENI_SE_DMA_EOT_EN;
+ val |= GENI_SE_DMA_AHB_ERR_EN;
+ writel_relaxed(val, se->base + SE_DMA_RX_IRQ_EN_SET);
+ writel_relaxed(lower_32_bits(iova), se->base + SE_DMA_RX_PTR_L);
+ writel_relaxed(upper_32_bits(iova), se->base + SE_DMA_RX_PTR_H);
+ /* RX does not have EOT buffer type bit. So just reset RX_ATTR */
+ writel_relaxed(0, se->base + SE_DMA_RX_ATTR);
+ writel(len, se->base + SE_DMA_RX_LEN);
+}
+EXPORT_SYMBOL(geni_se_rx_init_dma);
+
+/**
+ * geni_se_rx_dma_prep() - Prepare the serial engine for RX DMA transfer
+ * @se: Pointer to the concerned serial engine.
+ * @buf: Pointer to the RX buffer.
+ * @len: Length of the RX buffer.
+ * @iova: Pointer to store the mapped DMA address.
+ *
+ * This function is used to prepare the buffers for DMA RX.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int geni_se_rx_dma_prep(struct geni_se *se, void *buf, size_t len,
+ dma_addr_t *iova)
+{
+ struct geni_wrapper *wrapper = se->wrapper;
+
+ if (!wrapper)
+ return -EINVAL;
+
+ *iova = dma_map_single(wrapper->dev, buf, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(wrapper->dev, *iova))
+ return -EIO;
+
+ geni_se_rx_init_dma(se, *iova, len);
+ return 0;
+}
+EXPORT_SYMBOL(geni_se_rx_dma_prep);
+
+/**
+ * geni_se_tx_dma_unprep() - Unprepare the serial engine after TX DMA transfer
+ * @se: Pointer to the concerned serial engine.
+ * @iova: DMA address of the TX buffer.
+ * @len: Length of the TX buffer.
+ *
+ * This function is used to unprepare the DMA buffers after DMA TX.
+ */
+void geni_se_tx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len)
+{
+ struct geni_wrapper *wrapper = se->wrapper;
+
+ if (!dma_mapping_error(wrapper->dev, iova))
+ dma_unmap_single(wrapper->dev, iova, len, DMA_TO_DEVICE);
+}
+EXPORT_SYMBOL(geni_se_tx_dma_unprep);
+
+/**
+ * geni_se_rx_dma_unprep() - Unprepare the serial engine after RX DMA transfer
+ * @se: Pointer to the concerned serial engine.
+ * @iova: DMA address of the RX buffer.
+ * @len: Length of the RX buffer.
+ *
+ * This function is used to unprepare the DMA buffers after DMA RX.
+ */
+void geni_se_rx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len)
+{
+ struct geni_wrapper *wrapper = se->wrapper;
+
+ if (!dma_mapping_error(wrapper->dev, iova))
+ dma_unmap_single(wrapper->dev, iova, len, DMA_FROM_DEVICE);
+}
+EXPORT_SYMBOL(geni_se_rx_dma_unprep);
+
+int geni_icc_get(struct geni_se *se, const char *icc_ddr)
+{
+ int i, err;
+ const char *icc_names[] = {"qup-core", "qup-config", icc_ddr};
+
+ if (has_acpi_companion(se->dev))
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) {
+ if (!icc_names[i])
+ continue;
+
+ se->icc_paths[i].path = devm_of_icc_get(se->dev, icc_names[i]);
+ if (IS_ERR(se->icc_paths[i].path))
+ goto err;
+ }
+
+ return 0;
+
+err:
+ err = PTR_ERR(se->icc_paths[i].path);
+ if (err != -EPROBE_DEFER)
+ dev_err_ratelimited(se->dev, "Failed to get ICC path '%s': %d\n",
+ icc_names[i], err);
+ return err;
+
+}
+EXPORT_SYMBOL(geni_icc_get);
+
+int geni_icc_set_bw(struct geni_se *se)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) {
+ ret = icc_set_bw(se->icc_paths[i].path,
+ se->icc_paths[i].avg_bw, se->icc_paths[i].avg_bw);
+ if (ret) {
+ dev_err_ratelimited(se->dev, "ICC BW voting failed on path '%s': %d\n",
+ icc_path_names[i], ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(geni_icc_set_bw);
+
+void geni_icc_set_tag(struct geni_se *se, u32 tag)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++)
+ icc_set_tag(se->icc_paths[i].path, tag);
+}
+EXPORT_SYMBOL(geni_icc_set_tag);
+
+/* TODO: Replace this with icc_bulk_enable once it's implemented in the ICC core */
+int geni_icc_enable(struct geni_se *se)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) {
+ ret = icc_enable(se->icc_paths[i].path);
+ if (ret) {
+ dev_err_ratelimited(se->dev, "ICC enable failed on path '%s': %d\n",
+ icc_path_names[i], ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(geni_icc_enable);
+
+int geni_icc_disable(struct geni_se *se)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) {
+ ret = icc_disable(se->icc_paths[i].path);
+ if (ret) {
+ dev_err_ratelimited(se->dev, "ICC disable failed on path '%s': %d\n",
+ icc_path_names[i], ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(geni_icc_disable);
+
+static int geni_se_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct geni_wrapper *wrapper;
+ int ret;
+
+ wrapper = devm_kzalloc(dev, sizeof(*wrapper), GFP_KERNEL);
+ if (!wrapper)
+ return -ENOMEM;
+
+ wrapper->dev = dev;
+ wrapper->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(wrapper->base))
+ return PTR_ERR(wrapper->base);
+
+ if (!has_acpi_companion(&pdev->dev)) {
+ const struct geni_se_desc *desc;
+ int i;
+
+ desc = device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ wrapper->num_clks = min_t(unsigned int, desc->num_clks, MAX_CLKS);
+
+ for (i = 0; i < wrapper->num_clks; ++i)
+ wrapper->clks[i].id = desc->clks[i];
+
+ ret = of_count_phandle_with_args(dev->of_node, "clocks", "#clock-cells");
+ if (ret < 0) {
+ dev_err(dev, "invalid clocks property at %pOF\n", dev->of_node);
+ return ret;
+ }
+
+ if (ret < wrapper->num_clks) {
+ dev_err(dev, "invalid clocks count at %pOF, expected %d entries\n",
+ dev->of_node, wrapper->num_clks);
+ return -EINVAL;
+ }
+
+ ret = devm_clk_bulk_get(dev, wrapper->num_clks, wrapper->clks);
+ if (ret) {
+ dev_err(dev, "Err getting clks %d\n", ret);
+ return ret;
+ }
+ }
+
+ dev_set_drvdata(dev, wrapper);
+ dev_dbg(dev, "GENI SE Driver probed\n");
+ return devm_of_platform_populate(dev);
+}
+
+static const char * const qup_clks[] = {
+ "m-ahb",
+ "s-ahb",
+};
+
+static const struct geni_se_desc qup_desc = {
+ .clks = qup_clks,
+ .num_clks = ARRAY_SIZE(qup_clks),
+};
+
+static const char * const i2c_master_hub_clks[] = {
+ "s-ahb",
+};
+
+static const struct geni_se_desc i2c_master_hub_desc = {
+ .clks = i2c_master_hub_clks,
+ .num_clks = ARRAY_SIZE(i2c_master_hub_clks),
+};
+
+static const struct of_device_id geni_se_dt_match[] = {
+ { .compatible = "qcom,geni-se-qup", .data = &qup_desc },
+ { .compatible = "qcom,geni-se-i2c-master-hub", .data = &i2c_master_hub_desc },
+ {}
+};
+MODULE_DEVICE_TABLE(of, geni_se_dt_match);
+
+static struct platform_driver geni_se_driver = {
+ .driver = {
+ .name = "geni_se_qup",
+ .of_match_table = geni_se_dt_match,
+ },
+ .probe = geni_se_probe,
+};
+module_platform_driver(geni_se_driver);
+
+MODULE_DESCRIPTION("GENI Serial Engine Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c
new file mode 100644
index 0000000000..77f0cf1266
--- /dev/null
+++ b/drivers/soc/qcom/qcom_aoss.c
@@ -0,0 +1,573 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019, Linaro Ltd
+ */
+#include <linux/clk-provider.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/thermal.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/qcom_aoss.h>
+
+#define QMP_DESC_MAGIC 0x0
+#define QMP_DESC_VERSION 0x4
+#define QMP_DESC_FEATURES 0x8
+
+/* AOP-side offsets */
+#define QMP_DESC_UCORE_LINK_STATE 0xc
+#define QMP_DESC_UCORE_LINK_STATE_ACK 0x10
+#define QMP_DESC_UCORE_CH_STATE 0x14
+#define QMP_DESC_UCORE_CH_STATE_ACK 0x18
+#define QMP_DESC_UCORE_MBOX_SIZE 0x1c
+#define QMP_DESC_UCORE_MBOX_OFFSET 0x20
+
+/* Linux-side offsets */
+#define QMP_DESC_MCORE_LINK_STATE 0x24
+#define QMP_DESC_MCORE_LINK_STATE_ACK 0x28
+#define QMP_DESC_MCORE_CH_STATE 0x2c
+#define QMP_DESC_MCORE_CH_STATE_ACK 0x30
+#define QMP_DESC_MCORE_MBOX_SIZE 0x34
+#define QMP_DESC_MCORE_MBOX_OFFSET 0x38
+
+#define QMP_STATE_UP GENMASK(15, 0)
+#define QMP_STATE_DOWN GENMASK(31, 16)
+
+#define QMP_MAGIC 0x4d41494c /* mail */
+#define QMP_VERSION 1
+
+/* 64 bytes is enough to store the requests and provides padding to a 4-byte multiple */
+#define QMP_MSG_LEN 64
+
+#define QMP_NUM_COOLING_RESOURCES 2
+
+static bool qmp_cdev_max_state = 1;
+
+struct qmp_cooling_device {
+ struct thermal_cooling_device *cdev;
+ struct qmp *qmp;
+ char *name;
+ bool state;
+};
+
+/**
+ * struct qmp - driver state for QMP implementation
+ * @msgram: iomem referencing the message RAM used for communication
+ * @dev: reference to QMP device
+ * @mbox_client: mailbox client used to ring the doorbell on transmit
+ * @mbox_chan: mailbox channel used to ring the doorbell on transmit
+ * @offset: offset within @msgram where messages should be written
+ * @size: maximum size of the messages to be transmitted
+ * @event: wait_queue for synchronization with the IRQ
+ * @tx_lock: provides synchronization between multiple callers of qmp_send()
+ * @qdss_clk: QDSS clock hw struct
+ * @cooling_devs: thermal cooling devices
+ */
+struct qmp {
+ void __iomem *msgram;
+ struct device *dev;
+
+ struct mbox_client mbox_client;
+ struct mbox_chan *mbox_chan;
+
+ size_t offset;
+ size_t size;
+
+ wait_queue_head_t event;
+
+ struct mutex tx_lock;
+
+ struct clk_hw qdss_clk;
+ struct qmp_cooling_device *cooling_devs;
+};
+
+static void qmp_kick(struct qmp *qmp)
+{
+ mbox_send_message(qmp->mbox_chan, NULL);
+ mbox_client_txdone(qmp->mbox_chan, 0);
+}
+
+static bool qmp_magic_valid(struct qmp *qmp)
+{
+ return readl(qmp->msgram + QMP_DESC_MAGIC) == QMP_MAGIC;
+}
+
+static bool qmp_link_acked(struct qmp *qmp)
+{
+ return readl(qmp->msgram + QMP_DESC_MCORE_LINK_STATE_ACK) == QMP_STATE_UP;
+}
+
+static bool qmp_mcore_channel_acked(struct qmp *qmp)
+{
+ return readl(qmp->msgram + QMP_DESC_MCORE_CH_STATE_ACK) == QMP_STATE_UP;
+}
+
+static bool qmp_ucore_channel_up(struct qmp *qmp)
+{
+ return readl(qmp->msgram + QMP_DESC_UCORE_CH_STATE) == QMP_STATE_UP;
+}
+
+static int qmp_open(struct qmp *qmp)
+{
+ int ret;
+ u32 val;
+
+ if (!qmp_magic_valid(qmp)) {
+ dev_err(qmp->dev, "QMP magic doesn't match\n");
+ return -EINVAL;
+ }
+
+ val = readl(qmp->msgram + QMP_DESC_VERSION);
+ if (val != QMP_VERSION) {
+ dev_err(qmp->dev, "unsupported QMP version %d\n", val);
+ return -EINVAL;
+ }
+
+ qmp->offset = readl(qmp->msgram + QMP_DESC_MCORE_MBOX_OFFSET);
+ qmp->size = readl(qmp->msgram + QMP_DESC_MCORE_MBOX_SIZE);
+ if (!qmp->size) {
+ dev_err(qmp->dev, "invalid mailbox size\n");
+ return -EINVAL;
+ }
+
+ /* Ack remote core's link state */
+ val = readl(qmp->msgram + QMP_DESC_UCORE_LINK_STATE);
+ writel(val, qmp->msgram + QMP_DESC_UCORE_LINK_STATE_ACK);
+
+ /* Set local core's link state to up */
+ writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);
+
+ qmp_kick(qmp);
+
+ ret = wait_event_timeout(qmp->event, qmp_link_acked(qmp), HZ);
+ if (!ret) {
+ dev_err(qmp->dev, "ucore didn't ack link\n");
+ goto timeout_close_link;
+ }
+
+ writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_MCORE_CH_STATE);
+
+ qmp_kick(qmp);
+
+ ret = wait_event_timeout(qmp->event, qmp_ucore_channel_up(qmp), HZ);
+ if (!ret) {
+ dev_err(qmp->dev, "ucore didn't open channel\n");
+ goto timeout_close_channel;
+ }
+
+ /* Ack remote core's channel state */
+ writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_UCORE_CH_STATE_ACK);
+
+ qmp_kick(qmp);
+
+ ret = wait_event_timeout(qmp->event, qmp_mcore_channel_acked(qmp), HZ);
+ if (!ret) {
+ dev_err(qmp->dev, "ucore didn't ack channel\n");
+ goto timeout_close_channel;
+ }
+
+ return 0;
+
+timeout_close_channel:
+ writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_CH_STATE);
+
+timeout_close_link:
+ writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);
+ qmp_kick(qmp);
+
+ return -ETIMEDOUT;
+}
+
+static void qmp_close(struct qmp *qmp)
+{
+ writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_CH_STATE);
+ writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);
+ qmp_kick(qmp);
+}
+
+static irqreturn_t qmp_intr(int irq, void *data)
+{
+ struct qmp *qmp = data;
+
+ wake_up_all(&qmp->event);
+
+ return IRQ_HANDLED;
+}
+
+static bool qmp_message_empty(struct qmp *qmp)
+{
+ return readl(qmp->msgram + qmp->offset) == 0;
+}
+
+/**
+ * qmp_send() - send a message to the AOSS
+ * @qmp: qmp context
+ * @fmt: format string for message to be sent
+ * @...: arguments for the format string
+ *
+ * Transmit the message to the AOSS and wait for the AOSS to acknowledge it.
+ * The formatted message must not be longer than the mailbox size. Access is
+ * synchronized by this implementation.
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+int qmp_send(struct qmp *qmp, const char *fmt, ...)
+{
+ char buf[QMP_MSG_LEN];
+ long time_left;
+ va_list args;
+ int len;
+ int ret;
+
+ if (WARN_ON(IS_ERR_OR_NULL(qmp) || !fmt))
+ return -EINVAL;
+
+ memset(buf, 0, sizeof(buf));
+ va_start(args, fmt);
+ len = vsnprintf(buf, sizeof(buf), fmt, args);
+ va_end(args);
+
+ if (WARN_ON(len >= sizeof(buf)))
+ return -EINVAL;
+
+ mutex_lock(&qmp->tx_lock);
+
+ /* The message RAM only implements 32-bit accesses */
+ __iowrite32_copy(qmp->msgram + qmp->offset + sizeof(u32),
+ buf, sizeof(buf) / sizeof(u32));
+ writel(sizeof(buf), qmp->msgram + qmp->offset);
+
+ /* Read back length to confirm data written in message RAM */
+ readl(qmp->msgram + qmp->offset);
+ qmp_kick(qmp);
+
+ time_left = wait_event_interruptible_timeout(qmp->event,
+ qmp_message_empty(qmp), HZ);
+ if (!time_left) {
+ dev_err(qmp->dev, "ucore did not ack channel\n");
+ ret = -ETIMEDOUT;
+
+ /* Clear message from buffer */
+ writel(0, qmp->msgram + qmp->offset);
+ } else {
+ ret = 0;
+ }
+
+ mutex_unlock(&qmp->tx_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(qmp_send);
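+
+/*
+ * Illustrative sketch (not part of the driver): how a client might use the
+ * qmp_get()/qmp_send()/qmp_put() API from its own probe path. The resource
+ * string is a hypothetical example in the same "{class: ...}" format used by
+ * the qdss clock and cooling device code below:
+ *
+ *	struct qmp *qmp = qmp_get(dev);
+ *
+ *	if (IS_ERR(qmp))
+ *		return PTR_ERR(qmp);
+ *	ret = qmp_send(qmp, "{class: clock, res: qdss, val: %d}", 1);
+ *	...
+ *	qmp_put(qmp);
+ */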
+
+static int qmp_qdss_clk_prepare(struct clk_hw *hw)
+{
+ static const char *buf = "{class: clock, res: qdss, val: 1}";
+ struct qmp *qmp = container_of(hw, struct qmp, qdss_clk);
+
+ return qmp_send(qmp, buf);
+}
+
+static void qmp_qdss_clk_unprepare(struct clk_hw *hw)
+{
+ static const char *buf = "{class: clock, res: qdss, val: 0}";
+ struct qmp *qmp = container_of(hw, struct qmp, qdss_clk);
+
+ qmp_send(qmp, buf);
+}
+
+static const struct clk_ops qmp_qdss_clk_ops = {
+ .prepare = qmp_qdss_clk_prepare,
+ .unprepare = qmp_qdss_clk_unprepare,
+};
+
+static int qmp_qdss_clk_add(struct qmp *qmp)
+{
+ static const struct clk_init_data qdss_init = {
+ .ops = &qmp_qdss_clk_ops,
+ .name = "qdss",
+ };
+ int ret;
+
+ qmp->qdss_clk.init = &qdss_init;
+ ret = clk_hw_register(qmp->dev, &qmp->qdss_clk);
+ if (ret < 0) {
+ dev_err(qmp->dev, "failed to register qdss clock\n");
+ return ret;
+ }
+
+ ret = of_clk_add_hw_provider(qmp->dev->of_node, of_clk_hw_simple_get,
+ &qmp->qdss_clk);
+ if (ret < 0) {
+ dev_err(qmp->dev, "unable to register of clk hw provider\n");
+ clk_hw_unregister(&qmp->qdss_clk);
+ }
+
+ return ret;
+}
+
+static void qmp_qdss_clk_remove(struct qmp *qmp)
+{
+ of_clk_del_provider(qmp->dev->of_node);
+ clk_hw_unregister(&qmp->qdss_clk);
+}
+
+static int qmp_cdev_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = qmp_cdev_max_state;
+ return 0;
+}
+
+static int qmp_cdev_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct qmp_cooling_device *qmp_cdev = cdev->devdata;
+
+ *state = qmp_cdev->state;
+ return 0;
+}
+
+static int qmp_cdev_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ struct qmp_cooling_device *qmp_cdev = cdev->devdata;
+ bool cdev_state;
+ int ret;
+
+ /* Normalize state */
+ cdev_state = !!state;
+
+ if (qmp_cdev->state == state)
+ return 0;
+
+ ret = qmp_send(qmp_cdev->qmp, "{class: volt_flr, event:zero_temp, res:%s, value:%s}",
+ qmp_cdev->name, cdev_state ? "on" : "off");
+ if (!ret)
+ qmp_cdev->state = cdev_state;
+
+ return ret;
+}
+
+static const struct thermal_cooling_device_ops qmp_cooling_device_ops = {
+ .get_max_state = qmp_cdev_get_max_state,
+ .get_cur_state = qmp_cdev_get_cur_state,
+ .set_cur_state = qmp_cdev_set_cur_state,
+};
+
+static int qmp_cooling_device_add(struct qmp *qmp,
+ struct qmp_cooling_device *qmp_cdev,
+ struct device_node *node)
+{
+ char *cdev_name = (char *)node->name;
+
+ qmp_cdev->qmp = qmp;
+ qmp_cdev->state = !qmp_cdev_max_state;
+ qmp_cdev->name = cdev_name;
+ qmp_cdev->cdev = devm_thermal_of_cooling_device_register
+ (qmp->dev, node,
+ cdev_name,
+ qmp_cdev, &qmp_cooling_device_ops);
+
+ if (IS_ERR(qmp_cdev->cdev))
+ dev_err(qmp->dev, "unable to register %s cooling device\n",
+ cdev_name);
+
+ return PTR_ERR_OR_ZERO(qmp_cdev->cdev);
+}
+
+static int qmp_cooling_devices_register(struct qmp *qmp)
+{
+ struct device_node *np, *child;
+ int count = 0;
+ int ret;
+
+ np = qmp->dev->of_node;
+
+ qmp->cooling_devs = devm_kcalloc(qmp->dev, QMP_NUM_COOLING_RESOURCES,
+ sizeof(*qmp->cooling_devs),
+ GFP_KERNEL);
+
+ if (!qmp->cooling_devs)
+ return -ENOMEM;
+
+ for_each_available_child_of_node(np, child) {
+ if (!of_property_present(child, "#cooling-cells"))
+ continue;
+ ret = qmp_cooling_device_add(qmp, &qmp->cooling_devs[count++],
+ child);
+ if (ret) {
+ of_node_put(child);
+ goto unroll;
+ }
+ }
+
+ if (!count)
+ devm_kfree(qmp->dev, qmp->cooling_devs);
+
+ return 0;
+
+unroll:
+ while (--count >= 0)
+ thermal_cooling_device_unregister
+ (qmp->cooling_devs[count].cdev);
+ devm_kfree(qmp->dev, qmp->cooling_devs);
+
+ return ret;
+}
+
+static void qmp_cooling_devices_remove(struct qmp *qmp)
+{
+ int i;
+
+ for (i = 0; i < QMP_NUM_COOLING_RESOURCES; i++)
+ thermal_cooling_device_unregister(qmp->cooling_devs[i].cdev);
+}
+
+/**
+ * qmp_get() - get a qmp handle from a device
+ * @dev: client device pointer
+ *
+ * Return: handle to qmp device on success, ERR_PTR() on failure
+ */
+struct qmp *qmp_get(struct device *dev)
+{
+ struct platform_device *pdev;
+ struct device_node *np;
+ struct qmp *qmp;
+
+ if (!dev || !dev->of_node)
+ return ERR_PTR(-EINVAL);
+
+ np = of_parse_phandle(dev->of_node, "qcom,qmp", 0);
+ if (!np)
+ return ERR_PTR(-ENODEV);
+
+ pdev = of_find_device_by_node(np);
+ of_node_put(np);
+ if (!pdev)
+ return ERR_PTR(-EINVAL);
+
+ qmp = platform_get_drvdata(pdev);
+
+ if (!qmp) {
+ put_device(&pdev->dev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+ return qmp;
+}
+EXPORT_SYMBOL(qmp_get);
+
+/**
+ * qmp_put() - release a qmp handle
+ * @qmp: qmp handle obtained from qmp_get()
+ */
+void qmp_put(struct qmp *qmp)
+{
+ /*
+ * Match get_device() inside of_find_device_by_node() in
+ * qmp_get()
+ */
+ if (!IS_ERR_OR_NULL(qmp))
+ put_device(qmp->dev);
+}
+EXPORT_SYMBOL(qmp_put);
+
+static int qmp_probe(struct platform_device *pdev)
+{
+ struct qmp *qmp;
+ int irq;
+ int ret;
+
+ qmp = devm_kzalloc(&pdev->dev, sizeof(*qmp), GFP_KERNEL);
+ if (!qmp)
+ return -ENOMEM;
+
+ qmp->dev = &pdev->dev;
+ init_waitqueue_head(&qmp->event);
+ mutex_init(&qmp->tx_lock);
+
+ qmp->msgram = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(qmp->msgram))
+ return PTR_ERR(qmp->msgram);
+
+ qmp->mbox_client.dev = &pdev->dev;
+ qmp->mbox_client.knows_txdone = true;
+ qmp->mbox_chan = mbox_request_channel(&qmp->mbox_client, 0);
+ if (IS_ERR(qmp->mbox_chan)) {
+ dev_err(&pdev->dev, "failed to acquire ipc mailbox\n");
+ return PTR_ERR(qmp->mbox_chan);
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(&pdev->dev, irq, qmp_intr, 0,
+ "aoss-qmp", qmp);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request interrupt\n");
+ goto err_free_mbox;
+ }
+
+ ret = qmp_open(qmp);
+ if (ret < 0)
+ goto err_free_mbox;
+
+ ret = qmp_qdss_clk_add(qmp);
+ if (ret)
+ goto err_close_qmp;
+
+ ret = qmp_cooling_devices_register(qmp);
+ if (ret)
+ dev_err(&pdev->dev, "failed to register aoss cooling devices\n");
+
+ platform_set_drvdata(pdev, qmp);
+
+ return 0;
+
+err_close_qmp:
+ qmp_close(qmp);
+err_free_mbox:
+ mbox_free_channel(qmp->mbox_chan);
+
+ return ret;
+}
+
+static int qmp_remove(struct platform_device *pdev)
+{
+ struct qmp *qmp = platform_get_drvdata(pdev);
+
+ qmp_qdss_clk_remove(qmp);
+ qmp_cooling_devices_remove(qmp);
+
+ qmp_close(qmp);
+ mbox_free_channel(qmp->mbox_chan);
+
+ return 0;
+}
+
+static const struct of_device_id qmp_dt_match[] = {
+ { .compatible = "qcom,sc7180-aoss-qmp", },
+ { .compatible = "qcom,sc7280-aoss-qmp", },
+ { .compatible = "qcom,sdm845-aoss-qmp", },
+ { .compatible = "qcom,sm8150-aoss-qmp", },
+ { .compatible = "qcom,sm8250-aoss-qmp", },
+ { .compatible = "qcom,sm8350-aoss-qmp", },
+ { .compatible = "qcom,aoss-qmp", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qmp_dt_match);
+
+static struct platform_driver qmp_driver = {
+ .driver = {
+ .name = "qcom_aoss_qmp",
+ .of_match_table = qmp_dt_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = qmp_probe,
+ .remove = qmp_remove,
+};
+module_platform_driver(qmp_driver);
+
+MODULE_DESCRIPTION("Qualcomm AOSS QMP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/qcom_gsbi.c b/drivers/soc/qcom/qcom_gsbi.c
new file mode 100644
index 0000000000..df7907a83a
--- /dev/null
+++ b/drivers/soc/qcom/qcom_gsbi.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014, The Linux foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <dt-bindings/soc/qcom,gsbi.h>
+
+#define GSBI_CTRL_REG 0x0000
+#define GSBI_PROTOCOL_SHIFT 4
+#define MAX_GSBI 12
+
+#define TCSR_ADM_CRCI_BASE 0x70
+
+struct crci_config {
+ u32 num_rows;
+ const u32 (*array)[MAX_GSBI];
+};
+
+static const u32 crci_ipq8064[][MAX_GSBI] = {
+ {
+ 0x000003, 0x00000c, 0x000030, 0x0000c0,
+ 0x000300, 0x000c00, 0x003000, 0x00c000,
+ 0x030000, 0x0c0000, 0x300000, 0xc00000
+ },
+ {
+ 0x000003, 0x00000c, 0x000030, 0x0000c0,
+ 0x000300, 0x000c00, 0x003000, 0x00c000,
+ 0x030000, 0x0c0000, 0x300000, 0xc00000
+ },
+};
+
+static const struct crci_config config_ipq8064 = {
+ .num_rows = ARRAY_SIZE(crci_ipq8064),
+ .array = crci_ipq8064,
+};
+
+static const unsigned int crci_apq8064[][MAX_GSBI] = {
+ {
+ 0x001800, 0x006000, 0x000030, 0x0000c0,
+ 0x000300, 0x000400, 0x000000, 0x000000,
+ 0x000000, 0x000000, 0x000000, 0x000000
+ },
+ {
+ 0x000000, 0x000000, 0x000000, 0x000000,
+ 0x000000, 0x000020, 0x0000c0, 0x000000,
+ 0x000000, 0x000000, 0x000000, 0x000000
+ },
+};
+
+static const struct crci_config config_apq8064 = {
+ .num_rows = ARRAY_SIZE(crci_apq8064),
+ .array = crci_apq8064,
+};
+
+static const unsigned int crci_msm8960[][MAX_GSBI] = {
+ {
+ 0x000003, 0x00000c, 0x000030, 0x0000c0,
+ 0x000300, 0x000400, 0x000000, 0x000000,
+ 0x000000, 0x000000, 0x000000, 0x000000
+ },
+ {
+ 0x000000, 0x000000, 0x000000, 0x000000,
+ 0x000000, 0x000020, 0x0000c0, 0x000300,
+ 0x001800, 0x006000, 0x000000, 0x000000
+ },
+};
+
+static const struct crci_config config_msm8960 = {
+ .num_rows = ARRAY_SIZE(crci_msm8960),
+ .array = crci_msm8960,
+};
+
+static const unsigned int crci_msm8660[][MAX_GSBI] = {
+ { /* ADM 0 - B */
+ 0x000003, 0x00000c, 0x000030, 0x0000c0,
+ 0x000300, 0x000c00, 0x003000, 0x00c000,
+ 0x030000, 0x0c0000, 0x300000, 0xc00000
+ },
+ { /* ADM 0 - B */
+ 0x000003, 0x00000c, 0x000030, 0x0000c0,
+ 0x000300, 0x000c00, 0x003000, 0x00c000,
+ 0x030000, 0x0c0000, 0x300000, 0xc00000
+ },
+ { /* ADM 1 - A */
+ 0x000003, 0x00000c, 0x000030, 0x0000c0,
+ 0x000300, 0x000c00, 0x003000, 0x00c000,
+ 0x030000, 0x0c0000, 0x300000, 0xc00000
+ },
+ { /* ADM 1 - B */
+ 0x000003, 0x00000c, 0x000030, 0x0000c0,
+ 0x000300, 0x000c00, 0x003000, 0x00c000,
+ 0x030000, 0x0c0000, 0x300000, 0xc00000
+ },
+};
+
+static const struct crci_config config_msm8660 = {
+ .num_rows = ARRAY_SIZE(crci_msm8660),
+ .array = crci_msm8660,
+};
+
+struct gsbi_info {
+ struct clk *hclk;
+ u32 mode;
+ u32 crci;
+ struct regmap *tcsr;
+};
+
+static const struct of_device_id tcsr_dt_match[] __maybe_unused = {
+ { .compatible = "qcom,tcsr-ipq8064", .data = &config_ipq8064},
+ { .compatible = "qcom,tcsr-apq8064", .data = &config_apq8064},
+ { .compatible = "qcom,tcsr-msm8960", .data = &config_msm8960},
+ { .compatible = "qcom,tcsr-msm8660", .data = &config_msm8660},
+ { },
+};
+
+static int gsbi_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *tcsr_node;
+ const struct of_device_id *match;
+ void __iomem *base;
+ struct gsbi_info *gsbi;
+ int i;
+ u32 mask, gsbi_num;
+ const struct crci_config *config = NULL;
+
+ gsbi = devm_kzalloc(&pdev->dev, sizeof(*gsbi), GFP_KERNEL);
+
+ if (!gsbi)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ /* get the tcsr node and setup the config and regmap */
+ gsbi->tcsr = syscon_regmap_lookup_by_phandle(node, "syscon-tcsr");
+
+ if (!IS_ERR(gsbi->tcsr)) {
+ tcsr_node = of_parse_phandle(node, "syscon-tcsr", 0);
+ if (tcsr_node) {
+ match = of_match_node(tcsr_dt_match, tcsr_node);
+ if (match)
+ config = match->data;
+ else
+ dev_warn(&pdev->dev, "no matching TCSR\n");
+
+ of_node_put(tcsr_node);
+ }
+ }
+
+ if (of_property_read_u32(node, "cell-index", &gsbi_num)) {
+ dev_err(&pdev->dev, "missing cell-index\n");
+ return -EINVAL;
+ }
+
+ if (gsbi_num < 1 || gsbi_num > MAX_GSBI) {
+ dev_err(&pdev->dev, "invalid cell-index\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32(node, "qcom,mode", &gsbi->mode)) {
+ dev_err(&pdev->dev, "missing mode configuration\n");
+ return -EINVAL;
+ }
+
+ /* not required, so default to 0 if not present */
+ of_property_read_u32(node, "qcom,crci", &gsbi->crci);
+
+ dev_info(&pdev->dev, "GSBI port protocol: %d crci: %d\n",
+ gsbi->mode, gsbi->crci);
+ gsbi->hclk = devm_clk_get_enabled(&pdev->dev, "iface");
+ if (IS_ERR(gsbi->hclk))
+ return PTR_ERR(gsbi->hclk);
+
+ writel_relaxed((gsbi->mode << GSBI_PROTOCOL_SHIFT) | gsbi->crci,
+ base + GSBI_CTRL_REG);
+
+ /*
+ * modify tcsr to reflect mode and ADM CRCI mux
+ * Each gsbi contains a pair of bits, one for RX and one for TX
+ * SPI mode requires both bits cleared, otherwise they are set
+ */
+ if (config) {
+ for (i = 0; i < config->num_rows; i++) {
+ mask = config->array[i][gsbi_num - 1];
+
+ if (gsbi->mode == GSBI_PROT_SPI)
+ regmap_update_bits(gsbi->tcsr,
+ TCSR_ADM_CRCI_BASE + 4 * i, mask, 0);
+ else
+ regmap_update_bits(gsbi->tcsr,
+ TCSR_ADM_CRCI_BASE + 4 * i, mask, mask);
+
+ }
+ }
+
+ /* make sure the gsbi control write is not reordered */
+ wmb();
+
+ platform_set_drvdata(pdev, gsbi);
+
+ return of_platform_populate(node, NULL, NULL, &pdev->dev);
+}
+
+static int gsbi_remove(struct platform_device *pdev)
+{
+ struct gsbi_info *gsbi = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(gsbi->hclk);
+
+ return 0;
+}
+
+static const struct of_device_id gsbi_dt_match[] = {
+ { .compatible = "qcom,gsbi-v1.0.0", },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, gsbi_dt_match);
+
+static struct platform_driver gsbi_driver = {
+ .driver = {
+ .name = "gsbi",
+ .of_match_table = gsbi_dt_match,
+ },
+ .probe = gsbi_probe,
+ .remove = gsbi_remove,
+};
+
+module_platform_driver(gsbi_driver);
+
+MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
+MODULE_DESCRIPTION("QCOM GSBI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/qcom_stats.c b/drivers/soc/qcom/qcom_stats.c
new file mode 100644
index 0000000000..c207bb96c5
--- /dev/null
+++ b/drivers/soc/qcom/qcom_stats.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2011-2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+
+#include <linux/soc/qcom/smem.h>
+#include <clocksource/arm_arch_timer.h>
+
+#define RPM_DYNAMIC_ADDR 0x14
+#define RPM_DYNAMIC_ADDR_MASK 0xFFFF
+
+#define STAT_TYPE_OFFSET 0x0
+#define COUNT_OFFSET 0x4
+#define LAST_ENTERED_AT_OFFSET 0x8
+#define LAST_EXITED_AT_OFFSET 0x10
+#define ACCUMULATED_OFFSET 0x18
+#define CLIENT_VOTES_OFFSET 0x20
+
+struct subsystem_data {
+ const char *name;
+ u32 smem_item;
+ u32 pid;
+};
+
+static const struct subsystem_data subsystems[] = {
+ { "modem", 605, 1 },
+ { "wpss", 605, 13 },
+ { "adsp", 606, 2 },
+ { "cdsp", 607, 5 },
+ { "slpi", 608, 3 },
+ { "gpu", 609, 0 },
+ { "display", 610, 0 },
+ { "adsp_island", 613, 2 },
+ { "slpi_island", 613, 3 },
+};
+
+struct stats_config {
+ size_t stats_offset;
+ size_t num_records;
+ bool appended_stats_avail;
+ bool dynamic_offset;
+ bool subsystem_stats_in_smem;
+};
+
+struct stats_data {
+ bool appended_stats_avail;
+ void __iomem *base;
+};
+
+struct sleep_stats {
+ u32 stat_type;
+ u32 count;
+ u64 last_entered_at;
+ u64 last_exited_at;
+ u64 accumulated;
+};
+
+struct appended_stats {
+ u32 client_votes;
+ u32 reserved[3];
+};
+
+static void qcom_print_stats(struct seq_file *s, const struct sleep_stats *stat)
+{
+ u64 accumulated = stat->accumulated;
+ /*
+ * If a subsystem is in sleep when reading the sleep stats, adjust
+ * the accumulated sleep duration to show the actual sleep time.
+ */
+ if (stat->last_entered_at > stat->last_exited_at)
+ accumulated += arch_timer_read_counter() - stat->last_entered_at;
+
+ seq_printf(s, "Count: %u\n", stat->count);
+ seq_printf(s, "Last Entered At: %llu\n", stat->last_entered_at);
+ seq_printf(s, "Last Exited At: %llu\n", stat->last_exited_at);
+ seq_printf(s, "Accumulated Duration: %llu\n", accumulated);
+}
+
+static int qcom_subsystem_sleep_stats_show(struct seq_file *s, void *unused)
+{
+ struct subsystem_data *subsystem = s->private;
+ struct sleep_stats *stat;
+
+ /* Items are allocated lazily, so lookup pointer each time */
+ stat = qcom_smem_get(subsystem->pid, subsystem->smem_item, NULL);
+ if (IS_ERR(stat))
+ return 0;
+
+ qcom_print_stats(s, stat);
+
+ return 0;
+}
+
+static int qcom_soc_sleep_stats_show(struct seq_file *s, void *unused)
+{
+ struct stats_data *d = s->private;
+ void __iomem *reg = d->base;
+ struct sleep_stats stat;
+
+ memcpy_fromio(&stat, reg, sizeof(stat));
+ qcom_print_stats(s, &stat);
+
+ if (d->appended_stats_avail) {
+ struct appended_stats votes;
+
+ memcpy_fromio(&votes, reg + CLIENT_VOTES_OFFSET, sizeof(votes));
+ seq_printf(s, "Client Votes: %#x\n", votes.client_votes);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(qcom_soc_sleep_stats);
+DEFINE_SHOW_ATTRIBUTE(qcom_subsystem_sleep_stats);
+
+static void qcom_create_soc_sleep_stat_files(struct dentry *root, void __iomem *reg,
+ struct stats_data *d,
+ const struct stats_config *config)
+{
+ char stat_type[sizeof(u32) + 1] = {0};
+ size_t stats_offset = config->stats_offset;
+ u32 offset = 0, type;
+ int i, j;
+
+ /*
+ * On RPM targets, the stats offset location is dynamic and changes from
+ * target to target, and sometimes from build to build for the same target.
+ *
+ * In such cases the dynamic address is present at offset 0x14 from the
+ * base address in the devicetree. The lower 16 bits hold the stats_offset.
+ */
+ if (config->dynamic_offset) {
+ stats_offset = readl(reg + RPM_DYNAMIC_ADDR);
+ stats_offset &= RPM_DYNAMIC_ADDR_MASK;
+ }
+
+ for (i = 0; i < config->num_records; i++) {
+ d[i].base = reg + offset + stats_offset;
+
+ /*
+ * Read the low power mode name and create a debugfs file for it.
+ * The names read are typically the following
+ * (and may change depending on the low power modes supported).
+ * For rpmh-sleep-stats: "aosd", "cxsd" and "ddr".
+ * For rpm-sleep-stats: "vmin" and "vlow".
+ */
+ type = readl(d[i].base);
+ for (j = 0; j < sizeof(u32); j++) {
+ stat_type[j] = type & 0xff;
+ type = type >> 8;
+ }
+ strim(stat_type);
+ debugfs_create_file(stat_type, 0400, root, &d[i],
+ &qcom_soc_sleep_stats_fops);
+
+ offset += sizeof(struct sleep_stats);
+ if (d[i].appended_stats_avail)
+ offset += sizeof(struct appended_stats);
+ }
+}
+
+static void qcom_create_subsystem_stat_files(struct dentry *root,
+ const struct stats_config *config)
+{
+ int i;
+
+ if (!config->subsystem_stats_in_smem)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(subsystems); i++)
+ debugfs_create_file(subsystems[i].name, 0400, root, (void *)&subsystems[i],
+ &qcom_subsystem_sleep_stats_fops);
+}
+
+static int qcom_stats_probe(struct platform_device *pdev)
+{
+ void __iomem *reg;
+ struct dentry *root;
+ const struct stats_config *config;
+ struct stats_data *d;
+ int i;
+
+ config = device_get_match_data(&pdev->dev);
+ if (!config)
+ return -ENODEV;
+
+ reg = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(reg))
+ return -ENOMEM;
+
+ d = devm_kcalloc(&pdev->dev, config->num_records,
+ sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+
+ for (i = 0; i < config->num_records; i++)
+ d[i].appended_stats_avail = config->appended_stats_avail;
+
+ root = debugfs_create_dir("qcom_stats", NULL);
+
+ qcom_create_subsystem_stat_files(root, config);
+ qcom_create_soc_sleep_stat_files(root, reg, d, config);
+
+ platform_set_drvdata(pdev, root);
+
+ device_set_pm_not_required(&pdev->dev);
+
+ return 0;
+}
+
+static int qcom_stats_remove(struct platform_device *pdev)
+{
+ struct dentry *root = platform_get_drvdata(pdev);
+
+ debugfs_remove_recursive(root);
+
+ return 0;
+}
+
+static const struct stats_config rpm_data = {
+ .stats_offset = 0,
+ .num_records = 2,
+ .appended_stats_avail = true,
+ .dynamic_offset = true,
+ .subsystem_stats_in_smem = false,
+};
+
+/* Older RPM firmwares have the stats at a fixed offset instead */
+static const struct stats_config rpm_data_dba0 = {
+ .stats_offset = 0xdba0,
+ .num_records = 2,
+ .appended_stats_avail = true,
+ .dynamic_offset = false,
+ .subsystem_stats_in_smem = false,
+};
+
+static const struct stats_config rpmh_data_sdm845 = {
+ .stats_offset = 0x48,
+ .num_records = 2,
+ .appended_stats_avail = false,
+ .dynamic_offset = false,
+ .subsystem_stats_in_smem = true,
+};
+
+static const struct stats_config rpmh_data = {
+ .stats_offset = 0x48,
+ .num_records = 3,
+ .appended_stats_avail = false,
+ .dynamic_offset = false,
+ .subsystem_stats_in_smem = true,
+};
+
+static const struct of_device_id qcom_stats_table[] = {
+ { .compatible = "qcom,apq8084-rpm-stats", .data = &rpm_data_dba0 },
+ { .compatible = "qcom,msm8226-rpm-stats", .data = &rpm_data_dba0 },
+ { .compatible = "qcom,msm8916-rpm-stats", .data = &rpm_data_dba0 },
+ { .compatible = "qcom,msm8974-rpm-stats", .data = &rpm_data_dba0 },
+ { .compatible = "qcom,rpm-stats", .data = &rpm_data },
+ { .compatible = "qcom,rpmh-stats", .data = &rpmh_data },
+ { .compatible = "qcom,sdm845-rpmh-stats", .data = &rpmh_data_sdm845 },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qcom_stats_table);
+
+static struct platform_driver qcom_stats = {
+ .probe = qcom_stats_probe,
+ .remove = qcom_stats_remove,
+ .driver = {
+ .name = "qcom_stats",
+ .of_match_table = qcom_stats_table,
+ },
+};
+
+static int __init qcom_stats_init(void)
+{
+ return platform_driver_register(&qcom_stats);
+}
+late_initcall(qcom_stats_init);
+
+static void __exit qcom_stats_exit(void)
+{
+ platform_driver_unregister(&qcom_stats);
+}
+module_exit(qcom_stats_exit)
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. (QTI) Stats driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/qmi_encdec.c b/drivers/soc/qcom/qmi_encdec.c
new file mode 100644
index 0000000000..5c7161b18b
--- /dev/null
+++ b/drivers/soc/qcom/qmi_encdec.c
@@ -0,0 +1,816 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ */
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/soc/qcom/qmi.h>
+
+#define QMI_ENCDEC_ENCODE_TLV(type, length, p_dst) do { \
+ *p_dst++ = type; \
+ *p_dst++ = ((u8)((length) & 0xFF)); \
+ *p_dst++ = ((u8)(((length) >> 8) & 0xFF)); \
+} while (0)
+
+#define QMI_ENCDEC_DECODE_TLV(p_type, p_length, p_src) do { \
+ *p_type = (u8)*p_src++; \
+ *p_length = (u8)*p_src++; \
+ *p_length |= ((u8)*p_src) << 8; \
+} while (0)
+
+#define QMI_ENCDEC_ENCODE_N_BYTES(p_dst, p_src, size) \
+do { \
+ memcpy(p_dst, p_src, size); \
+ p_dst = (u8 *)p_dst + size; \
+ p_src = (u8 *)p_src + size; \
+} while (0)
+
+#define QMI_ENCDEC_DECODE_N_BYTES(p_dst, p_src, size) \
+do { \
+ memcpy(p_dst, p_src, size); \
+ p_dst = (u8 *)p_dst + size; \
+ p_src = (u8 *)p_src + size; \
+} while (0)
+
+#define UPDATE_ENCODE_VARIABLES(temp_si, buf_dst, \
+ encoded_bytes, tlv_len, encode_tlv, rc) \
+do { \
+ buf_dst = (u8 *)buf_dst + rc; \
+ encoded_bytes += rc; \
+ tlv_len += rc; \
+ temp_si = temp_si + 1; \
+ encode_tlv = 1; \
+} while (0)
+
+#define UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc) \
+do { \
+ buf_src = (u8 *)buf_src + rc; \
+ decoded_bytes += rc; \
+} while (0)
+
+#define TLV_LEN_SIZE sizeof(u16)
+#define TLV_TYPE_SIZE sizeof(u8)
+#define OPTIONAL_TLV_TYPE_START 0x10
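+
+/*
+ * Illustrative wire-format sketch (derived from the macros above, not quoted
+ * from any spec): each top-level element is emitted as a TLV with a one-byte
+ * type and a two-byte little-endian length, followed by the value bytes,
+ * which are copied as-is from the host representation. A hypothetical 4-byte
+ * unsigned element with tlv_type 0x10 and value 1 would, on a little-endian
+ * host, be encoded as:
+ *
+ *	10 04 00 01 00 00 00
+ *	^  ^~~~~ ^~~~~~~~~~~
+ *	|  len=4 value
+ *	type
+ */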
+
+static int qmi_encode(const struct qmi_elem_info *ei_array, void *out_buf,
+ const void *in_c_struct, u32 out_buf_len,
+ int enc_level);
+
+static int qmi_decode(const struct qmi_elem_info *ei_array, void *out_c_struct,
+ const void *in_buf, u32 in_buf_len, int dec_level);
+
+/**
+ * skip_to_next_elem() - Skip to next element in the structure to be encoded
+ * @ei_array: Struct info describing the element to be skipped.
+ * @level: Depth level of encoding/decoding to identify nested structures.
+ *
+ * This function is used while encoding optional elements. If the flag
+ * corresponding to an optional element is not set, then encoding the
+ * optional element can be skipped. This function can be used to perform
+ * that operation.
+ *
+ * Return: struct info of the next element that can be encoded.
+ */
+static const struct qmi_elem_info *
+skip_to_next_elem(const struct qmi_elem_info *ei_array, int level)
+{
+ const struct qmi_elem_info *temp_ei = ei_array;
+ u8 tlv_type;
+
+ if (level > 1) {
+ temp_ei = temp_ei + 1;
+ } else {
+ do {
+ tlv_type = temp_ei->tlv_type;
+ temp_ei = temp_ei + 1;
+ } while (tlv_type == temp_ei->tlv_type);
+ }
+
+ return temp_ei;
+}
+
+/**
+ * qmi_calc_min_msg_len() - Calculate the minimum length of a QMI message
+ * @ei_array: Struct info array describing the structure.
+ * @level: Level to identify the depth of the nested structures.
+ *
+ * Return: Expected minimum length of the QMI message or 0 on error.
+ */
+static int qmi_calc_min_msg_len(const struct qmi_elem_info *ei_array,
+ int level)
+{
+ int min_msg_len = 0;
+ const struct qmi_elem_info *temp_ei = ei_array;
+
+ if (!ei_array)
+ return min_msg_len;
+
+ while (temp_ei->data_type != QMI_EOTI) {
+ /* Optional elements do not count in minimum length */
+ if (temp_ei->data_type == QMI_OPT_FLAG) {
+ temp_ei = skip_to_next_elem(temp_ei, level);
+ continue;
+ }
+
+ if (temp_ei->data_type == QMI_DATA_LEN) {
+ min_msg_len += (temp_ei->elem_size == sizeof(u8) ?
+ sizeof(u8) : sizeof(u16));
+ temp_ei++;
+ continue;
+ } else if (temp_ei->data_type == QMI_STRUCT) {
+ min_msg_len += qmi_calc_min_msg_len(temp_ei->ei_array,
+ (level + 1));
+ temp_ei++;
+ } else if (temp_ei->data_type == QMI_STRING) {
+ if (level > 1)
+ min_msg_len += temp_ei->elem_len <= U8_MAX ?
+ sizeof(u8) : sizeof(u16);
+ min_msg_len += temp_ei->elem_len * temp_ei->elem_size;
+ temp_ei++;
+ } else {
+ min_msg_len += (temp_ei->elem_len * temp_ei->elem_size);
+ temp_ei++;
+ }
+
+ /*
+ * Type & Length information is not prepended for elements in a
+ * nested structure.
+ */
+ if (level == 1)
+ min_msg_len += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
+ }
+
+ return min_msg_len;
+}
+
+/**
+ * qmi_encode_basic_elem() - Encodes elements of basic/primary data type
+ * @buf_dst: Buffer to store the encoded information.
+ * @buf_src: Buffer containing the elements to be encoded.
+ * @elem_len: Number of elements, in the buf_src, to be encoded.
+ * @elem_size: Size of a single instance of the element to be encoded.
+ *
+ * This function encodes the "elem_len" number of data elements, each of
+ * size "elem_size" bytes from the source buffer "buf_src" and stores the
+ * encoded information in the destination buffer "buf_dst". The elements are
+ * of primary data type which include u8 - u64 or similar. This
+ * function returns the number of bytes of encoded information.
+ *
+ * Return: The number of bytes of encoded information.
+ */
+static int qmi_encode_basic_elem(void *buf_dst, const void *buf_src,
+ u32 elem_len, u32 elem_size)
+{
+ u32 i, rc = 0;
+
+ for (i = 0; i < elem_len; i++) {
+ QMI_ENCDEC_ENCODE_N_BYTES(buf_dst, buf_src, elem_size);
+ rc += elem_size;
+ }
+
+ return rc;
+}
+
+/**
+ * qmi_encode_struct_elem() - Encodes elements of struct data type
+ * @ei_array: Struct info array describing the struct element.
+ * @buf_dst: Buffer to store the encoded information.
+ * @buf_src: Buffer containing the elements to be encoded.
+ * @elem_len: Number of elements, in the buf_src, to be encoded.
+ * @out_buf_len: Available space in the encode buffer.
+ * @enc_level: Depth of the nested structure from the main structure.
+ *
+ * This function encodes the "elem_len" number of struct elements, each of
+ * size "ei_array->elem_size" bytes from the source buffer "buf_src" and
+ * stores the encoded information in the destination buffer "buf_dst". The
+ * elements are of struct data type which includes any C structure. This
+ * function returns the number of bytes of encoded information.
+ *
+ * Return: The number of bytes of encoded information on success or negative
+ * errno on error.
+ */
+static int qmi_encode_struct_elem(const struct qmi_elem_info *ei_array,
+ void *buf_dst, const void *buf_src,
+ u32 elem_len, u32 out_buf_len,
+ int enc_level)
+{
+ int i, rc, encoded_bytes = 0;
+ const struct qmi_elem_info *temp_ei = ei_array;
+
+ for (i = 0; i < elem_len; i++) {
+ rc = qmi_encode(temp_ei->ei_array, buf_dst, buf_src,
+ out_buf_len - encoded_bytes, enc_level);
+ if (rc < 0) {
+ pr_err("%s: STRUCT Encode failure\n", __func__);
+ return rc;
+ }
+ buf_dst = buf_dst + rc;
+ buf_src = buf_src + temp_ei->elem_size;
+ encoded_bytes += rc;
+ }
+
+ return encoded_bytes;
+}
+
+/**
+ * qmi_encode_string_elem() - Encodes elements of string data type
+ * @ei_array: Struct info array describing the string element.
+ * @buf_dst: Buffer to store the encoded information.
+ * @buf_src: Buffer containing the elements to be encoded.
+ * @out_buf_len: Available space in the encode buffer.
+ * @enc_level: Depth of the string element from the main structure.
+ *
+ * This function encodes a string element of maximum length "ei_array->elem_len"
+ * bytes from the source buffer "buf_src" and stores the encoded information in
+ * the destination buffer "buf_dst". This function returns the number of bytes
+ * of encoded information.
+ *
+ * Return: The number of bytes of encoded information on success or negative
+ * errno on error.
+ */
+static int qmi_encode_string_elem(const struct qmi_elem_info *ei_array,
+ void *buf_dst, const void *buf_src,
+ u32 out_buf_len, int enc_level)
+{
+ int rc;
+ int encoded_bytes = 0;
+ const struct qmi_elem_info *temp_ei = ei_array;
+ u32 string_len = 0;
+ u32 string_len_sz = 0;
+
+ string_len = strlen(buf_src);
+ string_len_sz = temp_ei->elem_len <= U8_MAX ?
+ sizeof(u8) : sizeof(u16);
+ if (string_len > temp_ei->elem_len) {
+ pr_err("%s: String to be encoded is longer - %d > %d\n",
+ __func__, string_len, temp_ei->elem_len);
+ return -EINVAL;
+ }
+
+ if (enc_level == 1) {
+ if (string_len + TLV_LEN_SIZE + TLV_TYPE_SIZE >
+ out_buf_len) {
+ pr_err("%s: Output len %d > Out Buf len %d\n",
+ __func__, string_len, out_buf_len);
+ return -ETOOSMALL;
+ }
+ } else {
+ if (string_len + string_len_sz > out_buf_len) {
+ pr_err("%s: Output len %d > Out Buf len %d\n",
+ __func__, string_len, out_buf_len);
+ return -ETOOSMALL;
+ }
+ rc = qmi_encode_basic_elem(buf_dst, &string_len,
+ 1, string_len_sz);
+ encoded_bytes += rc;
+ }
+
+ rc = qmi_encode_basic_elem(buf_dst + encoded_bytes, buf_src,
+ string_len, temp_ei->elem_size);
+ encoded_bytes += rc;
+
+ return encoded_bytes;
+}
+
+/**
+ * qmi_encode() - Core Encode Function
+ * @ei_array: Struct info array describing the structure to be encoded.
+ * @out_buf: Buffer to hold the encoded QMI message.
+ * @in_c_struct: Pointer to the C structure to be encoded.
+ * @out_buf_len: Available space in the encode buffer.
+ * @enc_level: Encode level to indicate the depth of the nested structure,
+ * within the main structure, being encoded.
+ *
+ * Return: The number of bytes of encoded information on success or negative
+ * errno on error.
+ */
+static int qmi_encode(const struct qmi_elem_info *ei_array, void *out_buf,
+ const void *in_c_struct, u32 out_buf_len,
+ int enc_level)
+{
+ const struct qmi_elem_info *temp_ei = ei_array;
+ u8 opt_flag_value = 0;
+ u32 data_len_value = 0, data_len_sz;
+ u8 *buf_dst = (u8 *)out_buf;
+ u8 *tlv_pointer;
+ u32 tlv_len;
+ u8 tlv_type;
+ u32 encoded_bytes = 0;
+ const void *buf_src;
+ int encode_tlv = 0;
+ int rc;
+
+ if (!ei_array)
+ return 0;
+
+ tlv_pointer = buf_dst;
+ tlv_len = 0;
+ if (enc_level == 1)
+ buf_dst = buf_dst + (TLV_LEN_SIZE + TLV_TYPE_SIZE);
+
+ while (temp_ei->data_type != QMI_EOTI) {
+ buf_src = in_c_struct + temp_ei->offset;
+ tlv_type = temp_ei->tlv_type;
+
+ if (temp_ei->array_type == NO_ARRAY) {
+ data_len_value = 1;
+ } else if (temp_ei->array_type == STATIC_ARRAY) {
+ data_len_value = temp_ei->elem_len;
+ } else if (data_len_value <= 0 ||
+ temp_ei->elem_len < data_len_value) {
+ pr_err("%s: Invalid data length\n", __func__);
+ return -EINVAL;
+ }
+
+ switch (temp_ei->data_type) {
+ case QMI_OPT_FLAG:
+ rc = qmi_encode_basic_elem(&opt_flag_value, buf_src,
+ 1, sizeof(u8));
+ if (opt_flag_value)
+ temp_ei = temp_ei + 1;
+ else
+ temp_ei = skip_to_next_elem(temp_ei, enc_level);
+ break;
+
+ case QMI_DATA_LEN:
+ memcpy(&data_len_value, buf_src, temp_ei->elem_size);
+ data_len_sz = temp_ei->elem_size == sizeof(u8) ?
+ sizeof(u8) : sizeof(u16);
+ /* Check to avoid out of range buffer access */
+ if ((data_len_sz + encoded_bytes + TLV_LEN_SIZE +
+ TLV_TYPE_SIZE) > out_buf_len) {
+ pr_err("%s: Too Small Buffer @DATA_LEN\n",
+ __func__);
+ return -ETOOSMALL;
+ }
+ rc = qmi_encode_basic_elem(buf_dst, &data_len_value,
+ 1, data_len_sz);
+ UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst,
+ encoded_bytes, tlv_len,
+ encode_tlv, rc);
+ if (!data_len_value)
+ temp_ei = skip_to_next_elem(temp_ei, enc_level);
+ else
+ encode_tlv = 0;
+ break;
+
+ case QMI_UNSIGNED_1_BYTE:
+ case QMI_UNSIGNED_2_BYTE:
+ case QMI_UNSIGNED_4_BYTE:
+ case QMI_UNSIGNED_8_BYTE:
+ case QMI_SIGNED_2_BYTE_ENUM:
+ case QMI_SIGNED_4_BYTE_ENUM:
+ /* Check to avoid out of range buffer access */
+ if (((data_len_value * temp_ei->elem_size) +
+ encoded_bytes + TLV_LEN_SIZE + TLV_TYPE_SIZE) >
+ out_buf_len) {
+ pr_err("%s: Too Small Buffer @data_type:%d\n",
+ __func__, temp_ei->data_type);
+ return -ETOOSMALL;
+ }
+ rc = qmi_encode_basic_elem(buf_dst, buf_src,
+ data_len_value,
+ temp_ei->elem_size);
+ UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst,
+ encoded_bytes, tlv_len,
+ encode_tlv, rc);
+ break;
+
+ case QMI_STRUCT:
+ rc = qmi_encode_struct_elem(temp_ei, buf_dst, buf_src,
+ data_len_value,
+ out_buf_len - encoded_bytes,
+ enc_level + 1);
+ if (rc < 0)
+ return rc;
+ UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst,
+ encoded_bytes, tlv_len,
+ encode_tlv, rc);
+ break;
+
+ case QMI_STRING:
+ rc = qmi_encode_string_elem(temp_ei, buf_dst, buf_src,
+ out_buf_len - encoded_bytes,
+ enc_level);
+ if (rc < 0)
+ return rc;
+ UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst,
+ encoded_bytes, tlv_len,
+ encode_tlv, rc);
+ break;
+ default:
+ pr_err("%s: Unrecognized data type\n", __func__);
+ return -EINVAL;
+ }
+
+ if (encode_tlv && enc_level == 1) {
+ QMI_ENCDEC_ENCODE_TLV(tlv_type, tlv_len, tlv_pointer);
+ encoded_bytes += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
+ tlv_pointer = buf_dst;
+ tlv_len = 0;
+ buf_dst = buf_dst + TLV_LEN_SIZE + TLV_TYPE_SIZE;
+ encode_tlv = 0;
+ }
+ }
+
+ return encoded_bytes;
+}
+
+/**
+ * qmi_decode_basic_elem() - Decodes elements of basic/primary data type
+ * @buf_dst: Buffer to store the decoded element.
+ * @buf_src: Buffer containing the elements in QMI wire format.
+ * @elem_len: Number of elements to be decoded.
+ * @elem_size: Size of a single instance of the element to be decoded.
+ *
+ * This function decodes the "elem_len" number of elements in QMI wire format,
+ * each of size "elem_size" bytes from the source buffer "buf_src" and stores
+ * the decoded elements in the destination buffer "buf_dst". The elements are
+ * of primary data type which include u8 - u64 or similar. This
+ * function returns the number of bytes of decoded information.
+ *
+ * Return: The total size of the decoded data elements, in bytes.
+ */
+static int qmi_decode_basic_elem(void *buf_dst, const void *buf_src,
+ u32 elem_len, u32 elem_size)
+{
+ u32 i, rc = 0;
+
+ for (i = 0; i < elem_len; i++) {
+ QMI_ENCDEC_DECODE_N_BYTES(buf_dst, buf_src, elem_size);
+ rc += elem_size;
+ }
+
+ return rc;
+}
+
+/**
+ * qmi_decode_struct_elem() - Decodes elements of struct data type
+ * @ei_array: Struct info array describing the struct element.
+ * @buf_dst: Buffer to store the decoded element.
+ * @buf_src: Buffer containing the elements in QMI wire format.
+ * @elem_len: Number of elements to be decoded.
+ * @tlv_len: Total size of the encoded information corresponding to
+ * this struct element.
+ * @dec_level: Depth of the nested structure from the main structure.
+ *
+ * This function decodes the "elem_len" number of elements in QMI wire format,
+ * each of size "(tlv_len/elem_len)" bytes from the source buffer "buf_src"
+ * and stores the decoded elements in the destination buffer "buf_dst". The
+ * elements are of struct data type which includes any C structure. This
+ * function returns the number of bytes of decoded information.
+ *
+ * Return: The total size of the decoded data elements on success, negative
+ * errno on error.
+ */
+static int qmi_decode_struct_elem(const struct qmi_elem_info *ei_array,
+ void *buf_dst, const void *buf_src,
+ u32 elem_len, u32 tlv_len,
+ int dec_level)
+{
+ int i, rc, decoded_bytes = 0;
+ const struct qmi_elem_info *temp_ei = ei_array;
+
+ for (i = 0; i < elem_len && decoded_bytes < tlv_len; i++) {
+ rc = qmi_decode(temp_ei->ei_array, buf_dst, buf_src,
+ tlv_len - decoded_bytes, dec_level);
+ if (rc < 0)
+ return rc;
+ buf_src = buf_src + rc;
+ buf_dst = buf_dst + temp_ei->elem_size;
+ decoded_bytes += rc;
+ }
+
+ if ((dec_level <= 2 && decoded_bytes != tlv_len) ||
+ (dec_level > 2 && (i < elem_len || decoded_bytes > tlv_len))) {
+ pr_err("%s: Fault in decoding: dl(%d), db(%d), tl(%d), i(%d), el(%d)\n",
+ __func__, dec_level, decoded_bytes, tlv_len,
+ i, elem_len);
+ return -EFAULT;
+ }
+
+ return decoded_bytes;
+}
+
+/**
+ * qmi_decode_string_elem() - Decodes elements of string data type
+ * @ei_array: Struct info array describing the string element.
+ * @buf_dst: Buffer to store the decoded element.
+ * @buf_src: Buffer containing the elements in QMI wire format.
+ * @tlv_len: Total size of the encoded information corresponding to
+ * this string element.
+ * @dec_level: Depth of the string element from the main structure.
+ *
+ * This function decodes the string element of maximum length
+ * "ei_array->elem_len" from the source buffer "buf_src" and puts it into
+ * the destination buffer "buf_dst". This function returns the number of
+ * bytes decoded from the input buffer.
+ *
+ * Return: The total size of the decoded data elements on success, negative
+ * errno on error.
+ */
+static int qmi_decode_string_elem(const struct qmi_elem_info *ei_array,
+ void *buf_dst, const void *buf_src,
+ u32 tlv_len, int dec_level)
+{
+ int rc;
+ int decoded_bytes = 0;
+ u32 string_len = 0;
+ u32 string_len_sz = 0;
+ const struct qmi_elem_info *temp_ei = ei_array;
+
+ if (dec_level == 1) {
+ string_len = tlv_len;
+ } else {
+ string_len_sz = temp_ei->elem_len <= U8_MAX ?
+ sizeof(u8) : sizeof(u16);
+ rc = qmi_decode_basic_elem(&string_len, buf_src,
+ 1, string_len_sz);
+ decoded_bytes += rc;
+ }
+
+ if (string_len >= temp_ei->elem_len) {
+ pr_err("%s: String len %d >= Max Len %d\n",
+ __func__, string_len, temp_ei->elem_len);
+ return -ETOOSMALL;
+ } else if (string_len > tlv_len) {
+ pr_err("%s: String len %d > Input Buffer Len %d\n",
+ __func__, string_len, tlv_len);
+ return -EFAULT;
+ }
+
+ rc = qmi_decode_basic_elem(buf_dst, buf_src + decoded_bytes,
+ string_len, temp_ei->elem_size);
+ *((char *)buf_dst + string_len) = '\0';
+ decoded_bytes += rc;
+
+ return decoded_bytes;
+}
+
+/**
+ * find_ei() - Find element info corresponding to TLV Type
+ * @ei_array: Struct info array of the message being decoded.
+ * @type: TLV Type of the element being searched.
+ *
+ * Every element that got encoded in the QMI message will have a type
+ * information associated with it. While decoding the QMI message,
+ * this function is used to find the struct info regarding the element
+ * that corresponds to the type being decoded.
+ *
+ * Return: Pointer to the matching struct info on success, NULL if not found.
+ */
+static const struct qmi_elem_info *find_ei(const struct qmi_elem_info *ei_array,
+ u32 type)
+{
+ const struct qmi_elem_info *temp_ei = ei_array;
+
+ while (temp_ei->data_type != QMI_EOTI) {
+ if (temp_ei->tlv_type == (u8)type)
+ return temp_ei;
+ temp_ei = temp_ei + 1;
+ }
+
+ return NULL;
+}
+
+/**
+ * qmi_decode() - Core Decode Function
+ * @ei_array: Struct info array describing the structure to be decoded.
+ * @out_c_struct: Buffer to hold the decoded C struct
+ * @in_buf: Buffer containing the QMI message to be decoded
+ * @in_buf_len: Length of the QMI message to be decoded
+ * @dec_level: Decode level to indicate the depth of the nested structure,
+ * within the main structure, being decoded
+ *
+ * Return: The number of bytes of decoded information on success, negative
+ * errno on error.
+ */
+static int qmi_decode(const struct qmi_elem_info *ei_array, void *out_c_struct,
+ const void *in_buf, u32 in_buf_len,
+ int dec_level)
+{
+ const struct qmi_elem_info *temp_ei = ei_array;
+ u8 opt_flag_value = 1;
+ u32 data_len_value = 0, data_len_sz = 0;
+ u8 *buf_dst = out_c_struct;
+ const u8 *tlv_pointer;
+ u32 tlv_len = 0;
+ u32 tlv_type;
+ u32 decoded_bytes = 0;
+ const void *buf_src = in_buf;
+ int rc;
+
+ while (decoded_bytes < in_buf_len) {
+ if (dec_level >= 2 && temp_ei->data_type == QMI_EOTI)
+ return decoded_bytes;
+
+ if (dec_level == 1) {
+ tlv_pointer = buf_src;
+ QMI_ENCDEC_DECODE_TLV(&tlv_type,
+ &tlv_len, tlv_pointer);
+ buf_src += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
+ decoded_bytes += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
+ temp_ei = find_ei(ei_array, tlv_type);
+ if (!temp_ei && tlv_type < OPTIONAL_TLV_TYPE_START) {
+ pr_err("%s: Inval element info\n", __func__);
+ return -EINVAL;
+ } else if (!temp_ei) {
+ UPDATE_DECODE_VARIABLES(buf_src,
+ decoded_bytes, tlv_len);
+ continue;
+ }
+ } else {
+ /*
+ * No length information for elements in nested
+ * structures. So use remaining decodable buffer space.
+ */
+ tlv_len = in_buf_len - decoded_bytes;
+ }
+
+ buf_dst = out_c_struct + temp_ei->offset;
+ if (temp_ei->data_type == QMI_OPT_FLAG) {
+ memcpy(buf_dst, &opt_flag_value, sizeof(u8));
+ temp_ei = temp_ei + 1;
+ buf_dst = out_c_struct + temp_ei->offset;
+ }
+
+ if (temp_ei->data_type == QMI_DATA_LEN) {
+ data_len_sz = temp_ei->elem_size == sizeof(u8) ?
+ sizeof(u8) : sizeof(u16);
+ rc = qmi_decode_basic_elem(&data_len_value, buf_src,
+ 1, data_len_sz);
+ memcpy(buf_dst, &data_len_value, sizeof(u32));
+ temp_ei = temp_ei + 1;
+ buf_dst = out_c_struct + temp_ei->offset;
+ tlv_len -= data_len_sz;
+ UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc);
+ }
+
+ if (temp_ei->array_type == NO_ARRAY) {
+ data_len_value = 1;
+ } else if (temp_ei->array_type == STATIC_ARRAY) {
+ data_len_value = temp_ei->elem_len;
+ } else if (data_len_value > temp_ei->elem_len) {
+ pr_err("%s: Data len %d > max spec %d\n",
+ __func__, data_len_value, temp_ei->elem_len);
+ return -ETOOSMALL;
+ }
+
+ switch (temp_ei->data_type) {
+ case QMI_UNSIGNED_1_BYTE:
+ case QMI_UNSIGNED_2_BYTE:
+ case QMI_UNSIGNED_4_BYTE:
+ case QMI_UNSIGNED_8_BYTE:
+ case QMI_SIGNED_2_BYTE_ENUM:
+ case QMI_SIGNED_4_BYTE_ENUM:
+ rc = qmi_decode_basic_elem(buf_dst, buf_src,
+ data_len_value,
+ temp_ei->elem_size);
+ UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc);
+ break;
+
+ case QMI_STRUCT:
+ rc = qmi_decode_struct_elem(temp_ei, buf_dst, buf_src,
+ data_len_value, tlv_len,
+ dec_level + 1);
+ if (rc < 0)
+ return rc;
+ UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc);
+ break;
+
+ case QMI_STRING:
+ rc = qmi_decode_string_elem(temp_ei, buf_dst, buf_src,
+ tlv_len, dec_level);
+ if (rc < 0)
+ return rc;
+ UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc);
+ break;
+
+ default:
+ pr_err("%s: Unrecognized data type\n", __func__);
+ return -EINVAL;
+ }
+ temp_ei = temp_ei + 1;
+ }
+
+ return decoded_bytes;
+}
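+
+/*
+ * Illustrative note (editor's addition, not part of the driver): at
+ * dec_level 1 each loop iteration above consumes one TLV from the wire: a
+ * TLV_TYPE_SIZE-byte type, a TLV_LEN_SIZE-byte length and then "tlv_len"
+ * bytes of value, which is either dispatched via find_ei() or skipped when
+ * it is an unknown optional TLV. Assuming the usual 1-byte type and 2-byte
+ * little-endian length used by this encoder, a single QMI_UNSIGNED_4_BYTE
+ * element with tlv_type 0x01 and value 0x12345678 would appear on a
+ * little-endian host roughly as:
+ *
+ *   01 04 00 78 56 34 12
+ *   |  |     +-- value
+ *   |  +-- length
+ *   +-- type
+ */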
+
+/**
+ * qmi_encode_message() - Encode C structure as QMI encoded message
+ * @type: Type of QMI message
+ * @msg_id: Message ID of the message
+ * @len: Passed as max length of the message, updated to actual size
+ * @txn_id: Transaction ID
+ * @ei: QMI message descriptor
+ * @c_struct: Reference to structure to encode
+ *
+ * Return: Buffer with encoded message, or ERR_PTR() encoding a negative errno on error
+ */
+void *qmi_encode_message(int type, unsigned int msg_id, size_t *len,
+ unsigned int txn_id, const struct qmi_elem_info *ei,
+ const void *c_struct)
+{
+ struct qmi_header *hdr;
+ ssize_t msglen = 0;
+ void *msg;
+ int ret;
+
+ /* Check the possibility of a zero length QMI message */
+ if (!c_struct) {
+ ret = qmi_calc_min_msg_len(ei, 1);
+ if (ret) {
+ pr_err("%s: Calc. len %d != 0, but NULL c_struct\n",
+ __func__, ret);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ msg = kzalloc(sizeof(*hdr) + *len, GFP_KERNEL);
+ if (!msg)
+ return ERR_PTR(-ENOMEM);
+
+ /* Encode message, if we have a message */
+ if (c_struct) {
+ msglen = qmi_encode(ei, msg + sizeof(*hdr), c_struct, *len, 1);
+ if (msglen < 0) {
+ kfree(msg);
+ return ERR_PTR(msglen);
+ }
+ }
+
+ hdr = msg;
+ hdr->type = type;
+ hdr->txn_id = txn_id;
+ hdr->msg_id = msg_id;
+ hdr->msg_len = msglen;
+
+ *len = sizeof(*hdr) + msglen;
+
+ return msg;
+}
+EXPORT_SYMBOL(qmi_encode_message);
+
+/**
+ * qmi_decode_message() - Decode QMI encoded message to C structure
+ * @buf: Buffer with encoded message
+ * @len: Amount of data in @buf
+ * @ei: QMI message descriptor
+ * @c_struct: Reference to structure to decode into
+ *
+ * Return: The number of bytes of decoded information on success, negative
+ * errno on error.
+ */
+int qmi_decode_message(const void *buf, size_t len,
+ const struct qmi_elem_info *ei, void *c_struct)
+{
+ if (!ei)
+ return -EINVAL;
+
+ if (!c_struct || !buf || !len)
+ return -EINVAL;
+
+ return qmi_decode(ei, c_struct, buf + sizeof(struct qmi_header),
+ len - sizeof(struct qmi_header), 1);
+}
+EXPORT_SYMBOL(qmi_decode_message);
+
+/* Common header in all QMI responses */
+const struct qmi_elem_info qmi_response_type_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_2_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct qmi_response_type_v01, result),
+ .ei_array = NULL,
+ },
+ {
+ .data_type = QMI_SIGNED_2_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct qmi_response_type_v01, error),
+ .ei_array = NULL,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .elem_len = 0,
+ .elem_size = 0,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = 0,
+ .ei_array = NULL,
+ },
+};
+EXPORT_SYMBOL(qmi_response_type_v01_ei);
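+
+/*
+ * Usage sketch (editor's illustration, not part of the driver): a message
+ * descriptor is a qmi_elem_info array terminated by a QMI_EOTI entry, and a
+ * C struct can be round-tripped through qmi_encode_message() and
+ * qmi_decode_message(). The struct name, TLV type 0x01, message id 0x23 and
+ * buffer size below are made up for the example.
+ *
+ *   struct example_req {
+ *       u32 value;
+ *   };
+ *
+ *   static const struct qmi_elem_info example_req_ei[] = {
+ *       {
+ *           .data_type  = QMI_UNSIGNED_4_BYTE,
+ *           .elem_len   = 1,
+ *           .elem_size  = sizeof(u32),
+ *           .array_type = NO_ARRAY,
+ *           .tlv_type   = 0x01,
+ *           .offset     = offsetof(struct example_req, value),
+ *       },
+ *       { .data_type = QMI_EOTI, .array_type = NO_ARRAY },
+ *   };
+ *
+ *   struct example_req req = { .value = 42 }, out = {};
+ *   size_t len = 32;
+ *   void *msg;
+ *
+ *   msg = qmi_encode_message(QMI_REQUEST, 0x23, &len, 1,
+ *                            example_req_ei, &req);
+ *   if (!IS_ERR(msg)) {
+ *       qmi_decode_message(msg, len, example_req_ei, &out);
+ *       kfree(msg);
+ *   }
+ */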
+
+MODULE_DESCRIPTION("QMI encoder/decoder helper");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/qmi_interface.c b/drivers/soc/qcom/qmi_interface.c
new file mode 100644
index 0000000000..78d7361fdc
--- /dev/null
+++ b/drivers/soc/qcom/qmi_interface.c
@@ -0,0 +1,854 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Linaro Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/qrtr.h>
+#include <linux/net.h>
+#include <linux/completion.h>
+#include <linux/idr.h>
+#include <linux/string.h>
+#include <net/sock.h>
+#include <linux/workqueue.h>
+#include <trace/events/sock.h>
+#include <linux/soc/qcom/qmi.h>
+
+static struct socket *qmi_sock_create(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq);
+
+/**
+ * qmi_recv_new_server() - handler of NEW_SERVER control message
+ * @qmi: qmi handle
+ * @service: service id of the new server
+ * @instance: instance id of the new server
+ * @node: node of the new server
+ * @port: port of the new server
+ *
+ * Calls the new_server callback to inform the client about a newly registered
+ * server matching the currently registered service lookup.
+ */
+static void qmi_recv_new_server(struct qmi_handle *qmi,
+ unsigned int service, unsigned int instance,
+ unsigned int node, unsigned int port)
+{
+ struct qmi_ops *ops = &qmi->ops;
+ struct qmi_service *svc;
+ int ret;
+
+ if (!ops->new_server)
+ return;
+
+ /* Ignore EOF marker */
+ if (!node && !port)
+ return;
+
+ svc = kzalloc(sizeof(*svc), GFP_KERNEL);
+ if (!svc)
+ return;
+
+ svc->service = service;
+ svc->version = instance & 0xff;
+ svc->instance = instance >> 8;
+ svc->node = node;
+ svc->port = port;
+
+ ret = ops->new_server(qmi, svc);
+ if (ret < 0)
+ kfree(svc);
+ else
+ list_add(&svc->list_node, &qmi->lookup_results);
+}
+
+/**
+ * qmi_recv_del_server() - handler of DEL_SERVER control message
+ * @qmi: qmi handle
+ * @node: node of the dying server, a value of -1 matches all nodes
+ * @port: port of the dying server, a value of -1 matches all ports
+ *
+ * Calls the del_server callback for each previously seen server, allowing the
+ * client to react to the disappearing server.
+ */
+static void qmi_recv_del_server(struct qmi_handle *qmi,
+ unsigned int node, unsigned int port)
+{
+ struct qmi_ops *ops = &qmi->ops;
+ struct qmi_service *svc;
+ struct qmi_service *tmp;
+
+ list_for_each_entry_safe(svc, tmp, &qmi->lookup_results, list_node) {
+ if (node != -1 && svc->node != node)
+ continue;
+ if (port != -1 && svc->port != port)
+ continue;
+
+ if (ops->del_server)
+ ops->del_server(qmi, svc);
+
+ list_del(&svc->list_node);
+ kfree(svc);
+ }
+}
+
+/**
+ * qmi_recv_bye() - handler of BYE control message
+ * @qmi: qmi handle
+ * @node: id of the dying node
+ *
+ * Signals the client that all previously registered services on this node are
+ * now gone and then calls the bye callback to allow the client further
+ * cleaning up resources associated with this remote.
+ */
+static void qmi_recv_bye(struct qmi_handle *qmi,
+ unsigned int node)
+{
+ struct qmi_ops *ops = &qmi->ops;
+
+ qmi_recv_del_server(qmi, node, -1);
+
+ if (ops->bye)
+ ops->bye(qmi, node);
+}
+
+/**
+ * qmi_recv_del_client() - handler of DEL_CLIENT control message
+ * @qmi: qmi handle
+ * @node: node of the dying client
+ * @port: port of the dying client
+ *
+ * Signals the client about a dying client, by calling the del_client callback.
+ */
+static void qmi_recv_del_client(struct qmi_handle *qmi,
+ unsigned int node, unsigned int port)
+{
+ struct qmi_ops *ops = &qmi->ops;
+
+ if (ops->del_client)
+ ops->del_client(qmi, node, port);
+}
+
+static void qmi_recv_ctrl_pkt(struct qmi_handle *qmi,
+ const void *buf, size_t len)
+{
+ const struct qrtr_ctrl_pkt *pkt = buf;
+
+ if (len < sizeof(struct qrtr_ctrl_pkt)) {
+ pr_debug("ignoring short control packet\n");
+ return;
+ }
+
+ switch (le32_to_cpu(pkt->cmd)) {
+ case QRTR_TYPE_BYE:
+ qmi_recv_bye(qmi, le32_to_cpu(pkt->client.node));
+ break;
+ case QRTR_TYPE_NEW_SERVER:
+ qmi_recv_new_server(qmi,
+ le32_to_cpu(pkt->server.service),
+ le32_to_cpu(pkt->server.instance),
+ le32_to_cpu(pkt->server.node),
+ le32_to_cpu(pkt->server.port));
+ break;
+ case QRTR_TYPE_DEL_SERVER:
+ qmi_recv_del_server(qmi,
+ le32_to_cpu(pkt->server.node),
+ le32_to_cpu(pkt->server.port));
+ break;
+ case QRTR_TYPE_DEL_CLIENT:
+ qmi_recv_del_client(qmi,
+ le32_to_cpu(pkt->client.node),
+ le32_to_cpu(pkt->client.port));
+ break;
+ }
+}
+
+static void qmi_send_new_lookup(struct qmi_handle *qmi, struct qmi_service *svc)
+{
+ struct qrtr_ctrl_pkt pkt;
+ struct sockaddr_qrtr sq;
+ struct msghdr msg = { };
+ struct kvec iv = { &pkt, sizeof(pkt) };
+ int ret;
+
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.cmd = cpu_to_le32(QRTR_TYPE_NEW_LOOKUP);
+ pkt.server.service = cpu_to_le32(svc->service);
+ pkt.server.instance = cpu_to_le32(svc->version | svc->instance << 8);
+
+ sq.sq_family = qmi->sq.sq_family;
+ sq.sq_node = qmi->sq.sq_node;
+ sq.sq_port = QRTR_PORT_CTRL;
+
+ msg.msg_name = &sq;
+ msg.msg_namelen = sizeof(sq);
+
+ mutex_lock(&qmi->sock_lock);
+ if (qmi->sock) {
+ ret = kernel_sendmsg(qmi->sock, &msg, &iv, 1, sizeof(pkt));
+ if (ret < 0)
+ pr_err("failed to send lookup registration: %d\n", ret);
+ }
+ mutex_unlock(&qmi->sock_lock);
+}
+
+/**
+ * qmi_add_lookup() - register a new lookup with the name service
+ * @qmi: qmi handle
+ * @service: service id of the request
+ * @instance: instance id of the request
+ * @version: version number of the request
+ *
+ * Registering a lookup query with the name server will cause the name server
+ * to send NEW_SERVER and DEL_SERVER control messages to this socket as
+ * matching services are registered.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int qmi_add_lookup(struct qmi_handle *qmi, unsigned int service,
+ unsigned int version, unsigned int instance)
+{
+ struct qmi_service *svc;
+
+ svc = kzalloc(sizeof(*svc), GFP_KERNEL);
+ if (!svc)
+ return -ENOMEM;
+
+ svc->service = service;
+ svc->version = version;
+ svc->instance = instance;
+
+ list_add(&svc->list_node, &qmi->lookups);
+
+ qmi_send_new_lookup(qmi, svc);
+
+ return 0;
+}
+EXPORT_SYMBOL(qmi_add_lookup);
+
+static void qmi_send_new_server(struct qmi_handle *qmi, struct qmi_service *svc)
+{
+ struct qrtr_ctrl_pkt pkt;
+ struct sockaddr_qrtr sq;
+ struct msghdr msg = { };
+ struct kvec iv = { &pkt, sizeof(pkt) };
+ int ret;
+
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.cmd = cpu_to_le32(QRTR_TYPE_NEW_SERVER);
+ pkt.server.service = cpu_to_le32(svc->service);
+ pkt.server.instance = cpu_to_le32(svc->version | svc->instance << 8);
+ pkt.server.node = cpu_to_le32(qmi->sq.sq_node);
+ pkt.server.port = cpu_to_le32(qmi->sq.sq_port);
+
+ sq.sq_family = qmi->sq.sq_family;
+ sq.sq_node = qmi->sq.sq_node;
+ sq.sq_port = QRTR_PORT_CTRL;
+
+ msg.msg_name = &sq;
+ msg.msg_namelen = sizeof(sq);
+
+ mutex_lock(&qmi->sock_lock);
+ if (qmi->sock) {
+ ret = kernel_sendmsg(qmi->sock, &msg, &iv, 1, sizeof(pkt));
+ if (ret < 0)
+ pr_err("send service registration failed: %d\n", ret);
+ }
+ mutex_unlock(&qmi->sock_lock);
+}
+
+/**
+ * qmi_add_server() - register a service with the name service
+ * @qmi: qmi handle
+ * @service: type of the service
+ * @instance: instance of the service
+ * @version: version of the service
+ *
+ * Register a new service with the name service. This allows clients to find
+ * and start sending messages to the client associated with @qmi.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int qmi_add_server(struct qmi_handle *qmi, unsigned int service,
+ unsigned int version, unsigned int instance)
+{
+ struct qmi_service *svc;
+
+ svc = kzalloc(sizeof(*svc), GFP_KERNEL);
+ if (!svc)
+ return -ENOMEM;
+
+ svc->service = service;
+ svc->version = version;
+ svc->instance = instance;
+
+ list_add(&svc->list_node, &qmi->services);
+
+ qmi_send_new_server(qmi, svc);
+
+ return 0;
+}
+EXPORT_SYMBOL(qmi_add_server);
+
+/**
+ * qmi_txn_init() - allocate transaction id within the given QMI handle
+ * @qmi: QMI handle
+ * @txn: transaction context
+ * @ei: description of how to decode a matching response (optional)
+ * @c_struct: pointer to the object to decode the response into (optional)
+ *
+ * This allocates a transaction id within the QMI handle. If @ei and @c_struct
+ * are specified any responses to this transaction will be decoded as described
+ * by @ei into @c_struct.
+ *
+ * A client calling qmi_txn_init() must call either qmi_txn_wait() or
+ * qmi_txn_cancel() to free up the allocated resources.
+ *
+ * Return: Transaction id on success, negative errno on failure.
+ */
+int qmi_txn_init(struct qmi_handle *qmi, struct qmi_txn *txn,
+ const struct qmi_elem_info *ei, void *c_struct)
+{
+ int ret;
+
+ memset(txn, 0, sizeof(*txn));
+
+ mutex_init(&txn->lock);
+ init_completion(&txn->completion);
+ txn->qmi = qmi;
+ txn->ei = ei;
+ txn->dest = c_struct;
+
+ mutex_lock(&qmi->txn_lock);
+ ret = idr_alloc_cyclic(&qmi->txns, txn, 0, U16_MAX, GFP_KERNEL);
+ if (ret < 0)
+ pr_err("failed to allocate transaction id\n");
+
+ txn->id = ret;
+ mutex_unlock(&qmi->txn_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(qmi_txn_init);
+
+/**
+ * qmi_txn_wait() - wait for a response on a transaction
+ * @txn: transaction handle
+ * @timeout: timeout, in jiffies
+ *
+ * If the transaction is decoded by means of @ei and @c_struct, the return
+ * value will be the return value of qmi_decode_message(); otherwise it's up
+ * to the specified message handler to fill out the result.
+ *
+ * Return: the transaction response on success, negative errno on failure.
+ */
+int qmi_txn_wait(struct qmi_txn *txn, unsigned long timeout)
+{
+ struct qmi_handle *qmi = txn->qmi;
+ int ret;
+
+ ret = wait_for_completion_timeout(&txn->completion, timeout);
+
+ mutex_lock(&qmi->txn_lock);
+ mutex_lock(&txn->lock);
+ idr_remove(&qmi->txns, txn->id);
+ mutex_unlock(&txn->lock);
+ mutex_unlock(&qmi->txn_lock);
+
+ if (ret == 0)
+ return -ETIMEDOUT;
+ else
+ return txn->result;
+}
+EXPORT_SYMBOL(qmi_txn_wait);
+
+/**
+ * qmi_txn_cancel() - cancel an ongoing transaction
+ * @txn: transaction handle
+ */
+void qmi_txn_cancel(struct qmi_txn *txn)
+{
+ struct qmi_handle *qmi = txn->qmi;
+
+ mutex_lock(&qmi->txn_lock);
+ mutex_lock(&txn->lock);
+ idr_remove(&qmi->txns, txn->id);
+ mutex_unlock(&txn->lock);
+ mutex_unlock(&qmi->txn_lock);
+}
+EXPORT_SYMBOL(qmi_txn_cancel);
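+
+/*
+ * Transaction lifecycle sketch (editor's illustration): a caller pairs
+ * qmi_txn_init() with either qmi_txn_wait() or qmi_txn_cancel(). The
+ * example_* names, server_sq, EXAMPLE_MSG_ID and EXAMPLE_REQ_MAX_LEN below
+ * are placeholders, not part of this API.
+ *
+ *   struct example_resp resp = {};
+ *   struct qmi_txn txn;
+ *   int ret;
+ *
+ *   ret = qmi_txn_init(qmi, &txn, example_resp_ei, &resp);
+ *   if (ret < 0)
+ *       return ret;
+ *
+ *   ret = qmi_send_request(qmi, &server_sq, &txn, EXAMPLE_MSG_ID,
+ *                          EXAMPLE_REQ_MAX_LEN, example_req_ei, &req);
+ *   if (ret < 0) {
+ *       qmi_txn_cancel(&txn);
+ *       return ret;
+ *   }
+ *
+ *   ret = qmi_txn_wait(&txn, 5 * HZ);
+ */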
+
+/**
+ * qmi_invoke_handler() - find and invoke a handler for a message
+ * @qmi: qmi handle
+ * @sq: sockaddr of the sender
+ * @txn: transaction object for the message
+ * @buf: buffer containing the message
+ * @len: length of @buf
+ *
+ * Find handler and invoke handler for the incoming message.
+ */
+static void qmi_invoke_handler(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, const void *buf, size_t len)
+{
+ const struct qmi_msg_handler *handler;
+ const struct qmi_header *hdr = buf;
+ void *dest;
+ int ret;
+
+ if (!qmi->handlers)
+ return;
+
+ for (handler = qmi->handlers; handler->fn; handler++) {
+ if (handler->type == hdr->type &&
+ handler->msg_id == hdr->msg_id)
+ break;
+ }
+
+ if (!handler->fn)
+ return;
+
+ dest = kzalloc(handler->decoded_size, GFP_KERNEL);
+ if (!dest)
+ return;
+
+ ret = qmi_decode_message(buf, len, handler->ei, dest);
+ if (ret < 0)
+ pr_err("failed to decode incoming message\n");
+ else
+ handler->fn(qmi, sq, txn, dest);
+
+ kfree(dest);
+}
+
+/**
+ * qmi_handle_net_reset() - invoked to handle ENETRESET on a QMI handle
+ * @qmi: the QMI context
+ *
+ * As a result of registering a name service with the QRTR all open sockets are
+ * flagged with ENETRESET and this function will be called. The typical case is
+ * the initial boot, where this signals that the local node id has been
+ * configured and as such any bound sockets need to be rebound. So close the
+ * socket, inform the client and re-initialize the socket.
+ *
+ * For clients it's generally sufficient to react to the del_server callbacks,
+ * but server code is expected to treat the net_reset callback as a "bye" from
+ * all nodes.
+ *
+ * Finally the QMI handle will send out registration requests for any lookups
+ * and services.
+ */
+static void qmi_handle_net_reset(struct qmi_handle *qmi)
+{
+ struct sockaddr_qrtr sq;
+ struct qmi_service *svc;
+ struct socket *sock;
+
+ sock = qmi_sock_create(qmi, &sq);
+ if (IS_ERR(sock))
+ return;
+
+ mutex_lock(&qmi->sock_lock);
+ sock_release(qmi->sock);
+ qmi->sock = NULL;
+ mutex_unlock(&qmi->sock_lock);
+
+ qmi_recv_del_server(qmi, -1, -1);
+
+ if (qmi->ops.net_reset)
+ qmi->ops.net_reset(qmi);
+
+ mutex_lock(&qmi->sock_lock);
+ qmi->sock = sock;
+ qmi->sq = sq;
+ mutex_unlock(&qmi->sock_lock);
+
+ list_for_each_entry(svc, &qmi->lookups, list_node)
+ qmi_send_new_lookup(qmi, svc);
+
+ list_for_each_entry(svc, &qmi->services, list_node)
+ qmi_send_new_server(qmi, svc);
+}
+
+static void qmi_handle_message(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq,
+ const void *buf, size_t len)
+{
+ const struct qmi_header *hdr;
+ struct qmi_txn tmp_txn;
+ struct qmi_txn *txn = NULL;
+ int ret;
+
+ if (len < sizeof(*hdr)) {
+ pr_err("ignoring short QMI packet\n");
+ return;
+ }
+
+ hdr = buf;
+
+ /* If this is a response, find the matching transaction handle */
+ if (hdr->type == QMI_RESPONSE) {
+ mutex_lock(&qmi->txn_lock);
+ txn = idr_find(&qmi->txns, hdr->txn_id);
+
+ /* Ignore unexpected responses */
+ if (!txn) {
+ mutex_unlock(&qmi->txn_lock);
+ return;
+ }
+
+ mutex_lock(&txn->lock);
+ mutex_unlock(&qmi->txn_lock);
+
+ if (txn->dest && txn->ei) {
+ ret = qmi_decode_message(buf, len, txn->ei, txn->dest);
+ if (ret < 0)
+ pr_err("failed to decode incoming message\n");
+
+ txn->result = ret;
+ complete(&txn->completion);
+ } else {
+ qmi_invoke_handler(qmi, sq, txn, buf, len);
+ }
+
+ mutex_unlock(&txn->lock);
+ } else {
+ /* Create a txn based on the txn_id of the incoming message */
+ memset(&tmp_txn, 0, sizeof(tmp_txn));
+ tmp_txn.id = hdr->txn_id;
+
+ qmi_invoke_handler(qmi, sq, &tmp_txn, buf, len);
+ }
+}
+
+static void qmi_data_ready_work(struct work_struct *work)
+{
+ struct qmi_handle *qmi = container_of(work, struct qmi_handle, work);
+ struct qmi_ops *ops = &qmi->ops;
+ struct sockaddr_qrtr sq;
+ struct msghdr msg = { .msg_name = &sq, .msg_namelen = sizeof(sq) };
+ struct kvec iv;
+ ssize_t msglen;
+
+ for (;;) {
+ iv.iov_base = qmi->recv_buf;
+ iv.iov_len = qmi->recv_buf_size;
+
+ mutex_lock(&qmi->sock_lock);
+ if (qmi->sock)
+ msglen = kernel_recvmsg(qmi->sock, &msg, &iv, 1,
+ iv.iov_len, MSG_DONTWAIT);
+ else
+ msglen = -EPIPE;
+ mutex_unlock(&qmi->sock_lock);
+ if (msglen == -EAGAIN)
+ break;
+
+ if (msglen == -ENETRESET) {
+ qmi_handle_net_reset(qmi);
+
+ /* The old qmi->sock is gone, our work is done */
+ break;
+ }
+
+ if (msglen < 0) {
+ pr_err("qmi recvmsg failed: %zd\n", msglen);
+ break;
+ }
+
+ if (sq.sq_node == qmi->sq.sq_node &&
+ sq.sq_port == QRTR_PORT_CTRL) {
+ qmi_recv_ctrl_pkt(qmi, qmi->recv_buf, msglen);
+ } else if (ops->msg_handler) {
+ ops->msg_handler(qmi, &sq, qmi->recv_buf, msglen);
+ } else {
+ qmi_handle_message(qmi, &sq, qmi->recv_buf, msglen);
+ }
+ }
+}
+
+static void qmi_data_ready(struct sock *sk)
+{
+ struct qmi_handle *qmi = sk->sk_user_data;
+
+ trace_sk_data_ready(sk);
+
+ /*
+ * This will be NULL if we receive data while being in
+ * qmi_handle_release()
+ */
+ if (!qmi)
+ return;
+
+ queue_work(qmi->wq, &qmi->work);
+}
+
+static struct socket *qmi_sock_create(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq)
+{
+ struct socket *sock;
+ int ret;
+
+ ret = sock_create_kern(&init_net, AF_QIPCRTR, SOCK_DGRAM,
+ PF_QIPCRTR, &sock);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ ret = kernel_getsockname(sock, (struct sockaddr *)sq);
+ if (ret < 0) {
+ sock_release(sock);
+ return ERR_PTR(ret);
+ }
+
+ sock->sk->sk_user_data = qmi;
+ sock->sk->sk_data_ready = qmi_data_ready;
+ sock->sk->sk_error_report = qmi_data_ready;
+
+ return sock;
+}
+
+/**
+ * qmi_handle_init() - initialize a QMI client handle
+ * @qmi: QMI handle to initialize
+ * @recv_buf_size: maximum size of incoming message
+ * @ops: reference to callbacks for QRTR notifications
+ * @handlers: NULL-terminated list of QMI message handlers
+ *
+ * This initializes the QMI client handle to allow sending and receiving QMI
+ * messages. As messages are received the appropriate handler will be invoked.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int qmi_handle_init(struct qmi_handle *qmi, size_t recv_buf_size,
+ const struct qmi_ops *ops,
+ const struct qmi_msg_handler *handlers)
+{
+ int ret;
+
+ mutex_init(&qmi->txn_lock);
+ mutex_init(&qmi->sock_lock);
+
+ idr_init(&qmi->txns);
+
+ INIT_LIST_HEAD(&qmi->lookups);
+ INIT_LIST_HEAD(&qmi->lookup_results);
+ INIT_LIST_HEAD(&qmi->services);
+
+ INIT_WORK(&qmi->work, qmi_data_ready_work);
+
+ qmi->handlers = handlers;
+ if (ops)
+ qmi->ops = *ops;
+
+ /* Make room for the header */
+ recv_buf_size += sizeof(struct qmi_header);
+ /* Must also be sufficient to hold a control packet */
+ if (recv_buf_size < sizeof(struct qrtr_ctrl_pkt))
+ recv_buf_size = sizeof(struct qrtr_ctrl_pkt);
+
+ qmi->recv_buf_size = recv_buf_size;
+ qmi->recv_buf = kzalloc(recv_buf_size, GFP_KERNEL);
+ if (!qmi->recv_buf)
+ return -ENOMEM;
+
+ qmi->wq = alloc_ordered_workqueue("qmi_msg_handler", 0);
+ if (!qmi->wq) {
+ ret = -ENOMEM;
+ goto err_free_recv_buf;
+ }
+
+ qmi->sock = qmi_sock_create(qmi, &qmi->sq);
+ if (IS_ERR(qmi->sock)) {
+ if (PTR_ERR(qmi->sock) == -EAFNOSUPPORT) {
+ ret = -EPROBE_DEFER;
+ } else {
+ pr_err("failed to create QMI socket\n");
+ ret = PTR_ERR(qmi->sock);
+ }
+ goto err_destroy_wq;
+ }
+
+ return 0;
+
+err_destroy_wq:
+ destroy_workqueue(qmi->wq);
+err_free_recv_buf:
+ kfree(qmi->recv_buf);
+
+ return ret;
+}
+EXPORT_SYMBOL(qmi_handle_init);
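+
+/*
+ * Initialization sketch (editor's illustration): a client typically supplies
+ * a qmi_ops with server notification callbacks and, optionally, a table of
+ * message handlers keyed by message type and id; an entry with .fn == NULL
+ * terminates the table. All example_* names and constants are placeholders.
+ *
+ *   static const struct qmi_msg_handler example_handlers[] = {
+ *       {
+ *           .type         = QMI_INDICATION,
+ *           .msg_id       = EXAMPLE_IND_MSG_ID,
+ *           .ei           = example_ind_ei,
+ *           .decoded_size = sizeof(struct example_ind),
+ *           .fn           = example_ind_cb,
+ *       },
+ *       { }
+ *   };
+ *
+ *   static const struct qmi_ops example_ops = {
+ *       .new_server = example_new_server,
+ *       .del_server = example_del_server,
+ *   };
+ *
+ *   ret = qmi_handle_init(&example->qmi, EXAMPLE_MAX_MSG_LEN,
+ *                         &example_ops, example_handlers);
+ *   if (!ret)
+ *       ret = qmi_add_lookup(&example->qmi, EXAMPLE_SERVICE_ID, 1, 0);
+ */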
+
+/**
+ * qmi_handle_release() - release the QMI client handle
+ * @qmi: QMI client handle
+ *
+ * This closes the underlying socket and stops any handling of QMI messages.
+ */
+void qmi_handle_release(struct qmi_handle *qmi)
+{
+ struct socket *sock = qmi->sock;
+ struct qmi_service *svc, *tmp;
+
+ sock->sk->sk_user_data = NULL;
+ cancel_work_sync(&qmi->work);
+
+ qmi_recv_del_server(qmi, -1, -1);
+
+ mutex_lock(&qmi->sock_lock);
+ sock_release(sock);
+ qmi->sock = NULL;
+ mutex_unlock(&qmi->sock_lock);
+
+ destroy_workqueue(qmi->wq);
+
+ idr_destroy(&qmi->txns);
+
+ kfree(qmi->recv_buf);
+
+ /* Free registered lookup requests */
+ list_for_each_entry_safe(svc, tmp, &qmi->lookups, list_node) {
+ list_del(&svc->list_node);
+ kfree(svc);
+ }
+
+ /* Free registered service information */
+ list_for_each_entry_safe(svc, tmp, &qmi->services, list_node) {
+ list_del(&svc->list_node);
+ kfree(svc);
+ }
+}
+EXPORT_SYMBOL(qmi_handle_release);
+
+/**
+ * qmi_send_message() - send a QMI message
+ * @qmi: QMI client handle
+ * @sq: destination sockaddr
+ * @txn: transaction object to use for the message
+ * @type: type of message to send
+ * @msg_id: message id
+ * @len: max length of the QMI message
+ * @ei: QMI message description
+ * @c_struct: object to be encoded
+ *
+ * This function encodes @c_struct using @ei into a message of type @type,
+ * with message id @msg_id and the transaction id from @txn, in a buffer of
+ * at most @len bytes, and sends it to @sq.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static ssize_t qmi_send_message(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq, struct qmi_txn *txn,
+ int type, int msg_id, size_t len,
+ const struct qmi_elem_info *ei,
+ const void *c_struct)
+{
+ struct msghdr msghdr = {};
+ struct kvec iv;
+ void *msg;
+ int ret;
+
+ msg = qmi_encode_message(type,
+ msg_id, &len,
+ txn->id, ei,
+ c_struct);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ iv.iov_base = msg;
+ iv.iov_len = len;
+
+ if (sq) {
+ msghdr.msg_name = sq;
+ msghdr.msg_namelen = sizeof(*sq);
+ }
+
+ mutex_lock(&qmi->sock_lock);
+ if (qmi->sock) {
+ ret = kernel_sendmsg(qmi->sock, &msghdr, &iv, 1, len);
+ if (ret < 0)
+ pr_err("failed to send QMI message\n");
+ } else {
+ ret = -EPIPE;
+ }
+ mutex_unlock(&qmi->sock_lock);
+
+ kfree(msg);
+
+ return ret < 0 ? ret : 0;
+}
+
+/**
+ * qmi_send_request() - send a request QMI message
+ * @qmi: QMI client handle
+ * @sq: destination sockaddr
+ * @txn: transaction object to use for the message
+ * @msg_id: message id
+ * @len: max length of the QMI message
+ * @ei: QMI message description
+ * @c_struct: object to be encoded
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+ssize_t qmi_send_request(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, int msg_id, size_t len,
+ const struct qmi_elem_info *ei, const void *c_struct)
+{
+ return qmi_send_message(qmi, sq, txn, QMI_REQUEST, msg_id, len, ei,
+ c_struct);
+}
+EXPORT_SYMBOL(qmi_send_request);
+
+/**
+ * qmi_send_response() - send a response QMI message
+ * @qmi: QMI client handle
+ * @sq: destination sockaddr
+ * @txn: transaction object to use for the message
+ * @msg_id: message id
+ * @len: max length of the QMI message
+ * @ei: QMI message description
+ * @c_struct: object to be encoded
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+ssize_t qmi_send_response(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, int msg_id, size_t len,
+ const struct qmi_elem_info *ei, const void *c_struct)
+{
+ return qmi_send_message(qmi, sq, txn, QMI_RESPONSE, msg_id, len, ei,
+ c_struct);
+}
+EXPORT_SYMBOL(qmi_send_response);
+
+/**
+ * qmi_send_indication() - send an indication QMI message
+ * @qmi: QMI client handle
+ * @sq: destination sockaddr
+ * @msg_id: message id
+ * @len: max length of the QMI message
+ * @ei: QMI message description
+ * @c_struct: object to be encoded
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+ssize_t qmi_send_indication(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
+ int msg_id, size_t len,
+ const struct qmi_elem_info *ei,
+ const void *c_struct)
+{
+ struct qmi_txn txn;
+ ssize_t rval;
+ int ret;
+
+ ret = qmi_txn_init(qmi, &txn, NULL, NULL);
+ if (ret < 0)
+ return ret;
+
+ rval = qmi_send_message(qmi, sq, &txn, QMI_INDICATION, msg_id, len, ei,
+ c_struct);
+
+ /* We don't care about future messages on this txn */
+ qmi_txn_cancel(&txn);
+
+ return rval;
+}
+EXPORT_SYMBOL(qmi_send_indication);
diff --git a/drivers/soc/qcom/ramp_controller.c b/drivers/soc/qcom/ramp_controller.c
new file mode 100644
index 0000000000..e9a0cca071
--- /dev/null
+++ b/drivers/soc/qcom/ramp_controller.c
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm Ramp Controller driver
+ * Copyright (c) 2022, AngeloGioacchino Del Regno
+ * <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+#define RC_UPDATE_EN BIT(0)
+#define RC_ROOT_EN BIT(1)
+
+#define RC_REG_CFG_UPDATE 0x60
+#define RC_CFG_UPDATE_EN BIT(8)
+#define RC_CFG_ACK GENMASK(31, 16)
+
+#define RC_DCVS_CFG_SID 2
+#define RC_LINK_SID 3
+#define RC_LMH_SID 6
+#define RC_DFS_SID 14
+
+#define RC_UPDATE_TIMEOUT_US 500
+
+/**
+ * struct qcom_ramp_controller_desc - SoC specific parameters
+ * @cfg_dfs_sid: Dynamic Frequency Scaling SID configuration
+ * @cfg_link_sid: Link SID configuration
+ * @cfg_lmh_sid: Limits Management hardware SID configuration
+ * @cfg_ramp_en: Ramp Controller enable sequence
+ * @cfg_ramp_dis: Ramp Controller disable sequence
+ * @cmd_reg: Command register offset
+ * @num_dfs_sids: Number of DFS SIDs (max 8)
+ * @num_link_sids: Number of Link SIDs (max 3)
+ * @num_lmh_sids: Number of LMh SIDs (max 8)
+ * @num_ramp_en: Number of entries in enable sequence
+ * @num_ramp_dis: Number of entries in disable sequence
+ */
+struct qcom_ramp_controller_desc {
+ const struct reg_sequence *cfg_dfs_sid;
+ const struct reg_sequence *cfg_link_sid;
+ const struct reg_sequence *cfg_lmh_sid;
+ const struct reg_sequence *cfg_ramp_en;
+ const struct reg_sequence *cfg_ramp_dis;
+ u8 cmd_reg;
+ u8 num_dfs_sids;
+ u8 num_link_sids;
+ u8 num_lmh_sids;
+ u8 num_ramp_en;
+ u8 num_ramp_dis;
+};
+
+/**
+ * struct qcom_ramp_controller - Main driver structure
+ * @regmap: Regmap handle
+ * @desc: SoC specific parameters
+ */
+struct qcom_ramp_controller {
+ struct regmap *regmap;
+ const struct qcom_ramp_controller_desc *desc;
+};
+
+/**
+ * rc_wait_for_update() - Wait for Ramp Controller root update
+ * @qrc: Main driver structure
+ *
+ * Return: Zero for success or negative number for failure
+ */
+static int rc_wait_for_update(struct qcom_ramp_controller *qrc)
+{
+ const struct qcom_ramp_controller_desc *d = qrc->desc;
+ struct regmap *r = qrc->regmap;
+ u32 val;
+ int ret;
+
+ ret = regmap_set_bits(r, d->cmd_reg, RC_ROOT_EN);
+ if (ret)
+ return ret;
+
+ return regmap_read_poll_timeout(r, d->cmd_reg, val, !(val & RC_UPDATE_EN),
+ 1, RC_UPDATE_TIMEOUT_US);
+}
+
+/**
+ * rc_set_cfg_update() - Ramp Controller configuration update
+ * @qrc: Main driver structure
+ * @ce: Configuration entry to update
+ *
+ * Return: Zero for success or negative number for failure
+ */
+static int rc_set_cfg_update(struct qcom_ramp_controller *qrc, u8 ce)
+{
+ const struct qcom_ramp_controller_desc *d = qrc->desc;
+ struct regmap *r = qrc->regmap;
+ u32 ack, val;
+ int ret;
+
+ /* The ack bit is between bits 16-31 of RC_REG_CFG_UPDATE */
+ ack = FIELD_PREP(RC_CFG_ACK, BIT(ce));
+
+ /* Write the configuration type first... */
+ ret = regmap_set_bits(r, d->cmd_reg + RC_REG_CFG_UPDATE, ce);
+ if (ret)
+ return ret;
+
+ /* ...and after that, enable the update bit to sync the changes */
+ ret = regmap_set_bits(r, d->cmd_reg + RC_REG_CFG_UPDATE, RC_CFG_UPDATE_EN);
+ if (ret)
+ return ret;
+
+ /* Wait for the changes to go through */
+ ret = regmap_read_poll_timeout(r, d->cmd_reg + RC_REG_CFG_UPDATE, val,
+ val & ack, 1, RC_UPDATE_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ /*
+ * Configuration update success! The CFG_UPDATE register will not be
+ * cleared automatically upon applying the configuration, so we have
+ * to do that manually in order to leave the ramp controller in a
+ * predictable and clean state.
+ */
+ ret = regmap_write(r, d->cmd_reg + RC_REG_CFG_UPDATE, 0);
+ if (ret)
+ return ret;
+
+ /* Wait for the update bit cleared ack */
+ return regmap_read_poll_timeout(r, d->cmd_reg + RC_REG_CFG_UPDATE,
+ val, !(val & RC_CFG_ACK), 1,
+ RC_UPDATE_TIMEOUT_US);
+}
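+
+/*
+ * Editor's note: for configuration entry 2 (RC_DCVS_CFG_SID) the ack mask
+ * computed above is FIELD_PREP(GENMASK(31, 16), BIT(2)) = BIT(2) << 16 =
+ * 0x00040000, i.e. each SID acknowledges on its own bit in the upper half
+ * of RC_REG_CFG_UPDATE.
+ */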
+
+/**
+ * rc_write_cfg() - Send configuration sequence
+ * @qrc: Main driver structure
+ * @seq: Register sequence to send before asking for update
+ * @ce: Configuration SID
+ * @nsids: Total number of SIDs
+ *
+ * Return: Zero for success or negative number for error
+ */
+static int rc_write_cfg(struct qcom_ramp_controller *qrc,
+ const struct reg_sequence *seq,
+ u16 ce, u8 nsids)
+{
+ int ret;
+ u8 i;
+
+ /* Check if, and wait until the ramp controller is ready */
+ ret = rc_wait_for_update(qrc);
+ if (ret)
+ return ret;
+
+ /* Write the sequence */
+ ret = regmap_multi_reg_write(qrc->regmap, seq, nsids);
+ if (ret)
+ return ret;
+
+ /* Pull the trigger: do config update starting from the last sid */
+ for (i = 0; i < nsids; i++) {
+ ret = rc_set_cfg_update(qrc, (u8)ce - i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * rc_ramp_ctrl_enable() - Enable Ramp up/down Control
+ * @qrc: Main driver structure
+ *
+ * Return: Zero for success or negative number for error
+ */
+static int rc_ramp_ctrl_enable(struct qcom_ramp_controller *qrc)
+{
+ const struct qcom_ramp_controller_desc *d = qrc->desc;
+ int i, ret;
+
+ for (i = 0; i < d->num_ramp_en; i++) {
+ ret = rc_write_cfg(qrc, &d->cfg_ramp_en[i], RC_DCVS_CFG_SID, 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * qcom_ramp_controller_start() - Initialize and start the ramp controller
+ * @qrc: Main driver structure
+ *
+ * The Ramp Controller needs to be initialized by programming the relevant
+ * registers with SoC-specific configuration: once programming is done,
+ * the hardware will take care of the rest (no further handling required).
+ *
+ * Return: Zero for success or negative number for error
+ */
+static int qcom_ramp_controller_start(struct qcom_ramp_controller *qrc)
+{
+ const struct qcom_ramp_controller_desc *d = qrc->desc;
+ int ret;
+
+ /* Program LMH, DFS, Link SIDs */
+ ret = rc_write_cfg(qrc, d->cfg_lmh_sid, RC_LMH_SID, d->num_lmh_sids);
+ if (ret)
+ return ret;
+
+ ret = rc_write_cfg(qrc, d->cfg_dfs_sid, RC_DFS_SID, d->num_dfs_sids);
+ if (ret)
+ return ret;
+
+ ret = rc_write_cfg(qrc, d->cfg_link_sid, RC_LINK_SID, d->num_link_sids);
+ if (ret)
+ return ret;
+
+ /* Everything is ready! Enable the ramp up/down control */
+ return rc_ramp_ctrl_enable(qrc);
+}
+
+static const struct regmap_config qrc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x68,
+ .fast_io = true,
+};
+
+static const struct reg_sequence msm8976_cfg_dfs_sid[] = {
+ { 0x10, 0xfefebff7 },
+ { 0x14, 0xfdff7fef },
+ { 0x18, 0xfbffdefb },
+ { 0x1c, 0xb69b5555 },
+ { 0x20, 0x24929249 },
+ { 0x24, 0x49241112 },
+ { 0x28, 0x11112111 },
+ { 0x2c, 0x8102 }
+};
+
+static const struct reg_sequence msm8976_cfg_link_sid[] = {
+ { 0x40, 0xfc987 }
+};
+
+static const struct reg_sequence msm8976_cfg_lmh_sid[] = {
+ { 0x30, 0x77706db },
+ { 0x34, 0x5550249 },
+ { 0x38, 0x111 }
+};
+
+static const struct reg_sequence msm8976_cfg_ramp_en[] = {
+ { 0x50, 0x800 }, /* pre_en */
+ { 0x50, 0xc00 }, /* en */
+ { 0x50, 0x400 } /* post_en */
+};
+
+static const struct reg_sequence msm8976_cfg_ramp_dis[] = {
+ { 0x50, 0x0 }
+};
+
+static const struct qcom_ramp_controller_desc msm8976_rc_cfg = {
+ .cfg_dfs_sid = msm8976_cfg_dfs_sid,
+ .num_dfs_sids = ARRAY_SIZE(msm8976_cfg_dfs_sid),
+
+ .cfg_link_sid = msm8976_cfg_link_sid,
+ .num_link_sids = ARRAY_SIZE(msm8976_cfg_link_sid),
+
+ .cfg_lmh_sid = msm8976_cfg_lmh_sid,
+ .num_lmh_sids = ARRAY_SIZE(msm8976_cfg_lmh_sid),
+
+ .cfg_ramp_en = msm8976_cfg_ramp_en,
+ .num_ramp_en = ARRAY_SIZE(msm8976_cfg_ramp_en),
+
+ .cfg_ramp_dis = msm8976_cfg_ramp_dis,
+ .num_ramp_dis = ARRAY_SIZE(msm8976_cfg_ramp_dis),
+
+ .cmd_reg = 0x0,
+};
+
+static int qcom_ramp_controller_probe(struct platform_device *pdev)
+{
+ struct qcom_ramp_controller *qrc;
+ void __iomem *base;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ qrc = devm_kmalloc(&pdev->dev, sizeof(*qrc), GFP_KERNEL);
+ if (!qrc)
+ return -ENOMEM;
+
+ qrc->desc = device_get_match_data(&pdev->dev);
+ if (!qrc->desc)
+ return -EINVAL;
+
+ qrc->regmap = devm_regmap_init_mmio(&pdev->dev, base, &qrc_regmap_config);
+ if (IS_ERR(qrc->regmap))
+ return PTR_ERR(qrc->regmap);
+
+ platform_set_drvdata(pdev, qrc);
+
+ return qcom_ramp_controller_start(qrc);
+}
+
+static void qcom_ramp_controller_remove(struct platform_device *pdev)
+{
+ struct qcom_ramp_controller *qrc = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = rc_write_cfg(qrc, qrc->desc->cfg_ramp_dis,
+ RC_DCVS_CFG_SID, qrc->desc->num_ramp_dis);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to send disable sequence\n");
+}
+
+static const struct of_device_id qcom_ramp_controller_match_table[] = {
+ { .compatible = "qcom,msm8976-ramp-controller", .data = &msm8976_rc_cfg },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, qcom_ramp_controller_match_table);
+
+static struct platform_driver qcom_ramp_controller_driver = {
+ .driver = {
+ .name = "qcom-ramp-controller",
+ .of_match_table = qcom_ramp_controller_match_table,
+ .suppress_bind_attrs = true,
+ },
+ .probe = qcom_ramp_controller_probe,
+ .remove_new = qcom_ramp_controller_remove,
+};
+
+static int __init qcom_ramp_controller_init(void)
+{
+ return platform_driver_register(&qcom_ramp_controller_driver);
+}
+arch_initcall(qcom_ramp_controller_init);
+
+MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>");
+MODULE_DESCRIPTION("Qualcomm Ramp Controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/qcom/rmtfs_mem.c b/drivers/soc/qcom/rmtfs_mem.c
new file mode 100644
index 0000000000..f83811f511
--- /dev/null
+++ b/drivers/soc/qcom/rmtfs_mem.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017 Linaro Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/cdev.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/firmware/qcom/qcom_scm.h>
+
+#define QCOM_RMTFS_MEM_DEV_MAX (MINORMASK + 1)
+#define NUM_MAX_VMIDS 2
+
+static dev_t qcom_rmtfs_mem_major;
+
+struct qcom_rmtfs_mem {
+ struct device dev;
+ struct cdev cdev;
+
+ void *base;
+ phys_addr_t addr;
+ phys_addr_t size;
+
+ unsigned int client_id;
+
+ u64 perms;
+};
+
+static ssize_t qcom_rmtfs_mem_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+static DEVICE_ATTR(phys_addr, 0444, qcom_rmtfs_mem_show, NULL);
+static DEVICE_ATTR(size, 0444, qcom_rmtfs_mem_show, NULL);
+static DEVICE_ATTR(client_id, 0444, qcom_rmtfs_mem_show, NULL);
+
+static ssize_t qcom_rmtfs_mem_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = container_of(dev,
+ struct qcom_rmtfs_mem,
+ dev);
+
+ if (attr == &dev_attr_phys_addr)
+ return sprintf(buf, "%pa\n", &rmtfs_mem->addr);
+ if (attr == &dev_attr_size)
+ return sprintf(buf, "%pa\n", &rmtfs_mem->size);
+ if (attr == &dev_attr_client_id)
+ return sprintf(buf, "%d\n", rmtfs_mem->client_id);
+
+ return -EINVAL;
+}
+
+static struct attribute *qcom_rmtfs_mem_attrs[] = {
+ &dev_attr_phys_addr.attr,
+ &dev_attr_size.attr,
+ &dev_attr_client_id.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(qcom_rmtfs_mem);
+
+static int qcom_rmtfs_mem_open(struct inode *inode, struct file *filp)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = container_of(inode->i_cdev,
+ struct qcom_rmtfs_mem,
+ cdev);
+
+ get_device(&rmtfs_mem->dev);
+ filp->private_data = rmtfs_mem;
+
+ return 0;
+}
+static ssize_t qcom_rmtfs_mem_read(struct file *filp,
+ char __user *buf, size_t count, loff_t *f_pos)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = filp->private_data;
+
+ if (*f_pos >= rmtfs_mem->size)
+ return 0;
+
+ if (*f_pos + count >= rmtfs_mem->size)
+ count = rmtfs_mem->size - *f_pos;
+
+ if (copy_to_user(buf, rmtfs_mem->base + *f_pos, count))
+ return -EFAULT;
+
+ *f_pos += count;
+ return count;
+}
+
+static ssize_t qcom_rmtfs_mem_write(struct file *filp,
+ const char __user *buf, size_t count,
+ loff_t *f_pos)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = filp->private_data;
+
+ if (*f_pos >= rmtfs_mem->size)
+ return 0;
+
+ if (*f_pos + count >= rmtfs_mem->size)
+ count = rmtfs_mem->size - *f_pos;
+
+ if (copy_from_user(rmtfs_mem->base + *f_pos, buf, count))
+ return -EFAULT;
+
+ *f_pos += count;
+ return count;
+}
+
+static int qcom_rmtfs_mem_release(struct inode *inode, struct file *filp)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = filp->private_data;
+
+ put_device(&rmtfs_mem->dev);
+
+ return 0;
+}
+
+static struct class rmtfs_class = {
+ .name = "rmtfs",
+};
+
+static int qcom_rmtfs_mem_mmap(struct file *filep, struct vm_area_struct *vma)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = filep->private_data;
+
+ if (vma->vm_end - vma->vm_start > rmtfs_mem->size) {
+ dev_dbg(&rmtfs_mem->dev,
+ "vm_end[%lu] - vm_start[%lu] [%lu] > mem->size[%pa]\n",
+ vma->vm_end, vma->vm_start,
+ (vma->vm_end - vma->vm_start), &rmtfs_mem->size);
+ return -EINVAL;
+ }
+
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ return remap_pfn_range(vma,
+ vma->vm_start,
+ rmtfs_mem->addr >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+}
+
+static const struct file_operations qcom_rmtfs_mem_fops = {
+ .owner = THIS_MODULE,
+ .open = qcom_rmtfs_mem_open,
+ .read = qcom_rmtfs_mem_read,
+ .write = qcom_rmtfs_mem_write,
+ .release = qcom_rmtfs_mem_release,
+ .llseek = default_llseek,
+ .mmap = qcom_rmtfs_mem_mmap,
+};
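+
+/*
+ * Userspace usage sketch (editor's illustration): the char device created
+ * below is named after the client id, e.g. /dev/qcom_rmtfs_mem1 for
+ * qcom,client-id = <1>, assuming the usual udev setup. The region can be
+ * accessed with read()/write() or mapped write-combined:
+ *
+ *   int fd = open("/dev/qcom_rmtfs_mem1", O_RDWR);
+ *   void *shm = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ *
+ * where "size" must not exceed the value exposed by the device's "size"
+ * sysfs attribute.
+ */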
+
+static void qcom_rmtfs_mem_release_device(struct device *dev)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = container_of(dev,
+ struct qcom_rmtfs_mem,
+ dev);
+
+ kfree(rmtfs_mem);
+}
+
+static int qcom_rmtfs_mem_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct qcom_scm_vmperm perms[NUM_MAX_VMIDS + 1];
+ struct reserved_mem *rmem;
+ struct qcom_rmtfs_mem *rmtfs_mem;
+ u32 client_id;
+ u32 vmid[NUM_MAX_VMIDS];
+ int num_vmids;
+ int ret, i;
+
+ rmem = of_reserved_mem_lookup(node);
+ if (!rmem) {
+ dev_err(&pdev->dev, "failed to acquire memory region\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "qcom,client-id", &client_id);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to parse \"qcom,client-id\"\n");
+ return ret;
+
+ }
+
+ rmtfs_mem = kzalloc(sizeof(*rmtfs_mem), GFP_KERNEL);
+ if (!rmtfs_mem)
+ return -ENOMEM;
+
+ rmtfs_mem->addr = rmem->base;
+ rmtfs_mem->client_id = client_id;
+ rmtfs_mem->size = rmem->size;
+
+ device_initialize(&rmtfs_mem->dev);
+ rmtfs_mem->dev.parent = &pdev->dev;
+ rmtfs_mem->dev.groups = qcom_rmtfs_mem_groups;
+ rmtfs_mem->dev.release = qcom_rmtfs_mem_release_device;
+
+ rmtfs_mem->base = devm_memremap(&rmtfs_mem->dev, rmtfs_mem->addr,
+ rmtfs_mem->size, MEMREMAP_WC);
+ if (IS_ERR(rmtfs_mem->base)) {
+ dev_err(&pdev->dev, "failed to remap rmtfs_mem region\n");
+ ret = PTR_ERR(rmtfs_mem->base);
+ goto put_device;
+ }
+
+ cdev_init(&rmtfs_mem->cdev, &qcom_rmtfs_mem_fops);
+ rmtfs_mem->cdev.owner = THIS_MODULE;
+
+ dev_set_name(&rmtfs_mem->dev, "qcom_rmtfs_mem%d", client_id);
+ rmtfs_mem->dev.id = client_id;
+ rmtfs_mem->dev.class = &rmtfs_class;
+ rmtfs_mem->dev.devt = MKDEV(MAJOR(qcom_rmtfs_mem_major), client_id);
+
+ ret = cdev_device_add(&rmtfs_mem->cdev, &rmtfs_mem->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add cdev: %d\n", ret);
+ goto put_device;
+ }
+
+ num_vmids = of_property_count_u32_elems(node, "qcom,vmid");
+ if (num_vmids == -EINVAL) {
+ /* qcom,vmid is optional */
+ num_vmids = 0;
+ } else if (num_vmids < 0) {
+ dev_err(&pdev->dev, "failed to count qcom,vmid elements: %d\n", num_vmids);
+ ret = num_vmids;
+ goto remove_cdev;
+ } else if (num_vmids > NUM_MAX_VMIDS) {
+ dev_warn(&pdev->dev,
+ "too many VMIDs (%d) specified! Only mapping first %d entries\n",
+ num_vmids, NUM_MAX_VMIDS);
+ num_vmids = NUM_MAX_VMIDS;
+ }
+
+ ret = of_property_read_u32_array(node, "qcom,vmid", vmid, num_vmids);
+ if (ret < 0 && ret != -EINVAL) {
+ dev_err(&pdev->dev, "failed to parse qcom,vmid\n");
+ goto remove_cdev;
+ } else if (!ret) {
+ if (!qcom_scm_is_available()) {
+ ret = -EPROBE_DEFER;
+ goto remove_cdev;
+ }
+
+ perms[0].vmid = QCOM_SCM_VMID_HLOS;
+ perms[0].perm = QCOM_SCM_PERM_RW;
+
+ for (i = 0; i < num_vmids; i++) {
+ perms[i + 1].vmid = vmid[i];
+ perms[i + 1].perm = QCOM_SCM_PERM_RW;
+ }
+
+ rmtfs_mem->perms = BIT(QCOM_SCM_VMID_HLOS);
+ ret = qcom_scm_assign_mem(rmtfs_mem->addr, rmtfs_mem->size,
+ &rmtfs_mem->perms, perms, num_vmids + 1);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "assign memory failed\n");
+ goto remove_cdev;
+ }
+ }
+
+ dev_set_drvdata(&pdev->dev, rmtfs_mem);
+
+ return 0;
+
+remove_cdev:
+ cdev_device_del(&rmtfs_mem->cdev, &rmtfs_mem->dev);
+put_device:
+ put_device(&rmtfs_mem->dev);
+
+ return ret;
+}
+
+static int qcom_rmtfs_mem_remove(struct platform_device *pdev)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = dev_get_drvdata(&pdev->dev);
+ struct qcom_scm_vmperm perm;
+
+ if (rmtfs_mem->perms) {
+ perm.vmid = QCOM_SCM_VMID_HLOS;
+ perm.perm = QCOM_SCM_PERM_RW;
+
+ qcom_scm_assign_mem(rmtfs_mem->addr, rmtfs_mem->size,
+ &rmtfs_mem->perms, &perm, 1);
+ }
+
+ cdev_device_del(&rmtfs_mem->cdev, &rmtfs_mem->dev);
+ put_device(&rmtfs_mem->dev);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_rmtfs_mem_of_match[] = {
+ { .compatible = "qcom,rmtfs-mem" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_rmtfs_mem_of_match);
+
+static struct platform_driver qcom_rmtfs_mem_driver = {
+ .probe = qcom_rmtfs_mem_probe,
+ .remove = qcom_rmtfs_mem_remove,
+ .driver = {
+ .name = "qcom_rmtfs_mem",
+ .of_match_table = qcom_rmtfs_mem_of_match,
+ },
+};
+
+static int __init qcom_rmtfs_mem_init(void)
+{
+ int ret;
+
+ ret = class_register(&rmtfs_class);
+ if (ret)
+ return ret;
+
+ ret = alloc_chrdev_region(&qcom_rmtfs_mem_major, 0,
+ QCOM_RMTFS_MEM_DEV_MAX, "qcom_rmtfs_mem");
+ if (ret < 0) {
+ pr_err("qcom_rmtfs_mem: failed to allocate char dev region\n");
+ goto unregister_class;
+ }
+
+ ret = platform_driver_register(&qcom_rmtfs_mem_driver);
+ if (ret < 0) {
+ pr_err("qcom_rmtfs_mem: failed to register rmtfs_mem driver\n");
+ goto unregister_chrdev;
+ }
+
+ return 0;
+
+unregister_chrdev:
+ unregister_chrdev_region(qcom_rmtfs_mem_major, QCOM_RMTFS_MEM_DEV_MAX);
+unregister_class:
+ class_unregister(&rmtfs_class);
+ return ret;
+}
+module_init(qcom_rmtfs_mem_init);
+
+static void __exit qcom_rmtfs_mem_exit(void)
+{
+ platform_driver_unregister(&qcom_rmtfs_mem_driver);
+ unregister_chrdev_region(qcom_rmtfs_mem_major, QCOM_RMTFS_MEM_DEV_MAX);
+ class_unregister(&rmtfs_class);
+}
+module_exit(qcom_rmtfs_mem_exit);
+
+MODULE_AUTHOR("Linaro Ltd");
+MODULE_DESCRIPTION("Qualcomm Remote Filesystem memory driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/rpm-proc.c b/drivers/soc/qcom/rpm-proc.c
new file mode 100644
index 0000000000..2995d9b901
--- /dev/null
+++ b/drivers/soc/qcom/rpm-proc.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2021-2023, Stephan Gerhold <stephan@gerhold.net> */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/rpmsg/qcom_smd.h>
+
+static int rpm_proc_probe(struct platform_device *pdev)
+{
+ struct qcom_smd_edge *edge = NULL;
+ struct device *dev = &pdev->dev;
+ struct device_node *edge_node;
+ int ret;
+
+ edge_node = of_get_child_by_name(dev->of_node, "smd-edge");
+ if (edge_node) {
+ edge = qcom_smd_register_edge(dev, edge_node);
+ of_node_put(edge_node);
+ if (IS_ERR(edge))
+ return dev_err_probe(dev, PTR_ERR(edge),
+ "Failed to register smd-edge\n");
+ }
+
+ ret = devm_of_platform_populate(dev);
+ if (ret) {
+ dev_err(dev, "Failed to populate child devices: %d\n", ret);
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, edge);
+ return 0;
+err:
+ if (edge)
+ qcom_smd_unregister_edge(edge);
+ return ret;
+}
+
+static void rpm_proc_remove(struct platform_device *pdev)
+{
+ struct qcom_smd_edge *edge = platform_get_drvdata(pdev);
+
+ if (edge)
+ qcom_smd_unregister_edge(edge);
+}
+
+static const struct of_device_id rpm_proc_of_match[] = {
+ { .compatible = "qcom,rpm-proc", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rpm_proc_of_match);
+
+static struct platform_driver rpm_proc_driver = {
+ .probe = rpm_proc_probe,
+ .remove_new = rpm_proc_remove,
+ .driver = {
+ .name = "qcom-rpm-proc",
+ .of_match_table = rpm_proc_of_match,
+ },
+};
+
+static int __init rpm_proc_init(void)
+{
+ return platform_driver_register(&rpm_proc_driver);
+}
+arch_initcall(rpm_proc_init);
+
+static void __exit rpm_proc_exit(void)
+{
+ platform_driver_unregister(&rpm_proc_driver);
+}
+module_exit(rpm_proc_exit);
+
+MODULE_DESCRIPTION("Qualcomm RPM processor/subsystem driver");
+MODULE_AUTHOR("Stephan Gerhold <stephan@gerhold.net>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/qcom/rpm_master_stats.c b/drivers/soc/qcom/rpm_master_stats.c
new file mode 100644
index 0000000000..9ca13bcf67
--- /dev/null
+++ b/drivers/soc/qcom/rpm_master_stats.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ *
+ * This driver supports what is known as "Master Stats v2" in Qualcomm
+ * downstream kernel terms, which seems to be the only version which has
+ * ever shipped, all the way from 2013 to 2023.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+struct master_stats_data {
+ void __iomem *base;
+ const char *label;
+};
+
+struct rpm_master_stats {
+ u32 active_cores;
+ u32 num_shutdowns;
+ u64 shutdown_req;
+ u64 wakeup_idx;
+ u64 bringup_req;
+ u64 bringup_ack;
+ u32 wakeup_reason; /* 0 = "rude wakeup", 1 = scheduled wakeup */
+ u32 last_sleep_trans_dur;
+ u32 last_wake_trans_dur;
+
+ /* Per-subsystem (*not necessarily* SoC-wide) XO shutdown stats */
+ u32 xo_count;
+ u64 xo_last_enter;
+ u64 last_exit;
+ u64 xo_total_dur;
+} __packed;
+
+static int master_stats_show(struct seq_file *s, void *unused)
+{
+ struct master_stats_data *data = s->private;
+ struct rpm_master_stats stat;
+
+ memcpy_fromio(&stat, data->base, sizeof(stat));
+
+ seq_printf(s, "%s:\n", data->label);
+
+ seq_printf(s, "\tLast shutdown @ %llu\n", stat.shutdown_req);
+ seq_printf(s, "\tLast bringup req @ %llu\n", stat.bringup_req);
+ seq_printf(s, "\tLast bringup ack @ %llu\n", stat.bringup_ack);
+ seq_printf(s, "\tLast wakeup idx: %llu\n", stat.wakeup_idx);
+ seq_printf(s, "\tLast XO shutdown enter @ %llu\n", stat.xo_last_enter);
+ seq_printf(s, "\tLast XO shutdown exit @ %llu\n", stat.last_exit);
+ seq_printf(s, "\tXO total duration: %llu\n", stat.xo_total_dur);
+ seq_printf(s, "\tLast sleep transition duration: %u\n", stat.last_sleep_trans_dur);
+ seq_printf(s, "\tLast wake transition duration: %u\n", stat.last_wake_trans_dur);
+ seq_printf(s, "\tXO shutdown count: %u\n", stat.xo_count);
+ seq_printf(s, "\tWakeup reason: 0x%x\n", stat.wakeup_reason);
+ seq_printf(s, "\tShutdown count: %u\n", stat.num_shutdowns);
+ seq_printf(s, "\tActive cores bitmask: 0x%x\n", stat.active_cores);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(master_stats);
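+
+/*
+ * Editor's note: each entry of "qcom,master-names" becomes a read-only
+ * debugfs file, so the stats can typically be inspected with e.g.:
+ *
+ *   cat /sys/kernel/debug/qcom_rpm_master_stats/APSS
+ *
+ * where "APSS" stands in for whatever master names the devicetree provides.
+ */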
+
+static int master_stats_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct master_stats_data *data;
+ struct device_node *msgram_np;
+ struct dentry *dent, *root;
+ struct resource res;
+ int count, i, ret;
+
+ count = of_property_count_strings(dev->of_node, "qcom,master-names");
+ if (count < 0)
+ return count;
+
+ data = devm_kzalloc(dev, count * sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ root = debugfs_create_dir("qcom_rpm_master_stats", NULL);
+ platform_set_drvdata(pdev, root);
+
+ for (i = 0; i < count; i++) {
+ msgram_np = of_parse_phandle(dev->of_node, "qcom,rpm-msg-ram", i);
+ if (!msgram_np) {
+ debugfs_remove_recursive(root);
+ return dev_err_probe(dev, -ENODEV,
+ "Couldn't parse MSG RAM phandle idx %d", i);
+ }
+
+ /*
+ * Purposefully skip devm_platform helpers as we're using a
+ * shared resource.
+ */
+ ret = of_address_to_resource(msgram_np, 0, &res);
+ of_node_put(msgram_np);
+ if (ret < 0) {
+ debugfs_remove_recursive(root);
+ return ret;
+ }
+
+ data[i].base = devm_ioremap(dev, res.start, resource_size(&res));
+ if (!data[i].base) {
+ debugfs_remove_recursive(root);
+ return dev_err_probe(dev, -EINVAL,
+ "Could not map the MSG RAM slice idx %d!\n", i);
+ }
+
+ ret = of_property_read_string_index(dev->of_node, "qcom,master-names", i,
+ &data[i].label);
+ if (ret < 0) {
+ debugfs_remove_recursive(root);
+ return dev_err_probe(dev, ret,
+ "Could not read name idx %d!\n", i);
+ }
+
+ /*
+ * Generally it's not advised to fail on debugfs errors, but this
+ * driver's only job is exposing data therein.
+ */
+ dent = debugfs_create_file(data[i].label, 0444, root,
+ &data[i], &master_stats_fops);
+ if (IS_ERR(dent)) {
+ debugfs_remove_recursive(root);
+ return dev_err_probe(dev, PTR_ERR(dent),
+ "Failed to create debugfs file %s!\n", data[i].label);
+ }
+ }
+
+ device_set_pm_not_required(dev);
+
+ return 0;
+}
+
+static void master_stats_remove(struct platform_device *pdev)
+{
+ struct dentry *root = platform_get_drvdata(pdev);
+
+ debugfs_remove_recursive(root);
+}
+
+static const struct of_device_id rpm_master_table[] = {
+ { .compatible = "qcom,rpm-master-stats" },
+ { },
+};
+
+static struct platform_driver master_stats_driver = {
+ .probe = master_stats_probe,
+ .remove_new = master_stats_remove,
+ .driver = {
+ .name = "qcom_rpm_master_stats",
+ .of_match_table = rpm_master_table,
+ },
+};
+module_platform_driver(master_stats_driver);
+
+MODULE_DESCRIPTION("Qualcomm RPM Master Statistics driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/qcom/rpmh-internal.h b/drivers/soc/qcom/rpmh-internal.h
new file mode 100644
index 0000000000..e3cf1beff8
--- /dev/null
+++ b/drivers/soc/qcom/rpmh-internal.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+
+#ifndef __RPM_INTERNAL_H__
+#define __RPM_INTERNAL_H__
+
+#include <linux/bitmap.h>
+#include <linux/wait.h>
+#include <soc/qcom/tcs.h>
+
+#define TCS_TYPE_NR 4
+#define MAX_CMDS_PER_TCS 16
+#define MAX_TCS_PER_TYPE 3
+#define MAX_TCS_NR (MAX_TCS_PER_TYPE * TCS_TYPE_NR)
+#define MAX_TCS_SLOTS (MAX_CMDS_PER_TCS * MAX_TCS_PER_TYPE)
+
+struct rsc_drv;
+
+/**
+ * struct tcs_group: group of Trigger Command Sets (TCS) to send state requests
+ * to the controller
+ *
+ * @drv: The controller.
+ * @type: Type of the TCS in this group - active, sleep, wake.
+ * @mask: Mask of the TCSes relative to all the TCSes in the RSC.
+ * @offset: Start of the TCS group relative to the TCSes in the RSC.
+ * @num_tcs: Number of TCSes in this type.
+ * @ncpt: Number of commands in each TCS.
+ * @req: Requests that are sent from the TCS; only used for ACTIVE_ONLY
+ * transfers (could be on a wake/sleep TCS if we are borrowing for
+ * an ACTIVE_ONLY transfer).
+ * Start: grab drv->lock, set req, set tcs_in_use, drop drv->lock,
+ * trigger
+ * End: get irq, access req,
+ * grab drv->lock, clear tcs_in_use, drop drv->lock
+ * @slots: Indicates which of @cmd_addr are occupied; only used for
+ * SLEEP / WAKE TCSs. Things are tightly packed in the
+ * case that (ncpt < MAX_CMDS_PER_TCS). That is, if ncpt = 2 and
+ * MAX_CMDS_PER_TCS = 16, then bit[2] is the first bit of the 2nd TCS.
+ */
+struct tcs_group {
+ struct rsc_drv *drv;
+ int type;
+ u32 mask;
+ u32 offset;
+ int num_tcs;
+ int ncpt;
+ const struct tcs_request *req[MAX_TCS_PER_TYPE];
+ DECLARE_BITMAP(slots, MAX_TCS_SLOTS);
+};
+
+/**
+ * struct rpmh_request: the message to be sent to rpmh-rsc
+ *
+ * @msg: the request
+ * @cmd: the payload that will be part of the @msg
+ * @completion: triggered when request is done
+ * @dev: the device making the request
+ * @needs_free: true if this request object was dynamically allocated and must be freed
+ */
+struct rpmh_request {
+ struct tcs_request msg;
+ struct tcs_cmd cmd[MAX_RPMH_PAYLOAD];
+ struct completion *completion;
+ const struct device *dev;
+ bool needs_free;
+};
+
+/**
+ * struct rpmh_ctrlr: our representation of the controller
+ *
+ * @cache: the list of cached requests
+ * @cache_lock: synchronize access to the cache data
+ * @dirty: was the cache updated since flush
+ * @batch_cache: Cache sleep and wake requests sent as batch
+ */
+struct rpmh_ctrlr {
+ struct list_head cache;
+ spinlock_t cache_lock;
+ bool dirty;
+ struct list_head batch_cache;
+};
+
+struct rsc_ver {
+ u32 major;
+ u32 minor;
+};
+
+/**
+ * struct rsc_drv: the Direct Resource Voter (DRV) of the
+ * Resource State Coordinator controller (RSC)
+ *
+ * @name: Controller identifier.
+ * @base: Start address of the DRV registers in this controller.
+ * @tcs_base: Start address of the TCS registers in this controller.
+ * @id: Instance id in the controller (Direct Resource Voter).
+ * @num_tcs: Number of TCSes in this DRV.
+ * @rsc_pm: CPU PM notifier for controller.
+ * Used when solver mode is not present.
+ * @cpus_in_pm: Number of CPUs that have entered idle power collapse.
+ * Used when solver mode and "power-domains" are not present.
+ * @genpd_nb: PM Domain notifier for cluster genpd notifications.
+ * @tcs: TCS groups.
+ * @tcs_in_use: S/W state of the TCS; only set for ACTIVE_ONLY
+ * transfers, but might show a sleep/wake TCS in use if
+ * it was borrowed for an active_only transfer. You
+ * must hold the lock in this struct (AKA drv->lock) in
+ * order to update this.
+ * @lock: Synchronize state of the controller. If RPMH's cache
+ * lock will also be held, the order is: drv->lock then
+ * cache_lock.
+ * @tcs_wait: Wait queue used to wait for @tcs_in_use to free up a
+ * slot
+ * @client: Handle to the DRV's client.
+ * @dev: RSC device.
+ */
+struct rsc_drv {
+ const char *name;
+ void __iomem *base;
+ void __iomem *tcs_base;
+ int id;
+ int num_tcs;
+ struct notifier_block rsc_pm;
+ struct notifier_block genpd_nb;
+ atomic_t cpus_in_pm;
+ struct tcs_group tcs[TCS_TYPE_NR];
+ DECLARE_BITMAP(tcs_in_use, MAX_TCS_NR);
+ spinlock_t lock;
+ wait_queue_head_t tcs_wait;
+ struct rpmh_ctrlr client;
+ struct device *dev;
+ struct rsc_ver ver;
+ u32 *regs;
+};
+
+int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg);
+int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv,
+ const struct tcs_request *msg);
+void rpmh_rsc_invalidate(struct rsc_drv *drv);
+void rpmh_rsc_write_next_wakeup(struct rsc_drv *drv);
+
+void rpmh_tx_done(const struct tcs_request *msg);
+int rpmh_flush(struct rpmh_ctrlr *ctrlr);
+
+#endif /* __RPM_INTERNAL_H__ */
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
new file mode 100644
index 0000000000..a021dc7180
--- /dev/null
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -0,0 +1,1160 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME
+
+#include <linux/atomic.h>
+#include <linux/cpu_pm.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+
+#include <clocksource/arm_arch_timer.h>
+#include <soc/qcom/cmd-db.h>
+#include <soc/qcom/tcs.h>
+#include <dt-bindings/soc/qcom,rpmh-rsc.h>
+
+#include "rpmh-internal.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace-rpmh.h"
+
+
+#define RSC_DRV_ID 0
+
+#define MAJOR_VER_MASK 0xFF
+#define MAJOR_VER_SHIFT 16
+#define MINOR_VER_MASK 0xFF
+#define MINOR_VER_SHIFT 8
+
+enum {
+ RSC_DRV_TCS_OFFSET,
+ RSC_DRV_CMD_OFFSET,
+ DRV_SOLVER_CONFIG,
+ DRV_PRNT_CHLD_CONFIG,
+ RSC_DRV_IRQ_ENABLE,
+ RSC_DRV_IRQ_STATUS,
+ RSC_DRV_IRQ_CLEAR,
+ RSC_DRV_CMD_WAIT_FOR_CMPL,
+ RSC_DRV_CONTROL,
+ RSC_DRV_STATUS,
+ RSC_DRV_CMD_ENABLE,
+ RSC_DRV_CMD_MSGID,
+ RSC_DRV_CMD_ADDR,
+ RSC_DRV_CMD_DATA,
+ RSC_DRV_CMD_STATUS,
+ RSC_DRV_CMD_RESP_DATA,
+};
+
+/* DRV HW Solver Configuration Information Register */
+#define DRV_HW_SOLVER_MASK 1
+#define DRV_HW_SOLVER_SHIFT 24
+
+/* DRV TCS Configuration Information Register */
+#define DRV_NUM_TCS_MASK 0x3F
+#define DRV_NUM_TCS_SHIFT 6
+#define DRV_NCPT_MASK 0x1F
+#define DRV_NCPT_SHIFT 27
+
+/* Offsets for CONTROL TCS Registers */
+#define RSC_DRV_CTL_TCS_DATA_HI 0x38
+#define RSC_DRV_CTL_TCS_DATA_HI_MASK 0xFFFFFF
+#define RSC_DRV_CTL_TCS_DATA_HI_VALID BIT(31)
+#define RSC_DRV_CTL_TCS_DATA_LO 0x40
+#define RSC_DRV_CTL_TCS_DATA_LO_MASK 0xFFFFFFFF
+#define RSC_DRV_CTL_TCS_DATA_SIZE 32
+
+#define TCS_AMC_MODE_ENABLE BIT(16)
+#define TCS_AMC_MODE_TRIGGER BIT(24)
+
+/* TCS CMD register bit mask */
+#define CMD_MSGID_LEN 8
+#define CMD_MSGID_RESP_REQ BIT(8)
+#define CMD_MSGID_WRITE BIT(16)
+#define CMD_STATUS_ISSUED BIT(8)
+#define CMD_STATUS_COMPL BIT(16)
+
+/*
+ * Here's a high level overview of how all the registers in RPMH work
+ * together:
+ *
+ * - The main rpmh-rsc address is the base of a register space that can
+ * be used to find overall configuration of the hardware
+ * (DRV_PRNT_CHLD_CONFIG). Also found within the rpmh-rsc register
+ * space are all the TCS blocks. The offset of the TCS blocks is
+ * specified in the device tree by "qcom,tcs-offset" and used to
+ * compute tcs_base.
+ * - TCS blocks come one after another. Type, count, and order are
+ * specified by the device tree as "qcom,tcs-config".
+ * - Each TCS block has some registers, then space for up to 16 commands.
+ * Note that though address space is reserved for 16 commands, fewer
+ * might be present. See ncpt (num cmds per TCS).
+ *
+ * Here's a picture:
+ *
+ * +---------------------------------------------------+
+ * |RSC |
+ * | ctrl |
+ * | |
+ * | Drvs: |
+ * | +-----------------------------------------------+ |
+ * | |DRV0 | |
+ * | | ctrl/config | |
+ * | | IRQ | |
+ * | | | |
+ * | | TCSes: | |
+ * | | +------------------------------------------+ | |
+ * | | |TCS0 | | | | | | | | | | | | | | |
+ * | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | |
+ * | | | | | | | | | | | | | | | | | |
+ * | | +------------------------------------------+ | |
+ * | | +------------------------------------------+ | |
+ * | | |TCS1 | | | | | | | | | | | | | | |
+ * | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | |
+ * | | | | | | | | | | | | | | | | | |
+ * | | +------------------------------------------+ | |
+ * | | +------------------------------------------+ | |
+ * | | |TCS2 | | | | | | | | | | | | | | |
+ * | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | |
+ * | | | | | | | | | | | | | | | | | |
+ * | | +------------------------------------------+ | |
+ * | | ...... | |
+ * | +-----------------------------------------------+ |
+ * | +-----------------------------------------------+ |
+ * | |DRV1 | |
+ * | | (same as DRV0) | |
+ * | +-----------------------------------------------+ |
+ * | ...... |
+ * +---------------------------------------------------+
+ */
+
+#define USECS_TO_CYCLES(time_usecs) \
+ xloops_to_cycles((time_usecs) * 0x10C7UL)
+
+static inline unsigned long xloops_to_cycles(u64 xloops)
+{
+ return (xloops * loops_per_jiffy * HZ) >> 32;
+}
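+
+/*
+ * Explanatory note (not part of the original file): 0x10C7 is roughly
+ * 2^32 / 10^6, so USECS_TO_CYCLES(t) evaluates to approximately
+ * t * loops_per_jiffy * HZ / 10^6, i.e. the number of timer cycles in t
+ * microseconds, assuming loops_per_jiffy * HZ tracks the arch timer rate
+ * (the same trick __const_udelay() relies on). For example, with a 19.2 MHz
+ * arch timer, USECS_TO_CYCLES(100) is about 1920 cycles.
+ */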
+
+static u32 rpmh_rsc_reg_offset_ver_2_7[] = {
+ [RSC_DRV_TCS_OFFSET] = 672,
+ [RSC_DRV_CMD_OFFSET] = 20,
+ [DRV_SOLVER_CONFIG] = 0x04,
+ [DRV_PRNT_CHLD_CONFIG] = 0x0C,
+ [RSC_DRV_IRQ_ENABLE] = 0x00,
+ [RSC_DRV_IRQ_STATUS] = 0x04,
+ [RSC_DRV_IRQ_CLEAR] = 0x08,
+ [RSC_DRV_CMD_WAIT_FOR_CMPL] = 0x10,
+ [RSC_DRV_CONTROL] = 0x14,
+ [RSC_DRV_STATUS] = 0x18,
+ [RSC_DRV_CMD_ENABLE] = 0x1C,
+ [RSC_DRV_CMD_MSGID] = 0x30,
+ [RSC_DRV_CMD_ADDR] = 0x34,
+ [RSC_DRV_CMD_DATA] = 0x38,
+ [RSC_DRV_CMD_STATUS] = 0x3C,
+ [RSC_DRV_CMD_RESP_DATA] = 0x40,
+};
+
+static u32 rpmh_rsc_reg_offset_ver_3_0[] = {
+ [RSC_DRV_TCS_OFFSET] = 672,
+ [RSC_DRV_CMD_OFFSET] = 24,
+ [DRV_SOLVER_CONFIG] = 0x04,
+ [DRV_PRNT_CHLD_CONFIG] = 0x0C,
+ [RSC_DRV_IRQ_ENABLE] = 0x00,
+ [RSC_DRV_IRQ_STATUS] = 0x04,
+ [RSC_DRV_IRQ_CLEAR] = 0x08,
+ [RSC_DRV_CMD_WAIT_FOR_CMPL] = 0x20,
+ [RSC_DRV_CONTROL] = 0x24,
+ [RSC_DRV_STATUS] = 0x28,
+ [RSC_DRV_CMD_ENABLE] = 0x2C,
+ [RSC_DRV_CMD_MSGID] = 0x34,
+ [RSC_DRV_CMD_ADDR] = 0x38,
+ [RSC_DRV_CMD_DATA] = 0x3C,
+ [RSC_DRV_CMD_STATUS] = 0x40,
+ [RSC_DRV_CMD_RESP_DATA] = 0x44,
+};
+
+static inline void __iomem *
+tcs_reg_addr(const struct rsc_drv *drv, int reg, int tcs_id)
+{
+ return drv->tcs_base + drv->regs[RSC_DRV_TCS_OFFSET] * tcs_id + reg;
+}
+
+static inline void __iomem *
+tcs_cmd_addr(const struct rsc_drv *drv, int reg, int tcs_id, int cmd_id)
+{
+ return tcs_reg_addr(drv, reg, tcs_id) + drv->regs[RSC_DRV_CMD_OFFSET] * cmd_id;
+}
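+
+/*
+ * Worked example (illustrative, not part of the original file): with the
+ * v2.7 layout below, RSC_DRV_TCS_OFFSET = 672 and RSC_DRV_CMD_OFFSET = 20,
+ * so reading command 2 of TCS 1, e.g. its RSC_DRV_CMD_ADDR register (0x34),
+ * resolves to tcs_base + 672 * 1 + 20 * 2 + 0x34.
+ */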
+
+static u32 read_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id,
+ int cmd_id)
+{
+ return readl_relaxed(tcs_cmd_addr(drv, reg, tcs_id, cmd_id));
+}
+
+static u32 read_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id)
+{
+ return readl_relaxed(tcs_reg_addr(drv, reg, tcs_id));
+}
+
+static void write_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id,
+ int cmd_id, u32 data)
+{
+ writel_relaxed(data, tcs_cmd_addr(drv, reg, tcs_id, cmd_id));
+}
+
+static void write_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id,
+ u32 data)
+{
+ writel_relaxed(data, tcs_reg_addr(drv, reg, tcs_id));
+}
+
+static void write_tcs_reg_sync(const struct rsc_drv *drv, int reg, int tcs_id,
+ u32 data)
+{
+ int i;
+
+ writel(data, tcs_reg_addr(drv, reg, tcs_id));
+
+ /*
+ * Wait until we read back the same value. Use a counter rather than
+ * ktime for timeout since this may be called after timekeeping stops.
+ */
+ for (i = 0; i < USEC_PER_SEC; i++) {
+ if (readl(tcs_reg_addr(drv, reg, tcs_id)) == data)
+ return;
+ udelay(1);
+ }
+ pr_err("%s: error writing %#x to %d:%#x\n", drv->name,
+ data, tcs_id, reg);
+}
+
+/**
+ * tcs_invalidate() - Invalidate all TCSes of the given type (sleep or wake).
+ * @drv: The RSC controller.
+ * @type: SLEEP_TCS or WAKE_TCS
+ *
+ * This will clear the "slots" variable of the given tcs_group and also
+ * tell the hardware to forget about all entries.
+ *
+ * The caller must ensure that no other RPMH actions are happening when this
+ * function is called, since otherwise the device may immediately become
+ * used again even before this function exits.
+ */
+static void tcs_invalidate(struct rsc_drv *drv, int type)
+{
+ int m;
+ struct tcs_group *tcs = &drv->tcs[type];
+
+ /* Caller ensures nobody else is running so no lock */
+ if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS))
+ return;
+
+ for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++)
+ write_tcs_reg_sync(drv, drv->regs[RSC_DRV_CMD_ENABLE], m, 0);
+
+ bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
+}
+
+/**
+ * rpmh_rsc_invalidate() - Invalidate sleep and wake TCSes.
+ * @drv: The RSC controller.
+ *
+ * The caller must ensure that no other RPMH actions are happening when this
+ * function is called, since otherwise the device may immediately become
+ * used again even before this function exits.
+ */
+void rpmh_rsc_invalidate(struct rsc_drv *drv)
+{
+ tcs_invalidate(drv, SLEEP_TCS);
+ tcs_invalidate(drv, WAKE_TCS);
+}
+
+/**
+ * get_tcs_for_msg() - Get the tcs_group used to send the given message.
+ * @drv: The RSC controller.
+ * @msg: The message we want to send.
+ *
+ * This is normally pretty straightforward except if we are trying to send
+ * an ACTIVE_ONLY message but don't have any active_only TCSes.
+ *
+ * Return: A pointer to a tcs_group or an ERR_PTR.
+ */
+static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv,
+ const struct tcs_request *msg)
+{
+ int type;
+ struct tcs_group *tcs;
+
+ switch (msg->state) {
+ case RPMH_ACTIVE_ONLY_STATE:
+ type = ACTIVE_TCS;
+ break;
+ case RPMH_WAKE_ONLY_STATE:
+ type = WAKE_TCS;
+ break;
+ case RPMH_SLEEP_STATE:
+ type = SLEEP_TCS;
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+ * If we are making an active request on an RSC that does not have a
+ * dedicated TCS for active state use, then re-purpose a wake TCS to
+ * send active votes. This is safe because we ensure any active-only
+ * transfers have finished before we use it (maybe by running from
+ * the last CPU in PM code).
+ */
+ tcs = &drv->tcs[type];
+ if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs)
+ tcs = &drv->tcs[WAKE_TCS];
+
+ return tcs;
+}
+
+/**
+ * get_req_from_tcs() - Get a stashed request that was xfering on the given TCS.
+ * @drv: The RSC controller.
+ * @tcs_id: The global ID of this TCS.
+ *
+ * For ACTIVE_ONLY transfers we want to call back into the client when the
+ * transfer finishes. To do this we need the "request" that the client
+ * originally provided us. This function grabs the request that we stashed
+ * when we started the transfer.
+ *
+ * This only makes sense for ACTIVE_ONLY transfers since those are the only
+ * ones we track sending (the only ones we enable interrupts for and the only
+ * ones we call back to the client for).
+ *
+ * Return: The stashed request.
+ */
+static const struct tcs_request *get_req_from_tcs(struct rsc_drv *drv,
+ int tcs_id)
+{
+ struct tcs_group *tcs;
+ int i;
+
+ for (i = 0; i < TCS_TYPE_NR; i++) {
+ tcs = &drv->tcs[i];
+ if (tcs->mask & BIT(tcs_id))
+ return tcs->req[tcs_id - tcs->offset];
+ }
+
+ return NULL;
+}
+
+/**
+ * __tcs_set_trigger() - Start xfer on a TCS or unset trigger on a borrowed TCS
+ * @drv: The controller.
+ * @tcs_id: The global ID of this TCS.
+ * @trigger: If true then untrigger/retrigger. If false then just untrigger.
+ *
+ * In the normal case we only ever call with "trigger=true" to start a
+ * transfer. That will un-trigger/disable the TCS from the last transfer
+ * then trigger/enable for this transfer.
+ *
+ * If we borrowed a wake TCS for an active-only transfer we'll also call
+ * this function with "trigger=false" to just do the un-trigger/disable
+ * before using the TCS for wake purposes again.
+ *
+ * Note that the AP is only in charge of triggering active-only transfers.
+ * The AP never triggers sleep/wake values using this function.
+ */
+static void __tcs_set_trigger(struct rsc_drv *drv, int tcs_id, bool trigger)
+{
+ u32 enable;
+ u32 reg = drv->regs[RSC_DRV_CONTROL];
+
+ /*
+ * HW req: Clear the DRV_CONTROL and enable TCS again
+ * While clearing ensure that the AMC mode trigger is cleared
+ * and then the mode enable is cleared.
+ */
+ enable = read_tcs_reg(drv, reg, tcs_id);
+ enable &= ~TCS_AMC_MODE_TRIGGER;
+ write_tcs_reg_sync(drv, reg, tcs_id, enable);
+ enable &= ~TCS_AMC_MODE_ENABLE;
+ write_tcs_reg_sync(drv, reg, tcs_id, enable);
+
+ if (trigger) {
+ /* Enable the AMC mode on the TCS and then trigger the TCS */
+ enable = TCS_AMC_MODE_ENABLE;
+ write_tcs_reg_sync(drv, reg, tcs_id, enable);
+ enable |= TCS_AMC_MODE_TRIGGER;
+ write_tcs_reg(drv, reg, tcs_id, enable);
+ }
+}
+
+/**
+ * enable_tcs_irq() - Enable or disable interrupts on the given TCS.
+ * @drv: The controller.
+ * @tcs_id: The global ID of this TCS.
+ * @enable: If true then enable; if false then disable
+ *
+ * We only ever call this when we borrow a wake TCS for an active-only
+ * transfer. For active-only TCSes interrupts are always left enabled.
+ */
+static void enable_tcs_irq(struct rsc_drv *drv, int tcs_id, bool enable)
+{
+ u32 data;
+ u32 reg = drv->regs[RSC_DRV_IRQ_ENABLE];
+
+ data = readl_relaxed(drv->tcs_base + reg);
+ if (enable)
+ data |= BIT(tcs_id);
+ else
+ data &= ~BIT(tcs_id);
+ writel_relaxed(data, drv->tcs_base + reg);
+}
+
+/**
+ * tcs_tx_done() - TX Done interrupt handler.
+ * @irq: The IRQ number (ignored).
+ * @p: Pointer to "struct rsc_drv".
+ *
+ * Called for ACTIVE_ONLY transfers (those are the only ones we enable the
+ * IRQ for) when a transfer is done.
+ *
+ * Return: IRQ_HANDLED
+ */
+static irqreturn_t tcs_tx_done(int irq, void *p)
+{
+ struct rsc_drv *drv = p;
+ int i;
+ unsigned long irq_status;
+ const struct tcs_request *req;
+
+ irq_status = readl_relaxed(drv->tcs_base + drv->regs[RSC_DRV_IRQ_STATUS]);
+
+ for_each_set_bit(i, &irq_status, BITS_PER_TYPE(u32)) {
+ req = get_req_from_tcs(drv, i);
+ if (WARN_ON(!req))
+ goto skip;
+
+ trace_rpmh_tx_done(drv, i, req);
+
+ /*
+ * If wake tcs was re-purposed for sending active
+ * votes, clear AMC trigger & enable modes and
+ * disable interrupt for this TCS
+ */
+ if (!drv->tcs[ACTIVE_TCS].num_tcs)
+ __tcs_set_trigger(drv, i, false);
+skip:
+ /* Reclaim the TCS */
+ write_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], i, 0);
+ writel_relaxed(BIT(i), drv->tcs_base + drv->regs[RSC_DRV_IRQ_CLEAR]);
+ spin_lock(&drv->lock);
+ clear_bit(i, drv->tcs_in_use);
+ /*
+ * Disable interrupt for WAKE TCS to avoid being
+ * spammed with interrupts coming when the solver
+ * sends its wake votes.
+ */
+ if (!drv->tcs[ACTIVE_TCS].num_tcs)
+ enable_tcs_irq(drv, i, false);
+ spin_unlock(&drv->lock);
+ wake_up(&drv->tcs_wait);
+ if (req)
+ rpmh_tx_done(req);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * __tcs_buffer_write() - Write to TCS hardware from a request; don't trigger.
+ * @drv: The controller.
+ * @tcs_id: The global ID of this TCS.
+ * @cmd_id: The index within the TCS to start writing.
+ * @msg: The message we want to send, which will contain several addr/data
+ * pairs to program (but few enough that they all fit in one TCS).
+ *
+ * This is used for all types of transfers (active, sleep, and wake).
+ */
+static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
+ const struct tcs_request *msg)
+{
+ u32 msgid;
+ u32 cmd_msgid = CMD_MSGID_LEN | CMD_MSGID_WRITE;
+ u32 cmd_enable = 0;
+ struct tcs_cmd *cmd;
+ int i, j;
+
+ /* Convert all commands to RR when the request has wait_for_compl set */
+ cmd_msgid |= msg->wait_for_compl ? CMD_MSGID_RESP_REQ : 0;
+
+ for (i = 0, j = cmd_id; i < msg->num_cmds; i++, j++) {
+ cmd = &msg->cmds[i];
+ cmd_enable |= BIT(j);
+ msgid = cmd_msgid;
+ /*
+ * Additionally, if the cmd->wait is set, make the command
+ * response reqd even if the overall request was fire-n-forget.
+ */
+ msgid |= cmd->wait ? CMD_MSGID_RESP_REQ : 0;
+
+ write_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_MSGID], tcs_id, j, msgid);
+ write_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_ADDR], tcs_id, j, cmd->addr);
+ write_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_DATA], tcs_id, j, cmd->data);
+ trace_rpmh_send_msg(drv, tcs_id, msg->state, j, msgid, cmd);
+ }
+
+ cmd_enable |= read_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], tcs_id);
+ write_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], tcs_id, cmd_enable);
+}
+
+/**
+ * check_for_req_inflight() - Look to see if conflicting cmds are in flight.
+ * @drv: The controller.
+ * @tcs: A pointer to the tcs_group used for ACTIVE_ONLY transfers.
+ * @msg: The message we want to send, which will contain several addr/data
+ * pairs to program (but few enough that they all fit in one TCS).
+ *
+ * This will walk through the TCSes in the group and check if any of them
+ * appear to be sending to addresses referenced in the message. If it finds
+ * one it'll return -EBUSY.
+ *
+ * Only for use for active-only transfers.
+ *
+ * Must be called with the drv->lock held since that protects tcs_in_use.
+ *
+ * Return: 0 if nothing in flight or -EBUSY if we should try again later.
+ * The caller must re-enable interrupts between tries since that's
+ * the only way tcs_in_use will ever be updated and the only way
+ * RSC_DRV_CMD_ENABLE will ever be cleared.
+ */
+static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
+ const struct tcs_request *msg)
+{
+ unsigned long curr_enabled;
+ u32 addr;
+ int j, k;
+ int i = tcs->offset;
+
+ for_each_set_bit_from(i, drv->tcs_in_use, tcs->offset + tcs->num_tcs) {
+ curr_enabled = read_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], i);
+
+ for_each_set_bit(j, &curr_enabled, MAX_CMDS_PER_TCS) {
+ addr = read_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_ADDR], i, j);
+ for (k = 0; k < msg->num_cmds; k++) {
+ if (addr == msg->cmds[k].addr)
+ return -EBUSY;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * find_free_tcs() - Find free tcs in the given tcs_group; only for active.
+ * @tcs: A pointer to the active-only tcs_group (or the wake tcs_group if
+ * we borrowed it because there are zero active-only ones).
+ *
+ * Must be called with the drv->lock held since that protects tcs_in_use.
+ *
+ * Return: The first tcs that's free or -EBUSY if all in use.
+ */
+static int find_free_tcs(struct tcs_group *tcs)
+{
+ const struct rsc_drv *drv = tcs->drv;
+ unsigned long i;
+ unsigned long max = tcs->offset + tcs->num_tcs;
+
+ i = find_next_zero_bit(drv->tcs_in_use, max, tcs->offset);
+ if (i >= max)
+ return -EBUSY;
+
+ return i;
+}
+
+/**
+ * claim_tcs_for_req() - Claim a tcs in the given tcs_group; only for active.
+ * @drv: The controller.
+ * @tcs: The tcs_group used for ACTIVE_ONLY transfers.
+ * @msg: The data to be sent.
+ *
+ * Claims a tcs in the given tcs_group while making sure that no existing cmd
+ * is in flight that would conflict with the one in @msg.
+ *
+ * Context: Must be called with the drv->lock held since that protects
+ * tcs_in_use.
+ *
+ * Return: The id of the claimed tcs or -EBUSY if a matching msg is in flight
+ * or the tcs_group is full.
+ */
+static int claim_tcs_for_req(struct rsc_drv *drv, struct tcs_group *tcs,
+ const struct tcs_request *msg)
+{
+ int ret;
+
+ /*
+ * The h/w does not like if we send a request to the same address,
+ * when one is already in-flight or being processed.
+ */
+ ret = check_for_req_inflight(drv, tcs, msg);
+ if (ret)
+ return ret;
+
+ return find_free_tcs(tcs);
+}
+
+/**
+ * rpmh_rsc_send_data() - Write / trigger active-only message.
+ * @drv: The controller.
+ * @msg: The data to be sent.
+ *
+ * NOTES:
+ * - This is only used for "ACTIVE_ONLY" since the limitations of this
+ * function don't make sense for sleep/wake cases.
+ * - To do the transfer, we will grab a whole TCS for ourselves--we don't
+ * try to share. If there are none available we'll wait indefinitely
+ * for a free one.
+ * - This function will not wait for the commands to be finished, only for
+ * data to be programmed into the RPMh. See rpmh_tx_done() which will
+ * be called when the transfer is fully complete.
+ * - This function must be called with interrupts enabled. If the hardware
+ * is busy doing someone else's transfer, we need that transfer to fully
+ * finish so that we can have the hardware, and to fully finish it needs
+ * the interrupt handler to run. If the interrupt is set to run on the
+ * active CPU, this can never happen while interrupts are disabled.
+ *
+ * Return: 0 on success, -EINVAL on error.
+ */
+int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
+{
+ struct tcs_group *tcs;
+ int tcs_id;
+ unsigned long flags;
+
+ tcs = get_tcs_for_msg(drv, msg);
+ if (IS_ERR(tcs))
+ return PTR_ERR(tcs);
+
+ spin_lock_irqsave(&drv->lock, flags);
+
+ /* Wait forever for a free tcs. It better be there eventually! */
+ wait_event_lock_irq(drv->tcs_wait,
+ (tcs_id = claim_tcs_for_req(drv, tcs, msg)) >= 0,
+ drv->lock);
+
+ tcs->req[tcs_id - tcs->offset] = msg;
+ set_bit(tcs_id, drv->tcs_in_use);
+ if (msg->state == RPMH_ACTIVE_ONLY_STATE && tcs->type != ACTIVE_TCS) {
+ /*
+ * Clear previously programmed WAKE commands in selected
+ * repurposed TCS to avoid triggering them. tcs->slots will be
+ * cleaned from rpmh_flush() by invoking rpmh_rsc_invalidate()
+ */
+ write_tcs_reg_sync(drv, drv->regs[RSC_DRV_CMD_ENABLE], tcs_id, 0);
+ enable_tcs_irq(drv, tcs_id, true);
+ }
+ spin_unlock_irqrestore(&drv->lock, flags);
+
+ /*
+ * These two can be done after the lock is released because:
+ * - We marked "tcs_in_use" under lock.
+ * - Once "tcs_in_use" has been marked nobody else could be writing
+ * to these registers until the interrupt goes off.
+ * - The interrupt can't go off until we trigger w/ the last line
+ * of __tcs_set_trigger() below.
+ */
+ __tcs_buffer_write(drv, tcs_id, 0, msg);
+ __tcs_set_trigger(drv, tcs_id, true);
+
+ return 0;
+}
+
+/**
+ * find_slots() - Find a place to write the given message.
+ * @tcs: The tcs group to search.
+ * @msg: The message we want to find room for.
+ * @tcs_id: If we return 0 from the function, we return the global ID of the
+ * TCS to write to here.
+ * @cmd_id: If we return 0 from the function, we return the index of
+ * the command array of the returned TCS where the client should
+ * start writing the message.
+ *
+ * Only for use on sleep/wake TCSes since those are the only ones we maintain
+ * tcs->slots for.
+ *
+ * Return: -ENOMEM if there was no room, else 0.
+ */
+static int find_slots(struct tcs_group *tcs, const struct tcs_request *msg,
+ int *tcs_id, int *cmd_id)
+{
+ int slot, offset;
+ int i = 0;
+
+ /* Do over, until we can fit the full payload in a single TCS */
+ do {
+ slot = bitmap_find_next_zero_area(tcs->slots, MAX_TCS_SLOTS,
+ i, msg->num_cmds, 0);
+ if (slot >= tcs->num_tcs * tcs->ncpt)
+ return -ENOMEM;
+ i += tcs->ncpt;
+ } while (slot + msg->num_cmds - 1 >= i);
+
+ bitmap_set(tcs->slots, slot, msg->num_cmds);
+
+ offset = slot / tcs->ncpt;
+ *tcs_id = offset + tcs->offset;
+ *cmd_id = slot % tcs->ncpt;
+
+ return 0;
+}
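+
+/*
+ * Worked example (illustrative, not part of the original file): with
+ * ncpt = 16 and tcs->offset = 3, a message of 3 commands placed at slot 18
+ * by the search above yields *tcs_id = 18 / 16 + 3 = 4 and *cmd_id = 18 % 16
+ * = 2, i.e. the commands land in the second sleep/wake TCS of the group,
+ * starting at its third command register.
+ */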
+
+/**
+ * rpmh_rsc_write_ctrl_data() - Write request to controller but don't trigger.
+ * @drv: The controller.
+ * @msg: The data to be written to the controller.
+ *
+ * This should only be called for sleep/wake state, never active-only
+ * state.
+ *
+ * The caller must ensure that no other RPMH actions are happening and the
+ * controller is idle when this function is called since it runs lockless.
+ *
+ * Return: 0 if no error; else -error.
+ */
+int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, const struct tcs_request *msg)
+{
+ struct tcs_group *tcs;
+ int tcs_id = 0, cmd_id = 0;
+ int ret;
+
+ tcs = get_tcs_for_msg(drv, msg);
+ if (IS_ERR(tcs))
+ return PTR_ERR(tcs);
+
+ /* find the TCS id and the command in the TCS to write to */
+ ret = find_slots(tcs, msg, &tcs_id, &cmd_id);
+ if (!ret)
+ __tcs_buffer_write(drv, tcs_id, cmd_id, msg);
+
+ return ret;
+}
+
+/**
+ * rpmh_rsc_ctrlr_is_busy() - Check if any of the AMCs are busy.
+ * @drv: The controller
+ *
+ * Checks if any of the AMCs are busy in handling ACTIVE sets.
+ * This is called from the last CPU powering down, before flushing the
+ * SLEEP and WAKE sets. If the AMCs are busy, the controller cannot enter
+ * power collapse, so the last CPU's PM notification is denied.
+ *
+ * Context: Must be called with the drv->lock held.
+ *
+ * Return:
+ * * False - AMCs are idle
+ * * True - AMCs are busy
+ */
+static bool rpmh_rsc_ctrlr_is_busy(struct rsc_drv *drv)
+{
+ unsigned long set;
+ const struct tcs_group *tcs = &drv->tcs[ACTIVE_TCS];
+ unsigned long max;
+
+ /*
+ * If we made an active request on an RSC that does not have a
+ * dedicated TCS for active state use, then the re-purposed wake TCSes
+ * must also be checked for being busy, because we used wake TCSes for
+ * active requests in this case.
+ */
+ if (!tcs->num_tcs)
+ tcs = &drv->tcs[WAKE_TCS];
+
+ max = tcs->offset + tcs->num_tcs;
+ set = find_next_bit(drv->tcs_in_use, max, tcs->offset);
+
+ return set < max;
+}
+
+/**
+ * rpmh_rsc_write_next_wakeup() - Write next wakeup in CONTROL_TCS.
+ * @drv: The controller
+ *
+ * Writes maximum wakeup cycles when called from suspend.
+ * Writes earliest hrtimer wakeup when called from idle.
+ */
+void rpmh_rsc_write_next_wakeup(struct rsc_drv *drv)
+{
+ ktime_t now, wakeup;
+ u64 wakeup_us, wakeup_cycles = ~0;
+ u32 lo, hi;
+
+ if (!drv->tcs[CONTROL_TCS].num_tcs || !drv->genpd_nb.notifier_call)
+ return;
+
+ /* Set highest time when system (timekeeping) is suspended */
+ if (system_state == SYSTEM_SUSPEND)
+ goto exit;
+
+ /* Find the earliest hrtimer wakeup from online cpus */
+ wakeup = dev_pm_genpd_get_next_hrtimer(drv->dev);
+
+ /* Find the relative wakeup in kernel time scale */
+ now = ktime_get();
+ wakeup = ktime_sub(wakeup, now);
+ wakeup_us = ktime_to_us(wakeup);
+
+ /* Convert the wakeup to arch timer scale */
+ wakeup_cycles = USECS_TO_CYCLES(wakeup_us);
+ wakeup_cycles += arch_timer_read_counter();
+
+exit:
+ lo = wakeup_cycles & RSC_DRV_CTL_TCS_DATA_LO_MASK;
+ hi = wakeup_cycles >> RSC_DRV_CTL_TCS_DATA_SIZE;
+ hi &= RSC_DRV_CTL_TCS_DATA_HI_MASK;
+ hi |= RSC_DRV_CTL_TCS_DATA_HI_VALID;
+
+ writel_relaxed(lo, drv->base + RSC_DRV_CTL_TCS_DATA_LO);
+ writel_relaxed(hi, drv->base + RSC_DRV_CTL_TCS_DATA_HI);
+}
+
+/**
+ * rpmh_rsc_cpu_pm_callback() - Check if any of the AMCs are busy.
+ * @nfb: Pointer to the notifier block in struct rsc_drv.
+ * @action: CPU_PM_ENTER, CPU_PM_ENTER_FAILED, or CPU_PM_EXIT.
+ * @v: Unused
+ *
+ * This function is given to cpu_pm_register_notifier so we can be informed
+ * about when CPUs go down. When all CPUs go down we know no more active
+ * transfers will be started so we write sleep/wake sets. This function gets
+ * called from cpuidle code paths and also at system suspend time.
+ *
+ * If its last CPU going down and AMCs are not busy then writes cached sleep
+ * and wake messages to TCSes. The firmware then takes care of triggering
+ * them when entering deepest low power modes.
+ *
+ * Return: See cpu_pm_register_notifier()
+ */
+static int rpmh_rsc_cpu_pm_callback(struct notifier_block *nfb,
+ unsigned long action, void *v)
+{
+ struct rsc_drv *drv = container_of(nfb, struct rsc_drv, rsc_pm);
+ int ret = NOTIFY_OK;
+ int cpus_in_pm;
+
+ switch (action) {
+ case CPU_PM_ENTER:
+ cpus_in_pm = atomic_inc_return(&drv->cpus_in_pm);
+ /*
+ * NOTE: comments for num_online_cpus() point out that it's
+ * only a snapshot so we need to be careful. It should be OK
+ * for us to use, though. It's important for us not to miss
+ * if we're the last CPU going down so it would only be a
+ * problem if a CPU went offline right after we did the check
+ * AND that CPU was not idle AND that CPU was the last non-idle
+ * CPU. That can't happen. CPUs would have to come out of idle
+ * before the CPU could go offline.
+ */
+ if (cpus_in_pm < num_online_cpus())
+ return NOTIFY_OK;
+ break;
+ case CPU_PM_ENTER_FAILED:
+ case CPU_PM_EXIT:
+ atomic_dec(&drv->cpus_in_pm);
+ return NOTIFY_OK;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ /*
+ * It's likely we're on the last CPU. Grab the drv->lock and write
+ * out the sleep/wake commands to RPMH hardware. Grabbing the lock
+ * means that if we race with another CPU coming up we are still
+ * guaranteed to be safe. If another CPU came up just after we checked
+ * and has grabbed the lock or started an active transfer then we'll
+ * notice we're busy and abort. If another CPU comes up after we start
+ * flushing it will be blocked from starting an active transfer until
+ * we're done flushing. If another CPU starts an active transfer after
+ * we release the lock we're still OK because we're no longer the last
+ * CPU.
+ */
+ if (spin_trylock(&drv->lock)) {
+ if (rpmh_rsc_ctrlr_is_busy(drv) || rpmh_flush(&drv->client))
+ ret = NOTIFY_BAD;
+ spin_unlock(&drv->lock);
+ } else {
+ /* Another CPU must be up */
+ return NOTIFY_OK;
+ }
+
+ if (ret == NOTIFY_BAD) {
+ /* Double-check if we're here because someone else is up */
+ if (cpus_in_pm < num_online_cpus())
+ ret = NOTIFY_OK;
+ else
+ /* We won't be called w/ CPU_PM_ENTER_FAILED */
+ atomic_dec(&drv->cpus_in_pm);
+ }
+
+ return ret;
+}
+
+/**
+ * rpmh_rsc_pd_callback() - Check if any of the AMCs are busy.
+ * @nfb: Pointer to the genpd notifier block in struct rsc_drv.
+ * @action: GENPD_NOTIFY_PRE_OFF, GENPD_NOTIFY_OFF, GENPD_NOTIFY_PRE_ON or GENPD_NOTIFY_ON.
+ * @v: Unused
+ *
+ * This function is given to dev_pm_genpd_add_notifier() so we can be informed
+ * about when the cluster power domain is going down. When the cluster goes
+ * down we know no more active transfers will be started, so we write the
+ * sleep/wake sets. This function gets called from cpuidle code paths and also
+ * at system suspend time.
+ *
+ * If the AMCs are not busy, this writes the cached sleep and wake messages to
+ * the TCSes. The firmware then takes care of triggering them when entering the
+ * deepest low power modes.
+ *
+ * Return:
+ * * NOTIFY_OK - success
+ * * NOTIFY_BAD - failure
+ */
+static int rpmh_rsc_pd_callback(struct notifier_block *nfb,
+ unsigned long action, void *v)
+{
+ struct rsc_drv *drv = container_of(nfb, struct rsc_drv, genpd_nb);
+
+ /* We don't need to lock as genpd on/off are serialized */
+ if ((action == GENPD_NOTIFY_PRE_OFF) &&
+ (rpmh_rsc_ctrlr_is_busy(drv) || rpmh_flush(&drv->client)))
+ return NOTIFY_BAD;
+
+ return NOTIFY_OK;
+}
+
+static int rpmh_rsc_pd_attach(struct rsc_drv *drv, struct device *dev)
+{
+ int ret;
+
+ pm_runtime_enable(dev);
+ drv->genpd_nb.notifier_call = rpmh_rsc_pd_callback;
+ ret = dev_pm_genpd_add_notifier(dev, &drv->genpd_nb);
+ if (ret)
+ pm_runtime_disable(dev);
+
+ return ret;
+}
+
+static int rpmh_probe_tcs_config(struct platform_device *pdev, struct rsc_drv *drv)
+{
+ struct tcs_type_config {
+ u32 type;
+ u32 n;
+ } tcs_cfg[TCS_TYPE_NR] = { { 0 } };
+ struct device_node *dn = pdev->dev.of_node;
+ u32 config, max_tcs, ncpt, offset;
+ int i, ret, n, st = 0;
+ struct tcs_group *tcs;
+
+ ret = of_property_read_u32(dn, "qcom,tcs-offset", &offset);
+ if (ret)
+ return ret;
+ drv->tcs_base = drv->base + offset;
+
+ config = readl_relaxed(drv->base + drv->regs[DRV_PRNT_CHLD_CONFIG]);
+
+ max_tcs = config;
+ max_tcs &= DRV_NUM_TCS_MASK << (DRV_NUM_TCS_SHIFT * drv->id);
+ max_tcs = max_tcs >> (DRV_NUM_TCS_SHIFT * drv->id);
+
+ ncpt = config & (DRV_NCPT_MASK << DRV_NCPT_SHIFT);
+ ncpt = ncpt >> DRV_NCPT_SHIFT;
+
+ n = of_property_count_u32_elems(dn, "qcom,tcs-config");
+ if (n != 2 * TCS_TYPE_NR)
+ return -EINVAL;
+
+ for (i = 0; i < TCS_TYPE_NR; i++) {
+ ret = of_property_read_u32_index(dn, "qcom,tcs-config",
+ i * 2, &tcs_cfg[i].type);
+ if (ret)
+ return ret;
+ if (tcs_cfg[i].type >= TCS_TYPE_NR)
+ return -EINVAL;
+
+ ret = of_property_read_u32_index(dn, "qcom,tcs-config",
+ i * 2 + 1, &tcs_cfg[i].n);
+ if (ret)
+ return ret;
+ if (tcs_cfg[i].n > MAX_TCS_PER_TYPE)
+ return -EINVAL;
+ }
+
+ for (i = 0; i < TCS_TYPE_NR; i++) {
+ tcs = &drv->tcs[tcs_cfg[i].type];
+ if (tcs->drv)
+ return -EINVAL;
+ tcs->drv = drv;
+ tcs->type = tcs_cfg[i].type;
+ tcs->num_tcs = tcs_cfg[i].n;
+ tcs->ncpt = ncpt;
+
+ if (!tcs->num_tcs || tcs->type == CONTROL_TCS)
+ continue;
+
+ if (st + tcs->num_tcs > max_tcs ||
+ st + tcs->num_tcs >= BITS_PER_BYTE * sizeof(tcs->mask))
+ return -EINVAL;
+
+ tcs->mask = ((1 << tcs->num_tcs) - 1) << st;
+ tcs->offset = st;
+ st += tcs->num_tcs;
+ }
+
+ drv->num_tcs = st;
+
+ return 0;
+}
+
+static int rpmh_rsc_probe(struct platform_device *pdev)
+{
+ struct device_node *dn = pdev->dev.of_node;
+ struct rsc_drv *drv;
+ char drv_id[10] = {0};
+ int ret, irq;
+ u32 solver_config;
+ u32 rsc_id;
+
+ /*
+ * Even though RPMh doesn't directly use cmd-db, all of its children
+ * do. To avoid adding this check to our children we'll do it now.
+ */
+ ret = cmd_db_ready();
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Command DB not available (%d)\n",
+ ret);
+ return ret;
+ }
+
+ drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
+ if (!drv)
+ return -ENOMEM;
+
+ ret = of_property_read_u32(dn, "qcom,drv-id", &drv->id);
+ if (ret)
+ return ret;
+
+ drv->name = of_get_property(dn, "label", NULL);
+ if (!drv->name)
+ drv->name = dev_name(&pdev->dev);
+
+ snprintf(drv_id, ARRAY_SIZE(drv_id), "drv-%d", drv->id);
+ drv->base = devm_platform_ioremap_resource_byname(pdev, drv_id);
+ if (IS_ERR(drv->base))
+ return PTR_ERR(drv->base);
+
+ rsc_id = readl_relaxed(drv->base + RSC_DRV_ID);
+ drv->ver.major = rsc_id & (MAJOR_VER_MASK << MAJOR_VER_SHIFT);
+ drv->ver.major >>= MAJOR_VER_SHIFT;
+ drv->ver.minor = rsc_id & (MINOR_VER_MASK << MINOR_VER_SHIFT);
+ drv->ver.minor >>= MINOR_VER_SHIFT;
+
+ if (drv->ver.major == 3)
+ drv->regs = rpmh_rsc_reg_offset_ver_3_0;
+ else
+ drv->regs = rpmh_rsc_reg_offset_ver_2_7;
+
+ ret = rpmh_probe_tcs_config(pdev, drv);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&drv->lock);
+ init_waitqueue_head(&drv->tcs_wait);
+ bitmap_zero(drv->tcs_in_use, MAX_TCS_NR);
+
+ irq = platform_get_irq(pdev, drv->id);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(&pdev->dev, irq, tcs_tx_done,
+ IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND,
+ drv->name, drv);
+ if (ret)
+ return ret;
+
+ /*
+ * CPU PM/genpd notifications are not required for controllers that support
+ * 'HW solver' mode, where the controller can autonomously execute low
+ * power modes and power down.
+ */
+ solver_config = readl_relaxed(drv->base + drv->regs[DRV_SOLVER_CONFIG]);
+ solver_config &= DRV_HW_SOLVER_MASK << DRV_HW_SOLVER_SHIFT;
+ solver_config = solver_config >> DRV_HW_SOLVER_SHIFT;
+ if (!solver_config) {
+ if (pdev->dev.pm_domain) {
+ ret = rpmh_rsc_pd_attach(drv, &pdev->dev);
+ if (ret)
+ return ret;
+ } else {
+ drv->rsc_pm.notifier_call = rpmh_rsc_cpu_pm_callback;
+ cpu_pm_register_notifier(&drv->rsc_pm);
+ }
+ }
+
+ /* Enable the active TCS to send requests immediately */
+ writel_relaxed(drv->tcs[ACTIVE_TCS].mask,
+ drv->tcs_base + drv->regs[RSC_DRV_IRQ_ENABLE]);
+
+ spin_lock_init(&drv->client.cache_lock);
+ INIT_LIST_HEAD(&drv->client.cache);
+ INIT_LIST_HEAD(&drv->client.batch_cache);
+
+ dev_set_drvdata(&pdev->dev, drv);
+ drv->dev = &pdev->dev;
+
+ ret = devm_of_platform_populate(&pdev->dev);
+ if (ret && pdev->dev.pm_domain) {
+ dev_pm_genpd_remove_notifier(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ }
+
+ return ret;
+}
+
+static const struct of_device_id rpmh_drv_match[] = {
+ { .compatible = "qcom,rpmh-rsc", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, rpmh_drv_match);
+
+static struct platform_driver rpmh_driver = {
+ .probe = rpmh_rsc_probe,
+ .driver = {
+ .name = "rpmh",
+ .of_match_table = rpmh_drv_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init rpmh_driver_init(void)
+{
+ return platform_driver_register(&rpmh_driver);
+}
+arch_initcall(rpmh_driver_init);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. RPMh Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
new file mode 100644
index 0000000000..08e09642d7
--- /dev/null
+++ b/drivers/soc/qcom/rpmh.c
@@ -0,0 +1,503 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include <soc/qcom/rpmh.h>
+
+#include "rpmh-internal.h"
+
+#define RPMH_TIMEOUT_MS msecs_to_jiffies(10000)
+
+#define DEFINE_RPMH_MSG_ONSTACK(device, s, q, name) \
+ struct rpmh_request name = { \
+ .msg = { \
+ .state = s, \
+ .cmds = name.cmd, \
+ .num_cmds = 0, \
+ .wait_for_compl = true, \
+ }, \
+ .cmd = { { 0 } }, \
+ .completion = q, \
+ .dev = device, \
+ .needs_free = false, \
+ }
+
+#define ctrlr_to_drv(ctrlr) container_of(ctrlr, struct rsc_drv, client)
+
+/**
+ * struct cache_req: the request object for caching
+ *
+ * @addr: the address of the resource
+ * @sleep_val: the sleep vote
+ * @wake_val: the wake vote
+ * @list: linked list obj
+ */
+struct cache_req {
+ u32 addr;
+ u32 sleep_val;
+ u32 wake_val;
+ struct list_head list;
+};
+
+/**
+ * struct batch_cache_req - An entry in our batch cache
+ *
+ * @list: linked list obj
+ * @count: number of messages
+ * @rpm_msgs: the messages
+ */
+struct batch_cache_req {
+ struct list_head list;
+ int count;
+ struct rpmh_request rpm_msgs[];
+};
+
+static struct rpmh_ctrlr *get_rpmh_ctrlr(const struct device *dev)
+{
+ struct rsc_drv *drv = dev_get_drvdata(dev->parent);
+
+ return &drv->client;
+}
+
+void rpmh_tx_done(const struct tcs_request *msg)
+{
+ struct rpmh_request *rpm_msg = container_of(msg, struct rpmh_request,
+ msg);
+ struct completion *compl = rpm_msg->completion;
+ bool free = rpm_msg->needs_free;
+
+ if (!compl)
+ goto exit;
+
+ /* Signal the blocking thread we are done */
+ complete(compl);
+
+exit:
+ if (free)
+ kfree(rpm_msg);
+}
+
+static struct cache_req *__find_req(struct rpmh_ctrlr *ctrlr, u32 addr)
+{
+ struct cache_req *p, *req = NULL;
+
+ list_for_each_entry(p, &ctrlr->cache, list) {
+ if (p->addr == addr) {
+ req = p;
+ break;
+ }
+ }
+
+ return req;
+}
+
+static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
+ enum rpmh_state state,
+ struct tcs_cmd *cmd)
+{
+ struct cache_req *req;
+ unsigned long flags;
+ u32 old_sleep_val, old_wake_val;
+
+ spin_lock_irqsave(&ctrlr->cache_lock, flags);
+ req = __find_req(ctrlr, cmd->addr);
+ if (req)
+ goto existing;
+
+ req = kzalloc(sizeof(*req), GFP_ATOMIC);
+ if (!req) {
+ req = ERR_PTR(-ENOMEM);
+ goto unlock;
+ }
+
+ req->addr = cmd->addr;
+ req->sleep_val = req->wake_val = UINT_MAX;
+ list_add_tail(&req->list, &ctrlr->cache);
+
+existing:
+ old_sleep_val = req->sleep_val;
+ old_wake_val = req->wake_val;
+
+ switch (state) {
+ case RPMH_ACTIVE_ONLY_STATE:
+ case RPMH_WAKE_ONLY_STATE:
+ req->wake_val = cmd->data;
+ break;
+ case RPMH_SLEEP_STATE:
+ req->sleep_val = cmd->data;
+ break;
+ }
+
+ ctrlr->dirty |= (req->sleep_val != old_sleep_val ||
+ req->wake_val != old_wake_val) &&
+ req->sleep_val != UINT_MAX &&
+ req->wake_val != UINT_MAX;
+
+unlock:
+ spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+
+ return req;
+}
+
+/**
+ * __rpmh_write: Cache and send the RPMH request
+ *
+ * @dev: The device making the request
+ * @state: Active/Sleep request type
+ * @rpm_msg: The data that needs to be sent (cmds).
+ *
+ * Cache the RPMH request and send if the state is ACTIVE_ONLY.
+ * SLEEP/WAKE_ONLY requests are not sent to the controller at
+ * this time. Use rpmh_flush() to send them to the controller.
+ */
+static int __rpmh_write(const struct device *dev, enum rpmh_state state,
+ struct rpmh_request *rpm_msg)
+{
+ struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+ int ret = -EINVAL;
+ struct cache_req *req;
+ int i;
+
+ /* Cache the request in our store and link the payload */
+ for (i = 0; i < rpm_msg->msg.num_cmds; i++) {
+ req = cache_rpm_request(ctrlr, state, &rpm_msg->msg.cmds[i]);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ }
+
+ if (state == RPMH_ACTIVE_ONLY_STATE) {
+ WARN_ON(irqs_disabled());
+ ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
+ } else {
+ /* Clean up our call by spoofing tx_done */
+ ret = 0;
+ rpmh_tx_done(&rpm_msg->msg);
+ }
+
+ return ret;
+}
+
+static int __fill_rpmh_msg(struct rpmh_request *req, enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 n)
+{
+ if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
+ return -EINVAL;
+
+ memcpy(req->cmd, cmd, n * sizeof(*cmd));
+
+ req->msg.state = state;
+ req->msg.cmds = req->cmd;
+ req->msg.num_cmds = n;
+
+ return 0;
+}
+
+/**
+ * rpmh_write_async: Write a set of RPMH commands
+ *
+ * @dev: The device making the request
+ * @state: Active/sleep set
+ * @cmd: The payload data
+ * @n: The number of elements in payload
+ *
+ * Write a set of RPMH commands; the order of the commands is maintained and
+ * they will be sent as a single shot.
+ */
+int rpmh_write_async(const struct device *dev, enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 n)
+{
+ struct rpmh_request *rpm_msg;
+ int ret;
+
+ rpm_msg = kzalloc(sizeof(*rpm_msg), GFP_ATOMIC);
+ if (!rpm_msg)
+ return -ENOMEM;
+ rpm_msg->needs_free = true;
+
+ ret = __fill_rpmh_msg(rpm_msg, state, cmd, n);
+ if (ret) {
+ kfree(rpm_msg);
+ return ret;
+ }
+
+ return __rpmh_write(dev, state, rpm_msg);
+}
+EXPORT_SYMBOL(rpmh_write_async);
+
+/**
+ * rpmh_write: Write a set of RPMH commands and block until response
+ *
+ * @dev: The device making the request
+ * @state: Active/sleep set
+ * @cmd: The payload data
+ * @n: The number of elements in @cmd
+ *
+ * May sleep. Do not call from atomic contexts.
+ */
+int rpmh_write(const struct device *dev, enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 n)
+{
+ DECLARE_COMPLETION_ONSTACK(compl);
+ DEFINE_RPMH_MSG_ONSTACK(dev, state, &compl, rpm_msg);
+ int ret;
+
+ ret = __fill_rpmh_msg(&rpm_msg, state, cmd, n);
+ if (ret)
+ return ret;
+
+ ret = __rpmh_write(dev, state, &rpm_msg);
+ if (ret)
+ return ret;
+
+ ret = wait_for_completion_timeout(&compl, RPMH_TIMEOUT_MS);
+ WARN_ON(!ret);
+ return (ret > 0) ? 0 : -ETIMEDOUT;
+}
+EXPORT_SYMBOL(rpmh_write);
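+
+/*
+ * Usage sketch (illustrative only, not part of the original file): a client
+ * device hanging off the RSC node could vote for a resource roughly like
+ * this, where "addr" would normally come from cmd_db_read_addr() and "val"
+ * is a made-up level:
+ *
+ *	struct tcs_cmd cmd = { .addr = addr, .data = val };
+ *	int ret = rpmh_write(dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
+ */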
+
+static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrlr->cache_lock, flags);
+ list_add_tail(&req->list, &ctrlr->batch_cache);
+ ctrlr->dirty = true;
+ spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+}
+
+static int flush_batch(struct rpmh_ctrlr *ctrlr)
+{
+ struct batch_cache_req *req;
+ const struct rpmh_request *rpm_msg;
+ int ret = 0;
+ int i;
+
+ /* Send Sleep/Wake requests to the controller, expect no response */
+ list_for_each_entry(req, &ctrlr->batch_cache, list) {
+ for (i = 0; i < req->count; i++) {
+ rpm_msg = req->rpm_msgs + i;
+ ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
+ &rpm_msg->msg);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * rpmh_write_batch: Write multiple sets of RPMH commands and wait for the
+ * batch to finish.
+ *
+ * @dev: the device making the request
+ * @state: Active/sleep set
+ * @cmd: The payload data
+ * @n: The array of count of elements in each batch, 0 terminated.
+ *
+ * Write a set of batches of RPMH commands. If the request state is
+ * ACTIVE_ONLY, the requests are treated as completion requests and sent to
+ * the controller immediately; the function waits until all the commands are
+ * complete. If the request was for the SLEEP or WAKE_ONLY state, the batches
+ * are cached and written to the controller when the sleep/wake sets are
+ * flushed; they are fire-n-forget and no ack is expected.
+ *
+ * May sleep. Do not call from atomic contexts for ACTIVE_ONLY requests.
+ */
+int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 *n)
+{
+ struct batch_cache_req *req;
+ struct rpmh_request *rpm_msgs;
+ struct completion *compls;
+ struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+ unsigned long time_left;
+ int count = 0;
+ int ret, i;
+ void *ptr;
+
+ if (!cmd || !n)
+ return -EINVAL;
+
+ while (n[count] > 0)
+ count++;
+ if (!count)
+ return -EINVAL;
+
+ ptr = kzalloc(sizeof(*req) +
+ count * (sizeof(req->rpm_msgs[0]) + sizeof(*compls)),
+ GFP_ATOMIC);
+ if (!ptr)
+ return -ENOMEM;
+
+ req = ptr;
+ compls = ptr + sizeof(*req) + count * sizeof(*rpm_msgs);
+
+ req->count = count;
+ rpm_msgs = req->rpm_msgs;
+
+ for (i = 0; i < count; i++) {
+ __fill_rpmh_msg(rpm_msgs + i, state, cmd, n[i]);
+ cmd += n[i];
+ }
+
+ if (state != RPMH_ACTIVE_ONLY_STATE) {
+ cache_batch(ctrlr, req);
+ return 0;
+ }
+
+ for (i = 0; i < count; i++) {
+ struct completion *compl = &compls[i];
+
+ init_completion(compl);
+ rpm_msgs[i].completion = compl;
+ ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
+ if (ret) {
+ pr_err("Error(%d) sending RPMH message addr=%#x\n",
+ ret, rpm_msgs[i].msg.cmds[0].addr);
+ break;
+ }
+ }
+
+ time_left = RPMH_TIMEOUT_MS;
+ while (i--) {
+ time_left = wait_for_completion_timeout(&compls[i], time_left);
+ if (!time_left) {
+ /*
+ * Better hope they never finish because they'll signal
+ * the completion that we're going to free once
+ * we've returned from this function.
+ */
+ WARN_ON(1);
+ ret = -ETIMEDOUT;
+ goto exit;
+ }
+ }
+
+exit:
+ kfree(ptr);
+
+ return ret;
+}
+EXPORT_SYMBOL(rpmh_write_batch);
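+
+/*
+ * Usage sketch (illustrative only, not part of the original file): the count
+ * array passed to rpmh_write_batch() is zero-terminated and describes how the
+ * command array is split into batches. For instance, five prepared tcs_cmd
+ * entries could be sent as one batch of two followed by one batch of three:
+ *
+ *	u32 n[] = { 2, 3, 0 };
+ *
+ *	ret = rpmh_write_batch(dev, RPMH_WAKE_ONLY_STATE, cmds, n);
+ */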
+
+static int is_req_valid(struct cache_req *req)
+{
+ return (req->sleep_val != UINT_MAX &&
+ req->wake_val != UINT_MAX &&
+ req->sleep_val != req->wake_val);
+}
+
+static int send_single(struct rpmh_ctrlr *ctrlr, enum rpmh_state state,
+ u32 addr, u32 data)
+{
+ DEFINE_RPMH_MSG_ONSTACK(NULL, state, NULL, rpm_msg);
+
+ /* Wake sets are always complete and sleep sets are not */
+ rpm_msg.msg.wait_for_compl = (state == RPMH_WAKE_ONLY_STATE);
+ rpm_msg.cmd[0].addr = addr;
+ rpm_msg.cmd[0].data = data;
+ rpm_msg.msg.num_cmds = 1;
+
+ return rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr), &rpm_msg.msg);
+}
+
+/**
+ * rpmh_flush() - Flushes the buffered sleep and wake sets to TCSes
+ *
+ * @ctrlr: Controller making request to flush cached data
+ *
+ * Return:
+ * * 0 - Success
+ * * Error code - Otherwise
+ */
+int rpmh_flush(struct rpmh_ctrlr *ctrlr)
+{
+ struct cache_req *p;
+ int ret = 0;
+
+ lockdep_assert_irqs_disabled();
+
+ /*
+ * Currently rpmh_flush() is only called when we think we're running
+ * on the last processor. If the lock is busy it means another
+ * processor is up and it's better to abort than spin.
+ */
+ if (!spin_trylock(&ctrlr->cache_lock))
+ return -EBUSY;
+
+ if (!ctrlr->dirty) {
+ pr_debug("Skipping flush, TCS has latest data.\n");
+ goto write_next_wakeup;
+ }
+
+ /* Invalidate the TCSes first to avoid stale data */
+ rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
+
+ /* First flush the cached batch requests */
+ ret = flush_batch(ctrlr);
+ if (ret)
+ goto exit;
+
+ list_for_each_entry(p, &ctrlr->cache, list) {
+ if (!is_req_valid(p)) {
+ pr_debug("%s: skipping RPMH req: a:%#x s:%#x w:%#x",
+ __func__, p->addr, p->sleep_val, p->wake_val);
+ continue;
+ }
+ ret = send_single(ctrlr, RPMH_SLEEP_STATE, p->addr,
+ p->sleep_val);
+ if (ret)
+ goto exit;
+ ret = send_single(ctrlr, RPMH_WAKE_ONLY_STATE, p->addr,
+ p->wake_val);
+ if (ret)
+ goto exit;
+ }
+
+ ctrlr->dirty = false;
+
+write_next_wakeup:
+ rpmh_rsc_write_next_wakeup(ctrlr_to_drv(ctrlr));
+exit:
+ spin_unlock(&ctrlr->cache_lock);
+ return ret;
+}
+
+/**
+ * rpmh_invalidate: Invalidate sleep and wake sets in batch_cache
+ *
+ * @dev: The device making the request
+ *
+ * Invalidate the sleep and wake values in batch_cache.
+ */
+void rpmh_invalidate(const struct device *dev)
+{
+ struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+ struct batch_cache_req *req, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrlr->cache_lock, flags);
+ list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
+ kfree(req);
+ INIT_LIST_HEAD(&ctrlr->batch_cache);
+ ctrlr->dirty = true;
+ spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+}
+EXPORT_SYMBOL(rpmh_invalidate);
diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c
new file mode 100644
index 0000000000..f9fd617711
--- /dev/null
+++ b/drivers/soc/qcom/smd-rpm.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+
+#include <linux/rpmsg.h>
+#include <linux/soc/qcom/smd-rpm.h>
+
+#define RPM_REQUEST_TIMEOUT (5 * HZ)
+
+/**
+ * struct qcom_smd_rpm - state of the rpm device driver
+ * @rpm_channel: reference to the smd channel
+ * @dev: rpm device
+ * @ack: completion for acks
+ * @lock: mutual exclusion around the send/complete pair
+ * @ack_status: result of the rpm request
+ */
+struct qcom_smd_rpm {
+ struct rpmsg_endpoint *rpm_channel;
+ struct device *dev;
+
+ struct completion ack;
+ struct mutex lock;
+ int ack_status;
+};
+
+/**
+ * struct qcom_rpm_header - header for all rpm requests and responses
+ * @service_type: identifier of the service
+ * @length: length of the payload
+ */
+struct qcom_rpm_header {
+ __le32 service_type;
+ __le32 length;
+};
+
+/**
+ * struct qcom_rpm_request - request message to the rpm
+ * @msg_id: identifier of the outgoing message
+ * @flags: active/sleep state flags
+ * @type: resource type
+ * @id: resource id
+ * @data_len: length of the payload following this header
+ */
+struct qcom_rpm_request {
+ __le32 msg_id;
+ __le32 flags;
+ __le32 type;
+ __le32 id;
+ __le32 data_len;
+};
+
+/**
+ * struct qcom_rpm_message - response message from the rpm
+ * @msg_type: indicator of the type of message
+ * @length: the size of this message, including the message header
+ * @msg_id: message id
+ * @message: textual message from the rpm
+ *
+ * Multiple of these messages can be stacked in an rpm message.
+ */
+struct qcom_rpm_message {
+ __le32 msg_type;
+ __le32 length;
+ union {
+ __le32 msg_id;
+ DECLARE_FLEX_ARRAY(u8, message);
+ };
+};
+
+#define RPM_SERVICE_TYPE_REQUEST 0x00716572 /* "req\0" */
+
+#define RPM_MSG_TYPE_ERR 0x00727265 /* "err\0" */
+#define RPM_MSG_TYPE_MSG_ID 0x2367736d /* "msg#" */
+
+/**
+ * qcom_rpm_smd_write - write @buf to @type:@id
+ * @rpm: rpm handle
+ * @state: active/sleep state flags
+ * @type: resource type
+ * @id: resource identifier
+ * @buf: the data to be written
+ * @count: number of bytes in @buf
+ */
+int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
+ int state,
+ u32 type, u32 id,
+ void *buf,
+ size_t count)
+{
+ static unsigned msg_id = 1;
+ int left;
+ int ret;
+ struct {
+ struct qcom_rpm_header hdr;
+ struct qcom_rpm_request req;
+ u8 payload[];
+ } *pkt;
+ size_t size = sizeof(*pkt) + count;
+
+ /* SMD packets to the RPM must be smaller than 256 bytes */
+ if (WARN_ON(size >= 256))
+ return -EINVAL;
+
+ pkt = kmalloc(size, GFP_ATOMIC);
+ if (!pkt)
+ return -ENOMEM;
+
+ mutex_lock(&rpm->lock);
+
+ pkt->hdr.service_type = cpu_to_le32(RPM_SERVICE_TYPE_REQUEST);
+ pkt->hdr.length = cpu_to_le32(sizeof(struct qcom_rpm_request) + count);
+
+ pkt->req.msg_id = cpu_to_le32(msg_id++);
+ pkt->req.flags = cpu_to_le32(state);
+ pkt->req.type = cpu_to_le32(type);
+ pkt->req.id = cpu_to_le32(id);
+ pkt->req.data_len = cpu_to_le32(count);
+ memcpy(pkt->payload, buf, count);
+
+ ret = rpmsg_send(rpm->rpm_channel, pkt, size);
+ if (ret)
+ goto out;
+
+ left = wait_for_completion_timeout(&rpm->ack, RPM_REQUEST_TIMEOUT);
+ if (!left)
+ ret = -ETIMEDOUT;
+ else
+ ret = rpm->ack_status;
+
+out:
+ kfree(pkt);
+ mutex_unlock(&rpm->lock);
+ return ret;
+}
+EXPORT_SYMBOL(qcom_rpm_smd_write);
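+
+/*
+ * Usage sketch (illustrative only, not part of the original file): a child
+ * device populated below this node (e.g. an RPM-backed regulator) would
+ * typically send a request along these lines, assuming the usual
+ * QCOM_SMD_RPM_* resource macros from <linux/soc/qcom/smd-rpm.h>; the payload
+ * layout is defined by the firmware interface of the particular resource:
+ *
+ *	__le32 enable = cpu_to_le32(1);
+ *	int ret = qcom_rpm_smd_write(rpm, QCOM_SMD_RPM_ACTIVE_STATE,
+ *				     QCOM_SMD_RPM_LDOA, 3, &enable,
+ *				     sizeof(enable));
+ */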
+
+static int qcom_smd_rpm_callback(struct rpmsg_device *rpdev,
+ void *data,
+ int count,
+ void *priv,
+ u32 addr)
+{
+ const struct qcom_rpm_header *hdr = data;
+ size_t hdr_length = le32_to_cpu(hdr->length);
+ const struct qcom_rpm_message *msg;
+ struct qcom_smd_rpm *rpm = dev_get_drvdata(&rpdev->dev);
+ const u8 *buf = data + sizeof(struct qcom_rpm_header);
+ const u8 *end = buf + hdr_length;
+ char msgbuf[32];
+ int status = 0;
+ u32 len, msg_length;
+
+ if (le32_to_cpu(hdr->service_type) != RPM_SERVICE_TYPE_REQUEST ||
+ hdr_length < sizeof(struct qcom_rpm_message)) {
+ dev_err(rpm->dev, "invalid request\n");
+ return 0;
+ }
+
+ while (buf < end) {
+ msg = (struct qcom_rpm_message *)buf;
+ msg_length = le32_to_cpu(msg->length);
+ switch (le32_to_cpu(msg->msg_type)) {
+ case RPM_MSG_TYPE_MSG_ID:
+ break;
+ case RPM_MSG_TYPE_ERR:
+ len = min_t(u32, ALIGN(msg_length, 4), sizeof(msgbuf));
+ memcpy_fromio(msgbuf, msg->message, len);
+ msgbuf[len - 1] = 0;
+
+ if (!strcmp(msgbuf, "resource does not exist"))
+ status = -ENXIO;
+ else
+ status = -EINVAL;
+ break;
+ }
+
+ buf = PTR_ALIGN(buf + 2 * sizeof(u32) + msg_length, 4);
+ }
+
+ rpm->ack_status = status;
+ complete(&rpm->ack);
+ return 0;
+}
+
+static int qcom_smd_rpm_probe(struct rpmsg_device *rpdev)
+{
+ struct qcom_smd_rpm *rpm;
+
+ if (!rpdev->dev.of_node)
+ return -EINVAL;
+
+ rpm = devm_kzalloc(&rpdev->dev, sizeof(*rpm), GFP_KERNEL);
+ if (!rpm)
+ return -ENOMEM;
+
+ mutex_init(&rpm->lock);
+ init_completion(&rpm->ack);
+
+ rpm->dev = &rpdev->dev;
+ rpm->rpm_channel = rpdev->ept;
+ dev_set_drvdata(&rpdev->dev, rpm);
+
+ return of_platform_populate(rpdev->dev.of_node, NULL, NULL, &rpdev->dev);
+}
+
+static void qcom_smd_rpm_remove(struct rpmsg_device *rpdev)
+{
+ of_platform_depopulate(&rpdev->dev);
+}
+
+static const struct rpmsg_device_id qcom_smd_rpm_id_table[] = {
+ { .name = "rpm_requests", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(rpmsg, qcom_smd_rpm_id_table);
+
+static struct rpmsg_driver qcom_smd_rpm_driver = {
+ .probe = qcom_smd_rpm_probe,
+ .remove = qcom_smd_rpm_remove,
+ .callback = qcom_smd_rpm_callback,
+ .id_table = qcom_smd_rpm_id_table,
+ .drv.name = "qcom_smd_rpm",
+};
+
+static int __init qcom_smd_rpm_init(void)
+{
+ return register_rpmsg_driver(&qcom_smd_rpm_driver);
+}
+arch_initcall(qcom_smd_rpm_init);
+
+static void __exit qcom_smd_rpm_exit(void)
+{
+ unregister_rpmsg_driver(&qcom_smd_rpm_driver);
+}
+module_exit(qcom_smd_rpm_exit);
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
+MODULE_DESCRIPTION("Qualcomm SMD backed RPM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
new file mode 100644
index 0000000000..d4a89d2bb4
--- /dev/null
+++ b/drivers/soc/qcom/smem.c
@@ -0,0 +1,1230 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/hwspinlock.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/soc/qcom/socinfo.h>
+
+/*
+ * The Qualcomm shared memory system is an allocate-only heap structure that
+ * consists of one or more memory areas that can be accessed by the processors
+ * in the SoC.
+ *
+ * All systems contain a global heap, accessible by all processors in the SoC,
+ * with a table of contents data structure (@smem_header) at the beginning of
+ * the main shared memory block.
+ *
+ * The global header contains meta data for allocations as well as a fixed list
+ * of 512 entries (@smem_global_entry) that can be initialized to reference
+ * parts of the shared memory space.
+ *
+ *
+ * In addition to this global heap a set of "private" heaps can be set up at
+ * boot time with access restrictions so that only certain processor pairs can
+ * access the data.
+ *
+ * These partitions are referenced from an optional partition table
+ * (@smem_ptable), that is found 4kB from the end of the main smem region. The
+ * partition table entries (@smem_ptable_entry) lists the involved processors
+ * (or hosts) and their location in the main shared memory region.
+ *
+ * Each partition starts with a header (@smem_partition_header) that identifies
+ * the partition and holds properties for the two internal memory regions. The
+ * two regions are cached and non-cached memory respectively. Each region
+ * contains a linked list of allocation headers (@smem_private_entry) followed by
+ * their data.
+ *
+ * Items in the non-cached region are allocated from the start of the partition
+ * while items in the cached region are allocated from the end. The free area
+ * is hence the region between the cached and non-cached offsets. The header of
+ * cached items comes after the data.
+ *
+ * Version 12 (SMEM_GLOBAL_PART_VERSION) changes the item alloc/get procedure
+ * for the global heap. A new global partition is created from the global heap
+ * region with partition type (SMEM_GLOBAL_HOST) and the max smem item count is
+ * set by the bootloader.
+ *
+ * To synchronize allocations in the shared memory heaps a remote spinlock must
+ * be held - currently lock number 3 of the sfpb or tcsr is used for this on all
+ * platforms.
+ *
+ */
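+
+/*
+ * Illustrative layout of one private partition, summarizing the description
+ * above (a sketch, not part of the upstream sources):
+ *
+ *	+---------------------------+  partition start
+ *	| smem_partition_header     |
+ *	+---------------------------+
+ *	| uncached entry hdr + data |  allocated from the start
+ *	| ...                       |
+ *	+---------------------------+  <- offset_free_uncached
+ *	|         free space        |
+ *	+---------------------------+  <- offset_free_cached
+ *	| ...                       |
+ *	| cached data, then its hdr |  allocated from the end
+ *	+---------------------------+  partition end
+ */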
+
+/*
+ * The version member of the smem header contains an array of versions for the
+ * various software components in the SoC. We verify that the boot loader
+ * version is a valid version as a sanity check.
+ */
+#define SMEM_MASTER_SBL_VERSION_INDEX 7
+#define SMEM_GLOBAL_HEAP_VERSION 11
+#define SMEM_GLOBAL_PART_VERSION 12
+
+/*
+ * The first 8 items are only to be allocated by the boot loader while
+ * initializing the heap.
+ */
+#define SMEM_ITEM_LAST_FIXED 8
+
+/* Highest accepted item number, for both global and private heaps */
+#define SMEM_ITEM_COUNT 512
+
+/* Processor/host identifier for the application processor */
+#define SMEM_HOST_APPS 0
+
+/* Processor/host identifier for the global partition */
+#define SMEM_GLOBAL_HOST 0xfffe
+
+/* Max number of processors/hosts in a system */
+#define SMEM_HOST_COUNT 20
+
+/**
+ * struct smem_proc_comm - proc_comm communication struct (legacy)
+ * @command: current command to be executed
+ * @status: status of the currently requested command
+ * @params: parameters to the command
+ */
+struct smem_proc_comm {
+ __le32 command;
+ __le32 status;
+ __le32 params[2];
+};
+
+/**
+ * struct smem_global_entry - entry to reference smem items on the heap
+ * @allocated: boolean to indicate if this entry is used
+ * @offset: offset to the allocated space
+ * @size: size of the allocated space, 8 byte aligned
+ * @aux_base: base address for the memory region used by this unit, or 0 for
+ * the default region. bits 0,1 are reserved
+ */
+struct smem_global_entry {
+ __le32 allocated;
+ __le32 offset;
+ __le32 size;
+ __le32 aux_base; /* bits 1:0 reserved */
+};
+#define AUX_BASE_MASK 0xfffffffc
+
+/**
+ * struct smem_header - header found in beginning of primary smem region
+ * @proc_comm: proc_comm communication interface (legacy)
+ * @version: array of versions for the various subsystems
+ * @initialized: boolean to indicate that smem is initialized
+ * @free_offset: index of the first unallocated byte in smem
+ * @available: number of bytes available for allocation
+ * @reserved: reserved field, must be 0
+ * @toc: array of references to items
+ */
+struct smem_header {
+ struct smem_proc_comm proc_comm[4];
+ __le32 version[32];
+ __le32 initialized;
+ __le32 free_offset;
+ __le32 available;
+ __le32 reserved;
+ struct smem_global_entry toc[SMEM_ITEM_COUNT];
+};
+
+/**
+ * struct smem_ptable_entry - one entry in the @smem_ptable list
+ * @offset: offset, within the main shared memory region, of the partition
+ * @size: size of the partition
+ * @flags: flags for the partition (currently unused)
+ * @host0: first processor/host with access to this partition
+ * @host1: second processor/host with access to this partition
+ * @cacheline: alignment for "cached" entries
+ * @reserved: reserved entries for later use
+ */
+struct smem_ptable_entry {
+ __le32 offset;
+ __le32 size;
+ __le32 flags;
+ __le16 host0;
+ __le16 host1;
+ __le32 cacheline;
+ __le32 reserved[7];
+};
+
+/**
+ * struct smem_ptable - partition table for the private partitions
+ * @magic: magic number, must be SMEM_PTABLE_MAGIC
+ * @version: version of the partition table
+ * @num_entries: number of partitions in the table
+ * @reserved: for now reserved entries
+ * @entry: list of @smem_ptable_entry for the @num_entries partitions
+ */
+struct smem_ptable {
+ u8 magic[4];
+ __le32 version;
+ __le32 num_entries;
+ __le32 reserved[5];
+ struct smem_ptable_entry entry[];
+};
+
+static const u8 SMEM_PTABLE_MAGIC[] = { 0x24, 0x54, 0x4f, 0x43 }; /* "$TOC" */
+
+/**
+ * struct smem_partition_header - header of the partitions
+ * @magic: magic number, must be SMEM_PART_MAGIC
+ * @host0: first processor/host with access to this partition
+ * @host1: second processor/host with access to this partition
+ * @size: size of the partition
+ * @offset_free_uncached: offset to the first free byte of uncached memory in
+ * this partition
+ * @offset_free_cached: offset to the first free byte of cached memory in this
+ * partition
+ * @reserved: for now reserved entries
+ */
+struct smem_partition_header {
+ u8 magic[4];
+ __le16 host0;
+ __le16 host1;
+ __le32 size;
+ __le32 offset_free_uncached;
+ __le32 offset_free_cached;
+ __le32 reserved[3];
+};
+
+/**
+ * struct smem_partition - describes smem partition
+ * @virt_base: starting virtual address of partition
+ * @phys_base: starting physical address of partition
+ * @cacheline: alignment for "cached" entries
+ * @size: size of partition
+ */
+struct smem_partition {
+ void __iomem *virt_base;
+ phys_addr_t phys_base;
+ size_t cacheline;
+ size_t size;
+};
+
+static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 };
+
+/**
+ * struct smem_private_entry - header of each item in the private partition
+ * @canary: magic number, must be SMEM_PRIVATE_CANARY
+ * @item: identifying number of the smem item
+ * @size: size of the data, including padding bytes
+ * @padding_data: number of bytes of padding of data
+ * @padding_hdr: number of bytes of padding between the header and the data
+ * @reserved: for now reserved entry
+ */
+struct smem_private_entry {
+ u16 canary; /* bytes are the same so no swapping needed */
+ __le16 item;
+ __le32 size; /* includes padding bytes */
+ __le16 padding_data;
+ __le16 padding_hdr;
+ __le32 reserved;
+};
+#define SMEM_PRIVATE_CANARY 0xa5a5
+
+/**
+ * struct smem_info - smem region info located after the table of contents
+ * @magic: magic number, must be SMEM_INFO_MAGIC
+ * @size: size of the smem region
+ * @base_addr: base address of the smem region
+ * @reserved: for now reserved entry
+ * @num_items: highest accepted item number
+ */
+struct smem_info {
+ u8 magic[4];
+ __le32 size;
+ __le32 base_addr;
+ __le32 reserved;
+ __le16 num_items;
+};
+
+static const u8 SMEM_INFO_MAGIC[] = { 0x53, 0x49, 0x49, 0x49 }; /* SIII */
+
+/**
+ * struct smem_region - representation of a chunk of memory used for smem
+ * @aux_base: identifier of aux_mem base
+ * @virt_base: virtual base address of memory with this aux_mem identifier
+ * @size: size of the memory region
+ */
+struct smem_region {
+ phys_addr_t aux_base;
+ void __iomem *virt_base;
+ size_t size;
+};
+
+/**
+ * struct qcom_smem - device data for the smem device
+ * @dev: device pointer
+ * @hwlock: reference to a hwspinlock
+ * @ptable: virtual base of partition table
+ * @global_partition: describes the global partition, when in use
+ * @partitions: list of partitions of current processor/host
+ * @item_count: max accepted item number
+ * @socinfo: platform device pointer
+ * @num_regions: number of @regions
+ * @regions: list of the memory regions defining the shared memory
+ */
+struct qcom_smem {
+ struct device *dev;
+
+ struct hwspinlock *hwlock;
+
+ u32 item_count;
+ struct platform_device *socinfo;
+ struct smem_ptable *ptable;
+ struct smem_partition global_partition;
+ struct smem_partition partitions[SMEM_HOST_COUNT];
+
+ unsigned num_regions;
+ struct smem_region regions[];
+};
+
+static void *
+phdr_to_last_uncached_entry(struct smem_partition_header *phdr)
+{
+ void *p = phdr;
+
+ return p + le32_to_cpu(phdr->offset_free_uncached);
+}
+
+static struct smem_private_entry *
+phdr_to_first_cached_entry(struct smem_partition_header *phdr,
+ size_t cacheline)
+{
+ void *p = phdr;
+ struct smem_private_entry *e;
+
+ return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*e), cacheline);
+}
+
+static void *
+phdr_to_last_cached_entry(struct smem_partition_header *phdr)
+{
+ void *p = phdr;
+
+ return p + le32_to_cpu(phdr->offset_free_cached);
+}
+
+static struct smem_private_entry *
+phdr_to_first_uncached_entry(struct smem_partition_header *phdr)
+{
+ void *p = phdr;
+
+ return p + sizeof(*phdr);
+}
+
+static struct smem_private_entry *
+uncached_entry_next(struct smem_private_entry *e)
+{
+ void *p = e;
+
+ return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) +
+ le32_to_cpu(e->size);
+}
+
+static struct smem_private_entry *
+cached_entry_next(struct smem_private_entry *e, size_t cacheline)
+{
+ void *p = e;
+
+ return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline);
+}
+
+static void *uncached_entry_to_item(struct smem_private_entry *e)
+{
+ void *p = e;
+
+ return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
+}
+
+static void *cached_entry_to_item(struct smem_private_entry *e)
+{
+ void *p = e;
+
+ return p - le32_to_cpu(e->size);
+}
+
+/* Pointer to the one and only smem handle */
+static struct qcom_smem *__smem;
+
+/* Timeout (ms) for the trylock of remote spinlocks */
+#define HWSPINLOCK_TIMEOUT 1000
+
+/**
+ * qcom_smem_is_available() - Check if SMEM is available
+ *
+ * Return: true if SMEM is available, false otherwise.
+ */
+bool qcom_smem_is_available(void)
+{
+ return !!__smem;
+}
+EXPORT_SYMBOL(qcom_smem_is_available);
+
+static int qcom_smem_alloc_private(struct qcom_smem *smem,
+ struct smem_partition *part,
+ unsigned item,
+ size_t size)
+{
+ struct smem_private_entry *hdr, *end;
+ struct smem_partition_header *phdr;
+ size_t alloc_size;
+ void *cached;
+ void *p_end;
+
+ phdr = (struct smem_partition_header __force *)part->virt_base;
+ p_end = (void *)phdr + part->size;
+
+ hdr = phdr_to_first_uncached_entry(phdr);
+ end = phdr_to_last_uncached_entry(phdr);
+ cached = phdr_to_last_cached_entry(phdr);
+
+ if (WARN_ON((void *)end > p_end || cached > p_end))
+ return -EINVAL;
+
+ while (hdr < end) {
+ if (hdr->canary != SMEM_PRIVATE_CANARY)
+ goto bad_canary;
+ if (le16_to_cpu(hdr->item) == item)
+ return -EEXIST;
+
+ hdr = uncached_entry_next(hdr);
+ }
+
+ if (WARN_ON((void *)hdr > p_end))
+ return -EINVAL;
+
+ /* Check that we don't grow into the cached region */
+ alloc_size = sizeof(*hdr) + ALIGN(size, 8);
+ if ((void *)hdr + alloc_size > cached) {
+ dev_err(smem->dev, "Out of memory\n");
+ return -ENOSPC;
+ }
+
+ hdr->canary = SMEM_PRIVATE_CANARY;
+ hdr->item = cpu_to_le16(item);
+ hdr->size = cpu_to_le32(ALIGN(size, 8));
+ hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size);
+ hdr->padding_hdr = 0;
+
+ /*
+ * Ensure the header is written before we advance the free offset, so
+	 * that remote processors that do not take the remote spinlock still
+	 * get a consistent view of the linked list.
+ */
+ wmb();
+ le32_add_cpu(&phdr->offset_free_uncached, alloc_size);
+
+ return 0;
+bad_canary:
+ dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
+ le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));
+
+ return -EINVAL;
+}
+
+static int qcom_smem_alloc_global(struct qcom_smem *smem,
+ unsigned item,
+ size_t size)
+{
+ struct smem_global_entry *entry;
+ struct smem_header *header;
+
+ header = smem->regions[0].virt_base;
+ entry = &header->toc[item];
+ if (entry->allocated)
+ return -EEXIST;
+
+ size = ALIGN(size, 8);
+ if (WARN_ON(size > le32_to_cpu(header->available)))
+ return -ENOMEM;
+
+ entry->offset = header->free_offset;
+ entry->size = cpu_to_le32(size);
+
+ /*
+ * Ensure the header is consistent before we mark the item allocated,
+ * so that remote processors will get a consistent view of the item
+ * even though they do not take the spinlock on read.
+ */
+ wmb();
+ entry->allocated = cpu_to_le32(1);
+
+ le32_add_cpu(&header->free_offset, size);
+ le32_add_cpu(&header->available, -size);
+
+ return 0;
+}
+
+/**
+ * qcom_smem_alloc() - allocate space for a smem item
+ * @host: remote processor id, or -1
+ * @item: smem item handle
+ * @size: number of bytes to be allocated
+ *
+ * Allocate space for a given smem item of size @size, given that the item is
+ * not yet allocated.
+ */
+int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
+{
+ struct smem_partition *part;
+ unsigned long flags;
+ int ret;
+
+ if (!__smem)
+ return -EPROBE_DEFER;
+
+ if (item < SMEM_ITEM_LAST_FIXED) {
+ dev_err(__smem->dev,
+ "Rejecting allocation of static entry %d\n", item);
+ return -EINVAL;
+ }
+
+ if (WARN_ON(item >= __smem->item_count))
+ return -EINVAL;
+
+ ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
+ HWSPINLOCK_TIMEOUT,
+ &flags);
+ if (ret)
+ return ret;
+
+ if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
+ part = &__smem->partitions[host];
+ ret = qcom_smem_alloc_private(__smem, part, item, size);
+ } else if (__smem->global_partition.virt_base) {
+ part = &__smem->global_partition;
+ ret = qcom_smem_alloc_private(__smem, part, item, size);
+ } else {
+ ret = qcom_smem_alloc_global(__smem, item, size);
+ }
+
+ hwspin_unlock_irqrestore(__smem->hwlock, &flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_smem_alloc);
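+
+/*
+ * Illustrative sketch (not part of the upstream sources): clients such as the
+ * smp2p driver allocate their outbound items roughly like this, treating
+ * -EEXIST as success since the item may already exist; "remote_pid",
+ * "smem_id" and "struct my_item" are placeholders:
+ *
+ *	ret = qcom_smem_alloc(remote_pid, smem_id, sizeof(struct my_item));
+ *	if (ret < 0 && ret != -EEXIST)
+ *		return ret;
+ */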
+
+static void *qcom_smem_get_global(struct qcom_smem *smem,
+ unsigned item,
+ size_t *size)
+{
+ struct smem_header *header;
+ struct smem_region *region;
+ struct smem_global_entry *entry;
+ u64 entry_offset;
+ u32 e_size;
+ u32 aux_base;
+ unsigned i;
+
+ header = smem->regions[0].virt_base;
+ entry = &header->toc[item];
+ if (!entry->allocated)
+ return ERR_PTR(-ENXIO);
+
+ aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK;
+
+ for (i = 0; i < smem->num_regions; i++) {
+ region = &smem->regions[i];
+
+ if ((u32)region->aux_base == aux_base || !aux_base) {
+ e_size = le32_to_cpu(entry->size);
+ entry_offset = le32_to_cpu(entry->offset);
+
+ if (WARN_ON(e_size + entry_offset > region->size))
+ return ERR_PTR(-EINVAL);
+
+ if (size != NULL)
+ *size = e_size;
+
+ return region->virt_base + entry_offset;
+ }
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+
+static void *qcom_smem_get_private(struct qcom_smem *smem,
+ struct smem_partition *part,
+ unsigned item,
+ size_t *size)
+{
+ struct smem_private_entry *e, *end;
+ struct smem_partition_header *phdr;
+ void *item_ptr, *p_end;
+ u32 padding_data;
+ u32 e_size;
+
+ phdr = (struct smem_partition_header __force *)part->virt_base;
+ p_end = (void *)phdr + part->size;
+
+ e = phdr_to_first_uncached_entry(phdr);
+ end = phdr_to_last_uncached_entry(phdr);
+
+ while (e < end) {
+ if (e->canary != SMEM_PRIVATE_CANARY)
+ goto invalid_canary;
+
+ if (le16_to_cpu(e->item) == item) {
+ if (size != NULL) {
+ e_size = le32_to_cpu(e->size);
+ padding_data = le16_to_cpu(e->padding_data);
+
+ if (WARN_ON(e_size > part->size || padding_data > e_size))
+ return ERR_PTR(-EINVAL);
+
+ *size = e_size - padding_data;
+ }
+
+ item_ptr = uncached_entry_to_item(e);
+ if (WARN_ON(item_ptr > p_end))
+ return ERR_PTR(-EINVAL);
+
+ return item_ptr;
+ }
+
+ e = uncached_entry_next(e);
+ }
+
+ if (WARN_ON((void *)e > p_end))
+ return ERR_PTR(-EINVAL);
+
+ /* Item was not found in the uncached list, search the cached list */
+
+ e = phdr_to_first_cached_entry(phdr, part->cacheline);
+ end = phdr_to_last_cached_entry(phdr);
+
+ if (WARN_ON((void *)e < (void *)phdr || (void *)end > p_end))
+ return ERR_PTR(-EINVAL);
+
+ while (e > end) {
+ if (e->canary != SMEM_PRIVATE_CANARY)
+ goto invalid_canary;
+
+ if (le16_to_cpu(e->item) == item) {
+ if (size != NULL) {
+ e_size = le32_to_cpu(e->size);
+ padding_data = le16_to_cpu(e->padding_data);
+
+ if (WARN_ON(e_size > part->size || padding_data > e_size))
+ return ERR_PTR(-EINVAL);
+
+ *size = e_size - padding_data;
+ }
+
+ item_ptr = cached_entry_to_item(e);
+ if (WARN_ON(item_ptr < (void *)phdr))
+ return ERR_PTR(-EINVAL);
+
+ return item_ptr;
+ }
+
+ e = cached_entry_next(e, part->cacheline);
+ }
+
+ if (WARN_ON((void *)e < (void *)phdr))
+ return ERR_PTR(-EINVAL);
+
+ return ERR_PTR(-ENOENT);
+
+invalid_canary:
+ dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
+ le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));
+
+ return ERR_PTR(-EINVAL);
+}
+
+/**
+ * qcom_smem_get() - resolve ptr and size of a smem item
+ * @host: the remote processor, or -1
+ * @item: smem item handle
+ * @size: pointer to be filled out with size of the item
+ *
+ * Looks up smem item and returns pointer to it. Size of smem
+ * item is returned in @size.
+ */
+void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
+{
+ struct smem_partition *part;
+ unsigned long flags;
+ int ret;
+ void *ptr = ERR_PTR(-EPROBE_DEFER);
+
+ if (!__smem)
+ return ptr;
+
+ if (WARN_ON(item >= __smem->item_count))
+ return ERR_PTR(-EINVAL);
+
+ ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
+ HWSPINLOCK_TIMEOUT,
+ &flags);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
+ part = &__smem->partitions[host];
+ ptr = qcom_smem_get_private(__smem, part, item, size);
+ } else if (__smem->global_partition.virt_base) {
+ part = &__smem->global_partition;
+ ptr = qcom_smem_get_private(__smem, part, item, size);
+ } else {
+ ptr = qcom_smem_get_global(__smem, item, size);
+ }
+
+ hwspin_unlock_irqrestore(__smem->hwlock, &flags);
+
+ return ptr;
+
+}
+EXPORT_SYMBOL_GPL(qcom_smem_get);
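+
+/*
+ * Illustrative sketch (not part of the upstream sources): once allocated, the
+ * same item can be resolved into a pointer and size; the identifiers are
+ * placeholders:
+ *
+ *	size_t size;
+ *	void *ptr;
+ *
+ *	ptr = qcom_smem_get(remote_pid, smem_id, &size);
+ *	if (IS_ERR(ptr))
+ *		return PTR_ERR(ptr);
+ */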
+
+/**
+ * qcom_smem_get_free_space() - retrieve amount of free space in a partition
+ * @host: the remote processor identifying a partition, or -1
+ *
+ * To be used by smem clients as a quick way to determine if any new
+ * allocations have been made.
+ */
+int qcom_smem_get_free_space(unsigned host)
+{
+ struct smem_partition *part;
+ struct smem_partition_header *phdr;
+ struct smem_header *header;
+ unsigned ret;
+
+ if (!__smem)
+ return -EPROBE_DEFER;
+
+ if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
+ part = &__smem->partitions[host];
+ phdr = part->virt_base;
+ ret = le32_to_cpu(phdr->offset_free_cached) -
+ le32_to_cpu(phdr->offset_free_uncached);
+
+ if (ret > le32_to_cpu(part->size))
+ return -EINVAL;
+ } else if (__smem->global_partition.virt_base) {
+ part = &__smem->global_partition;
+ phdr = part->virt_base;
+ ret = le32_to_cpu(phdr->offset_free_cached) -
+ le32_to_cpu(phdr->offset_free_uncached);
+
+ if (ret > le32_to_cpu(part->size))
+ return -EINVAL;
+ } else {
+ header = __smem->regions[0].virt_base;
+ ret = le32_to_cpu(header->available);
+
+ if (ret > __smem->regions[0].size)
+ return -EINVAL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_smem_get_free_space);
+
+static bool addr_in_range(void __iomem *base, size_t size, void *addr)
+{
+ return base && ((void __iomem *)addr >= base && (void __iomem *)addr < base + size);
+}
+
+/**
+ * qcom_smem_virt_to_phys() - return the physical address associated
+ * with an smem item pointer (previously returned by qcom_smem_get())
+ * @p: the virtual address to convert
+ *
+ * Returns 0 if the pointer provided is not within any smem region.
+ */
+phys_addr_t qcom_smem_virt_to_phys(void *p)
+{
+ struct smem_partition *part;
+ struct smem_region *area;
+ u64 offset;
+ u32 i;
+
+ for (i = 0; i < SMEM_HOST_COUNT; i++) {
+ part = &__smem->partitions[i];
+
+ if (addr_in_range(part->virt_base, part->size, p)) {
+ offset = p - part->virt_base;
+
+ return (phys_addr_t)part->phys_base + offset;
+ }
+ }
+
+ part = &__smem->global_partition;
+
+ if (addr_in_range(part->virt_base, part->size, p)) {
+ offset = p - part->virt_base;
+
+ return (phys_addr_t)part->phys_base + offset;
+ }
+
+ for (i = 0; i < __smem->num_regions; i++) {
+ area = &__smem->regions[i];
+
+ if (addr_in_range(area->virt_base, area->size, p)) {
+ offset = p - area->virt_base;
+
+ return (phys_addr_t)area->aux_base + offset;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_smem_virt_to_phys);
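+
+/*
+ * Illustrative sketch (not part of the upstream sources): translating an item
+ * pointer returned by qcom_smem_get() into a physical address, for example to
+ * hand to a remote processor:
+ *
+ *	phys_addr_t addr = qcom_smem_virt_to_phys(ptr);
+ *	if (!addr)
+ *		return -EINVAL;
+ */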
+
+/**
+ * qcom_smem_get_soc_id() - return the SoC ID
+ * @id: On success, we return the SoC ID here.
+ *
+ * Look up SoC ID from HW/SW build ID and return it.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int qcom_smem_get_soc_id(u32 *id)
+{
+ struct socinfo *info;
+
+ info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_HW_SW_BUILD_ID, NULL);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+
+ *id = __le32_to_cpu(info->id);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_smem_get_soc_id);
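+
+/*
+ * Illustrative sketch (not part of the upstream sources):
+ *
+ *	u32 soc_id;
+ *
+ *	ret = qcom_smem_get_soc_id(&soc_id);
+ *	if (ret)
+ *		return ret;
+ */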
+
+static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
+{
+ struct smem_header *header;
+ __le32 *versions;
+
+ header = smem->regions[0].virt_base;
+ versions = header->version;
+
+ return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
+}
+
+static struct smem_ptable *qcom_smem_get_ptable(struct qcom_smem *smem)
+{
+ struct smem_ptable *ptable;
+ u32 version;
+
+ ptable = smem->ptable;
+ if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
+ return ERR_PTR(-ENOENT);
+
+ version = le32_to_cpu(ptable->version);
+ if (version != 1) {
+ dev_err(smem->dev,
+ "Unsupported partition header version %d\n", version);
+ return ERR_PTR(-EINVAL);
+ }
+ return ptable;
+}
+
+static u32 qcom_smem_get_item_count(struct qcom_smem *smem)
+{
+ struct smem_ptable *ptable;
+ struct smem_info *info;
+
+ ptable = qcom_smem_get_ptable(smem);
+ if (IS_ERR_OR_NULL(ptable))
+ return SMEM_ITEM_COUNT;
+
+ info = (struct smem_info *)&ptable->entry[ptable->num_entries];
+ if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic)))
+ return SMEM_ITEM_COUNT;
+
+ return le16_to_cpu(info->num_items);
+}
+
+/*
+ * Validate the partition header for a partition whose partition
+ * table entry is supplied. Returns a pointer to its header if
+ * valid, or a null pointer otherwise.
+ */
+static struct smem_partition_header *
+qcom_smem_partition_header(struct qcom_smem *smem,
+ struct smem_ptable_entry *entry, u16 host0, u16 host1)
+{
+ struct smem_partition_header *header;
+ u32 phys_addr;
+ u32 size;
+
+ phys_addr = smem->regions[0].aux_base + le32_to_cpu(entry->offset);
+ header = devm_ioremap_wc(smem->dev, phys_addr, le32_to_cpu(entry->size));
+
+ if (!header)
+ return NULL;
+
+ if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) {
+ dev_err(smem->dev, "bad partition magic %4ph\n", header->magic);
+ return NULL;
+ }
+
+ if (host0 != le16_to_cpu(header->host0)) {
+ dev_err(smem->dev, "bad host0 (%hu != %hu)\n",
+ host0, le16_to_cpu(header->host0));
+ return NULL;
+ }
+ if (host1 != le16_to_cpu(header->host1)) {
+ dev_err(smem->dev, "bad host1 (%hu != %hu)\n",
+ host1, le16_to_cpu(header->host1));
+ return NULL;
+ }
+
+ size = le32_to_cpu(header->size);
+ if (size != le32_to_cpu(entry->size)) {
+ dev_err(smem->dev, "bad partition size (%u != %u)\n",
+ size, le32_to_cpu(entry->size));
+ return NULL;
+ }
+
+ if (le32_to_cpu(header->offset_free_uncached) > size) {
+ dev_err(smem->dev, "bad partition free uncached (%u > %u)\n",
+ le32_to_cpu(header->offset_free_uncached), size);
+ return NULL;
+ }
+
+ return header;
+}
+
+static int qcom_smem_set_global_partition(struct qcom_smem *smem)
+{
+ struct smem_partition_header *header;
+ struct smem_ptable_entry *entry;
+ struct smem_ptable *ptable;
+ bool found = false;
+ int i;
+
+ if (smem->global_partition.virt_base) {
+ dev_err(smem->dev, "Already found the global partition\n");
+ return -EINVAL;
+ }
+
+ ptable = qcom_smem_get_ptable(smem);
+ if (IS_ERR(ptable))
+ return PTR_ERR(ptable);
+
+ for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
+ entry = &ptable->entry[i];
+ if (!le32_to_cpu(entry->offset))
+ continue;
+ if (!le32_to_cpu(entry->size))
+ continue;
+
+ if (le16_to_cpu(entry->host0) != SMEM_GLOBAL_HOST)
+ continue;
+
+ if (le16_to_cpu(entry->host1) == SMEM_GLOBAL_HOST) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ dev_err(smem->dev, "Missing entry for global partition\n");
+ return -EINVAL;
+ }
+
+ header = qcom_smem_partition_header(smem, entry,
+ SMEM_GLOBAL_HOST, SMEM_GLOBAL_HOST);
+ if (!header)
+ return -EINVAL;
+
+ smem->global_partition.virt_base = (void __iomem *)header;
+ smem->global_partition.phys_base = smem->regions[0].aux_base +
+ le32_to_cpu(entry->offset);
+ smem->global_partition.size = le32_to_cpu(entry->size);
+ smem->global_partition.cacheline = le32_to_cpu(entry->cacheline);
+
+ return 0;
+}
+
+static int
+qcom_smem_enumerate_partitions(struct qcom_smem *smem, u16 local_host)
+{
+ struct smem_partition_header *header;
+ struct smem_ptable_entry *entry;
+ struct smem_ptable *ptable;
+ u16 remote_host;
+ u16 host0, host1;
+ int i;
+
+ ptable = qcom_smem_get_ptable(smem);
+ if (IS_ERR(ptable))
+ return PTR_ERR(ptable);
+
+ for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
+ entry = &ptable->entry[i];
+ if (!le32_to_cpu(entry->offset))
+ continue;
+ if (!le32_to_cpu(entry->size))
+ continue;
+
+ host0 = le16_to_cpu(entry->host0);
+ host1 = le16_to_cpu(entry->host1);
+ if (host0 == local_host)
+ remote_host = host1;
+ else if (host1 == local_host)
+ remote_host = host0;
+ else
+ continue;
+
+ if (remote_host >= SMEM_HOST_COUNT) {
+ dev_err(smem->dev, "bad host %u\n", remote_host);
+ return -EINVAL;
+ }
+
+ if (smem->partitions[remote_host].virt_base) {
+ dev_err(smem->dev, "duplicate host %u\n", remote_host);
+ return -EINVAL;
+ }
+
+ header = qcom_smem_partition_header(smem, entry, host0, host1);
+ if (!header)
+ return -EINVAL;
+
+ smem->partitions[remote_host].virt_base = (void __iomem *)header;
+ smem->partitions[remote_host].phys_base = smem->regions[0].aux_base +
+ le32_to_cpu(entry->offset);
+ smem->partitions[remote_host].size = le32_to_cpu(entry->size);
+ smem->partitions[remote_host].cacheline = le32_to_cpu(entry->cacheline);
+ }
+
+ return 0;
+}
+
+static int qcom_smem_map_toc(struct qcom_smem *smem, struct smem_region *region)
+{
+ u32 ptable_start;
+
+ /* map starting 4K for smem header */
+ region->virt_base = devm_ioremap_wc(smem->dev, region->aux_base, SZ_4K);
+ ptable_start = region->aux_base + region->size - SZ_4K;
+ /* map last 4k for toc */
+ smem->ptable = devm_ioremap_wc(smem->dev, ptable_start, SZ_4K);
+
+ if (!region->virt_base || !smem->ptable)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int qcom_smem_map_global(struct qcom_smem *smem, u32 size)
+{
+ u32 phys_addr;
+
+ phys_addr = smem->regions[0].aux_base;
+
+ smem->regions[0].size = size;
+ smem->regions[0].virt_base = devm_ioremap_wc(smem->dev, phys_addr, size);
+
+ if (!smem->regions[0].virt_base)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int qcom_smem_resolve_mem(struct qcom_smem *smem, const char *name,
+ struct smem_region *region)
+{
+ struct device *dev = smem->dev;
+ struct device_node *np;
+ struct resource r;
+ int ret;
+
+ np = of_parse_phandle(dev->of_node, name, 0);
+ if (!np) {
+ dev_err(dev, "No %s specified\n", name);
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(np, 0, &r);
+ of_node_put(np);
+ if (ret)
+ return ret;
+
+ region->aux_base = r.start;
+ region->size = resource_size(&r);
+
+ return 0;
+}
+
+static int qcom_smem_probe(struct platform_device *pdev)
+{
+ struct smem_header *header;
+ struct reserved_mem *rmem;
+ struct qcom_smem *smem;
+ unsigned long flags;
+ int num_regions;
+ int hwlock_id;
+ u32 version;
+ u32 size;
+ int ret;
+ int i;
+
+ num_regions = 1;
+ if (of_property_present(pdev->dev.of_node, "qcom,rpm-msg-ram"))
+ num_regions++;
+
+ smem = devm_kzalloc(&pdev->dev, struct_size(smem, regions, num_regions),
+ GFP_KERNEL);
+ if (!smem)
+ return -ENOMEM;
+
+ smem->dev = &pdev->dev;
+ smem->num_regions = num_regions;
+
+ rmem = of_reserved_mem_lookup(pdev->dev.of_node);
+ if (rmem) {
+ smem->regions[0].aux_base = rmem->base;
+ smem->regions[0].size = rmem->size;
+ } else {
+ /*
+ * Fall back to the memory-region reference, if we're not a
+ * reserved-memory node.
+ */
+ ret = qcom_smem_resolve_mem(smem, "memory-region", &smem->regions[0]);
+ if (ret)
+ return ret;
+ }
+
+ if (num_regions > 1) {
+ ret = qcom_smem_resolve_mem(smem, "qcom,rpm-msg-ram", &smem->regions[1]);
+ if (ret)
+ return ret;
+ }
+
+
+ ret = qcom_smem_map_toc(smem, &smem->regions[0]);
+ if (ret)
+ return ret;
+
+ for (i = 1; i < num_regions; i++) {
+ smem->regions[i].virt_base = devm_ioremap_wc(&pdev->dev,
+ smem->regions[i].aux_base,
+ smem->regions[i].size);
+ if (!smem->regions[i].virt_base) {
+ dev_err(&pdev->dev, "failed to remap %pa\n", &smem->regions[i].aux_base);
+ return -ENOMEM;
+ }
+ }
+
+ header = smem->regions[0].virt_base;
+ if (le32_to_cpu(header->initialized) != 1 ||
+ le32_to_cpu(header->reserved)) {
+ dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
+ return -EINVAL;
+ }
+
+ hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
+ if (hwlock_id < 0) {
+ if (hwlock_id != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to retrieve hwlock\n");
+ return hwlock_id;
+ }
+
+ smem->hwlock = hwspin_lock_request_specific(hwlock_id);
+ if (!smem->hwlock)
+ return -ENXIO;
+
+ ret = hwspin_lock_timeout_irqsave(smem->hwlock, HWSPINLOCK_TIMEOUT, &flags);
+ if (ret)
+ return ret;
+ size = readl_relaxed(&header->available) + readl_relaxed(&header->free_offset);
+ hwspin_unlock_irqrestore(smem->hwlock, &flags);
+
+ version = qcom_smem_get_sbl_version(smem);
+ /*
+	 * The smem header mapping is required only in the heap version scheme, so
+	 * unmap it here. It will be remapped in qcom_smem_map_global() when the
+	 * whole partition is mapped again.
+ */
+ devm_iounmap(smem->dev, smem->regions[0].virt_base);
+ switch (version >> 16) {
+ case SMEM_GLOBAL_PART_VERSION:
+ ret = qcom_smem_set_global_partition(smem);
+ if (ret < 0)
+ return ret;
+ smem->item_count = qcom_smem_get_item_count(smem);
+ break;
+ case SMEM_GLOBAL_HEAP_VERSION:
+ qcom_smem_map_global(smem, size);
+ smem->item_count = SMEM_ITEM_COUNT;
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
+ return -EINVAL;
+ }
+
+ BUILD_BUG_ON(SMEM_HOST_APPS >= SMEM_HOST_COUNT);
+ ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
+ if (ret < 0 && ret != -ENOENT)
+ return ret;
+
+ __smem = smem;
+
+ smem->socinfo = platform_device_register_data(&pdev->dev, "qcom-socinfo",
+ PLATFORM_DEVID_NONE, NULL,
+ 0);
+ if (IS_ERR(smem->socinfo))
+ dev_dbg(&pdev->dev, "failed to register socinfo device\n");
+
+ return 0;
+}
+
+static int qcom_smem_remove(struct platform_device *pdev)
+{
+ platform_device_unregister(__smem->socinfo);
+
+ hwspin_lock_free(__smem->hwlock);
+ __smem = NULL;
+
+ return 0;
+}
+
+static const struct of_device_id qcom_smem_of_match[] = {
+ { .compatible = "qcom,smem" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_smem_of_match);
+
+static struct platform_driver qcom_smem_driver = {
+ .probe = qcom_smem_probe,
+ .remove = qcom_smem_remove,
+ .driver = {
+ .name = "qcom-smem",
+ .of_match_table = qcom_smem_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init qcom_smem_init(void)
+{
+ return platform_driver_register(&qcom_smem_driver);
+}
+arch_initcall(qcom_smem_init);
+
+static void __exit qcom_smem_exit(void)
+{
+ platform_driver_unregister(&qcom_smem_driver);
+}
+module_exit(qcom_smem_exit)
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
+MODULE_DESCRIPTION("Qualcomm Shared Memory Manager");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/smem_state.c b/drivers/soc/qcom/smem_state.c
new file mode 100644
index 0000000000..e848cc9a3c
--- /dev/null
+++ b/drivers/soc/qcom/smem_state.c
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, Sony Mobile Communications Inc.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ */
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/smem_state.h>
+
+static LIST_HEAD(smem_states);
+static DEFINE_MUTEX(list_lock);
+
+/**
+ * struct qcom_smem_state - state context
+ * @refcount: refcount for the state
+ * @orphan: boolean indicator that this state has been unregistered
+ * @list: entry in smem_states list
+ * @of_node: of_node to use for matching the state in DT
+ * @priv: implementation private data
+ * @ops: ops for the state
+ */
+struct qcom_smem_state {
+ struct kref refcount;
+ bool orphan;
+
+ struct list_head list;
+ struct device_node *of_node;
+
+ void *priv;
+
+ struct qcom_smem_state_ops ops;
+};
+
+/**
+ * qcom_smem_state_update_bits() - update the masked bits in state with value
+ * @state: state handle acquired by calling qcom_smem_state_get()
+ * @mask: bit mask for the change
+ * @value: new value for the masked bits
+ *
+ * Returns 0 on success, otherwise negative errno.
+ */
+int qcom_smem_state_update_bits(struct qcom_smem_state *state,
+ u32 mask,
+ u32 value)
+{
+ if (state->orphan)
+ return -ENXIO;
+
+ if (!state->ops.update_bits)
+ return -ENOTSUPP;
+
+ return state->ops.update_bits(state->priv, mask, value);
+}
+EXPORT_SYMBOL_GPL(qcom_smem_state_update_bits);
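+
+/*
+ * Illustrative sketch (not part of the upstream sources): a client that has
+ * obtained a handle and bit number from qcom_smem_state_get() typically
+ * asserts or clears its bit like this:
+ *
+ *	qcom_smem_state_update_bits(state, BIT(bit), BIT(bit));	// set
+ *	qcom_smem_state_update_bits(state, BIT(bit), 0);		// clear
+ */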
+
+static struct qcom_smem_state *of_node_to_state(struct device_node *np)
+{
+ struct qcom_smem_state *state;
+
+ mutex_lock(&list_lock);
+
+ list_for_each_entry(state, &smem_states, list) {
+ if (state->of_node == np) {
+ kref_get(&state->refcount);
+ goto unlock;
+ }
+ }
+ state = ERR_PTR(-EPROBE_DEFER);
+
+unlock:
+ mutex_unlock(&list_lock);
+
+ return state;
+}
+
+/**
+ * qcom_smem_state_get() - acquire handle to a state
+ * @dev: client device pointer
+ * @con_id: name of the state to lookup
+ * @bit: flags from the state reference, indicating which bit is affected
+ *
+ * Returns handle to the state, or ERR_PTR(). qcom_smem_state_put() must be
+ * called to release the returned state handle.
+ */
+struct qcom_smem_state *qcom_smem_state_get(struct device *dev,
+ const char *con_id,
+ unsigned *bit)
+{
+ struct qcom_smem_state *state;
+ struct of_phandle_args args;
+ int index = 0;
+ int ret;
+
+ if (con_id) {
+ index = of_property_match_string(dev->of_node,
+ "qcom,smem-state-names",
+ con_id);
+ if (index < 0) {
+ dev_err(dev, "missing qcom,smem-state-names\n");
+ return ERR_PTR(index);
+ }
+ }
+
+ ret = of_parse_phandle_with_args(dev->of_node,
+ "qcom,smem-states",
+ "#qcom,smem-state-cells",
+ index,
+ &args);
+ if (ret) {
+ dev_err(dev, "failed to parse qcom,smem-states property\n");
+ return ERR_PTR(ret);
+ }
+
+ if (args.args_count != 1) {
+ dev_err(dev, "invalid #qcom,smem-state-cells\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ state = of_node_to_state(args.np);
+ if (IS_ERR(state))
+ goto put;
+
+ *bit = args.args[0];
+
+put:
+ of_node_put(args.np);
+ return state;
+}
+EXPORT_SYMBOL_GPL(qcom_smem_state_get);
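+
+/*
+ * Illustrative sketch (not part of the upstream sources): a client whose DT
+ * node carries qcom,smem-states = <&provider 3> and
+ * qcom,smem-state-names = "stop" would look up its handle as below; the
+ * state name and cell value are examples only:
+ *
+ *	unsigned int bit;
+ *	struct qcom_smem_state *state;
+ *
+ *	state = qcom_smem_state_get(dev, "stop", &bit);
+ *	if (IS_ERR(state))
+ *		return PTR_ERR(state);
+ */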
+
+static void qcom_smem_state_release(struct kref *ref)
+{
+ struct qcom_smem_state *state = container_of(ref, struct qcom_smem_state, refcount);
+
+ list_del(&state->list);
+ of_node_put(state->of_node);
+ kfree(state);
+}
+
+/**
+ * qcom_smem_state_put() - release state handle
+ * @state: state handle to be released
+ */
+void qcom_smem_state_put(struct qcom_smem_state *state)
+{
+ mutex_lock(&list_lock);
+ kref_put(&state->refcount, qcom_smem_state_release);
+ mutex_unlock(&list_lock);
+}
+EXPORT_SYMBOL_GPL(qcom_smem_state_put);
+
+static void devm_qcom_smem_state_release(struct device *dev, void *res)
+{
+ qcom_smem_state_put(*(struct qcom_smem_state **)res);
+}
+
+/**
+ * devm_qcom_smem_state_get() - acquire handle to a devres managed state
+ * @dev: client device pointer
+ * @con_id: name of the state to lookup
+ * @bit: flags from the state reference, indicating which bit is affected
+ *
+ * Returns handle to the state, or ERR_PTR(). qcom_smem_state_put() is called
+ * automatically when @dev is removed.
+ */
+struct qcom_smem_state *devm_qcom_smem_state_get(struct device *dev,
+ const char *con_id,
+ unsigned *bit)
+{
+ struct qcom_smem_state **ptr, *state;
+
+ ptr = devres_alloc(devm_qcom_smem_state_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ state = qcom_smem_state_get(dev, con_id, bit);
+ if (!IS_ERR(state)) {
+ *ptr = state;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return state;
+}
+EXPORT_SYMBOL_GPL(devm_qcom_smem_state_get);
+
+/**
+ * qcom_smem_state_register() - register a new state
+ * @of_node: of_node used for matching client lookups
+ * @ops: implementation ops
+ * @priv: implementation specific private data
+ */
+struct qcom_smem_state *qcom_smem_state_register(struct device_node *of_node,
+ const struct qcom_smem_state_ops *ops,
+ void *priv)
+{
+ struct qcom_smem_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&state->refcount);
+
+ state->of_node = of_node_get(of_node);
+ state->ops = *ops;
+ state->priv = priv;
+
+ mutex_lock(&list_lock);
+ list_add(&state->list, &smem_states);
+ mutex_unlock(&list_lock);
+
+ return state;
+}
+EXPORT_SYMBOL_GPL(qcom_smem_state_register);
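+
+/*
+ * Illustrative sketch (not part of the upstream sources): a provider such as
+ * the smp2p driver registers one state per outbound entry, with
+ * ops->update_bits() performing the actual write; "my_state_ops" is a
+ * placeholder:
+ *
+ *	state = qcom_smem_state_register(node, &my_state_ops, entry);
+ *	if (IS_ERR(state))
+ *		return PTR_ERR(state);
+ */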
+
+/**
+ * qcom_smem_state_unregister() - unregister a registered state
+ * @state: state handle to be unregistered
+ */
+void qcom_smem_state_unregister(struct qcom_smem_state *state)
+{
+ state->orphan = true;
+ qcom_smem_state_put(state);
+}
+EXPORT_SYMBOL_GPL(qcom_smem_state_unregister);
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
new file mode 100644
index 0000000000..e9c8030d50
--- /dev/null
+++ b/drivers/soc/qcom/smp2p.c
@@ -0,0 +1,700 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mailbox_client.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
+#include <linux/regmap.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/soc/qcom/smem_state.h>
+#include <linux/spinlock.h>
+
+/*
+ * The Shared Memory Point to Point (SMP2P) protocol facilitates communication
+ * of a single 32-bit value between two processors. Each value has a single
+ * writer (the local side) and a single reader (the remote side). Values are
+ * uniquely identified in the system by the directed edge (local processor ID
+ * to remote processor ID) and a string identifier.
+ *
+ * Each processor is responsible for creating the outgoing SMEM items and each
+ * item is writable by the local processor and readable by the remote
+ * processor. By using two separate SMEM items that are single-reader and
+ * single-writer, SMP2P does not require any remote locking mechanisms.
+ *
+ * The driver exposes a virtual interrupt controller for each inbound entry
+ * and registers a qcom_smem_state handle for each outbound entry, so that
+ * clients can observe and update the individual bits.
+ */
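+
+/*
+ * Illustrative device tree sketch (not part of the upstream sources) of one
+ * smp2p edge, matching the properties parsed by qcom_smp2p_probe() below.
+ * The smem item ids and processor ids are examples, and the interrupt and
+ * mailbox properties are omitted:
+ *
+ *	smp2p-modem {
+ *		compatible = "qcom,smp2p";
+ *		qcom,smem = <435>, <428>;
+ *		qcom,local-pid = <0>;
+ *		qcom,remote-pid = <1>;
+ *
+ *		modem_smp2p_out: master-kernel {
+ *			qcom,entry-name = "master-kernel";
+ *			#qcom,smem-state-cells = <1>;
+ *		};
+ *
+ *		modem_smp2p_in: slave-kernel {
+ *			qcom,entry-name = "slave-kernel";
+ *			interrupt-controller;
+ *			#interrupt-cells = <2>;
+ *		};
+ *	};
+ */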
+
+#define SMP2P_MAX_ENTRY 16
+#define SMP2P_MAX_ENTRY_NAME 16
+
+#define SMP2P_FEATURE_SSR_ACK 0x1
+#define SMP2P_FLAGS_RESTART_DONE_BIT 0
+#define SMP2P_FLAGS_RESTART_ACK_BIT 1
+
+#define SMP2P_MAGIC 0x504d5324
+#define SMP2P_ALL_FEATURES SMP2P_FEATURE_SSR_ACK
+
+/**
+ * struct smp2p_smem_item - in memory communication structure
+ * @magic: magic number
+ * @version: version - must be 1
+ * @features: feature flags negotiated with the remote side (SMP2P_FEATURE_*)
+ * @local_pid: processor id of sending end
+ * @remote_pid: processor id of receiving end
+ * @total_entries: number of entries - always SMP2P_MAX_ENTRY
+ * @valid_entries: number of allocated entries
+ * @flags: restart done/ack state flags (SMP2P_FLAGS_RESTART_*)
+ * @entries: individual communication entries
+ * @name: name of the entry
+ * @value: content of the entry
+ */
+struct smp2p_smem_item {
+ u32 magic;
+ u8 version;
+ unsigned features:24;
+ u16 local_pid;
+ u16 remote_pid;
+ u16 total_entries;
+ u16 valid_entries;
+ u32 flags;
+
+ struct {
+ u8 name[SMP2P_MAX_ENTRY_NAME];
+ u32 value;
+ } entries[SMP2P_MAX_ENTRY];
+} __packed;
+
+/**
+ * struct smp2p_entry - driver context matching one entry
+ * @node: list entry to keep track of allocated entries
+ * @smp2p: reference to the device driver context
+ * @name: name of the entry, to match against smp2p_smem_item
+ * @value: pointer to smp2p_smem_item entry value
+ * @last_value: last handled value
+ * @domain: irq_domain for inbound entries
+ * @irq_enabled: bitmap to track enabled irq bits
+ * @irq_rising: bitmap to mark irq bits for rising detection
+ * @irq_falling: bitmap to mark irq bits for falling detection
+ * @state: smem state handle
+ * @lock: spinlock to protect read-modify-write of the value
+ */
+struct smp2p_entry {
+ struct list_head node;
+ struct qcom_smp2p *smp2p;
+
+ const char *name;
+ u32 *value;
+ u32 last_value;
+
+ struct irq_domain *domain;
+ DECLARE_BITMAP(irq_enabled, 32);
+ DECLARE_BITMAP(irq_rising, 32);
+ DECLARE_BITMAP(irq_falling, 32);
+
+ struct qcom_smem_state *state;
+
+ spinlock_t lock;
+};
+
+#define SMP2P_INBOUND 0
+#define SMP2P_OUTBOUND 1
+
+/**
+ * struct qcom_smp2p - device driver context
+ * @dev: device driver handle
+ * @in: pointer to the inbound smem item
+ * @out: pointer to the outbound smem item
+ * @smem_items: ids of the two smem items
+ * @valid_entries: already scanned inbound entries
+ * @ssr_ack_enabled: SMP2P_FEATURE_SSR_ACK feature is supported and was enabled
+ * @ssr_ack: current cached state of the local ack bit
+ * @negotiation_done: whether negotiating finished
+ * @local_pid: processor id of the inbound edge
+ * @remote_pid: processor id of the outbound edge
+ * @ipc_regmap: regmap for the outbound ipc
+ * @ipc_offset: offset within the regmap
+ * @ipc_bit: bit in regmap@offset to kick to signal remote processor
+ * @mbox_client: mailbox client handle
+ * @mbox_chan: apcs ipc mailbox channel handle
+ * @inbound: list of inbound entries
+ * @outbound: list of outbound entries
+ */
+struct qcom_smp2p {
+ struct device *dev;
+
+ struct smp2p_smem_item *in;
+ struct smp2p_smem_item *out;
+
+ unsigned smem_items[SMP2P_OUTBOUND + 1];
+
+ unsigned valid_entries;
+
+ bool ssr_ack_enabled;
+ bool ssr_ack;
+ bool negotiation_done;
+
+ unsigned local_pid;
+ unsigned remote_pid;
+
+ struct regmap *ipc_regmap;
+ int ipc_offset;
+ int ipc_bit;
+
+ struct mbox_client mbox_client;
+ struct mbox_chan *mbox_chan;
+
+ struct list_head inbound;
+ struct list_head outbound;
+};
+
+static void qcom_smp2p_kick(struct qcom_smp2p *smp2p)
+{
+ /* Make sure any updated data is written before the kick */
+ wmb();
+
+ if (smp2p->mbox_chan) {
+ mbox_send_message(smp2p->mbox_chan, NULL);
+ mbox_client_txdone(smp2p->mbox_chan, 0);
+ } else {
+ regmap_write(smp2p->ipc_regmap, smp2p->ipc_offset, BIT(smp2p->ipc_bit));
+ }
+}
+
+static bool qcom_smp2p_check_ssr(struct qcom_smp2p *smp2p)
+{
+ struct smp2p_smem_item *in = smp2p->in;
+ bool restart;
+
+ if (!smp2p->ssr_ack_enabled)
+ return false;
+
+ restart = in->flags & BIT(SMP2P_FLAGS_RESTART_DONE_BIT);
+
+ return restart != smp2p->ssr_ack;
+}
+
+static void qcom_smp2p_do_ssr_ack(struct qcom_smp2p *smp2p)
+{
+ struct smp2p_smem_item *out = smp2p->out;
+ u32 val;
+
+ smp2p->ssr_ack = !smp2p->ssr_ack;
+
+ val = out->flags & ~BIT(SMP2P_FLAGS_RESTART_ACK_BIT);
+ if (smp2p->ssr_ack)
+ val |= BIT(SMP2P_FLAGS_RESTART_ACK_BIT);
+ out->flags = val;
+
+ qcom_smp2p_kick(smp2p);
+}
+
+static void qcom_smp2p_negotiate(struct qcom_smp2p *smp2p)
+{
+ struct smp2p_smem_item *out = smp2p->out;
+ struct smp2p_smem_item *in = smp2p->in;
+
+ if (in->version == out->version) {
+ out->features &= in->features;
+
+ if (out->features & SMP2P_FEATURE_SSR_ACK)
+ smp2p->ssr_ack_enabled = true;
+
+ smp2p->negotiation_done = true;
+ }
+}
+
+static void qcom_smp2p_notify_in(struct qcom_smp2p *smp2p)
+{
+ struct smp2p_smem_item *in;
+ struct smp2p_entry *entry;
+ int irq_pin;
+ u32 status;
+ char buf[SMP2P_MAX_ENTRY_NAME];
+ u32 val;
+ int i;
+
+ in = smp2p->in;
+
+ /* Match newly created entries */
+ for (i = smp2p->valid_entries; i < in->valid_entries; i++) {
+ list_for_each_entry(entry, &smp2p->inbound, node) {
+ memcpy(buf, in->entries[i].name, sizeof(buf));
+ if (!strcmp(buf, entry->name)) {
+ entry->value = &in->entries[i].value;
+ break;
+ }
+ }
+ }
+ smp2p->valid_entries = i;
+
+ /* Fire interrupts based on any value changes */
+ list_for_each_entry(entry, &smp2p->inbound, node) {
+ /* Ignore entries not yet allocated by the remote side */
+ if (!entry->value)
+ continue;
+
+ val = readl(entry->value);
+
+ status = val ^ entry->last_value;
+ entry->last_value = val;
+
+		/* No changes to this entry? */
+ if (!status)
+ continue;
+
+ for_each_set_bit(i, entry->irq_enabled, 32) {
+ if (!(status & BIT(i)))
+ continue;
+
+ if ((val & BIT(i) && test_bit(i, entry->irq_rising)) ||
+ (!(val & BIT(i)) && test_bit(i, entry->irq_falling))) {
+ irq_pin = irq_find_mapping(entry->domain, i);
+ handle_nested_irq(irq_pin);
+ }
+ }
+ }
+}
+
+/**
+ * qcom_smp2p_intr() - interrupt handler for incoming notifications
+ * @irq: unused
+ * @data: smp2p driver context
+ *
+ * Handle notifications from the remote side about newly allocated entries
+ * or any changes to the state bits of existing entries.
+ */
+static irqreturn_t qcom_smp2p_intr(int irq, void *data)
+{
+ struct smp2p_smem_item *in;
+ struct qcom_smp2p *smp2p = data;
+ unsigned int smem_id = smp2p->smem_items[SMP2P_INBOUND];
+ unsigned int pid = smp2p->remote_pid;
+ bool ack_restart;
+ size_t size;
+
+ in = smp2p->in;
+
+ /* Acquire smem item, if not already found */
+ if (!in) {
+ in = qcom_smem_get(pid, smem_id, &size);
+ if (IS_ERR(in)) {
+ dev_err(smp2p->dev,
+ "Unable to acquire remote smp2p item\n");
+ goto out;
+ }
+
+ smp2p->in = in;
+ }
+
+ if (!smp2p->negotiation_done)
+ qcom_smp2p_negotiate(smp2p);
+
+ if (smp2p->negotiation_done) {
+ ack_restart = qcom_smp2p_check_ssr(smp2p);
+ qcom_smp2p_notify_in(smp2p);
+
+ if (ack_restart)
+ qcom_smp2p_do_ssr_ack(smp2p);
+ }
+
+out:
+ return IRQ_HANDLED;
+}
+
+static void smp2p_mask_irq(struct irq_data *irqd)
+{
+ struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
+
+ clear_bit(irq, entry->irq_enabled);
+}
+
+static void smp2p_unmask_irq(struct irq_data *irqd)
+{
+ struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
+
+ set_bit(irq, entry->irq_enabled);
+}
+
+static int smp2p_set_irq_type(struct irq_data *irqd, unsigned int type)
+{
+ struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
+
+ if (!(type & IRQ_TYPE_EDGE_BOTH))
+ return -EINVAL;
+
+ if (type & IRQ_TYPE_EDGE_RISING)
+ set_bit(irq, entry->irq_rising);
+ else
+ clear_bit(irq, entry->irq_rising);
+
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ set_bit(irq, entry->irq_falling);
+ else
+ clear_bit(irq, entry->irq_falling);
+
+ return 0;
+}
+
+static struct irq_chip smp2p_irq_chip = {
+ .name = "smp2p",
+ .irq_mask = smp2p_mask_irq,
+ .irq_unmask = smp2p_unmask_irq,
+ .irq_set_type = smp2p_set_irq_type,
+};
+
+static int smp2p_irq_map(struct irq_domain *d,
+ unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct smp2p_entry *entry = d->host_data;
+
+ irq_set_chip_and_handler(irq, &smp2p_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, entry);
+ irq_set_nested_thread(irq, 1);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops smp2p_irq_ops = {
+ .map = smp2p_irq_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static int qcom_smp2p_inbound_entry(struct qcom_smp2p *smp2p,
+ struct smp2p_entry *entry,
+ struct device_node *node)
+{
+ entry->domain = irq_domain_add_linear(node, 32, &smp2p_irq_ops, entry);
+ if (!entry->domain) {
+ dev_err(smp2p->dev, "failed to add irq_domain\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int smp2p_update_bits(void *data, u32 mask, u32 value)
+{
+ struct smp2p_entry *entry = data;
+ unsigned long flags;
+ u32 orig;
+ u32 val;
+
+ spin_lock_irqsave(&entry->lock, flags);
+ val = orig = readl(entry->value);
+ val &= ~mask;
+ val |= value;
+ writel(val, entry->value);
+ spin_unlock_irqrestore(&entry->lock, flags);
+
+ if (val != orig)
+ qcom_smp2p_kick(entry->smp2p);
+
+ return 0;
+}
+
+static const struct qcom_smem_state_ops smp2p_state_ops = {
+ .update_bits = smp2p_update_bits,
+};
+
+static int qcom_smp2p_outbound_entry(struct qcom_smp2p *smp2p,
+ struct smp2p_entry *entry,
+ struct device_node *node)
+{
+ struct smp2p_smem_item *out = smp2p->out;
+ char buf[SMP2P_MAX_ENTRY_NAME] = {};
+
+ /* Allocate an entry from the smem item */
+ strscpy(buf, entry->name, SMP2P_MAX_ENTRY_NAME);
+ memcpy(out->entries[out->valid_entries].name, buf, SMP2P_MAX_ENTRY_NAME);
+
+ /* Make the logical entry reference the physical value */
+ entry->value = &out->entries[out->valid_entries].value;
+
+ out->valid_entries++;
+
+ entry->state = qcom_smem_state_register(node, &smp2p_state_ops, entry);
+ if (IS_ERR(entry->state)) {
+ dev_err(smp2p->dev, "failed to register qcom_smem_state\n");
+ return PTR_ERR(entry->state);
+ }
+
+ return 0;
+}
+
+static int qcom_smp2p_alloc_outbound_item(struct qcom_smp2p *smp2p)
+{
+ struct smp2p_smem_item *out;
+ unsigned smem_id = smp2p->smem_items[SMP2P_OUTBOUND];
+ unsigned pid = smp2p->remote_pid;
+ int ret;
+
+ ret = qcom_smem_alloc(pid, smem_id, sizeof(*out));
+ if (ret < 0 && ret != -EEXIST) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(smp2p->dev,
+ "unable to allocate local smp2p item\n");
+ return ret;
+ }
+
+ out = qcom_smem_get(pid, smem_id, NULL);
+ if (IS_ERR(out)) {
+ dev_err(smp2p->dev, "Unable to acquire local smp2p item\n");
+ return PTR_ERR(out);
+ }
+
+ memset(out, 0, sizeof(*out));
+ out->magic = SMP2P_MAGIC;
+ out->local_pid = smp2p->local_pid;
+ out->remote_pid = smp2p->remote_pid;
+ out->total_entries = SMP2P_MAX_ENTRY;
+ out->valid_entries = 0;
+ out->features = SMP2P_ALL_FEATURES;
+
+ /*
+ * Make sure the rest of the header is written before we validate the
+ * item by writing a valid version number.
+ */
+ wmb();
+ out->version = 1;
+
+ qcom_smp2p_kick(smp2p);
+
+ smp2p->out = out;
+
+ return 0;
+}
+
+static int smp2p_parse_ipc(struct qcom_smp2p *smp2p)
+{
+ struct device_node *syscon;
+ struct device *dev = smp2p->dev;
+ const char *key;
+ int ret;
+
+ syscon = of_parse_phandle(dev->of_node, "qcom,ipc", 0);
+ if (!syscon) {
+ dev_err(dev, "no qcom,ipc node\n");
+ return -ENODEV;
+ }
+
+ smp2p->ipc_regmap = syscon_node_to_regmap(syscon);
+ of_node_put(syscon);
+ if (IS_ERR(smp2p->ipc_regmap))
+ return PTR_ERR(smp2p->ipc_regmap);
+
+ key = "qcom,ipc";
+ ret = of_property_read_u32_index(dev->of_node, key, 1, &smp2p->ipc_offset);
+ if (ret < 0) {
+ dev_err(dev, "no offset in %s\n", key);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_index(dev->of_node, key, 2, &smp2p->ipc_bit);
+ if (ret < 0) {
+ dev_err(dev, "no bit in %s\n", key);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qcom_smp2p_probe(struct platform_device *pdev)
+{
+ struct smp2p_entry *entry;
+ struct device_node *node;
+ struct qcom_smp2p *smp2p;
+ const char *key;
+ int irq;
+ int ret;
+
+ smp2p = devm_kzalloc(&pdev->dev, sizeof(*smp2p), GFP_KERNEL);
+ if (!smp2p)
+ return -ENOMEM;
+
+ smp2p->dev = &pdev->dev;
+ INIT_LIST_HEAD(&smp2p->inbound);
+ INIT_LIST_HEAD(&smp2p->outbound);
+
+ platform_set_drvdata(pdev, smp2p);
+
+ key = "qcom,smem";
+ ret = of_property_read_u32_array(pdev->dev.of_node, key,
+ smp2p->smem_items, 2);
+ if (ret)
+ return ret;
+
+ key = "qcom,local-pid";
+ ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->local_pid);
+ if (ret)
+ goto report_read_failure;
+
+ key = "qcom,remote-pid";
+ ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->remote_pid);
+ if (ret)
+ goto report_read_failure;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ smp2p->mbox_client.dev = &pdev->dev;
+ smp2p->mbox_client.knows_txdone = true;
+ smp2p->mbox_chan = mbox_request_channel(&smp2p->mbox_client, 0);
+ if (IS_ERR(smp2p->mbox_chan)) {
+ if (PTR_ERR(smp2p->mbox_chan) != -ENODEV)
+ return PTR_ERR(smp2p->mbox_chan);
+
+ smp2p->mbox_chan = NULL;
+
+ ret = smp2p_parse_ipc(smp2p);
+ if (ret)
+ return ret;
+ }
+
+ ret = qcom_smp2p_alloc_outbound_item(smp2p);
+ if (ret < 0)
+ goto release_mbox;
+
+ for_each_available_child_of_node(pdev->dev.of_node, node) {
+ entry = devm_kzalloc(&pdev->dev, sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ ret = -ENOMEM;
+ of_node_put(node);
+ goto unwind_interfaces;
+ }
+
+ entry->smp2p = smp2p;
+ spin_lock_init(&entry->lock);
+
+ ret = of_property_read_string(node, "qcom,entry-name", &entry->name);
+ if (ret < 0) {
+ of_node_put(node);
+ goto unwind_interfaces;
+ }
+
+ if (of_property_read_bool(node, "interrupt-controller")) {
+ ret = qcom_smp2p_inbound_entry(smp2p, entry, node);
+ if (ret < 0) {
+ of_node_put(node);
+ goto unwind_interfaces;
+ }
+
+ list_add(&entry->node, &smp2p->inbound);
+ } else {
+ ret = qcom_smp2p_outbound_entry(smp2p, entry, node);
+ if (ret < 0) {
+ of_node_put(node);
+ goto unwind_interfaces;
+ }
+
+ list_add(&entry->node, &smp2p->outbound);
+ }
+ }
+
+ /* Kick the outgoing edge after allocating entries */
+ qcom_smp2p_kick(smp2p);
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq,
+ NULL, qcom_smp2p_intr,
+ IRQF_ONESHOT,
+ "smp2p", (void *)smp2p);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request interrupt\n");
+ goto unwind_interfaces;
+ }
+
+	/*
+	 * Treat the smp2p interrupt as a wakeup source, but keep it disabled
+	 * by default. User space can decide whether to enable it depending on
+	 * its use case. For example, if the remoteproc crashes and the device
+	 * wants to handle it immediately (e.g. to not miss phone calls), it
+	 * can enable the wakeup source from user space, while devices that
+	 * lack a proper autosleep feature may prefer to handle it together
+	 * with other wakeup events (e.g. the power button) instead of waking
+	 * up immediately.
+	 */
+ device_set_wakeup_capable(&pdev->dev, true);
+
+ ret = dev_pm_set_wake_irq(&pdev->dev, irq);
+ if (ret)
+ goto set_wake_irq_fail;
+
+ return 0;
+
+set_wake_irq_fail:
+ dev_pm_clear_wake_irq(&pdev->dev);
+
+unwind_interfaces:
+ list_for_each_entry(entry, &smp2p->inbound, node)
+ irq_domain_remove(entry->domain);
+
+ list_for_each_entry(entry, &smp2p->outbound, node)
+ qcom_smem_state_unregister(entry->state);
+
+ smp2p->out->valid_entries = 0;
+
+release_mbox:
+ mbox_free_channel(smp2p->mbox_chan);
+
+ return ret;
+
+report_read_failure:
+ dev_err(&pdev->dev, "failed to read %s\n", key);
+ return -EINVAL;
+}
+
+static int qcom_smp2p_remove(struct platform_device *pdev)
+{
+ struct qcom_smp2p *smp2p = platform_get_drvdata(pdev);
+ struct smp2p_entry *entry;
+
+ dev_pm_clear_wake_irq(&pdev->dev);
+
+ list_for_each_entry(entry, &smp2p->inbound, node)
+ irq_domain_remove(entry->domain);
+
+ list_for_each_entry(entry, &smp2p->outbound, node)
+ qcom_smem_state_unregister(entry->state);
+
+ mbox_free_channel(smp2p->mbox_chan);
+
+ smp2p->out->valid_entries = 0;
+
+ return 0;
+}
+
+static const struct of_device_id qcom_smp2p_of_match[] = {
+ { .compatible = "qcom,smp2p" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_smp2p_of_match);
+
+static struct platform_driver qcom_smp2p_driver = {
+ .probe = qcom_smp2p_probe,
+ .remove = qcom_smp2p_remove,
+ .driver = {
+ .name = "qcom_smp2p",
+ .of_match_table = qcom_smp2p_of_match,
+ },
+};
+module_platform_driver(qcom_smp2p_driver);
+
+MODULE_DESCRIPTION("Qualcomm Shared Memory Point to Point driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c
new file mode 100644
index 0000000000..c58cfff648
--- /dev/null
+++ b/drivers/soc/qcom/smsm.c
@@ -0,0 +1,647 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, Sony Mobile Communications Inc.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/regmap.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/soc/qcom/smem_state.h>
+
+/*
+ * This driver implements the Qualcomm Shared Memory State Machine, a mechanism
+ * for communicating single bit state information to remote processors.
+ *
+ * The implementation is based on two sections of shared memory; the first
+ * holding the state bits and the second holding a matrix of subscription bits.
+ *
+ * The state bits are structured in entries of 32 bits, each belonging to one
+ * system in the SoC. The entry belonging to the local system is considered
+ * read-write, while the rest should be considered read-only.
+ *
+ * The subscription matrix consists of N bitmaps per entry, denoting interest
+ * in updates of the entry for each of the N hosts. Upon updating a state bit
+ * each host's subscription bitmap should be queried and the remote system
+ * should be interrupted if it has subscribed to the changed bit.
+ *
+ * The subscription matrix is laid out in entry-major order:
+ * entry0: [host0 ... hostN]
+ * .
+ * .
+ * entryM: [host0 ... hostN]
+ *
+ * A third, optional, shared memory region might contain information regarding
+ * the number of entries in the state bitmap as well as the number of columns
+ * in the subscription matrix.
+ */
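+
+/*
+ * As an illustration, with the default geometry defined below
+ * (SMSM_DEFAULT_NUM_ENTRIES = 8, SMSM_DEFAULT_NUM_HOSTS = 3) the matrix is
+ * 8 * 3 u32 words and entry i's subscription bitmap for host j is the word
+ * at index (i * num_hosts + j) - the same arithmetic used to derive the
+ * subscription pointers in the probe function.
+ */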
+
+/*
+ * Shared memory identifiers, used to acquire handles to respective memory
+ * region.
+ */
+#define SMEM_SMSM_SHARED_STATE 85
+#define SMEM_SMSM_CPU_INTR_MASK 333
+#define SMEM_SMSM_SIZE_INFO 419
+
+/*
+ * Default sizes, in case SMEM_SMSM_SIZE_INFO is not found.
+ */
+#define SMSM_DEFAULT_NUM_ENTRIES 8
+#define SMSM_DEFAULT_NUM_HOSTS 3
+
+struct smsm_entry;
+struct smsm_host;
+
+/**
+ * struct qcom_smsm - smsm driver context
+ * @dev: smsm device pointer
+ * @local_host: column in the subscription matrix representing this system
+ * @num_hosts: number of columns in the subscription matrix
+ * @num_entries: number of entries in the state map and rows in the subscription
+ * matrix
+ * @local_state: pointer to the local processor's state bits
+ * @subscription: pointer to local processor's row in subscription matrix
+ * @state: smem state handle
+ * @lock: spinlock for read-modify-write of the outgoing state
+ * @entries: context for each of the entries
+ * @hosts: context for each of the hosts
+ */
+struct qcom_smsm {
+ struct device *dev;
+
+ u32 local_host;
+
+ u32 num_hosts;
+ u32 num_entries;
+
+ u32 *local_state;
+ u32 *subscription;
+ struct qcom_smem_state *state;
+
+ spinlock_t lock;
+
+ struct smsm_entry *entries;
+ struct smsm_host *hosts;
+};
+
+/**
+ * struct smsm_entry - per remote processor entry context
+ * @smsm: back-reference to driver context
+ * @domain: IRQ domain for this entry, if representing a remote system
+ * @irq_enabled: bitmap of which state bits IRQs are enabled
+ * @irq_rising: bitmap tracking if rising bits should be propagated
+ * @irq_falling: bitmap tracking if falling bits should be propagated
+ * @last_value: snapshot of state bits last time the interrupts were propagated
+ * @remote_state: pointer to this entry's state bits
+ * @subscription: pointer to a row in the subscription matrix representing this
+ * entry
+ */
+struct smsm_entry {
+ struct qcom_smsm *smsm;
+
+ struct irq_domain *domain;
+ DECLARE_BITMAP(irq_enabled, 32);
+ DECLARE_BITMAP(irq_rising, 32);
+ DECLARE_BITMAP(irq_falling, 32);
+ unsigned long last_value;
+
+ u32 *remote_state;
+ u32 *subscription;
+};
+
+/**
+ * struct smsm_host - representation of a remote host
+ * @ipc_regmap: regmap for outgoing interrupt
+ * @ipc_offset: offset in @ipc_regmap for outgoing interrupt
+ * @ipc_bit: bit in @ipc_regmap + @ipc_offset for outgoing interrupt
+ */
+struct smsm_host {
+ struct regmap *ipc_regmap;
+ int ipc_offset;
+ int ipc_bit;
+};
+
+/**
+ * smsm_update_bits() - change bit in outgoing entry and inform subscribers
+ * @data: smsm context pointer
+ * @mask: value mask
+ * @value: new value
+ *
+ * Used to set and clear the bits in the outgoing/local entry and inform
+ * subscribers about the change.
+ */
+static int smsm_update_bits(void *data, u32 mask, u32 value)
+{
+ struct qcom_smsm *smsm = data;
+ struct smsm_host *hostp;
+ unsigned long flags;
+ u32 changes;
+ u32 host;
+ u32 orig;
+ u32 val;
+
+ spin_lock_irqsave(&smsm->lock, flags);
+
+ /* Update the entry */
+ val = orig = readl(smsm->local_state);
+ val &= ~mask;
+ val |= value;
+
+ /* Don't signal if we didn't change the value */
+ changes = val ^ orig;
+ if (!changes) {
+ spin_unlock_irqrestore(&smsm->lock, flags);
+ goto done;
+ }
+
+ /* Write out the new value */
+ writel(val, smsm->local_state);
+ spin_unlock_irqrestore(&smsm->lock, flags);
+
+ /* Make sure the value update is ordered before any kicks */
+ wmb();
+
+	/* Iterate over all hosts to check which of them want a kick */
+ for (host = 0; host < smsm->num_hosts; host++) {
+ hostp = &smsm->hosts[host];
+
+ val = readl(smsm->subscription + host);
+ if (val & changes && hostp->ipc_regmap) {
+ regmap_write(hostp->ipc_regmap,
+ hostp->ipc_offset,
+ BIT(hostp->ipc_bit));
+ }
+ }
+
+done:
+ return 0;
+}
+
+static const struct qcom_smem_state_ops smsm_state_ops = {
+ .update_bits = smsm_update_bits,
+};
+
+/**
+ * smsm_intr() - cascading IRQ handler for SMSM
+ * @irq: unused
+ * @data: entry related to this IRQ
+ *
+ * This function cascades an incoming interrupt from a remote system, based on
+ * the state bits and configuration.
+ */
+static irqreturn_t smsm_intr(int irq, void *data)
+{
+ struct smsm_entry *entry = data;
+ unsigned i;
+ int irq_pin;
+ u32 changed;
+ u32 val;
+
+ val = readl(entry->remote_state);
+ changed = val ^ xchg(&entry->last_value, val);
+
+ for_each_set_bit(i, entry->irq_enabled, 32) {
+ if (!(changed & BIT(i)))
+ continue;
+
+ if (val & BIT(i)) {
+ if (test_bit(i, entry->irq_rising)) {
+ irq_pin = irq_find_mapping(entry->domain, i);
+ handle_nested_irq(irq_pin);
+ }
+ } else {
+ if (test_bit(i, entry->irq_falling)) {
+ irq_pin = irq_find_mapping(entry->domain, i);
+ handle_nested_irq(irq_pin);
+ }
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * smsm_mask_irq() - un-subscribe from cascades of IRQs of a certain status bit
+ * @irqd: IRQ handle to be masked
+ *
+ * This un-subscribes the local CPU from interrupts upon changes to the defined
+ * status bit. The bit is also cleared from cascading.
+ */
+static void smsm_mask_irq(struct irq_data *irqd)
+{
+ struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
+ struct qcom_smsm *smsm = entry->smsm;
+ u32 val;
+
+ if (entry->subscription) {
+ val = readl(entry->subscription + smsm->local_host);
+ val &= ~BIT(irq);
+ writel(val, entry->subscription + smsm->local_host);
+ }
+
+ clear_bit(irq, entry->irq_enabled);
+}
+
+/**
+ * smsm_unmask_irq() - subscribe to cascades of IRQs of a certain status bit
+ * @irqd: IRQ handle to be unmasked
+ *
+ * This subscribes the local CPU to interrupts upon changes to the defined
+ * status bit. The bit is also marked for cascading.
+ */
+static void smsm_unmask_irq(struct irq_data *irqd)
+{
+ struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
+ struct qcom_smsm *smsm = entry->smsm;
+ u32 val;
+
+ /* Make sure our last cached state is up-to-date */
+ if (readl(entry->remote_state) & BIT(irq))
+ set_bit(irq, &entry->last_value);
+ else
+ clear_bit(irq, &entry->last_value);
+
+ set_bit(irq, entry->irq_enabled);
+
+ if (entry->subscription) {
+ val = readl(entry->subscription + smsm->local_host);
+ val |= BIT(irq);
+ writel(val, entry->subscription + smsm->local_host);
+ }
+}
+
+/**
+ * smsm_set_irq_type() - updates the requested IRQ type for the cascading IRQ
+ * @irqd: consumer interrupt handle
+ * @type: requested flags
+ */
+static int smsm_set_irq_type(struct irq_data *irqd, unsigned int type)
+{
+ struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
+
+ if (!(type & IRQ_TYPE_EDGE_BOTH))
+ return -EINVAL;
+
+ if (type & IRQ_TYPE_EDGE_RISING)
+ set_bit(irq, entry->irq_rising);
+ else
+ clear_bit(irq, entry->irq_rising);
+
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ set_bit(irq, entry->irq_falling);
+ else
+ clear_bit(irq, entry->irq_falling);
+
+ return 0;
+}
+
+static int smsm_get_irqchip_state(struct irq_data *irqd,
+ enum irqchip_irq_state which, bool *state)
+{
+ struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
+ u32 val;
+
+ if (which != IRQCHIP_STATE_LINE_LEVEL)
+ return -EINVAL;
+
+ val = readl(entry->remote_state);
+ *state = !!(val & BIT(irq));
+
+ return 0;
+}
+
+static struct irq_chip smsm_irq_chip = {
+ .name = "smsm",
+ .irq_mask = smsm_mask_irq,
+ .irq_unmask = smsm_unmask_irq,
+ .irq_set_type = smsm_set_irq_type,
+ .irq_get_irqchip_state = smsm_get_irqchip_state,
+};
+
+/**
+ * smsm_irq_map() - sets up a mapping for a cascaded IRQ
+ * @d: IRQ domain representing an entry
+ * @irq: IRQ to set up
+ * @hw: unused
+ */
+static int smsm_irq_map(struct irq_domain *d,
+ unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct smsm_entry *entry = d->host_data;
+
+ irq_set_chip_and_handler(irq, &smsm_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, entry);
+ irq_set_nested_thread(irq, 1);
+
+ return 0;
+}
+
+static const struct irq_domain_ops smsm_irq_ops = {
+ .map = smsm_irq_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+/**
+ * smsm_parse_ipc() - parses a qcom,ipc-%d device tree property
+ * @smsm: smsm driver context
+ * @host_id: index of the remote host to be resolved
+ *
+ * Parses device tree to acquire the information needed for sending the
+ * outgoing interrupts to a remote host - identified by @host_id.
+ */
+static int smsm_parse_ipc(struct qcom_smsm *smsm, unsigned host_id)
+{
+ struct device_node *syscon;
+ struct device_node *node = smsm->dev->of_node;
+ struct smsm_host *host = &smsm->hosts[host_id];
+ char key[16];
+ int ret;
+
+ snprintf(key, sizeof(key), "qcom,ipc-%d", host_id);
+ syscon = of_parse_phandle(node, key, 0);
+ if (!syscon)
+ return 0;
+
+ host->ipc_regmap = syscon_node_to_regmap(syscon);
+ of_node_put(syscon);
+ if (IS_ERR(host->ipc_regmap))
+ return PTR_ERR(host->ipc_regmap);
+
+ ret = of_property_read_u32_index(node, key, 1, &host->ipc_offset);
+ if (ret < 0) {
+ dev_err(smsm->dev, "no offset in %s\n", key);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_index(node, key, 2, &host->ipc_bit);
+ if (ret < 0) {
+ dev_err(smsm->dev, "no bit in %s\n", key);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * smsm_inbound_entry() - parse DT and set up an entry representing a remote system
+ * @smsm: smsm driver context
+ * @entry: entry context to be set up
+ * @node: dt node containing the entry's properties
+ */
+static int smsm_inbound_entry(struct qcom_smsm *smsm,
+ struct smsm_entry *entry,
+ struct device_node *node)
+{
+ int ret;
+ int irq;
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (!irq) {
+ dev_err(smsm->dev, "failed to parse smsm interrupt\n");
+ return -EINVAL;
+ }
+
+ ret = devm_request_threaded_irq(smsm->dev, irq,
+ NULL, smsm_intr,
+ IRQF_ONESHOT,
+ "smsm", (void *)entry);
+ if (ret) {
+ dev_err(smsm->dev, "failed to request interrupt\n");
+ return ret;
+ }
+
+ entry->domain = irq_domain_add_linear(node, 32, &smsm_irq_ops, entry);
+ if (!entry->domain) {
+ dev_err(smsm->dev, "failed to add irq_domain\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * smsm_get_size_info() - parse the optional memory segment for sizes
+ * @smsm: smsm driver context
+ *
+ * Attempt to acquire the number of hosts and entries from the optional shared
+ * memory location. Not being able to find this segment should indicate that
+ * we're on an older system where these values were hard coded to
+ * SMSM_DEFAULT_NUM_ENTRIES and SMSM_DEFAULT_NUM_HOSTS.
+ *
+ * Returns 0 on success, negative errno on failure.
+ */
+static int smsm_get_size_info(struct qcom_smsm *smsm)
+{
+ size_t size;
+ struct {
+ u32 num_hosts;
+ u32 num_entries;
+ u32 reserved0;
+ u32 reserved1;
+ } *info;
+
+ info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SIZE_INFO, &size);
+ if (IS_ERR(info) && PTR_ERR(info) != -ENOENT)
+ return dev_err_probe(smsm->dev, PTR_ERR(info),
+ "unable to retrieve smsm size info\n");
+ else if (IS_ERR(info) || size != sizeof(*info)) {
+ dev_warn(smsm->dev, "no smsm size info, using defaults\n");
+ smsm->num_entries = SMSM_DEFAULT_NUM_ENTRIES;
+ smsm->num_hosts = SMSM_DEFAULT_NUM_HOSTS;
+ return 0;
+ }
+
+ smsm->num_entries = info->num_entries;
+ smsm->num_hosts = info->num_hosts;
+
+ dev_dbg(smsm->dev,
+ "found custom size of smsm: %d entries %d hosts\n",
+ smsm->num_entries, smsm->num_hosts);
+
+ return 0;
+}
+
+static int qcom_smsm_probe(struct platform_device *pdev)
+{
+ struct device_node *local_node;
+ struct device_node *node;
+ struct smsm_entry *entry;
+ struct qcom_smsm *smsm;
+ u32 *intr_mask;
+ size_t size;
+ u32 *states;
+ u32 id;
+ int ret;
+
+ smsm = devm_kzalloc(&pdev->dev, sizeof(*smsm), GFP_KERNEL);
+ if (!smsm)
+ return -ENOMEM;
+ smsm->dev = &pdev->dev;
+ spin_lock_init(&smsm->lock);
+
+ ret = smsm_get_size_info(smsm);
+ if (ret)
+ return ret;
+
+ smsm->entries = devm_kcalloc(&pdev->dev,
+ smsm->num_entries,
+ sizeof(struct smsm_entry),
+ GFP_KERNEL);
+ if (!smsm->entries)
+ return -ENOMEM;
+
+ smsm->hosts = devm_kcalloc(&pdev->dev,
+ smsm->num_hosts,
+ sizeof(struct smsm_host),
+ GFP_KERNEL);
+ if (!smsm->hosts)
+ return -ENOMEM;
+
+ for_each_child_of_node(pdev->dev.of_node, local_node) {
+ if (of_property_present(local_node, "#qcom,smem-state-cells"))
+ break;
+ }
+ if (!local_node) {
+ dev_err(&pdev->dev, "no state entry\n");
+ return -EINVAL;
+ }
+
+ of_property_read_u32(pdev->dev.of_node,
+ "qcom,local-host",
+ &smsm->local_host);
+
+ /* Parse the host properties */
+ for (id = 0; id < smsm->num_hosts; id++) {
+ ret = smsm_parse_ipc(smsm, id);
+ if (ret < 0)
+ goto out_put;
+ }
+
+ /* Acquire the main SMSM state vector */
+ ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SHARED_STATE,
+ smsm->num_entries * sizeof(u32));
+ if (ret < 0 && ret != -EEXIST) {
+ dev_err(&pdev->dev, "unable to allocate shared state entry\n");
+ goto out_put;
+ }
+
+ states = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SHARED_STATE, NULL);
+ if (IS_ERR(states)) {
+ dev_err(&pdev->dev, "Unable to acquire shared state entry\n");
+ ret = PTR_ERR(states);
+ goto out_put;
+ }
+
+ /* Acquire the list of interrupt mask vectors */
+ size = smsm->num_entries * smsm->num_hosts * sizeof(u32);
+ ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, SMEM_SMSM_CPU_INTR_MASK, size);
+ if (ret < 0 && ret != -EEXIST) {
+ dev_err(&pdev->dev, "unable to allocate smsm interrupt mask\n");
+ goto out_put;
+ }
+
+ intr_mask = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_CPU_INTR_MASK, NULL);
+ if (IS_ERR(intr_mask)) {
+ dev_err(&pdev->dev, "unable to acquire shared memory interrupt mask\n");
+ ret = PTR_ERR(intr_mask);
+ goto out_put;
+ }
+
+ /* Setup the reference to the local state bits */
+ smsm->local_state = states + smsm->local_host;
+ smsm->subscription = intr_mask + smsm->local_host * smsm->num_hosts;
+
+ /* Register the outgoing state */
+ smsm->state = qcom_smem_state_register(local_node, &smsm_state_ops, smsm);
+ if (IS_ERR(smsm->state)) {
+ dev_err(smsm->dev, "failed to register qcom_smem_state\n");
+ ret = PTR_ERR(smsm->state);
+ goto out_put;
+ }
+
+ /* Register handlers for remote processor entries of interest. */
+ for_each_available_child_of_node(pdev->dev.of_node, node) {
+ if (!of_property_read_bool(node, "interrupt-controller"))
+ continue;
+
+ ret = of_property_read_u32(node, "reg", &id);
+ if (ret || id >= smsm->num_entries) {
+ dev_err(&pdev->dev, "invalid reg of entry\n");
+ if (!ret)
+ ret = -EINVAL;
+ goto unwind_interfaces;
+ }
+ entry = &smsm->entries[id];
+
+ entry->smsm = smsm;
+ entry->remote_state = states + id;
+
+		/* Setup subscription pointers and unsubscribe from any kicks */
+ entry->subscription = intr_mask + id * smsm->num_hosts;
+ writel(0, entry->subscription + smsm->local_host);
+
+ ret = smsm_inbound_entry(smsm, entry, node);
+ if (ret < 0)
+ goto unwind_interfaces;
+ }
+
+ platform_set_drvdata(pdev, smsm);
+ of_node_put(local_node);
+
+ return 0;
+
+unwind_interfaces:
+ of_node_put(node);
+ for (id = 0; id < smsm->num_entries; id++)
+ if (smsm->entries[id].domain)
+ irq_domain_remove(smsm->entries[id].domain);
+
+ qcom_smem_state_unregister(smsm->state);
+out_put:
+ of_node_put(local_node);
+ return ret;
+}
+
+static int qcom_smsm_remove(struct platform_device *pdev)
+{
+ struct qcom_smsm *smsm = platform_get_drvdata(pdev);
+ unsigned id;
+
+ for (id = 0; id < smsm->num_entries; id++)
+ if (smsm->entries[id].domain)
+ irq_domain_remove(smsm->entries[id].domain);
+
+ qcom_smem_state_unregister(smsm->state);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_smsm_of_match[] = {
+ { .compatible = "qcom,smsm" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_smsm_of_match);
+
+static struct platform_driver qcom_smsm_driver = {
+ .probe = qcom_smsm_probe,
+ .remove = qcom_smsm_remove,
+ .driver = {
+ .name = "qcom-smsm",
+ .of_match_table = qcom_smsm_of_match,
+ },
+};
+module_platform_driver(qcom_smsm_driver);
+
+MODULE_DESCRIPTION("Qualcomm Shared Memory State Machine driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
new file mode 100644
index 0000000000..497cfb720f
--- /dev/null
+++ b/drivers/soc/qcom/socinfo.c
@@ -0,0 +1,802 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2009-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, Linaro Ltd.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/soc/qcom/socinfo.h>
+#include <linux/string.h>
+#include <linux/stringify.h>
+#include <linux/sys_soc.h>
+#include <linux/types.h>
+
+#include <asm/unaligned.h>
+
+#include <dt-bindings/arm/qcom,ids.h>
+
+/*
+ * SoC version type with major number in the upper 16 bits and minor
+ * number in the lower 16 bits.
+ */
+#define SOCINFO_MAJOR(ver) (((ver) >> 16) & 0xffff)
+#define SOCINFO_MINOR(ver) ((ver) & 0xffff)
+#define SOCINFO_VERSION(maj, min) ((((maj) & 0xffff) << 16)|((min) & 0xffff))
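+
+/*
+ * For example, SOCINFO_VERSION(0, 16) evaluates to 0x00000010, and a raw
+ * version value of 0x00010002 decodes as major 1, minor 2.
+ */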
+
+/* Helper macros to create soc_id table */
+#define qcom_board_id(id) QCOM_ID_ ## id, __stringify(id)
+#define qcom_board_id_named(id, name) QCOM_ID_ ## id, (name)
+
+#ifdef CONFIG_DEBUG_FS
+#define SMEM_IMAGE_VERSION_BLOCKS_COUNT 32
+#define SMEM_IMAGE_VERSION_SIZE 4096
+#define SMEM_IMAGE_VERSION_NAME_SIZE 75
+#define SMEM_IMAGE_VERSION_VARIANT_SIZE 20
+#define SMEM_IMAGE_VERSION_OEM_SIZE 32
+
+/*
+ * SMEM Image table indices
+ */
+#define SMEM_IMAGE_TABLE_BOOT_INDEX 0
+#define SMEM_IMAGE_TABLE_TZ_INDEX 1
+#define SMEM_IMAGE_TABLE_RPM_INDEX 3
+#define SMEM_IMAGE_TABLE_APPS_INDEX 10
+#define SMEM_IMAGE_TABLE_MPSS_INDEX 11
+#define SMEM_IMAGE_TABLE_ADSP_INDEX 12
+#define SMEM_IMAGE_TABLE_CNSS_INDEX 13
+#define SMEM_IMAGE_TABLE_VIDEO_INDEX 14
+#define SMEM_IMAGE_VERSION_TABLE 469
+
+/*
+ * SMEM Image table names
+ */
+static const char *const socinfo_image_names[] = {
+ [SMEM_IMAGE_TABLE_ADSP_INDEX] = "adsp",
+ [SMEM_IMAGE_TABLE_APPS_INDEX] = "apps",
+ [SMEM_IMAGE_TABLE_BOOT_INDEX] = "boot",
+ [SMEM_IMAGE_TABLE_CNSS_INDEX] = "cnss",
+ [SMEM_IMAGE_TABLE_MPSS_INDEX] = "mpss",
+ [SMEM_IMAGE_TABLE_RPM_INDEX] = "rpm",
+ [SMEM_IMAGE_TABLE_TZ_INDEX] = "tz",
+ [SMEM_IMAGE_TABLE_VIDEO_INDEX] = "video",
+};
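+
+/*
+ * Each image named above gets its own directory under the qcom_socinfo
+ * debugfs root (typically /sys/kernel/debug/qcom_socinfo/<image>/), exposing
+ * the "name", "variant" and "oem" strings from the SMEM image version table.
+ */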
+
+static const char *const pmic_models[] = {
+ [0] = "Unknown PMIC model",
+ [1] = "PM8941",
+ [2] = "PM8841",
+ [3] = "PM8019",
+ [4] = "PM8226",
+ [5] = "PM8110",
+ [6] = "PMA8084",
+ [7] = "PMI8962",
+ [8] = "PMD9635",
+ [9] = "PM8994",
+ [10] = "PMI8994",
+ [11] = "PM8916",
+ [12] = "PM8004",
+ [13] = "PM8909/PM8058",
+ [14] = "PM8028",
+ [15] = "PM8901",
+ [16] = "PM8950/PM8027",
+ [17] = "PMI8950/ISL9519",
+ [18] = "PMK8001/PM8921",
+ [19] = "PMI8996/PM8018",
+ [20] = "PM8998/PM8015",
+ [21] = "PMI8998/PM8014",
+ [22] = "PM8821",
+ [23] = "PM8038",
+ [24] = "PM8005/PM8922",
+ [25] = "PM8917",
+ [26] = "PM660L",
+ [27] = "PM660",
+ [30] = "PM8150",
+ [31] = "PM8150L",
+ [32] = "PM8150B",
+ [33] = "PMK8002",
+ [36] = "PM8009",
+ [37] = "PMI632",
+ [38] = "PM8150C",
+ [40] = "PM6150",
+ [41] = "SMB2351",
+ [44] = "PM8008",
+ [45] = "PM6125",
+ [46] = "PM7250B",
+ [47] = "PMK8350",
+ [48] = "PM8350",
+ [49] = "PM8350C",
+ [50] = "PM8350B",
+ [51] = "PMR735A",
+ [52] = "PMR735B",
+ [55] = "PM2250",
+ [58] = "PM8450",
+ [65] = "PM8010",
+};
+
+struct socinfo_params {
+ u32 raw_device_family;
+ u32 hw_plat_subtype;
+ u32 accessory_chip;
+ u32 raw_device_num;
+ u32 chip_family;
+ u32 foundry_id;
+ u32 plat_ver;
+ u32 raw_ver;
+ u32 hw_plat;
+ u32 fmt;
+ u32 nproduct_id;
+ u32 num_clusters;
+ u32 ncluster_array_offset;
+ u32 num_subset_parts;
+ u32 nsubset_parts_array_offset;
+ u32 nmodem_supported;
+ u32 feature_code;
+ u32 pcode;
+ u32 oem_variant;
+ u32 num_func_clusters;
+ u32 boot_cluster;
+ u32 boot_core;
+};
+
+struct smem_image_version {
+ char name[SMEM_IMAGE_VERSION_NAME_SIZE];
+ char variant[SMEM_IMAGE_VERSION_VARIANT_SIZE];
+ char pad;
+ char oem[SMEM_IMAGE_VERSION_OEM_SIZE];
+};
+#endif /* CONFIG_DEBUG_FS */
+
+struct qcom_socinfo {
+ struct soc_device *soc_dev;
+ struct soc_device_attribute attr;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *dbg_root;
+ struct socinfo_params info;
+#endif /* CONFIG_DEBUG_FS */
+};
+
+struct soc_id {
+ unsigned int id;
+ const char *name;
+};
+
+static const struct soc_id soc_id[] = {
+ { qcom_board_id(MSM8260) },
+ { qcom_board_id(MSM8660) },
+ { qcom_board_id(APQ8060) },
+ { qcom_board_id(MSM8960) },
+ { qcom_board_id(APQ8064) },
+ { qcom_board_id(MSM8930) },
+ { qcom_board_id(MSM8630) },
+ { qcom_board_id(MSM8230) },
+ { qcom_board_id(APQ8030) },
+ { qcom_board_id(MSM8627) },
+ { qcom_board_id(MSM8227) },
+ { qcom_board_id(MSM8660A) },
+ { qcom_board_id(MSM8260A) },
+ { qcom_board_id(APQ8060A) },
+ { qcom_board_id(MSM8974) },
+ { qcom_board_id(MSM8225) },
+ { qcom_board_id(MSM8625) },
+ { qcom_board_id(MPQ8064) },
+ { qcom_board_id(MSM8960AB) },
+ { qcom_board_id(APQ8060AB) },
+ { qcom_board_id(MSM8260AB) },
+ { qcom_board_id(MSM8660AB) },
+ { qcom_board_id(MSM8930AA) },
+ { qcom_board_id(MSM8630AA) },
+ { qcom_board_id(MSM8230AA) },
+ { qcom_board_id(MSM8626) },
+ { qcom_board_id(MSM8610) },
+ { qcom_board_id(APQ8064AB) },
+ { qcom_board_id(MSM8930AB) },
+ { qcom_board_id(MSM8630AB) },
+ { qcom_board_id(MSM8230AB) },
+ { qcom_board_id(APQ8030AB) },
+ { qcom_board_id(MSM8226) },
+ { qcom_board_id(MSM8526) },
+ { qcom_board_id(APQ8030AA) },
+ { qcom_board_id(MSM8110) },
+ { qcom_board_id(MSM8210) },
+ { qcom_board_id(MSM8810) },
+ { qcom_board_id(MSM8212) },
+ { qcom_board_id(MSM8612) },
+ { qcom_board_id(MSM8112) },
+ { qcom_board_id(MSM8125) },
+ { qcom_board_id(MSM8225Q) },
+ { qcom_board_id(MSM8625Q) },
+ { qcom_board_id(MSM8125Q) },
+ { qcom_board_id(APQ8064AA) },
+ { qcom_board_id(APQ8084) },
+ { qcom_board_id(MSM8130) },
+ { qcom_board_id(MSM8130AA) },
+ { qcom_board_id(MSM8130AB) },
+ { qcom_board_id(MSM8627AA) },
+ { qcom_board_id(MSM8227AA) },
+ { qcom_board_id(APQ8074) },
+ { qcom_board_id(MSM8274) },
+ { qcom_board_id(MSM8674) },
+ { qcom_board_id(MDM9635) },
+ { qcom_board_id_named(MSM8974PRO_AC, "MSM8974PRO-AC") },
+ { qcom_board_id(MSM8126) },
+ { qcom_board_id(APQ8026) },
+ { qcom_board_id(MSM8926) },
+ { qcom_board_id(IPQ8062) },
+ { qcom_board_id(IPQ8064) },
+ { qcom_board_id(IPQ8066) },
+ { qcom_board_id(IPQ8068) },
+ { qcom_board_id(MSM8326) },
+ { qcom_board_id(MSM8916) },
+ { qcom_board_id(MSM8994) },
+ { qcom_board_id_named(APQ8074PRO_AA, "APQ8074PRO-AA") },
+ { qcom_board_id_named(APQ8074PRO_AB, "APQ8074PRO-AB") },
+ { qcom_board_id_named(APQ8074PRO_AC, "APQ8074PRO-AC") },
+ { qcom_board_id_named(MSM8274PRO_AA, "MSM8274PRO-AA") },
+ { qcom_board_id_named(MSM8274PRO_AB, "MSM8274PRO-AB") },
+ { qcom_board_id_named(MSM8274PRO_AC, "MSM8274PRO-AC") },
+ { qcom_board_id_named(MSM8674PRO_AA, "MSM8674PRO-AA") },
+ { qcom_board_id_named(MSM8674PRO_AB, "MSM8674PRO-AB") },
+ { qcom_board_id_named(MSM8674PRO_AC, "MSM8674PRO-AC") },
+ { qcom_board_id_named(MSM8974PRO_AA, "MSM8974PRO-AA") },
+ { qcom_board_id_named(MSM8974PRO_AB, "MSM8974PRO-AB") },
+ { qcom_board_id(APQ8028) },
+ { qcom_board_id(MSM8128) },
+ { qcom_board_id(MSM8228) },
+ { qcom_board_id(MSM8528) },
+ { qcom_board_id(MSM8628) },
+ { qcom_board_id(MSM8928) },
+ { qcom_board_id(MSM8510) },
+ { qcom_board_id(MSM8512) },
+ { qcom_board_id(MSM8936) },
+ { qcom_board_id(MDM9640) },
+ { qcom_board_id(MSM8939) },
+ { qcom_board_id(APQ8036) },
+ { qcom_board_id(APQ8039) },
+ { qcom_board_id(MSM8236) },
+ { qcom_board_id(MSM8636) },
+ { qcom_board_id(MSM8909) },
+ { qcom_board_id(MSM8996) },
+ { qcom_board_id(APQ8016) },
+ { qcom_board_id(MSM8216) },
+ { qcom_board_id(MSM8116) },
+ { qcom_board_id(MSM8616) },
+ { qcom_board_id(MSM8992) },
+ { qcom_board_id(APQ8092) },
+ { qcom_board_id(APQ8094) },
+ { qcom_board_id(MSM8209) },
+ { qcom_board_id(MSM8208) },
+ { qcom_board_id(MDM9209) },
+ { qcom_board_id(MDM9309) },
+ { qcom_board_id(MDM9609) },
+ { qcom_board_id(MSM8239) },
+ { qcom_board_id(MSM8952) },
+ { qcom_board_id(APQ8009) },
+ { qcom_board_id(MSM8956) },
+ { qcom_board_id(MSM8929) },
+ { qcom_board_id(MSM8629) },
+ { qcom_board_id(MSM8229) },
+ { qcom_board_id(APQ8029) },
+ { qcom_board_id(APQ8056) },
+ { qcom_board_id(MSM8609) },
+ { qcom_board_id(APQ8076) },
+ { qcom_board_id(MSM8976) },
+ { qcom_board_id(IPQ8065) },
+ { qcom_board_id(IPQ8069) },
+ { qcom_board_id(MDM9650) },
+ { qcom_board_id(MDM9655) },
+ { qcom_board_id(MDM9250) },
+ { qcom_board_id(MDM9255) },
+ { qcom_board_id(MDM9350) },
+ { qcom_board_id(APQ8052) },
+ { qcom_board_id(MDM9607) },
+ { qcom_board_id(APQ8096) },
+ { qcom_board_id(MSM8998) },
+ { qcom_board_id(MSM8953) },
+ { qcom_board_id(MSM8937) },
+ { qcom_board_id(APQ8037) },
+ { qcom_board_id(MDM8207) },
+ { qcom_board_id(MDM9207) },
+ { qcom_board_id(MDM9307) },
+ { qcom_board_id(MDM9628) },
+ { qcom_board_id(MSM8909W) },
+ { qcom_board_id(APQ8009W) },
+ { qcom_board_id(MSM8996L) },
+ { qcom_board_id(MSM8917) },
+ { qcom_board_id(APQ8053) },
+ { qcom_board_id(MSM8996SG) },
+ { qcom_board_id(APQ8017) },
+ { qcom_board_id(MSM8217) },
+ { qcom_board_id(MSM8617) },
+ { qcom_board_id(MSM8996AU) },
+ { qcom_board_id(APQ8096AU) },
+ { qcom_board_id(APQ8096SG) },
+ { qcom_board_id(MSM8940) },
+ { qcom_board_id(SDX201) },
+ { qcom_board_id(SDM660) },
+ { qcom_board_id(SDM630) },
+ { qcom_board_id(APQ8098) },
+ { qcom_board_id(MSM8920) },
+ { qcom_board_id(SDM845) },
+ { qcom_board_id(MDM9206) },
+ { qcom_board_id(IPQ8074) },
+ { qcom_board_id(SDA660) },
+ { qcom_board_id(SDM658) },
+ { qcom_board_id(SDA658) },
+ { qcom_board_id(SDA630) },
+ { qcom_board_id(MSM8905) },
+ { qcom_board_id(SDX202) },
+ { qcom_board_id(SDM450) },
+ { qcom_board_id(SM8150) },
+ { qcom_board_id(SDA845) },
+ { qcom_board_id(IPQ8072) },
+ { qcom_board_id(IPQ8076) },
+ { qcom_board_id(IPQ8078) },
+ { qcom_board_id(SDM636) },
+ { qcom_board_id(SDA636) },
+ { qcom_board_id(SDM632) },
+ { qcom_board_id(SDA632) },
+ { qcom_board_id(SDA450) },
+ { qcom_board_id(SDM439) },
+ { qcom_board_id(SDM429) },
+ { qcom_board_id(SM8250) },
+ { qcom_board_id(SA8155) },
+ { qcom_board_id(SDA439) },
+ { qcom_board_id(SDA429) },
+ { qcom_board_id(SM7150) },
+ { qcom_board_id(IPQ8070) },
+ { qcom_board_id(IPQ8071) },
+ { qcom_board_id(QM215) },
+ { qcom_board_id(IPQ8072A) },
+ { qcom_board_id(IPQ8074A) },
+ { qcom_board_id(IPQ8076A) },
+ { qcom_board_id(IPQ8078A) },
+ { qcom_board_id(SM6125) },
+ { qcom_board_id(IPQ8070A) },
+ { qcom_board_id(IPQ8071A) },
+ { qcom_board_id(IPQ6018) },
+ { qcom_board_id(IPQ6028) },
+ { qcom_board_id(SDM429W) },
+ { qcom_board_id(SM4250) },
+ { qcom_board_id(IPQ6000) },
+ { qcom_board_id(IPQ6010) },
+ { qcom_board_id(SC7180) },
+ { qcom_board_id(SM6350) },
+ { qcom_board_id(QCM2150) },
+ { qcom_board_id(SDA429W) },
+ { qcom_board_id(SM8350) },
+ { qcom_board_id(QCM2290) },
+ { qcom_board_id(SM7125) },
+ { qcom_board_id(SM6115) },
+ { qcom_board_id(IPQ5010) },
+ { qcom_board_id(IPQ5018) },
+ { qcom_board_id(IPQ5028) },
+ { qcom_board_id(SC8280XP) },
+ { qcom_board_id(IPQ6005) },
+ { qcom_board_id(QRB5165) },
+ { qcom_board_id(SM8450) },
+ { qcom_board_id(SM7225) },
+ { qcom_board_id(SA8295P) },
+ { qcom_board_id(SA8540P) },
+ { qcom_board_id(QCM4290) },
+ { qcom_board_id(QCS4290) },
+ { qcom_board_id_named(SM8450_2, "SM8450") },
+ { qcom_board_id_named(SM8450_3, "SM8450") },
+ { qcom_board_id(SC7280) },
+ { qcom_board_id(SC7180P) },
+ { qcom_board_id(IPQ5000) },
+ { qcom_board_id(IPQ0509) },
+ { qcom_board_id(IPQ0518) },
+ { qcom_board_id(SM6375) },
+ { qcom_board_id(IPQ9514) },
+ { qcom_board_id(IPQ9550) },
+ { qcom_board_id(IPQ9554) },
+ { qcom_board_id(IPQ9570) },
+ { qcom_board_id(IPQ9574) },
+ { qcom_board_id(SM8550) },
+ { qcom_board_id(IPQ5016) },
+ { qcom_board_id(IPQ9510) },
+ { qcom_board_id(QRB4210) },
+ { qcom_board_id(QRB2210) },
+ { qcom_board_id(SA8775P) },
+ { qcom_board_id(QRU1000) },
+ { qcom_board_id(QDU1000) },
+ { qcom_board_id(SM4450) },
+ { qcom_board_id(QDU1010) },
+ { qcom_board_id(QRU1032) },
+ { qcom_board_id(QRU1052) },
+ { qcom_board_id(QRU1062) },
+ { qcom_board_id(IPQ5332) },
+ { qcom_board_id(IPQ5322) },
+ { qcom_board_id(IPQ5312) },
+ { qcom_board_id(IPQ5302) },
+ { qcom_board_id(IPQ5300) },
+};
+
+static const char *socinfo_machine(struct device *dev, unsigned int id)
+{
+ int idx;
+
+ for (idx = 0; idx < ARRAY_SIZE(soc_id); idx++) {
+ if (soc_id[idx].id == id)
+ return soc_id[idx].name;
+ }
+
+ return NULL;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+#define QCOM_OPEN(name, _func) \
+static int qcom_open_##name(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, _func, inode->i_private); \
+} \
+ \
+static const struct file_operations qcom_ ##name## _ops = { \
+ .open = qcom_open_##name, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
+#define DEBUGFS_ADD(info, name) \
+ debugfs_create_file(__stringify(name), 0444, \
+ qcom_socinfo->dbg_root, \
+ info, &qcom_ ##name## _ops)
+
+
+static int qcom_show_build_id(struct seq_file *seq, void *p)
+{
+ struct socinfo *socinfo = seq->private;
+
+ seq_printf(seq, "%s\n", socinfo->build_id);
+
+ return 0;
+}
+
+static int qcom_show_pmic_model(struct seq_file *seq, void *p)
+{
+ struct socinfo *socinfo = seq->private;
+ int model = SOCINFO_MINOR(le32_to_cpu(socinfo->pmic_model));
+
+ if (model < 0)
+ return -EINVAL;
+
+ if (model < ARRAY_SIZE(pmic_models) && pmic_models[model])
+ seq_printf(seq, "%s\n", pmic_models[model]);
+ else
+ seq_printf(seq, "unknown (%d)\n", model);
+
+ return 0;
+}
+
+static int qcom_show_pmic_model_array(struct seq_file *seq, void *p)
+{
+ struct socinfo *socinfo = seq->private;
+ unsigned int num_pmics = le32_to_cpu(socinfo->num_pmics);
+ unsigned int pmic_array_offset = le32_to_cpu(socinfo->pmic_array_offset);
+ int i;
+ void *ptr = socinfo;
+
+ ptr += pmic_array_offset;
+
+ /* No need for bounds checking, it happened at socinfo_debugfs_init */
+ for (i = 0; i < num_pmics; i++) {
+ unsigned int model = SOCINFO_MINOR(get_unaligned_le32(ptr + 2 * i * sizeof(u32)));
+ unsigned int die_rev = get_unaligned_le32(ptr + (2 * i + 1) * sizeof(u32));
+
+ if (model < ARRAY_SIZE(pmic_models) && pmic_models[model])
+ seq_printf(seq, "%s %u.%u\n", pmic_models[model],
+ SOCINFO_MAJOR(die_rev),
+ SOCINFO_MINOR(die_rev));
+ else
+ seq_printf(seq, "unknown (%d)\n", model);
+ }
+
+ return 0;
+}
+
+static int qcom_show_pmic_die_revision(struct seq_file *seq, void *p)
+{
+ struct socinfo *socinfo = seq->private;
+
+ seq_printf(seq, "%u.%u\n",
+ SOCINFO_MAJOR(le32_to_cpu(socinfo->pmic_die_rev)),
+ SOCINFO_MINOR(le32_to_cpu(socinfo->pmic_die_rev)));
+
+ return 0;
+}
+
+static int qcom_show_chip_id(struct seq_file *seq, void *p)
+{
+ struct socinfo *socinfo = seq->private;
+
+ seq_printf(seq, "%s\n", socinfo->chip_id);
+
+ return 0;
+}
+
+QCOM_OPEN(build_id, qcom_show_build_id);
+QCOM_OPEN(pmic_model, qcom_show_pmic_model);
+QCOM_OPEN(pmic_model_array, qcom_show_pmic_model_array);
+QCOM_OPEN(pmic_die_rev, qcom_show_pmic_die_revision);
+QCOM_OPEN(chip_id, qcom_show_chip_id);
+
+#define DEFINE_IMAGE_OPS(type) \
+static int show_image_##type(struct seq_file *seq, void *p) \
+{ \
+ struct smem_image_version *image_version = seq->private; \
+ if (image_version->type[0] != '\0') \
+ seq_printf(seq, "%s\n", image_version->type); \
+ return 0; \
+} \
+static int open_image_##type(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, show_image_##type, inode->i_private); \
+} \
+ \
+static const struct file_operations qcom_image_##type##_ops = { \
+ .open = open_image_##type, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
+DEFINE_IMAGE_OPS(name);
+DEFINE_IMAGE_OPS(variant);
+DEFINE_IMAGE_OPS(oem);
+
+static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo,
+ struct socinfo *info, size_t info_size)
+{
+ struct smem_image_version *versions;
+ struct dentry *dentry;
+ size_t size;
+ int i;
+ unsigned int num_pmics;
+ unsigned int pmic_array_offset;
+
+ qcom_socinfo->dbg_root = debugfs_create_dir("qcom_socinfo", NULL);
+
+ qcom_socinfo->info.fmt = __le32_to_cpu(info->fmt);
+
+ debugfs_create_x32("info_fmt", 0444, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.fmt);
+
+ switch (qcom_socinfo->info.fmt) {
+ case SOCINFO_VERSION(0, 19):
+ qcom_socinfo->info.num_func_clusters = __le32_to_cpu(info->num_func_clusters);
+ qcom_socinfo->info.boot_cluster = __le32_to_cpu(info->boot_cluster);
+ qcom_socinfo->info.boot_core = __le32_to_cpu(info->boot_core);
+
+ debugfs_create_u32("num_func_clusters", 0444, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.num_func_clusters);
+ debugfs_create_u32("boot_cluster", 0444, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.boot_cluster);
+ debugfs_create_u32("boot_core", 0444, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.boot_core);
+ fallthrough;
+ case SOCINFO_VERSION(0, 18):
+ case SOCINFO_VERSION(0, 17):
+ qcom_socinfo->info.oem_variant = __le32_to_cpu(info->oem_variant);
+ debugfs_create_u32("oem_variant", 0444, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.oem_variant);
+ fallthrough;
+ case SOCINFO_VERSION(0, 16):
+ qcom_socinfo->info.feature_code = __le32_to_cpu(info->feature_code);
+ qcom_socinfo->info.pcode = __le32_to_cpu(info->pcode);
+
+ debugfs_create_u32("feature_code", 0444, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.feature_code);
+ debugfs_create_u32("pcode", 0444, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.pcode);
+ fallthrough;
+ case SOCINFO_VERSION(0, 15):
+ qcom_socinfo->info.nmodem_supported = __le32_to_cpu(info->nmodem_supported);
+
+ debugfs_create_u32("nmodem_supported", 0444, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.nmodem_supported);
+ fallthrough;
+ case SOCINFO_VERSION(0, 14):
+ qcom_socinfo->info.num_clusters = __le32_to_cpu(info->num_clusters);
+ qcom_socinfo->info.ncluster_array_offset = __le32_to_cpu(info->ncluster_array_offset);
+ qcom_socinfo->info.num_subset_parts = __le32_to_cpu(info->num_subset_parts);
+ qcom_socinfo->info.nsubset_parts_array_offset =
+ __le32_to_cpu(info->nsubset_parts_array_offset);
+
+ debugfs_create_u32("num_clusters", 0444, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.num_clusters);
+ debugfs_create_u32("ncluster_array_offset", 0444, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.ncluster_array_offset);
+ debugfs_create_u32("num_subset_parts", 0444, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.num_subset_parts);
+ debugfs_create_u32("nsubset_parts_array_offset", 0444, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.nsubset_parts_array_offset);
+ fallthrough;
+ case SOCINFO_VERSION(0, 13):
+ qcom_socinfo->info.nproduct_id = __le32_to_cpu(info->nproduct_id);
+
+ debugfs_create_u32("nproduct_id", 0444, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.nproduct_id);
+ DEBUGFS_ADD(info, chip_id);
+ fallthrough;
+ case SOCINFO_VERSION(0, 12):
+ qcom_socinfo->info.chip_family =
+ __le32_to_cpu(info->chip_family);
+ qcom_socinfo->info.raw_device_family =
+ __le32_to_cpu(info->raw_device_family);
+ qcom_socinfo->info.raw_device_num =
+ __le32_to_cpu(info->raw_device_num);
+
+ debugfs_create_x32("chip_family", 0444, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.chip_family);
+ debugfs_create_x32("raw_device_family", 0444,
+ qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.raw_device_family);
+ debugfs_create_x32("raw_device_number", 0444,
+ qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.raw_device_num);
+ fallthrough;
+ case SOCINFO_VERSION(0, 11):
+ num_pmics = le32_to_cpu(info->num_pmics);
+ pmic_array_offset = le32_to_cpu(info->pmic_array_offset);
+ if (pmic_array_offset + 2 * num_pmics * sizeof(u32) <= info_size)
+ DEBUGFS_ADD(info, pmic_model_array);
+ fallthrough;
+ case SOCINFO_VERSION(0, 10):
+ case SOCINFO_VERSION(0, 9):
+ qcom_socinfo->info.foundry_id = __le32_to_cpu(info->foundry_id);
+
+ debugfs_create_u32("foundry_id", 0444, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.foundry_id);
+ fallthrough;
+ case SOCINFO_VERSION(0, 8):
+ case SOCINFO_VERSION(0, 7):
+ DEBUGFS_ADD(info, pmic_model);
+ DEBUGFS_ADD(info, pmic_die_rev);
+ fallthrough;
+ case SOCINFO_VERSION(0, 6):
+ qcom_socinfo->info.hw_plat_subtype =
+ __le32_to_cpu(info->hw_plat_subtype);
+
+ debugfs_create_u32("hardware_platform_subtype", 0444,
+ qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.hw_plat_subtype);
+ fallthrough;
+ case SOCINFO_VERSION(0, 5):
+ qcom_socinfo->info.accessory_chip =
+ __le32_to_cpu(info->accessory_chip);
+
+ debugfs_create_u32("accessory_chip", 0444,
+ qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.accessory_chip);
+ fallthrough;
+ case SOCINFO_VERSION(0, 4):
+ qcom_socinfo->info.plat_ver = __le32_to_cpu(info->plat_ver);
+
+ debugfs_create_u32("platform_version", 0444,
+ qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.plat_ver);
+ fallthrough;
+ case SOCINFO_VERSION(0, 3):
+ qcom_socinfo->info.hw_plat = __le32_to_cpu(info->hw_plat);
+
+ debugfs_create_u32("hardware_platform", 0444,
+ qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.hw_plat);
+ fallthrough;
+ case SOCINFO_VERSION(0, 2):
+ qcom_socinfo->info.raw_ver = __le32_to_cpu(info->raw_ver);
+
+ debugfs_create_u32("raw_version", 0444, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.raw_ver);
+ fallthrough;
+ case SOCINFO_VERSION(0, 1):
+ DEBUGFS_ADD(info, build_id);
+ break;
+ }
+
+ versions = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_IMAGE_VERSION_TABLE,
+ &size);
+
+ for (i = 0; i < ARRAY_SIZE(socinfo_image_names); i++) {
+ if (!socinfo_image_names[i])
+ continue;
+
+ dentry = debugfs_create_dir(socinfo_image_names[i],
+ qcom_socinfo->dbg_root);
+ debugfs_create_file("name", 0444, dentry, &versions[i],
+ &qcom_image_name_ops);
+ debugfs_create_file("variant", 0444, dentry, &versions[i],
+ &qcom_image_variant_ops);
+ debugfs_create_file("oem", 0444, dentry, &versions[i],
+ &qcom_image_oem_ops);
+ }
+}
+
+static void socinfo_debugfs_exit(struct qcom_socinfo *qcom_socinfo)
+{
+ debugfs_remove_recursive(qcom_socinfo->dbg_root);
+}
+#else
+static void socinfo_debugfs_init(struct qcom_socinfo *qcom_socinfo,
+ struct socinfo *info, size_t info_size)
+{
+}
+static void socinfo_debugfs_exit(struct qcom_socinfo *qcom_socinfo) { }
+#endif /* CONFIG_DEBUG_FS */
+
+static int qcom_socinfo_probe(struct platform_device *pdev)
+{
+ struct qcom_socinfo *qs;
+ struct socinfo *info;
+ size_t item_size;
+
+ info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_HW_SW_BUILD_ID,
+ &item_size);
+ if (IS_ERR(info)) {
+ dev_err(&pdev->dev, "Couldn't find socinfo\n");
+ return PTR_ERR(info);
+ }
+
+ qs = devm_kzalloc(&pdev->dev, sizeof(*qs), GFP_KERNEL);
+ if (!qs)
+ return -ENOMEM;
+
+ qs->attr.family = "Snapdragon";
+ qs->attr.machine = socinfo_machine(&pdev->dev,
+ le32_to_cpu(info->id));
+ qs->attr.soc_id = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u",
+ le32_to_cpu(info->id));
+ qs->attr.revision = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u.%u",
+ SOCINFO_MAJOR(le32_to_cpu(info->ver)),
+ SOCINFO_MINOR(le32_to_cpu(info->ver)));
+ if (offsetof(struct socinfo, serial_num) <= item_size)
+ qs->attr.serial_number = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "%u",
+ le32_to_cpu(info->serial_num));
+
+ qs->soc_dev = soc_device_register(&qs->attr);
+ if (IS_ERR(qs->soc_dev))
+ return PTR_ERR(qs->soc_dev);
+
+ socinfo_debugfs_init(qs, info, item_size);
+
+	/* Feed the SoC-specific unique data into the entropy pool */
+ add_device_randomness(info, item_size);
+
+ platform_set_drvdata(pdev, qs);
+
+ return 0;
+}
+
+static int qcom_socinfo_remove(struct platform_device *pdev)
+{
+ struct qcom_socinfo *qs = platform_get_drvdata(pdev);
+
+ soc_device_unregister(qs->soc_dev);
+
+ socinfo_debugfs_exit(qs);
+
+ return 0;
+}
+
+static struct platform_driver qcom_socinfo_driver = {
+ .probe = qcom_socinfo_probe,
+ .remove = qcom_socinfo_remove,
+ .driver = {
+ .name = "qcom-socinfo",
+ },
+};
+
+module_platform_driver(qcom_socinfo_driver);
+
+MODULE_DESCRIPTION("Qualcomm SoCinfo driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qcom-socinfo");
diff --git a/drivers/soc/qcom/spm.c b/drivers/soc/qcom/spm.c
new file mode 100644
index 0000000000..2f0b1bfe76
--- /dev/null
+++ b/drivers/soc/qcom/spm.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2015, Linaro Ltd.
+ *
+ * SAW power controller driver
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <soc/qcom/spm.h>
+
+#define SPM_CTL_INDEX 0x7f
+#define SPM_CTL_INDEX_SHIFT 4
+#define SPM_CTL_EN BIT(0)
+
+enum spm_reg {
+ SPM_REG_CFG,
+ SPM_REG_SPM_CTL,
+ SPM_REG_DLY,
+ SPM_REG_PMIC_DLY,
+ SPM_REG_PMIC_DATA_0,
+ SPM_REG_PMIC_DATA_1,
+ SPM_REG_VCTL,
+ SPM_REG_SEQ_ENTRY,
+ SPM_REG_SPM_STS,
+ SPM_REG_PMIC_STS,
+ SPM_REG_AVS_CTL,
+ SPM_REG_AVS_LIMIT,
+ SPM_REG_NR,
+};
+
+static const u16 spm_reg_offset_v4_1[SPM_REG_NR] = {
+ [SPM_REG_AVS_CTL] = 0x904,
+ [SPM_REG_AVS_LIMIT] = 0x908,
+};
+
+static const struct spm_reg_data spm_reg_660_gold_l2 = {
+ .reg_offset = spm_reg_offset_v4_1,
+ .avs_ctl = 0x1010031,
+ .avs_limit = 0x4580458,
+};
+
+static const struct spm_reg_data spm_reg_660_silver_l2 = {
+ .reg_offset = spm_reg_offset_v4_1,
+ .avs_ctl = 0x101c031,
+ .avs_limit = 0x4580458,
+};
+
+static const struct spm_reg_data spm_reg_8998_gold_l2 = {
+ .reg_offset = spm_reg_offset_v4_1,
+ .avs_ctl = 0x1010031,
+ .avs_limit = 0x4700470,
+};
+
+static const struct spm_reg_data spm_reg_8998_silver_l2 = {
+ .reg_offset = spm_reg_offset_v4_1,
+ .avs_ctl = 0x1010031,
+ .avs_limit = 0x4200420,
+};
+
+static const u16 spm_reg_offset_v3_0[SPM_REG_NR] = {
+ [SPM_REG_CFG] = 0x08,
+ [SPM_REG_SPM_CTL] = 0x30,
+ [SPM_REG_DLY] = 0x34,
+ [SPM_REG_SEQ_ENTRY] = 0x400,
+};
+
+/* SPM register data for 8909 */
+static const struct spm_reg_data spm_reg_8909_cpu = {
+ .reg_offset = spm_reg_offset_v3_0,
+ .spm_cfg = 0x1,
+ .spm_dly = 0x3C102800,
+ .seq = { 0x60, 0x03, 0x60, 0x0B, 0x0F, 0x20, 0x10, 0x80, 0x30, 0x90,
+ 0x5B, 0x60, 0x03, 0x60, 0x76, 0x76, 0x0B, 0x94, 0x5B, 0x80,
+ 0x10, 0x26, 0x30, 0x0F },
+ .start_index[PM_SLEEP_MODE_STBY] = 0,
+ .start_index[PM_SLEEP_MODE_SPC] = 5,
+};
+
+/* SPM register data for 8916 */
+static const struct spm_reg_data spm_reg_8916_cpu = {
+ .reg_offset = spm_reg_offset_v3_0,
+ .spm_cfg = 0x1,
+ .spm_dly = 0x3C102800,
+ .seq = { 0x60, 0x03, 0x60, 0x0B, 0x0F, 0x20, 0x10, 0x80, 0x30, 0x90,
+ 0x5B, 0x60, 0x03, 0x60, 0x3B, 0x76, 0x76, 0x0B, 0x94, 0x5B,
+ 0x80, 0x10, 0x26, 0x30, 0x0F },
+ .start_index[PM_SLEEP_MODE_STBY] = 0,
+ .start_index[PM_SLEEP_MODE_SPC] = 5,
+};
+
+static const struct spm_reg_data spm_reg_8939_cpu = {
+ .reg_offset = spm_reg_offset_v3_0,
+ .spm_cfg = 0x1,
+ .spm_dly = 0x3C102800,
+ .seq = { 0x60, 0x03, 0x60, 0x0B, 0x0F, 0x20, 0x50, 0x1B, 0x10, 0x80,
+ 0x30, 0x90, 0x5B, 0x60, 0x50, 0x03, 0x60, 0x76, 0x76, 0x0B,
+ 0x50, 0x1B, 0x94, 0x5B, 0x80, 0x10, 0x26, 0x30, 0x50, 0x0F },
+ .start_index[PM_SLEEP_MODE_STBY] = 0,
+ .start_index[PM_SLEEP_MODE_SPC] = 5,
+};
+
+static const u16 spm_reg_offset_v2_3[SPM_REG_NR] = {
+ [SPM_REG_CFG] = 0x08,
+ [SPM_REG_SPM_CTL] = 0x30,
+ [SPM_REG_DLY] = 0x34,
+ [SPM_REG_PMIC_DATA_0] = 0x40,
+ [SPM_REG_PMIC_DATA_1] = 0x44,
+};
+
+/* SPM register data for 8976 */
+static const struct spm_reg_data spm_reg_8976_gold_l2 = {
+ .reg_offset = spm_reg_offset_v2_3,
+ .spm_cfg = 0x14,
+ .spm_dly = 0x3c11840a,
+ .pmic_data[0] = 0x03030080,
+ .pmic_data[1] = 0x00030000,
+ .start_index[PM_SLEEP_MODE_STBY] = 0,
+ .start_index[PM_SLEEP_MODE_SPC] = 3,
+};
+
+static const struct spm_reg_data spm_reg_8976_silver_l2 = {
+ .reg_offset = spm_reg_offset_v2_3,
+ .spm_cfg = 0x14,
+ .spm_dly = 0x3c102800,
+ .pmic_data[0] = 0x03030080,
+ .pmic_data[1] = 0x00030000,
+ .start_index[PM_SLEEP_MODE_STBY] = 0,
+ .start_index[PM_SLEEP_MODE_SPC] = 2,
+};
+
+static const u16 spm_reg_offset_v2_1[SPM_REG_NR] = {
+ [SPM_REG_CFG] = 0x08,
+ [SPM_REG_SPM_CTL] = 0x30,
+ [SPM_REG_DLY] = 0x34,
+ [SPM_REG_SEQ_ENTRY] = 0x80,
+};
+
+/* SPM register data for 8974, 8084 */
+static const struct spm_reg_data spm_reg_8974_8084_cpu = {
+ .reg_offset = spm_reg_offset_v2_1,
+ .spm_cfg = 0x1,
+ .spm_dly = 0x3C102800,
+ .seq = { 0x03, 0x0B, 0x0F, 0x00, 0x20, 0x80, 0x10, 0xE8, 0x5B, 0x03,
+ 0x3B, 0xE8, 0x5B, 0x82, 0x10, 0x0B, 0x30, 0x06, 0x26, 0x30,
+ 0x0F },
+ .start_index[PM_SLEEP_MODE_STBY] = 0,
+ .start_index[PM_SLEEP_MODE_SPC] = 3,
+};
+
+/* SPM register data for 8226 */
+static const struct spm_reg_data spm_reg_8226_cpu = {
+ .reg_offset = spm_reg_offset_v2_1,
+ .spm_cfg = 0x0,
+ .spm_dly = 0x3C102800,
+ .seq = { 0x60, 0x03, 0x60, 0x0B, 0x0F, 0x20, 0x10, 0x80, 0x30, 0x90,
+ 0x5B, 0x60, 0x03, 0x60, 0x3B, 0x76, 0x76, 0x0B, 0x94, 0x5B,
+ 0x80, 0x10, 0x26, 0x30, 0x0F },
+ .start_index[PM_SLEEP_MODE_STBY] = 0,
+ .start_index[PM_SLEEP_MODE_SPC] = 5,
+};
+
+static const u16 spm_reg_offset_v1_1[SPM_REG_NR] = {
+ [SPM_REG_CFG] = 0x08,
+ [SPM_REG_SPM_CTL] = 0x20,
+ [SPM_REG_PMIC_DLY] = 0x24,
+ [SPM_REG_PMIC_DATA_0] = 0x28,
+ [SPM_REG_PMIC_DATA_1] = 0x2C,
+ [SPM_REG_SEQ_ENTRY] = 0x80,
+};
+
+/* SPM register data for 8064 */
+static const struct spm_reg_data spm_reg_8064_cpu = {
+ .reg_offset = spm_reg_offset_v1_1,
+ .spm_cfg = 0x1F,
+ .pmic_dly = 0x02020004,
+ .pmic_data[0] = 0x0084009C,
+ .pmic_data[1] = 0x00A4001C,
+ .seq = { 0x03, 0x0F, 0x00, 0x24, 0x54, 0x10, 0x09, 0x03, 0x01,
+ 0x10, 0x54, 0x30, 0x0C, 0x24, 0x30, 0x0F },
+ .start_index[PM_SLEEP_MODE_STBY] = 0,
+ .start_index[PM_SLEEP_MODE_SPC] = 2,
+};
+
+static inline void spm_register_write(struct spm_driver_data *drv,
+ enum spm_reg reg, u32 val)
+{
+ if (drv->reg_data->reg_offset[reg])
+ writel_relaxed(val, drv->reg_base +
+ drv->reg_data->reg_offset[reg]);
+}
+
+/* Ensure the write has taken effect before returning */
+static inline void spm_register_write_sync(struct spm_driver_data *drv,
+ enum spm_reg reg, u32 val)
+{
+ u32 ret;
+
+ if (!drv->reg_data->reg_offset[reg])
+ return;
+
+ do {
+ writel_relaxed(val, drv->reg_base +
+ drv->reg_data->reg_offset[reg]);
+ ret = readl_relaxed(drv->reg_base +
+ drv->reg_data->reg_offset[reg]);
+ if (ret == val)
+ break;
+ cpu_relax();
+ } while (1);
+}
+
+static inline u32 spm_register_read(struct spm_driver_data *drv,
+ enum spm_reg reg)
+{
+ return readl_relaxed(drv->reg_base + drv->reg_data->reg_offset[reg]);
+}
+
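+/*
+ * A worked example of the control word programmed below: starting from a
+ * cleared register, selecting a sequence whose start_index is 5 yields
+ * (5 << SPM_CTL_INDEX_SHIFT) | SPM_CTL_EN, i.e. 0x51; any other bits already
+ * set in SPM_REG_SPM_CTL are preserved.
+ */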
+void spm_set_low_power_mode(struct spm_driver_data *drv,
+ enum pm_sleep_mode mode)
+{
+ u32 start_index;
+ u32 ctl_val;
+
+ start_index = drv->reg_data->start_index[mode];
+
+ ctl_val = spm_register_read(drv, SPM_REG_SPM_CTL);
+ ctl_val &= ~(SPM_CTL_INDEX << SPM_CTL_INDEX_SHIFT);
+ ctl_val |= start_index << SPM_CTL_INDEX_SHIFT;
+ ctl_val |= SPM_CTL_EN;
+ spm_register_write_sync(drv, SPM_REG_SPM_CTL, ctl_val);
+}
+
+static const struct of_device_id spm_match_table[] = {
+ { .compatible = "qcom,sdm660-gold-saw2-v4.1-l2",
+ .data = &spm_reg_660_gold_l2 },
+ { .compatible = "qcom,sdm660-silver-saw2-v4.1-l2",
+ .data = &spm_reg_660_silver_l2 },
+ { .compatible = "qcom,msm8226-saw2-v2.1-cpu",
+ .data = &spm_reg_8226_cpu },
+ { .compatible = "qcom,msm8909-saw2-v3.0-cpu",
+ .data = &spm_reg_8909_cpu },
+ { .compatible = "qcom,msm8916-saw2-v3.0-cpu",
+ .data = &spm_reg_8916_cpu },
+ { .compatible = "qcom,msm8939-saw2-v3.0-cpu",
+ .data = &spm_reg_8939_cpu },
+ { .compatible = "qcom,msm8974-saw2-v2.1-cpu",
+ .data = &spm_reg_8974_8084_cpu },
+ { .compatible = "qcom,msm8976-gold-saw2-v2.3-l2",
+ .data = &spm_reg_8976_gold_l2 },
+ { .compatible = "qcom,msm8976-silver-saw2-v2.3-l2",
+ .data = &spm_reg_8976_silver_l2 },
+ { .compatible = "qcom,msm8998-gold-saw2-v4.1-l2",
+ .data = &spm_reg_8998_gold_l2 },
+ { .compatible = "qcom,msm8998-silver-saw2-v4.1-l2",
+ .data = &spm_reg_8998_silver_l2 },
+ { .compatible = "qcom,apq8084-saw2-v2.1-cpu",
+ .data = &spm_reg_8974_8084_cpu },
+ { .compatible = "qcom,apq8064-saw2-v1.1-cpu",
+ .data = &spm_reg_8064_cpu },
+ { },
+};
+MODULE_DEVICE_TABLE(of, spm_match_table);
+
+static int spm_dev_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match_id;
+ struct spm_driver_data *drv;
+ void __iomem *addr;
+
+ drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
+ if (!drv)
+ return -ENOMEM;
+
+ drv->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(drv->reg_base))
+ return PTR_ERR(drv->reg_base);
+
+ match_id = of_match_node(spm_match_table, pdev->dev.of_node);
+ if (!match_id)
+ return -ENODEV;
+
+ drv->reg_data = match_id->data;
+ platform_set_drvdata(pdev, drv);
+
+ /* Write the SPM sequences first.. */
+ addr = drv->reg_base + drv->reg_data->reg_offset[SPM_REG_SEQ_ENTRY];
+ __iowrite32_copy(addr, drv->reg_data->seq,
+ ARRAY_SIZE(drv->reg_data->seq) / 4);
+
+	/*
+	 * ..and then the control registers.
+	 * On some SoCs, if the control registers are written first while the
+	 * CPU is held in reset, the reset signal could trigger the SPM state
+	 * machine before the sequences are completely written.
+	 */
+ spm_register_write(drv, SPM_REG_AVS_CTL, drv->reg_data->avs_ctl);
+ spm_register_write(drv, SPM_REG_AVS_LIMIT, drv->reg_data->avs_limit);
+ spm_register_write(drv, SPM_REG_CFG, drv->reg_data->spm_cfg);
+ spm_register_write(drv, SPM_REG_DLY, drv->reg_data->spm_dly);
+ spm_register_write(drv, SPM_REG_PMIC_DLY, drv->reg_data->pmic_dly);
+ spm_register_write(drv, SPM_REG_PMIC_DATA_0,
+ drv->reg_data->pmic_data[0]);
+ spm_register_write(drv, SPM_REG_PMIC_DATA_1,
+ drv->reg_data->pmic_data[1]);
+
+ /* Set up Standby as the default low power mode */
+ if (drv->reg_data->reg_offset[SPM_REG_SPM_CTL])
+ spm_set_low_power_mode(drv, PM_SLEEP_MODE_STBY);
+
+ return 0;
+}
+
+static struct platform_driver spm_driver = {
+ .probe = spm_dev_probe,
+ .driver = {
+ .name = "qcom_spm",
+ .of_match_table = spm_match_table,
+ },
+};
+
+static int __init qcom_spm_init(void)
+{
+ return platform_driver_register(&spm_driver);
+}
+arch_initcall(qcom_spm_init);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/trace-rpmh.h b/drivers/soc/qcom/trace-rpmh.h
new file mode 100644
index 0000000000..be6b42ecc1
--- /dev/null
+++ b/drivers/soc/qcom/trace-rpmh.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#if !defined(_TRACE_RPMH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RPMH_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rpmh
+
+#include <linux/tracepoint.h>
+#include "rpmh-internal.h"
+
+TRACE_EVENT(rpmh_tx_done,
+
+ TP_PROTO(struct rsc_drv *d, int m, const struct tcs_request *r),
+
+ TP_ARGS(d, m, r),
+
+ TP_STRUCT__entry(
+ __string(name, d->name)
+ __field(int, m)
+ __field(u32, addr)
+ __field(u32, data)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, d->name);
+ __entry->m = m;
+ __entry->addr = r->cmds[0].addr;
+ __entry->data = r->cmds[0].data;
+ ),
+
+ TP_printk("%s: ack: tcs-m: %d addr: %#x data: %#x",
+ __get_str(name), __entry->m, __entry->addr, __entry->data)
+);
+
+TRACE_EVENT(rpmh_send_msg,
+
+ TP_PROTO(struct rsc_drv *d, int m, enum rpmh_state state, int n, u32 h,
+ const struct tcs_cmd *c),
+
+ TP_ARGS(d, m, state, n, h, c),
+
+ TP_STRUCT__entry(
+ __string(name, d->name)
+ __field(int, m)
+ __field(u32, state)
+ __field(int, n)
+ __field(u32, hdr)
+ __field(u32, addr)
+ __field(u32, data)
+ __field(bool, wait)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, d->name);
+ __entry->m = m;
+ __entry->state = state;
+ __entry->n = n;
+ __entry->hdr = h;
+ __entry->addr = c->addr;
+ __entry->data = c->data;
+ __entry->wait = c->wait;
+ ),
+
+ TP_printk("%s: tcs(m): %d [%s] cmd(n): %d msgid: %#x addr: %#x data: %#x complete: %d",
+ __get_str(name), __entry->m,
+ __print_symbolic(__entry->state,
+ { RPMH_SLEEP_STATE, "sleep" },
+ { RPMH_WAKE_ONLY_STATE, "wake" },
+ { RPMH_ACTIVE_ONLY_STATE, "active" }),
+ __entry->n,
+ __entry->hdr,
+ __entry->addr, __entry->data, __entry->wait)
+);
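+
+/*
+ * Each TRACE_EVENT() above generates a matching trace_rpmh_tx_done() /
+ * trace_rpmh_send_msg() helper, which the RPMH driver is expected to call
+ * with its struct rsc_drv, the TCS index and the request or command in
+ * flight.
+ */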
+
+#endif /* _TRACE_RPMH_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace-rpmh
+
+#include <trace/define_trace.h>
diff --git a/drivers/soc/qcom/wcnss_ctrl.c b/drivers/soc/qcom/wcnss_ctrl.c
new file mode 100644
index 0000000000..ad9942412c
--- /dev/null
+++ b/drivers/soc/qcom/wcnss_ctrl.c
@@ -0,0 +1,366 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016, Linaro Ltd.
+ * Copyright (c) 2015, Sony Mobile Communications Inc.
+ */
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/rpmsg.h>
+#include <linux/soc/qcom/wcnss_ctrl.h>
+
+#define WCNSS_REQUEST_TIMEOUT (5 * HZ)
+#define WCNSS_CBC_TIMEOUT (10 * HZ)
+
+#define WCNSS_ACK_DONE_BOOTING 1
+#define WCNSS_ACK_COLD_BOOTING 2
+
+#define NV_FRAGMENT_SIZE 3072
+#define NVBIN_FILE "wlan/prima/WCNSS_qcom_wlan_nv.bin"
+
+/**
+ * struct wcnss_ctrl - driver context
+ * @dev: device handle
+ * @channel: SMD channel handle
+ * @ack: completion for outstanding requests
+ * @cbc: completion for cbc complete indication
+ * @ack_status: status of the outstanding request
+ * @probe_work: worker for uploading nv binary
+ */
+struct wcnss_ctrl {
+ struct device *dev;
+ struct rpmsg_endpoint *channel;
+
+ struct completion ack;
+ struct completion cbc;
+ int ack_status;
+
+ struct work_struct probe_work;
+};
+
+/* message types */
+enum {
+ WCNSS_VERSION_REQ = 0x01000000,
+ WCNSS_VERSION_RESP,
+ WCNSS_DOWNLOAD_NV_REQ,
+ WCNSS_DOWNLOAD_NV_RESP,
+ WCNSS_UPLOAD_CAL_REQ,
+ WCNSS_UPLOAD_CAL_RESP,
+ WCNSS_DOWNLOAD_CAL_REQ,
+ WCNSS_DOWNLOAD_CAL_RESP,
+ WCNSS_VBAT_LEVEL_IND,
+ WCNSS_BUILD_VERSION_REQ,
+ WCNSS_BUILD_VERSION_RESP,
+ WCNSS_PM_CONFIG_REQ,
+ WCNSS_CBC_COMPLETE_IND,
+};
+
+/**
+ * struct wcnss_msg_hdr - common packet header for requests and responses
+ * @type: packet message type
+ * @len: total length of the packet, including this header
+ */
+struct wcnss_msg_hdr {
+ u32 type;
+ u32 len;
+} __packed;
+
+/*
+ * struct wcnss_version_resp - version request response
+ */
+struct wcnss_version_resp {
+ struct wcnss_msg_hdr hdr;
+ u8 major;
+ u8 minor;
+ u8 version;
+ u8 revision;
+} __packed;
+
+/**
+ * struct wcnss_download_nv_req - firmware fragment request
+ * @hdr: common packet wcnss_msg_hdr header
+ * @seq: sequence number of this fragment
+ * @last: boolean indicator of this being the last fragment of the binary
+ * @frag_size: length of this fragment
+ * @fragment: fragment data
+ */
+struct wcnss_download_nv_req {
+ struct wcnss_msg_hdr hdr;
+ u16 seq;
+ u16 last;
+ u32 frag_size;
+ u8 fragment[];
+} __packed;
+
+/**
+ * struct wcnss_download_nv_resp - firmware download response
+ * @hdr: common packet wcnss_msg_hdr header
+ * @status: boolean to indicate success of the download
+ */
+struct wcnss_download_nv_resp {
+ struct wcnss_msg_hdr hdr;
+ u8 status;
+} __packed;
+
+/**
+ * wcnss_ctrl_smd_callback() - handler for SMD responses
+ * @rpdev: remote processor message device pointer
+ * @data: pointer to the incoming data packet
+ * @count: size of the incoming data packet
+ * @priv: unused
+ * @addr: unused
+ *
+ * Handles any incoming packets from the remote WCNSS_CTRL service.
+ */
+static int wcnss_ctrl_smd_callback(struct rpmsg_device *rpdev,
+ void *data,
+ int count,
+ void *priv,
+ u32 addr)
+{
+ struct wcnss_ctrl *wcnss = dev_get_drvdata(&rpdev->dev);
+ const struct wcnss_download_nv_resp *nvresp;
+ const struct wcnss_version_resp *version;
+ const struct wcnss_msg_hdr *hdr = data;
+
+ switch (hdr->type) {
+ case WCNSS_VERSION_RESP:
+ if (count != sizeof(*version)) {
+ dev_err(wcnss->dev,
+ "invalid size of version response\n");
+ break;
+ }
+
+ version = data;
+ dev_info(wcnss->dev, "WCNSS Version %d.%d %d.%d\n",
+ version->major, version->minor,
+ version->version, version->revision);
+
+ complete(&wcnss->ack);
+ break;
+ case WCNSS_DOWNLOAD_NV_RESP:
+ if (count != sizeof(*nvresp)) {
+ dev_err(wcnss->dev,
+ "invalid size of download response\n");
+ break;
+ }
+
+ nvresp = data;
+ wcnss->ack_status = nvresp->status;
+ complete(&wcnss->ack);
+ break;
+ case WCNSS_CBC_COMPLETE_IND:
+ dev_dbg(wcnss->dev, "cold boot complete\n");
+ complete(&wcnss->cbc);
+ break;
+ default:
+ dev_info(wcnss->dev, "unknown message type %d\n", hdr->type);
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * wcnss_request_version() - send a version request to WCNSS
+ * @wcnss: wcnss ctrl driver context
+ */
+static int wcnss_request_version(struct wcnss_ctrl *wcnss)
+{
+ struct wcnss_msg_hdr msg;
+ int ret;
+
+ msg.type = WCNSS_VERSION_REQ;
+ msg.len = sizeof(msg);
+ ret = rpmsg_send(wcnss->channel, &msg, sizeof(msg));
+ if (ret < 0)
+ return ret;
+
+ ret = wait_for_completion_timeout(&wcnss->ack, WCNSS_CBC_TIMEOUT);
+ if (!ret) {
+ dev_err(wcnss->dev, "timeout waiting for version response\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/**
+ * wcnss_download_nv() - send nv binary to WCNSS
+ * @wcnss: wcnss_ctrl state handle
+ * @expect_cbc: indicator to caller that a cbc event is expected
+ *
+ * Returns 0 on success. Negative errno on failure.
+ */
+static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc)
+{
+ struct wcnss_download_nv_req *req;
+ const struct firmware *fw;
+ struct device *dev = wcnss->dev;
+ const char *nvbin = NVBIN_FILE;
+ const void *data;
+ ssize_t left;
+ int ret;
+
+ req = kzalloc(sizeof(*req) + NV_FRAGMENT_SIZE, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ ret = of_property_read_string(dev->of_node, "firmware-name", &nvbin);
+ if (ret < 0 && ret != -EINVAL)
+ goto free_req;
+
+ ret = request_firmware(&fw, nvbin, dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to load nv file %s: %d\n", nvbin, ret);
+ goto free_req;
+ }
+
+ data = fw->data;
+ left = fw->size;
+
+ req->hdr.type = WCNSS_DOWNLOAD_NV_REQ;
+ req->hdr.len = sizeof(*req) + NV_FRAGMENT_SIZE;
+
+ req->last = 0;
+ req->frag_size = NV_FRAGMENT_SIZE;
+
+ req->seq = 0;
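+ /*
+ * Stream the firmware to WCNSS in NV_FRAGMENT_SIZE chunks; the last
+ * fragment is flagged via req->last and carries only the remaining bytes.
+ */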
+ do {
+ if (left <= NV_FRAGMENT_SIZE) {
+ req->last = 1;
+ req->frag_size = left;
+ req->hdr.len = sizeof(*req) + left;
+ }
+
+ memcpy(req->fragment, data, req->frag_size);
+
+ ret = rpmsg_send(wcnss->channel, req, req->hdr.len);
+ if (ret < 0) {
+ dev_err(dev, "failed to send smd packet\n");
+ goto release_fw;
+ }
+
+ /* Increment for next fragment */
+ req->seq++;
+
+ data += NV_FRAGMENT_SIZE;
+ left -= NV_FRAGMENT_SIZE;
+ } while (left > 0);
+
+ ret = wait_for_completion_timeout(&wcnss->ack, WCNSS_REQUEST_TIMEOUT);
+ if (!ret) {
+ dev_err(dev, "timeout waiting for nv upload ack\n");
+ ret = -ETIMEDOUT;
+ } else {
+ *expect_cbc = wcnss->ack_status == WCNSS_ACK_COLD_BOOTING;
+ ret = 0;
+ }
+
+release_fw:
+ release_firmware(fw);
+free_req:
+ kfree(req);
+
+ return ret;
+}
+
+/**
+ * qcom_wcnss_open_channel() - open additional SMD channel to WCNSS
+ * @wcnss: wcnss handle, retrieved from drvdata
+ * @name: SMD channel name
+ * @cb: callback to handle incoming data on the channel
+ * @priv: private data for use in the callback
+ */
+struct rpmsg_endpoint *qcom_wcnss_open_channel(void *wcnss, const char *name, rpmsg_rx_cb_t cb, void *priv)
+{
+ struct rpmsg_channel_info chinfo;
+ struct wcnss_ctrl *_wcnss = wcnss;
+
+ strscpy(chinfo.name, name, sizeof(chinfo.name));
+ chinfo.src = RPMSG_ADDR_ANY;
+ chinfo.dst = RPMSG_ADDR_ANY;
+
+ return rpmsg_create_ept(_wcnss->channel->rpdev, cb, priv, chinfo);
+}
+EXPORT_SYMBOL(qcom_wcnss_open_channel);
+
+static void wcnss_async_probe(struct work_struct *work)
+{
+ struct wcnss_ctrl *wcnss = container_of(work, struct wcnss_ctrl, probe_work);
+ bool expect_cbc;
+ int ret;
+
+ ret = wcnss_request_version(wcnss);
+ if (ret < 0)
+ return;
+
+ ret = wcnss_download_nv(wcnss, &expect_cbc);
+ if (ret < 0)
+ return;
+
+ /* Wait for pending cold boot completion if indicated by the nv downloader */
+ if (expect_cbc) {
+ ret = wait_for_completion_timeout(&wcnss->cbc, WCNSS_REQUEST_TIMEOUT);
+ if (!ret)
+ dev_err(wcnss->dev, "expected cold boot completion\n");
+ }
+
+ of_platform_populate(wcnss->dev->of_node, NULL, NULL, wcnss->dev);
+}
+
+static int wcnss_ctrl_probe(struct rpmsg_device *rpdev)
+{
+ struct wcnss_ctrl *wcnss;
+
+ wcnss = devm_kzalloc(&rpdev->dev, sizeof(*wcnss), GFP_KERNEL);
+ if (!wcnss)
+ return -ENOMEM;
+
+ wcnss->dev = &rpdev->dev;
+ wcnss->channel = rpdev->ept;
+
+ init_completion(&wcnss->ack);
+ init_completion(&wcnss->cbc);
+ INIT_WORK(&wcnss->probe_work, wcnss_async_probe);
+
+ dev_set_drvdata(&rpdev->dev, wcnss);
+
+ schedule_work(&wcnss->probe_work);
+
+ return 0;
+}
+
+static void wcnss_ctrl_remove(struct rpmsg_device *rpdev)
+{
+ struct wcnss_ctrl *wcnss = dev_get_drvdata(&rpdev->dev);
+
+ cancel_work_sync(&wcnss->probe_work);
+ of_platform_depopulate(&rpdev->dev);
+}
+
+static const struct of_device_id wcnss_ctrl_of_match[] = {
+ { .compatible = "qcom,wcnss", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, wcnss_ctrl_of_match);
+
+static struct rpmsg_driver wcnss_ctrl_driver = {
+ .probe = wcnss_ctrl_probe,
+ .remove = wcnss_ctrl_remove,
+ .callback = wcnss_ctrl_smd_callback,
+ .drv = {
+ .name = "qcom_wcnss_ctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = wcnss_ctrl_of_match,
+ },
+};
+
+module_rpmsg_driver(wcnss_ctrl_driver);
+
+MODULE_DESCRIPTION("Qualcomm WCNSS control driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
new file mode 100644
index 0000000000..acc812e490
--- /dev/null
+++ b/drivers/soc/renesas/Kconfig
@@ -0,0 +1,461 @@
+# SPDX-License-Identifier: GPL-2.0
+menuconfig SOC_RENESAS
+ bool "Renesas SoC driver support" if COMPILE_TEST && !ARCH_RENESAS
+ default y if ARCH_RENESAS
+ select GPIOLIB
+ select PINCTRL
+ select SOC_BUS
+
+if SOC_RENESAS
+
+config ARCH_RCAR_GEN1
+ bool
+ select PM
+ select PM_GENERIC_DOMAINS
+ select RENESAS_INTC_IRQPIN
+ select RST_RCAR
+ select SYS_SUPPORTS_SH_TMU
+
+config ARCH_RCAR_GEN2
+ bool
+ select HAVE_ARM_ARCH_TIMER
+ select PM
+ select PM_GENERIC_DOMAINS
+ select RENESAS_IRQC
+ select RST_RCAR
+ select SYS_SUPPORTS_SH_CMT
+
+config ARCH_RCAR_GEN3
+ bool
+ select PM
+ select PM_GENERIC_DOMAINS
+ select RENESAS_IRQC
+ select RST_RCAR
+ select SYS_SUPPORTS_SH_CMT
+ select SYS_SUPPORTS_SH_TMU
+
+config ARCH_RMOBILE
+ bool
+ select PM
+ select PM_GENERIC_DOMAINS
+ select SYS_SUPPORTS_SH_CMT
+ select SYS_SUPPORTS_SH_TMU
+ select SYSC_RMOBILE
+
+config ARCH_RZG2L
+ bool
+ select PM
+ select PM_GENERIC_DOMAINS
+ select RENESAS_RZG2L_IRQC
+
+config ARCH_RZN1
+ bool
+ select PM
+ select PM_GENERIC_DOMAINS
+ select ARM_AMBA
+
+if ARM && ARCH_RENESAS
+
+#comment "Renesas ARM SoCs System Type"
+
+config ARCH_EMEV2
+ bool "ARM32 Platform support for Emma Mobile EV2"
+ select HAVE_ARM_SCU if SMP
+ select SYS_SUPPORTS_EM_STI
+
+config ARCH_R8A7794
+ bool "ARM32 Platform support for R-Car E2"
+ select ARCH_RCAR_GEN2
+ select ARM_ERRATA_814220
+ select SYSC_R8A7794
+
+config ARCH_R8A7779
+ bool "ARM32 Platform support for R-Car H1"
+ select ARCH_RCAR_GEN1
+ select ARM_ERRATA_754322
+ select ARM_GLOBAL_TIMER
+ select HAVE_ARM_SCU if SMP
+ select HAVE_ARM_TWD if SMP
+ select SYSC_R8A7779
+
+config ARCH_R8A7790
+ bool "ARM32 Platform support for R-Car H2"
+ select ARCH_RCAR_GEN2
+ select ARM_ERRATA_798181 if SMP
+ select ARM_ERRATA_814220
+ select I2C
+ select SYSC_R8A7790
+
+config ARCH_R8A7778
+ bool "ARM32 Platform support for R-Car M1A"
+ select ARCH_RCAR_GEN1
+ select ARM_ERRATA_754322
+
+config ARCH_R8A7793
+ bool "ARM32 Platform support for R-Car M2-N"
+ select ARCH_RCAR_GEN2
+ select ARM_ERRATA_798181 if SMP
+ select I2C
+ select SYSC_R8A7791
+
+config ARCH_R8A7791
+ bool "ARM32 Platform support for R-Car M2-W"
+ select ARCH_RCAR_GEN2
+ select ARM_ERRATA_798181 if SMP
+ select I2C
+ select SYSC_R8A7791
+
+config ARCH_R8A7792
+ bool "ARM32 Platform support for R-Car V2H"
+ select ARCH_RCAR_GEN2
+ select ARM_ERRATA_798181 if SMP
+ select SYSC_R8A7792
+
+config ARCH_R8A7740
+ bool "ARM32 Platform support for R-Mobile A1"
+ select ARCH_RMOBILE
+ select ARM_ERRATA_754322
+ select RENESAS_INTC_IRQPIN
+
+config ARCH_R8A73A4
+ bool "ARM32 Platform support for R-Mobile APE6"
+ select ARCH_RMOBILE
+ select ARM_ERRATA_798181 if SMP
+ select ARM_ERRATA_814220
+ select HAVE_ARM_ARCH_TIMER
+ select RENESAS_IRQC
+
+config ARCH_R7S72100
+ bool "ARM32 Platform support for RZ/A1H"
+ select ARM_ERRATA_754322
+ select PM
+ select PM_GENERIC_DOMAINS
+ select RENESAS_OSTM
+ select RENESAS_RZA1_IRQC
+ select SYS_SUPPORTS_SH_MTU2
+
+config ARCH_R7S9210
+ bool "ARM32 Platform support for RZ/A2"
+ select PM
+ select PM_GENERIC_DOMAINS
+ select RENESAS_OSTM
+ select RENESAS_RZA1_IRQC
+
+config ARCH_R8A77470
+ bool "ARM32 Platform support for RZ/G1C"
+ select ARCH_RCAR_GEN2
+ select ARM_ERRATA_814220
+ select SYSC_R8A77470
+
+config ARCH_R8A7745
+ bool "ARM32 Platform support for RZ/G1E"
+ select ARCH_RCAR_GEN2
+ select ARM_ERRATA_814220
+ select SYSC_R8A7745
+
+config ARCH_R8A7742
+ bool "ARM32 Platform support for RZ/G1H"
+ select ARCH_RCAR_GEN2
+ select ARM_ERRATA_798181 if SMP
+ select ARM_ERRATA_814220
+ select SYSC_R8A7742
+
+config ARCH_R8A7743
+ bool "ARM32 Platform support for RZ/G1M"
+ select ARCH_RCAR_GEN2
+ select ARM_ERRATA_798181 if SMP
+ select SYSC_R8A7743
+
+config ARCH_R8A7744
+ bool "ARM32 Platform support for RZ/G1N"
+ select ARCH_RCAR_GEN2
+ select ARM_ERRATA_798181 if SMP
+ select SYSC_R8A7743
+
+config ARCH_R9A06G032
+ bool "ARM32 Platform support for RZ/N1D"
+ select ARCH_RZN1
+ select ARM_ERRATA_814220
+
+config ARCH_SH73A0
+ bool "ARM32 Platform support for SH-Mobile AG5"
+ select ARCH_RMOBILE
+ select ARM_ERRATA_754322
+ select ARM_GLOBAL_TIMER
+ select HAVE_ARM_SCU if SMP
+ select HAVE_ARM_TWD if SMP
+ select RENESAS_INTC_IRQPIN
+
+endif # ARM
+
+if ARM64
+
+config ARCH_R8A77995
+ bool "ARM64 Platform support for R-Car D3"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A77995
+ help
+ This enables support for the Renesas R-Car D3 SoC.
+ This includes different gradings like R-Car D3e.
+
+config ARCH_R8A77990
+ bool "ARM64 Platform support for R-Car E3"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A77990
+ help
+ This enables support for the Renesas R-Car E3 SoC.
+ This includes different gradings like R-Car E3e.
+
+config ARCH_R8A77951
+ bool "ARM64 Platform support for R-Car H3 ES2.0+"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A7795
+ help
+ This enables support for the Renesas R-Car H3 SoC (revisions 2.0 and
+ later).
+ This includes different gradings like R-Car H3e, H3e-2G, and H3Ne.
+
+config ARCH_R8A77965
+ bool "ARM64 Platform support for R-Car M3-N"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A77965
+ help
+ This enables support for the Renesas R-Car M3-N SoC.
+ This includes different gradings like R-Car M3Ne and M3Ne-2G.
+
+config ARCH_R8A77960
+ bool "ARM64 Platform support for R-Car M3-W"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A77960
+ help
+ This enables support for the Renesas R-Car M3-W SoC.
+
+config ARCH_R8A77961
+ bool "ARM64 Platform support for R-Car M3-W+"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A77961
+ help
+ This enables support for the Renesas R-Car M3-W+ SoC.
+ This includes different gradings like R-Car M3e and M3e-2G.
+
+config ARCH_R8A779F0
+ bool "ARM64 Platform support for R-Car S4-8"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A779F0
+ help
+ This enables support for the Renesas R-Car S4-8 SoC.
+
+config ARCH_R8A77980
+ bool "ARM64 Platform support for R-Car V3H"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A77980
+ help
+ This enables support for the Renesas R-Car V3H SoC.
+
+config ARCH_R8A77970
+ bool "ARM64 Platform support for R-Car V3M"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A77970
+ help
+ This enables support for the Renesas R-Car V3M SoC.
+
+config ARCH_R8A779A0
+ bool "ARM64 Platform support for R-Car V3U"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A779A0
+ help
+ This enables support for the Renesas R-Car V3U SoC.
+
+config ARCH_R8A779G0
+ bool "ARM64 Platform support for R-Car V4H"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A779G0
+ help
+ This enables support for the Renesas R-Car V4H SoC.
+
+config ARCH_R8A774C0
+ bool "ARM64 Platform support for RZ/G2E"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A774C0
+ help
+ This enables support for the Renesas RZ/G2E SoC.
+
+config ARCH_R8A774E1
+ bool "ARM64 Platform support for RZ/G2H"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A774E1
+ help
+ This enables support for the Renesas RZ/G2H SoC.
+
+config ARCH_R8A774A1
+ bool "ARM64 Platform support for RZ/G2M"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A774A1
+ help
+ This enables support for the Renesas RZ/G2M SoC.
+
+config ARCH_R8A774B1
+ bool "ARM64 Platform support for RZ/G2N"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A774B1
+ help
+ This enables support for the Renesas RZ/G2N SoC.
+
+config ARCH_R9A07G043
+ bool "ARM64 Platform support for RZ/G2UL"
+ select ARCH_RZG2L
+ help
+ This enables support for the Renesas RZ/G2UL SoC variants.
+
+config ARCH_R9A07G044
+ bool "ARM64 Platform support for RZ/G2L"
+ select ARCH_RZG2L
+ help
+ This enables support for the Renesas RZ/G2L SoC variants.
+
+config ARCH_R9A07G054
+ bool "ARM64 Platform support for RZ/V2L"
+ select ARCH_RZG2L
+ help
+ This enables support for the Renesas RZ/V2L SoC variants.
+
+config ARCH_R9A09G011
+ bool "ARM64 Platform support for RZ/V2M"
+ select PM
+ select PM_GENERIC_DOMAINS
+ select PWC_RZV2M
+ help
+ This enables support for the Renesas RZ/V2M SoC.
+
+endif # ARM64
+
+if RISCV
+
+config ARCH_R9A07G043
+ bool "RISC-V Platform support for RZ/Five"
+ depends on NONPORTABLE
+ depends on RISCV_ALTERNATIVE
+ depends on !RISCV_ISA_ZICBOM
+ depends on RISCV_SBI
+ select ARCH_RZG2L
+ select AX45MP_L2_CACHE
+ select DMA_GLOBAL_POOL
+ select ERRATA_ANDES
+ select ERRATA_ANDES_CMO
+ help
+ This enables support for the Renesas RZ/Five SoC.
+
+endif # RISCV
+
+config PWC_RZV2M
+ bool "Renesas RZ/V2M PWC support" if COMPILE_TEST
+
+config RST_RCAR
+ bool "Reset Controller support for R-Car" if COMPILE_TEST
+
+config SYSC_RCAR
+ bool "System Controller support for R-Car" if COMPILE_TEST
+
+config SYSC_RCAR_GEN4
+ bool "System Controller support for R-Car Gen4" if COMPILE_TEST
+
+config SYSC_R8A77995
+ bool "System Controller support for R-Car D3" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A7794
+ bool "System Controller support for R-Car E2" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A77990
+ bool "System Controller support for R-Car E3" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A7779
+ bool "System Controller support for R-Car H1" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A7790
+ bool "System Controller support for R-Car H2" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A7795
+ bool "System Controller support for R-Car H3" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A7791
+ bool "System Controller support for R-Car M2-W/N" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A77965
+ bool "System Controller support for R-Car M3-N" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A77960
+ bool "System Controller support for R-Car M3-W" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A77961
+ bool "System Controller support for R-Car M3-W+" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A779F0
+ bool "System Controller support for R-Car S4-8" if COMPILE_TEST
+ select SYSC_RCAR_GEN4
+
+config SYSC_R8A7792
+ bool "System Controller support for R-Car V2H" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A77980
+ bool "System Controller support for R-Car V3H" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A77970
+ bool "System Controller support for R-Car V3M" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A779A0
+ bool "System Controller support for R-Car V3U" if COMPILE_TEST
+ select SYSC_RCAR_GEN4
+
+config SYSC_R8A779G0
+ bool "System Controller support for R-Car V4H" if COMPILE_TEST
+ select SYSC_RCAR_GEN4
+
+config SYSC_RMOBILE
+ bool "System Controller support for R-Mobile" if COMPILE_TEST
+
+config SYSC_R8A77470
+ bool "System Controller support for RZ/G1C" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A7745
+ bool "System Controller support for RZ/G1E" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A7742
+ bool "System Controller support for RZ/G1H" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A7743
+ bool "System Controller support for RZ/G1M" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A774C0
+ bool "System Controller support for RZ/G2E" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A774E1
+ bool "System Controller support for RZ/G2H" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A774A1
+ bool "System Controller support for RZ/G2M" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A774B1
+ bool "System Controller support for RZ/G2N" if COMPILE_TEST
+ select SYSC_RCAR
+
+endif # SOC_RENESAS
diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile
new file mode 100644
index 0000000000..734f8f8cef
--- /dev/null
+++ b/drivers/soc/renesas/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+# Generic, must be first because of soc_device_register()
+obj-$(CONFIG_SOC_RENESAS) += renesas-soc.o
+
+# SoC
+ifdef CONFIG_SMP
+obj-$(CONFIG_ARCH_R9A06G032) += r9a06g032-smp.o
+endif
+
+# Family
+obj-$(CONFIG_PWC_RZV2M) += pwc-rzv2m.o
+obj-$(CONFIG_RST_RCAR) += rcar-rst.o
diff --git a/drivers/soc/renesas/pwc-rzv2m.c b/drivers/soc/renesas/pwc-rzv2m.c
new file mode 100644
index 0000000000..452cee8d68
--- /dev/null
+++ b/drivers/soc/renesas/pwc-rzv2m.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023 Renesas Electronics Corporation
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/driver.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+
+#define PWC_PWCRST 0x00
+#define PWC_PWCCKEN 0x04
+#define PWC_PWCCTL 0x50
+#define PWC_GPIO 0x80
+
+#define PWC_PWCRST_RSTSOFTAX 0x1
+#define PWC_PWCCKEN_ENGCKMAIN 0x1
+#define PWC_PWCCTL_PWOFF 0x1
+
+struct rzv2m_pwc_priv {
+ void __iomem *base;
+ struct device *dev;
+ struct gpio_chip gp;
+ DECLARE_BITMAP(ch_en_bits, 2);
+};
+
+static void rzv2m_pwc_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct rzv2m_pwc_priv *priv = gpiochip_get_data(chip);
+ u32 reg;
+
+ /* BIT 16 enables write to BIT 0, and BIT 17 enables write to BIT 1 */
+ reg = BIT(offset + 16);
+ if (value)
+ reg |= BIT(offset);
+
+ writel(reg, priv->base + PWC_GPIO);
+
+ assign_bit(offset, priv->ch_en_bits, value);
+}
+
+static int rzv2m_pwc_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct rzv2m_pwc_priv *priv = gpiochip_get_data(chip);
+
+ return test_bit(offset, priv->ch_en_bits);
+}
+
+static int rzv2m_pwc_gpio_direction_output(struct gpio_chip *gc,
+ unsigned int nr, int value)
+{
+ if (nr > 1)
+ return -EINVAL;
+
+ rzv2m_pwc_gpio_set(gc, nr, value);
+
+ return 0;
+}
+
+static const struct gpio_chip rzv2m_pwc_gc = {
+ .label = "gpio_rzv2m_pwc",
+ .owner = THIS_MODULE,
+ .get = rzv2m_pwc_gpio_get,
+ .set = rzv2m_pwc_gpio_set,
+ .direction_output = rzv2m_pwc_gpio_direction_output,
+ .can_sleep = false,
+ .ngpio = 2,
+ .base = -1,
+};
+
+static int rzv2m_pwc_poweroff(struct sys_off_data *data)
+{
+ struct rzv2m_pwc_priv *priv = data->cb_data;
+
+ writel(PWC_PWCRST_RSTSOFTAX, priv->base + PWC_PWCRST);
+ writel(PWC_PWCCKEN_ENGCKMAIN, priv->base + PWC_PWCCKEN);
+ writel(PWC_PWCCTL_PWOFF, priv->base + PWC_PWCCTL);
+
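+ /*
+ * The writes above request a power off; if we are still running after
+ * the delay, the request did not take effect.
+ */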
+ mdelay(150);
+
+ dev_err(priv->dev, "Failed to power off the system");
+
+ return NOTIFY_DONE;
+}
+
+static int rzv2m_pwc_probe(struct platform_device *pdev)
+{
+ struct rzv2m_pwc_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ /*
+ * The register used by this driver cannot be read, therefore set the
+ * outputs to their default values and initialize priv->ch_en_bits
+ * accordingly. BIT 16 enables write to BIT 0, BIT 17 enables write to
+ * BIT 1, and the default value of both BIT 0 and BIT 1 is 0.
+ */
+ writel(BIT(17) | BIT(16), priv->base + PWC_GPIO);
+ bitmap_zero(priv->ch_en_bits, 2);
+
+ priv->gp = rzv2m_pwc_gc;
+ priv->gp.parent = pdev->dev.parent;
+ priv->gp.fwnode = dev_fwnode(&pdev->dev);
+
+ ret = devm_gpiochip_add_data(&pdev->dev, &priv->gp, priv);
+ if (ret)
+ return ret;
+
+ if (device_property_read_bool(&pdev->dev, "renesas,rzv2m-pwc-power"))
+ ret = devm_register_power_off_handler(&pdev->dev,
+ rzv2m_pwc_poweroff, priv);
+
+ return ret;
+}
+
+static const struct of_device_id rzv2m_pwc_of_match[] = {
+ { .compatible = "renesas,rzv2m-pwc" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rzv2m_pwc_of_match);
+
+static struct platform_driver rzv2m_pwc_driver = {
+ .probe = rzv2m_pwc_probe,
+ .driver = {
+ .name = "rzv2m_pwc",
+ .of_match_table = rzv2m_pwc_of_match,
+ },
+};
+module_platform_driver(rzv2m_pwc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Fabrizio Castro <castro.fabrizio.jz@renesas.com>");
+MODULE_DESCRIPTION("Renesas RZ/V2M PWC driver");
diff --git a/drivers/soc/renesas/r9a06g032-smp.c b/drivers/soc/renesas/r9a06g032-smp.c
new file mode 100644
index 0000000000..a1926e8d73
--- /dev/null
+++ b/drivers/soc/renesas/r9a06g032-smp.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * R9A06G032 Second CA7 enabler.
+ *
+ * Copyright (C) 2018 Renesas Electronics Europe Limited
+ *
+ * Michel Pollet <michel.pollet@bp.renesas.com>, <buserror@gmail.com>
+ * Derived from actions,s500-smp
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/smp.h>
+
+/*
+ * The second CPU is parked in ROM at boot time. It is woken up by writing
+ * an address into the BOOTADDR register of sysctrl.
+ *
+ * So the default value of the "cpu-release-addr" corresponds to BOOTADDR...
+ *
+ * *However* the BOOTADDR register is not available when the kernel
+ * starts in NONSEC mode.
+ *
+ * So for NONSEC mode, the bootloader re-parks the second CPU into a pen
+ * in SRAM, and changes the "cpu-release-addr" of linux's DT to a SRAM address,
+ * which is not restricted.
+ */
+
+static void __iomem *cpu_bootaddr;
+
+static DEFINE_SPINLOCK(cpu_lock);
+
+static int
+r9a06g032_smp_boot_secondary(unsigned int cpu,
+ struct task_struct *idle)
+{
+ if (!cpu_bootaddr)
+ return -ENODEV;
+
+ spin_lock(&cpu_lock);
+
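+ /* Publish the physical entry point at the release address, then wake CPU#1 */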
+ writel(__pa_symbol(secondary_startup), cpu_bootaddr);
+ arch_send_wakeup_ipi_mask(cpumask_of(cpu));
+
+ spin_unlock(&cpu_lock);
+
+ return 0;
+}
+
+static void __init r9a06g032_smp_prepare_cpus(unsigned int max_cpus)
+{
+ struct device_node *dn;
+ int ret = -EINVAL, dns;
+ u32 bootaddr;
+
+ dn = of_get_cpu_node(1, NULL);
+ if (!dn) {
+ pr_err("CPU#1: missing device tree node\n");
+ return;
+ }
+ /*
+ * Determine the address from which the CPU is polling.
+ * The bootloader *does* change this property.
+ * Note: The property can be either 64 or 32 bits, so handle both cases
+ */
+ if (of_find_property(dn, "cpu-release-addr", &dns)) {
+ if (dns == sizeof(u64)) {
+ u64 temp;
+
+ ret = of_property_read_u64(dn,
+ "cpu-release-addr", &temp);
+ bootaddr = temp;
+ } else {
+ ret = of_property_read_u32(dn,
+ "cpu-release-addr",
+ &bootaddr);
+ }
+ }
+ of_node_put(dn);
+ if (ret) {
+ pr_err("CPU#1: invalid cpu-release-addr property\n");
+ return;
+ }
+ pr_info("CPU#1: cpu-release-addr %08x\n", bootaddr);
+
+ cpu_bootaddr = ioremap(bootaddr, sizeof(bootaddr));
+}
+
+static const struct smp_operations r9a06g032_smp_ops __initconst = {
+ .smp_prepare_cpus = r9a06g032_smp_prepare_cpus,
+ .smp_boot_secondary = r9a06g032_smp_boot_secondary,
+};
+
+CPU_METHOD_OF_DECLARE(r9a06g032_smp,
+ "renesas,r9a06g032-smp", &r9a06g032_smp_ops);
diff --git a/drivers/soc/renesas/rcar-rst.c b/drivers/soc/renesas/rcar-rst.c
new file mode 100644
index 0000000000..98fd97da6c
--- /dev/null
+++ b/drivers/soc/renesas/rcar-rst.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * R-Car Gen1 RESET/WDT, R-Car Gen2, Gen3, and RZ/G RST Driver
+ *
+ * Copyright (C) 2016 Glider bvba
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/soc/renesas/rcar-rst.h>
+
+#define WDTRSTCR_RESET 0xA55A0002
+#define WDTRSTCR 0x0054
+#define GEN4_WDTRSTCR 0x0010
+
+#define CR7BAR 0x0070
+#define CR7BAREN BIT(4)
+#define CR7BAR_MASK 0xFFFC0000
+
+static void __iomem *rcar_rst_base;
+static u32 saved_mode __initdata;
+static int (*rcar_rst_set_rproc_boot_addr_func)(u64 boot_addr);
+
+static int rcar_rst_enable_wdt_reset(void __iomem *base)
+{
+ iowrite32(WDTRSTCR_RESET, base + WDTRSTCR);
+ return 0;
+}
+
+static int rcar_rst_v3u_enable_wdt_reset(void __iomem *base)
+{
+ iowrite32(WDTRSTCR_RESET, base + GEN4_WDTRSTCR);
+ return 0;
+}
+
+/*
+ * Most of the R-Car Gen3 SoCs have an ARM Realtime Core.
+ * The firmware boot address has to be set in CR7BAR before
+ * starting the realtime core.
+ * The boot address must be aligned on a 256 KiB boundary.
+ */
+static int rcar_rst_set_gen3_rproc_boot_addr(u64 boot_addr)
+{
+ if (boot_addr & ~(u64)CR7BAR_MASK) {
+ pr_err("Invalid boot address: %llx\n", boot_addr);
+ return -EINVAL;
+ }
+
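+ /* Write the boot address first, then write it again with CR7BAREN set to enable it */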
+ iowrite32(boot_addr, rcar_rst_base + CR7BAR);
+ iowrite32(boot_addr | CR7BAREN, rcar_rst_base + CR7BAR);
+
+ return 0;
+}
+
+struct rst_config {
+ unsigned int modemr; /* Mode Monitoring Register Offset */
+ int (*configure)(void __iomem *base); /* Platform specific config */
+ int (*set_rproc_boot_addr)(u64 boot_addr);
+};
+
+static const struct rst_config rcar_rst_gen1 __initconst = {
+ .modemr = 0x20,
+};
+
+static const struct rst_config rcar_rst_gen2 __initconst = {
+ .modemr = 0x60,
+ .configure = rcar_rst_enable_wdt_reset,
+};
+
+static const struct rst_config rcar_rst_gen3 __initconst = {
+ .modemr = 0x60,
+ .set_rproc_boot_addr = rcar_rst_set_gen3_rproc_boot_addr,
+};
+
+/* V3U firmware doesn't enable WDT reset and there won't be updates anymore */
+static const struct rst_config rcar_rst_v3u __initconst = {
+ .modemr = 0x00, /* MODEMR0 and it has CPG related bits */
+ .configure = rcar_rst_v3u_enable_wdt_reset,
+};
+
+static const struct rst_config rcar_rst_gen4 __initconst = {
+ .modemr = 0x00, /* MODEMR0 and it has CPG related bits */
+};
+
+static const struct of_device_id rcar_rst_matches[] __initconst = {
+ /* RZ/G1 is handled like R-Car Gen2 */
+ { .compatible = "renesas,r8a7742-rst", .data = &rcar_rst_gen2 },
+ { .compatible = "renesas,r8a7743-rst", .data = &rcar_rst_gen2 },
+ { .compatible = "renesas,r8a7744-rst", .data = &rcar_rst_gen2 },
+ { .compatible = "renesas,r8a7745-rst", .data = &rcar_rst_gen2 },
+ { .compatible = "renesas,r8a77470-rst", .data = &rcar_rst_gen2 },
+ /* RZ/G2 is handled like R-Car Gen3 */
+ { .compatible = "renesas,r8a774a1-rst", .data = &rcar_rst_gen3 },
+ { .compatible = "renesas,r8a774b1-rst", .data = &rcar_rst_gen3 },
+ { .compatible = "renesas,r8a774c0-rst", .data = &rcar_rst_gen3 },
+ { .compatible = "renesas,r8a774e1-rst", .data = &rcar_rst_gen3 },
+ /* R-Car Gen1 */
+ { .compatible = "renesas,r8a7778-reset-wdt", .data = &rcar_rst_gen1 },
+ { .compatible = "renesas,r8a7779-reset-wdt", .data = &rcar_rst_gen1 },
+ /* R-Car Gen2 */
+ { .compatible = "renesas,r8a7790-rst", .data = &rcar_rst_gen2 },
+ { .compatible = "renesas,r8a7791-rst", .data = &rcar_rst_gen2 },
+ { .compatible = "renesas,r8a7792-rst", .data = &rcar_rst_gen2 },
+ { .compatible = "renesas,r8a7793-rst", .data = &rcar_rst_gen2 },
+ { .compatible = "renesas,r8a7794-rst", .data = &rcar_rst_gen2 },
+ /* R-Car Gen3 */
+ { .compatible = "renesas,r8a7795-rst", .data = &rcar_rst_gen3 },
+ { .compatible = "renesas,r8a7796-rst", .data = &rcar_rst_gen3 },
+ { .compatible = "renesas,r8a77961-rst", .data = &rcar_rst_gen3 },
+ { .compatible = "renesas,r8a77965-rst", .data = &rcar_rst_gen3 },
+ { .compatible = "renesas,r8a77970-rst", .data = &rcar_rst_gen3 },
+ { .compatible = "renesas,r8a77980-rst", .data = &rcar_rst_gen3 },
+ { .compatible = "renesas,r8a77990-rst", .data = &rcar_rst_gen3 },
+ { .compatible = "renesas,r8a77995-rst", .data = &rcar_rst_gen3 },
+ /* R-Car Gen4 */
+ { .compatible = "renesas,r8a779a0-rst", .data = &rcar_rst_v3u },
+ { .compatible = "renesas,r8a779f0-rst", .data = &rcar_rst_gen4 },
+ { .compatible = "renesas,r8a779g0-rst", .data = &rcar_rst_gen4 },
+ { /* sentinel */ }
+};
+
+static int __init rcar_rst_init(void)
+{
+ const struct of_device_id *match;
+ const struct rst_config *cfg;
+ struct device_node *np;
+ void __iomem *base;
+ int error = 0;
+
+ np = of_find_matching_node_and_match(NULL, rcar_rst_matches, &match);
+ if (!np)
+ return -ENODEV;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_warn("%pOF: Cannot map regs\n", np);
+ error = -ENOMEM;
+ goto out_put;
+ }
+
+ rcar_rst_base = base;
+ cfg = match->data;
+ rcar_rst_set_rproc_boot_addr_func = cfg->set_rproc_boot_addr;
+
+ saved_mode = ioread32(base + cfg->modemr);
+ if (cfg->configure) {
+ error = cfg->configure(base);
+ if (error) {
+ pr_warn("%pOF: Cannot run SoC specific configuration\n",
+ np);
+ goto out_put;
+ }
+ }
+
+ pr_debug("%pOF: MODE = 0x%08x\n", np, saved_mode);
+
+out_put:
+ of_node_put(np);
+ return error;
+}
+
+int __init rcar_rst_read_mode_pins(u32 *mode)
+{
+ int error;
+
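+ /* Map and configure the RST block lazily on the first call */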
+ if (!rcar_rst_base) {
+ error = rcar_rst_init();
+ if (error)
+ return error;
+ }
+
+ *mode = saved_mode;
+ return 0;
+}
+
+int rcar_rst_set_rproc_boot_addr(u64 boot_addr)
+{
+ if (!rcar_rst_set_rproc_boot_addr_func)
+ return -EIO;
+
+ return rcar_rst_set_rproc_boot_addr_func(boot_addr);
+}
+EXPORT_SYMBOL_GPL(rcar_rst_set_rproc_boot_addr);
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
new file mode 100644
index 0000000000..42af7c09f7
--- /dev/null
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -0,0 +1,539 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas SoC Identification
+ *
+ * Copyright (C) 2014-2016 Glider bvba
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/sys_soc.h>
+
+
+struct renesas_family {
+ const char name[16];
+ u32 reg; /* CCCR or PRR, if not in DT */
+};
+
+static const struct renesas_family fam_rcar_gen1 __initconst __maybe_unused = {
+ .name = "R-Car Gen1",
+ .reg = 0xff000044, /* PRR (Product Register) */
+};
+
+static const struct renesas_family fam_rcar_gen2 __initconst __maybe_unused = {
+ .name = "R-Car Gen2",
+ .reg = 0xff000044, /* PRR (Product Register) */
+};
+
+static const struct renesas_family fam_rcar_gen3 __initconst __maybe_unused = {
+ .name = "R-Car Gen3",
+ .reg = 0xfff00044, /* PRR (Product Register) */
+};
+
+static const struct renesas_family fam_rcar_gen4 __initconst __maybe_unused = {
+ .name = "R-Car Gen4",
+};
+
+static const struct renesas_family fam_rmobile __initconst __maybe_unused = {
+ .name = "R-Mobile",
+ .reg = 0xe600101c, /* CCCR (Common Chip Code Register) */
+};
+
+static const struct renesas_family fam_rza1 __initconst __maybe_unused = {
+ .name = "RZ/A1",
+};
+
+static const struct renesas_family fam_rza2 __initconst __maybe_unused = {
+ .name = "RZ/A2",
+};
+
+static const struct renesas_family fam_rzfive __initconst __maybe_unused = {
+ .name = "RZ/Five",
+};
+
+static const struct renesas_family fam_rzg1 __initconst __maybe_unused = {
+ .name = "RZ/G1",
+ .reg = 0xff000044, /* PRR (Product Register) */
+};
+
+static const struct renesas_family fam_rzg2 __initconst __maybe_unused = {
+ .name = "RZ/G2",
+ .reg = 0xfff00044, /* PRR (Product Register) */
+};
+
+static const struct renesas_family fam_rzg2l __initconst __maybe_unused = {
+ .name = "RZ/G2L",
+};
+
+static const struct renesas_family fam_rzg2ul __initconst __maybe_unused = {
+ .name = "RZ/G2UL",
+};
+
+static const struct renesas_family fam_rzv2l __initconst __maybe_unused = {
+ .name = "RZ/V2L",
+};
+
+static const struct renesas_family fam_rzv2m __initconst __maybe_unused = {
+ .name = "RZ/V2M",
+};
+
+static const struct renesas_family fam_shmobile __initconst __maybe_unused = {
+ .name = "SH-Mobile",
+ .reg = 0xe600101c, /* CCCR (Common Chip Code Register) */
+};
+
+
+struct renesas_soc {
+ const struct renesas_family *family;
+ u32 id;
+};
+
+static const struct renesas_soc soc_rz_a1h __initconst __maybe_unused = {
+ .family = &fam_rza1,
+};
+
+static const struct renesas_soc soc_rz_a2m __initconst __maybe_unused = {
+ .family = &fam_rza2,
+ .id = 0x3b,
+};
+
+static const struct renesas_soc soc_rmobile_ape6 __initconst __maybe_unused = {
+ .family = &fam_rmobile,
+ .id = 0x3f,
+};
+
+static const struct renesas_soc soc_rmobile_a1 __initconst __maybe_unused = {
+ .family = &fam_rmobile,
+ .id = 0x40,
+};
+
+static const struct renesas_soc soc_rz_five __initconst __maybe_unused = {
+ .family = &fam_rzfive,
+ .id = 0x847c447,
+};
+
+static const struct renesas_soc soc_rz_g1h __initconst __maybe_unused = {
+ .family = &fam_rzg1,
+ .id = 0x45,
+};
+
+static const struct renesas_soc soc_rz_g1m __initconst __maybe_unused = {
+ .family = &fam_rzg1,
+ .id = 0x47,
+};
+
+static const struct renesas_soc soc_rz_g1n __initconst __maybe_unused = {
+ .family = &fam_rzg1,
+ .id = 0x4b,
+};
+
+static const struct renesas_soc soc_rz_g1e __initconst __maybe_unused = {
+ .family = &fam_rzg1,
+ .id = 0x4c,
+};
+
+static const struct renesas_soc soc_rz_g1c __initconst __maybe_unused = {
+ .family = &fam_rzg1,
+ .id = 0x53,
+};
+
+static const struct renesas_soc soc_rz_g2m __initconst __maybe_unused = {
+ .family = &fam_rzg2,
+ .id = 0x52,
+};
+
+static const struct renesas_soc soc_rz_g2n __initconst __maybe_unused = {
+ .family = &fam_rzg2,
+ .id = 0x55,
+};
+
+static const struct renesas_soc soc_rz_g2e __initconst __maybe_unused = {
+ .family = &fam_rzg2,
+ .id = 0x57,
+};
+
+static const struct renesas_soc soc_rz_g2h __initconst __maybe_unused = {
+ .family = &fam_rzg2,
+ .id = 0x4f,
+};
+
+static const struct renesas_soc soc_rz_g2l __initconst __maybe_unused = {
+ .family = &fam_rzg2l,
+ .id = 0x841c447,
+};
+
+static const struct renesas_soc soc_rz_g2ul __initconst __maybe_unused = {
+ .family = &fam_rzg2ul,
+ .id = 0x8450447,
+};
+
+static const struct renesas_soc soc_rz_v2l __initconst __maybe_unused = {
+ .family = &fam_rzv2l,
+ .id = 0x8447447,
+};
+
+static const struct renesas_soc soc_rz_v2m __initconst __maybe_unused = {
+ .family = &fam_rzv2m,
+};
+
+static const struct renesas_soc soc_rcar_m1a __initconst __maybe_unused = {
+ .family = &fam_rcar_gen1,
+};
+
+static const struct renesas_soc soc_rcar_h1 __initconst __maybe_unused = {
+ .family = &fam_rcar_gen1,
+ .id = 0x3b,
+};
+
+static const struct renesas_soc soc_rcar_h2 __initconst __maybe_unused = {
+ .family = &fam_rcar_gen2,
+ .id = 0x45,
+};
+
+static const struct renesas_soc soc_rcar_m2_w __initconst __maybe_unused = {
+ .family = &fam_rcar_gen2,
+ .id = 0x47,
+};
+
+static const struct renesas_soc soc_rcar_v2h __initconst __maybe_unused = {
+ .family = &fam_rcar_gen2,
+ .id = 0x4a,
+};
+
+static const struct renesas_soc soc_rcar_m2_n __initconst __maybe_unused = {
+ .family = &fam_rcar_gen2,
+ .id = 0x4b,
+};
+
+static const struct renesas_soc soc_rcar_e2 __initconst __maybe_unused = {
+ .family = &fam_rcar_gen2,
+ .id = 0x4c,
+};
+
+static const struct renesas_soc soc_rcar_h3 __initconst __maybe_unused = {
+ .family = &fam_rcar_gen3,
+ .id = 0x4f,
+};
+
+static const struct renesas_soc soc_rcar_m3_w __initconst __maybe_unused = {
+ .family = &fam_rcar_gen3,
+ .id = 0x52,
+};
+
+static const struct renesas_soc soc_rcar_m3_n __initconst __maybe_unused = {
+ .family = &fam_rcar_gen3,
+ .id = 0x55,
+};
+
+static const struct renesas_soc soc_rcar_v3m __initconst __maybe_unused = {
+ .family = &fam_rcar_gen3,
+ .id = 0x54,
+};
+
+static const struct renesas_soc soc_rcar_v3h __initconst __maybe_unused = {
+ .family = &fam_rcar_gen3,
+ .id = 0x56,
+};
+
+static const struct renesas_soc soc_rcar_e3 __initconst __maybe_unused = {
+ .family = &fam_rcar_gen3,
+ .id = 0x57,
+};
+
+static const struct renesas_soc soc_rcar_d3 __initconst __maybe_unused = {
+ .family = &fam_rcar_gen3,
+ .id = 0x58,
+};
+
+static const struct renesas_soc soc_rcar_v3u __initconst __maybe_unused = {
+ .family = &fam_rcar_gen4,
+ .id = 0x59,
+};
+
+static const struct renesas_soc soc_rcar_s4 __initconst __maybe_unused = {
+ .family = &fam_rcar_gen4,
+ .id = 0x5a,
+};
+
+static const struct renesas_soc soc_rcar_v4h __initconst __maybe_unused = {
+ .family = &fam_rcar_gen4,
+ .id = 0x5c,
+};
+
+static const struct renesas_soc soc_shmobile_ag5 __initconst __maybe_unused = {
+ .family = &fam_shmobile,
+ .id = 0x37,
+};
+
+
+static const struct of_device_id renesas_socs[] __initconst __maybe_unused = {
+#ifdef CONFIG_ARCH_R7S72100
+ { .compatible = "renesas,r7s72100", .data = &soc_rz_a1h },
+#endif
+#ifdef CONFIG_ARCH_R7S9210
+ { .compatible = "renesas,r7s9210", .data = &soc_rz_a2m },
+#endif
+#ifdef CONFIG_ARCH_R8A73A4
+ { .compatible = "renesas,r8a73a4", .data = &soc_rmobile_ape6 },
+#endif
+#ifdef CONFIG_ARCH_R8A7740
+ { .compatible = "renesas,r8a7740", .data = &soc_rmobile_a1 },
+#endif
+#ifdef CONFIG_ARCH_R8A7742
+ { .compatible = "renesas,r8a7742", .data = &soc_rz_g1h },
+#endif
+#ifdef CONFIG_ARCH_R8A7743
+ { .compatible = "renesas,r8a7743", .data = &soc_rz_g1m },
+#endif
+#ifdef CONFIG_ARCH_R8A7744
+ { .compatible = "renesas,r8a7744", .data = &soc_rz_g1n },
+#endif
+#ifdef CONFIG_ARCH_R8A7745
+ { .compatible = "renesas,r8a7745", .data = &soc_rz_g1e },
+#endif
+#ifdef CONFIG_ARCH_R8A77470
+ { .compatible = "renesas,r8a77470", .data = &soc_rz_g1c },
+#endif
+#ifdef CONFIG_ARCH_R8A774A1
+ { .compatible = "renesas,r8a774a1", .data = &soc_rz_g2m },
+#endif
+#ifdef CONFIG_ARCH_R8A774B1
+ { .compatible = "renesas,r8a774b1", .data = &soc_rz_g2n },
+#endif
+#ifdef CONFIG_ARCH_R8A774C0
+ { .compatible = "renesas,r8a774c0", .data = &soc_rz_g2e },
+#endif
+#ifdef CONFIG_ARCH_R8A774E1
+ { .compatible = "renesas,r8a774e1", .data = &soc_rz_g2h },
+#endif
+#ifdef CONFIG_ARCH_R8A7778
+ { .compatible = "renesas,r8a7778", .data = &soc_rcar_m1a },
+#endif
+#ifdef CONFIG_ARCH_R8A7779
+ { .compatible = "renesas,r8a7779", .data = &soc_rcar_h1 },
+#endif
+#ifdef CONFIG_ARCH_R8A7790
+ { .compatible = "renesas,r8a7790", .data = &soc_rcar_h2 },
+#endif
+#ifdef CONFIG_ARCH_R8A7791
+ { .compatible = "renesas,r8a7791", .data = &soc_rcar_m2_w },
+#endif
+#ifdef CONFIG_ARCH_R8A7792
+ { .compatible = "renesas,r8a7792", .data = &soc_rcar_v2h },
+#endif
+#ifdef CONFIG_ARCH_R8A7793
+ { .compatible = "renesas,r8a7793", .data = &soc_rcar_m2_n },
+#endif
+#ifdef CONFIG_ARCH_R8A7794
+ { .compatible = "renesas,r8a7794", .data = &soc_rcar_e2 },
+#endif
+#ifdef CONFIG_ARCH_R8A77951
+ { .compatible = "renesas,r8a7795", .data = &soc_rcar_h3 },
+ { .compatible = "renesas,r8a779m0", .data = &soc_rcar_h3 },
+ { .compatible = "renesas,r8a779m1", .data = &soc_rcar_h3 },
+ { .compatible = "renesas,r8a779m8", .data = &soc_rcar_h3 },
+ { .compatible = "renesas,r8a779mb", .data = &soc_rcar_h3 },
+#endif
+#ifdef CONFIG_ARCH_R8A77960
+ { .compatible = "renesas,r8a7796", .data = &soc_rcar_m3_w },
+#endif
+#ifdef CONFIG_ARCH_R8A77961
+ { .compatible = "renesas,r8a77961", .data = &soc_rcar_m3_w },
+ { .compatible = "renesas,r8a779m2", .data = &soc_rcar_m3_w },
+ { .compatible = "renesas,r8a779m3", .data = &soc_rcar_m3_w },
+#endif
+#ifdef CONFIG_ARCH_R8A77965
+ { .compatible = "renesas,r8a77965", .data = &soc_rcar_m3_n },
+ { .compatible = "renesas,r8a779m4", .data = &soc_rcar_m3_n },
+ { .compatible = "renesas,r8a779m5", .data = &soc_rcar_m3_n },
+#endif
+#ifdef CONFIG_ARCH_R8A77970
+ { .compatible = "renesas,r8a77970", .data = &soc_rcar_v3m },
+#endif
+#ifdef CONFIG_ARCH_R8A77980
+ { .compatible = "renesas,r8a77980", .data = &soc_rcar_v3h },
+#endif
+#ifdef CONFIG_ARCH_R8A77990
+ { .compatible = "renesas,r8a77990", .data = &soc_rcar_e3 },
+ { .compatible = "renesas,r8a779m6", .data = &soc_rcar_e3 },
+#endif
+#ifdef CONFIG_ARCH_R8A77995
+ { .compatible = "renesas,r8a77995", .data = &soc_rcar_d3 },
+ { .compatible = "renesas,r8a779m7", .data = &soc_rcar_d3 },
+#endif
+#ifdef CONFIG_ARCH_R8A779A0
+ { .compatible = "renesas,r8a779a0", .data = &soc_rcar_v3u },
+#endif
+#ifdef CONFIG_ARCH_R8A779F0
+ { .compatible = "renesas,r8a779f0", .data = &soc_rcar_s4 },
+#endif
+#ifdef CONFIG_ARCH_R8A779G0
+ { .compatible = "renesas,r8a779g0", .data = &soc_rcar_v4h },
+#endif
+#ifdef CONFIG_ARCH_R9A07G043
+#ifdef CONFIG_RISCV
+ { .compatible = "renesas,r9a07g043", .data = &soc_rz_five },
+#else
+ { .compatible = "renesas,r9a07g043", .data = &soc_rz_g2ul },
+#endif
+#endif
+#ifdef CONFIG_ARCH_R9A07G044
+ { .compatible = "renesas,r9a07g044", .data = &soc_rz_g2l },
+#endif
+#ifdef CONFIG_ARCH_R9A07G054
+ { .compatible = "renesas,r9a07g054", .data = &soc_rz_v2l },
+#endif
+#ifdef CONFIG_ARCH_R9A09G011
+ { .compatible = "renesas,r9a09g011", .data = &soc_rz_v2m },
+#endif
+#ifdef CONFIG_ARCH_SH73A0
+ { .compatible = "renesas,sh73a0", .data = &soc_shmobile_ag5 },
+#endif
+ { /* sentinel */ }
+};
+
+struct renesas_id {
+ unsigned int offset;
+ u32 mask;
+};
+
+static const struct renesas_id id_bsid __initconst = {
+ .offset = 0,
+ .mask = 0xff0000,
+ /*
+ * TODO: Upper 4 bits of BSID are for chip version, but the format is
+ * not known at this time so we don't know how to specify eshi and eslo
+ */
+};
+
+static const struct renesas_id id_rzg2l __initconst = {
+ .offset = 0xa04,
+ .mask = 0xfffffff,
+};
+
+static const struct renesas_id id_rzv2m __initconst = {
+ .offset = 0x104,
+ .mask = 0xff,
+};
+
+static const struct renesas_id id_prr __initconst = {
+ .offset = 0,
+ .mask = 0xff00,
+};
+
+static const struct of_device_id renesas_ids[] __initconst = {
+ { .compatible = "renesas,bsid", .data = &id_bsid },
+ { .compatible = "renesas,r9a07g043-sysc", .data = &id_rzg2l },
+ { .compatible = "renesas,r9a07g044-sysc", .data = &id_rzg2l },
+ { .compatible = "renesas,r9a07g054-sysc", .data = &id_rzg2l },
+ { .compatible = "renesas,r9a09g011-sys", .data = &id_rzv2m },
+ { .compatible = "renesas,prr", .data = &id_prr },
+ { /* sentinel */ }
+};
+
+static int __init renesas_soc_init(void)
+{
+ struct soc_device_attribute *soc_dev_attr;
+ unsigned int product, eshi = 0, eslo;
+ const struct renesas_family *family;
+ const struct of_device_id *match;
+ const struct renesas_soc *soc;
+ const struct renesas_id *id;
+ void __iomem *chipid = NULL;
+ const char *rev_prefix = "";
+ struct soc_device *soc_dev;
+ struct device_node *np;
+ const char *soc_id;
+ int ret;
+
+ match = of_match_node(renesas_socs, of_root);
+ if (!match)
+ return -ENODEV;
+
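+ /* The SoC name is the part of the compatible string after the vendor prefix */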
+ soc_id = strchr(match->compatible, ',') + 1;
+ soc = match->data;
+ family = soc->family;
+
+ np = of_find_matching_node_and_match(NULL, renesas_ids, &match);
+ if (np) {
+ id = match->data;
+ chipid = of_iomap(np, 0);
+ of_node_put(np);
+ } else if (soc->id && family->reg) {
+ /* Try hardcoded CCCR/PRR fallback */
+ id = &id_prr;
+ chipid = ioremap(family->reg, 4);
+ }
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr) {
+ if (chipid)
+ iounmap(chipid);
+ return -ENOMEM;
+ }
+
+ np = of_find_node_by_path("/");
+ of_property_read_string(np, "model", &soc_dev_attr->machine);
+ of_node_put(np);
+
+ soc_dev_attr->family = kstrdup_const(family->name, GFP_KERNEL);
+ soc_dev_attr->soc_id = kstrdup_const(soc_id, GFP_KERNEL);
+
+ if (chipid) {
+ product = readl(chipid + id->offset);
+ iounmap(chipid);
+
+ if (id == &id_prr) {
+ /* R-Car M3-W ES1.1 incorrectly identifies as ES2.0 */
+ if ((product & 0x7fff) == 0x5210)
+ product ^= 0x11;
+ /* R-Car M3-W ES1.3 incorrectly identifies as ES2.1 */
+ if ((product & 0x7fff) == 0x5211)
+ product ^= 0x12;
+
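+ /* PRR bits 7:4 hold the zero-based major ES revision, bits 3:0 the minor */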
+ eshi = ((product >> 4) & 0x0f) + 1;
+ eslo = product & 0xf;
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "ES%u.%u",
+ eshi, eslo);
+ } else if (id == &id_rzg2l) {
+ eshi = ((product >> 28) & 0x0f);
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%u",
+ eshi);
+ rev_prefix = "Rev ";
+ } else if (id == &id_rzv2m) {
+ eshi = ((product >> 4) & 0x0f);
+ eslo = product & 0xf;
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%u.%u",
+ eshi, eslo);
+ }
+
+ if (soc->id &&
+ ((product & id->mask) >> __ffs(id->mask)) != soc->id) {
+ pr_warn("SoC mismatch (product = 0x%x)\n", product);
+ ret = -ENODEV;
+ goto free_soc_dev_attr;
+ }
+ }
+
+ pr_info("Detected Renesas %s %s %s%s\n", soc_dev_attr->family,
+ soc_dev_attr->soc_id, rev_prefix, soc_dev_attr->revision ?: "");
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ ret = PTR_ERR(soc_dev);
+ goto free_soc_dev_attr;
+ }
+
+ return 0;
+
+free_soc_dev_attr:
+ kfree(soc_dev_attr->revision);
+ kfree_const(soc_dev_attr->soc_id);
+ kfree_const(soc_dev_attr->family);
+ kfree(soc_dev_attr);
+ return ret;
+}
+early_initcall(renesas_soc_init);
diff --git a/drivers/soc/rockchip/Kconfig b/drivers/soc/rockchip/Kconfig
new file mode 100644
index 0000000000..aff2f7e952
--- /dev/null
+++ b/drivers/soc/rockchip/Kconfig
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: GPL-2.0-only
+if ARCH_ROCKCHIP || COMPILE_TEST
+
+#
+# Rockchip Soc drivers
+#
+
+config ROCKCHIP_GRF
+ bool "Rockchip General Register Files support" if COMPILE_TEST
+ default y if ARCH_ROCKCHIP
+ help
+ The General Register Files are a central component providing
+ special additional settings registers for many SoC components.
+ In many cases, default settings also need to be initialized to
+ make some of them conform to the expectations of the kernel.
+
+config ROCKCHIP_IODOMAIN
+ tristate "Rockchip IO domain support"
+ depends on OF
+ help
+ Say y here to enable support for IO domains on Rockchip SoCs. It
+ is necessary for the IO domain setting of the SoC to match the
+ voltage supplied by the regulators.
+
+config ROCKCHIP_PM_DOMAINS
+ bool "Rockchip generic power domain"
+ depends on PM
+ select PM_GENERIC_DOMAINS
+ help
+ Say y here to enable power domain support.
+ In order to meet high performance and low power requirements, a power
+ management unit is designed for saving power when the RK3288 is in low
+ power mode. The RK3288 PMU is dedicated to managing the power of the
+ whole chip.
+
+ If unsure, say N.
+
+config ROCKCHIP_DTPM
+ tristate "Rockchip DTPM hierarchy"
+ depends on DTPM && m
+ help
+ Describe the hierarchy for the Dynamic Thermal Power Management tree
+ on this platform. That will create all the power capping capable
+ devices.
+
+endif
diff --git a/drivers/soc/rockchip/Makefile b/drivers/soc/rockchip/Makefile
new file mode 100644
index 0000000000..23d414433c
--- /dev/null
+++ b/drivers/soc/rockchip/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Rockchip Soc drivers
+#
+obj-$(CONFIG_ROCKCHIP_GRF) += grf.o
+obj-$(CONFIG_ROCKCHIP_IODOMAIN) += io-domain.o
+obj-$(CONFIG_ROCKCHIP_DTPM) += dtpm.o
diff --git a/drivers/soc/rockchip/dtpm.c b/drivers/soc/rockchip/dtpm.c
new file mode 100644
index 0000000000..b36d4f752c
--- /dev/null
+++ b/drivers/soc/rockchip/dtpm.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2021 Linaro Limited
+ *
+ * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
+ *
+ * DTPM hierarchy description
+ */
+#include <linux/dtpm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+static struct dtpm_node __initdata rk3399_hierarchy[] = {
+ [0] = { .name = "rk3399",
+ .type = DTPM_NODE_VIRTUAL },
+ [1] = { .name = "package",
+ .type = DTPM_NODE_VIRTUAL,
+ .parent = &rk3399_hierarchy[0] },
+ [2] = { .name = "/cpus/cpu@0",
+ .type = DTPM_NODE_DT,
+ .parent = &rk3399_hierarchy[1] },
+ [3] = { .name = "/cpus/cpu@1",
+ .type = DTPM_NODE_DT,
+ .parent = &rk3399_hierarchy[1] },
+ [4] = { .name = "/cpus/cpu@2",
+ .type = DTPM_NODE_DT,
+ .parent = &rk3399_hierarchy[1] },
+ [5] = { .name = "/cpus/cpu@3",
+ .type = DTPM_NODE_DT,
+ .parent = &rk3399_hierarchy[1] },
+ [6] = { .name = "/cpus/cpu@100",
+ .type = DTPM_NODE_DT,
+ .parent = &rk3399_hierarchy[1] },
+ [7] = { .name = "/cpus/cpu@101",
+ .type = DTPM_NODE_DT,
+ .parent = &rk3399_hierarchy[1] },
+ [8] = { .name = "/gpu@ff9a0000",
+ .type = DTPM_NODE_DT,
+ .parent = &rk3399_hierarchy[1] },
+ [9] = { /* sentinel */ }
+};
+
+static struct of_device_id __initdata rockchip_dtpm_match_table[] = {
+ { .compatible = "rockchip,rk3399", .data = rk3399_hierarchy },
+ {},
+};
+
+static int __init rockchip_dtpm_init(void)
+{
+ return dtpm_create_hierarchy(rockchip_dtpm_match_table);
+}
+module_init(rockchip_dtpm_init);
+
+static void __exit rockchip_dtpm_exit(void)
+{
+ return dtpm_destroy_hierarchy();
+}
+module_exit(rockchip_dtpm_exit);
+
+MODULE_SOFTDEP("pre: panfrost cpufreq-dt");
+MODULE_DESCRIPTION("Rockchip DTPM driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:dtpm");
+MODULE_AUTHOR("Daniel Lezcano <daniel.lezcano@kernel.org>");
diff --git a/drivers/soc/rockchip/grf.c b/drivers/soc/rockchip/grf.c
new file mode 100644
index 0000000000..5fd62046b2
--- /dev/null
+++ b/drivers/soc/rockchip/grf.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Rockchip Generic Register Files setup
+ *
+ * Copyright (c) 2016 Heiko Stuebner <heiko@sntech.de>
+ */
+
+#include <linux/err.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
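+/*
+ * GRF registers use the upper 16 bits as a write-enable mask for the lower
+ * 16 bits, so e.g. HIWORD_UPDATE(0, 1, 11) clears only bit 11 and leaves
+ * every other bit untouched.
+ */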
+#define HIWORD_UPDATE(val, mask, shift) \
+ ((val) << (shift) | (mask) << ((shift) + 16))
+
+struct rockchip_grf_value {
+ const char *desc;
+ u32 reg;
+ u32 val;
+};
+
+struct rockchip_grf_info {
+ const struct rockchip_grf_value *values;
+ int num_values;
+};
+
+#define RK3036_GRF_SOC_CON0 0x140
+
+static const struct rockchip_grf_value rk3036_defaults[] __initconst = {
+ /*
+ * Disable the automatic jtag/sdmmc switching that causes issues with the
+ * clock framework and the mmc controllers, making them unreliable.
+ */
+ { "jtag switching", RK3036_GRF_SOC_CON0, HIWORD_UPDATE(0, 1, 11) },
+};
+
+static const struct rockchip_grf_info rk3036_grf __initconst = {
+ .values = rk3036_defaults,
+ .num_values = ARRAY_SIZE(rk3036_defaults),
+};
+
+#define RK3128_GRF_SOC_CON0 0x140
+
+static const struct rockchip_grf_value rk3128_defaults[] __initconst = {
+ { "jtag switching", RK3128_GRF_SOC_CON0, HIWORD_UPDATE(0, 1, 8) },
+};
+
+static const struct rockchip_grf_info rk3128_grf __initconst = {
+ .values = rk3128_defaults,
+ .num_values = ARRAY_SIZE(rk3128_defaults),
+};
+
+#define RK3228_GRF_SOC_CON6 0x418
+
+static const struct rockchip_grf_value rk3228_defaults[] __initconst = {
+ { "jtag switching", RK3228_GRF_SOC_CON6, HIWORD_UPDATE(0, 1, 8) },
+};
+
+static const struct rockchip_grf_info rk3228_grf __initconst = {
+ .values = rk3228_defaults,
+ .num_values = ARRAY_SIZE(rk3228_defaults),
+};
+
+#define RK3288_GRF_SOC_CON0 0x244
+#define RK3288_GRF_SOC_CON2 0x24c
+
+static const struct rockchip_grf_value rk3288_defaults[] __initconst = {
+ { "jtag switching", RK3288_GRF_SOC_CON0, HIWORD_UPDATE(0, 1, 12) },
+ { "pwm select", RK3288_GRF_SOC_CON2, HIWORD_UPDATE(1, 1, 0) },
+};
+
+static const struct rockchip_grf_info rk3288_grf __initconst = {
+ .values = rk3288_defaults,
+ .num_values = ARRAY_SIZE(rk3288_defaults),
+};
+
+#define RK3328_GRF_SOC_CON4 0x410
+
+static const struct rockchip_grf_value rk3328_defaults[] __initconst = {
+ { "jtag switching", RK3328_GRF_SOC_CON4, HIWORD_UPDATE(0, 1, 12) },
+};
+
+static const struct rockchip_grf_info rk3328_grf __initconst = {
+ .values = rk3328_defaults,
+ .num_values = ARRAY_SIZE(rk3328_defaults),
+};
+
+#define RK3368_GRF_SOC_CON15 0x43c
+
+static const struct rockchip_grf_value rk3368_defaults[] __initconst = {
+ { "jtag switching", RK3368_GRF_SOC_CON15, HIWORD_UPDATE(0, 1, 13) },
+};
+
+static const struct rockchip_grf_info rk3368_grf __initconst = {
+ .values = rk3368_defaults,
+ .num_values = ARRAY_SIZE(rk3368_defaults),
+};
+
+#define RK3399_GRF_SOC_CON7 0xe21c
+
+static const struct rockchip_grf_value rk3399_defaults[] __initconst = {
+ { "jtag switching", RK3399_GRF_SOC_CON7, HIWORD_UPDATE(0, 1, 12) },
+};
+
+static const struct rockchip_grf_info rk3399_grf __initconst = {
+ .values = rk3399_defaults,
+ .num_values = ARRAY_SIZE(rk3399_defaults),
+};
+
+#define RK3566_GRF_USB3OTG0_CON1 0x0104
+
+static const struct rockchip_grf_value rk3566_defaults[] __initconst = {
+ { "usb3otg port switch", RK3566_GRF_USB3OTG0_CON1, HIWORD_UPDATE(0, 1, 12) },
+ { "usb3otg clock switch", RK3566_GRF_USB3OTG0_CON1, HIWORD_UPDATE(1, 1, 7) },
+ { "usb3otg disable usb3", RK3566_GRF_USB3OTG0_CON1, HIWORD_UPDATE(1, 1, 0) },
+};
+
+static const struct rockchip_grf_info rk3566_pipegrf __initconst = {
+ .values = rk3566_defaults,
+ .num_values = ARRAY_SIZE(rk3566_defaults),
+};
+
+#define RK3588_GRF_SOC_CON6 0x0318
+
+static const struct rockchip_grf_value rk3588_defaults[] __initconst = {
+ { "jtag switching", RK3588_GRF_SOC_CON6, HIWORD_UPDATE(0, 1, 14) },
+};
+
+static const struct rockchip_grf_info rk3588_sysgrf __initconst = {
+ .values = rk3588_defaults,
+ .num_values = ARRAY_SIZE(rk3588_defaults),
+};
+
+
+static const struct of_device_id rockchip_grf_dt_match[] __initconst = {
+ {
+ .compatible = "rockchip,rk3036-grf",
+ .data = (void *)&rk3036_grf,
+ }, {
+ .compatible = "rockchip,rk3128-grf",
+ .data = (void *)&rk3128_grf,
+ }, {
+ .compatible = "rockchip,rk3228-grf",
+ .data = (void *)&rk3228_grf,
+ }, {
+ .compatible = "rockchip,rk3288-grf",
+ .data = (void *)&rk3288_grf,
+ }, {
+ .compatible = "rockchip,rk3328-grf",
+ .data = (void *)&rk3328_grf,
+ }, {
+ .compatible = "rockchip,rk3368-grf",
+ .data = (void *)&rk3368_grf,
+ }, {
+ .compatible = "rockchip,rk3399-grf",
+ .data = (void *)&rk3399_grf,
+ }, {
+ .compatible = "rockchip,rk3566-pipe-grf",
+ .data = (void *)&rk3566_pipegrf,
+ }, {
+ .compatible = "rockchip,rk3588-sys-grf",
+ .data = (void *)&rk3588_sysgrf,
+ },
+ { /* sentinel */ },
+};
+
+static int __init rockchip_grf_init(void)
+{
+ const struct rockchip_grf_info *grf_info;
+ const struct of_device_id *match;
+ struct device_node *np;
+ struct regmap *grf;
+ int ret, i;
+
+ np = of_find_matching_node_and_match(NULL, rockchip_grf_dt_match,
+ &match);
+ if (!np)
+ return -ENODEV;
+ if (!match || !match->data) {
+ pr_err("%s: missing grf data\n", __func__);
+ of_node_put(np);
+ return -EINVAL;
+ }
+
+ grf_info = match->data;
+
+ grf = syscon_node_to_regmap(np);
+ of_node_put(np);
+ if (IS_ERR(grf)) {
+ pr_err("%s: could not get grf syscon\n", __func__);
+ return PTR_ERR(grf);
+ }
+
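+	/*
+	 * Each value already carries its write-enable bits in the upper half
+	 * of the register (see HIWORD_UPDATE), so a plain regmap_write only
+	 * touches the intended field.
+	 */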
+ for (i = 0; i < grf_info->num_values; i++) {
+ const struct rockchip_grf_value *val = &grf_info->values[i];
+
+ pr_debug("%s: adjusting %s in %#6x to %#10x\n", __func__,
+ val->desc, val->reg, val->val);
+ ret = regmap_write(grf, val->reg, val->val);
+ if (ret < 0)
+ pr_err("%s: write to %#6x failed with %d\n",
+ __func__, val->reg, ret);
+ }
+
+ return 0;
+}
+postcore_initcall(rockchip_grf_init);
diff --git a/drivers/soc/rockchip/io-domain.c b/drivers/soc/rockchip/io-domain.c
new file mode 100644
index 0000000000..6619256c2d
--- /dev/null
+++ b/drivers/soc/rockchip/io-domain.c
@@ -0,0 +1,720 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Rockchip IO Voltage Domain driver
+ *
+ * Copyright 2014 MundoReader S.L.
+ * Copyright 2014 Google, Inc.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#define MAX_SUPPLIES 16
+
+/*
+ * The max voltage for 1.8V and 3.3V come from the Rockchip datasheet under
+ * "Recommended Operating Conditions" for "Digital GPIO". When the typical
+ * is 3.3V the max is 3.6V. When the typical is 1.8V the max is 1.98V.
+ *
+ * They are used like this:
+ * - If the voltage on a rail is above the "1.8" voltage (1.98V) we'll tell the
+ * SoC we're at 3.3.
+ * - If the voltage on a rail is above the "3.3" voltage (3.6V) we'll consider
+ * that to be an error.
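+ *
+ * For example, a rail measured at 1.9V is still treated as 1.8V, a rail at
+ * 3.0V selects the 3.3V setting, and anything above 3.6V is rejected.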
+ */
+#define MAX_VOLTAGE_1_8 1980000
+#define MAX_VOLTAGE_3_3 3600000
+
+#define PX30_IO_VSEL 0x180
+#define PX30_IO_VSEL_VCCIO6_SRC BIT(0)
+#define PX30_IO_VSEL_VCCIO6_SUPPLY_NUM 1
+
+#define RK3288_SOC_CON2 0x24c
+#define RK3288_SOC_CON2_FLASH0 BIT(7)
+#define RK3288_SOC_FLASH_SUPPLY_NUM 2
+
+#define RK3328_SOC_CON4 0x410
+#define RK3328_SOC_CON4_VCCIO2 BIT(7)
+#define RK3328_SOC_VCCIO2_SUPPLY_NUM 1
+
+#define RK3368_SOC_CON15 0x43c
+#define RK3368_SOC_CON15_FLASH0 BIT(14)
+#define RK3368_SOC_FLASH_SUPPLY_NUM 2
+
+#define RK3399_PMUGRF_CON0 0x180
+#define RK3399_PMUGRF_CON0_VSEL BIT(8)
+#define RK3399_PMUGRF_VSEL_SUPPLY_NUM 9
+
+#define RK3568_PMU_GRF_IO_VSEL0 (0x0140)
+#define RK3568_PMU_GRF_IO_VSEL1 (0x0144)
+#define RK3568_PMU_GRF_IO_VSEL2 (0x0148)
+
+struct rockchip_iodomain;
+
+struct rockchip_iodomain_supply {
+ struct rockchip_iodomain *iod;
+ struct regulator *reg;
+ struct notifier_block nb;
+ int idx;
+};
+
+struct rockchip_iodomain_soc_data {
+ int grf_offset;
+ const char *supply_names[MAX_SUPPLIES];
+ void (*init)(struct rockchip_iodomain *iod);
+ int (*write)(struct rockchip_iodomain_supply *supply, int uV);
+};
+
+struct rockchip_iodomain {
+ struct device *dev;
+ struct regmap *grf;
+ const struct rockchip_iodomain_soc_data *soc_data;
+ struct rockchip_iodomain_supply supplies[MAX_SUPPLIES];
+ int (*write)(struct rockchip_iodomain_supply *supply, int uV);
+};
+
+static int rk3568_iodomain_write(struct rockchip_iodomain_supply *supply, int uV)
+{
+ struct rockchip_iodomain *iod = supply->iod;
+ u32 is_3v3 = uV > MAX_VOLTAGE_1_8;
+ u32 val0, val1;
+ int b;
+
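+	/*
+	 * PMU_GRF_IO_VSEL0 holds the 1.8V-select bits and VSEL1 the
+	 * 3.3V-select bits for the vccio supplies, so the two registers are
+	 * written with complementary values; pmuio2 uses both halves of
+	 * VSEL2 instead. pmuio1 (idx 0) and vccio2 (idx 3) are not
+	 * programmed here.
+	 */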
+ switch (supply->idx) {
+ case 0: /* pmuio1 */
+ break;
+ case 1: /* pmuio2 */
+ b = supply->idx;
+ val0 = BIT(16 + b) | (is_3v3 ? 0 : BIT(b));
+ b = supply->idx + 4;
+ val1 = BIT(16 + b) | (is_3v3 ? BIT(b) : 0);
+
+ regmap_write(iod->grf, RK3568_PMU_GRF_IO_VSEL2, val0);
+ regmap_write(iod->grf, RK3568_PMU_GRF_IO_VSEL2, val1);
+ break;
+ case 3: /* vccio2 */
+ break;
+ case 2: /* vccio1 */
+ case 4: /* vccio3 */
+ case 5: /* vccio4 */
+ case 6: /* vccio5 */
+ case 7: /* vccio6 */
+ case 8: /* vccio7 */
+ b = supply->idx - 1;
+ val0 = BIT(16 + b) | (is_3v3 ? 0 : BIT(b));
+ val1 = BIT(16 + b) | (is_3v3 ? BIT(b) : 0);
+
+ regmap_write(iod->grf, RK3568_PMU_GRF_IO_VSEL0, val0);
+ regmap_write(iod->grf, RK3568_PMU_GRF_IO_VSEL1, val1);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rockchip_iodomain_write(struct rockchip_iodomain_supply *supply,
+ int uV)
+{
+ struct rockchip_iodomain *iod = supply->iod;
+ u32 val;
+ int ret;
+
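+	/*
+	 * GRF registers treat the upper 16 bits as a write-enable mask for
+	 * the lower 16 bits, so only this supply's select bit is updated.
+	 */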
+ /* set value bit */
+ val = (uV > MAX_VOLTAGE_1_8) ? 0 : 1;
+ val <<= supply->idx;
+
+ /* apply hiword-mask */
+ val |= (BIT(supply->idx) << 16);
+
+ ret = regmap_write(iod->grf, iod->soc_data->grf_offset, val);
+ if (ret)
+ dev_err(iod->dev, "Couldn't write to GRF\n");
+
+ return ret;
+}
+
+static int rockchip_iodomain_notify(struct notifier_block *nb,
+ unsigned long event,
+ void *data)
+{
+ struct rockchip_iodomain_supply *supply =
+ container_of(nb, struct rockchip_iodomain_supply, nb);
+ int uV;
+ int ret;
+
+ /*
+ * According to Rockchip it's important to keep the SoC IO domain
+ * higher than (or equal to) the external voltage. That means we need
+ * to change it before external voltage changes happen in the case
+ * of an increase.
+ *
+ * Note that in the "pre" change we pick the max possible voltage that
+ * the regulator might end up at (the client requests a range and we
+ * don't know for certain the exact voltage). Right now we rely on the
+ * slop in MAX_VOLTAGE_1_8 and MAX_VOLTAGE_3_3 to save us if clients
+ * request something like a max of 3.6V when they really want 3.3V.
+ * We could attempt to come up with better rules if this fails.
+ */
+ if (event & REGULATOR_EVENT_PRE_VOLTAGE_CHANGE) {
+ struct pre_voltage_change_data *pvc_data = data;
+
+ uV = max_t(unsigned long, pvc_data->old_uV, pvc_data->max_uV);
+ } else if (event & (REGULATOR_EVENT_VOLTAGE_CHANGE |
+ REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE)) {
+ uV = (unsigned long)data;
+ } else {
+ return NOTIFY_OK;
+ }
+
+ dev_dbg(supply->iod->dev, "Setting to %d\n", uV);
+
+ if (uV > MAX_VOLTAGE_3_3) {
+ dev_err(supply->iod->dev, "Voltage too high: %d\n", uV);
+
+ if (event == REGULATOR_EVENT_PRE_VOLTAGE_CHANGE)
+ return NOTIFY_BAD;
+ }
+
+ ret = supply->iod->write(supply, uV);
+ if (ret && event == REGULATOR_EVENT_PRE_VOLTAGE_CHANGE)
+ return NOTIFY_BAD;
+
+ dev_dbg(supply->iod->dev, "Setting to %d done\n", uV);
+ return NOTIFY_OK;
+}
+
+static void px30_iodomain_init(struct rockchip_iodomain *iod)
+{
+ int ret;
+ u32 val;
+
+ /* if no VCCIO6 supply we should leave things alone */
+ if (!iod->supplies[PX30_IO_VSEL_VCCIO6_SUPPLY_NUM].reg)
+ return;
+
+ /*
+ * set vccio6 iodomain to also use this framework
+ * instead of a special gpio.
+ */
+ val = PX30_IO_VSEL_VCCIO6_SRC | (PX30_IO_VSEL_VCCIO6_SRC << 16);
+ ret = regmap_write(iod->grf, PX30_IO_VSEL, val);
+ if (ret < 0)
+ dev_warn(iod->dev, "couldn't update vccio6 ctrl\n");
+}
+
+static void rk3288_iodomain_init(struct rockchip_iodomain *iod)
+{
+ int ret;
+ u32 val;
+
+ /* if no flash supply we should leave things alone */
+ if (!iod->supplies[RK3288_SOC_FLASH_SUPPLY_NUM].reg)
+ return;
+
+ /*
+ * set flash0 iodomain to also use this framework
+ * instead of a special gpio.
+ */
+ val = RK3288_SOC_CON2_FLASH0 | (RK3288_SOC_CON2_FLASH0 << 16);
+ ret = regmap_write(iod->grf, RK3288_SOC_CON2, val);
+ if (ret < 0)
+ dev_warn(iod->dev, "couldn't update flash0 ctrl\n");
+}
+
+static void rk3328_iodomain_init(struct rockchip_iodomain *iod)
+{
+ int ret;
+ u32 val;
+
+ /* if no vccio2 supply we should leave things alone */
+ if (!iod->supplies[RK3328_SOC_VCCIO2_SUPPLY_NUM].reg)
+ return;
+
+ /*
+ * set vccio2 iodomain to also use this framework
+ * instead of a special gpio.
+ */
+ val = RK3328_SOC_CON4_VCCIO2 | (RK3328_SOC_CON4_VCCIO2 << 16);
+ ret = regmap_write(iod->grf, RK3328_SOC_CON4, val);
+ if (ret < 0)
+ dev_warn(iod->dev, "couldn't update vccio2 vsel ctrl\n");
+}
+
+static void rk3368_iodomain_init(struct rockchip_iodomain *iod)
+{
+ int ret;
+ u32 val;
+
+ /* if no flash supply we should leave things alone */
+ if (!iod->supplies[RK3368_SOC_FLASH_SUPPLY_NUM].reg)
+ return;
+
+ /*
+ * set flash0 iodomain to also use this framework
+ * instead of a special gpio.
+ */
+ val = RK3368_SOC_CON15_FLASH0 | (RK3368_SOC_CON15_FLASH0 << 16);
+ ret = regmap_write(iod->grf, RK3368_SOC_CON15, val);
+ if (ret < 0)
+ dev_warn(iod->dev, "couldn't update flash0 ctrl\n");
+}
+
+static void rk3399_pmu_iodomain_init(struct rockchip_iodomain *iod)
+{
+ int ret;
+ u32 val;
+
+ /* if no pmu io supply we should leave things alone */
+ if (!iod->supplies[RK3399_PMUGRF_VSEL_SUPPLY_NUM].reg)
+ return;
+
+ /*
+ * set pmu io iodomain to also use this framework
+ * instead of a special gpio.
+ */
+ val = RK3399_PMUGRF_CON0_VSEL | (RK3399_PMUGRF_CON0_VSEL << 16);
+ ret = regmap_write(iod->grf, RK3399_PMUGRF_CON0, val);
+ if (ret < 0)
+ dev_warn(iod->dev, "couldn't update pmu io iodomain ctrl\n");
+}
+
+static const struct rockchip_iodomain_soc_data soc_data_px30 = {
+ .grf_offset = 0x180,
+ .supply_names = {
+ NULL,
+ "vccio6",
+ "vccio1",
+ "vccio2",
+ "vccio3",
+ "vccio4",
+ "vccio5",
+ "vccio-oscgpi",
+ },
+ .init = px30_iodomain_init,
+};
+
+static const struct rockchip_iodomain_soc_data soc_data_px30_pmu = {
+ .grf_offset = 0x100,
+ .supply_names = {
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ "pmuio1",
+ "pmuio2",
+ },
+};
+
+/*
+ * On the rk3188 the io-domains are handled by a shared register, with the
+ * lower 8 bits still holding drive-strength settings.
+ */
+static const struct rockchip_iodomain_soc_data soc_data_rk3188 = {
+ .grf_offset = 0x104,
+ .supply_names = {
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ "ap0",
+ "ap1",
+ "cif",
+ "flash",
+ "vccio0",
+ "vccio1",
+ "lcdc0",
+ "lcdc1",
+ },
+};
+
+static const struct rockchip_iodomain_soc_data soc_data_rk3228 = {
+ .grf_offset = 0x418,
+ .supply_names = {
+ "vccio1",
+ "vccio2",
+ "vccio3",
+ "vccio4",
+ },
+};
+
+static const struct rockchip_iodomain_soc_data soc_data_rk3288 = {
+ .grf_offset = 0x380,
+ .supply_names = {
+ "lcdc", /* LCDC_VDD */
+ "dvp", /* DVPIO_VDD */
+ "flash0", /* FLASH0_VDD (emmc) */
+ "flash1", /* FLASH1_VDD (sdio1) */
+ "wifi", /* APIO3_VDD (sdio0) */
+ "bb", /* APIO5_VDD */
+ "audio", /* APIO4_VDD */
+ "sdcard", /* SDMMC0_VDD (sdmmc) */
+ "gpio30", /* APIO1_VDD */
+ "gpio1830", /* APIO2_VDD */
+ },
+ .init = rk3288_iodomain_init,
+};
+
+static const struct rockchip_iodomain_soc_data soc_data_rk3328 = {
+ .grf_offset = 0x410,
+ .supply_names = {
+ "vccio1",
+ "vccio2",
+ "vccio3",
+ "vccio4",
+ "vccio5",
+ "vccio6",
+ "pmuio",
+ },
+ .init = rk3328_iodomain_init,
+};
+
+static const struct rockchip_iodomain_soc_data soc_data_rk3368 = {
+ .grf_offset = 0x900,
+ .supply_names = {
+ NULL, /* reserved */
+ "dvp", /* DVPIO_VDD */
+ "flash0", /* FLASH0_VDD (emmc) */
+ "wifi", /* APIO2_VDD (sdio0) */
+ NULL,
+ "audio", /* APIO3_VDD */
+ "sdcard", /* SDMMC0_VDD (sdmmc) */
+ "gpio30", /* APIO1_VDD */
+ "gpio1830", /* APIO4_VDD (gpujtag) */
+ },
+ .init = rk3368_iodomain_init,
+};
+
+static const struct rockchip_iodomain_soc_data soc_data_rk3368_pmu = {
+ .grf_offset = 0x100,
+ .supply_names = {
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ "pmu", /*PMU IO domain*/
+ "vop", /*LCDC IO domain*/
+ },
+};
+
+static const struct rockchip_iodomain_soc_data soc_data_rk3399 = {
+ .grf_offset = 0xe640,
+ .supply_names = {
+ "bt656", /* APIO2_VDD */
+ "audio", /* APIO5_VDD */
+ "sdmmc", /* SDMMC0_VDD */
+ "gpio1830", /* APIO4_VDD */
+ },
+};
+
+static const struct rockchip_iodomain_soc_data soc_data_rk3399_pmu = {
+ .grf_offset = 0x180,
+ .supply_names = {
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ "pmu1830", /* PMUIO2_VDD */
+ },
+ .init = rk3399_pmu_iodomain_init,
+};
+
+static const struct rockchip_iodomain_soc_data soc_data_rk3568_pmu = {
+ .grf_offset = 0x140,
+ .supply_names = {
+ "pmuio1",
+ "pmuio2",
+ "vccio1",
+ "vccio2",
+ "vccio3",
+ "vccio4",
+ "vccio5",
+ "vccio6",
+ "vccio7",
+ },
+ .write = rk3568_iodomain_write,
+};
+
+static const struct rockchip_iodomain_soc_data soc_data_rv1108 = {
+ .grf_offset = 0x404,
+ .supply_names = {
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ "vccio1",
+ "vccio2",
+ "vccio3",
+ "vccio5",
+ "vccio6",
+ },
+};
+
+static const struct rockchip_iodomain_soc_data soc_data_rv1108_pmu = {
+ .grf_offset = 0x104,
+ .supply_names = {
+ "pmu",
+ },
+};
+
+static const struct rockchip_iodomain_soc_data soc_data_rv1126_pmu = {
+ .grf_offset = 0x140,
+ .supply_names = {
+ NULL,
+ "vccio1",
+ "vccio2",
+ "vccio3",
+ "vccio4",
+ "vccio5",
+ "vccio6",
+ "vccio7",
+ "pmuio0",
+ "pmuio1",
+ },
+};
+
+static const struct of_device_id rockchip_iodomain_match[] = {
+ {
+ .compatible = "rockchip,px30-io-voltage-domain",
+ .data = (void *)&soc_data_px30
+ },
+ {
+ .compatible = "rockchip,px30-pmu-io-voltage-domain",
+ .data = (void *)&soc_data_px30_pmu
+ },
+ {
+ .compatible = "rockchip,rk3188-io-voltage-domain",
+ .data = &soc_data_rk3188
+ },
+ {
+ .compatible = "rockchip,rk3228-io-voltage-domain",
+ .data = &soc_data_rk3228
+ },
+ {
+ .compatible = "rockchip,rk3288-io-voltage-domain",
+ .data = &soc_data_rk3288
+ },
+ {
+ .compatible = "rockchip,rk3328-io-voltage-domain",
+ .data = &soc_data_rk3328
+ },
+ {
+ .compatible = "rockchip,rk3368-io-voltage-domain",
+ .data = &soc_data_rk3368
+ },
+ {
+ .compatible = "rockchip,rk3368-pmu-io-voltage-domain",
+ .data = &soc_data_rk3368_pmu
+ },
+ {
+ .compatible = "rockchip,rk3399-io-voltage-domain",
+ .data = &soc_data_rk3399
+ },
+ {
+ .compatible = "rockchip,rk3399-pmu-io-voltage-domain",
+ .data = &soc_data_rk3399_pmu
+ },
+ {
+ .compatible = "rockchip,rk3568-pmu-io-voltage-domain",
+ .data = &soc_data_rk3568_pmu
+ },
+ {
+ .compatible = "rockchip,rv1108-io-voltage-domain",
+ .data = &soc_data_rv1108
+ },
+ {
+ .compatible = "rockchip,rv1108-pmu-io-voltage-domain",
+ .data = &soc_data_rv1108_pmu
+ },
+ {
+ .compatible = "rockchip,rv1126-pmu-io-voltage-domain",
+ .data = &soc_data_rv1126_pmu
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, rockchip_iodomain_match);
+
+static int rockchip_iodomain_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *match;
+ struct rockchip_iodomain *iod;
+ struct device *parent;
+ int i, ret = 0;
+
+ if (!np)
+ return -ENODEV;
+
+ iod = devm_kzalloc(&pdev->dev, sizeof(*iod), GFP_KERNEL);
+ if (!iod)
+ return -ENOMEM;
+
+ iod->dev = &pdev->dev;
+ platform_set_drvdata(pdev, iod);
+
+ match = of_match_node(rockchip_iodomain_match, np);
+ iod->soc_data = match->data;
+
+ if (iod->soc_data->write)
+ iod->write = iod->soc_data->write;
+ else
+ iod->write = rockchip_iodomain_write;
+
+ parent = pdev->dev.parent;
+ if (parent && parent->of_node) {
+ iod->grf = syscon_node_to_regmap(parent->of_node);
+ } else {
+ dev_dbg(&pdev->dev, "falling back to old binding\n");
+ iod->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+ }
+
+ if (IS_ERR(iod->grf)) {
+ dev_err(&pdev->dev, "couldn't find grf regmap\n");
+ return PTR_ERR(iod->grf);
+ }
+
+ for (i = 0; i < MAX_SUPPLIES; i++) {
+ const char *supply_name = iod->soc_data->supply_names[i];
+ struct rockchip_iodomain_supply *supply = &iod->supplies[i];
+ struct regulator *reg;
+ int uV;
+
+ if (!supply_name)
+ continue;
+
+ reg = devm_regulator_get_optional(iod->dev, supply_name);
+ if (IS_ERR(reg)) {
+ ret = PTR_ERR(reg);
+
+ /* If a supply wasn't specified, that's OK */
+ if (ret == -ENODEV)
+ continue;
+ else if (ret != -EPROBE_DEFER)
+ dev_err(iod->dev, "couldn't get regulator %s\n",
+ supply_name);
+ goto unreg_notify;
+ }
+
+ /* set initial correct value */
+ uV = regulator_get_voltage(reg);
+
+ /* must be a regulator we can get the voltage of */
+ if (uV < 0) {
+ dev_err(iod->dev, "Can't determine voltage: %s\n",
+ supply_name);
+ ret = uV;
+ goto unreg_notify;
+ }
+
+ if (uV > MAX_VOLTAGE_3_3) {
+ dev_crit(iod->dev,
+ "%d uV is too high. May damage SoC!\n",
+ uV);
+ ret = -EINVAL;
+ goto unreg_notify;
+ }
+
+ /* setup our supply */
+ supply->idx = i;
+ supply->iod = iod;
+ supply->reg = reg;
+ supply->nb.notifier_call = rockchip_iodomain_notify;
+
+ ret = iod->write(supply, uV);
+ if (ret) {
+ supply->reg = NULL;
+ goto unreg_notify;
+ }
+
+ /* register regulator notifier */
+ ret = regulator_register_notifier(reg, &supply->nb);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "regulator notifier request failed\n");
+ supply->reg = NULL;
+ goto unreg_notify;
+ }
+ }
+
+ if (iod->soc_data->init)
+ iod->soc_data->init(iod);
+
+ return 0;
+
+unreg_notify:
+ for (i = MAX_SUPPLIES - 1; i >= 0; i--) {
+ struct rockchip_iodomain_supply *io_supply = &iod->supplies[i];
+
+ if (io_supply->reg)
+ regulator_unregister_notifier(io_supply->reg,
+ &io_supply->nb);
+ }
+
+ return ret;
+}
+
+static int rockchip_iodomain_remove(struct platform_device *pdev)
+{
+ struct rockchip_iodomain *iod = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = MAX_SUPPLIES - 1; i >= 0; i--) {
+ struct rockchip_iodomain_supply *io_supply = &iod->supplies[i];
+
+ if (io_supply->reg)
+ regulator_unregister_notifier(io_supply->reg,
+ &io_supply->nb);
+ }
+
+ return 0;
+}
+
+static struct platform_driver rockchip_iodomain_driver = {
+ .probe = rockchip_iodomain_probe,
+ .remove = rockchip_iodomain_remove,
+ .driver = {
+ .name = "rockchip-iodomain",
+ .of_match_table = rockchip_iodomain_match,
+ },
+};
+
+module_platform_driver(rockchip_iodomain_driver);
+
+MODULE_DESCRIPTION("Rockchip IO-domain driver");
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_AUTHOR("Doug Anderson <dianders@chromium.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig
new file mode 100644
index 0000000000..7a8f291e77
--- /dev/null
+++ b/drivers/soc/samsung/Kconfig
@@ -0,0 +1,81 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Samsung SoC drivers
+#
+menuconfig SOC_SAMSUNG
+ bool "Samsung SoC driver support" if COMPILE_TEST
+
+if SOC_SAMSUNG
+
+# There is no need to enable these drivers for ARMv8
+config EXYNOS_ASV_ARM
+ bool "Exynos ASV ARMv7-specific driver extensions" if COMPILE_TEST
+ depends on EXYNOS_CHIPID
+
+config EXYNOS_CHIPID
+ tristate "Exynos ChipID controller and ASV driver"
+ depends on ARCH_EXYNOS || COMPILE_TEST
+ default ARCH_EXYNOS
+ select EXYNOS_ASV_ARM if ARM && ARCH_EXYNOS
+ select MFD_SYSCON
+ select SOC_BUS
+ help
+ Support for Samsung Exynos SoC ChipID and Adaptive Supply Voltage.
+ This driver can also be built as a module (exynos_chipid).
+
+config EXYNOS_USI
+ tristate "Exynos USI (Universal Serial Interface) driver"
+ default ARCH_EXYNOS && ARM64
+ depends on ARCH_EXYNOS || COMPILE_TEST
+ select MFD_SYSCON
+ help
+ Enable support for the USI block. USI (Universal Serial Interface) is
+ an IP-core found in modern Samsung Exynos SoCs, like Exynos850 and
+ ExynosAutoV9. The USI block can be configured to provide one of the
+ following serial protocols: UART, SPI or High Speed I2C.
+
+ This driver allows one to configure USI for the desired protocol,
+ which is usually done in the USI node in the Device Tree.
+
+config EXYNOS_PMU
+ bool "Exynos PMU controller driver" if COMPILE_TEST
+ depends on ARCH_EXYNOS || ((ARM || ARM64) && COMPILE_TEST)
+ select EXYNOS_PMU_ARM_DRIVERS if ARM && ARCH_EXYNOS
+ select MFD_CORE
+
+# There is no need to enable these drivers for ARMv8
+config EXYNOS_PMU_ARM_DRIVERS
+ bool "Exynos PMU ARMv7-specific driver extensions" if COMPILE_TEST
+ depends on EXYNOS_PMU
+
+config EXYNOS_PM_DOMAINS
+ bool "Exynos PM domains" if COMPILE_TEST
+ depends on (ARCH_EXYNOS && PM_GENERIC_DOMAINS) || COMPILE_TEST
+
+config SAMSUNG_PM_CHECK
+ bool "S3C2410 PM Suspend Memory CRC"
+ depends on PM && (ARCH_S3C64XX || ARCH_S5PV210)
+ select CRC32
+ help
+ Enable the PM code's memory area checksum over sleep. This option
+ will generate CRCs of all blocks of memory, and store them before
+ going to sleep. The blocks are then checked on resume for any
+ errors.
+
+ Note, this can take several seconds depending on memory size
+ and CPU speed.
+
+config SAMSUNG_PM_CHECK_CHUNKSIZE
+ int "S3C2410 PM Suspend CRC Chunksize (KiB)"
+ depends on PM && SAMSUNG_PM_CHECK
+ default 64
+ help
+ Set the chunksize in Kilobytes of the CRC for checking memory
+ corruption over suspend and resume. A smaller value will mean that
+ the CRC data block will take more memory, but will identify any
+ faults with better precision.
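+
+ For example, checking 512 MiB of RAM with the default 64 KiB chunks
+ stores 8192 CRC values.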
+
+config EXYNOS_REGULATOR_COUPLER
+ bool "Exynos SoC Regulator Coupler" if COMPILE_TEST
+ depends on ARCH_EXYNOS || COMPILE_TEST
+endif
diff --git a/drivers/soc/samsung/Makefile b/drivers/soc/samsung/Makefile
new file mode 100644
index 0000000000..248a33d775
--- /dev/null
+++ b/drivers/soc/samsung/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_EXYNOS_ASV_ARM) += exynos5422-asv.o
+obj-$(CONFIG_EXYNOS_CHIPID) += exynos_chipid.o
+exynos_chipid-y += exynos-chipid.o exynos-asv.o
+
+obj-$(CONFIG_EXYNOS_USI) += exynos-usi.o
+
+obj-$(CONFIG_EXYNOS_PMU) += exynos-pmu.o
+
+obj-$(CONFIG_EXYNOS_PMU_ARM_DRIVERS) += exynos3250-pmu.o exynos4-pmu.o \
+ exynos5250-pmu.o exynos5420-pmu.o
+obj-$(CONFIG_EXYNOS_REGULATOR_COUPLER) += exynos-regulator-coupler.o
+
+obj-$(CONFIG_SAMSUNG_PM_CHECK) += s3c-pm-check.o
diff --git a/drivers/soc/samsung/exynos-asv.c b/drivers/soc/samsung/exynos-asv.c
new file mode 100644
index 0000000000..d60af8acc3
--- /dev/null
+++ b/drivers/soc/samsung/exynos-asv.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ * Copyright (c) 2020 Krzysztof Kozlowski <krzk@kernel.org>
+ * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ * Author: Krzysztof Kozlowski <krzk@kernel.org>
+ *
+ * Samsung Exynos SoC Adaptive Supply Voltage support
+ */
+
+#include <linux/cpu.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/of.h>
+#include <linux/pm_opp.h>
+#include <linux/regmap.h>
+#include <linux/soc/samsung/exynos-chipid.h>
+
+#include "exynos-asv.h"
+#include "exynos5422-asv.h"
+
+#define MHZ 1000000U
+
+static int exynos_asv_update_cpu_opps(struct exynos_asv *asv,
+ struct device *cpu)
+{
+ struct exynos_asv_subsys *subsys = NULL;
+ struct dev_pm_opp *opp;
+ unsigned int opp_freq;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(asv->subsys); i++) {
+ if (of_device_is_compatible(cpu->of_node,
+ asv->subsys[i].cpu_dt_compat)) {
+ subsys = &asv->subsys[i];
+ break;
+ }
+ }
+ if (!subsys)
+ return -EINVAL;
+
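+	/*
+	 * Walk this subsystem's ASV table row by row: look up the OPP
+	 * matching each row's frequency and adjust its voltage to the
+	 * table value.
+	 */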
+ for (i = 0; i < subsys->table.num_rows; i++) {
+ unsigned int new_volt, volt;
+ int ret;
+
+ opp_freq = exynos_asv_opp_get_frequency(subsys, i);
+
+ opp = dev_pm_opp_find_freq_exact(cpu, opp_freq * MHZ, true);
+ if (IS_ERR(opp)) {
+ dev_info(asv->dev, "cpu%d opp%d, freq: %u missing\n",
+ cpu->id, i, opp_freq);
+
+ continue;
+ }
+
+ volt = dev_pm_opp_get_voltage(opp);
+ new_volt = asv->opp_get_voltage(subsys, i, volt);
+ dev_pm_opp_put(opp);
+
+ if (new_volt == volt)
+ continue;
+
+ ret = dev_pm_opp_adjust_voltage(cpu, opp_freq * MHZ,
+ new_volt, new_volt, new_volt);
+ if (ret < 0)
+ dev_err(asv->dev,
+ "Failed to adjust OPP %u Hz/%u uV for cpu%d\n",
+ opp_freq, new_volt, cpu->id);
+ else
+ dev_dbg(asv->dev,
+ "Adjusted OPP %u Hz/%u -> %u uV, cpu%d\n",
+ opp_freq, volt, new_volt, cpu->id);
+ }
+
+ return 0;
+}
+
+static int exynos_asv_update_opps(struct exynos_asv *asv)
+{
+ struct opp_table *last_opp_table = NULL;
+ struct device *cpu;
+ int ret, cpuid;
+
+ for_each_possible_cpu(cpuid) {
+ struct opp_table *opp_table;
+
+ cpu = get_cpu_device(cpuid);
+ if (!cpu)
+ continue;
+
+ opp_table = dev_pm_opp_get_opp_table(cpu);
+ if (IS_ERR(opp_table))
+ continue;
+
+ if (!last_opp_table || opp_table != last_opp_table) {
+ last_opp_table = opp_table;
+
+ ret = exynos_asv_update_cpu_opps(asv, cpu);
+ if (ret < 0)
+ dev_err(asv->dev, "Couldn't update OPPs for cpu%d\n",
+ cpuid);
+ }
+
+ dev_pm_opp_put_opp_table(opp_table);
+ }
+
+ return 0;
+}
+
+int exynos_asv_init(struct device *dev, struct regmap *regmap)
+{
+ int (*probe_func)(struct exynos_asv *asv);
+ struct exynos_asv *asv;
+ struct device *cpu_dev;
+ u32 product_id = 0;
+ int ret, i;
+
+ asv = devm_kzalloc(dev, sizeof(*asv), GFP_KERNEL);
+ if (!asv)
+ return -ENOMEM;
+
+ asv->chipid_regmap = regmap;
+ asv->dev = dev;
+ ret = regmap_read(asv->chipid_regmap, EXYNOS_CHIPID_REG_PRO_ID,
+ &product_id);
+ if (ret < 0) {
+ dev_err(dev, "Cannot read revision from ChipID: %d\n", ret);
+ return -ENODEV;
+ }
+
+ switch (product_id & EXYNOS_MASK) {
+ case 0xE5422000:
+ probe_func = exynos5422_asv_init;
+ break;
+ default:
+ dev_dbg(dev, "No ASV support for this SoC\n");
+ devm_kfree(dev, asv);
+ return 0;
+ }
+
+ cpu_dev = get_cpu_device(0);
+ ret = dev_pm_opp_get_opp_count(cpu_dev);
+ if (ret < 0)
+ return -EPROBE_DEFER;
+
+ ret = of_property_read_u32(dev->of_node, "samsung,asv-bin",
+ &asv->of_bin);
+ if (ret < 0)
+ asv->of_bin = -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(asv->subsys); i++)
+ asv->subsys[i].asv = asv;
+
+ ret = probe_func(asv);
+ if (ret < 0)
+ return ret;
+
+ return exynos_asv_update_opps(asv);
+}
diff --git a/drivers/soc/samsung/exynos-asv.h b/drivers/soc/samsung/exynos-asv.h
new file mode 100644
index 0000000000..dcbe154db3
--- /dev/null
+++ b/drivers/soc/samsung/exynos-asv.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * Samsung Exynos SoC Adaptive Supply Voltage support
+ */
+#ifndef __LINUX_SOC_EXYNOS_ASV_H
+#define __LINUX_SOC_EXYNOS_ASV_H
+
+struct regmap;
+
+/* HPM, IDS values to select target group */
+struct asv_limit_entry {
+ unsigned int hpm;
+ unsigned int ids;
+};
+
+struct exynos_asv_table {
+ unsigned int num_rows;
+ unsigned int num_cols;
+ u32 *buf;
+};
+
+struct exynos_asv_subsys {
+ struct exynos_asv *asv;
+ const char *cpu_dt_compat;
+ int id;
+ struct exynos_asv_table table;
+
+ unsigned int base_volt;
+ unsigned int offset_volt_h;
+ unsigned int offset_volt_l;
+};
+
+struct exynos_asv {
+ struct device *dev;
+ struct regmap *chipid_regmap;
+ struct exynos_asv_subsys subsys[2];
+
+ int (*opp_get_voltage)(const struct exynos_asv_subsys *subs,
+ int level, unsigned int voltage);
+ unsigned int group;
+ unsigned int table;
+
+ /* True if SG fields from PKG_ID register should be used */
+ bool use_sg;
+ /* ASV bin read from DT */
+ int of_bin;
+};
+
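+/*
+ * ASV tables are flat row-major arrays: column 0 of each row holds the OPP
+ * frequency (MHz) and the remaining columns hold the target voltage for
+ * each ASV group.
+ */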
+static inline u32 __asv_get_table_entry(const struct exynos_asv_table *table,
+ unsigned int row, unsigned int col)
+{
+ return table->buf[row * (table->num_cols) + col];
+}
+
+static inline u32 exynos_asv_opp_get_voltage(const struct exynos_asv_subsys *subsys,
+ unsigned int level, unsigned int group)
+{
+ return __asv_get_table_entry(&subsys->table, level, group + 1);
+}
+
+static inline u32 exynos_asv_opp_get_frequency(const struct exynos_asv_subsys *subsys,
+ unsigned int level)
+{
+ return __asv_get_table_entry(&subsys->table, level, 0);
+}
+
+int exynos_asv_init(struct device *dev, struct regmap *regmap);
+
+#endif /* __LINUX_SOC_EXYNOS_ASV_H */
diff --git a/drivers/soc/samsung/exynos-chipid.c b/drivers/soc/samsung/exynos-chipid.c
new file mode 100644
index 0000000000..7ba45c4aff
--- /dev/null
+++ b/drivers/soc/samsung/exynos-chipid.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ * Copyright (c) 2020 Krzysztof Kozlowski <krzk@kernel.org>
+ *
+ * Exynos - CHIP ID support
+ * Author: Pankaj Dubey <pankaj.dubey@samsung.com>
+ * Author: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
+ * Author: Krzysztof Kozlowski <krzk@kernel.org>
+ *
+ * Samsung Exynos SoC Adaptive Supply Voltage and Chip ID support
+ */
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/soc/samsung/exynos-chipid.h>
+#include <linux/sys_soc.h>
+
+#include "exynos-asv.h"
+
+struct exynos_chipid_variant {
+ unsigned int rev_reg; /* revision register offset */
+ unsigned int main_rev_shift; /* main revision offset in rev_reg */
+ unsigned int sub_rev_shift; /* sub revision offset in rev_reg */
+};
+
+struct exynos_chipid_info {
+ u32 product_id;
+ u32 revision;
+};
+
+static const struct exynos_soc_id {
+ const char *name;
+ unsigned int id;
+} soc_ids[] = {
+ /* List ordered by SoC name */
+ /* Compatible with: samsung,exynos4210-chipid */
+ { "EXYNOS3250", 0xE3472000 },
+ { "EXYNOS4210", 0x43200000 }, /* EVT0 revision */
+ { "EXYNOS4210", 0x43210000 },
+ { "EXYNOS4212", 0x43220000 },
+ { "EXYNOS4412", 0xE4412000 },
+ { "EXYNOS5250", 0x43520000 },
+ { "EXYNOS5260", 0xE5260000 },
+ { "EXYNOS5410", 0xE5410000 },
+ { "EXYNOS5420", 0xE5420000 },
+ { "EXYNOS5433", 0xE5433000 },
+ { "EXYNOS5440", 0xE5440000 },
+ { "EXYNOS5800", 0xE5422000 },
+ { "EXYNOS7420", 0xE7420000 },
+ /* Compatible with: samsung,exynos850-chipid */
+ { "EXYNOS7885", 0xE7885000 },
+ { "EXYNOS850", 0xE3830000 },
+ { "EXYNOSAUTOV9", 0xAAA80000 },
+};
+
+static const char *product_id_to_soc_id(unsigned int product_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(soc_ids); i++)
+ if (product_id == soc_ids[i].id)
+ return soc_ids[i].name;
+ return NULL;
+}
+
+static int exynos_chipid_get_chipid_info(struct regmap *regmap,
+ const struct exynos_chipid_variant *data,
+ struct exynos_chipid_info *soc_info)
+{
+ int ret;
+ unsigned int val, main_rev, sub_rev;
+
+ ret = regmap_read(regmap, EXYNOS_CHIPID_REG_PRO_ID, &val);
+ if (ret < 0)
+ return ret;
+ soc_info->product_id = val & EXYNOS_MASK;
+
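+	/*
+	 * Reuse the PRO_ID value read above when the variant keeps its
+	 * revision bits there; otherwise read the dedicated revision
+	 * register.
+	 */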
+ if (data->rev_reg != EXYNOS_CHIPID_REG_PRO_ID) {
+ ret = regmap_read(regmap, data->rev_reg, &val);
+ if (ret < 0)
+ return ret;
+ }
+ main_rev = (val >> data->main_rev_shift) & EXYNOS_REV_PART_MASK;
+ sub_rev = (val >> data->sub_rev_shift) & EXYNOS_REV_PART_MASK;
+ soc_info->revision = (main_rev << EXYNOS_REV_PART_SHIFT) | sub_rev;
+
+ return 0;
+}
+
+static int exynos_chipid_probe(struct platform_device *pdev)
+{
+ const struct exynos_chipid_variant *drv_data;
+ struct exynos_chipid_info soc_info;
+ struct soc_device_attribute *soc_dev_attr;
+ struct soc_device *soc_dev;
+ struct device_node *root;
+ struct regmap *regmap;
+ int ret;
+
+ drv_data = of_device_get_match_data(&pdev->dev);
+ if (!drv_data)
+ return -EINVAL;
+
+ regmap = device_node_to_regmap(pdev->dev.of_node);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ret = exynos_chipid_get_chipid_info(regmap, drv_data, &soc_info);
+ if (ret < 0)
+ return ret;
+
+ soc_dev_attr = devm_kzalloc(&pdev->dev, sizeof(*soc_dev_attr),
+ GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ soc_dev_attr->family = "Samsung Exynos";
+
+ root = of_find_node_by_path("/");
+ of_property_read_string(root, "model", &soc_dev_attr->machine);
+ of_node_put(root);
+
+ soc_dev_attr->revision = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "%x", soc_info.revision);
+ soc_dev_attr->soc_id = product_id_to_soc_id(soc_info.product_id);
+ if (!soc_dev_attr->soc_id) {
+ pr_err("Unknown SoC\n");
+ return -ENODEV;
+ }
+
+ /* please note that the actual registration will be deferred */
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev))
+ return PTR_ERR(soc_dev);
+
+ ret = exynos_asv_init(&pdev->dev, regmap);
+ if (ret)
+ goto err;
+
+ platform_set_drvdata(pdev, soc_dev);
+
+ dev_info(&pdev->dev, "Exynos: CPU[%s] PRO_ID[0x%x] REV[0x%x] Detected\n",
+ soc_dev_attr->soc_id, soc_info.product_id, soc_info.revision);
+
+ return 0;
+
+err:
+ soc_device_unregister(soc_dev);
+
+ return ret;
+}
+
+static int exynos_chipid_remove(struct platform_device *pdev)
+{
+ struct soc_device *soc_dev = platform_get_drvdata(pdev);
+
+ soc_device_unregister(soc_dev);
+
+ return 0;
+}
+
+static const struct exynos_chipid_variant exynos4210_chipid_drv_data = {
+ .rev_reg = 0x0,
+ .main_rev_shift = 4,
+ .sub_rev_shift = 0,
+};
+
+static const struct exynos_chipid_variant exynos850_chipid_drv_data = {
+ .rev_reg = 0x10,
+ .main_rev_shift = 20,
+ .sub_rev_shift = 16,
+};
+
+static const struct of_device_id exynos_chipid_of_device_ids[] = {
+ {
+ .compatible = "samsung,exynos4210-chipid",
+ .data = &exynos4210_chipid_drv_data,
+ }, {
+ .compatible = "samsung,exynos850-chipid",
+ .data = &exynos850_chipid_drv_data,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, exynos_chipid_of_device_ids);
+
+static struct platform_driver exynos_chipid_driver = {
+ .driver = {
+ .name = "exynos-chipid",
+ .of_match_table = exynos_chipid_of_device_ids,
+ },
+ .probe = exynos_chipid_probe,
+ .remove = exynos_chipid_remove,
+};
+module_platform_driver(exynos_chipid_driver);
+
+MODULE_DESCRIPTION("Samsung Exynos ChipID controller and ASV driver");
+MODULE_AUTHOR("Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>");
+MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
+MODULE_AUTHOR("Pankaj Dubey <pankaj.dubey@samsung.com>");
+MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
new file mode 100644
index 0000000000..250537d7cf
--- /dev/null
+++ b/drivers/soc/samsung/exynos-pmu.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2011-2014 Samsung Electronics Co., Ltd.
+// http://www.samsung.com/
+//
+// Exynos - CPU PMU(Power Management Unit) support
+
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+
+#include <linux/soc/samsung/exynos-regs-pmu.h>
+#include <linux/soc/samsung/exynos-pmu.h>
+
+#include "exynos-pmu.h"
+
+struct exynos_pmu_context {
+ struct device *dev;
+ const struct exynos_pmu_data *pmu_data;
+};
+
+void __iomem *pmu_base_addr;
+static struct exynos_pmu_context *pmu_context;
+
+void pmu_raw_writel(u32 val, u32 offset)
+{
+ writel_relaxed(val, pmu_base_addr + offset);
+}
+
+u32 pmu_raw_readl(u32 offset)
+{
+ return readl_relaxed(pmu_base_addr + offset);
+}
+
+void exynos_sys_powerdown_conf(enum sys_powerdown mode)
+{
+ unsigned int i;
+ const struct exynos_pmu_data *pmu_data;
+
+ if (!pmu_context || !pmu_context->pmu_data)
+ return;
+
+ pmu_data = pmu_context->pmu_data;
+
+ if (pmu_data->powerdown_conf)
+ pmu_data->powerdown_conf(mode);
+
+ if (pmu_data->pmu_config) {
+ for (i = 0; (pmu_data->pmu_config[i].offset != PMU_TABLE_END); i++)
+ pmu_raw_writel(pmu_data->pmu_config[i].val[mode],
+ pmu_data->pmu_config[i].offset);
+ }
+
+ if (pmu_data->powerdown_conf_extra)
+ pmu_data->powerdown_conf_extra(mode);
+
+ if (pmu_data->pmu_config_extra) {
+ for (i = 0; pmu_data->pmu_config_extra[i].offset != PMU_TABLE_END; i++)
+ pmu_raw_writel(pmu_data->pmu_config_extra[i].val[mode],
+ pmu_data->pmu_config_extra[i].offset);
+ }
+}
+
+/*
+ * Split the data between ARM architectures because it is relatively big
+ * and useless on other architectures.
+ */
+#ifdef CONFIG_EXYNOS_PMU_ARM_DRIVERS
+#define exynos_pmu_data_arm_ptr(data) (&data)
+#else
+#define exynos_pmu_data_arm_ptr(data) NULL
+#endif
+
+/*
+ * PMU platform driver and devicetree bindings.
+ */
+static const struct of_device_id exynos_pmu_of_device_ids[] = {
+ {
+ .compatible = "samsung,exynos3250-pmu",
+ .data = exynos_pmu_data_arm_ptr(exynos3250_pmu_data),
+ }, {
+ .compatible = "samsung,exynos4210-pmu",
+ .data = exynos_pmu_data_arm_ptr(exynos4210_pmu_data),
+ }, {
+ .compatible = "samsung,exynos4212-pmu",
+ .data = exynos_pmu_data_arm_ptr(exynos4212_pmu_data),
+ }, {
+ .compatible = "samsung,exynos4412-pmu",
+ .data = exynos_pmu_data_arm_ptr(exynos4412_pmu_data),
+ }, {
+ .compatible = "samsung,exynos5250-pmu",
+ .data = exynos_pmu_data_arm_ptr(exynos5250_pmu_data),
+ }, {
+ .compatible = "samsung,exynos5410-pmu",
+ }, {
+ .compatible = "samsung,exynos5420-pmu",
+ .data = exynos_pmu_data_arm_ptr(exynos5420_pmu_data),
+ }, {
+ .compatible = "samsung,exynos5433-pmu",
+ }, {
+ .compatible = "samsung,exynos7-pmu",
+ }, {
+ .compatible = "samsung,exynos850-pmu",
+ },
+ { /*sentinel*/ },
+};
+
+static const struct mfd_cell exynos_pmu_devs[] = {
+ { .name = "exynos-clkout", },
+};
+
+struct regmap *exynos_get_pmu_regmap(void)
+{
+ struct device_node *np = of_find_matching_node(NULL,
+ exynos_pmu_of_device_ids);
+ if (np)
+ return syscon_node_to_regmap(np);
+ return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap);
+
+static int exynos_pmu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ pmu_base_addr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pmu_base_addr))
+ return PTR_ERR(pmu_base_addr);
+
+ pmu_context = devm_kzalloc(&pdev->dev,
+ sizeof(struct exynos_pmu_context),
+ GFP_KERNEL);
+ if (!pmu_context)
+ return -ENOMEM;
+ pmu_context->dev = dev;
+ pmu_context->pmu_data = of_device_get_match_data(dev);
+
+ if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_init)
+ pmu_context->pmu_data->pmu_init();
+
+ platform_set_drvdata(pdev, pmu_context);
+
+ ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, exynos_pmu_devs,
+ ARRAY_SIZE(exynos_pmu_devs), NULL, 0, NULL);
+ if (ret)
+ return ret;
+
+ if (devm_of_platform_populate(dev))
+ dev_err(dev, "Error populating children, reboot and poweroff might not work properly\n");
+
+ dev_dbg(dev, "Exynos PMU Driver probe done\n");
+ return 0;
+}
+
+static struct platform_driver exynos_pmu_driver = {
+ .driver = {
+ .name = "exynos-pmu",
+ .of_match_table = exynos_pmu_of_device_ids,
+ },
+ .probe = exynos_pmu_probe,
+};
+
+static int __init exynos_pmu_init(void)
+{
+ return platform_driver_register(&exynos_pmu_driver);
+}
+postcore_initcall(exynos_pmu_init);
diff --git a/drivers/soc/samsung/exynos-pmu.h b/drivers/soc/samsung/exynos-pmu.h
new file mode 100644
index 0000000000..1c652ffd79
--- /dev/null
+++ b/drivers/soc/samsung/exynos-pmu.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Header for Exynos PMU Driver support
+ */
+
+#ifndef __EXYNOS_PMU_H
+#define __EXYNOS_PMU_H
+
+#include <linux/io.h>
+
+#define PMU_TABLE_END (-1U)
+
+struct exynos_pmu_conf {
+ unsigned int offset;
+ u8 val[NUM_SYS_POWERDOWN];
+};
+
+struct exynos_pmu_data {
+ const struct exynos_pmu_conf *pmu_config;
+ const struct exynos_pmu_conf *pmu_config_extra;
+
+ void (*pmu_init)(void);
+ void (*powerdown_conf)(enum sys_powerdown);
+ void (*powerdown_conf_extra)(enum sys_powerdown);
+};
+
+extern void __iomem *pmu_base_addr;
+
+#ifdef CONFIG_EXYNOS_PMU_ARM_DRIVERS
+/* list of all exported SoC specific data */
+extern const struct exynos_pmu_data exynos3250_pmu_data;
+extern const struct exynos_pmu_data exynos4210_pmu_data;
+extern const struct exynos_pmu_data exynos4212_pmu_data;
+extern const struct exynos_pmu_data exynos4412_pmu_data;
+extern const struct exynos_pmu_data exynos5250_pmu_data;
+extern const struct exynos_pmu_data exynos5420_pmu_data;
+#endif
+
+extern void pmu_raw_writel(u32 val, u32 offset);
+extern u32 pmu_raw_readl(u32 offset);
+#endif /* __EXYNOS_PMU_H */
diff --git a/drivers/soc/samsung/exynos-regulator-coupler.c b/drivers/soc/samsung/exynos-regulator-coupler.c
new file mode 100644
index 0000000000..61a156b44a
--- /dev/null
+++ b/drivers/soc/samsung/exynos-regulator-coupler.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ * Author: Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * Simplified generic voltage coupler from regulator core.c
+ * The main difference is that it keeps current regulator voltage
+ * if consumers didn't apply their constraints yet.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/regulator/coupler.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+
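+/*
+ * Returns 1 when the regulator can already be set to its final (optimal)
+ * voltage, 0 when another balancing step will be needed afterwards, or a
+ * negative error code. *min_uV and *max_uV receive the range to apply now.
+ */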
+static int regulator_get_optimal_voltage(struct regulator_dev *rdev,
+ int *current_uV,
+ int *min_uV, int *max_uV,
+ suspend_state_t state)
+{
+ struct coupling_desc *c_desc = &rdev->coupling_desc;
+ struct regulator_dev **c_rdevs = c_desc->coupled_rdevs;
+ struct regulation_constraints *constraints = rdev->constraints;
+ int desired_min_uV = 0, desired_max_uV = INT_MAX;
+ int max_current_uV = 0, min_current_uV = INT_MAX;
+ int highest_min_uV = 0, target_uV, possible_uV;
+ int i, ret, max_spread, n_coupled = c_desc->n_coupled;
+ bool done;
+
+ *current_uV = -1;
+
+ /* Find highest min desired voltage */
+ for (i = 0; i < n_coupled; i++) {
+ int tmp_min = 0;
+ int tmp_max = INT_MAX;
+
+ lockdep_assert_held_once(&c_rdevs[i]->mutex.base);
+
+ ret = regulator_check_consumers(c_rdevs[i],
+ &tmp_min,
+ &tmp_max, state);
+ if (ret < 0)
+ return ret;
+
+ if (tmp_min == 0) {
+ ret = regulator_get_voltage_rdev(c_rdevs[i]);
+ if (ret < 0)
+ return ret;
+ tmp_min = ret;
+ }
+
+ /* apply constraints */
+ ret = regulator_check_voltage(c_rdevs[i], &tmp_min, &tmp_max);
+ if (ret < 0)
+ return ret;
+
+ highest_min_uV = max(highest_min_uV, tmp_min);
+
+ if (i == 0) {
+ desired_min_uV = tmp_min;
+ desired_max_uV = tmp_max;
+ }
+ }
+
+ max_spread = constraints->max_spread[0];
+
+ /*
+ * Let target_uV be equal to the desired one if possible.
+ * If not, set it to the minimum voltage allowed by the other
+ * coupled regulators.
+ */
+ target_uV = max(desired_min_uV, highest_min_uV - max_spread);
+
+ /*
+ * Find min and max voltages, which currently aren't violating
+ * max_spread.
+ */
+ for (i = 1; i < n_coupled; i++) {
+ int tmp_act;
+
+ tmp_act = regulator_get_voltage_rdev(c_rdevs[i]);
+ if (tmp_act < 0)
+ return tmp_act;
+
+ min_current_uV = min(tmp_act, min_current_uV);
+ max_current_uV = max(tmp_act, max_current_uV);
+ }
+
+ /*
+ * Correct the target voltage so that it doesn't currently
+ * violate max_spread.
+ */
+ possible_uV = max(target_uV, max_current_uV - max_spread);
+ possible_uV = min(possible_uV, min_current_uV + max_spread);
+
+ if (possible_uV > desired_max_uV)
+ return -EINVAL;
+
+ done = (possible_uV == target_uV);
+ desired_min_uV = possible_uV;
+
+ /* Set current_uV if it wasn't done earlier in the code and if necessary */
+ if (*current_uV == -1) {
+ ret = regulator_get_voltage_rdev(rdev);
+ if (ret < 0)
+ return ret;
+ *current_uV = ret;
+ }
+
+ *min_uV = desired_min_uV;
+ *max_uV = desired_max_uV;
+
+ return done;
+}
+
+static int exynos_coupler_balance_voltage(struct regulator_coupler *coupler,
+ struct regulator_dev *rdev,
+ suspend_state_t state)
+{
+ struct regulator_dev **c_rdevs;
+ struct regulator_dev *best_rdev;
+ struct coupling_desc *c_desc = &rdev->coupling_desc;
+ int i, ret, n_coupled, best_min_uV, best_max_uV, best_c_rdev;
+ unsigned int delta, best_delta;
+ unsigned long c_rdev_done = 0;
+ bool best_c_rdev_done;
+
+ c_rdevs = c_desc->coupled_rdevs;
+ n_coupled = c_desc->n_coupled;
+
+ /*
+ * Find the best possible voltage change on each loop. Leave the loop
+ * if there isn't any possible change.
+ */
+ do {
+ best_c_rdev_done = false;
+ best_delta = 0;
+ best_min_uV = 0;
+ best_max_uV = 0;
+ best_c_rdev = 0;
+ best_rdev = NULL;
+
+ /*
+ * Find highest difference between optimal voltage
+ * and current voltage.
+ */
+ for (i = 0; i < n_coupled; i++) {
+ /*
+ * optimal_uV is the best voltage that can be set for
+ * i-th regulator at the moment without violating
+ * max_spread constraint in order to balance
+ * the coupled voltages.
+ */
+ int optimal_uV = 0, optimal_max_uV = 0, current_uV = 0;
+
+ if (test_bit(i, &c_rdev_done))
+ continue;
+
+ ret = regulator_get_optimal_voltage(c_rdevs[i],
+ &current_uV,
+ &optimal_uV,
+ &optimal_max_uV,
+ state);
+ if (ret < 0)
+ goto out;
+
+ delta = abs(optimal_uV - current_uV);
+
+ if (delta && best_delta <= delta) {
+ best_c_rdev_done = ret;
+ best_delta = delta;
+ best_rdev = c_rdevs[i];
+ best_min_uV = optimal_uV;
+ best_max_uV = optimal_max_uV;
+ best_c_rdev = i;
+ }
+ }
+
+ /* Nothing to change, return successfully */
+ if (!best_rdev) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = regulator_set_voltage_rdev(best_rdev, best_min_uV,
+ best_max_uV, state);
+
+ if (ret < 0)
+ goto out;
+
+ if (best_c_rdev_done)
+ set_bit(best_c_rdev, &c_rdev_done);
+
+ } while (n_coupled > 1);
+
+out:
+ return ret;
+}
+
+static int exynos_coupler_attach(struct regulator_coupler *coupler,
+ struct regulator_dev *rdev)
+{
+ return 0;
+}
+
+static struct regulator_coupler exynos_coupler = {
+ .attach_regulator = exynos_coupler_attach,
+ .balance_voltage = exynos_coupler_balance_voltage,
+};
+
+static int __init exynos_coupler_init(void)
+{
+ if (!of_machine_is_compatible("samsung,exynos5800"))
+ return 0;
+
+ return regulator_coupler_register(&exynos_coupler);
+}
+arch_initcall(exynos_coupler_init);
diff --git a/drivers/soc/samsung/exynos-usi.c b/drivers/soc/samsung/exynos-usi.c
new file mode 100644
index 0000000000..114352695a
--- /dev/null
+++ b/drivers/soc/samsung/exynos-usi.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 Linaro Ltd.
+ * Author: Sam Protsenko <semen.protsenko@linaro.org>
+ *
+ * Samsung Exynos USI driver (Universal Serial Interface).
+ */
+
+#include <linux/clk.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/soc/samsung,exynos-usi.h>
+
+/* USIv2: System Register: SW_CONF register bits */
+#define USI_V2_SW_CONF_NONE 0x0
+#define USI_V2_SW_CONF_UART BIT(0)
+#define USI_V2_SW_CONF_SPI BIT(1)
+#define USI_V2_SW_CONF_I2C BIT(2)
+#define USI_V2_SW_CONF_MASK (USI_V2_SW_CONF_UART | USI_V2_SW_CONF_SPI | \
+ USI_V2_SW_CONF_I2C)
+
+/* USIv2: USI register offsets */
+#define USI_CON 0x04
+#define USI_OPTION 0x08
+
+/* USIv2: USI register bits */
+#define USI_CON_RESET BIT(0)
+#define USI_OPTION_CLKREQ_ON BIT(1)
+#define USI_OPTION_CLKSTOP_ON BIT(2)
+
+enum exynos_usi_ver {
+ USI_VER2 = 2,
+};
+
+struct exynos_usi_variant {
+ enum exynos_usi_ver ver; /* USI IP-core version */
+ unsigned int sw_conf_mask; /* SW_CONF mask for all protocols */
+ size_t min_mode; /* first index in exynos_usi_modes[] */
+ size_t max_mode; /* last index in exynos_usi_modes[] */
+ size_t num_clks; /* number of clocks to assert */
+ const char * const *clk_names; /* clock names to assert */
+};
+
+struct exynos_usi {
+ struct device *dev;
+ void __iomem *regs; /* USI register map */
+ struct clk_bulk_data *clks; /* USI clocks */
+
+ size_t mode; /* current USI SW_CONF mode index */
+ bool clkreq_on; /* always provide clock to IP */
+
+ /* System Register */
+ struct regmap *sysreg; /* System Register map */
+ unsigned int sw_conf; /* SW_CONF register offset in sysreg */
+
+ const struct exynos_usi_variant *data;
+};
+
+struct exynos_usi_mode {
+ const char *name; /* mode name */
+ unsigned int val; /* mode register value */
+};
+
+static const struct exynos_usi_mode exynos_usi_modes[] = {
+ [USI_V2_NONE] = { .name = "none", .val = USI_V2_SW_CONF_NONE },
+ [USI_V2_UART] = { .name = "uart", .val = USI_V2_SW_CONF_UART },
+ [USI_V2_SPI] = { .name = "spi", .val = USI_V2_SW_CONF_SPI },
+ [USI_V2_I2C] = { .name = "i2c", .val = USI_V2_SW_CONF_I2C },
+};
+
+static const char * const exynos850_usi_clk_names[] = { "pclk", "ipclk" };
+static const struct exynos_usi_variant exynos850_usi_data = {
+ .ver = USI_VER2,
+ .sw_conf_mask = USI_V2_SW_CONF_MASK,
+ .min_mode = USI_V2_NONE,
+ .max_mode = USI_V2_I2C,
+ .num_clks = ARRAY_SIZE(exynos850_usi_clk_names),
+ .clk_names = exynos850_usi_clk_names,
+};
+
+static const struct of_device_id exynos_usi_dt_match[] = {
+ {
+ .compatible = "samsung,exynos850-usi",
+ .data = &exynos850_usi_data,
+ },
+ { } /* sentinel */
+};
+MODULE_DEVICE_TABLE(of, exynos_usi_dt_match);
+
+/**
+ * exynos_usi_set_sw_conf - Set USI block configuration mode
+ * @usi: USI driver object
+ * @mode: Mode index
+ *
+ * Select underlying serial protocol (UART/SPI/I2C) in USI IP-core.
+ *
+ * Return: 0 on success, or negative error code on failure.
+ */
+static int exynos_usi_set_sw_conf(struct exynos_usi *usi, size_t mode)
+{
+ unsigned int val;
+ int ret;
+
+ if (mode < usi->data->min_mode || mode > usi->data->max_mode)
+ return -EINVAL;
+
+ val = exynos_usi_modes[mode].val;
+ ret = regmap_update_bits(usi->sysreg, usi->sw_conf,
+ usi->data->sw_conf_mask, val);
+ if (ret)
+ return ret;
+
+ usi->mode = mode;
+ dev_dbg(usi->dev, "protocol: %s\n", exynos_usi_modes[usi->mode].name);
+
+ return 0;
+}
+
+/**
+ * exynos_usi_enable - Initialize USI block
+ * @usi: USI driver object
+ *
+ * USI IP-core start state is "reset" (on startup and after CPU resume). This
+ * routine enables the USI block by clearing the reset flag. It also configures
+ * HWACG behavior (needed e.g. for UART Rx). It should be performed before
+ * the underlying protocol becomes functional.
+ *
+ * Return: 0 on success, or negative error code on failure.
+ */
+static int exynos_usi_enable(const struct exynos_usi *usi)
+{
+ u32 val;
+ int ret;
+
+ ret = clk_bulk_prepare_enable(usi->data->num_clks, usi->clks);
+ if (ret)
+ return ret;
+
+ /* Enable USI block */
+ val = readl(usi->regs + USI_CON);
+ val &= ~USI_CON_RESET;
+ writel(val, usi->regs + USI_CON);
+ udelay(1);
+
+ /* Continuously provide the clock to USI IP w/o gating */
+ if (usi->clkreq_on) {
+ val = readl(usi->regs + USI_OPTION);
+ val &= ~USI_OPTION_CLKSTOP_ON;
+ val |= USI_OPTION_CLKREQ_ON;
+ writel(val, usi->regs + USI_OPTION);
+ }
+
+ clk_bulk_disable_unprepare(usi->data->num_clks, usi->clks);
+
+ return ret;
+}
+
+static int exynos_usi_configure(struct exynos_usi *usi)
+{
+ int ret;
+
+ ret = exynos_usi_set_sw_conf(usi, usi->mode);
+ if (ret)
+ return ret;
+
+ if (usi->data->ver == USI_VER2)
+ return exynos_usi_enable(usi);
+
+ return 0;
+}
+
+static int exynos_usi_parse_dt(struct device_node *np, struct exynos_usi *usi)
+{
+ int ret;
+ u32 mode;
+
+ ret = of_property_read_u32(np, "samsung,mode", &mode);
+ if (ret)
+ return ret;
+ if (mode < usi->data->min_mode || mode > usi->data->max_mode)
+ return -EINVAL;
+ usi->mode = mode;
+
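+	/*
+	 * "samsung,sysreg" is a <phandle offset> pair: the phandle gives the
+	 * System Register regmap and the second cell the SW_CONF register
+	 * offset within it.
+	 */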
+ usi->sysreg = syscon_regmap_lookup_by_phandle(np, "samsung,sysreg");
+ if (IS_ERR(usi->sysreg))
+ return PTR_ERR(usi->sysreg);
+
+ ret = of_property_read_u32_index(np, "samsung,sysreg", 1,
+ &usi->sw_conf);
+ if (ret)
+ return ret;
+
+ usi->clkreq_on = of_property_read_bool(np, "samsung,clkreq-on");
+
+ return 0;
+}
+
+static int exynos_usi_get_clocks(struct exynos_usi *usi)
+{
+ const size_t num = usi->data->num_clks;
+ struct device *dev = usi->dev;
+ size_t i;
+
+ if (num == 0)
+ return 0;
+
+ usi->clks = devm_kcalloc(dev, num, sizeof(*usi->clks), GFP_KERNEL);
+ if (!usi->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < num; ++i)
+ usi->clks[i].id = usi->data->clk_names[i];
+
+ return devm_clk_bulk_get(dev, num, usi->clks);
+}
+
+static int exynos_usi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct exynos_usi *usi;
+ int ret;
+
+ usi = devm_kzalloc(dev, sizeof(*usi), GFP_KERNEL);
+ if (!usi)
+ return -ENOMEM;
+
+ usi->dev = dev;
+ platform_set_drvdata(pdev, usi);
+
+ usi->data = of_device_get_match_data(dev);
+ if (!usi->data)
+ return -EINVAL;
+
+ ret = exynos_usi_parse_dt(np, usi);
+ if (ret)
+ return ret;
+
+ ret = exynos_usi_get_clocks(usi);
+ if (ret)
+ return ret;
+
+ if (usi->data->ver == USI_VER2) {
+ usi->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(usi->regs))
+ return PTR_ERR(usi->regs);
+ }
+
+ ret = exynos_usi_configure(usi);
+ if (ret)
+ return ret;
+
+ /* Make it possible to embed protocol nodes into USI np */
+ return of_platform_populate(np, NULL, NULL, dev);
+}
+
+static int __maybe_unused exynos_usi_resume_noirq(struct device *dev)
+{
+ struct exynos_usi *usi = dev_get_drvdata(dev);
+
+ return exynos_usi_configure(usi);
+}
+
+static const struct dev_pm_ops exynos_usi_pm = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, exynos_usi_resume_noirq)
+};
+
+static struct platform_driver exynos_usi_driver = {
+ .driver = {
+ .name = "exynos-usi",
+ .pm = &exynos_usi_pm,
+ .of_match_table = exynos_usi_dt_match,
+ },
+ .probe = exynos_usi_probe,
+};
+module_platform_driver(exynos_usi_driver);
+
+MODULE_DESCRIPTION("Samsung USI driver");
+MODULE_AUTHOR("Sam Protsenko <semen.protsenko@linaro.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/samsung/exynos3250-pmu.c b/drivers/soc/samsung/exynos3250-pmu.c
new file mode 100644
index 0000000000..30f230ed17
--- /dev/null
+++ b/drivers/soc/samsung/exynos3250-pmu.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
+// http://www.samsung.com/
+//
+// Exynos3250 - CPU PMU (Power Management Unit) support
+
+#include <linux/soc/samsung/exynos-regs-pmu.h>
+#include <linux/soc/samsung/exynos-pmu.h>
+
+#include "exynos-pmu.h"
+
+static const struct exynos_pmu_conf exynos3250_pmu_config[] = {
+ /* { .offset = offset, .val = { AFTR, W-AFTR, SLEEP } } */
+ { EXYNOS3_ARM_CORE0_SYS_PWR_REG, { 0x0, 0x0, 0x2} },
+ { EXYNOS3_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS3_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS3_ARM_CORE1_SYS_PWR_REG, { 0x0, 0x0, 0x2} },
+ { EXYNOS3_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS3_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS3_ISP_ARM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS3_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS3_ARM_COMMON_SYS_PWR_REG, { 0x0, 0x0, 0x2} },
+ { EXYNOS3_ARM_L2_SYS_PWR_REG, { 0x0, 0x0, 0x3} },
+ { EXYNOS3_CMU_ACLKSTOP_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_CMU_SCLKSTOP_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_CMU_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_DRAM_FREQ_DOWN_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS3_DDRPHY_DLLOFF_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS3_LPDDR_PHY_DLL_LOCK_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS3_CMU_ACLKSTOP_COREBLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_SCLKSTOP_COREBLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_RESET_COREBLK_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_APLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_MPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_BPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_VPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_EPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_UPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS3_EPLLUSER_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_MPLLUSER_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_BPLLUSER_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_CLKSTOP_CAM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_CLKSTOP_MFC_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_CLKSTOP_G3D_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_CLKSTOP_LCD0_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_CLKSTOP_ISP_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_CLKSTOP_MAUDIO_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_RESET_CAM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_RESET_MFC_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_RESET_G3D_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_RESET_LCD0_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_RESET_ISP_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_RESET_MAUDIO_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_TOP_BUS_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS3_TOP_RETENTION_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS3_TOP_PWR_SYS_PWR_REG, { 0x3, 0x3, 0x3} },
+ { EXYNOS3_TOP_BUS_COREBLK_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS3_TOP_RETENTION_COREBLK_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS3_TOP_PWR_COREBLK_SYS_PWR_REG, { 0x3, 0x3, 0x3} },
+ { EXYNOS3_LOGIC_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_OSCCLK_GATE_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS3_LOGIC_RESET_COREBLK_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_OSCCLK_GATE_COREBLK_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS3_PAD_RETENTION_DRAM_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_PAD_RETENTION_MAUDIO_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_PAD_RETENTION_GPIO_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_PAD_RETENTION_UART_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_PAD_RETENTION_MMC0_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_PAD_RETENTION_MMC1_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_PAD_RETENTION_MMC2_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_PAD_RETENTION_SPI_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_PAD_RETENTION_EBIA_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_PAD_RETENTION_EBIB_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_PAD_RETENTION_JTAG_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_PAD_ISOLATION_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_PAD_ALV_SEL_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_XUSBXTI_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_XXTI_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_EXT_REGULATOR_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_EXT_REGULATOR_COREBLK_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_GPIO_MODE_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_GPIO_MODE_MAUDIO_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_TOP_ASB_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_TOP_ASB_ISOLATION_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_TOP_ASB_RESET_COREBLK_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_TOP_ASB_ISOLATION_COREBLK_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_CAM_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS3_MFC_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS3_G3D_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS3_LCD0_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS3_ISP_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS3_MAUDIO_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS3_CMU_SYSCLK_ISP_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { PMU_TABLE_END,},
+};
+
+static unsigned int const exynos3250_list_feed[] = {
+ EXYNOS3_ARM_CORE_OPTION(0),
+ EXYNOS3_ARM_CORE_OPTION(1),
+ EXYNOS3_ARM_CORE_OPTION(2),
+ EXYNOS3_ARM_CORE_OPTION(3),
+ EXYNOS3_ARM_COMMON_OPTION,
+ EXYNOS3_TOP_PWR_OPTION,
+ EXYNOS3_CORE_TOP_PWR_OPTION,
+ S5P_CAM_OPTION,
+ S5P_MFC_OPTION,
+ S5P_G3D_OPTION,
+ S5P_LCD0_OPTION,
+ S5P_ISP_OPTION,
+};
+
+static void exynos3250_powerdown_conf_extra(enum sys_powerdown mode)
+{
+ unsigned int i;
+ unsigned int tmp;
+
+ /* Enable only SC_FEEDBACK */
+ for (i = 0; i < ARRAY_SIZE(exynos3250_list_feed); i++) {
+ tmp = pmu_raw_readl(exynos3250_list_feed[i]);
+ tmp &= ~(EXYNOS3_OPTION_USE_SC_COUNTER);
+ tmp |= EXYNOS3_OPTION_USE_SC_FEEDBACK;
+ pmu_raw_writel(tmp, exynos3250_list_feed[i]);
+ }
+
+ if (mode != SYS_SLEEP)
+ return;
+
+ pmu_raw_writel(XUSBXTI_DURATION, EXYNOS3_XUSBXTI_DURATION);
+ pmu_raw_writel(XXTI_DURATION, EXYNOS3_XXTI_DURATION);
+ pmu_raw_writel(EXT_REGULATOR_DURATION, EXYNOS3_EXT_REGULATOR_DURATION);
+ pmu_raw_writel(EXT_REGULATOR_COREBLK_DURATION,
+ EXYNOS3_EXT_REGULATOR_COREBLK_DURATION);
+}
+
+static void exynos3250_pmu_init(void)
+{
+ unsigned int value;
+
+	/*
+	 * To prevent new bus requests from being issued from the L2 memory
+	 * system: if the core status is power down, '1' should be set for
+	 * L2 power down.
+	 */
+ value = pmu_raw_readl(EXYNOS3_ARM_COMMON_OPTION);
+ value |= EXYNOS3_OPTION_SKIP_DEACTIVATE_ACEACP_IN_PWDN;
+ pmu_raw_writel(value, EXYNOS3_ARM_COMMON_OPTION);
+
+	/* Enable USE_STANDBY_WFI for all cores */
+ pmu_raw_writel(S5P_USE_STANDBY_WFI_ALL, S5P_CENTRAL_SEQ_OPTION);
+
+	/*
+	 * Set the PSHOLD port output to high
+	 */
+ value = pmu_raw_readl(S5P_PS_HOLD_CONTROL);
+ value |= S5P_PS_HOLD_OUTPUT_HIGH;
+ pmu_raw_writel(value, S5P_PS_HOLD_CONTROL);
+
+	/*
+	 * Enable the signal for the PSHOLD port
+	 */
+ value = pmu_raw_readl(S5P_PS_HOLD_CONTROL);
+ value |= S5P_PS_HOLD_EN;
+ pmu_raw_writel(value, S5P_PS_HOLD_CONTROL);
+}
+
+const struct exynos_pmu_data exynos3250_pmu_data = {
+ .pmu_config = exynos3250_pmu_config,
+ .pmu_init = exynos3250_pmu_init,
+ .powerdown_conf_extra = exynos3250_powerdown_conf_extra,
+};
diff --git a/drivers/soc/samsung/exynos4-pmu.c b/drivers/soc/samsung/exynos4-pmu.c
new file mode 100644
index 0000000000..f8092190b9
--- /dev/null
+++ b/drivers/soc/samsung/exynos4-pmu.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
+// http://www.samsung.com/
+//
+// Exynos4 - CPU PMU (Power Management Unit) support
+
+#include <linux/soc/samsung/exynos-regs-pmu.h>
+#include <linux/soc/samsung/exynos-pmu.h>
+
+#include "exynos-pmu.h"
+
+static const struct exynos_pmu_conf exynos4210_pmu_config[] = {
+ /* { .offset = offset, .val = { AFTR, LPA, SLEEP } */
+ { S5P_ARM_CORE0_LOWPWR, { 0x0, 0x0, 0x2 } },
+ { S5P_DIS_IRQ_CORE0, { 0x0, 0x0, 0x0 } },
+ { S5P_DIS_IRQ_CENTRAL0, { 0x0, 0x0, 0x0 } },
+ { S5P_ARM_CORE1_LOWPWR, { 0x0, 0x0, 0x2 } },
+ { S5P_DIS_IRQ_CORE1, { 0x0, 0x0, 0x0 } },
+ { S5P_DIS_IRQ_CENTRAL1, { 0x0, 0x0, 0x0 } },
+ { S5P_ARM_COMMON_LOWPWR, { 0x0, 0x0, 0x2 } },
+ { S5P_L2_0_LOWPWR, { 0x2, 0x2, 0x3 } },
+ { S5P_L2_1_LOWPWR, { 0x2, 0x2, 0x3 } },
+ { S5P_CMU_ACLKSTOP_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_SCLKSTOP_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_RESET_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_APLL_SYSCLK_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_MPLL_SYSCLK_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_VPLL_SYSCLK_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_EPLL_SYSCLK_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_CLKSTOP_GPS_ALIVE_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_RESET_GPSALIVE_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_CLKSTOP_CAM_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_CLKSTOP_TV_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_CLKSTOP_MFC_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_CLKSTOP_G3D_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_CLKSTOP_LCD0_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_CLKSTOP_LCD1_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_CLKSTOP_MAUDIO_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_CLKSTOP_GPS_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_RESET_CAM_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_RESET_TV_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_RESET_MFC_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_RESET_G3D_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_RESET_LCD0_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_RESET_LCD1_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_RESET_MAUDIO_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_RESET_GPS_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_TOP_BUS_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_TOP_RETENTION_LOWPWR, { 0x1, 0x0, 0x1 } },
+ { S5P_TOP_PWR_LOWPWR, { 0x3, 0x0, 0x3 } },
+ { S5P_LOGIC_RESET_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_ONENAND_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_MODIMIF_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_G2D_ACP_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_USBOTG_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_HSMMC_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_CSSYS_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_SECSS_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_PCIE_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_SATA_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_DRAM_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_MAUDIO_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_PAD_RETENTION_GPIO_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_UART_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_MMCA_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_MMCB_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_EBIA_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_EBIB_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_ISOLATION_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_ALV_SEL_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_XUSBXTI_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_XXTI_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_EXT_REGULATOR_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_GPIO_MODE_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_GPIO_MODE_MAUDIO_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CAM_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_TV_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_MFC_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_G3D_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_LCD0_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_LCD1_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_MAUDIO_LOWPWR, { 0x7, 0x7, 0x0 } },
+ { S5P_GPS_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_GPS_ALIVE_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { PMU_TABLE_END,},
+};
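+/*
+ * Editor's note (illustrative, not part of the upstream driver): each row
+ * above pairs a PMU register offset with three values; the shared
+ * exynos-pmu core is presumably expected to write the AFTR, LPA or SLEEP
+ * column of every row into its register when the corresponding low-power
+ * mode is entered.
+ */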
+
+static const struct exynos_pmu_conf exynos4x12_pmu_config[] = {
+ { S5P_ARM_CORE0_LOWPWR, { 0x0, 0x0, 0x2 } },
+ { S5P_DIS_IRQ_CORE0, { 0x0, 0x0, 0x0 } },
+ { S5P_DIS_IRQ_CENTRAL0, { 0x0, 0x0, 0x0 } },
+ { S5P_ARM_CORE1_LOWPWR, { 0x0, 0x0, 0x2 } },
+ { S5P_DIS_IRQ_CORE1, { 0x0, 0x0, 0x0 } },
+ { S5P_DIS_IRQ_CENTRAL1, { 0x0, 0x0, 0x0 } },
+ { S5P_ISP_ARM_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_DIS_IRQ_ISP_ARM_LOCAL_LOWPWR, { 0x0, 0x0, 0x0 } },
+ { S5P_DIS_IRQ_ISP_ARM_CENTRAL_LOWPWR, { 0x0, 0x0, 0x0 } },
+ { S5P_ARM_COMMON_LOWPWR, { 0x0, 0x0, 0x2 } },
+ { S5P_L2_0_LOWPWR, { 0x0, 0x0, 0x3 } },
+	/* The XXX_OPTION registers should be set in a different field */
+ { S5P_ARM_L2_0_OPTION, { 0x10, 0x10, 0x0 } },
+ { S5P_L2_1_LOWPWR, { 0x0, 0x0, 0x3 } },
+ { S5P_ARM_L2_1_OPTION, { 0x10, 0x10, 0x0 } },
+ { S5P_CMU_ACLKSTOP_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_SCLKSTOP_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_RESET_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_DRAM_FREQ_DOWN_LOWPWR, { 0x1, 0x1, 0x1 } },
+ { S5P_DDRPHY_DLLOFF_LOWPWR, { 0x1, 0x1, 0x1 } },
+ { S5P_LPDDR_PHY_DLL_LOCK_LOWPWR, { 0x1, 0x1, 0x1 } },
+ { S5P_CMU_ACLKSTOP_COREBLK_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_SCLKSTOP_COREBLK_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_RESET_COREBLK_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_APLL_SYSCLK_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_MPLL_SYSCLK_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_VPLL_SYSCLK_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_EPLL_SYSCLK_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_MPLLUSER_SYSCLK_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_CLKSTOP_GPS_ALIVE_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_RESET_GPSALIVE_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_CLKSTOP_CAM_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_CLKSTOP_TV_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_CLKSTOP_MFC_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_CLKSTOP_G3D_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_CLKSTOP_LCD0_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_CLKSTOP_ISP_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_CLKSTOP_MAUDIO_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_CLKSTOP_GPS_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_RESET_CAM_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_RESET_TV_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_RESET_MFC_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_RESET_G3D_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_RESET_LCD0_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_RESET_ISP_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_RESET_MAUDIO_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_RESET_GPS_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_TOP_BUS_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_TOP_RETENTION_LOWPWR, { 0x1, 0x0, 0x1 } },
+ { S5P_TOP_PWR_LOWPWR, { 0x3, 0x0, 0x3 } },
+ { S5P_TOP_BUS_COREBLK_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_TOP_RETENTION_COREBLK_LOWPWR, { 0x1, 0x0, 0x1 } },
+ { S5P_TOP_PWR_COREBLK_LOWPWR, { 0x3, 0x0, 0x3 } },
+ { S5P_LOGIC_RESET_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_OSCCLK_GATE_LOWPWR, { 0x1, 0x0, 0x1 } },
+ { S5P_LOGIC_RESET_COREBLK_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_OSCCLK_GATE_COREBLK_LOWPWR, { 0x1, 0x0, 0x1 } },
+ { S5P_ONENAND_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_ONENAND_MEM_OPTION, { 0x10, 0x10, 0x0 } },
+ { S5P_HSI_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_HSI_MEM_OPTION, { 0x10, 0x10, 0x0 } },
+ { S5P_G2D_ACP_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_G2D_ACP_MEM_OPTION, { 0x10, 0x10, 0x0 } },
+ { S5P_USBOTG_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_USBOTG_MEM_OPTION, { 0x10, 0x10, 0x0 } },
+ { S5P_HSMMC_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_HSMMC_MEM_OPTION, { 0x10, 0x10, 0x0 } },
+ { S5P_CSSYS_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_CSSYS_MEM_OPTION, { 0x10, 0x10, 0x0 } },
+ { S5P_SECSS_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_SECSS_MEM_OPTION, { 0x10, 0x10, 0x0 } },
+ { S5P_ROTATOR_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_ROTATOR_MEM_OPTION, { 0x10, 0x10, 0x0 } },
+ { S5P_PAD_RETENTION_DRAM_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_MAUDIO_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_PAD_RETENTION_GPIO_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_UART_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_MMCA_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_MMCB_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_EBIA_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_EBIB_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_GPIO_COREBLK_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_ISOLATION_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_ISOLATION_COREBLK_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_ALV_SEL_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_XUSBXTI_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_XXTI_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_EXT_REGULATOR_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_GPIO_MODE_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_GPIO_MODE_COREBLK_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_GPIO_MODE_MAUDIO_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_TOP_ASB_RESET_LOWPWR, { 0x1, 0x1, 0x1 } },
+ { S5P_TOP_ASB_ISOLATION_LOWPWR, { 0x1, 0x0, 0x1 } },
+ { S5P_CAM_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_TV_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_MFC_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_G3D_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_LCD0_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_ISP_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_MAUDIO_LOWPWR, { 0x7, 0x7, 0x0 } },
+ { S5P_GPS_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_GPS_ALIVE_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_CMU_SYSCLK_ISP_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_SYSCLK_GPS_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { PMU_TABLE_END,},
+};
+
+static const struct exynos_pmu_conf exynos4412_pmu_config[] = {
+ { S5P_ARM_CORE2_LOWPWR, { 0x0, 0x0, 0x2 } },
+ { S5P_DIS_IRQ_CORE2, { 0x0, 0x0, 0x0 } },
+ { S5P_DIS_IRQ_CENTRAL2, { 0x0, 0x0, 0x0 } },
+ { S5P_ARM_CORE3_LOWPWR, { 0x0, 0x0, 0x2 } },
+ { S5P_DIS_IRQ_CORE3, { 0x0, 0x0, 0x0 } },
+ { S5P_DIS_IRQ_CENTRAL3, { 0x0, 0x0, 0x0 } },
+ { PMU_TABLE_END,},
+};
+
+const struct exynos_pmu_data exynos4210_pmu_data = {
+ .pmu_config = exynos4210_pmu_config,
+};
+
+const struct exynos_pmu_data exynos4212_pmu_data = {
+ .pmu_config = exynos4x12_pmu_config,
+};
+
+const struct exynos_pmu_data exynos4412_pmu_data = {
+ .pmu_config = exynos4x12_pmu_config,
+ .pmu_config_extra = exynos4412_pmu_config,
+};
diff --git a/drivers/soc/samsung/exynos5250-pmu.c b/drivers/soc/samsung/exynos5250-pmu.c
new file mode 100644
index 0000000000..7a2d50be6b
--- /dev/null
+++ b/drivers/soc/samsung/exynos5250-pmu.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
+// http://www.samsung.com/
+//
+// Exynos5250 - CPU PMU (Power Management Unit) support
+
+#include <linux/soc/samsung/exynos-regs-pmu.h>
+#include <linux/soc/samsung/exynos-pmu.h>
+
+#include "exynos-pmu.h"
+
+static const struct exynos_pmu_conf exynos5250_pmu_config[] = {
+ /* { .offset = offset, .val = { AFTR, LPA, SLEEP } */
+ { EXYNOS5_ARM_CORE0_SYS_PWR_REG, { 0x0, 0x0, 0x2} },
+ { EXYNOS5_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_ARM_CORE1_SYS_PWR_REG, { 0x0, 0x0, 0x2} },
+ { EXYNOS5_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_FSYS_ARM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_FSYS_ARM_CENTRAL_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS5_ISP_ARM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_ARM_COMMON_SYS_PWR_REG, { 0x0, 0x0, 0x2} },
+ { EXYNOS5_ARM_L2_SYS_PWR_REG, { 0x3, 0x3, 0x3} },
+ { EXYNOS_L2_OPTION(0), { 0x10, 0x10, 0x0 } },
+ { EXYNOS5_CMU_ACLKSTOP_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_CMU_SCLKSTOP_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_CMU_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_CMU_ACLKSTOP_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_CMU_SCLKSTOP_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_CMU_RESET_SYSMEM_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_DRAM_FREQ_DOWN_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS5_DDRPHY_DLLOFF_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS5_DDRPHY_DLLLOCK_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS5_APLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_MPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_VPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_EPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_BPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_MPLLUSER_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_BPLLUSER_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_TOP_BUS_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_TOP_RETENTION_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_TOP_PWR_SYS_PWR_REG, { 0x3, 0x0, 0x3} },
+ { EXYNOS5_TOP_BUS_SYSMEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_TOP_RETENTION_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_TOP_PWR_SYSMEM_SYS_PWR_REG, { 0x3, 0x0, 0x3} },
+ { EXYNOS5_LOGIC_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_OSCCLK_GATE_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_LOGIC_RESET_SYSMEM_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_OSCCLK_GATE_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_USBOTG_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_G2D_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_USBDRD_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_SDMMC_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_CSSYS_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_SECSS_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_ROTATOR_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_INTRAM_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_INTROM_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_JPEG_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_JPEG_MEM_OPTION, { 0x10, 0x10, 0x0} },
+ { EXYNOS5_HSI_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_MCUIOP_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_SATA_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_DRAM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_MAU_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_PAD_RETENTION_GPIO_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_UART_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_MMCA_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_MMCB_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_EBIA_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_EBIB_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_SPI_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_GPIO_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_ISOLATION_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_ISOLATION_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_ALV_SEL_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_XUSBXTI_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS5_XXTI_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_EXT_REGULATOR_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_GPIO_MODE_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_GPIO_MODE_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_GPIO_MODE_MAU_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_TOP_ASB_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS5_TOP_ASB_ISOLATION_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_GSCL_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5_ISP_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5_MFC_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5_G3D_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5_DISP1_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5_MAU_SYS_PWR_REG, { 0x7, 0x7, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_GSCL_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_ISP_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_MFC_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_G3D_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_DISP1_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_MAU_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_GSCL_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_ISP_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_MFC_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_G3D_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_DISP1_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_MAU_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_CMU_RESET_GSCL_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_RESET_ISP_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_RESET_MFC_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_RESET_G3D_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_RESET_DISP1_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_RESET_MAU_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { PMU_TABLE_END,},
+};
+
+static unsigned int const exynos5_list_both_cnt_feed[] = {
+ EXYNOS5_ARM_CORE0_OPTION,
+ EXYNOS5_ARM_CORE1_OPTION,
+ EXYNOS5_ARM_COMMON_OPTION,
+ EXYNOS5_GSCL_OPTION,
+ EXYNOS5_ISP_OPTION,
+ EXYNOS5_MFC_OPTION,
+ EXYNOS5_G3D_OPTION,
+ EXYNOS5_DISP1_OPTION,
+ EXYNOS5_MAU_OPTION,
+ EXYNOS5_TOP_PWR_OPTION,
+ EXYNOS5_TOP_PWR_SYSMEM_OPTION,
+};
+
+static unsigned int const exynos5_list_disable_wfi_wfe[] = {
+ EXYNOS5_ARM_CORE1_OPTION,
+ EXYNOS5_FSYS_ARM_OPTION,
+ EXYNOS5_ISP_ARM_OPTION,
+};
+
+static void exynos5250_pmu_init(void)
+{
+ unsigned int value;
+	/*
+	 * When SYS_WDTRESET is set, the watchdog timer reset request
+	 * is ignored by the power management unit.
+	 */
+ value = pmu_raw_readl(EXYNOS5_AUTO_WDTRESET_DISABLE);
+ value &= ~EXYNOS5_SYS_WDTRESET;
+ pmu_raw_writel(value, EXYNOS5_AUTO_WDTRESET_DISABLE);
+
+ value = pmu_raw_readl(EXYNOS5_MASK_WDTRESET_REQUEST);
+ value &= ~EXYNOS5_SYS_WDTRESET;
+ pmu_raw_writel(value, EXYNOS5_MASK_WDTRESET_REQUEST);
+}
+
+static void exynos5_powerdown_conf(enum sys_powerdown mode)
+{
+ unsigned int i;
+ unsigned int tmp;
+
+ /*
+ * Enable both SC_FEEDBACK and SC_COUNTER
+ */
+ for (i = 0; i < ARRAY_SIZE(exynos5_list_both_cnt_feed); i++) {
+ tmp = pmu_raw_readl(exynos5_list_both_cnt_feed[i]);
+ tmp |= (EXYNOS5_USE_SC_FEEDBACK |
+ EXYNOS5_USE_SC_COUNTER);
+ pmu_raw_writel(tmp, exynos5_list_both_cnt_feed[i]);
+ }
+
+	/*
+	 * Enable the SKIP_DEACTIVATE_ACEACP_IN_PWDN bit field
+	 */
+ tmp = pmu_raw_readl(EXYNOS5_ARM_COMMON_OPTION);
+ tmp |= EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN;
+ pmu_raw_writel(tmp, EXYNOS5_ARM_COMMON_OPTION);
+
+	/*
+	 * Disable WFI/WFE in the XXX_OPTION registers
+	 */
+ for (i = 0; i < ARRAY_SIZE(exynos5_list_disable_wfi_wfe); i++) {
+ tmp = pmu_raw_readl(exynos5_list_disable_wfi_wfe[i]);
+ tmp &= ~(EXYNOS5_OPTION_USE_STANDBYWFE |
+ EXYNOS5_OPTION_USE_STANDBYWFI);
+ pmu_raw_writel(tmp, exynos5_list_disable_wfi_wfe[i]);
+ }
+}
+
+const struct exynos_pmu_data exynos5250_pmu_data = {
+ .pmu_config = exynos5250_pmu_config,
+ .pmu_init = exynos5250_pmu_init,
+ .powerdown_conf = exynos5_powerdown_conf,
+};
diff --git a/drivers/soc/samsung/exynos5420-pmu.c b/drivers/soc/samsung/exynos5420-pmu.c
new file mode 100644
index 0000000000..6fedcd78cb
--- /dev/null
+++ b/drivers/soc/samsung/exynos5420-pmu.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
+// http://www.samsung.com/
+//
+// Exynos5420 - CPU PMU (Power Management Unit) support
+
+#include <linux/pm.h>
+#include <linux/soc/samsung/exynos-regs-pmu.h>
+#include <linux/soc/samsung/exynos-pmu.h>
+
+#include <asm/cputype.h>
+
+#include "exynos-pmu.h"
+
+static const struct exynos_pmu_conf exynos5420_pmu_config[] = {
+ /* { .offset = offset, .val = { AFTR, LPA, SLEEP } */
+ { EXYNOS5_ARM_CORE0_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_ARM_CORE1_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_ARM_CORE2_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_DIS_IRQ_ARM_CORE2_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_DIS_IRQ_ARM_CORE2_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_ARM_CORE3_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_DIS_IRQ_ARM_CORE3_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_DIS_IRQ_ARM_CORE3_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_KFC_CORE0_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_DIS_IRQ_KFC_CORE0_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_DIS_IRQ_KFC_CORE0_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_KFC_CORE1_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_DIS_IRQ_KFC_CORE1_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_DIS_IRQ_KFC_CORE1_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_KFC_CORE2_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_DIS_IRQ_KFC_CORE2_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_DIS_IRQ_KFC_CORE2_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_KFC_CORE3_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_DIS_IRQ_KFC_CORE3_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_DIS_IRQ_KFC_CORE3_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_ISP_ARM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_ARM_COMMON_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_KFC_COMMON_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_ARM_L2_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_KFC_L2_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_CMU_ACLKSTOP_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_SCLKSTOP_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_CMU_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_CMU_ACLKSTOP_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_SCLKSTOP_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_CMU_RESET_SYSMEM_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_DRAM_FREQ_DOWN_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_DDRPHY_DLLOFF_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS5_DDRPHY_DLLLOCK_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_APLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_MPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_VPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_EPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_BPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_DPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_IPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_KPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_MPLLUSER_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_BPLLUSER_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_RPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_SPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_TOP_BUS_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_TOP_RETENTION_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS5_TOP_PWR_SYS_PWR_REG, { 0x3, 0x3, 0x0} },
+ { EXYNOS5_TOP_BUS_SYSMEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_TOP_RETENTION_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_TOP_PWR_SYSMEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_LOGIC_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_OSCCLK_GATE_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_LOGIC_RESET_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_OSCCLK_GATE_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_INTRAM_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x3} },
+ { EXYNOS5420_INTROM_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x3} },
+ { EXYNOS5_PAD_RETENTION_DRAM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_MAU_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5420_PAD_RETENTION_JTAG_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5420_PAD_RETENTION_DRAM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_PAD_RETENTION_UART_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_PAD_RETENTION_MMC0_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_PAD_RETENTION_MMC1_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_PAD_RETENTION_MMC2_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_PAD_RETENTION_HSI_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_PAD_RETENTION_EBIA_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_PAD_RETENTION_EBIB_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_PAD_RETENTION_SPI_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_PAD_RETENTION_DRAM_COREBLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_ISOLATION_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_PAD_ISOLATION_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_ALV_SEL_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_XUSBXTI_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_XXTI_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_EXT_REGULATOR_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_GPIO_MODE_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_GPIO_MODE_SYSMEM_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_GPIO_MODE_MAU_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_TOP_ASB_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_TOP_ASB_ISOLATION_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_GSCL_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5_ISP_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5_MFC_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5_G3D_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5420_DISP1_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5420_MAU_SYS_PWR_REG, { 0x7, 0x7, 0x0} },
+ { EXYNOS5420_G2D_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5420_MSC_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5420_FSYS_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5420_FSYS2_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5420_PSGEN_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5420_PERIC_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5420_WCORE_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_GSCL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_ISP_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_MFC_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_G3D_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_CLKSTOP_DISP1_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_CLKSTOP_MAU_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_CLKSTOP_G2D_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_CLKSTOP_MSC_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_CLKSTOP_FSYS_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_CLKSTOP_PSGEN_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_CLKSTOP_PERIC_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_CLKSTOP_WCORE_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_GSCL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_ISP_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_MFC_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_G3D_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_SYSCLK_DISP1_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_SYSCLK_MAU_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_SYSCLK_G2D_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_SYSCLK_MSC_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_SYSCLK_FSYS_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_SYSCLK_FSYS2_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_SYSCLK_PSGEN_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_SYSCLK_PERIC_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_SYSCLK_WCORE_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_RESET_FSYS2_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_RESET_PSGEN_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_RESET_PERIC_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_RESET_WCORE_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_CMU_RESET_GSCL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_CMU_RESET_ISP_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_CMU_RESET_MFC_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_CMU_RESET_G3D_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_RESET_DISP1_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_RESET_MAU_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_RESET_G2D_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_RESET_MSC_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_RESET_FSYS_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { PMU_TABLE_END,},
+};
+
+static unsigned int const exynos5420_list_disable_pmu_reg[] = {
+ EXYNOS5_CMU_CLKSTOP_GSCL_SYS_PWR_REG,
+ EXYNOS5_CMU_CLKSTOP_ISP_SYS_PWR_REG,
+ EXYNOS5_CMU_CLKSTOP_G3D_SYS_PWR_REG,
+ EXYNOS5420_CMU_CLKSTOP_DISP1_SYS_PWR_REG,
+ EXYNOS5420_CMU_CLKSTOP_MAU_SYS_PWR_REG,
+ EXYNOS5420_CMU_CLKSTOP_G2D_SYS_PWR_REG,
+ EXYNOS5420_CMU_CLKSTOP_MSC_SYS_PWR_REG,
+ EXYNOS5420_CMU_CLKSTOP_FSYS_SYS_PWR_REG,
+ EXYNOS5420_CMU_CLKSTOP_PSGEN_SYS_PWR_REG,
+ EXYNOS5420_CMU_CLKSTOP_PERIC_SYS_PWR_REG,
+ EXYNOS5420_CMU_CLKSTOP_WCORE_SYS_PWR_REG,
+ EXYNOS5_CMU_SYSCLK_GSCL_SYS_PWR_REG,
+ EXYNOS5_CMU_SYSCLK_ISP_SYS_PWR_REG,
+ EXYNOS5_CMU_SYSCLK_G3D_SYS_PWR_REG,
+ EXYNOS5420_CMU_SYSCLK_DISP1_SYS_PWR_REG,
+ EXYNOS5420_CMU_SYSCLK_MAU_SYS_PWR_REG,
+ EXYNOS5420_CMU_SYSCLK_G2D_SYS_PWR_REG,
+ EXYNOS5420_CMU_SYSCLK_MSC_SYS_PWR_REG,
+ EXYNOS5420_CMU_SYSCLK_FSYS_SYS_PWR_REG,
+ EXYNOS5420_CMU_SYSCLK_FSYS2_SYS_PWR_REG,
+ EXYNOS5420_CMU_SYSCLK_PSGEN_SYS_PWR_REG,
+ EXYNOS5420_CMU_SYSCLK_PERIC_SYS_PWR_REG,
+ EXYNOS5420_CMU_SYSCLK_WCORE_SYS_PWR_REG,
+ EXYNOS5420_CMU_RESET_FSYS2_SYS_PWR_REG,
+ EXYNOS5420_CMU_RESET_PSGEN_SYS_PWR_REG,
+ EXYNOS5420_CMU_RESET_PERIC_SYS_PWR_REG,
+ EXYNOS5420_CMU_RESET_WCORE_SYS_PWR_REG,
+ EXYNOS5_CMU_RESET_GSCL_SYS_PWR_REG,
+ EXYNOS5_CMU_RESET_ISP_SYS_PWR_REG,
+ EXYNOS5_CMU_RESET_G3D_SYS_PWR_REG,
+ EXYNOS5420_CMU_RESET_DISP1_SYS_PWR_REG,
+ EXYNOS5420_CMU_RESET_MAU_SYS_PWR_REG,
+ EXYNOS5420_CMU_RESET_G2D_SYS_PWR_REG,
+ EXYNOS5420_CMU_RESET_MSC_SYS_PWR_REG,
+ EXYNOS5420_CMU_RESET_FSYS_SYS_PWR_REG,
+};
+
+static void exynos5420_powerdown_conf(enum sys_powerdown mode)
+{
+ u32 this_cluster;
+
+ this_cluster = MPIDR_AFFINITY_LEVEL(read_cpuid_mpidr(), 1);
+
+	/*
+	 * Write the cluster ID to the IROM register to ensure that we
+	 * wake up on the current cluster.
+	 */
+ pmu_raw_writel(this_cluster, EXYNOS_IROM_DATA2);
+}
+
+static void exynos5420_pmu_init(void)
+{
+ unsigned int value;
+ int i;
+
+ /*
+ * Set the CMU_RESET, CMU_SYSCLK and CMU_CLKSTOP registers
+ * for local power blocks to Low initially as per Table 8-4:
+ * "System-Level Power-Down Configuration Registers".
+ */
+ for (i = 0; i < ARRAY_SIZE(exynos5420_list_disable_pmu_reg); i++)
+ pmu_raw_writel(0, exynos5420_list_disable_pmu_reg[i]);
+
+	/* Enable USE_STANDBY_WFI for all cores */
+ pmu_raw_writel(EXYNOS5420_USE_STANDBY_WFI_ALL, S5P_CENTRAL_SEQ_OPTION);
+
+ value = pmu_raw_readl(EXYNOS_L2_OPTION(0));
+ value &= ~EXYNOS_L2_USE_RETENTION;
+ pmu_raw_writel(value, EXYNOS_L2_OPTION(0));
+
+ value = pmu_raw_readl(EXYNOS_L2_OPTION(1));
+ value &= ~EXYNOS_L2_USE_RETENTION;
+ pmu_raw_writel(value, EXYNOS_L2_OPTION(1));
+
+	/*
+	 * If L2_COMMON is turned off, the clocks related to the ATB async
+	 * bridge are gated. Thus, when ISP power is gated, LPI may get
+	 * stuck.
+	 */
+ value = pmu_raw_readl(EXYNOS5420_LPI_MASK);
+ value |= EXYNOS5420_ATB_ISP_ARM;
+ pmu_raw_writel(value, EXYNOS5420_LPI_MASK);
+
+ value = pmu_raw_readl(EXYNOS5420_LPI_MASK1);
+ value |= EXYNOS5420_ATB_KFC;
+ pmu_raw_writel(value, EXYNOS5420_LPI_MASK1);
+
+	/* Prevent issuing new bus requests from L2 memory */
+ value = pmu_raw_readl(EXYNOS5420_ARM_COMMON_OPTION);
+ value |= EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN;
+ pmu_raw_writel(value, EXYNOS5420_ARM_COMMON_OPTION);
+
+ value = pmu_raw_readl(EXYNOS5420_KFC_COMMON_OPTION);
+ value |= EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN;
+ pmu_raw_writel(value, EXYNOS5420_KFC_COMMON_OPTION);
+
+ /* This setting is to reduce suspend/resume time */
+ pmu_raw_writel(DUR_WAIT_RESET, EXYNOS5420_LOGIC_RESET_DURATION3);
+
+ /* Serialized CPU wakeup of Eagle */
+ pmu_raw_writel(SPREAD_ENABLE, EXYNOS5420_ARM_INTR_SPREAD_ENABLE);
+
+ pmu_raw_writel(SPREAD_USE_STANDWFI,
+ EXYNOS5420_ARM_INTR_SPREAD_USE_STANDBYWFI);
+
+ pmu_raw_writel(0x1, EXYNOS5420_UP_SCHEDULER);
+
+ pr_info("EXYNOS5420 PMU initialized\n");
+}
+
+const struct exynos_pmu_data exynos5420_pmu_data = {
+ .pmu_config = exynos5420_pmu_config,
+ .pmu_init = exynos5420_pmu_init,
+ .powerdown_conf = exynos5420_powerdown_conf,
+};
diff --git a/drivers/soc/samsung/exynos5422-asv.c b/drivers/soc/samsung/exynos5422-asv.c
new file mode 100644
index 0000000000..475ae52765
--- /dev/null
+++ b/drivers/soc/samsung/exynos5422-asv.c
@@ -0,0 +1,506 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Samsung Exynos 5422 SoC Adaptive Supply Voltage support
+ */
+
+#include <linux/bitrev.h>
+#include <linux/errno.h>
+#include <linux/regmap.h>
+#include <linux/soc/samsung/exynos-chipid.h>
+#include <linux/slab.h>
+
+#include "exynos-asv.h"
+#include "exynos5422-asv.h"
+
+#define ASV_GROUPS_NUM 14
+#define ASV_ARM_DVFS_NUM 20
+#define ASV_ARM_BIN2_DVFS_NUM 17
+#define ASV_KFC_DVFS_NUM 14
+#define ASV_KFC_BIN2_DVFS_NUM 12
+
+/*
+ * This array is a set of 4 ASV data tables; the first column of each ASV
+ * table contains the frequency value in MHz and the subsequent columns
+ * contain the CPU cluster's supply voltage values in uV.
+ * In order to create a set of OPPs for a specific SoC revision, one of the
+ * voltage columns (1...14) from one of the tables (0...3) is selected during
+ * initialization. There are separate ASV tables for the big (ARM) and little
+ * (KFC) CPU clusters. Only OPPs which are already defined in the devicetree
+ * will be updated.
+ */
+
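+/*
+ * Editor's illustration (not part of the upstream driver): using the
+ * column numbering of the comment above, a part whose ASV group selects
+ * voltage column 5 of ARM table 0 would get 1150000 uV for its 1800 MHz
+ * OPP, taken from the row { 1800, 1200000, 1187500, 1175000, 1162500,
+ * 1150000, ... } below.
+ */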
+static const u32 asv_arm_table[][ASV_ARM_DVFS_NUM][ASV_GROUPS_NUM + 1] = {
+{
+ /* ARM 0, 1 */
+ { 2100, 1362500, 1362500, 1350000, 1337500, 1325000, 1312500, 1300000,
+ 1275000, 1262500, 1250000, 1237500, 1225000, 1212500, 1200000 },
+ { 2000, 1312500, 1312500, 1300000, 1287500, 1275000, 1262500, 1250000,
+ 1237500, 1225000, 1237500, 1225000, 1212500, 1200000, 1187500 },
+ { 1900, 1250000, 1237500, 1225000, 1212500, 1200000, 1187500, 1175000,
+ 1162500, 1150000, 1162500, 1150000, 1137500, 1125000, 1112500 },
+ { 1800, 1200000, 1187500, 1175000, 1162500, 1150000, 1137500, 1125000,
+ 1112500, 1100000, 1112500, 1100000, 1087500, 1075000, 1062500 },
+ { 1700, 1162500, 1150000, 1137500, 1125000, 1112500, 1100000, 1087500,
+ 1075000, 1062500, 1075000, 1062500, 1050000, 1037500, 1025000 },
+ { 1600, 1125000, 1112500, 1100000, 1087500, 1075000, 1062500, 1050000,
+ 1037500, 1025000, 1037500, 1025000, 1012500, 1000000, 987500 },
+ { 1500, 1087500, 1075000, 1062500, 1050000, 1037500, 1025000, 1012500,
+ 1000000, 987500, 1000000, 987500, 975000, 962500, 950000 },
+ { 1400, 1062500, 1050000, 1037500, 1025000, 1012500, 1000000, 987500,
+ 975000, 962500, 975000, 962500, 950000, 937500, 925000 },
+ { 1300, 1050000, 1037500, 1025000, 1012500, 1000000, 987500, 975000,
+ 962500, 950000, 962500, 950000, 937500, 925000, 912500 },
+ { 1200, 1025000, 1012500, 1000000, 987500, 975000, 962500, 950000,
+ 937500, 925000, 937500, 925000, 912500, 900000, 900000 },
+ { 1100, 1000000, 987500, 975000, 962500, 950000, 937500, 925000,
+ 912500, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 1000, 975000, 962500, 950000, 937500, 925000, 912500, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 900, 950000, 937500, 925000, 912500, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 800, 925000, 912500, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 700, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 600, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 500, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 400, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 300, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 200, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+}, {
+ /* ARM 2 */
+ { 2100, 1362500, 1362500, 1350000, 1337500, 1325000, 1312500, 1300000,
+ 1275000, 1262500, 1250000, 1237500, 1225000, 1212500, 1200000 },
+ { 2000, 1312500, 1312500, 1312500, 1300000, 1275000, 1262500, 1250000,
+ 1237500, 1225000, 1237500, 1225000, 1212500, 1200000, 1187500 },
+ { 1900, 1262500, 1250000, 1250000, 1237500, 1212500, 1200000, 1187500,
+ 1175000, 1162500, 1175000, 1162500, 1150000, 1137500, 1125000 },
+ { 1800, 1212500, 1200000, 1187500, 1175000, 1162500, 1150000, 1137500,
+ 1125000, 1112500, 1125000, 1112500, 1100000, 1087500, 1075000 },
+ { 1700, 1175000, 1162500, 1150000, 1137500, 1125000, 1112500, 1100000,
+ 1087500, 1075000, 1087500, 1075000, 1062500, 1050000, 1037500 },
+ { 1600, 1137500, 1125000, 1112500, 1100000, 1087500, 1075000, 1062500,
+ 1050000, 1037500, 1050000, 1037500, 1025000, 1012500, 1000000 },
+ { 1500, 1100000, 1087500, 1075000, 1062500, 1050000, 1037500, 1025000,
+ 1012500, 1000000, 1012500, 1000000, 987500, 975000, 962500 },
+ { 1400, 1075000, 1062500, 1050000, 1037500, 1025000, 1012500, 1000000,
+ 987500, 975000, 987500, 975000, 962500, 950000, 937500 },
+ { 1300, 1050000, 1037500, 1025000, 1012500, 1000000, 987500, 975000,
+ 962500, 950000, 962500, 950000, 937500, 925000, 912500 },
+ { 1200, 1025000, 1012500, 1000000, 987500, 975000, 962500, 950000,
+ 937500, 925000, 937500, 925000, 912500, 900000, 900000 },
+ { 1100, 1000000, 987500, 975000, 962500, 950000, 937500, 925000,
+ 912500, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 1000, 975000, 962500, 950000, 937500, 925000, 912500, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 900, 950000, 937500, 925000, 912500, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 800, 925000, 912500, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 700, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 600, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 500, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 400, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 300, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 200, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+}, {
+ /* ARM 3 */
+ { 2100, 1362500, 1362500, 1350000, 1337500, 1325000, 1312500, 1300000,
+ 1275000, 1262500, 1250000, 1237500, 1225000, 1212500, 1200000 },
+ { 2000, 1312500, 1312500, 1300000, 1287500, 1275000, 1262500, 1250000,
+ 1237500, 1225000, 1237500, 1225000, 1212500, 1200000, 1187500 },
+ { 1900, 1262500, 1250000, 1237500, 1225000, 1212500, 1200000, 1187500,
+ 1175000, 1162500, 1175000, 1162500, 1150000, 1137500, 1125000 },
+ { 1800, 1212500, 1200000, 1187500, 1175000, 1162500, 1150000, 1137500,
+ 1125000, 1112500, 1125000, 1112500, 1100000, 1087500, 1075000 },
+ { 1700, 1175000, 1162500, 1150000, 1137500, 1125000, 1112500, 1100000,
+ 1087500, 1075000, 1087500, 1075000, 1062500, 1050000, 1037500 },
+ { 1600, 1137500, 1125000, 1112500, 1100000, 1087500, 1075000, 1062500,
+ 1050000, 1037500, 1050000, 1037500, 1025000, 1012500, 1000000 },
+ { 1500, 1100000, 1087500, 1075000, 1062500, 1050000, 1037500, 1025000,
+ 1012500, 1000000, 1012500, 1000000, 987500, 975000, 962500 },
+ { 1400, 1075000, 1062500, 1050000, 1037500, 1025000, 1012500, 1000000,
+ 987500, 975000, 987500, 975000, 962500, 950000, 937500 },
+ { 1300, 1050000, 1037500, 1025000, 1012500, 1000000, 987500, 975000,
+ 962500, 950000, 962500, 950000, 937500, 925000, 912500 },
+ { 1200, 1025000, 1012500, 1000000, 987500, 975000, 962500, 950000,
+ 937500, 925000, 937500, 925000, 912500, 900000, 900000 },
+ { 1100, 1000000, 987500, 975000, 962500, 950000, 937500, 925000,
+ 912500, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 1000, 975000, 962500, 950000, 937500, 925000, 912500, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 900, 950000, 937500, 925000, 912500, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 800, 925000, 912500, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 700, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 600, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 500, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 400, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 300, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 200, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+}, {
+ /* ARM bin 2 */
+ { 1800, 1237500, 1225000, 1212500, 1200000, 1187500, 1175000, 1162500,
+ 1150000, 1137500, 1150000, 1137500, 1125000, 1112500, 1100000 },
+ { 1700, 1200000, 1187500, 1175000, 1162500, 1150000, 1137500, 1125000,
+ 1112500, 1100000, 1112500, 1100000, 1087500, 1075000, 1062500 },
+ { 1600, 1162500, 1150000, 1137500, 1125000, 1112500, 1100000, 1087500,
+ 1075000, 1062500, 1075000, 1062500, 1050000, 1037500, 1025000 },
+ { 1500, 1125000, 1112500, 1100000, 1087500, 1075000, 1062500, 1050000,
+ 1037500, 1025000, 1037500, 1025000, 1012500, 1000000, 987500 },
+ { 1400, 1100000, 1087500, 1075000, 1062500, 1050000, 1037500, 1025000,
+ 1012500, 1000000, 1012500, 1000000, 987500, 975000, 962500 },
+ { 1300, 1087500, 1075000, 1062500, 1050000, 1037500, 1025000, 1012500,
+ 1000000, 987500, 1000000, 987500, 975000, 962500, 950000 },
+ { 1200, 1062500, 1050000, 1037500, 1025000, 1012500, 1000000, 987500,
+ 975000, 962500, 975000, 962500, 950000, 937500, 925000 },
+ { 1100, 1037500, 1025000, 1012500, 1000000, 987500, 975000, 962500,
+ 950000, 937500, 950000, 937500, 925000, 912500, 900000 },
+ { 1000, 1012500, 1000000, 987500, 975000, 962500, 950000, 937500,
+ 925000, 912500, 925000, 912500, 900000, 900000, 900000 },
+ { 900, 987500, 975000, 962500, 950000, 937500, 925000, 912500,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 800, 962500, 950000, 937500, 925000, 912500, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 700, 937500, 925000, 912500, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 600, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 500, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 400, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 300, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 200, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+}
+};
+
+static const u32 asv_kfc_table[][ASV_KFC_DVFS_NUM][ASV_GROUPS_NUM + 1] = {
+{
+ /* KFC 0, 1 */
+ { 1500000, 1300000, 1300000, 1300000, 1287500, 1287500, 1287500, 1275000,
+ 1262500, 1250000, 1237500, 1225000, 1212500, 1200000, 1187500 },
+ { 1400000, 1275000, 1262500, 1250000, 1237500, 1225000, 1212500, 1200000,
+ 1187500, 1175000, 1162500, 1150000, 1137500, 1125000, 1112500 },
+ { 1300000, 1225000, 1212500, 1200000, 1187500, 1175000, 1162500, 1150000,
+ 1137500, 1125000, 1112500, 1100000, 1087500, 1075000, 1062500 },
+ { 1200000, 1175000, 1162500, 1150000, 1137500, 1125000, 1112500, 1100000,
+ 1087500, 1075000, 1062500, 1050000, 1037500, 1025000, 1012500 },
+ { 1100000, 1137500, 1125000, 1112500, 1100000, 1087500, 1075000, 1062500,
+ 1050000, 1037500, 1025000, 1012500, 1000000, 987500, 975000 },
+ { 1000000, 1100000, 1087500, 1075000, 1062500, 1050000, 1037500, 1025000,
+ 1012500, 1000000, 987500, 975000, 962500, 950000, 937500 },
+ { 900000, 1062500, 1050000, 1037500, 1025000, 1012500, 1000000, 987500,
+ 975000, 962500, 950000, 937500, 925000, 912500, 900000 },
+ { 800000, 1025000, 1012500, 1000000, 987500, 975000, 962500, 950000,
+ 937500, 925000, 912500, 900000, 900000, 900000, 900000 },
+ { 700000, 987500, 975000, 962500, 950000, 937500, 925000, 912500,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 600000, 950000, 937500, 925000, 912500, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 500000, 912500, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 400000, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 300000, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 200000, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+}, {
+ /* KFC 2 */
+ { 1500, 1300000, 1300000, 1300000, 1287500, 1287500, 1287500, 1275000,
+ 1262500, 1250000, 1237500, 1225000, 1212500, 1200000, 1187500 },
+ { 1400, 1275000, 1262500, 1250000, 1237500, 1225000, 1212500, 1200000,
+ 1187500, 1175000, 1162500, 1150000, 1137500, 1125000, 1112500 },
+ { 1300, 1225000, 1212500, 1200000, 1187500, 1175000, 1162500, 1150000,
+ 1137500, 1125000, 1112500, 1100000, 1087500, 1075000, 1062500 },
+ { 1200, 1175000, 1162500, 1150000, 1137500, 1125000, 1112500, 1100000,
+ 1087500, 1075000, 1062500, 1050000, 1037500, 1025000, 1012500 },
+ { 1100, 1137500, 1125000, 1112500, 1100000, 1087500, 1075000, 1062500,
+ 1050000, 1037500, 1025000, 1012500, 1000000, 987500, 975000 },
+ { 1000, 1100000, 1087500, 1075000, 1062500, 1050000, 1037500, 1025000,
+ 1012500, 1000000, 987500, 975000, 962500, 950000, 937500 },
+ { 900, 1062500, 1050000, 1037500, 1025000, 1012500, 1000000, 987500,
+ 975000, 962500, 950000, 937500, 925000, 912500, 900000 },
+ { 800, 1025000, 1012500, 1000000, 987500, 975000, 962500, 950000,
+ 937500, 925000, 912500, 900000, 900000, 900000, 900000 },
+ { 700, 987500, 975000, 962500, 950000, 937500, 925000, 912500,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 600, 950000, 937500, 925000, 912500, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 500, 912500, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 400, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 300, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 200, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+}, {
+ /* KFC 3 */
+ { 1500, 1300000, 1300000, 1300000, 1287500, 1287500, 1287500, 1275000,
+ 1262500, 1250000, 1237500, 1225000, 1212500, 1200000, 1187500 },
+ { 1400, 1275000, 1262500, 1250000, 1237500, 1225000, 1212500, 1200000,
+ 1187500, 1175000, 1162500, 1150000, 1137500, 1125000, 1112500 },
+ { 1300, 1225000, 1212500, 1200000, 1187500, 1175000, 1162500, 1150000,
+ 1137500, 1125000, 1112500, 1100000, 1087500, 1075000, 1062500 },
+ { 1200, 1175000, 1162500, 1150000, 1137500, 1125000, 1112500, 1100000,
+ 1087500, 1075000, 1062500, 1050000, 1037500, 1025000, 1012500 },
+ { 1100, 1137500, 1125000, 1112500, 1100000, 1087500, 1075000, 1062500,
+ 1050000, 1037500, 1025000, 1012500, 1000000, 987500, 975000 },
+ { 1000, 1100000, 1087500, 1075000, 1062500, 1050000, 1037500, 1025000,
+ 1012500, 1000000, 987500, 975000, 962500, 950000, 937500 },
+ { 900, 1062500, 1050000, 1037500, 1025000, 1012500, 1000000, 987500,
+ 975000, 962500, 950000, 937500, 925000, 912500, 900000 },
+ { 800, 1025000, 1012500, 1000000, 987500, 975000, 962500, 950000,
+ 937500, 925000, 912500, 900000, 900000, 900000, 900000 },
+ { 700, 987500, 975000, 962500, 950000, 937500, 925000, 912500,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 600, 950000, 937500, 925000, 912500, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 500, 912500, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 400, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 300, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 200, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+}, {
+ /* KFC bin 2 */
+ { 1300, 1250000, 1237500, 1225000, 1212500, 1200000, 1187500, 1175000,
+ 1162500, 1150000, 1137500, 1125000, 1112500, 1100000, 1087500 },
+ { 1200, 1200000, 1187500, 1175000, 1162500, 1150000, 1137500, 1125000,
+ 1112500, 1100000, 1087500, 1075000, 1062500, 1050000, 1037500 },
+ { 1100, 1162500, 1150000, 1137500, 1125000, 1112500, 1100000, 1087500,
+ 1075000, 1062500, 1050000, 1037500, 1025000, 1012500, 1000000 },
+ { 1000, 1125000, 1112500, 1100000, 1087500, 1075000, 1062500, 1050000,
+ 1037500, 1025000, 1012500, 1000000, 987500, 975000, 962500 },
+ { 900, 1087500, 1075000, 1062500, 1050000, 1037500, 1025000, 1012500,
+ 1000000, 987500, 975000, 962500, 950000, 937500, 925000 },
+ { 800, 1050000, 1037500, 1025000, 1012500, 1000000, 987500, 975000,
+ 962500, 950000, 937500, 925000, 912500, 900000, 900000 },
+ { 700, 1012500, 1000000, 987500, 975000, 962500, 950000, 937500,
+ 925000, 912500, 900000, 900000, 900000, 900000, 900000 },
+ { 600, 975000, 962500, 950000, 937500, 925000, 912500, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 500, 937500, 925000, 912500, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 400, 925000, 912500, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 300, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 200, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+}
+};
+
+static const struct asv_limit_entry __asv_limits[ASV_GROUPS_NUM] = {
+ { 13, 55 },
+ { 21, 65 },
+ { 25, 69 },
+ { 30, 72 },
+ { 36, 74 },
+ { 43, 76 },
+ { 51, 78 },
+ { 65, 80 },
+ { 81, 82 },
+ { 98, 84 },
+ { 119, 87 },
+ { 135, 89 },
+ { 150, 92 },
+ { 999, 999 },
+};
+
+static int exynos5422_asv_get_group(struct exynos_asv *asv)
+{
+ unsigned int pkgid_reg, auxi_reg;
+ int hpm, ids, i;
+
+ regmap_read(asv->chipid_regmap, EXYNOS_CHIPID_REG_PKG_ID, &pkgid_reg);
+ regmap_read(asv->chipid_regmap, EXYNOS_CHIPID_REG_AUX_INFO, &auxi_reg);
+
+ if (asv->use_sg) {
+ u32 sga = (pkgid_reg >> EXYNOS5422_SG_A_OFFSET) &
+ EXYNOS5422_SG_A_MASK;
+
+ u32 sgb = (pkgid_reg >> EXYNOS5422_SG_B_OFFSET) &
+ EXYNOS5422_SG_B_MASK;
+
+ if ((pkgid_reg >> EXYNOS5422_SG_BSIGN_OFFSET) &
+ EXYNOS5422_SG_BSIGN_MASK)
+ return sga + sgb;
+ else
+ return sga - sgb;
+ }
+
+ hpm = (auxi_reg >> EXYNOS5422_TMCB_OFFSET) & EXYNOS5422_TMCB_MASK;
+ ids = (pkgid_reg >> EXYNOS5422_IDS_OFFSET) & EXYNOS5422_IDS_MASK;
+
+ for (i = 0; i < ASV_GROUPS_NUM; i++) {
+ if (ids <= __asv_limits[i].ids)
+ break;
+ if (hpm <= __asv_limits[i].hpm)
+ break;
+ }
+ if (i < ASV_GROUPS_NUM)
+ return i;
+
+ return 0;
+}
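+/*
+ * Editor's note (illustrative, not part of the upstream driver): with the
+ * limits table above, a sample reporting e.g. IDS = 40 and HPM = 70 is not
+ * matched by entries 0..2 but satisfies the HPM limit of entry 3 (72), so
+ * exynos5422_asv_get_group() would return ASV group 3.
+ */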
+
+static int __asv_offset_voltage(unsigned int index)
+{
+ switch (index) {
+ case 1:
+ return 12500;
+ case 2:
+ return 50000;
+ case 3:
+ return 25000;
+ default:
+ return 0;
+ }
+}
+
+static void exynos5422_asv_offset_voltage_setup(struct exynos_asv *asv)
+{
+ struct exynos_asv_subsys *subsys;
+ unsigned int reg, value;
+
+ regmap_read(asv->chipid_regmap, EXYNOS_CHIPID_REG_AUX_INFO, &reg);
+
+ /* ARM offset voltage setup */
+ subsys = &asv->subsys[EXYNOS_ASV_SUBSYS_ID_ARM];
+
+ subsys->base_volt = 1000000;
+
+ value = (reg >> EXYNOS5422_ARM_UP_OFFSET) & EXYNOS5422_ARM_UP_MASK;
+ subsys->offset_volt_h = __asv_offset_voltage(value);
+
+ value = (reg >> EXYNOS5422_ARM_DN_OFFSET) & EXYNOS5422_ARM_DN_MASK;
+ subsys->offset_volt_l = __asv_offset_voltage(value);
+
+ /* KFC offset voltage setup */
+ subsys = &asv->subsys[EXYNOS_ASV_SUBSYS_ID_KFC];
+
+ subsys->base_volt = 1000000;
+
+ value = (reg >> EXYNOS5422_KFC_UP_OFFSET) & EXYNOS5422_KFC_UP_MASK;
+ subsys->offset_volt_h = __asv_offset_voltage(value);
+
+ value = (reg >> EXYNOS5422_KFC_DN_OFFSET) & EXYNOS5422_KFC_DN_MASK;
+ subsys->offset_volt_l = __asv_offset_voltage(value);
+}
+
+static int exynos5422_asv_opp_get_voltage(const struct exynos_asv_subsys *subsys,
+ int level, unsigned int volt)
+{
+ unsigned int asv_volt;
+
+ if (level >= subsys->table.num_rows)
+ return volt;
+
+ asv_volt = exynos_asv_opp_get_voltage(subsys, level,
+ subsys->asv->group);
+
+ if (volt > subsys->base_volt)
+ asv_volt += subsys->offset_volt_h;
+ else
+ asv_volt += subsys->offset_volt_l;
+
+ return asv_volt;
+}
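+
+/*
+ * Worked example for the helper above (hypothetical values): with
+ * base_volt = 1000000 uV, a requested OPP voltage of 1037500 uV is above
+ * the base voltage, so the high-side offset is applied. If the ASV table
+ * entry for this level and group is 1025000 uV and offset_volt_h is
+ * 12500 uV, the voltage returned is 1025000 + 12500 = 1037500 uV.
+ */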
+
+static unsigned int exynos5422_asv_parse_table(unsigned int pkg_id)
+{
+ return (pkg_id >> EXYNOS5422_TABLE_OFFSET) & EXYNOS5422_TABLE_MASK;
+}
+
+static bool exynos5422_asv_parse_bin2(unsigned int pkg_id)
+{
+ return (pkg_id >> EXYNOS5422_BIN2_OFFSET) & EXYNOS5422_BIN2_MASK;
+}
+
+static bool exynos5422_asv_parse_sg(unsigned int pkg_id)
+{
+ return (pkg_id >> EXYNOS5422_USESG_OFFSET) & EXYNOS5422_USESG_MASK;
+}
+
+int exynos5422_asv_init(struct exynos_asv *asv)
+{
+ struct exynos_asv_subsys *subsys;
+ unsigned int table_index;
+ unsigned int pkg_id;
+ bool bin2;
+
+ regmap_read(asv->chipid_regmap, EXYNOS_CHIPID_REG_PKG_ID, &pkg_id);
+
+ if (asv->of_bin == 2) {
+ bin2 = true;
+ asv->use_sg = false;
+ } else {
+ asv->use_sg = exynos5422_asv_parse_sg(pkg_id);
+ bin2 = exynos5422_asv_parse_bin2(pkg_id);
+ }
+
+ asv->group = exynos5422_asv_get_group(asv);
+ asv->table = exynos5422_asv_parse_table(pkg_id);
+
+ exynos5422_asv_offset_voltage_setup(asv);
+
+ if (bin2) {
+ table_index = 3;
+ } else {
+ if (asv->table == 2 || asv->table == 3)
+ table_index = asv->table - 1;
+ else
+ table_index = 0;
+ }
+
+ subsys = &asv->subsys[EXYNOS_ASV_SUBSYS_ID_ARM];
+ subsys->cpu_dt_compat = "arm,cortex-a15";
+ if (bin2)
+ subsys->table.num_rows = ASV_ARM_BIN2_DVFS_NUM;
+ else
+ subsys->table.num_rows = ASV_ARM_DVFS_NUM;
+ subsys->table.num_cols = ASV_GROUPS_NUM + 1;
+ subsys->table.buf = (u32 *)asv_arm_table[table_index];
+
+ subsys = &asv->subsys[EXYNOS_ASV_SUBSYS_ID_KFC];
+ subsys->cpu_dt_compat = "arm,cortex-a7";
+ if (bin2)
+ subsys->table.num_rows = ASV_KFC_BIN2_DVFS_NUM;
+ else
+ subsys->table.num_rows = ASV_KFC_DVFS_NUM;
+ subsys->table.num_cols = ASV_GROUPS_NUM + 1;
+ subsys->table.buf = (u32 *)asv_kfc_table[table_index];
+
+ asv->opp_get_voltage = exynos5422_asv_opp_get_voltage;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(exynos5422_asv_init);
diff --git a/drivers/soc/samsung/exynos5422-asv.h b/drivers/soc/samsung/exynos5422-asv.h
new file mode 100644
index 0000000000..95a5fb1a75
--- /dev/null
+++ b/drivers/soc/samsung/exynos5422-asv.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Samsung Exynos 5422 SoC Adaptive Supply Voltage support
+ */
+
+#ifndef __LINUX_SOC_EXYNOS5422_ASV_H
+#define __LINUX_SOC_EXYNOS5422_ASV_H
+
+#include <linux/errno.h>
+
+enum {
+ EXYNOS_ASV_SUBSYS_ID_ARM,
+ EXYNOS_ASV_SUBSYS_ID_KFC,
+ EXYNOS_ASV_SUBSYS_ID_MAX
+};
+
+struct exynos_asv;
+
+#ifdef CONFIG_EXYNOS_ASV_ARM
+int exynos5422_asv_init(struct exynos_asv *asv);
+#else
+static inline int exynos5422_asv_init(struct exynos_asv *asv)
+{
+ return -ENOTSUPP;
+}
+#endif
+
+#endif /* __LINUX_SOC_EXYNOS5422_ASV_H */
diff --git a/drivers/soc/samsung/s3c-pm-check.c b/drivers/soc/samsung/s3c-pm-check.c
new file mode 100644
index 0000000000..439d5c3725
--- /dev/null
+++ b/drivers/soc/samsung/s3c-pm-check.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// originally in linux/arch/arm/plat-s3c24xx/pm.c
+//
+// Copyright (c) 2004-2008 Simtec Electronics
+// http://armlinux.simtec.co.uk
+// Ben Dooks <ben@simtec.co.uk>
+//
+// S3C Power Management - suspend/resume memory corruption check.
+
+#include <linux/kernel.h>
+#include <linux/suspend.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+
+#include <linux/soc/samsung/s3c-pm.h>
+
+#if CONFIG_SAMSUNG_PM_CHECK_CHUNKSIZE < 1
+#error CONFIG_SAMSUNG_PM_CHECK_CHUNKSIZE must be a positive non-zero value
+#endif
+
+/* suspend checking code...
+ *
+ * this code computes a set of CRC checks over all the installed
+ * memory, so the system can verify whether the resume restored it
+ * correctly.
+ *
+ * CONFIG_SAMSUNG_PM_CHECK_CHUNKSIZE defines the block size for the CRC;
+ * increasing it makes a corrupted area harder to pin down, while
+ * reducing it makes the CRC save area larger.
+*/
+
+#define CHECK_CHUNKSIZE (CONFIG_SAMSUNG_PM_CHECK_CHUNKSIZE * 1024)
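+
+/*
+ * Sizing example (illustrative numbers only, not a fixed configuration):
+ * with CONFIG_SAMSUNG_PM_CHECK_CHUNKSIZE = 1 (i.e. 1 KiB chunks), a 64 MiB
+ * system RAM resource is split into 65536 chunks, so s3c_pm_countram()
+ * below reserves 65536 * sizeof(u32) = 256 KiB for the CRC save area.
+ */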
+
+static u32 crc_size; /* size needed for the crc block */
+static u32 *crcs; /* allocated over suspend/resume */
+
+typedef u32 *(run_fn_t)(struct resource *ptr, u32 *arg);
+
+/* s3c_pm_run_res
+ *
+ * go through the given resource list, and look for system ram
+*/
+
+static void s3c_pm_run_res(struct resource *ptr, run_fn_t fn, u32 *arg)
+{
+ while (ptr != NULL) {
+ if (ptr->child != NULL)
+ s3c_pm_run_res(ptr->child, fn, arg);
+
+ if ((ptr->flags & IORESOURCE_SYSTEM_RAM)
+ == IORESOURCE_SYSTEM_RAM) {
+ S3C_PMDBG("Found system RAM at %08lx..%08lx\n",
+ (unsigned long)ptr->start,
+ (unsigned long)ptr->end);
+ arg = (fn)(ptr, arg);
+ }
+
+ ptr = ptr->sibling;
+ }
+}
+
+static void s3c_pm_run_sysram(run_fn_t fn, u32 *arg)
+{
+ s3c_pm_run_res(&iomem_resource, fn, arg);
+}
+
+static u32 *s3c_pm_countram(struct resource *res, u32 *val)
+{
+ u32 size = (u32)resource_size(res);
+
+ size += CHECK_CHUNKSIZE-1;
+ size /= CHECK_CHUNKSIZE;
+
+ S3C_PMDBG("Area %08lx..%08lx, %d blocks\n",
+ (unsigned long)res->start, (unsigned long)res->end, size);
+
+ *val += size * sizeof(u32);
+ return val;
+}
+
+/* s3c_pm_check_prepare
+ *
+ * prepare the necessary information for creating the CRCs. This
+ * must be done before the final save, as it requires memory
+ * allocation and thus touches parts of the kernel we do not
+ * know about.
+*/
+
+void s3c_pm_check_prepare(void)
+{
+ crc_size = 0;
+
+ s3c_pm_run_sysram(s3c_pm_countram, &crc_size);
+
+ S3C_PMDBG("s3c_pm_prepare_check: %u checks needed\n", crc_size);
+
+ crcs = kmalloc(crc_size+4, GFP_KERNEL);
+ if (crcs == NULL)
+ printk(KERN_ERR "Cannot allocate CRC save area\n");
+}
+
+static u32 *s3c_pm_makecheck(struct resource *res, u32 *val)
+{
+ unsigned long addr, left;
+
+ for (addr = res->start; addr < res->end;
+ addr += CHECK_CHUNKSIZE) {
+ left = res->end - addr;
+
+ if (left > CHECK_CHUNKSIZE)
+ left = CHECK_CHUNKSIZE;
+
+ *val = crc32_le(~0, phys_to_virt(addr), left);
+ val++;
+ }
+
+ return val;
+}
+
+/* s3c_pm_check_store
+ *
+ * compute the CRC values for the memory blocks before the final
+ * sleep.
+*/
+
+void s3c_pm_check_store(void)
+{
+ if (crcs != NULL)
+ s3c_pm_run_sysram(s3c_pm_makecheck, crcs);
+}
+
+/* in_region
+ *
+ * return TRUE if the area defined by ptr..ptr+size overlaps the
+ * area what..what+whatsz
+*/
+
+static inline int in_region(void *ptr, int size, void *what, size_t whatsz)
+{
+ if ((what+whatsz) < ptr)
+ return 0;
+
+ if (what > (ptr+size))
+ return 0;
+
+ return 1;
+}
+
+/**
+ * s3c_pm_runcheck() - helper to check a resource on restore.
+ * @res: The resource to check
+ * @val: Pointer to list of CRC32 values to check.
+ *
+ * Called from s3c_pm_check_restore() via s3c_pm_run_sysram(), this
+ * function runs over the given memory resource, checking it against the
+ * stored CRCs to ensure that memory was restored correctly. It tries to
+ * skip as many of the areas used during the suspend process as possible.
+ */
+static u32 *s3c_pm_runcheck(struct resource *res, u32 *val)
+{
+ unsigned long addr;
+ unsigned long left;
+ void *stkpage;
+ void *ptr;
+ u32 calc;
+
+ stkpage = (void *)((u32)&calc & ~PAGE_MASK);
+
+ for (addr = res->start; addr < res->end;
+ addr += CHECK_CHUNKSIZE) {
+ left = res->end - addr;
+
+ if (left > CHECK_CHUNKSIZE)
+ left = CHECK_CHUNKSIZE;
+
+ ptr = phys_to_virt(addr);
+
+ if (in_region(ptr, left, stkpage, 4096)) {
+ S3C_PMDBG("skipping %08lx, has stack in\n", addr);
+ goto skip_check;
+ }
+
+ if (in_region(ptr, left, crcs, crc_size)) {
+ S3C_PMDBG("skipping %08lx, has crc block in\n", addr);
+ goto skip_check;
+ }
+
+ /* calculate and check the checksum */
+
+ calc = crc32_le(~0, ptr, left);
+ if (calc != *val) {
+ printk(KERN_ERR "Restore CRC error at "
+ "%08lx (%08x vs %08x)\n", addr, calc, *val);
+
+ S3C_PMDBG("Restore CRC error at %08lx (%08x vs %08x)\n",
+ addr, calc, *val);
+ }
+
+ skip_check:
+ val++;
+ }
+
+ return val;
+}
+
+/**
+ * s3c_pm_check_restore() - memory check called on resume
+ *
+ * check the CRCs after the restore event; the memory used to hold them
+ * is freed later by s3c_pm_check_cleanup()
+*/
+void s3c_pm_check_restore(void)
+{
+ if (crcs != NULL)
+ s3c_pm_run_sysram(s3c_pm_runcheck, crcs);
+}
+
+/**
+ * s3c_pm_check_cleanup() - free memory resources
+ *
+ * Free the resources that were allocated by the suspend
+ * memory check code. We do this separately from the
+ * s3c_pm_check_restore() function as we cannot call any
+ * functions that might sleep during that resume.
+ */
+void s3c_pm_check_cleanup(void)
+{
+ kfree(crcs);
+ crcs = NULL;
+}
+
diff --git a/drivers/soc/sifive/Kconfig b/drivers/soc/sifive/Kconfig
new file mode 100644
index 0000000000..e86870be34
--- /dev/null
+++ b/drivers/soc/sifive/Kconfig
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+
+if SOC_SIFIVE || SOC_STARFIVE
+
+config SIFIVE_CCACHE
+ bool "SiFive Composable Cache controller"
+ help
+ Support for the composable cache controller on SiFive platforms.
+
+endif
diff --git a/drivers/soc/sifive/Makefile b/drivers/soc/sifive/Makefile
new file mode 100644
index 0000000000..1f5dc339bf
--- /dev/null
+++ b/drivers/soc/sifive/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_SIFIVE_CCACHE) += sifive_ccache.o
diff --git a/drivers/soc/sifive/sifive_ccache.c b/drivers/soc/sifive/sifive_ccache.c
new file mode 100644
index 0000000000..3684f5b40a
--- /dev/null
+++ b/drivers/soc/sifive/sifive_ccache.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SiFive composable cache controller Driver
+ *
+ * Copyright (C) 2018-2022 SiFive, Inc.
+ *
+ */
+
+#define pr_fmt(fmt) "CCACHE: " fmt
+
+#include <linux/debugfs.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/device.h>
+#include <linux/bitfield.h>
+#include <asm/cacheinfo.h>
+#include <soc/sifive/sifive_ccache.h>
+
+#define SIFIVE_CCACHE_DIRECCFIX_LOW 0x100
+#define SIFIVE_CCACHE_DIRECCFIX_HIGH 0x104
+#define SIFIVE_CCACHE_DIRECCFIX_COUNT 0x108
+
+#define SIFIVE_CCACHE_DIRECCFAIL_LOW 0x120
+#define SIFIVE_CCACHE_DIRECCFAIL_HIGH 0x124
+#define SIFIVE_CCACHE_DIRECCFAIL_COUNT 0x128
+
+#define SIFIVE_CCACHE_DATECCFIX_LOW 0x140
+#define SIFIVE_CCACHE_DATECCFIX_HIGH 0x144
+#define SIFIVE_CCACHE_DATECCFIX_COUNT 0x148
+
+#define SIFIVE_CCACHE_DATECCFAIL_LOW 0x160
+#define SIFIVE_CCACHE_DATECCFAIL_HIGH 0x164
+#define SIFIVE_CCACHE_DATECCFAIL_COUNT 0x168
+
+#define SIFIVE_CCACHE_CONFIG 0x00
+#define SIFIVE_CCACHE_CONFIG_BANK_MASK GENMASK_ULL(7, 0)
+#define SIFIVE_CCACHE_CONFIG_WAYS_MASK GENMASK_ULL(15, 8)
+#define SIFIVE_CCACHE_CONFIG_SETS_MASK GENMASK_ULL(23, 16)
+#define SIFIVE_CCACHE_CONFIG_BLKS_MASK GENMASK_ULL(31, 24)
+
+#define SIFIVE_CCACHE_WAYENABLE 0x08
+#define SIFIVE_CCACHE_ECCINJECTERR 0x40
+
+#define SIFIVE_CCACHE_MAX_ECCINTR 4
+
+static void __iomem *ccache_base;
+static int g_irq[SIFIVE_CCACHE_MAX_ECCINTR];
+static struct riscv_cacheinfo_ops ccache_cache_ops;
+static int level;
+
+enum {
+ DIR_CORR = 0,
+ DATA_CORR,
+ DATA_UNCORR,
+ DIR_UNCORR,
+};
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *sifive_test;
+
+static ssize_t ccache_write(struct file *file, const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ unsigned int val;
+
+ if (kstrtouint_from_user(data, count, 0, &val))
+ return -EINVAL;
+ if ((val < 0xFF) || (val >= 0x10000 && val < 0x100FF))
+ writel(val, ccache_base + SIFIVE_CCACHE_ECCINJECTERR);
+ else
+ return -EINVAL;
+ return count;
+}
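+
+/*
+ * Illustrative use of the error-injection interface above (the path follows
+ * from setup_sifive_debug() below; the exact location depends on where
+ * debugfs is mounted):
+ *
+ *   echo 0x3 > /sys/kernel/debug/sifive_ccache_cache/sifive_debug_inject_error
+ *
+ * Only values in the ranges accepted by ccache_write() (below 0xFF, or
+ * 0x10000..0x100FE) are written to the ECCINJECTERR register; anything
+ * else returns -EINVAL.
+ */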
+
+static const struct file_operations ccache_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = ccache_write
+};
+
+static void setup_sifive_debug(void)
+{
+ sifive_test = debugfs_create_dir("sifive_ccache_cache", NULL);
+
+ debugfs_create_file("sifive_debug_inject_error", 0200,
+ sifive_test, NULL, &ccache_fops);
+}
+#endif
+
+static void ccache_config_read(void)
+{
+ u32 cfg;
+
+ cfg = readl(ccache_base + SIFIVE_CCACHE_CONFIG);
+ pr_info("%llu banks, %llu ways, sets/bank=%llu, bytes/block=%llu\n",
+ FIELD_GET(SIFIVE_CCACHE_CONFIG_BANK_MASK, cfg),
+ FIELD_GET(SIFIVE_CCACHE_CONFIG_WAYS_MASK, cfg),
+ BIT_ULL(FIELD_GET(SIFIVE_CCACHE_CONFIG_SETS_MASK, cfg)),
+ BIT_ULL(FIELD_GET(SIFIVE_CCACHE_CONFIG_BLKS_MASK, cfg)));
+
+ cfg = readl(ccache_base + SIFIVE_CCACHE_WAYENABLE);
+ pr_info("Index of the largest way enabled: %u\n", cfg);
+}
+
+static const struct of_device_id sifive_ccache_ids[] = {
+ { .compatible = "sifive,fu540-c000-ccache" },
+ { .compatible = "sifive,fu740-c000-ccache" },
+ { .compatible = "sifive,ccache0" },
+ { /* end of table */ }
+};
+
+static ATOMIC_NOTIFIER_HEAD(ccache_err_chain);
+
+int register_sifive_ccache_error_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&ccache_err_chain, nb);
+}
+EXPORT_SYMBOL_GPL(register_sifive_ccache_error_notifier);
+
+int unregister_sifive_ccache_error_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&ccache_err_chain, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_sifive_ccache_error_notifier);
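+
+/*
+ * Minimal consumer sketch (hypothetical client code, not part of this file):
+ * a driver interested in cache ECC events registers a notifier and receives
+ * the error type plus the message string passed to the call chain below.
+ *
+ *   static int my_ccache_notify(struct notifier_block *nb,
+ *                               unsigned long err_type, void *msg)
+ *   {
+ *           pr_info("ccache event %lu: %s\n", err_type, (char *)msg);
+ *           return NOTIFY_OK;
+ *   }
+ *
+ *   static struct notifier_block my_ccache_nb = {
+ *           .notifier_call = my_ccache_notify,
+ *   };
+ *
+ *   register_sifive_ccache_error_notifier(&my_ccache_nb);
+ */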
+
+static int ccache_largest_wayenabled(void)
+{
+ return readl(ccache_base + SIFIVE_CCACHE_WAYENABLE) & 0xFF;
+}
+
+static ssize_t number_of_ways_enabled_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%u\n", ccache_largest_wayenabled());
+}
+
+static DEVICE_ATTR_RO(number_of_ways_enabled);
+
+static struct attribute *priv_attrs[] = {
+ &dev_attr_number_of_ways_enabled.attr,
+ NULL,
+};
+
+static const struct attribute_group priv_attr_group = {
+ .attrs = priv_attrs,
+};
+
+static const struct attribute_group *ccache_get_priv_group(struct cacheinfo
+ *this_leaf)
+{
+ /* We want to use the private group for the composable cache only */
+ if (this_leaf->level == level)
+ return &priv_attr_group;
+ else
+ return NULL;
+}
+
+static irqreturn_t ccache_int_handler(int irq, void *device)
+{
+ unsigned int add_h, add_l;
+
+ if (irq == g_irq[DIR_CORR]) {
+ add_h = readl(ccache_base + SIFIVE_CCACHE_DIRECCFIX_HIGH);
+ add_l = readl(ccache_base + SIFIVE_CCACHE_DIRECCFIX_LOW);
+ pr_err("DirError @ 0x%08X.%08X\n", add_h, add_l);
+ /* Reading this register clears the DirError interrupt sig */
+ readl(ccache_base + SIFIVE_CCACHE_DIRECCFIX_COUNT);
+ atomic_notifier_call_chain(&ccache_err_chain,
+ SIFIVE_CCACHE_ERR_TYPE_CE,
+ "DirECCFix");
+ }
+ if (irq == g_irq[DIR_UNCORR]) {
+ add_h = readl(ccache_base + SIFIVE_CCACHE_DIRECCFAIL_HIGH);
+ add_l = readl(ccache_base + SIFIVE_CCACHE_DIRECCFAIL_LOW);
+ /* Reading this register clears the DirFail interrupt sig */
+ readl(ccache_base + SIFIVE_CCACHE_DIRECCFAIL_COUNT);
+ atomic_notifier_call_chain(&ccache_err_chain,
+ SIFIVE_CCACHE_ERR_TYPE_UE,
+ "DirECCFail");
+ panic("CCACHE: DirFail @ 0x%08X.%08X\n", add_h, add_l);
+ }
+ if (irq == g_irq[DATA_CORR]) {
+ add_h = readl(ccache_base + SIFIVE_CCACHE_DATECCFIX_HIGH);
+ add_l = readl(ccache_base + SIFIVE_CCACHE_DATECCFIX_LOW);
+ pr_err("DataError @ 0x%08X.%08X\n", add_h, add_l);
+ /* Reading this register clears the DataError interrupt sig */
+ readl(ccache_base + SIFIVE_CCACHE_DATECCFIX_COUNT);
+ atomic_notifier_call_chain(&ccache_err_chain,
+ SIFIVE_CCACHE_ERR_TYPE_CE,
+ "DatECCFix");
+ }
+ if (irq == g_irq[DATA_UNCORR]) {
+ add_h = readl(ccache_base + SIFIVE_CCACHE_DATECCFAIL_HIGH);
+ add_l = readl(ccache_base + SIFIVE_CCACHE_DATECCFAIL_LOW);
+ pr_err("DataFail @ 0x%08X.%08X\n", add_h, add_l);
+ /* Reading this register clears the DataFail interrupt sig */
+ readl(ccache_base + SIFIVE_CCACHE_DATECCFAIL_COUNT);
+ atomic_notifier_call_chain(&ccache_err_chain,
+ SIFIVE_CCACHE_ERR_TYPE_UE,
+ "DatECCFail");
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int __init sifive_ccache_init(void)
+{
+ struct device_node *np;
+ struct resource res;
+ int i, rc, intr_num;
+
+ np = of_find_matching_node(NULL, sifive_ccache_ids);
+ if (!np)
+ return -ENODEV;
+
+ if (of_address_to_resource(np, 0, &res)) {
+ rc = -ENODEV;
+ goto err_node_put;
+ }
+
+ ccache_base = ioremap(res.start, resource_size(&res));
+ if (!ccache_base) {
+ rc = -ENOMEM;
+ goto err_node_put;
+ }
+
+ if (of_property_read_u32(np, "cache-level", &level)) {
+ rc = -ENOENT;
+ goto err_unmap;
+ }
+
+ intr_num = of_property_count_u32_elems(np, "interrupts");
+ if (!intr_num) {
+ pr_err("No interrupts property\n");
+ rc = -ENODEV;
+ goto err_unmap;
+ }
+
+ for (i = 0; i < intr_num; i++) {
+ g_irq[i] = irq_of_parse_and_map(np, i);
+ rc = request_irq(g_irq[i], ccache_int_handler, 0, "ccache_ecc",
+ NULL);
+ if (rc) {
+ pr_err("Could not request IRQ %d\n", g_irq[i]);
+ goto err_free_irq;
+ }
+ }
+ of_node_put(np);
+
+ ccache_config_read();
+
+ ccache_cache_ops.get_priv_group = ccache_get_priv_group;
+ riscv_set_cacheinfo_ops(&ccache_cache_ops);
+
+#ifdef CONFIG_DEBUG_FS
+ setup_sifive_debug();
+#endif
+ return 0;
+
+err_free_irq:
+ while (--i >= 0)
+ free_irq(g_irq[i], NULL);
+err_unmap:
+ iounmap(ccache_base);
+err_node_put:
+ of_node_put(np);
+ return rc;
+}
+
+device_initcall(sifive_ccache_init);
diff --git a/drivers/soc/starfive/Kconfig b/drivers/soc/starfive/Kconfig
new file mode 100644
index 0000000000..bdb96dc4c9
--- /dev/null
+++ b/drivers/soc/starfive/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config JH71XX_PMU
+ bool "Support PMU for StarFive JH71XX SoC"
+ depends on PM
+ depends on SOC_STARFIVE || COMPILE_TEST
+ default SOC_STARFIVE
+ select PM_GENERIC_DOMAINS
+ help
+ Say 'y' here to enable power domain support.
+ In order to meet low power requirements, a Power Management Unit (PMU)
+ is designed for controlling power resources in StarFive JH71XX SoCs.
diff --git a/drivers/soc/sunxi/Kconfig b/drivers/soc/sunxi/Kconfig
new file mode 100644
index 0000000000..c5070914fc
--- /dev/null
+++ b/drivers/soc/sunxi/Kconfig
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Allwinner sunXi SoC drivers
+#
+
+config SUNXI_MBUS
+ bool
+ default ARCH_SUNXI
+ depends on ARM || ARM64
+ help
+ Say y to enable the fixups needed to support the Allwinner
+ MBUS DMA quirks.
+
+config SUNXI_SRAM
+ bool
+ default ARCH_SUNXI
+ select REGMAP_MMIO
+ help
+ Say y here to enable the SRAM controller support. This
+ device is responsible for mapping the SRAM in the sunXi SoCs
+ either to the CPU/DMA or to the devices.
+
+config SUN20I_PPU
+ bool "Allwinner D1 PPU power domain driver"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ depends on PM
+ select PM_GENERIC_DOMAINS
+ help
+ Say y to enable the PPU power domain driver. This saves power
+ when certain peripherals, such as the video engine, are idle.
diff --git a/drivers/soc/sunxi/Makefile b/drivers/soc/sunxi/Makefile
new file mode 100644
index 0000000000..549159571d
--- /dev/null
+++ b/drivers/soc/sunxi/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_SUNXI_MBUS) += sunxi_mbus.o
+obj-$(CONFIG_SUNXI_SRAM) += sunxi_sram.o
diff --git a/drivers/soc/sunxi/sunxi_mbus.c b/drivers/soc/sunxi/sunxi_mbus.c
new file mode 100644
index 0000000000..1734da357c
--- /dev/null
+++ b/drivers/soc/sunxi/sunxi_mbus.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2020 Maxime Ripard <maxime@cerno.tech> */
+
+#include <linux/device.h>
+#include <linux/dma-map-ops.h>
+#include <linux/init.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+static const char * const sunxi_mbus_devices[] = {
+ /*
+ * The display engine virtual devices are not strictly speaking
+ * connected to the MBUS, but since DRM will perform all the
+ * memory allocations and DMA operations through that device, we
+ * need to have the quirk on those devices too.
+ */
+ "allwinner,sun4i-a10-display-engine",
+ "allwinner,sun5i-a10s-display-engine",
+ "allwinner,sun5i-a13-display-engine",
+ "allwinner,sun6i-a31-display-engine",
+ "allwinner,sun6i-a31s-display-engine",
+ "allwinner,sun7i-a20-display-engine",
+ "allwinner,sun8i-a23-display-engine",
+ "allwinner,sun8i-a33-display-engine",
+ "allwinner,sun9i-a80-display-engine",
+
+ /*
+ * And now we have the regular devices connected to the MBUS
+ * (that we know of).
+ */
+ "allwinner,sun4i-a10-csi1",
+ "allwinner,sun4i-a10-display-backend",
+ "allwinner,sun4i-a10-display-frontend",
+ "allwinner,sun4i-a10-video-engine",
+ "allwinner,sun5i-a13-display-backend",
+ "allwinner,sun5i-a13-video-engine",
+ "allwinner,sun6i-a31-csi",
+ "allwinner,sun6i-a31-display-backend",
+ "allwinner,sun7i-a20-csi0",
+ "allwinner,sun7i-a20-display-backend",
+ "allwinner,sun7i-a20-display-frontend",
+ "allwinner,sun7i-a20-video-engine",
+ "allwinner,sun8i-a23-display-backend",
+ "allwinner,sun8i-a23-display-frontend",
+ "allwinner,sun8i-a33-display-backend",
+ "allwinner,sun8i-a33-display-frontend",
+ "allwinner,sun8i-a33-video-engine",
+ "allwinner,sun8i-a83t-csi",
+ "allwinner,sun8i-h3-csi",
+ "allwinner,sun8i-h3-video-engine",
+ "allwinner,sun8i-v3s-csi",
+ "allwinner,sun9i-a80-display-backend",
+ "allwinner,sun50i-a64-csi",
+ "allwinner,sun50i-a64-video-engine",
+ "allwinner,sun50i-h5-video-engine",
+ NULL,
+};
+
+static int sunxi_mbus_notifier(struct notifier_block *nb,
+ unsigned long event, void *__dev)
+{
+ struct device *dev = __dev;
+ int ret;
+
+ if (event != BUS_NOTIFY_ADD_DEVICE)
+ return NOTIFY_DONE;
+
+ /*
+ * Only the devices that need a large memory bandwidth do DMA
+ * directly over the memory bus (called MBUS), instead of going
+ * through the regular system bus.
+ */
+ if (!of_device_compatible_match(dev->of_node, sunxi_mbus_devices))
+ return NOTIFY_DONE;
+
+ /*
+ * Devices with an interconnects property have the MBUS
+ * relationship described in their DT and dealt with by
+ * of_dma_configure, so we can just skip them.
+ *
+ * Older DTs, or SoCs that are not yet fully described, still need
+ * that DMA offset to be set here, though.
+ */
+ if (of_property_present(dev->of_node, "interconnects"))
+ return NOTIFY_DONE;
+
+ ret = dma_direct_set_offset(dev, PHYS_OFFSET, 0, SZ_4G);
+ if (ret)
+ dev_err(dev, "Couldn't setup our DMA offset: %d\n", ret);
+
+ return NOTIFY_DONE;
+}
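+
+/*
+ * Note on the offset installed above: dma_direct_set_offset(dev, PHYS_OFFSET,
+ * 0, SZ_4G) maps the 4 GiB window of CPU physical addresses starting at
+ * PHYS_OFFSET to MBUS (DMA) addresses starting at 0, so for these devices
+ * dma_addr = phys_addr - PHYS_OFFSET within that window.
+ */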
+
+static struct notifier_block sunxi_mbus_nb = {
+ .notifier_call = sunxi_mbus_notifier,
+};
+
+static const char * const sunxi_mbus_platforms[] __initconst = {
+ "allwinner,sun4i-a10",
+ "allwinner,sun5i-a10s",
+ "allwinner,sun5i-a13",
+ "allwinner,sun6i-a31",
+ "allwinner,sun7i-a20",
+ "allwinner,sun8i-a23",
+ "allwinner,sun8i-a33",
+ "allwinner,sun8i-a83t",
+ "allwinner,sun8i-h3",
+ "allwinner,sun8i-r40",
+ "allwinner,sun8i-v3",
+ "allwinner,sun8i-v3s",
+ "allwinner,sun9i-a80",
+ "allwinner,sun50i-a64",
+ "allwinner,sun50i-h5",
+ "nextthing,gr8",
+ NULL,
+};
+
+static int __init sunxi_mbus_init(void)
+{
+ if (!of_device_compatible_match(of_root, sunxi_mbus_platforms))
+ return 0;
+
+ bus_register_notifier(&platform_bus_type, &sunxi_mbus_nb);
+ return 0;
+}
+arch_initcall(sunxi_mbus_init);
diff --git a/drivers/soc/sunxi/sunxi_sram.c b/drivers/soc/sunxi/sunxi_sram.c
new file mode 100644
index 0000000000..4458b2e056
--- /dev/null
+++ b/drivers/soc/sunxi/sunxi_sram.c
@@ -0,0 +1,426 @@
+/*
+ * Allwinner SoCs SRAM Controller Driver
+ *
+ * Copyright (C) 2015 Maxime Ripard
+ *
+ * Author: Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <linux/soc/sunxi/sunxi_sram.h>
+
+struct sunxi_sram_func {
+ char *func;
+ u8 val;
+ u32 reg_val;
+};
+
+struct sunxi_sram_data {
+ char *name;
+ u8 reg;
+ u8 offset;
+ u8 width;
+ struct sunxi_sram_func *func;
+ struct list_head list;
+};
+
+struct sunxi_sram_desc {
+ struct sunxi_sram_data data;
+ bool claimed;
+};
+
+#define SUNXI_SRAM_MAP(_reg_val, _val, _func) \
+ { \
+ .func = _func, \
+ .val = _val, \
+ .reg_val = _reg_val, \
+ }
+
+#define SUNXI_SRAM_DATA(_name, _reg, _off, _width, ...) \
+ { \
+ .name = _name, \
+ .reg = _reg, \
+ .offset = _off, \
+ .width = _width, \
+ .func = (struct sunxi_sram_func[]){ \
+ __VA_ARGS__, { } }, \
+ }
+
+static struct sunxi_sram_desc sun4i_a10_sram_a3_a4 = {
+ .data = SUNXI_SRAM_DATA("A3-A4", 0x4, 0x4, 2,
+ SUNXI_SRAM_MAP(0, 0, "cpu"),
+ SUNXI_SRAM_MAP(1, 1, "emac")),
+};
+
+static struct sunxi_sram_desc sun4i_a10_sram_c1 = {
+ .data = SUNXI_SRAM_DATA("C1", 0x0, 0x0, 31,
+ SUNXI_SRAM_MAP(0, 0, "cpu"),
+ SUNXI_SRAM_MAP(0x7fffffff, 1, "ve")),
+};
+
+static struct sunxi_sram_desc sun4i_a10_sram_d = {
+ .data = SUNXI_SRAM_DATA("D", 0x4, 0x0, 1,
+ SUNXI_SRAM_MAP(0, 0, "cpu"),
+ SUNXI_SRAM_MAP(1, 1, "usb-otg")),
+};
+
+static struct sunxi_sram_desc sun50i_a64_sram_c = {
+ .data = SUNXI_SRAM_DATA("C", 0x4, 24, 1,
+ SUNXI_SRAM_MAP(1, 0, "cpu"),
+ SUNXI_SRAM_MAP(0, 1, "de2")),
+};
+
+static const struct of_device_id sunxi_sram_dt_ids[] = {
+ {
+ .compatible = "allwinner,sun4i-a10-sram-a3-a4",
+ .data = &sun4i_a10_sram_a3_a4.data,
+ },
+ {
+ .compatible = "allwinner,sun4i-a10-sram-c1",
+ .data = &sun4i_a10_sram_c1.data,
+ },
+ {
+ .compatible = "allwinner,sun4i-a10-sram-d",
+ .data = &sun4i_a10_sram_d.data,
+ },
+ {
+ .compatible = "allwinner,sun50i-a64-sram-c",
+ .data = &sun50i_a64_sram_c.data,
+ },
+ {}
+};
+
+static struct device *sram_dev;
+static LIST_HEAD(claimed_sram);
+static DEFINE_SPINLOCK(sram_lock);
+static void __iomem *base;
+
+static int sunxi_sram_show(struct seq_file *s, void *data)
+{
+ struct device_node *sram_node, *section_node;
+ const struct sunxi_sram_data *sram_data;
+ const struct of_device_id *match;
+ struct sunxi_sram_func *func;
+ const __be32 *sram_addr_p, *section_addr_p;
+ u32 val;
+
+ seq_puts(s, "Allwinner sunXi SRAM\n");
+ seq_puts(s, "--------------------\n\n");
+
+ for_each_child_of_node(sram_dev->of_node, sram_node) {
+ if (!of_device_is_compatible(sram_node, "mmio-sram"))
+ continue;
+
+ sram_addr_p = of_get_address(sram_node, 0, NULL, NULL);
+
+ seq_printf(s, "sram@%08x\n",
+ be32_to_cpu(*sram_addr_p));
+
+ for_each_child_of_node(sram_node, section_node) {
+ match = of_match_node(sunxi_sram_dt_ids, section_node);
+ if (!match)
+ continue;
+ sram_data = match->data;
+
+ section_addr_p = of_get_address(section_node, 0,
+ NULL, NULL);
+
+ seq_printf(s, "\tsection@%04x\t(%s)\n",
+ be32_to_cpu(*section_addr_p),
+ sram_data->name);
+
+ val = readl(base + sram_data->reg);
+ val >>= sram_data->offset;
+ val &= GENMASK(sram_data->width - 1, 0);
+
+ for (func = sram_data->func; func->func; func++) {
+ seq_printf(s, "\t\t%s%c\n", func->func,
+ func->reg_val == val ?
+ '*' : ' ');
+ }
+ }
+
+ seq_puts(s, "\n");
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(sunxi_sram);
+
+static inline struct sunxi_sram_desc *to_sram_desc(const struct sunxi_sram_data *data)
+{
+ return container_of(data, struct sunxi_sram_desc, data);
+}
+
+static const struct sunxi_sram_data *sunxi_sram_of_parse(struct device_node *node,
+ unsigned int *reg_value)
+{
+ const struct of_device_id *match;
+ const struct sunxi_sram_data *data;
+ struct sunxi_sram_func *func;
+ struct of_phandle_args args;
+ u8 val;
+ int ret;
+
+ ret = of_parse_phandle_with_fixed_args(node, "allwinner,sram", 1, 0,
+ &args);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (!of_device_is_available(args.np)) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ val = args.args[0];
+
+ match = of_match_node(sunxi_sram_dt_ids, args.np);
+ if (!match) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ data = match->data;
+ if (!data) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ for (func = data->func; func->func; func++) {
+ if (val == func->val) {
+ if (reg_value)
+ *reg_value = func->reg_val;
+
+ break;
+ }
+ }
+
+ if (!func->func) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ of_node_put(args.np);
+ return match->data;
+
+err:
+ of_node_put(args.np);
+ return ERR_PTR(ret);
+}
+
+int sunxi_sram_claim(struct device *dev)
+{
+ const struct sunxi_sram_data *sram_data;
+ struct sunxi_sram_desc *sram_desc;
+ unsigned int device;
+ u32 val, mask;
+
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ if (!base)
+ return -EPROBE_DEFER;
+
+ if (!dev || !dev->of_node)
+ return -EINVAL;
+
+ sram_data = sunxi_sram_of_parse(dev->of_node, &device);
+ if (IS_ERR(sram_data))
+ return PTR_ERR(sram_data);
+
+ sram_desc = to_sram_desc(sram_data);
+
+ spin_lock(&sram_lock);
+
+ if (sram_desc->claimed) {
+ spin_unlock(&sram_lock);
+ return -EBUSY;
+ }
+
+ mask = GENMASK(sram_data->offset + sram_data->width - 1,
+ sram_data->offset);
+ val = readl(base + sram_data->reg);
+ val &= ~mask;
+ writel(val | ((device << sram_data->offset) & mask),
+ base + sram_data->reg);
+
+ sram_desc->claimed = true;
+ spin_unlock(&sram_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(sunxi_sram_claim);
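+
+/*
+ * Minimal consumer sketch (hypothetical driver code): a device with an
+ * "allwinner,sram" phandle in its DT node claims its SRAM section during
+ * probe and releases it again when done.
+ *
+ *   ret = sunxi_sram_claim(&pdev->dev);
+ *   if (ret)
+ *           return dev_err_probe(&pdev->dev, ret, "failed to claim SRAM\n");
+ *   ...
+ *   sunxi_sram_release(&pdev->dev);
+ *
+ * -EPROBE_DEFER is returned while this controller has not probed yet, and
+ * -EBUSY if the section is already claimed or its node is disabled.
+ */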
+
+void sunxi_sram_release(struct device *dev)
+{
+ const struct sunxi_sram_data *sram_data;
+ struct sunxi_sram_desc *sram_desc;
+
+ if (!dev || !dev->of_node)
+ return;
+
+ sram_data = sunxi_sram_of_parse(dev->of_node, NULL);
+ if (IS_ERR(sram_data))
+ return;
+
+ sram_desc = to_sram_desc(sram_data);
+
+ spin_lock(&sram_lock);
+ sram_desc->claimed = false;
+ spin_unlock(&sram_lock);
+}
+EXPORT_SYMBOL(sunxi_sram_release);
+
+struct sunxi_sramc_variant {
+ int num_emac_clocks;
+ bool has_ldo_ctrl;
+};
+
+static const struct sunxi_sramc_variant sun4i_a10_sramc_variant = {
+ /* Nothing special */
+};
+
+static const struct sunxi_sramc_variant sun8i_h3_sramc_variant = {
+ .num_emac_clocks = 1,
+};
+
+static const struct sunxi_sramc_variant sun20i_d1_sramc_variant = {
+ .num_emac_clocks = 1,
+ .has_ldo_ctrl = true,
+};
+
+static const struct sunxi_sramc_variant sun50i_a64_sramc_variant = {
+ .num_emac_clocks = 1,
+};
+
+static const struct sunxi_sramc_variant sun50i_h616_sramc_variant = {
+ .num_emac_clocks = 2,
+};
+
+#define SUNXI_SRAM_EMAC_CLOCK_REG 0x30
+#define SUNXI_SYS_LDO_CTRL_REG 0x150
+
+static bool sunxi_sram_regmap_accessible_reg(struct device *dev,
+ unsigned int reg)
+{
+ const struct sunxi_sramc_variant *variant = dev_get_drvdata(dev);
+
+ if (reg >= SUNXI_SRAM_EMAC_CLOCK_REG &&
+ reg < SUNXI_SRAM_EMAC_CLOCK_REG + variant->num_emac_clocks * 4)
+ return true;
+ if (reg == SUNXI_SYS_LDO_CTRL_REG && variant->has_ldo_ctrl)
+ return true;
+
+ return false;
+}
+
+static struct regmap_config sunxi_sram_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ /* last defined register */
+ .max_register = SUNXI_SYS_LDO_CTRL_REG,
+ /* other devices have no business accessing other registers */
+ .readable_reg = sunxi_sram_regmap_accessible_reg,
+ .writeable_reg = sunxi_sram_regmap_accessible_reg,
+};
+
+static int __init sunxi_sram_probe(struct platform_device *pdev)
+{
+ const struct sunxi_sramc_variant *variant;
+ struct device *dev = &pdev->dev;
+ struct regmap *regmap;
+
+ sram_dev = &pdev->dev;
+
+ variant = of_device_get_match_data(&pdev->dev);
+ if (!variant)
+ return -EINVAL;
+
+ dev_set_drvdata(dev, (struct sunxi_sramc_variant *)variant);
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ if (variant->num_emac_clocks || variant->has_ldo_ctrl) {
+ regmap = devm_regmap_init_mmio(dev, base, &sunxi_sram_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+ }
+
+ of_platform_populate(dev->of_node, NULL, NULL, dev);
+
+ debugfs_create_file("sram", 0444, NULL, NULL, &sunxi_sram_fops);
+
+ return 0;
+}
+
+static const struct of_device_id sunxi_sram_dt_match[] = {
+ {
+ .compatible = "allwinner,sun4i-a10-sram-controller",
+ .data = &sun4i_a10_sramc_variant,
+ },
+ {
+ .compatible = "allwinner,sun4i-a10-system-control",
+ .data = &sun4i_a10_sramc_variant,
+ },
+ {
+ .compatible = "allwinner,sun5i-a13-system-control",
+ .data = &sun4i_a10_sramc_variant,
+ },
+ {
+ .compatible = "allwinner,sun8i-a23-system-control",
+ .data = &sun4i_a10_sramc_variant,
+ },
+ {
+ .compatible = "allwinner,sun8i-h3-system-control",
+ .data = &sun8i_h3_sramc_variant,
+ },
+ {
+ .compatible = "allwinner,sun20i-d1-system-control",
+ .data = &sun20i_d1_sramc_variant,
+ },
+ {
+ .compatible = "allwinner,sun50i-a64-sram-controller",
+ .data = &sun50i_a64_sramc_variant,
+ },
+ {
+ .compatible = "allwinner,sun50i-a64-system-control",
+ .data = &sun50i_a64_sramc_variant,
+ },
+ {
+ .compatible = "allwinner,sun50i-h5-system-control",
+ .data = &sun50i_a64_sramc_variant,
+ },
+ {
+ .compatible = "allwinner,sun50i-h616-system-control",
+ .data = &sun50i_h616_sramc_variant,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sunxi_sram_dt_match);
+
+static struct platform_driver sunxi_sram_driver = {
+ .driver = {
+ .name = "sunxi-sram",
+ .of_match_table = sunxi_sram_dt_match,
+ },
+};
+builtin_platform_driver_probe(sunxi_sram_driver, sunxi_sram_probe);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner sunXi SRAM Controller Driver");
diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig
new file mode 100644
index 0000000000..6f30988229
--- /dev/null
+++ b/drivers/soc/tegra/Kconfig
@@ -0,0 +1,177 @@
+# SPDX-License-Identifier: GPL-2.0-only
+if ARCH_TEGRA
+
+# 32-bit ARM SoCs
+if ARM
+
+config ARCH_TEGRA_2x_SOC
+ bool "Enable support for Tegra20 family"
+ select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP
+ select ARM_ERRATA_720789
+ select ARM_ERRATA_754327 if SMP
+ select ARM_ERRATA_764369 if SMP
+ select PINCTRL_TEGRA20
+ select PL310_ERRATA_727915 if CACHE_L2X0
+ select PL310_ERRATA_769419 if CACHE_L2X0
+ select SOC_TEGRA_FLOWCTRL
+ select SOC_TEGRA_PMC
+ select SOC_TEGRA20_VOLTAGE_COUPLER if REGULATOR
+ select TEGRA_TIMER
+ help
+ Support for NVIDIA Tegra AP20 and T20 processors, based on the
+ ARM CortexA9MP CPU and the ARM PL310 L2 cache controller
+
+config ARCH_TEGRA_3x_SOC
+ bool "Enable support for Tegra30 family"
+ select ARM_ERRATA_754322
+ select ARM_ERRATA_764369 if SMP
+ select PINCTRL_TEGRA30
+ select PL310_ERRATA_769419 if CACHE_L2X0
+ select SOC_TEGRA_FLOWCTRL
+ select SOC_TEGRA_PMC
+ select SOC_TEGRA30_VOLTAGE_COUPLER if REGULATOR
+ select TEGRA_TIMER
+ help
+ Support for NVIDIA Tegra T30 processor family, based on the
+ ARM CortexA9MP CPU and the ARM PL310 L2 cache controller
+
+config ARCH_TEGRA_114_SOC
+ bool "Enable support for Tegra114 family"
+ select ARM_ERRATA_798181 if SMP
+ select HAVE_ARM_ARCH_TIMER
+ select PINCTRL_TEGRA114
+ select SOC_TEGRA_FLOWCTRL
+ select SOC_TEGRA_PMC
+ select TEGRA_TIMER
+ help
+ Support for NVIDIA Tegra T114 processor family, based on the
+ ARM CortexA15MP CPU
+
+config ARCH_TEGRA_124_SOC
+ bool "Enable support for Tegra124 family"
+ select HAVE_ARM_ARCH_TIMER
+ select PINCTRL_TEGRA124
+ select SOC_TEGRA_FLOWCTRL
+ select SOC_TEGRA_PMC
+ select TEGRA_TIMER
+ help
+ Support for NVIDIA Tegra T124 processor family, based on the
+ ARM CortexA15MP CPU
+
+endif
+
+# 64-bit ARM SoCs
+if ARM64
+
+config ARCH_TEGRA_132_SOC
+ bool "NVIDIA Tegra132 SoC"
+ select PINCTRL_TEGRA124
+ select SOC_TEGRA_FLOWCTRL
+ select SOC_TEGRA_PMC
+ help
+ Enable support for NVIDIA Tegra132 SoC, based on the Denver
+ ARMv8 CPU. The Tegra132 SoC is similar to the Tegra124 SoC,
+ but contains an NVIDIA Denver CPU complex in place of
+ Tegra124's "4+1" Cortex-A15 CPU complex.
+
+config ARCH_TEGRA_210_SOC
+ bool "NVIDIA Tegra210 SoC"
+ select PINCTRL_TEGRA210
+ select SOC_TEGRA_FLOWCTRL
+ select SOC_TEGRA_PMC
+ select TEGRA_TIMER
+ help
+ Enable support for the NVIDIA Tegra210 SoC. Also known as Tegra X1,
+ the Tegra210 has four Cortex-A57 cores paired with four Cortex-A53
+ cores in a switched configuration. It features a GPU of the Maxwell
+ architecture with support for DX11, SM4, OpenGL 4.5, OpenGL ES 3.1
+ and providing 256 CUDA cores. It supports hardware-accelerated en-
+ and decoding of various video standards including H.265, H.264 and
+ VP8 at 4K resolution and up to 60 fps.
+
+ Besides the multimedia features it also comes with a variety of I/O
+ controllers, such as GPIO, I2C, SPI, SDHCI, PCIe, SATA and XHCI, to
+ name only a few.
+
+config ARCH_TEGRA_186_SOC
+ bool "NVIDIA Tegra186 SoC"
+ depends on !CPU_BIG_ENDIAN
+ select MAILBOX
+ select TEGRA_BPMP
+ select TEGRA_HSP_MBOX
+ select TEGRA_IVC
+ select SOC_TEGRA_PMC
+ help
+ Enable support for the NVIDIA Tegra186 SoC. The Tegra186 features a
+ combination of Denver and Cortex-A57 CPU cores and a GPU based on
+ the Pascal architecture. It contains an ADSP with a Cortex-A9 CPU
+ used for audio processing, hardware video encoders/decoders with
+ multi-format support, ISP for image capture processing and BPMP for
+ power management.
+
+config ARCH_TEGRA_194_SOC
+ bool "NVIDIA Tegra194 SoC"
+ depends on !CPU_BIG_ENDIAN
+ select MAILBOX
+ select PINCTRL_TEGRA194
+ select TEGRA_BPMP
+ select TEGRA_HSP_MBOX
+ select TEGRA_IVC
+ select SOC_TEGRA_PMC
+ help
+ Enable support for the NVIDIA Tegra194 SoC.
+
+config ARCH_TEGRA_234_SOC
+ bool "NVIDIA Tegra234 SoC"
+ depends on !CPU_BIG_ENDIAN
+ select MAILBOX
+ select PINCTRL_TEGRA234
+ select TEGRA_BPMP
+ select TEGRA_HSP_MBOX
+ select TEGRA_IVC
+ select SOC_TEGRA_PMC
+ help
+ Enable support for the NVIDIA Tegra234 SoC.
+
+endif
+endif
+
+config SOC_TEGRA_FUSE
+ def_bool y
+ depends on ARCH_TEGRA
+ select SOC_BUS
+
+config SOC_TEGRA_FLOWCTRL
+ bool
+
+config SOC_TEGRA_PMC
+ bool
+ select GENERIC_PINCONF
+ select IRQ_DOMAIN_HIERARCHY
+ select PM_OPP
+ select PM_GENERIC_DOMAINS
+ select REGMAP
+
+config SOC_TEGRA_POWERGATE_BPMP
+ def_bool y
+ depends on PM_GENERIC_DOMAINS
+ depends on TEGRA_BPMP
+
+config SOC_TEGRA20_VOLTAGE_COUPLER
+ bool "Voltage scaling support for Tegra20 SoCs"
+ depends on ARCH_TEGRA_2x_SOC || COMPILE_TEST
+ depends on REGULATOR
+
+config SOC_TEGRA30_VOLTAGE_COUPLER
+ bool "Voltage scaling support for Tegra30 SoCs"
+ depends on ARCH_TEGRA_3x_SOC || COMPILE_TEST
+ depends on REGULATOR
+
+config SOC_TEGRA_CBB
+ tristate "Tegra driver to handle error from CBB"
+ depends on ARCH_TEGRA_194_SOC || ARCH_TEGRA_234_SOC
+ default y
+ help
+ Support for handling errors from the Tegra Control Backbone (CBB).
+ This driver handles the errors from the CBB and prints debug
+ information about the failed transactions.
diff --git a/drivers/soc/tegra/Makefile b/drivers/soc/tegra/Makefile
new file mode 100644
index 0000000000..01059619e7
--- /dev/null
+++ b/drivers/soc/tegra/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-y += fuse/
+obj-y += cbb/
+
+obj-y += common.o
+obj-$(CONFIG_SOC_TEGRA_FLOWCTRL) += flowctrl.o
+obj-$(CONFIG_SOC_TEGRA_PMC) += pmc.o
+obj-$(CONFIG_SOC_TEGRA20_VOLTAGE_COUPLER) += regulators-tegra20.o
+obj-$(CONFIG_SOC_TEGRA30_VOLTAGE_COUPLER) += regulators-tegra30.o
+obj-$(CONFIG_ARCH_TEGRA_186_SOC) += ari-tegra186.o
diff --git a/drivers/soc/tegra/ari-tegra186.c b/drivers/soc/tegra/ari-tegra186.c
new file mode 100644
index 0000000000..02577853ec
--- /dev/null
+++ b/drivers/soc/tegra/ari-tegra186.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/panic_notifier.h>
+
+#define SMC_SIP_INVOKE_MCE 0xc2ffff00
+#define MCE_SMC_READ_MCA 12
+
+#define MCA_ARI_CMD_RD_SERR 1
+
+#define MCA_ARI_RW_SUBIDX_STAT 1
+#define SERR_STATUS_VAL BIT_ULL(63)
+
+#define MCA_ARI_RW_SUBIDX_ADDR 2
+#define MCA_ARI_RW_SUBIDX_MSC1 3
+#define MCA_ARI_RW_SUBIDX_MSC2 4
+
+static const char * const bank_names[] = {
+ "SYS:DPMU", "ROC:IOB", "ROC:MCB", "ROC:CCE", "ROC:CQX", "ROC:CTU",
+};
+
+static void read_uncore_mca(u8 cmd, u8 idx, u8 subidx, u8 inst, u64 *data)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(SMC_SIP_INVOKE_MCE | MCE_SMC_READ_MCA,
+ ((u64)inst << 24) | ((u64)idx << 16) |
+ ((u64)subidx << 8) | ((u64)cmd << 0),
+ 0, 0, 0, 0, 0, 0, &res);
+
+ *data = res.a2;
+}
+
+static int tegra186_ari_panic_handler(struct notifier_block *nb,
+ unsigned long code, void *unused)
+{
+ u64 status;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bank_names); i++) {
+ read_uncore_mca(MCA_ARI_CMD_RD_SERR, i, MCA_ARI_RW_SUBIDX_STAT,
+ 0, &status);
+
+ if (status & SERR_STATUS_VAL) {
+ u64 addr, misc1, misc2;
+
+ read_uncore_mca(MCA_ARI_CMD_RD_SERR, i,
+ MCA_ARI_RW_SUBIDX_ADDR, 0, &addr);
+ read_uncore_mca(MCA_ARI_CMD_RD_SERR, i,
+ MCA_ARI_RW_SUBIDX_MSC1, 0, &misc1);
+ read_uncore_mca(MCA_ARI_CMD_RD_SERR, i,
+ MCA_ARI_RW_SUBIDX_MSC2, 0, &misc2);
+
+ pr_crit("Machine Check Error in %s\n"
+ " status=0x%llx addr=0x%llx\n"
+ " msc1=0x%llx msc2=0x%llx\n",
+ bank_names[i], status, addr, misc1, misc2);
+ }
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block tegra186_ari_panic_nb = {
+ .notifier_call = tegra186_ari_panic_handler,
+};
+
+static int __init tegra186_ari_init(void)
+{
+ if (of_machine_is_compatible("nvidia,tegra186"))
+ atomic_notifier_chain_register(&panic_notifier_list, &tegra186_ari_panic_nb);
+
+ return 0;
+}
+early_initcall(tegra186_ari_init);
diff --git a/drivers/soc/tegra/cbb/Makefile b/drivers/soc/tegra/cbb/Makefile
new file mode 100644
index 0000000000..e3ac6cdddf
--- /dev/null
+++ b/drivers/soc/tegra/cbb/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Control Backbone Driver code.
+#
+ifdef CONFIG_SOC_TEGRA_CBB
+obj-y += tegra-cbb.o
+obj-$(CONFIG_ARCH_TEGRA_194_SOC) += tegra194-cbb.o
+obj-$(CONFIG_ARCH_TEGRA_234_SOC) += tegra234-cbb.o
+endif
diff --git a/drivers/soc/tegra/cbb/tegra-cbb.c b/drivers/soc/tegra/cbb/tegra-cbb.c
new file mode 100644
index 0000000000..84ab46c9d9
--- /dev/null
+++ b/drivers/soc/tegra/cbb/tegra-cbb.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved
+ */
+
+#include <linux/clk.h>
+#include <linux/cpufeature.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <soc/tegra/fuse.h>
+#include <soc/tegra/tegra-cbb.h>
+
+void tegra_cbb_print_err(struct seq_file *file, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ if (file) {
+ seq_vprintf(file, fmt, args);
+ } else {
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ pr_crit("%pV", &vaf);
+ }
+
+ va_end(args);
+}
+
+void tegra_cbb_print_cache(struct seq_file *file, u32 cache)
+{
+ const char *buff_str, *mod_str, *rd_str, *wr_str;
+
+ buff_str = (cache & BIT(0)) ? "Bufferable " : "";
+ mod_str = (cache & BIT(1)) ? "Modifiable " : "";
+ rd_str = (cache & BIT(2)) ? "Read-Allocate " : "";
+ wr_str = (cache & BIT(3)) ? "Write-Allocate" : "";
+
+ if (cache == 0x0)
+ buff_str = "Device Non-Bufferable";
+
+ tegra_cbb_print_err(file, "\t Cache\t\t\t: 0x%x -- %s%s%s%s\n",
+ cache, buff_str, mod_str, rd_str, wr_str);
+}
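+
+/*
+ * Decoding example for the helper above (illustrative values): cache = 0x7
+ * sets bits 0..2, so the attributes printed are "Bufferable Modifiable
+ * Read-Allocate"; cache = 0x0 is reported as "Device Non-Bufferable".
+ */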
+
+void tegra_cbb_print_prot(struct seq_file *file, u32 prot)
+{
+ const char *data_str, *secure_str, *priv_str;
+
+ data_str = (prot & 0x4) ? "Instruction" : "Data";
+ secure_str = (prot & 0x2) ? "Non-Secure" : "Secure";
+ priv_str = (prot & 0x1) ? "Privileged" : "Unprivileged";
+
+ tegra_cbb_print_err(file, "\t Protection\t\t: 0x%x -- %s, %s, %s Access\n",
+ prot, priv_str, secure_str, data_str);
+}
+
+static int tegra_cbb_err_show(struct seq_file *file, void *data)
+{
+ struct tegra_cbb *cbb = file->private;
+
+ return cbb->ops->debugfs_show(cbb, file, data);
+}
+DEFINE_SHOW_ATTRIBUTE(tegra_cbb_err);
+
+static int tegra_cbb_err_debugfs_init(struct tegra_cbb *cbb)
+{
+ static struct dentry *root;
+
+ if (!root) {
+ root = debugfs_create_file("tegra_cbb_err", 0444, NULL, cbb, &tegra_cbb_err_fops);
+ if (IS_ERR_OR_NULL(root)) {
+ pr_err("%s(): could not create debugfs node\n", __func__);
+ return PTR_ERR(root);
+ }
+ }
+
+ return 0;
+}
+
+void tegra_cbb_stall_enable(struct tegra_cbb *cbb)
+{
+ if (cbb->ops->stall_enable)
+ cbb->ops->stall_enable(cbb);
+}
+
+void tegra_cbb_fault_enable(struct tegra_cbb *cbb)
+{
+ if (cbb->ops->fault_enable)
+ cbb->ops->fault_enable(cbb);
+}
+
+void tegra_cbb_error_clear(struct tegra_cbb *cbb)
+{
+ if (cbb->ops->error_clear)
+ cbb->ops->error_clear(cbb);
+}
+
+u32 tegra_cbb_get_status(struct tegra_cbb *cbb)
+{
+ if (cbb->ops->get_status)
+ return cbb->ops->get_status(cbb);
+
+ return 0;
+}
+
+int tegra_cbb_get_irq(struct platform_device *pdev, unsigned int *nonsec_irq,
+ unsigned int *sec_irq)
+{
+ unsigned int index = 0;
+ int num_intr = 0, irq;
+
+ num_intr = platform_irq_count(pdev);
+ if (!num_intr)
+ return -EINVAL;
+
+ if (num_intr == 2) {
+ irq = platform_get_irq(pdev, index);
+ if (irq <= 0)
+ return -ENOENT;
+
+ *nonsec_irq = irq;
+ index++;
+ }
+
+ irq = platform_get_irq(pdev, index);
+ if (irq <= 0)
+ return -ENOENT;
+
+ *sec_irq = irq;
+
+ if (num_intr == 1)
+ dev_dbg(&pdev->dev, "secure IRQ: %u\n", *sec_irq);
+
+ if (num_intr == 2)
+ dev_dbg(&pdev->dev, "secure IRQ: %u, non-secure IRQ: %u\n", *sec_irq, *nonsec_irq);
+
+ return 0;
+}
+
+int tegra_cbb_register(struct tegra_cbb *cbb)
+{
+ int ret;
+
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+ ret = tegra_cbb_err_debugfs_init(cbb);
+ if (ret) {
+ dev_err(cbb->dev, "failed to create debugfs\n");
+ return ret;
+ }
+ }
+
+ /* register interrupt handler for errors due to different initiators */
+ ret = cbb->ops->interrupt_enable(cbb);
+ if (ret < 0) {
+ dev_err(cbb->dev, "Failed to register CBB Interrupt ISR");
+ return ret;
+ }
+
+ cbb->ops->error_enable(cbb);
+ dsb(sy);
+
+ return 0;
+}
diff --git a/drivers/soc/tegra/cbb/tegra194-cbb.c b/drivers/soc/tegra/cbb/tegra194-cbb.c
new file mode 100644
index 0000000000..cf6886f362
--- /dev/null
+++ b/drivers/soc/tegra/cbb/tegra194-cbb.c
@@ -0,0 +1,2356 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved
+ *
+ * The driver handles errors from the Control Backbone (CBB) generated due
+ * to illegal accesses. When an error is reported from a NOC within the CBB,
+ * the driver checks the ErrVld status of all three error loggers of that
+ * NOC. It then prints debug information about the failed transaction using
+ * the ErrLog registers of the error logger which has ErrVld set. Currently,
+ * SLV, DEC, TMO, SEC and UNS are the error codes supported by the CBB.
+ */
+
+#include <linux/clk.h>
+#include <linux/cpufeature.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <soc/tegra/fuse.h>
+#include <soc/tegra/tegra-cbb.h>
+
+#define ERRLOGGER_0_ID_COREID_0 0x00000000
+#define ERRLOGGER_0_ID_REVISIONID_0 0x00000004
+#define ERRLOGGER_0_FAULTEN_0 0x00000008
+#define ERRLOGGER_0_ERRVLD_0 0x0000000c
+#define ERRLOGGER_0_ERRCLR_0 0x00000010
+#define ERRLOGGER_0_ERRLOG0_0 0x00000014
+#define ERRLOGGER_0_ERRLOG1_0 0x00000018
+#define ERRLOGGER_0_RSVD_00_0 0x0000001c
+#define ERRLOGGER_0_ERRLOG3_0 0x00000020
+#define ERRLOGGER_0_ERRLOG4_0 0x00000024
+#define ERRLOGGER_0_ERRLOG5_0 0x00000028
+#define ERRLOGGER_0_STALLEN_0 0x00000038
+
+#define ERRLOGGER_1_ID_COREID_0 0x00000080
+#define ERRLOGGER_1_ID_REVISIONID_0 0x00000084
+#define ERRLOGGER_1_FAULTEN_0 0x00000088
+#define ERRLOGGER_1_ERRVLD_0 0x0000008c
+#define ERRLOGGER_1_ERRCLR_0 0x00000090
+#define ERRLOGGER_1_ERRLOG0_0 0x00000094
+#define ERRLOGGER_1_ERRLOG1_0 0x00000098
+#define ERRLOGGER_1_RSVD_00_0 0x0000009c
+#define ERRLOGGER_1_ERRLOG3_0 0x000000a0
+#define ERRLOGGER_1_ERRLOG4_0 0x000000a4
+#define ERRLOGGER_1_ERRLOG5_0 0x000000a8
+#define ERRLOGGER_1_STALLEN_0 0x000000b8
+
+#define ERRLOGGER_2_ID_COREID_0 0x00000100
+#define ERRLOGGER_2_ID_REVISIONID_0 0x00000104
+#define ERRLOGGER_2_FAULTEN_0 0x00000108
+#define ERRLOGGER_2_ERRVLD_0 0x0000010c
+#define ERRLOGGER_2_ERRCLR_0 0x00000110
+#define ERRLOGGER_2_ERRLOG0_0 0x00000114
+#define ERRLOGGER_2_ERRLOG1_0 0x00000118
+#define ERRLOGGER_2_RSVD_00_0 0x0000011c
+#define ERRLOGGER_2_ERRLOG3_0 0x00000120
+#define ERRLOGGER_2_ERRLOG4_0 0x00000124
+#define ERRLOGGER_2_ERRLOG5_0 0x00000128
+#define ERRLOGGER_2_STALLEN_0 0x00000138
+
+#define CBB_NOC_INITFLOW GENMASK(23, 20)
+#define CBB_NOC_TARGFLOW GENMASK(19, 16)
+#define CBB_NOC_TARG_SUBRANGE GENMASK(15, 9)
+#define CBB_NOC_SEQID GENMASK(8, 0)
+
+#define BPMP_NOC_INITFLOW GENMASK(20, 18)
+#define BPMP_NOC_TARGFLOW GENMASK(17, 13)
+#define BPMP_NOC_TARG_SUBRANGE GENMASK(12, 9)
+#define BPMP_NOC_SEQID GENMASK(8, 0)
+
+#define AON_NOC_INITFLOW GENMASK(22, 21)
+#define AON_NOC_TARGFLOW GENMASK(20, 15)
+#define AON_NOC_TARG_SUBRANGE GENMASK(14, 9)
+#define AON_NOC_SEQID GENMASK(8, 0)
+
+#define SCE_NOC_INITFLOW GENMASK(21, 19)
+#define SCE_NOC_TARGFLOW GENMASK(18, 14)
+#define SCE_NOC_TARG_SUBRANGE GENMASK(13, 9)
+#define SCE_NOC_SEQID GENMASK(8, 0)
+
+#define CBB_NOC_AXCACHE GENMASK(3, 0)
+#define CBB_NOC_NON_MOD GENMASK(4, 4)
+#define CBB_NOC_AXPROT GENMASK(7, 5)
+#define CBB_NOC_FALCONSEC GENMASK(9, 8)
+#define CBB_NOC_GRPSEC GENMASK(16, 10)
+#define CBB_NOC_VQC GENMASK(18, 17)
+#define CBB_NOC_MSTR_ID GENMASK(22, 19)
+#define CBB_NOC_AXI_ID GENMASK(30, 23)
+
+#define CLUSTER_NOC_AXCACHE GENMASK(3, 0)
+#define CLUSTER_NOC_AXPROT GENMASK(6, 4)
+#define CLUSTER_NOC_FALCONSEC GENMASK(8, 7)
+#define CLUSTER_NOC_GRPSEC GENMASK(15, 9)
+#define CLUSTER_NOC_VQC GENMASK(17, 16)
+#define CLUSTER_NOC_MSTR_ID GENMASK(21, 18)
+
+#define CBB_ERR_OPC GENMASK(4, 1)
+#define CBB_ERR_ERRCODE GENMASK(10, 8)
+#define CBB_ERR_LEN1 GENMASK(27, 16)
+
+#define DMAAPB_X_RAW_INTERRUPT_STATUS 0x2ec
+
+struct tegra194_cbb_packet_header {
+ bool lock; // [0]
+ u8 opc; // [4:1]
+ u8 errcode; // [10:8]= RD, RDW, RDL, RDX, WR, WRW, WRC, PRE, URG
+ u16 len1; // [27:16]
+ bool format; // [31] = 1 -> FlexNoC versions 2.7 & above
+};
+
+struct tegra194_cbb_aperture {
+ u8 initflow;
+ u8 targflow;
+ u8 targ_subrange;
+ u8 init_mapping;
+ u32 init_localaddress;
+ u8 targ_mapping;
+ u32 targ_localaddress;
+ u16 seqid;
+};
+
+struct tegra194_cbb_userbits {
+ u8 axcache;
+ u8 non_mod;
+ u8 axprot;
+ u8 falconsec;
+ u8 grpsec;
+ u8 vqc;
+ u8 mstr_id;
+ u8 axi_id;
+};
+
+struct tegra194_cbb_noc_data {
+ const char *name;
+ bool erd_mask_inband_err;
+ const char * const *master_id;
+ unsigned int max_aperture;
+ const struct tegra194_cbb_aperture *noc_aperture;
+ const char * const *routeid_initflow;
+ const char * const *routeid_targflow;
+ void (*parse_routeid)(struct tegra194_cbb_aperture *info, u64 routeid);
+ void (*parse_userbits)(struct tegra194_cbb_userbits *usrbits, u32 elog_5);
+};
+
+struct tegra194_axi2apb_bridge {
+ struct resource res;
+ void __iomem *base;
+};
+
+struct tegra194_cbb {
+ struct tegra_cbb base;
+
+ const struct tegra194_cbb_noc_data *noc;
+ struct resource *res;
+
+ void __iomem *regs;
+ unsigned int num_intr;
+ unsigned int sec_irq;
+ unsigned int nonsec_irq;
+ u32 errlog0;
+ u32 errlog1;
+ u32 errlog2;
+ u32 errlog3;
+ u32 errlog4;
+ u32 errlog5;
+
+ struct tegra194_axi2apb_bridge *bridges;
+ unsigned int num_bridges;
+};
+
+static inline struct tegra194_cbb *to_tegra194_cbb(struct tegra_cbb *cbb)
+{
+ return container_of(cbb, struct tegra194_cbb, base);
+}
+
+static LIST_HEAD(cbb_list);
+static DEFINE_SPINLOCK(cbb_lock);
+
+static const char * const tegra194_cbb_trantype[] = {
+ "RD - Read, Incrementing",
+ "RDW - Read, Wrap", /* Not Supported */
+ "RDX - Exclusive Read", /* Not Supported */
+ "RDL - Linked Read", /* Not Supported */
+ "WR - Write, Incrementing",
+ "WRW - Write, Wrap", /* Not Supported */
+ "WRC - Exclusive Write", /* Not Supported */
+ "PRE - Preamble Sequence for Fixed Accesses"
+};
+
+static const char * const tegra194_axi2apb_error[] = {
+ "SFIFONE - Status FIFO Not Empty interrupt",
+ "SFIFOF - Status FIFO Full interrupt",
+ "TIM - Timer(Timeout) interrupt",
+ "SLV - SLVERR interrupt",
+ "NULL",
+ "ERBF - Early response buffer Full interrupt",
+ "NULL",
+ "RDFIFOF - Read Response FIFO Full interrupt",
+ "WRFIFOF - Write Response FIFO Full interrupt",
+ "CH0DFIFOF - Ch0 Data FIFO Full interrupt",
+ "CH1DFIFOF - Ch1 Data FIFO Full interrupt",
+ "CH2DFIFOF - Ch2 Data FIFO Full interrupt",
+ "UAT - Unsupported alignment type error",
+ "UBS - Unsupported burst size error",
+ "UBE - Unsupported Byte Enable error",
+ "UBT - Unsupported burst type error",
+ "BFS - Block Firewall security error",
+ "ARFS - Address Range Firewall security error",
+ "CH0RFIFOF - Ch0 Request FIFO Full interrupt",
+ "CH1RFIFOF - Ch1 Request FIFO Full interrupt",
+ "CH2RFIFOF - Ch2 Request FIFO Full interrupt"
+};
+
+static const char * const tegra194_master_id[] = {
+ [0x0] = "CCPLEX",
+ [0x1] = "CCPLEX_DPMU",
+ [0x2] = "BPMP",
+ [0x3] = "AON",
+ [0x4] = "SCE",
+ [0x5] = "GPCDMA_PERIPHERAL",
+ [0x6] = "TSECA",
+ [0x7] = "TSECB",
+ [0x8] = "JTAGM_DFT",
+ [0x9] = "CORESIGHT_AXIAP",
+ [0xa] = "APE",
+ [0xb] = "PEATR",
+ [0xc] = "NVDEC",
+ [0xd] = "RCE",
+ [0xe] = "NVDEC1"
+};
+
+static const struct tegra_cbb_error tegra194_cbb_errors[] = {
+ {
+ .code = "SLV",
+ .source = "Target",
+ .desc = "Target error detected by CBB slave"
+ }, {
+ .code = "DEC",
+ .source = "Initiator NIU",
+ .desc = "Address decode error"
+ }, {
+ .code = "UNS",
+ .source = "Target NIU",
+ .desc = "Unsupported request. Not a valid transaction"
+ }, {
+ .code = "DISC", /* Not Supported by CBB */
+ .source = "Power Disconnect",
+ .desc = "Disconnected target or domain"
+ }, {
+ .code = "SEC",
+ .source = "Initiator NIU or Firewall",
+ .desc = "Security violation. Firewall error"
+ }, {
+ .code = "HIDE", /* Not Supported by CBB */
+ .source = "Firewall",
+ .desc = "Hidden security violation, reported as OK to initiator"
+ }, {
+ .code = "TMO",
+ .source = "Target NIU",
+ .desc = "Target time-out error"
+ }, {
+ .code = "RSV",
+ .source = "None",
+ .desc = "Reserved"
+ }
+};
+
+/*
+ * CBB NOC aperture lookup table as per file "cbb_central_noc_Structure.info".
+ */
+static const char * const tegra194_cbbcentralnoc_routeid_initflow[] = {
+ [0x0] = "aon_p2ps/I/aon",
+ [0x1] = "ape_p2ps/I/ape_p2ps",
+ [0x2] = "bpmp_p2ps/I/bpmp_p2ps",
+ [0x3] = "ccroc_p2ps/I/ccroc_p2ps",
+ [0x4] = "csite_p2ps/I/0",
+ [0x5] = "gpcdma_mmio_p2ps/I/0",
+ [0x6] = "jtag_p2ps/I/0",
+ [0x7] = "nvdec1_p2ps/I/0",
+ [0x8] = "nvdec_p2ps/I/0",
+ [0x9] = "rce_p2ps/I/rce_p2ps",
+ [0xa] = "sce_p2ps/I/sce_p2ps",
+ [0xb] = "tseca_p2ps/I/0",
+ [0xc] = "tsecb_p2ps/I/0",
+ [0xd] = "RESERVED",
+ [0xe] = "RESERVED",
+ [0xf] = "RESERVED"
+};
+
+static const char * const tegra194_cbbcentralnoc_routeid_targflow[] = {
+ [0x0] = "SVC/T/intreg",
+ [0x1] = "axis_satellite_axi2apb_p2pm/T/axis_satellite_axi2apb_p2pm",
+ [0x2] = "axis_satellite_grout/T/axis_satellite_grout",
+ [0x3] = "cbb_firewall/T/cbb_firewall",
+ [0x4] = "gpu_p2pm/T/gpu_p2pm",
+ [0x5] = "host1x_p2pm/T/host1x_p2pm",
+ [0x6] = "sapb_3_p2pm/T/sapb_3_p2pm",
+ [0x7] = "smmu0_p2pm/T/smmu0_p2pm",
+ [0x8] = "smmu1_p2pm/T/smmu1_p2pm",
+ [0x9] = "smmu2_p2pm/T/smmu2_p2pm",
+ [0xa] = "stm_p2pm/T/stm_p2pm",
+ [0xb] = "RESERVED",
+ [0xc] = "RESERVED",
+ [0xd] = "RESERVED",
+ [0xe] = "RESERVED",
+ [0xf] = "RESERVED"
+};
+
+/*
+ * Fields of CBB NOC lookup table:
+ * Init flow, Targ flow, Targ subrange, Init mapping, Init localAddress,
+ * Targ mapping, Targ localAddress
+ * ----------------------------------------------------------------------------
+ */
+static const struct tegra194_cbb_aperture tegra194_cbbcentralnoc_apert_lookup[] = {
+ { 0x0, 0x0, 0x00, 0x0, 0x02300000, 0, 0x00000000 },
+ { 0x0, 0x1, 0x00, 0x0, 0x02003000, 0, 0x02003000 },
+ { 0x0, 0x1, 0x01, 0x0, 0x02006000, 2, 0x02006000 },
+ { 0x0, 0x1, 0x02, 0x0, 0x02016000, 3, 0x02016000 },
+ { 0x0, 0x1, 0x03, 0x0, 0x0201d000, 4, 0x0201d000 },
+ { 0x0, 0x1, 0x04, 0x0, 0x0202b000, 6, 0x0202b000 },
+ { 0x0, 0x1, 0x05, 0x0, 0x02434000, 20, 0x02434000 },
+ { 0x0, 0x1, 0x06, 0x0, 0x02436000, 21, 0x02436000 },
+ { 0x0, 0x1, 0x07, 0x0, 0x02438000, 22, 0x02438000 },
+ { 0x0, 0x1, 0x08, 0x0, 0x02445000, 24, 0x02445000 },
+ { 0x0, 0x1, 0x09, 0x0, 0x02446000, 25, 0x02446000 },
+ { 0x0, 0x1, 0x0a, 0x0, 0x02004000, 1, 0x02004000 },
+ { 0x0, 0x1, 0x0b, 0x0, 0x0201e000, 5, 0x0201e000 },
+ { 0x0, 0x1, 0x0c, 0x0, 0x0202c000, 7, 0x0202c000 },
+ { 0x0, 0x1, 0x0d, 0x0, 0x02204000, 8, 0x02204000 },
+ { 0x0, 0x1, 0x0e, 0x0, 0x02214000, 9, 0x02214000 },
+ { 0x0, 0x1, 0x0f, 0x0, 0x02224000, 10, 0x02224000 },
+ { 0x0, 0x1, 0x10, 0x0, 0x02234000, 11, 0x02234000 },
+ { 0x0, 0x1, 0x11, 0x0, 0x02244000, 12, 0x02244000 },
+ { 0x0, 0x1, 0x12, 0x0, 0x02254000, 13, 0x02254000 },
+ { 0x0, 0x1, 0x13, 0x0, 0x02264000, 14, 0x02264000 },
+ { 0x0, 0x1, 0x14, 0x0, 0x02274000, 15, 0x02274000 },
+ { 0x0, 0x1, 0x15, 0x0, 0x02284000, 16, 0x02284000 },
+ { 0x0, 0x1, 0x16, 0x0, 0x0243a000, 23, 0x0243a000 },
+ { 0x0, 0x1, 0x17, 0x0, 0x02370000, 17, 0x02370000 },
+ { 0x0, 0x1, 0x18, 0x0, 0x023d0000, 18, 0x023d0000 },
+ { 0x0, 0x1, 0x19, 0x0, 0x023e0000, 19, 0x023e0000 },
+ { 0x0, 0x1, 0x1a, 0x0, 0x02450000, 26, 0x02450000 },
+ { 0x0, 0x1, 0x1b, 0x0, 0x02460000, 27, 0x02460000 },
+ { 0x0, 0x1, 0x1c, 0x0, 0x02490000, 28, 0x02490000 },
+ { 0x0, 0x1, 0x1d, 0x0, 0x03130000, 31, 0x03130000 },
+ { 0x0, 0x1, 0x1e, 0x0, 0x03160000, 32, 0x03160000 },
+ { 0x0, 0x1, 0x1f, 0x0, 0x03270000, 33, 0x03270000 },
+ { 0x0, 0x1, 0x20, 0x0, 0x032e0000, 35, 0x032e0000 },
+ { 0x0, 0x1, 0x21, 0x0, 0x03300000, 36, 0x03300000 },
+ { 0x0, 0x1, 0x22, 0x0, 0x13090000, 40, 0x13090000 },
+ { 0x0, 0x1, 0x23, 0x0, 0x20120000, 43, 0x20120000 },
+ { 0x0, 0x1, 0x24, 0x0, 0x20170000, 44, 0x20170000 },
+ { 0x0, 0x1, 0x25, 0x0, 0x20190000, 45, 0x20190000 },
+ { 0x0, 0x1, 0x26, 0x0, 0x201b0000, 46, 0x201b0000 },
+ { 0x0, 0x1, 0x27, 0x0, 0x20250000, 47, 0x20250000 },
+ { 0x0, 0x1, 0x28, 0x0, 0x20260000, 48, 0x20260000 },
+ { 0x0, 0x1, 0x29, 0x0, 0x20420000, 49, 0x20420000 },
+ { 0x0, 0x1, 0x2a, 0x0, 0x20460000, 50, 0x20460000 },
+ { 0x0, 0x1, 0x2b, 0x0, 0x204f0000, 51, 0x204f0000 },
+ { 0x0, 0x1, 0x2c, 0x0, 0x20520000, 52, 0x20520000 },
+ { 0x0, 0x1, 0x2d, 0x0, 0x20580000, 53, 0x20580000 },
+ { 0x0, 0x1, 0x2e, 0x0, 0x205a0000, 54, 0x205a0000 },
+ { 0x0, 0x1, 0x2f, 0x0, 0x205c0000, 55, 0x205c0000 },
+ { 0x0, 0x1, 0x30, 0x0, 0x20690000, 56, 0x20690000 },
+ { 0x0, 0x1, 0x31, 0x0, 0x20770000, 57, 0x20770000 },
+ { 0x0, 0x1, 0x32, 0x0, 0x20790000, 58, 0x20790000 },
+ { 0x0, 0x1, 0x33, 0x0, 0x20880000, 59, 0x20880000 },
+ { 0x0, 0x1, 0x34, 0x0, 0x20990000, 62, 0x20990000 },
+ { 0x0, 0x1, 0x35, 0x0, 0x20e10000, 65, 0x20e10000 },
+ { 0x0, 0x1, 0x36, 0x0, 0x20e70000, 66, 0x20e70000 },
+ { 0x0, 0x1, 0x37, 0x0, 0x20e80000, 67, 0x20e80000 },
+ { 0x0, 0x1, 0x38, 0x0, 0x20f30000, 68, 0x20f30000 },
+ { 0x0, 0x1, 0x39, 0x0, 0x20f50000, 69, 0x20f50000 },
+ { 0x0, 0x1, 0x3a, 0x0, 0x20fc0000, 70, 0x20fc0000 },
+ { 0x0, 0x1, 0x3b, 0x0, 0x21110000, 72, 0x21110000 },
+ { 0x0, 0x1, 0x3c, 0x0, 0x21270000, 73, 0x21270000 },
+ { 0x0, 0x1, 0x3d, 0x0, 0x21290000, 74, 0x21290000 },
+ { 0x0, 0x1, 0x3e, 0x0, 0x21840000, 75, 0x21840000 },
+ { 0x0, 0x1, 0x3f, 0x0, 0x21880000, 76, 0x21880000 },
+ { 0x0, 0x1, 0x40, 0x0, 0x218d0000, 77, 0x218d0000 },
+ { 0x0, 0x1, 0x41, 0x0, 0x21950000, 78, 0x21950000 },
+ { 0x0, 0x1, 0x42, 0x0, 0x21960000, 79, 0x21960000 },
+ { 0x0, 0x1, 0x43, 0x0, 0x21a10000, 80, 0x21a10000 },
+ { 0x0, 0x1, 0x44, 0x0, 0x024a0000, 29, 0x024a0000 },
+ { 0x0, 0x1, 0x45, 0x0, 0x024c0000, 30, 0x024c0000 },
+ { 0x0, 0x1, 0x46, 0x0, 0x032c0000, 34, 0x032c0000 },
+ { 0x0, 0x1, 0x47, 0x0, 0x03400000, 37, 0x03400000 },
+ { 0x0, 0x1, 0x48, 0x0, 0x130a0000, 41, 0x130a0000 },
+ { 0x0, 0x1, 0x49, 0x0, 0x130c0000, 42, 0x130c0000 },
+ { 0x0, 0x1, 0x4a, 0x0, 0x208a0000, 60, 0x208a0000 },
+ { 0x0, 0x1, 0x4b, 0x0, 0x208c0000, 61, 0x208c0000 },
+ { 0x0, 0x1, 0x4c, 0x0, 0x209a0000, 63, 0x209a0000 },
+ { 0x0, 0x1, 0x4d, 0x0, 0x21a40000, 81, 0x21a40000 },
+ { 0x0, 0x1, 0x4e, 0x0, 0x03440000, 38, 0x03440000 },
+ { 0x0, 0x1, 0x4f, 0x0, 0x20d00000, 64, 0x20d00000 },
+ { 0x0, 0x1, 0x50, 0x0, 0x21000000, 71, 0x21000000 },
+ { 0x0, 0x1, 0x51, 0x0, 0x0b000000, 39, 0x0b000000 },
+ { 0x0, 0x2, 0x00, 0x0, 0x00000000, 0, 0x00000000 },
+ { 0x0, 0x3, 0x00, 0x0, 0x02340000, 0, 0x00000000 },
+ { 0x0, 0x4, 0x00, 0x0, 0x17000000, 0, 0x17000000 },
+ { 0x0, 0x4, 0x01, 0x0, 0x18000000, 1, 0x18000000 },
+ { 0x0, 0x5, 0x00, 0x0, 0x13e80000, 1, 0x13e80000 },
+ { 0x0, 0x5, 0x01, 0x0, 0x15810000, 12, 0x15810000 },
+ { 0x0, 0x5, 0x02, 0x0, 0x15840000, 14, 0x15840000 },
+ { 0x0, 0x5, 0x03, 0x0, 0x15a40000, 17, 0x15a40000 },
+ { 0x0, 0x5, 0x04, 0x0, 0x13f00000, 3, 0x13f00000 },
+ { 0x0, 0x5, 0x05, 0x0, 0x15820000, 13, 0x15820000 },
+ { 0x0, 0x5, 0x06, 0x0, 0x13ec0000, 2, 0x13ec0000 },
+ { 0x0, 0x5, 0x07, 0x0, 0x15200000, 6, 0x15200000 },
+ { 0x0, 0x5, 0x08, 0x0, 0x15340000, 7, 0x15340000 },
+ { 0x0, 0x5, 0x09, 0x0, 0x15380000, 8, 0x15380000 },
+ { 0x0, 0x5, 0x0a, 0x0, 0x15500000, 10, 0x15500000 },
+ { 0x0, 0x5, 0x0b, 0x0, 0x155c0000, 11, 0x155c0000 },
+ { 0x0, 0x5, 0x0c, 0x0, 0x15a00000, 16, 0x15a00000 },
+ { 0x0, 0x5, 0x0d, 0x0, 0x13e00000, 0, 0x13e00000 },
+ { 0x0, 0x5, 0x0e, 0x0, 0x15100000, 5, 0x15100000 },
+ { 0x0, 0x5, 0x0f, 0x0, 0x15480000, 9, 0x15480000 },
+ { 0x0, 0x5, 0x10, 0x0, 0x15880000, 15, 0x15880000 },
+ { 0x0, 0x5, 0x11, 0x0, 0x15a80000, 18, 0x15a80000 },
+ { 0x0, 0x5, 0x12, 0x0, 0x15b00000, 19, 0x15b00000 },
+ { 0x0, 0x5, 0x13, 0x0, 0x14800000, 4, 0x14800000 },
+ { 0x0, 0x5, 0x14, 0x0, 0x15c00000, 20, 0x15c00000 },
+ { 0x0, 0x5, 0x15, 0x0, 0x16000000, 21, 0x16000000 },
+ { 0x0, 0x6, 0x00, 0x0, 0x02000000, 4, 0x02000000 },
+ { 0x0, 0x6, 0x01, 0x0, 0x02007000, 5, 0x02007000 },
+ { 0x0, 0x6, 0x02, 0x0, 0x02008000, 6, 0x02008000 },
+ { 0x0, 0x6, 0x03, 0x0, 0x02013000, 7, 0x02013000 },
+ { 0x0, 0x6, 0x04, 0x0, 0x0201c000, 8, 0x0201c000 },
+ { 0x0, 0x6, 0x05, 0x0, 0x02020000, 9, 0x02020000 },
+ { 0x0, 0x6, 0x06, 0x0, 0x0202a000, 10, 0x0202a000 },
+ { 0x0, 0x6, 0x07, 0x0, 0x0202e000, 11, 0x0202e000 },
+ { 0x0, 0x6, 0x08, 0x0, 0x06400000, 33, 0x06400000 },
+ { 0x0, 0x6, 0x09, 0x0, 0x02038000, 12, 0x02038000 },
+ { 0x0, 0x6, 0x0a, 0x0, 0x00100000, 0, 0x00100000 },
+ { 0x0, 0x6, 0x0b, 0x0, 0x023b0000, 13, 0x023b0000 },
+ { 0x0, 0x6, 0x0c, 0x0, 0x02800000, 16, 0x02800000 },
+ { 0x0, 0x6, 0x0d, 0x0, 0x030e0000, 22, 0x030e0000 },
+ { 0x0, 0x6, 0x0e, 0x0, 0x03800000, 23, 0x03800000 },
+ { 0x0, 0x6, 0x0f, 0x0, 0x03980000, 25, 0x03980000 },
+ { 0x0, 0x6, 0x10, 0x0, 0x03a60000, 26, 0x03a60000 },
+ { 0x0, 0x6, 0x11, 0x0, 0x03d80000, 31, 0x03d80000 },
+ { 0x0, 0x6, 0x12, 0x0, 0x20000000, 36, 0x20000000 },
+ { 0x0, 0x6, 0x13, 0x0, 0x20050000, 38, 0x20050000 },
+ { 0x0, 0x6, 0x14, 0x0, 0x201e0000, 40, 0x201e0000 },
+ { 0x0, 0x6, 0x15, 0x0, 0x20280000, 42, 0x20280000 },
+ { 0x0, 0x6, 0x16, 0x0, 0x202c0000, 43, 0x202c0000 },
+ { 0x0, 0x6, 0x17, 0x0, 0x20390000, 44, 0x20390000 },
+ { 0x0, 0x6, 0x18, 0x0, 0x20430000, 45, 0x20430000 },
+ { 0x0, 0x6, 0x19, 0x0, 0x20440000, 46, 0x20440000 },
+ { 0x0, 0x6, 0x1a, 0x0, 0x204e0000, 47, 0x204e0000 },
+ { 0x0, 0x6, 0x1b, 0x0, 0x20550000, 48, 0x20550000 },
+ { 0x0, 0x6, 0x1c, 0x0, 0x20570000, 49, 0x20570000 },
+ { 0x0, 0x6, 0x1d, 0x0, 0x20590000, 50, 0x20590000 },
+ { 0x0, 0x6, 0x1e, 0x0, 0x20730000, 52, 0x20730000 },
+ { 0x0, 0x6, 0x1f, 0x0, 0x209f0000, 54, 0x209f0000 },
+ { 0x0, 0x6, 0x20, 0x0, 0x20e20000, 55, 0x20e20000 },
+ { 0x0, 0x6, 0x21, 0x0, 0x20ed0000, 56, 0x20ed0000 },
+ { 0x0, 0x6, 0x22, 0x0, 0x20fd0000, 57, 0x20fd0000 },
+ { 0x0, 0x6, 0x23, 0x0, 0x21120000, 59, 0x21120000 },
+ { 0x0, 0x6, 0x24, 0x0, 0x211a0000, 60, 0x211a0000 },
+ { 0x0, 0x6, 0x25, 0x0, 0x21850000, 61, 0x21850000 },
+ { 0x0, 0x6, 0x26, 0x0, 0x21860000, 62, 0x21860000 },
+ { 0x0, 0x6, 0x27, 0x0, 0x21890000, 63, 0x21890000 },
+ { 0x0, 0x6, 0x28, 0x0, 0x21970000, 64, 0x21970000 },
+ { 0x0, 0x6, 0x29, 0x0, 0x21990000, 65, 0x21990000 },
+ { 0x0, 0x6, 0x2a, 0x0, 0x21a00000, 66, 0x21a00000 },
+ { 0x0, 0x6, 0x2b, 0x0, 0x21a90000, 68, 0x21a90000 },
+ { 0x0, 0x6, 0x2c, 0x0, 0x21ac0000, 70, 0x21ac0000 },
+ { 0x0, 0x6, 0x2d, 0x0, 0x01f80000, 3, 0x01f80000 },
+ { 0x0, 0x6, 0x2e, 0x0, 0x024e0000, 14, 0x024e0000 },
+ { 0x0, 0x6, 0x2f, 0x0, 0x030c0000, 21, 0x030c0000 },
+ { 0x0, 0x6, 0x30, 0x0, 0x03820000, 24, 0x03820000 },
+ { 0x0, 0x6, 0x31, 0x0, 0x03aa0000, 27, 0x03aa0000 },
+ { 0x0, 0x6, 0x32, 0x0, 0x03c80000, 29, 0x03c80000 },
+ { 0x0, 0x6, 0x33, 0x0, 0x130e0000, 34, 0x130e0000 },
+ { 0x0, 0x6, 0x34, 0x0, 0x20020000, 37, 0x20020000 },
+ { 0x0, 0x6, 0x35, 0x0, 0x20060000, 39, 0x20060000 },
+ { 0x0, 0x6, 0x36, 0x0, 0x20200000, 41, 0x20200000 },
+ { 0x0, 0x6, 0x37, 0x0, 0x206a0000, 51, 0x206a0000 },
+ { 0x0, 0x6, 0x38, 0x0, 0x20740000, 53, 0x20740000 },
+ { 0x0, 0x6, 0x39, 0x0, 0x20fe0000, 58, 0x20fe0000 },
+ { 0x0, 0x6, 0x3a, 0x0, 0x21a20000, 67, 0x21a20000 },
+ { 0x0, 0x6, 0x3b, 0x0, 0x21aa0000, 69, 0x21aa0000 },
+ { 0x0, 0x6, 0x3c, 0x0, 0x02b80000, 17, 0x02b80000 },
+ { 0x0, 0x6, 0x3d, 0x0, 0x03080000, 20, 0x03080000 },
+ { 0x0, 0x6, 0x3e, 0x0, 0x13100000, 35, 0x13100000 },
+ { 0x0, 0x6, 0x3f, 0x0, 0x01f00000, 2, 0x01f00000 },
+ { 0x0, 0x6, 0x40, 0x0, 0x03000000, 19, 0x03000000 },
+ { 0x0, 0x6, 0x41, 0x0, 0x03c00000, 28, 0x03c00000 },
+ { 0x0, 0x6, 0x42, 0x0, 0x03d00000, 30, 0x03d00000 },
+ { 0x0, 0x6, 0x43, 0x0, 0x01700000, 1, 0x01700000 },
+ { 0x0, 0x6, 0x44, 0x0, 0x02c00000, 18, 0x02c00000 },
+ { 0x0, 0x6, 0x45, 0x0, 0x02600000, 15, 0x02600000 },
+ { 0x0, 0x6, 0x46, 0x0, 0x06000000, 32, 0x06000000 },
+ { 0x0, 0x6, 0x47, 0x0, 0x24000000, 71, 0x24000000 },
+ { 0x0, 0x7, 0x00, 0x0, 0x12000000, 0, 0x12000000 },
+ { 0x0, 0x8, 0x00, 0x0, 0x11000000, 0, 0x11000000 },
+ { 0x0, 0x9, 0x00, 0x0, 0x10000000, 0, 0x10000000 },
+ { 0x0, 0xa, 0x00, 0x0, 0x22000000, 0, 0x22000000 }
+};
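+
+/*
+ * Illustrative note (not part of the hardware-derived tables): each entry
+ * above is keyed by the initiator flow and target flow decoded from the
+ * logged RouteId, plus the target subrange. For example, an error reported
+ * with init flow 0x0 (aon_p2ps) and targ flow 0x4 (gpu_p2pm), subrange 0x01,
+ * selects { 0x0, 0x4, 0x01, 0x0, 0x18000000, 1, 0x18000000 }, i.e. the
+ * failing access fell in the GPU aperture starting at SoC address 0x18000000.
+ */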
+
+/*
+ * BPMP NOC aperture lookup table as per file "BPMP_NOC_Structure.info".
+ */
+static const char * const tegra194_bpmpnoc_routeid_initflow[] = {
+ [0x0] = "cbb_i/I/0",
+ [0x1] = "cpu_m_i/I/0",
+ [0x2] = "cpu_p_i/I/0",
+ [0x3] = "cvc_i/I/0",
+ [0x4] = "dma_m_i/I/0",
+ [0x5] = "dma_p_i/I/0",
+ [0x6] = "RESERVED",
+ [0x7] = "RESERVED"
+};
+
+static const char * const tegra194_bpmpnoc_routeid_targflow[] = {
+ [0x00] = "multiport0_t/T/actmon",
+ [0x01] = "multiport0_t/T/ast_0",
+ [0x02] = "multiport0_t/T/ast_1",
+ [0x03] = "multiport0_t/T/atcm_cfg",
+ [0x04] = "multiport0_t/T/car",
+ [0x05] = "multiport0_t/T/central_pwr_mgr",
+ [0x06] = "multiport0_t/T/central_vtg_ctlr",
+ [0x07] = "multiport0_t/T/cfg",
+ [0x08] = "multiport0_t/T/dma",
+ [0x09] = "multiport0_t/T/err_collator",
+ [0x0a] = "multiport0_t/T/err_collator_car",
+ [0x0b] = "multiport0_t/T/fpga_misc",
+ [0x0c] = "multiport0_t/T/fpga_uart",
+ [0x0d] = "multiport0_t/T/gte",
+ [0x0e] = "multiport0_t/T/hsp",
+ [0x0f] = "multiport0_t/T/misc",
+ [0x10] = "multiport0_t/T/pm",
+ [0x11] = "multiport0_t/T/simon0",
+ [0x12] = "multiport0_t/T/simon1",
+ [0x13] = "multiport0_t/T/simon2",
+ [0x14] = "multiport0_t/T/simon3",
+ [0x15] = "multiport0_t/T/simon4",
+ [0x16] = "multiport0_t/T/soc_therm",
+ [0x17] = "multiport0_t/T/tke",
+ [0x18] = "multiport0_t/T/vic_0",
+ [0x19] = "multiport0_t/T/vic_1",
+ [0x1a] = "ast0_t/T/0",
+ [0x1b] = "ast1_t/T/0",
+ [0x1c] = "bpmp_noc_firewall/T/0",
+ [0x1d] = "cbb_t/T/0",
+ [0x1e] = "cpu_t/T/0",
+ [0x1f] = "svc_t/T/0"
+};
+
+/*
+ * Fields of BPMP NOC lookup table:
+ * Init flow, Targ flow, Targ subrange, Init mapping, Init localAddress,
+ * Targ mapping, Targ localAddress
+ * ----------------------------------------------------------------------------
+ */
+static const struct tegra194_cbb_aperture tegra194_bpmpnoc_apert_lookup[] = {
+ { 0x0, 0x1c, 0x0, 0x0, 0x0d640000, 0, 0x00000000 },
+ { 0x0, 0x1e, 0x0, 0x0, 0x0d400000, 0, 0x0d400000 },
+ { 0x0, 0x00, 0x0, 0x0, 0x0d230000, 0, 0x00000000 },
+ { 0x0, 0x01, 0x0, 0x0, 0x0d040000, 0, 0x00000000 },
+ { 0x0, 0x02, 0x0, 0x0, 0x0d050000, 0, 0x00000000 },
+ { 0x0, 0x03, 0x0, 0x0, 0x0d000000, 0, 0x00000000 },
+ { 0x0, 0x04, 0x0, 0x0, 0x20ae0000, 3, 0x000e0000 },
+ { 0x0, 0x04, 0x1, 0x0, 0x20ac0000, 2, 0x000c0000 },
+ { 0x0, 0x04, 0x2, 0x0, 0x20a80000, 1, 0x00080000 },
+ { 0x0, 0x04, 0x3, 0x0, 0x20a00000, 0, 0x00000000 },
+ { 0x0, 0x05, 0x0, 0x0, 0x0d2a0000, 0, 0x00000000 },
+ { 0x0, 0x06, 0x0, 0x0, 0x0d290000, 0, 0x00000000 },
+ { 0x0, 0x07, 0x0, 0x0, 0x0d2c0000, 0, 0x00000000 },
+ { 0x0, 0x08, 0x0, 0x0, 0x0d0e0000, 4, 0x00080000 },
+ { 0x0, 0x08, 0x1, 0x0, 0x0d060000, 0, 0x00000000 },
+ { 0x0, 0x08, 0x2, 0x0, 0x0d080000, 1, 0x00020000 },
+ { 0x0, 0x08, 0x3, 0x0, 0x0d0a0000, 2, 0x00040000 },
+ { 0x0, 0x08, 0x4, 0x0, 0x0d0c0000, 3, 0x00060000 },
+ { 0x0, 0x09, 0x0, 0x0, 0x0d650000, 0, 0x00000000 },
+ { 0x0, 0x0a, 0x0, 0x0, 0x20af0000, 0, 0x00000000 },
+ { 0x0, 0x0b, 0x0, 0x0, 0x0d3e0000, 0, 0x00000000 },
+ { 0x0, 0x0c, 0x0, 0x0, 0x0d3d0000, 0, 0x00000000 },
+ { 0x0, 0x0d, 0x0, 0x0, 0x0d1e0000, 0, 0x00000000 },
+ { 0x0, 0x0e, 0x0, 0x0, 0x0d150000, 0, 0x00000000 },
+ { 0x0, 0x0e, 0x1, 0x0, 0x0d160000, 1, 0x00010000 },
+ { 0x0, 0x0e, 0x2, 0x0, 0x0d170000, 2, 0x00020000 },
+ { 0x0, 0x0e, 0x3, 0x0, 0x0d180000, 3, 0x00030000 },
+ { 0x0, 0x0e, 0x4, 0x0, 0x0d190000, 4, 0x00040000 },
+ { 0x0, 0x0e, 0x5, 0x0, 0x0d1a0000, 5, 0x00050000 },
+ { 0x0, 0x0e, 0x6, 0x0, 0x0d1b0000, 6, 0x00060000 },
+ { 0x0, 0x0e, 0x7, 0x0, 0x0d1c0000, 7, 0x00070000 },
+ { 0x0, 0x0e, 0x8, 0x0, 0x0d1d0000, 8, 0x00080000 },
+ { 0x0, 0x0f, 0x0, 0x0, 0x0d660000, 0, 0x00000000 },
+ { 0x0, 0x10, 0x0, 0x0, 0x0d1f0000, 0, 0x00000000 },
+ { 0x0, 0x10, 0x1, 0x0, 0x0d200000, 1, 0x00010000 },
+ { 0x0, 0x10, 0x2, 0x0, 0x0d210000, 2, 0x00020000 },
+ { 0x0, 0x10, 0x3, 0x0, 0x0d220000, 3, 0x00030000 },
+ { 0x0, 0x11, 0x0, 0x0, 0x0d240000, 0, 0x00000000 },
+ { 0x0, 0x12, 0x0, 0x0, 0x0d250000, 0, 0x00000000 },
+ { 0x0, 0x13, 0x0, 0x0, 0x0d260000, 0, 0x00000000 },
+ { 0x0, 0x14, 0x0, 0x0, 0x0d270000, 0, 0x00000000 },
+ { 0x0, 0x15, 0x0, 0x0, 0x0d2b0000, 0, 0x00000000 },
+ { 0x0, 0x16, 0x0, 0x0, 0x0d280000, 0, 0x00000000 },
+ { 0x0, 0x17, 0x0, 0x0, 0x0d0f0000, 0, 0x00000000 },
+ { 0x0, 0x17, 0x1, 0x0, 0x0d100000, 1, 0x00010000 },
+ { 0x0, 0x17, 0x2, 0x0, 0x0d110000, 2, 0x00020000 },
+ { 0x0, 0x17, 0x3, 0x0, 0x0d120000, 3, 0x00030000 },
+ { 0x0, 0x17, 0x4, 0x0, 0x0d130000, 4, 0x00040000 },
+ { 0x0, 0x17, 0x5, 0x0, 0x0d140000, 5, 0x00050000 },
+ { 0x0, 0x18, 0x0, 0x0, 0x0d020000, 0, 0x00000000 },
+ { 0x0, 0x19, 0x0, 0x0, 0x0d030000, 0, 0x00000000 },
+ { 0x0, 0x1f, 0x0, 0x0, 0x0d600000, 0, 0x00000000 },
+ { 0x0, 0x1f, 0x1, 0x0, 0x00000000, 0, 0x00000000 },
+ { 0x1, 0x1a, 0x0, 0x0, 0x40000000, 0, 0x40000000 },
+ { 0x1, 0x1a, 0x1, 0x1, 0x80000000, 1, 0x80000000 },
+ { 0x1, 0x1a, 0x2, 0x0, 0x00000000, 0, 0x00000000 },
+ { 0x2, 0x1c, 0x0, 0x0, 0x0d640000, 0, 0x00000000 },
+ { 0x2, 0x1d, 0x0, 0x0, 0x20b00000, 8, 0x20b00000 },
+ { 0x2, 0x1d, 0x1, 0x0, 0x20800000, 7, 0x20800000 },
+ { 0x2, 0x1d, 0x2, 0x0, 0x20c00000, 9, 0x20c00000 },
+ { 0x2, 0x1d, 0x3, 0x0, 0x0d800000, 3, 0x0d800000 },
+ { 0x2, 0x1d, 0x4, 0x0, 0x20000000, 6, 0x20000000 },
+ { 0x2, 0x1d, 0x5, 0x0, 0x0c000000, 2, 0x0c000000 },
+ { 0x2, 0x1d, 0x6, 0x0, 0x21000000, 10, 0x21000000 },
+ { 0x2, 0x1d, 0x7, 0x0, 0x0e000000, 4, 0x0e000000 },
+ { 0x2, 0x1d, 0x8, 0x0, 0x22000000, 11, 0x22000000 },
+ { 0x2, 0x1d, 0x9, 0x0, 0x08000000, 1, 0x08000000 },
+ { 0x2, 0x1d, 0xa, 0x0, 0x24000000, 12, 0x24000000 },
+ { 0x2, 0x1d, 0xb, 0x0, 0x00000000, 0, 0x00000000 },
+ { 0x2, 0x1d, 0xc, 0x0, 0x28000000, 13, 0x28000000 },
+ { 0x2, 0x1d, 0xd, 0x0, 0x10000000, 5, 0x10000000 },
+ { 0x2, 0x1d, 0xe, 0x0, 0x30000000, 14, 0x30000000 },
+ { 0x2, 0x00, 0x0, 0x0, 0x0d230000, 0, 0x00000000 },
+ { 0x2, 0x01, 0x0, 0x0, 0x0d040000, 0, 0x00000000 },
+ { 0x2, 0x02, 0x0, 0x0, 0x0d050000, 0, 0x00000000 },
+ { 0x2, 0x03, 0x0, 0x0, 0x0d000000, 0, 0x00000000 },
+ { 0x2, 0x04, 0x0, 0x0, 0x20ae0000, 3, 0x000e0000 },
+ { 0x2, 0x04, 0x1, 0x0, 0x20ac0000, 2, 0x000c0000 },
+ { 0x2, 0x04, 0x2, 0x0, 0x20a80000, 1, 0x00080000 },
+ { 0x2, 0x04, 0x3, 0x0, 0x20a00000, 0, 0x00000000 },
+ { 0x2, 0x05, 0x0, 0x0, 0x0d2a0000, 0, 0x00000000 },
+ { 0x2, 0x06, 0x0, 0x0, 0x0d290000, 0, 0x00000000 },
+ { 0x2, 0x07, 0x0, 0x0, 0x0d2c0000, 0, 0x00000000 },
+ { 0x2, 0x08, 0x0, 0x0, 0x0d0e0000, 4, 0x00080000 },
+ { 0x2, 0x08, 0x1, 0x0, 0x0d060000, 0, 0x00000000 },
+ { 0x2, 0x08, 0x2, 0x0, 0x0d080000, 1, 0x00020000 },
+ { 0x2, 0x08, 0x3, 0x0, 0x0d0a0000, 2, 0x00040000 },
+ { 0x2, 0x08, 0x4, 0x0, 0x0d0c0000, 3, 0x00060000 },
+ { 0x2, 0x09, 0x0, 0x0, 0x0d650000, 0, 0x00000000 },
+ { 0x2, 0x0a, 0x0, 0x0, 0x20af0000, 0, 0x00000000 },
+ { 0x2, 0x0b, 0x0, 0x0, 0x0d3e0000, 0, 0x00000000 },
+ { 0x2, 0x0c, 0x0, 0x0, 0x0d3d0000, 0, 0x00000000 },
+ { 0x2, 0x0d, 0x0, 0x0, 0x0d1e0000, 0, 0x00000000 },
+ { 0x2, 0x0e, 0x0, 0x0, 0x0d150000, 0, 0x00000000 },
+ { 0x2, 0x0e, 0x1, 0x0, 0x0d160000, 1, 0x00010000 },
+ { 0x2, 0x0e, 0x2, 0x0, 0x0d170000, 2, 0x00020000 },
+ { 0x2, 0x0e, 0x3, 0x0, 0x0d180000, 3, 0x00030000 },
+ { 0x2, 0x0e, 0x4, 0x0, 0x0d190000, 4, 0x00040000 },
+ { 0x2, 0x0e, 0x5, 0x0, 0x0d1a0000, 5, 0x00050000 },
+ { 0x2, 0x0e, 0x6, 0x0, 0x0d1b0000, 6, 0x00060000 },
+ { 0x2, 0x0e, 0x7, 0x0, 0x0d1c0000, 7, 0x00070000 },
+ { 0x2, 0x0e, 0x8, 0x0, 0x0d1d0000, 8, 0x00080000 },
+ { 0x2, 0x0f, 0x0, 0x0, 0x0d660000, 0, 0x00000000 },
+ { 0x2, 0x10, 0x0, 0x0, 0x0d1f0000, 0, 0x00000000 },
+ { 0x2, 0x10, 0x1, 0x0, 0x0d200000, 1, 0x00010000 },
+ { 0x2, 0x10, 0x2, 0x0, 0x0d210000, 2, 0x00020000 },
+ { 0x2, 0x10, 0x3, 0x0, 0x0d220000, 3, 0x00030000 },
+ { 0x2, 0x11, 0x0, 0x0, 0x0d240000, 0, 0x00000000 },
+ { 0x2, 0x12, 0x0, 0x0, 0x0d250000, 0, 0x00000000 },
+ { 0x2, 0x13, 0x0, 0x0, 0x0d260000, 0, 0x00000000 },
+ { 0x2, 0x14, 0x0, 0x0, 0x0d270000, 0, 0x00000000 },
+ { 0x2, 0x15, 0x0, 0x0, 0x0d2b0000, 0, 0x00000000 },
+ { 0x2, 0x16, 0x0, 0x0, 0x0d280000, 0, 0x00000000 },
+ { 0x2, 0x17, 0x0, 0x0, 0x0d0f0000, 0, 0x00000000 },
+ { 0x2, 0x17, 0x1, 0x0, 0x0d100000, 1, 0x00010000 },
+ { 0x2, 0x17, 0x2, 0x0, 0x0d110000, 2, 0x00020000 },
+ { 0x2, 0x17, 0x3, 0x0, 0x0d120000, 3, 0x00030000 },
+ { 0x2, 0x17, 0x4, 0x0, 0x0d130000, 4, 0x00040000 },
+ { 0x2, 0x17, 0x5, 0x0, 0x0d140000, 5, 0x00050000 },
+ { 0x2, 0x18, 0x0, 0x0, 0x0d020000, 0, 0x00000000 },
+ { 0x2, 0x19, 0x0, 0x0, 0x0d030000, 0, 0x00000000 },
+ { 0x2, 0x1f, 0x0, 0x0, 0x0d600000, 0, 0x00000000 },
+ { 0x2, 0x1f, 0x1, 0x0, 0x00000000, 0, 0x00000000 },
+ { 0x3, 0x1b, 0x0, 0x0, 0x40000000, 0, 0x40000000 },
+ { 0x3, 0x1b, 0x1, 0x1, 0x80000000, 1, 0x80000000 },
+ { 0x3, 0x1c, 0x0, 0x2, 0x0d640000, 0, 0x00000000 },
+ { 0x3, 0x1d, 0x0, 0x2, 0x20b00000, 8, 0x20b00000 },
+ { 0x3, 0x1d, 0x1, 0x2, 0x20800000, 7, 0x20800000 },
+ { 0x3, 0x1d, 0x2, 0x2, 0x20c00000, 9, 0x20c00000 },
+ { 0x3, 0x1d, 0x3, 0x2, 0x0d800000, 3, 0x0d800000 },
+ { 0x3, 0x1d, 0x4, 0x2, 0x20000000, 6, 0x20000000 },
+ { 0x3, 0x1d, 0x5, 0x2, 0x0c000000, 2, 0x0c000000 },
+ { 0x3, 0x1d, 0x6, 0x2, 0x21000000, 10, 0x21000000 },
+ { 0x3, 0x1d, 0x7, 0x2, 0x0e000000, 4, 0x0e000000 },
+ { 0x3, 0x1d, 0x8, 0x2, 0x22000000, 11, 0x22000000 },
+ { 0x3, 0x1d, 0x9, 0x2, 0x08000000, 1, 0x08000000 },
+ { 0x3, 0x1d, 0xa, 0x2, 0x24000000, 12, 0x24000000 },
+ { 0x3, 0x1d, 0xb, 0x2, 0x00000000, 0, 0x00000000 },
+ { 0x3, 0x1d, 0xc, 0x2, 0x28000000, 13, 0x28000000 },
+ { 0x3, 0x1d, 0xd, 0x2, 0x10000000, 5, 0x10000000 },
+ { 0x3, 0x1d, 0xe, 0x2, 0x30000000, 14, 0x30000000 },
+ { 0x3, 0x1e, 0x0, 0x2, 0x0d400000, 0, 0x0d400000 },
+ { 0x3, 0x00, 0x0, 0x2, 0x0d230000, 0, 0x00000000 },
+ { 0x3, 0x01, 0x0, 0x2, 0x0d040000, 0, 0x00000000 },
+ { 0x3, 0x02, 0x0, 0x2, 0x0d050000, 0, 0x00000000 },
+ { 0x3, 0x03, 0x0, 0x2, 0x0d000000, 0, 0x00000000 },
+ { 0x3, 0x04, 0x0, 0x2, 0x20ae0000, 3, 0x000e0000 },
+ { 0x3, 0x04, 0x1, 0x2, 0x20ac0000, 2, 0x000c0000 },
+ { 0x3, 0x04, 0x2, 0x2, 0x20a80000, 1, 0x00080000 },
+ { 0x3, 0x04, 0x3, 0x2, 0x20a00000, 0, 0x00000000 },
+ { 0x3, 0x05, 0x0, 0x2, 0x0d2a0000, 0, 0x00000000 },
+ { 0x3, 0x06, 0x0, 0x2, 0x0d290000, 0, 0x00000000 },
+ { 0x3, 0x07, 0x0, 0x2, 0x0d2c0000, 0, 0x00000000 },
+ { 0x3, 0x08, 0x0, 0x2, 0x0d0e0000, 4, 0x00080000 },
+ { 0x3, 0x08, 0x1, 0x2, 0x0d060000, 0, 0x00000000 },
+ { 0x3, 0x08, 0x2, 0x2, 0x0d080000, 1, 0x00020000 },
+ { 0x3, 0x08, 0x3, 0x2, 0x0d0a0000, 2, 0x00040000 },
+ { 0x3, 0x08, 0x4, 0x2, 0x0d0c0000, 3, 0x00060000 },
+ { 0x3, 0x09, 0x0, 0x2, 0x0d650000, 0, 0x00000000 },
+ { 0x3, 0x0a, 0x0, 0x2, 0x20af0000, 0, 0x00000000 },
+ { 0x3, 0x0b, 0x0, 0x2, 0x0d3e0000, 0, 0x00000000 },
+ { 0x3, 0x0c, 0x0, 0x2, 0x0d3d0000, 0, 0x00000000 },
+ { 0x3, 0x0d, 0x0, 0x2, 0x0d1e0000, 0, 0x00000000 },
+ { 0x3, 0x0e, 0x0, 0x2, 0x0d150000, 0, 0x00000000 },
+ { 0x3, 0x0e, 0x1, 0x2, 0x0d160000, 1, 0x00010000 },
+ { 0x3, 0x0e, 0x2, 0x2, 0x0d170000, 2, 0x00020000 },
+ { 0x3, 0x0e, 0x3, 0x2, 0x0d180000, 3, 0x00030000 },
+ { 0x3, 0x0e, 0x4, 0x2, 0x0d190000, 4, 0x00040000 },
+ { 0x3, 0x0e, 0x5, 0x2, 0x0d1a0000, 5, 0x00050000 },
+ { 0x3, 0x0e, 0x6, 0x2, 0x0d1b0000, 6, 0x00060000 },
+ { 0x3, 0x0e, 0x7, 0x2, 0x0d1c0000, 7, 0x00070000 },
+ { 0x3, 0x0e, 0x8, 0x2, 0x0d1d0000, 8, 0x00080000 },
+ { 0x3, 0x0f, 0x0, 0x2, 0x0d660000, 0, 0x00000000 },
+ { 0x3, 0x10, 0x0, 0x2, 0x0d1f0000, 0, 0x00000000 },
+ { 0x3, 0x10, 0x1, 0x2, 0x0d200000, 1, 0x00010000 },
+ { 0x3, 0x10, 0x2, 0x2, 0x0d210000, 2, 0x00020000 },
+ { 0x3, 0x10, 0x3, 0x2, 0x0d220000, 3, 0x00030000 },
+ { 0x3, 0x11, 0x0, 0x2, 0x0d240000, 0, 0x00000000 },
+ { 0x3, 0x12, 0x0, 0x2, 0x0d250000, 0, 0x00000000 },
+ { 0x3, 0x13, 0x0, 0x2, 0x0d260000, 0, 0x00000000 },
+ { 0x3, 0x14, 0x0, 0x2, 0x0d270000, 0, 0x00000000 },
+ { 0x3, 0x15, 0x0, 0x2, 0x0d2b0000, 0, 0x00000000 },
+ { 0x3, 0x16, 0x0, 0x2, 0x0d280000, 0, 0x00000000 },
+ { 0x3, 0x17, 0x0, 0x2, 0x0d0f0000, 0, 0x00000000 },
+ { 0x3, 0x17, 0x1, 0x2, 0x0d100000, 1, 0x00010000 },
+ { 0x3, 0x17, 0x2, 0x2, 0x0d110000, 2, 0x00020000 },
+ { 0x3, 0x17, 0x3, 0x2, 0x0d120000, 3, 0x00030000 },
+ { 0x3, 0x17, 0x4, 0x2, 0x0d130000, 4, 0x00040000 },
+ { 0x3, 0x17, 0x5, 0x2, 0x0d140000, 5, 0x00050000 },
+ { 0x3, 0x18, 0x0, 0x2, 0x0d020000, 0, 0x00000000 },
+ { 0x3, 0x19, 0x0, 0x2, 0x0d030000, 0, 0x00000000 },
+ { 0x3, 0x1f, 0x0, 0x2, 0x0d600000, 0, 0x00000000 },
+ { 0x3, 0x1f, 0x1, 0x0, 0x00000000, 0, 0x00000000 },
+ { 0x4, 0x1b, 0x0, 0x0, 0x40000000, 0, 0x40000000 },
+ { 0x4, 0x1b, 0x1, 0x1, 0x80000000, 1, 0x80000000 },
+ { 0x4, 0x1e, 0x0, 0x2, 0x0d400000, 0, 0x0d400000 },
+ { 0x4, 0x1e, 0x1, 0x0, 0x00000000, 0, 0x00000000 },
+ { 0x5, 0x1c, 0x0, 0x0, 0x0d640000, 0, 0x00000000 },
+ { 0x5, 0x1d, 0x0, 0x0, 0x20b00000, 8, 0x20b00000 },
+ { 0x5, 0x1d, 0x1, 0x0, 0x20800000, 7, 0x20800000 },
+ { 0x5, 0x1d, 0x2, 0x0, 0x20c00000, 9, 0x20c00000 },
+ { 0x5, 0x1d, 0x3, 0x0, 0x0d800000, 3, 0x0d800000 },
+ { 0x5, 0x1d, 0x4, 0x0, 0x20000000, 6, 0x20000000 },
+ { 0x5, 0x1d, 0x5, 0x0, 0x0c000000, 2, 0x0c000000 },
+ { 0x5, 0x1d, 0x6, 0x0, 0x21000000, 10, 0x21000000 },
+ { 0x5, 0x1d, 0x7, 0x0, 0x0e000000, 4, 0x0e000000 },
+ { 0x5, 0x1d, 0x8, 0x0, 0x22000000, 11, 0x22000000 },
+ { 0x5, 0x1d, 0x9, 0x0, 0x08000000, 1, 0x08000000 },
+ { 0x5, 0x1d, 0xa, 0x0, 0x24000000, 12, 0x24000000 },
+ { 0x5, 0x1d, 0xb, 0x0, 0x00000000, 0, 0x00000000 },
+ { 0x5, 0x1d, 0xc, 0x0, 0x28000000, 13, 0x28000000 },
+ { 0x5, 0x1d, 0xd, 0x0, 0x10000000, 5, 0x10000000 },
+ { 0x5, 0x1d, 0xe, 0x0, 0x30000000, 14, 0x30000000 },
+ { 0x5, 0x00, 0x0, 0x0, 0x0d230000, 0, 0x00000000 },
+ { 0x5, 0x01, 0x0, 0x0, 0x0d040000, 0, 0x00000000 },
+ { 0x5, 0x02, 0x0, 0x0, 0x0d050000, 0, 0x00000000 },
+ { 0x5, 0x03, 0x0, 0x0, 0x0d000000, 0, 0x00000000 },
+ { 0x5, 0x04, 0x0, 0x0, 0x20ae0000, 3, 0x000e0000 },
+ { 0x5, 0x04, 0x1, 0x0, 0x20ac0000, 2, 0x000c0000 },
+ { 0x5, 0x04, 0x2, 0x0, 0x20a80000, 1, 0x00080000 },
+ { 0x5, 0x04, 0x3, 0x0, 0x20a00000, 0, 0x00000000 },
+ { 0x5, 0x05, 0x0, 0x0, 0x0d2a0000, 0, 0x00000000 },
+ { 0x5, 0x06, 0x0, 0x0, 0x0d290000, 0, 0x00000000 },
+ { 0x5, 0x07, 0x0, 0x0, 0x0d2c0000, 0, 0x00000000 },
+ { 0x5, 0x08, 0x0, 0x0, 0x0d0e0000, 4, 0x00080000 },
+ { 0x5, 0x08, 0x1, 0x0, 0x0d060000, 0, 0x00000000 },
+ { 0x5, 0x08, 0x2, 0x0, 0x0d080000, 1, 0x00020000 },
+ { 0x5, 0x08, 0x3, 0x0, 0x0d0a0000, 2, 0x00040000 },
+ { 0x5, 0x08, 0x4, 0x0, 0x0d0c0000, 3, 0x00060000 },
+ { 0x5, 0x09, 0x0, 0x0, 0x0d650000, 0, 0x00000000 },
+ { 0x5, 0x0a, 0x0, 0x0, 0x20af0000, 0, 0x00000000 },
+ { 0x5, 0x0b, 0x0, 0x0, 0x0d3e0000, 0, 0x00000000 },
+ { 0x5, 0x0c, 0x0, 0x0, 0x0d3d0000, 0, 0x00000000 },
+ { 0x5, 0x0d, 0x0, 0x0, 0x0d1e0000, 0, 0x00000000 },
+ { 0x5, 0x0e, 0x0, 0x0, 0x0d150000, 0, 0x00000000 },
+ { 0x5, 0x0e, 0x1, 0x0, 0x0d160000, 1, 0x00010000 },
+ { 0x5, 0x0e, 0x2, 0x0, 0x0d170000, 2, 0x00020000 },
+ { 0x5, 0x0e, 0x3, 0x0, 0x0d180000, 3, 0x00030000 },
+ { 0x5, 0x0e, 0x4, 0x0, 0x0d190000, 4, 0x00040000 },
+ { 0x5, 0x0e, 0x5, 0x0, 0x0d1a0000, 5, 0x00050000 },
+ { 0x5, 0x0e, 0x6, 0x0, 0x0d1b0000, 6, 0x00060000 },
+ { 0x5, 0x0e, 0x7, 0x0, 0x0d1c0000, 7, 0x00070000 },
+ { 0x5, 0x0e, 0x8, 0x0, 0x0d1d0000, 8, 0x00080000 },
+ { 0x5, 0x0f, 0x0, 0x0, 0x0d660000, 0, 0x00000000 },
+ { 0x5, 0x10, 0x0, 0x0, 0x0d1f0000, 0, 0x00000000 },
+ { 0x5, 0x10, 0x1, 0x0, 0x0d200000, 1, 0x00010000 },
+ { 0x5, 0x10, 0x2, 0x0, 0x0d210000, 2, 0x00020000 },
+ { 0x5, 0x10, 0x3, 0x0, 0x0d220000, 3, 0x00030000 },
+ { 0x5, 0x11, 0x0, 0x0, 0x0d240000, 0, 0x00000000 },
+ { 0x5, 0x12, 0x0, 0x0, 0x0d250000, 0, 0x00000000 },
+ { 0x5, 0x13, 0x0, 0x0, 0x0d260000, 0, 0x00000000 },
+ { 0x5, 0x14, 0x0, 0x0, 0x0d270000, 0, 0x00000000 },
+ { 0x5, 0x15, 0x0, 0x0, 0x0d2b0000, 0, 0x00000000 },
+ { 0x5, 0x16, 0x0, 0x0, 0x0d280000, 0, 0x00000000 },
+ { 0x5, 0x17, 0x0, 0x0, 0x0d0f0000, 0, 0x00000000 },
+ { 0x5, 0x17, 0x1, 0x0, 0x0d100000, 1, 0x00010000 },
+ { 0x5, 0x17, 0x2, 0x0, 0x0d110000, 2, 0x00020000 },
+ { 0x5, 0x17, 0x3, 0x0, 0x0d120000, 3, 0x00030000 },
+ { 0x5, 0x17, 0x4, 0x0, 0x0d130000, 4, 0x00040000 },
+ { 0x5, 0x17, 0x5, 0x0, 0x0d140000, 5, 0x00050000 },
+ { 0x5, 0x18, 0x0, 0x0, 0x0d020000, 0, 0x00000000 },
+ { 0x5, 0x19, 0x0, 0x0, 0x0d030000, 0, 0x00000000 },
+ { 0x5, 0x1f, 0x0, 0x0, 0x0d600000, 0, 0x00000000 },
+ { 0x5, 0x1f, 0x1, 0x0, 0x00000000, 0, 0x00000000 }
+};
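+
+/*
+ * Illustrative note (not part of the hardware-derived tables): entries whose
+ * "Init localAddress" and "Targ localAddress" differ describe an address
+ * translation across the NOC. For example, the entry
+ * { 0x0, 0x04, 0x0, 0x0, 0x20ae0000, 3, 0x000e0000 } above indicates that a
+ * cbb_i access to SoC address 0x20ae0000 reaches the CAR target
+ * (multiport0_t/T/car) at local offset 0x000e0000 via target mapping 3.
+ */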
+
+/*
+ * AON NOC aperture lookup table as per file "AON_NOC_Structure.info".
+ */
+static const char * const tegra194_aonnoc_routeid_initflow[] = {
+ [0x0] = "cbb_i/I/0",
+ [0x1] = "cpu_p_i/I/0",
+ [0x2] = "dma_m_i/I/0",
+ [0x3] = "dma_p_i/I/0"
+};
+
+static const char * const tegra194_aonnoc_routeid_targflow[] = {
+ [0x00] = "multiport1_t/T/aon_misc",
+ [0x01] = "multiport1_t/T/avic0",
+ [0x02] = "multiport1_t/T/avic1",
+ [0x03] = "multiport1_t/T/can1",
+ [0x04] = "multiport1_t/T/can2",
+ [0x05] = "multiport1_t/T/dma",
+ [0x06] = "multiport1_t/T/dmic",
+ [0x07] = "multiport1_t/T/err_collator",
+ [0x08] = "multiport1_t/T/fpga_misc",
+ [0x09] = "multiport1_t/T/gte",
+ [0x0a] = "multiport1_t/T/hsp",
+ [0x0b] = "multiport1_t/T/i2c2",
+ [0x0c] = "multiport1_t/T/i2c8",
+ [0x0d] = "multiport1_t/T/pwm",
+ [0x0e] = "multiport1_t/T/spi2",
+ [0x0f] = "multiport1_t/T/tke",
+ [0x10] = "multiport1_t/T/uartg",
+ [0x11] = "RESERVED",
+ [0x12] = "RESERVED",
+ [0x13] = "RESERVED",
+ [0x14] = "RESERVED",
+ [0x15] = "RESERVED",
+ [0x16] = "RESERVED",
+ [0x17] = "RESERVED",
+ [0x18] = "RESERVED",
+ [0x19] = "RESERVED",
+ [0x1a] = "RESERVED",
+ [0x1b] = "RESERVED",
+ [0x1c] = "RESERVED",
+ [0x1d] = "RESERVED",
+ [0x1e] = "RESERVED",
+ [0x1f] = "RESERVED",
+ [0x20] = "multiport0_t/T/aovc",
+ [0x21] = "multiport0_t/T/atcm",
+ [0x22] = "multiport0_t/T/cast",
+ [0x23] = "multiport0_t/T/dast",
+ [0x24] = "multiport0_t/T/err_collator_car",
+ [0x25] = "multiport0_t/T/gpio",
+ [0x26] = "multiport0_t/T/i2c10",
+ [0x27] = "multiport0_t/T/mss",
+ [0x28] = "multiport0_t/T/padctl_a12",
+ [0x29] = "multiport0_t/T/padctl_a14",
+ [0x2a] = "multiport0_t/T/padctl_a15",
+ [0x2b] = "multiport0_t/T/rtc",
+ [0x2c] = "multiport0_t/T/tsc",
+ [0x2d] = "RESERVED",
+ [0x2e] = "RESERVED",
+ [0x2f] = "RESERVED",
+ [0x30] = "multiport2_t/T/aon_vref_ro",
+ [0x31] = "multiport2_t/T/aopm",
+ [0x32] = "multiport2_t/T/car",
+ [0x33] = "multiport2_t/T/pmc",
+ [0x34] = "ast1_t/T/0",
+ [0x35] = "cbb_t/T/0",
+ [0x36] = "cpu_t/T/0",
+ [0x37] = "firewall_t/T/0",
+ [0x38] = "svc_t/T/0",
+ [0x39] = "uartc/T/uartc",
+ [0x3a] = "RESERVED",
+ [0x3b] = "RESERVED",
+ [0x3c] = "RESERVED",
+ [0x3d] = "RESERVED",
+ [0x3e] = "RESERVED",
+ [0x3f] = "RESERVED"
+};
+
+/*
+ * Fields of AON NOC lookup table:
+ * Init flow, Targ flow, Targ subrange, Init mapping, Init localAddress,
+ * Targ mapping, Targ localAddress
+ * ----------------------------------------------------------------------------
+ */
+static const struct tegra194_cbb_aperture tegra194_aonnoc_aperture_lookup[] = {
+ { 0x0, 0x37, 0x00, 0, 0x0c640000, 0, 0x00000000 },
+ { 0x0, 0x20, 0x00, 0, 0x0c3b0000, 0, 0x00000000 },
+ { 0x0, 0x21, 0x00, 0, 0x0c000000, 0, 0x00000000 },
+ { 0x0, 0x22, 0x00, 0, 0x0c040000, 0, 0x00000000 },
+ { 0x0, 0x23, 0x00, 0, 0x0c050000, 0, 0x00000000 },
+ { 0x0, 0x24, 0x00, 0, 0x20cf0000, 0, 0x00000000 },
+ { 0x0, 0x25, 0x00, 0, 0x0c2f0000, 0, 0x00000000 },
+ { 0x0, 0x26, 0x00, 0, 0x0c230000, 0, 0x00000000 },
+ { 0x0, 0x27, 0x00, 0, 0x0c350000, 0, 0x00000000 },
+ { 0x0, 0x28, 0x00, 0, 0x0c301000, 0, 0x00000000 },
+ { 0x0, 0x29, 0x00, 0, 0x0c302000, 0, 0x00000000 },
+ { 0x0, 0x2a, 0x00, 0, 0x0c303000, 0, 0x00000000 },
+ { 0x0, 0x2b, 0x00, 0, 0x0c2a0000, 0, 0x00000000 },
+ { 0x0, 0x2c, 0x00, 0, 0x0c2b0000, 0, 0x00000000 },
+ { 0x0, 0x2c, 0x01, 0, 0x0c2c0000, 1, 0x00010000 },
+ { 0x0, 0x2c, 0x02, 0, 0x0c2d0000, 2, 0x00020000 },
+ { 0x0, 0x2c, 0x03, 0, 0x0c2e0000, 3, 0x00030000 },
+ { 0x0, 0x00, 0x00, 0, 0x0c660000, 0, 0x00000000 },
+ { 0x0, 0x01, 0x00, 0, 0x0c020000, 0, 0x00000000 },
+ { 0x0, 0x02, 0x00, 0, 0x0c030000, 0, 0x00000000 },
+ { 0x0, 0x03, 0x00, 0, 0x0c310000, 0, 0x00000000 },
+ { 0x0, 0x04, 0x00, 0, 0x0c320000, 0, 0x00000000 },
+ { 0x0, 0x05, 0x00, 0, 0x0c0a0000, 2, 0x00040000 },
+ { 0x0, 0x05, 0x01, 0, 0x0c0b0000, 3, 0x00050000 },
+ { 0x0, 0x05, 0x02, 0, 0x0c0e0000, 5, 0x00080000 },
+ { 0x0, 0x05, 0x03, 0, 0x0c060000, 0, 0x00000000 },
+ { 0x0, 0x05, 0x04, 0, 0x0c080000, 1, 0x00020000 },
+ { 0x0, 0x05, 0x05, 0, 0x0c0c0000, 4, 0x00060000 },
+ { 0x0, 0x06, 0x00, 0, 0x0c330000, 0, 0x00000000 },
+ { 0x0, 0x07, 0x00, 0, 0x0c650000, 0, 0x00000000 },
+ { 0x0, 0x08, 0x00, 0, 0x0c3e0000, 0, 0x00000000 },
+ { 0x0, 0x09, 0x00, 0, 0x0c1e0000, 0, 0x00000000 },
+ { 0x0, 0x0a, 0x00, 0, 0x0c150000, 0, 0x00000000 },
+ { 0x0, 0x0a, 0x01, 0, 0x0c160000, 1, 0x00010000 },
+ { 0x0, 0x0a, 0x02, 0, 0x0c170000, 2, 0x00020000 },
+ { 0x0, 0x0a, 0x03, 0, 0x0c180000, 3, 0x00030000 },
+ { 0x0, 0x0a, 0x04, 0, 0x0c190000, 4, 0x00040000 },
+ { 0x0, 0x0a, 0x05, 0, 0x0c1a0000, 5, 0x00050000 },
+ { 0x0, 0x0a, 0x06, 0, 0x0c1b0000, 6, 0x00060000 },
+ { 0x0, 0x0a, 0x07, 0, 0x0c1c0000, 7, 0x00070000 },
+ { 0x0, 0x0a, 0x08, 0, 0x0c1d0000, 8, 0x00080000 },
+ { 0x0, 0x0b, 0x00, 0, 0x0c240000, 0, 0x00000000 },
+ { 0x0, 0x0c, 0x00, 0, 0x0c250000, 0, 0x00000000 },
+ { 0x0, 0x0d, 0x00, 0, 0x0c340000, 0, 0x00000000 },
+ { 0x0, 0x0e, 0x00, 0, 0x0c260000, 0, 0x00000000 },
+ { 0x0, 0x0f, 0x00, 0, 0x0c0f0000, 0, 0x00000000 },
+ { 0x0, 0x0f, 0x01, 0, 0x0c100000, 1, 0x00010000 },
+ { 0x0, 0x0f, 0x02, 0, 0x0c110000, 2, 0x00020000 },
+ { 0x0, 0x0f, 0x03, 0, 0x0c120000, 3, 0x00030000 },
+ { 0x0, 0x0f, 0x04, 0, 0x0c130000, 4, 0x00040000 },
+ { 0x0, 0x0f, 0x05, 0, 0x0c140000, 5, 0x00050000 },
+ { 0x0, 0x10, 0x00, 0, 0x0c290000, 0, 0x00000000 },
+ { 0x0, 0x30, 0x00, 0, 0x20ce0000, 0, 0x00000000 },
+ { 0x0, 0x31, 0x00, 0, 0x0c1f0000, 0, 0x00000000 },
+ { 0x0, 0x31, 0x01, 0, 0x0c200000, 1, 0x00010000 },
+ { 0x0, 0x31, 0x02, 0, 0x0c210000, 2, 0x00020000 },
+ { 0x0, 0x31, 0x03, 0, 0x0c220000, 3, 0x00030000 },
+ { 0x0, 0x32, 0x00, 0, 0x20cc0000, 3, 0x001c0000 },
+ { 0x0, 0x32, 0x01, 0, 0x20c80000, 2, 0x00180000 },
+ { 0x0, 0x32, 0x02, 0, 0x20c00000, 1, 0x00100000 },
+ { 0x0, 0x32, 0x03, 0, 0x20b00000, 0, 0x00000000 },
+ { 0x0, 0x33, 0x00, 0, 0x0c360000, 0, 0x00000000 },
+ { 0x0, 0x33, 0x01, 0, 0x0c370000, 1, 0x00010000 },
+ { 0x0, 0x33, 0x02, 0, 0x0c3a0000, 3, 0x00040000 },
+ { 0x0, 0x33, 0x03, 0, 0x0c380000, 2, 0x00020000 },
+ { 0x0, 0x38, 0x00, 0, 0x0c600000, 0, 0x00000000 },
+ { 0x0, 0x38, 0x01, 0, 0x00000000, 0, 0x00000000 },
+ { 0x0, 0x39, 0x00, 0, 0x0c280000, 0, 0x00000000 },
+ { 0x1, 0x35, 0x00, 0, 0x00000000, 0, 0x00000000 },
+ { 0x1, 0x35, 0x01, 0, 0x00100000, 1, 0x00100000 },
+ { 0x1, 0x35, 0x02, 0, 0x05a00000, 11, 0x05a00000 },
+ { 0x1, 0x35, 0x03, 0, 0x05b00000, 32, 0x05b00000 },
+ { 0x1, 0x35, 0x04, 0, 0x05c00000, 33, 0x05c00000 },
+ { 0x1, 0x35, 0x05, 0, 0x05d00000, 12, 0x05d00000 },
+ { 0x1, 0x35, 0x06, 0, 0x20000000, 19, 0x20000000 },
+ { 0x1, 0x35, 0x07, 0, 0x20100000, 20, 0x20100000 },
+ { 0x1, 0x35, 0x08, 0, 0x20a00000, 24, 0x20a00000 },
+ { 0x1, 0x35, 0x09, 0, 0x20d00000, 25, 0x20d00000 },
+ { 0x1, 0x35, 0x0a, 0, 0x00200000, 2, 0x00200000 },
+ { 0x1, 0x35, 0x0b, 0, 0x05800000, 10, 0x05800000 },
+ { 0x1, 0x35, 0x0c, 0, 0x05e00000, 13, 0x05e00000 },
+ { 0x1, 0x35, 0x0d, 0, 0x20200000, 21, 0x20200000 },
+ { 0x1, 0x35, 0x0e, 0, 0x20800000, 23, 0x20800000 },
+ { 0x1, 0x35, 0x0f, 0, 0x20e00000, 26, 0x20e00000 },
+ { 0x1, 0x35, 0x10, 0, 0x00400000, 3, 0x00400000 },
+ { 0x1, 0x35, 0x11, 0, 0x20400000, 22, 0x20400000 },
+ { 0x1, 0x35, 0x12, 0, 0x00800000, 4, 0x00800000 },
+ { 0x1, 0x35, 0x13, 0, 0x05000000, 9, 0x05000000 },
+ { 0x1, 0x35, 0x14, 0, 0x0c800000, 34, 0x0c800000 },
+ { 0x1, 0x35, 0x15, 0, 0x01000000, 5, 0x01000000 },
+ { 0x1, 0x35, 0x16, 0, 0x03000000, 7, 0x03000000 },
+ { 0x1, 0x35, 0x17, 0, 0x04000000, 8, 0x04000000 },
+ { 0x1, 0x35, 0x18, 0, 0x0d000000, 16, 0x0d000000 },
+ { 0x1, 0x35, 0x19, 0, 0x21000000, 27, 0x21000000 },
+ { 0x1, 0x35, 0x1a, 0, 0x02000000, 6, 0x02000000 },
+ { 0x1, 0x35, 0x1b, 0, 0x06000000, 14, 0x06000000 },
+ { 0x1, 0x35, 0x1c, 0, 0x0e000000, 17, 0x0e000000 },
+ { 0x1, 0x35, 0x1d, 0, 0x22000000, 28, 0x22000000 },
+ { 0x1, 0x35, 0x1e, 0, 0x08000000, 15, 0x08000000 },
+ { 0x1, 0x35, 0x1f, 0, 0x24000000, 29, 0x24000000 },
+ { 0x1, 0x35, 0x20, 0, 0x28000000, 30, 0x28000000 },
+ { 0x1, 0x35, 0x21, 0, 0x10000000, 18, 0x10000000 },
+ { 0x1, 0x35, 0x22, 0, 0x30000000, 31, 0x30000000 },
+ { 0x1, 0x37, 0x00, 0, 0x0c640000, 0, 0x00000000 },
+ { 0x1, 0x20, 0x00, 0, 0x0c3b0000, 0, 0x00000000 },
+ { 0x1, 0x21, 0x00, 0, 0x0c000000, 0, 0x00000000 },
+ { 0x1, 0x22, 0x00, 0, 0x0c040000, 0, 0x00000000 },
+ { 0x1, 0x23, 0x00, 0, 0x0c050000, 0, 0x00000000 },
+ { 0x1, 0x24, 0x00, 0, 0x20cf0000, 0, 0x00000000 },
+ { 0x1, 0x25, 0x00, 0, 0x0c2f0000, 0, 0x00000000 },
+ { 0x1, 0x26, 0x00, 0, 0x0c230000, 0, 0x00000000 },
+ { 0x1, 0x27, 0x00, 0, 0x0c350000, 0, 0x00000000 },
+ { 0x1, 0x28, 0x00, 0, 0x0c301000, 0, 0x00000000 },
+ { 0x1, 0x29, 0x00, 0, 0x0c302000, 0, 0x00000000 },
+ { 0x1, 0x2a, 0x00, 0, 0x0c303000, 0, 0x00000000 },
+ { 0x1, 0x2b, 0x00, 0, 0x0c2a0000, 0, 0x00000000 },
+ { 0x1, 0x2c, 0x00, 0, 0x0c2b0000, 0, 0x00000000 },
+ { 0x1, 0x2c, 0x01, 0, 0x0c2c0000, 1, 0x00010000 },
+ { 0x1, 0x2c, 0x02, 0, 0x0c2d0000, 2, 0x00020000 },
+ { 0x1, 0x2c, 0x03, 0, 0x0c2e0000, 3, 0x00030000 },
+ { 0x1, 0x00, 0x00, 0, 0x0c660000, 0, 0x00000000 },
+ { 0x1, 0x01, 0x00, 0, 0x0c020000, 0, 0x00000000 },
+ { 0x1, 0x02, 0x00, 0, 0x0c030000, 0, 0x00000000 },
+ { 0x1, 0x03, 0x00, 0, 0x0c310000, 0, 0x00000000 },
+ { 0x1, 0x04, 0x00, 0, 0x0c320000, 0, 0x00000000 },
+ { 0x1, 0x05, 0x00, 0, 0x0c0a0000, 2, 0x00040000 },
+ { 0x1, 0x05, 0x01, 0, 0x0c0b0000, 3, 0x00050000 },
+ { 0x1, 0x05, 0x02, 0, 0x0c0e0000, 5, 0x00080000 },
+ { 0x1, 0x05, 0x03, 0, 0x0c060000, 0, 0x00000000 },
+ { 0x1, 0x05, 0x04, 0, 0x0c080000, 1, 0x00020000 },
+ { 0x1, 0x05, 0x05, 0, 0x0c0c0000, 4, 0x00060000 },
+ { 0x1, 0x06, 0x00, 0, 0x0c330000, 0, 0x00000000 },
+ { 0x1, 0x07, 0x00, 0, 0x0c650000, 0, 0x00000000 },
+ { 0x1, 0x08, 0x00, 0, 0x0c3e0000, 0, 0x00000000 },
+ { 0x1, 0x09, 0x00, 0, 0x0c1e0000, 0, 0x00000000 },
+ { 0x1, 0x0a, 0x00, 0, 0x0c150000, 0, 0x00000000 },
+ { 0x1, 0x0a, 0x01, 0, 0x0c160000, 1, 0x00010000 },
+ { 0x1, 0x0a, 0x02, 0, 0x0c170000, 2, 0x00020000 },
+ { 0x1, 0x0a, 0x03, 0, 0x0c180000, 3, 0x00030000 },
+ { 0x1, 0x0a, 0x04, 0, 0x0c190000, 4, 0x00040000 },
+ { 0x1, 0x0a, 0x05, 0, 0x0c1a0000, 5, 0x00050000 },
+ { 0x1, 0x0a, 0x06, 0, 0x0c1b0000, 6, 0x00060000 },
+ { 0x1, 0x0a, 0x07, 0, 0x0c1c0000, 7, 0x00070000 },
+ { 0x1, 0x0a, 0x08, 0, 0x0c1d0000, 8, 0x00080000 },
+ { 0x1, 0x0b, 0x00, 0, 0x0c240000, 0, 0x00000000 },
+ { 0x1, 0x0c, 0x00, 0, 0x0c250000, 0, 0x00000000 },
+ { 0x1, 0x0d, 0x00, 0, 0x0c340000, 0, 0x00000000 },
+ { 0x1, 0x0e, 0x00, 0, 0x0c260000, 0, 0x00000000 },
+ { 0x1, 0x0f, 0x00, 0, 0x0c0f0000, 0, 0x00000000 },
+ { 0x1, 0x0f, 0x01, 0, 0x0c100000, 1, 0x00010000 },
+ { 0x1, 0x0f, 0x02, 0, 0x0c110000, 2, 0x00020000 },
+ { 0x1, 0x0f, 0x03, 0, 0x0c120000, 3, 0x00030000 },
+ { 0x1, 0x0f, 0x04, 0, 0x0c130000, 4, 0x00040000 },
+ { 0x1, 0x0f, 0x05, 0, 0x0c140000, 5, 0x00050000 },
+ { 0x1, 0x10, 0x00, 0, 0x0c290000, 0, 0x00000000 },
+ { 0x1, 0x30, 0x00, 0, 0x20ce0000, 0, 0x00000000 },
+ { 0x1, 0x31, 0x00, 0, 0x0c1f0000, 0, 0x00000000 },
+ { 0x1, 0x31, 0x01, 0, 0x0c200000, 1, 0x00010000 },
+ { 0x1, 0x31, 0x02, 0, 0x0c210000, 2, 0x00020000 },
+ { 0x1, 0x31, 0x03, 0, 0x0c220000, 3, 0x00030000 },
+ { 0x1, 0x32, 0x00, 0, 0x20cc0000, 3, 0x001c0000 },
+ { 0x1, 0x32, 0x01, 0, 0x20c80000, 2, 0x00180000 },
+ { 0x1, 0x32, 0x02, 0, 0x20c00000, 1, 0x00100000 },
+ { 0x1, 0x32, 0x03, 0, 0x20b00000, 0, 0x00000000 },
+ { 0x1, 0x33, 0x00, 0, 0x0c360000, 0, 0x00000000 },
+ { 0x1, 0x33, 0x01, 0, 0x0c370000, 1, 0x00010000 },
+ { 0x1, 0x33, 0x02, 0, 0x0c3a0000, 3, 0x00040000 },
+ { 0x1, 0x33, 0x03, 0, 0x0c380000, 2, 0x00020000 },
+ { 0x1, 0x38, 0x00, 0, 0x0c600000, 0, 0x00000000 },
+ { 0x1, 0x38, 0x01, 0, 0x00000000, 0, 0x00000000 },
+ { 0x1, 0x39, 0x00, 0, 0x0c280000, 0, 0x00000000 },
+ { 0x2, 0x34, 0x00, 0, 0x40000000, 0, 0x40000000 },
+ { 0x2, 0x34, 0x01, 0, 0x80000000, 1, 0x80000000 },
+ { 0x2, 0x36, 0x00, 0, 0x0c400000, 0, 0x0c400000 },
+ { 0x2, 0x36, 0x01, 0, 0x00000000, 0, 0x00000000 },
+ { 0x3, 0x35, 0x00, 0, 0x00000000, 0, 0x00000000 },
+ { 0x3, 0x35, 0x01, 0, 0x00100000, 1, 0x00100000 },
+ { 0x3, 0x35, 0x02, 0, 0x05a00000, 11, 0x05a00000 },
+ { 0x3, 0x35, 0x03, 0, 0x05b00000, 32, 0x05b00000 },
+ { 0x3, 0x35, 0x04, 0, 0x05c00000, 33, 0x05c00000 },
+ { 0x3, 0x35, 0x05, 0, 0x05d00000, 12, 0x05d00000 },
+ { 0x3, 0x35, 0x06, 0, 0x20000000, 19, 0x20000000 },
+ { 0x3, 0x35, 0x07, 0, 0x20100000, 20, 0x20100000 },
+ { 0x3, 0x35, 0x08, 0, 0x20a00000, 24, 0x20a00000 },
+ { 0x3, 0x35, 0x09, 0, 0x20d00000, 25, 0x20d00000 },
+ { 0x3, 0x35, 0x0a, 0, 0x00200000, 2, 0x00200000 },
+ { 0x3, 0x35, 0x0b, 0, 0x05800000, 10, 0x05800000 },
+ { 0x3, 0x35, 0x0c, 0, 0x05e00000, 13, 0x05e00000 },
+ { 0x3, 0x35, 0x0d, 0, 0x20200000, 21, 0x20200000 },
+ { 0x3, 0x35, 0x0e, 0, 0x20800000, 23, 0x20800000 },
+ { 0x3, 0x35, 0x0f, 0, 0x20e00000, 26, 0x20e00000 },
+ { 0x3, 0x35, 0x10, 0, 0x00400000, 3, 0x00400000 },
+ { 0x3, 0x35, 0x11, 0, 0x20400000, 22, 0x20400000 },
+ { 0x3, 0x35, 0x12, 0, 0x00800000, 4, 0x00800000 },
+ { 0x3, 0x35, 0x13, 0, 0x05000000, 9, 0x05000000 },
+ { 0x3, 0x35, 0x14, 0, 0x0c800000, 34, 0x0c800000 },
+ { 0x3, 0x35, 0x15, 0, 0x01000000, 5, 0x01000000 },
+ { 0x3, 0x35, 0x16, 0, 0x03000000, 7, 0x03000000 },
+ { 0x3, 0x35, 0x17, 0, 0x04000000, 8, 0x04000000 },
+ { 0x3, 0x35, 0x18, 0, 0x0d000000, 16, 0x0d000000 },
+ { 0x3, 0x35, 0x19, 0, 0x21000000, 27, 0x21000000 },
+ { 0x3, 0x35, 0x1a, 0, 0x02000000, 6, 0x02000000 },
+ { 0x3, 0x35, 0x1b, 0, 0x06000000, 14, 0x06000000 },
+ { 0x3, 0x35, 0x1c, 0, 0x0e000000, 17, 0x0e000000 },
+ { 0x3, 0x35, 0x1d, 0, 0x22000000, 28, 0x22000000 },
+ { 0x3, 0x35, 0x1e, 0, 0x08000000, 15, 0x08000000 },
+ { 0x3, 0x35, 0x1f, 0, 0x24000000, 29, 0x24000000 },
+ { 0x3, 0x35, 0x20, 0, 0x28000000, 30, 0x28000000 },
+ { 0x3, 0x35, 0x21, 0, 0x10000000, 18, 0x10000000 },
+ { 0x3, 0x35, 0x22, 0, 0x30000000, 31, 0x30000000 },
+ { 0x3, 0x37, 0x00, 0, 0x0c640000, 0, 0x00000000 },
+ { 0x3, 0x20, 0x00, 0, 0x0c3b0000, 0, 0x00000000 },
+ { 0x3, 0x21, 0x00, 0, 0x0c000000, 0, 0x00000000 },
+ { 0x3, 0x22, 0x00, 0, 0x0c040000, 0, 0x00000000 },
+ { 0x3, 0x23, 0x00, 0, 0x0c050000, 0, 0x00000000 },
+ { 0x3, 0x24, 0x00, 0, 0x20cf0000, 0, 0x00000000 },
+ { 0x3, 0x25, 0x00, 0, 0x0c2f0000, 0, 0x00000000 },
+ { 0x3, 0x26, 0x00, 0, 0x0c230000, 0, 0x00000000 },
+ { 0x3, 0x27, 0x00, 0, 0x0c350000, 0, 0x00000000 },
+ { 0x3, 0x28, 0x00, 0, 0x0c301000, 0, 0x00000000 },
+ { 0x3, 0x29, 0x00, 0, 0x0c302000, 0, 0x00000000 },
+ { 0x3, 0x2a, 0x00, 0, 0x0c303000, 0, 0x00000000 },
+ { 0x3, 0x2b, 0x00, 0, 0x0c2a0000, 0, 0x00000000 },
+ { 0x3, 0x2c, 0x00, 0, 0x0c2b0000, 0, 0x00000000 },
+ { 0x3, 0x2c, 0x01, 0, 0x0c2c0000, 1, 0x00010000 },
+ { 0x3, 0x2c, 0x02, 0, 0x0c2d0000, 2, 0x00020000 },
+ { 0x3, 0x2c, 0x03, 0, 0x0c2e0000, 3, 0x00030000 },
+ { 0x3, 0x00, 0x00, 0, 0x0c660000, 0, 0x00000000 },
+ { 0x3, 0x01, 0x00, 0, 0x0c020000, 0, 0x00000000 },
+ { 0x3, 0x02, 0x00, 0, 0x0c030000, 0, 0x00000000 },
+ { 0x3, 0x03, 0x00, 0, 0x0c310000, 0, 0x00000000 },
+ { 0x3, 0x04, 0x00, 0, 0x0c320000, 0, 0x00000000 },
+ { 0x3, 0x05, 0x00, 0, 0x0c0a0000, 2, 0x00040000 },
+ { 0x3, 0x05, 0x01, 0, 0x0c0b0000, 3, 0x00050000 },
+ { 0x3, 0x05, 0x02, 0, 0x0c0e0000, 5, 0x00080000 },
+ { 0x3, 0x05, 0x03, 0, 0x0c060000, 0, 0x00000000 },
+ { 0x3, 0x05, 0x04, 0, 0x0c080000, 1, 0x00020000 },
+ { 0x3, 0x05, 0x05, 0, 0x0c0c0000, 4, 0x00060000 },
+ { 0x3, 0x06, 0x00, 0, 0x0c330000, 0, 0x00000000 },
+ { 0x3, 0x07, 0x00, 0, 0x0c650000, 0, 0x00000000 },
+ { 0x3, 0x08, 0x00, 0, 0x0c3e0000, 0, 0x00000000 },
+ { 0x3, 0x09, 0x00, 0, 0x0c1e0000, 0, 0x00000000 },
+ { 0x3, 0x0a, 0x00, 0, 0x0c150000, 0, 0x00000000 },
+ { 0x3, 0x0a, 0x01, 0, 0x0c160000, 1, 0x00010000 },
+ { 0x3, 0x0a, 0x02, 0, 0x0c170000, 2, 0x00020000 },
+ { 0x3, 0x0a, 0x03, 0, 0x0c180000, 3, 0x00030000 },
+ { 0x3, 0x0a, 0x04, 0, 0x0c190000, 4, 0x00040000 },
+ { 0x3, 0x0a, 0x05, 0, 0x0c1a0000, 5, 0x00050000 },
+ { 0x3, 0x0a, 0x06, 0, 0x0c1b0000, 6, 0x00060000 },
+ { 0x3, 0x0a, 0x07, 0, 0x0c1c0000, 7, 0x00070000 },
+ { 0x3, 0x0a, 0x08, 0, 0x0c1d0000, 8, 0x00080000 },
+ { 0x3, 0x0b, 0x00, 0, 0x0c240000, 0, 0x00000000 },
+ { 0x3, 0x0c, 0x00, 0, 0x0c250000, 0, 0x00000000 },
+ { 0x3, 0x0d, 0x00, 0, 0x0c340000, 0, 0x00000000 },
+ { 0x3, 0x0e, 0x00, 0, 0x0c260000, 0, 0x00000000 },
+ { 0x3, 0x0f, 0x00, 0, 0x0c0f0000, 0, 0x00000000 },
+ { 0x3, 0x0f, 0x01, 0, 0x0c100000, 1, 0x00010000 },
+ { 0x3, 0x0f, 0x02, 0, 0x0c110000, 2, 0x00020000 },
+ { 0x3, 0x0f, 0x03, 0, 0x0c120000, 3, 0x00030000 },
+ { 0x3, 0x0f, 0x04, 0, 0x0c130000, 4, 0x00040000 },
+ { 0x3, 0x0f, 0x05, 0, 0x0c140000, 5, 0x00050000 },
+ { 0x3, 0x10, 0x00, 0, 0x0c290000, 0, 0x00000000 },
+ { 0x3, 0x30, 0x00, 0, 0x20ce0000, 0, 0x00000000 },
+ { 0x3, 0x31, 0x00, 0, 0x0c1f0000, 0, 0x00000000 },
+ { 0x3, 0x31, 0x01, 0, 0x0c200000, 1, 0x00010000 },
+ { 0x3, 0x31, 0x02, 0, 0x0c210000, 2, 0x00020000 },
+ { 0x3, 0x31, 0x03, 0, 0x0c220000, 3, 0x00030000 },
+ { 0x3, 0x32, 0x00, 0, 0x20cc0000, 3, 0x001c0000 },
+ { 0x3, 0x32, 0x01, 0, 0x20c80000, 2, 0x00180000 },
+ { 0x3, 0x32, 0x02, 0, 0x20c00000, 1, 0x00100000 },
+ { 0x3, 0x32, 0x03, 0, 0x20b00000, 0, 0x00000000 },
+ { 0x3, 0x33, 0x00, 0, 0x0c360000, 0, 0x00000000 },
+ { 0x3, 0x33, 0x01, 0, 0x0c370000, 1, 0x00010000 },
+ { 0x3, 0x33, 0x02, 0, 0x0c3a0000, 3, 0x00040000 },
+ { 0x3, 0x33, 0x03, 0, 0x0c380000, 2, 0x00020000 },
+ { 0x3, 0x38, 0x00, 0, 0x0c600000, 0, 0x00000000 },
+ { 0x3, 0x38, 0x01, 0, 0x00000000, 0, 0x00000000 },
+ { 0x3, 0x39, 0x00, 0, 0x0c280000, 0, 0x00000000 }
+};
+
+/*
+ * SCE/RCE NOC aperture lookup table as per file "SCE_NOC_Structure.info".
+ */
+static const char * const tegra194_scenoc_routeid_initflow[] = {
+ [0x0] = "cbb_i/I/0",
+ [0x1] = "cpu_m_i/I/0",
+ [0x2] = "cpu_p_i/I/0",
+ [0x3] = "dma_m_i/I/0",
+ [0x4] = "dma_p_i/I/0",
+ [0x5] = "RESERVED",
+ [0x6] = "RESERVED",
+ [0x7] = "RESERVED"
+};
+
+static const char * const tegra194_scenoc_routeid_targflow[] = {
+ [0x00] = "multiport0_t/T/atcm_cfg",
+ [0x01] = "multiport0_t/T/car",
+ [0x02] = "multiport0_t/T/cast",
+ [0x03] = "multiport0_t/T/cfg",
+ [0x04] = "multiport0_t/T/dast",
+ [0x05] = "multiport0_t/T/dma",
+ [0x06] = "multiport0_t/T/err_collator",
+ [0x07] = "multiport0_t/T/err_collator_car",
+ [0x08] = "multiport0_t/T/fpga_misc",
+ [0x09] = "multiport0_t/T/fpga_uart",
+ [0x0a] = "multiport0_t/T/gte",
+ [0x0b] = "multiport0_t/T/hsp",
+ [0x0c] = "multiport0_t/T/misc",
+ [0x0d] = "multiport0_t/T/pm",
+ [0x0e] = "multiport0_t/T/tke",
+ [0x0f] = "RESERVED",
+ [0x10] = "multiport1_t/T/hsm",
+ [0x11] = "multiport1_t/T/vic0",
+ [0x12] = "multiport1_t/T/vic1",
+ [0x13] = "ast0_t/T/0",
+ [0x14] = "ast1_t/T/0",
+ [0x15] = "cbb_t/T/0",
+ [0x16] = "cpu_t/T/0",
+ [0x17] = "sce_noc_firewall/T/0",
+ [0x18] = "svc_t/T/0",
+ [0x19] = "RESERVED",
+ [0x1a] = "RESERVED",
+ [0x1b] = "RESERVED",
+ [0x1c] = "RESERVED",
+ [0x1d] = "RESERVED",
+ [0x1e] = "RESERVED",
+ [0x1f] = "RESERVED"
+};
+
+/*
+ * Fields of SCE/RCE NOC lookup table:
+ * Init flow, Targ flow, Targ subrange, Init mapping, Init localAddress,
+ * Targ mapping, Targ localAddress
+ * ----------------------------------------------------------------------------
+ */
+static const struct tegra194_cbb_aperture tegra194_scenoc_apert_lookup[] = {
+ { 0x0, 0x16, 0x0, 0, 0x0b400000, 0, 0x0b400000 },
+ { 0x0, 0x16, 0x1, 0, 0x0bc00000, 1, 0x0bc00000 },
+ { 0x0, 0x0, 0x0, 0, 0x0b000000, 0, 0x00000000 },
+ { 0x0, 0x0, 0x1, 0, 0x0b800000, 1, 0x00000000 },
+ { 0x0, 0x1, 0x0, 0, 0x20de0000, 3, 0x000e0000 },
+ { 0x0, 0x1, 0x1, 0, 0x210e0000, 7, 0x000e0000 },
+ { 0x0, 0x1, 0x2, 0, 0x20dc0000, 2, 0x000c0000 },
+ { 0x0, 0x1, 0x3, 0, 0x210c0000, 6, 0x000c0000 },
+ { 0x0, 0x1, 0x4, 0, 0x20d80000, 1, 0x00080000 },
+ { 0x0, 0x1, 0x5, 0, 0x21080000, 5, 0x00080000 },
+ { 0x0, 0x1, 0x6, 0, 0x20d00000, 0, 0x00000000 },
+ { 0x0, 0x1, 0x7, 0, 0x21000000, 4, 0x00000000 },
+ { 0x0, 0x2, 0x0, 0, 0x0b040000, 0, 0x00000000 },
+ { 0x0, 0x2, 0x1, 0, 0x0b840000, 1, 0x00000000 },
+ { 0x0, 0x3, 0x0, 0, 0x0b230000, 0, 0x00000000 },
+ { 0x0, 0x3, 0x1, 0, 0x0ba30000, 1, 0x00000000 },
+ { 0x0, 0x4, 0x0, 0, 0x0b050000, 0, 0x00000000 },
+ { 0x0, 0x4, 0x1, 0, 0x0b850000, 1, 0x00000000 },
+ { 0x0, 0x5, 0x0, 0, 0x0b060000, 0, 0x00000000 },
+ { 0x0, 0x5, 0x1, 0, 0x0b070000, 1, 0x00010000 },
+ { 0x0, 0x5, 0x2, 0, 0x0b080000, 2, 0x00020000 },
+ { 0x0, 0x5, 0x3, 0, 0x0b090000, 3, 0x00030000 },
+ { 0x0, 0x5, 0x4, 0, 0x0b0a0000, 4, 0x00040000 },
+ { 0x0, 0x5, 0x5, 0, 0x0b0b0000, 5, 0x00050000 },
+ { 0x0, 0x5, 0x6, 0, 0x0b0c0000, 6, 0x00060000 },
+ { 0x0, 0x5, 0x7, 0, 0x0b0d0000, 7, 0x00070000 },
+ { 0x0, 0x5, 0x8, 0, 0x0b0e0000, 8, 0x00080000 },
+ { 0x0, 0x5, 0x9, 0, 0x0b860000, 9, 0x00000000 },
+ { 0x0, 0x5, 0xa, 0, 0x0b870000, 10, 0x00010000 },
+ { 0x0, 0x5, 0xb, 0, 0x0b880000, 11, 0x00020000 },
+ { 0x0, 0x5, 0xc, 0, 0x0b890000, 12, 0x00030000 },
+ { 0x0, 0x5, 0xd, 0, 0x0b8a0000, 13, 0x00040000 },
+ { 0x0, 0x5, 0xe, 0, 0x0b8b0000, 14, 0x00050000 },
+ { 0x0, 0x5, 0xf, 0, 0x0b8c0000, 15, 0x00060000 },
+ { 0x0, 0x5, 0x10, 0, 0x0b8d0000, 16, 0x00070000 },
+ { 0x0, 0x5, 0x11, 0, 0x0b8e0000, 17, 0x00080000 },
+ { 0x0, 0x6, 0x0, 0, 0x0b650000, 0, 0x00000000 },
+ { 0x0, 0x6, 0x1, 0, 0x0be50000, 1, 0x00000000 },
+ { 0x0, 0x7, 0x0, 0, 0x20df0000, 0, 0x00000000 },
+ { 0x0, 0x7, 0x1, 0, 0x210f0000, 1, 0x00000000 },
+ { 0x0, 0x8, 0x0, 0, 0x0b3e0000, 0, 0x00000000 },
+ { 0x0, 0x8, 0x1, 0, 0x0bbe0000, 1, 0x00000000 },
+ { 0x0, 0x9, 0x0, 0, 0x0b3d0000, 0, 0x00000000 },
+ { 0x0, 0x9, 0x1, 0, 0x0bbd0000, 1, 0x00000000 },
+ { 0x0, 0xa, 0x0, 0, 0x0b1e0000, 0, 0x00000000 },
+ { 0x0, 0xa, 0x1, 0, 0x0b9e0000, 1, 0x00000000 },
+ { 0x0, 0xb, 0x0, 0, 0x0b150000, 0, 0x00000000 },
+ { 0x0, 0xb, 0x1, 0, 0x0b160000, 1, 0x00010000 },
+ { 0x0, 0xb, 0x2, 0, 0x0b170000, 2, 0x00020000 },
+ { 0x0, 0xb, 0x3, 0, 0x0b180000, 3, 0x00030000 },
+ { 0x0, 0xb, 0x4, 0, 0x0b190000, 4, 0x00040000 },
+ { 0x0, 0xb, 0x5, 0, 0x0b1a0000, 5, 0x00050000 },
+ { 0x0, 0xb, 0x6, 0, 0x0b1b0000, 6, 0x00060000 },
+ { 0x0, 0xb, 0x7, 0, 0x0b1c0000, 7, 0x00070000 },
+ { 0x0, 0xb, 0x8, 0, 0x0b1d0000, 8, 0x00080000 },
+ { 0x0, 0xb, 0x9, 0, 0x0b950000, 9, 0x00000000 },
+ { 0x0, 0xb, 0xa, 0, 0x0b960000, 10, 0x00010000 },
+ { 0x0, 0xb, 0xb, 0, 0x0b970000, 11, 0x00020000 },
+ { 0x0, 0xb, 0xc, 0, 0x0b980000, 12, 0x00030000 },
+ { 0x0, 0xb, 0xd, 0, 0x0b990000, 13, 0x00040000 },
+ { 0x0, 0xb, 0xe, 0, 0x0b9a0000, 14, 0x00050000 },
+ { 0x0, 0xb, 0xf, 0, 0x0b9b0000, 15, 0x00060000 },
+ { 0x0, 0xb, 0x10, 0, 0x0b9c0000, 16, 0x00070000 },
+ { 0x0, 0xb, 0x11, 0, 0x0b9d0000, 17, 0x00080000 },
+ { 0x0, 0xc, 0x0, 0, 0x0b660000, 0, 0x00000000 },
+ { 0x0, 0xc, 0x1, 0, 0x0be60000, 1, 0x00000000 },
+ { 0x0, 0xd, 0x0, 0, 0x0b1f0000, 0, 0x00000000 },
+ { 0x0, 0xd, 0x1, 0, 0x0b200000, 1, 0x00010000 },
+ { 0x0, 0xd, 0x2, 0, 0x0b210000, 2, 0x00020000 },
+ { 0x0, 0xd, 0x3, 0, 0x0b220000, 3, 0x00030000 },
+ { 0x0, 0xd, 0x4, 0, 0x0b9f0000, 4, 0x00000000 },
+ { 0x0, 0xd, 0x5, 0, 0x0ba00000, 5, 0x00010000 },
+ { 0x0, 0xd, 0x6, 0, 0x0ba10000, 6, 0x00020000 },
+ { 0x0, 0xd, 0x7, 0, 0x0ba20000, 7, 0x00030000 },
+ { 0x0, 0xe, 0x0, 0, 0x0b0f0000, 0, 0x00000000 },
+ { 0x0, 0xe, 0x1, 0, 0x0b100000, 1, 0x00010000 },
+ { 0x0, 0xe, 0x2, 0, 0x0b110000, 2, 0x00020000 },
+ { 0x0, 0xe, 0x3, 0, 0x0b120000, 3, 0x00030000 },
+ { 0x0, 0xe, 0x4, 0, 0x0b130000, 4, 0x00040000 },
+ { 0x0, 0xe, 0x5, 0, 0x0b140000, 5, 0x00050000 },
+ { 0x0, 0xe, 0x6, 0, 0x0b8f0000, 6, 0x00000000 },
+ { 0x0, 0xe, 0x7, 0, 0x0b900000, 7, 0x00010000 },
+ { 0x0, 0xe, 0x8, 0, 0x0b910000, 8, 0x00020000 },
+ { 0x0, 0xe, 0x9, 0, 0x0b920000, 9, 0x00030000 },
+ { 0x0, 0xe, 0xa, 0, 0x0b930000, 10, 0x00040000 },
+ { 0x0, 0xe, 0xb, 0, 0x0b940000, 11, 0x00050000 },
+ { 0x0, 0x10, 0x0, 0, 0x0b240000, 0, 0x00000000 },
+ { 0x0, 0x10, 0x1, 0, 0x0ba40000, 1, 0x00000000 },
+ { 0x0, 0x11, 0x0, 0, 0x0b020000, 0, 0x00000000 },
+ { 0x0, 0x11, 0x1, 0, 0x0b820000, 1, 0x00000000 },
+ { 0x0, 0x12, 0x0, 0, 0x0b030000, 0, 0x00000000 },
+ { 0x0, 0x12, 0x1, 0, 0x0b830000, 1, 0x00000000 },
+ { 0x0, 0x17, 0x0, 0, 0x0b640000, 0, 0x00000000 },
+ { 0x0, 0x17, 0x1, 0, 0x0be40000, 1, 0x00000000 },
+ { 0x0, 0x18, 0x0, 0, 0x0b600000, 0, 0x00000000 },
+ { 0x0, 0x18, 0x1, 0, 0x0be00000, 1, 0x00000000 },
+ { 0x0, 0x18, 0x2, 0, 0x00000000, 0, 0x00000000 },
+ { 0x0, 0x18, 0x3, 0, 0x00000000, 0, 0x00000000 },
+ { 0x1, 0x13, 0x0, 0, 0x40000000, 0, 0x40000000 },
+ { 0x1, 0x13, 0x1, 1, 0x80000000, 1, 0x80000000 },
+ { 0x1, 0x13, 0x2, 0, 0x00000000, 0, 0x00000000 },
+ { 0x2, 0x15, 0x0, 0, 0x20c00000, 8, 0x20c00000 },
+ { 0x2, 0x15, 0x1, 0, 0x21100000, 22, 0x21100000 },
+ { 0x2, 0x15, 0x2, 0, 0x20e00000, 9, 0x20e00000 },
+ { 0x2, 0x15, 0x3, 0, 0x21200000, 23, 0x21200000 },
+ { 0x2, 0x15, 0x4, 0, 0x20800000, 7, 0x20800000 },
+ { 0x2, 0x15, 0x5, 0, 0x21400000, 24, 0x21400000 },
+ { 0x2, 0x15, 0x6, 0, 0x0b000000, 18, 0x0b000000 },
+ { 0x2, 0x15, 0x7, 0, 0x0b800000, 3, 0x0b800000 },
+ { 0x2, 0x15, 0x8, 0, 0x20000000, 6, 0x20000000 },
+ { 0x2, 0x15, 0x9, 0, 0x21800000, 25, 0x21800000 },
+ { 0x2, 0x15, 0xa, 0, 0x0a000000, 2, 0x0a000000 },
+ { 0x2, 0x15, 0xb, 0, 0x0a000000, 17, 0x0a000000 },
+ { 0x2, 0x15, 0xc, 0, 0x20000000, 21, 0x20000000 },
+ { 0x2, 0x15, 0xd, 0, 0x21000000, 10, 0x21000000 },
+ { 0x2, 0x15, 0xe, 0, 0x08000000, 1, 0x08000000 },
+ { 0x2, 0x15, 0xf, 0, 0x08000000, 16, 0x08000000 },
+ { 0x2, 0x15, 0x10, 0, 0x22000000, 11, 0x22000000 },
+ { 0x2, 0x15, 0x11, 0, 0x22000000, 26, 0x22000000 },
+ { 0x2, 0x15, 0x12, 0, 0x0c000000, 4, 0x0c000000 },
+ { 0x2, 0x15, 0x13, 0, 0x0c000000, 19, 0x0c000000 },
+ { 0x2, 0x15, 0x14, 0, 0x24000000, 12, 0x24000000 },
+ { 0x2, 0x15, 0x15, 0, 0x24000000, 27, 0x24000000 },
+ { 0x2, 0x15, 0x16, 0, 0x00000000, 0, 0x00000000 },
+ { 0x2, 0x15, 0x17, 0, 0x00000000, 15, 0x00000000 },
+ { 0x2, 0x15, 0x18, 0, 0x28000000, 13, 0x28000000 },
+ { 0x2, 0x15, 0x19, 0, 0x28000000, 28, 0x28000000 },
+ { 0x2, 0x15, 0x1a, 0, 0x10000000, 5, 0x10000000 },
+ { 0x2, 0x15, 0x1b, 0, 0x10000000, 20, 0x10000000 },
+ { 0x2, 0x15, 0x1c, 0, 0x30000000, 14, 0x30000000 },
+ { 0x2, 0x15, 0x1d, 0, 0x30000000, 29, 0x30000000 },
+ { 0x2, 0x0, 0x0, 0, 0x0b000000, 0, 0x00000000 },
+ { 0x2, 0x0, 0x1, 0, 0x0b800000, 1, 0x00000000 },
+ { 0x2, 0x1, 0x0, 0, 0x20de0000, 3, 0x000e0000 },
+ { 0x2, 0x1, 0x1, 0, 0x210e0000, 7, 0x000e0000 },
+ { 0x2, 0x1, 0x2, 0, 0x20dc0000, 2, 0x000c0000 },
+ { 0x2, 0x1, 0x3, 0, 0x210c0000, 6, 0x000c0000 },
+ { 0x2, 0x1, 0x4, 0, 0x20d80000, 1, 0x00080000 },
+ { 0x2, 0x1, 0x5, 0, 0x21080000, 5, 0x00080000 },
+ { 0x2, 0x1, 0x6, 0, 0x20d00000, 0, 0x00000000 },
+ { 0x2, 0x1, 0x7, 0, 0x21000000, 4, 0x00000000 },
+ { 0x2, 0x2, 0x0, 0, 0x0b040000, 0, 0x00000000 },
+ { 0x2, 0x2, 0x1, 0, 0x0b840000, 1, 0x00000000 },
+ { 0x2, 0x3, 0x0, 0, 0x0b230000, 0, 0x00000000 },
+ { 0x2, 0x3, 0x1, 0, 0x0ba30000, 1, 0x00000000 },
+ { 0x2, 0x4, 0x0, 0, 0x0b050000, 0, 0x00000000 },
+ { 0x2, 0x4, 0x1, 0, 0x0b850000, 1, 0x00000000 },
+ { 0x2, 0x5, 0x0, 0, 0x0b060000, 0, 0x00000000 },
+ { 0x2, 0x5, 0x1, 0, 0x0b070000, 1, 0x00010000 },
+ { 0x2, 0x5, 0x2, 0, 0x0b080000, 2, 0x00020000 },
+ { 0x2, 0x5, 0x3, 0, 0x0b090000, 3, 0x00030000 },
+ { 0x2, 0x5, 0x4, 0, 0x0b0a0000, 4, 0x00040000 },
+ { 0x2, 0x5, 0x5, 0, 0x0b0b0000, 5, 0x00050000 },
+ { 0x2, 0x5, 0x6, 0, 0x0b0c0000, 6, 0x00060000 },
+ { 0x2, 0x5, 0x7, 0, 0x0b0d0000, 7, 0x00070000 },
+ { 0x2, 0x5, 0x8, 0, 0x0b0e0000, 8, 0x00080000 },
+ { 0x2, 0x5, 0x9, 0, 0x0b860000, 9, 0x00000000 },
+ { 0x2, 0x5, 0xa, 0, 0x0b870000, 10, 0x00010000 },
+ { 0x2, 0x5, 0xb, 0, 0x0b880000, 11, 0x00020000 },
+ { 0x2, 0x5, 0xc, 0, 0x0b890000, 12, 0x00030000 },
+ { 0x2, 0x5, 0xd, 0, 0x0b8a0000, 13, 0x00040000 },
+ { 0x2, 0x5, 0xe, 0, 0x0b8b0000, 14, 0x00050000 },
+ { 0x2, 0x5, 0xf, 0, 0x0b8c0000, 15, 0x00060000 },
+ { 0x2, 0x5, 0x10, 0, 0x0b8d0000, 16, 0x00070000 },
+ { 0x2, 0x5, 0x11, 0, 0x0b8e0000, 17, 0x00080000 },
+ { 0x2, 0x6, 0x0, 0, 0x0b650000, 0, 0x00000000 },
+ { 0x2, 0x6, 0x1, 0, 0x0be50000, 1, 0x00000000 },
+ { 0x2, 0x7, 0x0, 0, 0x20df0000, 0, 0x00000000 },
+ { 0x2, 0x7, 0x1, 0, 0x210f0000, 1, 0x00000000 },
+ { 0x2, 0x8, 0x0, 0, 0x0b3e0000, 0, 0x00000000 },
+ { 0x2, 0x8, 0x1, 0, 0x0bbe0000, 1, 0x00000000 },
+ { 0x2, 0x9, 0x0, 0, 0x0b3d0000, 0, 0x00000000 },
+ { 0x2, 0x9, 0x1, 0, 0x0bbd0000, 1, 0x00000000 },
+ { 0x2, 0xa, 0x0, 0, 0x0b1e0000, 0, 0x00000000 },
+ { 0x2, 0xa, 0x1, 0, 0x0b9e0000, 1, 0x00000000 },
+ { 0x2, 0xb, 0x0, 0, 0x0b150000, 0, 0x00000000 },
+ { 0x2, 0xb, 0x1, 0, 0x0b160000, 1, 0x00010000 },
+ { 0x2, 0xb, 0x2, 0, 0x0b170000, 2, 0x00020000 },
+ { 0x2, 0xb, 0x3, 0, 0x0b180000, 3, 0x00030000 },
+ { 0x2, 0xb, 0x4, 0, 0x0b190000, 4, 0x00040000 },
+ { 0x2, 0xb, 0x5, 0, 0x0b1a0000, 5, 0x00050000 },
+ { 0x2, 0xb, 0x6, 0, 0x0b1b0000, 6, 0x00060000 },
+ { 0x2, 0xb, 0x7, 0, 0x0b1c0000, 7, 0x00070000 },
+ { 0x2, 0xb, 0x8, 0, 0x0b1d0000, 8, 0x00080000 },
+ { 0x2, 0xb, 0x9, 0, 0x0b950000, 9, 0x00000000 },
+ { 0x2, 0xb, 0xa, 0, 0x0b960000, 10, 0x00010000 },
+ { 0x2, 0xb, 0xb, 0, 0x0b970000, 11, 0x00020000 },
+ { 0x2, 0xb, 0xc, 0, 0x0b980000, 12, 0x00030000 },
+ { 0x2, 0xb, 0xd, 0, 0x0b990000, 13, 0x00040000 },
+ { 0x2, 0xb, 0xe, 0, 0x0b9a0000, 14, 0x00050000 },
+ { 0x2, 0xb, 0xf, 0, 0x0b9b0000, 15, 0x00060000 },
+ { 0x2, 0xb, 0x10, 0, 0x0b9c0000, 16, 0x00070000 },
+ { 0x2, 0xb, 0x11, 0, 0x0b9d0000, 17, 0x00080000 },
+ { 0x2, 0xc, 0x0, 0, 0x0b660000, 0, 0x00000000 },
+ { 0x2, 0xc, 0x1, 0, 0x0be60000, 1, 0x00000000 },
+ { 0x2, 0xd, 0x0, 0, 0x0b1f0000, 0, 0x00000000 },
+ { 0x2, 0xd, 0x1, 0, 0x0b200000, 1, 0x00010000 },
+ { 0x2, 0xd, 0x2, 0, 0x0b210000, 2, 0x00020000 },
+ { 0x2, 0xd, 0x3, 0, 0x0b220000, 3, 0x00030000 },
+ { 0x2, 0xd, 0x4, 0, 0x0b9f0000, 4, 0x00000000 },
+ { 0x2, 0xd, 0x5, 0, 0x0ba00000, 5, 0x00010000 },
+ { 0x2, 0xd, 0x6, 0, 0x0ba10000, 6, 0x00020000 },
+ { 0x2, 0xd, 0x7, 0, 0x0ba20000, 7, 0x00030000 },
+ { 0x2, 0xe, 0x0, 0, 0x0b0f0000, 0, 0x00000000 },
+ { 0x2, 0xe, 0x1, 0, 0x0b100000, 1, 0x00010000 },
+ { 0x2, 0xe, 0x2, 0, 0x0b110000, 2, 0x00020000 },
+ { 0x2, 0xe, 0x3, 0, 0x0b120000, 3, 0x00030000 },
+ { 0x2, 0xe, 0x4, 0, 0x0b130000, 4, 0x00040000 },
+ { 0x2, 0xe, 0x5, 0, 0x0b140000, 5, 0x00050000 },
+ { 0x2, 0xe, 0x6, 0, 0x0b8f0000, 6, 0x00000000 },
+ { 0x2, 0xe, 0x7, 0, 0x0b900000, 7, 0x00010000 },
+ { 0x2, 0xe, 0x8, 0, 0x0b910000, 8, 0x00020000 },
+ { 0x2, 0xe, 0x9, 0, 0x0b920000, 9, 0x00030000 },
+ { 0x2, 0xe, 0xa, 0, 0x0b930000, 10, 0x00040000 },
+ { 0x2, 0xe, 0xb, 0, 0x0b940000, 11, 0x00050000 },
+ { 0x2, 0x10, 0x0, 0, 0x0b240000, 0, 0x00000000 },
+ { 0x2, 0x10, 0x1, 0, 0x0ba40000, 1, 0x00000000 },
+ { 0x2, 0x11, 0x0, 0, 0x0b020000, 0, 0x00000000 },
+ { 0x2, 0x11, 0x1, 0, 0x0b820000, 1, 0x00000000 },
+ { 0x2, 0x12, 0x0, 0, 0x0b030000, 0, 0x00000000 },
+ { 0x2, 0x12, 0x1, 0, 0x0b830000, 1, 0x00000000 },
+ { 0x2, 0x17, 0x0, 0, 0x0b640000, 0, 0x00000000 },
+ { 0x2, 0x17, 0x1, 0, 0x0be40000, 1, 0x00000000 },
+ { 0x2, 0x18, 0x0, 0, 0x0b600000, 0, 0x00000000 },
+ { 0x2, 0x18, 0x1, 0, 0x0be00000, 1, 0x00000000 },
+ { 0x2, 0x18, 0x2, 0, 0x00000000, 0, 0x00000000 },
+ { 0x2, 0x18, 0x3, 0, 0x00000000, 0, 0x00000000 },
+ { 0x3, 0x14, 0x0, 0, 0x40000000, 0, 0x40000000 },
+ { 0x3, 0x14, 0x1, 1, 0x80000000, 1, 0x80000000 },
+ { 0x3, 0x16, 0x0, 2, 0x0b400000, 0, 0x0b400000 },
+ { 0x3, 0x16, 0x1, 2, 0x0bc00000, 1, 0x0bc00000 },
+ { 0x3, 0x16, 0x2, 0, 0x00000000, 0, 0x00000000 },
+ { 0x3, 0x16, 0x3, 0, 0x00000000, 0, 0x00000000 },
+ { 0x4, 0x15, 0x0, 0, 0x20c00000, 8, 0x20c00000 },
+ { 0x4, 0x15, 0x1, 0, 0x21100000, 22, 0x21100000 },
+ { 0x4, 0x15, 0x2, 0, 0x20e00000, 9, 0x20e00000 },
+ { 0x4, 0x15, 0x3, 0, 0x21200000, 23, 0x21200000 },
+ { 0x4, 0x15, 0x4, 0, 0x20800000, 7, 0x20800000 },
+ { 0x4, 0x15, 0x5, 0, 0x21400000, 24, 0x21400000 },
+ { 0x4, 0x15, 0x6, 0, 0x0b000000, 18, 0x0b000000 },
+ { 0x4, 0x15, 0x7, 0, 0x0b800000, 3, 0x0b800000 },
+ { 0x4, 0x15, 0x8, 0, 0x20000000, 6, 0x20000000 },
+ { 0x4, 0x15, 0x9, 0, 0x21800000, 25, 0x21800000 },
+ { 0x4, 0x15, 0xa, 0, 0x0a000000, 2, 0x0a000000 },
+ { 0x4, 0x15, 0xb, 0, 0x0a000000, 17, 0x0a000000 },
+ { 0x4, 0x15, 0xc, 0, 0x20000000, 21, 0x20000000 },
+ { 0x4, 0x15, 0xd, 0, 0x21000000, 10, 0x21000000 },
+ { 0x4, 0x15, 0xe, 0, 0x08000000, 1, 0x08000000 },
+ { 0x4, 0x15, 0xf, 0, 0x08000000, 16, 0x08000000 },
+ { 0x4, 0x15, 0x10, 0, 0x22000000, 11, 0x22000000 },
+ { 0x4, 0x15, 0x11, 0, 0x22000000, 26, 0x22000000 },
+ { 0x4, 0x15, 0x12, 0, 0x0c000000, 4, 0x0c000000 },
+ { 0x4, 0x15, 0x13, 0, 0x0c000000, 19, 0x0c000000 },
+ { 0x4, 0x15, 0x14, 0, 0x24000000, 12, 0x24000000 },
+ { 0x4, 0x15, 0x15, 0, 0x24000000, 27, 0x24000000 },
+ { 0x4, 0x15, 0x16, 0, 0x00000000, 0, 0x00000000 },
+ { 0x4, 0x15, 0x17, 0, 0x00000000, 15, 0x00000000 },
+ { 0x4, 0x15, 0x18, 0, 0x28000000, 13, 0x28000000 },
+ { 0x4, 0x15, 0x19, 0, 0x28000000, 28, 0x28000000 },
+ { 0x4, 0x15, 0x1a, 0, 0x10000000, 5, 0x10000000 },
+ { 0x4, 0x15, 0x1b, 0, 0x10000000, 20, 0x10000000 },
+ { 0x4, 0x15, 0x1c, 0, 0x30000000, 14, 0x30000000 },
+ { 0x4, 0x15, 0x1d, 0, 0x30000000, 29, 0x30000000 },
+ { 0x4, 0x0, 0x0, 0, 0x0b000000, 0, 0x00000000 },
+ { 0x4, 0x0, 0x1, 0, 0x0b800000, 1, 0x00000000 },
+ { 0x4, 0x1, 0x0, 0, 0x20de0000, 3, 0x000e0000 },
+ { 0x4, 0x1, 0x1, 0, 0x210e0000, 7, 0x000e0000 },
+ { 0x4, 0x1, 0x2, 0, 0x20dc0000, 2, 0x000c0000 },
+ { 0x4, 0x1, 0x3, 0, 0x210c0000, 6, 0x000c0000 },
+ { 0x4, 0x1, 0x4, 0, 0x20d80000, 1, 0x00080000 },
+ { 0x4, 0x1, 0x5, 0, 0x21080000, 5, 0x00080000 },
+ { 0x4, 0x1, 0x6, 0, 0x20d00000, 0, 0x00000000 },
+ { 0x4, 0x1, 0x7, 0, 0x21000000, 4, 0x00000000 },
+ { 0x4, 0x2, 0x0, 0, 0x0b040000, 0, 0x00000000 },
+ { 0x4, 0x2, 0x1, 0, 0x0b840000, 1, 0x00000000 },
+ { 0x4, 0x3, 0x0, 0, 0x0b230000, 0, 0x00000000 },
+ { 0x4, 0x3, 0x1, 0, 0x0ba30000, 1, 0x00000000 },
+ { 0x4, 0x4, 0x0, 0, 0x0b050000, 0, 0x00000000 },
+ { 0x4, 0x4, 0x1, 0, 0x0b850000, 1, 0x00000000 },
+ { 0x4, 0x5, 0x0, 0, 0x0b060000, 0, 0x00000000 },
+ { 0x4, 0x5, 0x1, 0, 0x0b070000, 1, 0x00010000 },
+ { 0x4, 0x5, 0x2, 0, 0x0b080000, 2, 0x00020000 },
+ { 0x4, 0x5, 0x3, 0, 0x0b090000, 3, 0x00030000 },
+ { 0x4, 0x5, 0x4, 0, 0x0b0a0000, 4, 0x00040000 },
+ { 0x4, 0x5, 0x5, 0, 0x0b0b0000, 5, 0x00050000 },
+ { 0x4, 0x5, 0x6, 0, 0x0b0c0000, 6, 0x00060000 },
+ { 0x4, 0x5, 0x7, 0, 0x0b0d0000, 7, 0x00070000 },
+ { 0x4, 0x5, 0x8, 0, 0x0b0e0000, 8, 0x00080000 },
+ { 0x4, 0x5, 0x9, 0, 0x0b860000, 9, 0x00000000 },
+ { 0x4, 0x5, 0xa, 0, 0x0b870000, 10, 0x00010000 },
+ { 0x4, 0x5, 0xb, 0, 0x0b880000, 11, 0x00020000 },
+ { 0x4, 0x5, 0xc, 0, 0x0b890000, 12, 0x00030000 },
+ { 0x4, 0x5, 0xd, 0, 0x0b8a0000, 13, 0x00040000 },
+ { 0x4, 0x5, 0xe, 0, 0x0b8b0000, 14, 0x00050000 },
+ { 0x4, 0x5, 0xf, 0, 0x0b8c0000, 15, 0x00060000 },
+ { 0x4, 0x5, 0x10, 0, 0x0b8d0000, 16, 0x00070000 },
+ { 0x4, 0x5, 0x11, 0, 0x0b8e0000, 17, 0x00080000 },
+ { 0x4, 0x6, 0x0, 0, 0x0b650000, 0, 0x00000000 },
+ { 0x4, 0x6, 0x1, 0, 0x0be50000, 1, 0x00000000 },
+ { 0x4, 0x7, 0x0, 0, 0x20df0000, 0, 0x00000000 },
+ { 0x4, 0x7, 0x1, 0, 0x210f0000, 1, 0x00000000 },
+ { 0x4, 0x8, 0x0, 0, 0x0b3e0000, 0, 0x00000000 },
+ { 0x4, 0x8, 0x1, 0, 0x0bbe0000, 1, 0x00000000 },
+ { 0x4, 0x9, 0x0, 0, 0x0b3d0000, 0, 0x00000000 },
+ { 0x4, 0x9, 0x1, 0, 0x0bbd0000, 1, 0x00000000 },
+ { 0x4, 0xa, 0x0, 0, 0x0b1e0000, 0, 0x00000000 },
+ { 0x4, 0xa, 0x1, 0, 0x0b9e0000, 1, 0x00000000 },
+ { 0x4, 0xb, 0x0, 0, 0x0b150000, 0, 0x00000000 },
+ { 0x4, 0xb, 0x1, 0, 0x0b160000, 1, 0x00010000 },
+ { 0x4, 0xb, 0x2, 0, 0x0b170000, 2, 0x00020000 },
+ { 0x4, 0xb, 0x3, 0, 0x0b180000, 3, 0x00030000 },
+ { 0x4, 0xb, 0x4, 0, 0x0b190000, 4, 0x00040000 },
+ { 0x4, 0xb, 0x5, 0, 0x0b1a0000, 5, 0x00050000 },
+ { 0x4, 0xb, 0x6, 0, 0x0b1b0000, 6, 0x00060000 },
+ { 0x4, 0xb, 0x7, 0, 0x0b1c0000, 7, 0x00070000 },
+ { 0x4, 0xb, 0x8, 0, 0x0b1d0000, 8, 0x00080000 },
+ { 0x4, 0xb, 0x9, 0, 0x0b950000, 9, 0x00000000 },
+ { 0x4, 0xb, 0xa, 0, 0x0b960000, 10, 0x00010000 },
+ { 0x4, 0xb, 0xb, 0, 0x0b970000, 11, 0x00020000 },
+ { 0x4, 0xb, 0xc, 0, 0x0b980000, 12, 0x00030000 },
+ { 0x4, 0xb, 0xd, 0, 0x0b990000, 13, 0x00040000 },
+ { 0x4, 0xb, 0xe, 0, 0x0b9a0000, 14, 0x00050000 },
+ { 0x4, 0xb, 0xf, 0, 0x0b9b0000, 15, 0x00060000 },
+ { 0x4, 0xb, 0x10, 0, 0x0b9c0000, 16, 0x00070000 },
+ { 0x4, 0xb, 0x11, 0, 0x0b9d0000, 17, 0x00080000 },
+ { 0x4, 0xc, 0x0, 0, 0x0b660000, 0, 0x00000000 },
+ { 0x4, 0xc, 0x1, 0, 0x0be60000, 1, 0x00000000 },
+ { 0x4, 0xd, 0x0, 0, 0x0b1f0000, 0, 0x00000000 },
+ { 0x4, 0xd, 0x1, 0, 0x0b200000, 1, 0x00010000 },
+ { 0x4, 0xd, 0x2, 0, 0x0b210000, 2, 0x00020000 },
+ { 0x4, 0xd, 0x3, 0, 0x0b220000, 3, 0x00030000 },
+ { 0x4, 0xd, 0x4, 0, 0x0b9f0000, 4, 0x00000000 },
+ { 0x4, 0xd, 0x5, 0, 0x0ba00000, 5, 0x00010000 },
+ { 0x4, 0xd, 0x6, 0, 0x0ba10000, 6, 0x00020000 },
+ { 0x4, 0xd, 0x7, 0, 0x0ba20000, 7, 0x00030000 },
+ { 0x4, 0xe, 0x0, 0, 0x0b0f0000, 0, 0x00000000 },
+ { 0x4, 0xe, 0x1, 0, 0x0b100000, 1, 0x00010000 },
+ { 0x4, 0xe, 0x2, 0, 0x0b110000, 2, 0x00020000 },
+ { 0x4, 0xe, 0x3, 0, 0x0b120000, 3, 0x00030000 },
+ { 0x4, 0xe, 0x4, 0, 0x0b130000, 4, 0x00040000 },
+ { 0x4, 0xe, 0x5, 0, 0x0b140000, 5, 0x00050000 },
+ { 0x4, 0xe, 0x6, 0, 0x0b8f0000, 6, 0x00000000 },
+ { 0x4, 0xe, 0x7, 0, 0x0b900000, 7, 0x00010000 },
+ { 0x4, 0xe, 0x8, 0, 0x0b910000, 8, 0x00020000 },
+ { 0x4, 0xe, 0x9, 0, 0x0b920000, 9, 0x00030000 },
+ { 0x4, 0xe, 0xa, 0, 0x0b930000, 10, 0x00040000 },
+ { 0x4, 0xe, 0xb, 0, 0x0b940000, 11, 0x00050000 },
+ { 0x4, 0x10, 0x0, 0, 0x0b240000, 0, 0x00000000 },
+ { 0x4, 0x10, 0x1, 0, 0x0ba40000, 1, 0x00000000 },
+ { 0x4, 0x11, 0x0, 0, 0x0b020000, 0, 0x00000000 },
+ { 0x4, 0x11, 0x1, 0, 0x0b820000, 1, 0x00000000 },
+ { 0x4, 0x12, 0x0, 0, 0x0b030000, 0, 0x00000000 },
+ { 0x4, 0x12, 0x1, 0, 0x0b830000, 1, 0x00000000 },
+ { 0x4, 0x17, 0x0, 0, 0x0b640000, 0, 0x00000000 },
+ { 0x4, 0x17, 0x1, 0, 0x0be40000, 1, 0x00000000 },
+ { 0x4, 0x18, 0x0, 0, 0x0b600000, 0, 0x00000000 },
+ { 0x4, 0x18, 0x1, 0, 0x0be00000, 1, 0x00000000 },
+ { 0x4, 0x18, 0x2, 0, 0x00000000, 0, 0x00000000 },
+ { 0x4, 0x18, 0x3, 0, 0x00000000, 0, 0x00000000 }
+};
+
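+/*
+ * Decode the InitFlow, TargFlow, Targ_subRange and SeqId fields from the
+ * 64-bit RouteId logged for a failed transaction. The bit layout of the
+ * RouteId differs per NOC, hence one helper per NOC.
+ */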
+static void cbbcentralnoc_parse_routeid(struct tegra194_cbb_aperture *info, u64 routeid)
+{
+ info->initflow = FIELD_GET(CBB_NOC_INITFLOW, routeid);
+ info->targflow = FIELD_GET(CBB_NOC_TARGFLOW, routeid);
+ info->targ_subrange = FIELD_GET(CBB_NOC_TARG_SUBRANGE, routeid);
+ info->seqid = FIELD_GET(CBB_NOC_SEQID, routeid);
+}
+
+static void bpmpnoc_parse_routeid(struct tegra194_cbb_aperture *info, u64 routeid)
+{
+ info->initflow = FIELD_GET(BPMP_NOC_INITFLOW, routeid);
+ info->targflow = FIELD_GET(BPMP_NOC_TARGFLOW, routeid);
+ info->targ_subrange = FIELD_GET(BPMP_NOC_TARG_SUBRANGE, routeid);
+ info->seqid = FIELD_GET(BPMP_NOC_SEQID, routeid);
+}
+
+static void aonnoc_parse_routeid(struct tegra194_cbb_aperture *info, u64 routeid)
+{
+ info->initflow = FIELD_GET(AON_NOC_INITFLOW, routeid);
+ info->targflow = FIELD_GET(AON_NOC_TARGFLOW, routeid);
+ info->targ_subrange = FIELD_GET(AON_NOC_TARG_SUBRANGE, routeid);
+ info->seqid = FIELD_GET(AON_NOC_SEQID, routeid);
+}
+
+static void scenoc_parse_routeid(struct tegra194_cbb_aperture *info, u64 routeid)
+{
+ info->initflow = FIELD_GET(SCE_NOC_INITFLOW, routeid);
+ info->targflow = FIELD_GET(SCE_NOC_TARGFLOW, routeid);
+ info->targ_subrange = FIELD_GET(SCE_NOC_TARG_SUBRANGE, routeid);
+ info->seqid = FIELD_GET(SCE_NOC_SEQID, routeid);
+}
+
+static void cbbcentralnoc_parse_userbits(struct tegra194_cbb_userbits *usrbits, u32 elog_5)
+{
+ usrbits->axcache = FIELD_GET(CBB_NOC_AXCACHE, elog_5);
+ usrbits->non_mod = FIELD_GET(CBB_NOC_NON_MOD, elog_5);
+ usrbits->axprot = FIELD_GET(CBB_NOC_AXPROT, elog_5);
+ usrbits->falconsec = FIELD_GET(CBB_NOC_FALCONSEC, elog_5);
+ usrbits->grpsec = FIELD_GET(CBB_NOC_GRPSEC, elog_5);
+ usrbits->vqc = FIELD_GET(CBB_NOC_VQC, elog_5);
+ usrbits->mstr_id = FIELD_GET(CBB_NOC_MSTR_ID, elog_5) - 1;
+ usrbits->axi_id = FIELD_GET(CBB_NOC_AXI_ID, elog_5);
+}
+
+static void clusternoc_parse_userbits(struct tegra194_cbb_userbits *usrbits, u32 elog_5)
+{
+ usrbits->axcache = FIELD_GET(CLUSTER_NOC_AXCACHE, elog_5);
+ usrbits->axprot = FIELD_GET(CLUSTER_NOC_AXCACHE, elog_5);
+ usrbits->falconsec = FIELD_GET(CLUSTER_NOC_FALCONSEC, elog_5);
+ usrbits->grpsec = FIELD_GET(CLUSTER_NOC_GRPSEC, elog_5);
+ usrbits->vqc = FIELD_GET(CLUSTER_NOC_VQC, elog_5);
+ usrbits->mstr_id = FIELD_GET(CLUSTER_NOC_MSTR_ID, elog_5) - 1;
+}
+
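+/* Assert FaultEn on error loggers 0, 1 and 2 so that CBB errors are reported. */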
+static void tegra194_cbb_fault_enable(struct tegra_cbb *cbb)
+{
+ struct tegra194_cbb *priv = to_tegra194_cbb(cbb);
+
+ writel(1, priv->regs + ERRLOGGER_0_FAULTEN_0);
+ writel(1, priv->regs + ERRLOGGER_1_FAULTEN_0);
+ writel(1, priv->regs + ERRLOGGER_2_FAULTEN_0);
+}
+
+static void tegra194_cbb_stall_enable(struct tegra_cbb *cbb)
+{
+ struct tegra194_cbb *priv = to_tegra194_cbb(cbb);
+
+ writel(1, priv->regs + ERRLOGGER_0_STALLEN_0);
+ writel(1, priv->regs + ERRLOGGER_1_STALLEN_0);
+ writel(1, priv->regs + ERRLOGGER_2_STALLEN_0);
+}
+
+static void tegra194_cbb_error_clear(struct tegra_cbb *cbb)
+{
+ struct tegra194_cbb *priv = to_tegra194_cbb(cbb);
+
+ writel(1, priv->regs + ERRLOGGER_0_ERRCLR_0);
+ writel(1, priv->regs + ERRLOGGER_1_ERRCLR_0);
+ writel(1, priv->regs + ERRLOGGER_2_ERRCLR_0);
+ dsb(sy);
+}
+
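+/*
+ * Collect the ErrVld flags of error loggers 0, 1 and 2 into bits 0-2 of the
+ * returned status word.
+ */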
+static u32 tegra194_cbb_get_status(struct tegra_cbb *cbb)
+{
+ struct tegra194_cbb *priv = to_tegra194_cbb(cbb);
+ u32 value;
+
+ value = readl(priv->regs + ERRLOGGER_0_ERRVLD_0);
+ value |= (readl(priv->regs + ERRLOGGER_1_ERRVLD_0) << 1);
+ value |= (readl(priv->regs + ERRLOGGER_2_ERRVLD_0) << 2);
+
+ dsb(sy);
+ return value;
+}
+
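+/*
+ * Read the raw interrupt status of an AXI2APB bridge and write all ones back
+ * to clear the reported bits.
+ */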
+static u32 tegra194_axi2apb_status(void __iomem *addr)
+{
+ u32 value;
+
+ value = readl(addr + DMAAPB_X_RAW_INTERRUPT_STATUS);
+ writel(0xffffffff, addr + DMAAPB_X_RAW_INTERRUPT_STATUS);
+
+ return value;
+}
+
+static bool tegra194_axi2apb_fatal(struct seq_file *file, unsigned int bridge, u32 status)
+{
+ bool is_fatal = true;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(tegra194_axi2apb_error); i++) {
+ if (status & BIT(i)) {
+ tegra_cbb_print_err(file, "\t AXI2APB_%d bridge error: %s\n",
+ bridge + 1, tegra194_axi2apb_error[i]);
+ if (strstr(tegra194_axi2apb_error[i], "Firewall"))
+ is_fatal = false;
+ }
+ }
+
+ return is_fatal;
+}
+
+/*
+ * Fetch the InitLocalAddress from the NOC aperture lookup table
+ * using Targflow and Targ_subrange.
+ */
+static u32 get_init_localaddress(const struct tegra194_cbb_aperture *info,
+ const struct tegra194_cbb_aperture *aper, unsigned int max)
+{
+ unsigned int t_f = 0, t_sr = 0;
+ u32 addr = 0;
+
+ for (t_f = 0; t_f < max; t_f++) {
+ if (aper[t_f].targflow == info->targflow) {
+ t_sr = t_f;
+
+ do {
+ if (aper[t_sr].targ_subrange == info->targ_subrange) {
+ addr = aper[t_sr].init_localaddress;
+ return addr;
+ }
+
+ if (t_sr >= max)
+ return 0;
+
+ t_sr++;
+ } while (aper[t_sr].targflow == aper[t_sr - 1].targflow);
+
+ t_f = t_sr;
+ }
+ }
+
+ return addr;
+}
+
+static void print_errlog5(struct seq_file *file, struct tegra194_cbb *cbb)
+{
+ struct tegra194_cbb_userbits userbits;
+
+ cbb->noc->parse_userbits(&userbits, cbb->errlog5);
+
+ if (!strcmp(cbb->noc->name, "cbb-noc")) {
+ tegra_cbb_print_err(file, "\t Non-Modify\t\t: %#x\n", userbits.non_mod);
+ tegra_cbb_print_err(file, "\t AXI ID\t\t: %#x\n", userbits.axi_id);
+ }
+
+ tegra_cbb_print_err(file, "\t Master ID\t\t: %s\n",
+ cbb->noc->master_id[userbits.mstr_id]);
+ tegra_cbb_print_err(file, "\t Security Group(GRPSEC): %#x\n", userbits.grpsec);
+ tegra_cbb_print_cache(file, userbits.axcache);
+ tegra_cbb_print_prot(file, userbits.axprot);
+ tegra_cbb_print_err(file, "\t FALCONSEC\t\t: %#x\n", userbits.falconsec);
+ tegra_cbb_print_err(file, "\t Virtual Queuing Channel(VQC): %#x\n", userbits.vqc);
+}
+
+/*
+ * Fetch the base address (InitLocalAddress) from the NOC aperture lookup table using the
+ * TargFlow and Targ_subRange extracted from the RouteId, then reconstruct the full address:
+ *
+ * Address = InitLocalAddress + ((ErrLog4 << 32) | ErrLog3)
+ */
+static void
+print_errlog3_4(struct seq_file *file, u32 errlog3, u32 errlog4,
+ const struct tegra194_cbb_aperture *info,
+ const struct tegra194_cbb_aperture *aperture, unsigned int max)
+{
+ u64 addr = (u64)errlog4 << 32 | errlog3;
+
+ /*
+ * If errlog4[7] = "1", then it's a joker entry. Joker entries are a rare phenomenon and
+ * such addresses are not reliable. Debugging should be done using only the RouteId
+ * information.
+ */
+ if (errlog4 & 0x80)
+ tegra_cbb_print_err(file, "\t debug using RouteId alone as below address is a "
+ "joker entry and not reliable");
+
+ addr += get_init_localaddress(info, aperture, max);
+
+ tegra_cbb_print_err(file, "\t Address accessed\t: %#llx\n", addr);
+}
+
+/*
+ * Get the RouteId from the ErrLog1/ErrLog2 registers and extract the
+ * InitFlow, TargFlow, Targ_subRange and SeqId fields from it.
+ */
+static void
+print_errlog1_2(struct seq_file *file, struct tegra194_cbb *cbb,
+ struct tegra194_cbb_aperture *info)
+{
+ u64 routeid = (u64)cbb->errlog2 << 32 | cbb->errlog1;
+ u32 seqid = 0;
+
+ tegra_cbb_print_err(file, "\t RouteId\t\t: %#llx\n", routeid);
+
+ cbb->noc->parse_routeid(info, routeid);
+
+ tegra_cbb_print_err(file, "\t InitFlow\t\t: %s\n",
+ cbb->noc->routeid_initflow[info->initflow]);
+
+ tegra_cbb_print_err(file, "\t Targflow\t\t: %s\n",
+ cbb->noc->routeid_targflow[info->targflow]);
+
+ tegra_cbb_print_err(file, "\t TargSubRange\t\t: %d\n", info->targ_subrange);
+ tegra_cbb_print_err(file, "\t SeqId\t\t\t: %d\n", seqid);
+}
+
+/*
+ * Print the transaction type, error code and description from ErrLog0 for all
+ * errors. For NOC slave errors, all relevant error information is printed
+ * using ErrLog0 only. Additional information is printed for errors from APB
+ * slaves because for them:
+ * - All errors are logged as SLV (slave) errors since APB has only the single
+ *   pslverr bit to report all errors.
+ * - The exact cause is printed by reading the DMAAPB_X_RAW_INTERRUPT_STATUS
+ *   register.
+ * - The driver prints the AXI2APB bridge and the exact error only if there is
+ *   an error in any AXI2APB slave.
+ * - There is still no way to disambiguate a DEC error from the SLV error type.
+ */
+static bool print_errlog0(struct seq_file *file, struct tegra194_cbb *cbb)
+{
+ struct tegra194_cbb_packet_header hdr;
+ bool is_fatal = true;
+
+ hdr.lock = cbb->errlog0 & 0x1;
+ hdr.opc = FIELD_GET(CBB_ERR_OPC, cbb->errlog0);
+ hdr.errcode = FIELD_GET(CBB_ERR_ERRCODE, cbb->errlog0);
+ hdr.len1 = FIELD_GET(CBB_ERR_LEN1, cbb->errlog0);
+ hdr.format = (cbb->errlog0 >> 31);
+
+ tegra_cbb_print_err(file, "\t Transaction Type\t: %s\n",
+ tegra194_cbb_trantype[hdr.opc]);
+ tegra_cbb_print_err(file, "\t Error Code\t\t: %s\n",
+ tegra194_cbb_errors[hdr.errcode].code);
+ tegra_cbb_print_err(file, "\t Error Source\t\t: %s\n",
+ tegra194_cbb_errors[hdr.errcode].source);
+ tegra_cbb_print_err(file, "\t Error Description\t: %s\n",
+ tegra194_cbb_errors[hdr.errcode].desc);
+
+ /*
+ * Do not crash the system for errors which are only notifications indicating
+ * that a transaction was not allowed to be attempted.
+ */
+ if (!strcmp(tegra194_cbb_errors[hdr.errcode].code, "SEC") ||
+ !strcmp(tegra194_cbb_errors[hdr.errcode].code, "DEC") ||
+ !strcmp(tegra194_cbb_errors[hdr.errcode].code, "UNS") ||
+ !strcmp(tegra194_cbb_errors[hdr.errcode].code, "DISC")) {
+ is_fatal = false;
+ } else if (!strcmp(tegra194_cbb_errors[hdr.errcode].code, "SLV") &&
+ cbb->num_bridges > 0) {
+ unsigned int i;
+ u32 status;
+
+ /* For all SLV errors, read the DMAAPB_X_RAW_INTERRUPT_STATUS
+ * register to get the error status of all AXI2APB bridges.
+ * Print bridge details if a bit is set in a bridge's status
+ * register due to an error in an APB slave connected to that
+ * bridge. For other NOC slaves, none of the status registers
+ * will be set.
+ */
+
+ for (i = 0; i < cbb->num_bridges; i++) {
+ status = tegra194_axi2apb_status(cbb->bridges[i].base);
+
+ if (status)
+ is_fatal = tegra194_axi2apb_fatal(file, i, status);
+ }
+ }
+
+ tegra_cbb_print_err(file, "\t Packet header Lock\t: %d\n", hdr.lock);
+ tegra_cbb_print_err(file, "\t Packet header Len1\t: %d\n", hdr.len1);
+
+ if (hdr.format)
+ tegra_cbb_print_err(file, "\t NOC protocol version\t: %s\n",
+ "version >= 2.7");
+ else
+ tegra_cbb_print_err(file, "\t NOC protocol version\t: %s\n",
+ "version < 2.7");
+
+ return is_fatal;
+}
+
+/*
+ * Print debug information about the failed transaction using the
+ * ErrLog registers of the error logger that has ErrVld set.
+ */
+static bool print_errloggerX_info(struct seq_file *file, struct tegra194_cbb *cbb,
+ int errloggerX)
+{
+ struct tegra194_cbb_aperture info = { 0, };
+ bool is_fatal = true;
+
+ tegra_cbb_print_err(file, "\tError Logger\t\t: %d\n", errloggerX);
+
+ if (errloggerX == 0) {
+ cbb->errlog0 = readl(cbb->regs + ERRLOGGER_0_ERRLOG0_0);
+ cbb->errlog1 = readl(cbb->regs + ERRLOGGER_0_ERRLOG1_0);
+ cbb->errlog2 = readl(cbb->regs + ERRLOGGER_0_RSVD_00_0);
+ cbb->errlog3 = readl(cbb->regs + ERRLOGGER_0_ERRLOG3_0);
+ cbb->errlog4 = readl(cbb->regs + ERRLOGGER_0_ERRLOG4_0);
+ cbb->errlog5 = readl(cbb->regs + ERRLOGGER_0_ERRLOG5_0);
+ } else if (errloggerX == 1) {
+ cbb->errlog0 = readl(cbb->regs + ERRLOGGER_1_ERRLOG0_0);
+ cbb->errlog1 = readl(cbb->regs + ERRLOGGER_1_ERRLOG1_0);
+ cbb->errlog2 = readl(cbb->regs + ERRLOGGER_1_RSVD_00_0);
+ cbb->errlog3 = readl(cbb->regs + ERRLOGGER_1_ERRLOG3_0);
+ cbb->errlog4 = readl(cbb->regs + ERRLOGGER_1_ERRLOG4_0);
+ cbb->errlog5 = readl(cbb->regs + ERRLOGGER_1_ERRLOG5_0);
+ } else if (errloggerX == 2) {
+ cbb->errlog0 = readl(cbb->regs + ERRLOGGER_2_ERRLOG0_0);
+ cbb->errlog1 = readl(cbb->regs + ERRLOGGER_2_ERRLOG1_0);
+ cbb->errlog2 = readl(cbb->regs + ERRLOGGER_2_RSVD_00_0);
+ cbb->errlog3 = readl(cbb->regs + ERRLOGGER_2_ERRLOG3_0);
+ cbb->errlog4 = readl(cbb->regs + ERRLOGGER_2_ERRLOG4_0);
+ cbb->errlog5 = readl(cbb->regs + ERRLOGGER_2_ERRLOG5_0);
+ }
+
+ tegra_cbb_print_err(file, "\tErrLog0\t\t\t: %#x\n", cbb->errlog0);
+ is_fatal = print_errlog0(file, cbb);
+
+ tegra_cbb_print_err(file, "\tErrLog1\t\t\t: %#x\n", cbb->errlog1);
+ tegra_cbb_print_err(file, "\tErrLog2\t\t\t: %#x\n", cbb->errlog2);
+ print_errlog1_2(file, cbb, &info);
+
+ tegra_cbb_print_err(file, "\tErrLog3\t\t\t: %#x\n", cbb->errlog3);
+ tegra_cbb_print_err(file, "\tErrLog4\t\t\t: %#x\n", cbb->errlog4);
+ print_errlog3_4(file, cbb->errlog3, cbb->errlog4, &info, cbb->noc->noc_aperture,
+ cbb->noc->max_aperture);
+
+ tegra_cbb_print_err(file, "\tErrLog5\t\t\t: %#x\n", cbb->errlog5);
+
+ if (cbb->errlog5)
+ print_errlog5(file, cbb);
+
+ return is_fatal;
+}
+
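+/*
+ * Dump the registers of the first error logger that has its ErrVld bit set in
+ * errvld, then clear the logged error.
+ */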
+static bool print_errlog(struct seq_file *file, struct tegra194_cbb *cbb, u32 errvld)
+{
+ bool is_fatal = true;
+
+ pr_crit("**************************************\n");
+ pr_crit("CPU:%d, Error:%s\n", smp_processor_id(), cbb->noc->name);
+
+ if (errvld & 0x1)
+ is_fatal = print_errloggerX_info(file, cbb, 0);
+ else if (errvld & 0x2)
+ is_fatal = print_errloggerX_info(file, cbb, 1);
+ else if (errvld & 0x4)
+ is_fatal = print_errloggerX_info(file, cbb, 2);
+
+ tegra_cbb_error_clear(&cbb->base);
+ tegra_cbb_print_err(file, "\t**************************************\n");
+ return is_fatal;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static DEFINE_MUTEX(cbb_err_mutex);
+
+static int tegra194_cbb_debugfs_show(struct tegra_cbb *cbb, struct seq_file *file, void *data)
+{
+ struct tegra_cbb *noc;
+
+ mutex_lock(&cbb_err_mutex);
+
+ list_for_each_entry(noc, &cbb_list, node) {
+ struct tegra194_cbb *priv = to_tegra194_cbb(noc);
+ u32 status;
+
+ status = tegra_cbb_get_status(noc);
+ if (status)
+ print_errlog(file, priv, status);
+ }
+
+ mutex_unlock(&cbb_err_mutex);
+
+ return 0;
+}
+#endif
+
+/*
+ * Handler for CBB errors from different initiators
+ */
+static irqreturn_t tegra194_cbb_err_isr(int irq, void *data)
+{
+ bool is_inband_err = false, is_fatal = false;
+ //struct tegra194_cbb *cbb = data;
+ struct tegra_cbb *noc;
+ unsigned long flags;
+ u8 mstr_id = 0;
+
+ spin_lock_irqsave(&cbb_lock, flags);
+
+ /* XXX only process interrupts for "cbb" instead of iterating over all NOCs? */
+ list_for_each_entry(noc, &cbb_list, node) {
+ struct tegra194_cbb *priv = to_tegra194_cbb(noc);
+ u32 status = 0;
+
+ status = tegra_cbb_get_status(noc);
+
+ if (status && ((irq == priv->sec_irq) || (irq == priv->nonsec_irq))) {
+ tegra_cbb_print_err(NULL, "CPU:%d, Error: %s@%llx, irq=%d\n",
+ smp_processor_id(), priv->noc->name, priv->res->start,
+ irq);
+
+ is_fatal = print_errlog(NULL, priv, status);
+
+ /*
+ * If the illegal request is from the CCPLEX (0x1) initiator
+ * and the error is fatal, call BUG() to crash the system.
+ */
+ if (priv->noc->erd_mask_inband_err) {
+ mstr_id = FIELD_GET(CBB_NOC_MSTR_ID, priv->errlog5);
+ if (mstr_id == 0x1)
+ is_inband_err = 1;
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&cbb_lock, flags);
+
+ if (is_inband_err) {
+ if (is_fatal)
+ BUG();
+ else
+ WARN(true, "Warning due to CBB Error\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Register handler for CBB_NONSECURE & CBB_SECURE interrupts
+ * for reporting CBB errors
+ */
+static int tegra194_cbb_interrupt_enable(struct tegra_cbb *cbb)
+{
+ struct tegra194_cbb *priv = to_tegra194_cbb(cbb);
+ struct device *dev = cbb->dev;
+ int err;
+
+ if (priv->sec_irq) {
+ err = devm_request_irq(dev, priv->sec_irq, tegra194_cbb_err_isr, 0, dev_name(dev),
+ priv);
+ if (err) {
+ dev_err(dev, "failed to register interrupt %u: %d\n", priv->sec_irq, err);
+ return err;
+ }
+ }
+
+ if (priv->nonsec_irq) {
+ err = devm_request_irq(dev, priv->nonsec_irq, tegra194_cbb_err_isr, 0,
+ dev_name(dev), priv);
+ if (err) {
+ dev_err(dev, "failed to register interrupt %u: %d\n", priv->nonsec_irq,
+ err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void tegra194_cbb_error_enable(struct tegra_cbb *cbb)
+{
+ /*
+ * Set "StallEn=1" to enable queuing of error packets until the
+ * first one is serviced and cleared.
+ */
+ tegra_cbb_stall_enable(cbb);
+
+ /* Set "FaultEn=1" to enable the error reporting signal "Fault". */
+ tegra_cbb_fault_enable(cbb);
+}
+
+static const struct tegra_cbb_ops tegra194_cbb_ops = {
+ .get_status = tegra194_cbb_get_status,
+ .error_clear = tegra194_cbb_error_clear,
+ .fault_enable = tegra194_cbb_fault_enable,
+ .stall_enable = tegra194_cbb_stall_enable,
+ .error_enable = tegra194_cbb_error_enable,
+ .interrupt_enable = tegra194_cbb_interrupt_enable,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_show = tegra194_cbb_debugfs_show,
+#endif
+};
+
+static struct tegra194_cbb_noc_data tegra194_cbb_central_noc_data = {
+ .name = "cbb-noc",
+ .erd_mask_inband_err = true,
+ .master_id = tegra194_master_id,
+ .noc_aperture = tegra194_cbbcentralnoc_apert_lookup,
+ .max_aperture = ARRAY_SIZE(tegra194_cbbcentralnoc_apert_lookup),
+ .routeid_initflow = tegra194_cbbcentralnoc_routeid_initflow,
+ .routeid_targflow = tegra194_cbbcentralnoc_routeid_targflow,
+ .parse_routeid = cbbcentralnoc_parse_routeid,
+ .parse_userbits = cbbcentralnoc_parse_userbits
+};
+
+static struct tegra194_cbb_noc_data tegra194_aon_noc_data = {
+ .name = "aon-noc",
+ .erd_mask_inband_err = false,
+ .master_id = tegra194_master_id,
+ .noc_aperture = tegra194_aonnoc_aperture_lookup,
+ .max_aperture = ARRAY_SIZE(tegra194_aonnoc_aperture_lookup),
+ .routeid_initflow = tegra194_aonnoc_routeid_initflow,
+ .routeid_targflow = tegra194_aonnoc_routeid_targflow,
+ .parse_routeid = aonnoc_parse_routeid,
+ .parse_userbits = clusternoc_parse_userbits
+};
+
+static struct tegra194_cbb_noc_data tegra194_bpmp_noc_data = {
+ .name = "bpmp-noc",
+ .erd_mask_inband_err = false,
+ .master_id = tegra194_master_id,
+ .noc_aperture = tegra194_bpmpnoc_apert_lookup,
+ .max_aperture = ARRAY_SIZE(tegra194_bpmpnoc_apert_lookup),
+ .routeid_initflow = tegra194_bpmpnoc_routeid_initflow,
+ .routeid_targflow = tegra194_bpmpnoc_routeid_targflow,
+ .parse_routeid = bpmpnoc_parse_routeid,
+ .parse_userbits = clusternoc_parse_userbits
+};
+
+static struct tegra194_cbb_noc_data tegra194_rce_noc_data = {
+ .name = "rce-noc",
+ .erd_mask_inband_err = false,
+ .master_id = tegra194_master_id,
+ .noc_aperture = tegra194_scenoc_apert_lookup,
+ .max_aperture = ARRAY_SIZE(tegra194_scenoc_apert_lookup),
+ .routeid_initflow = tegra194_scenoc_routeid_initflow,
+ .routeid_targflow = tegra194_scenoc_routeid_targflow,
+ .parse_routeid = scenoc_parse_routeid,
+ .parse_userbits = clusternoc_parse_userbits
+};
+
+static struct tegra194_cbb_noc_data tegra194_sce_noc_data = {
+ .name = "sce-noc",
+ .erd_mask_inband_err = false,
+ .master_id = tegra194_master_id,
+ .noc_aperture = tegra194_scenoc_apert_lookup,
+ .max_aperture = ARRAY_SIZE(tegra194_scenoc_apert_lookup),
+ .routeid_initflow = tegra194_scenoc_routeid_initflow,
+ .routeid_targflow = tegra194_scenoc_routeid_targflow,
+ .parse_routeid = scenoc_parse_routeid,
+ .parse_userbits = clusternoc_parse_userbits
+};
+
+static const struct of_device_id tegra194_cbb_match[] = {
+ { .compatible = "nvidia,tegra194-cbb-noc", .data = &tegra194_cbb_central_noc_data },
+ { .compatible = "nvidia,tegra194-aon-noc", .data = &tegra194_aon_noc_data },
+ { .compatible = "nvidia,tegra194-bpmp-noc", .data = &tegra194_bpmp_noc_data },
+ { .compatible = "nvidia,tegra194-rce-noc", .data = &tegra194_rce_noc_data },
+ { .compatible = "nvidia,tegra194-sce-noc", .data = &tegra194_sce_noc_data },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, tegra194_cbb_match);
+
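+/*
+ * AXI2APB bridge apertures are shared between NOC instances: reuse the
+ * mapping from an already registered instance if one exists, otherwise map
+ * each bridge listed in the "nvidia,axi2apb" node.
+ */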
+static int tegra194_cbb_get_bridges(struct tegra194_cbb *cbb, struct device_node *np)
+{
+ struct tegra_cbb *entry;
+ unsigned long flags;
+ unsigned int i;
+ int err;
+
+ spin_lock_irqsave(&cbb_lock, flags);
+
+ list_for_each_entry(entry, &cbb_list, node) {
+ struct tegra194_cbb *priv = to_tegra194_cbb(entry);
+
+ if (priv->bridges) {
+ cbb->num_bridges = priv->num_bridges;
+ cbb->bridges = priv->bridges;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&cbb_lock, flags);
+
+ if (!cbb->bridges) {
+ cbb->num_bridges = of_address_count(np);
+
+ cbb->bridges = devm_kcalloc(cbb->base.dev, cbb->num_bridges,
+ sizeof(*cbb->bridges), GFP_KERNEL);
+ if (!cbb->bridges)
+ return -ENOMEM;
+
+ for (i = 0; i < cbb->num_bridges; i++) {
+ err = of_address_to_resource(np, i, &cbb->bridges[i].res);
+ if (err < 0)
+ return err;
+
+ cbb->bridges[i].base = devm_ioremap_resource(cbb->base.dev,
+ &cbb->bridges[i].res);
+ if (IS_ERR(cbb->bridges[i].base))
+ return PTR_ERR(cbb->bridges[i].base);
+ }
+ }
+
+ if (cbb->num_bridges > 0) {
+ dev_dbg(cbb->base.dev, "AXI2APB bridge info present:\n");
+
+ for (i = 0; i < cbb->num_bridges; i++)
+ dev_dbg(cbb->base.dev, " %u: %pR\n", i, &cbb->bridges[i].res);
+ }
+
+ return 0;
+}
+
+static int tegra194_cbb_probe(struct platform_device *pdev)
+{
+ const struct tegra194_cbb_noc_data *noc;
+ struct tegra194_cbb *cbb;
+ struct device_node *np;
+ unsigned long flags;
+ int err;
+
+ noc = of_device_get_match_data(&pdev->dev);
+
+ if (noc->erd_mask_inband_err) {
+ /*
+ * Set Error Response Disable(ERD) bit to mask SError/inband
+ * error and only trigger interrupts for illegal access from
+ * CCPLEX initiator.
+ */
+ err = tegra194_miscreg_mask_serror();
+ if (err) {
+ dev_err(&pdev->dev, "couldn't mask inband errors\n");
+ return err;
+ }
+ }
+
+ cbb = devm_kzalloc(&pdev->dev, sizeof(*cbb), GFP_KERNEL);
+ if (!cbb)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&cbb->base.node);
+ cbb->base.ops = &tegra194_cbb_ops;
+ cbb->base.dev = &pdev->dev;
+ cbb->noc = noc;
+
+ cbb->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &cbb->res);
+ if (IS_ERR(cbb->regs))
+ return PTR_ERR(cbb->regs);
+
+ err = tegra_cbb_get_irq(pdev, &cbb->nonsec_irq, &cbb->sec_irq);
+ if (err)
+ return err;
+
+ np = of_parse_phandle(pdev->dev.of_node, "nvidia,axi2apb", 0);
+ if (np) {
+ err = tegra194_cbb_get_bridges(cbb, np);
+ of_node_put(np);
+ if (err < 0)
+ return err;
+ }
+
+ platform_set_drvdata(pdev, cbb);
+
+ spin_lock_irqsave(&cbb_lock, flags);
+ list_add(&cbb->base.node, &cbb_list);
+ spin_unlock_irqrestore(&cbb_lock, flags);
+
+ return tegra_cbb_register(&cbb->base);
+}
+
+static int tegra194_cbb_remove(struct platform_device *pdev)
+{
+ struct tegra194_cbb *cbb = platform_get_drvdata(pdev);
+ struct tegra_cbb *noc, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cbb_lock, flags);
+
+ list_for_each_entry_safe(noc, tmp, &cbb_list, node) {
+ struct tegra194_cbb *priv = to_tegra194_cbb(noc);
+
+ if (cbb->res->start == priv->res->start) {
+ list_del(&noc->node);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&cbb_lock, flags);
+
+ return 0;
+}
+
+static int __maybe_unused tegra194_cbb_resume_noirq(struct device *dev)
+{
+ struct tegra194_cbb *cbb = dev_get_drvdata(dev);
+
+ tegra194_cbb_error_enable(&cbb->base);
+ dsb(sy);
+
+ dev_dbg(dev, "%s resumed\n", cbb->noc->name);
+ return 0;
+}
+
+static const struct dev_pm_ops tegra194_cbb_pm = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, tegra194_cbb_resume_noirq)
+};
+
+static struct platform_driver tegra194_cbb_driver = {
+ .probe = tegra194_cbb_probe,
+ .remove = tegra194_cbb_remove,
+ .driver = {
+ .name = "tegra194-cbb",
+ .of_match_table = of_match_ptr(tegra194_cbb_match),
+ .pm = &tegra194_cbb_pm,
+ },
+};
+
+static int __init tegra194_cbb_init(void)
+{
+ return platform_driver_register(&tegra194_cbb_driver);
+}
+pure_initcall(tegra194_cbb_init);
+
+static void __exit tegra194_cbb_exit(void)
+{
+ platform_driver_unregister(&tegra194_cbb_driver);
+}
+module_exit(tegra194_cbb_exit);
+
+MODULE_AUTHOR("Sumit Gupta <sumitg@nvidia.com>");
+MODULE_DESCRIPTION("Control Backbone error handling driver for Tegra194");
diff --git a/drivers/soc/tegra/cbb/tegra234-cbb.c b/drivers/soc/tegra/cbb/tegra234-cbb.c
new file mode 100644
index 0000000000..5cf0e8c341
--- /dev/null
+++ b/drivers/soc/tegra/cbb/tegra234-cbb.c
@@ -0,0 +1,1210 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved
+ *
+ * The driver handles errors from Control Backbone (CBB) version 2.0
+ * generated due to illegal accesses. The driver prints debug information
+ * about the failed transaction on receiving an interrupt from the Error
+ * Notifier. Error types supported by CBB 2.0 are:
+ * UNSUPPORTED_ERR, PWRDOWN_ERR, TIMEOUT_ERR, FIREWALL_ERR, DECODE_ERR,
+ * SLAVE_ERR
+ */
+
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/cpufeature.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <soc/tegra/fuse.h>
+#include <soc/tegra/tegra-cbb.h>
+
+#define FABRIC_EN_CFG_INTERRUPT_ENABLE_0_0 0x0
+#define FABRIC_EN_CFG_STATUS_0_0 0x40
+#define FABRIC_EN_CFG_ADDR_INDEX_0_0 0x60
+#define FABRIC_EN_CFG_ADDR_LOW_0 0x80
+#define FABRIC_EN_CFG_ADDR_HI_0 0x84
+
+#define FABRIC_MN_MASTER_ERR_EN_0 0x200
+#define FABRIC_MN_MASTER_ERR_FORCE_0 0x204
+#define FABRIC_MN_MASTER_ERR_STATUS_0 0x208
+#define FABRIC_MN_MASTER_ERR_OVERFLOW_STATUS_0 0x20c
+
+#define FABRIC_MN_MASTER_LOG_ERR_STATUS_0 0x300
+#define FABRIC_MN_MASTER_LOG_ADDR_LOW_0 0x304
+#define FABRIC_MN_MASTER_LOG_ADDR_HIGH_0 0x308
+#define FABRIC_MN_MASTER_LOG_ATTRIBUTES0_0 0x30c
+#define FABRIC_MN_MASTER_LOG_ATTRIBUTES1_0 0x310
+#define FABRIC_MN_MASTER_LOG_ATTRIBUTES2_0 0x314
+#define FABRIC_MN_MASTER_LOG_USER_BITS0_0 0x318
+
+#define AXI_SLV_TIMEOUT_STATUS_0_0 0x8
+#define APB_BLOCK_TMO_STATUS_0 0xc00
+#define APB_BLOCK_NUM_TMO_OFFSET 0x20
+
+#define FAB_EM_EL_MSTRID GENMASK(29, 24)
+#define FAB_EM_EL_VQC GENMASK(17, 16)
+#define FAB_EM_EL_GRPSEC GENMASK(14, 8)
+#define FAB_EM_EL_FALCONSEC GENMASK(1, 0)
+
+#define FAB_EM_EL_FABID GENMASK(20, 16)
+#define FAB_EM_EL_SLAVEID GENMASK(7, 0)
+
+#define FAB_EM_EL_ACCESSID GENMASK(7, 0)
+
+#define FAB_EM_EL_AXCACHE GENMASK(27, 24)
+#define FAB_EM_EL_AXPROT GENMASK(22, 20)
+#define FAB_EM_EL_BURSTLENGTH GENMASK(19, 12)
+#define FAB_EM_EL_BURSTTYPE GENMASK(9, 8)
+#define FAB_EM_EL_BEATSIZE GENMASK(6, 4)
+#define FAB_EM_EL_ACCESSTYPE GENMASK(0, 0)
+
+#define USRBITS_MSTR_ID GENMASK(29, 24)
+
+#define REQ_SOCKET_ID GENMASK(27, 24)
+
+#define CCPLEX_MSTRID 0x1
+#define FIREWALL_APERTURE_SZ 0x10000
+/* Write firewall check enable */
+#define WEN 0x20000
+
+enum tegra234_cbb_fabric_ids {
+ CBB_FAB_ID,
+ SCE_FAB_ID,
+ RCE_FAB_ID,
+ DCE_FAB_ID,
+ AON_FAB_ID,
+ PSC_FAB_ID,
+ BPMP_FAB_ID,
+ FSI_FAB_ID,
+ MAX_FAB_ID,
+};
+
+struct tegra234_slave_lookup {
+ const char *name;
+ unsigned int offset;
+};
+
+struct tegra234_cbb_fabric {
+ const char *name;
+ phys_addr_t off_mask_erd;
+ phys_addr_t firewall_base;
+ unsigned int firewall_ctl;
+ unsigned int firewall_wr_ctl;
+ const char * const *master_id;
+ unsigned int notifier_offset;
+ const struct tegra_cbb_error *errors;
+ const int max_errors;
+ const struct tegra234_slave_lookup *slave_map;
+ const int max_slaves;
+};
+
+struct tegra234_cbb {
+ struct tegra_cbb base;
+
+ const struct tegra234_cbb_fabric *fabric;
+ struct resource *res;
+ void __iomem *regs;
+
+ int num_intr;
+ int sec_irq;
+
+ /* record */
+ void __iomem *mon;
+ unsigned int type;
+ u32 mask;
+ u64 access;
+ u32 mn_attr0;
+ u32 mn_attr1;
+ u32 mn_attr2;
+ u32 mn_user_bits;
+};
+
+static inline struct tegra234_cbb *to_tegra234_cbb(struct tegra_cbb *cbb)
+{
+ return container_of(cbb, struct tegra234_cbb, base);
+}
+
+static LIST_HEAD(cbb_list);
+static DEFINE_SPINLOCK(cbb_lock);
+
+static bool
+tegra234_cbb_write_access_allowed(struct platform_device *pdev, struct tegra234_cbb *cbb)
+{
+ u32 val;
+
+ if (!cbb->fabric->firewall_base ||
+ !cbb->fabric->firewall_ctl ||
+ !cbb->fabric->firewall_wr_ctl) {
+ dev_info(&pdev->dev, "SoC data missing for firewall\n");
+ return false;
+ }
+
+ if ((cbb->fabric->firewall_ctl > FIREWALL_APERTURE_SZ) ||
+ (cbb->fabric->firewall_wr_ctl > FIREWALL_APERTURE_SZ)) {
+ dev_err(&pdev->dev, "wrong firewall offset value\n");
+ return false;
+ }
+
+ val = readl(cbb->regs + cbb->fabric->firewall_base + cbb->fabric->firewall_ctl);
+ /*
+ * If the firewall check feature for allowing or blocking the
+ * write accesses through the firewall of a fabric is disabled
+ * then CCPLEX can write to the registers of that fabric.
+ */
+ if (!(val & WEN))
+ return true;
+
+ /*
+ * If the firewall check is enabled then check whether CCPLEX
+ * has write access to the fabric's error notifier registers
+ */
+ val = readl(cbb->regs + cbb->fabric->firewall_base + cbb->fabric->firewall_wr_ctl);
+ if (val & (BIT(CCPLEX_MSTRID)))
+ return true;
+
+ return false;
+}
+
+static void tegra234_cbb_fault_enable(struct tegra_cbb *cbb)
+{
+ struct tegra234_cbb *priv = to_tegra234_cbb(cbb);
+ void __iomem *addr;
+
+ addr = priv->regs + priv->fabric->notifier_offset;
+ writel(0x1ff, addr + FABRIC_EN_CFG_INTERRUPT_ENABLE_0_0);
+ dsb(sy);
+}
+
+static void tegra234_cbb_error_clear(struct tegra_cbb *cbb)
+{
+ struct tegra234_cbb *priv = to_tegra234_cbb(cbb);
+
+ writel(0x3f, priv->mon + FABRIC_MN_MASTER_ERR_STATUS_0);
+ dsb(sy);
+}
+
+static u32 tegra234_cbb_get_status(struct tegra_cbb *cbb)
+{
+ struct tegra234_cbb *priv = to_tegra234_cbb(cbb);
+ void __iomem *addr;
+ u32 value;
+
+ addr = priv->regs + priv->fabric->notifier_offset;
+ value = readl(addr + FABRIC_EN_CFG_STATUS_0_0);
+ dsb(sy);
+
+ return value;
+}
+
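+/*
+ * Set the Error Response Disable (ERD) bit so that inband SError responses
+ * for illegal accesses are masked and errors are reported via interrupt
+ * instead.
+ */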
+static void tegra234_cbb_mask_serror(struct tegra234_cbb *cbb)
+{
+ writel(0x1, cbb->regs + cbb->fabric->off_mask_erd);
+ dsb(sy);
+}
+
+static u32 tegra234_cbb_get_tmo_slv(void __iomem *addr)
+{
+ u32 timeout;
+
+ timeout = readl(addr);
+ return timeout;
+}
+
+static void tegra234_cbb_tmo_slv(struct seq_file *file, const char *slave, void __iomem *addr,
+ u32 status)
+{
+ tegra_cbb_print_err(file, "\t %s : %#x\n", slave, status);
+}
+
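+/*
+ * Walk the per-block timeout status of an AXI2APB slave: for each block with
+ * its status bit set, read that block's timeout register and print the
+ * clients which timed out.
+ */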
+static void tegra234_cbb_lookup_apbslv(struct seq_file *file, const char *slave,
+ void __iomem *base)
+{
+ unsigned int block = 0;
+ void __iomem *addr;
+ char name[64];
+ u32 status;
+
+ status = tegra234_cbb_get_tmo_slv(base);
+ if (status)
+ tegra_cbb_print_err(file, "\t %s_BLOCK_TMO_STATUS : %#x\n", slave, status);
+
+ while (status) {
+ if (status & BIT(0)) {
+ u32 timeout, clients, client = 0;
+
+ addr = base + APB_BLOCK_NUM_TMO_OFFSET + (block * 4);
+ timeout = tegra234_cbb_get_tmo_slv(addr);
+ clients = timeout;
+
+ while (timeout) {
+ if (timeout & BIT(0)) {
+ if (clients != 0xffffffff)
+ clients &= BIT(client);
+
+ sprintf(name, "%s_BLOCK%d_TMO", slave, block);
+
+ tegra234_cbb_tmo_slv(file, name, addr, clients);
+ }
+
+ timeout >>= 1;
+ client++;
+ }
+ }
+
+ status >>= 1;
+ block++;
+ }
+}
+
+static void tegra234_lookup_slave_timeout(struct seq_file *file, struct tegra234_cbb *cbb,
+ u8 slave_id, u8 fab_id)
+{
+ const struct tegra234_slave_lookup *map = cbb->fabric->slave_map;
+ void __iomem *addr;
+
+ /*
+ * 1) Get the slave node name and address mapping using slave_id.
+ * 2) Check if the timed-out slave node is APB or AXI.
+ * 3) If AXI, then print the timeout register and reset the AXI slave
+ *    using the <FABRIC>_SN_<>_SLV_TIMEOUT_STATUS_0_0 register.
+ * 4) If APB, then perform an additional lookup to find the client
+ *    which timed out.
+ *    a) Get the block number from the index of the set bit in the
+ *       <FABRIC>_SN_AXI2APB_<>_BLOCK_TMO_STATUS_0 register.
+ *    b) Get the address of the register respective to the block
+ *       number, i.e. <FABRIC>_SN_AXI2APB_<>_BLOCK<index-set-bit>_TMO_0.
+ *    c) Read the register from the above step to get the client_id
+ *       which timed out, as per the set bits.
+ *    d) Reset the timed-out client and print details.
+ *    e) Go to step (a) until all set bits have been handled.
+ */
+
+ addr = cbb->regs + map[slave_id].offset;
+
+ if (strstr(map[slave_id].name, "AXI2APB")) {
+ addr += APB_BLOCK_TMO_STATUS_0;
+
+ tegra234_cbb_lookup_apbslv(file, map[slave_id].name, addr);
+ } else {
+ char name[64];
+ u32 status;
+
+ addr += AXI_SLV_TIMEOUT_STATUS_0_0;
+
+ status = tegra234_cbb_get_tmo_slv(addr);
+ if (status) {
+ sprintf(name, "%s_SLV_TIMEOUT_STATUS", map[slave_id].name);
+ tegra234_cbb_tmo_slv(file, name, addr, status);
+ }
+ }
+}
+
+static void tegra234_cbb_print_error(struct seq_file *file, struct tegra234_cbb *cbb, u32 status,
+ u32 overflow)
+{
+ unsigned int type = 0;
+
+ if (status & (status - 1))
+ tegra_cbb_print_err(file, "\t Multiple type of errors reported\n");
+
+ while (status) {
+ if (type >= cbb->fabric->max_errors) {
+ tegra_cbb_print_err(file, "\t Wrong type index:%u, status:%u\n",
+ type, status);
+ return;
+ }
+
+ if (status & 0x1)
+ tegra_cbb_print_err(file, "\t Error Code\t\t: %s\n",
+ cbb->fabric->errors[type].code);
+
+ status >>= 1;
+ type++;
+ }
+
+ type = 0;
+
+ while (overflow) {
+ if (type >= cbb->fabric->max_errors) {
+ tegra_cbb_print_err(file, "\t Wrong type index:%u, overflow:%u\n",
+ type, overflow);
+ return;
+ }
+
+ if (overflow & 0x1)
+ tegra_cbb_print_err(file, "\t Overflow\t\t: Multiple %s\n",
+ cbb->fabric->errors[type].code);
+
+ overflow >>= 1;
+ type++;
+ }
+}
+
+static void print_errlog_err(struct seq_file *file, struct tegra234_cbb *cbb)
+{
+ u8 cache_type, prot_type, burst_length, mstr_id, grpsec, vqc, falconsec, beat_size;
+ u8 access_type, access_id, requester_socket_id, local_socket_id, slave_id, fab_id;
+ char fabric_name[20];
+ bool is_numa = false;
+ u8 burst_type;
+
+ if (num_possible_nodes() > 1)
+ is_numa = true;
+
+ mstr_id = FIELD_GET(FAB_EM_EL_MSTRID, cbb->mn_user_bits);
+ vqc = FIELD_GET(FAB_EM_EL_VQC, cbb->mn_user_bits);
+ grpsec = FIELD_GET(FAB_EM_EL_GRPSEC, cbb->mn_user_bits);
+ falconsec = FIELD_GET(FAB_EM_EL_FALCONSEC, cbb->mn_user_bits);
+
+ /*
+ * For SoCs with multiple NUMA nodes, print cross-socket access
+ * errors only if the initiator/master_id is CCPLEX, CPMU or GPU.
+ */
+ if (is_numa) {
+ local_socket_id = numa_node_id();
+ requester_socket_id = FIELD_GET(REQ_SOCKET_ID, cbb->mn_attr2);
+
+ if (requester_socket_id != local_socket_id) {
+ if ((mstr_id != 0x1) && (mstr_id != 0x2) && (mstr_id != 0xB))
+ return;
+ }
+ }
+
+ fab_id = FIELD_GET(FAB_EM_EL_FABID, cbb->mn_attr2);
+ slave_id = FIELD_GET(FAB_EM_EL_SLAVEID, cbb->mn_attr2);
+
+ access_id = FIELD_GET(FAB_EM_EL_ACCESSID, cbb->mn_attr1);
+
+ cache_type = FIELD_GET(FAB_EM_EL_AXCACHE, cbb->mn_attr0);
+ prot_type = FIELD_GET(FAB_EM_EL_AXPROT, cbb->mn_attr0);
+ burst_length = FIELD_GET(FAB_EM_EL_BURSTLENGTH, cbb->mn_attr0);
+ burst_type = FIELD_GET(FAB_EM_EL_BURSTTYPE, cbb->mn_attr0);
+ beat_size = FIELD_GET(FAB_EM_EL_BEATSIZE, cbb->mn_attr0);
+ access_type = FIELD_GET(FAB_EM_EL_ACCESSTYPE, cbb->mn_attr0);
+
+ tegra_cbb_print_err(file, "\n");
+ if (cbb->type < cbb->fabric->max_errors)
+ tegra_cbb_print_err(file, "\t Error Code\t\t: %s\n",
+ cbb->fabric->errors[cbb->type].code);
+ else
+ tegra_cbb_print_err(file, "\t Wrong type index:%u\n", cbb->type);
+
+ tegra_cbb_print_err(file, "\t MASTER_ID\t\t: %s\n", cbb->fabric->master_id[mstr_id]);
+ tegra_cbb_print_err(file, "\t Address\t\t: %#llx\n", cbb->access);
+
+ tegra_cbb_print_cache(file, cache_type);
+ tegra_cbb_print_prot(file, prot_type);
+
+ tegra_cbb_print_err(file, "\t Access_Type\t\t: %s", (access_type) ? "Write\n" : "Read\n");
+ tegra_cbb_print_err(file, "\t Access_ID\t\t: %#x", access_id);
+
+ if (fab_id == PSC_FAB_ID)
+ strcpy(fabric_name, "psc-fabric");
+ else if (fab_id == FSI_FAB_ID)
+ strcpy(fabric_name, "fsi-fabric");
+ else
+ strcpy(fabric_name, cbb->fabric->name);
+
+ if (is_numa) {
+ tegra_cbb_print_err(file, "\t Requester_Socket_Id\t: %#x\n",
+ requester_socket_id);
+ tegra_cbb_print_err(file, "\t Local_Socket_Id\t: %#x\n",
+ local_socket_id);
+ tegra_cbb_print_err(file, "\t No. of NUMA_NODES\t: %#x\n",
+ num_possible_nodes());
+ }
+
+ tegra_cbb_print_err(file, "\t Fabric\t\t: %s\n", fabric_name);
+ tegra_cbb_print_err(file, "\t Slave_Id\t\t: %#x\n", slave_id);
+ tegra_cbb_print_err(file, "\t Burst_length\t\t: %#x\n", burst_length);
+ tegra_cbb_print_err(file, "\t Burst_type\t\t: %#x\n", burst_type);
+ tegra_cbb_print_err(file, "\t Beat_size\t\t: %#x\n", beat_size);
+ tegra_cbb_print_err(file, "\t VQC\t\t\t: %#x\n", vqc);
+ tegra_cbb_print_err(file, "\t GRPSEC\t\t: %#x\n", grpsec);
+ tegra_cbb_print_err(file, "\t FALCONSEC\t\t: %#x\n", falconsec);
+
+ if ((fab_id == PSC_FAB_ID) || (fab_id == FSI_FAB_ID))
+ return;
+
+ if (slave_id >= cbb->fabric->max_slaves) {
+ tegra_cbb_print_err(file, "\t Invalid slave_id:%d\n", slave_id);
+ return;
+ }
+
+ if (!strcmp(cbb->fabric->errors[cbb->type].code, "TIMEOUT_ERR")) {
+ tegra234_lookup_slave_timeout(file, cbb, slave_id, fab_id);
+ return;
+ }
+
+ tegra_cbb_print_err(file, "\t Slave\t\t\t: %s\n", cbb->fabric->slave_map[slave_id].name);
+}
+
+static int print_errmonX_info(struct seq_file *file, struct tegra234_cbb *cbb)
+{
+ u32 overflow, status, error;
+
+ status = readl(cbb->mon + FABRIC_MN_MASTER_ERR_STATUS_0);
+ if (!status) {
+ pr_err("Error Notifier received a spurious notification\n");
+ return -ENODATA;
+ }
+
+ if (status == 0xffffffff) {
+ pr_err("CBB registers returning all 1's which is invalid\n");
+ return -EINVAL;
+ }
+
+ overflow = readl(cbb->mon + FABRIC_MN_MASTER_ERR_OVERFLOW_STATUS_0);
+
+ tegra234_cbb_print_error(file, cbb, status, overflow);
+
+ error = readl(cbb->mon + FABRIC_MN_MASTER_LOG_ERR_STATUS_0);
+ if (!error) {
+ pr_info("Error Monitor doesn't have Error Logger\n");
+ return -EINVAL;
+ }
+
+ cbb->type = 0;
+
+ while (error) {
+ if (error & BIT(0)) {
+ u32 hi, lo;
+
+ hi = readl(cbb->mon + FABRIC_MN_MASTER_LOG_ADDR_HIGH_0);
+ lo = readl(cbb->mon + FABRIC_MN_MASTER_LOG_ADDR_LOW_0);
+
+ cbb->access = (u64)hi << 32 | lo;
+
+ cbb->mn_attr0 = readl(cbb->mon + FABRIC_MN_MASTER_LOG_ATTRIBUTES0_0);
+ cbb->mn_attr1 = readl(cbb->mon + FABRIC_MN_MASTER_LOG_ATTRIBUTES1_0);
+ cbb->mn_attr2 = readl(cbb->mon + FABRIC_MN_MASTER_LOG_ATTRIBUTES2_0);
+ cbb->mn_user_bits = readl(cbb->mon + FABRIC_MN_MASTER_LOG_USER_BITS0_0);
+
+ print_errlog_err(file, cbb);
+ }
+
+ cbb->type++;
+ error >>= 1;
+ }
+
+ return 0;
+}
+
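+/*
+ * Walk the error notifier status: for each pending error monitor, read its
+ * address via the CFG_ADDR_INDEX/HI/LOW registers, translate it to an offset
+ * within this fabric's aperture, then print and clear the logged errors.
+ */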
+static int print_err_notifier(struct seq_file *file, struct tegra234_cbb *cbb, u32 status)
+{
+ unsigned int index = 0;
+ int err;
+
+ pr_crit("**************************************\n");
+ pr_crit("CPU:%d, Error:%s, Errmon:%d\n", smp_processor_id(),
+ cbb->fabric->name, status);
+
+ while (status) {
+ if (status & BIT(0)) {
+ unsigned int notifier = cbb->fabric->notifier_offset;
+ u32 hi, lo, mask = BIT(index);
+ phys_addr_t addr;
+ u64 offset;
+
+ writel(mask, cbb->regs + notifier + FABRIC_EN_CFG_ADDR_INDEX_0_0);
+ hi = readl(cbb->regs + notifier + FABRIC_EN_CFG_ADDR_HI_0);
+ lo = readl(cbb->regs + notifier + FABRIC_EN_CFG_ADDR_LOW_0);
+
+ addr = (u64)hi << 32 | lo;
+
+ offset = addr - cbb->res->start;
+ cbb->mon = cbb->regs + offset;
+ cbb->mask = BIT(index);
+
+ err = print_errmonX_info(file, cbb);
+ tegra234_cbb_error_clear(&cbb->base);
+ if (err)
+ return err;
+ }
+
+ status >>= 1;
+ index++;
+ }
+
+ tegra_cbb_print_err(file, "\t**************************************\n");
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static DEFINE_MUTEX(cbb_debugfs_mutex);
+
+static int tegra234_cbb_debugfs_show(struct tegra_cbb *cbb, struct seq_file *file, void *data)
+{
+ int err = 0;
+
+ mutex_lock(&cbb_debugfs_mutex);
+
+ list_for_each_entry(cbb, &cbb_list, node) {
+ struct tegra234_cbb *priv = to_tegra234_cbb(cbb);
+ u32 status;
+
+ status = tegra_cbb_get_status(&priv->base);
+ if (status) {
+ err = print_err_notifier(file, priv, status);
+ if (err)
+ break;
+ }
+ }
+
+ mutex_unlock(&cbb_debugfs_mutex);
+ return err;
+}
+#endif
+
+/*
+ * Handler for CBB errors
+ */
+static irqreturn_t tegra234_cbb_isr(int irq, void *data)
+{
+ bool is_inband_err = false;
+ struct tegra_cbb *cbb;
+ unsigned long flags;
+ u8 mstr_id;
+ int err;
+
+ spin_lock_irqsave(&cbb_lock, flags);
+
+ list_for_each_entry(cbb, &cbb_list, node) {
+ struct tegra234_cbb *priv = to_tegra234_cbb(cbb);
+ u32 status = tegra_cbb_get_status(cbb);
+
+ if (status && (irq == priv->sec_irq)) {
+ tegra_cbb_print_err(NULL, "CPU:%d, Error: %s@0x%llx, irq=%d\n",
+ smp_processor_id(), priv->fabric->name,
+ priv->res->start, irq);
+
+ err = print_err_notifier(NULL, priv, status);
+ if (err)
+ goto unlock;
+
+ /*
+ * If the illegal request is from the CCPLEX (id: 0x1) master, call WARN().
+ */
+ if (priv->fabric->off_mask_erd) {
+ mstr_id = FIELD_GET(USRBITS_MSTR_ID, priv->mn_user_bits);
+ if (mstr_id == CCPLEX_MSTRID)
+ is_inband_err = 1;
+ }
+ }
+ }
+
+unlock:
+ spin_unlock_irqrestore(&cbb_lock, flags);
+ WARN_ON(is_inband_err);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Register handler for CBB_SECURE interrupt for reporting errors
+ */
+static int tegra234_cbb_interrupt_enable(struct tegra_cbb *cbb)
+{
+ struct tegra234_cbb *priv = to_tegra234_cbb(cbb);
+
+ if (priv->sec_irq) {
+ int err = devm_request_irq(cbb->dev, priv->sec_irq, tegra234_cbb_isr, 0,
+ dev_name(cbb->dev), priv);
+ if (err) {
+ dev_err(cbb->dev, "failed to register interrupt %u: %d\n", priv->sec_irq,
+ err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void tegra234_cbb_error_enable(struct tegra_cbb *cbb)
+{
+ tegra_cbb_fault_enable(cbb);
+}
+
+static const struct tegra_cbb_ops tegra234_cbb_ops = {
+ .get_status = tegra234_cbb_get_status,
+ .error_clear = tegra234_cbb_error_clear,
+ .fault_enable = tegra234_cbb_fault_enable,
+ .error_enable = tegra234_cbb_error_enable,
+ .interrupt_enable = tegra234_cbb_interrupt_enable,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_show = tegra234_cbb_debugfs_show,
+#endif
+};
+
+static const char * const tegra234_master_id[] = {
+ [0x00] = "TZ",
+ [0x01] = "CCPLEX",
+ [0x02] = "CCPMU",
+ [0x03] = "BPMP_FW",
+ [0x04] = "AON",
+ [0x05] = "SCE",
+ [0x06] = "GPCDMA_P",
+ [0x07] = "TSECA_NONSECURE",
+ [0x08] = "TSECA_LIGHTSECURE",
+ [0x09] = "TSECA_HEAVYSECURE",
+ [0x0a] = "CORESIGHT",
+ [0x0b] = "APE",
+ [0x0c] = "PEATRANS",
+ [0x0d] = "JTAGM_DFT",
+ [0x0e] = "RCE",
+ [0x0f] = "DCE",
+ [0x10] = "PSC_FW_USER",
+ [0x11] = "PSC_FW_SUPERVISOR",
+ [0x12] = "PSC_FW_MACHINE",
+ [0x13] = "PSC_BOOT",
+ [0x14] = "BPMP_BOOT",
+ [0x15] = "NVDEC_NONSECURE",
+ [0x16] = "NVDEC_LIGHTSECURE",
+ [0x17] = "NVDEC_HEAVYSECURE",
+ [0x18] = "CBB_INTERNAL",
+ [0x19] = "RSVD"
+};
+
+static const struct tegra_cbb_error tegra234_cbb_errors[] = {
+ {
+ .code = "SLAVE_ERR",
+ .desc = "Slave being accessed responded with an error"
+ }, {
+ .code = "DECODE_ERR",
+ .desc = "Attempt to access an address hole"
+ }, {
+ .code = "FIREWALL_ERR",
+ .desc = "Attempt to access a region which is firewall protected"
+ }, {
+ .code = "TIMEOUT_ERR",
+ .desc = "No response returned by slave"
+ }, {
+ .code = "PWRDOWN_ERR",
+ .desc = "Attempt to access a portion of fabric that is powered down"
+ }, {
+ .code = "UNSUPPORTED_ERR",
+ .desc = "Attempt to access a slave through an unsupported access"
+ }
+};
+
+static const struct tegra234_slave_lookup tegra234_aon_slave_map[] = {
+ { "AXI2APB", 0x00000 },
+ { "AST", 0x14000 },
+ { "CBB", 0x15000 },
+ { "CPU", 0x16000 },
+};
+
+static const struct tegra234_cbb_fabric tegra234_aon_fabric = {
+ .name = "aon-fabric",
+ .master_id = tegra234_master_id,
+ .slave_map = tegra234_aon_slave_map,
+ .max_slaves = ARRAY_SIZE(tegra234_aon_slave_map),
+ .errors = tegra234_cbb_errors,
+ .max_errors = ARRAY_SIZE(tegra234_cbb_errors),
+ .notifier_offset = 0x17000,
+ .firewall_base = 0x30000,
+ .firewall_ctl = 0x8d0,
+ .firewall_wr_ctl = 0x8c8,
+};
+
+static const struct tegra234_slave_lookup tegra234_bpmp_slave_map[] = {
+ { "AXI2APB", 0x00000 },
+ { "AST0", 0x15000 },
+ { "AST1", 0x16000 },
+ { "CBB", 0x17000 },
+ { "CPU", 0x18000 },
+};
+
+static const struct tegra234_cbb_fabric tegra234_bpmp_fabric = {
+ .name = "bpmp-fabric",
+ .master_id = tegra234_master_id,
+ .slave_map = tegra234_bpmp_slave_map,
+ .max_slaves = ARRAY_SIZE(tegra234_bpmp_slave_map),
+ .errors = tegra234_cbb_errors,
+ .max_errors = ARRAY_SIZE(tegra234_cbb_errors),
+ .notifier_offset = 0x19000,
+ .firewall_base = 0x30000,
+ .firewall_ctl = 0x8f0,
+ .firewall_wr_ctl = 0x8e8,
+};
+
+static const struct tegra234_slave_lookup tegra234_cbb_slave_map[] = {
+ { "AON", 0x40000 },
+ { "BPMP", 0x41000 },
+ { "CBB", 0x42000 },
+ { "HOST1X", 0x43000 },
+ { "STM", 0x44000 },
+ { "FSI", 0x45000 },
+ { "PSC", 0x46000 },
+ { "PCIE_C1", 0x47000 },
+ { "PCIE_C2", 0x48000 },
+ { "PCIE_C3", 0x49000 },
+ { "PCIE_C0", 0x4a000 },
+ { "PCIE_C4", 0x4b000 },
+ { "GPU", 0x4c000 },
+ { "SMMU0", 0x4d000 },
+ { "SMMU1", 0x4e000 },
+ { "SMMU2", 0x4f000 },
+ { "SMMU3", 0x50000 },
+ { "SMMU4", 0x51000 },
+ { "PCIE_C10", 0x52000 },
+ { "PCIE_C7", 0x53000 },
+ { "PCIE_C8", 0x54000 },
+ { "PCIE_C9", 0x55000 },
+ { "PCIE_C5", 0x56000 },
+ { "PCIE_C6", 0x57000 },
+ { "DCE", 0x58000 },
+ { "RCE", 0x59000 },
+ { "SCE", 0x5a000 },
+ { "AXI2APB_1", 0x70000 },
+ { "AXI2APB_10", 0x71000 },
+ { "AXI2APB_11", 0x72000 },
+ { "AXI2APB_12", 0x73000 },
+ { "AXI2APB_13", 0x74000 },
+ { "AXI2APB_14", 0x75000 },
+ { "AXI2APB_15", 0x76000 },
+ { "AXI2APB_16", 0x77000 },
+ { "AXI2APB_17", 0x78000 },
+ { "AXI2APB_18", 0x79000 },
+ { "AXI2APB_19", 0x7a000 },
+ { "AXI2APB_2", 0x7b000 },
+ { "AXI2APB_20", 0x7c000 },
+ { "AXI2APB_21", 0x7d000 },
+ { "AXI2APB_22", 0x7e000 },
+ { "AXI2APB_23", 0x7f000 },
+ { "AXI2APB_25", 0x80000 },
+ { "AXI2APB_26", 0x81000 },
+ { "AXI2APB_27", 0x82000 },
+ { "AXI2APB_28", 0x83000 },
+ { "AXI2APB_29", 0x84000 },
+ { "AXI2APB_30", 0x85000 },
+ { "AXI2APB_31", 0x86000 },
+ { "AXI2APB_32", 0x87000 },
+ { "AXI2APB_33", 0x88000 },
+ { "AXI2APB_34", 0x89000 },
+ { "AXI2APB_35", 0x92000 },
+ { "AXI2APB_4", 0x8b000 },
+ { "AXI2APB_5", 0x8c000 },
+ { "AXI2APB_6", 0x8d000 },
+ { "AXI2APB_7", 0x8e000 },
+ { "AXI2APB_8", 0x8f000 },
+ { "AXI2APB_9", 0x90000 },
+ { "AXI2APB_3", 0x91000 },
+};
+
+static const struct tegra234_cbb_fabric tegra234_cbb_fabric = {
+ .name = "cbb-fabric",
+ .master_id = tegra234_master_id,
+ .slave_map = tegra234_cbb_slave_map,
+ .max_slaves = ARRAY_SIZE(tegra234_cbb_slave_map),
+ .errors = tegra234_cbb_errors,
+ .max_errors = ARRAY_SIZE(tegra234_cbb_errors),
+ .notifier_offset = 0x60000,
+ .off_mask_erd = 0x3a004,
+ .firewall_base = 0x10000,
+ .firewall_ctl = 0x23f0,
+ .firewall_wr_ctl = 0x23e8,
+};
+
+static const struct tegra234_slave_lookup tegra234_common_slave_map[] = {
+ { "AXI2APB", 0x00000 },
+ { "AST0", 0x15000 },
+ { "AST1", 0x16000 },
+ { "CBB", 0x17000 },
+ { "RSVD", 0x00000 },
+ { "CPU", 0x18000 },
+};
+
+static const struct tegra234_cbb_fabric tegra234_dce_fabric = {
+ .name = "dce-fabric",
+ .master_id = tegra234_master_id,
+ .slave_map = tegra234_common_slave_map,
+ .max_slaves = ARRAY_SIZE(tegra234_common_slave_map),
+ .errors = tegra234_cbb_errors,
+ .max_errors = ARRAY_SIZE(tegra234_cbb_errors),
+ .notifier_offset = 0x19000,
+ .firewall_base = 0x30000,
+ .firewall_ctl = 0x290,
+ .firewall_wr_ctl = 0x288,
+};
+
+static const struct tegra234_cbb_fabric tegra234_rce_fabric = {
+ .name = "rce-fabric",
+ .master_id = tegra234_master_id,
+ .slave_map = tegra234_common_slave_map,
+ .max_slaves = ARRAY_SIZE(tegra234_common_slave_map),
+ .errors = tegra234_cbb_errors,
+ .max_errors = ARRAY_SIZE(tegra234_cbb_errors),
+ .notifier_offset = 0x19000,
+ .firewall_base = 0x30000,
+ .firewall_ctl = 0x290,
+ .firewall_wr_ctl = 0x288,
+};
+
+static const struct tegra234_cbb_fabric tegra234_sce_fabric = {
+ .name = "sce-fabric",
+ .master_id = tegra234_master_id,
+ .slave_map = tegra234_common_slave_map,
+ .max_slaves = ARRAY_SIZE(tegra234_common_slave_map),
+ .errors = tegra234_cbb_errors,
+ .max_errors = ARRAY_SIZE(tegra234_cbb_errors),
+ .notifier_offset = 0x19000,
+ .firewall_base = 0x30000,
+ .firewall_ctl = 0x290,
+ .firewall_wr_ctl = 0x288,
+};
+
+static const char * const tegra241_master_id[] = {
+ [0x0] = "TZ",
+ [0x1] = "CCPLEX",
+ [0x2] = "CCPMU",
+ [0x3] = "BPMP_FW",
+ [0x4] = "PSC_FW_USER",
+ [0x5] = "PSC_FW_SUPERVISOR",
+ [0x6] = "PSC_FW_MACHINE",
+ [0x7] = "PSC_BOOT",
+ [0x8] = "BPMP_BOOT",
+ [0x9] = "JTAGM_DFT",
+ [0xa] = "CORESIGHT",
+ [0xb] = "GPU",
+ [0xc] = "PEATRANS",
+ [0xd ... 0x3f] = "RSVD"
+};
+
+/*
+ * Possible causes for Slave and Timeout errors.
+ * SLAVE_ERR:
+ * The slave being accessed responded with an error. The slave could return
+ * an error in various cases: an unsupported access, a clamp setting when
+ * power gated, a register-level firewall (SCR), an address hole within the
+ * slave, etc.
+ *
+ * TIMEOUT_ERR:
+ * No response returned by the slave. This can be due to the slave being
+ * clock gated, under reset, powered down, or unable to respond due to an
+ * internal slave issue.
+ */
+static const struct tegra_cbb_error tegra241_cbb_errors[] = {
+ {
+ .code = "SLAVE_ERR",
+ .desc = "Slave being accessed responded with an error."
+ }, {
+ .code = "DECODE_ERR",
+ .desc = "Attempt to access an address hole or Reserved region of memory."
+ }, {
+ .code = "FIREWALL_ERR",
+ .desc = "Attempt to access a region which is firewalled."
+ }, {
+ .code = "TIMEOUT_ERR",
+ .desc = "No response returned by slave."
+ }, {
+ .code = "PWRDOWN_ERR",
+ .desc = "Attempt to access a portion of the fabric that is powered down."
+ }, {
+ .code = "UNSUPPORTED_ERR",
+ .desc = "Attempt to access a slave through an unsupported access."
+ }, {
+ .code = "POISON_ERR",
+ .desc = "Slave responds with poison error to indicate error in data."
+ }, {
+ .code = "RSVD"
+ }, {
+ .code = "RSVD"
+ }, {
+ .code = "RSVD"
+ }, {
+ .code = "RSVD"
+ }, {
+ .code = "RSVD"
+ }, {
+ .code = "RSVD"
+ }, {
+ .code = "RSVD"
+ }, {
+ .code = "RSVD"
+ }, {
+ .code = "RSVD"
+ }, {
+ .code = "NO_SUCH_ADDRESS_ERR",
+ .desc = "The address belongs to the pri_target range but there is no register "
+ "implemented at the address."
+ }, {
+ .code = "TASK_ERR",
+ .desc = "Attempt to update a PRI task when the current task has still not "
+ "completed."
+ }, {
+ .code = "EXTERNAL_ERR",
+ .desc = "Indicates that an external PRI register access met with an error due to "
+ "any issue in the unit."
+ }, {
+ .code = "INDEX_ERR",
+ .desc = "Applicable to PRI index aperture pair, when the programmed index is "
+ "outside the range defined in the manual."
+ }, {
+ .code = "RESET_ERR",
+ .desc = "Target in Reset Error: Attempt to access a SubPri or external PRI "
+ "register but they are in reset."
+ }, {
+ .code = "REGISTER_RST_ERR",
+ .desc = "Attempt to access a PRI register but the register is partial or "
+ "completely in reset."
+ }, {
+ .code = "POWER_GATED_ERR",
+ .desc = "Returned by external PRI client when the external access goes to a power "
+ "gated domain."
+ }, {
+ .code = "SUBPRI_FS_ERR",
+ .desc = "Subpri is floorswept: Attempt to access a subpri through the main pri "
+ "target but subPri logic is floorswept."
+ }, {
+ .code = "SUBPRI_CLK_OFF_ERR",
+ .desc = "Subpri clock is off: Attempt to access a subpri through the main pri "
+ "target but subPris clock is gated/off."
+ },
+};
+
+static const struct tegra234_slave_lookup tegra241_cbb_slave_map[] = {
+ { "RSVD", 0x00000 },
+ { "PCIE_C8", 0x51000 },
+ { "PCIE_C9", 0x52000 },
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "AON", 0x5b000 },
+ { "BPMP", 0x5c000 },
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "PSC", 0x5d000 },
+ { "STM", 0x5e000 },
+ { "AXI2APB_1", 0x70000 },
+ { "AXI2APB_10", 0x71000 },
+ { "AXI2APB_11", 0x72000 },
+ { "AXI2APB_12", 0x73000 },
+ { "AXI2APB_13", 0x74000 },
+ { "AXI2APB_14", 0x75000 },
+ { "AXI2APB_15", 0x76000 },
+ { "AXI2APB_16", 0x77000 },
+ { "AXI2APB_17", 0x78000 },
+ { "AXI2APB_18", 0x79000 },
+ { "AXI2APB_19", 0x7a000 },
+ { "AXI2APB_2", 0x7b000 },
+ { "AXI2APB_20", 0x7c000 },
+ { "AXI2APB_4", 0x87000 },
+ { "AXI2APB_5", 0x88000 },
+ { "AXI2APB_6", 0x89000 },
+ { "AXI2APB_7", 0x8a000 },
+ { "AXI2APB_8", 0x8b000 },
+ { "AXI2APB_9", 0x8c000 },
+ { "AXI2APB_3", 0x8d000 },
+ { "AXI2APB_21", 0x7d000 },
+ { "AXI2APB_22", 0x7e000 },
+ { "AXI2APB_23", 0x7f000 },
+ { "AXI2APB_24", 0x80000 },
+ { "AXI2APB_25", 0x81000 },
+ { "AXI2APB_26", 0x82000 },
+ { "AXI2APB_27", 0x83000 },
+ { "AXI2APB_28", 0x84000 },
+ { "PCIE_C4", 0x53000 },
+ { "PCIE_C5", 0x54000 },
+ { "PCIE_C6", 0x55000 },
+ { "PCIE_C7", 0x56000 },
+ { "PCIE_C2", 0x57000 },
+ { "PCIE_C3", 0x58000 },
+ { "PCIE_C0", 0x59000 },
+ { "PCIE_C1", 0x5a000 },
+ { "CCPLEX", 0x50000 },
+ { "AXI2APB_29", 0x85000 },
+ { "AXI2APB_30", 0x86000 },
+ { "CBB_CENTRAL", 0x00000 },
+ { "AXI2APB_31", 0x8E000 },
+ { "AXI2APB_32", 0x8F000 },
+};
+
+static const struct tegra234_cbb_fabric tegra241_cbb_fabric = {
+ .name = "cbb-fabric",
+ .master_id = tegra241_master_id,
+ .slave_map = tegra241_cbb_slave_map,
+ .max_slaves = ARRAY_SIZE(tegra241_cbb_slave_map),
+ .errors = tegra241_cbb_errors,
+ .max_errors = ARRAY_SIZE(tegra241_cbb_errors),
+ .notifier_offset = 0x60000,
+ .off_mask_erd = 0x40004,
+ .firewall_base = 0x20000,
+ .firewall_ctl = 0x2370,
+ .firewall_wr_ctl = 0x2368,
+};
+
+static const struct tegra234_slave_lookup tegra241_bpmp_slave_map[] = {
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "CBB", 0x15000 },
+ { "CPU", 0x16000 },
+ { "AXI2APB", 0x00000 },
+ { "DBB0", 0x17000 },
+ { "DBB1", 0x18000 },
+};
+
+static const struct tegra234_cbb_fabric tegra241_bpmp_fabric = {
+ .name = "bpmp-fabric",
+ .master_id = tegra241_master_id,
+ .slave_map = tegra241_bpmp_slave_map,
+ .max_slaves = ARRAY_SIZE(tegra241_bpmp_slave_map),
+ .errors = tegra241_cbb_errors,
+ .max_errors = ARRAY_SIZE(tegra241_cbb_errors),
+ .notifier_offset = 0x19000,
+ .firewall_base = 0x30000,
+ .firewall_ctl = 0x8f0,
+ .firewall_wr_ctl = 0x8e8,
+};
+
+static const struct of_device_id tegra234_cbb_dt_ids[] = {
+ { .compatible = "nvidia,tegra234-cbb-fabric", .data = &tegra234_cbb_fabric },
+ { .compatible = "nvidia,tegra234-aon-fabric", .data = &tegra234_aon_fabric },
+ { .compatible = "nvidia,tegra234-bpmp-fabric", .data = &tegra234_bpmp_fabric },
+ { .compatible = "nvidia,tegra234-dce-fabric", .data = &tegra234_dce_fabric },
+ { .compatible = "nvidia,tegra234-rce-fabric", .data = &tegra234_rce_fabric },
+ { .compatible = "nvidia,tegra234-sce-fabric", .data = &tegra234_sce_fabric },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, tegra234_cbb_dt_ids);
+
+struct tegra234_cbb_acpi_uid {
+ const char *hid;
+ const char *uid;
+ const struct tegra234_cbb_fabric *fabric;
+};
+
+static const struct tegra234_cbb_acpi_uid tegra234_cbb_acpi_uids[] = {
+ { "NVDA1070", "1", &tegra241_cbb_fabric },
+ { "NVDA1070", "2", &tegra241_bpmp_fabric },
+ { },
+};
+
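+/* Both Tegra241 fabrics share one ACPI _HID; the _UID selects the fabric. */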
+static const struct tegra234_cbb_fabric *
+tegra234_cbb_acpi_get_fabric(struct acpi_device *adev)
+{
+ const struct tegra234_cbb_acpi_uid *entry;
+
+ for (entry = tegra234_cbb_acpi_uids; entry->hid; entry++) {
+ if (acpi_dev_hid_uid_match(adev, entry->hid, entry->uid))
+ return entry->fabric;
+ }
+
+ return NULL;
+}
+
+static const struct acpi_device_id tegra241_cbb_acpi_ids[] = {
+ { "NVDA1070" },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, tegra241_cbb_acpi_ids);
+
+static int tegra234_cbb_probe(struct platform_device *pdev)
+{
+ const struct tegra234_cbb_fabric *fabric;
+ struct tegra234_cbb *cbb;
+ unsigned long flags = 0;
+ int err;
+
+ if (pdev->dev.of_node) {
+ fabric = of_device_get_match_data(&pdev->dev);
+ } else {
+		struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
+
+		if (!device)
+ return -ENODEV;
+
+ fabric = tegra234_cbb_acpi_get_fabric(device);
+ if (!fabric) {
+ dev_err(&pdev->dev, "no device match found\n");
+ return -ENODEV;
+ }
+ }
+
+ cbb = devm_kzalloc(&pdev->dev, sizeof(*cbb), GFP_KERNEL);
+ if (!cbb)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&cbb->base.node);
+ cbb->base.ops = &tegra234_cbb_ops;
+ cbb->base.dev = &pdev->dev;
+ cbb->fabric = fabric;
+
+ cbb->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &cbb->res);
+ if (IS_ERR(cbb->regs))
+ return PTR_ERR(cbb->regs);
+
+ err = tegra_cbb_get_irq(pdev, NULL, &cbb->sec_irq);
+ if (err)
+ return err;
+
+ platform_set_drvdata(pdev, cbb);
+
+	/*
+	 * Don't enable error reporting for a fabric if writes to its registers
+	 * are blocked by the CBB firewall.
+	 */
+ if (!tegra234_cbb_write_access_allowed(pdev, cbb)) {
+ dev_info(&pdev->dev, "error reporting not enabled due to firewall\n");
+ return 0;
+ }
+
+ spin_lock_irqsave(&cbb_lock, flags);
+ list_add(&cbb->base.node, &cbb_list);
+ spin_unlock_irqrestore(&cbb_lock, flags);
+
+ /* set ERD bit to mask SError and generate interrupt to report error */
+ if (cbb->fabric->off_mask_erd)
+ tegra234_cbb_mask_serror(cbb);
+
+ return tegra_cbb_register(&cbb->base);
+}
+
+static int __maybe_unused tegra234_cbb_resume_noirq(struct device *dev)
+{
+ struct tegra234_cbb *cbb = dev_get_drvdata(dev);
+
+ tegra234_cbb_error_enable(&cbb->base);
+
+ dev_dbg(dev, "%s resumed\n", cbb->fabric->name);
+
+ return 0;
+}
+
+static const struct dev_pm_ops tegra234_cbb_pm = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, tegra234_cbb_resume_noirq)
+};
+
+static struct platform_driver tegra234_cbb_driver = {
+ .probe = tegra234_cbb_probe,
+ .driver = {
+ .name = "tegra234-cbb",
+ .of_match_table = tegra234_cbb_dt_ids,
+ .acpi_match_table = tegra241_cbb_acpi_ids,
+ .pm = &tegra234_cbb_pm,
+ },
+};
+
+static int __init tegra234_cbb_init(void)
+{
+ return platform_driver_register(&tegra234_cbb_driver);
+}
+pure_initcall(tegra234_cbb_init);
+
+static void __exit tegra234_cbb_exit(void)
+{
+ platform_driver_unregister(&tegra234_cbb_driver);
+}
+module_exit(tegra234_cbb_exit);
+
+MODULE_DESCRIPTION("Control Backbone 2.0 error handling driver for Tegra234");
diff --git a/drivers/soc/tegra/common.c b/drivers/soc/tegra/common.c
new file mode 100644
index 0000000000..dff6d5ef4e
--- /dev/null
+++ b/drivers/soc/tegra/common.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#define dev_fmt(fmt) "tegra-soc: " fmt
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/of.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_runtime.h>
+
+#include <soc/tegra/common.h>
+#include <soc/tegra/fuse.h>
+
+static const struct of_device_id tegra_machine_match[] = {
+ { .compatible = "nvidia,tegra20", },
+ { .compatible = "nvidia,tegra30", },
+ { .compatible = "nvidia,tegra114", },
+ { .compatible = "nvidia,tegra124", },
+ { .compatible = "nvidia,tegra132", },
+ { .compatible = "nvidia,tegra210", },
+ { }
+};
+
+bool soc_is_tegra(void)
+{
+ const struct of_device_id *match;
+ struct device_node *root;
+
+ root = of_find_node_by_path("/");
+ if (!root)
+ return false;
+
+ match = of_match_node(tegra_machine_match, root);
+ of_node_put(root);
+
+ return match != NULL;
+}
+
+static int tegra_core_dev_init_opp_state(struct device *dev)
+{
+ unsigned long rate;
+ struct clk *clk;
+ bool rpm_enabled;
+ int err;
+
+ clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "failed to get clk: %pe\n", clk);
+ return PTR_ERR(clk);
+ }
+
+ rate = clk_get_rate(clk);
+ if (!rate) {
+ dev_err(dev, "failed to get clk rate\n");
+ return -EINVAL;
+ }
+
+	/*
+	 * Runtime PM of the device must be enabled in order to set up
+	 * GENPD's performance state properly, because the GENPD core checks
+	 * whether the device is suspended and this check doesn't work while
+	 * RPM is disabled. With RPM enabled, the OPP vote below only gets
+	 * cached in GENPD for the device; the vote is actually applied the
+	 * next time the device is runtime resumed.
+	 */
+ rpm_enabled = pm_runtime_enabled(dev);
+ if (!rpm_enabled)
+ pm_runtime_enable(dev);
+
+ /* should never happen in practice */
+ if (!pm_runtime_enabled(dev)) {
+ dev_WARN(dev, "failed to enable runtime PM\n");
+ pm_runtime_disable(dev);
+ return -EINVAL;
+ }
+
+ /* first dummy rate-setting initializes voltage vote */
+ err = dev_pm_opp_set_rate(dev, rate);
+
+ if (!rpm_enabled)
+ pm_runtime_disable(dev);
+
+ if (err) {
+ dev_err(dev, "failed to initialize OPP clock: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * devm_tegra_core_dev_init_opp_table() - initialize OPP table
+ * @dev: device for which OPP table is initialized
+ * @params: pointer to the OPP table configuration
+ *
+ * This function will initialize OPP table and sync OPP state of a Tegra SoC
+ * core device.
+ *
+ * Return: 0 on success or a negative error number on failure.
+ */
+int devm_tegra_core_dev_init_opp_table(struct device *dev,
+ struct tegra_core_opp_params *params)
+{
+ u32 hw_version;
+ int err;
+	/*
+	 * The clk's connection ID to set is NULL and this is a
+	 * NULL-terminated array, hence two NULL entries.
+	 */
+ const char *clk_names[] = { NULL, NULL };
+ struct dev_pm_opp_config config = {
+ /*
+ * For some devices we don't have any OPP table in the DT, and
+ * in order to use the same code path for all the devices, we
+ * create a dummy OPP table for them via this. The dummy OPP
+ * table is only capable of doing clk_set_rate() on invocation
+ * of dev_pm_opp_set_rate() and doesn't provide any other
+ * functionality.
+ */
+ .clk_names = clk_names,
+ };
+
+ if (of_machine_is_compatible("nvidia,tegra20")) {
+ hw_version = BIT(tegra_sku_info.soc_process_id);
+ config.supported_hw = &hw_version;
+ config.supported_hw_count = 1;
+ } else if (of_machine_is_compatible("nvidia,tegra30")) {
+ hw_version = BIT(tegra_sku_info.soc_speedo_id);
+ config.supported_hw = &hw_version;
+ config.supported_hw_count = 1;
+ }
+
+ err = devm_pm_opp_set_config(dev, &config);
+ if (err) {
+ dev_err(dev, "failed to set OPP config: %d\n", err);
+ return err;
+ }
+
+	/*
+	 * Tegra114+ doesn't support OPP yet, so return early for the
+	 * non-Tegra20/30 case.
+	 */
+ if (!config.supported_hw)
+ return -ENODEV;
+
+ /*
+ * Older device-trees have an empty OPP table, we will get
+ * -ENODEV from devm_pm_opp_of_add_table() in this case.
+ */
+ err = devm_pm_opp_of_add_table(dev);
+ if (err) {
+ if (err != -ENODEV)
+ dev_err(dev, "failed to add OPP table: %d\n", err);
+
+ return err;
+ }
+
+ if (params->init_state) {
+ err = tegra_core_dev_init_opp_state(dev);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_tegra_core_dev_init_opp_table);
diff --git a/drivers/soc/tegra/flowctrl.c b/drivers/soc/tegra/flowctrl.c
new file mode 100644
index 0000000000..221202db33
--- /dev/null
+++ b/drivers/soc/tegra/flowctrl.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * drivers/soc/tegra/flowctrl.c
+ *
+ * Functions and macros to control the flowcontroller
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation. All rights reserved.
+ */
+
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+#include <soc/tegra/common.h>
+#include <soc/tegra/flowctrl.h>
+#include <soc/tegra/fuse.h>
+
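+/* Per-CPU register offsets; CPU2 and CPU3 follow CPU1 at an 8-byte stride. */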
+static u8 flowctrl_offset_halt_cpu[] = {
+ FLOW_CTRL_HALT_CPU0_EVENTS,
+ FLOW_CTRL_HALT_CPU1_EVENTS,
+ FLOW_CTRL_HALT_CPU1_EVENTS + 8,
+ FLOW_CTRL_HALT_CPU1_EVENTS + 16,
+};
+
+static u8 flowctrl_offset_cpu_csr[] = {
+ FLOW_CTRL_CPU0_CSR,
+ FLOW_CTRL_CPU1_CSR,
+ FLOW_CTRL_CPU1_CSR + 8,
+ FLOW_CTRL_CPU1_CSR + 16,
+};
+
+static void __iomem *tegra_flowctrl_base;
+
+static void flowctrl_update(u8 offset, u32 value)
+{
+ if (WARN_ONCE(IS_ERR_OR_NULL(tegra_flowctrl_base),
+ "Tegra flowctrl not initialised!\n"))
+ return;
+
+ writel(value, tegra_flowctrl_base + offset);
+
+ /* ensure the update has reached the flow controller */
+ wmb();
+ readl_relaxed(tegra_flowctrl_base + offset);
+}
+
+u32 flowctrl_read_cpu_csr(unsigned int cpuid)
+{
+ u8 offset = flowctrl_offset_cpu_csr[cpuid];
+
+ if (WARN_ONCE(IS_ERR_OR_NULL(tegra_flowctrl_base),
+ "Tegra flowctrl not initialised!\n"))
+ return 0;
+
+ return readl(tegra_flowctrl_base + offset);
+}
+
+void flowctrl_write_cpu_csr(unsigned int cpuid, u32 value)
+{
+ return flowctrl_update(flowctrl_offset_cpu_csr[cpuid], value);
+}
+
+void flowctrl_write_cpu_halt(unsigned int cpuid, u32 value)
+{
+ return flowctrl_update(flowctrl_offset_halt_cpu[cpuid], value);
+}
+
+void flowctrl_cpu_suspend_enter(unsigned int cpuid)
+{
+ unsigned int reg;
+ int i;
+
+ reg = flowctrl_read_cpu_csr(cpuid);
+ switch (tegra_get_chip_id()) {
+ case TEGRA20:
+ /* clear wfe bitmap */
+ reg &= ~TEGRA20_FLOW_CTRL_CSR_WFE_BITMAP;
+ /* clear wfi bitmap */
+ reg &= ~TEGRA20_FLOW_CTRL_CSR_WFI_BITMAP;
+ /* pwr gating on wfe */
+ reg |= TEGRA20_FLOW_CTRL_CSR_WFE_CPU0 << cpuid;
+ break;
+ case TEGRA30:
+ case TEGRA114:
+ case TEGRA124:
+ /* clear wfe bitmap */
+ reg &= ~TEGRA30_FLOW_CTRL_CSR_WFE_BITMAP;
+ /* clear wfi bitmap */
+ reg &= ~TEGRA30_FLOW_CTRL_CSR_WFI_BITMAP;
+
+ if (tegra_get_chip_id() == TEGRA30) {
+			/*
+			 * The wfi doesn't work well on Tegra30 because the
+			 * CPU hangs under some odd circumstances after
+			 * power-gating (like memory running off PLLP), hence
+			 * use wfe, which works perfectly fine. Note that the
+			 * Tegra30 TRM clearly states that wfi should be used
+			 * for "Cluster Switching", while wfe is for
+			 * power-gating, just like it is done on Tegra20.
+			 */
+ reg |= TEGRA20_FLOW_CTRL_CSR_WFE_CPU0 << cpuid;
+ } else {
+ /* pwr gating on wfi */
+ reg |= TEGRA30_FLOW_CTRL_CSR_WFI_CPU0 << cpuid;
+ }
+ break;
+ }
+ reg |= FLOW_CTRL_CSR_INTR_FLAG; /* clear intr flag */
+ reg |= FLOW_CTRL_CSR_EVENT_FLAG; /* clear event flag */
+ reg |= FLOW_CTRL_CSR_ENABLE; /* pwr gating */
+ flowctrl_write_cpu_csr(cpuid, reg);
+
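+	/* clear pending event and interrupt flags on all other CPUs */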
+ for (i = 0; i < num_possible_cpus(); i++) {
+ if (i == cpuid)
+ continue;
+ reg = flowctrl_read_cpu_csr(i);
+ reg |= FLOW_CTRL_CSR_EVENT_FLAG;
+ reg |= FLOW_CTRL_CSR_INTR_FLAG;
+ flowctrl_write_cpu_csr(i, reg);
+ }
+}
+
+void flowctrl_cpu_suspend_exit(unsigned int cpuid)
+{
+ unsigned int reg;
+
+ /* Disable powergating via flow controller for CPU0 */
+ reg = flowctrl_read_cpu_csr(cpuid);
+ switch (tegra_get_chip_id()) {
+ case TEGRA20:
+ /* clear wfe bitmap */
+ reg &= ~TEGRA20_FLOW_CTRL_CSR_WFE_BITMAP;
+ /* clear wfi bitmap */
+ reg &= ~TEGRA20_FLOW_CTRL_CSR_WFI_BITMAP;
+ break;
+ case TEGRA30:
+ case TEGRA114:
+ case TEGRA124:
+ /* clear wfe bitmap */
+ reg &= ~TEGRA30_FLOW_CTRL_CSR_WFE_BITMAP;
+ /* clear wfi bitmap */
+ reg &= ~TEGRA30_FLOW_CTRL_CSR_WFI_BITMAP;
+ break;
+ }
+ reg &= ~FLOW_CTRL_CSR_ENABLE; /* clear enable */
+ reg |= FLOW_CTRL_CSR_INTR_FLAG; /* clear intr */
+ reg |= FLOW_CTRL_CSR_EVENT_FLAG; /* clear event */
+ flowctrl_write_cpu_csr(cpuid, reg);
+}
+
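+/* Swap the early-boot I/O mapping for a devm-managed one when the device probes. */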
+static int tegra_flowctrl_probe(struct platform_device *pdev)
+{
+ void __iomem *base = tegra_flowctrl_base;
+
+ tegra_flowctrl_base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(tegra_flowctrl_base))
+ return PTR_ERR(tegra_flowctrl_base);
+
+ iounmap(base);
+
+ return 0;
+}
+
+static const struct of_device_id tegra_flowctrl_match[] = {
+ { .compatible = "nvidia,tegra210-flowctrl" },
+ { .compatible = "nvidia,tegra124-flowctrl" },
+ { .compatible = "nvidia,tegra114-flowctrl" },
+ { .compatible = "nvidia,tegra30-flowctrl" },
+ { .compatible = "nvidia,tegra20-flowctrl" },
+ { }
+};
+
+static struct platform_driver tegra_flowctrl_driver = {
+ .driver = {
+ .name = "tegra-flowctrl",
+ .suppress_bind_attrs = true,
+ .of_match_table = tegra_flowctrl_match,
+ },
+ .probe = tegra_flowctrl_probe,
+};
+builtin_platform_driver(tegra_flowctrl_driver);
+
+static int __init tegra_flowctrl_init(void)
+{
+ struct resource res;
+ struct device_node *np;
+
+ if (!soc_is_tegra())
+ return 0;
+
+ np = of_find_matching_node(NULL, tegra_flowctrl_match);
+ if (np) {
+ if (of_address_to_resource(np, 0, &res) < 0) {
+ pr_err("failed to get flowctrl register\n");
+ return -ENXIO;
+ }
+ of_node_put(np);
+ } else if (IS_ENABLED(CONFIG_ARM)) {
+		/*
+		 * Hardcoded fallback for 32-bit Tegra devices if the
+		 * device tree node is missing.
+		 */
+ res.start = 0x60007000;
+ res.end = 0x60007fff;
+ res.flags = IORESOURCE_MEM;
+ } else {
+		/*
+		 * At this point we're running on a Tegra that doesn't
+		 * support the flow controller (e.g. Tegra186), so just
+		 * return.
+		 */
+ return 0;
+ }
+
+ tegra_flowctrl_base = ioremap(res.start, resource_size(&res));
+ if (!tegra_flowctrl_base)
+ return -ENXIO;
+
+ return 0;
+}
+early_initcall(tegra_flowctrl_init);
diff --git a/drivers/soc/tegra/fuse/Makefile b/drivers/soc/tegra/fuse/Makefile
new file mode 100644
index 0000000000..ea8332cc39
--- /dev/null
+++ b/drivers/soc/tegra/fuse/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-y += fuse-tegra.o
+obj-y += fuse-tegra30.o
+obj-y += tegra-apbmisc.o
+obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += fuse-tegra20.o
+obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += speedo-tegra20.o
+obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += speedo-tegra30.o
+obj-$(CONFIG_ARCH_TEGRA_114_SOC) += speedo-tegra114.o
+obj-$(CONFIG_ARCH_TEGRA_124_SOC) += speedo-tegra124.o
+obj-$(CONFIG_ARCH_TEGRA_132_SOC) += speedo-tegra124.o
+obj-$(CONFIG_ARCH_TEGRA_210_SOC) += speedo-tegra210.o
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
new file mode 100644
index 0000000000..a2c28f493a
--- /dev/null
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -0,0 +1,541 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2013-2023, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/kobject.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+
+#include <soc/tegra/common.h>
+#include <soc/tegra/fuse.h>
+
+#include "fuse.h"
+
+struct tegra_sku_info tegra_sku_info;
+EXPORT_SYMBOL(tegra_sku_info);
+
+static const char *tegra_revision_name[TEGRA_REVISION_MAX] = {
+ [TEGRA_REVISION_UNKNOWN] = "unknown",
+ [TEGRA_REVISION_A01] = "A01",
+ [TEGRA_REVISION_A02] = "A02",
+ [TEGRA_REVISION_A03] = "A03",
+ [TEGRA_REVISION_A03p] = "A03 prime",
+ [TEGRA_REVISION_A04] = "A04",
+};
+
+static const char *tegra_platform_name[TEGRA_PLATFORM_MAX] = {
+ [TEGRA_PLATFORM_SILICON] = "Silicon",
+ [TEGRA_PLATFORM_QT] = "QT",
+ [TEGRA_PLATFORM_SYSTEM_FPGA] = "System FPGA",
+ [TEGRA_PLATFORM_UNIT_FPGA] = "Unit FPGA",
+ [TEGRA_PLATFORM_ASIM_QT] = "Asim QT",
+ [TEGRA_PLATFORM_ASIM_LINSIM] = "Asim Linsim",
+ [TEGRA_PLATFORM_DSIM_ASIM_LINSIM] = "Dsim Asim Linsim",
+ [TEGRA_PLATFORM_VERIFICATION_SIMULATION] = "Verification Simulation",
+ [TEGRA_PLATFORM_VDK] = "VDK",
+ [TEGRA_PLATFORM_VSP] = "VSP",
+};
+
+static const struct of_device_id car_match[] __initconst = {
+ { .compatible = "nvidia,tegra20-car", },
+ { .compatible = "nvidia,tegra30-car", },
+ { .compatible = "nvidia,tegra114-car", },
+ { .compatible = "nvidia,tegra124-car", },
+ { .compatible = "nvidia,tegra132-car", },
+ { .compatible = "nvidia,tegra210-car", },
+ {},
+};
+
+static struct tegra_fuse *fuse = &(struct tegra_fuse) {
+ .base = NULL,
+ .soc = NULL,
+};
+
+static const struct of_device_id tegra_fuse_match[] = {
+#ifdef CONFIG_ARCH_TEGRA_234_SOC
+ { .compatible = "nvidia,tegra234-efuse", .data = &tegra234_fuse_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_194_SOC
+ { .compatible = "nvidia,tegra194-efuse", .data = &tegra194_fuse_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_186_SOC
+ { .compatible = "nvidia,tegra186-efuse", .data = &tegra186_fuse_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_210_SOC
+ { .compatible = "nvidia,tegra210-efuse", .data = &tegra210_fuse_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_132_SOC
+ { .compatible = "nvidia,tegra132-efuse", .data = &tegra124_fuse_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_124_SOC
+ { .compatible = "nvidia,tegra124-efuse", .data = &tegra124_fuse_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_114_SOC
+ { .compatible = "nvidia,tegra114-efuse", .data = &tegra114_fuse_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+ { .compatible = "nvidia,tegra30-efuse", .data = &tegra30_fuse_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ { .compatible = "nvidia,tegra20-efuse", .data = &tegra20_fuse_soc },
+#endif
+ { /* sentinel */ }
+};
+
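+/* NVMEM read callback: fuses are exposed as an array of 32-bit words. */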
+static int tegra_fuse_read(void *priv, unsigned int offset, void *value,
+ size_t bytes)
+{
+ unsigned int count = bytes / 4, i;
+ struct tegra_fuse *fuse = priv;
+ u32 *buffer = value;
+
+ for (i = 0; i < count; i++)
+ buffer[i] = fuse->read(fuse, offset + i * 4);
+
+ return 0;
+}
+
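+/*
+ * Devres action: restore the early-boot I/O mapping and clear the stale
+ * clock pointer if the probe unwinds.
+ */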
+static void tegra_fuse_restore(void *base)
+{
+ fuse->base = (void __iomem *)base;
+ fuse->clk = NULL;
+}
+
+static int tegra_fuse_probe(struct platform_device *pdev)
+{
+ void __iomem *base = fuse->base;
+ struct nvmem_config nvmem;
+ struct resource *res;
+ int err;
+
+ err = devm_add_action(&pdev->dev, tegra_fuse_restore, (void __force *)base);
+ if (err)
+ return err;
+
+ /* take over the memory region from the early initialization */
+ fuse->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(fuse->base))
+ return PTR_ERR(fuse->base);
+ fuse->phys = res->start;
+
+ fuse->clk = devm_clk_get(&pdev->dev, "fuse");
+ if (IS_ERR(fuse->clk)) {
+ if (PTR_ERR(fuse->clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to get FUSE clock: %ld",
+ PTR_ERR(fuse->clk));
+
+ return PTR_ERR(fuse->clk);
+ }
+
+ platform_set_drvdata(pdev, fuse);
+ fuse->dev = &pdev->dev;
+
+ err = devm_pm_runtime_enable(&pdev->dev);
+ if (err)
+ return err;
+
+ if (fuse->soc->probe) {
+ err = fuse->soc->probe(fuse);
+ if (err < 0)
+ return err;
+ }
+
+ memset(&nvmem, 0, sizeof(nvmem));
+ nvmem.dev = &pdev->dev;
+ nvmem.name = "fuse";
+ nvmem.id = -1;
+ nvmem.owner = THIS_MODULE;
+ nvmem.cells = fuse->soc->cells;
+ nvmem.ncells = fuse->soc->num_cells;
+ nvmem.keepout = fuse->soc->keepouts;
+ nvmem.nkeepout = fuse->soc->num_keepouts;
+ nvmem.type = NVMEM_TYPE_OTP;
+ nvmem.read_only = true;
+ nvmem.root_only = false;
+ nvmem.reg_read = tegra_fuse_read;
+ nvmem.size = fuse->soc->info->size;
+ nvmem.word_size = 4;
+ nvmem.stride = 4;
+ nvmem.priv = fuse;
+
+ fuse->nvmem = devm_nvmem_register(&pdev->dev, &nvmem);
+ if (IS_ERR(fuse->nvmem)) {
+ err = PTR_ERR(fuse->nvmem);
+ dev_err(&pdev->dev, "failed to register NVMEM device: %d\n",
+ err);
+ return err;
+ }
+
+ fuse->rst = devm_reset_control_get_optional(&pdev->dev, "fuse");
+ if (IS_ERR(fuse->rst)) {
+ err = PTR_ERR(fuse->rst);
+ dev_err(&pdev->dev, "failed to get FUSE reset: %pe\n",
+ fuse->rst);
+ return err;
+ }
+
+	/*
+	 * The FUSE clock is enabled at boot time, hence this resume/suspend
+	 * pair disables the clock in addition to resetting the hardware.
+	 */
+ err = pm_runtime_resume_and_get(&pdev->dev);
+ if (err)
+ return err;
+
+ err = reset_control_reset(fuse->rst);
+ pm_runtime_put(&pdev->dev);
+
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to reset FUSE: %d\n", err);
+ return err;
+ }
+
+ /* release the early I/O memory mapping */
+ iounmap(base);
+
+ return 0;
+}
+
+static int __maybe_unused tegra_fuse_runtime_resume(struct device *dev)
+{
+ int err;
+
+ err = clk_prepare_enable(fuse->clk);
+ if (err < 0) {
+ dev_err(dev, "failed to enable FUSE clock: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused tegra_fuse_runtime_suspend(struct device *dev)
+{
+ clk_disable_unprepare(fuse->clk);
+
+ return 0;
+}
+
+static int __maybe_unused tegra_fuse_suspend(struct device *dev)
+{
+ int ret;
+
+ /*
+ * Critical for RAM re-repair operation, which must occur on resume
+ * from LP1 system suspend and as part of CCPLEX cluster switching.
+ */
+ if (fuse->soc->clk_suspend_on)
+ ret = pm_runtime_resume_and_get(dev);
+ else
+ ret = pm_runtime_force_suspend(dev);
+
+ return ret;
+}
+
+static int __maybe_unused tegra_fuse_resume(struct device *dev)
+{
+ int ret = 0;
+
+ if (fuse->soc->clk_suspend_on)
+ pm_runtime_put(dev);
+ else
+ ret = pm_runtime_force_resume(dev);
+
+ return ret;
+}
+
+static const struct dev_pm_ops tegra_fuse_pm = {
+ SET_RUNTIME_PM_OPS(tegra_fuse_runtime_suspend, tegra_fuse_runtime_resume,
+ NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_fuse_suspend, tegra_fuse_resume)
+};
+
+static struct platform_driver tegra_fuse_driver = {
+ .driver = {
+ .name = "tegra-fuse",
+ .of_match_table = tegra_fuse_match,
+ .pm = &tegra_fuse_pm,
+ .suppress_bind_attrs = true,
+ },
+ .probe = tegra_fuse_probe,
+};
+builtin_platform_driver(tegra_fuse_driver);
+
+u32 __init tegra_fuse_read_spare(unsigned int spare)
+{
+ unsigned int offset = fuse->soc->info->spare + spare * 4;
+
+ return fuse->read_early(fuse, offset) & 1;
+}
+
+u32 __init tegra_fuse_read_early(unsigned int offset)
+{
+ return fuse->read_early(fuse, offset);
+}
+
+int tegra_fuse_readl(unsigned long offset, u32 *value)
+{
+ if (!fuse->read || !fuse->clk)
+ return -EPROBE_DEFER;
+
+ if (IS_ERR(fuse->clk))
+ return PTR_ERR(fuse->clk);
+
+ *value = fuse->read(fuse, offset);
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_fuse_readl);
+
+static void tegra_enable_fuse_clk(void __iomem *base)
+{
+ u32 reg;
+
+ reg = readl_relaxed(base + 0x48);
+ reg |= 1 << 28;
+ writel(reg, base + 0x48);
+
+ /*
+ * Enable FUSE clock. This needs to be hardcoded because the clock
+ * subsystem is not active during early boot.
+ */
+ reg = readl(base + 0x14);
+ reg |= 1 << 7;
+ writel(reg, base + 0x14);
+}
+
+static ssize_t major_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", tegra_get_major_rev());
+}
+
+static DEVICE_ATTR_RO(major);
+
+static ssize_t minor_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", tegra_get_minor_rev());
+}
+
+static DEVICE_ATTR_RO(minor);
+
+static struct attribute *tegra_soc_attr[] = {
+ &dev_attr_major.attr,
+ &dev_attr_minor.attr,
+ NULL,
+};
+
+const struct attribute_group tegra_soc_attr_group = {
+ .attrs = tegra_soc_attr,
+};
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
+static ssize_t platform_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+	/*
+	 * Displays the value in the 'pre_si_platform' field of the HIDREV
+	 * register for Tegra194 devices. A value of 0 indicates that the
+	 * platform type is silicon and any non-zero value indicates the
+	 * type of simulation platform being used.
+	 */
+ return sprintf(buf, "%d\n", tegra_get_platform());
+}
+
+static DEVICE_ATTR_RO(platform);
+
+static struct attribute *tegra194_soc_attr[] = {
+ &dev_attr_major.attr,
+ &dev_attr_minor.attr,
+ &dev_attr_platform.attr,
+ NULL,
+};
+
+const struct attribute_group tegra194_soc_attr_group = {
+ .attrs = tegra194_soc_attr,
+};
+#endif
+
+struct device * __init tegra_soc_device_register(void)
+{
+ struct soc_device_attribute *attr;
+ struct soc_device *dev;
+
+ attr = kzalloc(sizeof(*attr), GFP_KERNEL);
+ if (!attr)
+ return NULL;
+
+ attr->family = kasprintf(GFP_KERNEL, "Tegra");
+ if (tegra_is_silicon())
+ attr->revision = kasprintf(GFP_KERNEL, "%s %s",
+ tegra_platform_name[tegra_sku_info.platform],
+ tegra_revision_name[tegra_sku_info.revision]);
+ else
+ attr->revision = kasprintf(GFP_KERNEL, "%s",
+ tegra_platform_name[tegra_sku_info.platform]);
+ attr->soc_id = kasprintf(GFP_KERNEL, "%u", tegra_get_chip_id());
+ attr->custom_attr_group = fuse->soc->soc_attr_group;
+
+ dev = soc_device_register(attr);
+ if (IS_ERR(dev)) {
+ kfree(attr->soc_id);
+ kfree(attr->revision);
+ kfree(attr->family);
+ kfree(attr);
+ return ERR_CAST(dev);
+ }
+
+ return soc_device_to_device(dev);
+}
+
+static int __init tegra_init_fuse(void)
+{
+ const struct of_device_id *match;
+ struct device_node *np;
+ struct resource regs;
+
+ tegra_init_apbmisc();
+
+ np = of_find_matching_node_and_match(NULL, tegra_fuse_match, &match);
+ if (!np) {
+ /*
+ * Fall back to legacy initialization for 32-bit ARM only. All
+ * 64-bit ARM device tree files for Tegra are required to have
+ * a FUSE node.
+ *
+ * This is for backwards-compatibility with old device trees
+ * that didn't contain a FUSE node.
+ */
+ if (IS_ENABLED(CONFIG_ARM) && soc_is_tegra()) {
+ u8 chip = tegra_get_chip_id();
+
+ regs.start = 0x7000f800;
+ regs.end = 0x7000fbff;
+ regs.flags = IORESOURCE_MEM;
+
+ switch (chip) {
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+ case TEGRA20:
+ fuse->soc = &tegra20_fuse_soc;
+ break;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+ case TEGRA30:
+ fuse->soc = &tegra30_fuse_soc;
+ break;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_114_SOC
+ case TEGRA114:
+ fuse->soc = &tegra114_fuse_soc;
+ break;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_124_SOC
+ case TEGRA124:
+ fuse->soc = &tegra124_fuse_soc;
+ break;
+#endif
+
+ default:
+ pr_warn("Unsupported SoC: %02x\n", chip);
+ break;
+ }
+ } else {
+ /*
+ * At this point we're not running on Tegra, so play
+ * nice with multi-platform kernels.
+ */
+ return 0;
+ }
+ } else {
+ /*
+ * Extract information from the device tree if we've found a
+ * matching node.
+ */
+ if (of_address_to_resource(np, 0, &regs) < 0) {
+ pr_err("failed to get FUSE register\n");
+ return -ENXIO;
+ }
+
+ fuse->soc = match->data;
+ }
+
+ np = of_find_matching_node(NULL, car_match);
+ if (np) {
+ void __iomem *base = of_iomap(np, 0);
+ of_node_put(np);
+ if (base) {
+ tegra_enable_fuse_clk(base);
+ iounmap(base);
+ } else {
+ pr_err("failed to map clock registers\n");
+ return -ENXIO;
+ }
+ }
+
+ fuse->base = ioremap(regs.start, resource_size(&regs));
+ if (!fuse->base) {
+ pr_err("failed to map FUSE registers\n");
+ return -ENXIO;
+ }
+
+ fuse->soc->init(fuse);
+
+ pr_info("Tegra Revision: %s SKU: %d CPU Process: %d SoC Process: %d\n",
+ tegra_revision_name[tegra_sku_info.revision],
+ tegra_sku_info.sku_id, tegra_sku_info.cpu_process_id,
+ tegra_sku_info.soc_process_id);
+ pr_debug("Tegra CPU Speedo ID %d, SoC Speedo ID %d\n",
+ tegra_sku_info.cpu_speedo_id, tegra_sku_info.soc_speedo_id);
+
+ if (fuse->soc->lookups) {
+ size_t size = sizeof(*fuse->lookups) * fuse->soc->num_lookups;
+
+ fuse->lookups = kmemdup(fuse->soc->lookups, size, GFP_KERNEL);
+ if (fuse->lookups)
+ nvmem_add_cell_lookups(fuse->lookups, fuse->soc->num_lookups);
+ }
+
+ return 0;
+}
+early_initcall(tegra_init_fuse);
+
+#ifdef CONFIG_ARM64
+static int __init tegra_init_soc(void)
+{
+ struct device_node *np;
+ struct device *soc;
+
+ /* make sure we're running on Tegra */
+ np = of_find_matching_node(NULL, tegra_fuse_match);
+ if (!np)
+ return 0;
+
+ of_node_put(np);
+
+ soc = tegra_soc_device_register();
+ if (IS_ERR(soc)) {
+ pr_err("failed to register SoC device: %ld\n", PTR_ERR(soc));
+ return PTR_ERR(soc);
+ }
+
+ return 0;
+}
+device_initcall(tegra_init_soc);
+#endif
diff --git a/drivers/soc/tegra/fuse/fuse-tegra20.c b/drivers/soc/tegra/fuse/fuse-tegra20.c
new file mode 100644
index 0000000000..fdecf7b7c2
--- /dev/null
+++ b/drivers/soc/tegra/fuse/fuse-tegra20.c
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Based on drivers/misc/eeprom/sunxi_sid.c
+ */
+
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/random.h>
+
+#include <soc/tegra/fuse.h>
+
+#include "fuse.h"
+
+#define FUSE_BEGIN 0x100
+#define FUSE_UID_LOW 0x08
+#define FUSE_UID_HIGH 0x0c
+
+static u32 tegra20_fuse_read_early(struct tegra_fuse *fuse, unsigned int offset)
+{
+ return readl_relaxed(fuse->base + FUSE_BEGIN + offset);
+}
+
+static void apb_dma_complete(void *args)
+{
+ struct tegra_fuse *fuse = args;
+
+ complete(&fuse->apbdma.wait);
+}
+
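+/*
+ * Runtime fuse reads on Tegra20 use a one-shot APB DMA transfer of a single
+ * 32-bit word rather than a plain CPU read of the FUSE registers.
+ */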
+static u32 tegra20_fuse_read(struct tegra_fuse *fuse, unsigned int offset)
+{
+ unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+ struct dma_async_tx_descriptor *dma_desc;
+ unsigned long time_left;
+ u32 value = 0;
+ int err;
+
+ err = pm_runtime_resume_and_get(fuse->dev);
+ if (err)
+ return err;
+
+ mutex_lock(&fuse->apbdma.lock);
+
+ fuse->apbdma.config.src_addr = fuse->phys + FUSE_BEGIN + offset;
+
+ err = dmaengine_slave_config(fuse->apbdma.chan, &fuse->apbdma.config);
+ if (err)
+ goto out;
+
+ dma_desc = dmaengine_prep_slave_single(fuse->apbdma.chan,
+ fuse->apbdma.phys,
+ sizeof(u32), DMA_DEV_TO_MEM,
+ flags);
+ if (!dma_desc)
+ goto out;
+
+ dma_desc->callback = apb_dma_complete;
+ dma_desc->callback_param = fuse;
+
+ reinit_completion(&fuse->apbdma.wait);
+
+ dmaengine_submit(dma_desc);
+ dma_async_issue_pending(fuse->apbdma.chan);
+ time_left = wait_for_completion_timeout(&fuse->apbdma.wait,
+ msecs_to_jiffies(50));
+
+ if (WARN(time_left == 0, "apb read dma timed out"))
+ dmaengine_terminate_all(fuse->apbdma.chan);
+ else
+ value = *fuse->apbdma.virt;
+
+out:
+ mutex_unlock(&fuse->apbdma.lock);
+ pm_runtime_put(fuse->dev);
+ return value;
+}
+
+static bool dma_filter(struct dma_chan *chan, void *filter_param)
+{
+ struct device_node *np = chan->device->dev->of_node;
+
+ return of_device_is_compatible(np, "nvidia,tegra20-apbdma");
+}
+
+static void tegra20_fuse_release_channel(void *data)
+{
+ struct tegra_fuse *fuse = data;
+
+ dma_release_channel(fuse->apbdma.chan);
+ fuse->apbdma.chan = NULL;
+}
+
+static void tegra20_fuse_free_coherent(void *data)
+{
+ struct tegra_fuse *fuse = data;
+
+ dma_free_coherent(fuse->dev, sizeof(u32), fuse->apbdma.virt,
+ fuse->apbdma.phys);
+ fuse->apbdma.virt = NULL;
+ fuse->apbdma.phys = 0x0;
+}
+
+static int tegra20_fuse_probe(struct tegra_fuse *fuse)
+{
+ dma_cap_mask_t mask;
+ int err;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ fuse->apbdma.chan = dma_request_channel(mask, dma_filter, NULL);
+ if (!fuse->apbdma.chan)
+ return -EPROBE_DEFER;
+
+ err = devm_add_action_or_reset(fuse->dev, tegra20_fuse_release_channel,
+ fuse);
+ if (err)
+ return err;
+
+ fuse->apbdma.virt = dma_alloc_coherent(fuse->dev, sizeof(u32),
+ &fuse->apbdma.phys,
+ GFP_KERNEL);
+ if (!fuse->apbdma.virt)
+ return -ENOMEM;
+
+ err = devm_add_action_or_reset(fuse->dev, tegra20_fuse_free_coherent,
+ fuse);
+ if (err)
+ return err;
+
+ fuse->apbdma.config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ fuse->apbdma.config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ fuse->apbdma.config.src_maxburst = 1;
+ fuse->apbdma.config.dst_maxburst = 1;
+ fuse->apbdma.config.direction = DMA_DEV_TO_MEM;
+ fuse->apbdma.config.device_fc = false;
+
+ init_completion(&fuse->apbdma.wait);
+ mutex_init(&fuse->apbdma.lock);
+ fuse->read = tegra20_fuse_read;
+
+ return 0;
+}
+
+static const struct tegra_fuse_info tegra20_fuse_info = {
+ .read = tegra20_fuse_read,
+ .size = 0x1f8,
+ .spare = 0x100,
+};
+
+/* Early boot code. This code is called before the devices are created */
+
+static void __init tegra20_fuse_add_randomness(void)
+{
+ u32 randomness[7];
+
+ randomness[0] = tegra_sku_info.sku_id;
+ randomness[1] = tegra_read_straps();
+ randomness[2] = tegra_read_chipid();
+ randomness[3] = tegra_sku_info.cpu_process_id << 16;
+ randomness[3] |= tegra_sku_info.soc_process_id;
+ randomness[4] = tegra_sku_info.cpu_speedo_id << 16;
+ randomness[4] |= tegra_sku_info.soc_speedo_id;
+ randomness[5] = tegra_fuse_read_early(FUSE_UID_LOW);
+ randomness[6] = tegra_fuse_read_early(FUSE_UID_HIGH);
+
+ add_device_randomness(randomness, sizeof(randomness));
+}
+
+static void __init tegra20_fuse_init(struct tegra_fuse *fuse)
+{
+ fuse->read_early = tegra20_fuse_read_early;
+
+ tegra_init_revision();
+ fuse->soc->speedo_init(&tegra_sku_info);
+ tegra20_fuse_add_randomness();
+}
+
+const struct tegra_fuse_soc tegra20_fuse_soc = {
+ .init = tegra20_fuse_init,
+ .speedo_init = tegra20_init_speedo_data,
+ .probe = tegra20_fuse_probe,
+ .info = &tegra20_fuse_info,
+ .soc_attr_group = &tegra_soc_attr_group,
+ .clk_suspend_on = false,
+};
diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c
new file mode 100644
index 0000000000..e94d46372a
--- /dev/null
+++ b/drivers/soc/tegra/fuse/fuse-tegra30.c
@@ -0,0 +1,680 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2013-2022, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/nvmem-provider.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/random.h>
+
+#include <soc/tegra/fuse.h>
+
+#include "fuse.h"
+
+#define FUSE_BEGIN 0x100
+
+/* Tegra30 and later */
+#define FUSE_VENDOR_CODE 0x100
+#define FUSE_FAB_CODE 0x104
+#define FUSE_LOT_CODE_0 0x108
+#define FUSE_LOT_CODE_1 0x10c
+#define FUSE_WAFER_ID 0x110
+#define FUSE_X_COORDINATE 0x114
+#define FUSE_Y_COORDINATE 0x118
+
+#define FUSE_HAS_REVISION_INFO BIT(0)
+
+#if defined(CONFIG_ARCH_TEGRA_3x_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_114_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_124_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_132_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_210_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_186_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_194_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_234_SOC)
+static u32 tegra30_fuse_read_early(struct tegra_fuse *fuse, unsigned int offset)
+{
+ if (WARN_ON(!fuse->base))
+ return 0;
+
+ return readl_relaxed(fuse->base + FUSE_BEGIN + offset);
+}
+
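+/* Runtime reads keep the FUSE clock enabled via runtime PM for the access. */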
+static u32 tegra30_fuse_read(struct tegra_fuse *fuse, unsigned int offset)
+{
+ u32 value;
+ int err;
+
+ err = pm_runtime_resume_and_get(fuse->dev);
+ if (err)
+ return 0;
+
+ value = readl_relaxed(fuse->base + FUSE_BEGIN + offset);
+
+ pm_runtime_put(fuse->dev);
+
+ return value;
+}
+
+static void __init tegra30_fuse_add_randomness(void)
+{
+ u32 randomness[12];
+
+ randomness[0] = tegra_sku_info.sku_id;
+ randomness[1] = tegra_read_straps();
+ randomness[2] = tegra_read_chipid();
+ randomness[3] = tegra_sku_info.cpu_process_id << 16;
+ randomness[3] |= tegra_sku_info.soc_process_id;
+ randomness[4] = tegra_sku_info.cpu_speedo_id << 16;
+ randomness[4] |= tegra_sku_info.soc_speedo_id;
+ randomness[5] = tegra_fuse_read_early(FUSE_VENDOR_CODE);
+ randomness[6] = tegra_fuse_read_early(FUSE_FAB_CODE);
+ randomness[7] = tegra_fuse_read_early(FUSE_LOT_CODE_0);
+ randomness[8] = tegra_fuse_read_early(FUSE_LOT_CODE_1);
+ randomness[9] = tegra_fuse_read_early(FUSE_WAFER_ID);
+ randomness[10] = tegra_fuse_read_early(FUSE_X_COORDINATE);
+ randomness[11] = tegra_fuse_read_early(FUSE_Y_COORDINATE);
+
+ add_device_randomness(randomness, sizeof(randomness));
+}
+
+static void __init tegra30_fuse_init(struct tegra_fuse *fuse)
+{
+ fuse->read_early = tegra30_fuse_read_early;
+ fuse->read = tegra30_fuse_read;
+
+ tegra_init_revision();
+
+ if (fuse->soc->speedo_init)
+ fuse->soc->speedo_init(&tegra_sku_info);
+
+ tegra30_fuse_add_randomness();
+}
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+static const struct tegra_fuse_info tegra30_fuse_info = {
+ .read = tegra30_fuse_read,
+ .size = 0x2a4,
+ .spare = 0x144,
+};
+
+const struct tegra_fuse_soc tegra30_fuse_soc = {
+ .init = tegra30_fuse_init,
+ .speedo_init = tegra30_init_speedo_data,
+ .info = &tegra30_fuse_info,
+ .soc_attr_group = &tegra_soc_attr_group,
+ .clk_suspend_on = false,
+};
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_114_SOC
+static const struct tegra_fuse_info tegra114_fuse_info = {
+ .read = tegra30_fuse_read,
+ .size = 0x2a0,
+ .spare = 0x180,
+};
+
+const struct tegra_fuse_soc tegra114_fuse_soc = {
+ .init = tegra30_fuse_init,
+ .speedo_init = tegra114_init_speedo_data,
+ .info = &tegra114_fuse_info,
+ .soc_attr_group = &tegra_soc_attr_group,
+ .clk_suspend_on = false,
+};
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA_124_SOC) || defined(CONFIG_ARCH_TEGRA_132_SOC)
+static const struct nvmem_cell_info tegra124_fuse_cells[] = {
+ {
+ .name = "tsensor-cpu1",
+ .offset = 0x084,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-cpu2",
+ .offset = 0x088,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-cpu0",
+ .offset = 0x098,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "xusb-pad-calibration",
+ .offset = 0x0f0,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-cpu3",
+ .offset = 0x12c,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "sata-calibration",
+ .offset = 0x124,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-gpu",
+ .offset = 0x154,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-mem0",
+ .offset = 0x158,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-mem1",
+ .offset = 0x15c,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-pllx",
+ .offset = 0x160,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-common",
+ .offset = 0x180,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-realignment",
+ .offset = 0x1fc,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ },
+};
+
+static const struct nvmem_cell_lookup tegra124_fuse_lookups[] = {
+ {
+ .nvmem_name = "fuse",
+ .cell_name = "xusb-pad-calibration",
+ .dev_id = "7009f000.padctl",
+ .con_id = "calibration",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "sata-calibration",
+ .dev_id = "70020000.sata",
+ .con_id = "calibration",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-common",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "common",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-realignment",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "realignment",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-cpu0",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "cpu0",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-cpu1",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "cpu1",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-cpu2",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "cpu2",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-cpu3",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "cpu3",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-mem0",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "mem0",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-mem1",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "mem1",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-gpu",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "gpu",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-pllx",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "pllx",
+ },
+};
+
+static const struct tegra_fuse_info tegra124_fuse_info = {
+ .read = tegra30_fuse_read,
+ .size = 0x300,
+ .spare = 0x200,
+};
+
+const struct tegra_fuse_soc tegra124_fuse_soc = {
+ .init = tegra30_fuse_init,
+ .speedo_init = tegra124_init_speedo_data,
+ .info = &tegra124_fuse_info,
+ .lookups = tegra124_fuse_lookups,
+ .num_lookups = ARRAY_SIZE(tegra124_fuse_lookups),
+ .cells = tegra124_fuse_cells,
+ .num_cells = ARRAY_SIZE(tegra124_fuse_cells),
+ .soc_attr_group = &tegra_soc_attr_group,
+ .clk_suspend_on = true,
+};
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA_210_SOC)
+static const struct nvmem_cell_info tegra210_fuse_cells[] = {
+ {
+ .name = "tsensor-cpu1",
+ .offset = 0x084,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-cpu2",
+ .offset = 0x088,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-cpu0",
+ .offset = 0x098,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "xusb-pad-calibration",
+ .offset = 0x0f0,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-cpu3",
+ .offset = 0x12c,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "sata-calibration",
+ .offset = 0x124,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-gpu",
+ .offset = 0x154,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-mem0",
+ .offset = 0x158,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-mem1",
+ .offset = 0x15c,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-pllx",
+ .offset = 0x160,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-common",
+ .offset = 0x180,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "gpu-calibration",
+ .offset = 0x204,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "xusb-pad-calibration-ext",
+ .offset = 0x250,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ },
+};
+
+static const struct nvmem_cell_lookup tegra210_fuse_lookups[] = {
+ {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-cpu1",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "cpu1",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-cpu2",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "cpu2",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-cpu0",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "cpu0",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "xusb-pad-calibration",
+ .dev_id = "7009f000.padctl",
+ .con_id = "calibration",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-cpu3",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "cpu3",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "sata-calibration",
+ .dev_id = "70020000.sata",
+ .con_id = "calibration",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-gpu",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "gpu",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-mem0",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "mem0",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-mem1",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "mem1",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-pllx",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "pllx",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-common",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "common",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "gpu-calibration",
+ .dev_id = "57000000.gpu",
+ .con_id = "calibration",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "xusb-pad-calibration-ext",
+ .dev_id = "7009f000.padctl",
+ .con_id = "calibration-ext",
+ },
+};
+
+static const struct tegra_fuse_info tegra210_fuse_info = {
+ .read = tegra30_fuse_read,
+ .size = 0x300,
+ .spare = 0x280,
+};
+
+const struct tegra_fuse_soc tegra210_fuse_soc = {
+ .init = tegra30_fuse_init,
+ .speedo_init = tegra210_init_speedo_data,
+ .info = &tegra210_fuse_info,
+ .lookups = tegra210_fuse_lookups,
+ .cells = tegra210_fuse_cells,
+ .num_cells = ARRAY_SIZE(tegra210_fuse_cells),
+ .num_lookups = ARRAY_SIZE(tegra210_fuse_lookups),
+ .soc_attr_group = &tegra_soc_attr_group,
+ .clk_suspend_on = false,
+};
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA_186_SOC)
+static const struct nvmem_cell_info tegra186_fuse_cells[] = {
+ {
+ .name = "xusb-pad-calibration",
+ .offset = 0x0f0,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "xusb-pad-calibration-ext",
+ .offset = 0x250,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ },
+};
+
+static const struct nvmem_cell_lookup tegra186_fuse_lookups[] = {
+ {
+ .nvmem_name = "fuse",
+ .cell_name = "xusb-pad-calibration",
+ .dev_id = "3520000.padctl",
+ .con_id = "calibration",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "xusb-pad-calibration-ext",
+ .dev_id = "3520000.padctl",
+ .con_id = "calibration-ext",
+ },
+};
+
+static const struct nvmem_keepout tegra186_fuse_keepouts[] = {
+ { .start = 0x01c, .end = 0x0f0 },
+ { .start = 0x138, .end = 0x198 },
+ { .start = 0x1d8, .end = 0x250 },
+ { .start = 0x280, .end = 0x290 },
+ { .start = 0x340, .end = 0x344 }
+};
+
+static const struct tegra_fuse_info tegra186_fuse_info = {
+ .read = tegra30_fuse_read,
+ .size = 0x478,
+ .spare = 0x280,
+};
+
+const struct tegra_fuse_soc tegra186_fuse_soc = {
+ .init = tegra30_fuse_init,
+ .info = &tegra186_fuse_info,
+ .lookups = tegra186_fuse_lookups,
+ .num_lookups = ARRAY_SIZE(tegra186_fuse_lookups),
+ .cells = tegra186_fuse_cells,
+ .num_cells = ARRAY_SIZE(tegra186_fuse_cells),
+ .keepouts = tegra186_fuse_keepouts,
+ .num_keepouts = ARRAY_SIZE(tegra186_fuse_keepouts),
+ .soc_attr_group = &tegra_soc_attr_group,
+ .clk_suspend_on = false,
+};
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA_194_SOC)
+static const struct nvmem_cell_info tegra194_fuse_cells[] = {
+ {
+ .name = "xusb-pad-calibration",
+ .offset = 0x0f0,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "gpu-gcplex-config-fuse",
+ .offset = 0x1c8,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "xusb-pad-calibration-ext",
+ .offset = 0x250,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "gpu-pdi0",
+ .offset = 0x300,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "gpu-pdi1",
+ .offset = 0x304,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ },
+};
+
+static const struct nvmem_cell_lookup tegra194_fuse_lookups[] = {
+ {
+ .nvmem_name = "fuse",
+ .cell_name = "xusb-pad-calibration",
+ .dev_id = "3520000.padctl",
+ .con_id = "calibration",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "xusb-pad-calibration-ext",
+ .dev_id = "3520000.padctl",
+ .con_id = "calibration-ext",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "gpu-gcplex-config-fuse",
+ .dev_id = "17000000.gpu",
+ .con_id = "gcplex-config-fuse",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "gpu-pdi0",
+ .dev_id = "17000000.gpu",
+ .con_id = "pdi0",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "gpu-pdi1",
+ .dev_id = "17000000.gpu",
+ .con_id = "pdi1",
+ },
+};
+
+static const struct nvmem_keepout tegra194_fuse_keepouts[] = {
+ { .start = 0x01c, .end = 0x0b8 },
+ { .start = 0x12c, .end = 0x198 },
+ { .start = 0x1a0, .end = 0x1bc },
+ { .start = 0x1d8, .end = 0x250 },
+ { .start = 0x270, .end = 0x290 },
+ { .start = 0x310, .end = 0x45c }
+};
+
+static const struct tegra_fuse_info tegra194_fuse_info = {
+ .read = tegra30_fuse_read,
+ .size = 0x650,
+ .spare = 0x280,
+};
+
+const struct tegra_fuse_soc tegra194_fuse_soc = {
+ .init = tegra30_fuse_init,
+ .info = &tegra194_fuse_info,
+ .lookups = tegra194_fuse_lookups,
+ .num_lookups = ARRAY_SIZE(tegra194_fuse_lookups),
+ .cells = tegra194_fuse_cells,
+ .num_cells = ARRAY_SIZE(tegra194_fuse_cells),
+ .keepouts = tegra194_fuse_keepouts,
+ .num_keepouts = ARRAY_SIZE(tegra194_fuse_keepouts),
+ .soc_attr_group = &tegra194_soc_attr_group,
+ .clk_suspend_on = false,
+};
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA_234_SOC)
+static const struct nvmem_cell_info tegra234_fuse_cells[] = {
+ {
+ .name = "xusb-pad-calibration",
+ .offset = 0x0f0,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "xusb-pad-calibration-ext",
+ .offset = 0x250,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ },
+};
+
+static const struct nvmem_cell_lookup tegra234_fuse_lookups[] = {
+ {
+ .nvmem_name = "fuse",
+ .cell_name = "xusb-pad-calibration",
+ .dev_id = "3520000.padctl",
+ .con_id = "calibration",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "xusb-pad-calibration-ext",
+ .dev_id = "3520000.padctl",
+ .con_id = "calibration-ext",
+ },
+};
+
+static const struct nvmem_keepout tegra234_fuse_keepouts[] = {
+ { .start = 0x01c, .end = 0x0c8 },
+ { .start = 0x12c, .end = 0x184 },
+ { .start = 0x190, .end = 0x198 },
+ { .start = 0x1a0, .end = 0x204 },
+ { .start = 0x21c, .end = 0x250 },
+ { .start = 0x25c, .end = 0x2f0 },
+ { .start = 0x310, .end = 0x3d8 },
+ { .start = 0x400, .end = 0x4f0 },
+ { .start = 0x4f8, .end = 0x7e8 },
+ { .start = 0x8d0, .end = 0x8d8 },
+ { .start = 0xacc, .end = 0xf00 }
+};
+
+static const struct tegra_fuse_info tegra234_fuse_info = {
+ .read = tegra30_fuse_read,
+ .size = 0xf90,
+ .spare = 0x280,
+};
+
+const struct tegra_fuse_soc tegra234_fuse_soc = {
+ .init = tegra30_fuse_init,
+ .info = &tegra234_fuse_info,
+ .lookups = tegra234_fuse_lookups,
+ .num_lookups = ARRAY_SIZE(tegra234_fuse_lookups),
+ .cells = tegra234_fuse_cells,
+ .num_cells = ARRAY_SIZE(tegra234_fuse_cells),
+ .keepouts = tegra234_fuse_keepouts,
+ .num_keepouts = ARRAY_SIZE(tegra234_fuse_keepouts),
+ .soc_attr_group = &tegra194_soc_attr_group,
+ .clk_suspend_on = false,
+};
+#endif
diff --git a/drivers/soc/tegra/fuse/fuse.h b/drivers/soc/tegra/fuse/fuse.h
new file mode 100644
index 0000000000..90f23be738
--- /dev/null
+++ b/drivers/soc/tegra/fuse/fuse.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2010 Google, Inc.
+ * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Author:
+ * Colin Cross <ccross@android.com>
+ */
+
+#ifndef __DRIVERS_MISC_TEGRA_FUSE_H
+#define __DRIVERS_MISC_TEGRA_FUSE_H
+
+#include <linux/dmaengine.h>
+#include <linux/types.h>
+
+struct nvmem_cell_lookup;
+struct nvmem_device;
+struct tegra_fuse;
+
+struct tegra_fuse_info {
+ u32 (*read)(struct tegra_fuse *fuse, unsigned int offset);
+ unsigned int size;
+ unsigned int spare;
+};
+
+struct tegra_fuse_soc {
+ void (*init)(struct tegra_fuse *fuse);
+ void (*speedo_init)(struct tegra_sku_info *info);
+ int (*probe)(struct tegra_fuse *fuse);
+
+ const struct tegra_fuse_info *info;
+
+ const struct nvmem_cell_lookup *lookups;
+ unsigned int num_lookups;
+ const struct nvmem_cell_info *cells;
+ unsigned int num_cells;
+ const struct nvmem_keepout *keepouts;
+ unsigned int num_keepouts;
+
+ const struct attribute_group *soc_attr_group;
+
+ bool clk_suspend_on;
+};
+
+struct tegra_fuse {
+ struct device *dev;
+ void __iomem *base;
+ phys_addr_t phys;
+ struct clk *clk;
+ struct reset_control *rst;
+
+ u32 (*read_early)(struct tegra_fuse *fuse, unsigned int offset);
+ u32 (*read)(struct tegra_fuse *fuse, unsigned int offset);
+ const struct tegra_fuse_soc *soc;
+
+ /* APBDMA on Tegra20 */
+ struct {
+ struct mutex lock;
+ struct completion wait;
+ struct dma_chan *chan;
+ struct dma_slave_config config;
+ dma_addr_t phys;
+ u32 *virt;
+ } apbdma;
+
+ struct nvmem_device *nvmem;
+ struct nvmem_cell_lookup *lookups;
+};
+
+void tegra_init_revision(void);
+void tegra_init_apbmisc(void);
+
+u32 __init tegra_fuse_read_spare(unsigned int spare);
+u32 __init tegra_fuse_read_early(unsigned int offset);
+
+u8 tegra_get_major_rev(void);
+u8 tegra_get_minor_rev(void);
+
+extern const struct attribute_group tegra_soc_attr_group;
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+void tegra20_init_speedo_data(struct tegra_sku_info *sku_info);
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+void tegra30_init_speedo_data(struct tegra_sku_info *sku_info);
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_114_SOC
+void tegra114_init_speedo_data(struct tegra_sku_info *sku_info);
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA_124_SOC) || defined(CONFIG_ARCH_TEGRA_132_SOC)
+void tegra124_init_speedo_data(struct tegra_sku_info *sku_info);
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_210_SOC
+void tegra210_init_speedo_data(struct tegra_sku_info *sku_info);
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+extern const struct tegra_fuse_soc tegra20_fuse_soc;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+extern const struct tegra_fuse_soc tegra30_fuse_soc;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_114_SOC
+extern const struct tegra_fuse_soc tegra114_fuse_soc;
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA_124_SOC) || defined(CONFIG_ARCH_TEGRA_132_SOC)
+extern const struct tegra_fuse_soc tegra124_fuse_soc;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_210_SOC
+extern const struct tegra_fuse_soc tegra210_fuse_soc;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_186_SOC
+extern const struct tegra_fuse_soc tegra186_fuse_soc;
+#endif
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
+extern const struct attribute_group tegra194_soc_attr_group;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_194_SOC
+extern const struct tegra_fuse_soc tegra194_fuse_soc;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_234_SOC
+extern const struct tegra_fuse_soc tegra234_fuse_soc;
+#endif
+
+#endif
diff --git a/drivers/soc/tegra/fuse/speedo-tegra114.c b/drivers/soc/tegra/fuse/speedo-tegra114.c
new file mode 100644
index 0000000000..6695702bdb
--- /dev/null
+++ b/drivers/soc/tegra/fuse/speedo-tegra114.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+
+#include <soc/tegra/fuse.h>
+
+#include "fuse.h"
+
+#define SOC_PROCESS_CORNERS 2
+#define CPU_PROCESS_CORNERS 2
+
+enum {
+ THRESHOLD_INDEX_0,
+ THRESHOLD_INDEX_1,
+ THRESHOLD_INDEX_COUNT,
+};
+
+static const u32 __initconst soc_process_speedos[][SOC_PROCESS_CORNERS] = {
+ {1123, UINT_MAX},
+ {0, UINT_MAX},
+};
+
+static const u32 __initconst cpu_process_speedos[][CPU_PROCESS_CORNERS] = {
+ {1695, UINT_MAX},
+ {0, UINT_MAX},
+};
+
+static void __init rev_sku_to_speedo_ids(struct tegra_sku_info *sku_info,
+ int *threshold)
+{
+ u32 tmp;
+ u32 sku = sku_info->sku_id;
+ enum tegra_revision rev = sku_info->revision;
+
+ switch (sku) {
+ case 0x00:
+ case 0x10:
+ case 0x05:
+ case 0x06:
+ sku_info->cpu_speedo_id = 1;
+ sku_info->soc_speedo_id = 0;
+ *threshold = THRESHOLD_INDEX_0;
+ break;
+
+ case 0x03:
+ case 0x04:
+ sku_info->cpu_speedo_id = 2;
+ sku_info->soc_speedo_id = 1;
+ *threshold = THRESHOLD_INDEX_1;
+ break;
+
+ default:
+ pr_err("Tegra Unknown SKU %d\n", sku);
+ sku_info->cpu_speedo_id = 0;
+ sku_info->soc_speedo_id = 0;
+ *threshold = THRESHOLD_INDEX_0;
+ break;
+ }
+
+ if (rev == TEGRA_REVISION_A01) {
+ tmp = tegra_fuse_read_early(0x270) << 1;
+ tmp |= tegra_fuse_read_early(0x26c);
+ if (!tmp)
+ sku_info->cpu_speedo_id = 0;
+ }
+}
+
+void __init tegra114_init_speedo_data(struct tegra_sku_info *sku_info)
+{
+ u32 cpu_speedo_val;
+ u32 soc_speedo_val;
+ int threshold;
+ int i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(cpu_process_speedos) !=
+ THRESHOLD_INDEX_COUNT);
+ BUILD_BUG_ON(ARRAY_SIZE(soc_process_speedos) !=
+ THRESHOLD_INDEX_COUNT);
+
+ rev_sku_to_speedo_ids(sku_info, &threshold);
+
+ cpu_speedo_val = tegra_fuse_read_early(0x12c) + 1024;
+ soc_speedo_val = tegra_fuse_read_early(0x134);
+
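+	/*
+	 * The process ID is the index of the first corner threshold that the
+	 * fused speedo value falls below; UINT_MAX terminates each table.
+	 */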
+ for (i = 0; i < CPU_PROCESS_CORNERS; i++)
+ if (cpu_speedo_val < cpu_process_speedos[threshold][i])
+ break;
+ sku_info->cpu_process_id = i;
+
+ for (i = 0; i < SOC_PROCESS_CORNERS; i++)
+ if (soc_speedo_val < soc_process_speedos[threshold][i])
+ break;
+ sku_info->soc_process_id = i;
+}
diff --git a/drivers/soc/tegra/fuse/speedo-tegra124.c b/drivers/soc/tegra/fuse/speedo-tegra124.c
new file mode 100644
index 0000000000..5b1ee28e42
--- /dev/null
+++ b/drivers/soc/tegra/fuse/speedo-tegra124.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/bug.h>
+
+#include <soc/tegra/fuse.h>
+
+#include "fuse.h"
+
+#define CPU_PROCESS_CORNERS 2
+#define GPU_PROCESS_CORNERS 2
+#define SOC_PROCESS_CORNERS 2
+
+#define FUSE_CPU_SPEEDO_0 0x14
+#define FUSE_CPU_SPEEDO_1 0x2c
+#define FUSE_CPU_SPEEDO_2 0x30
+#define FUSE_SOC_SPEEDO_0 0x34
+#define FUSE_SOC_SPEEDO_1 0x38
+#define FUSE_SOC_SPEEDO_2 0x3c
+#define FUSE_CPU_IDDQ 0x18
+#define FUSE_SOC_IDDQ 0x40
+#define FUSE_GPU_IDDQ 0x128
+#define FUSE_FT_REV 0x28
+
+enum {
+ THRESHOLD_INDEX_0,
+ THRESHOLD_INDEX_1,
+ THRESHOLD_INDEX_COUNT,
+};
+
+static const u32 __initconst cpu_process_speedos[][CPU_PROCESS_CORNERS] = {
+ {2190, UINT_MAX},
+ {0, UINT_MAX},
+};
+
+static const u32 __initconst gpu_process_speedos[][GPU_PROCESS_CORNERS] = {
+ {1965, UINT_MAX},
+ {0, UINT_MAX},
+};
+
+static const u32 __initconst soc_process_speedos[][SOC_PROCESS_CORNERS] = {
+ {2101, UINT_MAX},
+ {0, UINT_MAX},
+};
+
+static void __init rev_sku_to_speedo_ids(struct tegra_sku_info *sku_info,
+ int *threshold)
+{
+ int sku = sku_info->sku_id;
+
+ /* Assign to default */
+ sku_info->cpu_speedo_id = 0;
+ sku_info->soc_speedo_id = 0;
+ sku_info->gpu_speedo_id = 0;
+ *threshold = THRESHOLD_INDEX_0;
+
+ switch (sku) {
+ case 0x00: /* Eng sku */
+ case 0x0F:
+ case 0x23:
+ /* Using the default */
+ break;
+ case 0x83:
+ sku_info->cpu_speedo_id = 2;
+ break;
+
+ case 0x1F:
+ case 0x87:
+ case 0x27:
+ sku_info->cpu_speedo_id = 2;
+ sku_info->soc_speedo_id = 0;
+ sku_info->gpu_speedo_id = 1;
+ *threshold = THRESHOLD_INDEX_0;
+ break;
+ case 0x81:
+ case 0x21:
+ case 0x07:
+ sku_info->cpu_speedo_id = 1;
+ sku_info->soc_speedo_id = 1;
+ sku_info->gpu_speedo_id = 1;
+ *threshold = THRESHOLD_INDEX_1;
+ break;
+ case 0x49:
+ case 0x4A:
+ case 0x48:
+ sku_info->cpu_speedo_id = 4;
+ sku_info->soc_speedo_id = 2;
+ sku_info->gpu_speedo_id = 3;
+ *threshold = THRESHOLD_INDEX_1;
+ break;
+ default:
+ pr_err("Tegra Unknown SKU %d\n", sku);
+ /* Using the default for the error case */
+ break;
+ }
+}
+
+void __init tegra124_init_speedo_data(struct tegra_sku_info *sku_info)
+{
+ int i, threshold, soc_speedo_0_value;
+
+ BUILD_BUG_ON(ARRAY_SIZE(cpu_process_speedos) !=
+ THRESHOLD_INDEX_COUNT);
+ BUILD_BUG_ON(ARRAY_SIZE(gpu_process_speedos) !=
+ THRESHOLD_INDEX_COUNT);
+ BUILD_BUG_ON(ARRAY_SIZE(soc_process_speedos) !=
+ THRESHOLD_INDEX_COUNT);
+
+ sku_info->cpu_speedo_value = tegra_fuse_read_early(FUSE_CPU_SPEEDO_0);
+ if (sku_info->cpu_speedo_value == 0) {
+ pr_warn("Tegra Warning: Speedo value not fused.\n");
+ WARN_ON(1);
+ return;
+ }
+
+ /* GPU Speedo is stored in CPU_SPEEDO_2 */
+ sku_info->gpu_speedo_value = tegra_fuse_read_early(FUSE_CPU_SPEEDO_2);
+ soc_speedo_0_value = tegra_fuse_read_early(FUSE_SOC_SPEEDO_0);
+
+ rev_sku_to_speedo_ids(sku_info, &threshold);
+
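+	/* CPU quiescent-current (IDDQ) fuse value */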
+ sku_info->cpu_iddq_value = tegra_fuse_read_early(FUSE_CPU_IDDQ);
+
+ for (i = 0; i < GPU_PROCESS_CORNERS; i++)
+ if (sku_info->gpu_speedo_value <
+ gpu_process_speedos[threshold][i])
+ break;
+ sku_info->gpu_process_id = i;
+
+ for (i = 0; i < CPU_PROCESS_CORNERS; i++)
+ if (sku_info->cpu_speedo_value <
+ cpu_process_speedos[threshold][i])
+ break;
+ sku_info->cpu_process_id = i;
+
+ for (i = 0; i < SOC_PROCESS_CORNERS; i++)
+ if (soc_speedo_0_value <
+ soc_process_speedos[threshold][i])
+ break;
+ sku_info->soc_process_id = i;
+
+ pr_debug("Tegra GPU Speedo ID=%d, Speedo Value=%d\n",
+ sku_info->gpu_speedo_id, sku_info->gpu_speedo_value);
+}
diff --git a/drivers/soc/tegra/fuse/speedo-tegra20.c b/drivers/soc/tegra/fuse/speedo-tegra20.c
new file mode 100644
index 0000000000..2546bddbab
--- /dev/null
+++ b/drivers/soc/tegra/fuse/speedo-tegra20.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2014, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+
+#include <soc/tegra/fuse.h>
+
+#include "fuse.h"
+
+#define CPU_SPEEDO_LSBIT 20
+#define CPU_SPEEDO_MSBIT 29
+#define CPU_SPEEDO_REDUND_LSBIT 30
+#define CPU_SPEEDO_REDUND_MSBIT 39
+#define CPU_SPEEDO_REDUND_OFFS (CPU_SPEEDO_REDUND_MSBIT - CPU_SPEEDO_MSBIT)
+
+#define SOC_SPEEDO_LSBIT 40
+#define SOC_SPEEDO_MSBIT 47
+#define SOC_SPEEDO_REDUND_LSBIT 48
+#define SOC_SPEEDO_REDUND_MSBIT 55
+#define SOC_SPEEDO_REDUND_OFFS (SOC_SPEEDO_REDUND_MSBIT - SOC_SPEEDO_MSBIT)
+
+#define SPEEDO_MULT 4
+
+#define PROCESS_CORNERS_NUM 4
+
+#define SPEEDO_ID_SELECT_0(rev) ((rev) <= 2)
+#define SPEEDO_ID_SELECT_1(sku) \
+ (((sku) != 20) && ((sku) != 23) && ((sku) != 24) && \
+ ((sku) != 27) && ((sku) != 28))
+
+enum {
+ SPEEDO_ID_0,
+ SPEEDO_ID_1,
+ SPEEDO_ID_2,
+ SPEEDO_ID_COUNT,
+};
+
+static const u32 __initconst cpu_process_speedos[][PROCESS_CORNERS_NUM] = {
+ {315, 366, 420, UINT_MAX},
+ {303, 368, 419, UINT_MAX},
+ {316, 331, 383, UINT_MAX},
+};
+
+static const u32 __initconst soc_process_speedos[][PROCESS_CORNERS_NUM] = {
+ {165, 195, 224, UINT_MAX},
+ {165, 195, 224, UINT_MAX},
+ {165, 195, 224, UINT_MAX},
+};
+
+void __init tegra20_init_speedo_data(struct tegra_sku_info *sku_info)
+{
+ u32 reg;
+ u32 val;
+ int i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(cpu_process_speedos) != SPEEDO_ID_COUNT);
+ BUILD_BUG_ON(ARRAY_SIZE(soc_process_speedos) != SPEEDO_ID_COUNT);
+
+ if (SPEEDO_ID_SELECT_0(sku_info->revision))
+ sku_info->soc_speedo_id = SPEEDO_ID_0;
+ else if (SPEEDO_ID_SELECT_1(sku_info->sku_id))
+ sku_info->soc_speedo_id = SPEEDO_ID_1;
+ else
+ sku_info->soc_speedo_id = SPEEDO_ID_2;
+
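+	/*
+	 * Assemble the CPU speedo value MSB-first from the spare fuses,
+	 * OR-ing each bit with its redundant copy, then scale by SPEEDO_MULT.
+	 */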
+ val = 0;
+ for (i = CPU_SPEEDO_MSBIT; i >= CPU_SPEEDO_LSBIT; i--) {
+ reg = tegra_fuse_read_spare(i) |
+ tegra_fuse_read_spare(i + CPU_SPEEDO_REDUND_OFFS);
+ val = (val << 1) | (reg & 0x1);
+ }
+ val = val * SPEEDO_MULT;
+ pr_debug("Tegra CPU speedo value %u\n", val);
+
+ for (i = 0; i < (PROCESS_CORNERS_NUM - 1); i++) {
+ if (val <= cpu_process_speedos[sku_info->soc_speedo_id][i])
+ break;
+ }
+ sku_info->cpu_process_id = i;
+
+ val = 0;
+ for (i = SOC_SPEEDO_MSBIT; i >= SOC_SPEEDO_LSBIT; i--) {
+ reg = tegra_fuse_read_spare(i) |
+ tegra_fuse_read_spare(i + SOC_SPEEDO_REDUND_OFFS);
+ val = (val << 1) | (reg & 0x1);
+ }
+ val = val * SPEEDO_MULT;
+ pr_debug("Core speedo value %u\n", val);
+
+ for (i = 0; i < (PROCESS_CORNERS_NUM - 1); i++) {
+ if (val <= soc_process_speedos[sku_info->soc_speedo_id][i])
+ break;
+ }
+ sku_info->soc_process_id = i;
+}
diff --git a/drivers/soc/tegra/fuse/speedo-tegra210.c b/drivers/soc/tegra/fuse/speedo-tegra210.c
new file mode 100644
index 0000000000..695d0b7f9a
--- /dev/null
+++ b/drivers/soc/tegra/fuse/speedo-tegra210.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2013-2015, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/bug.h>
+
+#include <soc/tegra/fuse.h>
+
+#include "fuse.h"
+
+#define CPU_PROCESS_CORNERS 2
+#define GPU_PROCESS_CORNERS 2
+#define SOC_PROCESS_CORNERS 3
+
+#define FUSE_CPU_SPEEDO_0 0x014
+#define FUSE_CPU_SPEEDO_1 0x02c
+#define FUSE_CPU_SPEEDO_2 0x030
+#define FUSE_SOC_SPEEDO_0 0x034
+#define FUSE_SOC_SPEEDO_1 0x038
+#define FUSE_SOC_SPEEDO_2 0x03c
+#define FUSE_CPU_IDDQ 0x018
+#define FUSE_SOC_IDDQ 0x040
+#define FUSE_GPU_IDDQ 0x128
+#define FUSE_FT_REV 0x028
+
+enum {
+ THRESHOLD_INDEX_0,
+ THRESHOLD_INDEX_1,
+ THRESHOLD_INDEX_COUNT,
+};
+
+static const u32 __initconst cpu_process_speedos[][CPU_PROCESS_CORNERS] = {
+ { 2119, UINT_MAX },
+ { 2119, UINT_MAX },
+};
+
+static const u32 __initconst gpu_process_speedos[][GPU_PROCESS_CORNERS] = {
+ { UINT_MAX, UINT_MAX },
+ { UINT_MAX, UINT_MAX },
+};
+
+static const u32 __initconst soc_process_speedos[][SOC_PROCESS_CORNERS] = {
+ { 1950, 2100, UINT_MAX },
+ { 1950, 2100, UINT_MAX },
+};
+
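+/* The 3-bit speedo revision is fused into spare bits 2 (LSB) through 4 (MSB). */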
+static u8 __init get_speedo_revision(void)
+{
+ return tegra_fuse_read_spare(4) << 2 |
+ tegra_fuse_read_spare(3) << 1 |
+ tegra_fuse_read_spare(2) << 0;
+}
+
+static void __init rev_sku_to_speedo_ids(struct tegra_sku_info *sku_info,
+ u8 speedo_rev, int *threshold)
+{
+ int sku = sku_info->sku_id;
+
+ /* Assign to default */
+ sku_info->cpu_speedo_id = 0;
+ sku_info->soc_speedo_id = 0;
+ sku_info->gpu_speedo_id = 0;
+ *threshold = THRESHOLD_INDEX_0;
+
+ switch (sku) {
+ case 0x00: /* Engineering SKU */
+ case 0x01: /* Engineering SKU */
+ case 0x07:
+ case 0x17:
+ case 0x27:
+ if (speedo_rev >= 2)
+ sku_info->gpu_speedo_id = 1;
+ break;
+
+ case 0x13:
+ if (speedo_rev >= 2)
+ sku_info->gpu_speedo_id = 1;
+
+ sku_info->cpu_speedo_id = 1;
+ break;
+
+ default:
+ pr_err("Tegra210: unknown SKU %#04x\n", sku);
+ /* Using the default for the error case */
+ break;
+ }
+}
+
+static int get_process_id(int value, const u32 *speedos, unsigned int num)
+{
+ unsigned int i;
+
+ for (i = 0; i < num; i++)
+ if (value < speedos[i])
+ return i;
+
+ return -EINVAL;
+}
+
+void __init tegra210_init_speedo_data(struct tegra_sku_info *sku_info)
+{
+ int cpu_speedo[3], soc_speedo[3];
+ unsigned int index;
+ u8 speedo_revision;
+
+ BUILD_BUG_ON(ARRAY_SIZE(cpu_process_speedos) !=
+ THRESHOLD_INDEX_COUNT);
+ BUILD_BUG_ON(ARRAY_SIZE(gpu_process_speedos) !=
+ THRESHOLD_INDEX_COUNT);
+ BUILD_BUG_ON(ARRAY_SIZE(soc_process_speedos) !=
+ THRESHOLD_INDEX_COUNT);
+
+ /* Read speedo/IDDQ fuses */
+ cpu_speedo[0] = tegra_fuse_read_early(FUSE_CPU_SPEEDO_0);
+ cpu_speedo[1] = tegra_fuse_read_early(FUSE_CPU_SPEEDO_1);
+ cpu_speedo[2] = tegra_fuse_read_early(FUSE_CPU_SPEEDO_2);
+
+ soc_speedo[0] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_0);
+ soc_speedo[1] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_1);
+ soc_speedo[2] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_2);
+
+ /*
+ * Determine CPU, GPU and SoC speedo values depending on speedo fusing
+ * revision. Note that GPU speedo value is fused in CPU_SPEEDO_2.
+ */
+ speedo_revision = get_speedo_revision();
+ pr_info("Speedo Revision %u\n", speedo_revision);
+
+ if (speedo_revision >= 3) {
+ sku_info->cpu_speedo_value = cpu_speedo[0];
+ sku_info->gpu_speedo_value = cpu_speedo[2];
+ sku_info->soc_speedo_value = soc_speedo[0];
+ } else if (speedo_revision == 2) {
+ sku_info->cpu_speedo_value = (-1938 + (1095 * cpu_speedo[0] / 100)) / 10;
+ sku_info->gpu_speedo_value = (-1662 + (1082 * cpu_speedo[2] / 100)) / 10;
+ sku_info->soc_speedo_value = ( -705 + (1037 * soc_speedo[0] / 100)) / 10;
+ } else {
+ sku_info->cpu_speedo_value = 2100;
+ sku_info->gpu_speedo_value = cpu_speedo[2] - 75;
+ sku_info->soc_speedo_value = 1900;
+ }
+
+ if ((sku_info->cpu_speedo_value <= 0) ||
+ (sku_info->gpu_speedo_value <= 0) ||
+ (sku_info->soc_speedo_value <= 0)) {
+ WARN(1, "speedo value not fused\n");
+ return;
+ }
+
+ rev_sku_to_speedo_ids(sku_info, speedo_revision, &index);
+
+ sku_info->gpu_process_id = get_process_id(sku_info->gpu_speedo_value,
+ gpu_process_speedos[index],
+ GPU_PROCESS_CORNERS);
+
+ sku_info->cpu_process_id = get_process_id(sku_info->cpu_speedo_value,
+ cpu_process_speedos[index],
+ CPU_PROCESS_CORNERS);
+
+ sku_info->soc_process_id = get_process_id(sku_info->soc_speedo_value,
+ soc_process_speedos[index],
+ SOC_PROCESS_CORNERS);
+
+ pr_debug("Tegra GPU Speedo ID=%d, Speedo Value=%d\n",
+ sku_info->gpu_speedo_id, sku_info->gpu_speedo_value);
+}
diff --git a/drivers/soc/tegra/fuse/speedo-tegra30.c b/drivers/soc/tegra/fuse/speedo-tegra30.c
new file mode 100644
index 0000000000..b1d09944b3
--- /dev/null
+++ b/drivers/soc/tegra/fuse/speedo-tegra30.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2014, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+
+#include <soc/tegra/fuse.h>
+
+#include "fuse.h"
+
+#define SOC_PROCESS_CORNERS 1
+#define CPU_PROCESS_CORNERS 6
+
+#define FUSE_SPEEDO_CALIB_0 0x14
+#define FUSE_PACKAGE_INFO	0xfc
+#define FUSE_TEST_PROG_VER	0x28
+
+#define G_SPEEDO_BIT_MINUS1 58
+#define G_SPEEDO_BIT_MINUS1_R 59
+#define G_SPEEDO_BIT_MINUS2 60
+#define G_SPEEDO_BIT_MINUS2_R 61
+#define LP_SPEEDO_BIT_MINUS1 62
+#define LP_SPEEDO_BIT_MINUS1_R 63
+#define LP_SPEEDO_BIT_MINUS2 64
+#define LP_SPEEDO_BIT_MINUS2_R 65
+
+enum {
+ THRESHOLD_INDEX_0,
+ THRESHOLD_INDEX_1,
+ THRESHOLD_INDEX_2,
+ THRESHOLD_INDEX_3,
+ THRESHOLD_INDEX_4,
+ THRESHOLD_INDEX_5,
+ THRESHOLD_INDEX_6,
+ THRESHOLD_INDEX_7,
+ THRESHOLD_INDEX_8,
+ THRESHOLD_INDEX_9,
+ THRESHOLD_INDEX_10,
+ THRESHOLD_INDEX_11,
+ THRESHOLD_INDEX_COUNT,
+};
+
+static const u32 __initconst soc_process_speedos[][SOC_PROCESS_CORNERS] = {
+ {180},
+ {170},
+ {195},
+ {180},
+ {168},
+ {192},
+ {180},
+ {170},
+ {195},
+ {180},
+ {180},
+ {180},
+};
+
+static const u32 __initconst cpu_process_speedos[][CPU_PROCESS_CORNERS] = {
+ {306, 338, 360, 376, UINT_MAX},
+ {295, 336, 358, 375, UINT_MAX},
+ {325, 325, 358, 375, UINT_MAX},
+ {325, 325, 358, 375, UINT_MAX},
+ {292, 324, 348, 364, UINT_MAX},
+ {324, 324, 348, 364, UINT_MAX},
+ {324, 324, 348, 364, UINT_MAX},
+ {295, 336, 358, 375, UINT_MAX},
+ {358, 358, 358, 358, 397, UINT_MAX},
+ {364, 364, 364, 364, 397, UINT_MAX},
+ {295, 336, 358, 375, 391, UINT_MAX},
+ {295, 336, 358, 375, 391, UINT_MAX},
+};
+
+static int threshold_index __initdata;
+
+static void __init fuse_speedo_calib(u32 *speedo_g, u32 *speedo_lp)
+{
+ u32 reg;
+ int ate_ver;
+ int bit_minus1;
+ int bit_minus2;
+
+ reg = tegra_fuse_read_early(FUSE_SPEEDO_CALIB_0);
+
+ *speedo_lp = (reg & 0xFFFF) * 4;
+ *speedo_g = ((reg >> 16) & 0xFFFF) * 4;
+
+ ate_ver = tegra_fuse_read_early(FUSE_TEST_PROG_VER);
+	pr_debug("Tegra ATE prog ver %d.%d\n", ate_ver / 10, ate_ver % 10);
+
+ if (ate_ver >= 26) {
+ bit_minus1 = tegra_fuse_read_spare(LP_SPEEDO_BIT_MINUS1);
+ bit_minus1 |= tegra_fuse_read_spare(LP_SPEEDO_BIT_MINUS1_R);
+ bit_minus2 = tegra_fuse_read_spare(LP_SPEEDO_BIT_MINUS2);
+ bit_minus2 |= tegra_fuse_read_spare(LP_SPEEDO_BIT_MINUS2_R);
+ *speedo_lp |= (bit_minus1 << 1) | bit_minus2;
+
+ bit_minus1 = tegra_fuse_read_spare(G_SPEEDO_BIT_MINUS1);
+ bit_minus1 |= tegra_fuse_read_spare(G_SPEEDO_BIT_MINUS1_R);
+ bit_minus2 = tegra_fuse_read_spare(G_SPEEDO_BIT_MINUS2);
+ bit_minus2 |= tegra_fuse_read_spare(G_SPEEDO_BIT_MINUS2_R);
+ *speedo_g |= (bit_minus1 << 1) | bit_minus2;
+ } else {
+ *speedo_lp |= 0x3;
+ *speedo_g |= 0x3;
+ }
+}
+
+static void __init rev_sku_to_speedo_ids(struct tegra_sku_info *sku_info)
+{
+ int package_id = tegra_fuse_read_early(FUSE_PACKAGE_INFO) & 0x0F;
+
+ switch (sku_info->revision) {
+ case TEGRA_REVISION_A01:
+ sku_info->cpu_speedo_id = 0;
+ sku_info->soc_speedo_id = 0;
+ threshold_index = THRESHOLD_INDEX_0;
+ break;
+ case TEGRA_REVISION_A02:
+ case TEGRA_REVISION_A03:
+ switch (sku_info->sku_id) {
+ case 0x87:
+ case 0x82:
+ sku_info->cpu_speedo_id = 1;
+ sku_info->soc_speedo_id = 1;
+ threshold_index = THRESHOLD_INDEX_1;
+ break;
+ case 0x81:
+ switch (package_id) {
+ case 1:
+ sku_info->cpu_speedo_id = 2;
+ sku_info->soc_speedo_id = 2;
+ threshold_index = THRESHOLD_INDEX_2;
+ break;
+ case 2:
+ sku_info->cpu_speedo_id = 4;
+ sku_info->soc_speedo_id = 1;
+ threshold_index = THRESHOLD_INDEX_7;
+ break;
+ default:
+ pr_err("Tegra Unknown pkg %d\n", package_id);
+ break;
+ }
+ break;
+ case 0x80:
+ switch (package_id) {
+ case 1:
+ sku_info->cpu_speedo_id = 5;
+ sku_info->soc_speedo_id = 2;
+ threshold_index = THRESHOLD_INDEX_8;
+ break;
+ case 2:
+ sku_info->cpu_speedo_id = 6;
+ sku_info->soc_speedo_id = 2;
+ threshold_index = THRESHOLD_INDEX_9;
+ break;
+ default:
+ pr_err("Tegra Unknown pkg %d\n", package_id);
+ break;
+ }
+ break;
+ case 0x83:
+ switch (package_id) {
+ case 1:
+ sku_info->cpu_speedo_id = 7;
+ sku_info->soc_speedo_id = 1;
+ threshold_index = THRESHOLD_INDEX_10;
+ break;
+ case 2:
+ sku_info->cpu_speedo_id = 3;
+ sku_info->soc_speedo_id = 2;
+ threshold_index = THRESHOLD_INDEX_3;
+ break;
+ default:
+ pr_err("Tegra Unknown pkg %d\n", package_id);
+ break;
+ }
+ break;
+ case 0x8F:
+ sku_info->cpu_speedo_id = 8;
+ sku_info->soc_speedo_id = 1;
+ threshold_index = THRESHOLD_INDEX_11;
+ break;
+ case 0x08:
+ sku_info->cpu_speedo_id = 1;
+ sku_info->soc_speedo_id = 1;
+ threshold_index = THRESHOLD_INDEX_4;
+ break;
+ case 0x02:
+ sku_info->cpu_speedo_id = 2;
+ sku_info->soc_speedo_id = 2;
+ threshold_index = THRESHOLD_INDEX_5;
+ break;
+ case 0x04:
+ sku_info->cpu_speedo_id = 3;
+ sku_info->soc_speedo_id = 2;
+ threshold_index = THRESHOLD_INDEX_6;
+ break;
+ case 0:
+ switch (package_id) {
+ case 1:
+ sku_info->cpu_speedo_id = 2;
+ sku_info->soc_speedo_id = 2;
+ threshold_index = THRESHOLD_INDEX_2;
+ break;
+ case 2:
+ sku_info->cpu_speedo_id = 3;
+ sku_info->soc_speedo_id = 2;
+ threshold_index = THRESHOLD_INDEX_3;
+ break;
+ default:
+ pr_err("Tegra Unknown pkg %d\n", package_id);
+ break;
+ }
+ break;
+ default:
+ pr_warn("Tegra Unknown SKU %d\n", sku_info->sku_id);
+ sku_info->cpu_speedo_id = 0;
+ sku_info->soc_speedo_id = 0;
+ threshold_index = THRESHOLD_INDEX_0;
+ break;
+ }
+ break;
+ default:
+ pr_warn("Tegra Unknown chip rev %d\n", sku_info->revision);
+ sku_info->cpu_speedo_id = 0;
+ sku_info->soc_speedo_id = 0;
+ threshold_index = THRESHOLD_INDEX_0;
+ break;
+ }
+}
+
+void __init tegra30_init_speedo_data(struct tegra_sku_info *sku_info)
+{
+ u32 cpu_speedo_val;
+ u32 soc_speedo_val;
+ int i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(cpu_process_speedos) !=
+ THRESHOLD_INDEX_COUNT);
+ BUILD_BUG_ON(ARRAY_SIZE(soc_process_speedos) !=
+ THRESHOLD_INDEX_COUNT);
+
+ rev_sku_to_speedo_ids(sku_info);
+ fuse_speedo_calib(&cpu_speedo_val, &soc_speedo_val);
+ pr_debug("Tegra CPU speedo value %u\n", cpu_speedo_val);
+ pr_debug("Tegra Core speedo value %u\n", soc_speedo_val);
+
+ for (i = 0; i < CPU_PROCESS_CORNERS; i++) {
+ if (cpu_speedo_val < cpu_process_speedos[threshold_index][i])
+ break;
+ }
+ sku_info->cpu_process_id = i - 1;
+
+ if (sku_info->cpu_process_id == -1) {
+		pr_warn("Tegra CPU speedo value %3d out of range\n",
+ cpu_speedo_val);
+ sku_info->cpu_process_id = 0;
+ sku_info->cpu_speedo_id = 1;
+ }
+
+ for (i = 0; i < SOC_PROCESS_CORNERS; i++) {
+ if (soc_speedo_val < soc_process_speedos[threshold_index][i])
+ break;
+ }
+ sku_info->soc_process_id = i - 1;
+
+ if (sku_info->soc_process_id == -1) {
+		pr_warn("Tegra SoC speedo value %3d out of range\n",
+ soc_speedo_val);
+ sku_info->soc_process_id = 0;
+ sku_info->soc_speedo_id = 1;
+ }
+}
diff --git a/drivers/soc/tegra/fuse/tegra-apbmisc.c b/drivers/soc/tegra/fuse/tegra-apbmisc.c
new file mode 100644
index 0000000000..da970f3dbf
--- /dev/null
+++ b/drivers/soc/tegra/fuse/tegra-apbmisc.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014-2023, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include <soc/tegra/common.h>
+#include <soc/tegra/fuse.h>
+
+#include "fuse.h"
+
+#define FUSE_SKU_INFO 0x10
+
+#define ERD_ERR_CONFIG 0x120c
+#define ERD_MASK_INBAND_ERR 0x1
+
+#define PMC_STRAPPING_OPT_A_RAM_CODE_SHIFT 4
+#define PMC_STRAPPING_OPT_A_RAM_CODE_MASK_LONG \
+ (0xf << PMC_STRAPPING_OPT_A_RAM_CODE_SHIFT)
+#define PMC_STRAPPING_OPT_A_RAM_CODE_MASK_SHORT \
+ (0x3 << PMC_STRAPPING_OPT_A_RAM_CODE_SHIFT)
+
+static void __iomem *apbmisc_base;
+static bool long_ram_code;
+static u32 strapping;
+static u32 chipid;
+
+u32 tegra_read_chipid(void)
+{
+ WARN(!chipid, "Tegra APB MISC not yet available\n");
+
+ return chipid;
+}
+
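+/*
+ * The chip ID register packs several fields: bits 8..15 hold the chip ID,
+ * bits 4..7 the major revision, bits 16..19 the minor revision and
+ * bits 20..23 the platform identifier (0 means silicon).
+ */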
+u8 tegra_get_chip_id(void)
+{
+ return (tegra_read_chipid() >> 8) & 0xff;
+}
+
+u8 tegra_get_major_rev(void)
+{
+ return (tegra_read_chipid() >> 4) & 0xf;
+}
+
+u8 tegra_get_minor_rev(void)
+{
+ return (tegra_read_chipid() >> 16) & 0xf;
+}
+
+u8 tegra_get_platform(void)
+{
+ return (tegra_read_chipid() >> 20) & 0xf;
+}
+
+bool tegra_is_silicon(void)
+{
+ switch (tegra_get_chip_id()) {
+ case TEGRA194:
+ case TEGRA234:
+ case TEGRA264:
+ if (tegra_get_platform() == 0)
+ return true;
+
+ return false;
+ }
+
+ /*
+ * Chips prior to Tegra194 have a different way of determining whether
+ * they are silicon or not. Since we never supported simulation on the
+ * older Tegra chips, don't bother extracting the information and just
+ * report that we're running on silicon.
+ */
+ return true;
+}
+
+u32 tegra_read_straps(void)
+{
+	WARN(!chipid, "Tegra APB MISC not yet available\n");
+
+ return strapping;
+}
+
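+/*
+ * The RAM code is part of the strapping options; boards with the
+ * "nvidia,long-ram-code" property use a 4-bit code, others a 2-bit code.
+ */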
+u32 tegra_read_ram_code(void)
+{
+ u32 straps = tegra_read_straps();
+
+ if (long_ram_code)
+ straps &= PMC_STRAPPING_OPT_A_RAM_CODE_MASK_LONG;
+ else
+ straps &= PMC_STRAPPING_OPT_A_RAM_CODE_MASK_SHORT;
+
+ return straps >> PMC_STRAPPING_OPT_A_RAM_CODE_SHIFT;
+}
+EXPORT_SYMBOL_GPL(tegra_read_ram_code);
+
+/*
+ * This function sets the ERD (Error Response Disable) bit. Setting it masks
+ * inband errors so that the CBB always sends an OKAY response to the master
+ * that caused the error.
+ */
+int tegra194_miscreg_mask_serror(void)
+{
+ if (!apbmisc_base)
+ return -EPROBE_DEFER;
+
+ if (!of_machine_is_compatible("nvidia,tegra194")) {
+ WARN(1, "Only supported for Tegra194 devices!\n");
+ return -EOPNOTSUPP;
+ }
+
+ writel_relaxed(ERD_MASK_INBAND_ERR,
+ apbmisc_base + ERD_ERR_CONFIG);
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra194_miscreg_mask_serror);
+
+static const struct of_device_id apbmisc_match[] __initconst = {
+ { .compatible = "nvidia,tegra20-apbmisc", },
+ { .compatible = "nvidia,tegra186-misc", },
+ { .compatible = "nvidia,tegra194-misc", },
+ { .compatible = "nvidia,tegra234-misc", },
+ {},
+};
+
+void __init tegra_init_revision(void)
+{
+ u8 chip_id, minor_rev;
+
+ chip_id = tegra_get_chip_id();
+ minor_rev = tegra_get_minor_rev();
+
+ switch (minor_rev) {
+ case 1:
+ tegra_sku_info.revision = TEGRA_REVISION_A01;
+ break;
+ case 2:
+ tegra_sku_info.revision = TEGRA_REVISION_A02;
+ break;
+ case 3:
+ if (chip_id == TEGRA20 && (tegra_fuse_read_spare(18) ||
+ tegra_fuse_read_spare(19)))
+ tegra_sku_info.revision = TEGRA_REVISION_A03p;
+ else
+ tegra_sku_info.revision = TEGRA_REVISION_A03;
+ break;
+ case 4:
+ tegra_sku_info.revision = TEGRA_REVISION_A04;
+ break;
+ default:
+ tegra_sku_info.revision = TEGRA_REVISION_UNKNOWN;
+ }
+
+ tegra_sku_info.sku_id = tegra_fuse_read_early(FUSE_SKU_INFO);
+ tegra_sku_info.platform = tegra_get_platform();
+}
+
+void __init tegra_init_apbmisc(void)
+{
+ void __iomem *strapping_base;
+ struct resource apbmisc, straps;
+ struct device_node *np;
+
+ np = of_find_matching_node(NULL, apbmisc_match);
+ if (!np) {
+ /*
+ * Fall back to legacy initialization for 32-bit ARM only. All
+ * 64-bit ARM device tree files for Tegra are required to have
+ * an APBMISC node.
+ *
+ * This is for backwards-compatibility with old device trees
+ * that didn't contain an APBMISC node.
+ */
+ if (IS_ENABLED(CONFIG_ARM) && soc_is_tegra()) {
+ /* APBMISC registers (chip revision, ...) */
+ apbmisc.start = 0x70000800;
+ apbmisc.end = 0x70000863;
+ apbmisc.flags = IORESOURCE_MEM;
+
+ /* strapping options */
+ if (of_machine_is_compatible("nvidia,tegra124")) {
+ straps.start = 0x7000e864;
+ straps.end = 0x7000e867;
+ } else {
+ straps.start = 0x70000008;
+ straps.end = 0x7000000b;
+ }
+
+ straps.flags = IORESOURCE_MEM;
+
+ pr_warn("Using APBMISC region %pR\n", &apbmisc);
+ pr_warn("Using strapping options registers %pR\n",
+ &straps);
+ } else {
+ /*
+ * At this point we're not running on Tegra, so play
+ * nice with multi-platform kernels.
+ */
+ return;
+ }
+ } else {
+ /*
+ * Extract information from the device tree if we've found a
+ * matching node.
+ */
+ if (of_address_to_resource(np, 0, &apbmisc) < 0) {
+ pr_err("failed to get APBMISC registers\n");
+ goto put;
+ }
+
+ if (of_address_to_resource(np, 1, &straps) < 0) {
+ pr_err("failed to get strapping options registers\n");
+ goto put;
+ }
+ }
+
+ apbmisc_base = ioremap(apbmisc.start, resource_size(&apbmisc));
+ if (!apbmisc_base) {
+ pr_err("failed to map APBMISC registers\n");
+ } else {
+ chipid = readl_relaxed(apbmisc_base + 4);
+ }
+
+ strapping_base = ioremap(straps.start, resource_size(&straps));
+ if (!strapping_base) {
+ pr_err("failed to map strapping options registers\n");
+ } else {
+ strapping = readl_relaxed(strapping_base);
+ iounmap(strapping_base);
+ }
+
+ long_ram_code = of_property_read_bool(np, "nvidia,long-ram-code");
+
+put:
+ of_node_put(np);
+}
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
new file mode 100644
index 0000000000..162f52456f
--- /dev/null
+++ b/drivers/soc/tegra/pmc.c
@@ -0,0 +1,4440 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * drivers/soc/tegra/pmc.c
+ *
+ * Copyright (c) 2010 Google, Inc
+ * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ */
+
+#define pr_fmt(fmt) "tegra-pmc: " fmt
+
+#include <linux/arm-smccc.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/clk-conf.h>
+#include <linux/clk/tegra.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_clk.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_opp.h>
+#include <linux/power_supply.h>
+#include <linux/reboot.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/syscore_ops.h>
+
+#include <soc/tegra/common.h>
+#include <soc/tegra/fuse.h>
+#include <soc/tegra/pmc.h>
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/pinctrl/pinctrl-tegra-io-pad.h>
+#include <dt-bindings/gpio/tegra186-gpio.h>
+#include <dt-bindings/gpio/tegra194-gpio.h>
+#include <dt-bindings/gpio/tegra234-gpio.h>
+#include <dt-bindings/soc/tegra-pmc.h>
+
+#define PMC_CNTRL 0x0
+#define PMC_CNTRL_INTR_POLARITY BIT(17) /* inverts INTR polarity */
+#define PMC_CNTRL_CPU_PWRREQ_OE BIT(16) /* CPU pwr req enable */
+#define PMC_CNTRL_CPU_PWRREQ_POLARITY BIT(15) /* CPU pwr req polarity */
+#define PMC_CNTRL_SIDE_EFFECT_LP0 BIT(14) /* LP0 when CPU pwr gated */
+#define PMC_CNTRL_SYSCLK_OE BIT(11) /* system clock enable */
+#define PMC_CNTRL_SYSCLK_POLARITY BIT(10) /* sys clk polarity */
+#define PMC_CNTRL_PWRREQ_POLARITY BIT(8)
+#define PMC_CNTRL_BLINK_EN 7
+#define PMC_CNTRL_MAIN_RST BIT(4)
+
+#define PMC_WAKE_MASK 0x0c
+#define PMC_WAKE_LEVEL 0x10
+#define PMC_WAKE_STATUS 0x14
+#define PMC_SW_WAKE_STATUS 0x18
+#define PMC_DPD_PADS_ORIDE 0x1c
+#define PMC_DPD_PADS_ORIDE_BLINK 20
+
+#define DPD_SAMPLE 0x020
+#define DPD_SAMPLE_ENABLE BIT(0)
+#define DPD_SAMPLE_DISABLE (0 << 0)
+
+#define PWRGATE_TOGGLE 0x30
+#define PWRGATE_TOGGLE_START BIT(8)
+
+#define REMOVE_CLAMPING 0x34
+
+#define PWRGATE_STATUS 0x38
+
+#define PMC_BLINK_TIMER 0x40
+#define PMC_IMPL_E_33V_PWR 0x40
+
+#define PMC_PWR_DET 0x48
+
+#define PMC_SCRATCH0_MODE_RECOVERY BIT(31)
+#define PMC_SCRATCH0_MODE_BOOTLOADER BIT(30)
+#define PMC_SCRATCH0_MODE_RCM BIT(1)
+#define PMC_SCRATCH0_MODE_MASK (PMC_SCRATCH0_MODE_RECOVERY | \
+ PMC_SCRATCH0_MODE_BOOTLOADER | \
+ PMC_SCRATCH0_MODE_RCM)
+
+#define PMC_CPUPWRGOOD_TIMER 0xc8
+#define PMC_CPUPWROFF_TIMER 0xcc
+#define PMC_COREPWRGOOD_TIMER 0x3c
+#define PMC_COREPWROFF_TIMER 0xe0
+
+#define PMC_PWR_DET_VALUE 0xe4
+
+#define PMC_USB_DEBOUNCE_DEL 0xec
+#define PMC_USB_AO 0xf0
+
+#define PMC_SCRATCH37 0x130
+#define PMC_SCRATCH41 0x140
+
+#define PMC_WAKE2_MASK 0x160
+#define PMC_WAKE2_LEVEL 0x164
+#define PMC_WAKE2_STATUS 0x168
+#define PMC_SW_WAKE2_STATUS 0x16c
+
+#define PMC_CLK_OUT_CNTRL 0x1a8
+#define PMC_CLK_OUT_MUX_MASK GENMASK(1, 0)
+#define PMC_SENSOR_CTRL 0x1b0
+#define PMC_SENSOR_CTRL_SCRATCH_WRITE BIT(2)
+#define PMC_SENSOR_CTRL_ENABLE_RST BIT(1)
+
+#define PMC_RST_STATUS_POR 0
+#define PMC_RST_STATUS_WATCHDOG 1
+#define PMC_RST_STATUS_SENSOR 2
+#define PMC_RST_STATUS_SW_MAIN 3
+#define PMC_RST_STATUS_LP0 4
+#define PMC_RST_STATUS_AOTAG 5
+
+#define IO_DPD_REQ 0x1b8
+#define IO_DPD_REQ_CODE_IDLE (0U << 30)
+#define IO_DPD_REQ_CODE_OFF (1U << 30)
+#define IO_DPD_REQ_CODE_ON (2U << 30)
+#define IO_DPD_REQ_CODE_MASK (3U << 30)
+
+#define IO_DPD_STATUS 0x1bc
+#define IO_DPD2_REQ 0x1c0
+#define IO_DPD2_STATUS 0x1c4
+#define SEL_DPD_TIM 0x1c8
+
+#define PMC_UTMIP_UHSIC_TRIGGERS 0x1ec
+#define PMC_UTMIP_UHSIC_SAVED_STATE 0x1f0
+
+#define PMC_UTMIP_TERM_PAD_CFG 0x1f8
+#define PMC_UTMIP_UHSIC_SLEEP_CFG 0x1fc
+#define PMC_UTMIP_UHSIC_FAKE 0x218
+
+#define PMC_SCRATCH54 0x258
+#define PMC_SCRATCH54_DATA_SHIFT 8
+#define PMC_SCRATCH54_ADDR_SHIFT 0
+
+#define PMC_SCRATCH55 0x25c
+#define PMC_SCRATCH55_RESET_TEGRA BIT(31)
+#define PMC_SCRATCH55_CNTRL_ID_SHIFT 27
+#define PMC_SCRATCH55_PINMUX_SHIFT 24
+#define PMC_SCRATCH55_16BITOP BIT(15)
+#define PMC_SCRATCH55_CHECKSUM_SHIFT 16
+#define PMC_SCRATCH55_I2CSLV1_SHIFT 0
+
+#define PMC_UTMIP_UHSIC_LINE_WAKEUP 0x26c
+
+#define PMC_UTMIP_BIAS_MASTER_CNTRL 0x270
+#define PMC_UTMIP_MASTER_CONFIG 0x274
+#define PMC_UTMIP_UHSIC2_TRIGGERS 0x27c
+#define PMC_UTMIP_MASTER2_CONFIG 0x29c
+
+#define GPU_RG_CNTRL 0x2d4
+
+#define PMC_UTMIP_PAD_CFG0 0x4c0
+#define PMC_UTMIP_UHSIC_SLEEP_CFG1 0x4d0
+#define PMC_UTMIP_SLEEPWALK_P3 0x4e0
+/* Tegra186 and later */
+#define WAKE_AOWAKE_CNTRL(x) (0x000 + ((x) << 2))
+#define WAKE_AOWAKE_CNTRL_LEVEL (1 << 3)
+#define WAKE_AOWAKE_CNTRL_SR_CAPTURE_EN (1 << 1)
+#define WAKE_AOWAKE_MASK_W(x) (0x180 + ((x) << 2))
+#define WAKE_AOWAKE_MASK_R(x) (0x300 + ((x) << 2))
+#define WAKE_AOWAKE_STATUS_W(x) (0x30c + ((x) << 2))
+#define WAKE_AOWAKE_STATUS_R(x) (0x48c + ((x) << 2))
+#define WAKE_AOWAKE_TIER0_ROUTING(x) (0x4b4 + ((x) << 2))
+#define WAKE_AOWAKE_TIER1_ROUTING(x) (0x4c0 + ((x) << 2))
+#define WAKE_AOWAKE_TIER2_ROUTING(x) (0x4cc + ((x) << 2))
+#define WAKE_AOWAKE_SW_STATUS_W_0 0x49c
+#define WAKE_AOWAKE_SW_STATUS(x) (0x4a0 + ((x) << 2))
+#define WAKE_LATCH_SW 0x498
+
+#define WAKE_AOWAKE_CTRL 0x4f4
+#define WAKE_AOWAKE_CTRL_INTR_POLARITY BIT(0)
+
+#define SW_WAKE_ID 83 /* wake83 */
+
+/* for secure PMC */
+#define TEGRA_SMC_PMC 0xc2fffe00
+#define TEGRA_SMC_PMC_READ 0xaa
+#define TEGRA_SMC_PMC_WRITE 0xbb
+
+struct pmc_clk {
+ struct clk_hw hw;
+ unsigned long offs;
+ u32 mux_shift;
+ u32 force_en_shift;
+};
+
+#define to_pmc_clk(_hw) container_of(_hw, struct pmc_clk, hw)
+
+struct pmc_clk_gate {
+ struct clk_hw hw;
+ unsigned long offs;
+ u32 shift;
+};
+
+#define to_pmc_clk_gate(_hw) container_of(_hw, struct pmc_clk_gate, hw)
+
+struct pmc_clk_init_data {
+ char *name;
+ const char *const *parents;
+ int num_parents;
+ int clk_id;
+ u8 mux_shift;
+ u8 force_en_shift;
+};
+
+static const char * const clk_out1_parents[] = { "osc", "osc_div2",
+ "osc_div4", "extern1",
+};
+
+static const char * const clk_out2_parents[] = { "osc", "osc_div2",
+ "osc_div4", "extern2",
+};
+
+static const char * const clk_out3_parents[] = { "osc", "osc_div2",
+ "osc_div4", "extern3",
+};
+
+static const struct pmc_clk_init_data tegra_pmc_clks_data[] = {
+ {
+ .name = "pmc_clk_out_1",
+ .parents = clk_out1_parents,
+ .num_parents = ARRAY_SIZE(clk_out1_parents),
+ .clk_id = TEGRA_PMC_CLK_OUT_1,
+ .mux_shift = 6,
+ .force_en_shift = 2,
+ },
+ {
+ .name = "pmc_clk_out_2",
+ .parents = clk_out2_parents,
+ .num_parents = ARRAY_SIZE(clk_out2_parents),
+ .clk_id = TEGRA_PMC_CLK_OUT_2,
+ .mux_shift = 14,
+ .force_en_shift = 10,
+ },
+ {
+ .name = "pmc_clk_out_3",
+ .parents = clk_out3_parents,
+ .num_parents = ARRAY_SIZE(clk_out3_parents),
+ .clk_id = TEGRA_PMC_CLK_OUT_3,
+ .mux_shift = 22,
+ .force_en_shift = 18,
+ },
+};
+
+struct tegra_powergate {
+ struct generic_pm_domain genpd;
+ struct tegra_pmc *pmc;
+ unsigned int id;
+ struct clk **clks;
+ unsigned int num_clks;
+ unsigned long *clk_rates;
+ struct reset_control *reset;
+};
+
+struct tegra_io_pad_soc {
+ enum tegra_io_pad id;
+ unsigned int dpd;
+ unsigned int request;
+ unsigned int status;
+ unsigned int voltage;
+ const char *name;
+};
+
+struct tegra_pmc_regs {
+ unsigned int scratch0;
+ unsigned int rst_status;
+ unsigned int rst_source_shift;
+ unsigned int rst_source_mask;
+ unsigned int rst_level_shift;
+ unsigned int rst_level_mask;
+};
+
+struct tegra_wake_event {
+ const char *name;
+ unsigned int id;
+ unsigned int irq;
+ struct {
+ unsigned int instance;
+ unsigned int pin;
+ } gpio;
+};
+
+#define TEGRA_WAKE_SIMPLE(_name, _id) \
+ { \
+ .name = _name, \
+ .id = _id, \
+ .irq = 0, \
+ .gpio = { \
+ .instance = UINT_MAX, \
+ .pin = UINT_MAX, \
+ }, \
+ }
+
+#define TEGRA_WAKE_IRQ(_name, _id, _irq) \
+ { \
+ .name = _name, \
+ .id = _id, \
+ .irq = _irq, \
+ .gpio = { \
+ .instance = UINT_MAX, \
+ .pin = UINT_MAX, \
+ }, \
+ }
+
+#define TEGRA_WAKE_GPIO(_name, _id, _instance, _pin) \
+ { \
+ .name = _name, \
+ .id = _id, \
+ .irq = 0, \
+ .gpio = { \
+ .instance = _instance, \
+ .pin = _pin, \
+ }, \
+ }
+
+struct tegra_pmc_soc {
+ unsigned int num_powergates;
+ const char *const *powergates;
+ unsigned int num_cpu_powergates;
+ const u8 *cpu_powergates;
+
+ bool has_tsense_reset;
+ bool has_gpu_clamps;
+ bool needs_mbist_war;
+ bool has_impl_33v_pwr;
+ bool maybe_tz_only;
+
+ const struct tegra_io_pad_soc *io_pads;
+ unsigned int num_io_pads;
+
+ const struct pinctrl_pin_desc *pin_descs;
+ unsigned int num_pin_descs;
+
+ const struct tegra_pmc_regs *regs;
+ void (*init)(struct tegra_pmc *pmc);
+ void (*setup_irq_polarity)(struct tegra_pmc *pmc,
+ struct device_node *np,
+ bool invert);
+ void (*set_wake_filters)(struct tegra_pmc *pmc);
+ int (*irq_set_wake)(struct irq_data *data, unsigned int on);
+ int (*irq_set_type)(struct irq_data *data, unsigned int type);
+ int (*powergate_set)(struct tegra_pmc *pmc, unsigned int id,
+ bool new_state);
+
+ const char * const *reset_sources;
+ unsigned int num_reset_sources;
+ const char * const *reset_levels;
+ unsigned int num_reset_levels;
+
+ /*
+ * These describe events that can wake the system from sleep (i.e.
+ * LP0 or SC7). Wakeup from other sleep states (such as LP1 or LP2)
+ * are dealt with in the LIC.
+ */
+ const struct tegra_wake_event *wake_events;
+ unsigned int num_wake_events;
+ unsigned int max_wake_events;
+ unsigned int max_wake_vectors;
+
+ const struct pmc_clk_init_data *pmc_clks_data;
+ unsigned int num_pmc_clks;
+ bool has_blink_output;
+ bool has_usb_sleepwalk;
+ bool supports_core_domain;
+};
+
+/**
+ * struct tegra_pmc - NVIDIA Tegra PMC
+ * @dev: pointer to PMC device structure
+ * @base: pointer to I/O remapped register region
+ * @wake: pointer to I/O remapped region for WAKE registers
+ * @aotag: pointer to I/O remapped region for AOTAG registers
+ * @scratch: pointer to I/O remapped region for scratch registers
+ * @clk: pointer to pclk clock
+ * @soc: pointer to SoC data structure
+ * @tz_only: flag specifying if the PMC can only be accessed via TrustZone
+ * @rate: currently configured rate of pclk
+ * @suspend_mode: lowest suspend mode available
+ * @cpu_good_time: CPU power good time (in microseconds)
+ * @cpu_off_time: CPU power off time (in microseconds)
+ * @core_osc_time: core power good OSC time (in microseconds)
+ * @core_pmu_time: core power good PMU time (in microseconds)
+ * @core_off_time: core power off time (in microseconds)
+ * @corereq_high: core power request is active-high
+ * @sysclkreq_high: system clock request is active-high
+ * @combined_req: combined power request for CPU & core
+ * @cpu_pwr_good_en: CPU power good signal is enabled
+ * @lp0_vec_phys: physical base address of the LP0 warm boot code
+ * @lp0_vec_size: size of the LP0 warm boot code
+ * @powergates_available: Bitmap of available power gates
+ * @powergates_lock: mutex for power gate register access
+ * @pctl_dev: pin controller exposed by the PMC
+ * @domain: IRQ domain provided by the PMC
+ * @irq: chip implementation for the IRQ domain
+ * @clk_nb: pclk clock changes handler
+ * @core_domain_state_synced: flag marking the core domain's state as synced
+ * @core_domain_registered: flag marking the core domain as registered
+ * @wake_type_level_map: Bitmap indicating level type for non-dual edge wakes
+ * @wake_type_dual_edge_map: Bitmap indicating if a wake is dual-edge or not
+ * @wake_sw_status_map: Bitmap to hold raw status of wakes without mask
+ * @wake_cntrl_level_map: Bitmap to hold wake levels to be programmed in
+ * cntrl register associated with each wake during system suspend.
+ */
+struct tegra_pmc {
+ struct device *dev;
+ void __iomem *base;
+ void __iomem *wake;
+ void __iomem *aotag;
+ void __iomem *scratch;
+ struct clk *clk;
+
+ const struct tegra_pmc_soc *soc;
+ bool tz_only;
+
+ unsigned long rate;
+
+ enum tegra_suspend_mode suspend_mode;
+ u32 cpu_good_time;
+ u32 cpu_off_time;
+ u32 core_osc_time;
+ u32 core_pmu_time;
+ u32 core_off_time;
+ bool corereq_high;
+ bool sysclkreq_high;
+ bool combined_req;
+ bool cpu_pwr_good_en;
+ u32 lp0_vec_phys;
+ u32 lp0_vec_size;
+ DECLARE_BITMAP(powergates_available, TEGRA_POWERGATE_MAX);
+
+ struct mutex powergates_lock;
+
+ struct pinctrl_dev *pctl_dev;
+
+ struct irq_domain *domain;
+ struct irq_chip irq;
+
+ struct notifier_block clk_nb;
+
+ bool core_domain_state_synced;
+ bool core_domain_registered;
+
+ unsigned long *wake_type_level_map;
+ unsigned long *wake_type_dual_edge_map;
+ unsigned long *wake_sw_status_map;
+ unsigned long *wake_cntrl_level_map;
+ struct syscore_ops syscore;
+};
+
+static struct tegra_pmc *pmc = &(struct tegra_pmc) {
+ .base = NULL,
+ .suspend_mode = TEGRA_SUSPEND_NOT_READY,
+};
+
+static inline struct tegra_powergate *
+to_powergate(struct generic_pm_domain *domain)
+{
+ return container_of(domain, struct tegra_powergate, genpd);
+}
+
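+/*
+ * When the PMC is only accessible from the secure world, register accesses
+ * are proxied through SMC calls to the secure monitor; otherwise the
+ * registers are accessed directly.
+ */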
+static u32 tegra_pmc_readl(struct tegra_pmc *pmc, unsigned long offset)
+{
+ struct arm_smccc_res res;
+
+ if (pmc->tz_only) {
+ arm_smccc_smc(TEGRA_SMC_PMC, TEGRA_SMC_PMC_READ, offset, 0, 0,
+ 0, 0, 0, &res);
+ if (res.a0) {
+ if (pmc->dev)
+ dev_warn(pmc->dev, "%s(): SMC failed: %lu\n",
+ __func__, res.a0);
+ else
+ pr_warn("%s(): SMC failed: %lu\n", __func__,
+ res.a0);
+ }
+
+ return res.a1;
+ }
+
+ return readl(pmc->base + offset);
+}
+
+static void tegra_pmc_writel(struct tegra_pmc *pmc, u32 value,
+ unsigned long offset)
+{
+ struct arm_smccc_res res;
+
+ if (pmc->tz_only) {
+ arm_smccc_smc(TEGRA_SMC_PMC, TEGRA_SMC_PMC_WRITE, offset,
+ value, 0, 0, 0, 0, &res);
+ if (res.a0) {
+ if (pmc->dev)
+ dev_warn(pmc->dev, "%s(): SMC failed: %lu\n",
+ __func__, res.a0);
+ else
+ pr_warn("%s(): SMC failed: %lu\n", __func__,
+ res.a0);
+ }
+ } else {
+ writel(value, pmc->base + offset);
+ }
+}
+
+static u32 tegra_pmc_scratch_readl(struct tegra_pmc *pmc, unsigned long offset)
+{
+ if (pmc->tz_only)
+ return tegra_pmc_readl(pmc, offset);
+
+ return readl(pmc->scratch + offset);
+}
+
+static void tegra_pmc_scratch_writel(struct tegra_pmc *pmc, u32 value,
+ unsigned long offset)
+{
+ if (pmc->tz_only)
+ tegra_pmc_writel(pmc, value, offset);
+ else
+ writel(value, pmc->scratch + offset);
+}
+
+/*
+ * TODO Figure out a way to call this with the struct tegra_pmc * passed in.
+ * This currently doesn't work because readx_poll_timeout() can only operate
+ * on functions that take a single argument.
+ */
+static inline bool tegra_powergate_state(int id)
+{
+ if (id == TEGRA_POWERGATE_3D && pmc->soc->has_gpu_clamps)
+ return (tegra_pmc_readl(pmc, GPU_RG_CNTRL) & 0x1) == 0;
+ else
+ return (tegra_pmc_readl(pmc, PWRGATE_STATUS) & BIT(id)) != 0;
+}
+
+static inline bool tegra_powergate_is_valid(struct tegra_pmc *pmc, int id)
+{
+ return (pmc->soc && pmc->soc->powergates[id]);
+}
+
+static inline bool tegra_powergate_is_available(struct tegra_pmc *pmc, int id)
+{
+ return test_bit(id, pmc->powergates_available);
+}
+
+static int tegra_powergate_lookup(struct tegra_pmc *pmc, const char *name)
+{
+ unsigned int i;
+
+ if (!pmc || !pmc->soc || !name)
+ return -EINVAL;
+
+ for (i = 0; i < pmc->soc->num_powergates; i++) {
+ if (!tegra_powergate_is_valid(pmc, i))
+ continue;
+
+ if (!strcmp(name, pmc->soc->powergates[i]))
+ return i;
+ }
+
+ return -ENODEV;
+}
+
+static int tegra20_powergate_set(struct tegra_pmc *pmc, unsigned int id,
+ bool new_state)
+{
+ unsigned int retries = 100;
+ bool status;
+ int ret;
+
+ /*
+ * As per TRM documentation, the toggle command will be dropped by PMC
+ * if there is contention with a HW-initiated toggling (i.e. CPU core
+ * power-gated), the command should be retried in that case.
+ */
+ do {
+ tegra_pmc_writel(pmc, PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
+
+ /* wait for PMC to execute the command */
+ ret = readx_poll_timeout(tegra_powergate_state, id, status,
+ status == new_state, 1, 10);
+ } while (ret == -ETIMEDOUT && retries--);
+
+ return ret;
+}
+
+static inline bool tegra_powergate_toggle_ready(struct tegra_pmc *pmc)
+{
+ return !(tegra_pmc_readl(pmc, PWRGATE_TOGGLE) & PWRGATE_TOGGLE_START);
+}
+
+static int tegra114_powergate_set(struct tegra_pmc *pmc, unsigned int id,
+ bool new_state)
+{
+ bool status;
+ int err;
+
+ /* wait while PMC power gating is contended */
+ err = readx_poll_timeout(tegra_powergate_toggle_ready, pmc, status,
+ status == true, 1, 100);
+ if (err)
+ return err;
+
+ tegra_pmc_writel(pmc, PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
+
+ /* wait for PMC to accept the command */
+ err = readx_poll_timeout(tegra_powergate_toggle_ready, pmc, status,
+ status == true, 1, 100);
+ if (err)
+ return err;
+
+ /* wait for PMC to execute the command */
+ err = readx_poll_timeout(tegra_powergate_state, id, status,
+ status == new_state, 10, 100000);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * tegra_powergate_set() - set the state of a partition
+ * @pmc: power management controller
+ * @id: partition ID
+ * @new_state: new state of the partition
+ */
+static int tegra_powergate_set(struct tegra_pmc *pmc, unsigned int id,
+ bool new_state)
+{
+ int err;
+
+ if (id == TEGRA_POWERGATE_3D && pmc->soc->has_gpu_clamps)
+ return -EINVAL;
+
+ mutex_lock(&pmc->powergates_lock);
+
+ if (tegra_powergate_state(id) == new_state) {
+ mutex_unlock(&pmc->powergates_lock);
+ return 0;
+ }
+
+ err = pmc->soc->powergate_set(pmc, id, new_state);
+
+ mutex_unlock(&pmc->powergates_lock);
+
+ return err;
+}
+
+static int __tegra_powergate_remove_clamping(struct tegra_pmc *pmc,
+ unsigned int id)
+{
+ u32 mask;
+
+ mutex_lock(&pmc->powergates_lock);
+
+ /*
+ * On Tegra124 and later, the clamps for the GPU are controlled by a
+ * separate register (with different semantics).
+ */
+ if (id == TEGRA_POWERGATE_3D) {
+ if (pmc->soc->has_gpu_clamps) {
+ tegra_pmc_writel(pmc, 0, GPU_RG_CNTRL);
+ goto out;
+ }
+ }
+
+ /*
+ * Tegra 2 has a bug where PCIE and VDE clamping masks are
+ * swapped relatively to the partition ids
+ */
+ if (id == TEGRA_POWERGATE_VDEC)
+ mask = (1 << TEGRA_POWERGATE_PCIE);
+ else if (id == TEGRA_POWERGATE_PCIE)
+ mask = (1 << TEGRA_POWERGATE_VDEC);
+ else
+ mask = (1 << id);
+
+ tegra_pmc_writel(pmc, mask, REMOVE_CLAMPING);
+
+out:
+ mutex_unlock(&pmc->powergates_lock);
+
+ return 0;
+}
+
+static int tegra_powergate_prepare_clocks(struct tegra_powergate *pg)
+{
+ unsigned long safe_rate = 100 * 1000 * 1000;
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < pg->num_clks; i++) {
+ pg->clk_rates[i] = clk_get_rate(pg->clks[i]);
+
+ if (!pg->clk_rates[i]) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (pg->clk_rates[i] <= safe_rate)
+ continue;
+
+ /*
+		 * We don't know whether the voltage state is okay for the
+		 * current clock rate, hence it's better to temporarily
+		 * switch the clock to a safe rate that is suitable for
+		 * all voltages before enabling the clock.
+ */
+ err = clk_set_rate(pg->clks[i], safe_rate);
+ if (err)
+ goto out;
+ }
+
+ return 0;
+
+out:
+ while (i--)
+ clk_set_rate(pg->clks[i], pg->clk_rates[i]);
+
+ return err;
+}
+
+static int tegra_powergate_unprepare_clocks(struct tegra_powergate *pg)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < pg->num_clks; i++) {
+ err = clk_set_rate(pg->clks[i], pg->clk_rates[i]);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void tegra_powergate_disable_clocks(struct tegra_powergate *pg)
+{
+ unsigned int i;
+
+ for (i = 0; i < pg->num_clks; i++)
+ clk_disable_unprepare(pg->clks[i]);
+}
+
+static int tegra_powergate_enable_clocks(struct tegra_powergate *pg)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < pg->num_clks; i++) {
+ err = clk_prepare_enable(pg->clks[i]);
+ if (err)
+ goto out;
+ }
+
+ return 0;
+
+out:
+ while (i--)
+ clk_disable_unprepare(pg->clks[i]);
+
+ return err;
+}
+
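+/*
+ * Power-up sequence: assert the resets, ungate the partition, enable its
+ * clocks, remove the I/O clamps and finally release the resets, with short
+ * delays between each step.
+ */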
+static int tegra_powergate_power_up(struct tegra_powergate *pg,
+ bool disable_clocks)
+{
+ int err;
+
+ err = reset_control_assert(pg->reset);
+ if (err)
+ return err;
+
+ usleep_range(10, 20);
+
+ err = tegra_powergate_set(pg->pmc, pg->id, true);
+ if (err < 0)
+ return err;
+
+ usleep_range(10, 20);
+
+ err = tegra_powergate_prepare_clocks(pg);
+ if (err)
+ goto powergate_off;
+
+ err = tegra_powergate_enable_clocks(pg);
+ if (err)
+ goto unprepare_clks;
+
+ usleep_range(10, 20);
+
+ err = __tegra_powergate_remove_clamping(pg->pmc, pg->id);
+ if (err)
+ goto disable_clks;
+
+ usleep_range(10, 20);
+
+ err = reset_control_deassert(pg->reset);
+ if (err)
+ goto disable_clks;
+
+ usleep_range(10, 20);
+
+ if (pg->pmc->soc->needs_mbist_war)
+ err = tegra210_clk_handle_mbist_war(pg->id);
+ if (err)
+ goto disable_clks;
+
+ if (disable_clocks)
+ tegra_powergate_disable_clocks(pg);
+
+ err = tegra_powergate_unprepare_clocks(pg);
+ if (err)
+ return err;
+
+ return 0;
+
+disable_clks:
+ tegra_powergate_disable_clocks(pg);
+ usleep_range(10, 20);
+
+unprepare_clks:
+ tegra_powergate_unprepare_clocks(pg);
+
+powergate_off:
+ tegra_powergate_set(pg->pmc, pg->id, false);
+
+ return err;
+}
+
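+/*
+ * Power-down is roughly the reverse: with the clocks enabled, assert the
+ * resets, disable the clocks and gate the partition.
+ */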
+static int tegra_powergate_power_down(struct tegra_powergate *pg)
+{
+ int err;
+
+ err = tegra_powergate_prepare_clocks(pg);
+ if (err)
+ return err;
+
+ err = tegra_powergate_enable_clocks(pg);
+ if (err)
+ goto unprepare_clks;
+
+ usleep_range(10, 20);
+
+ err = reset_control_assert(pg->reset);
+ if (err)
+ goto disable_clks;
+
+ usleep_range(10, 20);
+
+ tegra_powergate_disable_clocks(pg);
+
+ usleep_range(10, 20);
+
+ err = tegra_powergate_set(pg->pmc, pg->id, false);
+ if (err)
+ goto assert_resets;
+
+ err = tegra_powergate_unprepare_clocks(pg);
+ if (err)
+ return err;
+
+ return 0;
+
+assert_resets:
+ tegra_powergate_enable_clocks(pg);
+ usleep_range(10, 20);
+ reset_control_deassert(pg->reset);
+ usleep_range(10, 20);
+
+disable_clks:
+ tegra_powergate_disable_clocks(pg);
+
+unprepare_clks:
+ tegra_powergate_unprepare_clocks(pg);
+
+ return err;
+}
+
+static int tegra_genpd_power_on(struct generic_pm_domain *domain)
+{
+ struct tegra_powergate *pg = to_powergate(domain);
+ struct device *dev = pg->pmc->dev;
+ int err;
+
+ err = tegra_powergate_power_up(pg, true);
+ if (err) {
+ dev_err(dev, "failed to turn on PM domain %s: %d\n",
+ pg->genpd.name, err);
+ goto out;
+ }
+
+ reset_control_release(pg->reset);
+
+out:
+ return err;
+}
+
+static int tegra_genpd_power_off(struct generic_pm_domain *domain)
+{
+ struct tegra_powergate *pg = to_powergate(domain);
+ struct device *dev = pg->pmc->dev;
+ int err;
+
+ err = reset_control_acquire(pg->reset);
+ if (err < 0) {
+ dev_err(dev, "failed to acquire resets for PM domain %s: %d\n",
+ pg->genpd.name, err);
+ return err;
+ }
+
+ err = tegra_powergate_power_down(pg);
+ if (err) {
+ dev_err(dev, "failed to turn off PM domain %s: %d\n",
+ pg->genpd.name, err);
+ reset_control_release(pg->reset);
+ }
+
+ return err;
+}
+
+/**
+ * tegra_powergate_power_on() - power on partition
+ * @id: partition ID
+ */
+int tegra_powergate_power_on(unsigned int id)
+{
+ if (!tegra_powergate_is_available(pmc, id))
+ return -EINVAL;
+
+ return tegra_powergate_set(pmc, id, true);
+}
+EXPORT_SYMBOL(tegra_powergate_power_on);
+
+/**
+ * tegra_powergate_power_off() - power off partition
+ * @id: partition ID
+ */
+int tegra_powergate_power_off(unsigned int id)
+{
+ if (!tegra_powergate_is_available(pmc, id))
+ return -EINVAL;
+
+ return tegra_powergate_set(pmc, id, false);
+}
+EXPORT_SYMBOL(tegra_powergate_power_off);
+
+/**
+ * tegra_powergate_is_powered() - check if partition is powered
+ * @pmc: power management controller
+ * @id: partition ID
+ */
+static int tegra_powergate_is_powered(struct tegra_pmc *pmc, unsigned int id)
+{
+ if (!tegra_powergate_is_valid(pmc, id))
+ return -EINVAL;
+
+ return tegra_powergate_state(id);
+}
+
+/**
+ * tegra_powergate_remove_clamping() - remove power clamps for partition
+ * @id: partition ID
+ */
+int tegra_powergate_remove_clamping(unsigned int id)
+{
+ if (!tegra_powergate_is_available(pmc, id))
+ return -EINVAL;
+
+ return __tegra_powergate_remove_clamping(pmc, id);
+}
+EXPORT_SYMBOL(tegra_powergate_remove_clamping);
+
+/**
+ * tegra_powergate_sequence_power_up() - power up partition
+ * @id: partition ID
+ * @clk: clock for partition
+ * @rst: reset for partition
+ *
+ * Must be called with clk disabled, and returns with clk enabled.
+ */
+int tegra_powergate_sequence_power_up(unsigned int id, struct clk *clk,
+ struct reset_control *rst)
+{
+ struct tegra_powergate *pg;
+ int err;
+
+ if (!tegra_powergate_is_available(pmc, id))
+ return -EINVAL;
+
+ pg = kzalloc(sizeof(*pg), GFP_KERNEL);
+ if (!pg)
+ return -ENOMEM;
+
+ pg->clk_rates = kzalloc(sizeof(*pg->clk_rates), GFP_KERNEL);
+ if (!pg->clk_rates) {
+		kfree(pg);
+ return -ENOMEM;
+ }
+
+ pg->id = id;
+ pg->clks = &clk;
+ pg->num_clks = 1;
+ pg->reset = rst;
+ pg->pmc = pmc;
+
+ err = tegra_powergate_power_up(pg, false);
+ if (err)
+ dev_err(pmc->dev, "failed to turn on partition %d: %d\n", id,
+ err);
+
+ kfree(pg->clk_rates);
+ kfree(pg);
+
+ return err;
+}
+EXPORT_SYMBOL(tegra_powergate_sequence_power_up);
+
+/**
+ * tegra_get_cpu_powergate_id() - convert from CPU ID to partition ID
+ * @pmc: power management controller
+ * @cpuid: CPU partition ID
+ *
+ * Returns the partition ID corresponding to the CPU partition ID or a
+ * negative error code on failure.
+ */
+static int tegra_get_cpu_powergate_id(struct tegra_pmc *pmc,
+ unsigned int cpuid)
+{
+ if (pmc->soc && cpuid < pmc->soc->num_cpu_powergates)
+ return pmc->soc->cpu_powergates[cpuid];
+
+ return -EINVAL;
+}
+
+/**
+ * tegra_pmc_cpu_is_powered() - check if CPU partition is powered
+ * @cpuid: CPU partition ID
+ */
+bool tegra_pmc_cpu_is_powered(unsigned int cpuid)
+{
+ int id;
+
+ id = tegra_get_cpu_powergate_id(pmc, cpuid);
+ if (id < 0)
+ return false;
+
+ return tegra_powergate_is_powered(pmc, id);
+}
+
+/**
+ * tegra_pmc_cpu_power_on() - power on CPU partition
+ * @cpuid: CPU partition ID
+ */
+int tegra_pmc_cpu_power_on(unsigned int cpuid)
+{
+ int id;
+
+ id = tegra_get_cpu_powergate_id(pmc, cpuid);
+ if (id < 0)
+ return id;
+
+ return tegra_powergate_set(pmc, id, true);
+}
+
+/**
+ * tegra_pmc_cpu_remove_clamping() - remove power clamps for CPU partition
+ * @cpuid: CPU partition ID
+ */
+int tegra_pmc_cpu_remove_clamping(unsigned int cpuid)
+{
+ int id;
+
+ id = tegra_get_cpu_powergate_id(pmc, cpuid);
+ if (id < 0)
+ return id;
+
+ return tegra_powergate_remove_clamping(id);
+}
+
+static void tegra_pmc_program_reboot_reason(const char *cmd)
+{
+ u32 value;
+
+ value = tegra_pmc_scratch_readl(pmc, pmc->soc->regs->scratch0);
+ value &= ~PMC_SCRATCH0_MODE_MASK;
+
+ if (cmd) {
+ if (strcmp(cmd, "recovery") == 0)
+ value |= PMC_SCRATCH0_MODE_RECOVERY;
+
+ if (strcmp(cmd, "bootloader") == 0)
+ value |= PMC_SCRATCH0_MODE_BOOTLOADER;
+
+ if (strcmp(cmd, "forced-recovery") == 0)
+ value |= PMC_SCRATCH0_MODE_RCM;
+ }
+
+ tegra_pmc_scratch_writel(pmc, value, pmc->soc->regs->scratch0);
+}
+
+static int tegra_pmc_reboot_notify(struct notifier_block *this,
+ unsigned long action, void *data)
+{
+ if (action == SYS_RESTART)
+ tegra_pmc_program_reboot_reason(data);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block tegra_pmc_reboot_notifier = {
+ .notifier_call = tegra_pmc_reboot_notify,
+};
+
+static void tegra_pmc_restart(void)
+{
+ u32 value;
+
+ /* reset everything but PMC_SCRATCH0 and PMC_RST_STATUS */
+ value = tegra_pmc_readl(pmc, PMC_CNTRL);
+ value |= PMC_CNTRL_MAIN_RST;
+ tegra_pmc_writel(pmc, value, PMC_CNTRL);
+}
+
+static int tegra_pmc_restart_handler(struct sys_off_data *data)
+{
+ tegra_pmc_restart();
+
+ return NOTIFY_DONE;
+}
+
+static int tegra_pmc_power_off_handler(struct sys_off_data *data)
+{
+ /*
+ * Reboot Nexus 7 into special bootloader mode if USB cable is
+ * connected in order to display battery status and power off.
+ */
+ if (of_machine_is_compatible("asus,grouper") &&
+ power_supply_is_system_supplied()) {
+ const u32 go_to_charger_mode = 0xa5a55a5a;
+
+ tegra_pmc_writel(pmc, go_to_charger_mode, PMC_SCRATCH37);
+ tegra_pmc_restart();
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int powergate_show(struct seq_file *s, void *data)
+{
+ unsigned int i;
+ int status;
+
+ seq_printf(s, " powergate powered\n");
+ seq_printf(s, "------------------\n");
+
+ for (i = 0; i < pmc->soc->num_powergates; i++) {
+ status = tegra_powergate_is_powered(pmc, i);
+ if (status < 0)
+ continue;
+
+ seq_printf(s, " %9s %7s\n", pmc->soc->powergates[i],
+ status ? "yes" : "no");
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(powergate);
+
+static int tegra_powergate_of_get_clks(struct tegra_powergate *pg,
+ struct device_node *np)
+{
+ struct clk *clk;
+ unsigned int i, count;
+ int err;
+
+ count = of_clk_get_parent_count(np);
+ if (count == 0)
+ return -ENODEV;
+
+ pg->clks = kcalloc(count, sizeof(clk), GFP_KERNEL);
+ if (!pg->clks)
+ return -ENOMEM;
+
+ pg->clk_rates = kcalloc(count, sizeof(*pg->clk_rates), GFP_KERNEL);
+ if (!pg->clk_rates) {
+ kfree(pg->clks);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < count; i++) {
+ pg->clks[i] = of_clk_get(np, i);
+ if (IS_ERR(pg->clks[i])) {
+ err = PTR_ERR(pg->clks[i]);
+ goto err;
+ }
+ }
+
+ pg->num_clks = count;
+
+ return 0;
+
+err:
+ while (i--)
+ clk_put(pg->clks[i]);
+
+ kfree(pg->clk_rates);
+ kfree(pg->clks);
+
+ return err;
+}
+
+static int tegra_powergate_of_get_resets(struct tegra_powergate *pg,
+ struct device_node *np, bool off)
+{
+ struct device *dev = pg->pmc->dev;
+ int err;
+
+ pg->reset = of_reset_control_array_get_exclusive_released(np);
+ if (IS_ERR(pg->reset)) {
+ err = PTR_ERR(pg->reset);
+ dev_err(dev, "failed to get device resets: %d\n", err);
+ return err;
+ }
+
+ err = reset_control_acquire(pg->reset);
+ if (err < 0) {
+ pr_err("failed to acquire resets: %d\n", err);
+ goto out;
+ }
+
+ if (off) {
+ err = reset_control_assert(pg->reset);
+ } else {
+ err = reset_control_deassert(pg->reset);
+ if (err < 0)
+ goto out;
+
+ reset_control_release(pg->reset);
+ }
+
+out:
+ if (err) {
+ reset_control_release(pg->reset);
+ reset_control_put(pg->reset);
+ }
+
+ return err;
+}
+
+static int tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
+{
+ struct device *dev = pmc->dev;
+ struct tegra_powergate *pg;
+ int id, err = 0;
+ bool off;
+
+ pg = kzalloc(sizeof(*pg), GFP_KERNEL);
+ if (!pg)
+ return -ENOMEM;
+
+ id = tegra_powergate_lookup(pmc, np->name);
+ if (id < 0) {
+ dev_err(dev, "powergate lookup failed for %pOFn: %d\n", np, id);
+ err = -ENODEV;
+ goto free_mem;
+ }
+
+ /*
+ * Clear the bit for this powergate so it cannot be managed
+ * directly via the legacy APIs for controlling powergates.
+ */
+ clear_bit(id, pmc->powergates_available);
+
+ pg->id = id;
+ pg->genpd.name = np->name;
+ pg->genpd.power_off = tegra_genpd_power_off;
+ pg->genpd.power_on = tegra_genpd_power_on;
+ pg->pmc = pmc;
+
+ off = !tegra_powergate_is_powered(pmc, pg->id);
+
+ err = tegra_powergate_of_get_clks(pg, np);
+ if (err < 0) {
+ dev_err(dev, "failed to get clocks for %pOFn: %d\n", np, err);
+ goto set_available;
+ }
+
+ err = tegra_powergate_of_get_resets(pg, np, off);
+ if (err < 0) {
+ dev_err(dev, "failed to get resets for %pOFn: %d\n", np, err);
+ goto remove_clks;
+ }
+
+ if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) {
+ if (off)
+ WARN_ON(tegra_powergate_power_up(pg, true));
+
+ goto remove_resets;
+ }
+
+ err = pm_genpd_init(&pg->genpd, NULL, off);
+ if (err < 0) {
+ dev_err(dev, "failed to initialise PM domain %pOFn: %d\n", np,
+ err);
+ goto remove_resets;
+ }
+
+ err = of_genpd_add_provider_simple(np, &pg->genpd);
+ if (err < 0) {
+ dev_err(dev, "failed to add PM domain provider for %pOFn: %d\n",
+ np, err);
+ goto remove_genpd;
+ }
+
+ dev_dbg(dev, "added PM domain %s\n", pg->genpd.name);
+
+ return 0;
+
+remove_genpd:
+ pm_genpd_remove(&pg->genpd);
+
+remove_resets:
+ reset_control_put(pg->reset);
+
+remove_clks:
+ while (pg->num_clks--)
+ clk_put(pg->clks[pg->num_clks]);
+
+ kfree(pg->clks);
+
+set_available:
+ set_bit(id, pmc->powergates_available);
+
+free_mem:
+ kfree(pg);
+
+ return err;
+}
+
+bool tegra_pmc_core_domain_state_synced(void)
+{
+ return pmc->core_domain_state_synced;
+}
+
+static int
+tegra_pmc_core_pd_set_performance_state(struct generic_pm_domain *genpd,
+ unsigned int level)
+{
+ struct dev_pm_opp *opp;
+ int err;
+
+ opp = dev_pm_opp_find_level_ceil(&genpd->dev, &level);
+ if (IS_ERR(opp)) {
+ dev_err(&genpd->dev, "failed to find OPP for level %u: %pe\n",
+ level, opp);
+ return PTR_ERR(opp);
+ }
+
+ mutex_lock(&pmc->powergates_lock);
+ err = dev_pm_opp_set_opp(pmc->dev, opp);
+ mutex_unlock(&pmc->powergates_lock);
+
+ dev_pm_opp_put(opp);
+
+ if (err) {
+ dev_err(&genpd->dev, "failed to set voltage to %duV: %d\n",
+ level, err);
+ return err;
+ }
+
+ return 0;
+}
+
+static unsigned int
+tegra_pmc_core_pd_opp_to_performance_state(struct generic_pm_domain *genpd,
+ struct dev_pm_opp *opp)
+{
+ return dev_pm_opp_get_level(opp);
+}
+
+static int tegra_pmc_core_pd_add(struct tegra_pmc *pmc, struct device_node *np)
+{
+ struct generic_pm_domain *genpd;
+ const char *rname[] = { "core", NULL };
+ int err;
+
+ genpd = devm_kzalloc(pmc->dev, sizeof(*genpd), GFP_KERNEL);
+ if (!genpd)
+ return -ENOMEM;
+
+ genpd->name = "core";
+ genpd->set_performance_state = tegra_pmc_core_pd_set_performance_state;
+ genpd->opp_to_performance_state = tegra_pmc_core_pd_opp_to_performance_state;
+
+ err = devm_pm_opp_set_regulators(pmc->dev, rname);
+ if (err)
+ return dev_err_probe(pmc->dev, err,
+ "failed to set core OPP regulator\n");
+
+ err = pm_genpd_init(genpd, NULL, false);
+ if (err) {
+ dev_err(pmc->dev, "failed to init core genpd: %d\n", err);
+ return err;
+ }
+
+ err = of_genpd_add_provider_simple(np, genpd);
+ if (err) {
+ dev_err(pmc->dev, "failed to add core genpd: %d\n", err);
+ goto remove_genpd;
+ }
+
+ pmc->core_domain_registered = true;
+
+ return 0;
+
+remove_genpd:
+ pm_genpd_remove(genpd);
+
+ return err;
+}
+
+static int tegra_powergate_init(struct tegra_pmc *pmc,
+ struct device_node *parent)
+{
+ struct of_phandle_args child_args, parent_args;
+ struct device_node *np, *child;
+ int err = 0;
+
+ /*
+ * Core power domain is the parent of powergate domains, hence it
+ * should be registered first.
+ */
+ np = of_get_child_by_name(parent, "core-domain");
+ if (np) {
+ err = tegra_pmc_core_pd_add(pmc, np);
+ of_node_put(np);
+ if (err)
+ return err;
+ }
+
+ np = of_get_child_by_name(parent, "powergates");
+ if (!np)
+ return 0;
+
+ for_each_child_of_node(np, child) {
+ err = tegra_powergate_add(pmc, child);
+ if (err < 0) {
+ of_node_put(child);
+ break;
+ }
+
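+ /*
+ * If the powergate references a parent power domain, link the new
+ * domain to it as a subdomain.
+ */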
+ if (of_parse_phandle_with_args(child, "power-domains",
+ "#power-domain-cells",
+ 0, &parent_args))
+ continue;
+
+ child_args.np = child;
+ child_args.args_count = 0;
+
+ err = of_genpd_add_subdomain(&parent_args, &child_args);
+ of_node_put(parent_args.np);
+ if (err) {
+ of_node_put(child);
+ break;
+ }
+ }
+
+ of_node_put(np);
+
+ return err;
+}
+
+static void tegra_powergate_remove(struct generic_pm_domain *genpd)
+{
+ struct tegra_powergate *pg = to_powergate(genpd);
+
+ reset_control_put(pg->reset);
+
+ while (pg->num_clks--)
+ clk_put(pg->clks[pg->num_clks]);
+
+ kfree(pg->clks);
+
+ set_bit(pg->id, pmc->powergates_available);
+
+ kfree(pg);
+}
+
+static void tegra_powergate_remove_all(struct device_node *parent)
+{
+ struct generic_pm_domain *genpd;
+ struct device_node *np, *child;
+
+ np = of_get_child_by_name(parent, "powergates");
+ if (!np)
+ return;
+
+ for_each_child_of_node(np, child) {
+ of_genpd_del_provider(child);
+
+ genpd = of_genpd_remove_last(child);
+ if (IS_ERR(genpd))
+ continue;
+
+ tegra_powergate_remove(genpd);
+ }
+
+ of_node_put(np);
+
+ np = of_get_child_by_name(parent, "core-domain");
+ if (np) {
+ of_genpd_del_provider(np);
+ of_genpd_remove_last(np);
+ }
+}
+
+static const struct tegra_io_pad_soc *
+tegra_io_pad_find(struct tegra_pmc *pmc, enum tegra_io_pad id)
+{
+ unsigned int i;
+
+ for (i = 0; i < pmc->soc->num_io_pads; i++)
+ if (pmc->soc->io_pads[i].id == id)
+ return &pmc->soc->io_pads[i];
+
+ return NULL;
+}
+
+static int tegra_io_pad_prepare(struct tegra_pmc *pmc,
+ const struct tegra_io_pad_soc *pad,
+ unsigned long *request,
+ unsigned long *status,
+ u32 *mask)
+{
+ unsigned long rate, value;
+
+ if (pad->dpd == UINT_MAX)
+ return -EINVAL;
+
+ *request = pad->request;
+ *status = pad->status;
+ *mask = BIT(pad->dpd);
+
+ if (pmc->clk) {
+ rate = pmc->rate;
+ if (!rate) {
+ dev_err(pmc->dev, "failed to get clock rate\n");
+ return -ENODEV;
+ }
+
+ tegra_pmc_writel(pmc, DPD_SAMPLE_ENABLE, DPD_SAMPLE);
+
+ /* must be at least 200 ns, in APB (PCLK) clock cycles */
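+ /* e.g. with a 204 MHz PCLK this yields 5 ns per cycle and 40 cycles */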
+ value = DIV_ROUND_UP(1000000000, rate);
+ value = DIV_ROUND_UP(200, value);
+ tegra_pmc_writel(pmc, value, SEL_DPD_TIM);
+ }
+
+ return 0;
+}
+
+static int tegra_io_pad_poll(struct tegra_pmc *pmc, unsigned long offset,
+ u32 mask, u32 val, unsigned long timeout)
+{
+ u32 value;
+
+ timeout = jiffies + msecs_to_jiffies(timeout);
+
+ while (time_after(timeout, jiffies)) {
+ value = tegra_pmc_readl(pmc, offset);
+ if ((value & mask) == val)
+ return 0;
+
+ usleep_range(250, 1000);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static void tegra_io_pad_unprepare(struct tegra_pmc *pmc)
+{
+ if (pmc->clk)
+ tegra_pmc_writel(pmc, DPD_SAMPLE_DISABLE, DPD_SAMPLE);
+}
+
+/**
+ * tegra_io_pad_power_enable() - enable power to I/O pad
+ * @id: Tegra I/O pad ID for which to enable power
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int tegra_io_pad_power_enable(enum tegra_io_pad id)
+{
+ const struct tegra_io_pad_soc *pad;
+ unsigned long request, status;
+ u32 mask;
+ int err;
+
+ pad = tegra_io_pad_find(pmc, id);
+ if (!pad) {
+ dev_err(pmc->dev, "invalid I/O pad ID %u\n", id);
+ return -ENOENT;
+ }
+
+ mutex_lock(&pmc->powergates_lock);
+
+ err = tegra_io_pad_prepare(pmc, pad, &request, &status, &mask);
+ if (err < 0) {
+ dev_err(pmc->dev, "failed to prepare I/O pad: %d\n", err);
+ goto unlock;
+ }
+
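+ /* request DPD off for the pad, then wait for its status bit to clear */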
+ tegra_pmc_writel(pmc, IO_DPD_REQ_CODE_OFF | mask, request);
+
+ err = tegra_io_pad_poll(pmc, status, mask, 0, 250);
+ if (err < 0) {
+ dev_err(pmc->dev, "failed to enable I/O pad: %d\n", err);
+ goto unlock;
+ }
+
+ tegra_io_pad_unprepare(pmc);
+
+unlock:
+ mutex_unlock(&pmc->powergates_lock);
+ return err;
+}
+EXPORT_SYMBOL(tegra_io_pad_power_enable);
+
+/**
+ * tegra_io_pad_power_disable() - disable power to I/O pad
+ * @id: Tegra I/O pad ID for which to disable power
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int tegra_io_pad_power_disable(enum tegra_io_pad id)
+{
+ const struct tegra_io_pad_soc *pad;
+ unsigned long request, status;
+ u32 mask;
+ int err;
+
+ pad = tegra_io_pad_find(pmc, id);
+ if (!pad) {
+ dev_err(pmc->dev, "invalid I/O pad ID %u\n", id);
+ return -ENOENT;
+ }
+
+ mutex_lock(&pmc->powergates_lock);
+
+ err = tegra_io_pad_prepare(pmc, pad, &request, &status, &mask);
+ if (err < 0) {
+ dev_err(pmc->dev, "failed to prepare I/O pad: %d\n", err);
+ goto unlock;
+ }
+
+ tegra_pmc_writel(pmc, IO_DPD_REQ_CODE_ON | mask, request);
+
+ err = tegra_io_pad_poll(pmc, status, mask, mask, 250);
+ if (err < 0) {
+ dev_err(pmc->dev, "failed to disable I/O pad: %d\n", err);
+ goto unlock;
+ }
+
+ tegra_io_pad_unprepare(pmc);
+
+unlock:
+ mutex_unlock(&pmc->powergates_lock);
+ return err;
+}
+EXPORT_SYMBOL(tegra_io_pad_power_disable);
+
+static int tegra_io_pad_is_powered(struct tegra_pmc *pmc, enum tegra_io_pad id)
+{
+ const struct tegra_io_pad_soc *pad;
+ unsigned long status;
+ u32 mask, value;
+
+ pad = tegra_io_pad_find(pmc, id);
+ if (!pad) {
+ dev_err(pmc->dev, "invalid I/O pad ID %u\n", id);
+ return -ENOENT;
+ }
+
+ if (pad->dpd == UINT_MAX)
+ return -EINVAL;
+
+ status = pad->status;
+ mask = BIT(pad->dpd);
+
+ value = tegra_pmc_readl(pmc, status);
+
+ return !(value & mask);
+}
+
+static int tegra_io_pad_set_voltage(struct tegra_pmc *pmc, enum tegra_io_pad id,
+ int voltage)
+{
+ const struct tegra_io_pad_soc *pad;
+ u32 value;
+
+ pad = tegra_io_pad_find(pmc, id);
+ if (!pad)
+ return -ENOENT;
+
+ if (pad->voltage == UINT_MAX)
+ return -ENOTSUPP;
+
+ mutex_lock(&pmc->powergates_lock);
+
+ if (pmc->soc->has_impl_33v_pwr) {
+ value = tegra_pmc_readl(pmc, PMC_IMPL_E_33V_PWR);
+
+ if (voltage == TEGRA_IO_PAD_VOLTAGE_1V8)
+ value &= ~BIT(pad->voltage);
+ else
+ value |= BIT(pad->voltage);
+
+ tegra_pmc_writel(pmc, value, PMC_IMPL_E_33V_PWR);
+ } else {
+ /* write-enable PMC_PWR_DET_VALUE[pad->voltage] */
+ value = tegra_pmc_readl(pmc, PMC_PWR_DET);
+ value |= BIT(pad->voltage);
+ tegra_pmc_writel(pmc, value, PMC_PWR_DET);
+
+ /* update I/O voltage */
+ value = tegra_pmc_readl(pmc, PMC_PWR_DET_VALUE);
+
+ if (voltage == TEGRA_IO_PAD_VOLTAGE_1V8)
+ value &= ~BIT(pad->voltage);
+ else
+ value |= BIT(pad->voltage);
+
+ tegra_pmc_writel(pmc, value, PMC_PWR_DET_VALUE);
+ }
+
+ mutex_unlock(&pmc->powergates_lock);
+
+ usleep_range(100, 250);
+
+ return 0;
+}
+
+static int tegra_io_pad_get_voltage(struct tegra_pmc *pmc, enum tegra_io_pad id)
+{
+ const struct tegra_io_pad_soc *pad;
+ u32 value;
+
+ pad = tegra_io_pad_find(pmc, id);
+ if (!pad)
+ return -ENOENT;
+
+ if (pad->voltage == UINT_MAX)
+ return -ENOTSUPP;
+
+ if (pmc->soc->has_impl_33v_pwr)
+ value = tegra_pmc_readl(pmc, PMC_IMPL_E_33V_PWR);
+ else
+ value = tegra_pmc_readl(pmc, PMC_PWR_DET_VALUE);
+
+ if ((value & BIT(pad->voltage)) == 0)
+ return TEGRA_IO_PAD_VOLTAGE_1V8;
+
+ return TEGRA_IO_PAD_VOLTAGE_3V3;
+}
+
+/**
+ * tegra_io_rail_power_on() - enable power to I/O rail
+ * @id: Tegra I/O pad ID for which to enable power
+ *
+ * See also: tegra_io_pad_power_enable()
+ */
+int tegra_io_rail_power_on(unsigned int id)
+{
+ return tegra_io_pad_power_enable(id);
+}
+EXPORT_SYMBOL(tegra_io_rail_power_on);
+
+/**
+ * tegra_io_rail_power_off() - disable power to I/O rail
+ * @id: Tegra I/O pad ID for which to disable power
+ *
+ * See also: tegra_io_pad_power_disable()
+ */
+int tegra_io_rail_power_off(unsigned int id)
+{
+ return tegra_io_pad_power_disable(id);
+}
+EXPORT_SYMBOL(tegra_io_rail_power_off);
+
+#ifdef CONFIG_PM_SLEEP
+enum tegra_suspend_mode tegra_pmc_get_suspend_mode(void)
+{
+ return pmc->suspend_mode;
+}
+
+void tegra_pmc_set_suspend_mode(enum tegra_suspend_mode mode)
+{
+ if (mode < TEGRA_SUSPEND_NONE || mode >= TEGRA_MAX_SUSPEND_MODE)
+ return;
+
+ pmc->suspend_mode = mode;
+}
+
+void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode)
+{
+ unsigned long long rate = 0;
+ u64 ticks;
+ u32 value;
+
+ switch (mode) {
+ case TEGRA_SUSPEND_LP1:
+ rate = 32768;
+ break;
+
+ case TEGRA_SUSPEND_LP2:
+ rate = pmc->rate;
+ break;
+
+ default:
+ break;
+ }
+
+ if (WARN_ON_ONCE(rate == 0))
+ rate = 100000000;
+
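+ /* convert the power-good/off times from microseconds to clock ticks, rounding up */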
+ ticks = pmc->cpu_good_time * rate + USEC_PER_SEC - 1;
+ do_div(ticks, USEC_PER_SEC);
+ tegra_pmc_writel(pmc, ticks, PMC_CPUPWRGOOD_TIMER);
+
+ ticks = pmc->cpu_off_time * rate + USEC_PER_SEC - 1;
+ do_div(ticks, USEC_PER_SEC);
+ tegra_pmc_writel(pmc, ticks, PMC_CPUPWROFF_TIMER);
+
+ value = tegra_pmc_readl(pmc, PMC_CNTRL);
+ value &= ~PMC_CNTRL_SIDE_EFFECT_LP0;
+ value |= PMC_CNTRL_CPU_PWRREQ_OE;
+ tegra_pmc_writel(pmc, value, PMC_CNTRL);
+}
+#endif
+
+static int tegra_pmc_parse_dt(struct tegra_pmc *pmc, struct device_node *np)
+{
+ u32 value, values[2];
+
+ if (of_property_read_u32(np, "nvidia,suspend-mode", &value)) {
+ pmc->suspend_mode = TEGRA_SUSPEND_NONE;
+ } else {
+ switch (value) {
+ case 0:
+ pmc->suspend_mode = TEGRA_SUSPEND_LP0;
+ break;
+
+ case 1:
+ pmc->suspend_mode = TEGRA_SUSPEND_LP1;
+ break;
+
+ case 2:
+ pmc->suspend_mode = TEGRA_SUSPEND_LP2;
+ break;
+
+ default:
+ pmc->suspend_mode = TEGRA_SUSPEND_NONE;
+ break;
+ }
+ }
+
+ pmc->suspend_mode = tegra_pm_validate_suspend_mode(pmc->suspend_mode);
+
+ if (of_property_read_u32(np, "nvidia,cpu-pwr-good-time", &value))
+ pmc->suspend_mode = TEGRA_SUSPEND_NONE;
+
+ pmc->cpu_good_time = value;
+
+ if (of_property_read_u32(np, "nvidia,cpu-pwr-off-time", &value))
+ pmc->suspend_mode = TEGRA_SUSPEND_NONE;
+
+ pmc->cpu_off_time = value;
+
+ if (of_property_read_u32_array(np, "nvidia,core-pwr-good-time",
+ values, ARRAY_SIZE(values)))
+ pmc->suspend_mode = TEGRA_SUSPEND_NONE;
+
+ pmc->core_osc_time = values[0];
+ pmc->core_pmu_time = values[1];
+
+ if (of_property_read_u32(np, "nvidia,core-pwr-off-time", &value))
+ pmc->suspend_mode = TEGRA_SUSPEND_NONE;
+
+ pmc->core_off_time = value;
+
+ pmc->corereq_high = of_property_read_bool(np,
+ "nvidia,core-power-req-active-high");
+
+ pmc->sysclkreq_high = of_property_read_bool(np,
+ "nvidia,sys-clock-req-active-high");
+
+ pmc->combined_req = of_property_read_bool(np,
+ "nvidia,combined-power-req");
+
+ pmc->cpu_pwr_good_en = of_property_read_bool(np,
+ "nvidia,cpu-pwr-good-en");
+
+ if (of_property_read_u32_array(np, "nvidia,lp0-vec", values,
+ ARRAY_SIZE(values)))
+ if (pmc->suspend_mode == TEGRA_SUSPEND_LP0)
+ pmc->suspend_mode = TEGRA_SUSPEND_LP1;
+
+ pmc->lp0_vec_phys = values[0];
+ pmc->lp0_vec_size = values[1];
+
+ return 0;
+}
+
+static int tegra_pmc_init(struct tegra_pmc *pmc)
+{
+ if (pmc->soc->max_wake_events > 0) {
+ pmc->wake_type_level_map = bitmap_zalloc(pmc->soc->max_wake_events, GFP_KERNEL);
+ if (!pmc->wake_type_level_map)
+ return -ENOMEM;
+
+ pmc->wake_type_dual_edge_map = bitmap_zalloc(pmc->soc->max_wake_events, GFP_KERNEL);
+ if (!pmc->wake_type_dual_edge_map)
+ return -ENOMEM;
+
+ pmc->wake_sw_status_map = bitmap_zalloc(pmc->soc->max_wake_events, GFP_KERNEL);
+ if (!pmc->wake_sw_status_map)
+ return -ENOMEM;
+
+ pmc->wake_cntrl_level_map = bitmap_zalloc(pmc->soc->max_wake_events, GFP_KERNEL);
+ if (!pmc->wake_cntrl_level_map)
+ return -ENOMEM;
+ }
+
+ if (pmc->soc->init)
+ pmc->soc->init(pmc);
+
+ return 0;
+}
+
+static void tegra_pmc_init_tsense_reset(struct tegra_pmc *pmc)
+{
+ static const char disabled[] = "emergency thermal reset disabled";
+ u32 pmu_addr, ctrl_id, reg_addr, reg_data, pinmux;
+ struct device *dev = pmc->dev;
+ struct device_node *np;
+ u32 value, checksum;
+
+ if (!pmc->soc->has_tsense_reset)
+ return;
+
+ np = of_get_child_by_name(pmc->dev->of_node, "i2c-thermtrip");
+ if (!np) {
+ dev_warn(dev, "i2c-thermtrip node not found, %s.\n", disabled);
+ return;
+ }
+
+ if (of_property_read_u32(np, "nvidia,i2c-controller-id", &ctrl_id)) {
+ dev_err(dev, "I2C controller ID missing, %s.\n", disabled);
+ goto out;
+ }
+
+ if (of_property_read_u32(np, "nvidia,bus-addr", &pmu_addr)) {
+ dev_err(dev, "nvidia,bus-addr missing, %s.\n", disabled);
+ goto out;
+ }
+
+ if (of_property_read_u32(np, "nvidia,reg-addr", &reg_addr)) {
+ dev_err(dev, "nvidia,reg-addr missing, %s.\n", disabled);
+ goto out;
+ }
+
+ if (of_property_read_u32(np, "nvidia,reg-data", &reg_data)) {
+ dev_err(dev, "nvidia,reg-data missing, %s.\n", disabled);
+ goto out;
+ }
+
+ if (of_property_read_u32(np, "nvidia,pinmux-id", &pinmux))
+ pinmux = 0;
+
+ value = tegra_pmc_readl(pmc, PMC_SENSOR_CTRL);
+ value |= PMC_SENSOR_CTRL_SCRATCH_WRITE;
+ tegra_pmc_writel(pmc, value, PMC_SENSOR_CTRL);
+
+ value = (reg_data << PMC_SCRATCH54_DATA_SHIFT) |
+ (reg_addr << PMC_SCRATCH54_ADDR_SHIFT);
+ tegra_pmc_writel(pmc, value, PMC_SCRATCH54);
+
+ value = PMC_SCRATCH55_RESET_TEGRA;
+ value |= ctrl_id << PMC_SCRATCH55_CNTRL_ID_SHIFT;
+ value |= pinmux << PMC_SCRATCH55_PINMUX_SHIFT;
+ value |= pmu_addr << PMC_SCRATCH55_I2CSLV1_SHIFT;
+
+ /*
+ * Calculate checksum of SCRATCH54, SCRATCH55 fields. Bits 23:16 will
+ * contain the checksum and are currently zero, so they are not added.
+ */
+ checksum = reg_addr + reg_data + (value & 0xff) + ((value >> 8) & 0xff)
+ + ((value >> 24) & 0xff);
+ checksum &= 0xff;
+ checksum = 0x100 - checksum;
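+ /* with the checksum included, the byte-wise sum of the fields is 0 mod 0x100 */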
+
+ value |= checksum << PMC_SCRATCH55_CHECKSUM_SHIFT;
+
+ tegra_pmc_writel(pmc, value, PMC_SCRATCH55);
+
+ value = tegra_pmc_readl(pmc, PMC_SENSOR_CTRL);
+ value |= PMC_SENSOR_CTRL_ENABLE_RST;
+ tegra_pmc_writel(pmc, value, PMC_SENSOR_CTRL);
+
+ dev_info(pmc->dev, "emergency thermal reset enabled\n");
+
+out:
+ of_node_put(np);
+}
+
+static int tegra_io_pad_pinctrl_get_groups_count(struct pinctrl_dev *pctl_dev)
+{
+ struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl_dev);
+
+ return pmc->soc->num_io_pads;
+}
+
+static const char *tegra_io_pad_pinctrl_get_group_name(struct pinctrl_dev *pctl,
+ unsigned int group)
+{
+ struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl);
+
+ return pmc->soc->io_pads[group].name;
+}
+
+static int tegra_io_pad_pinctrl_get_group_pins(struct pinctrl_dev *pctl_dev,
+ unsigned int group,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl_dev);
+
+ *pins = &pmc->soc->io_pads[group].id;
+ *num_pins = 1;
+
+ return 0;
+}
+
+static const struct pinctrl_ops tegra_io_pad_pinctrl_ops = {
+ .get_groups_count = tegra_io_pad_pinctrl_get_groups_count,
+ .get_group_name = tegra_io_pad_pinctrl_get_group_name,
+ .get_group_pins = tegra_io_pad_pinctrl_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .dt_free_map = pinconf_generic_dt_free_map,
+};
+
+static int tegra_io_pad_pinconf_get(struct pinctrl_dev *pctl_dev,
+ unsigned int pin, unsigned long *config)
+{
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl_dev);
+ const struct tegra_io_pad_soc *pad;
+ int ret;
+ u32 arg;
+
+ pad = tegra_io_pad_find(pmc, pin);
+ if (!pad)
+ return -EINVAL;
+
+ switch (param) {
+ case PIN_CONFIG_POWER_SOURCE:
+ ret = tegra_io_pad_get_voltage(pmc, pad->id);
+ if (ret < 0)
+ return ret;
+
+ arg = ret;
+ break;
+
+ case PIN_CONFIG_MODE_LOW_POWER:
+ ret = tegra_io_pad_is_powered(pmc, pad->id);
+ if (ret < 0)
+ return ret;
+
+ arg = !ret;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+
+ return 0;
+}
+
+static int tegra_io_pad_pinconf_set(struct pinctrl_dev *pctl_dev,
+ unsigned int pin, unsigned long *configs,
+ unsigned int num_configs)
+{
+ struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl_dev);
+ const struct tegra_io_pad_soc *pad;
+ enum pin_config_param param;
+ unsigned int i;
+ int err;
+ u32 arg;
+
+ pad = tegra_io_pad_find(pmc, pin);
+ if (!pad)
+ return -EINVAL;
+
+ for (i = 0; i < num_configs; ++i) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_MODE_LOW_POWER:
+ if (arg)
+ err = tegra_io_pad_power_disable(pad->id);
+ else
+ err = tegra_io_pad_power_enable(pad->id);
+ if (err)
+ return err;
+ break;
+ case PIN_CONFIG_POWER_SOURCE:
+ if (arg != TEGRA_IO_PAD_VOLTAGE_1V8 &&
+ arg != TEGRA_IO_PAD_VOLTAGE_3V3)
+ return -EINVAL;
+ err = tegra_io_pad_set_voltage(pmc, pad->id, arg);
+ if (err)
+ return err;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static const struct pinconf_ops tegra_io_pad_pinconf_ops = {
+ .pin_config_get = tegra_io_pad_pinconf_get,
+ .pin_config_set = tegra_io_pad_pinconf_set,
+ .is_generic = true,
+};
+
+static struct pinctrl_desc tegra_pmc_pctl_desc = {
+ .pctlops = &tegra_io_pad_pinctrl_ops,
+ .confops = &tegra_io_pad_pinconf_ops,
+};
+
+static int tegra_pmc_pinctrl_init(struct tegra_pmc *pmc)
+{
+ int err;
+
+ if (!pmc->soc->num_pin_descs)
+ return 0;
+
+ tegra_pmc_pctl_desc.name = dev_name(pmc->dev);
+ tegra_pmc_pctl_desc.pins = pmc->soc->pin_descs;
+ tegra_pmc_pctl_desc.npins = pmc->soc->num_pin_descs;
+
+ pmc->pctl_dev = devm_pinctrl_register(pmc->dev, &tegra_pmc_pctl_desc,
+ pmc);
+ if (IS_ERR(pmc->pctl_dev)) {
+ err = PTR_ERR(pmc->pctl_dev);
+ dev_err(pmc->dev, "failed to register pin controller: %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+static ssize_t reset_reason_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 value;
+
+ value = tegra_pmc_readl(pmc, pmc->soc->regs->rst_status);
+ value &= pmc->soc->regs->rst_source_mask;
+ value >>= pmc->soc->regs->rst_source_shift;
+
+ if (WARN_ON(value >= pmc->soc->num_reset_sources))
+ return sprintf(buf, "%s\n", "UNKNOWN");
+
+ return sprintf(buf, "%s\n", pmc->soc->reset_sources[value]);
+}
+
+static DEVICE_ATTR_RO(reset_reason);
+
+static ssize_t reset_level_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 value;
+
+ value = tegra_pmc_readl(pmc, pmc->soc->regs->rst_status);
+ value &= pmc->soc->regs->rst_level_mask;
+ value >>= pmc->soc->regs->rst_level_shift;
+
+ if (WARN_ON(value >= pmc->soc->num_reset_levels))
+ return sprintf(buf, "%s\n", "UNKNOWN");
+
+ return sprintf(buf, "%s\n", pmc->soc->reset_levels[value]);
+}
+
+static DEVICE_ATTR_RO(reset_level);
+
+static void tegra_pmc_reset_sysfs_init(struct tegra_pmc *pmc)
+{
+ struct device *dev = pmc->dev;
+ int err = 0;
+
+ if (pmc->soc->reset_sources) {
+ err = device_create_file(dev, &dev_attr_reset_reason);
+ if (err < 0)
+ dev_warn(dev,
+ "failed to create attr \"reset_reason\": %d\n",
+ err);
+ }
+
+ if (pmc->soc->reset_levels) {
+ err = device_create_file(dev, &dev_attr_reset_level);
+ if (err < 0)
+ dev_warn(dev,
+ "failed to create attr \"reset_level\": %d\n",
+ err);
+ }
+}
+
+static int tegra_pmc_irq_translate(struct irq_domain *domain,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ if (WARN_ON(fwspec->param_count < 2))
+ return -EINVAL;
+
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+
+ return 0;
+}
+
+static int tegra_pmc_irq_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int num_irqs, void *data)
+{
+ struct tegra_pmc *pmc = domain->host_data;
+ const struct tegra_pmc_soc *soc = pmc->soc;
+ struct irq_fwspec *fwspec = data;
+ unsigned int i;
+ int err = 0;
+
+ if (WARN_ON(num_irqs > 1))
+ return -EINVAL;
+
+ for (i = 0; i < soc->num_wake_events; i++) {
+ const struct tegra_wake_event *event = &soc->wake_events[i];
+
+ /* IRQ and simple wake events */
+ if (fwspec->param_count == 2) {
+ struct irq_fwspec spec;
+
+ if (event->id != fwspec->param[0])
+ continue;
+
+ err = irq_domain_set_hwirq_and_chip(domain, virq,
+ event->id,
+ &pmc->irq, pmc);
+ if (err < 0)
+ break;
+
+ /* simple hierarchies stop at the PMC level */
+ if (event->irq == 0) {
+ err = irq_domain_disconnect_hierarchy(domain->parent, virq);
+ break;
+ }
+
+ spec.fwnode = &pmc->dev->of_node->fwnode;
+ spec.param_count = 3;
+ spec.param[0] = GIC_SPI;
+ spec.param[1] = event->irq;
+ spec.param[2] = fwspec->param[1];
+
+ err = irq_domain_alloc_irqs_parent(domain, virq,
+ num_irqs, &spec);
+
+ break;
+ }
+
+ /* GPIO wake events */
+ if (fwspec->param_count == 3) {
+ if (event->gpio.instance != fwspec->param[0] ||
+ event->gpio.pin != fwspec->param[1])
+ continue;
+
+ err = irq_domain_set_hwirq_and_chip(domain, virq,
+ event->id,
+ &pmc->irq, pmc);
+
+ /* GPIO hierarchies stop at the PMC level */
+ if (!err && domain->parent)
+ err = irq_domain_disconnect_hierarchy(domain->parent,
+ virq);
+ break;
+ }
+ }
+
+ /* If there is no wake-up event, there is no PMC mapping */
+ if (i == soc->num_wake_events)
+ err = irq_domain_disconnect_hierarchy(domain, virq);
+
+ return err;
+}
+
+static const struct irq_domain_ops tegra_pmc_irq_domain_ops = {
+ .translate = tegra_pmc_irq_translate,
+ .alloc = tegra_pmc_irq_alloc,
+};
+
+static int tegra210_pmc_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+ struct tegra_pmc *pmc = irq_data_get_irq_chip_data(data);
+ unsigned int offset, bit;
+ u32 value;
+
+ offset = data->hwirq / 32;
+ bit = data->hwirq % 32;
+
+ /* clear wake status */
+ tegra_pmc_writel(pmc, 0, PMC_SW_WAKE_STATUS);
+ tegra_pmc_writel(pmc, 0, PMC_SW_WAKE2_STATUS);
+
+ tegra_pmc_writel(pmc, 0, PMC_WAKE_STATUS);
+ tegra_pmc_writel(pmc, 0, PMC_WAKE2_STATUS);
+
+ /* enable PMC wake */
+ if (data->hwirq >= 32)
+ offset = PMC_WAKE2_MASK;
+ else
+ offset = PMC_WAKE_MASK;
+
+ value = tegra_pmc_readl(pmc, offset);
+
+ if (on)
+ value |= BIT(bit);
+ else
+ value &= ~BIT(bit);
+
+ tegra_pmc_writel(pmc, value, offset);
+
+ return 0;
+}
+
+static int tegra210_pmc_irq_set_type(struct irq_data *data, unsigned int type)
+{
+ struct tegra_pmc *pmc = irq_data_get_irq_chip_data(data);
+ unsigned int offset, bit;
+ u32 value;
+
+ offset = data->hwirq / 32;
+ bit = data->hwirq % 32;
+
+ if (data->hwirq >= 32)
+ offset = PMC_WAKE2_LEVEL;
+ else
+ offset = PMC_WAKE_LEVEL;
+
+ value = tegra_pmc_readl(pmc, offset);
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_LEVEL_HIGH:
+ value |= BIT(bit);
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ case IRQ_TYPE_LEVEL_LOW:
+ value &= ~BIT(bit);
+ break;
+
+ case IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING:
+ value ^= BIT(bit);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ tegra_pmc_writel(pmc, value, offset);
+
+ return 0;
+}
+
+static void tegra186_pmc_set_wake_filters(struct tegra_pmc *pmc)
+{
+ u32 value;
+
+ /* SW Wake (wake83) needs SR_CAPTURE filter to be enabled */
+ value = readl(pmc->wake + WAKE_AOWAKE_CNTRL(SW_WAKE_ID));
+ value |= WAKE_AOWAKE_CNTRL_SR_CAPTURE_EN;
+ writel(value, pmc->wake + WAKE_AOWAKE_CNTRL(SW_WAKE_ID));
+ dev_dbg(pmc->dev, "WAKE_AOWAKE_CNTRL_83 = 0x%x\n", value);
+}
+
+static int tegra186_pmc_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+ struct tegra_pmc *pmc = irq_data_get_irq_chip_data(data);
+ unsigned int offset, bit;
+ u32 value;
+
+ offset = data->hwirq / 32;
+ bit = data->hwirq % 32;
+
+ /* clear wake status */
+ writel(0x1, pmc->wake + WAKE_AOWAKE_STATUS_W(data->hwirq));
+
+ /* route wake to tier 2 */
+ value = readl(pmc->wake + WAKE_AOWAKE_TIER2_ROUTING(offset));
+
+ if (!on)
+ value &= ~(1 << bit);
+ else
+ value |= 1 << bit;
+
+ writel(value, pmc->wake + WAKE_AOWAKE_TIER2_ROUTING(offset));
+
+ /* enable wakeup event */
+ writel(!!on, pmc->wake + WAKE_AOWAKE_MASK_W(data->hwirq));
+
+ return 0;
+}
+
+static int tegra186_pmc_irq_set_type(struct irq_data *data, unsigned int type)
+{
+ struct tegra_pmc *pmc = irq_data_get_irq_chip_data(data);
+ u32 value;
+
+ value = readl(pmc->wake + WAKE_AOWAKE_CNTRL(data->hwirq));
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_LEVEL_HIGH:
+ value |= WAKE_AOWAKE_CNTRL_LEVEL;
+ set_bit(data->hwirq, pmc->wake_type_level_map);
+ clear_bit(data->hwirq, pmc->wake_type_dual_edge_map);
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ case IRQ_TYPE_LEVEL_LOW:
+ value &= ~WAKE_AOWAKE_CNTRL_LEVEL;
+ clear_bit(data->hwirq, pmc->wake_type_level_map);
+ clear_bit(data->hwirq, pmc->wake_type_dual_edge_map);
+ break;
+
+ case IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING:
+ value ^= WAKE_AOWAKE_CNTRL_LEVEL;
+ clear_bit(data->hwirq, pmc->wake_type_level_map);
+ set_bit(data->hwirq, pmc->wake_type_dual_edge_map);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ writel(value, pmc->wake + WAKE_AOWAKE_CNTRL(data->hwirq));
+
+ return 0;
+}
+
+static void tegra_irq_mask_parent(struct irq_data *data)
+{
+ if (data->parent_data)
+ irq_chip_mask_parent(data);
+}
+
+static void tegra_irq_unmask_parent(struct irq_data *data)
+{
+ if (data->parent_data)
+ irq_chip_unmask_parent(data);
+}
+
+static void tegra_irq_eoi_parent(struct irq_data *data)
+{
+ if (data->parent_data)
+ irq_chip_eoi_parent(data);
+}
+
+static int tegra_irq_set_affinity_parent(struct irq_data *data,
+ const struct cpumask *dest,
+ bool force)
+{
+ if (data->parent_data)
+ return irq_chip_set_affinity_parent(data, dest, force);
+
+ return -EINVAL;
+}
+
+static int tegra_pmc_irq_init(struct tegra_pmc *pmc)
+{
+ struct irq_domain *parent = NULL;
+ struct device_node *np;
+
+ np = of_irq_find_parent(pmc->dev->of_node);
+ if (np) {
+ parent = irq_find_host(np);
+ of_node_put(np);
+ }
+
+ if (!parent)
+ return 0;
+
+ pmc->irq.name = dev_name(pmc->dev);
+ pmc->irq.irq_mask = tegra_irq_mask_parent;
+ pmc->irq.irq_unmask = tegra_irq_unmask_parent;
+ pmc->irq.irq_eoi = tegra_irq_eoi_parent;
+ pmc->irq.irq_set_affinity = tegra_irq_set_affinity_parent;
+ pmc->irq.irq_set_type = pmc->soc->irq_set_type;
+ pmc->irq.irq_set_wake = pmc->soc->irq_set_wake;
+
+ pmc->domain = irq_domain_add_hierarchy(parent, 0, 96, pmc->dev->of_node,
+ &tegra_pmc_irq_domain_ops, pmc);
+ if (!pmc->domain) {
+ dev_err(pmc->dev, "failed to allocate domain\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int tegra_pmc_clk_notify_cb(struct notifier_block *nb,
+ unsigned long action, void *ptr)
+{
+ struct tegra_pmc *pmc = container_of(nb, struct tegra_pmc, clk_nb);
+ struct clk_notifier_data *data = ptr;
+
+ switch (action) {
+ case PRE_RATE_CHANGE:
+ mutex_lock(&pmc->powergates_lock);
+ break;
+
+ case POST_RATE_CHANGE:
+ pmc->rate = data->new_rate;
+ fallthrough;
+
+ case ABORT_RATE_CHANGE:
+ mutex_unlock(&pmc->powergates_lock);
+ break;
+
+ default:
+ WARN_ON_ONCE(1);
+ return notifier_from_errno(-EINVAL);
+ }
+
+ return NOTIFY_OK;
+}
+
+static void pmc_clk_fence_udelay(u32 offset)
+{
+ tegra_pmc_readl(pmc, offset);
+ /* allow 2 us for the PMC clock to propagate */
+ udelay(2);
+}
+
+static u8 pmc_clk_mux_get_parent(struct clk_hw *hw)
+{
+ struct pmc_clk *clk = to_pmc_clk(hw);
+ u32 val;
+
+ val = tegra_pmc_readl(pmc, clk->offs) >> clk->mux_shift;
+ val &= PMC_CLK_OUT_MUX_MASK;
+
+ return val;
+}
+
+static int pmc_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct pmc_clk *clk = to_pmc_clk(hw);
+ u32 val;
+
+ val = tegra_pmc_readl(pmc, clk->offs);
+ val &= ~(PMC_CLK_OUT_MUX_MASK << clk->mux_shift);
+ val |= index << clk->mux_shift;
+ tegra_pmc_writel(pmc, val, clk->offs);
+ pmc_clk_fence_udelay(clk->offs);
+
+ return 0;
+}
+
+static int pmc_clk_is_enabled(struct clk_hw *hw)
+{
+ struct pmc_clk *clk = to_pmc_clk(hw);
+ u32 val;
+
+ val = tegra_pmc_readl(pmc, clk->offs) & BIT(clk->force_en_shift);
+
+ return val ? 1 : 0;
+}
+
+static void pmc_clk_set_state(unsigned long offs, u32 shift, int state)
+{
+ u32 val;
+
+ val = tegra_pmc_readl(pmc, offs);
+ val = state ? (val | BIT(shift)) : (val & ~BIT(shift));
+ tegra_pmc_writel(pmc, val, offs);
+ pmc_clk_fence_udelay(offs);
+}
+
+static int pmc_clk_enable(struct clk_hw *hw)
+{
+ struct pmc_clk *clk = to_pmc_clk(hw);
+
+ pmc_clk_set_state(clk->offs, clk->force_en_shift, 1);
+
+ return 0;
+}
+
+static void pmc_clk_disable(struct clk_hw *hw)
+{
+ struct pmc_clk *clk = to_pmc_clk(hw);
+
+ pmc_clk_set_state(clk->offs, clk->force_en_shift, 0);
+}
+
+static const struct clk_ops pmc_clk_ops = {
+ .get_parent = pmc_clk_mux_get_parent,
+ .set_parent = pmc_clk_mux_set_parent,
+ .determine_rate = __clk_mux_determine_rate,
+ .is_enabled = pmc_clk_is_enabled,
+ .enable = pmc_clk_enable,
+ .disable = pmc_clk_disable,
+};
+
+static struct clk *
+tegra_pmc_clk_out_register(struct tegra_pmc *pmc,
+ const struct pmc_clk_init_data *data,
+ unsigned long offset)
+{
+ struct clk_init_data init;
+ struct pmc_clk *pmc_clk;
+
+ pmc_clk = devm_kzalloc(pmc->dev, sizeof(*pmc_clk), GFP_KERNEL);
+ if (!pmc_clk)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = data->name;
+ init.ops = &pmc_clk_ops;
+ init.parent_names = data->parents;
+ init.num_parents = data->num_parents;
+ init.flags = CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT |
+ CLK_SET_PARENT_GATE;
+
+ pmc_clk->hw.init = &init;
+ pmc_clk->offs = offset;
+ pmc_clk->mux_shift = data->mux_shift;
+ pmc_clk->force_en_shift = data->force_en_shift;
+
+ return clk_register(NULL, &pmc_clk->hw);
+}
+
+static int pmc_clk_gate_is_enabled(struct clk_hw *hw)
+{
+ struct pmc_clk_gate *gate = to_pmc_clk_gate(hw);
+
+ return tegra_pmc_readl(pmc, gate->offs) & BIT(gate->shift) ? 1 : 0;
+}
+
+static int pmc_clk_gate_enable(struct clk_hw *hw)
+{
+ struct pmc_clk_gate *gate = to_pmc_clk_gate(hw);
+
+ pmc_clk_set_state(gate->offs, gate->shift, 1);
+
+ return 0;
+}
+
+static void pmc_clk_gate_disable(struct clk_hw *hw)
+{
+ struct pmc_clk_gate *gate = to_pmc_clk_gate(hw);
+
+ pmc_clk_set_state(gate->offs, gate->shift, 0);
+}
+
+static const struct clk_ops pmc_clk_gate_ops = {
+ .is_enabled = pmc_clk_gate_is_enabled,
+ .enable = pmc_clk_gate_enable,
+ .disable = pmc_clk_gate_disable,
+};
+
+static struct clk *
+tegra_pmc_clk_gate_register(struct tegra_pmc *pmc, const char *name,
+ const char *parent_name, unsigned long offset,
+ u32 shift)
+{
+ struct clk_init_data init;
+ struct pmc_clk_gate *gate;
+
+ gate = devm_kzalloc(pmc->dev, sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &pmc_clk_gate_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = 0;
+
+ gate->hw.init = &init;
+ gate->offs = offset;
+ gate->shift = shift;
+
+ return clk_register(NULL, &gate->hw);
+}
+
+static void tegra_pmc_clock_register(struct tegra_pmc *pmc,
+ struct device_node *np)
+{
+ struct clk *clk;
+ struct clk_onecell_data *clk_data;
+ unsigned int num_clks;
+ int i, err;
+
+ num_clks = pmc->soc->num_pmc_clks;
+ if (pmc->soc->has_blink_output)
+ num_clks += 1;
+
+ if (!num_clks)
+ return;
+
+ clk_data = devm_kmalloc(pmc->dev, sizeof(*clk_data), GFP_KERNEL);
+ if (!clk_data)
+ return;
+
+ clk_data->clks = devm_kcalloc(pmc->dev, TEGRA_PMC_CLK_MAX,
+ sizeof(*clk_data->clks), GFP_KERNEL);
+ if (!clk_data->clks)
+ return;
+
+ clk_data->clk_num = TEGRA_PMC_CLK_MAX;
+
+ for (i = 0; i < TEGRA_PMC_CLK_MAX; i++)
+ clk_data->clks[i] = ERR_PTR(-ENOENT);
+
+ for (i = 0; i < pmc->soc->num_pmc_clks; i++) {
+ const struct pmc_clk_init_data *data;
+
+ data = pmc->soc->pmc_clks_data + i;
+
+ clk = tegra_pmc_clk_out_register(pmc, data, PMC_CLK_OUT_CNTRL);
+ if (IS_ERR(clk)) {
+ dev_warn(pmc->dev, "unable to register clock %s: %d\n",
+ data->name, PTR_ERR_OR_ZERO(clk));
+ return;
+ }
+
+ err = clk_register_clkdev(clk, data->name, NULL);
+ if (err) {
+ dev_warn(pmc->dev,
+ "unable to register %s clock lookup: %d\n",
+ data->name, err);
+ return;
+ }
+
+ clk_data->clks[data->clk_id] = clk;
+ }
+
+ if (pmc->soc->has_blink_output) {
+ tegra_pmc_writel(pmc, 0x0, PMC_BLINK_TIMER);
+ clk = tegra_pmc_clk_gate_register(pmc,
+ "pmc_blink_override",
+ "clk_32k",
+ PMC_DPD_PADS_ORIDE,
+ PMC_DPD_PADS_ORIDE_BLINK);
+ if (IS_ERR(clk)) {
+ dev_warn(pmc->dev,
+ "unable to register pmc_blink_override: %d\n",
+ PTR_ERR_OR_ZERO(clk));
+ return;
+ }
+
+ clk = tegra_pmc_clk_gate_register(pmc, "pmc_blink",
+ "pmc_blink_override",
+ PMC_CNTRL,
+ PMC_CNTRL_BLINK_EN);
+ if (IS_ERR(clk)) {
+ dev_warn(pmc->dev,
+ "unable to register pmc_blink: %d\n",
+ PTR_ERR_OR_ZERO(clk));
+ return;
+ }
+
+ err = clk_register_clkdev(clk, "pmc_blink", NULL);
+ if (err) {
+ dev_warn(pmc->dev,
+ "unable to register pmc_blink lookup: %d\n",
+ err);
+ return;
+ }
+
+ clk_data->clks[TEGRA_PMC_CLK_BLINK] = clk;
+ }
+
+ err = of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
+ if (err)
+ dev_warn(pmc->dev, "failed to add pmc clock provider: %d\n",
+ err);
+}
+
+static const struct regmap_range pmc_usb_sleepwalk_ranges[] = {
+ regmap_reg_range(PMC_USB_DEBOUNCE_DEL, PMC_USB_AO),
+ regmap_reg_range(PMC_UTMIP_UHSIC_TRIGGERS, PMC_UTMIP_UHSIC_SAVED_STATE),
+ regmap_reg_range(PMC_UTMIP_TERM_PAD_CFG, PMC_UTMIP_UHSIC_FAKE),
+ regmap_reg_range(PMC_UTMIP_UHSIC_LINE_WAKEUP, PMC_UTMIP_UHSIC_LINE_WAKEUP),
+ regmap_reg_range(PMC_UTMIP_BIAS_MASTER_CNTRL, PMC_UTMIP_MASTER_CONFIG),
+ regmap_reg_range(PMC_UTMIP_UHSIC2_TRIGGERS, PMC_UTMIP_MASTER2_CONFIG),
+ regmap_reg_range(PMC_UTMIP_PAD_CFG0, PMC_UTMIP_UHSIC_SLEEP_CFG1),
+ regmap_reg_range(PMC_UTMIP_SLEEPWALK_P3, PMC_UTMIP_SLEEPWALK_P3),
+};
+
+static const struct regmap_access_table pmc_usb_sleepwalk_table = {
+ .yes_ranges = pmc_usb_sleepwalk_ranges,
+ .n_yes_ranges = ARRAY_SIZE(pmc_usb_sleepwalk_ranges),
+};
+
+static int tegra_pmc_regmap_readl(void *context, unsigned int offset, unsigned int *value)
+{
+ struct tegra_pmc *pmc = context;
+
+ *value = tegra_pmc_readl(pmc, offset);
+ return 0;
+}
+
+static int tegra_pmc_regmap_writel(void *context, unsigned int offset, unsigned int value)
+{
+ struct tegra_pmc *pmc = context;
+
+ tegra_pmc_writel(pmc, value, offset);
+ return 0;
+}
+
+static const struct regmap_config usb_sleepwalk_regmap_config = {
+ .name = "usb_sleepwalk",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .fast_io = true,
+ .rd_table = &pmc_usb_sleepwalk_table,
+ .wr_table = &pmc_usb_sleepwalk_table,
+ .reg_read = tegra_pmc_regmap_readl,
+ .reg_write = tegra_pmc_regmap_writel,
+};
+
+static int tegra_pmc_regmap_init(struct tegra_pmc *pmc)
+{
+ struct regmap *regmap;
+ int err;
+
+ if (pmc->soc->has_usb_sleepwalk) {
+ regmap = devm_regmap_init(pmc->dev, NULL, pmc, &usb_sleepwalk_regmap_config);
+ if (IS_ERR(regmap)) {
+ err = PTR_ERR(regmap);
+ dev_err(pmc->dev, "failed to allocate register map (%d)\n", err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void tegra_pmc_reset_suspend_mode(void *data)
+{
+ pmc->suspend_mode = TEGRA_SUSPEND_NOT_READY;
+}
+
+static int tegra_pmc_probe(struct platform_device *pdev)
+{
+ void __iomem *base;
+ struct resource *res;
+ int err;
+
+ /*
+ * Early initialisation should have configured an initial
+ * register mapping and set up the SoC data pointer. If these
+ * are not valid then something went badly wrong!
+ */
+ if (WARN_ON(!pmc->base || !pmc->soc))
+ return -ENODEV;
+
+ err = tegra_pmc_parse_dt(pmc, pdev->dev.of_node);
+ if (err < 0)
+ return err;
+
+ err = devm_add_action_or_reset(&pdev->dev, tegra_pmc_reset_suspend_mode,
+ NULL);
+ if (err)
+ return err;
+
+ /* take over the memory region from the early initialization */
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "wake");
+ if (res) {
+ pmc->wake = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pmc->wake))
+ return PTR_ERR(pmc->wake);
+ } else {
+ pmc->wake = base;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "aotag");
+ if (res) {
+ pmc->aotag = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pmc->aotag))
+ return PTR_ERR(pmc->aotag);
+ } else {
+ pmc->aotag = base;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "scratch");
+ if (res) {
+ pmc->scratch = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pmc->scratch))
+ return PTR_ERR(pmc->scratch);
+ } else {
+ pmc->scratch = base;
+ }
+
+ pmc->clk = devm_clk_get_optional(&pdev->dev, "pclk");
+ if (IS_ERR(pmc->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(pmc->clk),
+ "failed to get pclk\n");
+
+ /*
+ * PMC should be the last resort for restarting since it soft-resets the
+ * CPU without resetting everything else.
+ */
+ err = devm_register_reboot_notifier(&pdev->dev,
+ &tegra_pmc_reboot_notifier);
+ if (err) {
+ dev_err(&pdev->dev, "unable to register reboot notifier, %d\n",
+ err);
+ return err;
+ }
+
+ err = devm_register_sys_off_handler(&pdev->dev,
+ SYS_OFF_MODE_RESTART,
+ SYS_OFF_PRIO_LOW,
+ tegra_pmc_restart_handler, NULL);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register sys-off handler: %d\n",
+ err);
+ return err;
+ }
+
+ /*
+ * PMC should be the primary power-off method if it soft-resets the CPU,
+ * asking the bootloader to shut down the hardware.
+ */
+ err = devm_register_sys_off_handler(&pdev->dev,
+ SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_FIRMWARE,
+ tegra_pmc_power_off_handler, NULL);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register sys-off handler: %d\n",
+ err);
+ return err;
+ }
+
+ /*
+ * The PCLK clock rate can't be retrieved using the CLK API because doing
+ * so causes a lockup if the CPU enters the LP2 idle state from some other
+ * CLK notifier, hence we cache the rate locally.
+ */
+ if (pmc->clk) {
+ pmc->clk_nb.notifier_call = tegra_pmc_clk_notify_cb;
+ err = devm_clk_notifier_register(&pdev->dev, pmc->clk,
+ &pmc->clk_nb);
+ if (err) {
+ dev_err(&pdev->dev,
+ "failed to register clk notifier\n");
+ return err;
+ }
+
+ pmc->rate = clk_get_rate(pmc->clk);
+ }
+
+ pmc->dev = &pdev->dev;
+
+ err = tegra_pmc_init(pmc);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to initialize PMC: %d\n", err);
+ return err;
+ }
+
+ tegra_pmc_init_tsense_reset(pmc);
+
+ tegra_pmc_reset_sysfs_init(pmc);
+
+ err = tegra_pmc_pinctrl_init(pmc);
+ if (err)
+ goto cleanup_sysfs;
+
+ err = tegra_pmc_regmap_init(pmc);
+ if (err < 0)
+ goto cleanup_sysfs;
+
+ err = tegra_powergate_init(pmc, pdev->dev.of_node);
+ if (err < 0)
+ goto cleanup_powergates;
+
+ err = tegra_pmc_irq_init(pmc);
+ if (err < 0)
+ goto cleanup_powergates;
+
+ mutex_lock(&pmc->powergates_lock);
+ iounmap(pmc->base);
+ pmc->base = base;
+ mutex_unlock(&pmc->powergates_lock);
+
+ tegra_pmc_clock_register(pmc, pdev->dev.of_node);
+ platform_set_drvdata(pdev, pmc);
+ tegra_pm_init_suspend();
+
+ /* Some wakes require specific filter configuration */
+ if (pmc->soc->set_wake_filters)
+ pmc->soc->set_wake_filters(pmc);
+
+ debugfs_create_file("powergate", 0444, NULL, NULL, &powergate_fops);
+
+ return 0;
+
+cleanup_powergates:
+ tegra_powergate_remove_all(pdev->dev.of_node);
+cleanup_sysfs:
+ device_remove_file(&pdev->dev, &dev_attr_reset_reason);
+ device_remove_file(&pdev->dev, &dev_attr_reset_level);
+
+ return err;
+}
+
+/*
+ * Ensures that sufficient time has passed for a register write to
+ * serialize into the 32KHz domain.
+ */
+static void wke_32kwritel(struct tegra_pmc *pmc, u32 value, unsigned int offset)
+{
+ writel(value, pmc->wake + offset);
+ udelay(130);
+}
+
+static void wke_write_wake_level(struct tegra_pmc *pmc, int wake, int level)
+{
+ unsigned int offset = WAKE_AOWAKE_CNTRL(wake);
+ u32 value;
+
+ value = readl(pmc->wake + offset);
+ if (level)
+ value |= WAKE_AOWAKE_CNTRL_LEVEL;
+ else
+ value &= ~WAKE_AOWAKE_CNTRL_LEVEL;
+
+ writel(value, pmc->wake + offset);
+}
+
+static void wke_write_wake_levels(struct tegra_pmc *pmc)
+{
+ unsigned int i;
+
+ for (i = 0; i < pmc->soc->max_wake_events; i++)
+ wke_write_wake_level(pmc, i, test_bit(i, pmc->wake_cntrl_level_map));
+}
+
+static void wke_clear_sw_wake_status(struct tegra_pmc *pmc)
+{
+ wke_32kwritel(pmc, 1, WAKE_AOWAKE_SW_STATUS_W_0);
+}
+
+static void wke_read_sw_wake_status(struct tegra_pmc *pmc)
+{
+ unsigned long status;
+ unsigned int wake, i;
+
+ for (i = 0; i < pmc->soc->max_wake_events; i++)
+ wke_write_wake_level(pmc, i, 0);
+
+ wke_clear_sw_wake_status(pmc);
+
+ wke_32kwritel(pmc, 1, WAKE_LATCH_SW);
+
+ /*
+ * WAKE_AOWAKE_SW_STATUS is edge triggered, so in order to
+ * obtain the current status of the input wake signals, change
+ * the polarity of the wake level from 0->1 while latching to force
+ * a positive edge if the sampled signal is '1'.
+ */
+ for (i = 0; i < pmc->soc->max_wake_events; i++)
+ wke_write_wake_level(pmc, i, 1);
+
+ /*
+ * Wait for the update to be synced into the 32kHz domain,
+ * and let enough time lapse, so that the wake signals have time to
+ * be sampled.
+ */
+ udelay(300);
+
+ wke_32kwritel(pmc, 0, WAKE_LATCH_SW);
+
+ bitmap_zero(pmc->wake_sw_status_map, pmc->soc->max_wake_events);
+
+ for (i = 0; i < pmc->soc->max_wake_vectors; i++) {
+ status = readl(pmc->wake + WAKE_AOWAKE_SW_STATUS(i));
+
+ for_each_set_bit(wake, &status, 32)
+ set_bit(wake + (i * 32), pmc->wake_sw_status_map);
+ }
+}
+
+static void wke_clear_wake_status(struct tegra_pmc *pmc)
+{
+ unsigned long status;
+ unsigned int i, wake;
+ u32 mask;
+
+ for (i = 0; i < pmc->soc->max_wake_vectors; i++) {
+ mask = readl(pmc->wake + WAKE_AOWAKE_TIER2_ROUTING(i));
+ status = readl(pmc->wake + WAKE_AOWAKE_STATUS_R(i)) & mask;
+
+ for_each_set_bit(wake, &status, 32)
+ wke_32kwritel(pmc, 0x1, WAKE_AOWAKE_STATUS_W((i * 32) + wake));
+ }
+}
+
+/* translate SC7 wake sources back into IRQs to catch edge-triggered wakeups */
+static void tegra186_pmc_process_wake_events(struct tegra_pmc *pmc, unsigned int index,
+ unsigned long status)
+{
+ unsigned int wake;
+
+ dev_dbg(pmc->dev, "Wake[%d:%d] status=%#lx\n", (index * 32) + 31, index * 32, status);
+
+ for_each_set_bit(wake, &status, 32) {
+ irq_hw_number_t hwirq = wake + 32 * index;
+ struct irq_desc *desc;
+ unsigned int irq;
+
+ irq = irq_find_mapping(pmc->domain, hwirq);
+
+ desc = irq_to_desc(irq);
+ if (!desc || !desc->action || !desc->action->name) {
+ dev_dbg(pmc->dev, "Resume caused by WAKE%ld, IRQ %d\n", hwirq, irq);
+ continue;
+ }
+
+ dev_dbg(pmc->dev, "Resume caused by WAKE%ld, %s\n", hwirq, desc->action->name);
+ generic_handle_irq(irq);
+ }
+}
+
+static void tegra186_pmc_wake_syscore_resume(void)
+{
+ u32 status, mask;
+ unsigned int i;
+
+ for (i = 0; i < pmc->soc->max_wake_vectors; i++) {
+ mask = readl(pmc->wake + WAKE_AOWAKE_TIER2_ROUTING(i));
+ status = readl(pmc->wake + WAKE_AOWAKE_STATUS_R(i)) & mask;
+
+ tegra186_pmc_process_wake_events(pmc, i, status);
+ }
+}
+
+static int tegra186_pmc_wake_syscore_suspend(void)
+{
+ wke_read_sw_wake_status(pmc);
+
+ /*
+ * Flip the wakeup trigger for dual-edge triggered pads which are
+ * currently asserting as wakeups.
+ */
+ bitmap_andnot(pmc->wake_cntrl_level_map, pmc->wake_type_dual_edge_map,
+ pmc->wake_sw_status_map, pmc->soc->max_wake_events);
+ bitmap_or(pmc->wake_cntrl_level_map, pmc->wake_cntrl_level_map,
+ pmc->wake_type_level_map, pmc->soc->max_wake_events);
+
+ /* Clear PMC Wake Status registers while going to suspend */
+ wke_clear_wake_status(pmc);
+ wke_write_wake_levels(pmc);
+
+ return 0;
+}
+
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM)
+static int tegra_pmc_suspend(struct device *dev)
+{
+ struct tegra_pmc *pmc = dev_get_drvdata(dev);
+
+ tegra_pmc_writel(pmc, virt_to_phys(tegra_resume), PMC_SCRATCH41);
+
+ return 0;
+}
+
+static int tegra_pmc_resume(struct device *dev)
+{
+ struct tegra_pmc *pmc = dev_get_drvdata(dev);
+
+ tegra_pmc_writel(pmc, 0x0, PMC_SCRATCH41);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(tegra_pmc_pm_ops, tegra_pmc_suspend, tegra_pmc_resume);
+
+#endif
+
+static const char * const tegra20_powergates[] = {
+ [TEGRA_POWERGATE_CPU] = "cpu",
+ [TEGRA_POWERGATE_3D] = "td",
+ [TEGRA_POWERGATE_VENC] = "venc",
+ [TEGRA_POWERGATE_VDEC] = "vdec",
+ [TEGRA_POWERGATE_PCIE] = "pcie",
+ [TEGRA_POWERGATE_L2] = "l2",
+ [TEGRA_POWERGATE_MPE] = "mpe",
+};
+
+static const struct tegra_pmc_regs tegra20_pmc_regs = {
+ .scratch0 = 0x50,
+ .rst_status = 0x1b4,
+ .rst_source_shift = 0x0,
+ .rst_source_mask = 0x7,
+ .rst_level_shift = 0x0,
+ .rst_level_mask = 0x0,
+};
+
+static void tegra20_pmc_init(struct tegra_pmc *pmc)
+{
+ u32 value, osc, pmu, off;
+
+ /* Always enable CPU power request */
+ value = tegra_pmc_readl(pmc, PMC_CNTRL);
+ value |= PMC_CNTRL_CPU_PWRREQ_OE;
+ tegra_pmc_writel(pmc, value, PMC_CNTRL);
+
+ value = tegra_pmc_readl(pmc, PMC_CNTRL);
+
+ if (pmc->sysclkreq_high)
+ value &= ~PMC_CNTRL_SYSCLK_POLARITY;
+ else
+ value |= PMC_CNTRL_SYSCLK_POLARITY;
+
+ if (pmc->corereq_high)
+ value &= ~PMC_CNTRL_PWRREQ_POLARITY;
+ else
+ value |= PMC_CNTRL_PWRREQ_POLARITY;
+
+ /* configure the output polarity while the request is tristated */
+ tegra_pmc_writel(pmc, value, PMC_CNTRL);
+
+ /* now enable the request */
+ value = tegra_pmc_readl(pmc, PMC_CNTRL);
+ value |= PMC_CNTRL_SYSCLK_OE;
+ tegra_pmc_writel(pmc, value, PMC_CNTRL);
+
+ /* program core timings which are applicable only for suspend state */
+ if (pmc->suspend_mode != TEGRA_SUSPEND_NONE) {
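+ /* microseconds to 8.192 kHz ticks (osc) and 32.768 kHz ticks (pmu, off) */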
+ osc = DIV_ROUND_UP(pmc->core_osc_time * 8192, 1000000);
+ pmu = DIV_ROUND_UP(pmc->core_pmu_time * 32768, 1000000);
+ off = DIV_ROUND_UP(pmc->core_off_time * 32768, 1000000);
+ tegra_pmc_writel(pmc, ((osc << 8) & 0xff00) | (pmu & 0xff),
+ PMC_COREPWRGOOD_TIMER);
+ tegra_pmc_writel(pmc, off, PMC_COREPWROFF_TIMER);
+ }
+}
+
+static void tegra20_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
+ struct device_node *np,
+ bool invert)
+{
+ u32 value;
+
+ value = tegra_pmc_readl(pmc, PMC_CNTRL);
+
+ if (invert)
+ value |= PMC_CNTRL_INTR_POLARITY;
+ else
+ value &= ~PMC_CNTRL_INTR_POLARITY;
+
+ tegra_pmc_writel(pmc, value, PMC_CNTRL);
+}
+
+static const struct tegra_pmc_soc tegra20_pmc_soc = {
+ .supports_core_domain = true,
+ .num_powergates = ARRAY_SIZE(tegra20_powergates),
+ .powergates = tegra20_powergates,
+ .num_cpu_powergates = 0,
+ .cpu_powergates = NULL,
+ .has_tsense_reset = false,
+ .has_gpu_clamps = false,
+ .needs_mbist_war = false,
+ .has_impl_33v_pwr = false,
+ .maybe_tz_only = false,
+ .num_io_pads = 0,
+ .io_pads = NULL,
+ .num_pin_descs = 0,
+ .pin_descs = NULL,
+ .regs = &tegra20_pmc_regs,
+ .init = tegra20_pmc_init,
+ .setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
+ .powergate_set = tegra20_powergate_set,
+ .reset_sources = NULL,
+ .num_reset_sources = 0,
+ .reset_levels = NULL,
+ .num_reset_levels = 0,
+ .pmc_clks_data = NULL,
+ .num_pmc_clks = 0,
+ .has_blink_output = true,
+ .has_usb_sleepwalk = true,
+};
+
+static const char * const tegra30_powergates[] = {
+ [TEGRA_POWERGATE_CPU] = "cpu0",
+ [TEGRA_POWERGATE_3D] = "td",
+ [TEGRA_POWERGATE_VENC] = "venc",
+ [TEGRA_POWERGATE_VDEC] = "vdec",
+ [TEGRA_POWERGATE_PCIE] = "pcie",
+ [TEGRA_POWERGATE_L2] = "l2",
+ [TEGRA_POWERGATE_MPE] = "mpe",
+ [TEGRA_POWERGATE_HEG] = "heg",
+ [TEGRA_POWERGATE_SATA] = "sata",
+ [TEGRA_POWERGATE_CPU1] = "cpu1",
+ [TEGRA_POWERGATE_CPU2] = "cpu2",
+ [TEGRA_POWERGATE_CPU3] = "cpu3",
+ [TEGRA_POWERGATE_CELP] = "celp",
+ [TEGRA_POWERGATE_3D1] = "td2",
+};
+
+static const u8 tegra30_cpu_powergates[] = {
+ TEGRA_POWERGATE_CPU,
+ TEGRA_POWERGATE_CPU1,
+ TEGRA_POWERGATE_CPU2,
+ TEGRA_POWERGATE_CPU3,
+};
+
+static const char * const tegra30_reset_sources[] = {
+ "POWER_ON_RESET",
+ "WATCHDOG",
+ "SENSOR",
+ "SW_MAIN",
+ "LP0"
+};
+
+static const struct tegra_pmc_soc tegra30_pmc_soc = {
+ .supports_core_domain = true,
+ .num_powergates = ARRAY_SIZE(tegra30_powergates),
+ .powergates = tegra30_powergates,
+ .num_cpu_powergates = ARRAY_SIZE(tegra30_cpu_powergates),
+ .cpu_powergates = tegra30_cpu_powergates,
+ .has_tsense_reset = true,
+ .has_gpu_clamps = false,
+ .needs_mbist_war = false,
+ .has_impl_33v_pwr = false,
+ .maybe_tz_only = false,
+ .num_io_pads = 0,
+ .io_pads = NULL,
+ .num_pin_descs = 0,
+ .pin_descs = NULL,
+ .regs = &tegra20_pmc_regs,
+ .init = tegra20_pmc_init,
+ .setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
+ .powergate_set = tegra20_powergate_set,
+ .reset_sources = tegra30_reset_sources,
+ .num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
+ .reset_levels = NULL,
+ .num_reset_levels = 0,
+ .pmc_clks_data = tegra_pmc_clks_data,
+ .num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data),
+ .has_blink_output = true,
+ .has_usb_sleepwalk = true,
+};
+
+static const char * const tegra114_powergates[] = {
+ [TEGRA_POWERGATE_CPU] = "crail",
+ [TEGRA_POWERGATE_3D] = "td",
+ [TEGRA_POWERGATE_VENC] = "venc",
+ [TEGRA_POWERGATE_VDEC] = "vdec",
+ [TEGRA_POWERGATE_MPE] = "mpe",
+ [TEGRA_POWERGATE_HEG] = "heg",
+ [TEGRA_POWERGATE_CPU1] = "cpu1",
+ [TEGRA_POWERGATE_CPU2] = "cpu2",
+ [TEGRA_POWERGATE_CPU3] = "cpu3",
+ [TEGRA_POWERGATE_CELP] = "celp",
+ [TEGRA_POWERGATE_CPU0] = "cpu0",
+ [TEGRA_POWERGATE_C0NC] = "c0nc",
+ [TEGRA_POWERGATE_C1NC] = "c1nc",
+ [TEGRA_POWERGATE_DIS] = "dis",
+ [TEGRA_POWERGATE_DISB] = "disb",
+ [TEGRA_POWERGATE_XUSBA] = "xusba",
+ [TEGRA_POWERGATE_XUSBB] = "xusbb",
+ [TEGRA_POWERGATE_XUSBC] = "xusbc",
+};
+
+static const u8 tegra114_cpu_powergates[] = {
+ TEGRA_POWERGATE_CPU0,
+ TEGRA_POWERGATE_CPU1,
+ TEGRA_POWERGATE_CPU2,
+ TEGRA_POWERGATE_CPU3,
+};
+
+static const struct tegra_pmc_soc tegra114_pmc_soc = {
+ .supports_core_domain = false,
+ .num_powergates = ARRAY_SIZE(tegra114_powergates),
+ .powergates = tegra114_powergates,
+ .num_cpu_powergates = ARRAY_SIZE(tegra114_cpu_powergates),
+ .cpu_powergates = tegra114_cpu_powergates,
+ .has_tsense_reset = true,
+ .has_gpu_clamps = false,
+ .needs_mbist_war = false,
+ .has_impl_33v_pwr = false,
+ .maybe_tz_only = false,
+ .num_io_pads = 0,
+ .io_pads = NULL,
+ .num_pin_descs = 0,
+ .pin_descs = NULL,
+ .regs = &tegra20_pmc_regs,
+ .init = tegra20_pmc_init,
+ .setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
+ .powergate_set = tegra114_powergate_set,
+ .reset_sources = tegra30_reset_sources,
+ .num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
+ .reset_levels = NULL,
+ .num_reset_levels = 0,
+ .pmc_clks_data = tegra_pmc_clks_data,
+ .num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data),
+ .has_blink_output = true,
+ .has_usb_sleepwalk = true,
+};
+
+static const char * const tegra124_powergates[] = {
+ [TEGRA_POWERGATE_CPU] = "crail",
+ [TEGRA_POWERGATE_3D] = "3d",
+ [TEGRA_POWERGATE_VENC] = "venc",
+ [TEGRA_POWERGATE_PCIE] = "pcie",
+ [TEGRA_POWERGATE_VDEC] = "vdec",
+ [TEGRA_POWERGATE_MPE] = "mpe",
+ [TEGRA_POWERGATE_HEG] = "heg",
+ [TEGRA_POWERGATE_SATA] = "sata",
+ [TEGRA_POWERGATE_CPU1] = "cpu1",
+ [TEGRA_POWERGATE_CPU2] = "cpu2",
+ [TEGRA_POWERGATE_CPU3] = "cpu3",
+ [TEGRA_POWERGATE_CELP] = "celp",
+ [TEGRA_POWERGATE_CPU0] = "cpu0",
+ [TEGRA_POWERGATE_C0NC] = "c0nc",
+ [TEGRA_POWERGATE_C1NC] = "c1nc",
+ [TEGRA_POWERGATE_SOR] = "sor",
+ [TEGRA_POWERGATE_DIS] = "dis",
+ [TEGRA_POWERGATE_DISB] = "disb",
+ [TEGRA_POWERGATE_XUSBA] = "xusba",
+ [TEGRA_POWERGATE_XUSBB] = "xusbb",
+ [TEGRA_POWERGATE_XUSBC] = "xusbc",
+ [TEGRA_POWERGATE_VIC] = "vic",
+ [TEGRA_POWERGATE_IRAM] = "iram",
+};
+
+static const u8 tegra124_cpu_powergates[] = {
+ TEGRA_POWERGATE_CPU0,
+ TEGRA_POWERGATE_CPU1,
+ TEGRA_POWERGATE_CPU2,
+ TEGRA_POWERGATE_CPU3,
+};
+
+#define TEGRA_IO_PAD(_id, _dpd, _request, _status, _voltage, _name) \
+ ((struct tegra_io_pad_soc) { \
+ .id = (_id), \
+ .dpd = (_dpd), \
+ .request = (_request), \
+ .status = (_status), \
+ .voltage = (_voltage), \
+ .name = (_name), \
+ })
+
+#define TEGRA_IO_PIN_DESC(_id, _name) \
+ ((struct pinctrl_pin_desc) { \
+ .number = (_id), \
+ .name = (_name), \
+ })
+
+static const struct tegra_io_pad_soc tegra124_io_pads[] = {
+ TEGRA_IO_PAD(TEGRA_IO_PAD_AUDIO, 17, 0x1b8, 0x1bc, UINT_MAX, "audio"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_BB, 15, 0x1b8, 0x1bc, UINT_MAX, "bb"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_CAM, 4, 0x1c0, 0x1c4, UINT_MAX, "cam"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_COMP, 22, 0x1b8, 0x1bc, UINT_MAX, "comp"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_CSIA, 0, 0x1b8, 0x1bc, UINT_MAX, "csia"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_CSIB, 1, 0x1b8, 0x1bc, UINT_MAX, "csib"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_CSIE, 12, 0x1c0, 0x1c4, UINT_MAX, "csie"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_DSI, 2, 0x1b8, 0x1bc, UINT_MAX, "dsi"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_DSIB, 7, 0x1c0, 0x1c4, UINT_MAX, "dsib"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_DSIC, 8, 0x1c0, 0x1c4, UINT_MAX, "dsic"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_DSID, 9, 0x1c0, 0x1c4, UINT_MAX, "dsid"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_HDMI, 28, 0x1b8, 0x1bc, UINT_MAX, "hdmi"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_HSIC, 19, 0x1b8, 0x1bc, UINT_MAX, "hsic"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_HV, 6, 0x1c0, 0x1c4, UINT_MAX, "hv"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_LVDS, 25, 0x1c0, 0x1c4, UINT_MAX, "lvds"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_MIPI_BIAS, 3, 0x1b8, 0x1bc, UINT_MAX, "mipi-bias"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_NAND, 13, 0x1b8, 0x1bc, UINT_MAX, "nand"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_PEX_BIAS, 4, 0x1b8, 0x1bc, UINT_MAX, "pex-bias"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_PEX_CLK1, 5, 0x1b8, 0x1bc, UINT_MAX, "pex-clk1"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_PEX_CLK2, 6, 0x1b8, 0x1bc, UINT_MAX, "pex-clk2"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_PEX_CNTRL, 0, 0x1c0, 0x1c4, UINT_MAX, "pex-cntrl"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_SDMMC1, 1, 0x1c0, 0x1c4, UINT_MAX, "sdmmc1"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_SDMMC3, 2, 0x1c0, 0x1c4, UINT_MAX, "sdmmc3"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_SDMMC4, 3, 0x1c0, 0x1c4, UINT_MAX, "sdmmc4"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_SYS_DDC, 26, 0x1c0, 0x1c4, UINT_MAX, "sys_ddc"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_UART, 14, 0x1b8, 0x1bc, UINT_MAX, "uart"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_USB0, 9, 0x1b8, 0x1bc, UINT_MAX, "usb0"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_USB1, 10, 0x1b8, 0x1bc, UINT_MAX, "usb1"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_USB2, 11, 0x1b8, 0x1bc, UINT_MAX, "usb2"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_USB_BIAS, 12, 0x1b8, 0x1bc, UINT_MAX, "usb_bias"),
+};
+
+static const struct pinctrl_pin_desc tegra124_pin_descs[] = {
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_AUDIO, "audio"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_BB, "bb"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_CAM, "cam"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_COMP, "comp"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_CSIA, "csia"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_CSIB, "csib"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_CSIE, "csie"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_DSI, "dsi"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_DSIB, "dsib"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_DSIC, "dsic"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_DSID, "dsid"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_HDMI, "hdmi"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_HSIC, "hsic"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_HV, "hv"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_LVDS, "lvds"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_MIPI_BIAS, "mipi-bias"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_NAND, "nand"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_PEX_BIAS, "pex-bias"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_PEX_CLK1, "pex-clk1"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_PEX_CLK2, "pex-clk2"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_PEX_CNTRL, "pex-cntrl"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_SDMMC1, "sdmmc1"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_SDMMC3, "sdmmc3"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_SDMMC4, "sdmmc4"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_SYS_DDC, "sys_ddc"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_UART, "uart"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_USB0, "usb0"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_USB1, "usb1"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_USB2, "usb2"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_USB_BIAS, "usb_bias"),
+};
+
+static const struct tegra_pmc_soc tegra124_pmc_soc = {
+ .supports_core_domain = false,
+ .num_powergates = ARRAY_SIZE(tegra124_powergates),
+ .powergates = tegra124_powergates,
+ .num_cpu_powergates = ARRAY_SIZE(tegra124_cpu_powergates),
+ .cpu_powergates = tegra124_cpu_powergates,
+ .has_tsense_reset = true,
+ .has_gpu_clamps = true,
+ .needs_mbist_war = false,
+ .has_impl_33v_pwr = false,
+ .maybe_tz_only = false,
+ .num_io_pads = ARRAY_SIZE(tegra124_io_pads),
+ .io_pads = tegra124_io_pads,
+ .num_pin_descs = ARRAY_SIZE(tegra124_pin_descs),
+ .pin_descs = tegra124_pin_descs,
+ .regs = &tegra20_pmc_regs,
+ .init = tegra20_pmc_init,
+ .setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
+ .powergate_set = tegra114_powergate_set,
+ .reset_sources = tegra30_reset_sources,
+ .num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
+ .reset_levels = NULL,
+ .num_reset_levels = 0,
+ .pmc_clks_data = tegra_pmc_clks_data,
+ .num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data),
+ .has_blink_output = true,
+ .has_usb_sleepwalk = true,
+};
+
+static const char * const tegra210_powergates[] = {
+ [TEGRA_POWERGATE_CPU] = "crail",
+ [TEGRA_POWERGATE_3D] = "3d",
+ [TEGRA_POWERGATE_VENC] = "venc",
+ [TEGRA_POWERGATE_PCIE] = "pcie",
+ [TEGRA_POWERGATE_MPE] = "mpe",
+ [TEGRA_POWERGATE_SATA] = "sata",
+ [TEGRA_POWERGATE_CPU1] = "cpu1",
+ [TEGRA_POWERGATE_CPU2] = "cpu2",
+ [TEGRA_POWERGATE_CPU3] = "cpu3",
+ [TEGRA_POWERGATE_CPU0] = "cpu0",
+ [TEGRA_POWERGATE_C0NC] = "c0nc",
+ [TEGRA_POWERGATE_SOR] = "sor",
+ [TEGRA_POWERGATE_DIS] = "dis",
+ [TEGRA_POWERGATE_DISB] = "disb",
+ [TEGRA_POWERGATE_XUSBA] = "xusba",
+ [TEGRA_POWERGATE_XUSBB] = "xusbb",
+ [TEGRA_POWERGATE_XUSBC] = "xusbc",
+ [TEGRA_POWERGATE_VIC] = "vic",
+ [TEGRA_POWERGATE_IRAM] = "iram",
+ [TEGRA_POWERGATE_NVDEC] = "nvdec",
+ [TEGRA_POWERGATE_NVJPG] = "nvjpg",
+ [TEGRA_POWERGATE_AUD] = "aud",
+ [TEGRA_POWERGATE_DFD] = "dfd",
+ [TEGRA_POWERGATE_VE2] = "ve2",
+};
+
+static const u8 tegra210_cpu_powergates[] = {
+ TEGRA_POWERGATE_CPU0,
+ TEGRA_POWERGATE_CPU1,
+ TEGRA_POWERGATE_CPU2,
+ TEGRA_POWERGATE_CPU3,
+};
+
+static const struct tegra_io_pad_soc tegra210_io_pads[] = {
+ TEGRA_IO_PAD(TEGRA_IO_PAD_AUDIO, 17, 0x1b8, 0x1bc, 5, "audio"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_AUDIO_HV, 29, 0x1c0, 0x1c4, 18, "audio-hv"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_CAM, 4, 0x1c0, 0x1c4, 10, "cam"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_CSIA, 0, 0x1b8, 0x1bc, UINT_MAX, "csia"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_CSIB, 1, 0x1b8, 0x1bc, UINT_MAX, "csib"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_CSIC, 10, 0x1c0, 0x1c4, UINT_MAX, "csic"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_CSID, 11, 0x1c0, 0x1c4, UINT_MAX, "csid"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_CSIE, 12, 0x1c0, 0x1c4, UINT_MAX, "csie"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_CSIF, 13, 0x1c0, 0x1c4, UINT_MAX, "csif"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_DBG, 25, 0x1b8, 0x1bc, 19, "dbg"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_DEBUG_NONAO, 26, 0x1b8, 0x1bc, UINT_MAX, "debug-nonao"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_DMIC, 18, 0x1c0, 0x1c4, 20, "dmic"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_DP, 19, 0x1c0, 0x1c4, UINT_MAX, "dp"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_DSI, 2, 0x1b8, 0x1bc, UINT_MAX, "dsi"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_DSIB, 7, 0x1c0, 0x1c4, UINT_MAX, "dsib"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_DSIC, 8, 0x1c0, 0x1c4, UINT_MAX, "dsic"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_DSID, 9, 0x1c0, 0x1c4, UINT_MAX, "dsid"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_EMMC, 3, 0x1c0, 0x1c4, UINT_MAX, "emmc"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_EMMC2, 5, 0x1c0, 0x1c4, UINT_MAX, "emmc2"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_GPIO, 27, 0x1b8, 0x1bc, 21, "gpio"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_HDMI, 28, 0x1b8, 0x1bc, UINT_MAX, "hdmi"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_HSIC, 19, 0x1b8, 0x1bc, UINT_MAX, "hsic"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_LVDS, 25, 0x1c0, 0x1c4, UINT_MAX, "lvds"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_MIPI_BIAS, 3, 0x1b8, 0x1bc, UINT_MAX, "mipi-bias"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_PEX_BIAS, 4, 0x1b8, 0x1bc, UINT_MAX, "pex-bias"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_PEX_CLK1, 5, 0x1b8, 0x1bc, UINT_MAX, "pex-clk1"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_PEX_CLK2, 6, 0x1b8, 0x1bc, UINT_MAX, "pex-clk2"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_PEX_CNTRL, UINT_MAX, UINT_MAX, UINT_MAX, 11, "pex-cntrl"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_SDMMC1, 1, 0x1c0, 0x1c4, 12, "sdmmc1"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_SDMMC3, 2, 0x1c0, 0x1c4, 13, "sdmmc3"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_SPI, 14, 0x1c0, 0x1c4, 22, "spi"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_SPI_HV, 15, 0x1c0, 0x1c4, 23, "spi-hv"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_UART, 14, 0x1b8, 0x1bc, 2, "uart"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_USB0, 9, 0x1b8, 0x1bc, UINT_MAX, "usb0"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_USB1, 10, 0x1b8, 0x1bc, UINT_MAX, "usb1"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_USB2, 11, 0x1b8, 0x1bc, UINT_MAX, "usb2"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_USB3, 18, 0x1b8, 0x1bc, UINT_MAX, "usb3"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_USB_BIAS, 12, 0x1b8, 0x1bc, UINT_MAX, "usb-bias"),
+};
+
+static const struct pinctrl_pin_desc tegra210_pin_descs[] = {
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_AUDIO, "audio"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_AUDIO_HV, "audio-hv"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_CAM, "cam"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_CSIA, "csia"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_CSIB, "csib"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_CSIC, "csic"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_CSID, "csid"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_CSIE, "csie"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_CSIF, "csif"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_DBG, "dbg"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_DEBUG_NONAO, "debug-nonao"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_DMIC, "dmic"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_DP, "dp"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_DSI, "dsi"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_DSIB, "dsib"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_DSIC, "dsic"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_DSID, "dsid"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_EMMC, "emmc"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_EMMC2, "emmc2"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_GPIO, "gpio"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_HDMI, "hdmi"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_HSIC, "hsic"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_LVDS, "lvds"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_MIPI_BIAS, "mipi-bias"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_PEX_BIAS, "pex-bias"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_PEX_CLK1, "pex-clk1"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_PEX_CLK2, "pex-clk2"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_PEX_CNTRL, "pex-cntrl"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_SDMMC1, "sdmmc1"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_SDMMC3, "sdmmc3"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_SPI, "spi"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_SPI_HV, "spi-hv"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_UART, "uart"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_USB0, "usb0"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_USB1, "usb1"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_USB2, "usb2"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_USB3, "usb3"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_USB_BIAS, "usb-bias"),
+};
+
+static const char * const tegra210_reset_sources[] = {
+ "POWER_ON_RESET",
+ "WATCHDOG",
+ "SENSOR",
+ "SW_MAIN",
+ "LP0",
+ "AOTAG"
+};
+
+static const struct tegra_wake_event tegra210_wake_events[] = {
+ TEGRA_WAKE_IRQ("rtc", 16, 2),
+ TEGRA_WAKE_IRQ("pmu", 51, 86),
+};
+
+static const struct tegra_pmc_soc tegra210_pmc_soc = {
+ .supports_core_domain = false,
+ .num_powergates = ARRAY_SIZE(tegra210_powergates),
+ .powergates = tegra210_powergates,
+ .num_cpu_powergates = ARRAY_SIZE(tegra210_cpu_powergates),
+ .cpu_powergates = tegra210_cpu_powergates,
+ .has_tsense_reset = true,
+ .has_gpu_clamps = true,
+ .needs_mbist_war = true,
+ .has_impl_33v_pwr = false,
+ .maybe_tz_only = true,
+ .num_io_pads = ARRAY_SIZE(tegra210_io_pads),
+ .io_pads = tegra210_io_pads,
+ .num_pin_descs = ARRAY_SIZE(tegra210_pin_descs),
+ .pin_descs = tegra210_pin_descs,
+ .regs = &tegra20_pmc_regs,
+ .init = tegra20_pmc_init,
+ .setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
+ .powergate_set = tegra114_powergate_set,
+ .irq_set_wake = tegra210_pmc_irq_set_wake,
+ .irq_set_type = tegra210_pmc_irq_set_type,
+ .reset_sources = tegra210_reset_sources,
+ .num_reset_sources = ARRAY_SIZE(tegra210_reset_sources),
+ .reset_levels = NULL,
+ .num_reset_levels = 0,
+ .num_wake_events = ARRAY_SIZE(tegra210_wake_events),
+ .wake_events = tegra210_wake_events,
+ .pmc_clks_data = tegra_pmc_clks_data,
+ .num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data),
+ .has_blink_output = true,
+ .has_usb_sleepwalk = true,
+};
+
+static const struct tegra_io_pad_soc tegra186_io_pads[] = {
+ TEGRA_IO_PAD(TEGRA_IO_PAD_CSIA, 0, 0x74, 0x78, UINT_MAX, "csia"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_CSIB, 1, 0x74, 0x78, UINT_MAX, "csib"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_DSI, 2, 0x74, 0x78, UINT_MAX, "dsi"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_MIPI_BIAS, 3, 0x74, 0x78, UINT_MAX, "mipi-bias"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_PEX_CLK_BIAS, 4, 0x74, 0x78, UINT_MAX, "pex-clk-bias"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_PEX_CLK3, 5, 0x74, 0x78, UINT_MAX, "pex-clk3"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_PEX_CLK2, 6, 0x74, 0x78, UINT_MAX, "pex-clk2"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_PEX_CLK1, 7, 0x74, 0x78, UINT_MAX, "pex-clk1"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_USB0, 9, 0x74, 0x78, UINT_MAX, "usb0"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_USB1, 10, 0x74, 0x78, UINT_MAX, "usb1"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_USB2, 11, 0x74, 0x78, UINT_MAX, "usb2"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_USB_BIAS, 12, 0x74, 0x78, UINT_MAX, "usb-bias"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_UART, 14, 0x74, 0x78, UINT_MAX, "uart"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_AUDIO, 17, 0x74, 0x78, UINT_MAX, "audio"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_HSIC, 19, 0x74, 0x78, UINT_MAX, "hsic"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_DBG, 25, 0x74, 0x78, UINT_MAX, "dbg"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_HDMI_DP0, 28, 0x74, 0x78, UINT_MAX, "hdmi-dp0"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_HDMI_DP1, 29, 0x74, 0x78, UINT_MAX, "hdmi-dp1"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_PEX_CNTRL, 0, 0x7c, 0x80, UINT_MAX, "pex-cntrl"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_SDMMC2_HV, 2, 0x7c, 0x80, 5, "sdmmc2-hv"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_SDMMC4, 4, 0x7c, 0x80, UINT_MAX, "sdmmc4"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_CAM, 6, 0x7c, 0x80, UINT_MAX, "cam"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_DSIB, 8, 0x7c, 0x80, UINT_MAX, "dsib"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_DSIC, 9, 0x7c, 0x80, UINT_MAX, "dsic"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_DSID, 10, 0x7c, 0x80, UINT_MAX, "dsid"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_CSIC, 11, 0x7c, 0x80, UINT_MAX, "csic"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_CSID, 12, 0x7c, 0x80, UINT_MAX, "csid"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_CSIE, 13, 0x7c, 0x80, UINT_MAX, "csie"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_CSIF, 14, 0x7c, 0x80, UINT_MAX, "csif"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_SPI, 15, 0x7c, 0x80, UINT_MAX, "spi"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_UFS, 17, 0x7c, 0x80, UINT_MAX, "ufs"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_DMIC_HV, 20, 0x7c, 0x80, 2, "dmic-hv"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_EDP, 21, 0x7c, 0x80, UINT_MAX, "edp"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_SDMMC1_HV, 23, 0x7c, 0x80, 4, "sdmmc1-hv"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_SDMMC3_HV, 24, 0x7c, 0x80, 6, "sdmmc3-hv"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_CONN, 28, 0x7c, 0x80, UINT_MAX, "conn"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_AUDIO_HV, 29, 0x7c, 0x80, 1, "audio-hv"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_AO_HV, UINT_MAX, UINT_MAX, UINT_MAX, 0, "ao-hv"),
+};
+
+static const struct pinctrl_pin_desc tegra186_pin_descs[] = {
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_CSIA, "csia"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_CSIB, "csib"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_DSI, "dsi"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_MIPI_BIAS, "mipi-bias"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_PEX_CLK_BIAS, "pex-clk-bias"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_PEX_CLK3, "pex-clk3"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_PEX_CLK2, "pex-clk2"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_PEX_CLK1, "pex-clk1"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_USB0, "usb0"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_USB1, "usb1"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_USB2, "usb2"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_USB_BIAS, "usb-bias"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_UART, "uart"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_AUDIO, "audio"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_HSIC, "hsic"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_DBG, "dbg"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_HDMI_DP0, "hdmi-dp0"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_HDMI_DP1, "hdmi-dp1"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_PEX_CNTRL, "pex-cntrl"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_SDMMC2_HV, "sdmmc2-hv"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_SDMMC4, "sdmmc4"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_CAM, "cam"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_DSIB, "dsib"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_DSIC, "dsic"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_DSID, "dsid"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_CSIC, "csic"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_CSID, "csid"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_CSIE, "csie"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_CSIF, "csif"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_SPI, "spi"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_UFS, "ufs"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_DMIC_HV, "dmic-hv"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_EDP, "edp"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_SDMMC1_HV, "sdmmc1-hv"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_SDMMC3_HV, "sdmmc3-hv"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_CONN, "conn"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_AUDIO_HV, "audio-hv"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_AO_HV, "ao-hv"),
+};
+
+static const struct tegra_pmc_regs tegra186_pmc_regs = {
+ .scratch0 = 0x2000,
+ .rst_status = 0x70,
+ .rst_source_shift = 0x2,
+ .rst_source_mask = 0x3c,
+ .rst_level_shift = 0x0,
+ .rst_level_mask = 0x3,
+};
+
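+/*
+ * Hook up syscore suspend/resume callbacks so that the AOWAKE wake state is
+ * handled across system suspend.
+ */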
+static void tegra186_pmc_init(struct tegra_pmc *pmc)
+{
+ pmc->syscore.suspend = tegra186_pmc_wake_syscore_suspend;
+ pmc->syscore.resume = tegra186_pmc_wake_syscore_resume;
+
+ register_syscore_ops(&pmc->syscore);
+}
+
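+/*
+ * Configure the wake interrupt polarity. The AOWAKE registers live in a
+ * separate "wake" region, so map it temporarily, update WAKE_AOWAKE_CTRL
+ * and unmap it again.
+ */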
+static void tegra186_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
+ struct device_node *np,
+ bool invert)
+{
+ struct resource regs;
+ void __iomem *wake;
+ u32 value;
+ int index;
+
+ index = of_property_match_string(np, "reg-names", "wake");
+ if (index < 0) {
+ dev_err(pmc->dev, "failed to find PMC wake registers\n");
+ return;
+ }
+
+ of_address_to_resource(np, index, &regs);
+
+ wake = ioremap(regs.start, resource_size(&regs));
+ if (!wake) {
+ dev_err(pmc->dev, "failed to map PMC wake registers\n");
+ return;
+ }
+
+ value = readl(wake + WAKE_AOWAKE_CTRL);
+
+ if (invert)
+ value |= WAKE_AOWAKE_CTRL_INTR_POLARITY;
+ else
+ value &= ~WAKE_AOWAKE_CTRL_INTR_POLARITY;
+
+ writel(value, wake + WAKE_AOWAKE_CTRL);
+
+ iounmap(wake);
+}
+
+static const char * const tegra186_reset_sources[] = {
+ "SYS_RESET",
+ "AOWDT",
+ "MCCPLEXWDT",
+ "BPMPWDT",
+ "SCEWDT",
+ "SPEWDT",
+ "APEWDT",
+ "BCCPLEXWDT",
+ "SENSOR",
+ "AOTAG",
+ "VFSENSOR",
+ "SWREST",
+ "SC7",
+ "HSM",
+ "CORESIGHT"
+};
+
+static const char * const tegra186_reset_levels[] = {
+ "L0", "L1", "L2", "WARM"
+};
+
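+/*
+ * Each entry maps a named AOWAKE wake event to the interrupt or GPIO that
+ * triggers it.
+ */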
+static const struct tegra_wake_event tegra186_wake_events[] = {
+ TEGRA_WAKE_IRQ("pmu", 24, 209),
+ TEGRA_WAKE_GPIO("power", 29, 1, TEGRA186_AON_GPIO(FF, 0)),
+ TEGRA_WAKE_IRQ("rtc", 73, 10),
+};
+
+static const struct tegra_pmc_soc tegra186_pmc_soc = {
+ .supports_core_domain = false,
+ .num_powergates = 0,
+ .powergates = NULL,
+ .num_cpu_powergates = 0,
+ .cpu_powergates = NULL,
+ .has_tsense_reset = false,
+ .has_gpu_clamps = false,
+ .needs_mbist_war = false,
+ .has_impl_33v_pwr = true,
+ .maybe_tz_only = false,
+ .num_io_pads = ARRAY_SIZE(tegra186_io_pads),
+ .io_pads = tegra186_io_pads,
+ .num_pin_descs = ARRAY_SIZE(tegra186_pin_descs),
+ .pin_descs = tegra186_pin_descs,
+ .regs = &tegra186_pmc_regs,
+ .init = tegra186_pmc_init,
+ .setup_irq_polarity = tegra186_pmc_setup_irq_polarity,
+ .set_wake_filters = tegra186_pmc_set_wake_filters,
+ .irq_set_wake = tegra186_pmc_irq_set_wake,
+ .irq_set_type = tegra186_pmc_irq_set_type,
+ .reset_sources = tegra186_reset_sources,
+ .num_reset_sources = ARRAY_SIZE(tegra186_reset_sources),
+ .reset_levels = tegra186_reset_levels,
+ .num_reset_levels = ARRAY_SIZE(tegra186_reset_levels),
+ .num_wake_events = ARRAY_SIZE(tegra186_wake_events),
+ .wake_events = tegra186_wake_events,
+ .max_wake_events = 96,
+ .max_wake_vectors = 3,
+ .pmc_clks_data = NULL,
+ .num_pmc_clks = 0,
+ .has_blink_output = false,
+ .has_usb_sleepwalk = false,
+};
+
+static const struct tegra_io_pad_soc tegra194_io_pads[] = {
+ TEGRA_IO_PAD(TEGRA_IO_PAD_CSIA, 0, 0x74, 0x78, UINT_MAX, "csia"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_CSIB, 1, 0x74, 0x78, UINT_MAX, "csib"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_MIPI_BIAS, 3, 0x74, 0x78, UINT_MAX, "mipi-bias"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_PEX_CLK_BIAS, 4, 0x74, 0x78, UINT_MAX, "pex-clk-bias"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_PEX_CLK3, 5, 0x74, 0x78, UINT_MAX, "pex-clk3"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_PEX_CLK2, 6, 0x74, 0x78, UINT_MAX, "pex-clk2"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_PEX_CLK1, 7, 0x74, 0x78, UINT_MAX, "pex-clk1"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_EQOS, 8, 0x74, 0x78, UINT_MAX, "eqos"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_PEX_CLK_2_BIAS, 9, 0x74, 0x78, UINT_MAX, "pex-clk-2-bias"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_PEX_CLK_2, 10, 0x74, 0x78, UINT_MAX, "pex-clk-2"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_DAP3, 11, 0x74, 0x78, UINT_MAX, "dap3"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_DAP5, 12, 0x74, 0x78, UINT_MAX, "dap5"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_UART, 14, 0x74, 0x78, UINT_MAX, "uart"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_PWR_CTL, 15, 0x74, 0x78, UINT_MAX, "pwr-ctl"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_SOC_GPIO53, 16, 0x74, 0x78, UINT_MAX, "soc-gpio53"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_AUDIO, 17, 0x74, 0x78, UINT_MAX, "audio"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_GP_PWM2, 18, 0x74, 0x78, UINT_MAX, "gp-pwm2"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_GP_PWM3, 19, 0x74, 0x78, UINT_MAX, "gp-pwm3"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_SOC_GPIO12, 20, 0x74, 0x78, UINT_MAX, "soc-gpio12"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_SOC_GPIO13, 21, 0x74, 0x78, UINT_MAX, "soc-gpio13"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_SOC_GPIO10, 22, 0x74, 0x78, UINT_MAX, "soc-gpio10"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_UART4, 23, 0x74, 0x78, UINT_MAX, "uart4"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_UART5, 24, 0x74, 0x78, UINT_MAX, "uart5"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_DBG, 25, 0x74, 0x78, UINT_MAX, "dbg"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_HDMI_DP3, 26, 0x74, 0x78, UINT_MAX, "hdmi-dp3"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_HDMI_DP2, 27, 0x74, 0x78, UINT_MAX, "hdmi-dp2"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_HDMI_DP0, 28, 0x74, 0x78, UINT_MAX, "hdmi-dp0"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_HDMI_DP1, 29, 0x74, 0x78, UINT_MAX, "hdmi-dp1"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_PEX_CNTRL, 0, 0x7c, 0x80, UINT_MAX, "pex-cntrl"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_PEX_CTL2, 1, 0x7c, 0x80, UINT_MAX, "pex-ctl2"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_PEX_L0_RST, 2, 0x7c, 0x80, UINT_MAX, "pex-l0-rst"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_PEX_L1_RST, 3, 0x7c, 0x80, UINT_MAX, "pex-l1-rst"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_SDMMC4, 4, 0x7c, 0x80, UINT_MAX, "sdmmc4"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_PEX_L5_RST, 5, 0x7c, 0x80, UINT_MAX, "pex-l5-rst"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_CAM, 6, 0x7c, 0x80, UINT_MAX, "cam"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_CSIC, 11, 0x7c, 0x80, UINT_MAX, "csic"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_CSID, 12, 0x7c, 0x80, UINT_MAX, "csid"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_CSIE, 13, 0x7c, 0x80, UINT_MAX, "csie"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_CSIF, 14, 0x7c, 0x80, UINT_MAX, "csif"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_SPI, 15, 0x7c, 0x80, UINT_MAX, "spi"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_UFS, 17, 0x7c, 0x80, UINT_MAX, "ufs"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_CSIG, 18, 0x7c, 0x80, UINT_MAX, "csig"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_CSIH, 19, 0x7c, 0x80, UINT_MAX, "csih"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_EDP, 21, 0x7c, 0x80, UINT_MAX, "edp"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_SDMMC1_HV, 23, 0x7c, 0x80, 4, "sdmmc1-hv"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_SDMMC3_HV, 24, 0x7c, 0x80, 6, "sdmmc3-hv"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_CONN, 28, 0x7c, 0x80, UINT_MAX, "conn"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_AUDIO_HV, 29, 0x7c, 0x80, 1, "audio-hv"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_AO_HV, UINT_MAX, UINT_MAX, UINT_MAX, 0, "ao-hv"),
+};
+
+static const struct pinctrl_pin_desc tegra194_pin_descs[] = {
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_CSIA, "csia"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_CSIB, "csib"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_MIPI_BIAS, "mipi-bias"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_PEX_CLK_BIAS, "pex-clk-bias"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_PEX_CLK3, "pex-clk3"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_PEX_CLK2, "pex-clk2"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_PEX_CLK1, "pex-clk1"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_EQOS, "eqos"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_PEX_CLK_2_BIAS, "pex-clk-2-bias"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_PEX_CLK_2, "pex-clk-2"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_DAP3, "dap3"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_DAP5, "dap5"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_UART, "uart"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_PWR_CTL, "pwr-ctl"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_SOC_GPIO53, "soc-gpio53"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_AUDIO, "audio"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_GP_PWM2, "gp-pwm2"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_GP_PWM3, "gp-pwm3"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_SOC_GPIO12, "soc-gpio12"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_SOC_GPIO13, "soc-gpio13"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_SOC_GPIO10, "soc-gpio10"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_UART4, "uart4"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_UART5, "uart5"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_DBG, "dbg"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_HDMI_DP3, "hdmi-dp3"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_HDMI_DP2, "hdmi-dp2"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_HDMI_DP0, "hdmi-dp0"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_HDMI_DP1, "hdmi-dp1"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_PEX_CNTRL, "pex-cntrl"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_PEX_CTL2, "pex-ctl2"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_PEX_L0_RST, "pex-l0-rst"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_PEX_L1_RST, "pex-l1-rst"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_SDMMC4, "sdmmc4"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_PEX_L5_RST, "pex-l5-rst"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_CAM, "cam"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_CSIC, "csic"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_CSID, "csid"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_CSIE, "csie"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_CSIF, "csif"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_SPI, "spi"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_UFS, "ufs"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_CSIG, "csig"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_CSIH, "csih"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_EDP, "edp"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_SDMMC1_HV, "sdmmc1-hv"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_SDMMC3_HV, "sdmmc3-hv"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_CONN, "conn"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_AUDIO_HV, "audio-hv"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_AO_HV, "ao-hv"),
+};
+
+static const struct tegra_pmc_regs tegra194_pmc_regs = {
+ .scratch0 = 0x2000,
+ .rst_status = 0x70,
+ .rst_source_shift = 0x2,
+ .rst_source_mask = 0x7c,
+ .rst_level_shift = 0x0,
+ .rst_level_mask = 0x3,
+};
+
+static const char * const tegra194_reset_sources[] = {
+ "SYS_RESET_N",
+ "AOWDT",
+ "BCCPLEXWDT",
+ "BPMPWDT",
+ "SCEWDT",
+ "SPEWDT",
+ "APEWDT",
+ "LCCPLEXWDT",
+ "SENSOR",
+ "AOTAG",
+ "VFSENSOR",
+ "MAINSWRST",
+ "SC7",
+ "HSM",
+ "CSITE",
+ "RCEWDT",
+ "PVA0WDT",
+ "PVA1WDT",
+ "L1A_ASYNC",
+ "BPMPBOOT",
+ "FUSECRC",
+};
+
+static const struct tegra_wake_event tegra194_wake_events[] = {
+ TEGRA_WAKE_IRQ("pmu", 24, 209),
+ TEGRA_WAKE_GPIO("power", 29, 1, TEGRA194_AON_GPIO(EE, 4)),
+ TEGRA_WAKE_IRQ("rtc", 73, 10),
+ TEGRA_WAKE_SIMPLE("usb3-port-0", 76),
+ TEGRA_WAKE_SIMPLE("usb3-port-1", 77),
+ TEGRA_WAKE_SIMPLE("usb3-port-2-3", 78),
+ TEGRA_WAKE_SIMPLE("usb2-port-0", 79),
+ TEGRA_WAKE_SIMPLE("usb2-port-1", 80),
+ TEGRA_WAKE_SIMPLE("usb2-port-2", 81),
+ TEGRA_WAKE_SIMPLE("usb2-port-3", 82),
+};
+
+static const struct tegra_pmc_soc tegra194_pmc_soc = {
+ .supports_core_domain = false,
+ .num_powergates = 0,
+ .powergates = NULL,
+ .num_cpu_powergates = 0,
+ .cpu_powergates = NULL,
+ .has_tsense_reset = false,
+ .has_gpu_clamps = false,
+ .needs_mbist_war = false,
+ .has_impl_33v_pwr = true,
+ .maybe_tz_only = false,
+ .num_io_pads = ARRAY_SIZE(tegra194_io_pads),
+ .io_pads = tegra194_io_pads,
+ .num_pin_descs = ARRAY_SIZE(tegra194_pin_descs),
+ .pin_descs = tegra194_pin_descs,
+ .regs = &tegra194_pmc_regs,
+ .init = tegra186_pmc_init,
+ .setup_irq_polarity = tegra186_pmc_setup_irq_polarity,
+ .set_wake_filters = tegra186_pmc_set_wake_filters,
+ .irq_set_wake = tegra186_pmc_irq_set_wake,
+ .irq_set_type = tegra186_pmc_irq_set_type,
+ .reset_sources = tegra194_reset_sources,
+ .num_reset_sources = ARRAY_SIZE(tegra194_reset_sources),
+ .reset_levels = tegra186_reset_levels,
+ .num_reset_levels = ARRAY_SIZE(tegra186_reset_levels),
+ .num_wake_events = ARRAY_SIZE(tegra194_wake_events),
+ .wake_events = tegra194_wake_events,
+ .max_wake_events = 96,
+ .max_wake_vectors = 3,
+ .pmc_clks_data = NULL,
+ .num_pmc_clks = 0,
+ .has_blink_output = false,
+ .has_usb_sleepwalk = false,
+};
+
+static const struct tegra_io_pad_soc tegra234_io_pads[] = {
+ TEGRA_IO_PAD(TEGRA_IO_PAD_CSIA, 0, 0xe0c0, 0xe0c4, UINT_MAX, "csia"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_CSIB, 1, 0xe0c0, 0xe0c4, UINT_MAX, "csib"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_HDMI_DP0, 0, 0xe0d0, 0xe0d4, UINT_MAX, "hdmi-dp0"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_CSIC, 2, 0xe0c0, 0xe0c4, UINT_MAX, "csic"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_CSID, 3, 0xe0c0, 0xe0c4, UINT_MAX, "csid"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_CSIE, 4, 0xe0c0, 0xe0c4, UINT_MAX, "csie"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_CSIF, 5, 0xe0c0, 0xe0c4, UINT_MAX, "csif"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_UFS, 0, 0xe064, 0xe068, UINT_MAX, "ufs"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_EDP, 1, 0xe05c, 0xe060, UINT_MAX, "edp"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_SDMMC1_HV, 0, 0xe054, 0xe058, 4, "sdmmc1-hv"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_SDMMC3_HV, UINT_MAX, UINT_MAX, UINT_MAX, 6, "sdmmc3-hv"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_AUDIO_HV, UINT_MAX, UINT_MAX, UINT_MAX, 1, "audio-hv"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_AO_HV, UINT_MAX, UINT_MAX, UINT_MAX, 0, "ao-hv"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_CSIG, 6, 0xe0c0, 0xe0c4, UINT_MAX, "csig"),
+ TEGRA_IO_PAD(TEGRA_IO_PAD_CSIH, 7, 0xe0c0, 0xe0c4, UINT_MAX, "csih"),
+};
+
+static const struct pinctrl_pin_desc tegra234_pin_descs[] = {
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_CSIA, "csia"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_CSIB, "csib"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_HDMI_DP0, "hdmi-dp0"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_CSIC, "csic"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_CSID, "csid"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_CSIE, "csie"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_CSIF, "csif"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_UFS, "ufs"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_EDP, "edp"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_SDMMC1_HV, "sdmmc1-hv"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_SDMMC3_HV, "sdmmc3-hv"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_AUDIO_HV, "audio-hv"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_AO_HV, "ao-hv"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_CSIG, "csig"),
+ TEGRA_IO_PIN_DESC(TEGRA_IO_PAD_CSIH, "csih"),
+};
+
+static const struct tegra_pmc_regs tegra234_pmc_regs = {
+ .scratch0 = 0x2000,
+ .rst_status = 0x70,
+ .rst_source_shift = 0x2,
+ .rst_source_mask = 0xfc,
+ .rst_level_shift = 0x0,
+ .rst_level_mask = 0x3,
+};
+
+static const char * const tegra234_reset_sources[] = {
+ "SYS_RESET_N", /* 0x0 */
+ "AOWDT",
+ "BCCPLEXWDT",
+ "BPMPWDT",
+ "SCEWDT",
+ "SPEWDT",
+ "APEWDT",
+ "LCCPLEXWDT",
+ "SENSOR", /* 0x8 */
+ NULL,
+ NULL,
+ "MAINSWRST",
+ "SC7",
+ "HSM",
+ NULL,
+ "RCEWDT",
+ NULL, /* 0x10 */
+ NULL,
+ NULL,
+ "BPMPBOOT",
+ "FUSECRC",
+ "DCEWDT",
+ "PSCWDT",
+ "PSC",
+ "CSITE_SW", /* 0x18 */
+ "POD",
+ "SCPM",
+ "VREFRO_POWERBAD",
+ "VMON",
+ "FMON",
+ "FSI_R5WDT",
+ "FSI_THERM",
+ "FSI_R52C0WDT", /* 0x20 */
+ "FSI_R52C1WDT",
+ "FSI_R52C2WDT",
+ "FSI_R52C3WDT",
+ "FSI_FMON",
+ "FSI_VMON", /* 0x25 */
+};
+
+static const struct tegra_wake_event tegra234_wake_events[] = {
+ TEGRA_WAKE_IRQ("pmu", 24, 209),
+ TEGRA_WAKE_GPIO("power", 29, 1, TEGRA234_AON_GPIO(EE, 4)),
+ TEGRA_WAKE_GPIO("mgbe", 56, 0, TEGRA234_MAIN_GPIO(Y, 3)),
+ TEGRA_WAKE_IRQ("rtc", 73, 10),
+ TEGRA_WAKE_IRQ("sw-wake", SW_WAKE_ID, 179),
+};
+
+static const struct tegra_pmc_soc tegra234_pmc_soc = {
+ .supports_core_domain = false,
+ .num_powergates = 0,
+ .powergates = NULL,
+ .num_cpu_powergates = 0,
+ .cpu_powergates = NULL,
+ .has_tsense_reset = false,
+ .has_gpu_clamps = false,
+ .needs_mbist_war = false,
+ .has_impl_33v_pwr = true,
+ .maybe_tz_only = false,
+ .num_io_pads = ARRAY_SIZE(tegra234_io_pads),
+ .io_pads = tegra234_io_pads,
+ .num_pin_descs = ARRAY_SIZE(tegra234_pin_descs),
+ .pin_descs = tegra234_pin_descs,
+ .regs = &tegra234_pmc_regs,
+ .init = tegra186_pmc_init,
+ .setup_irq_polarity = tegra186_pmc_setup_irq_polarity,
+ .set_wake_filters = tegra186_pmc_set_wake_filters,
+ .irq_set_wake = tegra186_pmc_irq_set_wake,
+ .irq_set_type = tegra186_pmc_irq_set_type,
+ .reset_sources = tegra234_reset_sources,
+ .num_reset_sources = ARRAY_SIZE(tegra234_reset_sources),
+ .reset_levels = tegra186_reset_levels,
+ .num_reset_levels = ARRAY_SIZE(tegra186_reset_levels),
+ .num_wake_events = ARRAY_SIZE(tegra234_wake_events),
+ .wake_events = tegra234_wake_events,
+ .max_wake_events = 96,
+ .max_wake_vectors = 3,
+ .pmc_clks_data = NULL,
+ .num_pmc_clks = 0,
+ .has_blink_output = false,
+};
+
+static const struct of_device_id tegra_pmc_match[] = {
+ { .compatible = "nvidia,tegra234-pmc", .data = &tegra234_pmc_soc },
+ { .compatible = "nvidia,tegra194-pmc", .data = &tegra194_pmc_soc },
+ { .compatible = "nvidia,tegra186-pmc", .data = &tegra186_pmc_soc },
+ { .compatible = "nvidia,tegra210-pmc", .data = &tegra210_pmc_soc },
+ { .compatible = "nvidia,tegra132-pmc", .data = &tegra124_pmc_soc },
+ { .compatible = "nvidia,tegra124-pmc", .data = &tegra124_pmc_soc },
+ { .compatible = "nvidia,tegra114-pmc", .data = &tegra114_pmc_soc },
+ { .compatible = "nvidia,tegra30-pmc", .data = &tegra30_pmc_soc },
+ { .compatible = "nvidia,tegra20-pmc", .data = &tegra20_pmc_soc },
+ { }
+};
+
+static void tegra_pmc_sync_state(struct device *dev)
+{
+ int err;
+
+ /*
+ * Newer device trees have power domains, but we need to prepare all
+ * device drivers with runtime PM and OPP support first; otherwise
+ * state syncing is unsafe.
+ */
+ if (!pmc->soc->supports_core_domain)
+ return;
+
+ /*
+ * Older device trees don't have the core power domain, and thus there
+ * are no dependencies that will block the state syncing. We shouldn't
+ * mark the domain as synced in this case.
+ */
+ if (!pmc->core_domain_registered)
+ return;
+
+ pmc->core_domain_state_synced = true;
+
+ /* this is a no-op if the core regulator isn't used */
+ mutex_lock(&pmc->powergates_lock);
+ err = dev_pm_opp_sync_regulators(dev);
+ mutex_unlock(&pmc->powergates_lock);
+
+ if (err)
+ dev_err(dev, "failed to sync regulators: %d\n", err);
+}
+
+static struct platform_driver tegra_pmc_driver = {
+ .driver = {
+ .name = "tegra-pmc",
+ .suppress_bind_attrs = true,
+ .of_match_table = tegra_pmc_match,
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM)
+ .pm = &tegra_pmc_pm_ops,
+#endif
+ .sync_state = tegra_pmc_sync_state,
+ },
+ .probe = tegra_pmc_probe,
+};
+builtin_platform_driver(tegra_pmc_driver);
+
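+/*
+ * Detect whether PMC registers are writable from the kernel by writing a
+ * test pattern to a scratch register and reading it back. Reading back
+ * all-zeroes means access is restricted to the TrustZone secure world.
+ */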
+static bool __init tegra_pmc_detect_tz_only(struct tegra_pmc *pmc)
+{
+ u32 value, saved;
+
+ saved = readl(pmc->base + pmc->soc->regs->scratch0);
+ value = saved ^ 0xffffffff;
+
+ if (value == 0xffffffff)
+ value = 0xdeadbeef;
+
+ /* write pattern and read it back */
+ writel(value, pmc->base + pmc->soc->regs->scratch0);
+ value = readl(pmc->base + pmc->soc->regs->scratch0);
+
+ /* if we read all-zeroes, access is restricted to TZ only */
+ if (value == 0) {
+ pr_info("access to PMC is restricted to TZ\n");
+ return true;
+ }
+
+ /* restore original value */
+ writel(saved, pmc->base + pmc->soc->regs->scratch0);
+
+ return false;
+}
+
+/*
+ * Early initialization to allow access to registers in the very early boot
+ * process.
+ */
+static int __init tegra_pmc_early_init(void)
+{
+ const struct of_device_id *match;
+ struct device_node *np;
+ struct resource regs;
+ unsigned int i;
+ bool invert;
+
+ mutex_init(&pmc->powergates_lock);
+
+ np = of_find_matching_node_and_match(NULL, tegra_pmc_match, &match);
+ if (!np) {
+ /*
+ * Fall back to legacy initialization for 32-bit ARM only. All
+ * 64-bit ARM device tree files for Tegra are required to have
+ * a PMC node.
+ *
+ * This is for backwards-compatibility with old device trees
+ * that didn't contain a PMC node. Note that in this case the
+ * SoC data can't be matched and therefore powergating is
+ * disabled.
+ */
+ if (IS_ENABLED(CONFIG_ARM) && soc_is_tegra()) {
+ pr_warn("DT node not found, powergating disabled\n");
+
+ regs.start = 0x7000e400;
+ regs.end = 0x7000e7ff;
+ regs.flags = IORESOURCE_MEM;
+
+ pr_warn("Using memory region %pR\n", &regs);
+ } else {
+ /*
+ * At this point we're not running on Tegra, so play
+ * nice with multi-platform kernels.
+ */
+ return 0;
+ }
+ } else {
+ /*
+ * Extract information from the device tree if we've found a
+ * matching node.
+ */
+ if (of_address_to_resource(np, 0, &regs) < 0) {
+ pr_err("failed to get PMC registers\n");
+ of_node_put(np);
+ return -ENXIO;
+ }
+ }
+
+ pmc->base = ioremap(regs.start, resource_size(&regs));
+ if (!pmc->base) {
+ pr_err("failed to map PMC registers\n");
+ of_node_put(np);
+ return -ENXIO;
+ }
+
+ if (of_device_is_available(np)) {
+ pmc->soc = match->data;
+
+ if (pmc->soc->maybe_tz_only)
+ pmc->tz_only = tegra_pmc_detect_tz_only(pmc);
+
+ /* Create a bitmap of the available and valid partitions */
+ for (i = 0; i < pmc->soc->num_powergates; i++)
+ if (pmc->soc->powergates[i])
+ set_bit(i, pmc->powergates_available);
+
+ /*
+ * Invert the interrupt polarity if a PMC device tree node
+ * exists and contains the nvidia,invert-interrupt property.
+ */
+ invert = of_property_read_bool(np, "nvidia,invert-interrupt");
+
+ pmc->soc->setup_irq_polarity(pmc, np, invert);
+
+ of_node_put(np);
+ }
+
+ return 0;
+}
+early_initcall(tegra_pmc_early_init);
diff --git a/drivers/soc/tegra/regulators-tegra20.c b/drivers/soc/tegra/regulators-tegra20.c
new file mode 100644
index 0000000000..6a2f90ab9d
--- /dev/null
+++ b/drivers/soc/tegra/regulators-tegra20.c
@@ -0,0 +1,560 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Voltage regulators coupler for NVIDIA Tegra20
+ * Copyright (C) 2019 GRATE-DRIVER project
+ *
+ * Voltage constraints borrowed from downstream kernel sources
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ */
+
+#define pr_fmt(fmt) "tegra voltage-coupler: " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/reboot.h>
+#include <linux/regulator/coupler.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/suspend.h>
+
+#include <soc/tegra/fuse.h>
+#include <soc/tegra/pmc.h>
+
+struct tegra_regulator_coupler {
+ struct regulator_coupler coupler;
+ struct regulator_dev *core_rdev;
+ struct regulator_dev *cpu_rdev;
+ struct regulator_dev *rtc_rdev;
+ struct notifier_block reboot_notifier;
+ struct notifier_block suspend_notifier;
+ int core_min_uV, cpu_min_uV;
+ bool sys_reboot_mode_req;
+ bool sys_reboot_mode;
+ bool sys_suspend_mode_req;
+ bool sys_suspend_mode;
+};
+
+static inline struct tegra_regulator_coupler *
+to_tegra_coupler(struct regulator_coupler *coupler)
+{
+ return container_of(coupler, struct tegra_regulator_coupler, coupler);
+}
+
+static int tegra20_core_limit(struct tegra_regulator_coupler *tegra,
+ struct regulator_dev *core_rdev)
+{
+ int core_min_uV = 0;
+ int core_max_uV;
+ int core_cur_uV;
+ int err;
+
+ /*
+ * The Tegra20 SoC has critical DVFS-capable devices that are
+ * permanently active or active at boot time, such as the EMC
+ * (DRAM controller) or the display controller.
+ *
+ * The voltage of the CORE SoC power domain must not be dropped below
+ * a minimum level, which is determined by the device's clock rate.
+ * This means that we can't fully allow CORE voltage scaling until
+ * the state of all DVFS-critical CORE devices is synced.
+ */
+ if (tegra_pmc_core_domain_state_synced() && !tegra->sys_reboot_mode) {
+ pr_info_once("voltage state synced\n");
+ return 0;
+ }
+
+ if (tegra->core_min_uV > 0)
+ return tegra->core_min_uV;
+
+ core_cur_uV = regulator_get_voltage_rdev(core_rdev);
+ if (core_cur_uV < 0)
+ return core_cur_uV;
+
+ core_max_uV = max(core_cur_uV, 1200000);
+
+ err = regulator_check_voltage(core_rdev, &core_min_uV, &core_max_uV);
+ if (err)
+ return err;
+
+ /*
+ * Limit the minimum CORE voltage to the value left by the bootloader
+ * or, if that value is unreasonably low, to the common 1.2V or to
+ * whatever maximum value is defined in the board's device tree.
+ */
+ tegra->core_min_uV = core_max_uV;
+
+ pr_info("core voltage initialized to %duV\n", tegra->core_min_uV);
+
+ return tegra->core_min_uV;
+}
+
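+/*
+ * Look up the CORE-RTC max-spread coupling constraint from the device tree,
+ * falling back to 150mV if it is undefined.
+ */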
+static int tegra20_core_rtc_max_spread(struct regulator_dev *core_rdev,
+ struct regulator_dev *rtc_rdev)
+{
+ struct coupling_desc *c_desc = &core_rdev->coupling_desc;
+ struct regulator_dev *rdev;
+ int max_spread;
+ unsigned int i;
+
+ for (i = 1; i < c_desc->n_coupled; i++) {
+ max_spread = core_rdev->constraints->max_spread[i - 1];
+ rdev = c_desc->coupled_rdevs[i];
+
+ if (rdev == rtc_rdev && max_spread)
+ return max_spread;
+ }
+
+ pr_err_once("rtc-core max-spread is undefined in device-tree\n");
+
+ return 150000;
+}
+
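+/* Nominal CPU voltage for the detected SoC speedo bin. */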
+static int tegra20_cpu_nominal_uV(void)
+{
+ switch (tegra_sku_info.soc_speedo_id) {
+ case 0:
+ return 1100000;
+ case 1:
+ return 1025000;
+ default:
+ return 1125000;
+ }
+}
+
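+/* Nominal CORE voltage for the detected SoC speedo bin. */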
+static int tegra20_core_nominal_uV(void)
+{
+ switch (tegra_sku_info.soc_speedo_id) {
+ default:
+ return 1225000;
+ case 2:
+ return 1300000;
+ }
+}
+
+static int tegra20_core_rtc_update(struct tegra_regulator_coupler *tegra,
+ struct regulator_dev *core_rdev,
+ struct regulator_dev *rtc_rdev,
+ int cpu_uV, int cpu_min_uV)
+{
+ int core_min_uV, core_max_uV = INT_MAX;
+ int rtc_min_uV, rtc_max_uV = INT_MAX;
+ int core_target_uV;
+ int rtc_target_uV;
+ int max_spread;
+ int core_uV;
+ int rtc_uV;
+ int err;
+
+ /*
+ * The RTC and CORE voltages should be no more than 170mV apart, and
+ * the CPU voltage should be at least 120mV below both RTC and CORE.
+ * This applies to all Tegra20 SoCs.
+ */
+ max_spread = tegra20_core_rtc_max_spread(core_rdev, rtc_rdev);
+
+ /*
+ * The core voltage scaling is currently not hooked up in drivers,
+ * hence we will limit the minimum core voltage to a reasonable value.
+ * This should be good enough for the time being.
+ */
+ core_min_uV = tegra20_core_limit(tegra, core_rdev);
+ if (core_min_uV < 0)
+ return core_min_uV;
+
+ err = regulator_check_voltage(core_rdev, &core_min_uV, &core_max_uV);
+ if (err)
+ return err;
+
+ err = regulator_check_consumers(core_rdev, &core_min_uV, &core_max_uV,
+ PM_SUSPEND_ON);
+ if (err)
+ return err;
+
+ /* prepare voltage level for suspend */
+ if (tegra->sys_suspend_mode)
+ core_min_uV = clamp(tegra20_core_nominal_uV(),
+ core_min_uV, core_max_uV);
+
+ core_uV = regulator_get_voltage_rdev(core_rdev);
+ if (core_uV < 0)
+ return core_uV;
+
+ core_min_uV = max(cpu_min_uV + 125000, core_min_uV);
+ if (core_min_uV > core_max_uV)
+ return -EINVAL;
+
+ if (cpu_uV + 120000 > core_uV)
+ pr_err("core-cpu voltage constraint violated: %d %d\n",
+ core_uV, cpu_uV + 120000);
+
+ rtc_uV = regulator_get_voltage_rdev(rtc_rdev);
+ if (rtc_uV < 0)
+ return rtc_uV;
+
+ if (cpu_uV + 120000 > rtc_uV)
+ pr_err("rtc-cpu voltage constraint violated: %d %d\n",
+ rtc_uV, cpu_uV + 120000);
+
+ if (abs(core_uV - rtc_uV) > 170000)
+ pr_err("core-rtc voltage constraint violated: %d %d\n",
+ core_uV, rtc_uV);
+
+ rtc_min_uV = max(cpu_min_uV + 125000, core_min_uV - max_spread);
+
+ err = regulator_check_voltage(rtc_rdev, &rtc_min_uV, &rtc_max_uV);
+ if (err)
+ return err;
+
+ while (core_uV != core_min_uV || rtc_uV != rtc_min_uV) {
+ if (core_uV < core_min_uV) {
+ core_target_uV = min(core_uV + max_spread, core_min_uV);
+ core_target_uV = min(rtc_uV + max_spread, core_target_uV);
+ } else {
+ core_target_uV = max(core_uV - max_spread, core_min_uV);
+ core_target_uV = max(rtc_uV - max_spread, core_target_uV);
+ }
+
+ if (core_uV == core_target_uV)
+ goto update_rtc;
+
+ err = regulator_set_voltage_rdev(core_rdev,
+ core_target_uV,
+ core_max_uV,
+ PM_SUSPEND_ON);
+ if (err)
+ return err;
+
+ core_uV = core_target_uV;
+update_rtc:
+ if (rtc_uV < rtc_min_uV) {
+ rtc_target_uV = min(rtc_uV + max_spread, rtc_min_uV);
+ rtc_target_uV = min(core_uV + max_spread, rtc_target_uV);
+ } else {
+ rtc_target_uV = max(rtc_uV - max_spread, rtc_min_uV);
+ rtc_target_uV = max(core_uV - max_spread, rtc_target_uV);
+ }
+
+ if (rtc_uV == rtc_target_uV)
+ continue;
+
+ err = regulator_set_voltage_rdev(rtc_rdev,
+ rtc_target_uV,
+ rtc_max_uV,
+ PM_SUSPEND_ON);
+ if (err)
+ return err;
+
+ rtc_uV = rtc_target_uV;
+ }
+
+ return 0;
+}
+
+static int tegra20_core_voltage_update(struct tegra_regulator_coupler *tegra,
+ struct regulator_dev *cpu_rdev,
+ struct regulator_dev *core_rdev,
+ struct regulator_dev *rtc_rdev)
+{
+ int cpu_uV;
+
+ cpu_uV = regulator_get_voltage_rdev(cpu_rdev);
+ if (cpu_uV < 0)
+ return cpu_uV;
+
+ return tegra20_core_rtc_update(tegra, core_rdev, rtc_rdev,
+ cpu_uV, cpu_uV);
+}
+
+static int tegra20_cpu_voltage_update(struct tegra_regulator_coupler *tegra,
+ struct regulator_dev *cpu_rdev,
+ struct regulator_dev *core_rdev,
+ struct regulator_dev *rtc_rdev)
+{
+ int cpu_min_uV_consumers = 0;
+ int cpu_max_uV = INT_MAX;
+ int cpu_min_uV = 0;
+ int cpu_uV;
+ int err;
+
+ err = regulator_check_voltage(cpu_rdev, &cpu_min_uV, &cpu_max_uV);
+ if (err)
+ return err;
+
+ err = regulator_check_consumers(cpu_rdev, &cpu_min_uV, &cpu_max_uV,
+ PM_SUSPEND_ON);
+ if (err)
+ return err;
+
+ err = regulator_check_consumers(cpu_rdev, &cpu_min_uV_consumers,
+ &cpu_max_uV, PM_SUSPEND_ON);
+ if (err)
+ return err;
+
+ cpu_uV = regulator_get_voltage_rdev(cpu_rdev);
+ if (cpu_uV < 0)
+ return cpu_uV;
+
+ /* store boot voltage level */
+ if (!tegra->cpu_min_uV)
+ tegra->cpu_min_uV = cpu_uV;
+
+ /*
+ * The CPU's regulator may not have any consumers; in that case the
+ * voltage must not be changed, because the CPU won't survive a
+ * voltage drop while it's running at a higher frequency.
+ */
+ if (!cpu_min_uV_consumers)
+ cpu_min_uV = cpu_uV;
+
+ /* restore boot voltage level */
+ if (tegra->sys_reboot_mode)
+ cpu_min_uV = max(cpu_min_uV, tegra->cpu_min_uV);
+
+ /* prepare voltage level for suspend */
+ if (tegra->sys_suspend_mode)
+ cpu_min_uV = clamp(tegra20_cpu_nominal_uV(),
+ cpu_min_uV, cpu_max_uV);
+
+ if (cpu_min_uV > cpu_uV) {
+ err = tegra20_core_rtc_update(tegra, core_rdev, rtc_rdev,
+ cpu_uV, cpu_min_uV);
+ if (err)
+ return err;
+
+ err = regulator_set_voltage_rdev(cpu_rdev, cpu_min_uV,
+ cpu_max_uV, PM_SUSPEND_ON);
+ if (err)
+ return err;
+ } else if (cpu_min_uV < cpu_uV) {
+ err = regulator_set_voltage_rdev(cpu_rdev, cpu_min_uV,
+ cpu_max_uV, PM_SUSPEND_ON);
+ if (err)
+ return err;
+
+ err = tegra20_core_rtc_update(tegra, core_rdev, rtc_rdev,
+ cpu_uV, cpu_min_uV);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int tegra20_regulator_balance_voltage(struct regulator_coupler *coupler,
+ struct regulator_dev *rdev,
+ suspend_state_t state)
+{
+ struct tegra_regulator_coupler *tegra = to_tegra_coupler(coupler);
+ struct regulator_dev *core_rdev = tegra->core_rdev;
+ struct regulator_dev *cpu_rdev = tegra->cpu_rdev;
+ struct regulator_dev *rtc_rdev = tegra->rtc_rdev;
+
+ if ((core_rdev != rdev && cpu_rdev != rdev && rtc_rdev != rdev) ||
+ state != PM_SUSPEND_ON) {
+ pr_err("regulators are not coupled properly\n");
+ return -EINVAL;
+ }
+
+ tegra->sys_reboot_mode = READ_ONCE(tegra->sys_reboot_mode_req);
+ tegra->sys_suspend_mode = READ_ONCE(tegra->sys_suspend_mode_req);
+
+ if (rdev == cpu_rdev)
+ return tegra20_cpu_voltage_update(tegra, cpu_rdev,
+ core_rdev, rtc_rdev);
+
+ if (rdev == core_rdev)
+ return tegra20_core_voltage_update(tegra, cpu_rdev,
+ core_rdev, rtc_rdev);
+
+ pr_err("changing %s voltage not permitted\n", rdev_get_name(rtc_rdev));
+
+ return -EPERM;
+}
+
+static int tegra20_regulator_prepare_suspend(struct tegra_regulator_coupler *tegra,
+ bool sys_suspend_mode)
+{
+ int err;
+
+ if (!tegra->core_rdev || !tegra->rtc_rdev || !tegra->cpu_rdev)
+ return 0;
+
+ /*
+ * All power domains are enabled early during resume from suspend
+ * by the GENPD core. Domains like VENC may require a higher voltage
+ * when they are enabled at that point. This also prepares the
+ * hardware for resuming from LP0.
+ */
+
+ WRITE_ONCE(tegra->sys_suspend_mode_req, sys_suspend_mode);
+
+ err = regulator_sync_voltage_rdev(tegra->cpu_rdev);
+ if (err)
+ return err;
+
+ err = regulator_sync_voltage_rdev(tegra->core_rdev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int tegra20_regulator_suspend(struct notifier_block *notifier,
+ unsigned long mode, void *arg)
+{
+ struct tegra_regulator_coupler *tegra;
+ int ret = 0;
+
+ tegra = container_of(notifier, struct tegra_regulator_coupler,
+ suspend_notifier);
+
+ switch (mode) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_RESTORE_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ ret = tegra20_regulator_prepare_suspend(tegra, true);
+ break;
+
+ case PM_POST_HIBERNATION:
+ case PM_POST_RESTORE:
+ case PM_POST_SUSPEND:
+ ret = tegra20_regulator_prepare_suspend(tegra, false);
+ break;
+ }
+
+ if (ret)
+ pr_err("failed to prepare regulators: %d\n", ret);
+
+ return notifier_from_errno(ret);
+}
+
+static int tegra20_regulator_prepare_reboot(struct tegra_regulator_coupler *tegra,
+ bool sys_reboot_mode)
+{
+ int err;
+
+ if (!tegra->core_rdev || !tegra->rtc_rdev || !tegra->cpu_rdev)
+ return 0;
+
+ WRITE_ONCE(tegra->sys_reboot_mode_req, true);
+
+ /*
+ * Some devices use the CPU soft-reboot method; in that case we
+ * should ensure that the voltages are sane for the reboot by
+ * restoring the minimum boot levels.
+ */
+ err = regulator_sync_voltage_rdev(tegra->cpu_rdev);
+ if (err)
+ return err;
+
+ err = regulator_sync_voltage_rdev(tegra->core_rdev);
+ if (err)
+ return err;
+
+ WRITE_ONCE(tegra->sys_reboot_mode_req, sys_reboot_mode);
+
+ return 0;
+}
+
+static int tegra20_regulator_reboot(struct notifier_block *notifier,
+ unsigned long event, void *cmd)
+{
+ struct tegra_regulator_coupler *tegra;
+ int ret;
+
+ if (event != SYS_RESTART)
+ return NOTIFY_DONE;
+
+ tegra = container_of(notifier, struct tegra_regulator_coupler,
+ reboot_notifier);
+
+ ret = tegra20_regulator_prepare_reboot(tegra, true);
+
+ return notifier_from_errno(ret);
+}
+
+static int tegra20_regulator_attach(struct regulator_coupler *coupler,
+ struct regulator_dev *rdev)
+{
+ struct tegra_regulator_coupler *tegra = to_tegra_coupler(coupler);
+ struct device_node *np = rdev->dev.of_node;
+
+ if (of_property_read_bool(np, "nvidia,tegra-core-regulator") &&
+ !tegra->core_rdev) {
+ tegra->core_rdev = rdev;
+ return 0;
+ }
+
+ if (of_property_read_bool(np, "nvidia,tegra-rtc-regulator") &&
+ !tegra->rtc_rdev) {
+ tegra->rtc_rdev = rdev;
+ return 0;
+ }
+
+ if (of_property_read_bool(np, "nvidia,tegra-cpu-regulator") &&
+ !tegra->cpu_rdev) {
+ tegra->cpu_rdev = rdev;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int tegra20_regulator_detach(struct regulator_coupler *coupler,
+ struct regulator_dev *rdev)
+{
+ struct tegra_regulator_coupler *tegra = to_tegra_coupler(coupler);
+
+ /*
+ * We don't expect regulators to be decoupled during reboot; this
+ * could race with the reboot handler and shouldn't ever happen in
+ * practice.
+ */
+ if (WARN_ON_ONCE(system_state > SYSTEM_RUNNING))
+ return -EPERM;
+
+ if (tegra->core_rdev == rdev) {
+ tegra->core_rdev = NULL;
+ return 0;
+ }
+
+ if (tegra->rtc_rdev == rdev) {
+ tegra->rtc_rdev = NULL;
+ return 0;
+ }
+
+ if (tegra->cpu_rdev == rdev) {
+ tegra->cpu_rdev = NULL;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static struct tegra_regulator_coupler tegra20_coupler = {
+ .coupler = {
+ .attach_regulator = tegra20_regulator_attach,
+ .detach_regulator = tegra20_regulator_detach,
+ .balance_voltage = tegra20_regulator_balance_voltage,
+ },
+ .reboot_notifier.notifier_call = tegra20_regulator_reboot,
+ .suspend_notifier.notifier_call = tegra20_regulator_suspend,
+};
+
+static int __init tegra_regulator_coupler_init(void)
+{
+ int err;
+
+ if (!of_machine_is_compatible("nvidia,tegra20"))
+ return 0;
+
+ err = register_reboot_notifier(&tegra20_coupler.reboot_notifier);
+ WARN_ON(err);
+
+ err = register_pm_notifier(&tegra20_coupler.suspend_notifier);
+ WARN_ON(err);
+
+ return regulator_coupler_register(&tegra20_coupler.coupler);
+}
+arch_initcall(tegra_regulator_coupler_init);
diff --git a/drivers/soc/tegra/regulators-tegra30.c b/drivers/soc/tegra/regulators-tegra30.c
new file mode 100644
index 0000000000..8fd43c6891
--- /dev/null
+++ b/drivers/soc/tegra/regulators-tegra30.c
@@ -0,0 +1,534 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Voltage regulators coupler for NVIDIA Tegra30
+ * Copyright (C) 2019 GRATE-DRIVER project
+ *
+ * Voltage constraints borrowed from downstream kernel sources
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ */
+
+#define pr_fmt(fmt) "tegra voltage-coupler: " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/reboot.h>
+#include <linux/regulator/coupler.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/suspend.h>
+
+#include <soc/tegra/fuse.h>
+#include <soc/tegra/pmc.h>
+
+struct tegra_regulator_coupler {
+ struct regulator_coupler coupler;
+ struct regulator_dev *core_rdev;
+ struct regulator_dev *cpu_rdev;
+ struct notifier_block reboot_notifier;
+ struct notifier_block suspend_notifier;
+ int core_min_uV, cpu_min_uV;
+ bool sys_reboot_mode_req;
+ bool sys_reboot_mode;
+ bool sys_suspend_mode_req;
+ bool sys_suspend_mode;
+};
+
+static inline struct tegra_regulator_coupler *
+to_tegra_coupler(struct regulator_coupler *coupler)
+{
+ return container_of(coupler, struct tegra_regulator_coupler, coupler);
+}
+
+static int tegra30_core_limit(struct tegra_regulator_coupler *tegra,
+ struct regulator_dev *core_rdev)
+{
+ int core_min_uV = 0;
+ int core_max_uV;
+ int core_cur_uV;
+ int err;
+
+ /*
+ * The Tegra30 SoC has critical DVFS-capable devices that are
+ * permanently active or active at boot time, such as the EMC
+ * (DRAM controller) or the display controller.
+ *
+ * The voltage of the CORE SoC power domain must not be dropped below
+ * a minimum level, which is determined by the device's clock rate.
+ * This means that we can't fully allow CORE voltage scaling until
+ * the state of all DVFS-critical CORE devices is synced.
+ */
+ if (tegra_pmc_core_domain_state_synced() && !tegra->sys_reboot_mode) {
+ pr_info_once("voltage state synced\n");
+ return 0;
+ }
+
+ if (tegra->core_min_uV > 0)
+ return tegra->core_min_uV;
+
+ core_cur_uV = regulator_get_voltage_rdev(core_rdev);
+ if (core_cur_uV < 0)
+ return core_cur_uV;
+
+ core_max_uV = max(core_cur_uV, 1200000);
+
+ err = regulator_check_voltage(core_rdev, &core_min_uV, &core_max_uV);
+ if (err)
+ return err;
+
+ /*
+ * Limit the minimum CORE voltage to the value left by the bootloader
+ * or, if that value is unreasonably low, to the common 1.2V or to
+ * whatever maximum value is defined in the board's device tree.
+ */
+ tegra->core_min_uV = core_max_uV;
+
+ pr_info("core voltage initialized to %duV\n", tegra->core_min_uV);
+
+ return tegra->core_min_uV;
+}
+
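+/*
+ * Minimum CORE voltage required for a given CPU voltage; above 1.1V the
+ * limit also depends on the CPU speedo bin. Returns -EINVAL for CPU
+ * voltages outside the supported range.
+ */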
+static int tegra30_core_cpu_limit(int cpu_uV)
+{
+ if (cpu_uV < 800000)
+ return 950000;
+
+ if (cpu_uV < 900000)
+ return 1000000;
+
+ if (cpu_uV < 1000000)
+ return 1100000;
+
+ if (cpu_uV < 1100000)
+ return 1200000;
+
+ if (cpu_uV < 1250000) {
+ switch (tegra_sku_info.cpu_speedo_id) {
+ case 0 ... 1:
+ case 4:
+ case 7 ... 8:
+ return 1200000;
+
+ default:
+ return 1300000;
+ }
+ }
+
+ return -EINVAL;
+}
+
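+/* Nominal CPU voltage for the detected CPU speedo bin. */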
+static int tegra30_cpu_nominal_uV(void)
+{
+ switch (tegra_sku_info.cpu_speedo_id) {
+ case 10 ... 11:
+ return 850000;
+
+ case 9:
+ return 912000;
+
+ case 1 ... 3:
+ case 7 ... 8:
+ return 1050000;
+
+ default:
+ return 1125000;
+
+ case 4 ... 6:
+ case 12 ... 13:
+ return 1237000;
+ }
+}
+
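+/* Nominal CORE voltage for the detected SoC/CPU speedo combination. */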
+static int tegra30_core_nominal_uV(void)
+{
+ switch (tegra_sku_info.soc_speedo_id) {
+ case 0:
+ return 1200000;
+
+ case 1:
+ if (tegra_sku_info.cpu_speedo_id != 7 &&
+ tegra_sku_info.cpu_speedo_id != 8)
+ return 1200000;
+
+ fallthrough;
+
+ case 2:
+ if (tegra_sku_info.cpu_speedo_id != 13)
+ return 1300000;
+
+ return 1350000;
+
+ default:
+ return 1250000;
+ }
+}
+
+static int tegra30_voltage_update(struct tegra_regulator_coupler *tegra,
+ struct regulator_dev *cpu_rdev,
+ struct regulator_dev *core_rdev)
+{
+ int core_min_uV, core_max_uV = INT_MAX;
+ int cpu_min_uV, cpu_max_uV = INT_MAX;
+ int cpu_min_uV_consumers = 0;
+ int core_min_limited_uV;
+ int core_target_uV;
+ int cpu_target_uV;
+ int core_max_step;
+ int cpu_max_step;
+ int max_spread;
+ int core_uV;
+ int cpu_uV;
+ int err;
+
+ /*
+ * The CPU voltage should not be more than 300mV below the CORE
+ * voltage, and it should stay at least 100mV below the CORE, with
+ * the exact margin depending on the voltage level. This applies to
+ * all Tegra30 SoCs.
+ */
+ max_spread = cpu_rdev->constraints->max_spread[0];
+ cpu_max_step = cpu_rdev->constraints->max_uV_step;
+ core_max_step = core_rdev->constraints->max_uV_step;
+
+ if (!max_spread) {
+ pr_err_once("cpu-core max-spread is undefined in device-tree\n");
+ max_spread = 300000;
+ }
+
+ if (!cpu_max_step) {
+ pr_err_once("cpu max-step is undefined in device-tree\n");
+ cpu_max_step = 150000;
+ }
+
+ if (!core_max_step) {
+ pr_err_once("core max-step is undefined in device-tree\n");
+ core_max_step = 150000;
+ }
+
+ /*
+ * The CORE voltage scaling is currently not hooked up in drivers,
+ * hence we will limit the minimum CORE voltage to a reasonable value.
+ * This should be good enough for the time being.
+ */
+ core_min_uV = tegra30_core_limit(tegra, core_rdev);
+ if (core_min_uV < 0)
+ return core_min_uV;
+
+ err = regulator_check_consumers(core_rdev, &core_min_uV, &core_max_uV,
+ PM_SUSPEND_ON);
+ if (err)
+ return err;
+
+ /* prepare voltage level for suspend */
+ if (tegra->sys_suspend_mode)
+ core_min_uV = clamp(tegra30_core_nominal_uV(),
+ core_min_uV, core_max_uV);
+
+ core_uV = regulator_get_voltage_rdev(core_rdev);
+ if (core_uV < 0)
+ return core_uV;
+
+ cpu_min_uV = core_min_uV - max_spread;
+
+ err = regulator_check_consumers(cpu_rdev, &cpu_min_uV, &cpu_max_uV,
+ PM_SUSPEND_ON);
+ if (err)
+ return err;
+
+ err = regulator_check_consumers(cpu_rdev, &cpu_min_uV_consumers,
+ &cpu_max_uV, PM_SUSPEND_ON);
+ if (err)
+ return err;
+
+ err = regulator_check_voltage(cpu_rdev, &cpu_min_uV, &cpu_max_uV);
+ if (err)
+ return err;
+
+ cpu_uV = regulator_get_voltage_rdev(cpu_rdev);
+ if (cpu_uV < 0)
+ return cpu_uV;
+
+ /* store boot voltage level */
+ if (!tegra->cpu_min_uV)
+ tegra->cpu_min_uV = cpu_uV;
+
+ /*
+ * The CPU's regulator may not have any consumers; in that case the
+ * voltage must not be changed, because the CPU won't survive a
+ * voltage drop while it's running at a higher frequency.
+ */
+ if (!cpu_min_uV_consumers)
+ cpu_min_uV = max(cpu_uV, cpu_min_uV);
+
+ /*
+ * The bootloader should set up the voltages correctly, but if a
+ * constraint violation is detected, try to fix it first.
+ */
+ core_min_limited_uV = tegra30_core_cpu_limit(cpu_uV);
+ if (core_min_limited_uV < 0)
+ return core_min_limited_uV;
+
+ core_min_uV = max(core_min_uV, tegra30_core_cpu_limit(cpu_min_uV));
+
+ err = regulator_check_voltage(core_rdev, &core_min_uV, &core_max_uV);
+ if (err)
+ return err;
+
+ /* restore boot voltage level */
+ if (tegra->sys_reboot_mode)
+ cpu_min_uV = max(cpu_min_uV, tegra->cpu_min_uV);
+
+ /* prepare voltage level for suspend */
+ if (tegra->sys_suspend_mode)
+ cpu_min_uV = clamp(tegra30_cpu_nominal_uV(),
+ cpu_min_uV, cpu_max_uV);
+
+ if (core_min_limited_uV > core_uV) {
+ pr_err("core voltage constraint violated: %d %d %d\n",
+ core_uV, core_min_limited_uV, cpu_uV);
+ goto update_core;
+ }
+
+ while (cpu_uV != cpu_min_uV || core_uV != core_min_uV) {
+ if (cpu_uV < cpu_min_uV) {
+ cpu_target_uV = min(cpu_uV + cpu_max_step, cpu_min_uV);
+ } else {
+ cpu_target_uV = max(cpu_uV - cpu_max_step, cpu_min_uV);
+ cpu_target_uV = max(core_uV - max_spread, cpu_target_uV);
+ }
+
+ if (cpu_uV == cpu_target_uV)
+ goto update_core;
+
+ err = regulator_set_voltage_rdev(cpu_rdev,
+ cpu_target_uV,
+ cpu_max_uV,
+ PM_SUSPEND_ON);
+ if (err)
+ return err;
+
+ cpu_uV = cpu_target_uV;
+update_core:
+ core_min_limited_uV = tegra30_core_cpu_limit(cpu_uV);
+ if (core_min_limited_uV < 0)
+ return core_min_limited_uV;
+
+ core_target_uV = max(core_min_limited_uV, core_min_uV);
+
+ if (core_uV < core_target_uV) {
+ core_target_uV = min(core_target_uV, core_uV + core_max_step);
+ core_target_uV = min(core_target_uV, cpu_uV + max_spread);
+ } else {
+ core_target_uV = max(core_target_uV, core_uV - core_max_step);
+ }
+
+ if (core_uV == core_target_uV)
+ continue;
+
+ err = regulator_set_voltage_rdev(core_rdev,
+ core_target_uV,
+ core_max_uV,
+ PM_SUSPEND_ON);
+ if (err)
+ return err;
+
+ core_uV = core_target_uV;
+ }
+
+ return 0;
+}
+
+static int tegra30_regulator_balance_voltage(struct regulator_coupler *coupler,
+ struct regulator_dev *rdev,
+ suspend_state_t state)
+{
+ struct tegra_regulator_coupler *tegra = to_tegra_coupler(coupler);
+ struct regulator_dev *core_rdev = tegra->core_rdev;
+ struct regulator_dev *cpu_rdev = tegra->cpu_rdev;
+
+ if ((core_rdev != rdev && cpu_rdev != rdev) || state != PM_SUSPEND_ON) {
+ pr_err("regulators are not coupled properly\n");
+ return -EINVAL;
+ }
+
+ tegra->sys_reboot_mode = READ_ONCE(tegra->sys_reboot_mode_req);
+ tegra->sys_suspend_mode = READ_ONCE(tegra->sys_suspend_mode_req);
+
+ return tegra30_voltage_update(tegra, cpu_rdev, core_rdev);
+}
+
+static int tegra30_regulator_prepare_suspend(struct tegra_regulator_coupler *tegra,
+ bool sys_suspend_mode)
+{
+ int err;
+
+ if (!tegra->core_rdev || !tegra->cpu_rdev)
+ return 0;
+
+ /*
+ * All power domains are enabled early during resume from suspend
+ * by the GENPD core. Domains like VENC may require a higher voltage
+ * when enabled during resume from suspend. This also prepares
+ * hardware for resuming from LP0.
+ */
+
+ WRITE_ONCE(tegra->sys_suspend_mode_req, sys_suspend_mode);
+
+ err = regulator_sync_voltage_rdev(tegra->cpu_rdev);
+ if (err)
+ return err;
+
+ err = regulator_sync_voltage_rdev(tegra->core_rdev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int tegra30_regulator_suspend(struct notifier_block *notifier,
+ unsigned long mode, void *arg)
+{
+ struct tegra_regulator_coupler *tegra;
+ int ret = 0;
+
+ tegra = container_of(notifier, struct tegra_regulator_coupler,
+ suspend_notifier);
+
+ switch (mode) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_RESTORE_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ ret = tegra30_regulator_prepare_suspend(tegra, true);
+ break;
+
+ case PM_POST_HIBERNATION:
+ case PM_POST_RESTORE:
+ case PM_POST_SUSPEND:
+ ret = tegra30_regulator_prepare_suspend(tegra, false);
+ break;
+ }
+
+ if (ret)
+ pr_err("failed to prepare regulators: %d\n", ret);
+
+ return notifier_from_errno(ret);
+}
+
+static int tegra30_regulator_prepare_reboot(struct tegra_regulator_coupler *tegra,
+ bool sys_reboot_mode)
+{
+ int err;
+
+ if (!tegra->core_rdev || !tegra->cpu_rdev)
+ return 0;
+
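+ /*
+ * Force the boot-level restore while the voltages are synced below,
+ * then record the reboot mode that was actually requested.
+ */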
+ WRITE_ONCE(tegra->sys_reboot_mode_req, true);
+
+ /*
+ * Some devices use the CPU soft-reboot method, in which case we
+ * should ensure that the voltages are sane for the reboot by
+ * restoring the minimum boot levels.
+ */
+ err = regulator_sync_voltage_rdev(tegra->cpu_rdev);
+ if (err)
+ return err;
+
+ err = regulator_sync_voltage_rdev(tegra->core_rdev);
+ if (err)
+ return err;
+
+ WRITE_ONCE(tegra->sys_reboot_mode_req, sys_reboot_mode);
+
+ return 0;
+}
+
+static int tegra30_regulator_reboot(struct notifier_block *notifier,
+ unsigned long event, void *cmd)
+{
+ struct tegra_regulator_coupler *tegra;
+ int ret;
+
+ if (event != SYS_RESTART)
+ return NOTIFY_DONE;
+
+ tegra = container_of(notifier, struct tegra_regulator_coupler,
+ reboot_notifier);
+
+ ret = tegra30_regulator_prepare_reboot(tegra, true);
+
+ return notifier_from_errno(ret);
+}
+
+static int tegra30_regulator_attach(struct regulator_coupler *coupler,
+ struct regulator_dev *rdev)
+{
+ struct tegra_regulator_coupler *tegra = to_tegra_coupler(coupler);
+ struct device_node *np = rdev->dev.of_node;
+
+ if (of_property_read_bool(np, "nvidia,tegra-core-regulator") &&
+ !tegra->core_rdev) {
+ tegra->core_rdev = rdev;
+ return 0;
+ }
+
+ if (of_property_read_bool(np, "nvidia,tegra-cpu-regulator") &&
+ !tegra->cpu_rdev) {
+ tegra->cpu_rdev = rdev;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int tegra30_regulator_detach(struct regulator_coupler *coupler,
+ struct regulator_dev *rdev)
+{
+ struct tegra_regulator_coupler *tegra = to_tegra_coupler(coupler);
+
+ /*
+ * We don't expect regulators to be decoupled during reboot;
+ * this could race with the reboot handler and shouldn't ever
+ * happen in practice.
+ */
+ if (WARN_ON_ONCE(system_state > SYSTEM_RUNNING))
+ return -EPERM;
+
+ if (tegra->core_rdev == rdev) {
+ tegra->core_rdev = NULL;
+ return 0;
+ }
+
+ if (tegra->cpu_rdev == rdev) {
+ tegra->cpu_rdev = NULL;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static struct tegra_regulator_coupler tegra30_coupler = {
+ .coupler = {
+ .attach_regulator = tegra30_regulator_attach,
+ .detach_regulator = tegra30_regulator_detach,
+ .balance_voltage = tegra30_regulator_balance_voltage,
+ },
+ .reboot_notifier.notifier_call = tegra30_regulator_reboot,
+ .suspend_notifier.notifier_call = tegra30_regulator_suspend,
+};
+
+static int __init tegra_regulator_coupler_init(void)
+{
+ int err;
+
+ if (!of_machine_is_compatible("nvidia,tegra30"))
+ return 0;
+
+ err = register_reboot_notifier(&tegra30_coupler.reboot_notifier);
+ WARN_ON(err);
+
+ err = register_pm_notifier(&tegra30_coupler.suspend_notifier);
+ WARN_ON(err);
+
+ return regulator_coupler_register(&tegra30_coupler.coupler);
+}
+arch_initcall(tegra_regulator_coupler_init);
diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig
new file mode 100644
index 0000000000..2cae17b65f
--- /dev/null
+++ b/drivers/soc/ti/Kconfig
@@ -0,0 +1,103 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+#
+# TI SOC drivers
+#
+menuconfig SOC_TI
+ bool "TI SOC drivers support"
+
+if SOC_TI
+
+config KEYSTONE_NAVIGATOR_QMSS
+ tristate "Keystone Queue Manager Sub System"
+ depends on ARCH_KEYSTONE
+ help
+ Say y here to enable support for the Keystone multicore Navigator
+ Queue Manager. The Queue Manager is a hardware module that
+ is responsible for accelerating management of the packet queues.
+ Packets are queued/de-queued by writing/reading descriptor addresses
+ to/from a particular memory mapped location in the Queue Manager module.
+
+ If unsure, say N.
+
+config KEYSTONE_NAVIGATOR_DMA
+ tristate "TI Keystone Navigator Packet DMA support"
+ depends on ARCH_KEYSTONE
+ help
+ Say y to enable support for the Keystone Navigator Packet DMA
+ on the Keystone family of devices. It sets up the DMA channels for the
+ Queue Manager Sub System.
+
+ If unsure, say N.
+
+config AMX3_PM
+ tristate "AMx3 Power Management"
+ depends on SOC_AM33XX || SOC_AM43XX
+ depends on WKUP_M3_IPC && TI_EMIF_SRAM && SRAM && RTC_DRV_OMAP
+ help
+ Enable power management on AM335x and AM437x. Required for the
+ suspend-to-mem and standby states on both AM335x and AM437x platforms
+ and for deeper cpuidle C-states on AM335x. Also required for the RTC
+ and DDR in self-refresh low power mode on AM437x platforms.
+
+config WKUP_M3_IPC
+ tristate "TI AMx3 Wkup-M3 IPC Driver"
+ depends on WKUP_M3_RPROC
+ depends on OMAP2PLUS_MBOX
+ help
+ TI AM33XX and AM43XX have a Cortex-M3, the Wakeup M3, to handle
+ low power transitions. This IPC driver provides the necessary API
+ to communicate with and use the Wakeup M3 for PM features like
+ suspend/resume, and boots it using the wkup_m3_rproc driver.
+
+config TI_SCI_PM_DOMAINS
+ tristate "TI SCI PM Domains Driver"
+ depends on TI_SCI_PROTOCOL
+ depends on PM_GENERIC_DOMAINS
+ help
+ Generic power domain implementation for TI devices implementing
+ the TI SCI protocol.
+
+ To compile this as a module, choose M here. The module will be
+ called ti_sci_pm_domains. Note that this is needed early in boot,
+ before the rootfs may be available.
+
+config TI_K3_RINGACC
+ tristate "K3 Ring accelerator Sub System"
+ depends on ARCH_K3 || COMPILE_TEST
+ depends on TI_SCI_INTA_IRQCHIP
+ help
+ Say y here to support the K3 Ring accelerator module.
+ The Ring Accelerator (RINGACC or RA) provides hardware acceleration
+ to enable straightforward passing of work between a producer
+ and a consumer. There is one RINGACC module per NAVSS on TI AM65x SoCs.
+
+ If unsure, say N.
+
+config TI_K3_SOCINFO
+ bool
+ depends on ARCH_K3 || COMPILE_TEST
+ select SOC_BUS
+ select MFD_SYSCON
+ help
+ Include support for the SoC bus socinfo for the TI K3 Multicore SoC
+ platforms to provide information about the SoC family and
+ variant to user space.
+
+config TI_PRUSS
+ tristate "TI PRU-ICSS Subsystem Platform drivers"
+ depends on SOC_AM33XX || SOC_AM43XX || SOC_DRA7XX || ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
+ select MFD_SYSCON
+ help
+ TI PRU-ICSS Subsystem platform specific support.
+
+ Say Y or M here to support the Programmable Realtime Unit (PRU)
+ processors on various TI SoCs. It's safe to say N here if you're
+ not interested in the PRU or if you are unsure.
+
+endif # SOC_TI
+
+config TI_SCI_INTA_MSI_DOMAIN
+ bool
+ select GENERIC_MSI_IRQ
+ help
+ Driver to enable Interrupt Aggregator specific MSI Domain.
diff --git a/drivers/soc/ti/Makefile b/drivers/soc/ti/Makefile
new file mode 100644
index 0000000000..cb800a745e
--- /dev/null
+++ b/drivers/soc/ti/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# TI Keystone SOC drivers
+#
+obj-$(CONFIG_KEYSTONE_NAVIGATOR_QMSS) += knav_qmss.o
+knav_qmss-y := knav_qmss_queue.o knav_qmss_acc.o
+obj-$(CONFIG_KEYSTONE_NAVIGATOR_DMA) += knav_dma.o
+obj-$(CONFIG_AMX3_PM) += pm33xx.o
+obj-$(CONFIG_WKUP_M3_IPC) += wkup_m3_ipc.o
+obj-$(CONFIG_TI_SCI_INTA_MSI_DOMAIN) += ti_sci_inta_msi.o
+obj-$(CONFIG_TI_K3_RINGACC) += k3-ringacc.o
+obj-$(CONFIG_TI_K3_SOCINFO) += k3-socinfo.o
+obj-$(CONFIG_TI_PRUSS) += pruss.o
+obj-$(CONFIG_POWER_AVS_OMAP) += smartreflex.o
diff --git a/drivers/soc/ti/k3-ringacc.c b/drivers/soc/ti/k3-ringacc.c
new file mode 100644
index 0000000000..148f54d969
--- /dev/null
+++ b/drivers/soc/ti/k3-ringacc.c
@@ -0,0 +1,1577 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TI K3 NAVSS Ring Accelerator subsystem driver
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/sys_soc.h>
+#include <linux/dma/ti-cppi5.h>
+#include <linux/soc/ti/k3-ringacc.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+#include <linux/soc/ti/ti_sci_inta_msi.h>
+#include <linux/of_irq.h>
+#include <linux/irqdomain.h>
+
+static LIST_HEAD(k3_ringacc_list);
+static DEFINE_MUTEX(k3_ringacc_list_lock);
+
+#define K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK GENMASK(19, 0)
+#define K3_DMARING_CFG_RING_SIZE_ELCNT_MASK GENMASK(15, 0)
+
+/**
+ * struct k3_ring_rt_regs - The RA realtime Control/Status Registers region
+ *
+ * @resv_16: Reserved
+ * @db: Ring Doorbell Register
+ * @resv_4: Reserved
+ * @occ: Ring Occupancy Register
+ * @indx: Ring Current Index Register
+ * @hwocc: Ring Hardware Occupancy Register
+ * @hwindx: Ring Hardware Current Index Register
+ */
+struct k3_ring_rt_regs {
+ u32 resv_16[4];
+ u32 db;
+ u32 resv_4[1];
+ u32 occ;
+ u32 indx;
+ u32 hwocc;
+ u32 hwindx;
+};
+
+#define K3_RINGACC_RT_REGS_STEP 0x1000
+#define K3_DMARING_RT_REGS_STEP 0x2000
+#define K3_DMARING_RT_REGS_REVERSE_OFS 0x1000
+#define K3_RINGACC_RT_OCC_MASK GENMASK(20, 0)
+#define K3_DMARING_RT_OCC_TDOWN_COMPLETE BIT(31)
+#define K3_DMARING_RT_DB_ENTRY_MASK GENMASK(7, 0)
+#define K3_DMARING_RT_DB_TDOWN_ACK BIT(31)
+
+/**
+ * struct k3_ring_fifo_regs - The Ring Accelerator Queues Registers region
+ *
+ * @head_data: Ring Head Entry Data Registers
+ * @tail_data: Ring Tail Entry Data Registers
+ * @peek_head_data: Ring Peek Head Entry Data Regs
+ * @peek_tail_data: Ring Peek Tail Entry Data Regs
+ */
+struct k3_ring_fifo_regs {
+ u32 head_data[128];
+ u32 tail_data[128];
+ u32 peek_head_data[128];
+ u32 peek_tail_data[128];
+};
+
+/**
+ * struct k3_ringacc_proxy_gcfg_regs - RA Proxy Global Config MMIO Region
+ *
+ * @revision: Revision Register
+ * @config: Config Register
+ */
+struct k3_ringacc_proxy_gcfg_regs {
+ u32 revision;
+ u32 config;
+};
+
+#define K3_RINGACC_PROXY_CFG_THREADS_MASK GENMASK(15, 0)
+
+/**
+ * struct k3_ringacc_proxy_target_regs - Proxy Datapath MMIO Region
+ *
+ * @control: Proxy Control Register
+ * @status: Proxy Status Register
+ * @resv_512: Reserved
+ * @data: Proxy Data Register
+ */
+struct k3_ringacc_proxy_target_regs {
+ u32 control;
+ u32 status;
+ u8 resv_512[504];
+ u32 data[128];
+};
+
+#define K3_RINGACC_PROXY_TARGET_STEP 0x1000
+#define K3_RINGACC_PROXY_NOT_USED (-1)
+
+enum k3_ringacc_proxy_access_mode {
+ PROXY_ACCESS_MODE_HEAD = 0,
+ PROXY_ACCESS_MODE_TAIL = 1,
+ PROXY_ACCESS_MODE_PEEK_HEAD = 2,
+ PROXY_ACCESS_MODE_PEEK_TAIL = 3,
+};
+
+#define K3_RINGACC_FIFO_WINDOW_SIZE_BYTES (512U)
+#define K3_RINGACC_FIFO_REGS_STEP 0x1000
+#define K3_RINGACC_MAX_DB_RING_CNT (127U)
+
+struct k3_ring_ops {
+ int (*push_tail)(struct k3_ring *ring, void *elm);
+ int (*push_head)(struct k3_ring *ring, void *elm);
+ int (*pop_tail)(struct k3_ring *ring, void *elm);
+ int (*pop_head)(struct k3_ring *ring, void *elm);
+};
+
+/**
+ * struct k3_ring_state - Internal state tracking structure
+ *
+ * @free: Number of free entries
+ * @occ: Occupancy
+ * @windex: Write index
+ * @rindex: Read index
+ * @tdown_complete: Tear down complete state
+ */
+struct k3_ring_state {
+ u32 free;
+ u32 occ;
+ u32 windex;
+ u32 rindex;
+ u32 tdown_complete:1;
+};
+
+/**
+ * struct k3_ring - RA Ring descriptor
+ *
+ * @rt: Ring control/status registers
+ * @fifos: Ring queues registers
+ * @proxy: Ring Proxy Datapath registers
+ * @ring_mem_dma: Ring buffer dma address
+ * @ring_mem_virt: Ring buffer virt address
+ * @ops: Ring operations
+ * @size: Ring size in elements
+ * @elm_size: Size of the ring element
+ * @mode: Ring mode
+ * @flags: flags
+ * @state: Ring state
+ * @ring_id: Ring Id
+ * @parent: Pointer to the parent struct @k3_ringacc
+ * @use_count: Use count for shared rings
+ * @proxy_id: RA Ring Proxy Id (only if @K3_RINGACC_RING_USE_PROXY)
+ * @dma_dev: device to be used for DMA API (allocation, mapping)
+ * @asel: Address Space Select value for physical addresses
+ */
+struct k3_ring {
+ struct k3_ring_rt_regs __iomem *rt;
+ struct k3_ring_fifo_regs __iomem *fifos;
+ struct k3_ringacc_proxy_target_regs __iomem *proxy;
+ dma_addr_t ring_mem_dma;
+ void *ring_mem_virt;
+ struct k3_ring_ops *ops;
+ u32 size;
+ enum k3_ring_size elm_size;
+ enum k3_ring_mode mode;
+ u32 flags;
+#define K3_RING_FLAG_BUSY BIT(1)
+#define K3_RING_FLAG_SHARED BIT(2)
+#define K3_RING_FLAG_REVERSE BIT(3)
+ struct k3_ring_state state;
+ u32 ring_id;
+ struct k3_ringacc *parent;
+ u32 use_count;
+ int proxy_id;
+ struct device *dma_dev;
+ u32 asel;
+#define K3_ADDRESS_ASEL_SHIFT 48
+};
+
+struct k3_ringacc_ops {
+ int (*init)(struct platform_device *pdev, struct k3_ringacc *ringacc);
+};
+
+/**
+ * struct k3_ringacc - Rings accelerator descriptor
+ *
+ * @dev: pointer to the RA device
+ * @proxy_gcfg: RA proxy global config registers
+ * @proxy_target_base: RA proxy datapath region
+ * @num_rings: number of rings in RA
+ * @rings_inuse: bitfield for ring usage tracking
+ * @rm_gp_range: general purpose rings range from tisci
+ * @dma_ring_reset_quirk: DMA reset workaround enable
+ * @num_proxies: number of RA proxies
+ * @proxy_inuse: bitfield for proxy usage tracking
+ * @rings: array of rings descriptors (struct @k3_ring)
+ * @list: list of RAs in the system
+ * @req_lock: protect rings allocation
+ * @tisci: pointer to the ti-sci handle
+ * @tisci_ring_ops: ti-sci rings ops
+ * @tisci_dev_id: ti-sci device id
+ * @ops: SoC specific ringacc operation
+ * @dma_rings: indicate DMA ring (dual ring within BCDMA/PKTDMA)
+ */
+struct k3_ringacc {
+ struct device *dev;
+ struct k3_ringacc_proxy_gcfg_regs __iomem *proxy_gcfg;
+ void __iomem *proxy_target_base;
+ u32 num_rings; /* number of rings in Ringacc module */
+ unsigned long *rings_inuse;
+ struct ti_sci_resource *rm_gp_range;
+
+ bool dma_ring_reset_quirk;
+ u32 num_proxies;
+ unsigned long *proxy_inuse;
+
+ struct k3_ring *rings;
+ struct list_head list;
+ struct mutex req_lock; /* protect rings allocation */
+
+ const struct ti_sci_handle *tisci;
+ const struct ti_sci_rm_ringacc_ops *tisci_ring_ops;
+ u32 tisci_dev_id;
+
+ const struct k3_ringacc_ops *ops;
+ bool dma_rings;
+};
+
+/**
+ * struct k3_ringacc_soc_data - Rings accelerator SoC data
+ *
+ * @dma_ring_reset_quirk: DMA reset workaround enable
+ */
+struct k3_ringacc_soc_data {
+ unsigned dma_ring_reset_quirk:1;
+};
+
+static int k3_ringacc_ring_read_occ(struct k3_ring *ring)
+{
+ return readl(&ring->rt->occ) & K3_RINGACC_RT_OCC_MASK;
+}
+
+static void k3_ringacc_ring_update_occ(struct k3_ring *ring)
+{
+ u32 val;
+
+ val = readl(&ring->rt->occ);
+
+ ring->state.occ = val & K3_RINGACC_RT_OCC_MASK;
+ ring->state.tdown_complete = !!(val & K3_DMARING_RT_OCC_TDOWN_COMPLETE);
+}
+
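+ /* Ring element data is accessed at the tail of the 512-byte FIFO window */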
+static long k3_ringacc_ring_get_fifo_pos(struct k3_ring *ring)
+{
+ return K3_RINGACC_FIFO_WINDOW_SIZE_BYTES -
+ (4 << ring->elm_size);
+}
+
+static void *k3_ringacc_get_elm_addr(struct k3_ring *ring, u32 idx)
+{
+ return (ring->ring_mem_virt + idx * (4 << ring->elm_size));
+}
+
+static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem);
+static int k3_dmaring_fwd_pop(struct k3_ring *ring, void *elem);
+static int k3_dmaring_reverse_pop(struct k3_ring *ring, void *elem);
+
+static struct k3_ring_ops k3_ring_mode_ring_ops = {
+ .push_tail = k3_ringacc_ring_push_mem,
+ .pop_head = k3_ringacc_ring_pop_mem,
+};
+
+static struct k3_ring_ops k3_dmaring_fwd_ops = {
+ .push_tail = k3_ringacc_ring_push_mem,
+ .pop_head = k3_dmaring_fwd_pop,
+};
+
+static struct k3_ring_ops k3_dmaring_reverse_ops = {
+ /* Reverse side of the DMA ring can only be popped by SW */
+ .pop_head = k3_dmaring_reverse_pop,
+};
+
+static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem);
+
+static struct k3_ring_ops k3_ring_mode_msg_ops = {
+ .push_tail = k3_ringacc_ring_push_io,
+ .push_head = k3_ringacc_ring_push_head_io,
+ .pop_tail = k3_ringacc_ring_pop_tail_io,
+ .pop_head = k3_ringacc_ring_pop_io,
+};
+
+static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem);
+
+static struct k3_ring_ops k3_ring_mode_proxy_ops = {
+ .push_tail = k3_ringacc_ring_push_tail_proxy,
+ .push_head = k3_ringacc_ring_push_head_proxy,
+ .pop_tail = k3_ringacc_ring_pop_tail_proxy,
+ .pop_head = k3_ringacc_ring_pop_head_proxy,
+};
+
+static void k3_ringacc_ring_dump(struct k3_ring *ring)
+{
+ struct device *dev = ring->parent->dev;
+
+ dev_dbg(dev, "dump ring: %d\n", ring->ring_id);
+ dev_dbg(dev, "dump mem virt %p, dma %pad\n", ring->ring_mem_virt,
+ &ring->ring_mem_dma);
+ dev_dbg(dev, "dump elmsize %d, size %d, mode %d, proxy_id %d\n",
+ ring->elm_size, ring->size, ring->mode, ring->proxy_id);
+ dev_dbg(dev, "dump flags %08X\n", ring->flags);
+
+ dev_dbg(dev, "dump ring_rt_regs: db%08x\n", readl(&ring->rt->db));
+ dev_dbg(dev, "dump occ%08x\n", readl(&ring->rt->occ));
+ dev_dbg(dev, "dump indx%08x\n", readl(&ring->rt->indx));
+ dev_dbg(dev, "dump hwocc%08x\n", readl(&ring->rt->hwocc));
+ dev_dbg(dev, "dump hwindx%08x\n", readl(&ring->rt->hwindx));
+
+ if (ring->ring_mem_virt)
+ print_hex_dump_debug("dump ring_mem_virt ", DUMP_PREFIX_NONE,
+ 16, 1, ring->ring_mem_virt, 16 * 8, false);
+}
+
+struct k3_ring *k3_ringacc_request_ring(struct k3_ringacc *ringacc,
+ int id, u32 flags)
+{
+ int proxy_id = K3_RINGACC_PROXY_NOT_USED;
+
+ mutex_lock(&ringacc->req_lock);
+
+ if (!try_module_get(ringacc->dev->driver->owner))
+ goto err_module_get;
+
+ if (id == K3_RINGACC_RING_ID_ANY) {
+ /* Request for any general purpose ring */
+ struct ti_sci_resource_desc *gp_rings =
+ &ringacc->rm_gp_range->desc[0];
+ unsigned long size;
+
+ size = gp_rings->start + gp_rings->num;
+ id = find_next_zero_bit(ringacc->rings_inuse, size,
+ gp_rings->start);
+ if (id == size)
+ goto error;
+ } else if (id < 0) {
+ goto error;
+ }
+
+ if (test_bit(id, ringacc->rings_inuse) &&
+ !(ringacc->rings[id].flags & K3_RING_FLAG_SHARED))
+ goto error;
+ else if (ringacc->rings[id].flags & K3_RING_FLAG_SHARED)
+ goto out;
+
+ if (flags & K3_RINGACC_RING_USE_PROXY) {
+ proxy_id = find_first_zero_bit(ringacc->proxy_inuse,
+ ringacc->num_proxies);
+ if (proxy_id == ringacc->num_proxies)
+ goto error;
+ }
+
+ if (proxy_id != K3_RINGACC_PROXY_NOT_USED) {
+ set_bit(proxy_id, ringacc->proxy_inuse);
+ ringacc->rings[id].proxy_id = proxy_id;
+ dev_dbg(ringacc->dev, "Giving ring#%d proxy#%d\n", id,
+ proxy_id);
+ } else {
+ dev_dbg(ringacc->dev, "Giving ring#%d\n", id);
+ }
+
+ set_bit(id, ringacc->rings_inuse);
+out:
+ ringacc->rings[id].use_count++;
+ mutex_unlock(&ringacc->req_lock);
+ return &ringacc->rings[id];
+
+error:
+ module_put(ringacc->dev->driver->owner);
+
+err_module_get:
+ mutex_unlock(&ringacc->req_lock);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_request_ring);
+
+static int k3_dmaring_request_dual_ring(struct k3_ringacc *ringacc, int fwd_id,
+ struct k3_ring **fwd_ring,
+ struct k3_ring **compl_ring)
+{
+ int ret = 0;
+
+ /*
+ * DMA rings must be requested by ID, completion ring is the reverse
+ * side of the forward ring
+ */
+ if (fwd_id < 0)
+ return -EINVAL;
+
+ mutex_lock(&ringacc->req_lock);
+
+ if (!try_module_get(ringacc->dev->driver->owner)) {
+ ret = -EINVAL;
+ goto err_module_get;
+ }
+
+ if (test_bit(fwd_id, ringacc->rings_inuse)) {
+ ret = -EBUSY;
+ goto error;
+ }
+
+ *fwd_ring = &ringacc->rings[fwd_id];
+ *compl_ring = &ringacc->rings[fwd_id + ringacc->num_rings];
+ set_bit(fwd_id, ringacc->rings_inuse);
+ ringacc->rings[fwd_id].use_count++;
+ dev_dbg(ringacc->dev, "Giving ring#%d\n", fwd_id);
+
+ mutex_unlock(&ringacc->req_lock);
+ return 0;
+
+error:
+ module_put(ringacc->dev->driver->owner);
+err_module_get:
+ mutex_unlock(&ringacc->req_lock);
+ return ret;
+}
+
+int k3_ringacc_request_rings_pair(struct k3_ringacc *ringacc,
+ int fwd_id, int compl_id,
+ struct k3_ring **fwd_ring,
+ struct k3_ring **compl_ring)
+{
+ int ret = 0;
+
+ if (!fwd_ring || !compl_ring)
+ return -EINVAL;
+
+ if (ringacc->dma_rings)
+ return k3_dmaring_request_dual_ring(ringacc, fwd_id,
+ fwd_ring, compl_ring);
+
+ *fwd_ring = k3_ringacc_request_ring(ringacc, fwd_id, 0);
+ if (!(*fwd_ring))
+ return -ENODEV;
+
+ *compl_ring = k3_ringacc_request_ring(ringacc, compl_id, 0);
+ if (!(*compl_ring)) {
+ k3_ringacc_ring_free(*fwd_ring);
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_request_rings_pair);
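+
+ /*
+ * Illustrative client-side usage of the pair API (variable names are
+ * hypothetical):
+ *
+ *	ret = k3_ringacc_request_rings_pair(ringacc, fwd_id, compl_id,
+ *					    &fwd_ring, &compl_ring);
+ *	if (!ret)
+ *		ret = k3_ringacc_ring_cfg(fwd_ring, &ring_cfg);
+ */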
+
+static void k3_ringacc_ring_reset_sci(struct k3_ring *ring)
+{
+ struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
+ struct k3_ringacc *ringacc = ring->parent;
+ int ret;
+
+ ring_cfg.nav_id = ringacc->tisci_dev_id;
+ ring_cfg.index = ring->ring_id;
+ ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID;
+ ring_cfg.count = ring->size;
+
+ ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
+ if (ret)
+ dev_err(ringacc->dev, "TISCI reset ring fail (%d) ring_idx %d\n",
+ ret, ring->ring_id);
+}
+
+void k3_ringacc_ring_reset(struct k3_ring *ring)
+{
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return;
+
+ memset(&ring->state, 0, sizeof(ring->state));
+
+ k3_ringacc_ring_reset_sci(ring);
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset);
+
+static void k3_ringacc_ring_reconfig_qmode_sci(struct k3_ring *ring,
+ enum k3_ring_mode mode)
+{
+ struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
+ struct k3_ringacc *ringacc = ring->parent;
+ int ret;
+
+ ring_cfg.nav_id = ringacc->tisci_dev_id;
+ ring_cfg.index = ring->ring_id;
+ ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_RING_MODE_VALID;
+ ring_cfg.mode = mode;
+
+ ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
+ if (ret)
+ dev_err(ringacc->dev, "TISCI reconf qmode fail (%d) ring_idx %d\n",
+ ret, ring->ring_id);
+}
+
+void k3_ringacc_ring_reset_dma(struct k3_ring *ring, u32 occ)
+{
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return;
+
+ if (!ring->parent->dma_ring_reset_quirk)
+ goto reset;
+
+ if (!occ)
+ occ = k3_ringacc_ring_read_occ(ring);
+
+ if (occ) {
+ u32 db_ring_cnt, db_ring_cnt_cur;
+
+ dev_dbg(ring->parent->dev, "%s %u occ: %u\n", __func__,
+ ring->ring_id, occ);
+ /* TI-SCI ring reset */
+ k3_ringacc_ring_reset_sci(ring);
+
+ /*
+ * Setup the ring in ring/doorbell mode (if not already in this
+ * mode)
+ */
+ if (ring->mode != K3_RINGACC_RING_MODE_RING)
+ k3_ringacc_ring_reconfig_qmode_sci(
+ ring, K3_RINGACC_RING_MODE_RING);
+ /*
+ * Ring the doorbell 2**22 - ringOcc times.
+ * This will wrap the internal UDMAP ring state occupancy
+ * counter (which is 21-bits wide) to 0.
+ */
+ db_ring_cnt = (1U << 22) - occ;
+
+ while (db_ring_cnt != 0) {
+ /*
+ * Ring the doorbell with the maximum count each
+ * iteration if possible to minimize the total
+ * number of writes
+ */
+ if (db_ring_cnt > K3_RINGACC_MAX_DB_RING_CNT)
+ db_ring_cnt_cur = K3_RINGACC_MAX_DB_RING_CNT;
+ else
+ db_ring_cnt_cur = db_ring_cnt;
+
+ writel(db_ring_cnt_cur, &ring->rt->db);
+ db_ring_cnt -= db_ring_cnt_cur;
+ }
+
+ /* Restore the original ring mode (if not ring mode) */
+ if (ring->mode != K3_RINGACC_RING_MODE_RING)
+ k3_ringacc_ring_reconfig_qmode_sci(ring, ring->mode);
+ }
+
+reset:
+ /* Reset the ring */
+ k3_ringacc_ring_reset(ring);
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset_dma);
+
+static void k3_ringacc_ring_free_sci(struct k3_ring *ring)
+{
+ struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
+ struct k3_ringacc *ringacc = ring->parent;
+ int ret;
+
+ ring_cfg.nav_id = ringacc->tisci_dev_id;
+ ring_cfg.index = ring->ring_id;
+ ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER;
+
+ ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
+ if (ret)
+ dev_err(ringacc->dev, "TISCI ring free fail (%d) ring_idx %d\n",
+ ret, ring->ring_id);
+}
+
+int k3_ringacc_ring_free(struct k3_ring *ring)
+{
+ struct k3_ringacc *ringacc;
+
+ if (!ring)
+ return -EINVAL;
+
+ ringacc = ring->parent;
+
+ /*
+ * DMA rings: the forward and reverse rings share memory and
+ * configuration; only the forward ring is configured and the reverse
+ * ring is treated as its slave.
+ */
+ if (ringacc->dma_rings && (ring->flags & K3_RING_FLAG_REVERSE))
+ return 0;
+
+ dev_dbg(ring->parent->dev, "flags: 0x%08x\n", ring->flags);
+
+ if (!test_bit(ring->ring_id, ringacc->rings_inuse))
+ return -EINVAL;
+
+ mutex_lock(&ringacc->req_lock);
+
+ if (--ring->use_count)
+ goto out;
+
+ if (!(ring->flags & K3_RING_FLAG_BUSY))
+ goto no_init;
+
+ k3_ringacc_ring_free_sci(ring);
+
+ dma_free_coherent(ring->dma_dev,
+ ring->size * (4 << ring->elm_size),
+ ring->ring_mem_virt, ring->ring_mem_dma);
+ ring->flags = 0;
+ ring->ops = NULL;
+ ring->dma_dev = NULL;
+ ring->asel = 0;
+
+ if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED) {
+ clear_bit(ring->proxy_id, ringacc->proxy_inuse);
+ ring->proxy = NULL;
+ ring->proxy_id = K3_RINGACC_PROXY_NOT_USED;
+ }
+
+no_init:
+ clear_bit(ring->ring_id, ringacc->rings_inuse);
+
+ module_put(ringacc->dev->driver->owner);
+
+out:
+ mutex_unlock(&ringacc->req_lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_free);
+
+u32 k3_ringacc_get_ring_id(struct k3_ring *ring)
+{
+ if (!ring)
+ return -EINVAL;
+
+ return ring->ring_id;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_id);
+
+u32 k3_ringacc_get_tisci_dev_id(struct k3_ring *ring)
+{
+ if (!ring)
+ return -EINVAL;
+
+ return ring->parent->tisci_dev_id;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_get_tisci_dev_id);
+
+int k3_ringacc_get_ring_irq_num(struct k3_ring *ring)
+{
+ int irq_num;
+
+ if (!ring)
+ return -EINVAL;
+
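+ /* The ring ID is used as the MSI index within the parent RA's MSI domain */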
+ irq_num = msi_get_virq(ring->parent->dev, ring->ring_id);
+ if (irq_num <= 0)
+ irq_num = -EINVAL;
+ return irq_num;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_irq_num);
+
+static int k3_ringacc_ring_cfg_sci(struct k3_ring *ring)
+{
+ struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
+ struct k3_ringacc *ringacc = ring->parent;
+ int ret;
+
+ if (!ringacc->tisci)
+ return -EINVAL;
+
+ ring_cfg.nav_id = ringacc->tisci_dev_id;
+ ring_cfg.index = ring->ring_id;
+ ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER;
+ ring_cfg.addr_lo = lower_32_bits(ring->ring_mem_dma);
+ ring_cfg.addr_hi = upper_32_bits(ring->ring_mem_dma);
+ ring_cfg.count = ring->size;
+ ring_cfg.mode = ring->mode;
+ ring_cfg.size = ring->elm_size;
+ ring_cfg.asel = ring->asel;
+
+ ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
+ if (ret)
+ dev_err(ringacc->dev, "TISCI config ring fail (%d) ring_idx %d\n",
+ ret, ring->ring_id);
+
+ return ret;
+}
+
+static int k3_dmaring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
+{
+ struct k3_ringacc *ringacc;
+ struct k3_ring *reverse_ring;
+ int ret = 0;
+
+ if (cfg->elm_size != K3_RINGACC_RING_ELSIZE_8 ||
+ cfg->mode != K3_RINGACC_RING_MODE_RING ||
+ cfg->size & ~K3_DMARING_CFG_RING_SIZE_ELCNT_MASK)
+ return -EINVAL;
+
+ ringacc = ring->parent;
+
+ /*
+ * DMA rings: the forward and reverse rings share memory and
+ * configuration; only the forward ring is configured and the reverse
+ * ring is treated as its slave.
+ */
+ if (ringacc->dma_rings && (ring->flags & K3_RING_FLAG_REVERSE))
+ return 0;
+
+ if (!test_bit(ring->ring_id, ringacc->rings_inuse))
+ return -EINVAL;
+
+ ring->size = cfg->size;
+ ring->elm_size = cfg->elm_size;
+ ring->mode = cfg->mode;
+ ring->asel = cfg->asel;
+ ring->dma_dev = cfg->dma_dev;
+ if (!ring->dma_dev) {
+ dev_warn(ringacc->dev, "dma_dev is not provided for ring%d\n",
+ ring->ring_id);
+ ring->dma_dev = ringacc->dev;
+ }
+
+ memset(&ring->state, 0, sizeof(ring->state));
+
+ ring->ops = &k3_dmaring_fwd_ops;
+
+ ring->ring_mem_virt = dma_alloc_coherent(ring->dma_dev,
+ ring->size * (4 << ring->elm_size),
+ &ring->ring_mem_dma, GFP_KERNEL);
+ if (!ring->ring_mem_virt) {
+ dev_err(ringacc->dev, "Failed to alloc ring mem\n");
+ ret = -ENOMEM;
+ goto err_free_ops;
+ }
+
+ ret = k3_ringacc_ring_cfg_sci(ring);
+ if (ret)
+ goto err_free_mem;
+
+ ring->flags |= K3_RING_FLAG_BUSY;
+
+ k3_ringacc_ring_dump(ring);
+
+ /* DMA rings: configure reverse ring */
+ reverse_ring = &ringacc->rings[ring->ring_id + ringacc->num_rings];
+ reverse_ring->size = cfg->size;
+ reverse_ring->elm_size = cfg->elm_size;
+ reverse_ring->mode = cfg->mode;
+ reverse_ring->asel = cfg->asel;
+ memset(&reverse_ring->state, 0, sizeof(reverse_ring->state));
+ reverse_ring->ops = &k3_dmaring_reverse_ops;
+
+ reverse_ring->ring_mem_virt = ring->ring_mem_virt;
+ reverse_ring->ring_mem_dma = ring->ring_mem_dma;
+ reverse_ring->flags |= K3_RING_FLAG_BUSY;
+ k3_ringacc_ring_dump(reverse_ring);
+
+ return 0;
+
+err_free_mem:
+ dma_free_coherent(ring->dma_dev,
+ ring->size * (4 << ring->elm_size),
+ ring->ring_mem_virt,
+ ring->ring_mem_dma);
+err_free_ops:
+ ring->ops = NULL;
+ ring->proxy = NULL;
+ ring->dma_dev = NULL;
+ ring->asel = 0;
+ return ret;
+}
+
+int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
+{
+ struct k3_ringacc *ringacc;
+ int ret = 0;
+
+ if (!ring || !cfg)
+ return -EINVAL;
+
+ ringacc = ring->parent;
+
+ if (ringacc->dma_rings)
+ return k3_dmaring_cfg(ring, cfg);
+
+ if (cfg->elm_size > K3_RINGACC_RING_ELSIZE_256 ||
+ cfg->mode >= K3_RINGACC_RING_MODE_INVALID ||
+ cfg->size & ~K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK ||
+ !test_bit(ring->ring_id, ringacc->rings_inuse))
+ return -EINVAL;
+
+ if (cfg->mode == K3_RINGACC_RING_MODE_MESSAGE &&
+ ring->proxy_id == K3_RINGACC_PROXY_NOT_USED &&
+ cfg->elm_size > K3_RINGACC_RING_ELSIZE_8) {
+ dev_err(ringacc->dev,
+ "Message mode must use proxy for %u element size\n",
+ 4 << ring->elm_size);
+ return -EINVAL;
+ }
+
+ /*
+ * In the case of a shared ring, only the first user (master user) can
+ * configure the ring. The expected client sequence is:
+ * ring = k3_ringacc_request_ring(ringacc, ring_id, 0); # master user
+ * k3_ringacc_ring_cfg(ring, cfg); # master configuration
+ * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED);
+ * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED);
+ */
+ if (ring->use_count != 1)
+ return 0;
+
+ ring->size = cfg->size;
+ ring->elm_size = cfg->elm_size;
+ ring->mode = cfg->mode;
+ memset(&ring->state, 0, sizeof(ring->state));
+
+ if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED)
+ ring->proxy = ringacc->proxy_target_base +
+ ring->proxy_id * K3_RINGACC_PROXY_TARGET_STEP;
+
+ switch (ring->mode) {
+ case K3_RINGACC_RING_MODE_RING:
+ ring->ops = &k3_ring_mode_ring_ops;
+ ring->dma_dev = cfg->dma_dev;
+ if (!ring->dma_dev)
+ ring->dma_dev = ringacc->dev;
+ break;
+ case K3_RINGACC_RING_MODE_MESSAGE:
+ ring->dma_dev = ringacc->dev;
+ if (ring->proxy)
+ ring->ops = &k3_ring_mode_proxy_ops;
+ else
+ ring->ops = &k3_ring_mode_msg_ops;
+ break;
+ default:
+ ring->ops = NULL;
+ ret = -EINVAL;
+ goto err_free_proxy;
+ }
+
+ ring->ring_mem_virt = dma_alloc_coherent(ring->dma_dev,
+ ring->size * (4 << ring->elm_size),
+ &ring->ring_mem_dma, GFP_KERNEL);
+ if (!ring->ring_mem_virt) {
+ dev_err(ringacc->dev, "Failed to alloc ring mem\n");
+ ret = -ENOMEM;
+ goto err_free_ops;
+ }
+
+ ret = k3_ringacc_ring_cfg_sci(ring);
+
+ if (ret)
+ goto err_free_mem;
+
+ ring->flags |= K3_RING_FLAG_BUSY;
+ ring->flags |= (cfg->flags & K3_RINGACC_RING_SHARED) ?
+ K3_RING_FLAG_SHARED : 0;
+
+ k3_ringacc_ring_dump(ring);
+
+ return 0;
+
+err_free_mem:
+ dma_free_coherent(ring->dma_dev,
+ ring->size * (4 << ring->elm_size),
+ ring->ring_mem_virt,
+ ring->ring_mem_dma);
+err_free_ops:
+ ring->ops = NULL;
+ ring->dma_dev = NULL;
+err_free_proxy:
+ ring->proxy = NULL;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_cfg);
+
+u32 k3_ringacc_ring_get_size(struct k3_ring *ring)
+{
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ return ring->size;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_size);
+
+u32 k3_ringacc_ring_get_free(struct k3_ring *ring)
+{
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ if (!ring->state.free)
+ ring->state.free = ring->size - k3_ringacc_ring_read_occ(ring);
+
+ return ring->state.free;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_free);
+
+u32 k3_ringacc_ring_get_occ(struct k3_ring *ring)
+{
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ return k3_ringacc_ring_read_occ(ring);
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_occ);
+
+u32 k3_ringacc_ring_is_full(struct k3_ring *ring)
+{
+ return !k3_ringacc_ring_get_free(ring);
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_is_full);
+
+enum k3_ringacc_access_mode {
+ K3_RINGACC_ACCESS_MODE_PUSH_HEAD,
+ K3_RINGACC_ACCESS_MODE_POP_HEAD,
+ K3_RINGACC_ACCESS_MODE_PUSH_TAIL,
+ K3_RINGACC_ACCESS_MODE_POP_TAIL,
+ K3_RINGACC_ACCESS_MODE_PEEK_HEAD,
+ K3_RINGACC_ACCESS_MODE_PEEK_TAIL,
+};
+
+#define K3_RINGACC_PROXY_MODE(x) (((x) & 0x3) << 16)
+#define K3_RINGACC_PROXY_ELSIZE(x) (((x) & 0x7) << 24)
+static int k3_ringacc_ring_cfg_proxy(struct k3_ring *ring,
+ enum k3_ringacc_proxy_access_mode mode)
+{
+ u32 val;
+
+ val = ring->ring_id;
+ val |= K3_RINGACC_PROXY_MODE(mode);
+ val |= K3_RINGACC_PROXY_ELSIZE(ring->elm_size);
+ writel(val, &ring->proxy->control);
+ return 0;
+}
+
+static int k3_ringacc_ring_access_proxy(struct k3_ring *ring, void *elem,
+ enum k3_ringacc_access_mode access_mode)
+{
+ void __iomem *ptr;
+
+ ptr = (void __iomem *)&ring->proxy->data;
+
+ switch (access_mode) {
+ case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
+ case K3_RINGACC_ACCESS_MODE_POP_HEAD:
+ k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_HEAD);
+ break;
+ case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
+ case K3_RINGACC_ACCESS_MODE_POP_TAIL:
+ k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_TAIL);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ptr += k3_ringacc_ring_get_fifo_pos(ring);
+
+ switch (access_mode) {
+ case K3_RINGACC_ACCESS_MODE_POP_HEAD:
+ case K3_RINGACC_ACCESS_MODE_POP_TAIL:
+ dev_dbg(ring->parent->dev,
+ "proxy:memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr,
+ access_mode);
+ memcpy_fromio(elem, ptr, (4 << ring->elm_size));
+ ring->state.occ--;
+ break;
+ case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
+ case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
+ dev_dbg(ring->parent->dev,
+ "proxy:memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr,
+ access_mode);
+ memcpy_toio(ptr, elem, (4 << ring->elm_size));
+ ring->state.free--;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(ring->parent->dev, "proxy: free%d occ%d\n", ring->state.free,
+ ring->state.occ);
+ return 0;
+}
+
+static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_proxy(ring, elem,
+ K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
+}
+
+static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_proxy(ring, elem,
+ K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
+}
+
+static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_proxy(ring, elem,
+ K3_RINGACC_ACCESS_MODE_POP_HEAD);
+}
+
+static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_proxy(ring, elem,
+ K3_RINGACC_ACCESS_MODE_POP_HEAD);
+}
+
+static int k3_ringacc_ring_access_io(struct k3_ring *ring, void *elem,
+ enum k3_ringacc_access_mode access_mode)
+{
+ void __iomem *ptr;
+
+ switch (access_mode) {
+ case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
+ case K3_RINGACC_ACCESS_MODE_POP_HEAD:
+ ptr = (void __iomem *)&ring->fifos->head_data;
+ break;
+ case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
+ case K3_RINGACC_ACCESS_MODE_POP_TAIL:
+ ptr = (void __iomem *)&ring->fifos->tail_data;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ptr += k3_ringacc_ring_get_fifo_pos(ring);
+
+ switch (access_mode) {
+ case K3_RINGACC_ACCESS_MODE_POP_HEAD:
+ case K3_RINGACC_ACCESS_MODE_POP_TAIL:
+ dev_dbg(ring->parent->dev,
+ "memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr,
+ access_mode);
+ memcpy_fromio(elem, ptr, (4 << ring->elm_size));
+ ring->state.occ--;
+ break;
+ case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
+ case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
+ dev_dbg(ring->parent->dev,
+ "memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr,
+ access_mode);
+ memcpy_toio(ptr, elem, (4 << ring->elm_size));
+ ring->state.free--;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(ring->parent->dev, "free%d index%d occ%d index%d\n",
+ ring->state.free, ring->state.windex, ring->state.occ,
+ ring->state.rindex);
+ return 0;
+}
+
+static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_io(ring, elem,
+ K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
+}
+
+static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_io(ring, elem,
+ K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
+}
+
+static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_io(ring, elem,
+ K3_RINGACC_ACCESS_MODE_POP_HEAD);
+}
+
+static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_io(ring, elem,
+ K3_RINGACC_ACCESS_MODE_POP_HEAD);
+}
+
+/*
+ * A ring element is a 48-bit address plus ASEL bits.
+ * ASEL is used by the DMAs and must be stripped for the kernel as it is
+ * not part of the physical memory address.
+ */
+static void k3_dmaring_remove_asel_from_elem(u64 *elem)
+{
+ *elem &= GENMASK_ULL(K3_ADDRESS_ASEL_SHIFT - 1, 0);
+}
+
+static int k3_dmaring_fwd_pop(struct k3_ring *ring, void *elem)
+{
+ void *elem_ptr;
+ u32 elem_idx;
+
+ /*
+ * DMA rings: the forward ring is always tied to a DMA channel and the
+ * HW does not maintain any state data required for the POP operation;
+ * it is unknown how many elements were consumed by the HW. So, to
+ * actually do a POP, the read pointer has to be recalculated every time.
+ */
+ ring->state.occ = k3_ringacc_ring_read_occ(ring);
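+ /* The oldest occupied element sits 'occ' entries behind the write index */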
+ if (ring->state.windex >= ring->state.occ)
+ elem_idx = ring->state.windex - ring->state.occ;
+ else
+ elem_idx = ring->size - (ring->state.occ - ring->state.windex);
+
+ elem_ptr = k3_ringacc_get_elm_addr(ring, elem_idx);
+ memcpy(elem, elem_ptr, (4 << ring->elm_size));
+ k3_dmaring_remove_asel_from_elem(elem);
+
+ ring->state.occ--;
+ writel(-1, &ring->rt->db);
+
+ dev_dbg(ring->parent->dev, "%s: occ%d Windex%d Rindex%d pos_ptr%px\n",
+ __func__, ring->state.occ, ring->state.windex, elem_idx,
+ elem_ptr);
+ return 0;
+}
+
+static int k3_dmaring_reverse_pop(struct k3_ring *ring, void *elem)
+{
+ void *elem_ptr;
+
+ elem_ptr = k3_ringacc_get_elm_addr(ring, ring->state.rindex);
+
+ if (ring->state.occ) {
+ memcpy(elem, elem_ptr, (4 << ring->elm_size));
+ k3_dmaring_remove_asel_from_elem(elem);
+
+ ring->state.rindex = (ring->state.rindex + 1) % ring->size;
+ ring->state.occ--;
+ writel(-1 & K3_DMARING_RT_DB_ENTRY_MASK, &ring->rt->db);
+ } else if (ring->state.tdown_complete) {
+ dma_addr_t *value = elem;
+
+ *value = CPPI5_TDCM_MARKER;
+ writel(K3_DMARING_RT_DB_TDOWN_ACK, &ring->rt->db);
+ ring->state.tdown_complete = false;
+ }
+
+ dev_dbg(ring->parent->dev, "%s: occ%d index%d pos_ptr%px\n",
+ __func__, ring->state.occ, ring->state.rindex, elem_ptr);
+ return 0;
+}
+
+static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem)
+{
+ void *elem_ptr;
+
+ elem_ptr = k3_ringacc_get_elm_addr(ring, ring->state.windex);
+
+ memcpy(elem_ptr, elem, (4 << ring->elm_size));
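+ /* For DMA rings, encode the ASEL value into the upper bits of the address */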
+ if (ring->parent->dma_rings) {
+ u64 *addr = elem_ptr;
+
+ *addr |= ((u64)ring->asel << K3_ADDRESS_ASEL_SHIFT);
+ }
+
+ ring->state.windex = (ring->state.windex + 1) % ring->size;
+ ring->state.free--;
+ writel(1, &ring->rt->db);
+
+ dev_dbg(ring->parent->dev, "ring_push_mem: free%d index%d\n",
+ ring->state.free, ring->state.windex);
+
+ return 0;
+}
+
+static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem)
+{
+ void *elem_ptr;
+
+ elem_ptr = k3_ringacc_get_elm_addr(ring, ring->state.rindex);
+
+ memcpy(elem, elem_ptr, (4 << ring->elm_size));
+
+ ring->state.rindex = (ring->state.rindex + 1) % ring->size;
+ ring->state.occ--;
+ writel(-1, &ring->rt->db);
+
+ dev_dbg(ring->parent->dev, "ring_pop_mem: occ%d index%d pos_ptr%p\n",
+ ring->state.occ, ring->state.rindex, elem_ptr);
+ return 0;
+}
+
+int k3_ringacc_ring_push(struct k3_ring *ring, void *elem)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ dev_dbg(ring->parent->dev, "ring_push: free%d index%d\n",
+ ring->state.free, ring->state.windex);
+
+ if (k3_ringacc_ring_is_full(ring))
+ return -ENOMEM;
+
+ if (ring->ops && ring->ops->push_tail)
+ ret = ring->ops->push_tail(ring, elem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_push);
+
+int k3_ringacc_ring_push_head(struct k3_ring *ring, void *elem)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ dev_dbg(ring->parent->dev, "ring_push_head: free%d index%d\n",
+ ring->state.free, ring->state.windex);
+
+ if (k3_ringacc_ring_is_full(ring))
+ return -ENOMEM;
+
+ if (ring->ops && ring->ops->push_head)
+ ret = ring->ops->push_head(ring, elem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_push_head);
+
+int k3_ringacc_ring_pop(struct k3_ring *ring, void *elem)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ if (!ring->state.occ)
+ k3_ringacc_ring_update_occ(ring);
+
+ dev_dbg(ring->parent->dev, "ring_pop: occ%d index%d\n", ring->state.occ,
+ ring->state.rindex);
+
+ if (!ring->state.occ && !ring->state.tdown_complete)
+ return -ENODATA;
+
+ if (ring->ops && ring->ops->pop_head)
+ ret = ring->ops->pop_head(ring, elem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop);
+
+int k3_ringacc_ring_pop_tail(struct k3_ring *ring, void *elem)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ if (!ring->state.occ)
+ k3_ringacc_ring_update_occ(ring);
+
+ dev_dbg(ring->parent->dev, "ring_pop_tail: occ%d index%d\n",
+ ring->state.occ, ring->state.rindex);
+
+ if (!ring->state.occ)
+ return -ENODATA;
+
+ if (ring->ops && ring->ops->pop_tail)
+ ret = ring->ops->pop_tail(ring, elem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop_tail);
+
+struct k3_ringacc *of_k3_ringacc_get_by_phandle(struct device_node *np,
+ const char *property)
+{
+ struct device_node *ringacc_np;
+ struct k3_ringacc *ringacc = ERR_PTR(-EPROBE_DEFER);
+ struct k3_ringacc *entry;
+
+ ringacc_np = of_parse_phandle(np, property, 0);
+ if (!ringacc_np)
+ return ERR_PTR(-ENODEV);
+
+ mutex_lock(&k3_ringacc_list_lock);
+ list_for_each_entry(entry, &k3_ringacc_list, list)
+ if (entry->dev->of_node == ringacc_np) {
+ ringacc = entry;
+ break;
+ }
+ mutex_unlock(&k3_ringacc_list_lock);
+ of_node_put(ringacc_np);
+
+ return ringacc;
+}
+EXPORT_SYMBOL_GPL(of_k3_ringacc_get_by_phandle);
+
+static int k3_ringacc_probe_dt(struct k3_ringacc *ringacc)
+{
+ struct device_node *node = ringacc->dev->of_node;
+ struct device *dev = ringacc->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int ret;
+
+ if (!node) {
+ dev_err(dev, "device tree info unavailable\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_u32(node, "ti,num-rings", &ringacc->num_rings);
+ if (ret) {
+ dev_err(dev, "ti,num-rings read failure %d\n", ret);
+ return ret;
+ }
+
+ ringacc->tisci = ti_sci_get_by_phandle(node, "ti,sci");
+ if (IS_ERR(ringacc->tisci)) {
+ ret = PTR_ERR(ringacc->tisci);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "ti,sci read fail %d\n", ret);
+ ringacc->tisci = NULL;
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "ti,sci-dev-id",
+ &ringacc->tisci_dev_id);
+ if (ret) {
+ dev_err(dev, "ti,sci-dev-id read fail %d\n", ret);
+ return ret;
+ }
+
+ pdev->id = ringacc->tisci_dev_id;
+
+ ringacc->rm_gp_range = devm_ti_sci_get_of_resource(ringacc->tisci, dev,
+ ringacc->tisci_dev_id,
+ "ti,sci-rm-range-gp-rings");
+ if (IS_ERR(ringacc->rm_gp_range)) {
+ dev_err(dev, "Failed to allocate MSI interrupts\n");
+ return PTR_ERR(ringacc->rm_gp_range);
+ }
+
+ return ti_sci_inta_msi_domain_alloc_irqs(ringacc->dev,
+ ringacc->rm_gp_range);
+}
+
+static const struct k3_ringacc_soc_data k3_ringacc_soc_data_sr1 = {
+ .dma_ring_reset_quirk = 1,
+};
+
+static const struct soc_device_attribute k3_ringacc_socinfo[] = {
+ { .family = "AM65X",
+ .revision = "SR1.0",
+ .data = &k3_ringacc_soc_data_sr1
+ },
+ {/* sentinel */}
+};
+
+static int k3_ringacc_init(struct platform_device *pdev,
+ struct k3_ringacc *ringacc)
+{
+ const struct soc_device_attribute *soc;
+ void __iomem *base_fifo, *base_rt;
+ struct device *dev = &pdev->dev;
+ int ret, i;
+
+ dev->msi.domain = of_msi_get_domain(dev, dev->of_node,
+ DOMAIN_BUS_TI_SCI_INTA_MSI);
+ if (!dev->msi.domain)
+ return -EPROBE_DEFER;
+
+ ret = k3_ringacc_probe_dt(ringacc);
+ if (ret)
+ return ret;
+
+ soc = soc_device_match(k3_ringacc_socinfo);
+ if (soc && soc->data) {
+ const struct k3_ringacc_soc_data *soc_data = soc->data;
+
+ ringacc->dma_ring_reset_quirk = soc_data->dma_ring_reset_quirk;
+ }
+
+ base_rt = devm_platform_ioremap_resource_byname(pdev, "rt");
+ if (IS_ERR(base_rt))
+ return PTR_ERR(base_rt);
+
+ base_fifo = devm_platform_ioremap_resource_byname(pdev, "fifos");
+ if (IS_ERR(base_fifo))
+ return PTR_ERR(base_fifo);
+
+ ringacc->proxy_gcfg = devm_platform_ioremap_resource_byname(pdev, "proxy_gcfg");
+ if (IS_ERR(ringacc->proxy_gcfg))
+ return PTR_ERR(ringacc->proxy_gcfg);
+
+ ringacc->proxy_target_base = devm_platform_ioremap_resource_byname(pdev,
+ "proxy_target");
+ if (IS_ERR(ringacc->proxy_target_base))
+ return PTR_ERR(ringacc->proxy_target_base);
+
+ ringacc->num_proxies = readl(&ringacc->proxy_gcfg->config) &
+ K3_RINGACC_PROXY_CFG_THREADS_MASK;
+
+ ringacc->rings = devm_kzalloc(dev,
+ sizeof(*ringacc->rings) *
+ ringacc->num_rings,
+ GFP_KERNEL);
+ ringacc->rings_inuse = devm_bitmap_zalloc(dev, ringacc->num_rings,
+ GFP_KERNEL);
+ ringacc->proxy_inuse = devm_bitmap_zalloc(dev, ringacc->num_proxies,
+ GFP_KERNEL);
+
+ if (!ringacc->rings || !ringacc->rings_inuse || !ringacc->proxy_inuse)
+ return -ENOMEM;
+
+ for (i = 0; i < ringacc->num_rings; i++) {
+ ringacc->rings[i].rt = base_rt +
+ K3_RINGACC_RT_REGS_STEP * i;
+ ringacc->rings[i].fifos = base_fifo +
+ K3_RINGACC_FIFO_REGS_STEP * i;
+ ringacc->rings[i].parent = ringacc;
+ ringacc->rings[i].ring_id = i;
+ ringacc->rings[i].proxy_id = K3_RINGACC_PROXY_NOT_USED;
+ }
+
+ ringacc->tisci_ring_ops = &ringacc->tisci->ops.rm_ring_ops;
+
+ dev_info(dev, "Ring Accelerator probed rings:%u, gp-rings[%u,%u] sci-dev-id:%u\n",
+ ringacc->num_rings,
+ ringacc->rm_gp_range->desc[0].start,
+ ringacc->rm_gp_range->desc[0].num,
+ ringacc->tisci_dev_id);
+ dev_info(dev, "dma-ring-reset-quirk: %s\n",
+ ringacc->dma_ring_reset_quirk ? "enabled" : "disabled");
+ dev_info(dev, "RA Proxy rev. %08x, num_proxies:%u\n",
+ readl(&ringacc->proxy_gcfg->revision), ringacc->num_proxies);
+
+ return 0;
+}
+
+struct ringacc_match_data {
+ struct k3_ringacc_ops ops;
+};
+
+static struct ringacc_match_data k3_ringacc_data = {
+ .ops = {
+ .init = k3_ringacc_init,
+ },
+};
+
+/* Match table for of_platform binding */
+static const struct of_device_id k3_ringacc_of_match[] = {
+ { .compatible = "ti,am654-navss-ringacc", .data = &k3_ringacc_data, },
+ {},
+};
+MODULE_DEVICE_TABLE(of, k3_ringacc_of_match);
+
+struct k3_ringacc *k3_ringacc_dmarings_init(struct platform_device *pdev,
+ struct k3_ringacc_init_data *data)
+{
+ struct device *dev = &pdev->dev;
+ struct k3_ringacc *ringacc;
+ void __iomem *base_rt;
+ int i;
+
+ ringacc = devm_kzalloc(dev, sizeof(*ringacc), GFP_KERNEL);
+ if (!ringacc)
+ return ERR_PTR(-ENOMEM);
+
+ ringacc->dev = dev;
+ ringacc->dma_rings = true;
+ ringacc->num_rings = data->num_rings;
+ ringacc->tisci = data->tisci;
+ ringacc->tisci_dev_id = data->tisci_dev_id;
+
+ mutex_init(&ringacc->req_lock);
+
+ base_rt = devm_platform_ioremap_resource_byname(pdev, "ringrt");
+ if (IS_ERR(base_rt))
+ return ERR_CAST(base_rt);
+
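+ /*
+ * Allocate twice the number of rings: the first half holds the forward
+ * rings, the second half the reverse (completion) rings.
+ */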
+ ringacc->rings = devm_kzalloc(dev,
+ sizeof(*ringacc->rings) *
+ ringacc->num_rings * 2,
+ GFP_KERNEL);
+ ringacc->rings_inuse = devm_bitmap_zalloc(dev, ringacc->num_rings,
+ GFP_KERNEL);
+
+ if (!ringacc->rings || !ringacc->rings_inuse)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < ringacc->num_rings; i++) {
+ struct k3_ring *ring = &ringacc->rings[i];
+
+ ring->rt = base_rt + K3_DMARING_RT_REGS_STEP * i;
+ ring->parent = ringacc;
+ ring->ring_id = i;
+ ring->proxy_id = K3_RINGACC_PROXY_NOT_USED;
+
+ ring = &ringacc->rings[ringacc->num_rings + i];
+ ring->rt = base_rt + K3_DMARING_RT_REGS_STEP * i +
+ K3_DMARING_RT_REGS_REVERSE_OFS;
+ ring->parent = ringacc;
+ ring->ring_id = i;
+ ring->proxy_id = K3_RINGACC_PROXY_NOT_USED;
+ ring->flags = K3_RING_FLAG_REVERSE;
+ }
+
+ ringacc->tisci_ring_ops = &ringacc->tisci->ops.rm_ring_ops;
+
+ dev_info(dev, "Number of rings: %u\n", ringacc->num_rings);
+
+ return ringacc;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_dmarings_init);
+
+static int k3_ringacc_probe(struct platform_device *pdev)
+{
+ const struct ringacc_match_data *match_data;
+ struct device *dev = &pdev->dev;
+ struct k3_ringacc *ringacc;
+ int ret;
+
+ match_data = of_device_get_match_data(&pdev->dev);
+ if (!match_data)
+ return -ENODEV;
+
+ ringacc = devm_kzalloc(dev, sizeof(*ringacc), GFP_KERNEL);
+ if (!ringacc)
+ return -ENOMEM;
+
+ ringacc->dev = dev;
+ mutex_init(&ringacc->req_lock);
+ ringacc->ops = &match_data->ops;
+
+ ret = ringacc->ops->init(pdev, ringacc);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dev, ringacc);
+
+ mutex_lock(&k3_ringacc_list_lock);
+ list_add_tail(&ringacc->list, &k3_ringacc_list);
+ mutex_unlock(&k3_ringacc_list_lock);
+
+ return 0;
+}
+
+static int k3_ringacc_remove(struct platform_device *pdev)
+{
+ struct k3_ringacc *ringacc = dev_get_drvdata(&pdev->dev);
+
+ mutex_lock(&k3_ringacc_list_lock);
+ list_del(&ringacc->list);
+ mutex_unlock(&k3_ringacc_list_lock);
+ return 0;
+}
+
+static struct platform_driver k3_ringacc_driver = {
+ .probe = k3_ringacc_probe,
+ .remove = k3_ringacc_remove,
+ .driver = {
+ .name = "k3-ringacc",
+ .of_match_table = k3_ringacc_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+module_platform_driver(k3_ringacc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TI Ringacc driver for K3 SOCs");
+MODULE_AUTHOR("Grygorii Strashko <grygorii.strashko@ti.com>");
diff --git a/drivers/soc/ti/k3-socinfo.c b/drivers/soc/ti/k3-socinfo.c
new file mode 100644
index 0000000000..6ea9b8c7d3
--- /dev/null
+++ b/drivers/soc/ti/k3-socinfo.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TI K3 SoC info driver
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/regmap.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/sys_soc.h>
+
+#define CTRLMMR_WKUP_JTAGID_REG 0
+/*
+ * Bits:
+ * 31-28 VARIANT Device variant
+ * 27-12 PARTNO Part number
+ * 11-1 MFG Indicates TI as manufacturer (0x17)
+ * 0 Always 1
+ */
+#define CTRLMMR_WKUP_JTAGID_VARIANT_SHIFT (28)
+#define CTRLMMR_WKUP_JTAGID_VARIANT_MASK GENMASK(31, 28)
+
+#define CTRLMMR_WKUP_JTAGID_PARTNO_SHIFT (12)
+#define CTRLMMR_WKUP_JTAGID_PARTNO_MASK GENMASK(27, 12)
+
+#define CTRLMMR_WKUP_JTAGID_MFG_SHIFT (1)
+#define CTRLMMR_WKUP_JTAGID_MFG_MASK GENMASK(11, 1)
+
+#define CTRLMMR_WKUP_JTAGID_MFG_TI 0x17
+
+static const struct k3_soc_id {
+ unsigned int id;
+ const char *family_name;
+} k3_soc_ids[] = {
+ { 0xBB5A, "AM65X" },
+ { 0xBB64, "J721E" },
+ { 0xBB6D, "J7200" },
+ { 0xBB38, "AM64X" },
+ { 0xBB75, "J721S2"},
+ { 0xBB7E, "AM62X" },
+ { 0xBB80, "J784S4" },
+ { 0xBB8D, "AM62AX" },
+ { 0xBB9D, "AM62PX" },
+};
+
+static int
+k3_chipinfo_partno_to_names(unsigned int partno,
+ struct soc_device_attribute *soc_dev_attr)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(k3_soc_ids); i++)
+ if (partno == k3_soc_ids[i].id) {
+ soc_dev_attr->family = k3_soc_ids[i].family_name;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int k3_chipinfo_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct soc_device_attribute *soc_dev_attr;
+ struct device *dev = &pdev->dev;
+ struct soc_device *soc_dev;
+ struct regmap *regmap;
+ u32 partno_id;
+ u32 variant;
+ u32 jtag_id;
+ u32 mfg;
+ int ret;
+
+ regmap = device_node_to_regmap(node);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ret = regmap_read(regmap, CTRLMMR_WKUP_JTAGID_REG, &jtag_id);
+ if (ret < 0)
+ return ret;
+
+ mfg = (jtag_id & CTRLMMR_WKUP_JTAGID_MFG_MASK) >>
+ CTRLMMR_WKUP_JTAGID_MFG_SHIFT;
+
+ if (mfg != CTRLMMR_WKUP_JTAGID_MFG_TI) {
+ dev_err(dev, "Invalid MFG SoC\n");
+ return -ENODEV;
+ }
+
+ variant = (jtag_id & CTRLMMR_WKUP_JTAGID_VARIANT_MASK) >>
+ CTRLMMR_WKUP_JTAGID_VARIANT_SHIFT;
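+ /* The JTAG VARIANT field is zero-based; revision names start at SR1.0 */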
+ variant++;
+
+ partno_id = (jtag_id & CTRLMMR_WKUP_JTAGID_PARTNO_MASK) >>
+ CTRLMMR_WKUP_JTAGID_PARTNO_SHIFT;
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "SR%x.0", variant);
+ if (!soc_dev_attr->revision) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = k3_chipinfo_partno_to_names(partno_id, soc_dev_attr);
+ if (ret) {
+ dev_err(dev, "Unknown SoC JTAGID[0x%08X]\n", jtag_id);
+ ret = -ENODEV;
+ goto err_free_rev;
+ }
+
+ node = of_find_node_by_path("/");
+ of_property_read_string(node, "model", &soc_dev_attr->machine);
+ of_node_put(node);
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ ret = PTR_ERR(soc_dev);
+ goto err_free_rev;
+ }
+
+ dev_info(dev, "Family:%s rev:%s JTAGID[0x%08x] Detected\n",
+ soc_dev_attr->family,
+ soc_dev_attr->revision, jtag_id);
+
+ return 0;
+
+err_free_rev:
+ kfree(soc_dev_attr->revision);
+err:
+ kfree(soc_dev_attr);
+ return ret;
+}
+
+static const struct of_device_id k3_chipinfo_of_match[] = {
+ { .compatible = "ti,am654-chipid", },
+ { /* sentinel */ },
+};
+
+static struct platform_driver k3_chipinfo_driver = {
+ .driver = {
+ .name = "k3-chipinfo",
+ .of_match_table = k3_chipinfo_of_match,
+ },
+ .probe = k3_chipinfo_probe,
+};
+
+static int __init k3_chipinfo_init(void)
+{
+ return platform_driver_register(&k3_chipinfo_driver);
+}
+subsys_initcall(k3_chipinfo_init);
diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c
new file mode 100644
index 0000000000..0fbc37cd51
--- /dev/null
+++ b/drivers/soc/ti/knav_dma.c
@@ -0,0 +1,811 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Texas Instruments Incorporated
+ * Authors: Santosh Shilimkar <santosh.shilimkar@ti.com>
+ * Sandeep Nair <sandeep_n@ti.com>
+ * Cyril Chemparathy <cyril@ti.com>
+ */
+
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/dma-direction.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
+#include <linux/of_dma.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/soc/ti/knav_dma.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#define REG_MASK 0xffffffff
+
+#define DMA_LOOPBACK BIT(31)
+#define DMA_ENABLE BIT(31)
+#define DMA_TEARDOWN BIT(30)
+
+#define DMA_TX_FILT_PSWORDS BIT(29)
+#define DMA_TX_FILT_EINFO BIT(30)
+#define DMA_TX_PRIO_SHIFT 0
+#define DMA_RX_PRIO_SHIFT 16
+#define DMA_PRIO_MASK GENMASK(3, 0)
+#define DMA_PRIO_DEFAULT 0
+#define DMA_RX_TIMEOUT_DEFAULT 17500 /* cycles */
+#define DMA_RX_TIMEOUT_MASK GENMASK(16, 0)
+#define DMA_RX_TIMEOUT_SHIFT 0
+
+#define CHAN_HAS_EPIB BIT(30)
+#define CHAN_HAS_PSINFO BIT(29)
+#define CHAN_ERR_RETRY BIT(28)
+#define CHAN_PSINFO_AT_SOP BIT(25)
+#define CHAN_SOP_OFF_SHIFT 16
+#define CHAN_SOP_OFF_MASK GENMASK(9, 0)
+#define DESC_TYPE_SHIFT 26
+#define DESC_TYPE_MASK GENMASK(2, 0)
+
+/*
+ * QMGR & QNUM together make up 14 bits with QMGR as the 2 MSBs in the logical
+ * navigator cloud mapping scheme.
+ * Using the 14-bit physical queue numbers directly maps into this scheme.
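+ *
+ * For example (illustrative only): physical queue number 0x204d selects
+ * logical queue manager 2 (the two MSBs, bits 13-12) and queue 0x04d within
+ * that manager (bits 11-0).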
+ */
+#define CHAN_QNUM_MASK GENMASK(14, 0)
+#define DMA_MAX_QMS 4
+#define DMA_TIMEOUT 1 /* msecs */
+#define DMA_INVALID_ID 0xffff
+
+struct reg_global {
+ u32 revision;
+ u32 perf_control;
+ u32 emulation_control;
+ u32 priority_control;
+ u32 qm_base_address[DMA_MAX_QMS];
+};
+
+struct reg_chan {
+ u32 control;
+ u32 mode;
+ u32 __rsvd[6];
+};
+
+struct reg_tx_sched {
+ u32 prio;
+};
+
+struct reg_rx_flow {
+ u32 control;
+ u32 tags;
+ u32 tag_sel;
+ u32 fdq_sel[2];
+ u32 thresh[3];
+};
+
+struct knav_dma_pool_device {
+ struct device *dev;
+ struct list_head list;
+};
+
+struct knav_dma_device {
+ bool loopback, enable_all;
+ unsigned tx_priority, rx_priority, rx_timeout;
+ unsigned logical_queue_managers;
+ unsigned qm_base_address[DMA_MAX_QMS];
+ struct reg_global __iomem *reg_global;
+ struct reg_chan __iomem *reg_tx_chan;
+ struct reg_rx_flow __iomem *reg_rx_flow;
+ struct reg_chan __iomem *reg_rx_chan;
+ struct reg_tx_sched __iomem *reg_tx_sched;
+ unsigned max_rx_chan, max_tx_chan;
+ unsigned max_rx_flow;
+ char name[32];
+ atomic_t ref_count;
+ struct list_head list;
+ struct list_head chan_list;
+ spinlock_t lock;
+};
+
+struct knav_dma_chan {
+ enum dma_transfer_direction direction;
+ struct knav_dma_device *dma;
+ atomic_t ref_count;
+
+ /* registers */
+ struct reg_chan __iomem *reg_chan;
+ struct reg_tx_sched __iomem *reg_tx_sched;
+ struct reg_rx_flow __iomem *reg_rx_flow;
+
+ /* configuration stuff */
+ unsigned channel, flow;
+ struct knav_dma_cfg cfg;
+ struct list_head list;
+ spinlock_t lock;
+};
+
+#define chan_number(ch) ((ch->direction == DMA_MEM_TO_DEV) ? \
+ ch->channel : ch->flow)
+
+static struct knav_dma_pool_device *kdev;
+
+static bool device_ready;
+bool knav_dma_device_ready(void)
+{
+ return device_ready;
+}
+EXPORT_SYMBOL_GPL(knav_dma_device_ready);
+
+static bool check_config(struct knav_dma_chan *chan, struct knav_dma_cfg *cfg)
+{
+	return !memcmp(&chan->cfg, cfg, sizeof(*cfg));
+}
+
+static int chan_start(struct knav_dma_chan *chan,
+ struct knav_dma_cfg *cfg)
+{
+ u32 v = 0;
+
+ spin_lock(&chan->lock);
+ if ((chan->direction == DMA_MEM_TO_DEV) && chan->reg_chan) {
+ if (cfg->u.tx.filt_pswords)
+ v |= DMA_TX_FILT_PSWORDS;
+ if (cfg->u.tx.filt_einfo)
+ v |= DMA_TX_FILT_EINFO;
+ writel_relaxed(v, &chan->reg_chan->mode);
+ writel_relaxed(DMA_ENABLE, &chan->reg_chan->control);
+ }
+
+ if (chan->reg_tx_sched)
+ writel_relaxed(cfg->u.tx.priority, &chan->reg_tx_sched->prio);
+
+ if (chan->reg_rx_flow) {
+ v = 0;
+
+ if (cfg->u.rx.einfo_present)
+ v |= CHAN_HAS_EPIB;
+ if (cfg->u.rx.psinfo_present)
+ v |= CHAN_HAS_PSINFO;
+ if (cfg->u.rx.err_mode == DMA_RETRY)
+ v |= CHAN_ERR_RETRY;
+ v |= (cfg->u.rx.desc_type & DESC_TYPE_MASK) << DESC_TYPE_SHIFT;
+ if (cfg->u.rx.psinfo_at_sop)
+ v |= CHAN_PSINFO_AT_SOP;
+ v |= (cfg->u.rx.sop_offset & CHAN_SOP_OFF_MASK)
+ << CHAN_SOP_OFF_SHIFT;
+ v |= cfg->u.rx.dst_q & CHAN_QNUM_MASK;
+
+ writel_relaxed(v, &chan->reg_rx_flow->control);
+ writel_relaxed(0, &chan->reg_rx_flow->tags);
+ writel_relaxed(0, &chan->reg_rx_flow->tag_sel);
+
+ v = cfg->u.rx.fdq[0] << 16;
+ v |= cfg->u.rx.fdq[1] & CHAN_QNUM_MASK;
+ writel_relaxed(v, &chan->reg_rx_flow->fdq_sel[0]);
+
+ v = cfg->u.rx.fdq[2] << 16;
+ v |= cfg->u.rx.fdq[3] & CHAN_QNUM_MASK;
+ writel_relaxed(v, &chan->reg_rx_flow->fdq_sel[1]);
+
+ writel_relaxed(0, &chan->reg_rx_flow->thresh[0]);
+ writel_relaxed(0, &chan->reg_rx_flow->thresh[1]);
+ writel_relaxed(0, &chan->reg_rx_flow->thresh[2]);
+ }
+
+ /* Keep a copy of the cfg */
+ memcpy(&chan->cfg, cfg, sizeof(*cfg));
+ spin_unlock(&chan->lock);
+
+ return 0;
+}
+
+static int chan_teardown(struct knav_dma_chan *chan)
+{
+ unsigned long end, value;
+
+ if (!chan->reg_chan)
+ return 0;
+
+ /* indicate teardown */
+ writel_relaxed(DMA_TEARDOWN, &chan->reg_chan->control);
+
+ /* wait for the dma to shut itself down */
+ end = jiffies + msecs_to_jiffies(DMA_TIMEOUT);
+ do {
+ value = readl_relaxed(&chan->reg_chan->control);
+ if ((value & DMA_ENABLE) == 0)
+ break;
+ } while (time_after(end, jiffies));
+
+ if (readl_relaxed(&chan->reg_chan->control) & DMA_ENABLE) {
+ dev_err(kdev->dev, "timeout waiting for teardown\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void chan_stop(struct knav_dma_chan *chan)
+{
+ spin_lock(&chan->lock);
+ if (chan->reg_rx_flow) {
+ /* first detach fdqs, starve out the flow */
+ writel_relaxed(0, &chan->reg_rx_flow->fdq_sel[0]);
+ writel_relaxed(0, &chan->reg_rx_flow->fdq_sel[1]);
+ writel_relaxed(0, &chan->reg_rx_flow->thresh[0]);
+ writel_relaxed(0, &chan->reg_rx_flow->thresh[1]);
+ writel_relaxed(0, &chan->reg_rx_flow->thresh[2]);
+ }
+
+ /* teardown the dma channel */
+ chan_teardown(chan);
+
+ /* then disconnect the completion side */
+ if (chan->reg_rx_flow) {
+ writel_relaxed(0, &chan->reg_rx_flow->control);
+ writel_relaxed(0, &chan->reg_rx_flow->tags);
+ writel_relaxed(0, &chan->reg_rx_flow->tag_sel);
+ }
+
+ memset(&chan->cfg, 0, sizeof(struct knav_dma_cfg));
+ spin_unlock(&chan->lock);
+
+ dev_dbg(kdev->dev, "channel stopped\n");
+}
+
+static void dma_hw_enable_all(struct knav_dma_device *dma)
+{
+ int i;
+
+ for (i = 0; i < dma->max_tx_chan; i++) {
+ writel_relaxed(0, &dma->reg_tx_chan[i].mode);
+ writel_relaxed(DMA_ENABLE, &dma->reg_tx_chan[i].control);
+ }
+}
+
+
+static void knav_dma_hw_init(struct knav_dma_device *dma)
+{
+ unsigned v;
+ int i;
+
+ spin_lock(&dma->lock);
+ v = dma->loopback ? DMA_LOOPBACK : 0;
+ writel_relaxed(v, &dma->reg_global->emulation_control);
+
+ v = readl_relaxed(&dma->reg_global->perf_control);
+ v |= ((dma->rx_timeout & DMA_RX_TIMEOUT_MASK) << DMA_RX_TIMEOUT_SHIFT);
+ writel_relaxed(v, &dma->reg_global->perf_control);
+
+ v = ((dma->tx_priority << DMA_TX_PRIO_SHIFT) |
+ (dma->rx_priority << DMA_RX_PRIO_SHIFT));
+
+ writel_relaxed(v, &dma->reg_global->priority_control);
+
+ /* Always enable all Rx channels. Rx paths are managed using flows */
+ for (i = 0; i < dma->max_rx_chan; i++)
+ writel_relaxed(DMA_ENABLE, &dma->reg_rx_chan[i].control);
+
+ for (i = 0; i < dma->logical_queue_managers; i++)
+ writel_relaxed(dma->qm_base_address[i],
+ &dma->reg_global->qm_base_address[i]);
+ spin_unlock(&dma->lock);
+}
+
+static void knav_dma_hw_destroy(struct knav_dma_device *dma)
+{
+ int i;
+ unsigned v;
+
+ spin_lock(&dma->lock);
+ v = ~DMA_ENABLE & REG_MASK;
+
+ for (i = 0; i < dma->max_rx_chan; i++)
+ writel_relaxed(v, &dma->reg_rx_chan[i].control);
+
+ for (i = 0; i < dma->max_tx_chan; i++)
+ writel_relaxed(v, &dma->reg_tx_chan[i].control);
+ spin_unlock(&dma->lock);
+}
+
+static void dma_debug_show_channels(struct seq_file *s,
+ struct knav_dma_chan *chan)
+{
+ int i;
+
+ seq_printf(s, "\t%s %d:\t",
+ ((chan->direction == DMA_MEM_TO_DEV) ? "tx chan" : "rx flow"),
+ chan_number(chan));
+
+ if (chan->direction == DMA_MEM_TO_DEV) {
+ seq_printf(s, "einfo - %d, pswords - %d, priority - %d\n",
+ chan->cfg.u.tx.filt_einfo,
+ chan->cfg.u.tx.filt_pswords,
+ chan->cfg.u.tx.priority);
+ } else {
+ seq_printf(s, "einfo - %d, psinfo - %d, desc_type - %d\n",
+ chan->cfg.u.rx.einfo_present,
+ chan->cfg.u.rx.psinfo_present,
+ chan->cfg.u.rx.desc_type);
+ seq_printf(s, "\t\t\tdst_q: [%d], thresh: %d fdq: ",
+ chan->cfg.u.rx.dst_q,
+ chan->cfg.u.rx.thresh);
+ for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; i++)
+ seq_printf(s, "[%d]", chan->cfg.u.rx.fdq[i]);
+ seq_printf(s, "\n");
+ }
+}
+
+static void dma_debug_show_devices(struct seq_file *s,
+ struct knav_dma_device *dma)
+{
+ struct knav_dma_chan *chan;
+
+ list_for_each_entry(chan, &dma->chan_list, list) {
+ if (atomic_read(&chan->ref_count))
+ dma_debug_show_channels(s, chan);
+ }
+}
+
+static int knav_dma_debug_show(struct seq_file *s, void *v)
+{
+ struct knav_dma_device *dma;
+
+ list_for_each_entry(dma, &kdev->list, list) {
+ if (atomic_read(&dma->ref_count)) {
+ seq_printf(s, "%s : max_tx_chan: (%d), max_rx_flows: (%d)\n",
+ dma->name, dma->max_tx_chan, dma->max_rx_flow);
+ dma_debug_show_devices(s, dma);
+ }
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(knav_dma_debug);
+
+static int of_channel_match_helper(struct device_node *np, const char *name,
+ const char **dma_instance)
+{
+ struct of_phandle_args args;
+ struct device_node *dma_node;
+ int index;
+
+ dma_node = of_parse_phandle(np, "ti,navigator-dmas", 0);
+ if (!dma_node)
+ return -ENODEV;
+
+ *dma_instance = dma_node->name;
+ index = of_property_match_string(np, "ti,navigator-dma-names", name);
+ if (index < 0) {
+ dev_err(kdev->dev, "No 'ti,navigator-dma-names' property\n");
+ return -ENODEV;
+ }
+
+ if (of_parse_phandle_with_fixed_args(np, "ti,navigator-dmas",
+ 1, index, &args)) {
+ dev_err(kdev->dev, "Missing the phandle args name %s\n", name);
+ return -ENODEV;
+ }
+
+ if (args.args[0] < 0) {
+ dev_err(kdev->dev, "Missing args for %s\n", name);
+ return -ENODEV;
+ }
+
+ return args.args[0];
+}
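+
+/*
+ * Illustrative device tree fragment for the client properties parsed above
+ * (the node name, the &dma_gbe phandle, the channel names and the numbers
+ * are hypothetical):
+ *
+ *	ethernet {
+ *		ti,navigator-dmas = <&dma_gbe 22>, <&dma_gbe 23>;
+ *		ti,navigator-dma-names = "netrx0", "nettx";
+ *	};
+ *
+ * A lookup with name "netrx0" then resolves to flow/channel 22 of the DMA
+ * instance named after the &dma_gbe node.
+ */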
+
+/**
+ * knav_dma_open_channel() - try to setup an exclusive slave channel
+ * @dev: pointer to client device structure
+ * @name: slave channel name
+ * @config: dma configuration parameters
+ *
+ * Returns pointer to appropriate DMA channel on success or error.
+ */
+void *knav_dma_open_channel(struct device *dev, const char *name,
+ struct knav_dma_cfg *config)
+{
+ struct knav_dma_device *dma = NULL, *iter1;
+ struct knav_dma_chan *chan = NULL, *iter2;
+ int chan_num = -1;
+ const char *instance;
+
+ if (!kdev) {
+ pr_err("keystone-navigator-dma driver not registered\n");
+ return (void *)-EINVAL;
+ }
+
+ chan_num = of_channel_match_helper(dev->of_node, name, &instance);
+ if (chan_num < 0) {
+ dev_err(kdev->dev, "No DMA instance with name %s\n", name);
+ return (void *)-EINVAL;
+ }
+
+ dev_dbg(kdev->dev, "initializing %s channel %d from DMA %s\n",
+ config->direction == DMA_MEM_TO_DEV ? "transmit" :
+ config->direction == DMA_DEV_TO_MEM ? "receive" :
+ "unknown", chan_num, instance);
+
+ if (config->direction != DMA_MEM_TO_DEV &&
+ config->direction != DMA_DEV_TO_MEM) {
+ dev_err(kdev->dev, "bad direction\n");
+ return (void *)-EINVAL;
+ }
+
+ /* Look for correct dma instance */
+ list_for_each_entry(iter1, &kdev->list, list) {
+ if (!strcmp(iter1->name, instance)) {
+ dma = iter1;
+ break;
+ }
+ }
+ if (!dma) {
+ dev_err(kdev->dev, "No DMA instance with name %s\n", instance);
+ return (void *)-EINVAL;
+ }
+
+ /* Look for correct dma channel from dma instance */
+ list_for_each_entry(iter2, &dma->chan_list, list) {
+ if (config->direction == DMA_MEM_TO_DEV) {
+ if (iter2->channel == chan_num) {
+ chan = iter2;
+ break;
+ }
+ } else {
+ if (iter2->flow == chan_num) {
+ chan = iter2;
+ break;
+ }
+ }
+ }
+ if (!chan) {
+ dev_err(kdev->dev, "channel %d is not in DMA %s\n",
+ chan_num, instance);
+ return (void *)-EINVAL;
+ }
+
+ if (atomic_read(&chan->ref_count) >= 1) {
+ if (!check_config(chan, config)) {
+			dev_err(kdev->dev, "channel %d config mismatch\n",
+ chan_num);
+ return (void *)-EINVAL;
+ }
+ }
+
+ if (atomic_inc_return(&chan->dma->ref_count) <= 1)
+ knav_dma_hw_init(chan->dma);
+
+ if (atomic_inc_return(&chan->ref_count) <= 1)
+ chan_start(chan, config);
+
+ dev_dbg(kdev->dev, "channel %d opened from DMA %s\n",
+ chan_num, instance);
+
+ return chan;
+}
+EXPORT_SYMBOL_GPL(knav_dma_open_channel);
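+
+/*
+ * Minimal usage sketch (illustrative only, not part of this driver; the
+ * device pointer, channel name and queue numbers are hypothetical):
+ *
+ *	struct knav_dma_cfg config = { .direction = DMA_DEV_TO_MEM };
+ *	void *rx_chan;
+ *
+ *	config.u.rx.dst_q = 528;
+ *	config.u.rx.fdq[0] = 529;
+ *	rx_chan = knav_dma_open_channel(dev, "netrx0", &config);
+ *	if (IS_ERR(rx_chan))
+ *		return PTR_ERR(rx_chan);
+ *	...
+ *	knav_dma_close_channel(rx_chan);
+ */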
+
+/**
+ * knav_dma_close_channel() - Destroy a dma channel
+ *
+ * @channel: dma channel handle
+ *
+ */
+void knav_dma_close_channel(void *channel)
+{
+ struct knav_dma_chan *chan = channel;
+
+ if (!kdev) {
+ pr_err("keystone-navigator-dma driver not registered\n");
+ return;
+ }
+
+ if (atomic_dec_return(&chan->ref_count) <= 0)
+ chan_stop(chan);
+
+ if (atomic_dec_return(&chan->dma->ref_count) <= 0)
+ knav_dma_hw_destroy(chan->dma);
+
+ dev_dbg(kdev->dev, "channel %d or flow %d closed from DMA %s\n",
+ chan->channel, chan->flow, chan->dma->name);
+}
+EXPORT_SYMBOL_GPL(knav_dma_close_channel);
+
+static void __iomem *pktdma_get_regs(struct knav_dma_device *dma,
+ struct device_node *node,
+ unsigned index, resource_size_t *_size)
+{
+ struct device *dev = kdev->dev;
+ struct resource res;
+ void __iomem *regs;
+ int ret;
+
+ ret = of_address_to_resource(node, index, &res);
+ if (ret) {
+ dev_err(dev, "Can't translate of node(%pOFn) address for index(%d)\n",
+ node, index);
+ return ERR_PTR(ret);
+ }
+
+ regs = devm_ioremap_resource(kdev->dev, &res);
+ if (IS_ERR(regs))
+ dev_err(dev, "Failed to map register base for index(%d) node(%pOFn)\n",
+ index, node);
+ if (_size)
+ *_size = resource_size(&res);
+
+ return regs;
+}
+
+static int pktdma_init_rx_chan(struct knav_dma_chan *chan, u32 flow)
+{
+ struct knav_dma_device *dma = chan->dma;
+
+ chan->flow = flow;
+ chan->reg_rx_flow = dma->reg_rx_flow + flow;
+ chan->channel = DMA_INVALID_ID;
+ dev_dbg(kdev->dev, "rx flow(%d) (%p)\n", chan->flow, chan->reg_rx_flow);
+
+ return 0;
+}
+
+static int pktdma_init_tx_chan(struct knav_dma_chan *chan, u32 channel)
+{
+ struct knav_dma_device *dma = chan->dma;
+
+ chan->channel = channel;
+ chan->reg_chan = dma->reg_tx_chan + channel;
+ chan->reg_tx_sched = dma->reg_tx_sched + channel;
+ chan->flow = DMA_INVALID_ID;
+ dev_dbg(kdev->dev, "tx channel(%d) (%p)\n", chan->channel, chan->reg_chan);
+
+ return 0;
+}
+
+static int pktdma_init_chan(struct knav_dma_device *dma,
+ enum dma_transfer_direction dir,
+ unsigned chan_num)
+{
+ struct device *dev = kdev->dev;
+ struct knav_dma_chan *chan;
+ int ret = -EINVAL;
+
+ chan = devm_kzalloc(dev, sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&chan->list);
+ chan->dma = dma;
+ chan->direction = DMA_TRANS_NONE;
+ atomic_set(&chan->ref_count, 0);
+ spin_lock_init(&chan->lock);
+
+ if (dir == DMA_MEM_TO_DEV) {
+ chan->direction = dir;
+ ret = pktdma_init_tx_chan(chan, chan_num);
+ } else if (dir == DMA_DEV_TO_MEM) {
+ chan->direction = dir;
+ ret = pktdma_init_rx_chan(chan, chan_num);
+ } else {
+ dev_err(dev, "channel(%d) direction unknown\n", chan_num);
+ }
+
+ list_add_tail(&chan->list, &dma->chan_list);
+
+ return ret;
+}
+
+static int dma_init(struct device_node *cloud, struct device_node *dma_node)
+{
+ unsigned max_tx_chan, max_rx_chan, max_rx_flow, max_tx_sched;
+ struct device_node *node = dma_node;
+ struct knav_dma_device *dma;
+ int ret, len, num_chan = 0;
+ resource_size_t size;
+ u32 timeout;
+ u32 i;
+
+ dma = devm_kzalloc(kdev->dev, sizeof(*dma), GFP_KERNEL);
+ if (!dma) {
+ dev_err(kdev->dev, "could not allocate driver mem\n");
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(&dma->list);
+ INIT_LIST_HEAD(&dma->chan_list);
+
+ if (!of_find_property(cloud, "ti,navigator-cloud-address", &len)) {
+ dev_err(kdev->dev, "unspecified navigator cloud addresses\n");
+ return -ENODEV;
+ }
+
+ dma->logical_queue_managers = len / sizeof(u32);
+ if (dma->logical_queue_managers > DMA_MAX_QMS) {
+ dev_warn(kdev->dev, "too many queue mgrs(>%d) rest ignored\n",
+ dma->logical_queue_managers);
+ dma->logical_queue_managers = DMA_MAX_QMS;
+ }
+
+ ret = of_property_read_u32_array(cloud, "ti,navigator-cloud-address",
+ dma->qm_base_address,
+ dma->logical_queue_managers);
+ if (ret) {
+ dev_err(kdev->dev, "invalid navigator cloud addresses\n");
+ return -ENODEV;
+ }
+
+ dma->reg_global = pktdma_get_regs(dma, node, 0, &size);
+ if (IS_ERR(dma->reg_global))
+ return PTR_ERR(dma->reg_global);
+ if (size < sizeof(struct reg_global)) {
+ dev_err(kdev->dev, "bad size %pa for global regs\n", &size);
+ return -ENODEV;
+ }
+
+ dma->reg_tx_chan = pktdma_get_regs(dma, node, 1, &size);
+ if (IS_ERR(dma->reg_tx_chan))
+ return PTR_ERR(dma->reg_tx_chan);
+
+ max_tx_chan = size / sizeof(struct reg_chan);
+ dma->reg_rx_chan = pktdma_get_regs(dma, node, 2, &size);
+ if (IS_ERR(dma->reg_rx_chan))
+ return PTR_ERR(dma->reg_rx_chan);
+
+ max_rx_chan = size / sizeof(struct reg_chan);
+ dma->reg_tx_sched = pktdma_get_regs(dma, node, 3, &size);
+ if (IS_ERR(dma->reg_tx_sched))
+ return PTR_ERR(dma->reg_tx_sched);
+
+ max_tx_sched = size / sizeof(struct reg_tx_sched);
+ dma->reg_rx_flow = pktdma_get_regs(dma, node, 4, &size);
+ if (IS_ERR(dma->reg_rx_flow))
+ return PTR_ERR(dma->reg_rx_flow);
+
+ max_rx_flow = size / sizeof(struct reg_rx_flow);
+ dma->rx_priority = DMA_PRIO_DEFAULT;
+ dma->tx_priority = DMA_PRIO_DEFAULT;
+
+ dma->enable_all = of_property_read_bool(node, "ti,enable-all");
+ dma->loopback = of_property_read_bool(node, "ti,loop-back");
+
+ ret = of_property_read_u32(node, "ti,rx-retry-timeout", &timeout);
+ if (ret < 0) {
+ dev_dbg(kdev->dev, "unspecified rx timeout using value %d\n",
+ DMA_RX_TIMEOUT_DEFAULT);
+ timeout = DMA_RX_TIMEOUT_DEFAULT;
+ }
+
+ dma->rx_timeout = timeout;
+ dma->max_rx_chan = max_rx_chan;
+ dma->max_rx_flow = max_rx_flow;
+ dma->max_tx_chan = min(max_tx_chan, max_tx_sched);
+ atomic_set(&dma->ref_count, 0);
+ strcpy(dma->name, node->name);
+ spin_lock_init(&dma->lock);
+
+ for (i = 0; i < dma->max_tx_chan; i++) {
+ if (pktdma_init_chan(dma, DMA_MEM_TO_DEV, i) >= 0)
+ num_chan++;
+ }
+
+ for (i = 0; i < dma->max_rx_flow; i++) {
+ if (pktdma_init_chan(dma, DMA_DEV_TO_MEM, i) >= 0)
+ num_chan++;
+ }
+
+ list_add_tail(&dma->list, &kdev->list);
+
+ /*
+	 * For DSP software use cases or userspace transport software, set up
+	 * all the DMA hardware resources.
+ */
+ if (dma->enable_all) {
+ atomic_inc(&dma->ref_count);
+ knav_dma_hw_init(dma);
+ dma_hw_enable_all(dma);
+ }
+
+ dev_info(kdev->dev, "DMA %s registered %d logical channels, flows %d, tx chans: %d, rx chans: %d%s\n",
+ dma->name, num_chan, dma->max_rx_flow,
+ dma->max_tx_chan, dma->max_rx_chan,
+ dma->loopback ? ", loopback" : "");
+
+ return 0;
+}
+
+static int knav_dma_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *child;
+ int ret = 0;
+
+ if (!node) {
+ dev_err(&pdev->dev, "could not find device info\n");
+ return -EINVAL;
+ }
+
+ kdev = devm_kzalloc(dev,
+ sizeof(struct knav_dma_pool_device), GFP_KERNEL);
+ if (!kdev) {
+ dev_err(dev, "could not allocate driver mem\n");
+ return -ENOMEM;
+ }
+
+ kdev->dev = dev;
+ INIT_LIST_HEAD(&kdev->list);
+
+ pm_runtime_enable(kdev->dev);
+ ret = pm_runtime_resume_and_get(kdev->dev);
+ if (ret < 0) {
+ dev_err(kdev->dev, "unable to enable pktdma, err %d\n", ret);
+ goto err_pm_disable;
+ }
+
+ /* Initialise all packet dmas */
+ for_each_child_of_node(node, child) {
+ ret = dma_init(node, child);
+ if (ret) {
+ of_node_put(child);
+ dev_err(&pdev->dev, "init failed with %d\n", ret);
+ break;
+ }
+ }
+
+ if (list_empty(&kdev->list)) {
+ dev_err(dev, "no valid dma instance\n");
+ ret = -ENODEV;
+ goto err_put_sync;
+ }
+
+ debugfs_create_file("knav_dma", S_IFREG | S_IRUGO, NULL, NULL,
+ &knav_dma_debug_fops);
+
+ device_ready = true;
+ return ret;
+
+err_put_sync:
+ pm_runtime_put_sync(kdev->dev);
+err_pm_disable:
+ pm_runtime_disable(kdev->dev);
+
+ return ret;
+}
+
+static int knav_dma_remove(struct platform_device *pdev)
+{
+ struct knav_dma_device *dma;
+
+ list_for_each_entry(dma, &kdev->list, list) {
+ if (atomic_dec_return(&dma->ref_count) == 0)
+ knav_dma_hw_destroy(dma);
+ }
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static struct of_device_id of_match[] = {
+ { .compatible = "ti,keystone-navigator-dma", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, of_match);
+
+static struct platform_driver knav_dma_driver = {
+ .probe = knav_dma_probe,
+ .remove = knav_dma_remove,
+ .driver = {
+ .name = "keystone-navigator-dma",
+ .of_match_table = of_match,
+ },
+};
+module_platform_driver(knav_dma_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI Keystone Navigator Packet DMA driver");
+MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
+MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");
diff --git a/drivers/soc/ti/knav_qmss.h b/drivers/soc/ti/knav_qmss.h
new file mode 100644
index 0000000000..a01eda720b
--- /dev/null
+++ b/drivers/soc/ti/knav_qmss.h
@@ -0,0 +1,387 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Keystone Navigator QMSS driver internal header
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Sandeep Nair <sandeep_n@ti.com>
+ * Cyril Chemparathy <cyril@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ */
+
+#ifndef __KNAV_QMSS_H__
+#define __KNAV_QMSS_H__
+
+#include <linux/percpu.h>
+
+#define THRESH_GTE BIT(7)
+#define THRESH_LT 0
+
+#define PDSP_CTRL_PC_MASK 0xffff0000
+#define PDSP_CTRL_SOFT_RESET BIT(0)
+#define PDSP_CTRL_ENABLE BIT(1)
+#define PDSP_CTRL_RUNNING BIT(15)
+
+#define ACC_MAX_CHANNEL 48
+#define ACC_DEFAULT_PERIOD 25 /* usecs */
+
+#define ACC_CHANNEL_INT_BASE 2
+
+#define ACC_LIST_ENTRY_TYPE 1
+#define ACC_LIST_ENTRY_WORDS (1 << ACC_LIST_ENTRY_TYPE)
+#define ACC_LIST_ENTRY_QUEUE_IDX 0
+#define ACC_LIST_ENTRY_DESC_IDX (ACC_LIST_ENTRY_WORDS - 1)
+
+#define ACC_CMD_DISABLE_CHANNEL 0x80
+#define ACC_CMD_ENABLE_CHANNEL 0x81
+#define ACC_CFG_MULTI_QUEUE BIT(21)
+
+#define ACC_INTD_OFFSET_EOI (0x0010)
+#define ACC_INTD_OFFSET_COUNT(ch) (0x0300 + 4 * (ch))
+#define ACC_INTD_OFFSET_STATUS(ch) (0x0200 + 4 * ((ch) / 32))
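+
+/*
+ * For example (illustrative only): accumulator channel 35 uses the count
+ * register at offset 0x38c (0x300 + 4 * 35) and shares the status register
+ * at offset 0x204 (0x200 + 4 * (35 / 32)) with channels 32-63.
+ */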
+
+#define RANGE_MAX_IRQS 64
+
+#define ACC_DESCS_MAX SZ_1K
+#define ACC_DESCS_MASK (ACC_DESCS_MAX - 1)
+#define DESC_SIZE_MASK 0xful
+#define DESC_PTR_MASK (~DESC_SIZE_MASK)
+
+#define KNAV_NAME_SIZE 32
+
+enum knav_acc_result {
+ ACC_RET_IDLE,
+ ACC_RET_SUCCESS,
+ ACC_RET_INVALID_COMMAND,
+ ACC_RET_INVALID_CHANNEL,
+ ACC_RET_INACTIVE_CHANNEL,
+ ACC_RET_ACTIVE_CHANNEL,
+ ACC_RET_INVALID_QUEUE,
+ ACC_RET_INVALID_RET,
+};
+
+struct knav_reg_config {
+ u32 revision;
+ u32 __pad1;
+ u32 divert;
+ u32 link_ram_base0;
+ u32 link_ram_size0;
+ u32 link_ram_base1;
+ u32 __pad2[2];
+ u32 starvation[];
+};
+
+struct knav_reg_region {
+ u32 base;
+ u32 start_index;
+ u32 size_count;
+ u32 __pad;
+};
+
+struct knav_reg_pdsp_regs {
+ u32 control;
+ u32 status;
+ u32 cycle_count;
+ u32 stall_count;
+};
+
+struct knav_reg_acc_command {
+ u32 command;
+ u32 queue_mask;
+ u32 list_dma;
+ u32 queue_num;
+ u32 timer_config;
+};
+
+struct knav_link_ram_block {
+ dma_addr_t dma;
+ void *virt;
+ size_t size;
+};
+
+struct knav_acc_info {
+ u32 pdsp_id;
+ u32 start_channel;
+ u32 list_entries;
+ u32 pacing_mode;
+ u32 timer_count;
+ int mem_size;
+ int list_size;
+ struct knav_pdsp_info *pdsp;
+};
+
+struct knav_acc_channel {
+ u32 channel;
+ u32 list_index;
+ u32 open_mask;
+ u32 *list_cpu[2];
+ dma_addr_t list_dma[2];
+ char name[KNAV_NAME_SIZE];
+ atomic_t retrigger_count;
+};
+
+struct knav_pdsp_info {
+ const char *name;
+ struct knav_reg_pdsp_regs __iomem *regs;
+ union {
+ void __iomem *command;
+ struct knav_reg_acc_command __iomem *acc_command;
+ u32 __iomem *qos_command;
+ };
+ void __iomem *intd;
+ u32 __iomem *iram;
+ u32 id;
+ struct list_head list;
+ bool loaded;
+ bool started;
+};
+
+struct knav_qmgr_info {
+ unsigned start_queue;
+ unsigned num_queues;
+ struct knav_reg_config __iomem *reg_config;
+ struct knav_reg_region __iomem *reg_region;
+ struct knav_reg_queue __iomem *reg_push, *reg_pop, *reg_peek;
+ void __iomem *reg_status;
+ struct list_head list;
+};
+
+#define KNAV_NUM_LINKRAM 2
+
+/**
+ * struct knav_queue_stats: queue statistics
+ * @pushes: number of push operations
+ * @pops: number of pop operations
+ * @push_errors: number of push errors
+ * @pop_errors: number of pop errors
+ * @notifies: notifier counts
+ */
+struct knav_queue_stats {
+ unsigned int pushes;
+ unsigned int pops;
+ unsigned int push_errors;
+ unsigned int pop_errors;
+ unsigned int notifies;
+};
+
+/**
+ * struct knav_reg_queue: queue registers
+ * @entry_count: valid entries in the queue
+ * @byte_count: total byte count in the queue
+ * @packet_size: packet size for the queue
+ * @ptr_size_thresh: packet pointer size threshold
+ */
+struct knav_reg_queue {
+ u32 entry_count;
+ u32 byte_count;
+ u32 packet_size;
+ u32 ptr_size_thresh;
+};
+
+/**
+ * struct knav_region: qmss region info
+ * @dma_start, dma_end: start and end dma address
+ * @virt_start, virt_end: start and end virtual address
+ * @desc_size: descriptor size
+ * @used_desc: consumed descriptors
+ * @id: region number
+ * @num_desc: total descriptors
+ * @link_index: index of the first descriptor
+ * @name: region name
+ * @list: instance in the device's region list
+ * @pools: list of descriptor pools in the region
+ */
+struct knav_region {
+ dma_addr_t dma_start, dma_end;
+ void *virt_start, *virt_end;
+ unsigned desc_size;
+ unsigned used_desc;
+ unsigned id;
+ unsigned num_desc;
+ unsigned link_index;
+ const char *name;
+ struct list_head list;
+ struct list_head pools;
+};
+
+/**
+ * struct knav_pool: qmss pools
+ * @dev: device pointer
+ * @region: qmss region info
+ * @queue: queue registers
+ * @kdev: qmss device pointer
+ * @region_offset: offset from the base
+ * @num_desc: total descriptors
+ * @desc_size: descriptor size
+ * @region_id: region number
+ * @name: pool name
+ * @list: list head
+ * @region_inst: instance in the region's pool list
+ */
+struct knav_pool {
+ struct device *dev;
+ struct knav_region *region;
+ struct knav_queue *queue;
+ struct knav_device *kdev;
+ int region_offset;
+ int num_desc;
+ int desc_size;
+ int region_id;
+ const char *name;
+ struct list_head list;
+ struct list_head region_inst;
+};
+
+/**
+ * struct knav_queue_inst: qmss queue instance properties
+ * @descs: descriptor pointer
+ * @desc_head, desc_tail, desc_count: descriptor counters
+ * @acc: accumulator channel pointer
+ * @kdev: qmss device pointer
+ * @range: range info
+ * @qmgr: queue manager info
+ * @id: queue instance id
+ * @irq_num: irq line number
+ * @notify_needed: notifier needed based on queue type
+ * @num_notifiers: total notifiers
+ * @handles: list head
+ * @name: queue instance name
+ * @irq_name: irq line name
+ */
+struct knav_queue_inst {
+ u32 *descs;
+ atomic_t desc_head, desc_tail, desc_count;
+ struct knav_acc_channel *acc;
+ struct knav_device *kdev;
+ struct knav_range_info *range;
+ struct knav_qmgr_info *qmgr;
+ u32 id;
+ int irq_num;
+ int notify_needed;
+ atomic_t num_notifiers;
+ struct list_head handles;
+ const char *name;
+ const char *irq_name;
+};
+
+/**
+ * struct knav_queue: qmss queue properties
+ * @reg_push, reg_pop, reg_peek: push, pop queue registers
+ * @inst: qmss queue instance properties
+ * @stats: per-CPU queue usage statistics
+ * @notifier_fn: notifier function
+ * @notifier_fn_arg: notifier function argument
+ * @notifier_enabled: notifier enabled for a given queue
+ * @rcu: rcu head
+ * @flags: queue flags
+ * @list: list head
+ */
+struct knav_queue {
+ struct knav_reg_queue __iomem *reg_push, *reg_pop, *reg_peek;
+ struct knav_queue_inst *inst;
+ struct knav_queue_stats __percpu *stats;
+ knav_queue_notify_fn notifier_fn;
+ void *notifier_fn_arg;
+ atomic_t notifier_enabled;
+ struct rcu_head rcu;
+ unsigned flags;
+ struct list_head list;
+};
+
+enum qmss_version {
+ QMSS,
+ QMSS_66AK2G,
+};
+
+struct knav_device {
+ struct device *dev;
+ unsigned base_id;
+ unsigned num_queues;
+ unsigned num_queues_in_use;
+ unsigned inst_shift;
+ struct knav_link_ram_block link_rams[KNAV_NUM_LINKRAM];
+ void *instances;
+ struct list_head regions;
+ struct list_head queue_ranges;
+ struct list_head pools;
+ struct list_head pdsps;
+ struct list_head qmgrs;
+ enum qmss_version version;
+};
+
+struct knav_range_ops {
+ int (*init_range)(struct knav_range_info *range);
+ int (*free_range)(struct knav_range_info *range);
+ int (*init_queue)(struct knav_range_info *range,
+ struct knav_queue_inst *inst);
+ int (*open_queue)(struct knav_range_info *range,
+ struct knav_queue_inst *inst, unsigned flags);
+ int (*close_queue)(struct knav_range_info *range,
+ struct knav_queue_inst *inst);
+ int (*set_notify)(struct knav_range_info *range,
+ struct knav_queue_inst *inst, bool enabled);
+};
+
+struct knav_irq_info {
+ int irq;
+ struct cpumask *cpu_mask;
+};
+
+struct knav_range_info {
+ const char *name;
+ struct knav_device *kdev;
+ unsigned queue_base;
+ unsigned num_queues;
+ void *queue_base_inst;
+ unsigned flags;
+ struct list_head list;
+ struct knav_range_ops *ops;
+ struct knav_acc_info acc_info;
+ struct knav_acc_channel *acc;
+ unsigned num_irqs;
+ struct knav_irq_info irqs[RANGE_MAX_IRQS];
+};
+
+#define RANGE_RESERVED BIT(0)
+#define RANGE_HAS_IRQ BIT(1)
+#define RANGE_HAS_ACCUMULATOR BIT(2)
+#define RANGE_MULTI_QUEUE BIT(3)
+
+#define for_each_region(kdev, region) \
+ list_for_each_entry(region, &kdev->regions, list)
+
+#define first_region(kdev) \
+ list_first_entry_or_null(&kdev->regions, \
+ struct knav_region, list)
+
+#define for_each_queue_range(kdev, range) \
+ list_for_each_entry(range, &kdev->queue_ranges, list)
+
+#define first_queue_range(kdev) \
+ list_first_entry_or_null(&kdev->queue_ranges, \
+ struct knav_range_info, list)
+
+#define for_each_pool(kdev, pool) \
+ list_for_each_entry(pool, &kdev->pools, list)
+
+#define for_each_pdsp(kdev, pdsp) \
+ list_for_each_entry(pdsp, &kdev->pdsps, list)
+
+#define for_each_qmgr(kdev, qmgr) \
+ list_for_each_entry(qmgr, &kdev->qmgrs, list)
+
+static inline struct knav_pdsp_info *
+knav_find_pdsp(struct knav_device *kdev, unsigned pdsp_id)
+{
+ struct knav_pdsp_info *pdsp;
+
+ for_each_pdsp(kdev, pdsp)
+ if (pdsp_id == pdsp->id)
+ return pdsp;
+ return NULL;
+}
+
+extern int knav_init_acc_range(struct knav_device *kdev,
+ struct device_node *node,
+ struct knav_range_info *range);
+extern void knav_queue_notify(struct knav_queue_inst *inst);
+
+#endif /* __KNAV_QMSS_H__ */
diff --git a/drivers/soc/ti/knav_qmss_acc.c b/drivers/soc/ti/knav_qmss_acc.c
new file mode 100644
index 0000000000..3d388646ed
--- /dev/null
+++ b/drivers/soc/ti/knav_qmss_acc.c
@@ -0,0 +1,584 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Keystone accumulator queue manager
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Sandeep Nair <sandeep_n@ti.com>
+ * Cyril Chemparathy <cyril@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/soc/ti/knav_qmss.h>
+
+#include "knav_qmss.h"
+
+#define knav_range_offset_to_inst(kdev, range, q) \
+ (range->queue_base_inst + (q << kdev->inst_shift))
+
+static void __knav_acc_notify(struct knav_range_info *range,
+ struct knav_acc_channel *acc)
+{
+ struct knav_device *kdev = range->kdev;
+ struct knav_queue_inst *inst;
+ int range_base, queue;
+
+ range_base = kdev->base_id + range->queue_base;
+
+ if (range->flags & RANGE_MULTI_QUEUE) {
+ for (queue = 0; queue < range->num_queues; queue++) {
+ inst = knav_range_offset_to_inst(kdev, range,
+ queue);
+ if (inst->notify_needed) {
+ inst->notify_needed = 0;
+ dev_dbg(kdev->dev, "acc-irq: notifying %d\n",
+ range_base + queue);
+ knav_queue_notify(inst);
+ }
+ }
+ } else {
+ queue = acc->channel - range->acc_info.start_channel;
+ inst = knav_range_offset_to_inst(kdev, range, queue);
+ dev_dbg(kdev->dev, "acc-irq: notifying %d\n",
+ range_base + queue);
+ knav_queue_notify(inst);
+ }
+}
+
+static int knav_acc_set_notify(struct knav_range_info *range,
+ struct knav_queue_inst *kq,
+ bool enabled)
+{
+ struct knav_pdsp_info *pdsp = range->acc_info.pdsp;
+ struct knav_device *kdev = range->kdev;
+ u32 mask, offset;
+
+ /*
+ * when enabling, we need to re-trigger an interrupt if we
+ * have descriptors pending
+ */
+ if (!enabled || atomic_read(&kq->desc_count) <= 0)
+ return 0;
+
+ kq->notify_needed = 1;
+ atomic_inc(&kq->acc->retrigger_count);
+ mask = BIT(kq->acc->channel % 32);
+ offset = ACC_INTD_OFFSET_STATUS(kq->acc->channel);
+ dev_dbg(kdev->dev, "setup-notify: re-triggering irq for %s\n",
+ kq->acc->name);
+ writel_relaxed(mask, pdsp->intd + offset);
+ return 0;
+}
+
+static irqreturn_t knav_acc_int_handler(int irq, void *_instdata)
+{
+ struct knav_acc_channel *acc;
+ struct knav_queue_inst *kq = NULL;
+ struct knav_range_info *range;
+ struct knav_pdsp_info *pdsp;
+ struct knav_acc_info *info;
+ struct knav_device *kdev;
+
+ u32 *list, *list_cpu, val, idx, notifies;
+ int range_base, channel, queue = 0;
+ dma_addr_t list_dma;
+
+ range = _instdata;
+ info = &range->acc_info;
+ kdev = range->kdev;
+ pdsp = range->acc_info.pdsp;
+ acc = range->acc;
+
+ range_base = kdev->base_id + range->queue_base;
+ if ((range->flags & RANGE_MULTI_QUEUE) == 0) {
+ for (queue = 0; queue < range->num_irqs; queue++)
+ if (range->irqs[queue].irq == irq)
+ break;
+ kq = knav_range_offset_to_inst(kdev, range, queue);
+ acc += queue;
+ }
+
+ channel = acc->channel;
+ list_dma = acc->list_dma[acc->list_index];
+ list_cpu = acc->list_cpu[acc->list_index];
+ dev_dbg(kdev->dev, "acc-irq: channel %d, list %d, virt %p, dma %pad\n",
+ channel, acc->list_index, list_cpu, &list_dma);
+ if (atomic_read(&acc->retrigger_count)) {
+ atomic_dec(&acc->retrigger_count);
+ __knav_acc_notify(range, acc);
+ writel_relaxed(1, pdsp->intd + ACC_INTD_OFFSET_COUNT(channel));
+ /* ack the interrupt */
+ writel_relaxed(ACC_CHANNEL_INT_BASE + channel,
+ pdsp->intd + ACC_INTD_OFFSET_EOI);
+
+ return IRQ_HANDLED;
+ }
+
+ notifies = readl_relaxed(pdsp->intd + ACC_INTD_OFFSET_COUNT(channel));
+ WARN_ON(!notifies);
+ dma_sync_single_for_cpu(kdev->dev, list_dma, info->list_size,
+ DMA_FROM_DEVICE);
+
+ for (list = list_cpu; list < list_cpu + (info->list_size / sizeof(u32));
+ list += ACC_LIST_ENTRY_WORDS) {
+ if (ACC_LIST_ENTRY_WORDS == 1) {
+ dev_dbg(kdev->dev,
+ "acc-irq: list %d, entry @%p, %08x\n",
+ acc->list_index, list, list[0]);
+ } else if (ACC_LIST_ENTRY_WORDS == 2) {
+ dev_dbg(kdev->dev,
+ "acc-irq: list %d, entry @%p, %08x %08x\n",
+ acc->list_index, list, list[0], list[1]);
+ } else if (ACC_LIST_ENTRY_WORDS == 4) {
+ dev_dbg(kdev->dev,
+ "acc-irq: list %d, entry @%p, %08x %08x %08x %08x\n",
+ acc->list_index, list, list[0], list[1],
+ list[2], list[3]);
+ }
+
+ val = list[ACC_LIST_ENTRY_DESC_IDX];
+ if (!val)
+ break;
+
+ if (range->flags & RANGE_MULTI_QUEUE) {
+ queue = list[ACC_LIST_ENTRY_QUEUE_IDX] >> 16;
+ if (queue < range_base ||
+ queue >= range_base + range->num_queues) {
+ dev_err(kdev->dev,
+ "bad queue %d, expecting %d-%d\n",
+ queue, range_base,
+ range_base + range->num_queues);
+ break;
+ }
+ queue -= range_base;
+ kq = knav_range_offset_to_inst(kdev, range,
+ queue);
+ }
+
+ if (atomic_inc_return(&kq->desc_count) >= ACC_DESCS_MAX) {
+ atomic_dec(&kq->desc_count);
+ dev_err(kdev->dev,
+ "acc-irq: queue %d full, entry dropped\n",
+ queue + range_base);
+ continue;
+ }
+
+ idx = atomic_inc_return(&kq->desc_tail) & ACC_DESCS_MASK;
+ kq->descs[idx] = val;
+ kq->notify_needed = 1;
+ dev_dbg(kdev->dev, "acc-irq: enqueue %08x at %d, queue %d\n",
+ val, idx, queue + range_base);
+ }
+
+ __knav_acc_notify(range, acc);
+ memset(list_cpu, 0, info->list_size);
+ dma_sync_single_for_device(kdev->dev, list_dma, info->list_size,
+ DMA_TO_DEVICE);
+
+ /* flip to the other list */
+ acc->list_index ^= 1;
+
+ /* reset the interrupt counter */
+ writel_relaxed(1, pdsp->intd + ACC_INTD_OFFSET_COUNT(channel));
+
+ /* ack the interrupt */
+ writel_relaxed(ACC_CHANNEL_INT_BASE + channel,
+ pdsp->intd + ACC_INTD_OFFSET_EOI);
+
+ return IRQ_HANDLED;
+}
+
+static int knav_range_setup_acc_irq(struct knav_range_info *range,
+ int queue, bool enabled)
+{
+ struct knav_device *kdev = range->kdev;
+ struct knav_acc_channel *acc;
+ struct cpumask *cpu_mask;
+ int ret = 0, irq;
+ u32 old, new;
+
+ if (range->flags & RANGE_MULTI_QUEUE) {
+ acc = range->acc;
+ irq = range->irqs[0].irq;
+ cpu_mask = range->irqs[0].cpu_mask;
+ } else {
+ acc = range->acc + queue;
+ irq = range->irqs[queue].irq;
+ cpu_mask = range->irqs[queue].cpu_mask;
+ }
+
+ old = acc->open_mask;
+ if (enabled)
+ new = old | BIT(queue);
+ else
+ new = old & ~BIT(queue);
+ acc->open_mask = new;
+
+ dev_dbg(kdev->dev,
+ "setup-acc-irq: open mask old %08x, new %08x, channel %s\n",
+ old, new, acc->name);
+
+ if (likely(new == old))
+ return 0;
+
+ if (new && !old) {
+ dev_dbg(kdev->dev,
+ "setup-acc-irq: requesting %s for channel %s\n",
+ acc->name, acc->name);
+ ret = request_irq(irq, knav_acc_int_handler, 0, acc->name,
+ range);
+ if (!ret && cpu_mask) {
+ ret = irq_set_affinity_hint(irq, cpu_mask);
+ if (ret) {
+ dev_warn(range->kdev->dev,
+ "Failed to set IRQ affinity\n");
+ return ret;
+ }
+ }
+ }
+
+ if (old && !new) {
+ dev_dbg(kdev->dev, "setup-acc-irq: freeing %s for channel %s\n",
+ acc->name, acc->name);
+ ret = irq_set_affinity_hint(irq, NULL);
+ if (ret)
+ dev_warn(range->kdev->dev,
+ "Failed to set IRQ affinity\n");
+ free_irq(irq, range);
+ }
+
+ return ret;
+}
+
+static const char *knav_acc_result_str(enum knav_acc_result result)
+{
+ static const char * const result_str[] = {
+ [ACC_RET_IDLE] = "idle",
+ [ACC_RET_SUCCESS] = "success",
+ [ACC_RET_INVALID_COMMAND] = "invalid command",
+ [ACC_RET_INVALID_CHANNEL] = "invalid channel",
+ [ACC_RET_INACTIVE_CHANNEL] = "inactive channel",
+ [ACC_RET_ACTIVE_CHANNEL] = "active channel",
+ [ACC_RET_INVALID_QUEUE] = "invalid queue",
+ [ACC_RET_INVALID_RET] = "invalid return code",
+ };
+
+ if (result >= ARRAY_SIZE(result_str))
+ return result_str[ACC_RET_INVALID_RET];
+ else
+ return result_str[result];
+}
+
+static enum knav_acc_result
+knav_acc_write(struct knav_device *kdev, struct knav_pdsp_info *pdsp,
+ struct knav_reg_acc_command *cmd)
+{
+ u32 result;
+
+ dev_dbg(kdev->dev, "acc command %08x %08x %08x %08x %08x\n",
+ cmd->command, cmd->queue_mask, cmd->list_dma,
+ cmd->queue_num, cmd->timer_config);
+
+ writel_relaxed(cmd->timer_config, &pdsp->acc_command->timer_config);
+ writel_relaxed(cmd->queue_num, &pdsp->acc_command->queue_num);
+ writel_relaxed(cmd->list_dma, &pdsp->acc_command->list_dma);
+ writel_relaxed(cmd->queue_mask, &pdsp->acc_command->queue_mask);
+ writel_relaxed(cmd->command, &pdsp->acc_command->command);
+
+ /* wait for the command to clear */
+ do {
+ result = readl_relaxed(&pdsp->acc_command->command);
+ } while ((result >> 8) & 0xff);
+
+ return (result >> 24) & 0xff;
+}
+
+static void knav_acc_setup_cmd(struct knav_device *kdev,
+ struct knav_range_info *range,
+ struct knav_reg_acc_command *cmd,
+ int queue)
+{
+ struct knav_acc_info *info = &range->acc_info;
+ struct knav_acc_channel *acc;
+ int queue_base;
+ u32 queue_mask;
+
+ if (range->flags & RANGE_MULTI_QUEUE) {
+ acc = range->acc;
+ queue_base = range->queue_base;
+ queue_mask = BIT(range->num_queues) - 1;
+ } else {
+ acc = range->acc + queue;
+ queue_base = range->queue_base + queue;
+ queue_mask = 0;
+ }
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->command = acc->channel;
+ cmd->queue_mask = queue_mask;
+ cmd->list_dma = (u32)acc->list_dma[0];
+ cmd->queue_num = info->list_entries << 16;
+ cmd->queue_num |= queue_base;
+
+ cmd->timer_config = ACC_LIST_ENTRY_TYPE << 18;
+ if (range->flags & RANGE_MULTI_QUEUE)
+ cmd->timer_config |= ACC_CFG_MULTI_QUEUE;
+ cmd->timer_config |= info->pacing_mode << 16;
+ cmd->timer_config |= info->timer_count;
+}
+
+static void knav_acc_stop(struct knav_device *kdev,
+ struct knav_range_info *range,
+ int queue)
+{
+ struct knav_reg_acc_command cmd;
+ struct knav_acc_channel *acc;
+ enum knav_acc_result result;
+
+ acc = range->acc + queue;
+
+ knav_acc_setup_cmd(kdev, range, &cmd, queue);
+ cmd.command |= ACC_CMD_DISABLE_CHANNEL << 8;
+ result = knav_acc_write(kdev, range->acc_info.pdsp, &cmd);
+
+ dev_dbg(kdev->dev, "stopped acc channel %s, result %s\n",
+ acc->name, knav_acc_result_str(result));
+}
+
+static enum knav_acc_result knav_acc_start(struct knav_device *kdev,
+ struct knav_range_info *range,
+ int queue)
+{
+ struct knav_reg_acc_command cmd;
+ struct knav_acc_channel *acc;
+ enum knav_acc_result result;
+
+ acc = range->acc + queue;
+
+ knav_acc_setup_cmd(kdev, range, &cmd, queue);
+ cmd.command |= ACC_CMD_ENABLE_CHANNEL << 8;
+ result = knav_acc_write(kdev, range->acc_info.pdsp, &cmd);
+
+ dev_dbg(kdev->dev, "started acc channel %s, result %s\n",
+ acc->name, knav_acc_result_str(result));
+
+ return result;
+}
+
+static int knav_acc_init_range(struct knav_range_info *range)
+{
+ struct knav_device *kdev = range->kdev;
+ struct knav_acc_channel *acc;
+ enum knav_acc_result result;
+ int queue;
+
+ for (queue = 0; queue < range->num_queues; queue++) {
+ acc = range->acc + queue;
+
+ knav_acc_stop(kdev, range, queue);
+ acc->list_index = 0;
+ result = knav_acc_start(kdev, range, queue);
+
+ if (result != ACC_RET_SUCCESS)
+ return -EIO;
+
+ if (range->flags & RANGE_MULTI_QUEUE)
+ return 0;
+ }
+ return 0;
+}
+
+static int knav_acc_init_queue(struct knav_range_info *range,
+ struct knav_queue_inst *kq)
+{
+ unsigned id = kq->id - range->queue_base;
+
+ kq->descs = devm_kcalloc(range->kdev->dev,
+ ACC_DESCS_MAX, sizeof(u32), GFP_KERNEL);
+ if (!kq->descs)
+ return -ENOMEM;
+
+ kq->acc = range->acc;
+ if ((range->flags & RANGE_MULTI_QUEUE) == 0)
+ kq->acc += id;
+ return 0;
+}
+
+static int knav_acc_open_queue(struct knav_range_info *range,
+ struct knav_queue_inst *inst, unsigned flags)
+{
+ unsigned id = inst->id - range->queue_base;
+
+ return knav_range_setup_acc_irq(range, id, true);
+}
+
+static int knav_acc_close_queue(struct knav_range_info *range,
+ struct knav_queue_inst *inst)
+{
+ unsigned id = inst->id - range->queue_base;
+
+ return knav_range_setup_acc_irq(range, id, false);
+}
+
+static int knav_acc_free_range(struct knav_range_info *range)
+{
+ struct knav_device *kdev = range->kdev;
+ struct knav_acc_channel *acc;
+ struct knav_acc_info *info;
+ int channel, channels;
+
+ info = &range->acc_info;
+
+ if (range->flags & RANGE_MULTI_QUEUE)
+ channels = 1;
+ else
+ channels = range->num_queues;
+
+ for (channel = 0; channel < channels; channel++) {
+ acc = range->acc + channel;
+ if (!acc->list_cpu[0])
+ continue;
+ dma_unmap_single(kdev->dev, acc->list_dma[0],
+ info->mem_size, DMA_BIDIRECTIONAL);
+ free_pages_exact(acc->list_cpu[0], info->mem_size);
+ }
+ devm_kfree(range->kdev->dev, range->acc);
+ return 0;
+}
+
+static struct knav_range_ops knav_acc_range_ops = {
+ .set_notify = knav_acc_set_notify,
+ .init_queue = knav_acc_init_queue,
+ .open_queue = knav_acc_open_queue,
+ .close_queue = knav_acc_close_queue,
+ .init_range = knav_acc_init_range,
+ .free_range = knav_acc_free_range,
+};
+
+/**
+ * knav_init_acc_range: Initialise accumulator ranges
+ *
+ * @kdev: qmss device
+ * @node: device node
+ * @range: qmss range information
+ *
+ * Return 0 on success or error
+ */
+int knav_init_acc_range(struct knav_device *kdev,
+ struct device_node *node,
+ struct knav_range_info *range)
+{
+ struct knav_acc_channel *acc;
+ struct knav_pdsp_info *pdsp;
+ struct knav_acc_info *info;
+ int ret, channel, channels;
+ int list_size, mem_size;
+ dma_addr_t list_dma;
+ void *list_mem;
+ u32 config[5];
+
+ range->flags |= RANGE_HAS_ACCUMULATOR;
+ info = &range->acc_info;
+
+ ret = of_property_read_u32_array(node, "accumulator", config, 5);
+ if (ret)
+ return ret;
+
+ info->pdsp_id = config[0];
+ info->start_channel = config[1];
+ info->list_entries = config[2];
+ info->pacing_mode = config[3];
+ info->timer_count = config[4] / ACC_DEFAULT_PERIOD;
+
+ if (info->start_channel > ACC_MAX_CHANNEL) {
+ dev_err(kdev->dev, "channel %d invalid for range %s\n",
+ info->start_channel, range->name);
+ return -EINVAL;
+ }
+
+ if (info->pacing_mode > 3) {
+ dev_err(kdev->dev, "pacing mode %d invalid for range %s\n",
+ info->pacing_mode, range->name);
+ return -EINVAL;
+ }
+
+ pdsp = knav_find_pdsp(kdev, info->pdsp_id);
+ if (!pdsp) {
+ dev_err(kdev->dev, "pdsp id %d not found for range %s\n",
+ info->pdsp_id, range->name);
+ return -EINVAL;
+ }
+
+ if (!pdsp->started) {
+ dev_err(kdev->dev, "pdsp id %d not started for range %s\n",
+ info->pdsp_id, range->name);
+ return -ENODEV;
+ }
+
+ info->pdsp = pdsp;
+ channels = range->num_queues;
+ if (of_property_read_bool(node, "multi-queue")) {
+ range->flags |= RANGE_MULTI_QUEUE;
+ channels = 1;
+ if (range->queue_base & (32 - 1)) {
+ dev_err(kdev->dev,
+ "misaligned multi-queue accumulator range %s\n",
+ range->name);
+ return -EINVAL;
+ }
+ if (range->num_queues > 32) {
+ dev_err(kdev->dev,
+ "too many queues in accumulator range %s\n",
+ range->name);
+ return -EINVAL;
+ }
+ }
+
+ /* figure out list size */
+ list_size = info->list_entries;
+ list_size *= ACC_LIST_ENTRY_WORDS * sizeof(u32);
+ info->list_size = list_size;
+ mem_size = PAGE_ALIGN(list_size * 2);
+ info->mem_size = mem_size;
+ range->acc = devm_kcalloc(kdev->dev, channels, sizeof(*range->acc),
+ GFP_KERNEL);
+ if (!range->acc)
+ return -ENOMEM;
+
+ for (channel = 0; channel < channels; channel++) {
+ acc = range->acc + channel;
+ acc->channel = info->start_channel + channel;
+
+ /* allocate memory for the two lists */
+ list_mem = alloc_pages_exact(mem_size, GFP_KERNEL | GFP_DMA);
+ if (!list_mem)
+ return -ENOMEM;
+
+ list_dma = dma_map_single(kdev->dev, list_mem, mem_size,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(kdev->dev, list_dma)) {
+ free_pages_exact(list_mem, mem_size);
+ return -ENOMEM;
+ }
+
+ memset(list_mem, 0, mem_size);
+ dma_sync_single_for_device(kdev->dev, list_dma, mem_size,
+ DMA_TO_DEVICE);
+ scnprintf(acc->name, sizeof(acc->name), "hwqueue-acc-%d",
+ acc->channel);
+ acc->list_cpu[0] = list_mem;
+ acc->list_cpu[1] = list_mem + list_size;
+ acc->list_dma[0] = list_dma;
+ acc->list_dma[1] = list_dma + list_size;
+ dev_dbg(kdev->dev, "%s: channel %d, dma %pad, virt %8p\n",
+ acc->name, acc->channel, &list_dma, list_mem);
+ }
+
+ range->ops = &knav_acc_range_ops;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(knav_init_acc_range);
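+
+/*
+ * Illustrative device tree fragment for an accumulator queue range using
+ * the five-cell "accumulator" property parsed above, in the order
+ * <pdsp-id channel list-entries pacing-mode timeout-usecs>. The node name,
+ * the surrounding properties and all values are hypothetical:
+ *
+ *	acc-low-0 {
+ *		qrange = <480 32>;
+ *		accumulator = <0 47 16 2 50>;
+ *		interrupts = <0 226 0xf01>;
+ *		multi-queue;
+ *	};
+ *
+ * Here PDSP 0 accumulates 32 queues starting at 480 on channel 47, with
+ * 16-entry lists, pacing mode 2 and a 50 usec timeout (two ticks of the
+ * 25 usec default period).
+ */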
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
new file mode 100644
index 0000000000..0f252c2549
--- /dev/null
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -0,0 +1,1908 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Keystone Queue Manager subsystem driver
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
+ * Authors: Sandeep Nair <sandeep_n@ti.com>
+ * Cyril Chemparathy <cyril@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/soc/ti/knav_qmss.h>
+
+#include "knav_qmss.h"
+
+static struct knav_device *kdev;
+static DEFINE_MUTEX(knav_dev_lock);
+#define knav_dev_lock_held() \
+ lockdep_is_held(&knav_dev_lock)
+
+/* Queue manager register indices in DTS */
+#define KNAV_QUEUE_PEEK_REG_INDEX 0
+#define KNAV_QUEUE_STATUS_REG_INDEX 1
+#define KNAV_QUEUE_CONFIG_REG_INDEX 2
+#define KNAV_QUEUE_REGION_REG_INDEX 3
+#define KNAV_QUEUE_PUSH_REG_INDEX 4
+#define KNAV_QUEUE_POP_REG_INDEX 5
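+
+/*
+ * Illustrative "reg" ordering for a queue manager node matching the indices
+ * above (node name, addresses and sizes are hypothetical):
+ *
+ *	qmgr0 {
+ *		managed-queues = <0 0x2000>;
+ *		reg = <0x2a40000 0x20000>, <0x2a06000 0x400>,
+ *		      <0x2a02000 0x1000>, <0x2a03000 0x1000>,
+ *		      <0x23a80000 0x20000>, <0x2a80000 0x20000>;
+ *	};
+ *
+ * i.e. the six entries are peek, status, config, region (descriptor setup),
+ * push and pop, in that order.
+ */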
+
+/* Queue manager register indices in DTS for QMSS in K2G NAVSS.
+ * There are no status and vbusm push registers on this version
+ * of QMSS. Push registers are the same as pop, so all indices above 1
+ * are redefined.
+ */
+#define KNAV_L_QUEUE_CONFIG_REG_INDEX 1
+#define KNAV_L_QUEUE_REGION_REG_INDEX 2
+#define KNAV_L_QUEUE_PUSH_REG_INDEX 3
+
+/* PDSP register indices in DTS */
+#define KNAV_QUEUE_PDSP_IRAM_REG_INDEX 0
+#define KNAV_QUEUE_PDSP_REGS_REG_INDEX 1
+#define KNAV_QUEUE_PDSP_INTD_REG_INDEX 2
+#define KNAV_QUEUE_PDSP_CMD_REG_INDEX 3
+
+#define knav_queue_idx_to_inst(kdev, idx) \
+ (kdev->instances + (idx << kdev->inst_shift))
+
+#define for_each_handle_rcu(qh, inst) \
+ list_for_each_entry_rcu(qh, &inst->handles, list, \
+ knav_dev_lock_held())
+
+#define for_each_instance(idx, inst, kdev) \
+ for (idx = 0, inst = kdev->instances; \
+ idx < (kdev)->num_queues_in_use; \
+ idx++, inst = knav_queue_idx_to_inst(kdev, idx))
+
+/* All firmware file names are listed here, newest first. The search starts
+ * at the beginning of the array and stops at the first firmware file that
+ * is found.
+ */
+static const char * const knav_acc_firmwares[] = {"ks2_qmss_pdsp_acc48.bin"};
+
+static bool device_ready;
+bool knav_qmss_device_ready(void)
+{
+ return device_ready;
+}
+EXPORT_SYMBOL_GPL(knav_qmss_device_ready);
+
+/**
+ * knav_queue_notify: qmss queue notifier call
+ *
+ * @inst: - qmss queue instance like accumulator
+ */
+void knav_queue_notify(struct knav_queue_inst *inst)
+{
+ struct knav_queue *qh;
+
+ if (!inst)
+ return;
+
+ rcu_read_lock();
+ for_each_handle_rcu(qh, inst) {
+ if (atomic_read(&qh->notifier_enabled) <= 0)
+ continue;
+ if (WARN_ON(!qh->notifier_fn))
+ continue;
+ this_cpu_inc(qh->stats->notifies);
+ qh->notifier_fn(qh->notifier_fn_arg);
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(knav_queue_notify);
+
+static irqreturn_t knav_queue_int_handler(int irq, void *_instdata)
+{
+ struct knav_queue_inst *inst = _instdata;
+
+ knav_queue_notify(inst);
+ return IRQ_HANDLED;
+}
+
+static int knav_queue_setup_irq(struct knav_range_info *range,
+ struct knav_queue_inst *inst)
+{
+ unsigned queue = inst->id - range->queue_base;
+ int ret = 0, irq;
+
+ if (range->flags & RANGE_HAS_IRQ) {
+ irq = range->irqs[queue].irq;
+ ret = request_irq(irq, knav_queue_int_handler, 0,
+ inst->irq_name, inst);
+ if (ret)
+ return ret;
+ disable_irq(irq);
+ if (range->irqs[queue].cpu_mask) {
+ ret = irq_set_affinity_hint(irq, range->irqs[queue].cpu_mask);
+ if (ret) {
+ dev_warn(range->kdev->dev,
+ "Failed to set IRQ affinity\n");
+ return ret;
+ }
+ }
+ }
+ return ret;
+}
+
+static void knav_queue_free_irq(struct knav_queue_inst *inst)
+{
+ struct knav_range_info *range = inst->range;
+ unsigned queue = inst->id - inst->range->queue_base;
+ int irq;
+
+ if (range->flags & RANGE_HAS_IRQ) {
+ irq = range->irqs[queue].irq;
+ irq_set_affinity_hint(irq, NULL);
+ free_irq(irq, inst);
+ }
+}
+
+static inline bool knav_queue_is_busy(struct knav_queue_inst *inst)
+{
+ return !list_empty(&inst->handles);
+}
+
+static inline bool knav_queue_is_reserved(struct knav_queue_inst *inst)
+{
+ return inst->range->flags & RANGE_RESERVED;
+}
+
+static inline bool knav_queue_is_shared(struct knav_queue_inst *inst)
+{
+ struct knav_queue *tmp;
+
+ rcu_read_lock();
+ for_each_handle_rcu(tmp, inst) {
+ if (tmp->flags & KNAV_QUEUE_SHARED) {
+ rcu_read_unlock();
+ return true;
+ }
+ }
+ rcu_read_unlock();
+ return false;
+}
+
+static inline bool knav_queue_match_type(struct knav_queue_inst *inst,
+ unsigned type)
+{
+ if ((type == KNAV_QUEUE_QPEND) &&
+ (inst->range->flags & RANGE_HAS_IRQ)) {
+ return true;
+ } else if ((type == KNAV_QUEUE_ACC) &&
+ (inst->range->flags & RANGE_HAS_ACCUMULATOR)) {
+ return true;
+ } else if ((type == KNAV_QUEUE_GP) &&
+ !(inst->range->flags &
+ (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ))) {
+ return true;
+ }
+ return false;
+}
+
+static inline struct knav_queue_inst *
+knav_queue_match_id_to_inst(struct knav_device *kdev, unsigned id)
+{
+ struct knav_queue_inst *inst;
+ int idx;
+
+ for_each_instance(idx, inst, kdev) {
+ if (inst->id == id)
+ return inst;
+ }
+ return NULL;
+}
+
+static inline struct knav_queue_inst *knav_queue_find_by_id(int id)
+{
+ if (kdev->base_id <= id &&
+ kdev->base_id + kdev->num_queues > id) {
+ id -= kdev->base_id;
+ return knav_queue_match_id_to_inst(kdev, id);
+ }
+ return NULL;
+}
+
+static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst,
+ const char *name, unsigned flags)
+{
+ struct knav_queue *qh;
+ unsigned id;
+ int ret = 0;
+
+ qh = devm_kzalloc(inst->kdev->dev, sizeof(*qh), GFP_KERNEL);
+ if (!qh)
+ return ERR_PTR(-ENOMEM);
+
+ qh->stats = alloc_percpu(struct knav_queue_stats);
+ if (!qh->stats) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ qh->flags = flags;
+ qh->inst = inst;
+ id = inst->id - inst->qmgr->start_queue;
+ qh->reg_push = &inst->qmgr->reg_push[id];
+ qh->reg_pop = &inst->qmgr->reg_pop[id];
+ qh->reg_peek = &inst->qmgr->reg_peek[id];
+
+ /* first opener? */
+ if (!knav_queue_is_busy(inst)) {
+ struct knav_range_info *range = inst->range;
+
+ inst->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
+ if (range->ops && range->ops->open_queue)
+ ret = range->ops->open_queue(range, inst, flags);
+
+ if (ret)
+ goto err;
+ }
+ list_add_tail_rcu(&qh->list, &inst->handles);
+ return qh;
+
+err:
+ if (qh->stats)
+ free_percpu(qh->stats);
+ devm_kfree(inst->kdev->dev, qh);
+ return ERR_PTR(ret);
+}
+
+static struct knav_queue *
+knav_queue_open_by_id(const char *name, unsigned id, unsigned flags)
+{
+ struct knav_queue_inst *inst;
+ struct knav_queue *qh;
+
+ mutex_lock(&knav_dev_lock);
+
+ qh = ERR_PTR(-ENODEV);
+ inst = knav_queue_find_by_id(id);
+ if (!inst)
+ goto unlock_ret;
+
+ qh = ERR_PTR(-EEXIST);
+ if (!(flags & KNAV_QUEUE_SHARED) && knav_queue_is_busy(inst))
+ goto unlock_ret;
+
+ qh = ERR_PTR(-EBUSY);
+ if ((flags & KNAV_QUEUE_SHARED) &&
+ (knav_queue_is_busy(inst) && !knav_queue_is_shared(inst)))
+ goto unlock_ret;
+
+ qh = __knav_queue_open(inst, name, flags);
+
+unlock_ret:
+ mutex_unlock(&knav_dev_lock);
+
+ return qh;
+}
+
+static struct knav_queue *knav_queue_open_by_type(const char *name,
+ unsigned type, unsigned flags)
+{
+ struct knav_queue_inst *inst;
+ struct knav_queue *qh = ERR_PTR(-EINVAL);
+ int idx;
+
+ mutex_lock(&knav_dev_lock);
+
+ for_each_instance(idx, inst, kdev) {
+ if (knav_queue_is_reserved(inst))
+ continue;
+ if (!knav_queue_match_type(inst, type))
+ continue;
+ if (knav_queue_is_busy(inst))
+ continue;
+ qh = __knav_queue_open(inst, name, flags);
+ goto unlock_ret;
+ }
+
+unlock_ret:
+ mutex_unlock(&knav_dev_lock);
+ return qh;
+}
+
+static void knav_queue_set_notify(struct knav_queue_inst *inst, bool enabled)
+{
+ struct knav_range_info *range = inst->range;
+
+ if (range->ops && range->ops->set_notify)
+ range->ops->set_notify(range, inst, enabled);
+}
+
+static int knav_queue_enable_notifier(struct knav_queue *qh)
+{
+ struct knav_queue_inst *inst = qh->inst;
+ bool first;
+
+ if (WARN_ON(!qh->notifier_fn))
+ return -EINVAL;
+
+ /* Adjust the per handle notifier count */
+ first = (atomic_inc_return(&qh->notifier_enabled) == 1);
+ if (!first)
+ return 0; /* nothing to do */
+
+ /* Now adjust the per instance notifier count */
+ first = (atomic_inc_return(&inst->num_notifiers) == 1);
+ if (first)
+ knav_queue_set_notify(inst, true);
+
+ return 0;
+}
+
+static int knav_queue_disable_notifier(struct knav_queue *qh)
+{
+ struct knav_queue_inst *inst = qh->inst;
+ bool last;
+
+ last = (atomic_dec_return(&qh->notifier_enabled) == 0);
+ if (!last)
+ return 0; /* nothing to do */
+
+ last = (atomic_dec_return(&inst->num_notifiers) == 0);
+ if (last)
+ knav_queue_set_notify(inst, false);
+
+ return 0;
+}
+
+static int knav_queue_set_notifier(struct knav_queue *qh,
+ struct knav_queue_notify_config *cfg)
+{
+ knav_queue_notify_fn old_fn = qh->notifier_fn;
+
+ if (!cfg)
+ return -EINVAL;
+
+ if (!(qh->inst->range->flags & (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ)))
+ return -ENOTSUPP;
+
+ if (!cfg->fn && old_fn)
+ knav_queue_disable_notifier(qh);
+
+ qh->notifier_fn = cfg->fn;
+ qh->notifier_fn_arg = cfg->fn_arg;
+
+ if (cfg->fn && !old_fn)
+ knav_queue_enable_notifier(qh);
+
+ return 0;
+}
+
+static int knav_gp_set_notify(struct knav_range_info *range,
+ struct knav_queue_inst *inst,
+ bool enabled)
+{
+ unsigned queue;
+
+ if (range->flags & RANGE_HAS_IRQ) {
+ queue = inst->id - range->queue_base;
+ if (enabled)
+ enable_irq(range->irqs[queue].irq);
+ else
+ disable_irq_nosync(range->irqs[queue].irq);
+ }
+ return 0;
+}
+
+static int knav_gp_open_queue(struct knav_range_info *range,
+ struct knav_queue_inst *inst, unsigned flags)
+{
+ return knav_queue_setup_irq(range, inst);
+}
+
+static int knav_gp_close_queue(struct knav_range_info *range,
+ struct knav_queue_inst *inst)
+{
+ knav_queue_free_irq(inst);
+ return 0;
+}
+
+static struct knav_range_ops knav_gp_range_ops = {
+ .set_notify = knav_gp_set_notify,
+ .open_queue = knav_gp_open_queue,
+ .close_queue = knav_gp_close_queue,
+};
+
+
+static int knav_queue_get_count(void *qhandle)
+{
+ struct knav_queue *qh = qhandle;
+ struct knav_queue_inst *inst = qh->inst;
+
+ return readl_relaxed(&qh->reg_peek[0].entry_count) +
+ atomic_read(&inst->desc_count);
+}
+
+static void knav_queue_debug_show_instance(struct seq_file *s,
+ struct knav_queue_inst *inst)
+{
+ struct knav_device *kdev = inst->kdev;
+ struct knav_queue *qh;
+ int cpu = 0;
+ int pushes = 0;
+ int pops = 0;
+ int push_errors = 0;
+ int pop_errors = 0;
+ int notifies = 0;
+
+ if (!knav_queue_is_busy(inst))
+ return;
+
+ seq_printf(s, "\tqueue id %d (%s)\n",
+ kdev->base_id + inst->id, inst->name);
+ for_each_handle_rcu(qh, inst) {
+ for_each_possible_cpu(cpu) {
+ pushes += per_cpu_ptr(qh->stats, cpu)->pushes;
+ pops += per_cpu_ptr(qh->stats, cpu)->pops;
+ push_errors += per_cpu_ptr(qh->stats, cpu)->push_errors;
+ pop_errors += per_cpu_ptr(qh->stats, cpu)->pop_errors;
+ notifies += per_cpu_ptr(qh->stats, cpu)->notifies;
+ }
+
+ seq_printf(s, "\t\thandle %p: pushes %8d, pops %8d, count %8d, notifies %8d, push errors %8d, pop errors %8d\n",
+ qh,
+ pushes,
+ pops,
+ knav_queue_get_count(qh),
+ notifies,
+ push_errors,
+ pop_errors);
+ }
+}
+
+static int knav_queue_debug_show(struct seq_file *s, void *v)
+{
+ struct knav_queue_inst *inst;
+ int idx;
+
+ mutex_lock(&knav_dev_lock);
+ seq_printf(s, "%s: %u-%u\n",
+ dev_name(kdev->dev), kdev->base_id,
+ kdev->base_id + kdev->num_queues - 1);
+ for_each_instance(idx, inst, kdev)
+ knav_queue_debug_show_instance(s, inst);
+ mutex_unlock(&knav_dev_lock);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(knav_queue_debug);
+
+static inline int knav_queue_pdsp_wait(u32 * __iomem addr, unsigned timeout,
+ u32 flags)
+{
+ unsigned long end;
+ u32 val = 0;
+
+ end = jiffies + msecs_to_jiffies(timeout);
+ while (time_after(end, jiffies)) {
+ val = readl_relaxed(addr);
+ if (flags)
+ val &= flags;
+ if (!val)
+ break;
+ cpu_relax();
+ }
+ return val ? -ETIMEDOUT : 0;
+}
+
+
+static int knav_queue_flush(struct knav_queue *qh)
+{
+ struct knav_queue_inst *inst = qh->inst;
+ unsigned id = inst->id - inst->qmgr->start_queue;
+
+ atomic_set(&inst->desc_count, 0);
+ writel_relaxed(0, &inst->qmgr->reg_push[id].ptr_size_thresh);
+ return 0;
+}
+
+/**
+ * knav_queue_open() - open a hardware queue
+ * @name: - name to give the queue handle
+ * @id:		- desired queue number if any, or specifies the type
+ *		  of queue
+ * @flags: - the following flags are applicable to queues:
+ * KNAV_QUEUE_SHARED - allow the queue to be shared. Queues are
+ * exclusive by default.
+ * Subsequent attempts to open a shared queue should
+ * also have this flag.
+ *
+ * Returns a handle to the open hardware queue if successful. Use IS_ERR()
+ * to check the returned value for error codes.
+ */
+void *knav_queue_open(const char *name, unsigned id,
+ unsigned flags)
+{
+ struct knav_queue *qh = ERR_PTR(-EINVAL);
+
+ switch (id) {
+ case KNAV_QUEUE_QPEND:
+ case KNAV_QUEUE_ACC:
+ case KNAV_QUEUE_GP:
+ qh = knav_queue_open_by_type(name, id, flags);
+ break;
+
+ default:
+ qh = knav_queue_open_by_id(name, id, flags);
+ break;
+ }
+ return qh;
+}
+EXPORT_SYMBOL_GPL(knav_queue_open);
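+
+/*
+ * Illustrative sketch (not part of this driver): how a hypothetical client
+ * might open a general-purpose queue by type and a specific queue by id.
+ * The handle names and the queue number 720 below are made up purely for
+ * illustration.
+ */
+#if 0	/* example only, not compiled */
+static void *example_open_queues(void)
+{
+	void *gp_q, *rx_q;
+
+	/* let the driver pick any free general-purpose queue */
+	gp_q = knav_queue_open("example-gp", KNAV_QUEUE_GP, 0);
+	if (IS_ERR(gp_q))
+		return gp_q;
+
+	/* open a specific queue number, allowing it to be shared */
+	rx_q = knav_queue_open("example-rx", 720, KNAV_QUEUE_SHARED);
+	if (IS_ERR(rx_q)) {
+		knav_queue_close(gp_q);
+		return rx_q;
+	}
+
+	knav_queue_close(rx_q);
+	return gp_q;
+}
+#endif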
+
+/**
+ * knav_queue_close() - close a hardware queue handle
+ * @qhandle: - handle to close
+ */
+void knav_queue_close(void *qhandle)
+{
+ struct knav_queue *qh = qhandle;
+ struct knav_queue_inst *inst = qh->inst;
+
+ while (atomic_read(&qh->notifier_enabled) > 0)
+ knav_queue_disable_notifier(qh);
+
+ mutex_lock(&knav_dev_lock);
+ list_del_rcu(&qh->list);
+ mutex_unlock(&knav_dev_lock);
+ synchronize_rcu();
+ if (!knav_queue_is_busy(inst)) {
+ struct knav_range_info *range = inst->range;
+
+ if (range->ops && range->ops->close_queue)
+ range->ops->close_queue(range, inst);
+ }
+ free_percpu(qh->stats);
+ devm_kfree(inst->kdev->dev, qh);
+}
+EXPORT_SYMBOL_GPL(knav_queue_close);
+
+/**
+ * knav_queue_device_control() - Perform control operations on a queue
+ * @qhandle: - queue handle
+ * @cmd: - control commands
+ * @arg: - command argument
+ *
+ * Returns 0 on success, errno otherwise.
+ */
+int knav_queue_device_control(void *qhandle, enum knav_queue_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct knav_queue *qh = qhandle;
+ struct knav_queue_notify_config *cfg;
+ int ret;
+
+ switch ((int)cmd) {
+ case KNAV_QUEUE_GET_ID:
+ ret = qh->inst->kdev->base_id + qh->inst->id;
+ break;
+
+ case KNAV_QUEUE_FLUSH:
+ ret = knav_queue_flush(qh);
+ break;
+
+ case KNAV_QUEUE_SET_NOTIFIER:
+ cfg = (void *)arg;
+ ret = knav_queue_set_notifier(qh, cfg);
+ break;
+
+ case KNAV_QUEUE_ENABLE_NOTIFY:
+ ret = knav_queue_enable_notifier(qh);
+ break;
+
+ case KNAV_QUEUE_DISABLE_NOTIFY:
+ ret = knav_queue_disable_notifier(qh);
+ break;
+
+ case KNAV_QUEUE_GET_COUNT:
+ ret = knav_queue_get_count(qh);
+ break;
+
+ default:
+ ret = -ENOTSUPP;
+ break;
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(knav_queue_device_control);
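+
+/*
+ * Illustrative sketch (not part of this driver): querying and flushing a
+ * queue through knav_queue_device_control(). The handle is assumed to come
+ * from a successful knav_queue_open() as shown earlier.
+ */
+#if 0	/* example only, not compiled */
+static void example_queue_control(void *qh)
+{
+	int id, count;
+
+	/* hardware queue number behind this handle */
+	id = knav_queue_device_control(qh, KNAV_QUEUE_GET_ID, 0);
+
+	/* number of descriptors currently sitting on the queue */
+	count = knav_queue_device_control(qh, KNAV_QUEUE_GET_COUNT, 0);
+	pr_debug("queue %d holds %d descriptors\n", id, count);
+
+	/* drop everything still queued */
+	if (count > 0)
+		knav_queue_device_control(qh, KNAV_QUEUE_FLUSH, 0);
+}
+#endif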
+
+
+
+/**
+ * knav_queue_push() - push data (or descriptor) to the tail of a queue
+ * @qhandle: - hardware queue handle
+ * @dma: - DMA data to push
+ * @size: - size of data to push
+ * @flags: - can be used to pass additional information
+ *
+ * Returns 0 on success, errno otherwise.
+ */
+int knav_queue_push(void *qhandle, dma_addr_t dma,
+ unsigned size, unsigned flags)
+{
+ struct knav_queue *qh = qhandle;
+ u32 val;
+
+ val = (u32)dma | ((size / 16) - 1);
+ writel_relaxed(val, &qh->reg_push[0].ptr_size_thresh);
+
+ this_cpu_inc(qh->stats->pushes);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(knav_queue_push);
+
+/**
+ * knav_queue_pop() - pop data (or descriptor) from the head of a queue
+ * @qhandle: - hardware queue handle
+ * @size:	- (optional) size of the data popped.
+ *
+ * Returns a DMA address on success, 0 on failure.
+ */
+dma_addr_t knav_queue_pop(void *qhandle, unsigned *size)
+{
+ struct knav_queue *qh = qhandle;
+ struct knav_queue_inst *inst = qh->inst;
+ dma_addr_t dma;
+ u32 val, idx;
+
+ /* are we accumulated? */
+ if (inst->descs) {
+ if (unlikely(atomic_dec_return(&inst->desc_count) < 0)) {
+ atomic_inc(&inst->desc_count);
+ return 0;
+ }
+ idx = atomic_inc_return(&inst->desc_head);
+ idx &= ACC_DESCS_MASK;
+ val = inst->descs[idx];
+ } else {
+ val = readl_relaxed(&qh->reg_pop[0].ptr_size_thresh);
+ if (unlikely(!val))
+ return 0;
+ }
+
+ dma = val & DESC_PTR_MASK;
+ if (size)
+ *size = ((val & DESC_SIZE_MASK) + 1) * 16;
+
+ this_cpu_inc(qh->stats->pops);
+ return dma;
+}
+EXPORT_SYMBOL_GPL(knav_queue_pop);
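+
+/*
+ * Illustrative sketch (not part of this driver): pushing a descriptor's DMA
+ * address onto a queue and popping the next one back. The descriptor is
+ * assumed to be a DMA-mapped buffer obtained elsewhere; the 64-byte size is
+ * made up, but note the hardware encodes sizes in 16-byte units.
+ */
+#if 0	/* example only, not compiled */
+static void example_push_pop(void *qh, dma_addr_t desc_dma)
+{
+	dma_addr_t dma;
+	unsigned size;
+
+	/* queue the descriptor; size should be a multiple of 16 bytes */
+	knav_queue_push(qh, desc_dma, 64, 0);
+
+	/* retrieve the next descriptor, if any */
+	dma = knav_queue_pop(qh, &size);
+	if (dma)
+		pr_debug("popped descriptor %pad of %u bytes\n", &dma, size);
+}
+#endif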
+
+/* carve out descriptors and push into queue */
+static void kdesc_fill_pool(struct knav_pool *pool)
+{
+ struct knav_region *region;
+ int i;
+
+ region = pool->region;
+ pool->desc_size = region->desc_size;
+ for (i = 0; i < pool->num_desc; i++) {
+ int index = pool->region_offset + i;
+ dma_addr_t dma_addr;
+ unsigned dma_size;
+ dma_addr = region->dma_start + (region->desc_size * index);
+ dma_size = ALIGN(pool->desc_size, SMP_CACHE_BYTES);
+ dma_sync_single_for_device(pool->dev, dma_addr, dma_size,
+ DMA_TO_DEVICE);
+ knav_queue_push(pool->queue, dma_addr, dma_size, 0);
+ }
+}
+
+/* pop out descriptors and close the queue */
+static void kdesc_empty_pool(struct knav_pool *pool)
+{
+ dma_addr_t dma;
+ unsigned size;
+ void *desc;
+ int i;
+
+ if (!pool->queue)
+ return;
+
+ for (i = 0;; i++) {
+ dma = knav_queue_pop(pool->queue, &size);
+ if (!dma)
+ break;
+ desc = knav_pool_desc_dma_to_virt(pool, dma);
+ if (!desc) {
+ dev_dbg(pool->kdev->dev,
+ "couldn't unmap desc, continuing\n");
+ continue;
+ }
+ }
+ WARN_ON(i != pool->num_desc);
+ knav_queue_close(pool->queue);
+}
+
+
+/* Get the DMA address of a descriptor */
+dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt)
+{
+ struct knav_pool *pool = ph;
+ return pool->region->dma_start + (virt - pool->region->virt_start);
+}
+EXPORT_SYMBOL_GPL(knav_pool_desc_virt_to_dma);
+
+void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma)
+{
+ struct knav_pool *pool = ph;
+ return pool->region->virt_start + (dma - pool->region->dma_start);
+}
+EXPORT_SYMBOL_GPL(knav_pool_desc_dma_to_virt);
+
+/**
+ * knav_pool_create() - Create a pool of descriptors
+ * @name: - name to give the pool handle
+ * @num_desc:	- number of descriptors in the pool
+ * @region_id: - QMSS region id from which the descriptors are to be
+ * allocated.
+ *
+ * Returns a pool handle on success.
+ * Use IS_ERR_OR_NULL() to identify error values on return.
+ */
+void *knav_pool_create(const char *name,
+ int num_desc, int region_id)
+{
+ struct knav_region *reg_itr, *region = NULL;
+ struct knav_pool *pool, *pi = NULL, *iter;
+ struct list_head *node;
+ unsigned last_offset;
+ int ret;
+
+ if (!kdev)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ if (!kdev->dev)
+ return ERR_PTR(-ENODEV);
+
+ pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
+ if (!pool) {
+ dev_err(kdev->dev, "out of memory allocating pool\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for_each_region(kdev, reg_itr) {
+ if (reg_itr->id != region_id)
+ continue;
+ region = reg_itr;
+ break;
+ }
+
+ if (!region) {
+ dev_err(kdev->dev, "region-id(%d) not found\n", region_id);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0);
+ if (IS_ERR(pool->queue)) {
+ dev_err(kdev->dev,
+ "failed to open queue for pool(%s), error %ld\n",
+ name, PTR_ERR(pool->queue));
+ ret = PTR_ERR(pool->queue);
+ goto err;
+ }
+
+ pool->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
+ pool->kdev = kdev;
+ pool->dev = kdev->dev;
+
+ mutex_lock(&knav_dev_lock);
+
+ if (num_desc > (region->num_desc - region->used_desc)) {
+ dev_err(kdev->dev, "out of descs in region(%d) for pool(%s)\n",
+ region_id, name);
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ /* The region maintains a list of pools sorted by region offset;
+ * use the first free slot that is large enough to accommodate
+ * the request
+ */
+ last_offset = 0;
+ node = &region->pools;
+ list_for_each_entry(iter, &region->pools, region_inst) {
+ if ((iter->region_offset - last_offset) >= num_desc) {
+ pi = iter;
+ break;
+ }
+ last_offset = iter->region_offset + iter->num_desc;
+ }
+
+ if (pi) {
+ node = &pi->region_inst;
+ pool->region = region;
+ pool->num_desc = num_desc;
+ pool->region_offset = last_offset;
+ region->used_desc += num_desc;
+ list_add_tail(&pool->list, &kdev->pools);
+ list_add_tail(&pool->region_inst, node);
+ } else {
+ dev_err(kdev->dev, "pool(%s) create failed: fragmented desc pool in region(%d)\n",
+ name, region_id);
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ mutex_unlock(&knav_dev_lock);
+ kdesc_fill_pool(pool);
+ return pool;
+
+err_unlock:
+ mutex_unlock(&knav_dev_lock);
+err:
+ kfree(pool->name);
+ devm_kfree(kdev->dev, pool);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(knav_pool_create);
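+
+/*
+ * Illustrative sketch (not part of this driver): creating a descriptor pool
+ * from a QMSS descriptor region and later tearing it down. The pool name,
+ * descriptor count and region id below are made up for illustration.
+ */
+#if 0	/* example only, not compiled */
+static void *example_create_pool(void)
+{
+	void *pool;
+
+	pool = knav_pool_create("example-pool", 256, 12);
+	if (IS_ERR_OR_NULL(pool))
+		return pool;
+
+	pr_debug("pool holds %d free descriptors\n", knav_pool_count(pool));
+	return pool;	/* released later with knav_pool_destroy(pool) */
+}
+#endif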
+
+/**
+ * knav_pool_destroy() - Free a pool of descriptors
+ * @ph: - pool handle
+ */
+void knav_pool_destroy(void *ph)
+{
+ struct knav_pool *pool = ph;
+
+ if (!pool)
+ return;
+
+ if (!pool->region)
+ return;
+
+ kdesc_empty_pool(pool);
+ mutex_lock(&knav_dev_lock);
+
+ pool->region->used_desc -= pool->num_desc;
+ list_del(&pool->region_inst);
+ list_del(&pool->list);
+
+ mutex_unlock(&knav_dev_lock);
+ kfree(pool->name);
+ devm_kfree(kdev->dev, pool);
+}
+EXPORT_SYMBOL_GPL(knav_pool_destroy);
+
+
+/**
+ * knav_pool_desc_get() - Get a descriptor from the pool
+ * @ph: - pool handle
+ *
+ * Returns descriptor from the pool.
+ */
+void *knav_pool_desc_get(void *ph)
+{
+ struct knav_pool *pool = ph;
+ dma_addr_t dma;
+ unsigned size;
+ void *data;
+
+ dma = knav_queue_pop(pool->queue, &size);
+ if (unlikely(!dma))
+ return ERR_PTR(-ENOMEM);
+ data = knav_pool_desc_dma_to_virt(pool, dma);
+ return data;
+}
+EXPORT_SYMBOL_GPL(knav_pool_desc_get);
+
+/**
+ * knav_pool_desc_put() - return a descriptor to the pool
+ * @ph: - pool handle
+ * @desc: - virtual address
+ */
+void knav_pool_desc_put(void *ph, void *desc)
+{
+ struct knav_pool *pool = ph;
+ dma_addr_t dma;
+ dma = knav_pool_desc_virt_to_dma(pool, desc);
+ knav_queue_push(pool->queue, dma, pool->region->desc_size, 0);
+}
+EXPORT_SYMBOL_GPL(knav_pool_desc_put);
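+
+/*
+ * Illustrative sketch (not part of this driver): borrowing a descriptor from
+ * a pool and returning it. @pool is assumed to come from knav_pool_create().
+ */
+#if 0	/* example only, not compiled */
+static int example_desc_get_put(void *pool)
+{
+	void *desc;
+
+	desc = knav_pool_desc_get(pool);
+	if (IS_ERR(desc))
+		return PTR_ERR(desc);	/* pool is empty */
+
+	/* ... fill in the descriptor here ... */
+
+	knav_pool_desc_put(pool, desc);
+	return 0;
+}
+#endif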
+
+/**
+ * knav_pool_desc_map() - Map descriptor for DMA transfer
+ * @ph: - pool handle
+ * @desc: - address of descriptor to map
+ * @size: - size of descriptor to map
+ * @dma: - DMA address return pointer
+ * @dma_sz:	- pointer to return the adjusted (cache-aligned) size
+ *
+ * Returns 0 on success, errno otherwise.
+ */
+int knav_pool_desc_map(void *ph, void *desc, unsigned size,
+ dma_addr_t *dma, unsigned *dma_sz)
+{
+ struct knav_pool *pool = ph;
+ *dma = knav_pool_desc_virt_to_dma(pool, desc);
+ size = min(size, pool->region->desc_size);
+ size = ALIGN(size, SMP_CACHE_BYTES);
+ *dma_sz = size;
+ dma_sync_single_for_device(pool->dev, *dma, size, DMA_TO_DEVICE);
+
+ /* Ensure the descriptor write reaches memory */
+ __iowmb();
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(knav_pool_desc_map);
+
+/**
+ * knav_pool_desc_unmap() - Unmap descriptor after DMA transfer
+ * @ph: - pool handle
+ * @dma: - DMA address of descriptor to unmap
+ * @dma_sz: - size of descriptor to unmap
+ *
+ * Returns the descriptor address on success. Use IS_ERR_OR_NULL() to identify
+ * error values on return.
+ */
+void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz)
+{
+ struct knav_pool *pool = ph;
+ unsigned desc_sz;
+ void *desc;
+
+ desc_sz = min(dma_sz, pool->region->desc_size);
+ desc = knav_pool_desc_dma_to_virt(pool, dma);
+ dma_sync_single_for_cpu(pool->dev, dma, desc_sz, DMA_FROM_DEVICE);
+ prefetch(desc);
+ return desc;
+}
+EXPORT_SYMBOL_GPL(knav_pool_desc_unmap);
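+
+/*
+ * Illustrative sketch (not part of this driver): the usual map/push and
+ * pop/unmap pairing a client would use when handing descriptors to hardware.
+ * The 64-byte payload size is made up for illustration.
+ */
+#if 0	/* example only, not compiled */
+static void example_desc_dma_cycle(void *pool, void *queue, void *desc)
+{
+	dma_addr_t dma;
+	unsigned dma_sz, size;
+
+	/* hand the CPU-written descriptor to the hardware */
+	knav_pool_desc_map(pool, desc, 64, &dma, &dma_sz);
+	knav_queue_push(queue, dma, dma_sz, 0);
+
+	/* later: take a completed descriptor back from the hardware */
+	dma = knav_queue_pop(queue, &size);
+	if (dma)
+		desc = knav_pool_desc_unmap(pool, dma, size);
+}
+#endif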
+
+/**
+ * knav_pool_count() - Get the number of descriptors in pool.
+ * @ph: - pool handle
+ * Returns number of elements in the pool.
+ */
+int knav_pool_count(void *ph)
+{
+ struct knav_pool *pool = ph;
+ return knav_queue_get_count(pool->queue);
+}
+EXPORT_SYMBOL_GPL(knav_pool_count);
+
+static void knav_queue_setup_region(struct knav_device *kdev,
+ struct knav_region *region)
+{
+ unsigned hw_num_desc, hw_desc_size, size;
+ struct knav_reg_region __iomem *regs;
+ struct knav_qmgr_info *qmgr;
+ struct knav_pool *pool;
+ int id = region->id;
+ struct page *page;
+
+ /* unused region? */
+ if (!region->num_desc) {
+ dev_warn(kdev->dev, "unused region %s\n", region->name);
+ return;
+ }
+
+ /* get hardware descriptor value */
+ hw_num_desc = ilog2(region->num_desc - 1) + 1;
+
+ /* did we force fit ourselves into nothingness? */
+ if (region->num_desc < 32) {
+ region->num_desc = 0;
+ dev_warn(kdev->dev, "too few descriptors in region %s\n",
+ region->name);
+ return;
+ }
+
+ size = region->num_desc * region->desc_size;
+ region->virt_start = alloc_pages_exact(size, GFP_KERNEL | GFP_DMA |
+ GFP_DMA32);
+ if (!region->virt_start) {
+ region->num_desc = 0;
+ dev_err(kdev->dev, "memory alloc failed for region %s\n",
+ region->name);
+ return;
+ }
+ region->virt_end = region->virt_start + size;
+ page = virt_to_page(region->virt_start);
+
+ region->dma_start = dma_map_page(kdev->dev, page, 0, size,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(kdev->dev, region->dma_start)) {
+ dev_err(kdev->dev, "dma map failed for region %s\n",
+ region->name);
+ goto fail;
+ }
+ region->dma_end = region->dma_start + size;
+
+ pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
+ if (!pool) {
+ dev_err(kdev->dev, "out of memory allocating dummy pool\n");
+ goto fail;
+ }
+ pool->num_desc = 0;
+ pool->region_offset = region->num_desc;
+ list_add(&pool->region_inst, &region->pools);
+
+ dev_dbg(kdev->dev,
+ "region %s (%d): size:%d, link:%d@%d, dma:%pad-%pad, virt:%p-%p\n",
+ region->name, id, region->desc_size, region->num_desc,
+ region->link_index, &region->dma_start, &region->dma_end,
+ region->virt_start, region->virt_end);
+
+ hw_desc_size = (region->desc_size / 16) - 1;
+ hw_num_desc -= 5;
+
+ for_each_qmgr(kdev, qmgr) {
+ regs = qmgr->reg_region + id;
+ writel_relaxed((u32)region->dma_start, &regs->base);
+ writel_relaxed(region->link_index, &regs->start_index);
+ writel_relaxed(hw_desc_size << 16 | hw_num_desc,
+ &regs->size_count);
+ }
+ return;
+
+fail:
+ if (region->dma_start)
+ dma_unmap_page(kdev->dev, region->dma_start, size,
+ DMA_BIDIRECTIONAL);
+ if (region->virt_start)
+ free_pages_exact(region->virt_start, size);
+ region->num_desc = 0;
+ return;
+}
+
+static const char *knav_queue_find_name(struct device_node *node)
+{
+ const char *name;
+
+ if (of_property_read_string(node, "label", &name) < 0)
+ name = node->name;
+ if (!name)
+ name = "unknown";
+ return name;
+}
+
+static int knav_queue_setup_regions(struct knav_device *kdev,
+ struct device_node *regions)
+{
+ struct device *dev = kdev->dev;
+ struct knav_region *region;
+ struct device_node *child;
+ u32 temp[2];
+ int ret;
+
+ for_each_child_of_node(regions, child) {
+ region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
+ if (!region) {
+ of_node_put(child);
+ dev_err(dev, "out of memory allocating region\n");
+ return -ENOMEM;
+ }
+
+ region->name = knav_queue_find_name(child);
+ of_property_read_u32(child, "id", &region->id);
+ ret = of_property_read_u32_array(child, "region-spec", temp, 2);
+ if (!ret) {
+ region->num_desc = temp[0];
+ region->desc_size = temp[1];
+ } else {
+ dev_err(dev, "invalid region info %s\n", region->name);
+ devm_kfree(dev, region);
+ continue;
+ }
+
+ if (!of_get_property(child, "link-index", NULL)) {
+ dev_err(dev, "No link info for %s\n", region->name);
+ devm_kfree(dev, region);
+ continue;
+ }
+ ret = of_property_read_u32(child, "link-index",
+ &region->link_index);
+ if (ret) {
+ dev_err(dev, "link index not found for %s\n",
+ region->name);
+ devm_kfree(dev, region);
+ continue;
+ }
+
+ INIT_LIST_HEAD(&region->pools);
+ list_add_tail(&region->list, &kdev->regions);
+ }
+ if (list_empty(&kdev->regions)) {
+ dev_err(dev, "no valid region information found\n");
+ return -ENODEV;
+ }
+
+ /* Next, we run through the regions and set things up */
+ for_each_region(kdev, region)
+ knav_queue_setup_region(kdev, region);
+
+ return 0;
+}
+
+static int knav_get_link_ram(struct knav_device *kdev,
+ const char *name,
+ struct knav_link_ram_block *block)
+{
+ struct platform_device *pdev = to_platform_device(kdev->dev);
+ struct device_node *node = pdev->dev.of_node;
+ u32 temp[2];
+
+ /*
+ * Note: link ram resources are specified in "entry" sized units. In
+ * reality, although entries are ~40 bits in hardware, we treat them as
+ * 64-bit entities here.
+ *
+ * For example, to specify the internal link ram for Keystone-I class
+ * devices, we would set the linkram0 resource to 0x80000-0x83fff.
+ *
+ * This gets a bit weird when other link rams are used. For example,
+ * if the range specified is 0x0c000000-0x0c003fff (i.e., 16K entries
+ * in MSMC SRAM), the actual memory used is 0x0c000000-0x0c020000,
+ * which accounts for 64-bits per entry, for 16K entries.
+ */
+ if (!of_property_read_u32_array(node, name, temp, 2)) {
+ if (temp[0]) {
+ /*
+ * queue_base specified => using internal or on-chip
+ * link ram. WARNING: we do not "reserve" this block
+ */
+ block->dma = (dma_addr_t)temp[0];
+ block->virt = NULL;
+ block->size = temp[1];
+ } else {
+ block->size = temp[1];
+ /* queue_base not specified => allocate requested size */
+ block->virt = dmam_alloc_coherent(kdev->dev,
+ 8 * block->size, &block->dma,
+ GFP_KERNEL);
+ if (!block->virt) {
+ dev_err(kdev->dev, "failed to alloc linkram\n");
+ return -ENOMEM;
+ }
+ }
+ } else {
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int knav_queue_setup_link_ram(struct knav_device *kdev)
+{
+ struct knav_link_ram_block *block;
+ struct knav_qmgr_info *qmgr;
+
+ for_each_qmgr(kdev, qmgr) {
+ block = &kdev->link_rams[0];
+ dev_dbg(kdev->dev, "linkram0: dma:%pad, virt:%p, size:%x\n",
+ &block->dma, block->virt, block->size);
+ writel_relaxed((u32)block->dma, &qmgr->reg_config->link_ram_base0);
+ if (kdev->version == QMSS_66AK2G)
+ writel_relaxed(block->size,
+ &qmgr->reg_config->link_ram_size0);
+ else
+ writel_relaxed(block->size - 1,
+ &qmgr->reg_config->link_ram_size0);
+ block++;
+ if (!block->size)
+ continue;
+
+ dev_dbg(kdev->dev, "linkram1: dma:%pad, virt:%p, size:%x\n",
+ &block->dma, block->virt, block->size);
+ writel_relaxed(block->dma, &qmgr->reg_config->link_ram_base1);
+ }
+
+ return 0;
+}
+
+static int knav_setup_queue_range(struct knav_device *kdev,
+ struct device_node *node)
+{
+ struct device *dev = kdev->dev;
+ struct knav_range_info *range;
+ struct knav_qmgr_info *qmgr;
+ u32 temp[2], start, end, id, index;
+ int ret, i;
+
+ range = devm_kzalloc(dev, sizeof(*range), GFP_KERNEL);
+ if (!range) {
+ dev_err(dev, "out of memory allocating range\n");
+ return -ENOMEM;
+ }
+
+ range->kdev = kdev;
+ range->name = knav_queue_find_name(node);
+ ret = of_property_read_u32_array(node, "qrange", temp, 2);
+ if (!ret) {
+ range->queue_base = temp[0] - kdev->base_id;
+ range->num_queues = temp[1];
+ } else {
+ dev_err(dev, "invalid queue range %s\n", range->name);
+ devm_kfree(dev, range);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < RANGE_MAX_IRQS; i++) {
+ struct of_phandle_args oirq;
+
+ if (of_irq_parse_one(node, i, &oirq))
+ break;
+
+ range->irqs[i].irq = irq_create_of_mapping(&oirq);
+ if (range->irqs[i].irq == IRQ_NONE)
+ break;
+
+ range->num_irqs++;
+
+ if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3) {
+ unsigned long mask;
+ int bit;
+
+ range->irqs[i].cpu_mask = devm_kzalloc(dev,
+ cpumask_size(), GFP_KERNEL);
+ if (!range->irqs[i].cpu_mask)
+ return -ENOMEM;
+
+ mask = (oirq.args[2] & 0x0000ff00) >> 8;
+ for_each_set_bit(bit, &mask, BITS_PER_LONG)
+ cpumask_set_cpu(bit, range->irqs[i].cpu_mask);
+ }
+ }
+
+ range->num_irqs = min(range->num_irqs, range->num_queues);
+ if (range->num_irqs)
+ range->flags |= RANGE_HAS_IRQ;
+
+ if (of_property_read_bool(node, "qalloc-by-id"))
+ range->flags |= RANGE_RESERVED;
+
+ if (of_property_present(node, "accumulator")) {
+ ret = knav_init_acc_range(kdev, node, range);
+ if (ret < 0) {
+ devm_kfree(dev, range);
+ return ret;
+ }
+ } else {
+ range->ops = &knav_gp_range_ops;
+ }
+
+ /* set threshold to 1, and flush out the queues */
+ for_each_qmgr(kdev, qmgr) {
+ start = max(qmgr->start_queue, range->queue_base);
+ end = min(qmgr->start_queue + qmgr->num_queues,
+ range->queue_base + range->num_queues);
+ for (id = start; id < end; id++) {
+ index = id - qmgr->start_queue;
+ writel_relaxed(THRESH_GTE | 1,
+ &qmgr->reg_peek[index].ptr_size_thresh);
+ writel_relaxed(0,
+ &qmgr->reg_push[index].ptr_size_thresh);
+ }
+ }
+
+ list_add_tail(&range->list, &kdev->queue_ranges);
+ dev_dbg(dev, "added range %s: %d-%d, %d irqs%s%s%s\n",
+ range->name, range->queue_base,
+ range->queue_base + range->num_queues - 1,
+ range->num_irqs,
+ (range->flags & RANGE_HAS_IRQ) ? ", has irq" : "",
+ (range->flags & RANGE_RESERVED) ? ", reserved" : "",
+ (range->flags & RANGE_HAS_ACCUMULATOR) ? ", acc" : "");
+ kdev->num_queues_in_use += range->num_queues;
+ return 0;
+}
+
+static int knav_setup_queue_pools(struct knav_device *kdev,
+ struct device_node *queue_pools)
+{
+ struct device_node *type, *range;
+
+ for_each_child_of_node(queue_pools, type) {
+ for_each_child_of_node(type, range) {
+ /* return value ignored, we init the rest... */
+ knav_setup_queue_range(kdev, range);
+ }
+ }
+
+ /* ... and barf if they all failed! */
+ if (list_empty(&kdev->queue_ranges)) {
+ dev_err(kdev->dev, "no valid queue range found\n");
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static void knav_free_queue_range(struct knav_device *kdev,
+ struct knav_range_info *range)
+{
+ if (range->ops && range->ops->free_range)
+ range->ops->free_range(range);
+ list_del(&range->list);
+ devm_kfree(kdev->dev, range);
+}
+
+static void knav_free_queue_ranges(struct knav_device *kdev)
+{
+ struct knav_range_info *range;
+
+ for (;;) {
+ range = first_queue_range(kdev);
+ if (!range)
+ break;
+ knav_free_queue_range(kdev, range);
+ }
+}
+
+static void knav_queue_free_regions(struct knav_device *kdev)
+{
+ struct knav_region *region;
+ struct knav_pool *pool, *tmp;
+ unsigned size;
+
+ for (;;) {
+ region = first_region(kdev);
+ if (!region)
+ break;
+ list_for_each_entry_safe(pool, tmp, &region->pools, region_inst)
+ knav_pool_destroy(pool);
+
+ size = region->virt_end - region->virt_start;
+ if (size)
+ free_pages_exact(region->virt_start, size);
+ list_del(&region->list);
+ devm_kfree(kdev->dev, region);
+ }
+}
+
+static void __iomem *knav_queue_map_reg(struct knav_device *kdev,
+ struct device_node *node, int index)
+{
+ struct resource res;
+ void __iomem *regs;
+ int ret;
+
+ ret = of_address_to_resource(node, index, &res);
+ if (ret) {
+ dev_err(kdev->dev, "Can't translate of node(%pOFn) address for index(%d)\n",
+ node, index);
+ return ERR_PTR(ret);
+ }
+
+ regs = devm_ioremap_resource(kdev->dev, &res);
+ if (IS_ERR(regs))
+ dev_err(kdev->dev, "Failed to map register base for index(%d) node(%pOFn)\n",
+ index, node);
+ return regs;
+}
+
+static int knav_queue_init_qmgrs(struct knav_device *kdev,
+ struct device_node *qmgrs)
+{
+ struct device *dev = kdev->dev;
+ struct knav_qmgr_info *qmgr;
+ struct device_node *child;
+ u32 temp[2];
+ int ret;
+
+ for_each_child_of_node(qmgrs, child) {
+ qmgr = devm_kzalloc(dev, sizeof(*qmgr), GFP_KERNEL);
+ if (!qmgr) {
+ of_node_put(child);
+ dev_err(dev, "out of memory allocating qmgr\n");
+ return -ENOMEM;
+ }
+
+ ret = of_property_read_u32_array(child, "managed-queues",
+ temp, 2);
+ if (!ret) {
+ qmgr->start_queue = temp[0];
+ qmgr->num_queues = temp[1];
+ } else {
+ dev_err(dev, "invalid qmgr queue range\n");
+ devm_kfree(dev, qmgr);
+ continue;
+ }
+
+ dev_info(dev, "qmgr start queue %d, number of queues %d\n",
+ qmgr->start_queue, qmgr->num_queues);
+
+ qmgr->reg_peek =
+ knav_queue_map_reg(kdev, child,
+ KNAV_QUEUE_PEEK_REG_INDEX);
+
+ if (kdev->version == QMSS) {
+ qmgr->reg_status =
+ knav_queue_map_reg(kdev, child,
+ KNAV_QUEUE_STATUS_REG_INDEX);
+ }
+
+ qmgr->reg_config =
+ knav_queue_map_reg(kdev, child,
+ (kdev->version == QMSS_66AK2G) ?
+ KNAV_L_QUEUE_CONFIG_REG_INDEX :
+ KNAV_QUEUE_CONFIG_REG_INDEX);
+ qmgr->reg_region =
+ knav_queue_map_reg(kdev, child,
+ (kdev->version == QMSS_66AK2G) ?
+ KNAV_L_QUEUE_REGION_REG_INDEX :
+ KNAV_QUEUE_REGION_REG_INDEX);
+
+ qmgr->reg_push =
+ knav_queue_map_reg(kdev, child,
+ (kdev->version == QMSS_66AK2G) ?
+ KNAV_L_QUEUE_PUSH_REG_INDEX :
+ KNAV_QUEUE_PUSH_REG_INDEX);
+
+ if (kdev->version == QMSS) {
+ qmgr->reg_pop =
+ knav_queue_map_reg(kdev, child,
+ KNAV_QUEUE_POP_REG_INDEX);
+ }
+
+ if (IS_ERR(qmgr->reg_peek) ||
+ ((kdev->version == QMSS) &&
+ (IS_ERR(qmgr->reg_status) || IS_ERR(qmgr->reg_pop))) ||
+ IS_ERR(qmgr->reg_config) || IS_ERR(qmgr->reg_region) ||
+ IS_ERR(qmgr->reg_push)) {
+ dev_err(dev, "failed to map qmgr regs\n");
+ if (kdev->version == QMSS) {
+ if (!IS_ERR(qmgr->reg_status))
+ devm_iounmap(dev, qmgr->reg_status);
+ if (!IS_ERR(qmgr->reg_pop))
+ devm_iounmap(dev, qmgr->reg_pop);
+ }
+ if (!IS_ERR(qmgr->reg_peek))
+ devm_iounmap(dev, qmgr->reg_peek);
+ if (!IS_ERR(qmgr->reg_config))
+ devm_iounmap(dev, qmgr->reg_config);
+ if (!IS_ERR(qmgr->reg_region))
+ devm_iounmap(dev, qmgr->reg_region);
+ if (!IS_ERR(qmgr->reg_push))
+ devm_iounmap(dev, qmgr->reg_push);
+ devm_kfree(dev, qmgr);
+ continue;
+ }
+
+ /* Use same push register for pop as well */
+ if (kdev->version == QMSS_66AK2G)
+ qmgr->reg_pop = qmgr->reg_push;
+
+ list_add_tail(&qmgr->list, &kdev->qmgrs);
+ dev_info(dev, "added qmgr start queue %d, num of queues %d, reg_peek %p, reg_status %p, reg_config %p, reg_region %p, reg_push %p, reg_pop %p\n",
+ qmgr->start_queue, qmgr->num_queues,
+ qmgr->reg_peek, qmgr->reg_status,
+ qmgr->reg_config, qmgr->reg_region,
+ qmgr->reg_push, qmgr->reg_pop);
+ }
+ return 0;
+}
+
+static int knav_queue_init_pdsps(struct knav_device *kdev,
+ struct device_node *pdsps)
+{
+ struct device *dev = kdev->dev;
+ struct knav_pdsp_info *pdsp;
+ struct device_node *child;
+
+ for_each_child_of_node(pdsps, child) {
+ pdsp = devm_kzalloc(dev, sizeof(*pdsp), GFP_KERNEL);
+ if (!pdsp) {
+ of_node_put(child);
+ dev_err(dev, "out of memory allocating pdsp\n");
+ return -ENOMEM;
+ }
+ pdsp->name = knav_queue_find_name(child);
+ pdsp->iram =
+ knav_queue_map_reg(kdev, child,
+ KNAV_QUEUE_PDSP_IRAM_REG_INDEX);
+ pdsp->regs =
+ knav_queue_map_reg(kdev, child,
+ KNAV_QUEUE_PDSP_REGS_REG_INDEX);
+ pdsp->intd =
+ knav_queue_map_reg(kdev, child,
+ KNAV_QUEUE_PDSP_INTD_REG_INDEX);
+ pdsp->command =
+ knav_queue_map_reg(kdev, child,
+ KNAV_QUEUE_PDSP_CMD_REG_INDEX);
+
+ if (IS_ERR(pdsp->command) || IS_ERR(pdsp->iram) ||
+ IS_ERR(pdsp->regs) || IS_ERR(pdsp->intd)) {
+ dev_err(dev, "failed to map pdsp %s regs\n",
+ pdsp->name);
+ if (!IS_ERR(pdsp->command))
+ devm_iounmap(dev, pdsp->command);
+ if (!IS_ERR(pdsp->iram))
+ devm_iounmap(dev, pdsp->iram);
+ if (!IS_ERR(pdsp->regs))
+ devm_iounmap(dev, pdsp->regs);
+ if (!IS_ERR(pdsp->intd))
+ devm_iounmap(dev, pdsp->intd);
+ devm_kfree(dev, pdsp);
+ continue;
+ }
+ of_property_read_u32(child, "id", &pdsp->id);
+ list_add_tail(&pdsp->list, &kdev->pdsps);
+ dev_dbg(dev, "added pdsp %s: command %p, iram %p, regs %p, intd %p\n",
+ pdsp->name, pdsp->command, pdsp->iram, pdsp->regs,
+ pdsp->intd);
+ }
+ return 0;
+}
+
+static int knav_queue_stop_pdsp(struct knav_device *kdev,
+ struct knav_pdsp_info *pdsp)
+{
+ u32 val, timeout = 1000;
+ int ret;
+
+ val = readl_relaxed(&pdsp->regs->control) & ~PDSP_CTRL_ENABLE;
+ writel_relaxed(val, &pdsp->regs->control);
+ ret = knav_queue_pdsp_wait(&pdsp->regs->control, timeout,
+ PDSP_CTRL_RUNNING);
+ if (ret < 0) {
+ dev_err(kdev->dev, "timed out on pdsp %s stop\n", pdsp->name);
+ return ret;
+ }
+ pdsp->loaded = false;
+ pdsp->started = false;
+ return 0;
+}
+
+static int knav_queue_load_pdsp(struct knav_device *kdev,
+ struct knav_pdsp_info *pdsp)
+{
+ int i, ret, fwlen;
+ const struct firmware *fw;
+ bool found = false;
+ u32 *fwdata;
+
+ for (i = 0; i < ARRAY_SIZE(knav_acc_firmwares); i++) {
+ if (knav_acc_firmwares[i]) {
+ ret = request_firmware_direct(&fw,
+ knav_acc_firmwares[i],
+ kdev->dev);
+ if (!ret) {
+ found = true;
+ break;
+ }
+ }
+ }
+
+ if (!found) {
+ dev_err(kdev->dev, "failed to get firmware for pdsp\n");
+ return -ENODEV;
+ }
+
+ dev_info(kdev->dev, "firmware file %s downloaded for PDSP\n",
+ knav_acc_firmwares[i]);
+
+ writel_relaxed(pdsp->id + 1, pdsp->command + 0x18);
+ /* download the firmware */
+ fwdata = (u32 *)fw->data;
+ fwlen = (fw->size + sizeof(u32) - 1) / sizeof(u32);
+ for (i = 0; i < fwlen; i++)
+ writel_relaxed(be32_to_cpu(fwdata[i]), pdsp->iram + i);
+
+ release_firmware(fw);
+ return 0;
+}
+
+static int knav_queue_start_pdsp(struct knav_device *kdev,
+ struct knav_pdsp_info *pdsp)
+{
+ u32 val, timeout = 1000;
+ int ret;
+
+ /* write a command for sync */
+ writel_relaxed(0xffffffff, pdsp->command);
+ while (readl_relaxed(pdsp->command) != 0xffffffff)
+ cpu_relax();
+
+ /* soft reset the PDSP */
+ val = readl_relaxed(&pdsp->regs->control);
+ val &= ~(PDSP_CTRL_PC_MASK | PDSP_CTRL_SOFT_RESET);
+ writel_relaxed(val, &pdsp->regs->control);
+
+ /* enable pdsp */
+ val = readl_relaxed(&pdsp->regs->control) | PDSP_CTRL_ENABLE;
+ writel_relaxed(val, &pdsp->regs->control);
+
+ /* wait for command register to clear */
+ ret = knav_queue_pdsp_wait(pdsp->command, timeout, 0);
+ if (ret < 0) {
+ dev_err(kdev->dev,
+ "timed out on pdsp %s command register wait\n",
+ pdsp->name);
+ return ret;
+ }
+ return 0;
+}
+
+static void knav_queue_stop_pdsps(struct knav_device *kdev)
+{
+ struct knav_pdsp_info *pdsp;
+
+ /* disable all pdsps */
+ for_each_pdsp(kdev, pdsp)
+ knav_queue_stop_pdsp(kdev, pdsp);
+}
+
+static int knav_queue_start_pdsps(struct knav_device *kdev)
+{
+ struct knav_pdsp_info *pdsp;
+ int ret;
+
+ knav_queue_stop_pdsps(kdev);
+ /* Now load them all. We return success even if a pdsp
+ * is not loaded, since the accumulator channels are optional
+ * and depend on firmware being available in the system. We
+ * set the loaded and started flags here, and when the acc
+ * range is initialized it is only set up if its pdsp started.
+ */
+ for_each_pdsp(kdev, pdsp) {
+ ret = knav_queue_load_pdsp(kdev, pdsp);
+ if (!ret)
+ pdsp->loaded = true;
+ }
+
+ for_each_pdsp(kdev, pdsp) {
+ if (pdsp->loaded) {
+ ret = knav_queue_start_pdsp(kdev, pdsp);
+ if (!ret)
+ pdsp->started = true;
+ }
+ }
+ return 0;
+}
+
+static inline struct knav_qmgr_info *knav_find_qmgr(unsigned id)
+{
+ struct knav_qmgr_info *qmgr;
+
+ for_each_qmgr(kdev, qmgr) {
+ if ((id >= qmgr->start_queue) &&
+ (id < qmgr->start_queue + qmgr->num_queues))
+ return qmgr;
+ }
+ return NULL;
+}
+
+static int knav_queue_init_queue(struct knav_device *kdev,
+ struct knav_range_info *range,
+ struct knav_queue_inst *inst,
+ unsigned id)
+{
+ char irq_name[KNAV_NAME_SIZE];
+ inst->qmgr = knav_find_qmgr(id);
+ if (!inst->qmgr)
+ return -1;
+
+ INIT_LIST_HEAD(&inst->handles);
+ inst->kdev = kdev;
+ inst->range = range;
+ inst->irq_num = -1;
+ inst->id = id;
+ scnprintf(irq_name, sizeof(irq_name), "hwqueue-%d", id);
+ inst->irq_name = kstrndup(irq_name, sizeof(irq_name), GFP_KERNEL);
+
+ if (range->ops && range->ops->init_queue)
+ return range->ops->init_queue(range, inst);
+ else
+ return 0;
+}
+
+static int knav_queue_init_queues(struct knav_device *kdev)
+{
+ struct knav_range_info *range;
+ int size, id, base_idx;
+ int idx = 0, ret = 0;
+
+ /* how much do we need for instance data? */
+ size = sizeof(struct knav_queue_inst);
+
+ /* round this up to a power of 2, keep the index to instance
+ * arithmetic fast.
+ */
+ kdev->inst_shift = order_base_2(size);
+ size = (1 << kdev->inst_shift) * kdev->num_queues_in_use;
+ kdev->instances = devm_kzalloc(kdev->dev, size, GFP_KERNEL);
+ if (!kdev->instances)
+ return -ENOMEM;
+
+ for_each_queue_range(kdev, range) {
+ if (range->ops && range->ops->init_range)
+ range->ops->init_range(range);
+ base_idx = idx;
+ for (id = range->queue_base;
+ id < range->queue_base + range->num_queues; id++, idx++) {
+ ret = knav_queue_init_queue(kdev, range,
+ knav_queue_idx_to_inst(kdev, idx), id);
+ if (ret < 0)
+ return ret;
+ }
+ range->queue_base_inst =
+ knav_queue_idx_to_inst(kdev, base_idx);
+ }
+ return 0;
+}
+
+/* Match table for of_platform binding */
+static const struct of_device_id keystone_qmss_of_match[] = {
+ {
+ .compatible = "ti,keystone-navigator-qmss",
+ },
+ {
+ .compatible = "ti,66ak2g-navss-qm",
+ .data = (void *)QMSS_66AK2G,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, keystone_qmss_of_match);
+
+static int knav_queue_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *qmgrs, *queue_pools, *regions, *pdsps;
+ const struct of_device_id *match;
+ struct device *dev = &pdev->dev;
+ u32 temp[2];
+ int ret;
+
+ if (!node) {
+ dev_err(dev, "device tree info unavailable\n");
+ return -ENODEV;
+ }
+
+ kdev = devm_kzalloc(dev, sizeof(struct knav_device), GFP_KERNEL);
+ if (!kdev) {
+ dev_err(dev, "memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ match = of_match_device(of_match_ptr(keystone_qmss_of_match), dev);
+ if (match && match->data)
+ kdev->version = QMSS_66AK2G;
+
+ platform_set_drvdata(pdev, kdev);
+ kdev->dev = dev;
+ INIT_LIST_HEAD(&kdev->queue_ranges);
+ INIT_LIST_HEAD(&kdev->qmgrs);
+ INIT_LIST_HEAD(&kdev->pools);
+ INIT_LIST_HEAD(&kdev->regions);
+ INIT_LIST_HEAD(&kdev->pdsps);
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0) {
+ pm_runtime_disable(&pdev->dev);
+ dev_err(dev, "Failed to enable QMSS\n");
+ return ret;
+ }
+
+ if (of_property_read_u32_array(node, "queue-range", temp, 2)) {
+ dev_err(dev, "queue-range not specified\n");
+ ret = -ENODEV;
+ goto err;
+ }
+ kdev->base_id = temp[0];
+ kdev->num_queues = temp[1];
+
+ /* Initialize queue managers using device tree configuration */
+ qmgrs = of_get_child_by_name(node, "qmgrs");
+ if (!qmgrs) {
+ dev_err(dev, "queue manager info not specified\n");
+ ret = -ENODEV;
+ goto err;
+ }
+ ret = knav_queue_init_qmgrs(kdev, qmgrs);
+ of_node_put(qmgrs);
+ if (ret)
+ goto err;
+
+ /* get pdsp configuration values from device tree */
+ pdsps = of_get_child_by_name(node, "pdsps");
+ if (pdsps) {
+ ret = knav_queue_init_pdsps(kdev, pdsps);
+ if (ret)
+ goto err;
+
+ ret = knav_queue_start_pdsps(kdev);
+ if (ret)
+ goto err;
+ }
+ of_node_put(pdsps);
+
+ /* get usable queue range values from device tree */
+ queue_pools = of_get_child_by_name(node, "queue-pools");
+ if (!queue_pools) {
+ dev_err(dev, "queue-pools not specified\n");
+ ret = -ENODEV;
+ goto err;
+ }
+ ret = knav_setup_queue_pools(kdev, queue_pools);
+ of_node_put(queue_pools);
+ if (ret)
+ goto err;
+
+ ret = knav_get_link_ram(kdev, "linkram0", &kdev->link_rams[0]);
+ if (ret) {
+ dev_err(kdev->dev, "could not setup linking ram\n");
+ goto err;
+ }
+
+ ret = knav_get_link_ram(kdev, "linkram1", &kdev->link_rams[1]);
+ if (ret) {
+ /*
+ * nothing really, we have one linking ram already, so we just
+ * live within our means
+ */
+ }
+
+ ret = knav_queue_setup_link_ram(kdev);
+ if (ret)
+ goto err;
+
+ regions = of_get_child_by_name(node, "descriptor-regions");
+ if (!regions) {
+ dev_err(dev, "descriptor-regions not specified\n");
+ ret = -ENODEV;
+ goto err;
+ }
+ ret = knav_queue_setup_regions(kdev, regions);
+ of_node_put(regions);
+ if (ret)
+ goto err;
+
+ ret = knav_queue_init_queues(kdev);
+ if (ret < 0) {
+ dev_err(dev, "hwqueue initialization failed\n");
+ goto err;
+ }
+
+ debugfs_create_file("qmss", S_IFREG | S_IRUGO, NULL, NULL,
+ &knav_queue_debug_fops);
+ device_ready = true;
+ return 0;
+
+err:
+ knav_queue_stop_pdsps(kdev);
+ knav_queue_free_regions(kdev);
+ knav_free_queue_ranges(kdev);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+}
+
+static int knav_queue_remove(struct platform_device *pdev)
+{
+ /* TODO: Free resources */
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static struct platform_driver keystone_qmss_driver = {
+ .probe = knav_queue_probe,
+ .remove = knav_queue_remove,
+ .driver = {
+ .name = "keystone-navigator-qmss",
+ .of_match_table = keystone_qmss_of_match,
+ },
+};
+module_platform_driver(keystone_qmss_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI QMSS driver for Keystone SOCs");
+MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
+MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");
diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c
new file mode 100644
index 0000000000..f04c211579
--- /dev/null
+++ b/drivers/soc/ti/pm33xx.c
@@ -0,0 +1,611 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AM33XX Power Management Routines
+ *
+ * Copyright (C) 2012-2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Vaibhav Bedia, Dave Gerlach
+ */
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_data/pm33xx.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/rtc.h>
+#include <linux/rtc/rtc-omap.h>
+#include <linux/sizes.h>
+#include <linux/sram.h>
+#include <linux/suspend.h>
+#include <linux/ti-emif-sram.h>
+#include <linux/wkup_m3_ipc.h>
+
+#include <asm/proc-fns.h>
+#include <asm/suspend.h>
+#include <asm/system_misc.h>
+
+#define AMX3_PM_SRAM_SYMBOL_OFFSET(sym) ((unsigned long)(sym) - \
+ (unsigned long)pm_sram->do_wfi)
+
+#define RTC_SCRATCH_RESUME_REG 0
+#define RTC_SCRATCH_MAGIC_REG 1
+#define RTC_REG_BOOT_MAGIC 0x8cd0 /* RTC */
+#define GIC_INT_SET_PENDING_BASE 0x200
+#define AM43XX_GIC_DIST_BASE 0x48241000
+
+static void __iomem *rtc_base_virt;
+static struct clk *rtc_fck;
+static u32 rtc_magic_val;
+
+static int (*am33xx_do_wfi_sram)(unsigned long unused);
+static phys_addr_t am33xx_do_wfi_sram_phys;
+
+static struct gen_pool *sram_pool, *sram_pool_data;
+static unsigned long ocmcram_location, ocmcram_location_data;
+
+static struct rtc_device *omap_rtc;
+static void __iomem *gic_dist_base;
+
+static struct am33xx_pm_platform_data *pm_ops;
+static struct am33xx_pm_sram_addr *pm_sram;
+
+static struct device *pm33xx_dev;
+static struct wkup_m3_ipc *m3_ipc;
+
+#ifdef CONFIG_SUSPEND
+static int rtc_only_idle;
+static int retrigger_irq;
+static unsigned long suspend_wfi_flags;
+
+static struct wkup_m3_wakeup_src wakeup_src = {.irq_nr = 0,
+ .src = "Unknown",
+};
+
+static struct wkup_m3_wakeup_src rtc_alarm_wakeup = {
+ .irq_nr = 108, .src = "RTC Alarm",
+};
+
+static struct wkup_m3_wakeup_src rtc_ext_wakeup = {
+ .irq_nr = 0, .src = "Ext wakeup",
+};
+#endif
+
+static u32 sram_suspend_address(unsigned long addr)
+{
+ return ((unsigned long)am33xx_do_wfi_sram +
+ AMX3_PM_SRAM_SYMBOL_OFFSET(addr));
+}
+
+static int am33xx_push_sram_idle(void)
+{
+ struct am33xx_pm_ro_sram_data ro_sram_data;
+ int ret;
+ u32 table_addr, ro_data_addr;
+ void *copy_addr;
+
+ ro_sram_data.amx3_pm_sram_data_virt = ocmcram_location_data;
+ ro_sram_data.amx3_pm_sram_data_phys =
+ gen_pool_virt_to_phys(sram_pool_data, ocmcram_location_data);
+ ro_sram_data.rtc_base_virt = rtc_base_virt;
+
+ /* Save physical address to calculate resume offset during pm init */
+ am33xx_do_wfi_sram_phys = gen_pool_virt_to_phys(sram_pool,
+ ocmcram_location);
+
+ am33xx_do_wfi_sram = sram_exec_copy(sram_pool, (void *)ocmcram_location,
+ pm_sram->do_wfi,
+ *pm_sram->do_wfi_sz);
+ if (!am33xx_do_wfi_sram) {
+ dev_err(pm33xx_dev,
+ "PM: %s: am33xx_do_wfi copy to sram failed\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ table_addr =
+ sram_suspend_address((unsigned long)pm_sram->emif_sram_table);
+ ret = ti_emif_copy_pm_function_table(sram_pool, (void *)table_addr);
+ if (ret) {
+ dev_dbg(pm33xx_dev,
+ "PM: %s: EMIF function copy failed\n", __func__);
+ return -EPROBE_DEFER;
+ }
+
+ ro_data_addr =
+ sram_suspend_address((unsigned long)pm_sram->ro_sram_data);
+ copy_addr = sram_exec_copy(sram_pool, (void *)ro_data_addr,
+ &ro_sram_data,
+ sizeof(ro_sram_data));
+ if (!copy_addr) {
+ dev_err(pm33xx_dev,
+ "PM: %s: ro_sram_data copy to sram failed\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int am33xx_do_sram_idle(u32 wfi_flags)
+{
+ if (!m3_ipc || !pm_ops)
+ return 0;
+
+ if (wfi_flags & WFI_FLAG_WAKE_M3)
+ m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_IDLE);
+
+ return pm_ops->cpu_suspend(am33xx_do_wfi_sram, wfi_flags);
+}
+
+static int __init am43xx_map_gic(void)
+{
+ gic_dist_base = ioremap(AM43XX_GIC_DIST_BASE, SZ_4K);
+
+ if (!gic_dist_base)
+ return -ENOMEM;
+
+ return 0;
+}
+
+#ifdef CONFIG_SUSPEND
+static struct wkup_m3_wakeup_src rtc_wake_src(void)
+{
+ u32 i;
+
+ i = __raw_readl(rtc_base_virt + 0x44) & 0x40;
+
+ if (i) {
+ retrigger_irq = rtc_alarm_wakeup.irq_nr;
+ return rtc_alarm_wakeup;
+ }
+
+ retrigger_irq = rtc_ext_wakeup.irq_nr;
+
+ return rtc_ext_wakeup;
+}
+
+static int am33xx_rtc_only_idle(unsigned long wfi_flags)
+{
+ omap_rtc_power_off_program(&omap_rtc->dev);
+ am33xx_do_wfi_sram(wfi_flags);
+ return 0;
+}
+
+/*
+ * Note that the RTC module clock must be re-enabled only for rtc+ddr suspend.
+ * The module appears to stay in the SYSC_IDLE_SMART_WKUP mode configured
+ * by the interconnect code just fine for both rtc+ddr suspend and retention
+ * suspend.
+ */
+static int am33xx_pm_suspend(suspend_state_t suspend_state)
+{
+ int i, ret = 0;
+
+ if (suspend_state == PM_SUSPEND_MEM &&
+ pm_ops->check_off_mode_enable()) {
+ ret = clk_prepare_enable(rtc_fck);
+ if (ret) {
+ dev_err(pm33xx_dev, "Failed to enable clock: %i\n", ret);
+ return ret;
+ }
+
+ pm_ops->save_context();
+ suspend_wfi_flags |= WFI_FLAG_RTC_ONLY;
+ clk_save_context();
+ ret = pm_ops->soc_suspend(suspend_state, am33xx_rtc_only_idle,
+ suspend_wfi_flags);
+
+ suspend_wfi_flags &= ~WFI_FLAG_RTC_ONLY;
+ dev_info(pm33xx_dev, "Entering RTC Only mode with DDR in self-refresh\n");
+
+ if (!ret) {
+ clk_restore_context();
+ pm_ops->restore_context();
+ m3_ipc->ops->set_rtc_only(m3_ipc);
+ am33xx_push_sram_idle();
+ }
+ } else {
+ ret = pm_ops->soc_suspend(suspend_state, am33xx_do_wfi_sram,
+ suspend_wfi_flags);
+ }
+
+ if (ret) {
+ dev_err(pm33xx_dev, "PM: Kernel suspend failure\n");
+ } else {
+ i = m3_ipc->ops->request_pm_status(m3_ipc);
+
+ switch (i) {
+ case 0:
+ dev_info(pm33xx_dev,
+ "PM: Successfully put all powerdomains to target state\n");
+ break;
+ case 1:
+ dev_err(pm33xx_dev,
+ "PM: Could not transition all powerdomains to target state\n");
+ ret = -1;
+ break;
+ default:
+ dev_err(pm33xx_dev,
+ "PM: CM3 returned unknown result = %d\n", i);
+ ret = -1;
+ }
+
+ /* print the wakeup reason */
+ if (rtc_only_idle) {
+ wakeup_src = rtc_wake_src();
+ pr_info("PM: Wakeup source %s\n", wakeup_src.src);
+ } else {
+ pr_info("PM: Wakeup source %s\n",
+ m3_ipc->ops->request_wake_src(m3_ipc));
+ }
+ }
+
+ if (suspend_state == PM_SUSPEND_MEM && pm_ops->check_off_mode_enable())
+ clk_disable_unprepare(rtc_fck);
+
+ return ret;
+}
+
+static int am33xx_pm_enter(suspend_state_t suspend_state)
+{
+ int ret = 0;
+
+ switch (suspend_state) {
+ case PM_SUSPEND_MEM:
+ case PM_SUSPEND_STANDBY:
+ ret = am33xx_pm_suspend(suspend_state);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int am33xx_pm_begin(suspend_state_t state)
+{
+ int ret = -EINVAL;
+ struct nvmem_device *nvmem;
+
+ if (state == PM_SUSPEND_MEM && pm_ops->check_off_mode_enable()) {
+ nvmem = devm_nvmem_device_get(&omap_rtc->dev,
+ "omap_rtc_scratch0");
+ if (!IS_ERR(nvmem))
+ nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4,
+ (void *)&rtc_magic_val);
+ rtc_only_idle = 1;
+ } else {
+ rtc_only_idle = 0;
+ }
+
+ pm_ops->begin_suspend();
+
+ switch (state) {
+ case PM_SUSPEND_MEM:
+ ret = m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_DEEPSLEEP);
+ break;
+ case PM_SUSPEND_STANDBY:
+ ret = m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_STANDBY);
+ break;
+ }
+
+ return ret;
+}
+
+static void am33xx_pm_end(void)
+{
+ u32 val = 0;
+ struct nvmem_device *nvmem;
+
+ nvmem = devm_nvmem_device_get(&omap_rtc->dev, "omap_rtc_scratch0");
+ if (IS_ERR(nvmem))
+ return;
+
+ m3_ipc->ops->finish_low_power(m3_ipc);
+ if (rtc_only_idle) {
+ if (retrigger_irq) {
+ /*
+ * Each 32-bit Interrupt Set-Pending register covers 32
+ * interrupts. Compute the register offset by dividing the
+ * interrupt number by 32 and multiplying by 4, then set
+ * the bit for that interrupt within the register.
+ */
+ writel_relaxed(1 << (retrigger_irq & 31),
+ gic_dist_base + GIC_INT_SET_PENDING_BASE
+ + retrigger_irq / 32 * 4);
+ }
+
+ nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4,
+ (void *)&val);
+ }
+
+ rtc_only_idle = 0;
+
+ pm_ops->finish_suspend();
+}
+
+static int am33xx_pm_valid(suspend_state_t state)
+{
+ switch (state) {
+ case PM_SUSPEND_STANDBY:
+ case PM_SUSPEND_MEM:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static const struct platform_suspend_ops am33xx_pm_ops = {
+ .begin = am33xx_pm_begin,
+ .end = am33xx_pm_end,
+ .enter = am33xx_pm_enter,
+ .valid = am33xx_pm_valid,
+};
+#endif /* CONFIG_SUSPEND */
+
+static void am33xx_pm_set_ipc_ops(void)
+{
+ u32 resume_address;
+ int temp;
+
+ temp = ti_emif_get_mem_type();
+ if (temp < 0) {
+ dev_err(pm33xx_dev, "PM: Cannot determine memory type, no PM available\n");
+ return;
+ }
+ m3_ipc->ops->set_mem_type(m3_ipc, temp);
+
+ /* Physical resume address to be used by ROM code */
+ resume_address = am33xx_do_wfi_sram_phys +
+ *pm_sram->resume_offset + 0x4;
+
+ m3_ipc->ops->set_resume_address(m3_ipc, (void *)resume_address);
+}
+
+static void am33xx_pm_free_sram(void)
+{
+ gen_pool_free(sram_pool, ocmcram_location, *pm_sram->do_wfi_sz);
+ gen_pool_free(sram_pool_data, ocmcram_location_data,
+ sizeof(struct am33xx_pm_ro_sram_data));
+}
+
+/*
+ * Allocate the SRAM needed to hold the minimal suspend-resume code
+ */
+static int am33xx_pm_alloc_sram(void)
+{
+ struct device_node *np;
+ int ret = 0;
+
+ np = of_find_compatible_node(NULL, NULL, "ti,omap3-mpu");
+ if (!np) {
+ np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
+ if (!np) {
+ dev_err(pm33xx_dev, "PM: %s: Unable to find device node for mpu\n",
+ __func__);
+ return -ENODEV;
+ }
+ }
+
+ sram_pool = of_gen_pool_get(np, "pm-sram", 0);
+ if (!sram_pool) {
+ dev_err(pm33xx_dev, "PM: %s: Unable to get sram pool for ocmcram\n",
+ __func__);
+ ret = -ENODEV;
+ goto mpu_put_node;
+ }
+
+ sram_pool_data = of_gen_pool_get(np, "pm-sram", 1);
+ if (!sram_pool_data) {
+ dev_err(pm33xx_dev, "PM: %s: Unable to get sram data pool for ocmcram\n",
+ __func__);
+ ret = -ENODEV;
+ goto mpu_put_node;
+ }
+
+ ocmcram_location = gen_pool_alloc(sram_pool, *pm_sram->do_wfi_sz);
+ if (!ocmcram_location) {
+ dev_err(pm33xx_dev, "PM: %s: Unable to allocate memory from ocmcram\n",
+ __func__);
+ ret = -ENOMEM;
+ goto mpu_put_node;
+ }
+
+ ocmcram_location_data = gen_pool_alloc(sram_pool_data,
+ sizeof(struct emif_regs_amx3));
+ if (!ocmcram_location_data) {
+ dev_err(pm33xx_dev, "PM: Unable to allocate memory from ocmcram\n");
+ gen_pool_free(sram_pool, ocmcram_location, *pm_sram->do_wfi_sz);
+ ret = -ENOMEM;
+ }
+
+mpu_put_node:
+ of_node_put(np);
+ return ret;
+}
+
+static int am33xx_pm_rtc_setup(void)
+{
+ struct device_node *np;
+ unsigned long val = 0;
+ struct nvmem_device *nvmem;
+ int error;
+
+ np = of_find_node_by_name(NULL, "rtc");
+
+ if (of_device_is_available(np)) {
+ /* RTC interconnect target module clock */
+ rtc_fck = of_clk_get_by_name(np->parent, "fck");
+ if (IS_ERR(rtc_fck))
+ return PTR_ERR(rtc_fck);
+
+ rtc_base_virt = of_iomap(np, 0);
+ if (!rtc_base_virt) {
+ pr_warn("PM: could not iomap rtc");
+ error = -ENODEV;
+ goto err_clk_put;
+ }
+
+ omap_rtc = rtc_class_open("rtc0");
+ if (!omap_rtc) {
+ pr_warn("PM: rtc0 not available");
+ error = -EPROBE_DEFER;
+ goto err_iounmap;
+ }
+
+ nvmem = devm_nvmem_device_get(&omap_rtc->dev,
+ "omap_rtc_scratch0");
+ if (!IS_ERR(nvmem)) {
+ nvmem_device_read(nvmem, RTC_SCRATCH_MAGIC_REG * 4,
+ 4, (void *)&rtc_magic_val);
+ if ((rtc_magic_val & 0xffff) != RTC_REG_BOOT_MAGIC)
+ pr_warn("PM: bootloader does not support rtc-only!\n");
+
+ nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4,
+ 4, (void *)&val);
+ val = pm_sram->resume_address;
+ nvmem_device_write(nvmem, RTC_SCRATCH_RESUME_REG * 4,
+ 4, (void *)&val);
+ }
+ } else {
+ pr_warn("PM: no-rtc available, rtc-only mode disabled.\n");
+ }
+
+ return 0;
+
+err_iounmap:
+ iounmap(rtc_base_virt);
+err_clk_put:
+ clk_put(rtc_fck);
+
+ return error;
+}
+
+static int am33xx_pm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ if (!of_machine_is_compatible("ti,am33xx") &&
+ !of_machine_is_compatible("ti,am43"))
+ return -ENODEV;
+
+ pm_ops = dev->platform_data;
+ if (!pm_ops) {
+ dev_err(dev, "PM: Cannot get core PM ops!\n");
+ return -ENODEV;
+ }
+
+ ret = am43xx_map_gic();
+ if (ret) {
+ pr_err("PM: Could not ioremap GIC base\n");
+ return ret;
+ }
+
+ pm_sram = pm_ops->get_sram_addrs();
+ if (!pm_sram) {
+ dev_err(dev, "PM: Cannot get PM asm function addresses!!\n");
+ return -ENODEV;
+ }
+
+ m3_ipc = wkup_m3_ipc_get();
+ if (!m3_ipc) {
+ pr_err("PM: Cannot get wkup_m3_ipc handle\n");
+ return -EPROBE_DEFER;
+ }
+
+ pm33xx_dev = dev;
+
+ ret = am33xx_pm_alloc_sram();
+ if (ret)
+ goto err_wkup_m3_ipc_put;
+
+ ret = am33xx_pm_rtc_setup();
+ if (ret)
+ goto err_free_sram;
+
+ ret = am33xx_push_sram_idle();
+ if (ret)
+ goto err_unsetup_rtc;
+
+ am33xx_pm_set_ipc_ops();
+
+#ifdef CONFIG_SUSPEND
+ suspend_set_ops(&am33xx_pm_ops);
+
+ /*
+ * For a system suspend we must flush the caches, we want
+ * the DDR in self-refresh, we want to save the context
+ * of the EMIF, and we want the wkup_m3 to handle low-power
+ * transition.
+ */
+ suspend_wfi_flags |= WFI_FLAG_FLUSH_CACHE;
+ suspend_wfi_flags |= WFI_FLAG_SELF_REFRESH;
+ suspend_wfi_flags |= WFI_FLAG_SAVE_EMIF;
+ suspend_wfi_flags |= WFI_FLAG_WAKE_M3;
+#endif /* CONFIG_SUSPEND */
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ goto err_pm_runtime_disable;
+
+ ret = pm_ops->init(am33xx_do_sram_idle);
+ if (ret) {
+ dev_err(dev, "Unable to call core pm init!\n");
+ ret = -ENODEV;
+ goto err_pm_runtime_put;
+ }
+
+ return 0;
+
+err_pm_runtime_put:
+ pm_runtime_put_sync(dev);
+err_pm_runtime_disable:
+ pm_runtime_disable(dev);
+err_unsetup_rtc:
+ iounmap(rtc_base_virt);
+ clk_put(rtc_fck);
+err_free_sram:
+ am33xx_pm_free_sram();
+ pm33xx_dev = NULL;
+err_wkup_m3_ipc_put:
+ wkup_m3_ipc_put(m3_ipc);
+ return ret;
+}
+
+static int am33xx_pm_remove(struct platform_device *pdev)
+{
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ if (pm_ops->deinit)
+ pm_ops->deinit();
+ suspend_set_ops(NULL);
+ wkup_m3_ipc_put(m3_ipc);
+ am33xx_pm_free_sram();
+ iounmap(rtc_base_virt);
+ clk_put(rtc_fck);
+ return 0;
+}
+
+static struct platform_driver am33xx_pm_driver = {
+ .driver = {
+ .name = "pm33xx",
+ },
+ .probe = am33xx_pm_probe,
+ .remove = am33xx_pm_remove,
+};
+module_platform_driver(am33xx_pm_driver);
+
+MODULE_ALIAS("platform:pm33xx");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("am33xx power management driver");
diff --git a/drivers/soc/ti/pruss.c b/drivers/soc/ti/pruss.c
new file mode 100644
index 0000000000..f49f8492dd
--- /dev/null
+++ b/drivers/soc/ti/pruss.c
@@ -0,0 +1,619 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PRU-ICSS platform driver for various TI SoCs
+ *
+ * Copyright (C) 2014-2020 Texas Instruments Incorporated - http://www.ti.com/
+ * Author(s):
+ * Suman Anna <s-anna@ti.com>
+ * Andrew F. Davis <afd@ti.com>
+ * Tero Kristo <t-kristo@ti.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/pruss_driver.h>
+#include <linux/regmap.h>
+#include <linux/remoteproc.h>
+#include <linux/slab.h>
+#include "pruss.h"
+
+/**
+ * struct pruss_private_data - PRUSS driver private data
+ * @has_no_sharedram: flag to indicate the absence of PRUSS Shared Data RAM
+ * @has_core_mux_clock: flag to indicate the presence of PRUSS core clock
+ */
+struct pruss_private_data {
+ bool has_no_sharedram;
+ bool has_core_mux_clock;
+};
+
+/**
+ * pruss_get() - get the pruss for a given PRU remoteproc
+ * @rproc: remoteproc handle of a PRU instance
+ *
+ * Finds the parent pruss device for a PRU given the @rproc handle of the
+ * PRU remote processor. This function increments the pruss device's refcount,
+ * so always use pruss_put() to decrement it back once pruss isn't needed
+ * anymore.
+ *
+ * This API doesn't check if @rproc is valid or not. It is expected the caller
+ * will have done a pru_rproc_get() on @rproc, before calling this API to make
+ * sure that @rproc is valid.
+ *
+ * Return: pruss handle on success, and an ERR_PTR on failure using one
+ * of the following error values
+ * -EINVAL if invalid parameter
+ * -ENODEV if PRU device or PRUSS device is not found
+ */
+struct pruss *pruss_get(struct rproc *rproc)
+{
+ struct pruss *pruss;
+ struct device *dev;
+ struct platform_device *ppdev;
+
+ if (IS_ERR_OR_NULL(rproc))
+ return ERR_PTR(-EINVAL);
+
+ dev = &rproc->dev;
+
+ /* make sure it is PRU rproc */
+ if (!dev->parent || !is_pru_rproc(dev->parent))
+ return ERR_PTR(-ENODEV);
+
+ ppdev = to_platform_device(dev->parent->parent);
+ pruss = platform_get_drvdata(ppdev);
+ if (!pruss)
+ return ERR_PTR(-ENODEV);
+
+ get_device(pruss->dev);
+
+ return pruss;
+}
+EXPORT_SYMBOL_GPL(pruss_get);
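+
+/*
+ * Illustrative sketch (not part of this driver): a hypothetical PRU client
+ * driver looking up its PRUSS instance. The rproc handle is assumed to have
+ * been obtained with pru_rproc_get() beforehand, as the comment above notes.
+ */
+#if 0	/* example only, not compiled */
+static int example_use_pruss(struct rproc *pru_rproc)
+{
+	struct pruss *pruss;
+
+	pruss = pruss_get(pru_rproc);
+	if (IS_ERR(pruss))
+		return PTR_ERR(pruss);
+
+	/* ... use the PRUSS resources here ... */
+
+	pruss_put(pruss);	/* drop the reference when done */
+	return 0;
+}
+#endif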
+
+/**
+ * pruss_put() - decrement pruss device's usecount
+ * @pruss: pruss handle
+ *
+ * Complementary function for pruss_get(). Needs to be called
+ * after the PRUSS is used, and only if the pruss_get() succeeds.
+ */
+void pruss_put(struct pruss *pruss)
+{
+ if (IS_ERR_OR_NULL(pruss))
+ return;
+
+ put_device(pruss->dev);
+}
+EXPORT_SYMBOL_GPL(pruss_put);
+
+/**
+ * pruss_request_mem_region() - request a memory resource
+ * @pruss: the pruss instance
+ * @mem_id: the memory resource id
+ * @region: pointer to memory region structure to be filled in
+ *
+ * This function allows a client driver to request a memory resource,
+ * and if successful, will let the client driver own the particular
+ * memory region until released using the pruss_release_mem_region()
+ * API.
+ *
+ * Return: 0 if the requested memory region is available (in which case the
+ * memory region is returned via @region), an error otherwise
+ */
+int pruss_request_mem_region(struct pruss *pruss, enum pruss_mem mem_id,
+ struct pruss_mem_region *region)
+{
+ if (!pruss || !region || mem_id >= PRUSS_MEM_MAX)
+ return -EINVAL;
+
+ mutex_lock(&pruss->lock);
+
+ if (pruss->mem_in_use[mem_id]) {
+ mutex_unlock(&pruss->lock);
+ return -EBUSY;
+ }
+
+ *region = pruss->mem_regions[mem_id];
+ pruss->mem_in_use[mem_id] = region;
+
+ mutex_unlock(&pruss->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pruss_request_mem_region);
+
+/**
+ * pruss_release_mem_region() - release a memory resource
+ * @pruss: the pruss instance
+ * @region: the memory region to release
+ *
+ * This function is the complementary function to
+ * pruss_request_mem_region(), and allows the client drivers to
+ * release back a memory resource.
+ *
+ * Return: 0 on success, an error code otherwise
+ */
+int pruss_release_mem_region(struct pruss *pruss,
+ struct pruss_mem_region *region)
+{
+ int id;
+
+ if (!pruss || !region)
+ return -EINVAL;
+
+ mutex_lock(&pruss->lock);
+
+ /* find out the memory region being released */
+ for (id = 0; id < PRUSS_MEM_MAX; id++) {
+ if (pruss->mem_in_use[id] == region)
+ break;
+ }
+
+ if (id == PRUSS_MEM_MAX) {
+ mutex_unlock(&pruss->lock);
+ return -EINVAL;
+ }
+
+ pruss->mem_in_use[id] = NULL;
+
+ mutex_unlock(&pruss->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pruss_release_mem_region);
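+
+/*
+ * Illustrative sketch, not part of the driver itself, of the request/release
+ * pairing above, assuming a client that already holds a valid @pruss handle;
+ * "fw_data"/"fw_size" are placeholders for whatever the client wants to load
+ * into the Data RAM.
+ *
+ *	struct pruss_mem_region dram0;
+ *	int ret;
+ *
+ *	ret = pruss_request_mem_region(pruss, PRUSS_MEM_DRAM0, &dram0);
+ *	if (ret)
+ *		return ret;
+ *
+ *	memcpy_toio(dram0.va, fw_data, min(fw_size, dram0.size));
+ *
+ *	pruss_release_mem_region(pruss, &dram0);
+ */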
+
+/**
+ * pruss_cfg_get_gpmux() - get the current GPMUX value for a PRU device
+ * @pruss: pruss instance
+ * @pru_id: PRU identifier (0-1)
+ * @mux: pointer to store the current mux value into
+ *
+ * Return: 0 on success, or an error code otherwise
+ */
+int pruss_cfg_get_gpmux(struct pruss *pruss, enum pruss_pru_id pru_id, u8 *mux)
+{
+ int ret;
+ u32 val;
+
+ if (pru_id >= PRUSS_NUM_PRUS || !mux)
+ return -EINVAL;
+
+ ret = pruss_cfg_read(pruss, PRUSS_CFG_GPCFG(pru_id), &val);
+ if (!ret)
+ *mux = (u8)((val & PRUSS_GPCFG_PRU_MUX_SEL_MASK) >>
+ PRUSS_GPCFG_PRU_MUX_SEL_SHIFT);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pruss_cfg_get_gpmux);
+
+/**
+ * pruss_cfg_set_gpmux() - set the GPMUX value for a PRU device
+ * @pruss: pruss instance
+ * @pru_id: PRU identifier (0-1)
+ * @mux: new mux value for PRU
+ *
+ * Return: 0 on success, or an error code otherwise
+ */
+int pruss_cfg_set_gpmux(struct pruss *pruss, enum pruss_pru_id pru_id, u8 mux)
+{
+ if (mux >= PRUSS_GP_MUX_SEL_MAX ||
+ pru_id >= PRUSS_NUM_PRUS)
+ return -EINVAL;
+
+ return pruss_cfg_update(pruss, PRUSS_CFG_GPCFG(pru_id),
+ PRUSS_GPCFG_PRU_MUX_SEL_MASK,
+ (u32)mux << PRUSS_GPCFG_PRU_MUX_SEL_SHIFT);
+}
+EXPORT_SYMBOL_GPL(pruss_cfg_set_gpmux);
+
+/**
+ * pruss_cfg_gpimode() - set the GPI mode of the PRU
+ * @pruss: the pruss instance handle
+ * @pru_id: id of the PRU core within the PRUSS
+ * @mode: GPI mode to set
+ *
+ * Sets the GPI mode for a given PRU by programming the
+ * corresponding PRUSS_CFG_GPCFGx register
+ *
+ * Return: 0 on success, or an error code otherwise
+ */
+int pruss_cfg_gpimode(struct pruss *pruss, enum pruss_pru_id pru_id,
+ enum pruss_gpi_mode mode)
+{
+ if (pru_id >= PRUSS_NUM_PRUS || mode >= PRUSS_GPI_MODE_MAX)
+ return -EINVAL;
+
+ return pruss_cfg_update(pruss, PRUSS_CFG_GPCFG(pru_id),
+ PRUSS_GPCFG_PRU_GPI_MODE_MASK,
+ mode << PRUSS_GPCFG_PRU_GPI_MODE_SHIFT);
+}
+EXPORT_SYMBOL_GPL(pruss_cfg_gpimode);
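+
+/*
+ * Illustrative sketch, not part of the driver itself: a client selecting the
+ * general-purpose GP mux and parallel-capture GPI mode for PRU0. The enum
+ * values are assumed from the public pruss API, and the chosen settings are
+ * examples only, not a recommendation.
+ *
+ *	ret = pruss_cfg_set_gpmux(pruss, PRUSS_PRU0, PRUSS_GP_MUX_SEL_GP);
+ *	if (!ret)
+ *		ret = pruss_cfg_gpimode(pruss, PRUSS_PRU0,
+ *					PRUSS_GPI_MODE_PARALLEL);
+ */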
+
+/**
+ * pruss_cfg_miirt_enable() - Enable/disable MII RT Events
+ * @pruss: the pruss instance
+ * @enable: enable/disable
+ *
+ * Enable/disable the MII RT Events for the PRUSS.
+ *
+ * Return: 0 on success, or an error code otherwise
+ */
+int pruss_cfg_miirt_enable(struct pruss *pruss, bool enable)
+{
+ u32 set = enable ? PRUSS_MII_RT_EVENT_EN : 0;
+
+ return pruss_cfg_update(pruss, PRUSS_CFG_MII_RT,
+ PRUSS_MII_RT_EVENT_EN, set);
+}
+EXPORT_SYMBOL_GPL(pruss_cfg_miirt_enable);
+
+/**
+ * pruss_cfg_xfr_enable() - Enable/disable XIN XOUT shift functionality
+ * @pruss: the pruss instance
+ * @pru_type: PRU core type identifier
+ * @enable: enable/disable
+ *
+ * Return: 0 on success, or an error code otherwise
+ */
+int pruss_cfg_xfr_enable(struct pruss *pruss, enum pru_type pru_type,
+ bool enable)
+{
+ u32 mask, set;
+
+ switch (pru_type) {
+ case PRU_TYPE_PRU:
+ mask = PRUSS_SPP_XFER_SHIFT_EN;
+ break;
+ case PRU_TYPE_RTU:
+ mask = PRUSS_SPP_RTU_XFR_SHIFT_EN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ set = enable ? mask : 0;
+
+ return pruss_cfg_update(pruss, PRUSS_CFG_SPP, mask, set);
+}
+EXPORT_SYMBOL_GPL(pruss_cfg_xfr_enable);
+
+static void pruss_of_free_clk_provider(void *data)
+{
+ struct device_node *clk_mux_np = data;
+
+ of_clk_del_provider(clk_mux_np);
+ of_node_put(clk_mux_np);
+}
+
+static void pruss_clk_unregister_mux(void *data)
+{
+ clk_unregister_mux(data);
+}
+
+static int pruss_clk_mux_setup(struct pruss *pruss, struct clk *clk_mux,
+ char *mux_name, struct device_node *clks_np)
+{
+ struct device_node *clk_mux_np;
+ struct device *dev = pruss->dev;
+ char *clk_mux_name;
+ unsigned int num_parents;
+ const char **parent_names;
+ void __iomem *reg;
+ u32 reg_offset;
+ int ret;
+
+ clk_mux_np = of_get_child_by_name(clks_np, mux_name);
+ if (!clk_mux_np) {
+ dev_err(dev, "%pOF is missing its '%s' node\n", clks_np,
+ mux_name);
+ return -ENODEV;
+ }
+
+ num_parents = of_clk_get_parent_count(clk_mux_np);
+ if (num_parents < 1) {
+ dev_err(dev, "mux-clock %pOF must have parents\n", clk_mux_np);
+ ret = -EINVAL;
+ goto put_clk_mux_np;
+ }
+
+ parent_names = devm_kcalloc(dev, sizeof(*parent_names), num_parents,
+ GFP_KERNEL);
+ if (!parent_names) {
+ ret = -ENOMEM;
+ goto put_clk_mux_np;
+ }
+
+ of_clk_parent_fill(clk_mux_np, parent_names, num_parents);
+
+ clk_mux_name = devm_kasprintf(dev, GFP_KERNEL, "%s.%pOFn",
+ dev_name(dev), clk_mux_np);
+ if (!clk_mux_name) {
+ ret = -ENOMEM;
+ goto put_clk_mux_np;
+ }
+
+ ret = of_property_read_u32(clk_mux_np, "reg", &reg_offset);
+ if (ret)
+ goto put_clk_mux_np;
+
+ reg = pruss->cfg_base + reg_offset;
+
+ clk_mux = clk_register_mux(NULL, clk_mux_name, parent_names,
+ num_parents, 0, reg, 0, 1, 0, NULL);
+ if (IS_ERR(clk_mux)) {
+ ret = PTR_ERR(clk_mux);
+ goto put_clk_mux_np;
+ }
+
+ ret = devm_add_action_or_reset(dev, pruss_clk_unregister_mux, clk_mux);
+ if (ret) {
+ dev_err(dev, "failed to add clkmux unregister action %d", ret);
+ goto put_clk_mux_np;
+ }
+
+ ret = of_clk_add_provider(clk_mux_np, of_clk_src_simple_get, clk_mux);
+ if (ret)
+ goto put_clk_mux_np;
+
+ ret = devm_add_action_or_reset(dev, pruss_of_free_clk_provider,
+ clk_mux_np);
+ if (ret) {
+ dev_err(dev, "failed to add clkmux free action %d", ret);
+ goto put_clk_mux_np;
+ }
+
+ return 0;
+
+put_clk_mux_np:
+ of_node_put(clk_mux_np);
+ return ret;
+}
+
+static int pruss_clk_init(struct pruss *pruss, struct device_node *cfg_node)
+{
+ const struct pruss_private_data *data;
+ struct device_node *clks_np;
+ struct device *dev = pruss->dev;
+ int ret = 0;
+
+ data = of_device_get_match_data(dev);
+
+ clks_np = of_get_child_by_name(cfg_node, "clocks");
+ if (!clks_np) {
+ dev_err(dev, "%pOF is missing its 'clocks' node\n", cfg_node);
+ return -ENODEV;
+ }
+
+ if (data && data->has_core_mux_clock) {
+ ret = pruss_clk_mux_setup(pruss, pruss->core_clk_mux,
+ "coreclk-mux", clks_np);
+ if (ret) {
+ dev_err(dev, "failed to setup coreclk-mux\n");
+ goto put_clks_node;
+ }
+ }
+
+ ret = pruss_clk_mux_setup(pruss, pruss->iep_clk_mux, "iepclk-mux",
+ clks_np);
+ if (ret) {
+ dev_err(dev, "failed to setup iepclk-mux\n");
+ goto put_clks_node;
+ }
+
+put_clks_node:
+ of_node_put(clks_np);
+
+ return ret;
+}
+
+static struct regmap_config regmap_conf = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+static int pruss_cfg_of_init(struct device *dev, struct pruss *pruss)
+{
+ struct device_node *np = dev_of_node(dev);
+ struct device_node *child;
+ struct resource res;
+ int ret;
+
+ child = of_get_child_by_name(np, "cfg");
+ if (!child) {
+		dev_err(dev, "%pOF is missing its 'cfg' node\n", np);
+ return -ENODEV;
+ }
+
+ if (of_address_to_resource(child, 0, &res)) {
+ ret = -ENOMEM;
+ goto node_put;
+ }
+
+ pruss->cfg_base = devm_ioremap(dev, res.start, resource_size(&res));
+ if (!pruss->cfg_base) {
+ ret = -ENOMEM;
+ goto node_put;
+ }
+
+ regmap_conf.name = kasprintf(GFP_KERNEL, "%pOFn@%llx", child,
+ (u64)res.start);
+ regmap_conf.max_register = resource_size(&res) - 4;
+
+ pruss->cfg_regmap = devm_regmap_init_mmio(dev, pruss->cfg_base,
+ &regmap_conf);
+ kfree(regmap_conf.name);
+ if (IS_ERR(pruss->cfg_regmap)) {
+ dev_err(dev, "regmap_init_mmio failed for cfg, ret = %ld\n",
+ PTR_ERR(pruss->cfg_regmap));
+ ret = PTR_ERR(pruss->cfg_regmap);
+ goto node_put;
+ }
+
+ ret = pruss_clk_init(pruss, child);
+ if (ret)
+ dev_err(dev, "pruss_clk_init failed, ret = %d\n", ret);
+
+node_put:
+ of_node_put(child);
+ return ret;
+}
+
+static int pruss_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev_of_node(dev);
+ struct device_node *child;
+ struct pruss *pruss;
+ struct resource res;
+ int ret, i, index;
+ const struct pruss_private_data *data;
+ const char *mem_names[PRUSS_MEM_MAX] = { "dram0", "dram1", "shrdram2" };
+
+ data = of_device_get_match_data(&pdev->dev);
+
+ ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "failed to set the DMA coherent mask");
+ return ret;
+ }
+
+ pruss = devm_kzalloc(dev, sizeof(*pruss), GFP_KERNEL);
+ if (!pruss)
+ return -ENOMEM;
+
+ pruss->dev = dev;
+ mutex_init(&pruss->lock);
+
+ child = of_get_child_by_name(np, "memories");
+ if (!child) {
+		dev_err(dev, "%pOF is missing its 'memories' node\n", np);
+ return -ENODEV;
+ }
+
+ for (i = 0; i < PRUSS_MEM_MAX; i++) {
+ /*
+		 * On AM437x, one of the two PRUSS units doesn't contain
+		 * Shared RAM; skip it.
+ */
+ if (data && data->has_no_sharedram && i == PRUSS_MEM_SHRD_RAM2)
+ continue;
+
+ index = of_property_match_string(child, "reg-names",
+ mem_names[i]);
+ if (index < 0) {
+ of_node_put(child);
+ return index;
+ }
+
+ if (of_address_to_resource(child, index, &res)) {
+ of_node_put(child);
+ return -EINVAL;
+ }
+
+ pruss->mem_regions[i].va = devm_ioremap(dev, res.start,
+ resource_size(&res));
+ if (!pruss->mem_regions[i].va) {
+ dev_err(dev, "failed to parse and map memory resource %d %s\n",
+ i, mem_names[i]);
+ of_node_put(child);
+ return -ENOMEM;
+ }
+ pruss->mem_regions[i].pa = res.start;
+ pruss->mem_regions[i].size = resource_size(&res);
+
+ dev_dbg(dev, "memory %8s: pa %pa size 0x%zx va %pK\n",
+ mem_names[i], &pruss->mem_regions[i].pa,
+ pruss->mem_regions[i].size, pruss->mem_regions[i].va);
+ }
+ of_node_put(child);
+
+ platform_set_drvdata(pdev, pruss);
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ dev_err(dev, "couldn't enable module\n");
+ goto rpm_disable;
+ }
+
+ ret = pruss_cfg_of_init(dev, pruss);
+ if (ret < 0)
+ goto rpm_put;
+
+ ret = devm_of_platform_populate(dev);
+ if (ret) {
+ dev_err(dev, "failed to register child devices\n");
+ goto rpm_put;
+ }
+
+ return 0;
+
+rpm_put:
+ pm_runtime_put_sync(dev);
+rpm_disable:
+ pm_runtime_disable(dev);
+ return ret;
+}
+
+static int pruss_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ devm_of_platform_depopulate(dev);
+
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+
+ return 0;
+}
+
+/* instance-specific driver private data */
+static const struct pruss_private_data am437x_pruss1_data = {
+ .has_no_sharedram = false,
+};
+
+static const struct pruss_private_data am437x_pruss0_data = {
+ .has_no_sharedram = true,
+};
+
+static const struct pruss_private_data am65x_j721e_pruss_data = {
+ .has_core_mux_clock = true,
+};
+
+static const struct of_device_id pruss_of_match[] = {
+ { .compatible = "ti,am3356-pruss" },
+ { .compatible = "ti,am4376-pruss0", .data = &am437x_pruss0_data, },
+ { .compatible = "ti,am4376-pruss1", .data = &am437x_pruss1_data, },
+ { .compatible = "ti,am5728-pruss" },
+ { .compatible = "ti,k2g-pruss" },
+ { .compatible = "ti,am654-icssg", .data = &am65x_j721e_pruss_data, },
+ { .compatible = "ti,j721e-icssg", .data = &am65x_j721e_pruss_data, },
+ { .compatible = "ti,am642-icssg", .data = &am65x_j721e_pruss_data, },
+ { .compatible = "ti,am625-pruss", .data = &am65x_j721e_pruss_data, },
+ {},
+};
+MODULE_DEVICE_TABLE(of, pruss_of_match);
+
+static struct platform_driver pruss_driver = {
+ .driver = {
+ .name = "pruss",
+ .of_match_table = pruss_of_match,
+ },
+ .probe = pruss_probe,
+ .remove = pruss_remove,
+};
+module_platform_driver(pruss_driver);
+
+MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");
+MODULE_DESCRIPTION("PRU-ICSS Subsystem Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/ti/pruss.h b/drivers/soc/ti/pruss.h
new file mode 100644
index 0000000000..6c55987e0e
--- /dev/null
+++ b/drivers/soc/ti/pruss.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * PRU-ICSS Subsystem user interfaces
+ *
+ * Copyright (C) 2015-2023 Texas Instruments Incorporated - http://www.ti.com
+ * MD Danish Anwar <danishanwar@ti.com>
+ */
+
+#ifndef _SOC_TI_PRUSS_H_
+#define _SOC_TI_PRUSS_H_
+
+#include <linux/bits.h>
+#include <linux/regmap.h>
+
+/*
+ * PRU_ICSS_CFG registers
+ * SYSCFG, ISRP, ISP, IESP, IECP, SCRP applicable on AMxxxx devices only
+ */
+#define PRUSS_CFG_REVID 0x00
+#define PRUSS_CFG_SYSCFG 0x04
+#define PRUSS_CFG_GPCFG(x) (0x08 + (x) * 4)
+#define PRUSS_CFG_CGR 0x10
+#define PRUSS_CFG_ISRP 0x14
+#define PRUSS_CFG_ISP 0x18
+#define PRUSS_CFG_IESP 0x1C
+#define PRUSS_CFG_IECP 0x20
+#define PRUSS_CFG_SCRP 0x24
+#define PRUSS_CFG_PMAO 0x28
+#define PRUSS_CFG_MII_RT 0x2C
+#define PRUSS_CFG_IEPCLK 0x30
+#define PRUSS_CFG_SPP 0x34
+#define PRUSS_CFG_PIN_MX 0x40
+
+/* PRUSS_GPCFG register bits */
+#define PRUSS_GPCFG_PRU_GPI_MODE_MASK GENMASK(1, 0)
+#define PRUSS_GPCFG_PRU_GPI_MODE_SHIFT 0
+
+#define PRUSS_GPCFG_PRU_MUX_SEL_SHIFT 26
+#define PRUSS_GPCFG_PRU_MUX_SEL_MASK GENMASK(29, 26)
+
+/* PRUSS_MII_RT register bits */
+#define PRUSS_MII_RT_EVENT_EN BIT(0)
+
+/* PRUSS_SPP register bits */
+#define PRUSS_SPP_XFER_SHIFT_EN BIT(1)
+#define PRUSS_SPP_PRU1_PAD_HP_EN BIT(0)
+#define PRUSS_SPP_RTU_XFR_SHIFT_EN BIT(3)
+
+/**
+ * pruss_cfg_read() - read a PRUSS CFG sub-module register
+ * @pruss: the pruss instance handle
+ * @reg: register offset within the CFG sub-module
+ * @val: pointer to return the value in
+ *
+ * Reads a given register within the PRUSS CFG sub-module and
+ * returns it through the passed-in @val pointer
+ *
+ * Return: 0 on success, or an error code otherwise
+ */
+static inline int pruss_cfg_read(struct pruss *pruss, unsigned int reg, unsigned int *val)
+{
+ if (IS_ERR_OR_NULL(pruss))
+ return -EINVAL;
+
+ return regmap_read(pruss->cfg_regmap, reg, val);
+}
+
+/**
+ * pruss_cfg_update() - configure a PRUSS CFG sub-module register
+ * @pruss: the pruss instance handle
+ * @reg: register offset within the CFG sub-module
+ * @mask: bit mask to use for programming the @val
+ * @val: value to write
+ *
+ * Programs a given register within the PRUSS CFG sub-module
+ *
+ * Return: 0 on success, or an error code otherwise
+ */
+static inline int pruss_cfg_update(struct pruss *pruss, unsigned int reg,
+				   unsigned int mask, unsigned int val)
+{
+ if (IS_ERR_OR_NULL(pruss))
+ return -EINVAL;
+
+ return regmap_update_bits(pruss->cfg_regmap, reg, mask, val);
+}
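+
+/*
+ * Note: pruss_cfg_update() is a masked read-modify-write. An illustrative
+ * (hypothetical) call that changes only the GPI mode field of GPCFG0 while
+ * leaving every other bit of the register untouched:
+ *
+ *	pruss_cfg_update(pruss, PRUSS_CFG_GPCFG(0),
+ *			 PRUSS_GPCFG_PRU_GPI_MODE_MASK,
+ *			 PRUSS_GPI_MODE_MII << PRUSS_GPCFG_PRU_GPI_MODE_SHIFT);
+ */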
+
+#endif /* _SOC_TI_PRUSS_H_ */
diff --git a/drivers/soc/ti/smartreflex.c b/drivers/soc/ti/smartreflex.c
new file mode 100644
index 0000000000..62b2f1464e
--- /dev/null
+++ b/drivers/soc/ti/smartreflex.c
@@ -0,0 +1,1005 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * OMAP SmartReflex Voltage Control
+ *
+ * Author: Thara Gopinath <thara@ti.com>
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ * Thara Gopinath <thara@ti.com>
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Kalle Jokiniemi
+ *
+ * Copyright (C) 2007 Texas Instruments, Inc.
+ * Lesly A M <x0080970@ti.com>
+ */
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/power/smartreflex.h>
+
+#define DRIVER_NAME "smartreflex"
+#define SMARTREFLEX_NAME_LEN 32
+#define NVALUE_NAME_LEN 40
+#define SR_DISABLE_TIMEOUT 200
+
+/* sr_list contains all the instances of smartreflex module */
+static LIST_HEAD(sr_list);
+
+static struct omap_sr_class_data *sr_class;
+static struct dentry *sr_dbg_dir;
+
+static inline void sr_write_reg(struct omap_sr *sr, unsigned offset, u32 value)
+{
+ __raw_writel(value, (sr->base + offset));
+}
+
+static inline void sr_modify_reg(struct omap_sr *sr, unsigned offset, u32 mask,
+ u32 value)
+{
+ u32 reg_val;
+
+	/*
+	 * The SmartReflex error config register is special: some of its
+	 * status bits are cleared by writing a 1 to them. To make sure no
+	 * accidental write of 1 happens to those status bits, clear them
+	 * in the value read back. This means this API does not rewrite
+	 * these bits if they are currently set, but it still allows the
+	 * caller to write them explicitly.
+	 */
+ if (sr->ip_type == SR_TYPE_V1 && offset == ERRCONFIG_V1)
+ mask |= ERRCONFIG_STATUS_V1_MASK;
+ else if (sr->ip_type == SR_TYPE_V2 && offset == ERRCONFIG_V2)
+ mask |= ERRCONFIG_VPBOUNDINTST_V2;
+
+ reg_val = __raw_readl(sr->base + offset);
+ reg_val &= ~mask;
+
+ value &= mask;
+
+ reg_val |= value;
+
+ __raw_writel(reg_val, (sr->base + offset));
+}
+
+static inline u32 sr_read_reg(struct omap_sr *sr, unsigned offset)
+{
+ return __raw_readl(sr->base + offset);
+}
+
+static struct omap_sr *_sr_lookup(struct voltagedomain *voltdm)
+{
+ struct omap_sr *sr_info;
+
+ if (!voltdm) {
+ pr_err("%s: Null voltage domain passed!\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ list_for_each_entry(sr_info, &sr_list, node) {
+ if (voltdm == sr_info->voltdm)
+ return sr_info;
+ }
+
+ return ERR_PTR(-ENODATA);
+}
+
+static irqreturn_t sr_interrupt(int irq, void *data)
+{
+ struct omap_sr *sr_info = data;
+ u32 status = 0;
+
+ switch (sr_info->ip_type) {
+ case SR_TYPE_V1:
+ /* Read the status bits */
+ status = sr_read_reg(sr_info, ERRCONFIG_V1);
+
+ /* Clear them by writing back */
+ sr_write_reg(sr_info, ERRCONFIG_V1, status);
+ break;
+ case SR_TYPE_V2:
+ /* Read the status bits */
+ status = sr_read_reg(sr_info, IRQSTATUS);
+
+ /* Clear them by writing back */
+ sr_write_reg(sr_info, IRQSTATUS, status);
+ break;
+ default:
+ dev_err(&sr_info->pdev->dev, "UNKNOWN IP type %d\n",
+ sr_info->ip_type);
+ return IRQ_NONE;
+ }
+
+ if (sr_class->notify)
+ sr_class->notify(sr_info, status);
+
+ return IRQ_HANDLED;
+}
+
+static void sr_set_clk_length(struct omap_sr *sr)
+{
+ u32 fclk_speed;
+
+ /* Try interconnect target module fck first if it already exists */
+ if (IS_ERR(sr->fck))
+ return;
+
+ fclk_speed = clk_get_rate(sr->fck);
+
+ switch (fclk_speed) {
+ case 12000000:
+ sr->clk_length = SRCLKLENGTH_12MHZ_SYSCLK;
+ break;
+ case 13000000:
+ sr->clk_length = SRCLKLENGTH_13MHZ_SYSCLK;
+ break;
+ case 19200000:
+ sr->clk_length = SRCLKLENGTH_19MHZ_SYSCLK;
+ break;
+ case 26000000:
+ sr->clk_length = SRCLKLENGTH_26MHZ_SYSCLK;
+ break;
+ case 38400000:
+ sr->clk_length = SRCLKLENGTH_38MHZ_SYSCLK;
+ break;
+ default:
+ dev_err(&sr->pdev->dev, "%s: Invalid fclk rate: %d\n",
+ __func__, fclk_speed);
+ break;
+ }
+}
+
+static void sr_start_vddautocomp(struct omap_sr *sr)
+{
+ if (!sr_class || !(sr_class->enable) || !(sr_class->configure)) {
+ dev_warn(&sr->pdev->dev,
+ "%s: smartreflex class driver not registered\n",
+ __func__);
+ return;
+ }
+
+ if (!sr_class->enable(sr))
+ sr->autocomp_active = true;
+}
+
+static void sr_stop_vddautocomp(struct omap_sr *sr)
+{
+ if (!sr_class || !(sr_class->disable)) {
+ dev_warn(&sr->pdev->dev,
+ "%s: smartreflex class driver not registered\n",
+ __func__);
+ return;
+ }
+
+ if (sr->autocomp_active) {
+ sr_class->disable(sr, 1);
+ sr->autocomp_active = false;
+ }
+}
+
+/*
+ * This function handles the initializations which have to be done
+ * only after both the SR device and the SR class driver have been
+ * registered. It is attempted from both the SR class driver
+ * registration path and the SR device initialization path; only one
+ * of those calls will ultimately succeed.
+ *
+ * Currently this function registers an interrupt handler for a
+ * particular SR if the smartreflex class driver is already registered,
+ * has requested interrupts and the SR interrupt line is present.
+ */
+static int sr_late_init(struct omap_sr *sr_info)
+{
+ int ret = 0;
+
+ if (sr_class->notify && sr_class->notify_flags && sr_info->irq) {
+ ret = devm_request_irq(&sr_info->pdev->dev, sr_info->irq,
+ sr_interrupt, 0, sr_info->name, sr_info);
+ if (ret)
+ goto error;
+ disable_irq(sr_info->irq);
+ }
+
+ return ret;
+
+error:
+ list_del(&sr_info->node);
+ dev_err(&sr_info->pdev->dev, "%s: ERROR in registering interrupt handler. Smartreflex will not function as desired\n",
+ __func__);
+
+ return ret;
+}
+
+static void sr_v1_disable(struct omap_sr *sr)
+{
+ int timeout = 0;
+ int errconf_val = ERRCONFIG_MCUACCUMINTST | ERRCONFIG_MCUVALIDINTST |
+ ERRCONFIG_MCUBOUNDINTST;
+
+ /* Enable MCUDisableAcknowledge interrupt */
+ sr_modify_reg(sr, ERRCONFIG_V1,
+ ERRCONFIG_MCUDISACKINTEN, ERRCONFIG_MCUDISACKINTEN);
+
+ /* SRCONFIG - disable SR */
+ sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, 0x0);
+
+ /* Disable all other SR interrupts and clear the status as needed */
+ if (sr_read_reg(sr, ERRCONFIG_V1) & ERRCONFIG_VPBOUNDINTST_V1)
+ errconf_val |= ERRCONFIG_VPBOUNDINTST_V1;
+ sr_modify_reg(sr, ERRCONFIG_V1,
+ (ERRCONFIG_MCUACCUMINTEN | ERRCONFIG_MCUVALIDINTEN |
+ ERRCONFIG_MCUBOUNDINTEN | ERRCONFIG_VPBOUNDINTEN_V1),
+ errconf_val);
+
+ /*
+ * Wait for SR to be disabled.
+ * wait until ERRCONFIG.MCUDISACKINTST = 1. Typical latency is 1us.
+ */
+ sr_test_cond_timeout((sr_read_reg(sr, ERRCONFIG_V1) &
+ ERRCONFIG_MCUDISACKINTST), SR_DISABLE_TIMEOUT,
+ timeout);
+
+ if (timeout >= SR_DISABLE_TIMEOUT)
+		dev_warn(&sr->pdev->dev, "%s: Smartreflex disable timed out\n",
+ __func__);
+
+ /* Disable MCUDisableAcknowledge interrupt & clear pending interrupt */
+ sr_modify_reg(sr, ERRCONFIG_V1, ERRCONFIG_MCUDISACKINTEN,
+ ERRCONFIG_MCUDISACKINTST);
+}
+
+static void sr_v2_disable(struct omap_sr *sr)
+{
+ int timeout = 0;
+
+ /* Enable MCUDisableAcknowledge interrupt */
+ sr_write_reg(sr, IRQENABLE_SET, IRQENABLE_MCUDISABLEACKINT);
+
+ /* SRCONFIG - disable SR */
+ sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, 0x0);
+
+ /*
+ * Disable all other SR interrupts and clear the status
+ * write to status register ONLY on need basis - only if status
+ * is set.
+ */
+ if (sr_read_reg(sr, ERRCONFIG_V2) & ERRCONFIG_VPBOUNDINTST_V2)
+ sr_modify_reg(sr, ERRCONFIG_V2, ERRCONFIG_VPBOUNDINTEN_V2,
+ ERRCONFIG_VPBOUNDINTST_V2);
+ else
+ sr_modify_reg(sr, ERRCONFIG_V2, ERRCONFIG_VPBOUNDINTEN_V2,
+ 0x0);
+ sr_write_reg(sr, IRQENABLE_CLR, (IRQENABLE_MCUACCUMINT |
+ IRQENABLE_MCUVALIDINT |
+ IRQENABLE_MCUBOUNDSINT));
+ sr_write_reg(sr, IRQSTATUS, (IRQSTATUS_MCUACCUMINT |
+ IRQSTATUS_MCVALIDINT |
+ IRQSTATUS_MCBOUNDSINT));
+
+ /*
+ * Wait for SR to be disabled.
+ * wait until IRQSTATUS.MCUDISACKINTST = 1. Typical latency is 1us.
+ */
+ sr_test_cond_timeout((sr_read_reg(sr, IRQSTATUS) &
+ IRQSTATUS_MCUDISABLEACKINT), SR_DISABLE_TIMEOUT,
+ timeout);
+
+ if (timeout >= SR_DISABLE_TIMEOUT)
+		dev_warn(&sr->pdev->dev, "%s: Smartreflex disable timed out\n",
+ __func__);
+
+ /* Disable MCUDisableAcknowledge interrupt & clear pending interrupt */
+ sr_write_reg(sr, IRQENABLE_CLR, IRQENABLE_MCUDISABLEACKINT);
+ sr_write_reg(sr, IRQSTATUS, IRQSTATUS_MCUDISABLEACKINT);
+}
+
+static struct omap_sr_nvalue_table *sr_retrieve_nvalue_row(
+ struct omap_sr *sr, u32 efuse_offs)
+{
+ int i;
+
+ if (!sr->nvalue_table) {
+ dev_warn(&sr->pdev->dev, "%s: Missing ntarget value table\n",
+ __func__);
+ return NULL;
+ }
+
+ for (i = 0; i < sr->nvalue_count; i++) {
+ if (sr->nvalue_table[i].efuse_offs == efuse_offs)
+ return &sr->nvalue_table[i];
+ }
+
+ return NULL;
+}
+
+/* Public Functions */
+
+/**
+ * sr_configure_errgen() - Configures the SmartReflex to perform AVS using the
+ * error generator module.
+ * @sr: SR module to be configured.
+ *
+ * This API is to be called from the smartreflex class driver to
+ * configure the error generator module inside the smartreflex module,
+ * i.e. the SR settings needed when the ERROR module is used.
+ * SR CLASS 3 by default uses only the ERROR module, whereas
+ * SR CLASS 2 can choose between the ERROR module and the MINMAXAVG
+ * module. Returns 0 on success and an error value in case of failure.
+ */
+int sr_configure_errgen(struct omap_sr *sr)
+{
+ u32 sr_config, sr_errconfig, errconfig_offs;
+ u32 vpboundint_en, vpboundint_st;
+ u32 senp_en = 0, senn_en = 0;
+ u8 senp_shift, senn_shift;
+
+ if (!sr) {
+ pr_warn("%s: NULL omap_sr from %pS\n",
+ __func__, (void *)_RET_IP_);
+ return -EINVAL;
+ }
+
+ if (!sr->clk_length)
+ sr_set_clk_length(sr);
+
+ senp_en = sr->senp_mod;
+ senn_en = sr->senn_mod;
+
+ sr_config = (sr->clk_length << SRCONFIG_SRCLKLENGTH_SHIFT) |
+ SRCONFIG_SENENABLE | SRCONFIG_ERRGEN_EN;
+
+ switch (sr->ip_type) {
+ case SR_TYPE_V1:
+ sr_config |= SRCONFIG_DELAYCTRL;
+ senn_shift = SRCONFIG_SENNENABLE_V1_SHIFT;
+ senp_shift = SRCONFIG_SENPENABLE_V1_SHIFT;
+ errconfig_offs = ERRCONFIG_V1;
+ vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V1;
+ vpboundint_st = ERRCONFIG_VPBOUNDINTST_V1;
+ break;
+ case SR_TYPE_V2:
+ senn_shift = SRCONFIG_SENNENABLE_V2_SHIFT;
+ senp_shift = SRCONFIG_SENPENABLE_V2_SHIFT;
+ errconfig_offs = ERRCONFIG_V2;
+ vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V2;
+ vpboundint_st = ERRCONFIG_VPBOUNDINTST_V2;
+ break;
+ default:
+ dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex module without specifying the ip\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ sr_config |= ((senn_en << senn_shift) | (senp_en << senp_shift));
+ sr_write_reg(sr, SRCONFIG, sr_config);
+ sr_errconfig = (sr->err_weight << ERRCONFIG_ERRWEIGHT_SHIFT) |
+ (sr->err_maxlimit << ERRCONFIG_ERRMAXLIMIT_SHIFT) |
+ (sr->err_minlimit << ERRCONFIG_ERRMINLIMIT_SHIFT);
+ sr_modify_reg(sr, errconfig_offs, (SR_ERRWEIGHT_MASK |
+ SR_ERRMAXLIMIT_MASK | SR_ERRMINLIMIT_MASK),
+ sr_errconfig);
+
+ /* Enabling the interrupts if the ERROR module is used */
+ sr_modify_reg(sr, errconfig_offs, (vpboundint_en | vpboundint_st),
+ vpboundint_en);
+
+ return 0;
+}
+
+/**
+ * sr_disable_errgen() - Disables SmartReflex AVS module's errgen component
+ * @sr: SR module to be configured.
+ *
+ * This API is to be called from the smartreflex class driver to
+ * disable the error generator module inside the smartreflex module.
+ *
+ * Returns 0 on success and error value in case of failure.
+ */
+int sr_disable_errgen(struct omap_sr *sr)
+{
+ u32 errconfig_offs;
+ u32 vpboundint_en, vpboundint_st;
+
+ if (!sr) {
+ pr_warn("%s: NULL omap_sr from %pS\n",
+ __func__, (void *)_RET_IP_);
+ return -EINVAL;
+ }
+
+ switch (sr->ip_type) {
+ case SR_TYPE_V1:
+ errconfig_offs = ERRCONFIG_V1;
+ vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V1;
+ vpboundint_st = ERRCONFIG_VPBOUNDINTST_V1;
+ break;
+ case SR_TYPE_V2:
+ errconfig_offs = ERRCONFIG_V2;
+ vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V2;
+ vpboundint_st = ERRCONFIG_VPBOUNDINTST_V2;
+ break;
+ default:
+ dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex module without specifying the ip\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Disable the Sensor and errorgen */
+ sr_modify_reg(sr, SRCONFIG, SRCONFIG_SENENABLE | SRCONFIG_ERRGEN_EN, 0);
+
+ /*
+	 * Disable the interrupts of the ERROR module.
+	 * NOTE: sr_modify_reg() is a read-modify-write, so the implicit
+	 * OCP barrier which is required here is present - sequencing is
+	 * critical at this point (vpboundint must be disabled only after
+	 * errgen has been disabled).
+ */
+ sr_modify_reg(sr, errconfig_offs, vpboundint_en | vpboundint_st, 0);
+
+ return 0;
+}
+
+/**
+ * sr_configure_minmax() - Configures the SmartReflex to perform AVS using the
+ * minmaxavg module.
+ * @sr: SR module to be configured.
+ *
+ * This API is to be called from the smartreflex class driver to
+ * configure the minmaxavg module inside the smartreflex module,
+ * i.e. the SR settings needed when the MINMAXAVG module is used.
+ * SR CLASS 3 by default uses only the ERROR module, whereas
+ * SR CLASS 2 can choose between the ERROR module and the MINMAXAVG
+ * module. Returns 0 on success and an error value in case of failure.
+ */
+int sr_configure_minmax(struct omap_sr *sr)
+{
+ u32 sr_config, sr_avgwt;
+ u32 senp_en = 0, senn_en = 0;
+ u8 senp_shift, senn_shift;
+
+ if (!sr) {
+ pr_warn("%s: NULL omap_sr from %pS\n",
+ __func__, (void *)_RET_IP_);
+ return -EINVAL;
+ }
+
+ if (!sr->clk_length)
+ sr_set_clk_length(sr);
+
+ senp_en = sr->senp_mod;
+ senn_en = sr->senn_mod;
+
+ sr_config = (sr->clk_length << SRCONFIG_SRCLKLENGTH_SHIFT) |
+ SRCONFIG_SENENABLE |
+ (sr->accum_data << SRCONFIG_ACCUMDATA_SHIFT);
+
+ switch (sr->ip_type) {
+ case SR_TYPE_V1:
+ sr_config |= SRCONFIG_DELAYCTRL;
+ senn_shift = SRCONFIG_SENNENABLE_V1_SHIFT;
+ senp_shift = SRCONFIG_SENPENABLE_V1_SHIFT;
+ break;
+ case SR_TYPE_V2:
+ senn_shift = SRCONFIG_SENNENABLE_V2_SHIFT;
+ senp_shift = SRCONFIG_SENPENABLE_V2_SHIFT;
+ break;
+ default:
+ dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex module without specifying the ip\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ sr_config |= ((senn_en << senn_shift) | (senp_en << senp_shift));
+ sr_write_reg(sr, SRCONFIG, sr_config);
+ sr_avgwt = (sr->senp_avgweight << AVGWEIGHT_SENPAVGWEIGHT_SHIFT) |
+ (sr->senn_avgweight << AVGWEIGHT_SENNAVGWEIGHT_SHIFT);
+ sr_write_reg(sr, AVGWEIGHT, sr_avgwt);
+
+ /*
+ * Enabling the interrupts if MINMAXAVG module is used.
+ * TODO: check if all the interrupts are mandatory
+ */
+ switch (sr->ip_type) {
+ case SR_TYPE_V1:
+ sr_modify_reg(sr, ERRCONFIG_V1,
+ (ERRCONFIG_MCUACCUMINTEN | ERRCONFIG_MCUVALIDINTEN |
+ ERRCONFIG_MCUBOUNDINTEN),
+ (ERRCONFIG_MCUACCUMINTEN | ERRCONFIG_MCUACCUMINTST |
+ ERRCONFIG_MCUVALIDINTEN | ERRCONFIG_MCUVALIDINTST |
+ ERRCONFIG_MCUBOUNDINTEN | ERRCONFIG_MCUBOUNDINTST));
+ break;
+ case SR_TYPE_V2:
+ sr_write_reg(sr, IRQSTATUS,
+ IRQSTATUS_MCUACCUMINT | IRQSTATUS_MCVALIDINT |
+ IRQSTATUS_MCBOUNDSINT | IRQSTATUS_MCUDISABLEACKINT);
+ sr_write_reg(sr, IRQENABLE_SET,
+ IRQENABLE_MCUACCUMINT | IRQENABLE_MCUVALIDINT |
+ IRQENABLE_MCUBOUNDSINT | IRQENABLE_MCUDISABLEACKINT);
+ break;
+ default:
+ dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex module without specifying the ip\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * sr_enable() - Enables the smartreflex module.
+ * @sr: pointer to the SR module to be configured.
+ * @volt: The voltage at which the Voltage domain associated with
+ * the smartreflex module is operating at.
+ * This is required only to program the correct Ntarget value.
+ *
+ * This API is to be called from the smartreflex class driver to
+ * enable a smartreflex module. Returns 0 on success. Returns error
+ * value if the voltage passed is wrong or if ntarget value is wrong.
+ */
+int sr_enable(struct omap_sr *sr, unsigned long volt)
+{
+ struct omap_volt_data *volt_data;
+ struct omap_sr_nvalue_table *nvalue_row;
+ int ret;
+
+ if (!sr) {
+ pr_warn("%s: NULL omap_sr from %pS\n",
+ __func__, (void *)_RET_IP_);
+ return -EINVAL;
+ }
+
+ volt_data = omap_voltage_get_voltdata(sr->voltdm, volt);
+
+ if (IS_ERR(volt_data)) {
+ dev_warn(&sr->pdev->dev, "%s: Unable to get voltage table for nominal voltage %ld\n",
+ __func__, volt);
+ return PTR_ERR(volt_data);
+ }
+
+ nvalue_row = sr_retrieve_nvalue_row(sr, volt_data->sr_efuse_offs);
+
+ if (!nvalue_row) {
+ dev_warn(&sr->pdev->dev, "%s: failure getting SR data for this voltage %ld\n",
+ __func__, volt);
+ return -ENODATA;
+ }
+
+ /* errminlimit is opp dependent and hence linked to voltage */
+ sr->err_minlimit = nvalue_row->errminlimit;
+
+ clk_enable(sr->fck);
+
+ /* Check if SR is already enabled. If yes do nothing */
+ if (sr_read_reg(sr, SRCONFIG) & SRCONFIG_SRENABLE)
+ goto out_enabled;
+
+ /* Configure SR */
+ ret = sr_class->configure(sr);
+ if (ret)
+ goto out_enabled;
+
+ sr_write_reg(sr, NVALUERECIPROCAL, nvalue_row->nvalue);
+
+ /* SRCONFIG - enable SR */
+ sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, SRCONFIG_SRENABLE);
+
+out_enabled:
+ sr->enabled = 1;
+
+ return 0;
+}
+
+/**
+ * sr_disable() - Disables the smartreflex module.
+ * @sr: pointer to the SR module to be configured.
+ *
+ * This API is to be called from the smartreflex class driver to
+ * disable a smartreflex module.
+ */
+void sr_disable(struct omap_sr *sr)
+{
+ if (!sr) {
+ pr_warn("%s: NULL omap_sr from %pS\n",
+ __func__, (void *)_RET_IP_);
+ return;
+ }
+
+ /* Check if SR clocks are already disabled. If yes do nothing */
+ if (!sr->enabled)
+ return;
+
+ /*
+	 * Disable SR only if it is indeed enabled. Else just
+ * disable the clocks.
+ */
+ if (sr_read_reg(sr, SRCONFIG) & SRCONFIG_SRENABLE) {
+ switch (sr->ip_type) {
+ case SR_TYPE_V1:
+ sr_v1_disable(sr);
+ break;
+ case SR_TYPE_V2:
+ sr_v2_disable(sr);
+ break;
+ default:
+ dev_err(&sr->pdev->dev, "UNKNOWN IP type %d\n",
+ sr->ip_type);
+ }
+ }
+
+ clk_disable(sr->fck);
+ sr->enabled = 0;
+}
+
+/**
+ * sr_register_class() - API to register a smartreflex class parameters.
+ * @class_data: The structure containing various sr class specific data.
+ *
+ * This API is to be called by the smartreflex class driver to register itself
+ * with the smartreflex driver during init. Returns 0 on success else the
+ * error value.
+ */
+int sr_register_class(struct omap_sr_class_data *class_data)
+{
+ struct omap_sr *sr_info;
+
+ if (!class_data) {
+		pr_warn("%s: Smartreflex class data passed is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (sr_class) {
+ pr_warn("%s: Smartreflex class driver already registered\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ sr_class = class_data;
+
+ /*
+ * Call into late init to do initializations that require
+	 * both sr driver and sr class driver to be initialized.
+ */
+ list_for_each_entry(sr_info, &sr_list, node)
+ sr_late_init(sr_info);
+
+ return 0;
+}
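+
+/*
+ * Illustrative sketch, not part of the driver itself, of a minimal class-3
+ * style registration. The callback names and bodies are assumptions; a real
+ * class driver (such as the OMAP class-3 driver) also couples enable/disable
+ * with the voltage processor and queries the current nominal voltage instead
+ * of the 1.2 V placeholder used here.
+ *
+ *	static int my_class_enable(struct omap_sr *sr)
+ *	{
+ *		return sr_enable(sr, 1200000);
+ *	}
+ *
+ *	static int my_class_disable(struct omap_sr *sr, int is_volt_reset)
+ *	{
+ *		sr_disable(sr);
+ *		return 0;
+ *	}
+ *
+ *	static struct omap_sr_class_data my_class = {
+ *		.enable		= my_class_enable,
+ *		.disable	= my_class_disable,
+ *		.configure	= sr_configure_errgen,
+ *		.class_type	= SR_CLASS3,
+ *	};
+ *
+ *	sr_register_class(&my_class);
+ */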
+
+/**
+ * omap_sr_enable() - API to enable SR clocks and to call into the
+ * registered smartreflex class enable API.
+ * @voltdm: VDD pointer to which the SR module to be configured belongs to.
+ *
+ * This API is to be called from the kernel in order to enable
+ * a particular smartreflex module. This API will do the initial
+ * configurations to turn on the smartreflex module and in turn call
+ * into the registered smartreflex class enable API.
+ */
+void omap_sr_enable(struct voltagedomain *voltdm)
+{
+ struct omap_sr *sr = _sr_lookup(voltdm);
+
+ if (IS_ERR(sr)) {
+ pr_warn("%s: omap_sr struct for voltdm not found\n", __func__);
+ return;
+ }
+
+ if (!sr->autocomp_active)
+ return;
+
+ if (!sr_class || !(sr_class->enable) || !(sr_class->configure)) {
+ dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not registered\n",
+ __func__);
+ return;
+ }
+
+ sr_class->enable(sr);
+}
+
+/**
+ * omap_sr_disable() - API to disable SR without resetting the voltage
+ * processor voltage
+ * @voltdm: VDD pointer to which the SR module to be configured belongs to.
+ *
+ * This API is to be called from the kernel in order to disable
+ * a particular smartreflex module. This API will in turn call
+ * into the registered smartreflex class disable API. This API will tell
+ * the smartreflex class disable not to reset the VP voltage after
+ * disabling smartreflex.
+ */
+void omap_sr_disable(struct voltagedomain *voltdm)
+{
+ struct omap_sr *sr = _sr_lookup(voltdm);
+
+ if (IS_ERR(sr)) {
+ pr_warn("%s: omap_sr struct for voltdm not found\n", __func__);
+ return;
+ }
+
+ if (!sr->autocomp_active)
+ return;
+
+ if (!sr_class || !(sr_class->disable)) {
+ dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not registered\n",
+ __func__);
+ return;
+ }
+
+ sr_class->disable(sr, 0);
+}
+
+/**
+ * omap_sr_disable_reset_volt() - API to disable SR and reset the
+ * voltage processor voltage
+ * @voltdm: VDD pointer to which the SR module to be configured belongs to.
+ *
+ * This API is to be called from the kernel in order to disable
+ * a particular smartreflex module. This API will in turn call
+ * into the registered smartreflex class disable API. This API will tell
+ * the smartreflex class disable to reset the VP voltage after
+ * disabling smartreflex.
+ */
+void omap_sr_disable_reset_volt(struct voltagedomain *voltdm)
+{
+ struct omap_sr *sr = _sr_lookup(voltdm);
+
+ if (IS_ERR(sr)) {
+ pr_warn("%s: omap_sr struct for voltdm not found\n", __func__);
+ return;
+ }
+
+ if (!sr->autocomp_active)
+ return;
+
+ if (!sr_class || !(sr_class->disable)) {
+ dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not registered\n",
+ __func__);
+ return;
+ }
+
+ sr_class->disable(sr, 1);
+}
+
+/* PM Debug FS entries to enable and disable smartreflex. */
+static int omap_sr_autocomp_show(void *data, u64 *val)
+{
+ struct omap_sr *sr_info = data;
+
+ if (!sr_info) {
+ pr_warn("%s: omap_sr struct not found\n", __func__);
+ return -EINVAL;
+ }
+
+ *val = sr_info->autocomp_active;
+
+ return 0;
+}
+
+static int omap_sr_autocomp_store(void *data, u64 val)
+{
+ struct omap_sr *sr_info = data;
+
+ if (!sr_info) {
+ pr_warn("%s: omap_sr struct not found\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Sanity check */
+ if (val > 1) {
+ pr_warn("%s: Invalid argument %lld\n", __func__, val);
+ return -EINVAL;
+ }
+
+ /* control enable/disable only if there is a delta in value */
+ if (sr_info->autocomp_active != val) {
+ if (!val)
+ sr_stop_vddautocomp(sr_info);
+ else
+ sr_start_vddautocomp(sr_info);
+ }
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(pm_sr_fops, omap_sr_autocomp_show,
+ omap_sr_autocomp_store, "%llu\n");
+
+static int omap_sr_probe(struct platform_device *pdev)
+{
+ struct omap_sr *sr_info;
+ struct omap_sr_data *pdata = pdev->dev.platform_data;
+ struct dentry *nvalue_dir;
+ int i, ret = 0;
+
+ sr_info = devm_kzalloc(&pdev->dev, sizeof(struct omap_sr), GFP_KERNEL);
+ if (!sr_info)
+ return -ENOMEM;
+
+ sr_info->name = devm_kzalloc(&pdev->dev,
+ SMARTREFLEX_NAME_LEN, GFP_KERNEL);
+ if (!sr_info->name)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, sr_info);
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "%s: platform data missing\n", __func__);
+ return -EINVAL;
+ }
+
+ sr_info->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(sr_info->base))
+ return PTR_ERR(sr_info->base);
+
+ ret = platform_get_irq_optional(pdev, 0);
+ if (ret < 0 && ret != -ENXIO)
+ return dev_err_probe(&pdev->dev, ret, "failed to get IRQ resource\n");
+ if (ret > 0)
+ sr_info->irq = ret;
+
+ sr_info->fck = devm_clk_get(pdev->dev.parent, "fck");
+ if (IS_ERR(sr_info->fck))
+ return PTR_ERR(sr_info->fck);
+ clk_prepare(sr_info->fck);
+
+ pm_runtime_enable(&pdev->dev);
+
+ snprintf(sr_info->name, SMARTREFLEX_NAME_LEN, "%s", pdata->name);
+
+ sr_info->pdev = pdev;
+ sr_info->srid = pdev->id;
+ sr_info->voltdm = pdata->voltdm;
+ sr_info->nvalue_table = pdata->nvalue_table;
+ sr_info->nvalue_count = pdata->nvalue_count;
+ sr_info->senn_mod = pdata->senn_mod;
+ sr_info->senp_mod = pdata->senp_mod;
+ sr_info->err_weight = pdata->err_weight;
+ sr_info->err_maxlimit = pdata->err_maxlimit;
+ sr_info->accum_data = pdata->accum_data;
+ sr_info->senn_avgweight = pdata->senn_avgweight;
+ sr_info->senp_avgweight = pdata->senp_avgweight;
+ sr_info->autocomp_active = false;
+ sr_info->ip_type = pdata->ip_type;
+
+ sr_set_clk_length(sr_info);
+
+ list_add(&sr_info->node, &sr_list);
+
+ /*
+ * Call into late init to do initializations that require
+	 * both sr driver and sr class driver to be initialized.
+ */
+ if (sr_class) {
+ ret = sr_late_init(sr_info);
+ if (ret) {
+ pr_warn("%s: Error in SR late init\n", __func__);
+ goto err_list_del;
+ }
+ }
+
+ dev_info(&pdev->dev, "%s: SmartReflex driver initialized\n", __func__);
+ if (!sr_dbg_dir)
+ sr_dbg_dir = debugfs_create_dir("smartreflex", NULL);
+
+ sr_info->dbg_dir = debugfs_create_dir(sr_info->name, sr_dbg_dir);
+
+ debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, sr_info->dbg_dir,
+ sr_info, &pm_sr_fops);
+ debugfs_create_x32("errweight", S_IRUGO, sr_info->dbg_dir,
+ &sr_info->err_weight);
+ debugfs_create_x32("errmaxlimit", S_IRUGO, sr_info->dbg_dir,
+ &sr_info->err_maxlimit);
+
+ nvalue_dir = debugfs_create_dir("nvalue", sr_info->dbg_dir);
+
+ if (sr_info->nvalue_count == 0 || !sr_info->nvalue_table) {
+ dev_warn(&pdev->dev, "%s: %s: No Voltage table for the corresponding vdd. Cannot create debugfs entries for n-values\n",
+ __func__, sr_info->name);
+
+ ret = -ENODATA;
+ goto err_debugfs;
+ }
+
+ for (i = 0; i < sr_info->nvalue_count; i++) {
+ char name[NVALUE_NAME_LEN + 1];
+
+ snprintf(name, sizeof(name), "volt_%lu",
+ sr_info->nvalue_table[i].volt_nominal);
+ debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir,
+ &(sr_info->nvalue_table[i].nvalue));
+ snprintf(name, sizeof(name), "errminlimit_%lu",
+ sr_info->nvalue_table[i].volt_nominal);
+ debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir,
+ &(sr_info->nvalue_table[i].errminlimit));
+
+ }
+
+ return 0;
+
+err_debugfs:
+ debugfs_remove_recursive(sr_info->dbg_dir);
+err_list_del:
+ pm_runtime_disable(&pdev->dev);
+ list_del(&sr_info->node);
+ clk_unprepare(sr_info->fck);
+
+ return ret;
+}
+
+static int omap_sr_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct omap_sr *sr_info = platform_get_drvdata(pdev);
+
+ if (sr_info->autocomp_active)
+ sr_stop_vddautocomp(sr_info);
+ debugfs_remove_recursive(sr_info->dbg_dir);
+
+ pm_runtime_disable(dev);
+ clk_unprepare(sr_info->fck);
+ list_del(&sr_info->node);
+ return 0;
+}
+
+static void omap_sr_shutdown(struct platform_device *pdev)
+{
+ struct omap_sr *sr_info = platform_get_drvdata(pdev);
+
+ if (sr_info->autocomp_active)
+ sr_stop_vddautocomp(sr_info);
+
+ return;
+}
+
+static const struct of_device_id omap_sr_match[] = {
+ { .compatible = "ti,omap3-smartreflex-core", },
+ { .compatible = "ti,omap3-smartreflex-mpu-iva", },
+ { .compatible = "ti,omap4-smartreflex-core", },
+ { .compatible = "ti,omap4-smartreflex-mpu", },
+ { .compatible = "ti,omap4-smartreflex-iva", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, omap_sr_match);
+
+static struct platform_driver smartreflex_driver = {
+ .probe = omap_sr_probe,
+ .remove = omap_sr_remove,
+ .shutdown = omap_sr_shutdown,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = omap_sr_match,
+ },
+};
+
+static int __init sr_init(void)
+{
+ int ret = 0;
+
+ ret = platform_driver_register(&smartreflex_driver);
+ if (ret) {
+ pr_err("%s: platform driver register failed for SR\n",
+ __func__);
+ return ret;
+ }
+
+ return 0;
+}
+late_initcall(sr_init);
+
+static void __exit sr_exit(void)
+{
+ platform_driver_unregister(&smartreflex_driver);
+}
+module_exit(sr_exit);
+
+MODULE_DESCRIPTION("OMAP Smartreflex Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_AUTHOR("Texas Instruments Inc");
diff --git a/drivers/soc/ti/ti_sci_inta_msi.c b/drivers/soc/ti/ti_sci_inta_msi.c
new file mode 100644
index 0000000000..c363645221
--- /dev/null
+++ b/drivers/soc/ti/ti_sci_inta_msi.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments' K3 Interrupt Aggregator MSI bus
+ *
+ * Copyright (C) 2018-2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Lokesh Vutla <lokeshvutla@ti.com>
+ */
+
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/soc/ti/ti_sci_inta_msi.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+
+static void ti_sci_inta_msi_write_msg(struct irq_data *data,
+ struct msi_msg *msg)
+{
+ /* Nothing to do */
+}
+
+static void ti_sci_inta_msi_compose_msi_msg(struct irq_data *data,
+ struct msi_msg *msg)
+{
+ /* Nothing to do */
+}
+
+static void ti_sci_inta_msi_update_chip_ops(struct msi_domain_info *info)
+{
+ struct irq_chip *chip = info->chip;
+
+ if (WARN_ON(!chip))
+ return;
+
+ chip->irq_request_resources = irq_chip_request_resources_parent;
+ chip->irq_release_resources = irq_chip_release_resources_parent;
+ chip->irq_compose_msi_msg = ti_sci_inta_msi_compose_msi_msg;
+ chip->irq_write_msi_msg = ti_sci_inta_msi_write_msg;
+ chip->irq_set_type = irq_chip_set_type_parent;
+ chip->irq_unmask = irq_chip_unmask_parent;
+ chip->irq_mask = irq_chip_mask_parent;
+ chip->irq_ack = irq_chip_ack_parent;
+}
+
+struct irq_domain *ti_sci_inta_msi_create_irq_domain(struct fwnode_handle *fwnode,
+ struct msi_domain_info *info,
+ struct irq_domain *parent)
+{
+ struct irq_domain *domain;
+
+ ti_sci_inta_msi_update_chip_ops(info);
+ info->flags |= MSI_FLAG_FREE_MSI_DESCS;
+
+ domain = msi_create_irq_domain(fwnode, info, parent);
+ if (domain)
+ irq_domain_update_bus_token(domain, DOMAIN_BUS_TI_SCI_INTA_MSI);
+
+ return domain;
+}
+EXPORT_SYMBOL_GPL(ti_sci_inta_msi_create_irq_domain);
+
+static int ti_sci_inta_msi_alloc_descs(struct device *dev,
+ struct ti_sci_resource *res)
+{
+ struct msi_desc msi_desc;
+ int set, i, count = 0;
+
+ memset(&msi_desc, 0, sizeof(msi_desc));
+ msi_desc.nvec_used = 1;
+
+ for (set = 0; set < res->sets; set++) {
+ for (i = 0; i < res->desc[set].num; i++, count++) {
+ msi_desc.msi_index = res->desc[set].start + i;
+ if (msi_insert_msi_desc(dev, &msi_desc))
+ goto fail;
+ }
+
+ for (i = 0; i < res->desc[set].num_sec; i++, count++) {
+ msi_desc.msi_index = res->desc[set].start_sec + i;
+ if (msi_insert_msi_desc(dev, &msi_desc))
+ goto fail;
+ }
+ }
+ return count;
+fail:
+ msi_free_msi_descs(dev);
+ return -ENOMEM;
+}
+
+int ti_sci_inta_msi_domain_alloc_irqs(struct device *dev,
+ struct ti_sci_resource *res)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ int ret, nvec;
+
+ if (pdev->id < 0)
+ return -ENODEV;
+
+ ret = msi_setup_device_data(dev);
+ if (ret)
+ return ret;
+
+ msi_lock_descs(dev);
+ nvec = ti_sci_inta_msi_alloc_descs(dev, res);
+ if (nvec <= 0) {
+ ret = nvec;
+ goto unlock;
+ }
+
+ /* Use alloc ALL as it's unclear whether there are gaps in the indices */
+ ret = msi_domain_alloc_irqs_all_locked(dev, MSI_DEFAULT_DOMAIN, nvec);
+ if (ret)
+ dev_err(dev, "Failed to allocate IRQs %d\n", ret);
+unlock:
+ msi_unlock_descs(dev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ti_sci_inta_msi_domain_alloc_irqs);
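+
+/*
+ * Illustrative sketch, not part of the driver itself, of the expected call
+ * sequence for a K3 client: create an MSI domain on top of the Interrupt
+ * Aggregator parent domain, attach it to the client device, then allocate
+ * one interrupt per TI-SCI resource descriptor. "np", "msi_info",
+ * "inta_domain" and "res" are assumed to be set up by the caller.
+ *
+ *	struct irq_domain *d;
+ *
+ *	d = ti_sci_inta_msi_create_irq_domain(of_node_to_fwnode(np),
+ *					      &msi_info, inta_domain);
+ *	if (!d)
+ *		return -ENODEV;
+ *
+ *	dev_set_msi_domain(dev, d);
+ *	ret = ti_sci_inta_msi_domain_alloc_irqs(dev, res);
+ */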
diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c
new file mode 100644
index 0000000000..3aff106fc1
--- /dev/null
+++ b/drivers/soc/ti/wkup_m3_ipc.c
@@ -0,0 +1,775 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMx3 Wkup M3 IPC driver
+ *
+ * Copyright (C) 2015 Texas Instruments, Inc.
+ *
+ * Dave Gerlach <d-gerlach@ti.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/omap-mailbox.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/suspend.h>
+#include <linux/wkup_m3_ipc.h>
+
+#define AM33XX_CTRL_IPC_REG_COUNT 0x8
+#define AM33XX_CTRL_IPC_REG_OFFSET(m) (0x4 + 4 * (m))
+
+/* AM33XX M3_TXEV_EOI register */
+#define AM33XX_CONTROL_M3_TXEV_EOI 0x00
+
+#define AM33XX_M3_TXEV_ACK (0x1 << 0)
+#define AM33XX_M3_TXEV_ENABLE (0x0 << 0)
+
+#define IPC_CMD_DS0 0x4
+#define IPC_CMD_STANDBY 0xc
+#define IPC_CMD_IDLE 0x10
+#define IPC_CMD_RESET 0xe
+#define DS_IPC_DEFAULT 0xffffffff
+#define M3_VERSION_UNKNOWN 0x0000ffff
+#define M3_BASELINE_VERSION 0x191
+#define M3_STATUS_RESP_MASK (0xffff << 16)
+#define M3_FW_VERSION_MASK 0xffff
+#define M3_WAKE_SRC_MASK 0xff
+
+#define IPC_MEM_TYPE_SHIFT (0x0)
+#define IPC_MEM_TYPE_MASK (0x7 << 0)
+#define IPC_VTT_STAT_SHIFT (0x3)
+#define IPC_VTT_STAT_MASK (0x1 << 3)
+#define IPC_VTT_GPIO_PIN_SHIFT (0x4)
+#define IPC_VTT_GPIO_PIN_MASK (0x3f << 4)
+#define IPC_IO_ISOLATION_STAT_SHIFT (10)
+#define IPC_IO_ISOLATION_STAT_MASK (0x1 << 10)
+
+#define IPC_DBG_HALT_SHIFT (11)
+#define IPC_DBG_HALT_MASK (0x1 << 11)
+
+#define M3_STATE_UNKNOWN 0
+#define M3_STATE_RESET 1
+#define M3_STATE_INITED 2
+#define M3_STATE_MSG_FOR_LP 3
+#define M3_STATE_MSG_FOR_RESET 4
+
+#define WKUP_M3_SD_FW_MAGIC 0x570C
+
+#define WKUP_M3_DMEM_START 0x80000
+#define WKUP_M3_AUXDATA_OFFSET 0x1000
+#define WKUP_M3_AUXDATA_SIZE 0xFF
+
+static struct wkup_m3_ipc *m3_ipc_state;
+
+static const struct wkup_m3_wakeup_src wakeups[] = {
+ {.irq_nr = 16, .src = "PRCM"},
+ {.irq_nr = 35, .src = "USB0_PHY"},
+ {.irq_nr = 36, .src = "USB1_PHY"},
+ {.irq_nr = 40, .src = "I2C0"},
+ {.irq_nr = 41, .src = "RTC Timer"},
+ {.irq_nr = 42, .src = "RTC Alarm"},
+ {.irq_nr = 43, .src = "Timer0"},
+ {.irq_nr = 44, .src = "Timer1"},
+ {.irq_nr = 45, .src = "UART"},
+ {.irq_nr = 46, .src = "GPIO0"},
+ {.irq_nr = 48, .src = "MPU_WAKE"},
+ {.irq_nr = 49, .src = "WDT0"},
+ {.irq_nr = 50, .src = "WDT1"},
+ {.irq_nr = 51, .src = "ADC_TSC"},
+ {.irq_nr = 0, .src = "Unknown"},
+};
+
+/**
+ * wkup_m3_copy_aux_data - Copy auxiliary data to special region of m3 dmem
+ * @m3_ipc: Pointer to wkup_m3_ipc context
+ * @data: pointer to data
+ * @sz: size of data to copy (limit 256 bytes)
+ *
+ * Copies any additional blob of data to the wkup_m3 dmem to be used by the
+ * firmware
+ */
+static unsigned long wkup_m3_copy_aux_data(struct wkup_m3_ipc *m3_ipc,
+ const void *data, int sz)
+{
+ unsigned long aux_data_dev_addr;
+ void *aux_data_addr;
+
+ aux_data_dev_addr = WKUP_M3_DMEM_START + WKUP_M3_AUXDATA_OFFSET;
+ aux_data_addr = rproc_da_to_va(m3_ipc->rproc,
+ aux_data_dev_addr,
+ WKUP_M3_AUXDATA_SIZE,
+ NULL);
+ memcpy(aux_data_addr, data, sz);
+
+ return WKUP_M3_AUXDATA_OFFSET;
+}
+
+static void wkup_m3_scale_data_fw_cb(const struct firmware *fw, void *context)
+{
+ unsigned long val, aux_base;
+ struct wkup_m3_scale_data_header hdr;
+ struct wkup_m3_ipc *m3_ipc = context;
+ struct device *dev = m3_ipc->dev;
+
+ if (!fw) {
+ dev_err(dev, "Voltage scale fw name given but file missing.\n");
+ return;
+ }
+
+ memcpy(&hdr, fw->data, sizeof(hdr));
+
+ if (hdr.magic != WKUP_M3_SD_FW_MAGIC) {
+ dev_err(dev, "PM: Voltage Scale Data binary does not appear valid.\n");
+ goto release_sd_fw;
+ }
+
+ aux_base = wkup_m3_copy_aux_data(m3_ipc, fw->data + sizeof(hdr),
+ fw->size - sizeof(hdr));
+
+ val = (aux_base + hdr.sleep_offset);
+ val |= ((aux_base + hdr.wake_offset) << 16);
+
+ m3_ipc->volt_scale_offsets = val;
+
+release_sd_fw:
+ release_firmware(fw);
+}
+
+static int wkup_m3_init_scale_data(struct wkup_m3_ipc *m3_ipc,
+ struct device *dev)
+{
+ int ret = 0;
+
+ /*
+	 * If no name is provided, the user has already been warned and PM
+	 * will still work, so return 0.
+ */
+
+ if (!m3_ipc->sd_fw_name)
+ return ret;
+
+ ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
+ m3_ipc->sd_fw_name, dev, GFP_ATOMIC,
+ m3_ipc, wkup_m3_scale_data_fw_cb);
+
+ return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void wkup_m3_set_halt_late(bool enabled)
+{
+ if (enabled)
+ m3_ipc_state->halt = (1 << IPC_DBG_HALT_SHIFT);
+ else
+ m3_ipc_state->halt = 0;
+}
+
+static int option_get(void *data, u64 *val)
+{
+ u32 *option = data;
+
+ *val = *option;
+
+ return 0;
+}
+
+static int option_set(void *data, u64 val)
+{
+ u32 *option = data;
+
+ *option = val;
+
+ if (option == &m3_ipc_state->halt) {
+ if (val)
+ wkup_m3_set_halt_late(true);
+ else
+ wkup_m3_set_halt_late(false);
+ }
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(wkup_m3_ipc_option_fops, option_get, option_set,
+ "%llu\n");
+
+static int wkup_m3_ipc_dbg_init(struct wkup_m3_ipc *m3_ipc)
+{
+ m3_ipc->dbg_path = debugfs_create_dir("wkup_m3_ipc", NULL);
+
+ if (IS_ERR(m3_ipc->dbg_path))
+ return -EINVAL;
+
+ (void)debugfs_create_file("enable_late_halt", 0644,
+ m3_ipc->dbg_path,
+ &m3_ipc->halt,
+ &wkup_m3_ipc_option_fops);
+
+ return 0;
+}
+
+static inline void wkup_m3_ipc_dbg_destroy(struct wkup_m3_ipc *m3_ipc)
+{
+ debugfs_remove_recursive(m3_ipc->dbg_path);
+}
+#else
+static inline int wkup_m3_ipc_dbg_init(struct wkup_m3_ipc *m3_ipc)
+{
+ return 0;
+}
+
+static inline void wkup_m3_ipc_dbg_destroy(struct wkup_m3_ipc *m3_ipc)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static void am33xx_txev_eoi(struct wkup_m3_ipc *m3_ipc)
+{
+ writel(AM33XX_M3_TXEV_ACK,
+ m3_ipc->ipc_mem_base + AM33XX_CONTROL_M3_TXEV_EOI);
+}
+
+static void am33xx_txev_enable(struct wkup_m3_ipc *m3_ipc)
+{
+ writel(AM33XX_M3_TXEV_ENABLE,
+ m3_ipc->ipc_mem_base + AM33XX_CONTROL_M3_TXEV_EOI);
+}
+
+static void wkup_m3_ctrl_ipc_write(struct wkup_m3_ipc *m3_ipc,
+ u32 val, int ipc_reg_num)
+{
+ if (WARN(ipc_reg_num < 0 || ipc_reg_num > AM33XX_CTRL_IPC_REG_COUNT,
+ "ipc register operation out of range"))
+ return;
+
+ writel(val, m3_ipc->ipc_mem_base +
+ AM33XX_CTRL_IPC_REG_OFFSET(ipc_reg_num));
+}
+
+static unsigned int wkup_m3_ctrl_ipc_read(struct wkup_m3_ipc *m3_ipc,
+ int ipc_reg_num)
+{
+ if (WARN(ipc_reg_num < 0 || ipc_reg_num > AM33XX_CTRL_IPC_REG_COUNT,
+ "ipc register operation out of range"))
+ return 0;
+
+ return readl(m3_ipc->ipc_mem_base +
+ AM33XX_CTRL_IPC_REG_OFFSET(ipc_reg_num));
+}
+
+static int wkup_m3_fw_version_read(struct wkup_m3_ipc *m3_ipc)
+{
+ int val;
+
+ val = wkup_m3_ctrl_ipc_read(m3_ipc, 2);
+
+ return val & M3_FW_VERSION_MASK;
+}
+
+static irqreturn_t wkup_m3_txev_handler(int irq, void *ipc_data)
+{
+ struct wkup_m3_ipc *m3_ipc = ipc_data;
+ struct device *dev = m3_ipc->dev;
+ int ver = 0;
+
+ am33xx_txev_eoi(m3_ipc);
+
+ switch (m3_ipc->state) {
+ case M3_STATE_RESET:
+ ver = wkup_m3_fw_version_read(m3_ipc);
+
+ if (ver == M3_VERSION_UNKNOWN ||
+ ver < M3_BASELINE_VERSION) {
+ dev_warn(dev, "CM3 Firmware Version %x not supported\n",
+ ver);
+ } else {
+ dev_info(dev, "CM3 Firmware Version = 0x%x\n", ver);
+ }
+
+ m3_ipc->state = M3_STATE_INITED;
+ wkup_m3_init_scale_data(m3_ipc, dev);
+ complete(&m3_ipc->sync_complete);
+ break;
+ case M3_STATE_MSG_FOR_RESET:
+ m3_ipc->state = M3_STATE_INITED;
+ complete(&m3_ipc->sync_complete);
+ break;
+ case M3_STATE_MSG_FOR_LP:
+ complete(&m3_ipc->sync_complete);
+ break;
+ case M3_STATE_UNKNOWN:
+ dev_warn(dev, "Unknown CM3 State\n");
+ }
+
+ am33xx_txev_enable(m3_ipc);
+
+ return IRQ_HANDLED;
+}
+
+static int wkup_m3_ping(struct wkup_m3_ipc *m3_ipc)
+{
+ struct device *dev = m3_ipc->dev;
+ mbox_msg_t dummy_msg = 0;
+ int ret;
+
+ if (!m3_ipc->mbox) {
+ dev_err(dev,
+ "No IPC channel to communicate with wkup_m3!\n");
+ return -EIO;
+ }
+
+ /*
+ * Write a dummy message to the mailbox in order to trigger the RX
+ * interrupt to alert the M3 that data is available in the IPC
+ * registers. We must enable the IRQ here and disable it after in
+ * the RX callback to avoid multiple interrupts being received
+ * by the CM3.
+ */
+ ret = mbox_send_message(m3_ipc->mbox, &dummy_msg);
+ if (ret < 0) {
+ dev_err(dev, "%s: mbox_send_message() failed: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&m3_ipc->sync_complete,
+ msecs_to_jiffies(500));
+ if (!ret) {
+ dev_err(dev, "MPU<->CM3 sync failure\n");
+ m3_ipc->state = M3_STATE_UNKNOWN;
+ return -EIO;
+ }
+
+ mbox_client_txdone(m3_ipc->mbox, 0);
+ return 0;
+}
+
+static int wkup_m3_ping_noirq(struct wkup_m3_ipc *m3_ipc)
+{
+ struct device *dev = m3_ipc->dev;
+ mbox_msg_t dummy_msg = 0;
+ int ret;
+
+ if (!m3_ipc->mbox) {
+ dev_err(dev,
+ "No IPC channel to communicate with wkup_m3!\n");
+ return -EIO;
+ }
+
+ ret = mbox_send_message(m3_ipc->mbox, &dummy_msg);
+ if (ret < 0) {
+ dev_err(dev, "%s: mbox_send_message() failed: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ mbox_client_txdone(m3_ipc->mbox, 0);
+ return 0;
+}
+
+static int wkup_m3_is_available(struct wkup_m3_ipc *m3_ipc)
+{
+ return ((m3_ipc->state != M3_STATE_RESET) &&
+ (m3_ipc->state != M3_STATE_UNKNOWN));
+}
+
+static void wkup_m3_set_vtt_gpio(struct wkup_m3_ipc *m3_ipc, int gpio)
+{
+ m3_ipc->vtt_conf = (1 << IPC_VTT_STAT_SHIFT) |
+ (gpio << IPC_VTT_GPIO_PIN_SHIFT);
+}
+
+static void wkup_m3_set_io_isolation(struct wkup_m3_ipc *m3_ipc)
+{
+ m3_ipc->isolation_conf = (1 << IPC_IO_ISOLATION_STAT_SHIFT);
+}
+
+/* Public functions */
+/**
+ * wkup_m3_set_mem_type - Pass wkup_m3 which type of memory is in use
+ * @m3_ipc: Pointer to wkup_m3_ipc context
+ * @mem_type: memory type value read directly from emif
+ *
+ * wkup_m3 must know what memory type is in use to properly suspend
+ * and resume.
+ */
+static void wkup_m3_set_mem_type(struct wkup_m3_ipc *m3_ipc, int mem_type)
+{
+ m3_ipc->mem_type = mem_type;
+}
+
+/**
+ * wkup_m3_set_resume_address - Pass wkup_m3 resume address
+ * @m3_ipc: Pointer to wkup_m3_ipc context
+ * @addr: Physical address from which resume code should execute
+ */
+static void wkup_m3_set_resume_address(struct wkup_m3_ipc *m3_ipc, void *addr)
+{
+ m3_ipc->resume_addr = (unsigned long)addr;
+}
+
+/**
+ * wkup_m3_request_pm_status - Retrieve wkup_m3 status code after suspend
+ * @m3_ipc: Pointer to wkup_m3_ipc context
+ *
+ * Returns code representing the status of a low power mode transition.
+ * 0 - Successful transition
+ * 1 - Failure to transition to low power state
+ */
+static int wkup_m3_request_pm_status(struct wkup_m3_ipc *m3_ipc)
+{
+ unsigned int i;
+ int val;
+
+ val = wkup_m3_ctrl_ipc_read(m3_ipc, 1);
+
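+ /* Extract the status response field from IPC register 1 */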
+ i = M3_STATUS_RESP_MASK & val;
+ i >>= __ffs(M3_STATUS_RESP_MASK);
+
+ return i;
+}
+
+/**
+ * wkup_m3_prepare_low_power - Request preparation for transition to
+ * low power state
+ * @m3_ipc: Pointer to wkup_m3_ipc context
+ * @state: A kernel suspend state to enter, either MEM or STANDBY
+ *
+ * Returns 0 if preparation was successful, otherwise returns error code
+ */
+static int wkup_m3_prepare_low_power(struct wkup_m3_ipc *m3_ipc, int state)
+{
+ struct device *dev = m3_ipc->dev;
+ int m3_power_state;
+ int ret = 0;
+
+ if (!wkup_m3_is_available(m3_ipc))
+ return -ENODEV;
+
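+ /* Translate the requested kernel suspend state into the matching CM3 IPC command */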
+ switch (state) {
+ case WKUP_M3_DEEPSLEEP:
+ m3_power_state = IPC_CMD_DS0;
+ wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->volt_scale_offsets, 5);
+ break;
+ case WKUP_M3_STANDBY:
+ m3_power_state = IPC_CMD_STANDBY;
+ wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 5);
+ break;
+ case WKUP_M3_IDLE:
+ m3_power_state = IPC_CMD_IDLE;
+ wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 5);
+ break;
+ default:
+ return 1;
+ }
+
+ /* Program each required IPC register then write defaults to others */
+ wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->resume_addr, 0);
+ wkup_m3_ctrl_ipc_write(m3_ipc, m3_power_state, 1);
+ wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->mem_type |
+ m3_ipc->vtt_conf |
+ m3_ipc->isolation_conf |
+ m3_ipc->halt, 4);
+
+ wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 2);
+ wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 3);
+ wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 6);
+ wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 7);
+
+ m3_ipc->state = M3_STATE_MSG_FOR_LP;
+
+ if (state == WKUP_M3_IDLE)
+ ret = wkup_m3_ping_noirq(m3_ipc);
+ else
+ ret = wkup_m3_ping(m3_ipc);
+
+ if (ret) {
+ dev_err(dev, "Unable to ping CM3\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * wkup_m3_finish_low_power - Return m3 to reset state
+ * @m3_ipc: Pointer to wkup_m3_ipc context
+ *
+ * Returns 0 if reset was successful, otherwise returns error code
+ */
+static int wkup_m3_finish_low_power(struct wkup_m3_ipc *m3_ipc)
+{
+ struct device *dev = m3_ipc->dev;
+ int ret = 0;
+
+ if (!wkup_m3_is_available(m3_ipc))
+ return -ENODEV;
+
+ wkup_m3_ctrl_ipc_write(m3_ipc, IPC_CMD_RESET, 1);
+ wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 2);
+
+ m3_ipc->state = M3_STATE_MSG_FOR_RESET;
+
+ ret = wkup_m3_ping(m3_ipc);
+ if (ret) {
+ dev_err(dev, "Unable to ping CM3\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * wkup_m3_request_wake_src - Get the wakeup source info passed from wkup_m3
+ * @m3_ipc: Pointer to wkup_m3_ipc context
+ */
+static const char *wkup_m3_request_wake_src(struct wkup_m3_ipc *m3_ipc)
+{
+ unsigned int wakeup_src_idx;
+ int j, val;
+
+ val = wkup_m3_ctrl_ipc_read(m3_ipc, 6);
+
+ wakeup_src_idx = val & M3_WAKE_SRC_MASK;
+
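+ /* Look up the source name; the last table entry is the fallback when no IRQ number matches */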
+ for (j = 0; j < ARRAY_SIZE(wakeups) - 1; j++) {
+ if (wakeups[j].irq_nr == wakeup_src_idx)
+ return wakeups[j].src;
+ }
+ return wakeups[j].src;
+}
+
+/**
+ * wkup_m3_set_rtc_only - Set the rtc_only flag
+ * @m3_ipc: Pointer to wkup_m3_ipc context
+ */
+static void wkup_m3_set_rtc_only(struct wkup_m3_ipc *m3_ipc)
+{
+ if (m3_ipc_state)
+ m3_ipc_state->is_rtc_only = true;
+}
+
+static struct wkup_m3_ipc_ops ipc_ops = {
+ .set_mem_type = wkup_m3_set_mem_type,
+ .set_resume_address = wkup_m3_set_resume_address,
+ .prepare_low_power = wkup_m3_prepare_low_power,
+ .finish_low_power = wkup_m3_finish_low_power,
+ .request_pm_status = wkup_m3_request_pm_status,
+ .request_wake_src = wkup_m3_request_wake_src,
+ .set_rtc_only = wkup_m3_set_rtc_only,
+};
+
+/**
+ * wkup_m3_ipc_get - Return handle to wkup_m3_ipc
+ *
+ * Returns NULL if the wkup_m3 is not yet available, otherwise returns
+ * pointer to wkup_m3_ipc struct.
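+ *
+ * Callers should retry later (for example via deferred probe) when NULL is
+ * returned, and drop the acquired device reference with wkup_m3_ipc_put()
+ * once they are done with the handle.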
+ */
+struct wkup_m3_ipc *wkup_m3_ipc_get(void)
+{
+ if (m3_ipc_state)
+ get_device(m3_ipc_state->dev);
+ else
+ return NULL;
+
+ return m3_ipc_state;
+}
+EXPORT_SYMBOL_GPL(wkup_m3_ipc_get);
+
+/**
+ * wkup_m3_ipc_put - Free handle to wkup_m3_ipc returned from wkup_m3_ipc_get
+ * @m3_ipc: A pointer to wkup_m3_ipc struct returned by wkup_m3_ipc_get
+ */
+void wkup_m3_ipc_put(struct wkup_m3_ipc *m3_ipc)
+{
+ if (m3_ipc_state)
+ put_device(m3_ipc_state->dev);
+}
+EXPORT_SYMBOL_GPL(wkup_m3_ipc_put);
+
+static int wkup_m3_rproc_boot_thread(void *arg)
+{
+ struct wkup_m3_ipc *m3_ipc = arg;
+ struct device *dev = m3_ipc->dev;
+ int ret;
+
+ init_completion(&m3_ipc->sync_complete);
+
+ ret = rproc_boot(m3_ipc->rproc);
+ if (ret)
+ dev_err(dev, "rproc_boot failed\n");
+ else
+ m3_ipc_state = m3_ipc;
+
+ return 0;
+}
+
+static int wkup_m3_ipc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int irq, ret, temp;
+ phandle rproc_phandle;
+ struct rproc *m3_rproc;
+ struct task_struct *task;
+ struct wkup_m3_ipc *m3_ipc;
+ struct device_node *np = dev->of_node;
+
+ m3_ipc = devm_kzalloc(dev, sizeof(*m3_ipc), GFP_KERNEL);
+ if (!m3_ipc)
+ return -ENOMEM;
+
+ m3_ipc->ipc_mem_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(m3_ipc->ipc_mem_base))
+ return PTR_ERR(m3_ipc->ipc_mem_base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, wkup_m3_txev_handler,
+ 0, "wkup_m3_txev", m3_ipc);
+ if (ret) {
+ dev_err(dev, "request_irq failed\n");
+ return ret;
+ }
+
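+ /* Set up the mailbox client used to kick the CM3; transmissions do not block */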
+ m3_ipc->mbox_client.dev = dev;
+ m3_ipc->mbox_client.tx_done = NULL;
+ m3_ipc->mbox_client.tx_prepare = NULL;
+ m3_ipc->mbox_client.rx_callback = NULL;
+ m3_ipc->mbox_client.tx_block = false;
+ m3_ipc->mbox_client.knows_txdone = false;
+
+ m3_ipc->mbox = mbox_request_channel(&m3_ipc->mbox_client, 0);
+
+ if (IS_ERR(m3_ipc->mbox)) {
+ dev_err(dev, "IPC Request for A8->M3 Channel failed! %ld\n",
+ PTR_ERR(m3_ipc->mbox));
+ return PTR_ERR(m3_ipc->mbox);
+ }
+
+ if (of_property_read_u32(dev->of_node, "ti,rproc", &rproc_phandle)) {
+ dev_err(&pdev->dev, "could not get rproc phandle\n");
+ ret = -ENODEV;
+ goto err_free_mbox;
+ }
+
+ m3_rproc = rproc_get_by_phandle(rproc_phandle);
+ if (!m3_rproc) {
+ dev_err(&pdev->dev, "could not get rproc handle\n");
+ ret = -EPROBE_DEFER;
+ goto err_free_mbox;
+ }
+
+ m3_ipc->rproc = m3_rproc;
+ m3_ipc->dev = dev;
+ m3_ipc->state = M3_STATE_RESET;
+
+ m3_ipc->ops = &ipc_ops;
+
+ if (!of_property_read_u32(np, "ti,vtt-gpio-pin", &temp)) {
+ if (temp >= 0 && temp <= 31)
+ wkup_m3_set_vtt_gpio(m3_ipc, temp);
+ else
+ dev_warn(dev, "Invalid VTT GPIO(%d) pin\n", temp);
+ }
+
+ if (of_property_read_bool(np, "ti,set-io-isolation"))
+ wkup_m3_set_io_isolation(m3_ipc);
+
+ ret = of_property_read_string(np, "firmware-name",
+ &m3_ipc->sd_fw_name);
+ if (ret) {
+ dev_dbg(dev, "Voltage scaling data blob not provided from DT.\n");
+ }
+
+ /*
+ * Wait for firmware loading completion in a thread so we
+ * can boot the wkup_m3 as soon as it's ready without holding
+ * up kernel boot
+ */
+ task = kthread_run(wkup_m3_rproc_boot_thread, m3_ipc,
+ "wkup_m3_rproc_loader");
+
+ if (IS_ERR(task)) {
+ dev_err(dev, "can't create rproc_boot thread\n");
+ ret = PTR_ERR(task);
+ goto err_put_rproc;
+ }
+
+ wkup_m3_ipc_dbg_init(m3_ipc);
+
+ return 0;
+
+err_put_rproc:
+ rproc_put(m3_rproc);
+err_free_mbox:
+ mbox_free_channel(m3_ipc->mbox);
+ return ret;
+}
+
+static int wkup_m3_ipc_remove(struct platform_device *pdev)
+{
+ wkup_m3_ipc_dbg_destroy(m3_ipc_state);
+
+ mbox_free_channel(m3_ipc_state->mbox);
+
+ rproc_shutdown(m3_ipc_state->rproc);
+ rproc_put(m3_ipc_state->rproc);
+
+ m3_ipc_state = NULL;
+
+ return 0;
+}
+
+static int __maybe_unused wkup_m3_ipc_suspend(struct device *dev)
+{
+ /*
+ * Nothing needs to be done on suspend even with rtc_only flag set
+ */
+ return 0;
+}
+
+static int __maybe_unused wkup_m3_ipc_resume(struct device *dev)
+{
+ if (m3_ipc_state->is_rtc_only) {
+ rproc_shutdown(m3_ipc_state->rproc);
+ rproc_boot(m3_ipc_state->rproc);
+ }
+
+ m3_ipc_state->is_rtc_only = false;
+
+ return 0;
+}
+
+static const struct dev_pm_ops wkup_m3_ipc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(wkup_m3_ipc_suspend, wkup_m3_ipc_resume)
+};
+
+static const struct of_device_id wkup_m3_ipc_of_match[] = {
+ { .compatible = "ti,am3352-wkup-m3-ipc", },
+ { .compatible = "ti,am4372-wkup-m3-ipc", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, wkup_m3_ipc_of_match);
+
+static struct platform_driver wkup_m3_ipc_driver = {
+ .probe = wkup_m3_ipc_probe,
+ .remove = wkup_m3_ipc_remove,
+ .driver = {
+ .name = "wkup_m3_ipc",
+ .of_match_table = wkup_m3_ipc_of_match,
+ .pm = &wkup_m3_ipc_pm_ops,
+ },
+};
+
+module_platform_driver(wkup_m3_ipc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("wkup m3 remote processor ipc driver");
+MODULE_AUTHOR("Dave Gerlach <d-gerlach@ti.com>");
diff --git a/drivers/soc/ux500/Kconfig b/drivers/soc/ux500/Kconfig
new file mode 100644
index 0000000000..0e04272edf
--- /dev/null
+++ b/drivers/soc/ux500/Kconfig
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config UX500_SOC_ID
+ bool "SoC bus for ST-Ericsson ux500"
+ depends on ARCH_U8500 || COMPILE_TEST
+ default ARCH_U8500
+ help
+ Include support for the SoC bus on the ST-Ericsson Ux500 series
+ providing some sysfs information about the ASIC variant.
diff --git a/drivers/soc/ux500/Makefile b/drivers/soc/ux500/Makefile
new file mode 100644
index 0000000000..f1645397d6
--- /dev/null
+++ b/drivers/soc/ux500/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_UX500_SOC_ID) += ux500-soc-id.o
diff --git a/drivers/soc/ux500/ux500-soc-id.c b/drivers/soc/ux500/ux500-soc-id.c
new file mode 100644
index 0000000000..27d6e25a01
--- /dev/null
+++ b/drivers/soc/ux500/ux500-soc-id.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/sys_soc.h>
+
+#include <asm/cputype.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+#include <asm/mach/map.h>
+
+/**
+ * struct dbx500_asic_id - fields of the ASIC ID
+ * @process: the manufacturing process, 0x40 is 40 nm, 0x00 is "standard"
+ * @partnumber: hitherto 0x8500 for DB8500
+ * @revision: version code in the series
+ */
+struct dbx500_asic_id {
+ u16 partnumber;
+ u8 revision;
+ u8 process;
+};
+
+static struct dbx500_asic_id dbx500_id;
+
+static unsigned int __init ux500_read_asicid(phys_addr_t addr)
+{
+ void __iomem *virt = ioremap(addr, 4);
+ unsigned int asicid;
+
+ if (!virt)
+ return 0;
+
+ asicid = readl(virt);
+ iounmap(virt);
+
+ return asicid;
+}
+
+static void ux500_print_soc_info(unsigned int asicid)
+{
+ unsigned int rev = dbx500_id.revision;
+
+ pr_info("DB%4x ", dbx500_id.partnumber);
+
+ if (rev == 0x01)
+ pr_cont("Early Drop");
+ else if (rev >= 0xA0)
+ pr_cont("v%d.%d", (rev >> 4) - 0xA + 1, rev & 0xf);
+ else
+ pr_cont("Unknown");
+
+ pr_cont(" [%#010x]\n", asicid);
+}
+
+static unsigned int partnumber(unsigned int asicid)
+{
+ return (asicid >> 8) & 0xffff;
+}
+
+/*
+ * SOC MIDR ASICID ADDRESS ASICID VALUE
+ * DB8500ed 0x410fc090 0x9001FFF4 0x00850001
+ * DB8500v1 0x411fc091 0x9001FFF4 0x008500A0
+ * DB8500v1.1 0x411fc091 0x9001FFF4 0x008500A1
+ * DB8500v2 0x412fc091 0x9001DBF4 0x008500B0
+ * DB8520v2.2 0x412fc091 0x9001DBF4 0x008500B2
+ * DB5500v1 0x412fc091 0x9001FFF4 0x005500A0
+ * DB9540 0x413fc090 0xFFFFDBF4 0x009540xx
+ */
+
+static void __init ux500_setup_id(void)
+{
+ unsigned int cpuid = read_cpuid_id();
+ unsigned int asicid = 0;
+ phys_addr_t addr = 0;
+
+ switch (cpuid) {
+ case 0x410fc090: /* DB8500ed */
+ case 0x411fc091: /* DB8500v1 */
+ addr = 0x9001FFF4;
+ break;
+
+ case 0x412fc091: /* DB8520 / DB8500v2 / DB5500v1 */
+ asicid = ux500_read_asicid(0x9001DBF4);
+ if (partnumber(asicid) == 0x8500 ||
+ partnumber(asicid) == 0x8520)
+ /* DB8500v2 */
+ break;
+
+ /* DB5500v1 */
+ addr = 0x9001FFF4;
+ break;
+
+ case 0x413fc090: /* DB9540 */
+ addr = 0xFFFFDBF4;
+ break;
+ }
+
+ if (addr)
+ asicid = ux500_read_asicid(addr);
+
+ if (!asicid) {
+ pr_err("Unable to identify SoC\n");
+ BUG();
+ }
+
+ dbx500_id.process = asicid >> 24;
+ dbx500_id.partnumber = partnumber(asicid);
+ dbx500_id.revision = asicid & 0xff;
+
+ ux500_print_soc_info(asicid);
+}
+
+static const char * __init ux500_get_machine(void)
+{
+ return kasprintf(GFP_KERNEL, "DB%4x", dbx500_id.partnumber);
+}
+
+static const char * __init ux500_get_family(void)
+{
+ return kasprintf(GFP_KERNEL, "ux500");
+}
+
+static const char * __init ux500_get_revision(void)
+{
+ unsigned int rev = dbx500_id.revision;
+
+ if (rev == 0x01)
+ return kasprintf(GFP_KERNEL, "%s", "ED");
+ else if (rev >= 0xA0)
+ return kasprintf(GFP_KERNEL, "%d.%d",
+ (rev >> 4) - 0xA + 1, rev & 0xf);
+
+ return kasprintf(GFP_KERNEL, "%s", "Unknown");
+}
+
+static ssize_t
+process_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ if (dbx500_id.process == 0x00)
+ return sprintf(buf, "Standard\n");
+
+ return sprintf(buf, "%02xnm\n", dbx500_id.process);
+}
+
+static DEVICE_ATTR_RO(process);
+
+static struct attribute *ux500_soc_attrs[] = {
+ &dev_attr_process.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(ux500_soc);
+
+static const char *db8500_read_soc_id(struct device_node *backupram)
+{
+ void __iomem *base;
+ const char *retstr;
+ u32 uid[5];
+
+ base = of_iomap(backupram, 0);
+ if (!base)
+ return NULL;
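+
+ /* The 160-bit SoC ID is read from offset 0x1fc0 in the backup RAM */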
+ memcpy_fromio(uid, base + 0x1fc0, sizeof(uid));
+
+ /* Throw these device-specific numbers into the entropy pool */
+ add_device_randomness(uid, sizeof(uid));
+ retstr = kasprintf(GFP_KERNEL, "%08x%08x%08x%08x%08x",
+ uid[0], uid[1], uid[2], uid[3], uid[4]);
+ iounmap(base);
+ return retstr;
+}
+
+static void __init soc_info_populate(struct soc_device_attribute *soc_dev_attr,
+ struct device_node *backupram)
+{
+ soc_dev_attr->soc_id = db8500_read_soc_id(backupram);
+ soc_dev_attr->machine = ux500_get_machine();
+ soc_dev_attr->family = ux500_get_family();
+ soc_dev_attr->revision = ux500_get_revision();
+ soc_dev_attr->custom_attr_group = ux500_soc_groups[0];
+}
+
+static int __init ux500_soc_device_init(void)
+{
+ struct soc_device *soc_dev;
+ struct soc_device_attribute *soc_dev_attr;
+ struct device_node *backupram;
+
+ backupram = of_find_compatible_node(NULL, NULL, "ste,dbx500-backupram");
+ if (!backupram)
+ return 0;
+
+ ux500_setup_id();
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr) {
+ of_node_put(backupram);
+ return -ENOMEM;
+ }
+
+ soc_info_populate(soc_dev_attr, backupram);
+ of_node_put(backupram);
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ kfree(soc_dev_attr);
+ return PTR_ERR(soc_dev);
+ }
+
+ return 0;
+}
+subsys_initcall(ux500_soc_device_init);
diff --git a/drivers/soc/versatile/Kconfig b/drivers/soc/versatile/Kconfig
new file mode 100644
index 0000000000..c3792c0a84
--- /dev/null
+++ b/drivers/soc/versatile/Kconfig
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# ARM Versatile SoC drivers
+#
+config SOC_INTEGRATOR_CM
+ bool "SoC bus device for the ARM Integrator platform core modules"
+ depends on ARCH_INTEGRATOR
+ select SOC_BUS
+ help
+ Include support for the SoC bus on the ARM Integrator platform
+ core modules providing some sysfs information about the ASIC
+ variant.
+
+config SOC_REALVIEW
+ bool "SoC bus device for the ARM RealView platforms"
+ depends on ARCH_REALVIEW
+ select SOC_BUS
+ help
+ Include support for the SoC bus on the ARM RealView platforms
+ providing some sysfs information about the ASIC variant.
diff --git a/drivers/soc/versatile/Makefile b/drivers/soc/versatile/Makefile
new file mode 100644
index 0000000000..1e0a37c0a5
--- /dev/null
+++ b/drivers/soc/versatile/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_SOC_INTEGRATOR_CM) += soc-integrator.o
+obj-$(CONFIG_SOC_REALVIEW) += soc-realview.o
diff --git a/drivers/soc/versatile/soc-integrator.c b/drivers/soc/versatile/soc-integrator.c
new file mode 100644
index 0000000000..bab4ad87aa
--- /dev/null
+++ b/drivers/soc/versatile/soc-integrator.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Linaro Ltd.
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ */
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/of.h>
+
+#define INTEGRATOR_HDR_ID_OFFSET 0x00
+
+static u32 integrator_coreid;
+
+static const struct of_device_id integrator_cm_match[] = {
+ { .compatible = "arm,core-module-integrator", },
+ { }
+};
+
+static const char *integrator_arch_str(u32 id)
+{
+ switch ((id >> 16) & 0xff) {
+ case 0x00:
+ return "ASB little-endian";
+ case 0x01:
+ return "AHB little-endian";
+ case 0x03:
+ return "AHB-Lite system bus, bi-endian";
+ case 0x04:
+ return "AHB";
+ case 0x08:
+ return "AHB system bus, ASB processor bus";
+ default:
+ return "Unknown";
+ }
+}
+
+static const char *integrator_fpga_str(u32 id)
+{
+ switch ((id >> 12) & 0xf) {
+ case 0x01:
+ return "XC4062";
+ case 0x02:
+ return "XC4085";
+ case 0x03:
+ return "XVC600";
+ case 0x04:
+ return "EPM7256AE (Altera PLD)";
+ default:
+ return "Unknown";
+ }
+}
+
+static ssize_t
+manufacturer_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%02x\n", integrator_coreid >> 24);
+}
+
+static DEVICE_ATTR_RO(manufacturer);
+
+static ssize_t
+arch_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s\n", integrator_arch_str(integrator_coreid));
+}
+
+static DEVICE_ATTR_RO(arch);
+
+static ssize_t
+fpga_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s\n", integrator_fpga_str(integrator_coreid));
+}
+
+static DEVICE_ATTR_RO(fpga);
+
+static ssize_t
+build_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%02x\n", (integrator_coreid >> 4) & 0xFF);
+}
+
+static DEVICE_ATTR_RO(build);
+
+static struct attribute *integrator_attrs[] = {
+ &dev_attr_manufacturer.attr,
+ &dev_attr_arch.attr,
+ &dev_attr_fpga.attr,
+ &dev_attr_build.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(integrator);
+
+static int __init integrator_soc_init(void)
+{
+ struct regmap *syscon_regmap;
+ struct soc_device *soc_dev;
+ struct soc_device_attribute *soc_dev_attr;
+ struct device_node *np;
+ struct device *dev;
+ u32 val;
+ int ret;
+
+ np = of_find_matching_node(NULL, integrator_cm_match);
+ if (!np)
+ return -ENODEV;
+
+ syscon_regmap = syscon_node_to_regmap(np);
+ if (IS_ERR(syscon_regmap))
+ return PTR_ERR(syscon_regmap);
+
+ ret = regmap_read(syscon_regmap, INTEGRATOR_HDR_ID_OFFSET,
+ &val);
+ if (ret)
+ return -ENODEV;
+ integrator_coreid = val;
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ soc_dev_attr->soc_id = "Integrator";
+ soc_dev_attr->machine = "Integrator";
+ soc_dev_attr->family = "Versatile";
+ soc_dev_attr->custom_attr_group = integrator_groups[0];
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ kfree(soc_dev_attr);
+ return -ENODEV;
+ }
+ dev = soc_device_to_device(soc_dev);
+
+ dev_info(dev, "Detected ARM core module:\n");
+ dev_info(dev, " Manufacturer: %02x\n", (val >> 24));
+ dev_info(dev, " Architecture: %s\n", integrator_arch_str(val));
+ dev_info(dev, " FPGA: %s\n", integrator_fpga_str(val));
+ dev_info(dev, " Build: %02x\n", (val >> 4) & 0xFF);
+ dev_info(dev, " Rev: %c\n", ('A' + (val & 0x03)));
+
+ return 0;
+}
+device_initcall(integrator_soc_init);
diff --git a/drivers/soc/versatile/soc-realview.c b/drivers/soc/versatile/soc-realview.c
new file mode 100644
index 0000000000..c6876d232d
--- /dev/null
+++ b/drivers/soc/versatile/soc-realview.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Linaro Ltd.
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ */
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/of.h>
+
+/* System ID in syscon */
+#define REALVIEW_SYS_ID_OFFSET 0x00
+
+static const struct of_device_id realview_soc_of_match[] = {
+ { .compatible = "arm,realview-eb-soc", },
+ { .compatible = "arm,realview-pb1176-soc", },
+ { .compatible = "arm,realview-pb11mp-soc", },
+ { .compatible = "arm,realview-pba8-soc", },
+ { .compatible = "arm,realview-pbx-soc", },
+ { }
+};
+
+static u32 realview_coreid;
+
+static const char *realview_arch_str(u32 id)
+{
+ switch ((id >> 8) & 0xf) {
+ case 0x04:
+ return "AHB";
+ case 0x05:
+ return "Multi-layer AXI";
+ default:
+ return "Unknown";
+ }
+}
+
+static ssize_t
+manufacturer_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%02x\n", realview_coreid >> 24);
+}
+
+static DEVICE_ATTR_RO(manufacturer);
+
+static ssize_t
+board_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "HBI-%03x\n", ((realview_coreid >> 16) & 0xfff));
+}
+
+static DEVICE_ATTR_RO(board);
+
+static ssize_t
+fpga_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s\n", realview_arch_str(realview_coreid));
+}
+
+static DEVICE_ATTR_RO(fpga);
+
+static ssize_t
+build_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%02x\n", (realview_coreid & 0xFF));
+}
+
+static DEVICE_ATTR_RO(build);
+
+static struct attribute *realview_attrs[] = {
+ &dev_attr_manufacturer.attr,
+ &dev_attr_board.attr,
+ &dev_attr_fpga.attr,
+ &dev_attr_build.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(realview);
+
+static int realview_soc_probe(struct platform_device *pdev)
+{
+ struct regmap *syscon_regmap;
+ struct soc_device *soc_dev;
+ struct soc_device_attribute *soc_dev_attr;
+ struct device_node *np = pdev->dev.of_node;
+ int ret;
+
+ syscon_regmap = syscon_regmap_lookup_by_phandle(np, "regmap");
+ if (IS_ERR(syscon_regmap))
+ return PTR_ERR(syscon_regmap);
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ ret = of_property_read_string(np, "compatible",
+ &soc_dev_attr->soc_id);
+ if (ret)
+ return -EINVAL;
+
+ soc_dev_attr->machine = "RealView";
+ soc_dev_attr->family = "Versatile";
+ soc_dev_attr->custom_attr_group = realview_groups[0];
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ kfree(soc_dev_attr);
+ return -ENODEV;
+ }
+ ret = regmap_read(syscon_regmap, REALVIEW_SYS_ID_OFFSET,
+ &realview_coreid);
+ if (ret)
+ return -ENODEV;
+
+ dev_info(&pdev->dev, "RealView Syscon Core ID: 0x%08x, HBI-%03x\n",
+ realview_coreid,
+ ((realview_coreid >> 16) & 0xfff));
+ /* FIXME: add attributes for SoC to sysfs */
+ return 0;
+}
+
+static struct platform_driver realview_soc_driver = {
+ .probe = realview_soc_probe,
+ .driver = {
+ .name = "realview-soc",
+ .of_match_table = realview_soc_of_match,
+ },
+};
+builtin_platform_driver(realview_soc_driver);
diff --git a/drivers/soc/xilinx/Kconfig b/drivers/soc/xilinx/Kconfig
new file mode 100644
index 0000000000..8a755a5c88
--- /dev/null
+++ b/drivers/soc/xilinx/Kconfig
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: GPL-2.0
+menu "Xilinx SoC drivers"
+
+config ZYNQMP_POWER
+ bool "Enable Xilinx Zynq MPSoC Power Management driver"
+ depends on PM && ZYNQMP_FIRMWARE
+ default y
+ select MAILBOX
+ select ZYNQMP_IPI_MBOX
+ help
+ Say yes to enable power management support for the ZynqMP SoC.
+ This driver uses the firmware driver as an interface for power
+ management requests to the firmware. It registers an ISR and a
+ mailbox client to handle power management callbacks from the
+ firmware.
+
+ If in doubt, say N.
+
+config ZYNQMP_PM_DOMAINS
+ bool "Enable Zynq MPSoC generic PM domains"
+ default y
+ depends on PM && ZYNQMP_FIRMWARE
+ select PM_GENERIC_DOMAINS
+ help
+ Say yes to enable device power management through PM domains.
+ If in doubt, say N.
+
+config XLNX_EVENT_MANAGER
+ bool "Enable Xilinx Event Management Driver"
+ depends on ZYNQMP_FIRMWARE
+ default ZYNQMP_FIRMWARE
+ help
+ Say yes to enable event management support for Xilinx.
+ This driver uses the firmware driver as an interface for event/power
+ management requests to the firmware.
+
+ If in doubt, say N.
+endmenu
diff --git a/drivers/soc/xilinx/Makefile b/drivers/soc/xilinx/Makefile
new file mode 100644
index 0000000000..33d94395fd
--- /dev/null
+++ b/drivers/soc/xilinx/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_ZYNQMP_POWER) += zynqmp_power.o
+obj-$(CONFIG_XLNX_EVENT_MANAGER) += xlnx_event_manager.o
diff --git a/drivers/soc/xilinx/xlnx_event_manager.c b/drivers/soc/xilinx/xlnx_event_manager.c
new file mode 100644
index 0000000000..86a048a10a
--- /dev/null
+++ b/drivers/soc/xilinx/xlnx_event_manager.c
@@ -0,0 +1,704 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Event Management Driver
+ *
+ * Copyright (C) 2021 Xilinx, Inc.
+ *
+ * Abhyuday Godhasara <abhyuday.godhasara@xilinx.com>
+ */
+
+#include <linux/cpuhotplug.h>
+#include <linux/firmware/xlnx-event-manager.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/hashtable.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+static DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number1);
+
+static int virq_sgi;
+static int event_manager_availability = -EACCES;
+
+/* SGI number used for Event management driver */
+#define XLNX_EVENT_SGI_NUM (15)
+
+/* Max number of drivers that can register for the same event */
+#define MAX_DRIVER_PER_EVENT (10U)
+
+/* Hash table order for the registered event/driver map (1 << 7 = 128 buckets) */
+#define REGISTERED_DRIVER_MAX_ORDER (7)
+
+#define MAX_BITS (32U) /* Number of bits available for error mask */
+
+#define FIRMWARE_VERSION_MASK (0xFFFFU)
+#define REGISTER_NOTIFIER_FIRMWARE_VERSION (2U)
+
+static DEFINE_HASHTABLE(reg_driver_map, REGISTERED_DRIVER_MAX_ORDER);
+static int sgi_num = XLNX_EVENT_SGI_NUM;
+
+static bool is_need_to_unregister;
+
+/**
+ * struct agent_cb - Registered callback function and private data.
+ * @agent_data: Data passed back to handler function.
+ * @eve_cb: Callback function invoked when the event fires.
+ * @list: list_head linking this entry into the event's callback list.
+ */
+struct agent_cb {
+ void *agent_data;
+ event_cb_func_t eve_cb;
+ struct list_head list;
+};
+
+/**
+ * struct registered_event_data - Registered Event Data.
+ * @key: Combined ID (Node-Id | Event-Id) of type u64, with the Node-Id in
+ * the upper u32 and the Event-Id in the lower u32, used as the key to
+ * index into the hashmap.
+ * @cb_type: Type of API callback, such as PM_NOTIFY_CB.
+ * @wake: If this flag is set, the firmware will wake the processor if it
+ * is in a sleep or power-down state.
+ * @cb_list_head: Head of the callback data list holding the registered
+ * handlers and their private data.
+ * @hentry: hlist_node that hooks this entry into hashtable.
+ */
+struct registered_event_data {
+ u64 key;
+ enum pm_api_cb_id cb_type;
+ bool wake;
+ struct list_head cb_list_head;
+ struct hlist_node hentry;
+};
+
+static bool xlnx_is_error_event(const u32 node_id)
+{
+ if (node_id == EVENT_ERROR_PMC_ERR1 ||
+ node_id == EVENT_ERROR_PMC_ERR2 ||
+ node_id == EVENT_ERROR_PSM_ERR1 ||
+ node_id == EVENT_ERROR_PSM_ERR2)
+ return true;
+
+ return false;
+}
+
+static int xlnx_add_cb_for_notify_event(const u32 node_id, const u32 event, const bool wake,
+ event_cb_func_t cb_fun, void *data)
+{
+ u64 key = 0;
+ bool present_in_hash = false;
+ struct registered_event_data *eve_data;
+ struct agent_cb *cb_data;
+ struct agent_cb *cb_pos;
+ struct agent_cb *cb_next;
+
+ key = ((u64)node_id << 32U) | (u64)event;
+ /* Check for existing entry in hash table for given key id */
+ hash_for_each_possible(reg_driver_map, eve_data, hentry, key) {
+ if (eve_data->key == key) {
+ present_in_hash = true;
+ break;
+ }
+ }
+
+ if (!present_in_hash) {
+ /* Add new entry if not present in HASH table */
+ eve_data = kmalloc(sizeof(*eve_data), GFP_KERNEL);
+ if (!eve_data)
+ return -ENOMEM;
+ eve_data->key = key;
+ eve_data->cb_type = PM_NOTIFY_CB;
+ eve_data->wake = wake;
+ INIT_LIST_HEAD(&eve_data->cb_list_head);
+
+ cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
+ if (!cb_data) {
+ kfree(eve_data);
+ return -ENOMEM;
+ }
+ cb_data->eve_cb = cb_fun;
+ cb_data->agent_data = data;
+
+ /* Add into callback list */
+ list_add(&cb_data->list, &eve_data->cb_list_head);
+
+ /* Add into HASH table */
+ hash_add(reg_driver_map, &eve_data->hentry, key);
+ } else {
+ /* Search for callback function and private data in list */
+ list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
+ if (cb_pos->eve_cb == cb_fun &&
+ cb_pos->agent_data == data) {
+ return 0;
+ }
+ }
+
+ /* Add multiple handler and private data in list */
+ cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
+ if (!cb_data)
+ return -ENOMEM;
+ cb_data->eve_cb = cb_fun;
+ cb_data->agent_data = data;
+
+ list_add(&cb_data->list, &eve_data->cb_list_head);
+ }
+
+ return 0;
+}
+
+static int xlnx_add_cb_for_suspend(event_cb_func_t cb_fun, void *data)
+{
+ struct registered_event_data *eve_data;
+ struct agent_cb *cb_data;
+
+ /* Check for existing entry in hash table for given cb_type */
+ hash_for_each_possible(reg_driver_map, eve_data, hentry, PM_INIT_SUSPEND_CB) {
+ if (eve_data->cb_type == PM_INIT_SUSPEND_CB) {
+ pr_err("Suspend callback already registered\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Add new entry if not present */
+ eve_data = kmalloc(sizeof(*eve_data), GFP_KERNEL);
+ if (!eve_data)
+ return -ENOMEM;
+
+ eve_data->key = 0;
+ eve_data->cb_type = PM_INIT_SUSPEND_CB;
+ INIT_LIST_HEAD(&eve_data->cb_list_head);
+
+ cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
+ if (!cb_data)
+ return -ENOMEM;
+ cb_data->eve_cb = cb_fun;
+ cb_data->agent_data = data;
+
+ /* Add into callback list */
+ list_add(&cb_data->list, &eve_data->cb_list_head);
+
+ hash_add(reg_driver_map, &eve_data->hentry, PM_INIT_SUSPEND_CB);
+
+ return 0;
+}
+
+static int xlnx_remove_cb_for_suspend(event_cb_func_t cb_fun)
+{
+ bool is_callback_found = false;
+ struct registered_event_data *eve_data;
+ struct agent_cb *cb_pos;
+ struct agent_cb *cb_next;
+ struct hlist_node *tmp;
+
+ is_need_to_unregister = false;
+
+ /* Check for existing entry in hash table for given cb_type */
+ hash_for_each_possible_safe(reg_driver_map, eve_data, tmp, hentry, PM_INIT_SUSPEND_CB) {
+ if (eve_data->cb_type == PM_INIT_SUSPEND_CB) {
+ /* Delete the list of callback */
+ list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
+ if (cb_pos->eve_cb == cb_fun) {
+ is_callback_found = true;
+ list_del_init(&cb_pos->list);
+ kfree(cb_pos);
+ }
+ }
+ /* remove an object from a hashtable */
+ hash_del(&eve_data->hentry);
+ kfree(eve_data);
+ is_need_to_unregister = true;
+ }
+ }
+ if (!is_callback_found) {
+ pr_warn("Didn't find any registered callback for suspend event\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int xlnx_remove_cb_for_notify_event(const u32 node_id, const u32 event,
+ event_cb_func_t cb_fun, void *data)
+{
+ bool is_callback_found = false;
+ struct registered_event_data *eve_data;
+ u64 key = ((u64)node_id << 32U) | (u64)event;
+ struct agent_cb *cb_pos;
+ struct agent_cb *cb_next;
+ struct hlist_node *tmp;
+
+ is_need_to_unregister = false;
+
+ /* Check for existing entry in hash table for given key id */
+ hash_for_each_possible_safe(reg_driver_map, eve_data, tmp, hentry, key) {
+ if (eve_data->key == key) {
+ /* Delete the list of callback */
+ list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
+ if (cb_pos->eve_cb == cb_fun &&
+ cb_pos->agent_data == data) {
+ is_callback_found = true;
+ list_del_init(&cb_pos->list);
+ kfree(cb_pos);
+ }
+ }
+
+ /* Remove HASH table if callback list is empty */
+ if (list_empty(&eve_data->cb_list_head)) {
+ /* remove an object from a HASH table */
+ hash_del(&eve_data->hentry);
+ kfree(eve_data);
+ is_need_to_unregister = true;
+ }
+ }
+ }
+ if (!is_callback_found) {
+ pr_warn("Didn't find any registered callback for 0x%x 0x%x\n",
+ node_id, event);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * xlnx_register_event() - Register for the event.
+ * @cb_type: Type of callback from pm_api_cb_id,
+ * PM_NOTIFY_CB - for Error Events,
+ * PM_INIT_SUSPEND_CB - for suspend callback.
+ * @node_id: Node-Id related to event.
+ * @event: Event Mask for the Error Event.
+ * @wake: Flag specifying whether the subsystem should be woken upon
+ * event notification.
+ * @cb_fun: Function pointer to store the callback function.
+ * @data: Pointer for the driver instance.
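+ *
+ * For illustration only: a client interested in PMC error events could call
+ * xlnx_register_event(PM_NOTIFY_CB, EVENT_ERROR_PMC_ERR1, event_mask, false,
+ * my_handler, my_data), where event_mask, my_handler and my_data stand for
+ * the caller's own error mask, callback function and private data.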
+ *
+ * Return: Returns 0 on successful registration else error code.
+ */
+int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id, const u32 event,
+ const bool wake, event_cb_func_t cb_fun, void *data)
+{
+ int ret = 0;
+ u32 eve;
+ int pos;
+
+ if (event_manager_availability)
+ return event_manager_availability;
+
+ if (cb_type != PM_NOTIFY_CB && cb_type != PM_INIT_SUSPEND_CB) {
+ pr_err("%s() Unsupported Callback 0x%x\n", __func__, cb_type);
+ return -EINVAL;
+ }
+
+ if (!cb_fun)
+ return -EFAULT;
+
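+ /* Suspend callbacks get a single entry; error events are tracked per bit of the event mask */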
+ if (cb_type == PM_INIT_SUSPEND_CB) {
+ ret = xlnx_add_cb_for_suspend(cb_fun, data);
+ } else {
+ if (!xlnx_is_error_event(node_id)) {
+ /* Add entry for Node-Id/Event in hash table */
+ ret = xlnx_add_cb_for_notify_event(node_id, event, wake, cb_fun, data);
+ } else {
+ /* Add into Hash table */
+ for (pos = 0; pos < MAX_BITS; pos++) {
+ eve = event & (1 << pos);
+ if (!eve)
+ continue;
+
+ /* Add entry for Node-Id/Eve in hash table */
+ ret = xlnx_add_cb_for_notify_event(node_id, eve, wake, cb_fun,
+ data);
+ /* Break the loop on error */
+ if (ret)
+ break;
+ }
+ if (ret) {
+ /* Skip the event that returned the error */
+ pos--;
+ /* Remove the events registered during this call from the hash table */
+ for ( ; pos >= 0; pos--) {
+ eve = event & (1 << pos);
+ if (!eve)
+ continue;
+ xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data);
+ }
+ }
+ }
+
+ if (ret) {
+ pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__, node_id,
+ event, ret);
+ return ret;
+ }
+
+ /* Register for Node-Id/Event combination in firmware */
+ ret = zynqmp_pm_register_notifier(node_id, event, wake, true);
+ if (ret) {
+ pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__, node_id,
+ event, ret);
+ /* Remove already registered event from hash table */
+ if (xlnx_is_error_event(node_id)) {
+ for (pos = 0; pos < MAX_BITS; pos++) {
+ eve = event & (1 << pos);
+ if (!eve)
+ continue;
+ xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data);
+ }
+ } else {
+ xlnx_remove_cb_for_notify_event(node_id, event, cb_fun, data);
+ }
+ return ret;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xlnx_register_event);
+
+/**
+ * xlnx_unregister_event() - Unregister for the event.
+ * @cb_type: Type of callback from pm_api_cb_id,
+ * PM_NOTIFY_CB - for Error Events,
+ * PM_INIT_SUSPEND_CB - for suspend callback.
+ * @node_id: Node-Id related to event.
+ * @event: Event Mask for the Error Event.
+ * @cb_fun: Function pointer of callback function.
+ * @data: Pointer of agent's private data.
+ *
+ * Return: Returns 0 on successful unregistration else error code.
+ */
+int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id, const u32 event,
+ event_cb_func_t cb_fun, void *data)
+{
+ int ret = 0;
+ u32 eve, pos;
+
+ is_need_to_unregister = false;
+
+ if (event_manager_availability)
+ return event_manager_availability;
+
+ if (cb_type != PM_NOTIFY_CB && cb_type != PM_INIT_SUSPEND_CB) {
+ pr_err("%s() Unsupported Callback 0x%x\n", __func__, cb_type);
+ return -EINVAL;
+ }
+
+ if (!cb_fun)
+ return -EFAULT;
+
+ if (cb_type == PM_INIT_SUSPEND_CB) {
+ ret = xlnx_remove_cb_for_suspend(cb_fun);
+ } else {
+ /* Remove Node-Id/Event from hash table */
+ if (!xlnx_is_error_event(node_id)) {
+ xlnx_remove_cb_for_notify_event(node_id, event, cb_fun, data);
+ } else {
+ for (pos = 0; pos < MAX_BITS; pos++) {
+ eve = event & (1 << pos);
+ if (!eve)
+ continue;
+
+ xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data);
+ }
+ }
+
+ /* Un-register if list is empty */
+ if (is_need_to_unregister) {
+ /* Un-register for Node-Id/Event combination */
+ ret = zynqmp_pm_register_notifier(node_id, event, false, false);
+ if (ret) {
+ pr_err("%s() failed for 0x%x and 0x%x: %d\n",
+ __func__, node_id, event, ret);
+ return ret;
+ }
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xlnx_unregister_event);
+
+static void xlnx_call_suspend_cb_handler(const u32 *payload)
+{
+ bool is_callback_found = false;
+ struct registered_event_data *eve_data;
+ u32 cb_type = payload[0];
+ struct agent_cb *cb_pos;
+ struct agent_cb *cb_next;
+
+ /* Check for existing entry in hash table for given cb_type */
+ hash_for_each_possible(reg_driver_map, eve_data, hentry, cb_type) {
+ if (eve_data->cb_type == cb_type) {
+ list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
+ cb_pos->eve_cb(&payload[0], cb_pos->agent_data);
+ is_callback_found = true;
+ }
+ }
+ }
+ if (!is_callback_found)
+ pr_warn("Didn't find any registered callback for suspend event\n");
+}
+
+static void xlnx_call_notify_cb_handler(const u32 *payload)
+{
+ bool is_callback_found = false;
+ struct registered_event_data *eve_data;
+ u64 key = ((u64)payload[1] << 32U) | (u64)payload[2];
+ int ret;
+ struct agent_cb *cb_pos;
+ struct agent_cb *cb_next;
+
+ /* Check for existing entry in hash table for given key id */
+ hash_for_each_possible(reg_driver_map, eve_data, hentry, key) {
+ if (eve_data->key == key) {
+ list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
+ cb_pos->eve_cb(&payload[0], cb_pos->agent_data);
+ is_callback_found = true;
+ }
+
+ /* Re-register with the firmware to get future events */
+ ret = zynqmp_pm_register_notifier(payload[1], payload[2],
+ eve_data->wake, true);
+ if (ret) {
+ pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__,
+ payload[1], payload[2], ret);
+ list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head,
+ list) {
+ /* Remove already registered event from hash table */
+ xlnx_remove_cb_for_notify_event(payload[1], payload[2],
+ cb_pos->eve_cb,
+ cb_pos->agent_data);
+ }
+ }
+ }
+ }
+ if (!is_callback_found)
+ pr_warn("Didn't find any registered callback for 0x%x 0x%x\n",
+ payload[1], payload[2]);
+}
+
+static void xlnx_get_event_callback_data(u32 *buf)
+{
+ zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, 0, 0, 0, 0, buf);
+}
+
+static irqreturn_t xlnx_event_handler(int irq, void *dev_id)
+{
+ u32 cb_type, node_id, event, pos;
+ u32 payload[CB_MAX_PAYLOAD_SIZE] = {0};
+ u32 event_data[CB_MAX_PAYLOAD_SIZE] = {0};
+
+ /* Get event data */
+ xlnx_get_event_callback_data(payload);
+
+ /* First element is callback type, others are callback arguments */
+ cb_type = payload[0];
+
+ if (cb_type == PM_NOTIFY_CB) {
+ node_id = payload[1];
+ event = payload[2];
+ if (!xlnx_is_error_event(node_id)) {
+ xlnx_call_notify_cb_handler(payload);
+ } else {
+ /*
+ * Each callback expects the payload as its input argument. A single
+ * notification can report several error events at once through the
+ * error mask, so payload[2] may contain multiple error bits, while
+ * the reg_driver_map database stores one entry per node_id/error
+ * combination. Copy the payload into event_data, set event_data[2]
+ * to one error bit at a time, and hand event_data to the registered
+ * callbacks for each bit that is set.
+ */
+ memcpy(event_data, payload, (4 * CB_MAX_PAYLOAD_SIZE));
+ /* Support Multiple Error Event */
+ for (pos = 0; pos < MAX_BITS; pos++) {
+ if ((0 == (event & (1 << pos))))
+ continue;
+ event_data[2] = (event & (1 << pos));
+ xlnx_call_notify_cb_handler(event_data);
+ }
+ }
+ } else if (cb_type == PM_INIT_SUSPEND_CB) {
+ xlnx_call_suspend_cb_handler(payload);
+ } else {
+ pr_err("%s() Unsupported Callback %d\n", __func__, cb_type);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int xlnx_event_cpuhp_start(unsigned int cpu)
+{
+ enable_percpu_irq(virq_sgi, IRQ_TYPE_NONE);
+
+ return 0;
+}
+
+static int xlnx_event_cpuhp_down(unsigned int cpu)
+{
+ disable_percpu_irq(virq_sgi);
+
+ return 0;
+}
+
+static void xlnx_disable_percpu_irq(void *data)
+{
+ disable_percpu_irq(virq_sgi);
+}
+
+static int xlnx_event_init_sgi(struct platform_device *pdev)
+{
+ int ret = 0;
+ int cpu = smp_processor_id();
+ /*
+ * IRQ related structures are used for the following:
+ * for each SGI interrupt, ensure it is mapped by the GIC IRQ domain
+ * and that the corresponding Linux IRQ for the HW IRQ has a handler
+ * for interrupts received from the remote processor.
+ */
+ struct irq_domain *domain;
+ struct irq_fwspec sgi_fwspec;
+ struct device_node *interrupt_parent = NULL;
+ struct device *parent = pdev->dev.parent;
+
+ /* Find GIC controller to map SGIs. */
+ interrupt_parent = of_irq_find_parent(parent->of_node);
+ if (!interrupt_parent) {
+ dev_err(&pdev->dev, "Failed to find property for Interrupt parent\n");
+ return -EINVAL;
+ }
+
+ /* Each SGI needs to be associated with GIC's IRQ domain. */
+ domain = irq_find_host(interrupt_parent);
+ of_node_put(interrupt_parent);
+
+ /* Each mapping needs GIC domain when finding IRQ mapping. */
+ sgi_fwspec.fwnode = domain->fwnode;
+
+ /*
+ * Only a single fwspec argument is needed for this mapping:
+ * the SGI hwirq number, set below.
+ */
+ sgi_fwspec.param_count = 1;
+
+ /* Set SGI's hwirq */
+ sgi_fwspec.param[0] = sgi_num;
+ virq_sgi = irq_create_fwspec_mapping(&sgi_fwspec);
+
+ per_cpu(cpu_number1, cpu) = cpu;
+ ret = request_percpu_irq(virq_sgi, xlnx_event_handler, "xlnx_event_mgmt",
+ &cpu_number1);
+ WARN_ON(ret);
+ if (ret) {
+ irq_dispose_mapping(virq_sgi);
+ return ret;
+ }
+
+ irq_to_desc(virq_sgi);
+ irq_set_status_flags(virq_sgi, IRQ_PER_CPU);
+
+ return ret;
+}
+
+static void xlnx_event_cleanup_sgi(struct platform_device *pdev)
+{
+ int cpu = smp_processor_id();
+
+ per_cpu(cpu_number1, cpu) = cpu;
+
+ cpuhp_remove_state(CPUHP_AP_ONLINE_DYN);
+
+ on_each_cpu(xlnx_disable_percpu_irq, NULL, 1);
+
+ irq_clear_status_flags(virq_sgi, IRQ_PER_CPU);
+ free_percpu_irq(virq_sgi, &cpu_number1);
+ irq_dispose_mapping(virq_sgi);
+}
+
+static int xlnx_event_manager_probe(struct platform_device *pdev)
+{
+ int ret;
+
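+ /* Make sure the firmware is new enough to support register-notifier calls */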
+ ret = zynqmp_pm_feature(PM_REGISTER_NOTIFIER);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Feature check failed with %d\n", ret);
+ return ret;
+ }
+
+ if ((ret & FIRMWARE_VERSION_MASK) <
+ REGISTER_NOTIFIER_FIRMWARE_VERSION) {
+ dev_err(&pdev->dev, "Register notifier version error. Expected Firmware: v%d - Found: v%d\n",
+ REGISTER_NOTIFIER_FIRMWARE_VERSION,
+ ret & FIRMWARE_VERSION_MASK);
+ return -EOPNOTSUPP;
+ }
+
+ /* Initialize the SGI */
+ ret = xlnx_event_init_sgi(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "SGI init failed with %d\n", ret);
+ return ret;
+ }
+
+ /* Setup function for the CPU hot-plug cases */
+ cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "soc/event:starting",
+ xlnx_event_cpuhp_start, xlnx_event_cpuhp_down);
+
+ ret = zynqmp_pm_register_sgi(sgi_num, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "SGI %d Registration over TF-A failed with %d\n", sgi_num, ret);
+ xlnx_event_cleanup_sgi(pdev);
+ return ret;
+ }
+
+ event_manager_availability = 0;
+
+ dev_info(&pdev->dev, "SGI %d Registered over TF-A\n", sgi_num);
+ dev_info(&pdev->dev, "Xilinx Event Management driver probed\n");
+
+ return ret;
+}
+
+static void xlnx_event_manager_remove(struct platform_device *pdev)
+{
+ int i;
+ struct registered_event_data *eve_data;
+ struct hlist_node *tmp;
+ int ret;
+ struct agent_cb *cb_pos;
+ struct agent_cb *cb_next;
+
+ hash_for_each_safe(reg_driver_map, i, tmp, eve_data, hentry) {
+ list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
+ list_del_init(&cb_pos->list);
+ kfree(cb_pos);
+ }
+ hash_del(&eve_data->hentry);
+ kfree(eve_data);
+ }
+
+ ret = zynqmp_pm_register_sgi(0, 1);
+ if (ret)
+ dev_err(&pdev->dev, "SGI unregistration over TF-A failed with %d\n", ret);
+
+ xlnx_event_cleanup_sgi(pdev);
+
+ event_manager_availability = -EACCES;
+}
+
+static struct platform_driver xlnx_event_manager_driver = {
+ .probe = xlnx_event_manager_probe,
+ .remove_new = xlnx_event_manager_remove,
+ .driver = {
+ .name = "xlnx_event_manager",
+ },
+};
+module_param(sgi_num, uint, 0);
+module_platform_driver(xlnx_event_manager_driver);
diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c
new file mode 100644
index 0000000000..c2c819701e
--- /dev/null
+++ b/drivers/soc/xilinx/zynqmp_power.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Zynq MPSoC Power Management
+ *
+ * Copyright (C) 2014-2019 Xilinx, Inc.
+ *
+ * Davorin Mista <davorin.mista@aggios.com>
+ * Jolly Shah <jollys@xilinx.com>
+ * Rajan Vaja <rajan.vaja@xilinx.com>
+ */
+
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/suspend.h>
+
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/firmware/xlnx-event-manager.h>
+#include <linux/mailbox/zynqmp-ipi-message.h>
+
+/**
+ * struct zynqmp_pm_work_struct - Wrapper for struct work_struct
+ * @callback_work: Work structure
+ * @args: Callback arguments
+ */
+struct zynqmp_pm_work_struct {
+ struct work_struct callback_work;
+ u32 args[CB_ARG_CNT];
+};
+
+static struct zynqmp_pm_work_struct *zynqmp_pm_init_suspend_work;
+static struct mbox_chan *rx_chan;
+static bool event_registered;
+
+enum pm_suspend_mode {
+ PM_SUSPEND_MODE_FIRST = 0,
+ PM_SUSPEND_MODE_STD = PM_SUSPEND_MODE_FIRST,
+ PM_SUSPEND_MODE_POWER_OFF,
+};
+
+#define PM_SUSPEND_MODE_FIRST PM_SUSPEND_MODE_STD
+
+static const char *const suspend_modes[] = {
+ [PM_SUSPEND_MODE_STD] = "standard",
+ [PM_SUSPEND_MODE_POWER_OFF] = "power-off",
+};
+
+static enum pm_suspend_mode suspend_mode = PM_SUSPEND_MODE_STD;
+
+static void zynqmp_pm_get_callback_data(u32 *buf)
+{
+ zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, 0, 0, 0, 0, buf);
+}
+
+static void suspend_event_callback(const u32 *payload, void *data)
+{
+ /* First element is callback API ID, others are callback arguments */
+ if (work_pending(&zynqmp_pm_init_suspend_work->callback_work))
+ return;
+
+ /* Copy callback arguments into work's structure */
+ memcpy(zynqmp_pm_init_suspend_work->args, &payload[1],
+ sizeof(zynqmp_pm_init_suspend_work->args));
+
+ queue_work(system_unbound_wq, &zynqmp_pm_init_suspend_work->callback_work);
+}
+
+static irqreturn_t zynqmp_pm_isr(int irq, void *data)
+{
+ u32 payload[CB_PAYLOAD_SIZE];
+
+ zynqmp_pm_get_callback_data(payload);
+
+ /* First element is callback API ID, others are callback arguments */
+ if (payload[0] == PM_INIT_SUSPEND_CB) {
+ switch (payload[1]) {
+ case SUSPEND_SYSTEM_SHUTDOWN:
+ orderly_poweroff(true);
+ break;
+ case SUSPEND_POWER_REQUEST:
+ pm_suspend(PM_SUSPEND_MEM);
+ break;
+ default:
+ pr_err("%s Unsupported InitSuspendCb reason code %d\n",
+ __func__, payload[1]);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void ipi_receive_callback(struct mbox_client *cl, void *data)
+{
+ struct zynqmp_ipi_message *msg = (struct zynqmp_ipi_message *)data;
+ u32 payload[CB_PAYLOAD_SIZE];
+ int ret;
+
+ memcpy(payload, msg->data, sizeof(msg->len));
+ /* First element is callback API ID, others are callback arguments */
+ if (payload[0] == PM_INIT_SUSPEND_CB) {
+ if (work_pending(&zynqmp_pm_init_suspend_work->callback_work))
+ return;
+
+ /* Copy callback arguments into work's structure */
+ memcpy(zynqmp_pm_init_suspend_work->args, &payload[1],
+ sizeof(zynqmp_pm_init_suspend_work->args));
+
+ queue_work(system_unbound_wq,
+ &zynqmp_pm_init_suspend_work->callback_work);
+
+ /* Send NULL message to mbox controller to ack the message */
+ ret = mbox_send_message(rx_chan, NULL);
+ if (ret)
+ pr_err("IPI ack failed. Error %d\n", ret);
+ }
+}
+
+/**
+ * zynqmp_pm_init_suspend_work_fn - Initialize suspend
+ * @work: Pointer to work_struct
+ *
+ * Bottom-half of PM callback IRQ handler.
+ */
+static void zynqmp_pm_init_suspend_work_fn(struct work_struct *work)
+{
+ struct zynqmp_pm_work_struct *pm_work =
+ container_of(work, struct zynqmp_pm_work_struct, callback_work);
+
+ if (pm_work->args[0] == SUSPEND_SYSTEM_SHUTDOWN) {
+ orderly_poweroff(true);
+ } else if (pm_work->args[0] == SUSPEND_POWER_REQUEST) {
+ pm_suspend(PM_SUSPEND_MEM);
+ } else {
+ pr_err("%s Unsupported InitSuspendCb reason code %d.\n",
+ __func__, pm_work->args[0]);
+ }
+}
+
+static ssize_t suspend_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ char *s = buf;
+ int md;
+
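+ /* List every mode, wrapping the currently selected one in brackets */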
+ for (md = PM_SUSPEND_MODE_FIRST; md < ARRAY_SIZE(suspend_modes); md++)
+ if (suspend_modes[md]) {
+ if (md == suspend_mode)
+ s += sprintf(s, "[%s] ", suspend_modes[md]);
+ else
+ s += sprintf(s, "%s ", suspend_modes[md]);
+ }
+
+ /* Convert last space to newline */
+ if (s != buf)
+ *(s - 1) = '\n';
+ return (s - buf);
+}
+
+static ssize_t suspend_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int md, ret = -EINVAL;
+
+ for (md = PM_SUSPEND_MODE_FIRST; md < ARRAY_SIZE(suspend_modes); md++)
+ if (suspend_modes[md] &&
+ sysfs_streq(suspend_modes[md], buf)) {
+ ret = 0;
+ break;
+ }
+
+ if (!ret && md != suspend_mode) {
+ ret = zynqmp_pm_set_suspend_mode(md);
+ if (likely(!ret))
+ suspend_mode = md;
+ }
+
+ return ret ? ret : count;
+}
+
+static DEVICE_ATTR_RW(suspend_mode);
+
+static int zynqmp_pm_probe(struct platform_device *pdev)
+{
+ int ret, irq;
+ u32 pm_api_version;
+ struct mbox_client *client;
+
+ zynqmp_pm_get_api_version(&pm_api_version);
+
+ /* Check PM API version number */
+ if (pm_api_version < ZYNQMP_PM_VERSION)
+ return -ENODEV;
+
+ /*
+ * First try to use Xilinx Event Manager by registering suspend_event_callback
+ * for suspend/shutdown event.
+ * If xlnx_register_event() returns -EACCES (Xilinx Event Manager is
+ * not available to use) or -ENODEV (Xilinx Event Manager not compiled
+ * in), fall back to the IPI mailbox or interrupt method.
+ */
+ ret = xlnx_register_event(PM_INIT_SUSPEND_CB, 0, 0, false,
+ suspend_event_callback, NULL);
+ if (!ret) {
+ zynqmp_pm_init_suspend_work = devm_kzalloc(&pdev->dev,
+ sizeof(struct zynqmp_pm_work_struct),
+ GFP_KERNEL);
+ if (!zynqmp_pm_init_suspend_work) {
+ xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0,
+ suspend_event_callback, NULL);
+ return -ENOMEM;
+ }
+ event_registered = true;
+
+ INIT_WORK(&zynqmp_pm_init_suspend_work->callback_work,
+ zynqmp_pm_init_suspend_work_fn);
+ } else if (ret != -EACCES && ret != -ENODEV) {
+ dev_err(&pdev->dev, "Failed to Register with Xilinx Event manager %d\n", ret);
+ return ret;
+ } else if (of_property_present(pdev->dev.of_node, "mboxes")) {
+ zynqmp_pm_init_suspend_work =
+ devm_kzalloc(&pdev->dev,
+ sizeof(struct zynqmp_pm_work_struct),
+ GFP_KERNEL);
+ if (!zynqmp_pm_init_suspend_work)
+ return -ENOMEM;
+
+ INIT_WORK(&zynqmp_pm_init_suspend_work->callback_work,
+ zynqmp_pm_init_suspend_work_fn);
+ client = devm_kzalloc(&pdev->dev, sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return -ENOMEM;
+
+ client->dev = &pdev->dev;
+ client->rx_callback = ipi_receive_callback;
+
+ rx_chan = mbox_request_channel_byname(client, "rx");
+ if (IS_ERR(rx_chan)) {
+ dev_err(&pdev->dev, "Failed to request rx channel\n");
+ return PTR_ERR(rx_chan);
+ }
+ } else if (of_property_present(pdev->dev.of_node, "interrupts")) {
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ zynqmp_pm_isr,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ dev_name(&pdev->dev),
+ &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "devm_request_threaded_irq '%d' failed with %d\n",
+ irq, ret);
+ return ret;
+ }
+ } else {
+ dev_err(&pdev->dev, "Required property not found in DT node\n");
+ return -ENOENT;
+ }
+
+ ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr);
+ if (ret) {
+ if (event_registered) {
+ xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, suspend_event_callback,
+ NULL);
+ event_registered = false;
+ }
+ dev_err(&pdev->dev, "unable to create sysfs interface\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int zynqmp_pm_remove(struct platform_device *pdev)
+{
+ sysfs_remove_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr);
+ if (event_registered)
+ xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, suspend_event_callback, NULL);
+
+ if (rx_chan)
+ mbox_free_channel(rx_chan);
+
+ return 0;
+}
+
+static const struct of_device_id pm_of_match[] = {
+ { .compatible = "xlnx,zynqmp-power", },
+ { /* end of table */ },
+};
+MODULE_DEVICE_TABLE(of, pm_of_match);
+
+static struct platform_driver zynqmp_pm_platform_driver = {
+ .probe = zynqmp_pm_probe,
+ .remove = zynqmp_pm_remove,
+ .driver = {
+ .name = "zynqmp_power",
+ .of_match_table = pm_of_match,
+ },
+};
+module_platform_driver(zynqmp_pm_platform_driver);